diff --git "a/3562.jsonl" "b/3562.jsonl" new file mode 100644--- /dev/null +++ "b/3562.jsonl" @@ -0,0 +1,50 @@ +{"seq_id":"5050437","text":"from django.urls import path\n\nfrom . import views\nfrom apps.iamstudent.views import student_list_view\n\nurlpatterns = [\n path('students///', student_list_view, name='list_by_plz'),\n #path('students_testing///', views.student_list_view, name='student_list_view'),\n path('hospitals//', views.hospital_list, name='hospital_list'),\n path('hospital_registration', views.hospital_registration, name='hospital_registration'),\n path('hospital_map', views.hospital_overview, name='hopsital_map'),\n path('hospital_view//', views.hospital_view, name='hospital_view'),\n]\n","sub_path":"backend/apps/ineedstudent/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"252923424","text":"import websockets\nimport json\nimport os\nimport logging\n\n\nasync def send_message(message: str):\n response = \"Something bad happened\" \n try:\n uri = os.environ.get(\"HA_WEBSOCKET\", \"ws://supervisor/core/websocket\")\n logging.info(f'Sending message to {uri}')\n async with websockets.connect(uri) as websocket:\n\n logging.debug(f'Connecting to websocket')\n await websocket.recv()\n logging.debug(f'Connected to websocket')\n\n await websocket.send(json.dumps({\n \"type\": \"auth\",\n \"access_token\": os.environ.get(\"SUPERVISOR_TOKEN\")\n }))\n auth_response = json.loads(await websocket.recv())\n logging.debug(f'Authenticated to websocket')\n if \"type\" in auth_response and auth_response[\"type\"] == \"auth_ok\":\n await websocket.send(json.dumps({\n \"id\": 1,\n \"type\": \"conversation/process\",\n \"text\": message,\n }))\n conversation_response = json.loads(await websocket.recv())\n if \"type\" in conversation_response and \"success\" in conversation_response:\n if conversation_response[\"success\"] and conversation_response[\"type\"] == \"result\":\n response = conversation_response['result']['speech']['plain']['speech']\n else:\n logging.error(f'did not receive an expected response from home assistant {conversation_response}')\n else:\n logging.error(f'unexpected response {conversation_response}')\n else:\n logging.error(f'could not authenticate with home assistant {auth_response}')\n except Exception as e:\n logging.error(f'Unexpected error {e}')\n return response\n","sub_path":"signal/root/app/ws.py","file_name":"ws.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"286063604","text":"import numpy as np\nfrom skimage.transform import rotate\nimport cv2.cv2 as cv2\nimport os\nimport tifffile as tiff\nfrom scipy.ndimage.filters import gaussian_filter\nimport random\nfrom imgaug import augmenters as iaa\n\n\ndef img_cnsf(img):\n print(img.shape)\n list = [0, 1, 2]\n list_og = [0, 1, 2]\n while True:\n random.shuffle(list)\n if not list == list_og:\n break\n result = np.zeros(img.shape)\n\n result[:,:,0] = img[:,:,list[0]]\n result[:,:,1] = img[:, :, list[1]]\n result[:,:,2] = img[:, :, list[2]]\n return result\n\ndef gauss_filter(img, sigma = 0.5):\n img = gaussian_filter(img, sigma)\n return img\n\n\ndef get_data_aug(img, img_name, out_folder):\n\n img_flipud = np.flipud(img)\n\n img_fliplr = np.fliplr(img)\n\n img_ro90 = rotate(img, 90, preserve_range= 'true')\n img_ro180 = rotate(img, 180, preserve_range='true')\n img_ro270 = rotate(img, 270, 
preserve_range='true')\n\n img_gs03 = gauss_filter(img, sigma= 0.3)\n img_gs07 = gauss_filter(img, sigma=0.7)\n\n img_cnshuffle = img_cnsf(img)\n\n img_ab_name = os.path.splitext(img_name)[0]\n\n if (img_name.endswith('.png')):\n\n cv2.imwrite(os.path.join(out_folder, img_ab_name + '_og' + '.png'), img)\n cv2.imwrite(os.path.join(out_folder, img_ab_name + '_flipud' + '.png'), img_flipud)\n cv2.imwrite(os.path.join(out_folder, img_ab_name + '_fliplr' + '.png'), img_fliplr)\n cv2.imwrite(os.path.join(out_folder, img_ab_name + '_ro90'+ '.png'), img_ro90)\n cv2.imwrite(os.path.join(out_folder, img_ab_name + '_ro180'+ '.png'), img_ro180)\n cv2.imwrite(os.path.join(out_folder, img_ab_name + '_ro270'+ '.png'), img_ro270)\n cv2.imwrite(os.path.join(out_folder, img_ab_name + '_gs03' + '.png'), img_gs03)\n cv2.imwrite(os.path.join(out_folder, img_ab_name + '_gs07' + '.png'), img_gs07)\n cv2.imwrite(os.path.join(out_folder, img_ab_name + '_cnshuffle' + '.png'), img_cnshuffle)\n\n elif (img_name.endswith('.tif')):\n\n tiff.imsave(os.path.join(out_folder, img_ab_name + '_og' + '.tif'), img.astype(np.uint16))\n tiff.imsave(os.path.join(out_folder, img_ab_name + '_flipud' + '.tif'), img_flipud.astype(np.uint16))\n tiff.imsave(os.path.join(out_folder, img_ab_name + '_fliplr' + '.tif'), img_fliplr.astype(np.uint16))\n tiff.imsave(os.path.join(out_folder, img_ab_name + '_ro90' + '.tif'), img_ro90.astype(np.uint16))\n tiff.imsave(os.path.join(out_folder, img_ab_name + '_ro180' + '.tif'), img_ro180.astype(np.uint16))\n tiff.imsave(os.path.join(out_folder, img_ab_name + '_ro270' + '.tif'), img_ro270.astype(np.uint16))\n tiff.imsave(os.path.join(out_folder, img_ab_name + '_gs03' + '.tif'), img.astype(np.uint16))\n tiff.imsave(os.path.join(out_folder, img_ab_name + '_gs07' + '.tif'), img.astype(np.uint16))\n tiff.imsave(os.path.join(out_folder, img_ab_name + '_cnshuffle' + '.tif'), img.astype(np.uint16))\n\n\n\n #print('done')\n\n# out_folder = '/home/neuron/medical_data/ISBI2013/ann_demo'\n#\n#\n# for i in range(1,4):\n# img_name = str(i) + '.tif'\n# img_path = os.path.join(out_folder, img_name)\n#\n# img = tiff.imread(img_path)\n#\n# get_data_aug(img,img_name,out_folder)\n#\n#\nin_folder = '/home/neuron/medical_data/TNBC_NucleiSegmentation/demo_img'\nout_folder = '/home/neuron/medical_data/TNBC_NucleiSegmentation/demo_aug'\n\n\ntry:\n os.stat(os.path.dirname(out_folder + '/'))\nexcept:\n os.mkdir(os.path.dirname(out_folder + '/'))\n\n\nimg_file = os.listdir(in_folder)\n\nfor img_file_name in img_file:\n img_name = os.path.join(in_folder, img_file_name)\n img = cv2.imread(img_name)\n\n get_data_aug(img, img_file_name, out_folder)\n\n","sub_path":"examples/shapes/get_data_aug.py","file_name":"get_data_aug.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"287445213","text":"import os\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\nwith open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\n\nrequires = [\n # 'Babel >= 1.3', # Only required for generating i18n translations\n 'beaker >= 1.5.4',\n 'boto >= 2.23.0',\n 'chameleon >= 2.5.3',\n 'gevent >= 0.13.8', # Note: gevent 1.0 no longer requires libevent, it bundles libev instead\n 'greenlet >= 0.3.1',\n 'gunicorn >= 18.0',\n # 'lingua >= 1.5', # Only required for generating i18n translations\n 'M2Crypto >= 0.20.2',\n 'pycrypto >= 
2.6',\n 'Paste >= 1.5',\n 'pyramid >= 1.4',\n 'pyramid_beaker >= 0.8',\n 'pyramid_chameleon >= 0.1',\n # 'pyramid_debugtoolbar', # Optional -- helpful for development/debugging\n 'pyramid_layout >= 0.8',\n # 'pyramid_mailer >= 0.13',\n # 'pyramid_tm >= 0.7',\n 'python-dateutil <= 1.5', # Don't use 2.x series unless on Python 3\n 'simplejson >= 2.0.9',\n # 'SQLAlchemy == 0.8.3',\n # 'waitress >= 0.8.8', # Pure python WSGI server\n 'WTForms >= 1.0.2',\n]\n\nmessage_extractors = {'.': [\n ('**.py', 'lingua_python', None),\n ('**.pt', 'lingua_xml', None),\n]}\n\nsetup(\n name='koala',\n version='4.0.0-prealpha',\n description='Koala, the Eucalyptus Management Console',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Programming Language :: Python\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n ],\n author='Eucalyptus Systems',\n author_email='info@eucalyptus.com',\n url='http://www.eucalyptus.com',\n keywords='web pyramid pylons',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=requires,\n tests_require=[],\n message_extractors=message_extractors,\n test_suite=\"tests\",\n entry_points=\"\"\"\\\n [paste.app_factory]\n main = koala:main\n \"\"\",\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"410697519","text":"from google.appengine.ext.webapp.mail_handlers import InboundMailHandler\nfrom google.appengine.api import mail\nfrom google.appengine.ext import ndb\nfrom bs4 import BeautifulSoup\n\nfrom notedb import AppUser\n\nimport logging\nimport webapp2\nimport re\n\nemail_match = re.compile(r\"^\\{ *?user: *?(\\w+?@\\w+?\\.[A-Za-z]+?) *?}$\")\n\n\nclass IncomingMailHandler(InboundMailHandler):\n \n def post(self):\n message = mail.InboundEmailMessage(self.request.body) # _self_.request.body gets assigned by the http POST.\n self.sender = message.sender\n self.recipients = message.to\n try:\n self.attachments = message.attachments\n logging.info(\"msg.attachments type: %s\" % type(message.attachments))\n except AttributeError:\n self.attachments = None\n logging.info(\"Incoming email from: \"+self.sender)\n plaintext_bodies = message.bodies('text/plain')\n html_bodies = message.bodies('text/html')\n self.decoded_bodies = list()\n\n for content_type, html in html_bodies:\n self.decoded_bodies.append(BeautifulSoup(html.decode()).get_text())\n\n for content_type, plaintext in plaintext_bodies:\n\n if not plaintext.decode() in self.decoded_bodies:\n self.decoded_bodies.append(plaintext.decode())\n\n if self.attachments:\n for a in self.attachments:\n logging.info(\"attachment detected! 
Type: \"+str(a[0])+\" content: \"+str(a[1]))\n if a[0].decode()[1][-4::] == \".txt\":\n self.decoded_bodies.append(a[1].decode())\n logging.info(\"Bodies:\\n\"+\"\\n\".join(self.decoded_bodies))\n\n\ndef process_mail(text, sender):\n match = email_match.match(text)\n if match:\n usr_string = match.group(0)\n users_qry = AppUser.query(AppUser.email == sender)\n if users_qry:\n logging.info(\"Email %s tried to attach to user %s (Already attached)\" % sender % usr_string)\n return\n users_qry = AppUser.query(AppUser.user == usr_string)\n \n\n qry = AppUser.query(AppUser.emails == match.group(0))\n\n\n\n\n\n\napp = webapp2.WSGIApplication(\n [IncomingMailHandler.mapping()],\n debug=True\n)\n","sub_path":"get_mail.py","file_name":"get_mail.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"332068271","text":"import logging\nimport time\n\nfrom django.core.management.base import BaseCommand\n\nfrom atf_eregs.atf_resources import fetch_and_save_resources\n\nlogger = logging.getLogger(__name__)\n\n\ndef infinite_loop():\n \"\"\"Allow a hook for tests, etc. to break the infinite loop.\"\"\"\n return True\n\n\nclass Command(BaseCommand):\n help = 'Load \"Additional Resources\" data from atf.gov'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--period', type=int, help='Period of repetition, in seconds')\n\n def handle(self, *args, **options):\n while infinite_loop():\n prev_run = time.time()\n try:\n fetch_and_save_resources()\n except (IOError, ConnectionError):\n logger.exception('Error retrieving data')\n\n if options.get('period') is None:\n break\n\n next_run = prev_run + options['period']\n # Use `max` to account for running too long\n time.sleep(max(0, next_run - time.time()))\n","sub_path":"atf_eregs/management/commands/fetch_atf_resources.py","file_name":"fetch_atf_resources.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"181656122","text":"# Python standard library modules\nimport configparser\nimport curses\nimport json\nimport os\nimport sys\n\n\nclass ConfigError(Exception):\n pass\n\n\nclass Config(object):\n def __init__(self, config_path):\n self.config_path = config_path\n if os.path.isfile(config_path):\n try:\n config_raw = open(config_path, 'r').read()\n except IOError:\n raise ConfigError('Could not open/read config file \"{0:s}\"'.format(config_path))\n\n try:\n self.data = json.loads(config_raw)\n except ValueError:\n raise ConfigError('Could not parse config file \"{0:s}\" as JSON'.format(config_path))\n else:\n self.data = {\n 'assumed_roles': {},\n 'default_profile_name': None,\n 'default_profile_type': None,\n 'key_pairs': {},\n }\n\n def get_active_key_pair(self, key_pair_name):\n if key_pair_name not in self.data['key_pairs']:\n raise ConfigError('\"{0:s}\" key pair not found in config'.format(key_pair_name))\n\n if self.data['key_pairs'][key_pair_name]['use_temporary_credentials']:\n key_data = {\n 'aws_access_key_id': self.data['key_pairs'][key_pair_name]['temporary_credentials']['access_key_id'],\n 'aws_secret_access_key': self.data['key_pairs'][key_pair_name]['temporary_credentials']['secret_access_key'],\n 'aws_session_token': self.data['key_pairs'][key_pair_name]['temporary_credentials']['session_token'],\n\n # aws_security_token is deprecated, but required by boto\n 'aws_security_token': 
self.data['key_pairs'][key_pair_name]['temporary_credentials']['session_token'],\n }\n else:\n key_data = {\n 'aws_access_key_id': self.data['key_pairs'][key_pair_name]['access_key_id'],\n 'aws_secret_access_key': self.data['key_pairs'][key_pair_name]['secret_access_key'],\n }\n\n for k, v in self.data['key_pairs'][key_pair_name]['options'].items():\n key_data[k] = v\n\n return key_data\n\n def save(self):\n # Check the config has at least one keypair before attempting to save it\n if len(self.data['key_pairs'].keys()) == 0:\n raise ConfigError('No keypairs in config')\n\n # Write the config file data\n try:\n open(self.config_path, 'w').write(json.dumps(self.data, sort_keys=True, indent=2, separators=(',', ': ')))\n except IOError:\n raise ConfigError('Could not open the config file \"{0:s}\" for writing'.format(self.config_path))\n\n # Create the AWS credentials INI file\n config_data = configparser.ConfigParser()\n default_profile_name = self.data['default_profile_name']\n if self.data['default_profile_type'] == 'assumed_role':\n config_data['default'] = {\n 'aws_access_key_id': self.data['assumed_roles'][default_profile_name]['access_key_id'],\n 'aws_secret_access_key': self.data['assumed_roles'][default_profile_name]['secret_access_key'],\n 'aws_session_token': self.data['assumed_roles'][default_profile_name]['session_token'],\n\n # aws_security_token is deprecated, but required by boto\n 'aws_security_token': self.data['assumed_roles'][default_profile_name]['session_token'],\n }\n options = self.data['assumed_roles'][default_profile_name].get('options', {})\n for k, v in options.items():\n config_data['default'][k] = v\n else:\n config_data['default'] = self.get_active_key_pair(default_profile_name)\n\n # Output the rest of the key pairs & assumed roles\n for key_pair_name in self.data['key_pairs'].keys():\n config_data[key_pair_name] = self.get_active_key_pair(key_pair_name)\n\n for role_name in self.data['assumed_roles'].keys():\n config_data[role_name] = {\n 'aws_access_key_id': self.data['assumed_roles'][role_name]['access_key_id'],\n 'aws_secret_access_key': self.data['assumed_roles'][role_name]['secret_access_key'],\n 'aws_session_token': self.data['assumed_roles'][role_name]['session_token'],\n\n # aws_security_token is deprecated, but required by boto\n 'aws_security_token': self.data['assumed_roles'][role_name]['session_token'],\n }\n options = self.data['assumed_roles'][role_name].get('options', {})\n for k, v in options.items():\n config_data[role_name][k] = v\n\n # Create ~/.aws directory if it doesn't already exist\n aws_config_dir = os.path.join(os.path.expanduser('~'), '.aws')\n os.makedirs(aws_config_dir, exist_ok=True)\n\n # Write the AWS credentials INI file\n aws_config_file = os.path.join(aws_config_dir, 'credentials')\n try:\n with open(aws_config_file, 'w') as configfile:\n config_data.write(configfile)\n except IOError:\n raise ConfigError('Could not write the AWS credentials file \"{0:s}\"'.format(aws_config_file))\n\n\ndef exit_cleanup(signal, frame):\n curses.endwin()\n sys.exit(0)\n","sub_path":"util/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"165579501","text":"'''\nThis file contains a lot of helper methods that are used to visualize details of CPPN\nThese are mostly used for debugging and studying CPPN genotypes\n'''\nimport matplotlib.pyplot as plt\nimport colorsys\n\n\n'''\nCreates bar graph showing the most 
common numbers of hidden nodes\nshows frequency of each hidden node value in the bar graph\n@param pop population for which hidden nodes are being visualized\npost: print out a graph to visualize number of hidden nodes in the given population\n'''\ndef visHiddenNodes(pop):\n\tallHiddenVals = []\n\t# generate all total hidden nodes for each genotype in the population\n\tfor ind in pop:\n\t\tallHiddenVals.append(ind.getHiddenNodes())\n\t# store all data for the frequency of hidden nodes in a dictionary\n\tdataDict = {}\n\tfor val in allHiddenVals:\n\t\tif(val in dataDict.keys()):\n\t\t\tdataDict[val] = dataDict[val] + 1\n\t\telse:\n\t\t\tdataDict[val] = 1\n\t# create bar graph to display with given data\n\tax = plt.subplot(111)\n\tax.bar(dataDict.keys(), dataDict.values() ,width=0.2,color='b',align='center')\n\tplt.title(\"NUMBER OF HIDDEN NODES AMONG INDIVIDUALS\")\n\tplt.xlabel(\"Number of Hidden Nodes\")\n\tplt.ylabel(\"Frequency\")\n\t# display the graph\n\tplt.show()\n\n'''\ncreates bar graphing showing the number of connections in a the population's networks\nshows frequency of each number of connections\n@param pop population for which visualization is being generated\n'''\ndef visConnections(pop):\n\tallConnections = []\n\t# generate all total number connection values for population\n\tfor ind in pop:\n\t\tallConnections.append(len(ind.connections))\n\t# store all data in dictionary so that it can be turned into a bar graph\n\tdataDict = {}\n\tfor val in allConnections:\n\t\tif(val in dataDict.keys()):\n\t\t\tdataDict[val] = dataDict[val] + 1\n\t\telse:\n\t\t\tdataDict[val] = 1\n\t# create bar graph to display with connection data\n\tax = plt.subplot(111)\n\tax.bar(dataDict.keys(), dataDict.values() ,width=0.2,color='b',align='center')\n\tplt.title(\"NUMBER OF CONNECTIONS AMONG INDIVIDUALS\")\n\tplt.xlabel(\"Number of Connections\")\n\tplt.ylabel(\"Frequency\")\n\t# display the graph\n\tplt.show()\n\n\n'''\nThe following function is used to visualize a general set of data\ngenerates a bar graph containing all values and the frequencies \nassociated with them \n@param dataSet the data set for which the bar graph is being generated\npost: graph is displayed to user\n'''\ndef visGeneralData(dataSet):\n\tdataDict = {}\n\t# generate dictionary based on given data\n\tfor d in dataSet:\n\t\tif(d in dataDict.keys()):\n\t\t\tdataDict[d] = dataDict[d] + 1\n\t\telse:\n\t\t\tdataDict[d] = 1\n\t# plot data and show to user\n\tax = plt.subplot(111)\n\tax.bar(dataDict.keys(), dataDict.values() ,width=0.2,color='g',align='center')\n\tplt.title(\"VISUALIZATIN OF DATA SET\")\n\tplt.xlabel(\"Value\")\n\tplt.ylabel(\"Frequency\")\n\tplt.show()\n\n\n'''\nmethod for quickly finding the number of solutions that passed XOR\n@param pop the population of solutions that are being tested on XOR\n@return tuple containing (number that passed test, number that failed test)\n'''\ndef findNumGoodSolutions(pop):\n\tinputs = [[0,0],[0,1],[1,0],[1,1]]\n\tnumSolved = 0\n\tnumFailed = 0\n\t# evaluate XOR for every individual in population\n\tfor ind in pop:\n\t\toutputs = []\n\t\tfor ins in inputs:\n\t\t\toutputs.append(ind.getOutput(ins)[0])\n\t\tGOOD_THRESH = .4\n\t\tif(outputs[0] < GOOD_THRESH and outputs[1] > GOOD_THRESH and outputs[2] > GOOD_THRESH and outputs[3] < GOOD_THRESH):\n\t\t\tnumSolved += 1\n\t\telse:\n\t\t\tnumFailed +=1 \n\n\treturn (numSolved, numFailed)\n\n\n'''\ncreates a heat map in matplotlib using a provided numpy array\nof outputs from the CPPN\n@param outputs the numpy array of outputs being used 
to create the heatmap\npost: heat map displayed to user\n'''\ndef showHeatMap(outputs):\n\t# create heat map using imshow and display to user\n\tplt.imshow(outputs, cmap='hot', interpolation='nearest')\n\tplt.show()\n\n\t\ndef plot_pareto_front(par_frnts, colors, labels):\n\t\"\"\"This method finds the fitness values of the\n\tpareto front (2D) and plots the pareto front\n\ton a matplotlib scatter plot to be visualized\n\t\"\"\"\n\n\tfor par_frnt, c, l in zip(par_frnts, colors, labels):\n\t\tx1_vals = [ind.fitness.values[0] for ind in par_frnt]\n\t\tx2_vals = [ind.fitness.values[1] for ind in par_frnt]\n\t\tplt.scatter(x1_vals, x2_vals, c=c, label=l)\n\tplt.xlabel(\"Closeness to Target\")\n\tplt.ylabel(\"Connection Cost\")\n\tplt.title(\"Visualization of Pareto Optimal Front\")\n\tplt.legend()\n\tplt.show()\n\ndef get_n_colors(N=5):\n\t\"\"\"This function is created to get a set of N\n\tcolors to use on a matplotlib graph that are as\n\tdistributed as possible\n\t\"\"\"\n\n\tHSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)]\n\thex_out = []\n\tfor rgb in HSV_tuples:\n\t\trgb = map(lambda x: int(x * 255), colorsys.hsv_to_rgb(*rgb))\n\t\thex_out.append('#%02x%02x%02x' % tuple(rgb))\n\t\n\treturn hex_out\n\n\n","sub_path":"FULL_CPPN_vis.py","file_name":"FULL_CPPN_vis.py","file_ext":"py","file_size_in_byte":4725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"408414481","text":"school_results = [\n {'school_class': '1a', 'scores': [3,5,5,5,3,3,5,3,4,5,5,4,3,3,3,4,4,5,2]},\n {'school_class': '1b', 'scores': [3,5,5,5,2,2,3,5,5,4,3,3,3,4,4,5,2]},\n {'school_class': '1v', 'scores': [3,5,5,5,2,3,4,4,3,2,2,3,5,5,4,3,3,3,4,4,5,2]},\n {'school_class': '1g', 'scores': [3,5,5,5,2,2,3,5,5,3,2,3,4,3,2,2,4,3,3,3,4,4,5]},\n]\n\nschool_average = 0\nfor i in range(len(school_results)):\n scores = school_results[i]['scores']\n class_average = 0\n for j in range(len(scores)):\n class_average = class_average + scores[j]\n class_average = class_average/len(scores)\n school_average = school_average + class_average\n print(\"Средний балл в классе\", school_results[i]['school_class'],class_average)\nprint(\"Средний балл в школе\", school_average/len(school_results))\n","sub_path":"homework2/03_FOR/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"375264522","text":"import logging\nimport torch\nimport io\nfrom torchtext.utils import download_from_url, extract_archive, unicode_csv_reader\nfrom torchtext.data.utils import ngrams_iterator\nfrom torchtext.data.utils import get_tokenizer\nfrom torchtext.vocab import build_vocab_from_iterator\nfrom torchtext.vocab import Vocab\nfrom tqdm import tqdm\nfrom torchtext.datasets.text_classification import _csv_iterator, _create_data_from_iterator, TextClassificationDataset\n\n\nNGRAMS=2\n\ndef _csv_iterator(data_path, ngrams, yield_cls=False, label=-1):\n tokenizer = get_tokenizer(\"basic_english\")\n with io.open(data_path, encoding=\"utf8\") as f:\n reader = unicode_csv_reader(f, delimiter=\"\\t\")\n for row in reader:\n tokens = ' '.join([row[5]])\n #print(row[5])\n tokens=tokenizer(tokens)\n\n if yield_cls:\n yield row[7], ngrams_iterator(tokens, ngrams)\n else:\n yield ngrams_iterator(tokens, ngrams)\n\n\ndef _create_data_from_iterator(vocab, iterator, include_unk):\n data = []\n labels = []\n with tqdm(unit_scale=0, unit='lines') as t:\n for cls, tokens in iterator:\n if 
include_unk:\n tokens = torch.tensor([vocab[token] for token in tokens])\n else:\n token_ids = list(filter(lambda x: x is not Vocab.UNK, [vocab[token] for token in tokens]))\n tokens = torch.tensor(token_ids)\n if len(tokens) == 0:\n logging.info('Row contains no tokens.')\n data.append((cls, tokens))\n labels.append(cls)\n t.update(1)\n return data, set(labels)\n\n\n\"\"\"\ntrain_csv_path = \"./data/train.csv\"\ntest_csv_path = \"./data/test.csv\"\n\"\"\"\n#-1: default\n#0: am\n#1: nam\ndef setup_datasets(train_csv_path, test_csv_path, include_unk=False):\n iterator=_csv_iterator(train_csv_path, NGRAMS)\n vocab = build_vocab_from_iterator(iterator)\n train_data, train_labels = _create_data_from_iterator(vocab, _csv_iterator(train_csv_path, NGRAMS, yield_cls=True, label=0), include_unk)\n test_data, test_labels = _create_data_from_iterator(vocab, _csv_iterator(test_csv_path, NGRAMS, yield_cls=True, label=0), include_unk)\n\n\n return TextClassificationDataset(vocab, train_data, train_labels), TextClassificationDataset(vocab, test_data, test_labels)\n\n\n\n#print(setup_datasets(train_csv_path, test_csv_path, include_unk=False))\n","sub_path":"analysis_seb/first_model/.ipynb_checkpoints/data_formatting_torch-checkpoint.py","file_name":"data_formatting_torch-checkpoint.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"429179074","text":"from typing import List\nimport numpy as np\nfrom abc import ABC, abstractmethod\n\nfrom agent import Agent\n\n\nclass Policy(ABC):\n\n @abstractmethod\n def apply_policy(self, agents: List[Agent]):\n pass\n\n\nclass MolecularPolicy(Policy):\n\n def __init__(self, spread_factor: float):\n self.sf = spread_factor\n\n def apply_policy(self, agents: List[Agent]):\n \"\"\"\n In molecular policy the probability spreads among cluster participants\n proportionally to total probability\n :param agents:\n :return:\n \"\"\"\n\n n_infected = sum([1 for agent in agents if agent.state == 'INFECTED'])\n\n if n_infected == 0 or n_infected == len(agents):\n return\n\n if len(agents) - n_infected > 0:\n prob = self.sf * n_infected / (len(agents) - n_infected)\n\n for k in range(len(agents)):\n if agents[k].state == 'SUSCEPTIBLE':\n if np.random.uniform() < prob:\n agents[k].state = 'INFECTED'\n","sub_path":"policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"600098542","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login\nfrom forms import SettingsForm\n\n\n\n@login_required(login_url=\"login/\")\ndef home(request):\n return render(request, \"dashboard/home.html\")\n\n\n#Settings allows users to set their first and last name which are optional fields and change email\ndef settings(request):\n if request.method == \"POST\":\n #Get the form data and associate it with the user who requested it\n form = SettingsForm(request.POST, instance=request.user)\n if form.is_valid():\n #Save the data to the user object instance\n userchange = form.save(commit = False) #Pretty sure this is redundant, need to test\n userchange.save()\n return HttpResponseRedirect(reverse('home'))\n else:\n #If the request is not POST populate the form with the previous data for the user\n form = 
SettingsForm(initial={'first_name':request.user.get_short_name(), 'last_name':request.user.last_name, 'email':request.user.email}) \n return render(request, 'dashboard/settings.html', {'form': form})\n\n #Delete user, this should use delete, not post\ndef delete_confirm(request):\n #If the request is POST\n if request.method == \"POST\":\n #this will just be the requester since there is no data associated with the form\n form = DeleteForm(request.POST)\n if form.is_valid():\n #Return the user object who requested the delete\n u = User.objects.get(username=request.user)\n #Delete the account and return to home, which will be the login screen now\n u.delete()\n return HttpResponseRedirect(reverse('home'))\n else:\n #If the request is not post then show the form and return it with the template\n form = DeleteForm()\n return render(request, \"dashboard/delete_confirm.html\", {'form': form})\n","sub_path":"coffee/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"298909342","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThe function TDMASolve implements the Thomas algorithm to solve a tridiagonal linear system \n\"\"\"\n\ndef TDMASolve(a, b, c, d):\n \"\"\"\n 'a' is the lower diagonal of size n-1\n 'b' is the system diagonal of size n\n 'c' is the upper diagonal of size n-1\n 'd' is the rhs vector\n The function returns the corresponding solution vector\n Carefull: The function modifies b[] and d[] inputs while solving\n \"\"\"\n n = len(d) # n is the numbers of rows, a and c has length n-1\n for i in range(n-1):\n d[i+1] -= d[i] * a[i] / b[i]\n b[i+1] -= c[i] * a[i] / b[i]\n for i in reversed(range(n-1)):\n d[i] -= d[i+1] * c[i] / b[i+1]\n return [d[i] / b[i] for i in range(n)] # return the solution","sub_path":"workshops/TDMA.py","file_name":"TDMA.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"232004782","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.15-x86_64/egg/wavescli/downloader.py\n# Compiled at: 2019-12-04 17:33:36\n# Size of source mod 2**32: 813 bytes\nimport os, urllib.parse, urllib.request\nfrom wavescli import awsadapter\n\ndef get_file(remote_uri, local_target_dir, basename=None):\n if remote_uri.startswith('s3://'):\n return awsadapter.get_file(remote_uri, local_target_dir, basename)\n if remote_uri.startswith('http://') or remote_uri.startswith('https://'):\n if not basename:\n schema = urllib.parse.urlparse(remote_uri)\n basename = os.path.basename(schema.path)\n target_path = os.path.join(local_target_dir, basename)\n response = urllib.request.urlopen(remote_uri)\n with open(target_path, 'wb') as (localfile):\n localfile.write(response.read())\n return target_path\n raise RuntimeError(\"Couldn't download the URL: {}\".format(repr(remote_uri)))","sub_path":"pycfiles/wavescli-0.0.42-py3.7/downloader.cpython-37.py","file_name":"downloader.cpython-37.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"439222502","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 9 16:02:57 2018\n\n@author: aidanrocke\n\"\"\"\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom plotting import 
*\n\ndef train(data,vae):\n \n ## load mnist:\n mnist = data.read_data_sets('MNIST_data/')\n \n fig, ax = plt.subplots(nrows=vae.epochs, ncols=11, figsize=(10, 20))\n \n with tf.train.MonitoredSession() as sess:\n \n sess.run(vae.init_g)\n sess.run(vae.init_l)\n\n for epoch in range(vae.epochs):\n feed = {vae.data: mnist.test.images.reshape([-1, 28, 28])}\n test_elbo, test_codes, test_samples = sess.run([vae.elbo, vae.code, vae.samples], feed)\n print('Epoch', epoch, 'elbo', test_elbo)\n ax[epoch, 0].set_ylabel('Epoch {}'.format(epoch))\n \n plot_codes(ax[epoch, 0], test_codes, mnist.test.labels)\n plot_samples(ax[epoch, 1:], test_samples)\n for _ in range(600):\n feed = {vae.data: mnist.train.next_batch(vae.batch_size)[0].reshape([-1, 28, 28])}\n sess.run(vae.optimize, feed)\n plt.savefig('vae-mnist.png', dpi=300, transparent=True, bbox_inches='tight')","sub_path":"train_vae.py","file_name":"train_vae.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"414899609","text":"# Getting number of points\r\n\r\nn = int(input (\"Please define n=\"))\r\n\r\n\r\n\r\n# Getting coordinates from user\r\n\r\ncoordinates = []\r\n\r\nfor i in range(1, n):\r\n\r\n raw= input(\"Please enter \" + str(i) + \"th coordinates \").split( \",\" )\r\n\r\n x = float(raw[0])\r\n\r\n y = float(raw[1])\r\n\r\n point=(x,y)\r\n\r\n coordinates.append( point )\r\n\r\nprint(\"Coordinates:\", coordinates)\r\n\r\n\r\n\r\n# Calculating center of mass\r\n\r\nsumx = 0\r\n\r\nsumy = 0\r\n\r\nfor point in coordinates:\r\n\r\n sumx += point[0]\r\n\r\n sumy += point[1]\r\n\r\ncenter_of_mass = (sumx/n, sumy/n)\r\n\r\nprint(\"Center of mass:\", center_of_mass)\r\n\r\n\r\n\r\n# Calculate distances\r\n\r\ndistances = []\r\n\r\nfor point, index in zip(coordinates, range(n)):\r\n\r\n distance = (center_of_mass[0]-point[0])**2 + (center_of_mass[1]-point[1])**2\r\n\r\n distance = distance**.5\r\n\r\n distances.append( (distance, index) )\r\n\r\nprint(\"Distances are:\", distances)\r\n\r\n\r\n\r\n# Find closest point\r\n\r\nclosest_point = distances[0]\r\n\r\nfor distance, index in distances:\r\n\r\n if distance < closest_point[0]:\r\n\r\n closest_point = (distance, index)\r\n\r\nprint(\"Closest point is:\", coordinates[closest_point[1]], \"with distance:\", closest_point[0])\r\n\r\n\r\n\r\n# Find furthest point\r\n\r\nfurthest_point = distances[0]\r\n\r\nfor distance, index in distances:\r\n\r\n if distance >furthest_point[0]:\r\n\r\n furthest_point = (distance, index)\r\n\r\nprint(\"Furthest point is:\", coordinates[furthest_point[1]], \"with distance:\", furthest_point[0])","sub_path":"HW2.py","file_name":"HW2.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"613974078","text":"import os\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom algorithm import *\n\nos.chdir(os.path.dirname(os.path.realpath(__file__)))\n\n# example 1\nd = [\n [1.47, 52.21],\n [1.5, 53.12],\n [1.52, 54.48],\n [1.55, 55.84],\n [1.57, 57.2],\n [1.6, 58.57],\n [1.63, 59.93],\n [1.65, 61.29],\n [1.68, 63.11],\n [1.7, 64.47],\n [1.73, 66.28],\n [1.75, 68.1],\n [1.78, 69.92],\n [1.8, 72.19],\n [1.83, 74.46]\n]\n\nb0, b1 = coefficients(d)\nprint('b0={0}, b1={1}'.format(b0, b1))\n\n# example 2\nf = '../Data/insurance.csv'\nd = pd.read_csv(f, header=None)\ntrain, test = 
train_test_split(d, test_size=0.4)\ntrain, test = train.values.tolist(), test.values.tolist()\n\nb0, b1 = coefficients(train)\n\np = predict(train, test)\na = [row[-1] for row in test]\nr = rmse(a, p)\n\nprint('b0={0}, b1={1}, rmse={2}'.format(b0, b1, r))\n","sub_path":"Simple_Linear_Regression/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"543201966","text":"# -*- coding: utf-8 -*-\n\"\"\"Advent of Code 2020 - Day 21 - Allergen Assesment.\"\"\"\n\nimport argparse\nimport pdb\nimport traceback\nfrom re import match\n\n\ndef parse_puzzle(lines: list[str]):\n puzzle: list[tuple[list[str], list[str]]] = []\n for line in lines:\n m = match(r\"^([^(]+)\\(contains ([^)]+)\\)$\", line)\n ingredients = m[1].strip().split()\n allergens = m[2].strip().split(\", \")\n puzzle.append((ingredients, allergens))\n return puzzle\n\n\ndef part_one(puzzle):\n all_ingredients: set[str] = set()\n all_allergens: set[str] = set()\n allergen_ingredient_map: dict[str, set[str]] = {}\n for ingredients, allergens in puzzle:\n all_ingredients.update(ingredients)\n all_allergens.update(allergens)\n for allergen in allergens:\n new_ingredients = allergen_ingredient_map.get(allergen, set())\n new_ingredients.update(ingredients)\n allergen_ingredient_map[allergen] = new_ingredients\n\n remaining_ingredients = all_ingredients.copy()\n for allergen, ingredients in allergen_ingredient_map.items():\n remaining_ingredients.difference_update(ingredients)\n\n return None\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Advent of Code - 2020 - Day 21 - Allergen Assesment.\"\n )\n parser.add_argument(\n \"input\",\n type=str,\n default=\"input.txt\",\n nargs=\"?\",\n help=\"The puzzle input. (Default %(default)s)\",\n )\n args = parser.parse_args()\n\n try:\n with open(args.input, \"rt\") as inf:\n lines = inf.readlines()\n puzzle = parse_puzzle(lines)\n print(part_one(puzzle))\n except Exception:\n traceback.print_exc()\n pdb.post_mortem()\n","sub_path":"2020/21-allergen_assesment/allergen_assesment.py","file_name":"allergen_assesment.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"466092245","text":"import pytest\n\nfrom aiogram import Bot, types\nfrom . 
import FakeTelegram, TOKEN, BOT_ID\n\npytestmark = pytest.mark.asyncio\n\n\n@pytest.yield_fixture(name='bot')\nasync def bot_fixture(event_loop):\n \"\"\" Bot fixture \"\"\"\n _bot = Bot(TOKEN, loop=event_loop, parse_mode=types.ParseMode.MARKDOWN)\n yield _bot\n await _bot.close()\n\n\nasync def test_get_me(bot: Bot, event_loop):\n \"\"\" getMe method test \"\"\"\n from .types.dataset import USER\n user = types.User(**USER)\n\n async with FakeTelegram(message_data=USER, loop=event_loop):\n result = await bot.me\n assert result == user\n\n\nasync def test_log_out(bot: Bot, event_loop):\n \"\"\" logOut method test \"\"\"\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.log_out()\n assert result is True\n\n\nasync def test_close_bot(bot: Bot, event_loop):\n \"\"\" close method test \"\"\"\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.close_bot()\n assert result is True\n\n\nasync def test_send_message(bot: Bot, event_loop):\n \"\"\" sendMessage method test \"\"\"\n from .types.dataset import MESSAGE\n msg = types.Message(**MESSAGE)\n\n async with FakeTelegram(message_data=MESSAGE, loop=event_loop):\n result = await bot.send_message(chat_id=msg.chat.id, text=msg.text)\n assert result == msg\n\n\nasync def test_forward_message(bot: Bot, event_loop):\n \"\"\" forwardMessage method test \"\"\"\n from .types.dataset import FORWARDED_MESSAGE\n msg = types.Message(**FORWARDED_MESSAGE)\n\n async with FakeTelegram(message_data=FORWARDED_MESSAGE, loop=event_loop):\n result = await bot.forward_message(chat_id=msg.chat.id, from_chat_id=msg.forward_from_chat.id,\n message_id=msg.forward_from_message_id)\n assert result == msg\n\n\nasync def test_send_photo(bot: Bot, event_loop):\n \"\"\" sendPhoto method test with file_id \"\"\"\n from .types.dataset import MESSAGE_WITH_PHOTO, PHOTO\n msg = types.Message(**MESSAGE_WITH_PHOTO)\n photo = types.PhotoSize(**PHOTO)\n\n async with FakeTelegram(message_data=MESSAGE_WITH_PHOTO, loop=event_loop):\n result = await bot.send_photo(msg.chat.id, photo=photo.file_id, caption=msg.caption,\n parse_mode=types.ParseMode.HTML, disable_notification=False)\n assert result == msg\n\n\nasync def test_send_audio(bot: Bot, event_loop):\n \"\"\" sendAudio method test with file_id \"\"\"\n from .types.dataset import MESSAGE_WITH_AUDIO\n msg = types.Message(**MESSAGE_WITH_AUDIO)\n\n async with FakeTelegram(message_data=MESSAGE_WITH_AUDIO, loop=event_loop):\n result = await bot.send_audio(chat_id=msg.chat.id, audio=msg.audio.file_id, caption=msg.caption,\n parse_mode=types.ParseMode.HTML, duration=msg.audio.duration,\n performer=msg.audio.performer, title=msg.audio.title, disable_notification=False)\n assert result == msg\n\n\nasync def test_send_document(bot: Bot, event_loop):\n \"\"\" sendDocument method test with file_id \"\"\"\n from .types.dataset import MESSAGE_WITH_DOCUMENT\n msg = types.Message(**MESSAGE_WITH_DOCUMENT)\n\n async with FakeTelegram(message_data=MESSAGE_WITH_DOCUMENT, loop=event_loop):\n result = await bot.send_document(chat_id=msg.chat.id, document=msg.document.file_id, caption=msg.caption,\n parse_mode=types.ParseMode.HTML, disable_notification=False)\n assert result == msg\n\n\nasync def test_send_video(bot: Bot, event_loop):\n \"\"\" sendVideo method test with file_id \"\"\"\n from .types.dataset import MESSAGE_WITH_VIDEO, VIDEO\n msg = types.Message(**MESSAGE_WITH_VIDEO)\n video = types.Video(**VIDEO)\n\n async with FakeTelegram(message_data=MESSAGE_WITH_VIDEO, loop=event_loop):\n 
result = await bot.send_video(chat_id=msg.chat.id, video=video.file_id, duration=video.duration,\n width=video.width, height=video.height, caption=msg.caption,\n parse_mode=types.ParseMode.HTML, supports_streaming=True,\n disable_notification=False)\n assert result == msg\n\n\nasync def test_send_voice(bot: Bot, event_loop):\n \"\"\" sendVoice method test with file_id \"\"\"\n from .types.dataset import MESSAGE_WITH_VOICE, VOICE\n msg = types.Message(**MESSAGE_WITH_VOICE)\n voice = types.Voice(**VOICE)\n\n async with FakeTelegram(message_data=MESSAGE_WITH_VOICE, loop=event_loop):\n result = await bot.send_voice(chat_id=msg.chat.id, voice=voice.file_id, caption=msg.caption,\n parse_mode=types.ParseMode.HTML, duration=voice.duration,\n disable_notification=False)\n assert result == msg\n\n\nasync def test_send_video_note(bot: Bot, event_loop):\n \"\"\" sendVideoNote method test with file_id \"\"\"\n from .types.dataset import MESSAGE_WITH_VIDEO_NOTE, VIDEO_NOTE\n msg = types.Message(**MESSAGE_WITH_VIDEO_NOTE)\n video_note = types.VideoNote(**VIDEO_NOTE)\n\n async with FakeTelegram(message_data=MESSAGE_WITH_VIDEO_NOTE, loop=event_loop):\n result = await bot.send_video_note(chat_id=msg.chat.id, video_note=video_note.file_id,\n duration=video_note.duration, length=video_note.length,\n disable_notification=False)\n assert result == msg\n\n\nasync def test_send_media_group(bot: Bot, event_loop):\n \"\"\" sendMediaGroup method test with file_id \"\"\"\n from .types.dataset import MESSAGE_WITH_MEDIA_GROUP, PHOTO\n msg = types.Message(**MESSAGE_WITH_MEDIA_GROUP)\n photo = types.PhotoSize(**PHOTO)\n media = [types.InputMediaPhoto(media=photo.file_id), types.InputMediaPhoto(media=photo.file_id)]\n\n async with FakeTelegram(message_data=[MESSAGE_WITH_MEDIA_GROUP, MESSAGE_WITH_MEDIA_GROUP], loop=event_loop):\n result = await bot.send_media_group(msg.chat.id, media=media, disable_notification=False)\n assert len(result) == len(media)\n assert result.pop().media_group_id\n\n\nasync def test_send_location(bot: Bot, event_loop):\n \"\"\" sendLocation method test \"\"\"\n from .types.dataset import MESSAGE_WITH_LOCATION, LOCATION\n msg = types.Message(**MESSAGE_WITH_LOCATION)\n location = types.Location(**LOCATION)\n\n async with FakeTelegram(message_data=MESSAGE_WITH_LOCATION, loop=event_loop):\n result = await bot.send_location(msg.chat.id, latitude=location.latitude, longitude=location.longitude,\n live_period=10, disable_notification=False)\n assert result == msg\n\n\nasync def test_edit_message_live_location_by_bot(bot: Bot, event_loop):\n \"\"\" editMessageLiveLocation method test \"\"\"\n from .types.dataset import MESSAGE_WITH_LOCATION, LOCATION\n msg = types.Message(**MESSAGE_WITH_LOCATION)\n location = types.Location(**LOCATION)\n\n # editing bot message\n async with FakeTelegram(message_data=MESSAGE_WITH_LOCATION, loop=event_loop):\n result = await bot.edit_message_live_location(chat_id=msg.chat.id, message_id=msg.message_id,\n latitude=location.latitude, longitude=location.longitude)\n assert result == msg\n\n\nasync def test_edit_message_live_location_by_user(bot: Bot, event_loop):\n \"\"\" editMessageLiveLocation method test \"\"\"\n from .types.dataset import MESSAGE_WITH_LOCATION, LOCATION\n msg = types.Message(**MESSAGE_WITH_LOCATION)\n location = types.Location(**LOCATION)\n\n # editing user's message\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.edit_message_live_location(chat_id=msg.chat.id, message_id=msg.message_id,\n latitude=location.latitude, 
longitude=location.longitude)\n assert isinstance(result, bool) and result is True\n\n\nasync def test_stop_message_live_location_by_bot(bot: Bot, event_loop):\n \"\"\" stopMessageLiveLocation method test \"\"\"\n from .types.dataset import MESSAGE_WITH_LOCATION\n msg = types.Message(**MESSAGE_WITH_LOCATION)\n\n # stopping bot message\n async with FakeTelegram(message_data=MESSAGE_WITH_LOCATION, loop=event_loop):\n result = await bot.stop_message_live_location(chat_id=msg.chat.id, message_id=msg.message_id)\n assert result == msg\n\n\nasync def test_stop_message_live_location_by_user(bot: Bot, event_loop):\n \"\"\" stopMessageLiveLocation method test \"\"\"\n from .types.dataset import MESSAGE_WITH_LOCATION\n msg = types.Message(**MESSAGE_WITH_LOCATION)\n\n # stopping user's message\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.stop_message_live_location(chat_id=msg.chat.id, message_id=msg.message_id)\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_send_venue(bot: Bot, event_loop):\n \"\"\" sendVenue method test \"\"\"\n from .types.dataset import MESSAGE_WITH_VENUE, VENUE, LOCATION\n msg = types.Message(**MESSAGE_WITH_VENUE)\n location = types.Location(**LOCATION)\n venue = types.Venue(**VENUE)\n\n async with FakeTelegram(message_data=MESSAGE_WITH_VENUE, loop=event_loop):\n result = await bot.send_venue(msg.chat.id, latitude=location.latitude, longitude=location.longitude,\n title=venue.title, address=venue.address, foursquare_id=venue.foursquare_id,\n disable_notification=False)\n assert result == msg\n\n\nasync def test_send_contact(bot: Bot, event_loop):\n \"\"\" sendContact method test \"\"\"\n from .types.dataset import MESSAGE_WITH_CONTACT, CONTACT\n msg = types.Message(**MESSAGE_WITH_CONTACT)\n contact = types.Contact(**CONTACT)\n\n async with FakeTelegram(message_data=MESSAGE_WITH_CONTACT, loop=event_loop):\n result = await bot.send_contact(msg.chat.id, phone_number=contact.phone_number, first_name=contact.first_name,\n last_name=contact.last_name, disable_notification=False)\n assert result == msg\n\n\nasync def test_send_dice(bot: Bot, event_loop):\n \"\"\" sendDice method test \"\"\"\n from .types.dataset import MESSAGE_WITH_DICE\n msg = types.Message(**MESSAGE_WITH_DICE)\n\n async with FakeTelegram(message_data=MESSAGE_WITH_DICE, loop=event_loop):\n result = await bot.send_dice(msg.chat.id, disable_notification=False)\n assert result == msg\n\n\nasync def test_send_chat_action(bot: Bot, event_loop):\n \"\"\" sendChatAction method test \"\"\"\n from .types.dataset import CHAT\n chat = types.Chat(**CHAT)\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.send_chat_action(chat_id=chat.id, action=types.ChatActions.TYPING)\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_get_user_profile_photo(bot: Bot, event_loop):\n \"\"\" getUserProfilePhotos method test \"\"\"\n from .types.dataset import USER_PROFILE_PHOTOS, USER\n user = types.User(**USER)\n\n async with FakeTelegram(message_data=USER_PROFILE_PHOTOS, loop=event_loop):\n result = await bot.get_user_profile_photos(user_id=user.id, offset=1, limit=1)\n assert isinstance(result, types.UserProfilePhotos)\n\n\nasync def test_get_file(bot: Bot, event_loop):\n \"\"\" getFile method test \"\"\"\n from .types.dataset import FILE\n file = types.File(**FILE)\n\n async with FakeTelegram(message_data=FILE, loop=event_loop):\n result = await bot.get_file(file_id=file.file_id)\n assert isinstance(result, 
types.File)\n\n\nasync def test_kick_chat_member(bot: Bot, event_loop):\n \"\"\" kickChatMember method test \"\"\"\n from .types.dataset import USER, CHAT\n user = types.User(**USER)\n chat = types.Chat(**CHAT)\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.kick_chat_member(chat_id=chat.id, user_id=user.id, until_date=123)\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_unban_chat_member(bot: Bot, event_loop):\n \"\"\" unbanChatMember method test \"\"\"\n from .types.dataset import USER, CHAT\n user = types.User(**USER)\n chat = types.Chat(**CHAT)\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.unban_chat_member(chat_id=chat.id, user_id=user.id)\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_restrict_chat_member(bot: Bot, event_loop):\n \"\"\" restrictChatMember method test \"\"\"\n from .types.dataset import USER, CHAT\n user = types.User(**USER)\n chat = types.Chat(**CHAT)\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.restrict_chat_member(\n chat_id=chat.id,\n user_id=user.id,\n permissions=types.ChatPermissions(\n can_add_web_page_previews=False,\n can_send_media_messages=False,\n can_send_messages=False,\n can_send_other_messages=False\n ), until_date=123)\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_promote_chat_member(bot: Bot, event_loop):\n \"\"\" promoteChatMember method test \"\"\"\n from .types.dataset import USER, CHAT\n user = types.User(**USER)\n chat = types.Chat(**CHAT)\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.promote_chat_member(chat_id=chat.id, user_id=user.id, can_change_info=True,\n can_delete_messages=True, can_edit_messages=True,\n can_invite_users=True, can_pin_messages=True, can_post_messages=True,\n can_promote_members=True, can_restrict_members=True)\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_export_chat_invite_link(bot: Bot, event_loop):\n \"\"\" exportChatInviteLink method test \"\"\"\n from .types.dataset import CHAT, INVITE_LINK\n chat = types.Chat(**CHAT)\n\n async with FakeTelegram(message_data=INVITE_LINK, loop=event_loop):\n result = await bot.export_chat_invite_link(chat_id=chat.id)\n assert result == INVITE_LINK\n\n\nasync def test_delete_chat_photo(bot: Bot, event_loop):\n \"\"\" deleteChatPhoto method test \"\"\"\n from .types.dataset import CHAT\n chat = types.Chat(**CHAT)\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.delete_chat_photo(chat_id=chat.id)\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_set_chat_title(bot: Bot, event_loop):\n \"\"\" setChatTitle method test \"\"\"\n from .types.dataset import CHAT\n chat = types.Chat(**CHAT)\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.set_chat_title(chat_id=chat.id, title='Test title')\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_set_chat_description(bot: Bot, event_loop):\n \"\"\" setChatDescription method test \"\"\"\n from .types.dataset import CHAT\n chat = types.Chat(**CHAT)\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.set_chat_description(chat_id=chat.id, description='Test description')\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_pin_chat_message(bot: Bot, event_loop):\n \"\"\" pinChatMessage 
method test \"\"\"\n from .types.dataset import MESSAGE\n message = types.Message(**MESSAGE)\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.pin_chat_message(chat_id=message.chat.id, message_id=message.message_id,\n disable_notification=False)\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_unpin_chat_message(bot: Bot, event_loop):\n \"\"\" unpinChatMessage method test \"\"\"\n from .types.dataset import CHAT\n chat = types.Chat(**CHAT)\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.unpin_chat_message(chat_id=chat.id)\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_leave_chat(bot: Bot, event_loop):\n \"\"\" leaveChat method test \"\"\"\n from .types.dataset import CHAT\n chat = types.Chat(**CHAT)\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.leave_chat(chat_id=chat.id)\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_get_chat(bot: Bot, event_loop):\n \"\"\" getChat method test \"\"\"\n from .types.dataset import CHAT\n chat = types.Chat(**CHAT)\n\n async with FakeTelegram(message_data=CHAT, loop=event_loop):\n result = await bot.get_chat(chat_id=chat.id)\n assert result == chat\n\n\nasync def test_get_chat_administrators(bot: Bot, event_loop):\n \"\"\" getChatAdministrators method test \"\"\"\n from .types.dataset import CHAT, CHAT_MEMBER\n chat = types.Chat(**CHAT)\n member = types.ChatMember(**CHAT_MEMBER)\n\n async with FakeTelegram(message_data=[CHAT_MEMBER, CHAT_MEMBER], loop=event_loop):\n result = await bot.get_chat_administrators(chat_id=chat.id)\n assert result[0] == member\n assert len(result) == 2\n\n\nasync def test_get_chat_members_count(bot: Bot, event_loop):\n \"\"\" getChatMembersCount method test \"\"\"\n from .types.dataset import CHAT\n chat = types.Chat(**CHAT)\n count = 5\n\n async with FakeTelegram(message_data=count, loop=event_loop):\n result = await bot.get_chat_members_count(chat_id=chat.id)\n assert result == count\n\n\nasync def test_get_chat_member(bot: Bot, event_loop):\n \"\"\" getChatMember method test \"\"\"\n from .types.dataset import CHAT, CHAT_MEMBER\n chat = types.Chat(**CHAT)\n member = types.ChatMember(**CHAT_MEMBER)\n\n async with FakeTelegram(message_data=CHAT_MEMBER, loop=event_loop):\n result = await bot.get_chat_member(chat_id=chat.id, user_id=member.user.id)\n assert isinstance(result, types.ChatMember)\n assert result == member\n\n\nasync def test_set_chat_sticker_set(bot: Bot, event_loop):\n \"\"\" setChatStickerSet method test \"\"\"\n from .types.dataset import CHAT\n chat = types.Chat(**CHAT)\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.set_chat_sticker_set(chat_id=chat.id, sticker_set_name='aiogram_stickers')\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_delete_chat_sticker_set(bot: Bot, event_loop):\n \"\"\" setChatStickerSet method test \"\"\"\n from .types.dataset import CHAT\n chat = types.Chat(**CHAT)\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.delete_chat_sticker_set(chat_id=chat.id)\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_answer_callback_query(bot: Bot, event_loop):\n \"\"\" answerCallbackQuery method test \"\"\"\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.answer_callback_query(callback_query_id='QuERyId', text='Test Answer')\n assert 
isinstance(result, bool)\n assert result is True\n\n\nasync def test_set_my_commands(bot: Bot, event_loop):\n \"\"\" setMyCommands method test \"\"\"\n from .types.dataset import BOT_COMMAND\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n commands = [types.BotCommand(**BOT_COMMAND), types.BotCommand(**BOT_COMMAND)]\n result = await bot.set_my_commands(commands)\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_get_my_commands(bot: Bot, event_loop):\n \"\"\" getMyCommands method test \"\"\"\n from .types.dataset import BOT_COMMAND\n command = types.BotCommand(**BOT_COMMAND)\n commands = [command, command]\n async with FakeTelegram(message_data=commands, loop=event_loop):\n result = await bot.get_my_commands()\n assert isinstance(result, list)\n assert all([isinstance(command, types.BotCommand) for command in result])\n\n\nasync def test_edit_message_text_by_bot(bot: Bot, event_loop):\n \"\"\" editMessageText method test \"\"\"\n from .types.dataset import EDITED_MESSAGE\n msg = types.Message(**EDITED_MESSAGE)\n\n # message by bot\n async with FakeTelegram(message_data=EDITED_MESSAGE, loop=event_loop):\n result = await bot.edit_message_text(text=msg.text, chat_id=msg.chat.id, message_id=msg.message_id)\n assert result == msg\n\n\nasync def test_edit_message_text_by_user(bot: Bot, event_loop):\n \"\"\" editMessageText method test \"\"\"\n from .types.dataset import EDITED_MESSAGE\n msg = types.Message(**EDITED_MESSAGE)\n\n # message by user\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.edit_message_text(text=msg.text, chat_id=msg.chat.id, message_id=msg.message_id)\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_set_sticker_set_thumb(bot: Bot, event_loop):\n \"\"\" setStickerSetThumb method test \"\"\"\n\n async with FakeTelegram(message_data=True, loop=event_loop):\n result = await bot.set_sticker_set_thumb(name='test', user_id=123456789, thumb='file_id')\n assert isinstance(result, bool)\n assert result is True\n\n\nasync def test_bot_id(bot: Bot):\n \"\"\" Check getting id from token. 
\"\"\"\n bot = Bot(TOKEN)\n assert bot.id == BOT_ID # BOT_ID is a correct id from TOKEN\n","sub_path":"tests/test_bot.py","file_name":"test_bot.py","file_ext":"py","file_size_in_byte":21630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"215527077","text":"import cStringIO as StringIO\nfrom skimage.io import imsave\n\nfrom flask import Flask, render_template, request\nfrom flask import send_file\n\nfrom demo.demo_embeddings_tf import parse_args, save_super_images, GenerativeModel\nfrom embedding.model import Model\nfrom embedding.preprocessing import normalize\nfrom misc.config import cfg, cfg_from_file\n\napp = Flask(__name__)\n\ntext_model = None\nimg_model = None\nNUM_IMGS = 8\n\n\ndef embed_text(text, text_model):\n texts = [normalize(str(text))]\n embeddings, num_embeddings = text_model.embed(texts), len(texts)\n print('Total number of sentences:', num_embeddings)\n print('num_embeddings:', num_embeddings, embeddings.shape)\n\n return embeddings, num_embeddings, texts\n\n\n@app.route('/')\ndef my_form():\n return render_template('index.html')\n\n\n@app.route('/', methods=['POST'])\ndef my_form_post():\n text = request.form['text'].lower()\n embeddings, num_embeddings, normalized_texts = embed_text(text, text_model)\n hr_imgs, lr_imgs = img_model.generate_n(embeddings, n=NUM_IMGS)\n imgs = save_super_images(lr_imgs, hr_imgs, normalized_texts, 1, startID=0)\n\n print('Generated: %d images' % len(hr_imgs))\n strIO = StringIO.StringIO()\n imsave(strIO, imgs[0], plugin='pil', format_str='png')\n strIO.seek(0)\n\n return send_file(strIO, mimetype='image/jpeg')\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n if args.gpu_id != -1:\n cfg.GPU_ID = args.gpu_id\n\n text_model = Model(\n '/models/fashion/embedding_model/frozen_model.pb',\n '/models/fashion/embedding_model/tokenizer.pickle')\n\n img_model = GenerativeModel(cfg, 1, 1024)\n\n app.run(host='0.0.0.0', port=8080)\n","sub_path":"demo/webapp/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"625125881","text":"#! 
/usr/bin/python3\n# coding: utf-8\n\nimport requests, re, os, logging, json, urllib\n\nOK = 200\n\ndefheader ={\n\t'Referer': '',\n\t'X-SPF-Referer': '',\n\t'X-SPF-Previous': '',\n\t'Host':''\n\t}\n\t\t\nclass request(object):\n\t\n\tdef __init__(self):\n\t\t\n\t\tself.url = None\n\t\tself.href = None\n\t\tself.timestep = None\n\t\t\n\t\tself.header = {\n\t\t\t'Accept': '*/*',\n\t\t\t'Accept-Encoding': 'gzip',\n\t\t\t'Accept-Language': 'ja,en-US;q=0.7,en;q=0.3',\n\t\t\t'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:58.0) Gecko/20120101 Firefox/58.0',\n\t\t\t'Connection': 'keep-alive',\n\t\t\t'Content-Type': 'application/x-www-form-urlencoded'\n\t\t\t}\n\t\t\t\n\t\tself.param = {}\n\t\tself.data = None\n\t\tself.proxies = None\n\t\tself.session = requests.session()\n\t\tself.session.headers = self.header\n\n\tdef set_proxies(self, p):\n\t\tproxies = {}\n\t\tfor i in p.keys():\n\t\t\tif i == 'http' or i == 'https':\n\t\t\t\tproxies[i] = p[i]\n\t\t\n\t\tif len(proxies) > 0:\n\t\t\tself.proxies = proxies\n\n\tdef __set_args(self, url, href, data, params, headers, header_mixed_enable, proxies, proxies_enable):\n\t\tset_args = {}\n\t\tif url:\n\t\t\tself.url = url\n\t\t\tif href:\n\t\t\t\tlogging.warning('found \"href\", but not set')\n\t\telse:\n\t\t\tif href and self.url is not None:\n\t\t\t\tself.url = getBase(self.url) + href\n\t\t\telif href and self.url is None:\n\t\t\t\tlogging.error('error: set URL')\n\t\t\t\treturn \n\t\t\telse:\n\t\t\t\tlogging.error('error: set URL or href')\n\t\t\t\treturn\n\t\tset_args['url'] = self.url\n\n\t\tif data:\n\t\t\tlogging.info('found args \"data\"')\n\t\t\tif type(data) is dict:\n\t\t\t\tset_args['data'] = data\n\t\t\telse:\n\t\t\t\tlogging.warning('args \"data\" is not dict: cannot set data')\n\t\tif params:\n\t\t\tlogging.info('found args \"params\"')\n\t\t\tif type(params) is dict:\n\t\t\t\tset_args['params'] = params\n\t\t\telse:\n\t\t\t\tlogging.warning('args \"params\" is not dict: cannot set params')\n\t\tif headers:\n\t\t\tlogging.info('found args \"headers\"')\n\t\t\tif type(headers) is not dict:\n\t\t\t\tlogging.warning('args \"headers\" is not dict: cannot set headers')\n\t\t\telse:\n\t\t\t\tif header_mixed_enable:\n\t\t\t\t\tset_args['headers'] = self.session.headers\n\t\t\t\t\tset_args['headers'].update(headers)\n\t\t\t\telse:\n\t\t\t\t\tset_args['headers'] = headers\n\t\telse:\n\t\t\tset_args['headers'] = self.session.headers\n\n\t\tif (proxies or self.proxies) and proxies_enable:\n\t\t\tlogging.info('found args \"proxies\"')\n\t\t\tif type(proxies) is not dict:\n\t\t\t\tlogging.warning('args \"proxies\" is not dict: cannot set proxies')\n\t\t\telif proxies:\n\t\t\t\tif 'http' in proxies.keys() or 'https' in proxies.keys():\n\t\t\t\t\tset_args['proxies'] = proxies\n\t\t\telse:\n\t\t\t\tset_args['proxies'] = self.proxies\n\n\t\treturn set_args\n\n\tdef get(self, url=None, href=None, data=None, params=None, headers=None, header_mixed_enable=True, proxies=None, proxies_enable=True):\n\t\tset_args = self.__set_args(url, href, data, params, headers, header_mixed_enable, proxies, proxies_enable)\t\t\n\t\tres = self.session.get(**set_args)\n\t\tself.status_code = res.status_code\n\n\t\tif self.status_code == OK:\n\t\t\tself.payload = res\n\t\t\treturn res\n\t\telse:\n\t\t\tlogging.error('get error: bad status: {}'.format(str(res)))\n\n\tdef post(self, url=None, href=None, data=None, params=None, headers=None, header_mixed_enable=True, proxies=None, proxies_enable=True):\n\t\tset_args = self.__set_args(url, href, data, params, headers, 
header_mixed_enable, proxies, proxies_enable)\t\t\n\t\tres = self.session.post(**set_args)\n\t\tself.status_code = res.status_code\n\t\t\n\t\tif res.status_code == OK:\n\t\t\tself.payload = res\t\t\t\n\t\t\treturn res\n\t\twith open('r_error', 'wb') as f:\n\t\t\tf.write(res.content)\n\t\tlogging.error('post error: bad status: {}'.format(str(res)))\n\n\tdef setContentLength(self):\n\t\tlength = 0\n\t\tfor k, v in self.data.items():\n\t\t\tlength += len('{}={}'.format(k, urllib.parse.quote(v))) + 1\n\t\t\n\t\tContentLength = length - 1\n\t\tCLheader = {'Content-Length': ContentLength}\n\t\t\n\t\tself.header.update(CLheader)\n\t\t\n\t\treturn CLheader\n\t\t\t\n\tdef setReferer(self):\t\t\n\t\treturn self.header.update({'Referer': self.url})\n\t\t\n\tdef setHeader(self):\n\t\tself.session.headers = self.header\n\t\treturn self.header\n\t\t\n\tdef setHost(self):\n\t\t\n\t\thttp = r'http://(.+?)/'\n\t\thttps = r'https://(.+?)/'\n\t\t\n\t\turl = re.match(http, self.url) or re.match(https,self.url)\n\t\t\n\t\tif url:\n\t\t\treturn self.header.update({'Host':url.group(1)})\n\t\t\n\t\thttp = r'http://(.+)'\n\t\thttps = r'https://(.+)'\n\t\t\n\t\turl = re.match(http, self.url) or re.match(https, self.url)\n\t\t\n\t\tif url:\n\t\t\treturn self.header.update({'Host':url.group(1)})\n\n\tdef headers(self):\n\t\treturn self.session.headers\n\t\t\ndef getBase(url):\n\thttp = r'(http://.+?)/'\n\thttps = r'(https://.+?)/'\n\t\n\tbase = re.match(http, url) or re.match(https,url)\n\tif base:\n\t\treturn base.group(1)\n\t\n\thttp = r'(http://.+)'\n\thttps = r'(https://.+)'\n\t\n\tbase = re.match(http, url) or re.match(https, url)\n\tif base:\n\t\treturn base.group(1)","sub_path":"request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"138995102","text":"#! 
/usr/bin/python3\n\nimport pandas as pd\nimport quandl\nimport pickle\n\n\ndef grab_data():\n with open('quandlkey', 'r') as keyfile:\n authkey = keyfile.read().strip()\n\n datas = quandl.get(\"FMAC/HPI\", authtoken=authkey)\n\n with open('datas.pickle', 'wb') as pickle_out:\n pickle.dump(datas, pickle_out)\n\n\n# not used any more\ndef state_list():\n states = pd.read_html('https://www.infoplease.com/state-abbreviations-and-state-postal-codes')\n return states[0][2][1:]\n\n# grab_data()\n\nwith open('datas.pickle', 'rb') as pi:\n datas = pickle.load(pi)\n# print(datas.columns)\ndatas.drop(['United States not seasonaly adjusted'], axis=1, inplace=True)\n\ndatas.to_pickle('pdatas.pickle')\ndatas2 = pd.read_pickle('pdatas.pickle')\nprint(datas2.head())\n","sub_path":"pandas/07_pickle.py","file_name":"07_pickle.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"18987547","text":"\n\nfrom xai.brain.wordbase.nouns._swinger import _SWINGER\n\n#class header\nclass _SWINGERS(_SWINGER, ):\n\tdef __init__(self,): \n\t\t_SWINGER.__init__(self)\n\t\tself.name = \"SWINGERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"swinger\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_swingers.py","file_name":"_swingers.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"651162810","text":"# %%\nimport pandas as pd\nimport numpy as np\nfrom graphviz import Digraph\nimport scipy.stats\nimport plotly.graph_objs as go\nimport plotly.offline as py\nimport plotly.express as px\nfrom .Hypnogram import Hypnogram\n\nclass MarkovChain:\n\n def __init__(self, hypgr: Hypnogram):\n self.stage = None if hypgr==None else hypgr.stage\n self._probabilities = pd.DataFrame()\n \n \n def calc_probabilities(self):\n if not self._probabilities.empty: return self._probabilities\n dd = pd.DataFrame(self.stage)\n dd['next_stage'] = dd.stage.shift(-1)\n dd = dd.dropna()\n dd['trans_count'] = 0\n dd1 = dd.groupby(['stage','next_stage'], as_index =False).trans_count.count()\n dd1['prob'] = [x.trans_count/dd1.groupby(['stage']).trans_count.sum()[x.stage] for (_, x) in dd1.iterrows()]\n self._probabilities = dd1\n return self._probabilities\n\n def get_graph(self, title=None):\n df = self.calc_probabilities()\n G=Digraph(format='svg')\n \n G.graph_attr['fontname']=G.node_attr['fontname']=G.edge_attr['fontname']=font = 'arial'\n G.graph_attr['label']= f\"{title}\\n\\n\"\n G.graph_attr['labelloc']= 't'\n G.graph_attr['labeldistance']= '500'\n G.graph_attr['fontsize'] = '16'\n \n G.node_attr['shape']='circle' \n G.node_attr['style']='filled'\n G.node_attr['fillcolor']='#ddddff'\n G.node_attr['fontsize'] = '10'\n \n for (_,trans) in df.iterrows():\n G.edge(trans.stage, \n trans.next_stage, \n label=str(np.round(trans.prob*100, 1))+'%', \n penwidth=str(np.maximum(0.1,3.0*trans.prob)), \n fontsize=str(8+6.0*trans.prob))\n\n G.node('Wake', style='filled', fillcolor='#ddffdd')\n G.node('REM', style='filled', fillcolor='#ffdddd')\n return G\n\n def probability_matrix(self):\n df = self.calc_probabilities()\n prob_matrix = df.pivot('stage','next_stage','prob').fillna(0.0)\n return prob_matrix\n\n def probability_heatmap(self):\n probs = self.probability_matrix()*100\n fig = go.Figure(data=go.Heatmap(z=probs[probs.columns[::-1]],x=probs.columns,y=probs.index[::-1], colorscale=px.colors.sequential.Emrld))\n return fig\n \n\n# 
%%\n","sub_path":"hypnos/hypnograms/MarkovChain.py","file_name":"MarkovChain.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"413075055","text":"#Written by Maria L. File that allows us to send tweets to twitter from the chatbot. \n\nimport tweepy\n\n###### TWITTER API #############################################\n\nconsumer_key= \"EuvpBtbfkdpycmVxR14gH7mph\" \nconsumer_secret= \"jr8t52Xof1bliEQIFD5MUe1qK2eYEnBOrG7T5QReanW125KgAs\" \naccess_token = \"862722342501232640-E1p1EwoAOxZlgbRGWCUELKmgkrQpvLx\" \naccess_token_secret = \"LrvxY0HLqsxsYNvpUUi8kyEk0xMkjPgQz6lrO0TEz5AMC\" \n#Logging into twitter using the tweetpy wrapper\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n\ndef sendTweet(tweetData):\n api.update_status(tweetData)\n \n \ndef getRecentTweetLink():\n me = api.me()\n # print me.screen_name\n # print me.screenname\n # print me.id\n \n tweet = api.user_timeline(id=me.id, count = 1)[0]\n # print tweet\n # print tweet.id\n # print str(tweet.id)\n url = \"https://twitter.com/\" + me.screen_name + \"/status/\" + str(tweet.id)\n # print url\n return url\n \n # https://twitter.com/CST_205_BOT/status/862739416686743553","sub_path":"twitterPost.py","file_name":"twitterPost.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"88566081","text":"#! /usr/bin/python\n\nimport sys, os\n\nimport configparser, io\nfrom dynaconfig.read import *\n\nfrom utils import *\n\nimport pytest\n\n# ini parser loads everything as a string...\n\n#logging.basicConfig( level=logging.DEBUG )\n\ndef test_simple_ini():\n # the ini parser parses all values as strings.\n # so if we want values to be numbers, we have to put them into expressions.\n data = '''\n[main]\nvar1 = $(1)\nvar2 = some string\nvar3 = $(3)\nvar4 = $(${var3} + math.pi + 2)\nvar5 = $(${var4} + 2.0)\n[nest1]\nvar1 = $(11)\nvar2 = $(${var3} + 12)\nvar3 = $(${var1} + 12)\nvar4 = $(${var3} + 12)\nvar5 = $(${/nest1/var3} + 12)\n'''\n\n data = readConfig( data, parser = ini.load, ignore_unparsed_expressions = True )\n\n assert data['main']['var1'] == 1\n assert data['main']['var2'] == 'some string'\n assert data['main']['var3'] == 3\n assert data['main']['var4'] == Approx(3 + 3.14159 + 2)\n assert data['main']['var5'] == Approx(3 + 3.14159 + 2 + 2.0)\n assert data['nest1']['var1'] == 11\n assert data['nest1']['var2'] == 11 + 12 + 12\n assert data['nest1']['var3'] == 11 + 12\n assert data['nest1']['var4'] == 11 + 12 + 12\n assert data['nest1']['var5'] == 11 + 12 + 12\n\n@pytest.mark.skip(reason=\"need to figure out how to get ini to parse numbers as numbers.\")\ndef test_configParser():\n '''A vanilla configparser example'''\n text= '''\n[main]\nvar1 = 1\nvar2 = some string\nvar3 = 3\nvar4 = %(var3)s\nvar5 = %(var4)s\n[nest1]\nvar1 = 11\nvar2 = %(var3)s\nvar3 = %(var1)s\nvar4 = %(var3)s\n#var5 = %(main.var3)s # this will cause an interpolation error\n'''\n\n parser = configparser.ConfigParser()\n f = io.StringIO(text)\n parser.readfp( f )\n f.close()\n\n data = dict()\n for sec in parser.sections():\n data[sec] = dict()\n for opt in parser.options(sec):\n data[sec][opt] = parser.get( sec, opt 
)\n\n","sub_path":"testing/test_ini.py","file_name":"test_ini.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"616906352","text":"#!/usr/bin/python3\n# _*_ coding:utf-8 _*_\n\"\"\"\n@author:GT\n@file:sentimgtofriend.py\n@time:4/16/201811:24 AM\n\"\"\"\nimport itchat\nitchat.auto_login(hotReload=True)\nuser = itchat.search_friends(userName='@7bd1325175c9d816c7e9cd4fb228af0f')\nitchat.send_image(fileDir=r'T:\\inetpub\\wwwroot\\fis\\welcome.png',toUserName=user)\nprint('img sent')\nitchat.send(msg='程序自动发送测试',toUserName=user)\nprint('msg sent')","sub_path":"wechatpyops/sentimgtofriend.py","file_name":"sentimgtofriend.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"205212783","text":"# built-in libraries\nfrom __future__ import division\nfrom copy import deepcopy\nimport time\n# external libraries\nimport numpy as np\n# my files\nfrom PCA_custom import PCA_custom\n\n\ndef pca_bdreg(bdpc, VamModel, BuildModel):\n print('## pca_bdreg.py')\n start = time.time()\n Nuu = int(round(len(bdpc.T[0])))\n bdpct = deepcopy(bdpc)\n\n if BuildModel:\n mmx = np.ones((Nuu, 1)) * np.mean(bdpct, axis=0)\n else:\n mmx = np.ones((Nuu, 1)) * VamModel['mdd']\n smx = np.ones(bdpct.shape)\n test = np.divide((bdpct - mmx), smx)\n if BuildModel:\n # latent is not used later\n pc, score, latent = PCA_custom(test)\n VamModel['pc'] = pc\n VamModel['eigenvalues'] = latent\n else:\n pc = VamModel['pc']\n score = np.dot(test, pc)\n mdd = mmx[0]\n VamModel['mdd'] = mdd\n end = time.time()\n print('For PCA bdreg, elapsed time is ' + str(end - start) + 'seconds...')\n return score, VamModel\n\n","sub_path":"VAMPIRE/pca_bdreg.py","file_name":"pca_bdreg.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"585378094","text":"from turtle import *\nlong = int(input('Longueur : '))\nnc = int(input('Nombre de cotés : '))\n\ncolor('blue', 'gold')\n\nbegin_fill()\nfor _ in range(nc):\n forward(long)\n left(360 / nc)\nend_fill()\n\nmainloop()\n","sub_path":"codes/s01/p05.py","file_name":"p05.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"98658163","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Defines a tree structure that should be able to return the following:\n1. report the information gain at each node\n2. training and testing error for each tree\n3. limit the minimum number of data points at a node to k.\n4. limit the available features for sorting at each node to m.\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom scipy.stats import mode\nimport numpy as np\n\n__all__ = ['informationGain',\n 'H',\n 'tree',\n 'forest']\n\n\ndef informationGain(data, feature):\n \"\"\"Computes the information gain from dividing the data points into the\n two groups, Finds the best threshold in the data for a given feature.\n Returns the threshold and the information gain? 
information gain from using\n \"\"\"\n entropy0 = H(data)\n best_gain = 0\n best_threshold = 0\n\n # try all the thresholds\n for thresh in np.unique(data[:, feature]):\n A = data[data[:, feature] <= thresh, :]\n p = A.size / data.size\n B = data[data[:, feature] > thresh, :]\n\n gain = entropy0 - (p*H(A) + (1-p)*H(B))\n if gain > best_gain:\n best_gain = gain\n best_threshold = thresh\n\n # return the one with the maximum gain\n return best_threshold, best_gain\n\n\ndef H(data):\n \"\"\"p is an array of fractions in the range (0,1], and H is the entropy of\n the system.\"\"\"\n classes = np.unique(data[:, -1])\n p = np.zeros(classes.size)\n for c in range(0, classes.size):\n p[c] = np.sum(data[:, -1] == classes[c]) / data.shape[0]\n\n p[p == 0] = 1 # prevent negative infinity\n return -np.sum(p*np.log2(p))\n\n\nclass forest(object):\n \"\"\"A python random forest\"\"\"\n def __init__(self, data, size):\n self.data = np.atleast_2d(data)\n self.size = size\n self.result = None\n\n self.trees = [None] * size\n N = data.shape[0]\n for i in range(size):\n sampling = np.random.randint(0, N, N)\n subdata = data[sampling, :]\n self.trees[i] = tree(subdata)\n\n def train(self, k=1, m=None):\n for t in self.trees:\n t.train(k, m)\n\n def sort(self, data):\n self.data = np.atleast_2d(data)\n\n votes = -np.ones([self.data.shape[0], self.size])\n for i in range(len(self.trees)):\n votes[:, i] = self.trees[i].sort(self.data)\n\n self.result = mode(votes, axis=1).mode.flatten()\n return self.result\n\n @property\n def error(self):\n if self.result is None:\n self.sort(self.data)\n return np.sum(self.data[:, -1] != self.result) / self.data.shape[0]\n\n\nclass tree(object):\n \"\"\"A python tree.\"\"\"\n def __init__(self, data):\n if data.size == 0:\n raise ValueError(\"Data cannot be empty.\")\n self.data = np.atleast_2d(data)\n # determine the majority label\n self.label = np.argmax(np.bincount(self.data[:, -1].astype(int)))\n\n self.feature = None # the feature used to sort at this node\n self.test = None # the threshold for the feature at this node\n self.information_gain = 0\n\n # tree information\n self.left = None # feature <= test\n self.right = None # feature > test\n self.parent = None\n self.depth = None\n\n def __str__(self, label=1):\n if self.isLeaf:\n print(\"NODE %i: class is %i\" % (label, self.label))\n print(self.data)\n return ''\n else:\n print(\"NODE %i: threshold %i is %f\" % (label, self.feature,\n self.test))\n print(self.left.__str__(( label << 1) ))\n print(self.right.__str__((label << 1) + 1))\n return ''\n\n @property\n def isLeaf(self):\n \"\"\"Returns true when this node is a leaf.\"\"\"\n return self.left is None and self.right is None\n\n @property\n def error(self):\n \"\"\"Returns error for this branch of the tree.\"\"\"\n if self.data.size < 1:\n return 0\n if self.isLeaf:\n # Calculate the error\n return np.sum(self.label != self.data[:, -1]) / self.data.shape[0]\n else:\n # return the error of the children\n return (self.right.data.size * self.right.error +\n self.left.data.size * self.left.error) / self.data.size\n\n def sort(self, data):\n \"\"\"Sorts data into the tree. 
Returns class labels for each data point.\n \"\"\"\n self.data = np.atleast_2d(data)\n\n if self.isLeaf:\n return np.ones(self.data.shape[0])*self.label\n else:\n goesToLeftNode = self.data[:, self.feature] <= self.test\n goesToRightNode = np.invert(goesToLeftNode)\n\n leftlabels = self.left.sort(self.data[goesToLeftNode, :])\n rightlabels = self.right.sort(self.data[goesToRightNode, :])\n\n labels = -np.ones(self.data.shape[0])\n labels[goesToLeftNode] = leftlabels\n labels[goesToRightNode] = rightlabels\n assert(np.all(labels > -1))\n return labels\n\n def train(self, k=1, m=None):\n \"\"\"Learns a tree from the data.\n k : when the number of data points at this node is <= k,\n this node becomes a leaf. default is 1.\n m : the number of features that are chosen from at each split.\n \"\"\"\n assert(self.data is not None), \"Can't train on no data.\"\n # determine whether this node needs to be divided\n assert(k > 0), \"Can't have less than one data point at a leaf\"\n if self.data.shape[0] <= k or np.all(self.data[:, -1] == self.data[0, -1]):\n return # this is a leaf node\n\n # determine the features you are choosing from\n if m is None:\n m = self.data.shape[1] - 1\n assert(m > 0), \"At least one choice is required for each node.\"\n features = np.arange(self.data.shape[1] - 1)\n np.random.shuffle(features)\n features = features[:m]\n\n # try each feature\n for f in features:\n threshold, gain = informationGain(self.data, f)\n\n # keep the feature with the most information gain\n if gain > self.information_gain:\n self.information_gain = gain\n self.test = threshold\n self.feature = f\n\n if self.information_gain > 0:\n # split the data according to the best feature\n self.left = tree(self.data[self.data[:, self.feature] <= self.test, :])\n self.right = tree(self.data[self.data[:, self.feature] > self.test, :])\n\n # train the children\n self.left.train(k, m)\n self.right.train(k, m)\n\n return\n","sub_path":"trees/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":6798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"638832239","text":"# https://www.indico.io/docs#quickstart\nimport indicoio\nimport csv\nINDICO_API_KEY='c0bc478143e7f803d52f87090980623d'\nindicoio.config.api_key = INDICO_API_KEY\n\ndata = open('data.csv')\nreader = csv.reader(data)\ndefinitions = []\nfor row in reader:\n\tdefinitions.append(row)\n\ndata.close()\n\nhighest = 0\nhighest2 = 0\nhighest3 = 0\n\n\nfor definition in definitions:\n\tif definition[0] == 'interrogation':\n\t\tvs = indicoio.sentiment(definition[1])\n\t\tif vs > highest:\n\t\t\thighest = vs\n\t\t\tinterrogation = definition[1]\n\telif definition[0] == 'surveillance':\n\t\tvs = indicoio.sentiment(definition[1])\n\t\tif vs > highest2:\n\t\t\thighest2 = vs\n\t\t\tsurveillance = definition[1]\n\telse:\n\t\tvs = indicoio.sentiment(definition[1])\n\t\tif vs > highest3:\n\t\t\thighest3 = vs\n\t\t\tprivacy = definition[1]\nprint('Most positive interrogation definition: ',highest, interrogation)\nprint('Most positive surveillance definition: ',highest2, surveillance)\nprint('Most positive privacy definition: ',highest3, privacy)\n#phrase = 'Privacy is the selective power to reveal onself to the world'\n# single example\n#vs = indicoio.sentiment(phrase)\n#vp = indicoio.political(phrase)\n\n#print('sentiment: ',vs)\n#print('politics: 
',vp)","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"347259203","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport os.path\nimport sys\n\nimport inspect\n\ndef whoami(param=False):\n func_info = \"L{0}:{1}()\".format(*inspect.stack()[1][2:4])\n values = \"param={}\".format(inspect.getargvalues(inspect.stack()[1][0]).locals)\n return (func_info + \" \" + values) if param else (func_info)\n\n\nfrom PyQt5 import QtCore\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\n\nfrom simulator import scan, __serial_ver__, Simulator\n\nimport app_rc\n\n__title__ = \"Мониторинг последовательного канала КЭД\"\n__version__ = \"0.0.1\"\n__author__ = \"Александр Смирнов\"\n\nRANGES = (0, 50, 100)\n\nstates__ = {\n \"degaus\": {'id': 0, \"name\": \"РУ\", \"color\": \"#FFFFFF\"},\n \"max\": {\"id\": 1, \"name\": \"+9.99\", \"color\": \"#FFFF00\"},\n \"zero\": {\"id\": 2, \"name\": \"0.00\", \"color\": \"#FF00FF\"},\n \"min\": {\"id\": 3, \"name\": \"-9.99\", \"color\": \"#F0FFFF\"}\n}\n\ninfo = (__title__, __version__, sys.version[:6],\n QtCore.QT_VERSION_STR, QtCore.PYQT_VERSION_STR,\n __serial_ver__, __author__)\n\ninfo_msg = '{0} \\nСборка: 2017-05-22 версия {1}\\nАвтор: {6}\\n\\n' \\\n 'Python: {2}\\nQt: {3}\\nPyQt: {4}\\nPyserial: {5}\\n'.format(*info)\n\nsettings = {\n 'range': RANGES[0]\n}\n\n\nclass Ui(QMainWindow):\n rangeDefault = RANGES[0]\n stateDefault = '0.00'\n\n def __init__(self):\n super().__init__()\n\n self.simulator = Simulator()\n\n self.timer_id = 0\n self.rangeCurrent = self.rangeDefault\n self.status = {}\n\n self.createUI()\n\n def _center(self):\n frameGm = self.frameGeometry()\n screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos())\n centerPoint = QApplication.desktop().screenGeometry(screen).center()\n frameGm.moveCenter(centerPoint)\n self.move(frameGm.topLeft())\n\n def createUI(self):\n self.setWindowTitle(__title__)\n self.setWindowIcon(QIcon(\":/rc/logo.png\"))\n self.resize(700, 500)\n self.setMaximumSize(800, 500)\n\n self.centralWgt = QWidget(self)\n self.setCentralWidget(self.centralWgt)\n\n # Create widgets\n self.createButtons()\n self.createPortbox()\n self.createDegausBox()\n self.createCoilBox()\n self.createExtrabox()\n self.displays = LcdBox(self)\n self.tuner = TunerBox(self, initial=self.stateDefault)\n self.createStatusbar()\n\n # Layouts\n self.centralLayout = QVBoxLayout(self.centralWgt)\n settingsLayout = QHBoxLayout()\n settingsLayout.addWidget(self.buttonsWgt)\n settingsLayout.addWidget(self.portbox)\n settingsLayout.addWidget(self.channelbox)\n settingsLayout.addWidget(self.coilbox)\n settingsLayout.addWidget(self.extrabox, 2)\n\n self.centralLayout.addLayout(settingsLayout)\n self.centralLayout.addWidget(self.displays)\n self.centralLayout.addWidget(self.tuner)\n\n # Connect signals/slots for switch channel\n #self.channelGroup.buttonClicked.connect(self.onChannelChange)\n\n # Connect signals/slots for switch coil\n for action in (self.onCoilSwitch,\n self.displays.switch):\n self.coilGroup.buttonClicked['int'].connect(action)\n\n self._center()\n\n self.show()\n\n def createButtons(self):\n self.buttonsWgt = QWidget(self)\n buttonsLayout = QVBoxLayout(self.buttonsWgt)\n\n self.buttons = {}\n for (name, key, shortcut, enabled, action, icon) in (\n ('старт', 'start', 'Ctrl+R', True, self.onStart, ':/rc/red-start.png'),\n ('стоп', 'stop', 'Ctrl+S', False, 
self.onStop, ':/rc/red-stop.png'),\n ('справка', 'about', 'Ctrl+A', True, self.onAbout, ':/rc/red-about.png'),\n ('выход', 'exit', 'Esc', True, self.onQuit, ':/rc/red-quit.png')\n ):\n button = QPushButton(name.capitalize())\n button.setEnabled(enabled)\n button.setFixedSize(100, 25)\n button.setIcon(QIcon(icon))\n button.setStyleSheet(\"text-align: left\")\n button.clicked.connect(action)\n\n buttonsLayout.addWidget(button)\n buttonsLayout.setSpacing(1)\n\n self.buttons[key] = button\n\n def createPortbox(self):\n self.portbox = QGroupBox('Настройка портов', self)\n layout = QFormLayout(self.portbox)\n layout.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow)\n\n ports = self.ports = {}\n for name, key, action in (\n ('ацп', 'input', self.onInputChange),\n ('кэд', 'output', self.onOutputChange)\n ):\n cb = QComboBox()\n cb.setFixedWidth(80)\n layout.addRow(\"{}: \".format(name.upper()), cb)\n\n ports[key] = cb\n ports[key].currentIndexChanged['QString'].connect(action)\n\n # Add button for scan available ports\n btnRescan = QPushButton('Обновить')\n btnRescan.setFixedWidth(80)\n btnRescan.clicked.connect(self.on_rescan)\n layout.addWidget(btnRescan)\n\n def createDegausBox(self, test=1):\n print(whoami())\n self.channelbox = QGroupBox('Настройки РУ', self)\n layout = QFormLayout(self.channelbox)\n\n for name, items in (\n ('каналы', ['43К', '150К']),\n ('макс. ток', ['10A', '55A'])\n ):\n cb = QComboBox()\n cb.setFixedWidth(80)\n cb.addItems(items)\n cb.setStyleSheet(\"text-align: left\")\n layout.addRow(\"{}: \".format(name.capitalize()), cb)\n\n btnChannels = QPushButton('Каналы')\n btnChannels.setFixedWidth(80)\n btnChannels.clicked.connect(self.on_rescan)\n\n layout.addWidget(btnChannels)\n\n def createCoilBox(self):\n self.coilbox = QGroupBox('Группа обмоток', self)\n self.coilbox.setDisabled(True)\n layout = QVBoxLayout(self.coilbox)\n\n self.coilGroup = QButtonGroup(self.coilbox)\n for index, name in enumerate((\"1 - 50\", \"51 - 100\", \"101 - 150\")):\n rb = QRadioButton(name)\n if index == 0:\n rb.setChecked(True)\n layout.addWidget(rb)\n self.coilGroup.addButton(rb, index)\n layout.addStretch(2)\n\n def createExtrabox(self):\n self.extrabox = QGroupBox('Режим', self)\n layout = QVBoxLayout(self.extrabox)\n\n self.modeGroup = QButtonGroup(self.coilbox)\n for index, name in enumerate((\"Сдаточный\", \"Тестовый\")):\n rb = QRadioButton(name)\n if index == 0:\n rb.setChecked(True)\n self.modeGroup.addButton(rb, index)\n layout.addWidget(rb)\n layout.addStretch(2)\n\n def createStatusbar(self):\n for (key, text) in (\n ['rx', 'Rx'],\n ['tx', 'Tx'],\n ['er', 'Er'],\n ):\n wgt = QLabel(' {}: {}'.format(text, 0))\n wgt.setFixedWidth(60)\n stretch = 2 if key == 'er' else 0\n self.statusBar().addPermanentWidget(wgt, stretch)\n self.status[key] = wgt\n\n pix = QLabel()\n self.statusBar().addPermanentWidget(pix)\n self.status['pixmap'] = pix\n self.updatePixmap('idle')\n\n def onInputChange(self, name):\n if self.availablePorts:\n self.port_in = name\n if self.port_in and self.port_in == self.port_out:\n pass\n # self.onRechange(who='output')\n\n def onOutputChange(self, name):\n if self.availablePorts:\n self.port_out = name\n if self.port_out and self.port_out == self.port_in:\n pass\n # self.onRechange(who='input')\n\n def onRechange(self, who):\n print(inspect.currentframe())\n\n def on_rescan(self):\n self.availablePorts = scan()\n num = len(self.availablePorts)\n\n def onChannelChange(self):\n self.ced_model = self.channelGroup.checkedButton().text()\n\n # Disable switcher coil 
for 43-channel CED\n if self.ced_model == \"43\":\n self.coilbox.setDisabled(True)\n else:\n self.coilbox.setEnabled(True)\n\n def onCoilSwitch(self, index):\n self.rangeCurrent = RANGES[index]\n\n def updatePixmap(self, state=None):\n if not state:\n state = \"noconnect\"\n pixmaps = {\n 'noconnect': {'ico': \":/rc/network-offline.png\", 'description': 'нет подключения'},\n 'idle': {'ico': \":/rc/network-idle.png\", 'description': 'ожидание'},\n 'rx': {'ico': \":/rc/network-receive.png\", 'description': 'прием'},\n 'tx': {'ico': \":/rc/network-transmit.png\", 'description': 'передача'},\n 'error': {'ico': \":/rc/network-error.png\", 'description': 'ошибка'}\n }\n self.status['pixmap'].setPixmap(QPixmap(pixmaps[state]['ico']))\n self.status['pixmap'].setToolTip(pixmaps[state]['description'])\n\n def updateStatus(self, key, value):\n self.status[key].setText(' {}: {}'.format(key, value))\n\n def closeEvent(self, event):\n self.onQuit()\n\n def timerEvent(self, event):\n self.simulator.run()\n self.blinkPixmap()\n self.updateStatus('Tx', self.simulator.counterTx)\n\n def blinkPixmap(self):\n if self.isBlink:\n self.updatePixmap('tx')\n self.isBlink = False\n else:\n self.updatePixmap('rx')\n self.isBlink = True\n\n def onStart(self):\n print(self.tuner.getSettings())\n self.lock(True)\n # self.simulator.start(self, self.template, opt)\n # self.timer_id = self.startTimer(1000, timerType=QtCore.Qt.PreciseTimer)\n\n def onStop(self):\n pass\n # if self.timer_id:\n # self.killTimer(self.timer_id)\n # self.timer_id = 0\n # self.simulator.stop()\n self.lock(False)\n # self.displays.clear()\n # self.updateStatusPixmap('idle')\n\n def lock(self, is_lock):\n self.buttons['start'].setDisabled(is_lock)\n self.buttons['stop'].setEnabled(is_lock)\n self.portbox.setDisabled(is_lock)\n self.channelbox.setDisabled(is_lock)\n self.extrabox.setDisabled(is_lock)\n self.tuner.setDisabled(is_lock)\n\n def onAbout(self):\n QMessageBox.about(self, \"О программе\", info_msg)\n\n def update_lcd(self, data):\n self.displays.update(data)\n\n def onQuit(self):\n if self.timer_id:\n self.killTimer(self.timer_id)\n self.simulator.stop()\n QtCore.QCoreApplication.exit(0)\n\n\nclass LcdBox(QGroupBox):\n LCD_NOT_VALUE = \"-----\"\n\n def __init__(self, parent, *args, **kwargs):\n super().__init__(\"I, A\", parent, *args)\n self.setMinimumHeight(200)\n\n # For store (Label, LCDNumber)\n self._indicators = []\n\n self._createUi()\n\n def _createUi(self):\n layout = QGridLayout(self)\n\n for col in range(10):\n for row in range(5):\n iLayout = QHBoxLayout()\n\n label = QLabel(str(col * 5 + row + 1))\n label.setAlignment(QtCore.Qt.AlignCenter)\n label.setFixedWidth(20)\n iLayout.addWidget(label)\n\n lcd = QLCDNumber()\n lcd.setStyleSheet(\"QLCDNumber {background-color: #FFFFFF}\")\n lcd.setDigitCount(5)\n lcd.setSegmentStyle(QLCDNumber.Flat)\n lcd.display(self.LCD_NOT_VALUE)\n iLayout.addWidget(lcd)\n\n self._indicators.append([label, lcd])\n\n layout.addLayout(iLayout, row, col)\n\n def _clear(self):\n for _, lcd in self._indicators:\n lcd.display(self.LCD_NOT_VALUE)\n\n def switch(self, index):\n for n, (label, _) in enumerate(self._indicators):\n text = str((RANGES[index] + n) + 1)\n label.setText(text)\n\n def clear(self):\n data = 50 * ['-----']\n self.update(data)\n\n def update(self, data):\n for (value, (_, lcd)) in zip(data, self._indicators):\n lcd.display(value)\n\n\nclass TunerBox(QGroupBox):\n states = ('РУ', '+9.99', '0.00', '-9.99')\n\n def __init__(self, parent, states=None, default_state='0.00', *args, 
**kwargs):\n super(TunerBox, self).__init__('Настройка каналов', parent)\n self.setFixedHeight(180)\n self.parent_ = parent\n\n self.default_state = default_state\n self.state_index = self.states.index(self.default_state)\n self.createUi()\n\n def createUi(self):\n self.createGlobalSwitcher()\n self.createChannels()\n\n layout = QHBoxLayout(self)\n layout.addWidget(self.globalSwitcher)\n layout.addWidget(QSplitter(QtCore.Qt.Vertical))\n layout.addWidget(self.channelWgt)\n\n def createGlobalSwitcher(self):\n self.globalSwitcher = QWidget(self)\n self.globalSwitcher.setFixedWidth(80)\n\n layout = QVBoxLayout(self.globalSwitcher)\n\n self.switcherGroup = QButtonGroup(self.globalSwitcher)\n for index, name in enumerate(self.states):\n radio = QRadioButton(name)\n layout.addWidget(radio)\n if name == self.default_state:\n radio.setChecked(True)\n self.switcherGroup.addButton(radio, index)\n self.switcherGroup.buttonClicked[QAbstractButton].connect(self.globalSwitched)\n\n def createChannels(self):\n self.channelWgt = QWidget(self)\n\n layout = QGridLayout(self.channelWgt)\n\n self.channelGroup = QButtonGroup()\n for col in range(10):\n for row in range(5):\n index = col * 5 + row\n\n label = QLabel(str(index + 1))\n label.setAlignment(QtCore.Qt.AlignCenter)\n label.setFixedWidth(20)\n\n btn = QPushButton(self.default_state)\n btn.setFixedWidth(50)\n\n hbox = QHBoxLayout()\n hbox.addWidget(label)\n hbox.addWidget(btn)\n layout.addLayout(hbox, row, col)\n\n self.channelGroup.addButton(btn, index)\n self.channelGroup.buttonClicked[QAbstractButton].connect(self.switched)\n\n def globalSwitched(self, btn):\n for channel in self.channelGroup.buttons():\n channel.setText(btn.text())\n\n def switched(self, btn):\n state= btn.text()\n index = self.states.index(state) + 1\n index %= len(self.states)\n btn.setText(self.states[index])\n\n def getSettings(self):\n res = []\n for i,b in enumerate(self.channelGroup.buttons()):\n res.append(b.text())\n return res\n\nclass UserialMainWindow(Ui):\n pass\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n\n # Add icon in the taskbar (only windows)\n if sys.platform == 'win32':\n import ctypes\n\n myappid = u'navi-dals.kf1-m.qt5-userial.001' # arbitrary string\n ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\n app.setWindowIcon(QIcon(':/rc/Interdit.ico'))\n\n model = Simulator()\n\n ex = UserialMainWindow()\n sys.exit(app.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"621573573","text":"#=========================================================================================#\n# USER NAME: Thach Le \n# FILE NAME: 022-climbing-the-leaderboard.py\n# FILE PATH: /E/thach-working/hackerrank/problem-solving/022-climbing-the-leaderboard.py\n#=========================================================================================#\n# import library\nimport os\nimport sys\nfrom collections import Counter\nimport numpy as np\n\n# main function \ndef main():\n\tfile_content = open(\"022-climbing-the-leaderboard-tc\", 'r').read().replace('\\r\\n', '\\n')\n\tline = file_content.split('\\n')\n\n\tinput_temp = line[0].strip()\n\t# input_temp = input().strip()\n\t# n = int(input_temp)\n\tinput_temp = line[1].strip()\n\t# input_temp = input().strip()\n\t\t# string \n\t\t# -> split to int element\n\t\t# -> add to Counter\n\t\t# -> convert to list with unique element \n\tlist_leaderboard = 
list(Counter([int(element) for element in input_temp.split()]))\n\t\t# -> convert the shortend list to array \n\tarray_leaderboard = np.array(list_leaderboard)\n\t\n\tinput_temp = line[2].strip()\n\t# input_temp = input().strip()\n\tm = int(input_temp)\n\tinput_temp = line[3].strip()\n\t# input_temp = input().strip()\n\tfor element in input_temp.split():\n\t\t\t# compare array_leaderboard with element -> boolean\n\t\t\t# -> convert to counter -> counter number of TRUE and FALSE \n\t\t\t# -> conver couter to dictionary -> to find how many TRUE and FALSE\n\t\tdic_greater = dict(Counter(array_leaderboard > int(element)))\n\t\tdic_greater_equal = dict(Counter(array_leaderboard >= int(element)))\n\n\t\tif (True in dic_greater) and (True in dic_greater_equal):\n\t\t\tif (dic_greater[True] == dic_greater_equal[True]):\n\t\t\t\tprint(dic_greater[True] + 1)\n\t\t\telse:\n\t\t\t\tprint(max(dic_greater[True], dic_greater_equal[True]))\n\t\telse:\n\t\t\tprint(\"1\")\n\n\t\t# greater = list_leaderboard > [int(element)] * len(list_leaderboard)\n\t\t# greater_equal = list_leaderboard >= [int(element)] * len(list_leaderboard)\n\t\t# print(dic_greater)\n\t\t# print(dic_greater_equal)\n\nif __name__ == \"__main__\":\n main()","sub_path":"problem-solving/022-climbing-the-leaderboard-numpy.py","file_name":"022-climbing-the-leaderboard-numpy.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"6663848","text":"from neomodel import StructuredNode, StringProperty, DateTimeProperty, Relationship\n\n\nclass User(StructuredNode):\n email = StringProperty()\n \n\nclass Tweet(StructuredNode):\n body = StringProperty()\n tweeted_on = DateTimeProperty(default_now=True)\n poster = Relationship('User', 'TWEETED_BY')\n liked_by = Relationship('User', 'LIKED_BY')\n comment_on = Relationship('Tweet', 'COMMENT_ON')\n\n","sub_path":"tweets/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"181123455","text":"\"\"\"\nUtils tests\n\"\"\"\n# coding=utf-8\n\nimport unittest\nimport mock\nfrom subprocess import CalledProcessError\n\nfrom idflow import Utils\n\n\nclass UtilsTest(unittest.TestCase):\n \"\"\"\n Tests for Utils\n \"\"\"\n\n def test_get_travis_branch(self):\n \"\"\"\n Utils: Should return the branch on Travis\n \"\"\"\n def se_os_getenv(var):\n if var == 'GIT_BRANCH':\n return 'travis'\n return None\n\n with mock.patch('idflow.utils.os.getenv',\n side_effect=se_os_getenv):\n self.assertEqual(\n Utils.get_branch(),\n \"travis\"\n )\n\n def test_get_jenkins2_branch(self):\n \"\"\"\n Utils: Should return the branch on Jenkins 2\n \"\"\"\n def se_os_getenv(var):\n if var == 'BRANCH_NAME':\n return 'jenkins2'\n return None\n\n with mock.patch('idflow.utils.os.getenv',\n side_effect=se_os_getenv):\n self.assertEqual(\n Utils.get_branch(),\n \"jenkins2\"\n )\n\n def test_get_git_branch(self):\n \"\"\"\n Utils: Should return the branch using Git\n \"\"\"\n def se_check_output(cmds):\n if cmds == \"git rev-parse --abbrev-ref HEAD\".split(\" \"):\n return \"git-branch\\n\".encode('utf-8')\n raise CalledProcessError(1, cmds)\n\n with mock.patch('idflow.utils.os.getenv', return_value=None):\n with mock.patch('idflow.utils.check_output',\n side_effect=se_check_output):\n self.assertEqual(\n Utils.get_branch(),\n \"git-branch\"\n )\n\n def test_get_version_with_tags(self):\n \"\"\"\n Utils: Should return the 
version using tags\n \"\"\"\n def se_check_output(cmds):\n if cmds == \"git describe --tags\".split(\" \"):\n return \"tag-version\\n\".encode('utf-8')\n raise CalledProcessError(1, cmds)\n\n with mock.patch('idflow.utils.check_output',\n side_effect=se_check_output):\n self.assertEqual(\n Utils.get_version(),\n \"tag-version\"\n )\n\n def test_get_version_with_short_commit(self):\n \"\"\"\n Utils: Should return the version using the short commit\n \"\"\"\n def se_check_output(cmds):\n if cmds == \"git rev-parse --short HEAD\".split(\" \"):\n return \"short-commit-version\\n\".encode('utf-8')\n raise CalledProcessError(1, cmds)\n\n with mock.patch('idflow.utils.check_output',\n side_effect=se_check_output):\n self.assertEqual(\n Utils.get_version(),\n \"short-commit-version\"\n )\n","sub_path":"tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"289807368","text":"import ccxt \nimport datetime\nimport time\nimport pandas as pd\nfrom pandas.core.frame import DataFrame\nimport pprint\n\n\n\n\n# COIN NAME \nSYMBOL1 = \"BTC/USDT\"\nSYMBOLPOSITION1 = \"BTCUSDT\"\n\nSYMBOL2 = \"BTC/USDT\"\nSYMBOLPOSITION2 = \"ETHUSDT\"\n\nSYMBOL3 = \"BNB/USDT\"\nSYMBOLPOSITION3 = \"BNBUSDT\"\n\nSYMBOL4 = \"XRP/USDT\"\nSYMBOLPOSITION4 = \"XRPUSDT\"\n\nSYMBOL5 = \"BTC/USDT\"\nSYMBOLPOSITION5 = \"DOGEUSDT\"\n\n# TIMEFRAME , SINCE , LIMIT , EMALENGTH\nTIMEFRAME = '30m'\nSINCE = None\nLIMIT = 200\n\n\n# ENTRY AMOUNT \n# BTCAMOUNT = 0.002\n# ETHAMOUNT = 0.004\n# BNBAMOUNT = 0.03\n# XRPAMOUNT = 13.3\n# DOGEAMOUNT = 64\nBTCAMOUNT = 0.001\nETHAMOUNT = 0.002\nBNBAMOUNT = 0.01\nXRPAMOUNT = 6.3\nDOGEAMOUNT = 32\n\n# LEVERAGE\nBTCLEVERAGE = 10\nARTLEVERAGE = 2\n\n\n\n\n\n\n# create key.txt in same folder , first line api_key , second secret_key \nwith open(\"./key.txt\") as f:\n lines = f.readlines()\n api_key = lines[0].strip()\n secret = lines[1].strip()\n\n# Login\nbinance = ccxt.binance(config={\n 'apiKey': api_key,\n 'secret': secret,\n 'enableRateLimit': True,\n 'options': {'defaultType': 'future'}\n})\n\n# Set Leverage\nmarkets = binance.load_markets()\nbtcmarket = binance.market(SYMBOL1)\nethmarket = binance.market(SYMBOL2)\nbnbmarket = binance.market(SYMBOL3)\nxrpmarket = binance.market(SYMBOL4)\ndogemarket = binance.market(SYMBOL5)\n\n\n \n\nstoploss = 0\n\n\nclass server:\n def __init__(self , le , coi ,tima):\n self.ema = le\n self.posi = 0\n self.sum = 0 \n self.stoploss = 0\n self.coin = coi\n self.last_signal = None\n self.df = []\n self.tima = tima\n def trendstatus(self, df):\n # Candle ---> Heikin Ashi Candle\n # heihigh == high , heilow == low \n heiopen = []\n heiclose = []\n\n # first heikin Ashi candle \n column = df.iloc[0]\n open_save = (column['open']+column['close'])/2\n close_save= (column['open']+column['close']+column['low']+column['high'])/4\n heiopen.append(open_save)\n heiclose.append(close_save)\n\n # anothers heikin Ashi candle\n for i in range(1,LIMIT):\n column = df.iloc[i]\n open_save = (open_save+close_save)/2\n heiopen.append(open_save)\n close_save = (column['open']+column['high']+column['low']+column['close'])/4\n heiclose.append(close_save)\n\n # df insert heiopen and heiclose\n df['heiopen'] = heiopen\n df['heiclose'] = heiclose\n\n # trend \n df['ohlc'] = (df['heiopen']+df['high']+df['low']+df['heiclose'])/4\n lis = [df['ohlc'][0]]\n temp=df['ohlc'][0]\n for i in range(1,LIMIT):\n a = (df['ohlc'][i] + temp)/2\n lis.append(a)\n temp = a\n df['haOpen'] 
= lis\n\n lis = []\n for i in range(LIMIT):\n lis.append((df['ohlc'][i]+\n df['haOpen'][i]+\n max(df['high'][i],df['haOpen'][i])+\n min(df['low'][i],df['haOpen'][i]))/4)\n \n \n df['haC'] = lis\n df['hlc3'] = (df['high']+df['low']+df['heiclose'])/3\n df['ema1'] = df['haC'].ewm(span=self.ema).mean()\n df['ema2'] = df['ema1'].ewm(span=self.ema).mean()\n df['ema3'] = df['ema2'].ewm(span=self.ema).mean()\n df['tma1'] = df['ema1']*3 - df['ema2']*3 + df['ema3']\n df['ema4'] = df['tma1'].ewm(span=self.ema).mean()\n df['ema5'] = df['ema4'].ewm(span=self.ema).mean()\n df['ema6'] = df['ema5'].ewm(span=self.ema).mean()\n df['tma2'] = df['ema4']*3 - df['ema5']*3 + df['ema6']\n df['IPEK'] = df['tma1'] - df['tma2']\n df['YASIN'] = df['tma1'] + df['IPEK']\n df['ema7'] = df['hlc3'].ewm(span=self.ema).mean()\n df['ema8'] = df['ema7'].ewm(span=self.ema).mean()\n df['ema9'] = df['ema8'].ewm(span=self.ema).mean()\n df['tma3'] = df['ema7']*3 - df['ema8']*3 + df['ema9']\n df['ema10'] = df['tma3'].ewm(span=self.ema).mean()\n df['ema11'] = df['ema10'].ewm(span=self.ema).mean()\n df['ema12'] = df['ema11'].ewm(span=self.ema).mean()\n df['tma4'] = df['ema10']*3 - df['ema11']*3 + df['ema12']\n df['IPEK1'] = df['tma3'] - df['tma4']\n df['YASIN1'] = df['tma3'] + df['IPEK1']\n df['mavi'] = df['YASIN1']\n df['kirmizi'] = df['YASIN']\n\n # current long or short trend \n if(df['mavi'][-1] > df['kirmizi'][-1] and df['mavi'][-2] <= df['kirmizi'][-2]):\n currentlongCond = True\n else:\n currentlongCond = False\n\n if(df['mavi'][-1] < df['kirmizi'][-1] and df['mavi'][-2] >= df['kirmizi'][-2]):\n currentshortcond = True \n else:\n currentshortcond = False\n \n # current and previous candles are both rising, and the value also signals a rise\n if(self.last_signal != True and currentlongCond):\n self.last_signal = True\n return True \n # current and previous candles are both falling, and the value also signals a fall\n elif(self.last_signal != False and currentshortcond):\n self.last_signal = False\n return False \n else:\n return None \n\n def Get_sum(self):\n return self.sum\n def GET_last_signal(self):\n return self.last_signal\n def Getdf(self):\n coin = binance.fetch_ohlcv(\n symbol = self.coin,\n timeframe= self.tima, \n since = None, \n limit = 200\n )\n self.df = pd.DataFrame(coin, columns=['datetime', 'open', 'high', 'low', 'close', 'volume'])\n self.df['datetime'] = pd.to_datetime(self.df['datetime'], unit='ms') \n self.df.set_index('datetime', inplace=True)\n return self.df\n def Get_currnet_price(self):\n return self.df['close'][-1]\n \n def trade(self):\n Trend = self.trendstatus(self.Getdf())\n if(self.posi==0):\n if(Trend == True):\n self.posi = self.df['close'][-1]\n self.sum -= (self.df['close'][-1]*4)/10000\n self.stoploss = self.df['low'][-2]\n elif(Trend == False):\n self.posi = -self.df['close'][-1]\n self.sum -= (self.df['close'][-1]*4)/10000\n self.stoploss = self.df['high'][-2]\n elif(self.posi>0):\n # long position: stop-loss exit or trend flip (this branch assumed symmetric to the short branch below)\n if(self.df['close'][-1] < self.stoploss):\n self.sum -= (self.df['close'][-1]*4)/10000\n self.sum += self.df['close'][-1] - self.posi\n self.posi = 0\n self.last_signal = None\n elif(Trend == False):\n self.sum -= (self.df['close'][-1]*8)/10000\n self.sum += self.df['close'][-1] - self.posi\n self.posi = -self.df['close'][-1]\n self.stoploss = self.df['high'][-2]\n elif(self.posi<0):\n if(self.df['close'][-1] > self.stoploss):\n self.sum -= (self.df['close'][-1]*4)/10000\n self.sum -= self.df['close'][-1] + self.posi\n self.posi = 0\n self.last_signal = None\n elif(Trend == True):\n self.sum -= (self.df['close'][-1]*8)/10000\n self.sum -= self.df['close'][-1] + self.posi\n self.posi = self.df['close'][-1]\n self.stoploss = self.df['low'][-2]\n\n\n\n\n\n\nif __name__ == \"__main__\":\n f = open('time.txt','a')\n f.writelines(\"[\")\n f.close()\n\n f = open('./txt30min5.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt30min6.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt30min7.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt30min8.txt','a')\n 
f.writelines(\"[\")\n f.close()\n f = open('./txt30min9.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt30min10.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt30min11.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt30min12.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt30min13.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt30min14.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt30min15.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt30min16.txt','a')\n f.writelines(\"[\")\n f.close()\n\n\n f = open('./txt60min5.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt60min6.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt60min7.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt60min8.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt60min9.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt60min10.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt60min11.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt60min12.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt60min13.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt60min14.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt60min15.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt60min16.txt','a')\n f.writelines(\"[\")\n f.close()\n\n\n\n f = open('./txt240min5.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt240min6.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt240min7.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt240min8.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt240min9.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt240min10.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt240min11.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt240min12.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt240min13.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt240min14.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt240min15.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./txt240min16.txt','a')\n f.writelines(\"[\")\n f.close()\n f = open('./currnetprice.txt','a')\n f.writelines(\"[\")\n f.close()\n\n\n BtcCoin5 = server(5,\"BTC/USDT\",\"30m\")\n BtcCoin6 = server(6,\"BTC/USDT\",\"30m\")\n BtcCoin7 = server(7,\"BTC/USDT\",\"30m\")\n BtcCoin8 = server(8,\"BTC/USDT\",\"30m\")\n BtcCoin9 = server(9,\"BTC/USDT\",\"30m\")\n BtcCoin10 = server(10,\"BTC/USDT\",\"30m\")\n BtcCoin11 = server(11,\"BTC/USDT\",\"30m\")\n BtcCoin12 = server(12,\"BTC/USDT\",\"30m\")\n BtcCoin13 = server(13,\"BTC/USDT\",\"30m\")\n BtcCoin14 = server(14,\"BTC/USDT\",\"30m\")\n BtcCoin15 = server(15,\"BTC/USDT\",\"30m\")\n BtcCoin16 = server(16,\"BTC/USDT\",\"30m\")\n\n EthCoin5 = server(5,\"BTC/USDT\",'1h')\n EthCoin6 = server(6,\"BTC/USDT\",'1h')\n EthCoin7 = server(7,\"BTC/USDT\",'1h')\n EthCoin8 = server(8,\"BTC/USDT\",'1h')\n EthCoin9 = server(9,\"BTC/USDT\",'1h')\n EthCoin10 = server(10,\"BTC/USDT\",'1h')\n EthCoin11 = server(11,\"BTC/USDT\",'1h')\n EthCoin12 = server(12,\"BTC/USDT\",'1h')\n EthCoin13 = server(13,\"BTC/USDT\",'1h')\n EthCoin14 = server(14,\"BTC/USDT\",'1h')\n EthCoin15 = server(15,\"BTC/USDT\",'1h')\n EthCoin16 = server(16,\"BTC/USDT\",'1h')\n\n DogeCoin5 = server(5,\"BTC/USDT\",'4h')\n DogeCoin6 = server(6,\"BTC/USDT\",'4h')\n DogeCoin7 = server(7,\"BTC/USDT\",'4h')\n DogeCoin8 = server(8,\"BTC/USDT\",'4h')\n 
DogeCoin9 = server(9,\"BTC/USDT\",'4h')\n DogeCoin10 = server(10,\"BTC/USDT\",'4h')\n DogeCoin11 = server(11,\"BTC/USDT\",'4h')\n DogeCoin12 = server(12,\"BTC/USDT\",'4h')\n DogeCoin13 = server(13,\"BTC/USDT\",'4h')\n DogeCoin14 = server(14,\"BTC/USDT\",'4h')\n DogeCoin15 = server(15,\"BTC/USDT\",'4h')\n DogeCoin16 = server(16,\"BTC/USDT\",'4h')\n\n\n# BtcCoin5\n# BtcCoin5 \n# BtcCoin6 \n# BtcCoin7 \n# BtcCoin8 \n# BtcCoin9 \n# BtcCoin10\n# BtcCoin11\n# BtcCoin12 \n# BtcCoin13 \n# BtcCoin14\n# BtcCoin15 \n# BtcCoin16 \n\n# EthCoin5 \n# EthCoin6 \n# EthCoin7 \n# EthCoin8 \n# EthCoin9 \n# EthCoin10 \n# EthCoin11\n# EthCoin12 \n# EthCoin13 \n# EthCoin14 \n# EthCoin15 \n# EthCoin16 \n\n# DogeCoin5 \n# DogeCoin6 \n# DogeCoin7 \n# DogeCoin8 \n# DogeCoin9 \n# DogeCoin10 \n# DogeCoin11\n# DogeCoin12 \n# DogeCoin13 \n# DogeCoin14\n# DogeCoin15\n# DogeCoin16\n while(True):\n try:\n BtcCoin5.trade()\n BtcCoin5.trade() \n BtcCoin6.trade() \n BtcCoin7.trade() \n BtcCoin8.trade() \n BtcCoin9.trade() \n BtcCoin10.trade()\n BtcCoin11.trade()\n BtcCoin12.trade() \n BtcCoin13.trade() \n BtcCoin14.trade()\n BtcCoin15.trade() \n BtcCoin16.trade() \n time.sleep(0.3)\n EthCoin5.trade() \n EthCoin6.trade() \n EthCoin7.trade() \n EthCoin8.trade() \n EthCoin9.trade() \n EthCoin10.trade() \n EthCoin11.trade()\n EthCoin12.trade() \n EthCoin13.trade() \n EthCoin14.trade() \n EthCoin15.trade() \n EthCoin16.trade() \n time.sleep(0.3)\n DogeCoin5.trade() \n DogeCoin6.trade() \n DogeCoin7.trade() \n DogeCoin8.trade() \n DogeCoin9.trade() \n DogeCoin10.trade() \n DogeCoin11.trade()\n DogeCoin12.trade() \n DogeCoin13.trade() \n DogeCoin14.trade()\n DogeCoin15.trade()\n DogeCoin16.trade()\n time.sleep(25)\n \n f = open('간다잇.txt','a')\n f.writelines(\"\\\"\")\n f.writelines(str(datetime.datetime.now().day))\n f.writelines(\" \")\n f.writelines(str(datetime.datetime.now().hour))\n f.writelines(\" \")\n f.writelines(str(datetime.datetime.now().minute))\n f.writelines(\" \")\n f.writelines(str(datetime.datetime.now().second))\n f.writelines(\"\\\"\")\n f.writelines(\" ,\")\n f.close()\n\n f = open('./currnetprice.txt','a')\n f.writelines(str(BtcCoin5.Get_currnet_price()))\n f.writelines(',')\n f.close()\n \n # 30min btc\n f = open('./txt30min5.txt','a')\n f.writelines(str(BtcCoin5.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt30min6.txt','a')\n f.writelines(str(BtcCoin6.Get_sum()))\n f.writelines(',')\n f.close()\n \n f = open('./txt30min7.txt','a')\n f.writelines(str(BtcCoin7.Get_sum()))\n f.writelines(',')\n f.close()\n \n f = open('./txt30min8.txt','a')\n f.writelines(str(BtcCoin8.Get_sum()))\n f.writelines(',')\n f.close()\n \n f = open('./txt30min9.txt','a')\n f.writelines(str(BtcCoin9.Get_sum()))\n f.writelines(',')\n f.close()\n \n f = open('./txt30min10.txt','a')\n f.writelines(str(BtcCoin10.Get_sum()))\n f.writelines(',')\n f.close()\n \n f = open('./txt30min11.txt','a')\n f.writelines(str(BtcCoin11.Get_sum()))\n f.writelines(',')\n f.close()\n \n f = open('./txt30min12.txt','a')\n f.writelines(str(BtcCoin12.Get_sum()))\n f.writelines(',')\n f.close()\n \n f = open('./txt30min13.txt','a')\n f.writelines(str(BtcCoin13.Get_sum()))\n f.writelines(',')\n f.close()\n \n f = open('./txt30min14.txt','a')\n f.writelines(str(BtcCoin14.Get_sum()))\n f.writelines(',')\n f.close()\n \n f = open('./txt30min15.txt','a')\n f.writelines(str(BtcCoin15.Get_sum()))\n f.writelines(',')\n f.close()\n \n f = open('./txt30min16.txt','a')\n f.writelines(str(BtcCoin16.Get_sum()))\n f.writelines(',')\n f.close()\n \n\n f = 
open('./txt60min5.txt','a')\n f.writelines(str(EthCoin5.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt60min6.txt','a')\n f.writelines(str(EthCoin6.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt60min7.txt','a')\n f.writelines(str(EthCoin7.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt60min8.txt','a')\n f.writelines(str(EthCoin8.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt60min9.txt','a')\n f.writelines(str(EthCoin9.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt60min10.txt','a')\n f.writelines(str(EthCoin10.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt60min11.txt','a')\n f.writelines(str(EthCoin11.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt60min12.txt','a')\n f.writelines(str(EthCoin12.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt60min13.txt','a')\n f.writelines(str(EthCoin13.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt60min14.txt','a')\n f.writelines(str(EthCoin14.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt60min15.txt','a')\n f.writelines(str(EthCoin15.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt60min16.txt','a')\n f.writelines(str(EthCoin16.Get_sum()))\n f.writelines(',')\n f.close()\n\n\n f = open('./txt240min5.txt','a')\n f.writelines(str(DogeCoin5.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt240min6.txt','a')\n f.writelines(str(DogeCoin6.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt240min7.txt','a')\n f.writelines(str(DogeCoin7.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt240min8.txt','a')\n f.writelines(str(DogeCoin8.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt240min9.txt','a')\n f.writelines(str(DogeCoin9.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt240min10.txt','a')\n f.writelines(str(DogeCoin10.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt240min11.txt','a')\n f.writelines(str(DogeCoin11.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt240min12.txt','a')\n f.writelines(str(DogeCoin12.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt240min13.txt','a')\n f.writelines(str(DogeCoin13.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt240min14.txt','a')\n f.writelines(str(DogeCoin14.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt240min15.txt','a')\n f.writelines(str(DogeCoin15.Get_sum()))\n f.writelines(',')\n f.close()\n f = open('./txt240min16.txt','a')\n f.writelines(str(DogeCoin16.Get_sum()))\n f.writelines(',')\n f.close()\n\n except Exception as e:\n time.sleep(30)","sub_path":"test3060240.py","file_name":"test3060240.py","file_ext":"py","file_size_in_byte":19930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"487192219","text":"from opentrons import protocol_api\n\n\nmetadata = {'apiLevel': '2.5',\n 'author': 'Jon Sanders'}\n\ncols = ['A1', 'A2', 'A3', 'A4', 'A5', 'A6',\n 'A7', 'A8', 'A9', 'A10', 'A11', 'A12']\n\nvol = 50\nextra = 20\n\ndef run(protocol: protocol_api.ProtocolContext):\n\n\n # tips\n tiprack = protocol.load_labware('opentrons_96_filtertiprack_200ul', \n 1)\n\n # stock plate\n stock = protocol.load_labware('tubeblockvwrhalfskirtpcrplate_96_wellplate_250ul',\n 2, 'stock')\n\n # dest plates\n\n plate_1 = protocol.load_labware('biorad_96_wellplate_200ul_pcr',\n 4, 'plate_1')\n plate_2 = protocol.load_labware('biorad_96_wellplate_200ul_pcr',\n 5, 'plate_2')\n plate_3 = 
protocol.load_labware('biorad_96_wellplate_200ul_pcr',\n 6, 'plate_3')\n\n # initialize pipettes\n pipette = protocol.load_instrument('p300_multi', \n 'left',\n tip_racks=[])\n\n\n for col in cols:\n pipette.pick_up_tip(tiprack[col])\n pipette.aspirate(vol*3 + extra, stock[col])\n pipette.dispense(vol, plate_1[col])\n pipette.dispense(vol, plate_2[col])\n pipette.dispense(vol, plate_3[col])\n pipette.dispense(extra, stock[col])\n pipette.drop_tip()","sub_path":"Misc/copy_plate.py","file_name":"copy_plate.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"178588358","text":"\"\"\"questions/views.py\n\"\"\"\n# Related third party imports.\nfrom django.contrib.auth.decorators import login_required\nfrom django.forms import modelformset_factory\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.generic.list import ListView\n\n# Local application/library specific imports.\nfrom .models import (\n Answer,\n Course,\n Objective,\n Source,\n QuestionBase,\n Question,\n)\nfrom .forms import (\n AnswerForm,\n AnswerFormSetHelper,\n DocumentForm,\n ImportQuestionsForm,\n QuestionForm,\n QuestionUpdateForm,\n QuestionListForm,\n)\nfrom .utils import save_objectives, save_questions\n\n# --------------------------------------------------------------------------- #\n# Question Views\n# --------------------------------------------------------------------------- #\n@login_required\ndef all_questions(request):\n courses = Course.objects.filter(owner=request.user)\n objectives = Objective.objects.none()\n #form = QuestionListForm(user=request.user, style='select_box')\n questions = QuestionBase.objects.none()\n template_name = \"questions/all_questions.html\",\n return render(request, template_name,\n {\n \"courses\":courses,\n \"objectives\":objectives,\n \"questions\":questions,\n #\"form\":form,\n },\n )\n\n\n@login_required\ndef add_question(request):\n \"\"\"add_question provides a view for adding questions, naturally. There are\n also 6 answer fields provided by default, and there is currently no way to\n increase this number dynamically. This should be changed in the future.\n \"\"\"\n # Setting up the question form\n form = QuestionForm(request.POST or None, user=request.user)\n\n # Setting up Answer Formset\n AnswerFormsetFactory = modelformset_factory(\n Answer,\n fields=('answer_text', 'correct'),\n exclude=(),\n extra=6,\n )\n formset = AnswerFormsetFactory(\n request.POST or None,\n queryset=Answer.objects.none(),\n )\n helper = AnswerFormSetHelper()\n\n # Checking if the question form is valid\n if form.is_valid():\n new_question = form.save(commit=False)\n new_question.owner = request.user\n new_question.save()\n\n # Validating the Answer Formset and setting up ForeignKey to the associated\n # question\n if formset.is_valid():\n for form_ in formset:\n answer = form_.save(commit=False)\n answer.question = new_question\n answer.save()\n\n return redirect('questions:all_questions')\n\n template_name = 'questions/add_question.html'\n return render(request, template_name,\n {\n 'form': form,\n 'answer_forms':formset,\n 'helper':helper,\n },\n )\n\n\n@login_required\ndef delete_question(request, pk):\n \"\"\"This is just a simple delete view. There is no validation done, nor is\n attempt at verification made. 
Pretty much, if you hit delete, it's gone.\n \"\"\"\n query = Question.objects.get(pk=pk)\n query.delete()\n return redirect('questions:all_questions')\n\n\n@login_required\ndef question_update_view(request, question_type, id):\n \"\"\"A view for updating existing questions. This view is very similar to\n the view provided for adding a question, but the form is already populated\n with the field contents of the existing object.\n \"\"\"\n # Setting up the question form\n instance = get_object_or_404(Question, id=id)\n form = QuestionUpdateForm(request.POST or None, instance=instance)\n\n # Setting up the answer formset\n answers = Answer.objects.filter(owner=request.user, question=instance)\n AnswerFormsetFactory = modelformset_factory(\n Answer,\n fields=('answer_text', 'correct'),\n exclude=(),\n extra=0,\n )\n formset = AnswerFormsetFactory(request.POST or None, queryset=answers)\n helper = AnswerFormSetHelper()\n\n # Validating the Answer Formset\n if formset.is_valid():\n formset.save()\n\n # Validating the Question Form\n if form.is_valid():\n form.save()\n return redirect('questions:all_questions')\n\n template_name = 'questions/question_update.html'\n return render(request, template_name,\n {\n 'form': form,\n 'answer_forms':formset,\n \"helper\":helper,\n },\n )\n\n@login_required\ndef import_questions(request):\n \"\"\"A view for importing questions based on a very specific format per Dr.\n Wallin's request. This will likely break otherwise.\n \"\"\"\n if request.method == 'POST':\n form = ImportQuestionsForm(\n request.user,\n request.POST,\n request.FILES,)\n if form.is_valid():\n data = form.cleaned_data\n document = form.cleaned_data['document']\n image_file = form.cleaned_data['compressed image file']\n save_questions(request, data)\n return redirect('questions:all_questions')\n else:\n form = ImportQuestionsForm(user=request.user)\n template_name = \"questions/import_questions.html\"\n return render(request, template_name, {'form':form})\n\n# --------------------------------------------------------------------------- #\n# Objective Views\n# --------------------------------------------------------------------------- #\n@login_required\ndef import_objectives(request):\n if request.method == 'POST':\n form = DocumentForm(\n request.user,\n request.POST,\n request.FILES,)\n if form.is_valid():\n document = form.save(False)\n document.owner = request.user\n document.save()\n save_objectives(document)\n return redirect('questions:all_questions')\n else:\n form = DocumentForm(user=request.user)\n\n template_name = 'questions/import_objectives.html'\n return render(request, template_name, {\"form\":form})\n\n\n@login_required\nclass ObjectiveListView(ListView):\n model = Objective\n context_object_name = 'objective'\n\n\n@login_required\ndef load_objectives(request, style):\n course_id = request.GET.get('course')\n if course_id != '':\n objectives = Objective.objects.filter(\n owner=request.user,\n course_id=course_id\n ).order_by('objective_number_major', 'objective_number_minor',)\n else:\n objectives = Objective.objects.none()\n\n template_name = 'questions/objective_dropdown_list_options.html'\n return render(request, template_name,\n {\n 'objectives':objectives,\n 'style':style,\n })\n\n\n@login_required\ndef load_questions(request, checks):\n course_id = request.GET.get('course')\n objective_id = request.GET.getlist('objectives[]')\n print(objective_id)\n if course_id != '':\n if objective_id[0] in [\"ALL\", '', ' '] or len(objective_id) == 0:\n questions = 
QuestionBase.objects.filter(\n course=course_id,\n owner=request.user).order_by('objective')\n else:\n objectives = []\n for objective in objective_id:\n objectives.append(Objective.objects.get(pk=objective))\n questions = \\\n QuestionBase.objects.filter(\n owner=request.user,\n objective__in=objectives).order_by('objective')\n else:\n questions=QuestionBase.objects.none()\n\n template_name = 'questions/question_selection_list.html'\n return render(request, template_name,\n {\n 'questions':questions,\n 'checks':checks,\n },\n )\n\n@login_required\ndef load_question_update_form(request):\n #course_id = request.GET.get('course')\n #objective_id = request.GET.get('objective')\n question_id = request.GET.get('question')\n question = QuestionBase.objects.get(\n owner=request.user,\n pk=question_id,\n )\n form = QuestionUpdateForm(user=request.user,)\n\n template_name = 'questions/question_update_popup.html',\n return render(request, {'question':question, 'form':form})\n\n","sub_path":"questions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"86050996","text":"from south.db import db\nfrom django.db import models\nfrom locationtracking.models import *\n\nclass Migration:\n def forwards(self, orm):\n # Adding model 'Location'\n db.create_table('locationtracking_location', (\n ('id', models.AutoField(primary_key=True)),\n ('name', models.CharField(max_length=128)),\n ('slug', models.SlugField()),\n ('point', models.PointField(srid=4326)),\n ('public', models.BooleanField()),\n ))\n db.send_create_signal('locationtracking', ['Location'])\n\n def backwards(self, orm):\n # Deleting model 'Location'\n db.delete_table('locationtracking_location')\n\n models = {\n 'locationtracking.location': {\n 'id': ('models.AutoField', [], {'primary_key': 'True'}),\n 'name': ('models.CharField', [], {'max_length': '128'}),\n 'point': ('models.PointField', [], {'srid': '4326'}),\n 'slug': ('models.SlugField', [], {}),\n 'public': ('models.BooleanField', [], {},),\n },\n 'locationtracking.positionreportsource': {\n 'auto_import': ('models.BooleanField', [], {'default': 'True'}),\n 'auto_import_interval': ('models.IntegerField', [], {'null': 'True', 'blank': 'True'}),\n 'display_description': ('models.TextField', [], {'null': 'True', 'blank': 'True'}),\n 'display_on_maps': ('models.BooleanField', [], {'default': 'False'}),\n 'id': ('models.AutoField', [], {'primary_key': 'True'}),\n 'name': ('models.CharField', [], {'max_length': '64'})\n },\n 'auth.user': {\n '_stub': True,\n 'id': ('models.AutoField', [], {'primary_key': 'True'})\n },\n 'locationtracking.positionreport': {\n 'active': ('models.BooleanField', [], {'default': 'False'}),\n 'altitude': ('models.IntegerField', [], {'null': 'True', 'blank': 'True'}),\n 'checksum_gpgga': ('models.CharField', [\"'$GPGGA Checksum'\"], {'max_length': '10', 'null': 'True', 'blank': 'True'}),\n 'checksum_gprmc': ('models.CharField', [\"'$GPRMC Checksum'\"], {'max_length': '10', 'null': 'True', 'blank': 'True'}),\n 'dgps_reference_station_id': ('models.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),\n 'duration_seconds': ('models.IntegerField', [\"'Duration (seconds)'\"], {'null': 'True', 'blank': 'True'}),\n 'geoid_height_above_wgs84_meters': ('models.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '3', 'blank': 'True'}),\n 'geoidal_seperation_meters': ('models.IntegerField', [], {'null': 'True', 
'blank': 'True'}),\n 'hdop': ('models.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),\n 'heading': ('models.IntegerField', [], {'null': 'True', 'blank': 'True'}),\n 'id': ('models.AutoField', [], {'primary_key': 'True'}),\n 'latitude': ('models.DecimalField', [], {'null': 'True', 'max_digits': '11', 'decimal_places': '6', 'blank': 'True'}),\n 'longitude': ('models.DecimalField', [], {'null': 'True', 'max_digits': '11', 'decimal_places': '6', 'blank': 'True'}),\n 'magnetic_variation': ('models.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '3', 'blank': 'True'}),\n 'magnetic_variation_direction': ('models.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),\n 'point': ('models.PointField', [], {'srid': '4326', 'null': 'True', 'blank': 'True'}),\n 'public': ('models.BooleanField', [], {'default': 'True'}),\n 'quality': ('models.IntegerField', ['\\'The receiver-generated \"quality\" of this particular report.\\''], {'null': 'True', 'blank': 'True'}),\n 'receiver_warning': ('models.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),\n 'satellites_visible': ('models.IntegerField', [\"'The number of satellites visible to the receiver'\"], {'null': 'True', 'blank': 'True'}),\n 'should_tumble': ('models.BooleanField', [], {'default': 'True'}),\n 'source': ('models.ForeignKey', [\"orm['locationtracking.PositionReportSource']\"], {}),\n 'speed': ('models.IntegerField', [], {'null': 'True', 'blank': 'True'}),\n 'time_since_dgps_update': ('models.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),\n 'timestamp': ('models.DateTimeField', [], {'auto_now_add': 'True'}),\n 'timestamp_received': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),\n 'user': ('models.ForeignKey', [\"orm['auth.User']\"], {})\n }\n }\n\n complete_apps = ['locationtracking']\n","sub_path":"locationtracking/migrations/0004_add_location_model.py","file_name":"0004_add_location_model.py","file_ext":"py","file_size_in_byte":4745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"479887474","text":"import logging\r\nimport os\r\nfrom pathlib import Path\r\n\r\nfrom atom.api import Event\r\n\r\n\r\n# Set up a verbose debugger level for tracing\r\nTRACE_LEVEL_NUM = 5\r\nlogging.addLevelName(TRACE_LEVEL_NUM, \"TRACE\")\r\ndef trace(self, message, *args, **kws):\r\n # Yes, logger takes its '*args' as 'args'.\r\n if self.isEnabledFor(TRACE_LEVEL_NUM):\r\n self._log(TRACE_LEVEL_NUM, message, args, **kws)\r\nlogging.Logger.trace = trace\r\n\r\nlog = logging.getLogger(__name__)\r\nlog.addHandler(logging.NullHandler())\r\n\r\n\r\n# Flag indicating whether user configuration file was loaded.\r\nCONFIG_LOADED = False\r\n\r\n\r\nexclude = ['_d_storage', '_d_engine', '_flags', '_parent', '_children']\r\n\r\n\r\nclass SimpleState(object):\r\n\r\n def __getstate__(self):\r\n state = super(SimpleState, self).__getstate__()\r\n for k, v in self.members().items():\r\n if isinstance(v, Event):\r\n del state[k]\r\n elif k in exclude:\r\n del state[k]\r\n elif v.metadata and v.metadata.get('transient', False):\r\n del state[k]\r\n return state\r\n\r\n def __setstate__(self, state):\r\n for key, value in state.items():\r\n setattr(self, key, value)\r\n\r\n\r\ndef get_config_folder():\r\n user_path = Path('~') / 'psi'\r\n return user_path.expanduser()\r\n\r\n\r\ndef get_config_file():\r\n default = get_config_folder() / 'config.py'\r\n return Path(os.environ.get('PSI_CONFIG', 
default))\r\n\r\n\r\ndef create_config(base_directory=None):\r\n config_template = Path(__file__).parent / 'templates' / 'config.txt'\r\n target = get_config_file()\r\n target.parent.mkdir(exist_ok=True, parents=True)\r\n\r\n if base_directory is None:\r\n base_directory = str(target.parent)\r\n\r\n config_text = config_template.read_text()\r\n config_text = config_text.format(base_directory)\r\n target.write_text(config_text)\r\n\r\n\r\ndef create_io_manifest(template):\r\n io_template = Path(__file__).parent / 'templates' / 'io' / template\r\n io_template = io_template.with_suffix('.enaml')\r\n io = Path(get_config('IO_ROOT')) / template\r\n io = io.with_suffix('.enaml')\r\n io.parent.mkdir(exist_ok=True, parents=True)\r\n io_text = io_template.read_text()\r\n io.write_text(io_text)\r\n\r\n\r\ndef create_config_dirs():\r\n config = load_config()\r\n for name, value in vars(config).items():\r\n if name.endswith('_ROOT'):\r\n Path(value).mkdir(exist_ok=True, parents=True)\r\n\r\n\r\ndef load_config():\r\n # Load the default settings\r\n global CONFIG_LOADED\r\n import importlib.util\r\n from os import environ\r\n from . import config\r\n\r\n config_path = get_config_file()\r\n if config_path.exists():\r\n try:\r\n spec = importlib.util.spec_from_file_location('settings', config_path)\r\n module = importlib.util.module_from_spec(spec)\r\n spec.loader.exec_module(module)\r\n for name, value in vars(module).items():\r\n if name == name.upper():\r\n setattr(config, name, value)\r\n CONFIG_LOADED = True\r\n except Exception as e:\r\n log.exception(e)\r\n\r\n for name, value in vars(config).items():\r\n if name == name.upper():\r\n log.debug('CONFIG %s : %r', name, value)\r\n\r\n return config\r\n\r\n_config = load_config()\r\n\r\n\r\ndef set_config(setting, value):\r\n '''\r\n Set value of setting\r\n '''\r\n setattr(_config, setting, value)\r\n\r\n\r\nCFG_ERR_MESG = '''\r\nCould not find setting \"{}\" in configuration. This may be because the\r\nconfiguration file is missing. Please run psi-config to create it.\r\n'''\r\n\r\n\r\ndef get_config(setting=None):\r\n '''\r\n Get value of setting\r\n '''\r\n if setting is not None:\r\n try:\r\n return getattr(_config, setting)\r\n except AttributeError as e:\r\n if CONFIG_LOADED:\r\n raise\r\n mesg = CFG_ERR_MESG.strip().format(setting)\r\n raise SystemError(mesg) from e\r\n else:\r\n setting_names = [s for s in dir(_config) if s.upper() == s]\r\n setting_values = [getattr(_config, s) for s in setting_names]\r\n return dict(zip(setting_names, setting_values))\r\n\r\n\r\n# Monkeypatch built-in JSON library to better handle special types. The\r\n# json-tricks library handles quite a few different types of Python objects\r\n# fairly well. 
This ensures that third-party libraries (e.g., bcolz) that see\r\n# psiexperiment data structures can properly deal with them.\r\nimport json\r\nimport json_tricks\r\n\r\nfor fn_name in ('dump', 'dumps', 'load', 'loads'):\r\n fn = getattr(json_tricks, fn_name)\r\n setattr(json, fn_name, fn)\r\nlog.debug('Monkeypatched system JSON')\r\n","sub_path":"psi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"413995705","text":"import PIL\nimport PIL.ImageTk\n\n_IMAGE_PATH = \"rong/images/{}.png\"\n_SPRITE_PATH = _IMAGE_PATH.format(\"sprites/{}\")\n\nRIGHT_POINTING_ARROW = PIL.Image.open(_IMAGE_PATH.format(\"arrow\"))\nLEFT_POINTING_ARROW = RIGHT_POINTING_ARROW.rotate(180)\nRIGHT_POINTING_BLACK_ARROW = PIL.Image.open(_IMAGE_PATH.format(\"arrow_black\"))\nLEFT_POINTING_BLACK_ARROW = RIGHT_POINTING_BLACK_ARROW.rotate(180)\nCHECKMARK = PIL.Image.open(_IMAGE_PATH.format(\"checkmark\"))\nBLACK_CHECKMARK = PIL.Image.open(_IMAGE_PATH.format(\"checkmark_black\"))\nPAUSE_GLYPH = PIL.Image.open(_IMAGE_PATH.format(\"pause\"))\nBLACK_PAUSE_GLYPH = PIL.Image.open(_IMAGE_PATH.format(\"pause_black\"))\n\nTKINTER_USABLE_RIGHT_POINTING_ARROW = PIL.ImageTk.PhotoImage(\n RIGHT_POINTING_ARROW\n)\n\nTKINTER_USABLE_LEFT_POINTING_ARROW = PIL.ImageTk.PhotoImage(\n LEFT_POINTING_ARROW\n)\n\nTKINTER_USABLE_RIGHT_POINTING_BLACK_ARROW = PIL.ImageTk.PhotoImage(\n RIGHT_POINTING_BLACK_ARROW\n)\n\nTKINTER_USABLE_LEFT_POINTING_BLACK_ARROW = PIL.ImageTk.PhotoImage(\n LEFT_POINTING_BLACK_ARROW\n)\n\nTKINTER_USABLE_CHECKMARK = PIL.ImageTk.PhotoImage(CHECKMARK)\nTKINTER_USABLE_BLACK_CHECKMARK = PIL.ImageTk.PhotoImage(BLACK_CHECKMARK)\n\nTKINTER_USEABLE_PAUSE_GLYPH = PIL.ImageTk.PhotoImage(PAUSE_GLYPH)\nTKINTER_USEABLE_BLACK_PAUSE_GLYPH = PIL.ImageTk.PhotoImage(BLACK_PAUSE_GLYPH)\n\n_LIST_OF_SPRITES = [\n \"ball_speed_boost\",\n \"ball_speed_reduction\",\n \"engorgement\",\n \"ensmallment\",\n \"lock_opponent_position\",\n \"lock_opponent_rotation\",\n \"multiple_balls\",\n \"other_narrowment\",\n \"other_paddle_speed_reduction\",\n \"own_paddle_speed_boost\",\n \"self_widenment\"\n]\n\nsprites = {}\n\nfor _sprite_name in _LIST_OF_SPRITES:\n _image = PIL.Image.open(_SPRITE_PATH.format(_sprite_name))\n sprites[_sprite_name] = PIL.ImageTk.PhotoImage(_image)\n","sub_path":"rong/images/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"332015781","text":"from setuptools import setup, find_packages\nimport pip\n\ntry:\n from tracker import __version__\nexcept:\n raise RuntimeError('no version information found in tracker package, cannot build/install')\n\nreqs = pip.req.parse_requirements('requirements.txt', session=pip.download.PipSession())\ninstall_requires = [str(r.req) for r in reqs]\n\nsetup(\n name='tracker',\n version=__version__,\n description='Tracking stat history',\n author='Tom Zagorski',\n author_email='tonmzagorski@gmail.com',\n url='https://github.com/tomzagorski/rocketleague-tracker',\n packages=find_packages(),\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'rocketleague = tracker.cli:main',\n ],\n },\n license='Proprietary',\n classifiers=['Private :: Do Not Upload'],\n 
zip_safe=False,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"307332046","text":"from bs4 import BeautifulSoup\nimport requests, urllib\n\n# Input\ndomain_name = \"www.youtube.com\"\n\n# Send request and get webpage\nbaseurl = \"https://www.google.com/search?\"\nparams = {\n \"q\": domain_name\n}\nheaders = {\n 'User-Agent': 'Firefox/3.0.15'\n}\nurl = baseurl + urllib.parse.urlencode(params)\nres = requests.get(url, headers=headers, verify=False)\nopen('/tmp/dbg.html', 'wb').write(res.content)\n\n# Parse Beautiful Soup and get keywords\nsoup = BeautifulSoup(res.text)\nstart = soup.get_text().find('ALL')\nend = min(soup.get_text().find(''.join([\n 'In order to show you the most relevant results, we ',\n 'have omitted some entries very similar to the 10 already displayed.']))%10000000,\n soup.get_text().find('Sign inSettingsPrivacyTerms')%10000000)\nkeywords = soup.get_text()[start:end]\n\n# Return True if string is allowed. Basically eliminating all string that are \n# bad\ndef disallowed(string):\n # Do not allow empty strings\n if len(string) == 0:\n return False\n # Do not allow strings that are any combination of these letters ONLY\n if all(x in ['.', '|', '<', '>', '›', '-', '—', '_', '\\n'] for x in list(set(string))):\n return False\n return True\n\n# Process every raw string - like eliminate new lines from them\ndef process(string):\n return string.replace('\\n', '')\nraw_keywords = list(filter(disallowed, keywords.split(' ')[1:]))\nprocessed_keywords = map(process, raw_keywords)\n\n# Result\nprint(raw_keywords)","sub_path":"6th sem/networks/project/old_resources/newer_but_still_old/bs4_get_keywords.py","file_name":"bs4_get_keywords.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"93019374","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\n\nclass network(object):\n def __init__(self, train_dataset=[], mode='hebbian'):\n self.train_dataset = train_dataset\n self.num_training = len(self.train_dataset)\n self.num_neurons = len(self.train_dataset[0][0])\n\n if mode == 'hebbian':\n self.hebbian()\n else:\n self.storkey()\n\n def hebbian(self):\n # self.W = np.array([[0, -1, -3, 3],\n # [-1, 0, 1, -1],\n # [-3, 1, 0, -3],\n # [3, -1, -3, 0]])\n self.W = np.zeros([self.num_neurons, self.num_neurons])\n for image_vector, _ in self.train_dataset:\n temp = np.matmul(np.array(image_vector).reshape(len(image_vector), 1),\n np.array(image_vector).reshape(1, len(image_vector)))\n self.W = np.add(self.W, temp)\n np.fill_diagonal(self.W, 0)\n\n def storkey(self):\n # https://stats.stackexchange.com/questions/276889/whats-wrong-with-my-algorithm-for-implementing-the-storkey-learning-rule-for-ho\n self.W = np.zeros([self.num_neurons, self.num_neurons])\n\n for image_vector, _ in self.train_dataset:\n hebbian = np.outer(image_vector, image_vector)\n np.fill_diagonal(hebbian, 0)\n\n net = np.dot(self.W, image_vector)\n\n pre = np.outer(image_vector, net)\n post = np.outer(net, image_vector)\n\n self.W = np.add(self.W, np.divide(np.subtract(hebbian, np.add(pre, post)), self.num_neurons))\n\n np.fill_diagonal(self.W, 0)\n\n def stabilize(self, vector):\n changed = True\n\n while changed:\n changed = False\n indices = [i for i in range(0, len(vector))]\n np.random.shuffle(indices)\n\n new_vector = np.copy(vector)\n for i 
in range(0, len(vector)):\n neuron_index = indices.pop()\n s = np.dot(new_vector, self.W[neuron_index])\n\n if s > 0:\n new_vector[neuron_index] = 1\n elif s < 0:\n new_vector[neuron_index] = -1\n\n changed = (not vector[neuron_index] == new_vector[neuron_index]) or changed\n\n vector = new_vector\n\n return vector\n\n\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n\nx_train = x_train.astype(np.float)\ny_train = y_train.astype(np.float)\n\nx_test = x_test.astype(np.float)\ny_test = y_test.astype(np.float)\n\nones = []\nfives = []\n\nonesTest = []\nfivesTest = []\n\nfor j in range(len(x_train)):\n if y_train[j] == 1:\n ones.append(x_train[j].reshape([1, 784])[0])\n elif y_train[j] == 5:\n fives.append(x_train[j].reshape([1, 784])[0])\n\nones = [[1 if p > 0 else -1 for p in v] for v in ones]\nones = [(x, 1) for x in ones]\nnp.random.shuffle(ones)\n\nfives = [[1 if p > 0 else -1 for p in v] for v in fives]\nfives = [(x, 5) for x in fives]\nnp.random.shuffle(fives)\n\nfor j in range(len(x_test)):\n if y_test[j] == 1:\n onesTest.append(x_test[j].reshape([1, 784])[0])\n elif y_test[j] == 5:\n fivesTest.append(x_test[j].reshape([1, 784])[0])\n\nonesTest = [[1 if p > 0 else -1 for p in v] for v in onesTest]\nonesTest = [(x, 1) for x in onesTest]\nnp.random.shuffle(onesTest)\n\nfivesTest = [[1 if p > 0 else -1 for p in v] for v in fivesTest]\nfivesTest = [(x, 5) for x in fivesTest]\nnp.random.shuffle(fivesTest)\n\ntesting_set = onesTest[0:200] + fivesTest[0:200]\nnp.random.shuffle(testing_set)\n\n\ndef plot(x, y, title):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(x, y)\n\n plt.xlabel('Training Samples')\n plt.ylabel('Accuracy (%)')\n plt.title(title)\n plt.show()\n\n\ndef subshow(img, title=''):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.imshow(img)\n ax.set_title(title)\n\n\ndef show(img, title='', suptitle=''):\n plt.imshow(img)\n plt.title(title)\n plt.suptitle(suptitle)\n plt.show()\n\n\ndef test(network, index, item, ones, fives, suptitle, plot=False):\n\n image = np.array(item[0]).reshape(28, 28)\n\n result = np.array(network.stabilize(item[0]))\n\n label = item[1]\n\n # comparing ones\n min_distance = float('inf')\n for j in ones:\n if plot:\n subshow(np.array(j[0]).reshape(28, 28), \"Training ones %s\" % index)\n dist = np.linalg.norm(result - j[0])\n if dist < min_distance:\n min_distance = dist\n winning_label = j[1]\n dist = np.linalg.norm(np.multiply(-1, result) - j[0])\n if dist < min_distance:\n min_distance = dist\n winning_label = j[1]\n\n # comparing fives\n for j in fives:\n if plot:\n subshow(np.array(j[0]).reshape(28, 28), \"Training 5 %s\" % index)\n dist = np.linalg.norm(result - j[0])\n if dist < min_distance:\n min_distance = dist\n winning_label = j[1]\n dist = np.linalg.norm(np.multiply(-1, result) - j[0])\n if dist < min_distance:\n min_distance = dist\n winning_label = j[1]\n\n if plot:\n subshow(image, \"Original %s\" % index, suptitle)\n subshow(result.reshape(28, 28), \"After %s\" % index, suptitle)\n print(\"winning label\", winning_label)\n plt.show()\n\n return winning_label == label\n\n\n# hebbian\nx = [0 for _ in range(1, 30)]\ny = [0 for _ in range(1, 30)]\n\nruns = 5\nfor run in range(runs):\n print(\"run\", run)\n np.random.shuffle(ones)\n np.random.shuffle(fives)\n for i in range(1, 30, 1):\n print(\"Training on hebbian\", i * 2)\n teXY = ones[:i] + fives[:i]\n\n np.random.shuffle(teXY)\n\n hebbian = network(train_dataset=teXY, mode='hebbian')\n\n hebb_acc = 0.\n for index, image in 
enumerate(testing_set):\n norm = test(hebbian, index, image, ones[:i], fives[:i],\n \"hebbian\", plot=False)\n hebb_acc += norm\n\n x[i - 1] += i * 2\n y[i - 1] += hebb_acc / len(testing_set)\n\n print(\"hebbian accuracy training samples\", i * 2, \"accuracy\", (hebb_acc / len(testing_set)))\n\n\nplot(np.divide(x, runs), np.divide(y, runs), \"Accuracy vs training samples\")\n\n# storkey\n\nx = [0 for _ in range(1, 30)]\ny = [0 for _ in range(1, 30)]\n\nfor run in range(runs):\n np.random.shuffle(ones)\n np.random.shuffle(fives)\n for i in range(1, 30, 1):\n print(\"Training on storkey\", i * 2)\n teXY = ones[:i] + fives[:i]\n np.random.shuffle(teXY)\n storkey = network(train_dataset=teXY, mode='storkey')\n\n ac = 0.\n print(\"starting\")\n for index, image in enumerate(testing_set):\n norm = test(storkey, index, image, ones[:i], fives[:i],\n \"storkey\", plot=False)\n ac += norm\n\n x[i - 1] += i * 2\n y[i - 1] += ac / len(testing_set)\n\n print(\"storkey accuracy training samples\", i * 2, \"accuracy\", (ac / len(testing_set)))\n\nplot(np.divide(x, runs), np.divide(y, runs), \"Storkey Accuracy vs training samples\")\n","sub_path":"Assignment3/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":7048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"480906964","text":"# list comp\n\na= [[1,2,3,4,42,4,3,2],[4,2,3,9,5,83,8,2,9,0,3], [4,8,15,16,23,42]]\n\nb = []\nfor sub in a:\n for i in sub:\n b.append(str(i+1))\nprint(b)\n#make a list with all the values from these 3 lists using list comprehension\n\nb2 = [str(i+1) for sub in a for i in sub]\nprint(b2)\n\n\nb = [10,67,89,7,4,90,127,1024]\nc = [x*10 for x in b if x>10]\n\nprint(c)\n\nfor x in b:\n if x > 10:\n c.append(x*10)\n","sub_path":"InClass/Class25.py","file_name":"Class25.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"52369150","text":"#-*-coding:UTF-8 -*\nimport os\n\nif __name__ == \"__main__\":\n annee = input(\"Saisi une année : \")\n annee = int(annee)\n\n if annee % 400 == 0 or (annee % 4 == 0 and annee % 100 != 0):\n print(f\"l'année {annee} est bissextile !\")\n else :\n print(f\"l'année {annee} n'est pas bissextile !\") \n\n\n\ndef table_par_mumu(mumu, max=10):\n nb = 0\n while nb < max :\n print(f\"{nb} * {mumu} =\", nb *mumu)\n nb += 1\n\nos.system(\"pause\")\n\ndef afficher_virgule(flottant):\n if type(flottant) != float:\n raise TypeError(\"le paramètre attendu doit être flottant...\")\n flottant = str(flottant)\n premier, deuxieme = flottant.split(\".\")\n return \",\".join([premier,deuxieme[:3]])\n\n\ndef afficher(*parametres, sep=' ', fin='\\n'):\n parametres = list(parametres)\n for i, parametre in enumerate(parametres):\n parametres[i] = str(parametre)\n chaine = sep.join(parametres)\n chaine += fin\n print(chaine)\n\n ","sub_path":"Python/Cours_OpenClassRoom/cours_python/exercice.py","file_name":"exercice.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"565875860","text":"import sqlite3\n\n# Sqlite3 tables dictionary\ntables = {'general': 'generales', 'personal': 'personales', 'tv': 'televisión', 'movies': 'películas',\n 'books': 'libros', \"music\": 'música',\n 'tech': 'tecnología', 'sport': 'deportes', 'food': 'comida_cocina', 'travel': 'viajes', 'fashion': 'ropa',\n 'holi': 'feriados', 'edu': 'educación', 'strange': 'extrañas', 'phil': 'filo', 
'lang': 'idiomas',\n 'games': 'juegos', 'open': 'open'}\n\ntables_keys = list(tables.keys())\ntables_first_two_characters = [key[0:2] for key in tables_keys]\ntables_values = list(tables.values())\n\n# SQLITE QUERY\nconnection = sqlite3.connect(\"cogs/utils/preguntas.db\")\n\"\"\"\nSQLITE QUERY TO GET RANDOM QUESTION FROM SPECIFIED TABLE\n{0} - English or Spanish question\n{1} - topic/category\n\"\"\"\n\nSELECT_RANDOM_QUESTION = \"\"\"\nSELECT * FROM {0}\nORDER BY RANDOM()\nLIMIT 1;\n\"\"\"\n\n\ndef random_question(table):\n if table in tables_values:\n with connection:\n cursor = connection.cursor()\n cursor.execute(SELECT_RANDOM_QUESTION.format(table))\n return cursor.fetchone()\n return\n\n# Below is a query and function to insert records into the database\n#\n# INSERT = 'INSERT INTO juegos (questions_spa, questions_eng) VALUES (?, ?);'\n#\n# def insert_into(lin1, lin2):\n# with connection:\n# connection.execute(INSERT, (lin1, lin2))\n#\n# with open(\"es.txt\", \"r\", encoding='utf 8') as archivo1, open(\"en.txt\", \"r\", encoding='utf 8') as archivo2:\n# for line1, line2 in zip(archivo1, archivo2):\n# lone_l = line1.strip()\n# ltwo_l = line2.strip()\n# insert_into(lone_l, ltwo_l)\n","sub_path":"cogs/convo_db.py","file_name":"convo_db.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"373861219","text":"class Solution:\n def binaryGap(self, N: int) -> int:\n b = str(bin(N))[2:]\n n = 1\n array = []\n length = len(b)\n for i in range(0,length):\n if b[i] == '1':\n array.append(i)\n value = 0\n p = len(array)\n for i in range(1,p):\n value = max(value,array[i] - array[i-1])\n #print(array)\n return value","sub_path":"868.py","file_name":"868.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"424456600","text":"\"\"\"\nfxpmath\n\n---\n\nA python library for fractional fixed-point arithmetic.\n\n---\n\nThis software is provided under MIT License:\n\nMIT License\n\nCopyright (c) 2020 Franco, francof2a\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n#%% \nimport numpy as np\nfrom .objects import Fxp\nfrom . 
import utils\n\ndef fxp_like(x, val=None):\n '''\n Returns a Fxp object like `x`.\n\n Parameters\n ---\n\n x : Fxp\n Object (Fxp) to copy.\n \n val : None or int or float or list or ndarray or str, optional, default=None\n Input value for the returned Fxp object.\n\n Returns\n ---\n\n y : Fxp\n New Fxp object like `x`.\n\n '''\n y = x.copy()\n return y(val)\n\ndef sum(x, sizes='best_sizes', axis=None, dtype=None, out=None, vdtype=None):\n '''\n Sum of array elements of a Fxp object, over a given axis.\n\n Paramters\n ---\n\n x : Fxp\n Elements to sum in a Fxp object.\n\n sizes : str, optional, default='best_sizes'\n Defines the returned Fxp sizes according input array size (val).\n * 'best_sizes': a extra word bit is added per couple of additions stage (log2(x().size))\n * 'tight_sizes': after calculate sum, the minimum sizes for n_word and n_frac are choosed.\n * 'same_sizes': same sizes than `x` are used to stored the result.\n\n If `dtype` or `out` are not None, `sizes` doesn't apply.\n\n axis : None or int or tuple of ints, optional, default=None\n Axis or axes along which a sum is performed. The default, axis=None, \n will sum all of the elements of the input array. \n If axis is negative it counts from the last to the first axis.\n\n dtype : str (Fxp dtype format), optional, default=None\n fxp-/-{complex}. i.e.: fxp-s16/15, fxp-u8/1, fxp-s32/24-complex\n If None, `sizes` or `out` are used to defined output format.\n\n A `dtype` can be alse extracted from a Fxp, i.e.: dtype=x.dtype\n\n out : Fxp, optional, default=None\n Alternative Fxp object to stored the result.\n If None, `sizes` or `dtype` are used to defined output format\n\n vdtype : dtype, optional, default=None\n The type of the returned array and of the accumulator in which the elements are summed.\n\n Returns\n ---\n sum_along_axis : Fxp\n A Fxp with an array with the same shape as `x` values, with the specified axis removed. \n If `x` val is a 0-d array, or if axis is None, a scalar value is returned inside Fxp. \n If an output array is specified, a reference to `out` is returned.\n\n '''\n if isinstance(x, Fxp):\n x_vals = x.get_val()\n else:\n x_vals = x\n\n x_sum = np.sum(x_vals, axis=axis, dtype=vdtype)\n\n if dtype is not None:\n signed, n_word, n_frac = utils.get_sizes_from_dtype(dtype)\n\n sum_along_axis = Fxp(x_sum, signed=signed, n_word=n_word, n_frac=n_frac)\n elif out is not None:\n if isinstance(out, Fxp):\n sum_along_axis = out(x_sum)\n else:\n raise TypeError('out argument must be a Fxp object!')\n elif sizes == 'best_sizes':\n signed = x.signed\n n_word = np.ceil(np.log2(x().size)).astype(int) + x.n_word\n n_frac = x.n_frac\n \n sum_along_axis = Fxp(x_sum, signed=signed, n_word=n_word, n_frac=n_frac)\n elif sizes == 'tight_sizes':\n sum_along_axis = Fxp(x_sum, signed=x.signed)\n elif sizes == 'same_sizes':\n sum_along_axis = Fxp(x_sum, like=x)\n\n return sum_along_axis\n","sub_path":"fxpmath/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"291744950","text":"# Copyright 2014 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport httplib\nimport mock\n\nfrom oslo_serialization import jsonutils as json\n\nfrom magnetodb.tests.unittests.api.openstack.v1 import test_base_testcase\n\n\nclass BatchWriteItemTestCase(test_base_testcase.APITestCase):\n \"\"\"The test for batch_write_item method for v1 ReST API.\"\"\"\n\n @mock.patch('magnetodb.storage.execute_write_batch', return_value={})\n def test_batch_write_item(self, mock_execute_write_batch):\n headers = {'Content-Type': 'application/json',\n 'Accept': 'application/json'}\n\n conn = httplib.HTTPConnection('localhost:8080')\n url = '/v1/data/default_tenant/batch_write_item'\n body = \"\"\"\n {\n \"request_items\": {\n \"Forum\": [\n {\n \"put_request\": {\n \"item\": {\n \"Name\": {\"S\": \"MagnetoDB\"},\n \"Category\": {\"S\": \"OpenStack KVaaS\"}\n }\n }\n },\n {\n \"put_request\": {\n \"item\": {\n \"Name\": {\"S\": \"Nova\"},\n \"Category\": {\"S\": \"OpenStack Core\"}\n }\n }\n },\n {\n \"put_request\": {\n \"item\": {\n \"Name\": {\"S\": \"KeyStone\"},\n \"Category\": {\"S\": \"OpenStack Core\"}\n }\n }\n },\n {\n \"delete_request\": {\n \"key\": {\n \"Name\": {\"S\": \"Cinder\"},\n \"Category\": {\"S\": \"OpenStack Core\"}\n }\n }\n }\n ]\n }\n }\n \"\"\"\n conn.request(\"POST\", url, headers=headers, body=body)\n\n response = conn.getresponse()\n\n json_response = response.read()\n response_payload = json.loads(json_response)\n\n self.assertTrue(mock_execute_write_batch.called)\n self.assertEqual({'unprocessed_items': {}}, response_payload)\n\n def test_batch_write_item_malformed(self):\n self.maxDiff = None\n headers = {'Content-Type': 'application/json',\n 'Accept': 'application/json'}\n\n conn = httplib.HTTPConnection('localhost:8080')\n url = '/v1/data/default_tenant/batch_write_item'\n body = '{\"foo\": \"bar\"}'\n conn.request(\"POST\", url, headers=headers, body=body)\n\n response = conn.getresponse()\n\n json_response = response.read()\n response_payload = json.loads(json_response)\n\n self.assertEqual(400, response.status)\n\n expected_message = (\n \"Required property 'request_items' wasn't found or \"\n \"it's value is null\"\n )\n expected_type = 'ValidationError'\n\n self.assertEqual(expected_message,\n response_payload['error']['message'])\n self.assertEqual(expected_type,\n response_payload['error']['type'])\n","sub_path":"magnetodb/tests/unittests/api/openstack/v1/test_batch_write_item.py","file_name":"test_batch_write_item.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}