diff --git "a/1236.jsonl" "b/1236.jsonl" new file mode 100644--- /dev/null +++ "b/1236.jsonl" @@ -0,0 +1,701 @@ +{"seq_id":"621887670","text":"from django.db import models\n\n# Create your models here.\n\n\nclass Actor(models.Model):\n\n GENDER = (\n ('male', 'Male'),\n ('female', 'Female'),\n ('other', 'Other')\n )\n actor_id = models.CharField(max_length=100, primary_key=True)\n name = models.CharField(max_length=100)\n actor_image = models.ImageField(upload_to='home/media/actors', height_field=None, width_field=None, max_length=None, null=True, blank=True) \n actor_discription = models.TextField(null=True)\n gender = models.CharField(max_length=50, choices=GENDER)\n fb_likes = models.IntegerField(null=True)\n date_of_birth = models.DateField(null=True)\n movies = models.ManyToManyField('Movie', through='Cast', blank=True)\n\n\n def __str__(self):\n return self.name\n\nclass Director(models.Model):\n GENDER = (\n ('male', 'Male'),\n ('female', 'Female'),\n ('other', 'Other')\n )\n name = models.CharField(max_length=100)\n director_image = models.ImageField(upload_to='home/media/directors', height_field=None, width_field=None, max_length=None, null=True, blank=True)\n director_discription = models.TextField(null=True)\n gender = models.CharField(max_length=50, choices=GENDER,null=True)\n no_of_facebook_likes = models.IntegerField(default=0)\n\n def __str__(self):\n return self.name\n\nclass Movie(models.Model):\n\n GENRES = (\n ('Action', 'Action'),\n ('Adventure', 'Adventure'),\n ('Animation', 'Animation'),\n ('Comedy', 'Comedy'),\n ('Documentary', 'Documentary'),\n ('Drama', 'Drama'),\n ('Family', 'Family'),\n ('Fantasy', 'Fantasy'),\n ('Horror', 'Horror'),\n ('Romance', 'Romance'),\n ('Sci-fi', 'Sci-fi'),\n ('Thriller', 'Thriller')\n \n )\n movie_id = models.CharField(max_length=500, primary_key=True)\n name = models.CharField(max_length=200)\n movie_poster = models.ImageField(upload_to='home/media/movies', height_field=None, width_field=None, max_length=None, null=True, blank=True)\n movie_discription = models.TextField(null=True)\n release_date = models.DateField((\"Released Date\"), auto_now=False, auto_now_add=False)\n imdb_link = models.CharField(max_length=1000)\n avg_rating = models.FloatField()\n budget = models.IntegerField(default=0)\n collections = models.FloatField()\n language = models.CharField(max_length=100, default='English')\n country = models.CharField(max_length=100, default='USA')\n likes_on_fb = models.IntegerField(default=0)\n genre = models.CharField(max_length=100,null=True, choices=GENRES)\n director = models.ForeignKey(Director, on_delete=models.CASCADE, null=True)\n\n def __str__(self):\n return self.name\n\n\nclass Cast(models.Model):\n actor = models.ForeignKey(Actor, on_delete=models.CASCADE)\n movie = models.ForeignKey(Movie, verbose_name=(\"Acted Movie\"), on_delete=models.CASCADE)\n role = models.CharField(max_length=50, null=True)\n is_debut_movie = models.BooleanField(default=False)\n\n def __str__(self):\n return self.actor.name + ' ' + self.movie.name + ' ' + self.role\n\n\n\n","sub_path":"imdb_project/imdb/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"275861065","text":"'''\nFile Name: writeppt.py\nDescription:\n@author:czf\nCreated on 18:08 12-04 2018\n'''\n\nimport win32com\nimport win32com.client\n\n\ndef writeppt(path):\n ppt = win32com.client.Dispatch(\"PowerPoint.Application\")\n ppt.Visible = True\n pptFile 
= ppt.Presentations.Add()\n    page1 = pptFile.Slides.Add(1, 1) # page number, layout type\n    txt1 = page1.Shapes[0].TextFrame.TextRange\n    txt1.Text = \"\"\n    pptFile.SaveAs(path)\n    pptFile.Close()\n    ppt.Quit()","sub_path":"writeppt.py","file_name":"writeppt.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"418913376","text":"import numpy as np\r\nfrom keras.models import Model \r\nfrom keras.layers import Dense, Input\r\nfrom keras.utils import np_utils\r\nfrom keras.datasets import mnist\r\n\r\nx = Input(shape=(784,), dtype=\"float32\")\r\nNN = Dense(512, activation=\"sigmoid\")(x)\r\nNN = Dense(10, activation=\"softmax\")(NN)\r\n\r\nmodel = Model(inputs=[x], outputs=[NN])\r\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", loss_weights=[0.5], metrics=[\"accuracy\"])\r\n\r\n(train_x, train_y), (test_x, test_y) = mnist.load_data()\r\ntrain_x = train_x.reshape(60000, 784) / 255\r\ntest_x = test_x.reshape(10000, 784) / 255\r\nprint(len(train_x))\r\ntrain_y = np_utils.to_categorical(train_y)\r\ntest_y = np_utils.to_categorical(test_y)\r\n# from _CreateBMP import CreateBMP\r\n# CreateBMP(train_x[0], train_y[0])\r\n# print(train_x[0], train_y[0])\r\n\r\nmodel.fit(x=[train_x], y=[train_y], epochs=30)\r\nscore = model.evaluate(x=[test_x], y=[test_y])\r\nprint(\"Loss:%f, Accuracy:%f\" % (score[0], score[1]))\r\n\r\nmodel.save_weights(\"mnist.h5\")\r\nwith open(\"mnist.json\", \"w\") as f:\r\n    f.write(model.to_json())","sub_path":"ml/MNIST/FNN/krs_mnist.py","file_name":"krs_mnist.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"67802644","text":"case_number = 1\n\ndef print_case(k, r, c):\n    global case_number\n    print(\"Case #{}: {} {} {}\".format(case_number, k, r, c))\n    case_number += 1\n\ndef calculate_trace(case, size):\n    trace = 0\n\n    for i in range(size):\n        trace += int(case[i][i])\n    \n    return trace\n\ncases = int(input())\n\ncase_list = []\nfor i in range(cases):\n    n = int(input())\n    \n    rows = []\n    for j in range(n):\n        rows.append(input().split())\n    case_list.append(rows)\n\nfor case in case_list:\n    trace = calculate_trace(case, len(case))\n\n    repeated_rows = 0\n    repeated_cols = 0\n\n    for row in case:\n        elements = []\n        for ele in row:\n            if ele in elements:\n                repeated_rows += 1\n                break\n            else:\n                elements.append(ele)\n    \n    for col in range(len(case)):\n        elements = []\n\n        for row in case:\n            if row[col] in elements:\n                repeated_cols += 1\n                break\n            else:\n                elements.append(row[col])\n\n    print_case(trace, repeated_rows, repeated_cols)","sub_path":"codejam/vestigium.py","file_name":"vestigium.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"462825746","text":"\"\"\"Hold functions to assist main.py.\"\"\"\r\nimport os\r\nimport json\r\nimport pickle\r\nimport time\r\nimport requests\r\nimport sql_helper\r\nimport pyfy\r\n\r\n\r\ndef add_track_to_user_library(user_id, track_id):\r\n    \"\"\"Add track to user's library stored as a text file.\"\"\"\r\n    if not os.path.exists(\"var/libraries\"):\r\n        os.mkdir(\"var/libraries\")\r\n    if not os.path.exists(\"var/libraries/{0}_library.txt\".format(user_id)):\r\n        with open(\"var/libraries/{0}_library.txt\".format(user_id), \"w+\") as f:\r\n            f.write(\"{}\\n\".format(track_id))\r\n    else:\r\n        f_read = open(\"var/libraries/{0}_library.txt\".format(user_id), \"r\")\r\n        songs = f_read.readlines()\r\n        f_read.close()\r\n        with open(\"var/libraries/{0}_library.txt\".format(user_id), \"a+\") as f:\r\n            if \"{}\\n\".format(track_id) not in songs:\r\n                f.write(\"{}\\n\".format(track_id))\r\n            else:\r\n                print(\"already in there\")\r\n\r\n\r\ndef get_user_tracks(in_spt, user_id):\r\n    \"\"\"Get the tracks saved in a user's library.\"\"\"\r\n    lim = 50\r\n    tracks = in_spt.user_tracks(limit=lim)\r\n    num_tracks = 0 \r\n\r\n    to_continue = True\r\n\r\n    while to_continue:\r\n        if not tracks[\"next\"]:\r\n            to_continue = False\r\n        for track in tracks['items']:\r\n            track_info = track['track']\r\n            track_dict = {\r\n                \"name\": track_info[\"name\"],\r\n                \"popularity\": track_info[\"popularity\"],\r\n                \"album_id\": track_info[\"album\"][\"id\"],\r\n                \"id\": track_info[\"id\"],\r\n                \"artist_names\": [],\r\n                \"artist_ids\": []\r\n            }\r\n\r\n            if sql_helper.track_exists_in_db(track_dict[\"id\"]):\r\n                add_track_to_user_library(user_id, track_dict[\"id\"])\r\n            else:\r\n\r\n                for artist in track_info[\"artists\"]:\r\n                    track_dict[\"artist_ids\"].append(artist[\"id\"])\r\n                    track_dict[\"artist_names\"].append(artist[\"name\"])\r\n\r\n                # print(track_dict)\r\n\r\n                for _ in range(10):\r\n                    try:\r\n                        track_features = in_spt.tracks_audio_features(track_dict[\"id\"])\r\n                        break\r\n                    except pyfy.excs.ApiError as e:\r\n                        print(\"SHIT\")\r\n                        if e.http_response.status_code == 429:\r\n                            time.sleep(int(e.http_response.headers['Retry-After']) * 2)\r\n                        else:\r\n                            print(\"Unknown problem with response.\")\r\n                            exit(1)\r\n                    \r\n                add_track_to_user_library(user_id, track_dict[\"id\"])\r\n\r\n                for _ in range(10):\r\n                    try:\r\n                        track_dict.update(\r\n                            get_desired_album_info(\r\n                                in_spt.albums(\r\n                                    [track_dict[\"album_id\"]])))\r\n                        break\r\n                    except pyfy.excs.ApiError as e:\r\n                        print(\"FUCK\")\r\n                        if e.http_response.status_code == 429:\r\n                            time.sleep(int(e.http_response.headers['Retry-After']) * 2)\r\n                        else:\r\n                            print(\"Unknown problem with response.\")\r\n                            exit(1)\r\n\r\n                sql_helper.add_track(track_dict[\"id\"], track_dict[\"name\"],\r\n                                     track_dict[\"popularity\"], track_dict[\"album_id\"],\r\n                                     track_dict[\"album_name\"], track_dict[\"album_popularity\"],\r\n                                     json.dumps(track_dict[\"artist_ids\"]),\r\n                                     json.dumps(track_dict[\"artist_names\"]),\r\n                                     track_dict[\"release_date\"],\r\n                                     track_dict[\"release_date_precision\"],\r\n                                     track_features)\r\n        num_tracks += lim\r\n        for _ in range(10):\r\n            try:\r\n                tracks = in_spt.user_tracks(limit=lim, offset=num_tracks)\r\n                break\r\n            except pyfy.excs.ApiError as e:\r\n                print(\"PISS\")\r\n                if e.http_response.status_code == 429:\r\n                    time.sleep(int(e.http_response.headers['Retry-After']) * 2)\r\n                else:\r\n                    print(\"Unknown problem with response.\")\r\n                    exit(1)\r\n    print(\"Looked at roughly {} tracks\".format(num_tracks))\r\n    return str(num_tracks)\r\n\r\n\r\ndef get_desired_album_info(album_response_json):\r\n    \"\"\"Get only wanted features from an album.\"\"\"\r\n    return {\"album_name\": album_response_json[\"name\"],\r\n            \"album_popularity\": album_response_json[\"popularity\"],\r\n            \"release_date\": album_response_json[\"release_date\"],\r\n            \"release_date_precision\": album_response_json[\"release_date_precision\"],\r\n            }\r\n","sub_path":"api/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"62076088","text":"import numpy\n\ndef RudermanLABFwd(I):\n    '''\n    Transforms an RGB color image into the LAB space defined by Ruderman in the 1998 paper. 
\n *Inputs:\n I (rgbimage) - an RGB image of type unsigned char.\n *Outputs:\n LAB (rgbimage) - the LAB representation of the input image 'I'.\n *Related functions:\n RudermanLABInv, ReinhardNorm\n *References:\n \"Statistics of Cone Responses to Natural Images: Implications for Visual Coding\" J. Optical Society of America, vol. 15, no. 8, 1998, pages 2036-45.\n '''\n\n #get input image dimensions\n m = I.shape[0]\n n = I.shape[1]\n\n #define conversion matrices\n RGB2LMS = numpy.array([[0.3811,0.5783,0.0402],[0.1967,0.7244,0.0782],[0.0241,0.1288,0.8444]])\n LMS2LAB = numpy.array([[1/(3**(0.5)),0,0],[0,1/(6**(0.5)),0],[0,0,1/(2**(0.5))]]).dot(numpy.array([[1,1,1],[1,1,-2],[1,-1,0]]))\n\n #calculate LMS values from RGB\n I = numpy.reshape(I, (m*n,3))\n LMS = numpy.dot(RGB2LMS, numpy.transpose(I))\n LMS[LMS == 0] = numpy.spacing(1)\n logLMS = numpy.log(LMS)\n\n #calculate LAB values from LMS\n LAB = LMS2LAB.dot(logLMS)\n\n #reshape to 3-channel image\n LAB = numpy.reshape(LAB.transpose(), (m,n,3))\n\n return(LAB)","sub_path":"histomicstk/RudermanLABFwd.py","file_name":"RudermanLABFwd.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"475862145","text":"import pandas as pd\nimport math\nimport numpy as np\nclass Neptunee:\n def __init__(self,pandas_data):\n self.data_dict=dict()\n for one_data in pandas_data.index:\n if (len(pandas_data.iloc[one_data])) == 2:\n key = pandas_data.iloc[one_data][1]\n value = []\n value.append(pandas_data.iloc[one_data][0])\n if value[0] != value[0]:\n value = prev\n self.data_dict[key] = value\n if value[0] == value[0]:\n prev = value\n elif (len(pandas_data.iloc[one_data])) == 3:\n key = pandas_data.iloc[one_data][1]\n value = []\n value.append(pandas_data.iloc[one_data][0])\n value.append(pandas_data.iloc[one_data][2])\n if value[0] != value[0]:\n value = prev\n self.data_dict[key] = value\n if value[0] == value[0]:\n prev = value\n elif(len(pandas_data.iloc[one_data]))==4:\n key = pandas_data.iloc[one_data][2]\n value = []\n value.append(pandas_data.iloc[one_data][0])\n value.append(pandas_data.iloc[one_data][3])\n if value[0]!=value[0]:\n value=prev\n self.data_dict[key]=value\n if value[0]==value[0]:\n prev=value\n print(self.data_dict)\n def getData(self,key):\n return self.data_dict[key]\nfilename='Capacity Monitoring Reference v3 1.xlsx'\nFlex=pd.read_excel(filename,sheet_name='Flex')\nNeptune_MME=pd.read_excel(filename,sheet_name='Neptune MME')\nNeptune_SGW=pd.read_excel(filename,sheet_name='Neptune SGW')\nNeptune_CoreCS=pd.read_excel(filename,sheet_name='Neptune CoreCS')\nNeptune_Roaming=pd.read_excel(filename,sheet_name='Neptune Roaming')\nNephub=pd.read_excel(filename,sheet_name='Nephub')\nTDRhub=pd.read_excel(filename,sheet_name='TDRhub')\nMediation_Store=pd.read_excel(filename,sheet_name='Mediation Store')\nFlex=Neptunee(Flex)\nNeptune_MME=Neptunee(Neptune_MME)\nNeptune_SGW=Neptunee(Neptune_SGW)\nNeptune_CoreCS=Neptunee(Neptune_CoreCS)\nNeptune_Roaming=Neptunee(Neptune_Roaming)\nNephub=Neptunee(Nephub)\nTDRhub=Neptunee(TDRhub)\nMediation_Store=Neptunee(Mediation_Store)\n#sgw=Neptune_SGW.getData('UPFRAMEGN_5')\n#print('Example sgw',sgw)\n#mme=neptune_MME.getData('SV_OUT_CDR_5')\n#print('Example mme',mme)\n\n\n","sub_path":"neptuneDictionary.py","file_name":"neptuneDictionary.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"393224801","text":"from 
flask import Flask, render_template,request,session,redirect\nfrom flask_sqlalchemy import SQLAlchemy \nfrom flask_mail import Mail \nimport json\nfrom datetime import datetime\n\n\nwith open('config.json','r') as c:\n\tparams=json.load(c)[\"params\"]\n\nlocal_server=True\napp=Flask(__name__)\napp.secret_key=\"super-secret-key\"\napp.config.update(\n\tMAIL_SERVER='smtp.gmail.com',\n\tMAIL_PORT='465',\n\tMAIL_USE_SSL=True,\n\tMAIL_USERNAME=params['gmail-user'],\n\tMAIL_PASSWORD=params['gmail-password']\n\n)\nmail=Mail(app)\n\nif local_server:\n\tapp.config['SQLALCHEMY_DATABASE_URI']=params['local_uri']\nelse:\n\tapp.config['SQLALCHEMY_DATABASE_URI']=params['prod_uri']\ndb=SQLAlchemy(app)\n\nclass Contacts(db.Model):\n\n sno = db.Column(db.Integer, primary_key=True)\n Name = db.Column(db.String(80), nullable=False)\n Phone_no = db.Column(db.String(12), nullable=False)\n messages = db.Column(db.String(120), nullable=False)\n Date = db.Column(db.String(12),nullable=True)\n Email = db.Column(db.String(20), nullable=False)\n\nclass Posts(db.Model):\n\n sno = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(80), nullable=False)\n slug = db.Column(db.String(21), nullable=False)\n content = db.Column(db.String(120), nullable=False)\n tagline = db.Column(db.String(120), nullable=False)\n date = db.Column(db.String(12),nullable=True)\n img_file = db.Column(db.String(12),nullable=True)\n \n \n\n@app.route('/')\ndef home():\n\tposts=Posts.query.filter_by().all()[0:params['no_of_posts']]\n\treturn render_template('index.html',params=params,posts=posts)\n\n@app.route('/about')\ndef about():\n\treturn render_template('about.html',params=params)\n\n@app.route('/dashboard',methods=['GET','POST'])\ndef dashboard():\n\tif 'user' in session and session['user']==params['admin_user']:\n\t\tposts=Posts.query.all()\n\t\treturn render_template('dashboard.html',params=params,posts=posts)\n\t\t\n\tif request.method==\"POST\":\n\t\tusername=request.form.get('uname')\n\t\tuserpass=request.form.get('pass')\n\t\tif username==params['admin_user'] and userpass==params['admin_password']:\n\t\t\t#set the session variable\n\t\t\tsession['user']=username\n\t\t\tposts=Posts.query.all()\n\t\t\treturn render_template('dashboard.html',params=params,posts=posts)\n\t\t\t\n\treturn render_template('login.html',params=params)\n\n@app.route('/post/',methods=['GET'])\ndef post_route(post_slug):\n\tpost=Posts.query.filter_by(slug=post_slug).first()\n\treturn render_template('post.html',params=params,post=post)\n\n@app.route(\"/edit/\",methods=['GET','POST'])\ndef edit(sno):\n\tif 'user' in session and session['user']==params['admin_user']:\n\t\tif request.method==\"POST\":\n\t\t\tbox_title=request.form.get('title')\n\t\t\ttline=request.form.get('tline')\n\t\t\tslug=request.form.get('slug')\n\t\t\tcontent=request.form.get('content')\n\t\t\timg_file=request.form.get('img_file')\n\t\t\tdate=datetime.now()\n\n\t\t\tif sno=='0':\n\t\t\t\tpost=Posts(title=box_title,slug=slug,content=content,tagline=tline,img_file=img_file,date=date)\n\t\t\t\tdb.session.add(post)\n\t\t\t\tdb.session.commit()\n\t\t\telse:\n\t\t\t\tpost=Posts.query.filter_by(sno=sno).first()\n\t\t\t\tpost.title=box_title\n\t\t\t\tpost.slug=slug\n\t\t\t\tpost.content=content\n\t\t\t\tpost.tagline=tline\n\t\t\t\tpost.img_file=img_file\n\t\t\t\tpost.date=date\n\t\t\t\tdb.session.commit()\n\t\t\t\treturn redirect('/edit/'+sno)\n\t\tpost=Posts.query.filter_by(sno=sno).first()\n\t\treturn render_template('edit.html', 
params=params,post=post)\n\n\n\n@app.route('/contact',methods=['GET','POST'])\ndef contact():\n\tif request.method=='POST':\n\n\t\t'''add entry to the db.'''\n\t\tname=request.form.get('name')\n\t\temail=request.form.get('email')\n\t\tphone=request.form.get('phone')\n\t\tmessage=request.form.get('message')\n\t\t\n\t\tentry=Contacts(Name=name,Phone_no=phone,messages=message,Date=datetime.now(),Email=email)\n\t\tdb.session.add(entry)\n\t\tdb.session.commit()\n\t\tmail.send_message('New Message from'+name,\n\t\t\tsender=email,\n\t\t\trecipients=[params['gmail-user']],\n\t\t\tbody=message+'\\n'+phone\n\t\t\t)\n\n\n\treturn render_template('contact.html',params=params)\n\n\n\napp.run(debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"429298202","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nCreated on 2017/4/14\n\n@author: ybwang\n\"\"\"\nimport numpy as np\nfrom scipy.optimize import minimize\nfrom sklearn.metrics import roc_curve, roc_auc_score\nimport matplotlib.pyplot as plt\n\n\ndef rosen(x):\n return sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0)\n\n\ndef loss(w, data):\n x = data[:, :-1]\n y = data[:, -1]\n w = np.array([w])\n p = (1.0 / (1.0 + np.exp(-x.dot(w.T)))).flatten()\n mle = -np.sum(y * np.log(p + 1e-9) + (1 - y) * np.log(1 - p + 1e-9))\n return mle\n\n\ndef readTSV(tsvfile, skipcolumn=True, skiprow=True):\n data = []\n n = 0\n with open(tsvfile, 'r') as f:\n for line in f:\n if n == 0 and skiprow == True:\n n += 1\n continue\n ele = line.rstrip().split('\\t')\n if skipcolumn == True: ele = ele[1::]\n data.append(ele)\n return np.float64(np.array(data))\n\n\ndata = readTSV('E:/Pycharm/code/hp/dataPLS.txt')\n\n# loss(np.array([np.repeat(1, 62)]), np.hstack((np.ones((data.shape[0], 1)), data)))\n\nx0 = np.repeat(0, 62).tolist()\nd = np.hstack((np.ones((data.shape[0], 1)), data))\n# print loss(x0, d)\nres = minimize(loss, x0, args=(d,), method='BFGS')\n\nw = np.array([res.x] ).reshape((-1,1))\np = 1 / (1 + np.exp(-d[:,:-1].dot(w)))\nauc = roc_auc_score(d[:,-1].flatten(), p.flatten())\nfpr, tpr, threshold = roc_curve(d[:,-1].flatten(), p.flatten(),pos_label=1)\nplt.plot(fpr,tpr,'r-')\nplt.legend(['AUC = ' +str(auc)], loc=4)\nplt.show()\n","sub_path":"optim.py","file_name":"optim.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"382142685","text":"#!/usr/bin/env python\n\"\"\"Cloudflare API code - example\"\"\"\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\nimport CloudFlare\n\ndef main():\n \"\"\"Cloudflare API code - example\"\"\"\n\n cf = CloudFlare.CloudFlare()\n try:\n ips = cf.ips.get()\n except CloudFlare.exceptions.CloudFlareAPIError as e:\n exit('/ips - %d %s' % (e, e))\n except Exception as e:\n exit('/ips - %s - api call connection failed' % (e))\n\n print('ipv4_cidrs count = ', len(ips['ipv4_cidrs']))\n for cidr in sorted(set(ips['ipv4_cidrs'])):\n print('\\t', cidr)\n print('ipv6_cidrs count = ', len(ips['ipv6_cidrs']))\n for cidr in sorted(set(ips['ipv6_cidrs'])):\n print('\\t', cidr)\n exit(0)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"examples/example_ips.py","file_name":"example_ips.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"494547117","text":"from turtle import 
*\n\ndef jump (distanz, winkel=0):\n    pu ()\n    rt (winkel)\n    fd (distanz)\n    lt (winkel)\n    pd ()\n\ndef n_eck (eckenzahl, seitenlaenge):\n    drehwinkel = 360.0 / eckenzahl\n    for i in range (eckenzahl):\n        fd (seitenlaenge)\n        lt (drehwinkel)\n\ndef n_eck_demo (seitenlaenge):\n    for i in range (a, b):\n        n_eck (i, seitenlaenge)\n        jump (100)\na = 4\nb = 9\nseitenlaenge = 20\n\njump (-200)\nn_eck_demo (seitenlaenge)\n","sub_path":"n_eck.py","file_name":"n_eck.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"80859923","text":"import tensorflow as tf\nimport numpy as np\nimport tensorflow.contrib.losses as losses\nimport tensorflow.contrib.metrics as metrics\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\nx = tf.placeholder(tf.float32, [None, 784])\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\ny = tf.nn.softmax(tf.matmul(x, W) + b)\ny_ = tf.placeholder(tf.float32, [None, 10])\n\ncross_entropy = losses.mean_squared_error(y, y_)\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\nsess = tf.Session()\n\nsess.run(tf.initialize_all_variables())\nsaver = tf.train.Saver()\nsaver.restore(sess,\n              \"/home/hardik/Desktop/MTech_Project/Scripts/Python/Brain_Research_Python/MNIST_data/myModel.ckpt\")\n\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nprint(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))\n","sub_path":"Scripts/Python/MTech_Brain_Research_Python/Semester3/Demo.py","file_name":"Demo.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"364570367","text":"#Author: Ricardo Cornejo\n#Returns the triangle type based on the side lengths.\n\ndef lados(a,b, c):\n    if a**2+b**2 == c**2 or b**2 == c**2-a**2 or a**2 == c**2-b**2:\n        return \"Right Triangle\"\n    elif a==b and c==a and b==c:\n        return \"Equilateral Triangle\"\n    elif a==b and c!=a or b==c and a!=b or a==c and b!=a:\n        return \"Isosceles Triangle\"\n    else:\n        return \"These sides do not correspond to a triangle\"\n\ndef main():\n    LadoA=int(input(\"Enter the value of side A: \"))\n    LadoB=int(input(\"Enter the value of side B: \"))\n    LadoC=int(input(\"Enter the value of side C: \"))\n    resultado = lados(LadoA,LadoB,LadoC)\n    return resultado\n\nmain()","sub_path":"Trianguloo.py","file_name":"Trianguloo.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"429464282","text":"import discord\nfrom discord.ext import commands\nfrom aiohttp import ClientSession\nimport random\nfrom bot.paginators import EmbedPaginator\nfrom bot.reddit import Reddit \n\nclass Anime(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n    \n    async def anilist_request(self, query: str, variables):\n        url = 'https://graphql.anilist.co'\n        async with ClientSession() as cs:\n            async with cs.post(url, json={'query': query, 'variables': variables}) as r:\n                data = await r.json()\n\n        return r.status, data\n\n    def create_anime_embed(self, anime):\n        title = anime['title']['romaji']\n        score = anime['meanScore']\n        description = anime['description']\n        genres = \", \".join(anime['genres'])\n        image = anime['coverImage']['large']\n        url = anime['siteUrl']\n\n        
embed_title = f'{title} ({score}/100)'\n embed_description = f'**Genres**: {genres}\\n \\n {description}'\n\n anime_embed = discord.Embed(title=embed_title, description=embed_description, url=url)\n anime_embed.set_image(url=image)\n\n return anime_embed\n\n @commands.group()\n async def anime(self, ctx: commands.Context):\n '''The main commmand for anime!'''\n if ctx.invoked_subcommand is None:\n await ctx.send('**Usage:** \\n `,anime recommend/rec/rmd`\\n `,anime search/s `\\n `,anime meme`')\n\n @anime.command(aliases=['rec', 'rmd'])\n async def recommend(self, ctx: commands.Context):\n '''Recommends a random high rated anime.'''\n \n # First Request:\n query = '''\n query ($page: Int, $perPage: Int) {\n Page (page: $page, perPage: $perPage) {\n pageInfo {\n currentPage\n lastPage\n perPage\n }\n media (type: ANIME, averageScore_greater: 75) {\n id\n }\n }\n }\n '''\n\n variables = {\n 'page': 1,\n 'perPage': 5\n } \n\n status_code, data = await self.anilist_request(query, variables)\n\n if status_code != 200:\n await ctx.send(data['errors'][0]['message'])\n \n pages_amount = data['data']['Page']['pageInfo']['lastPage']\n\n # Second Request:\n\n query = '''\n query ($page: Int, $perPage: Int) {\n Page (page: $page, perPage: $perPage) {\n pageInfo {\n total\n currentPage\n lastPage\n perPage\n }\n media (type: ANIME, averageScore_greater: 75) {\n title {\n romaji\n }\n description(asHtml: false)\n meanScore\n genres\n coverImage {\n large\n medium\n }\n siteUrl\n }\n }\n }\n '''\n\n variables = {\n 'page': random.randint(1, pages_amount),\n 'perPage': 5\n }\n\n status_code, data = await self.anilist_request(query, variables)\n\n if status_code != 200:\n await ctx.send(data['errors'][0]['message'])\n \n page = data['data']['Page']['media']\n anime = page[random.randint(0, len(page) - 1)]\n\n anime_embed = self.create_anime_embed(anime)\n\n await ctx.send(embed=anime_embed)\n \n @anime.command(aliases=['s'])\n async def search(self, ctx: commands.Context, name: str = ''):\n '''Searches for an anime by name'''\n query = '''\n query ($page: Int, $perPage: Int, $search: String) {\n Page (page: $page, perPage: $perPage) {\n pageInfo {\n total\n currentPage\n lastPage\n perPage\n }\n media (type: ANIME, search: $search) {\n title {\n romaji\n }\n meanScore\n description(asHtml: false)\n genres\n coverImage {\n large\n medium\n }\n siteUrl\n }\n }\n }\n '''\n\n variables = {\n 'page': 1,\n 'perPage': 5,\n 'search': name\n }\n\n status_code, data = await self.anilist_request(query, variables)\n\n if status_code != 200:\n await ctx.send(data['errors'][0]['message'])\n \n page = data['data']['Page']['media']\n total = data['data']['Page']['pageInfo']['total']\n pages_amount = data['data']['Page']['pageInfo']['lastPage']\n anime_amount = int(total / pages_amount)\n\n\n #loop through media and get the anime list and paginate them\n embed_list = []\n for i in range(anime_amount):\n anime = page[i]\n anime_embed = self.create_anime_embed(anime)\n embed_list.append(anime_embed)\n \n if anime_amount == 0:\n await ctx.send(f'{name} Not Found!')\n elif anime_amount == 1:\n await ctx.send(embed=embed_list[0])\n else:\n paginator = EmbedPaginator(embeds=embed_list)\n await paginator.run(ctx) \n \n @anime.command()\n async def meme(self, ctx: commands.Context):\n '''Get a random anime meme'''\n reddit = Reddit()\n submission = reddit.get_random_submission('Animemes')\n embed = discord.Embed(title=submission.title)\n embed.set_image(url=submission.url)\n await ctx.send(embed=embed)\n\n @commands.command()\n 
async def jojo(self, ctx: commands.Context):\n '''Get a random jojo post'''\n subs_list = ['ShitPostCrusaders', 'StardustCrusaders', 'wholesomejojo']\n subreddit_name = random.choice(subs_list)\n\n reddit = Reddit()\n submission = reddit.get_random_submission(subreddit_name)\n embed = discord.Embed(title=submission.title)\n embed.set_image(url=submission.url)\n await ctx.send(embed=embed)\n \n\ndef setup(bot):\n bot.add_cog(Anime(bot))\n","sub_path":"cogs/anime.py","file_name":"anime.py","file_ext":"py","file_size_in_byte":6308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"17163899","text":"import logging\nimport os\n\nfrom antlr4.TokenStreamRewriter import TokenStreamRewriter\n\nfrom gen.javaLabeled.JavaParserLabeled import JavaParserLabeled\nfrom gen.javaLabeled.JavaParserLabeledListener import JavaParserLabeledListener\nfrom refactorings.utils.utils2 import parse_and_walk\n\ntry:\n import understand as und\nexcept ImportError as e:\n print(e)\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__file__)\n\nROOT_PACKAGE = \"(Unnamed_Package)\"\n\n\nclass DeleteSourceClassListener(JavaParserLabeledListener):\n def __init__(self, rewriter: TokenStreamRewriter, class_name: str):\n self.rewriter = rewriter\n self.class_name = class_name\n\n def enterClassDeclaration(self, ctx: JavaParserLabeled.ClassDeclarationContext):\n if ctx.IDENTIFIER().getText() == self.class_name:\n self.rewriter.delete(\n program_name=self.rewriter.DEFAULT_PROGRAM_NAME,\n from_idx=ctx.parentCtx.start.tokenIndex,\n to_idx=ctx.parentCtx.stop.tokenIndex\n )\n\n\nclass UpdateImportsListener(JavaParserLabeledListener):\n def __init__(self, rewriter: TokenStreamRewriter, source_package: str, target_package: str, class_name: str):\n self.rewriter = rewriter\n self.source_package = source_package\n self.target_package = target_package\n self.class_name = class_name\n self.current_package = None\n\n self.imported = False\n self.import_loc = None\n\n def enterPackageDeclaration(self, ctx: JavaParserLabeled.PackageDeclarationContext):\n self.current_package = ctx.qualifiedName().getText()\n\n def exitPackageDeclaration(self, ctx:JavaParserLabeled.PackageDeclarationContext):\n self.import_loc = ctx.stop\n\n def enterImportDeclaration(self, ctx: JavaParserLabeled.ImportDeclarationContext):\n if self.target_package in ctx.getText():\n self.imported = True\n if self.class_name in ctx.getText():\n if self.target_package == self.current_package:\n replace_text = \"\"\n else:\n replace_text = f\"import {self.target_package}.{self.class_name};\\n\"\n\n self.rewriter.replaceRangeTokens(\n from_token=ctx.start,\n to_token=ctx.stop,\n text=replace_text,\n program_name=self.rewriter.DEFAULT_PROGRAM_NAME\n )\n\n def exitCompilationUnit(self, ctx:JavaParserLabeled.CompilationUnitContext):\n if not self.imported:\n self.rewriter.insertAfterToken(\n token=self.import_loc,\n text=f\"\\nimport {self.target_package}.{self.class_name};\\n\",\n program_name=self.rewriter.DEFAULT_PROGRAM_NAME\n )\n\n\nclass MoveClassAPI:\n def __init__(self, udb_path: str, source_package: str, target_package: str, class_name: str):\n self.udb_path = udb_path\n self.source_package = source_package\n self.target_package = target_package\n self.class_name = class_name\n\n self.source_package_dir = None\n self.target_package_dir = None\n self.class_dir = None\n self.class_content = None\n self.usages = None\n self.new_class_path = None\n\n def check_preconditions(self) -> bool:\n if 
self.source_package == self.target_package:\n logger.error(\"Source and target packages are same.\")\n return False\n\n if self.source_package == ROOT_PACKAGE or self.target_package == ROOT_PACKAGE:\n logger.error(\"Can not move package to/from root package.\")\n return False\n\n # Get package directories\n source_package_dir, target_package_dir = self.get_package_directories()\n if source_package_dir is None or target_package_dir is None:\n logger.error(\"Package entity does not exists.\")\n return False\n\n if not os.path.exists(os.path.join(source_package_dir, f\"{self.class_name}.java\")):\n logger.error(\"Class does not exists in source package.\")\n return False\n\n # Get class directory\n class_dir, class_content, usages = self.get_class_info()\n if class_dir is None or class_content is None:\n logger.error(\"Class entity does not exists.\")\n return False\n\n new_class_path = os.path.join(target_package_dir, f\"{self.class_name}.java\")\n if os.path.exists(new_class_path):\n logger.error(\"Class already exists in target package.\")\n return False\n\n self.source_package_dir = source_package_dir\n self.target_package_dir = target_package_dir\n self.class_dir = class_dir\n self.class_content = class_content\n self.usages = usages\n self.new_class_path = new_class_path\n\n return True\n\n def get_package_directories(self):\n db = und.open(self.udb_path)\n sp = None\n tp = None\n for ent in db.ents(\"Package\"):\n long_name = ent.longname()\n if long_name == self.source_package and sp is None:\n sp = os.path.dirname(ent.parent().longname())\n if long_name == self.target_package and tp is None:\n tp = os.path.dirname(ent.parent().longname())\n db.close()\n return sp, tp\n\n def get_class_info(self):\n db = und.open(self.udb_path)\n class_path = None\n class_contents = None\n usages = set()\n\n for ent in db.ents(\"Class\"):\n simple_name = ent.simplename()\n if simple_name == self.class_name and class_path is None:\n class_contents = ent.contents()\n class_path = ent.parent().longname()\n\n for ref in ent.refs():\n if ref.file().simplename() != f\"{simple_name}.java\":\n usages.add(ref.file().longname())\n break\n db.close()\n return class_path, class_contents, usages\n\n def do_refactor(self):\n if not self.check_preconditions():\n logger.error(\"Pre conditions failed.\")\n return False\n print(self.usages)\n # Update usages\n for file_path in self.usages:\n parse_and_walk(\n file_path=file_path,\n listener_class=UpdateImportsListener,\n has_write=True,\n source_package=self.source_package,\n target_package=self.target_package,\n class_name=self.class_name\n )\n\n # Delete source class\n os.remove(self.class_dir)\n\n # Write the new class\n with open(self.new_class_path, 'w') as f:\n package = \"\"\n if self.target_package != ROOT_PACKAGE:\n package = f\"package {self.target_package};\\n\\n\"\n imports = \"\"\n if self.source_package != ROOT_PACKAGE:\n imports = f\"import {self.source_package}.*;\\n\\n\"\n\n f.write(package + imports + self.class_content)\n\n return True\n\n\ndef main(udb_path: str, source_package: str, target_package: str, class_name: str):\n return MoveClassAPI(\n udb_path, source_package, target_package, class_name\n ).do_refactor()\n\n\nif __name__ == '__main__':\n main(\n udb_path=\"D:\\Dev\\JavaSample\\JavaSample1.udb\",\n class_name=\"RemoveFlagArg\",\n source_package=\"my_package\",\n target_package=\"your_package\", # \"(Unnamed_Package)\"\n 
)\n","sub_path":"refactorings/move_class.py","file_name":"move_class.py","file_ext":"py","file_size_in_byte":7464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"366967641","text":"import torch\nfrom collections import OrderedDict\nfrom torch.optim import Adam, SGD\nfrom model_TCN import Model, StaticModel\nimport torchvision.utils as vutils\nimport torch.nn.functional as F\nimport os\nimport numpy as np\nimport cv2\nimport torch.nn as nn\n\n\nEPSILON = 1e-8\np = OrderedDict()\n\np['lr_bone'] = 5e-5 # Learning rate\np['lr_branch'] = 0.025\np['wd'] = 0.0005 # Weight decay\np['momentum'] = 0.90 # Momentum\nlr_decay_epoch = [9, 20]\nnAveGrad = 10 # Update the weights once in 'nAveGrad' forward passes\nshowEvery = 50\ntmp_path = 'tmp_out'\n\nclass Solver(object):\n def __init__(self, train_loader, test_loader, config, save_fold=None):\n\n self.train_loader = train_loader\n self.test_loader = test_loader\n self.config = config\n self.save_fold = save_fold\n\n self.build_model()\n\n # if config.mode == 'test':\n # self.net_bone.eval()\n\n def print_network(self, model, name):\n num_params = 0\n for p in model.parameters():\n num_params += p.numel() # 返回一个tensor变量内所有元素个数\n print(name)\n print(model)\n print(\"The number of parameters: {}\".format(num_params))\n\n # build the network\n def build_model(self):\n print('mode: {}'.format(self.config.mode))\n print('------------------------------------------')\n if self.config.train_step == 1:\n self.net_bone = StaticModel(3)\n else:\n self.net_bone = Model(3)\n if self.config.cuda:\n self.net_bone = self.net_bone.cuda()\n\n if self.config.mode == 'train':\n if self.config.model_path != '':\n assert (os.path.exists(self.config.model_path)), ('please import correct pretrained model path!')\n self.net_bone.load_pretrain_model(self.config.model_path)\n if self.config.static_path != '':\n assert (os.path.exists(self.config.static_path)), ('please import correct pretrained model path!')\n self.net_bone.features.load_pretrain_model(self.config.static_path)\n else:\n assert (self.config.model_path != ''), ('Test mode, please import pretrained model path!')\n assert (os.path.exists(self.config.model_path)), ('please import correct pretrained model path!')\n self.net_bone.load_pretrain_model(self.config.model_path)\n\n self.lr_bone = p['lr_bone']\n self.lr_branch = p['lr_branch']\n self.optimizer_bone = Adam(filter(lambda p: p.requires_grad, self.net_bone.parameters()), lr=self.lr_bone,\n weight_decay=p['wd'])\n print('------------------------------------------')\n self.print_network(self.net_bone, 'DSNet')\n print('------------------------------------------')\n\n def test(self):\n kk = {}\n rr = {}\n\n if not os.path.exists(self.save_fold):\n os.makedirs(self.save_fold)\n for i, data_batch in enumerate(self.test_loader):\n frame1, frame2, frame3, frame4, frame5, label, split, size, name = data_batch['frame1'], data_batch['frame2'], data_batch['frame3'], data_batch['frame4'], data_batch['frame5'], data_batch['label'], data_batch['split'], data_batch['size'], data_batch['name']\n dataset = data_batch['dataset']\n\n if self.config.cuda:\n frame1, frame2, frame3, frame4, frame5 = frame1.cuda(), frame2.cuda(), frame3.cuda(), frame4.cuda(), frame5.cuda()\n with torch.no_grad():\n\n pre = self.net_bone(frame1, frame2, frame3, frame4, frame5)\n\n for i in range(self.config.test_batch_size):\n\n presavefold = os.path.join(self.save_fold, dataset[i], split[i])\n\n if not os.path.exists(presavefold):\n 
os.makedirs(presavefold)\n pre1 = torch.nn.Sigmoid()(pre[i])\n pre1 = (pre1 - torch.min(pre1)) / (torch.max(pre1) - torch.min(pre1))\n pre1 = np.squeeze(pre1.cpu().data.numpy()) * 255\n pre1 = cv2.resize(pre1, (size[0][1], size[0][0]))\n cv2.imwrite(presavefold + '/' + name[i], pre1)\n\n\n def train(self):\n\n # 一个epoch中训练iter_num个batch\n iter_num = len(self.train_loader.dataset) // self.config.batch_size\n aveGrad = 0\n if not os.path.exists(tmp_path):\n os.mkdir(tmp_path)\n for epoch in range(self.config.epoch):\n r_sum_loss= 0\n self.net_bone.zero_grad()\n for i, data_batch in enumerate(self.train_loader):\n\n frame1, frame2, frame3, frame4, frame5, label = data_batch['frame1'], data_batch['frame2'], data_batch['frame3'], data_batch['frame4'], data_batch['frame5'],data_batch['label']\n if frame3.size()[2:] != label.size()[2:]:\n print(\"Skip this batch\")\n continue\n if self.config.cuda:\n frame1, frame2, frame3, frame4, frame5, label = frame1.cuda(), frame2.cuda(), frame3.cuda(), frame4.cuda(), frame5.cuda(), label.cuda()\n\n if self.config.train_step == 1:\n pre1 = self.net_bone(frame1)\n else:\n\n pre1 = self.net_bone(frame1, frame2, frame3, frame4, frame5)\n bce = nn.BCEWithLogitsLoss()\n # g = gloss()\n b1 = bce(pre1, label)\n # g1 = g(pre1, label)\n\n loss = b1\n loss.backward()\n aveGrad += 1\n\n if aveGrad % nAveGrad == 0:\n self.optimizer_bone.step()\n self.optimizer_bone.zero_grad()\n aveGrad = 0\n\n if i % showEvery == 0:\n print('epoch: [%2d/%2d], iter: [%5d/%5d] Loss || sal : %10.4f' % (\n epoch, self.config.epoch, i, iter_num,\n loss) )\n print('Learning rate: ' + str(self.lr_bone))\n\n if i % 50 == 0:\n vutils.save_image(torch.sigmoid(pre1.data), tmp_path + '/iter%d-sal-0.jpg' % i,\n normalize=True, padding=0)\n # vutils.save_image(torch.sigmoid(edge_out.data), tmp_path + '/iter%d-edge-0.jpg' % i,\n # normalize=True, padding=0)\n vutils.save_image(frame2.data, tmp_path + '/iter%d-sal-data.jpg' % i, padding=0)\n vutils.save_image(label.data, tmp_path + '/iter%d-sal-target.jpg' % i, padding=0)\n\n if (epoch + 1) % self.config.epoch_save == 0:\n torch.save(self.net_bone.state_dict(),\n '%s/epoch_%d_bone.pth' % (self.config.save_fold, epoch + 1))\n\n if epoch in lr_decay_epoch:\n self.lr_bone = self.lr_bone * 0.2\n self.optimizer_bone = Adam(filter(lambda p: p.requires_grad, self.net_bone.parameters()),\n lr=self.lr_bone, weight_decay=p['wd'])\n\n torch.save(self.net_bone.state_dict(), '%s/models/final_bone.pth' % self.config.save_fold)\n\n\ndef gradient(x):\n # tf.image.image_gradients(image)\n h_x = x.size()[-2]\n w_x = x.size()[-1]\n # gradient step=1\n r = F.pad(x, [0, 1, 0, 0])[:, :, :, 1:]\n l = F.pad(x, [1, 0, 0, 0])[:, :, :, :w_x]\n t = F.pad(x, [0, 0, 1, 0])[:, :, :h_x, :]\n b = F.pad(x, [0, 0, 0, 1])[:, :, 1:, :]\n\n xgrad = torch.pow(torch.pow((r - l) * 0.5, 2) + torch.pow((t - b) * 0.5, 2), 0.5)\n\n return xgrad\n\nclass gloss(nn.Module):\n def __init__(self):\n super(gloss, self).__init__()\n\n def forward(self, x, gt):\n x_grad = gradient(x)\n gt_grad = gradient(gt)\n edge = torch.where(gt_grad>0, torch.ones_like(gt), torch.zeros_like(gt))\n gg = (1 - edge) * gt\n mask = torch.where(gg > 0, x_grad, torch.zeros_like(gt))\n l1 = torch.mean(mask)\n\n maske = torch.where(edge>0, x_grad, torch.zeros_like(gt))\n l2 = torch.exp(-torch.mean(maske))\n loss = l1*l2\n return loss\n\n\n","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":8127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} 
+{"seq_id":"337969677","text":"import sys\nfrom socket import *\nimport pickle\nimport hashlib\nimport time\n\n# This is the class that contains data, hashValue of data and sequence number and is transferred through sockets.\nclass DataPacket:\n def __init__(self, byteData, seqNumber, hashValue):\n self.byteData = byteData\n self.seqNumber = seqNumber\n self.hashValue = hashValue\n\n\n# Ip of the server\nserverName = \"10.10.1.2\"\n# serverName = \"127.0.0.1\"\nserverPort = 5002\n\n# File that we transfer got from system arguments\nfileName = sys.argv[1]\nfile = open(fileName, \"rb\")\n\n# Tcp socket which allows us to communicate between broker and source\nclientSocket = socket(AF_INET, SOCK_STREAM)\nclientSocket.connect((serverName, serverPort))\n\n# initial data of file\nreadData = file.read(845)\n\n# incremental sequence number\n# initially 0\nseqNumber = 0\n\n# while file is not empty\nwhile readData:\n\n # Takes the md5sum of file\n hashValue = hashlib.md5(readData).hexdigest()\n\n # We place sequence number as 4 digit and put it into object.\n # 0's is used to make the each packet size stabile\n if seqNumber < 10:\n packet = DataPacket(readData, \"000\" + str(seqNumber), hashValue)\n elif seqNumber < 100:\n packet = DataPacket(readData, \"00\" + str(seqNumber), hashValue)\n elif seqNumber < 1000:\n packet = DataPacket(readData, \"0\" + str(seqNumber), hashValue)\n else:\n packet = DataPacket(readData, str(seqNumber), hashValue)\n\n # Pickling the packet in order to send through socket\n # Pickle makes the object to the byte version in order to send it.\n packetToSend = pickle.dumps(packet)\n print(\"Packet Number : \" + str(seqNumber) + \" Packet Len : \" + str(len(packetToSend)))\n\n # Sending through socket\n clientSocket.send(packetToSend)\n\n # increment the seq number for next packet\n seqNumber += 1\n\n # Sleeping is necessary in order to not to overload the broker's socket.\n time.sleep(0.01)\n\n # Read next packet\n readData = file.read(845)\n\n","sub_path":"CENG_435_DATA_COMMUNICATIONS_AND_NETWORKING/Phase2/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"569034357","text":"import sys\nsys.path.append('../')\n\nfrom wrapper.DataWrapH5py import Daily_DataBase\n\nclass QueryEngine:\n\n def get_data_point(self, loc, date_time, daily_hourly, historical_scraping, parameters):\n if daily_hourly == 'd':\n db = Daily_DataBase()\n return db.extract_data_point(loc, date_time, parameters)","sub_path":"query_engine/query_engine.py","file_name":"query_engine.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"405010029","text":"# Copyright (c) 2014, The MITRE Corporation. 
All rights reserved.\r\n# See LICENSE.txt for complete terms.\r\n\r\nimport unittest\r\n\r\nfrom stix.campaign import Campaign\r\nfrom cybox.test import EntityTestCase\r\n\r\n\r\nclass CampaignTest(EntityTestCase, unittest.TestCase):\r\n klass = Campaign\r\n _full_dict = {\r\n 'id': \"example:Campaign-341\",\r\n 'timestamp': \"2014-01-31T06:14:46\",\r\n 'version': '1.1.1',\r\n 'title': 'Purple Elephant',\r\n 'description': 'A pretty novice set of actors.',\r\n 'short_description': 'novices',\r\n 'names': {\r\n 'names': [\"Dancing Hippos\", \"Crazy Squirrels\"],\r\n },\r\n 'intended_effects': [\r\n {\r\n 'timestamp': \"2014-03-11T06:24:26\",\r\n 'value': \"Doing bad stuff\",\r\n },\r\n {\r\n 'timestamp': \"2014-03-21T06:24:26\",\r\n 'value': \"Doing really bad stuff\",\r\n }\r\n ],\r\n 'status': \"Ongoing\",\r\n 'related_ttps': {\r\n 'scope': \"exclusive\",\r\n 'ttps': [\r\n {\r\n 'confidence': {'value': {'value': \"Medium\", 'xsi:type':'stixVocabs:HighMediumLowVocab-1.0'}},\r\n 'ttp': {'title': \"Stealth\", 'version': '1.1.1'},\r\n }\r\n ]\r\n },\r\n 'related_incidents': {\r\n 'scope': \"inclusive\",\r\n 'incidents': [\r\n {\r\n 'confidence': {'value': {'value': \"Medium\", 'xsi:type':'stixVocabs:HighMediumLowVocab-1.0'}},\r\n 'incident': {'idref': \"example:Incident-2\",\r\n 'version': '1.1.1'},\r\n }\r\n ]\r\n },\r\n 'related_indicators': {\r\n 'scope': \"inclusive\",\r\n 'indicators': [\r\n {\r\n 'confidence': {'value': {'value': \"Medium\", 'xsi:type':'stixVocabs:HighMediumLowVocab-1.0'}},\r\n 'indicator': {'idref': \"example:Indicator-77\",\r\n 'version': '2.1.1'},\r\n }\r\n ]\r\n },\r\n 'attribution': [{\r\n 'scope': \"inclusive\",\r\n 'threat_actors': [\r\n {\r\n 'confidence': {'value': {'value': \"Medium\", 'xsi:type':'stixVocabs:HighMediumLowVocab-1.0'}},\r\n 'threat_actor': {'title': \"Campaign Actor #1\",\r\n 'version': '1.1.1'},\r\n },\r\n {\r\n 'threat_actor': {'idref': \"example:ThreatActor-111\",\r\n 'version': '1.1.1'},\r\n },\r\n ],\r\n }],\r\n 'associated_campaigns': {\r\n 'scope': \"inclusive\",\r\n 'campaigns': [\r\n {\r\n 'confidence': {'value': {'value': \"Medium\", 'xsi:type':'stixVocabs:HighMediumLowVocab-1.0'}},\r\n 'information_source': {'description': \"Threat Feed\"},\r\n 'campaign': {'title': \"Baby Elephant\", 'version': '1.1.1'},\r\n }\r\n ],\r\n },\r\n 'confidence': {'value': {'value': \"Medium\", 'xsi:type':'stixVocabs:HighMediumLowVocab-1.0'}},\r\n 'activity': [\r\n {\r\n 'date_time': \"2012-01-01T08:45:31\",\r\n 'description': \"The first bad thing\"\r\n },\r\n {\r\n 'date_time': \"2012-01-02T08:45:31\",\r\n 'description': \"Another bad thing\"\r\n },\r\n ],\r\n 'information_source': {\r\n 'description': \"A former member of the campaign.\",\r\n 'identity': {\r\n 'name': \"Mr. D. 
Fector\",\r\n },\r\n },\r\n 'handling': [\r\n {\r\n 'marking_structures': [{\r\n 'marking_model_name': 'TLP',\r\n 'color': \"RED\",\r\n 'xsi:type': \"tlpMarking:TLPMarkingStructureType\",\r\n }]\r\n }\r\n ],\r\n 'related_packages': {\r\n 'packages': [\r\n {'idref': \"example:Package-AB\", 'relationship': \"Parent\"},\r\n {'idref': \"example:Package-CD\", 'relationship': \"Child\"}\r\n ]\r\n }\r\n }\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n","sub_path":"stix-1.1.1.0/stix/test/campaign_test.py","file_name":"campaign_test.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"253658980","text":"import argparse\nimport re\nfrom typing import Iterable, Dict\n\nimport requests\nfrom auth0.v3.management import Auth0\nfrom yaml import load\nfrom .utils import (\n ENDC,\n WARNING,\n auth0_token,\n get_users_from_auth0,\n stormpath_connection,\n)\n\n\ndef prompt_user(prompt: str='Please enter [y/n]: ', ttl=3) -> bool:\n \"\"\" \n Prompt the user for some action\n \n :param prompt: Prompt to display to the user\n :param ttl: Tries to live (an exception is thrown)\n \"\"\"\n if type(ttl) == int:\n if ttl <= 0:\n raise Exception('invalid choice entered.')\n ttl -= 1\n response = input(prompt)\n if response.lower() in {'no', 'n'}:\n return False\n elif response.lower() in {'yes', 'y'}:\n return True\n\n return prompt_user('Please enter [y/n]: ', ttl)\n\n\ndef add_to_auth0_group(auth0_users: Iterable[Dict], auth0_authz_jwt: str,\n group: Dict, webtask_url: str, interactive=True,\n dry_run: bool = False):\n \"\"\"\n Issue mass password resets of Auth0 user passwords\n\n :param auth0_users: Iterable of Auth0 user dicts\n :param auth0_authz_jwt: JWT with Auth0 Authorization audience\n :param interactive: Prompt before making any changes \n :param dry_run: Do not actually delete resources\n :param limit: Limit changes to these users\n :param skip: Skip these users\n :return: \n \"\"\"\n group_url_template = f\"{webtask_url}/users/%s/groups\"\n headers = {'Authorization': f'Bearer {auth0_authz_jwt}'}\n template = 'Password email to user \"{}\" {}'\n for user in auth0_users:\n email = user['email']\n user_id = user['user_id']\n\n if interactive:\n if not prompt_user(f'Add {email} to group {group[\"name\"]}? 
[y/n]: '):\n continue\n if dry_run:\n print(template.format(email,\n WARNING + 'would have been added to {group_name}' + ENDC))\n else:\n r = requests.patch(\n group_url_template % user_id,\n json=[group[\"id\"]],\n headers=headers\n )\n assert r.status_code == 204, 'Bad status'\n print(f'Added {email} to group \"{group[\"name\"]}\"')\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Sync accounts from a Stormpath app to an Auth0 domain')\n parser.add_argument('config', help='configuration file')\n parser.add_argument('group', help='group to add users to')\n parser.add_argument('--regex', help='regex to use in filtering accounts by email')\n parser.add_argument('--dry-run', action='store_true',\n help='print results to screen but do not create/change resources')\n\n parser.add_argument('--yes', action='store_true',\n help='do not prompt when sending resets or deleting users')\n parser.add_argument('--no-prompt', action='store_true',\n help='do not prompt for each user when deleting or sending reset emails')\n\n args = parser.parse_args()\n\n email_regex = None\n if args.regex:\n email_regex = re.compile(args.regex)\n print(f'Filtering users by email with regex: {email_regex}')\n\n with open(args.config) as config_fp:\n config = load(config_fp)\n\n a0_cfg = config['auth0']\n auth0 = Auth0(config['auth0']['domain'], auth0_token(config['auth0']))\n authorization_token = auth0_token(config['auth0'], audience='urn:auth0-authz-api')\n\n token = auth0_token(a0_cfg, )\n auth0_users = get_users_from_auth0(auth0, regex=email_regex)\n add_to_auth0_group(auth0_users, auth0, args.group, authorization_token, a0_cfg['extensions']['authorization']['webtaskUrl'],\n dry_run=args.dry_run, interactive=not args.no_prompt)\n","sub_path":"groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":3799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"111619406","text":"import os\nimport json\nimport subprocess\nimport pandas\nfrom datetime import datetime\nimport numpy as np\n\nos.chdir(\"/home/user/Documents/Venitha/COVID_19_Meta/General/COVID-biorxiv\")\n\ncollection={}\n\ndef execute_commandRealtime(cmd):\n \"\"\"Execute shell command and print stdout in realtime.\n Function taken from pyrpipe Singh et.al. 
2020\n usage:\n for output in execute_commandRealtime(['curl','-o',outfile,link]):\n print (output)/home/dell/Documents/Venitha/COVID_19_Meta/General/COVID-biorxiv\n \"\"\"\n popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)\n for stdout_line in iter(popen.stdout.readline, \"\"):\n yield stdout_line\n popen.stdout.close()\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, cmd)\n\ndef read_collection():\n '''\n open file\n '''\n val=0\n filename='collection.json'\n with open(filename, \"r\") as f:\n data = json.load(f)\n #data is a list of dictionaries\n #print(type(data))\n return data\n\ndef get_terms():\n\tprint('Available terms: \\n')\n\tfor number, entry in enumerate(collection):\n\t\tx=[]\n\t\tfor keys, values in entry.items():\n\t\t\tx.append(keys)\n\t\treturn(np.unique(np.array(x)))\n \t\ndef search(term):\n\t#search in collection is a list of dicts\n\tprint('\\nSearching for keyword',term)\n\tresult=[]\n\tfor d in collection:\n\t\t#search in all keys\n\t\tif (term.lower() in d['rel_title'].lower()) or (term.lower() in d['rel_abs'].lower()):\n\t\t\tresult.append(d)\n\t\t\t#return(np.unique(np.array(result)))\n\treturn(result)\t\n\t\ndef searchall(keywords):\n\tresult=[]\n\tfor k in keywords:\n\t\tresult.extend(search(k))\n\treturn result\t\n\t\ndef removedupes(result):\n\tseen=[]\n\tnew_l=[]\n\tfor d in result:\n\t\tt = tuple(d.items())\n\t\tif t not in seen:\n\t\t\tseen.append(t)\n\t\t\tnew_l.append(d)\n\tprint(\"\\nNumber of matches for keywords \",tosearch,\"after removing duplicates is :\",len(new_l))\n\treturn(new_l)\n\t\ndef get_title(res):\n titles=[]\n for d in res:\n if not d['rel_title'] in titles:\n titles.append(d['rel_title'])\n #print(d['rel_title'])\n return titles\n\ndef get_date(res):\n dates=[]\n for d in res:\n if not d['rel_date'] in dates:\n dates.append(d['rel_date'])\n return dates\n\ndef get_doi(res):\n dois=[]\n for d in res:\n if not d['rel_doi'] in dois:\n dois.append(d['rel_doi'])\n return dois\n \ndef get_info(res):\n\ttitles=[]\n\tdates=[]\n\tdois=[]\n\tfor d in res:\n\t\tif not d['rel_title'] in titles:\n\t\t\ttitles.append(d['rel_title'])\n\t\t\tdates.append(d['rel_date'])\n\t\t\tdois.append(d['rel_doi'])\n\tfilename=datetime.today().strftime('%Y-%m-%d')\n\twith open(\"date_\" + filename + \".txt\", 'w') as f:\n\t\tfor item in dates:\n\t\t\tf.write(\"%s\\n\" % item)\n\n\twith open(\"doi_\" + filename + \".txt\", 'w') as f:\n\t\tfor item in dois:\n\t\t\tf.write(\"%s\\n\" % item)\n\treturn titles\n\t\t\n\t\t\n\ndef filter_date(res,startdate):\n '''\n keep results by date\n '''\n print('\\nFiltering results before',startdate)\n filtered=[]\n for d in res:\n if datetime.strptime(d['rel_date'], '%Y-%m-%d')<=startdate:\n filtered.append(d)\n return filtered\n\n\n#read collection in memory\ncollection=read_collection()\n\nprint(\"JSON API Collection is of type : \",type(collection), \"where it is a list of dictionaries \\n\")\n\n#see available terms\nprint(get_terms())\n\n#perform search\n\n#single keyword search\n#res=search('RNA-seq')\n\n#multiple keyword search\n#tosearch=['proteomics','proteome','mass spectrometry']\n#tosearch=['transcriptome','RNA-Seq','nasal','oropharyngeal','swab']\n#res=searchall(tosearch)\n\n\n\n#CRISPR\n#tosearch=['CRISPR','genome-wide screen']\n#res=[]\n#for d in collection:\n#\tif (tosearch[0].lower() in d['rel_abs'].lower() or tosearch[0].lower() in d['rel_title'].lower()) or (tosearch[1].lower() in d['rel_abs'].lower() or tosearch[1].lower() in 
d['rel_title'].lower()):\n#\t\tres.append(d)\t\n\n#Interactome\ntosearch=['Interactome','Protein-Protein Interaction','Protein-Protein Interactions','global proteome','Multi-omics','Multi-omic']\n#res=searchall(tosearch)\nres=[]\nfor d in collection:\n\tif tosearch[0].lower() in d['rel_abs'].lower() or tosearch[0].lower() in d['rel_title'].lower(): \n\t\tres.append(d)\n\telif (tosearch[1].lower() in d['rel_abs'].lower() or tosearch[1].lower() in d['rel_title'].lower()) or (tosearch[2].lower() in d['rel_abs'].lower() or tosearch[2].lower() in d['rel_title'].lower()):\n\t\tres.append(d)\n\telif tosearch[3].lower() in d['rel_abs'].lower() or tosearch[3].lower() in d['rel_title'].lower():\n\t\tres.append(d)\n\telif (tosearch[4].lower() in d['rel_abs'].lower() or tosearch[4].lower() in d['rel_title'].lower()) or (tosearch[5].lower() in d['rel_abs'].lower() or tosearch[5].lower() in d['rel_title'].lower()):\n\t\tres.append(d)\n\nprint(\"\\nNumber of matches for keywords \",tosearch,\"is :\",len(res))\n\n#Remove duplicate records\nfilt_res=removedupes(res)\n\n#Filtering by date\n#fdate=datetime.strptime('2020-09-15', '%Y-%m-%d')\nfdate=datetime.strptime('2020-10-12', '%Y-%m-%d')\n\nfinal_res=get_info(filter_date(filt_res,fdate))\n\nprint(\"\\nNumber of records matching \",tosearch,\"filtered before \",fdate,\"is \",len(final_res),\"\\n\")\n\nfilename=datetime.today().strftime('%Y-%m-%d')\n\nprint(\"****************************************************************************************************************************\")\nprint(\"\\nWriting results to file \",filename + \".txt\",\"\\n\")\n \nwith open(\"title_\" + filename + \".txt\", 'w') as f:\n for item in final_res:\n f.write(\"%s\\n\" % item)\n\ncommand=['sed','\"s/^/https:\\/\\/doi.org\\//\"',\"doi_\" + filename + \".txt\",\">\",\"doi_\" + filename+ \"_edited\" + \".txt\"]\ncommand= \" \".join(command)\n\nos.system(command)\n\ncommand=['paste',\"title_\" + filename + \".txt\",\"date_\" + filename + \".txt\",\"doi_\" + filename + \"_edited\" + \".txt\",\">\",filename + \".csv\"]\ncommand= \" \".join(command)\n\nos.system(command)\n","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"288315529","text":"import argparse\nimport matplotlib.pyplot as plt\nimport itertools\nfrom wordcloud import WordCloud\nfrom VectReco.R2VModel import R2VModel\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--n\",default=30, type=int)\nparser.add_argument(\"model\", type=str)\nparser.add_argument(\"word\",type=str)\nargs = parser.parse_args()\n\n\nmod = R2VModel.from_w2v_text(args.model,binary=True)\n\nwords = mod.most_similar(vect=mod[args.word],limit=\"words\",topn=args.n)\n\nfreq = [(word,round(((((sim+1)/2)/1)*100))) for word,sim in words]\n\nduped = [[w]*int(f) for w,f in freq]\ntext = \" \".join(itertools.chain.from_iterable(duped))\n\n# text = \" \".join([w for w,s in words])\nwordcloud = WordCloud(width=200, height=200, margin=2, ranks_only=False, prefer_horizontal=0.9, mask=None, scale=1, max_words=200, background_color='white').generate(text)\n# Open a plot of the generated image.\nplt.imshow(wordcloud)\nplt.axis(\"off\")\nplt.show()","sub_path":"generate_wordcloud.py","file_name":"generate_wordcloud.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"141220571","text":"import pygame\nimport 
random\nimport math\n\npygame.init()\n\nscreen = pygame.display.set_mode((852,480))\npygame.display.set_caption(\"catch eggs\")\nicon = pygame.image.load('basket.png')\npygame.display.set_icon(icon)\n\nbackground = pygame.image.load('backimg.jpg')\n\nhen = pygame.image.load('hen.png')\nhenx = random.randint(0,788)\nheny = 0\nhenx_change = 0.3\nheny_change = 0\n\nbasket = pygame.image.load('bbsket.png')\nbasketx = 20\nbaskety = 400\nbasketx_change = 0\nbaskety_change = 0\n\negg = pygame.image.load('egg.png')\neggx = henx\neggy = heny\neggx_change = 0\neggy_change = 0.3\n\nscore = 0\nmissed = 0\nfont = pygame.font.Font('freesansbold.ttf',32)\n\n\ndef displayScore(show,x,y):\n scrnscore = font.render (\"Egg Score : \"+str(score), show, (255,255,255))\n screen.blit(scrnscore, (x, y))\n\ndef displayMissedScore(show,x,y):\n missedScrnScore = font.render (\"Missed : \"+str(missed), show, (255,255,255))\n screen.blit(missedScrnScore, (x,y))\n\ndef isCollided(ex, ey, bx, by):\n distance = math.sqrt((math.pow(bx - ex, 2)) + (math.pow(by - ey, 2)))\n if distance < 40:\n return True\n else:\n return False\n\ndef putegg(x,y):\n screen.blit(egg, (x, y))\n\n\ngame_over = False\nwhile not game_over:\n screen.blit(background, (0, 0))\n screen.blit(hen, (henx, heny))\n screen.blit(basket, (basketx, baskety))\n putegg(eggx,eggy)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_over = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n basketx_change = -0.6\n if event.key == pygame.K_RIGHT:\n basketx_change = +0.6\n\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n basketx_change = 0\n\n basketx += basketx_change\n if basketx <= 0:\n basketx = 0\n elif basketx >= 788:\n basketx = 788\n\n henx += henx_change\n if henx <= 0:\n henx_change = 0.3\n heny += heny_change\n if heny >= 416:\n heny=50\n elif henx >= 788:\n henx_change = -0.3\n heny += heny_change\n if heny >= 416 :\n heny = 50\n\n if eggy >= 0:\n eggy += eggy_change\n if eggy >= 450:\n eggy,eggx = 0,henx\n\n eggy += eggy_change\n\n collision = isCollided(eggx, eggy, basketx, baskety)\n if collision:\n eggy , eggx = 0 , henx\n score +=1\n henx = random.randint(0,788)\n heny = 0\n elif collision == False and eggy>=449:\n missed+=0.5\n\n if missed >= 5.1:\n screen.fill((255,240,200))\n msg = pygame.font.SysFont('comicsansms', 60).render(\"GAME OVER\", True, (112, 114, 255))\n screen.blit(msg, [270, 150])\n\n\n displayScore(True,10,10)\n displayMissedScore(True,10,50)\n\n pygame.display.update()\n\npygame.quit()\nquit()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"363105806","text":"# This Python file uses the following encoding: utf-8\n# do not remove the above comment as it is actually special!\n\n# built to test the issues in https://code.google.com/p/googleappengine/issues/detail?id=7981\n\nimport webapp2\nfrom google.appengine.ext.webapp.util import run_wsgi_app\n\nfrom google.appengine.api import search\n\nimport json\n\nproblem_strings = [u\"あああ123\", u\"あああ\", u\"(~ ̄▽ ̄)~\", u\" ̄▽ ̄\"]\n\n_INDEX_NAME = 'japaneseSpecialChars'\n\nclass MainPage(webapp2.RequestHandler):\n \n \n def get(self):\n self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'\n # build the index and documents\n for ps in problem_strings:\n problem_doc = search.Document(\n fields=[search.TextField(name='author', 
value=\"anon\"),\n search.TextField(name='content', value=ps)])\n search.Index(name=_INDEX_NAME).put(problem_doc)\n # query the index and documents\n for ps in problem_strings:\n self.response.out.write (\"searching for \\\"\" + ps + \"\\\":\\n\")\n try:\n query_obj = search.Query(query_string=ps)\n results = search.Index(name=_INDEX_NAME).search(query=query_obj)\n # if we get here, there was no exception\n self.response.out.write (\"query succeeded \\n\")\n except: \n self.response.out.write (\"Failed to parse query \\\"\" + ps + \"\\\"\\n\")\n self.response.out.write (\"\\n\")\n\n\napplication = webapp2.WSGIApplication([('/', MainPage)], debug=True)\n\n\ndef main():\n run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"japaneseUnicodeSearch.py","file_name":"japaneseUnicodeSearch.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"124644850","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom config.settings import BASE_DIR, PATHS\nimport qlikview.orcamento as orcamento\nimport qlikview.reducao as reducao\nimport qlikview.contratos as contratos\n\n\npath = os.path.join(BASE_DIR,\n PATHS['dir']['SAÍDA'])\n\ndef run():\n df = orcamento.hierarquia.parse_centro_custo()\n filename = os.path.join(path, 'hierarquia centros de custo.xlsx')\n df.to_excel(filename, index=False)\n del df\n\n df_classecusto = orcamento.hierarquia.parse_classe_custo()\n filename = os.path.join(path, 'hierarquia classes de custo.xlsx')\n df_classecusto.to_excel(filename, index=False)\n\n df_novopan = orcamento.novopan.parse()\n filename = os.path.join(path, 'novopan.xlsx')\n print(filename)\n df_novopan.to_excel(filename, index=False)\n\n df_acompanhamento = reducao.realizacao.parse(df_classecusto)\n filename = os.path.join(path, 'acompanhamento redução.xlsx')\n df_acompanhamento.to_excel(filename, index=False)\n\n del df_classecusto\n del df_novopan\n del df_acompanhamento\n\n df = orcamento.compartilhado.parse()\n filename = os.path.join(path, 'compartilhado.xlsx')\n df.to_excel(filename, index=False)\n del df\n\n df = orcamento.justificativas.parse()\n filename = os.path.join(path, 'justificativas.xlsx')\n df.to_excel(filename, index=False)\n del df\n\n df = orcamento.pacotes.parse()\n filename = os.path.join(path, 'pacotes.xlsx')\n df.to_excel(filename, index=False)\n del df\n\n df = reducao.contratos.parse()\n filename = os.path.join(path, 'contratos redução orçamentária.xlsx')\n df.to_excel(filename, index=False)\n del df\n\n df = reducao.comentarios.parse()\n filename = os.path.join(path, 'comentários redução orçamentária.xlsx')\n df.to_excel(filename, index=False)\n del df\n\n df = reducao.frs.parse()\n filename = os.path.join(path, 'frs redução orçamentária.xlsx')\n df.to_excel(filename, index=False)\n del df\n\n # df_contratos = contratos.contratos.parse()\n # df_frs = contratos.frscompartilhado.parse_sede()\n # df_contratos = contratos.process.status_prazo(df_contratos)\n # df_contratos = contratos.process.status_valor(df_contratos, df_frs)\n #\n # filename = os.path.join(path, 'contratos vigentes sede.xlsx')\n # df_contratos.to_excel(filename, index=False)\n # del df_contratos\n #\n # filename = os.path.join(path, 'custos contratos sede.xlsx')\n # df_frs.to_excel(filename, index=False)\n # del df_frs\n\nif __name__ == '__main__':\n 
run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"36509247","text":"import cv2 as cv\nfrom mchqr.image import Image\nfrom mchqr.solution import AlgoPair, Detected, DetectedList\nimport numpy as np\nfrom pyzbar.pyzbar import decode, Decoded, ZBarSymbol\nfrom typing import Callable, List\n\nDecodedList = List[Decoded]\nDetector = Callable[[Image], AlgoPair]\n\ndef algo_pair(image: Image, detected_list: DetectedList):\n\treturn AlgoPair(image.name, detected_list)\n\ndef detector(function: Detector):\n\tdetectors[function.__name__] = function\n\treturn function\n\ndetectors = {}\n\n@detector\ndef cv_detector(image: Image):\n\t_, data_list, bounding_boxes, _ = cv.QRCodeDetector().detectAndDecodeMulti(image.matrix)\n\n\treturn algo_pair(\n\t\timage, [\n\t\t\tDetected(\n\t\t\t\tdata_list[i],\n\t\t\t\tnp.int32(\n\t\t\t\t\tbounding_boxes[i]\n\t\t\t\t)\n\t\t\t)\n\t\t\tfor i in range(\n\t\t\t\tlen(data_list)\n\t\t\t)\n\t\t]\n\t)\n\n@detector\ndef zbar(image: Image):\n\treturn algo_pair(\n\t\timage, [\n\t\t\tDetected(\n\t\t\t\tdecoded.data.decode('utf-8'),\n\t\t\t\tnp.array(decoded.polygon)\n\t\t\t)\n\t\t\tfor decoded in decode(\n\t\t\t\timage.matrix, [ZBarSymbol.QRCODE]\n\t\t\t)\n\t\t]\n\t)\n","sub_path":"source/mchqr/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"167134612","text":"## Load Import necessary dependencies\nimport numpy as np # linear algebra\nimport os # accessing directory structure\nimport pandas as pd # data processing, CSV file I/O\nimport matplotlib.pyplot as plt # plotting\nimport seaborn as sns\nfrom sklearn import preprocessing\n\n## Load and Read DataSets\ndf = pd.read_csv('train.csv', sep=',', na_values=['N/A', 'no', '?'])\n## Return the first n rows.\nprint(df.head(10)) # n= 10\n## method for prints information about a DataFrame including the index dtype and columns, non-null values and memory usage\ndf.info() # rows = 318438 , columns = 18\n## To Visualize Data\n### pairplot will plot pairwise relationships across an entire dataframe (for the numerical columns)\n### and supports a color hue argument (for categorical columns)\n# sns.pairplot(df)\n# plt.savefig('visualize.jpg')\n# plt.show()\n# sns.pairplot(df, hue='Stay', height=3, aspect=1.3) #Use hue to show graph based on the hue category values\n# plt.savefig('visualize_hue_stay.jpg')\n# plt.show()\n# sns.jointplot(x='Admission_Deposit', y='Stay', data=df, kind='scatter')\n# plt.savefig('visualize_Admission_deposit_with_stay.jpg')\n# plt.show()\n#### Feature Transformations\n## Check Missing Data\n### number of missing data in City_Code_Patient column = 4532,\n### number of missing data in Bed Grade = 113 , missing data is less, so drop rows which contain missing data\nprint(df.isnull().sum())\nprint(df.describe())\nprint(df['Age'].value_counts())\n## Work with Missing Data\n## Drop Missing Data\ndf = df.dropna(axis=0) # drop rows from a data set containing missing values\ndf.info()\n## Check Missing Data\nprint(df.isnull().sum())\n\ncolumns =['case_id', 'Hospital_type_code', 'Hospital_region_code', 'Ward_Facility_Code', 'patientid', 'City_Code_Patient']\ndf = df.drop(columns, axis=1)\ndf.info()\n## # Converting float64 to int type\ndf['Bed Grade'] = df['Bed Grade'].astype(np.int64)\ndf['Admission_Deposit'] = 
df['Admission_Deposit'].astype(np.int64)\n\ndf.info()\n## Work with Categorical Data ## columns [Department, Ward_Type, Type of Admission, Severity of Illness, Age, Stay ]\n\ndf = pd.get_dummies(df, columns=['Department', 'Ward_Type', 'Type of Admission', 'Severity of Illness', 'Age'], drop_first=True)\nprint(df.columns)\ndf.info() # (total 29 columns), 313793 entries (rows)\nle = preprocessing.LabelEncoder() # to convert Y from categorical to label encoder\ndf['Stay'] = le.fit_transform(df['Stay'])\ndf['Stay'] = df['Stay'].astype(int)\nprint(list(le.classes_)) #['0-10', '11-20', '21-30', '31-40', '41-50', '51-60', '61-70', '71-80', '81-90', '91-100', 'More than 100 Days']\n#print(list(le.inverse_transform([0, 1, 2,3, 4, 5, 6, 7, 8, 9, 10])))\n\n## Detect and Handle Outliers\ncolumns =['Hospital_code','City_Code_Hospital', 'Available Extra Rooms in Hospital', 'Bed Grade', 'Visitors with Patient', 'Admission_Deposit']\n# for col in columns: # to show outliers\n# sns.boxplot(x=col, data=df)\n# sns.stripplot(x=col, data=df, color=\"#474646\")\n# plt.show()\n\nfrom datasist.structdata import detect_outliers\noutliers_indices = detect_outliers(df, 0, columns)\nprint(len(outliers_indices))\n# handle outliers\ndf.drop(outliers_indices, inplace=True)\ndf.info()\n\n### Deal with Imbalanced classes ## Stay column\nprint(df['Stay'].value_counts())\nfrom sklearn.model_selection import train_test_split\nx = df.drop('Stay', axis=1)\ny = df['Stay']\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state=22)\nfrom imblearn.over_sampling import SMOTE\nsmote = SMOTE(random_state=22)\n# make smote in only training set\nx_train, y_train = smote.fit_sample(x_train, y_train)\nprint(y_train.value_counts())\nprint(x_train.shape)\n## features scaling\nfrom sklearn.preprocessing import RobustScaler, MinMaxScaler, StandardScaler\nscaling = StandardScaler()\nx_train = scaling.fit_transform(x_train)\nx_test = scaling.transform(x_test)\n#### Train model\nfrom sklearn.ensemble import RandomForestClassifier\nmodel = RandomForestClassifier()\nmodel.fit(x_train, y_train)\n## Evaluate Model\ny_pred = model.predict(x_test)\nfrom sklearn.metrics import classification_report, accuracy_score, confusion_matrix\nprint(accuracy_score(y_test, y_pred))\nprint(confusion_matrix(y_test, y_pred))\nprint(classification_report(y_test, y_pred))\n\n## in case used cross Validation\n# from sklearn.model_selection import cross_validate, cross_val_predict, cross_val_score\n# from sklearn.metrics import classification_report, accuracy_score, confusion_matrix\n#\n# cv_result = cross_validate(model, x_train, y_train, cv=10, return_train_score=True)\n# print(cv_result)\n# print(cv_result['test_score'].mean())\n# Y_predict = cross_val_predict(model, x_test, y_test, cv=10)\n# print(accuracy_score(Y_predict, y_test))\n\n\n","sub_path":"Healthcare Analytics/project/random_forest.py","file_name":"random_forest.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"167316389","text":"import select\nimport socket\nimport queue\nimport struct\n\ndef main():\n proxy_sock = socket.socket()\n proxy_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n proxy_sock.setblocking(False)\n proxy_addr = (\"0.0.0.0\", 9999)\n proxy_sock.bind(proxy_addr)\n proxy_sock.listen(5)\n print(\"Prox Server is listening...\")\n\n rli = [proxy_sock]\n wli = []\n sock_to_queue = {}\n\n while True:\n readable, writeable, excepable = 
select.select(rli, wli, rli)\n        for item in readable:\n            if item is proxy_sock:\n                # handle a new incoming connection\n                client_sock, client_addr = item.accept()\n                print(\"Welcome %s:%s\" % (str(client_addr[0]), str(client_addr[1])))\n                client_sock.setblocking(False)\n                rli.append(client_sock)\n                sock_to_queue[client_sock] = queue.Queue()\n            else:\n                # handle data sent by an existing client\n                data = item.recv(1024)\n                if not data:\n                    print(\"Client disconnected....\")\n                    rli.remove(item)  # remove this socket from the readable list\n                    del sock_to_queue[item]  # drop its entry from the dict\n                    continue\n                sock_to_queue[item].put(data)\n                wli.append(item)\n\n        for item in writeable:\n            data = sock_to_queue[item].get()\n            if len(data) == 3 and data.startswith(b\"\\x05\"):  # SOCKS5 greeting, e.g. data == b\"\\x05\\x01\\x00\"\n                print(\"handshake\",flush=True)\n                item.send(b\"\\x05\\x00\")  # version 5, no authentication required\n            elif len(data) > 3 and data.startswith(b\"\\x05\"):  # connect request, e.g. data == b\"\\x05\\x01\\x00\\x03\\x0btwitter.com\\x01\\xbb\"\n                # print(\"connection established\", flush=True)\n                # item.send(b\"\\x05\\x00\\x00\\x01\\xac\\x1f\\x1c\\x8e\\x048\")\n                # data = b\"0x05 0x01 0x00 0x01 0x7f 0x00 0x00 0x01 0x1f 0x40\"\n                addr_type = data[3]  # ATYP: 1 = IPv4, 3 = domain name, 4 = IPv6\n                if addr_type == 1:\n                    # addr_ip = sock.recv(4)\n                    addr_ip = data[4:8]\n                    remote_addr = socket.inet_ntoa(addr_ip)\n                    print(remote_addr)\n                elif addr_type == 3:\n                    # addr_len = int.from_bytes(sock.recv(1), byteorder='big')\n                    addr_len = data[4]  # indexing a bytes object already yields an int\n                    # remote_addr = sock.recv(addr_len)\n                    remote_addr = data[5:5 + addr_len].decode()\n                elif addr_type == 4:\n                    # addr_ip = sock.recv(16)\n                    addr_ip = data[4: 20]\n                    remote_addr = socket.inet_ntop(socket.AF_INET6, addr_ip)\n                else:\n                    return\n                # DST.PORT\n                # remote_addr_port = struct.unpack('>H', sock.recv(2))\n                remote_addr_port = struct.unpack('>H', data[-2:])\n                print(\"port: \", remote_addr_port)\n                # reply success to the client\n                reply = b\"\\x05\\x00\\x00\\x01\"\n                reply += socket.inet_aton('0.0.0.0') + struct.pack(\">H\", 8888)\n                item.send(reply)\n                print(\"connection established\", flush=True)\n                # establish the remote connection\n                # once the remote address is known, connect to it\n                try:\n                    remote = socket.create_connection((remote_addr, remote_addr_port[0]))\n                    # remote = socket.socket()\n                    # remote.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n                    # remote.setblocking(False)\n                    # remote.bind((remote_addr, remote_addr_port[0]))\n                    # remote.listen(5)\n                except socket.error as e:\n                    print(e)\n                    continue\n                remote.setblocking(False)\n                rli.append(remote)  # add the remote socket to the read list\n            else:\n                item.send(sock_to_queue[item].get())\n                wli.remove(item)\n\n        for item in excepable:\n            if item in wli:\n                wli.remove(item)\n            rli.remove(item)\n            del sock_to_queue[item]\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"proxy_select3.py","file_name":"proxy_select3.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"17743316","text":"import numpy as np\r\n\r\na = np.array([[1,2,3],[4,5,6]])\r\nb = np.zeros_like(a) # create an array with the same shape as a but with every element 0\r\n#print(b)\r\n\r\nc = np.ones_like(a) # create an array with the same shape as a but with every element 1\r\n#print(c)\r\n\r\nd = np.eye(3)   # array with 1s on the main diagonal\r\n#print(d)\r\ne = np.eye(3, k=1)  # the diagonal offset by k=1 (above the main diagonal) is all 1s\r\n#print(e)\r\n\r\nf = np.random.rand(3)   # generate 3 random numbers as a 1-D array\r\n#print(f)\r\ng = np.random.rand(3,3) # generate a 3x3 2-D array of random numbers\r\nprint(g)\r\n\r\n","sub_path":"第八章 向量矩陣運算/Ch8_2_2d.py","file_name":"Ch8_2_2d.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"199036568","text":"import random # for generating random numbers\nimport sys # to exit the program\n
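# Cockpit view mini-game: each asteroid is a dict holding 'x'/'y' screen coordinates.\n# The draw loop below scales the asteroid sprite up frame by frame to fake its\n# approach, and the arrow keys shift its apparent position as the player steers.\n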
import pygame\nfrom pygame.locals import * # basic pygame import\n\n# Global variables\nFPS = 200\nSCREENWIDTH = 289\nSCREENHEIGHT = 511\nSCREEN = pygame.display.set_mode((SCREENWIDTH,SCREENHEIGHT))\nBACKGROUND = 'background.jpg'\nCOCKPIT = 'cockpit.png'\nWELLCOME = 'WELLCOME.jpg'\nUFO = 'UFO.png'\nASTEROID = 'asteroid.png'\nGAME_SPRITES = {}\nGAME_SOUNDS = {}\ndef MainGame():\n    while True:\n        ASTOX_CHANGE = 0\n        ASTOY_CHANGE = 0\n        Asteroid = []\n        newAsteroid = randAsteroid()\n        Asteroid.append(newAsteroid[0])\n        print(Asteroid)\n        for event in pygame.event.get():\n            # if user presses cross button,close the game\n            if event.type == pygame.QUIT or (event.type==KEYDOWN and event.key == K_ESCAPE):\n                pygame.quit()\n                sys.exit()\n\n        SCREEN.blit(GAME_SPRITES['background'],(0,0))\n        i = 1\n        while i < 600:\n            SCREEN.blit(GAME_SPRITES['background2'],(0,0))\n            ASTEROID2 = pygame.transform.scale(GAME_SPRITES['asteroid'], (2*i,2*i))\n            SCREEN.blit(ASTEROID2,(Asteroid[0]['x']-(i),Asteroid[0]['y']-(i)))\n            SCREEN.blit(GAME_SPRITES['COCKPIT'],(0,441))\n            pygame.display.update()\n            if 0 < Asteroid[0]['x'] < 5 or 284 < Asteroid[0]['x'] < 289:\n                if len(Asteroid) < 3:\n                    Asteroid.append(newAsteroid[0])\n            if 0 < Asteroid[0]['y'] < 5 or 506 < Asteroid[0]['y'] < 511:\n                if len(Asteroid) < 3:\n                    Asteroid.append(newAsteroid[0])\n            if Asteroid[0]['x'] < -GAME_SPRITES['asteroid'].get_width() or Asteroid[0]['x'] > (SCREENWIDTH + GAME_SPRITES['asteroid'].get_width()):\n                Asteroid.pop(0)\n                i = 100\n                break\n            if Asteroid[0]['y'] < -GAME_SPRITES['asteroid'].get_height() or Asteroid[0]['y'] > (SCREENHEIGHT + GAME_SPRITES['asteroid'].get_height()):\n                Asteroid.pop(0)\n                i = 100\n                break\n            else:\n                i += 1\n            for event in pygame.event.get():\n                # if user presses cross button,close the game\n                if event.type == pygame.QUIT or (event.type==KEYDOWN and event.key == K_ESCAPE):\n                    pygame.quit()\n                    sys.exit()\n                if event.type == pygame.KEYDOWN:\n                    if event.key == pygame.K_LEFT:\n                        ASTOX_CHANGE = 1.5\n                    if event.key == pygame.K_RIGHT:\n                        ASTOX_CHANGE = -1.5\n                    if event.key == pygame.K_UP:\n                        ASTOY_CHANGE = 1.5\n                    if event.key == pygame.K_DOWN:\n                        ASTOY_CHANGE = -1.5\n                if event.type == pygame.KEYUP:\n                    if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT or event.key == pygame.K_UP or event.key == pygame.K_DOWN:\n                        ASTOY_CHANGE = 0\n                        ASTOX_CHANGE = 0\n            Asteroid[0]['x'] += ASTOX_CHANGE\n            Asteroid[0]['y'] += ASTOY_CHANGE\n            FPSCLOCK.tick(FPS)\ndef wellcomescreen():\n    while True:\n        for event in pygame.event.get():\n            # if user presses cross button,close the game\n            if event.type == pygame.QUIT or (event.type==KEYDOWN and event.key == K_ESCAPE):\n                pygame.quit()\n                sys.exit()\n            elif event.type==KEYDOWN and (event.key==K_SPACE or event.key == K_UP):\n                return\n            else:\n                SCREEN.blit(GAME_SPRITES['wellcome'],(0,0))\n                pygame.display.update()\n                FPSCLOCK.tick(FPS)\ndef randAsteroid():\n    PLAYERX = random.randint(0,200)\n    PLAYERY = random.randint(0,400)\n    randAst = [\n        {'x' : PLAYERX, 'y' : PLAYERY}\n    ]\n    return randAst\n\nif __name__ == \"__main__\":\n\n\n    pygame.init() #initialize all pygame modules\n    FPSCLOCK = pygame.time.Clock()\n    pygame.display.set_caption('GAME by Mahendra')\n    GAME_SPRITES['wellcome'] = pygame.image.load(WELLCOME).convert()\n    GAME_SPRITES['background'] = pygame.image.load(BACKGROUND).convert()\n    GAME_SPRITES['UFO'] = pygame.image.load(UFO).convert_alpha()\n    GAME_SPRITES['asteroid'] = pygame.image.load(ASTEROID).convert_alpha()\n    GAME_SPRITES['COCKPIT'] = pygame.image.load(COCKPIT).convert_alpha()\n    GAME_SPRITES['background2'] = 
pygame.transform.scale(GAME_SPRITES['background'], (289,441))\n while True:\n wellcomescreen()\n MainGame()","sub_path":"space game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"124707937","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n# license removed for brevity\n# //======================================================================//\n# // This software is free: you can redistribute it and/or modify //\n# // it under the terms of the GNU General Public License Version 3, //\n# // as published by the Free Software Foundation. //\n# // This software is distributed in the hope that it will be useful, //\n# // but WITHOUT ANY WARRANTY; without even the implied warranty of //\n# // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.. See the //\n# // GNU General Public License for more details. //\n# // You should have received a copy of the GNU General Public License //\n# // Version 3 in the file COPYING that came with this distribution. //\n# // If not, see //\n# //======================================================================//\n# // //\n# // Copyright (c) 2019 SinfonIA Pepper RoboCup Team //\n# // Sinfonia - Colombia //\n# // https://sinfoniateam.github.io/sinfonia/index.html //\n# // //\n# //======================================================================//\n\nfrom person_cloud import PersonCloud\nfrom person_local import PersonLocal\nfrom person_cloud import Less_Blurred\nfrom edit_files import Group\nimport cv2 as cv2\nimport sys\nimport os\nimport json\n\n\n# import unicodedata\n\nclass Characterization:\n def __init__(self,source):\n self.ROOT_PATH = os.path.dirname(sys.modules['__main__'].__file__)\n print(self.ROOT_PATH)\n n_imas, percent, n_train = self.get_parameters()\n self.n_images_to_take = n_imas\n self.percent_of_face = percent\n self.n_images_to_train = n_train\n self.source = source\n self.persons = self.setPersonSource()\n print(self.n_images_to_take, self.n_images_to_train, self.percent_of_face) \n\n def setPersonSource(self):\n print(\"Use {} enviroment\".format(self.source))\n if (self.source == 'local'):\n return PersonLocal()\n if (self.source == 'cloud' ):\n return PersonCloud()\n\n def get_parameters(self):\n print (self.ROOT_PATH)\n with open(\"Resources/interaction_parameters.json\") as f:\n secretInfo = json.load(f)\n print(\"Interaction parameters: \", secretInfo)\n return secretInfo[\"n_images_to_take\"], secretInfo[\"percent_of_face\"], secretInfo[\"n_images_to_train\"]\n\n def detect_person(self, frame):\n people = self.persons.detectPerson(frame)\n return people\n\n def indentify_person(self, frame):\n people = self.persons.identifyPerson(frame)\n return people\n\n def add_person(self, name, images):\n # blurred = Less_Blurred(len(images))\n # images = blurred.sort_less_blurred(images)\n person = self.persons.enrol(name, images)\n return person\n\n def get_persons(self):\n personsList = self.persons.persons_in_group()\n for p in personsList:\n print(p)\n return personsList\n\n def delete_person(self, name):\n self.persons.delete_person_by_name(name)\n \n def delete_all_person(self):\n pass\n #delete all person group\n\n def get_persons_attributes(self):\n G = Group()\n for p in G.persons:\n print(p)\n return G.persons\n \n\n\n# c = Characterization(\"local\")\n# c.get_persons()\n# 
c.indentify_person(True)","sub_path":"Class/characterization.py","file_name":"characterization.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"13108336","text":"import urllib.request as request\nimport json\nimport ssl\nimport pymysql\nssl._create_default_https_context = ssl._create_unverified_context\n\ndb=pymysql.connect(host=\"127.0.0.1\",user=\"root\",password=\"5566\",database=\"TravelWeb\")\n\n# db = mysql.connector.connect(\n#     host=\"127.0.0.1\", user=\"root\", password=\"5566\", database=\"TravelWeb\"\n# )\n\ncur = db.cursor()\n\n\nfilename = \"taipei-attractions.json\"\nwith open(filename) as json_file:\n    data = json.load(json_file)\n\nlandList = data[\"result\"][\"results\"]\n\n# insert the attractions into the db\nfor n in landList:\n    attrId = n[\"_id\"]  # attraction id\n    landName = n[\"stitle\"]  # attraction name\n    landMrt = n[\"MRT\"]  # nearest MRT station\n    landType = n[\"CAT2\"]  # attraction category\n\n    # collect every photo URL for the attraction\n    photoUrl = n[\"file\"].split(\"http\") # photoUrl is a list\n    pic_list=[]\n    for i in photoUrl:  # check each URL to see whether it is an image file\n        my_suffixes = (\"JPG\", \"PNG\", \"jpg\", \"png\")\n        if not i.endswith(my_suffixes) or i == '':\n            continue\n        pic='http'+ i\n        pic_list.append(pic)  # pic_list holds every usable photo URL\n    pic_list = str(pic_list)\n\n    landIntro = n[\"xbody\"]  # description\n    landAddr = n[\"address\"]  # address\n    landTrans = n[\"info\"]  # transportation\n    landLati = n[\"latitude\"] # latitude\n    landLongi = n[\"longitude\"] # longitude\n    cur.execute('insert into `attractions`(attrId, title, mrt, type, pic_link, introduction, address, transportation, latitude, longitude) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)',(attrId, landName, landMrt, landType, pic_list, landIntro, landAddr, landTrans, landLati, landLongi))\n    db.commit()\n    # print(pic_list)\n\n# process the photos of a single attraction\n# photoUrl = landList[0][\"file\"].split(\"http\") # photoUrl is a list\n# pic_list=[]\n# for i in photoUrl:  # check each URL to see whether it is an image file\n#     my_suffixes = (\"JPG\", \"PNG\", \"jpg\", \"png\")\n#     if not i.endswith(my_suffixes) or i == '':\n#         continue\n#     pic='http'+ i\n#     pic_list.append(pic)  # pic_list holds every usable photo URL\n# pic_list = str(pic_list)\n","sub_path":"data/set-attr.py","file_name":"set-attr.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"580750429","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass QuoteSpider(scrapy.Spider):\n    name = 'quote'\n    allowed_domains = ['quotes.toscrape.com']\n    start_urls = ['http://quotes.toscrape.com/']\n\n    def parse(self, response):\n        response_next = response.css('.quote')\n        print(response_next)\n        for item in response_next:\n            text = item.xpath('span[1]/text()').extract()\n            author = item.xpath('span[2]/small/text()').extract()\n            tags = item.xpath('div/a/text()').extract()\n            # tags = item.xpath('div').re('a class=\"tag\" href=\".*?\">(.*?)')\n            # tags = item.xpath('div[class=\"tag\"]/a/text').extract()\n            print(tags)\n            yield {\n                'text': text,\n                'author': author,\n                'tags': tags\n            }\n\n        next_url = response.xpath('/html/body/div/div[2]/div[1]/nav/ul/li[@class=\"next\"]/a/@href').extract()[0]\n        url = 'http://quotes.toscrape.com'+next_url\n        yield scrapy.Request(url, callback=self.parse)\n","sub_path":"scrapy/quotes/quotes/scrapy/quote.py","file_name":"quote.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"429655312","text":"from django.conf.urls.defaults import *\r\nfrom feeds import feed_dict\r\n\r\n
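# feed_dict maps each feed slug to its Feed class; the slug captured from the URL\r\n# below is passed to the syndication view, which looks the feed up in this dict.\r\n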
urlpatterns = patterns('',\r\n    (r'^feeds/(?P<url>.*)/$', 'django.contrib.syndication.views.feed', {'feed_dict': feed_dict}),\r\n)\r\n\r\nfrom sitemaps import sitemaps\r\nurlpatterns += patterns('django.contrib.gis.sitemaps.views',\r\n    (r'^sitemap.xml$', 'index', {'sitemaps' : sitemaps}),\r\n    (r'^sitemaps/(?P<section>
\\w+)\\.xml$', 'sitemap', {'sitemaps' : sitemaps}),\r\n (r'^sitemaps/kml/(?P