diff --git "a/1379.jsonl" "b/1379.jsonl" new file mode 100644--- /dev/null +++ "b/1379.jsonl" @@ -0,0 +1,1005 @@ +{"seq_id": "21109800457", "text": "import wandb\nfrom seq2seq_attention.train import train_seq2seq_with_attention\n\nif __name__ == \"__main__\":\n\n EXP_NAME = \"Uniform-Attention\"\n\n LR = 1e-4\n BATCH_SIZE = 128\n EPOCHS = 25\n MAX_VOCAB_SIZE = 8000\n MIN_FREQ = 2\n ENC_EMB_DIM = 256\n HIDDEN_DIM_ENC = 512\n HIDDEN_DIM_DEC = 512\n NUM_LAYERS_ENC = 1\n NUM_LAYERS_DEC = 1\n EMB_DIM_TRG = 256\n DEVICE = \"cuda\"\n TEACHER_FORCING = 0.5\n TRAIN_DIR = \"./data/processed/train.csv\"\n VAL_DIR = \"./data/processed/val.csv\"\n TEST_DIR = \"./data/processed/val.csv\"\n PROGRESS_BAR = False\n USE_WANDB = True\n DROPOUT = 0\n TRAIN_ATTENTION = False\n\n # Setup hyperparams for wandb\n hyper_params = {\n \"lr\": LR,\n \"batch_size\": BATCH_SIZE,\n \"epochs\": EPOCHS,\n \"max_vocab_size\": MAX_VOCAB_SIZE,\n \"min_freq\": MIN_FREQ,\n \"enc_hidden\": HIDDEN_DIM_ENC,\n \"dec_hidden\": HIDDEN_DIM_DEC,\n \"embedding_enc\": ENC_EMB_DIM,\n \"embedding_dec\": ENC_EMB_DIM,\n \"dropout\": DROPOUT,\n \"teacher_forcing\": TEACHER_FORCING,\n }\n\n # Init wandb\n if USE_WANDB:\n wandb.init(\n project=\"Seq2Seq-With-Attention\",\n name=EXP_NAME,\n # track hyperparameters and run metadata\n config=hyper_params,\n )\n\n train_seq2seq_with_attention(\n lr=LR,\n batch_size=BATCH_SIZE,\n epochs=EPOCHS,\n enc_emb_dim=ENC_EMB_DIM,\n hidden_dim_enc=HIDDEN_DIM_ENC,\n hidden_dim_dec=HIDDEN_DIM_DEC,\n num_layers_enc=NUM_LAYERS_ENC,\n num_layers_dec=NUM_LAYERS_DEC,\n emb_dim_trg=EMB_DIM_TRG,\n max_vocab_size=MAX_VOCAB_SIZE,\n min_freq=MIN_FREQ,\n device=DEVICE,\n teacher_forcing=TEACHER_FORCING,\n train_dir=TRAIN_DIR,\n val_dir=VAL_DIR,\n test_dir=TEST_DIR,\n progress_bar=PROGRESS_BAR,\n use_wandb=USE_WANDB,\n exp_name=EXP_NAME,\n dropout=DROPOUT,\n train_attention=TRAIN_ATTENTION,\n )\n", "repo_name": "PatrickSVM/Seq2Seq-with-Attention", "sub_path": "train_seq2seq.py", "file_name": "train_seq2seq.py", "file_ext": "py", "file_size_in_byte": 2002, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "48", "api": [{"api_name": "wandb.init", "line_number": 46, "usage_type": "call"}, {"api_name": "seq2seq_attention.train.train_seq2seq_with_attention", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "71183045905", "text": "from django.conf.urls import include,url\n\nfrom . 
import views\n\nurlpatterns=[\n\turl(r'^$',views.post_list,name='post_list'),\n\turl(r'^post/new/$', views.post_new, name='post_new'),\n\turl(r'^post/(?P[0-9]+)/$',views.post_detail,name='post_detail'),\n\turl(r'^post/edit/(?P[0-9]+)/$', views.post_edit, name='post_edit'),\n\turl(r'^post/drafts/$',views.post_draft_list,name='post_draft_list'),\n\turl(r'^post/(?P[0-9]+)/publish/$', views.post_publish, name='post_publish'),\n\turl(r'^post/(?P[0-9]+)/remove/$', views.post_remove, name='post_remove'),\n\turl(r'^post/full_search/$', views.full_search, name='full_search'),\n]", "repo_name": "kleinzh/PythonBlog", "sub_path": "mysite/blog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 622, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "74037077265", "text": "import smtplib;\n\nfrom email.mime.multipart import MIMEMultipart;\nfrom email.mime.text import MIMEText;\nfrom email.mime.base import MIMEBase;\nfrom email import encoders;\n\n#Instanciando a classe MIME que é responsável por elaborar a requisição do disparo do email\nOutlookApp = MIMEMultipart()\n\n#Informando quem é a origem do e-mail\nOutlookApp['From'] = \"contato_joaogabriel@outlook.com\"\n\n#Informando quem é o destinatário do e-mail\nOutlookApp['To'] = \"contato_joaogabriel@outlook.com\"\n\n#Informando qual é o assunto do e-mail\nOutlookApp['Subject'] = \"Titulo do e-mail\"\n\n#Construindo o corpo do e-mail\nCorpoEmail = \"\"\"\n\n\n \n

\n Fernanda cabeçuda\n

\n \n \n \n \n texto vermelho\n \n \n\n\"\"\"\n\n#Instanciando o corpo do e-mail em formato HTML\nOutlookApp.attach(MIMEText(CorpoEmail, 'html'));\n\n#Incluindo anexo\n#NomeArquivo = 'teste.pdf' #Nome no arquivo a adicionar\n#Anexo = open(NomeArquivo,'rb') #Abrindo arquivo\n\n#Realizando conversão para o tipo 64\n#part = MIMEBase('application', 'octet-stream')\n#part.set_payload((Anexo).read())\n#encoders.encode_base64(part)\n#part.add_header(\"Content-Disposition\", F\"attachment; filename= {NomeArquivo}\")\n#OutlookApp.attach(part)\n\n#Fechando o arquivo\n#Anexo.close()\n\n#Declarando variáveis da biblioteca SMTP\nserver = smtplib.SMTP('smtp.outlook.com', 587) #informando servidor e porta SMTP\nserver.starttls() #Informando tipo de validação\nserver.login(\"contato_joaogabriel@outlook.com\",\n \"123qwe321ewq!@#QWE#@!EWQ\") #Passando Login e Senha\nTexto = OutlookApp.as_string() #Informando mensagem do e-mail\nserver.sendmail(\"contato_joaogabriel@outlook.com\",\n \"contato_joaogabriel@outlook.com\",\n Texto) #Enviando o email\nserver.quit() #Fechando conexão com o servidor", "repo_name": "brjoaogabriel/disparador_email", "sub_path": "exemplo_disparo_email.py", "file_name": "exemplo_disparo_email.py", "file_ext": "py", "file_size_in_byte": 2169, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 9, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 39, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "6592981340", "text": "from re import M\r\nfrom flask import Flask,jsonify,request\r\nfrom storage import all_articles, liked_articles, notliked_articles\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/get-article\")\r\ndef get_article():\r\n\r\n article_data = {\r\n \"title\": all_articles[0][12],\r\n \"url\": all_articles[0][11],\r\n \"text\": all_articles[0][13]\r\n }\r\n\r\n return jsonify({\r\n \"data\":article_data,\r\n \"status\":\"Success\"\r\n })\r\n\r\n\r\n@app.route(\"/liked-article\",methods = [\"POST\"])\r\ndef liked_article():\r\n articles = all_articles[0]\r\n liked_articles.append(articles)\r\n all_articles.pop(0)\r\n return jsonify({\r\n \"status\":\"Success\"\r\n }),201\r\n\r\n@app.route(\"/unliked-article\",methods = [\"POST\"])\r\ndef unliked_article():\r\n articles = all_articles[0]\r\n notliked_articles.append(articles)\r\n all_articles.pop(0)\r\n return jsonify({\r\n \"status\":\"Success\"\r\n }),201\r\n\r\nif __name__ == \"__main__\":\r\n app.run()", "repo_name": "Skymaster640/Project141", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 949, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "storage.all_articles", "line_number": 12, "usage_type": "name"}, {"api_name": "storage.all_articles", "line_number": 13, "usage_type": "name"}, {"api_name": "storage.all_articles", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 17, "usage_type": "call"}, {"api_name": "storage.all_articles", "line_number": 25, "usage_type": "name"}, {"api_name": "storage.liked_articles.append", "line_number": 26, "usage_type": "call"}, {"api_name": "storage.liked_articles", "line_number": 26, "usage_type": "name"}, {"api_name": "storage.all_articles.pop", 
"line_number": 27, "usage_type": "call"}, {"api_name": "storage.all_articles", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 28, "usage_type": "call"}, {"api_name": "storage.all_articles", "line_number": 34, "usage_type": "name"}, {"api_name": "storage.notliked_articles.append", "line_number": 35, "usage_type": "call"}, {"api_name": "storage.notliked_articles", "line_number": 35, "usage_type": "name"}, {"api_name": "storage.all_articles.pop", "line_number": 36, "usage_type": "call"}, {"api_name": "storage.all_articles", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "11290253896", "text": "import cv2\nimport urllib.request\n\n# Set the path of the video file\nvideo_path = '/home/andrew/open-inference/data/cropped_mini.mp4'\n\n# Open the video file and read the first frame\nvideo_capture = cv2.VideoCapture(video_path)\nsuccess, first_frame = video_capture.read()\n\n# Check if the frame was successfully read\nif not success:\n print('Error: Could not read first frame of video')\n exit()\n\n# Save the first frame as an image file\ncv2.imwrite('first_frame.jpg', first_frame)\n\n# Display the first frame\ncv2.imshow('First Frame', first_frame)\n\n# Wait for a key press and then close the window\ncv2.waitKey(0)\ncv2.destroyAllWindows()", "repo_name": "HenryZheng1/Openpilot-Inference", "sub_path": "getfirst.py", "file_name": "getfirst.py", "file_ext": "py", "file_size_in_byte": 635, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "cv2.VideoCapture", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "7644287143", "text": "import sqlite3\n\nclass ShowInfo:\n def __init__(self, ui):\n self.connect=sqlite3.connect(\"database.db\")\n self.cursor=self.connect.cursor()\n self.ui=ui\n self.loggedId=None\n self.ui.myInfo.clicked.connect(self.myInfoEvent)\n self.ui.ranking.clicked.connect(self.rankingEvent)\n\n def loadId(self, id):\n self.loggedId=id\n\n def myInfoEvent(self): \n self.ui.myInfoLabel.setText(self.loggedId+\"'S INFO\")\n self.cursor.execute(\"SELECT name, age, email FROM userInfo WHERE id=?\", [self.loggedId])\n result=self.cursor.fetchall() \n self.ui.idForMyInfoInput.setText(self.loggedId)\n self.ui.nameForMyInfoInput.setText(result[0][0])\n self.ui.ageForMyInfoInput.setText(str(result[0][1]))\n self.ui.mailForMyInfoInput.setText(result[0][2])\n self.cursor.execute(\"SELECT count FROM recordInfo WHERE id=? 
ORDER BY count, dateTime\", [self.loggedId])\n result=self.cursor.fetchall()\n if len(result)<1:\n self.ui.bestScoreForMyInfoInput.setText(\"NO SCORE\")\n self.ui.playGamesForMyInfoInput.setText(\"NO PLAY\")\n else:\n self.ui.bestScoreForMyInfoInput.setText(str(result[0][0]))\n self.ui.playGamesForMyInfoInput.setText(str(len(result)))\n\n def rankingEvent(self):\n self.cursor.execute(\"SELECT id, count, dateTime FROM recordInfo ORDER BY count, dateTime LIMIT 10\")\n result=self.cursor.fetchall()\n if len(result)<10:\n for i in range (10-len(result)):\n result.append([\"-\",\"-\",\"-\"])\n self.ui.rank1Input.setText(result[0][0]+\" / \"+str(result[0][1])+\" / \"+result[0][2])\n self.ui.rank2Input.setText(result[1][0]+\" / \"+str(result[1][1])+\" / \"+result[1][2])\n self.ui.rank3Input.setText(result[2][0]+\" / \"+str(result[2][1])+\" / \"+result[2][2])\n self.ui.rank4Input.setText(result[3][0]+\" / \"+str(result[3][1])+\" / \"+result[3][2])\n self.ui.rank5Input.setText(result[4][0]+\" / \"+str(result[4][1])+\" / \"+result[4][2])\n self.ui.rank6Input.setText(result[5][0]+\" / \"+str(result[5][1])+\" / \"+result[5][2])\n self.ui.rank7Input.setText(result[6][0]+\" / \"+str(result[6][1])+\" / \"+result[6][2])\n self.ui.rank8Input.setText(result[7][0]+\" / \"+str(result[7][1])+\" / \"+result[7][2])\n self.ui.rank9Input.setText(result[8][0]+\" / \"+str(result[8][1])+\" / \"+result[8][2])\n self.ui.rank10Input.setText(result[9][0]+\" / \"+str(result[9][1])+\" / \"+result[9][2])", "repo_name": "ehakyung/Python", "sub_path": "ShowInfo.py", "file_name": "ShowInfo.py", "file_ext": "py", "file_size_in_byte": 2496, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "sqlite3.connect", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "71599715666", "text": "from datetime import datetime\nfrom google.appengine.api import mail\nfrom model import *\nfrom utils import *\nimport prefix\nimport reveal\nimport sys\n\n\ndef get_entities_to_delete(person):\n # Gather all the entities that are attached to this person.\n entities = [person] + person.get_notes()\n if person.photo_url and person.photo_url.startswith('/photo?id='):\n photo = Photo.get_by_id(int(person.photo_url.split('=', 1)[1]))\n if photo:\n entities.append(photo)\n return entities\n\n\nclass Delete(Handler):\n def get(self):\n \"\"\"If no signature is present, offer to send out a deletion code.\n If a signature is present, confirm deletion before carrying it out.\"\"\"\n person = Person.get(self.subdomain, self.params.id)\n if not person:\n return self.error(400, 'No person with ID: %r' % self.params.id)\n\n self.render('templates/delete.html', person=person,\n entities=get_entities_to_delete(person),\n view_url=self.get_url('/view', id=self.params.id),\n save_url=self.get_url('/api/read', id=self.params.id))\n\n def post(self):\n \"\"\"If no signature is present, send out a deletion code.\n If a signature is present, carry out the deletion.\"\"\"\n person = Person.get(self.subdomain, self.params.id)\n if not person:\n return self.error(400, 'No person with ID: %r' % self.params.id)\n\n action = ('delete', str(self.params.id))\n if self.params.signature:\n if reveal.verify(action, self.params.signature):\n db.delete(get_entities_to_delete(person))\n # i18n: Message telling the user that a record has been deleted.\n return self.error(200, _('The record has been deleted.'))\n else:\n # i18n: Message for an unauthorized attempt to delete a record.\n return 
self.error(403, _('The authorization code was invalid.'))\n else:\n mail.send_mail(\n sender='do not reply ' % self.env.domain,\n to='<%s>' % person.author_email,\n # i18n: Subject line of an e-mail message that gives the\n # i18n: user a link to delete a record\n subject=_(\n 'Deletion request for %(given_name)s %(family_name)s'\n ) % {'given_name': person.first_name,\n 'family_name': person.last_name},\n # i18n: Body text of an e-mail message that gives the user\n # i18n: a link to delete a record\n body = _('''\nWe have received a deletion request for a missing person record at\n%(domain_name)s.\n\nYour e-mail address was entered as the author of this record, so we\nare contacting you to confirm whether you want to delete it.\n\nTo delete this record, use this link:\n\n %(delete_url)s\n\nTo view the record, use this link:\n\n %(view_url)s\n\n''') % {'domain_name': self.env.domain,\n 'delete_url': self.get_url('/delete', id=self.params.id,\n signature=reveal.sign(action, 24*3600)),\n 'view_url': self.get_url('/view', id=self.params.id)}\n )\n\n # i18n: Message explaining to the user that the e-mail message\n # i18n: containing a link to delete a record has been sent out.\n return self.error(200, _('An e-mail message with a deletion code has been sent. The code will expire in one day.'))\n\nif __name__ == '__main__':\n run(('/delete', Delete))\n", "repo_name": "istevens/personfinder-search-api", "sub_path": "app/delete.py", "file_name": "delete.py", "file_ext": "py", "file_size_in_byte": 3561, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "reveal.verify", "line_number": 42, "usage_type": "call"}, {"api_name": "google.appengine.api.mail.send_mail", "line_number": 50, "usage_type": "call"}, {"api_name": "google.appengine.api.mail", "line_number": 50, "usage_type": "name"}, {"api_name": "reveal.sign", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "19470118879", "text": "import pygame\nfrom constants import *\n\nclass Projectile:\n def __init__(self, x, y, direction_x, direction_y, speed,owner):\n self.x = x\n self.y = y\n self.direction_x = direction_x\n self.direction_y = direction_y\n self.speed = speed\n self.owner = owner\n self.rect = pygame.Rect(self.x,self.y,8,8)\n self.damage = 10\n\n def update(self,you,walls,projectiles,bind):\n self.x += self.direction_x * self.speed\n self.y += self.direction_y * self.speed\n self.rect = pygame.Rect(self.x,self.y,8,8)\n \n if self.rect.colliderect(you): \n if self.owner != you: \n if you.immunity['immunity'] == False:\n \n you.inflict_status(bind)\n try:\n projectiles.remove(self)\n except ValueError:\n pass\n\n cond1 = self.x > 1600 or self.x <0\n cond2 = self.y > 900 or self.y <0\n if cond1 or cond2:\n \n \n if self in projectiles:\n projectiles.remove(self)\n \n\n for wall in walls:\n if self.rect.colliderect(wall):\n \n if self in projectiles:\n projectiles.remove(self)\n \n def draw(self):\n \n pygame.draw.circle(window, (34, 100, 160), (int(self.x), int(self.y)), 8)\n\n @staticmethod\n def projectile_behaviour(projectiles,enemies,you,walls,bind):\n for projectile in projectiles:\n for enemy in enemies:\n if enemy.rect.colliderect(projectile):\n if projectile.owner not in enemies:\n enemy.hp -= projectile.damage\n if enemy.hp <= 0:\n enemies.remove(enemy)\n projectiles.remove(projectile)\n projectile.update(you,walls,projectiles,bind)\n projectile.draw()\n\n\n\n\n", "repo_name": "HerbertHomolka1/touhou", "sub_path": "projectile_class.py", 
"file_name": "projectile_class.py", "file_ext": "py", "file_size_in_byte": 2006, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "pygame.Rect", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 47, "usage_type": "attribute"}]} +{"seq_id": "36523301394", "text": "from fabric import SerialGroup as Group, Connection\n\napp_ws_servers = [\n \"139.59.136.182\",\n \"165.22.89.205\",\n \"165.227.167.134\"\n]\n\nredis_server = \"165.22.89.227\"\n\n\ndef setup_systemd(conn):\n conn.run('cp ava-appserver/appserver.service /lib/systemd/system')\n conn.run('chmod 766 /lib/systemd/system/appserver.service')\n\n conn.run('cp ava-appserver/avalanche.service /lib/systemd/system')\n conn.run('chmod 766 /lib/systemd/system/appserver.service')\n\n conn.run('systemctl daemon-reload')\n\n\ndef install_avalanche(conn):\n conn.run('ulimit -n 500000')\n\n conn.run('rm -rf avalanche')\n conn.run('mkdir -p /var/log/avalanche')\n conn.run('touch /var/log/avalanche/avalanche.log')\n conn.run('chmod 766 /var/log/avalanche/avalanche.log')\n\n conn.run('git clone git@github.com:furqan-shakoor/avalanche.git')\n conn.put('prod_settings_avalanche.py', 'avalanche/settings.py')\n conn.run('cd avalanche && pip3 install -r requirements.txt')\n\n conn.run('systemctl restart avalanche.service')\n\n\ndef install_app_server(conn):\n conn.run('ulimit -n 500000')\n\n conn.run('rm -rf avalanche')\n conn.run('mkdir -p /var/log/avalanche')\n conn.run('touch /var/log/avalanche/appserver.log')\n conn.run('chmod 766 /var/log/avalanche/appserver.log')\n\n conn.run('rm -rf ava-appserver')\n conn.run('git clone git@github.com:furqan-shakoor/ava-appserver.git')\n conn.put('prod_settings.py', 'ava-appserver/settings.py')\n conn.run('cd ava-appserver && pip3 install -r requirements.txt')\n\n conn.run('systemctl restart appserver.service')\n\n\ndef install_perf_tests(conn):\n conn.run('ulimit -n 500000')\n\n conn.run('rm -rf ava-test')\n conn.run('git clone https://github.com/furqan-shakoor/ava-test.git')\n conn.run('cd ava-test && pip3 install -r requirements.txt')\n\n\ndef deploy():\n for app_ws_server_ip in app_ws_servers:\n app_ws = Connection(f\"root@{app_ws_server_ip}\")\n install_app_server(app_ws)\n install_avalanche(app_ws)\n\n redis_conn = Connection(f\"root@{redis_server}\")\n install_perf_tests(redis_conn)\n\n\nif __name__ == \"__main__\":\n deploy()\n", "repo_name": "furqan-shakoor/ava-appserver", "sub_path": "fabfile.py", "file_name": "fabfile.py", "file_ext": "py", "file_size_in_byte": 2126, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "fabric.Connection", "line_number": 63, "usage_type": "call"}, {"api_name": "fabric.Connection", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "31916821569", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport datetime\nfrom lxml import etree\nimport re\ndef get_page(link):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36'\n }\n r = requests.get(link,headers=headers)\n html = r.content\n html = html.decode('utf-8')\n soup = BeautifulSoup(html,'lxml')\n return soup\n \n \ndef get_data(post_list,link):\n post = post_list.select('.for-list li')\n 
#print(post)\n \n lists = []\n for html in post:\n title = html.select('.truetit')[0] \n titles = title.text # 文章标题\n #print(titles)\n titles_urls = title.attrs['href'] # 获取文章url\n titles_url = \"https://bbs.hupu.com/\" + titles_urls # 拼接文章 url\n #print(titles_url)\n authors = html.select('.aulink')[0] #获取作者,及作者链接\n author = authors.text # 获取作者\n #print(author)\n author_url = authors.attrs['href'] # 获取作者链接\n #print(author_url)\n stick_time = html.select('a[style=\"color:#808080;cursor: initial; \"]')[0] # 获取帖子发布时间\n #print(stick_time.text)\n reply = html.select('span[class=\"ansour box\"]')[0] # 获取帖子的回复及浏览数\n #print(reply.text)\n end_time = html.select('div[class=\"endreply box\"] > a')[0] # 最后回复时间\n #print(end_time.text) \n end_author = html.select('div[class=\"endreply box\"] > span')[0] # 最后回复作者\n #print(end_author.text) \n \n lists.append([titles,titles_url,author,author_url,stick_time.text,reply.text,end_time.text,end_author.text])\n \n return lists\n\n\n'''\n dicts = {\n 'title': titles,\n 'titles_url': titles_url,\n 'author': author,\n 'author_url': author_url,\n 'stick_time': stick_time.text,\n 'reply': reply.text,\n 'end_time': end_time.text,\n 'nd_author':end_author.text,\n }\n print(dicts)\n# 爬取100页的 数据\nhttps://bbs.hupu.com/lol\nhttps://bbs.hupu.com/lol-2\nhttps://bbs.hupu.com/lol-3\n \n''' \ndef save_to_mysql():\n pass\n \n \ndef main():\n link = \"https://bbs.hupu.com/lol\"\n post_list = get_page(link)\n lists = get_data(post_list,link)\n for l in lists:\n print(l)\n \n \nif __name__ == '__main__':\n main()\n", "repo_name": "zl158218/PythonRelated", "sub_path": "HuPuBuXingJie_lol/hupu2.py", "file_name": "hupu2.py", "file_ext": "py", "file_size_in_byte": 2446, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "48", "api": [{"api_name": "requests.get", "line_number": 10, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "71702320465", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0015_auto_20170808_1437'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='item',\n name='itemViews',\n field=models.IntegerField(null=True, verbose_name=b'\\xe6\\x9f\\xa5\\xe7\\x9c\\x8b\\xe6\\xac\\xa1\\xe6\\x95\\xb0', blank=True),\n ),\n ]\n", "repo_name": "cssharp/learnDjango", "sub_path": "memberSystem/app/migrations/0016_item_itemviews.py", "file_name": "0016_item_itemviews.py", "file_ext": "py", "file_size_in_byte": 474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "24740184026", "text": "import sqlite3\nimport csv\nimport random\nfrom numpy import arange\n\nimport sys\nsys.path.append('./database/sellers/') \nfrom get_seller_ids import get_seller_ids\n\nlocal_path = \"./database/products/\"\ndatasets_path = \"./database/datasets/\"\n\n# Connect to 
the database\ndatabase = local_path + \"products.db\"\nconnection = sqlite3.connect(database)\ncursor = connection.cursor()\n\ndef verify_value(value):\n if value == '':\n return 'No Data'\n return value\n\n# Transfer the data from the csv file to the database\ndef transfer_products_data(filename):\n with open(filename, 'r') as csv_file:\n csv_reader = csv.reader(csv_file)\n\n for row in csv_reader:\n # Skip the header row\n if row[0] == 'Uniq Id':\n continue\n\n # Get the seller ids\n seller_ids = get_seller_ids()\n seller_id = random.choice(seller_ids)\n\n #! Product's Basic Infos\n product_id = verify_value(row[0]) \n name = verify_value(row[1])\n category = verify_value(row[4])\n price = verify_value(row[7])\n image_url = verify_value(row[15])\n rating = random.choice(arange(0, 5.5, 0.5))\n is_best_seller = 0\n\n # Insert the data into the database - \"products\" table\n try:\n cursor.execute('''\n INSERT INTO products (product_id, name, category, price, image_url, rating, is_best_seller) \n VALUES (?, ?, ?, ?, ?, ?, ?)\n ''', (product_id, name, category, price, image_url, rating, is_best_seller))\n connection.commit()\n except sqlite3.IntegrityError:\n connection.rollback()\n\n #! Product's Details\n model_number = verify_value(row[9])\n about_product = verify_value(row[10])\n product_specification = verify_value(row[11])\n technical_details = verify_value(row[12])\n shipping_weight = verify_value(row[13])\n product_dimensions = verify_value(row[14])\n upc_ean_code = verify_value(row[5])\n seller_id = seller_id[0]\n product_url = verify_value(row[18])\n\n # Insert the data into the database - \"product_details\" table\n try:\n cursor.execute('''\n INSERT INTO product_details (product_id, model_number, about_product, product_specification, technical_details, shipping_weight, product_dimensions, upc_ean_code, seller_id, product_url)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n ''', (product_id, model_number, about_product, product_specification, technical_details, shipping_weight, product_dimensions, upc_ean_code, seller_id, product_url))\n connection.commit()\n except sqlite3.IntegrityError:\n connection.rollback()\n \n# Transfer the data from the csv file to the database\ndef transfer_best_sellers_data(filename):\n with open(filename, 'r') as csv_file:\n csv_reader = csv.reader(csv_file)\n\n for row in csv_reader:\n # Skip the header row\n if row[0] == 'ASIN':\n continue\n\n #! Best Seller's Basic Infos\n product_id = verify_value(row[0] + row[21]) \n name = verify_value(row[3])\n category = verify_value(row[2])\n price = verify_value(row[8])\n image_url = verify_value(row[4])\n rating = verify_value(row[16])\n is_best_seller = 1\n\n # Insert the data into the database - \"products\" table\n try:\n cursor.execute('''\n INSERT INTO products (product_id, name, category, price, image_url, rating, is_best_seller) \n VALUES (?, ?, ?, ?, ?, ?, ?)\n ''', (product_id, name, category, price, image_url, rating, is_best_seller))\n connection.commit()\n except sqlite3.IntegrityError:\n connection.rollback()\n\n #! 
Best Seller's Details\n fba_fee = verify_value(row[9])\n fbm_fee = verify_value(row[10])\n height = verify_value(row[11])\n length = verify_value(row[12])\n width = verify_value(row[13])\n weight = verify_value(row[14])\n review_count = verify_value(row[15])\n seller_id = verify_value(row[7])\n\n # Insert the data into the database - \"best_sellers_details\" table\n try:\n cursor.execute('''\n INSERT INTO best_sellers_details (product_id, fba_fee, fbm_fee, height, length, width, weight, review_count, seller_id)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\n ''', (product_id, fba_fee, fbm_fee, height, length, width, weight, review_count, seller_id))\n connection.commit()\n except sqlite3.IntegrityError:\n connection.rollback()\n\nif __name__ == '__main__':\n products_data = datasets_path + \"amazon_dataset.csv\"\n transfer_products_data(products_data)\n\n best_sellers_data = datasets_path + \"best_sellers_dataset.csv\"\n transfer_best_sellers_data(best_sellers_data)\n\n connection.close()\n \n print(\"products.db - Data transfer complete!\")", "repo_name": "minhducnguyen26/RoeRivr", "sub_path": "backend/database/products/transfer_data.py", "file_name": "transfer_data.py", "file_ext": "py", "file_size_in_byte": 5340, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 15, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 26, "usage_type": "call"}, {"api_name": "get_seller_ids.get_seller_ids", "line_number": 34, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 35, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlite3.IntegrityError", "line_number": 53, "usage_type": "attribute"}, {"api_name": "sqlite3.IntegrityError", "line_number": 74, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 80, "usage_type": "call"}, {"api_name": "sqlite3.IntegrityError", "line_number": 103, "usage_type": "attribute"}, {"api_name": "sqlite3.IntegrityError", "line_number": 123, "usage_type": "attribute"}]} +{"seq_id": "29331868760", "text": "import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n ret, img = cap.read()\n if not ret:\n print(\"Unable to read from camera. 
Existing.\")\n exit()\n\n # img = cv2.imread('./src/image1.jpeg')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n sift = cv2.SIFT_create()\n # kp, des = sift.detectAndCompute(gray, None)\n kp = sift.detect(gray, None)\n\n img = cv2.drawKeypoints(gray, kp, img)\n cv2.imshow(\"sift_keypoints.jpg\", img)\n cv2.waitKey(16)\n", "repo_name": "geyang/vlad-python", "sub_path": "sift.py", "file_name": "sift.py", "file_ext": "py", "file_size_in_byte": 495, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "cv2.VideoCapture", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cv2.SIFT_create", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.drawKeypoints", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "38498054527", "text": "from flask import Flask, Blueprint\nfrom flask_restful import Api, Resource, reqparse, url_for\nimport json\nimport datetime\nfrom app.auth.models import User\n\nfrom app.utilities.common import json_serial\n\nadmin_api = Blueprint('admin_api', __name__, url_prefix='/backend/admin/api/v1')\n\nadm_api = Api(admin_api)\n\n\nclass UsersAdministration(Resource):\n def get(self):\n \"\"\"\n Lists all the users and user's basic data\n \"\"\"\n data = []\n\n for user in User.objects:\n data.append({\n \"created\": user.pk.generation_time.timestamp(),\n \"uid\": user.uid,\n \"email\": user.email,\n \"first_name\": user.profile.first_name if user.profile.first_name else '',\n \"last_name\": user.profile.last_name if user.profile.last_name else '',\n \"email_verified\": format(user.activity.email_verification.email_verified),\n \"user_restricted\": user.activity.user_restricted,\n \"failed_logins\": user.activity.failed_logins,\n \"user_banned\": user.activity.user_banned\n })\n \n status = { 'success': True, 'result_length': len(data)}\n response = {'data': data, 'status': status}\n return response\n\nclass UserAdministration_Profile(Resource):\n def post(self):\n \"\"\"\n Lists an individual user's basic data\n \"\"\"\n data = {}\n email_verified = False\n first_name = ''\n last_name = ''\n gender = ''\n date_of_birth = ''\n \n parser = reqparse.RequestParser()\n parser.add_argument('uid', type=str, location='json', required=True, help='UID for the user to retrieve')\n args = parser.parse_args()\n\n user = User.objects.filter(uid=args[\"uid\"]).first()\n if user is not None:\n data[\"created\"] = user.pk.generation_time.timestamp()\n data[\"email\"] = user.email\n data[\"first_name\"] = user.profile.first_name\n data[\"last_name\"] = user.profile.last_name\n data[\"last_name\"] = user.profile.last_name\n data[\"gender\"] = user.profile.gender\n data[\"address\"] = {}\n data[\"address\"][\"door_no_name\"] = user.profile.addresses[0].door_no_name\n data[\"address\"][\"street\"] = user.profile.addresses[0].street\n data[\"address\"][\"line_2\"] = user.profile.addresses[0].line_2\n data[\"address\"][\"city\"] = user.profile.addresses[0].city\n data[\"address\"][\"couty\"] = user.profile.addresses[0].county\n data[\"address\"][\"postcode\"] = user.profile.addresses[0].postcode\n data[\"address\"][\"country\"] = user.profile.addresses[0].country\n data[\"date_of_birth\"] = 
format(user.profile.date_of_birth) \n data[\"email_verified\"] = format(user.activity.email_verification.email_verified)\n data[\"user_restricted\"] = user.activity.user_restricted\n data[\"failed_logins\"] = user.activity.failed_logins\n data[\"user_banned\"] = user.activity.user_banned\n \n if user is not None:\n status = { 'success': True, 'result_length': 1 }\n else:\n status = { 'success': False, 'result_length': 0 }\n \n response = {'data': data, 'status': status}\n return response\n\nadm_api.add_resource(UsersAdministration, '/userAdministration/getUsers')\nadm_api.add_resource(UserAdministration_Profile, '/userAdministration/getUserProfile')", "repo_name": "ai001/TimeLottery", "sub_path": "LotteryAPI/app/admin/api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3495, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "flask.Blueprint", "line_number": 9, "usage_type": "call"}, {"api_name": "flask_restful.Api", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 14, "usage_type": "name"}, {"api_name": "app.auth.models.User.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "app.auth.models.User", "line_number": 21, "usage_type": "name"}, {"api_name": "flask_restful.Resource", "line_number": 38, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 50, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 50, "usage_type": "name"}, {"api_name": "app.auth.models.User.objects.filter", "line_number": 54, "usage_type": "call"}, {"api_name": "app.auth.models.User.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "app.auth.models.User", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "43199539681", "text": "import json\nimport sys\nimport math\nimport importlib\nimport glob\nimport os\n\nfrom kaitaistruct import KaitaiStruct\n\ndef dump_struct(s, sections, prefix=\"\"):\n if isinstance(s, list):\n #print(\"list\")\n for i, item in enumerate(s):\n label = prefix + \"[\" + str(i) + \"]\"\n sections.append({\n \"label\": label,\n \"parent\": prefix\n })\n dump_struct(item, sections, label)\n elif isinstance(s, KaitaiStruct):\n #print(vars(s))\n if hasattr(s, \"_debug\"):\n for name, descr in s._debug.items():\n #print(f\"name desc: {name} , {descr}\")\n prop = getattr(s, name)\n #print(prop)\n #if isinstance(prop, KaitaiStruct):\n # print(vars(prop))\n #print(\"\")\n label = prefix + \".\" + name if prefix else name\n sections.append({\n \"start\": descr[\"start\"],\n \"end\": descr[\"end\"],\n \"label\": label,\n \"parent\": prefix\n })\n dump_struct(prop, sections, label)\n\ndef parse_data(input_filename, output_filename, action_progress):\n # locate the compiled struct module\n scripts = glob.glob(os.path.join(os.path.dirname(input_filename), '*.py'))\n if len(scripts) < 1:\n raise FileNotFoundError('Could not find the expected python kaitai parser - did the kaitai struct compiler fail?')\n module_file = os.path.basename(scripts[0])\n sys.path.append(os.path.dirname(scripts[0]))\n package_name = os.path.splitext(module_file)[0]\n class_name = package_name.capitalize()\n struct_module = importlib.__import__(package_name, fromlist=[class_name])\n Struct = getattr(struct_module, class_name)\n\n action_progress.set_progress_percent(5)\n\n # parse the input data\n target = Struct.from_file(input_filename)\n target._read()\n\n 
action_progress.set_progress_percent(70)\n\n # write the parser result to the output\n sections = []\n dump_struct(target, sections)\n\n #print(sections)\n\n action_progress.set_progress_percent(80)\n\n output_json = {\n \"sections\": sections\n }\n\n with open(output_filename, 'w') as output_file:\n json.dump(output_json, output_file)\n", "repo_name": "pombredanne/hobbits", "sub_path": "src/hobbits-plugins/analyzers/KaitaiStruct/scripts/runner.py", "file_name": "runner.py", "file_ext": "py", "file_size_in_byte": 2310, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "48", "api": [{"api_name": "kaitaistruct.KaitaiStruct", "line_number": 20, "usage_type": "argument"}, {"api_name": "glob.glob", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "importlib.__import__", "line_number": 48, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "43613971920", "text": "from django import forms\nfrom apps.cocktails.models import Cocktail\nfrom apps.ingredients.models import Ingredient\n\n\nclass CustomMMCF(forms.ModelMultipleChoiceField):\n def label_from_instance(self, member):\n return \"%s\" % member.name\n\n\nclass CocktailReceiptForm(forms.ModelForm):\n class Meta:\n model = Cocktail\n fields = ('image', 'name', 'description', 'ingredients')\n\n image = forms.ImageField(required=False)\n name = forms.CharField(widget=forms.TextInput(), required=True)\n description = forms.CharField(widget=forms.Textarea(), required=True)\n ingredients = CustomMMCF(\n queryset=Ingredient.objects.all(),\n widget=forms.SelectMultiple(),\n to_field_name=\"name\",\n )\n\n\n", "repo_name": "OriginalFlavour/InterestingName", "sub_path": "projectcocktails/apps/cocktails/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 734, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "django.forms.ModelMultipleChoiceField", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 6, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}, {"api_name": "apps.cocktails.models.Cocktail", "line_number": 13, "usage_type": "name"}, {"api_name": "django.forms.ImageField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 16, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 17, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 17, 
"usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 18, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 18, "usage_type": "call"}, {"api_name": "apps.ingredients.models.Ingredient.objects.all", "line_number": 20, "usage_type": "call"}, {"api_name": "apps.ingredients.models.Ingredient.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "apps.ingredients.models.Ingredient", "line_number": 20, "usage_type": "name"}, {"api_name": "django.forms.SelectMultiple", "line_number": 21, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "73204959826", "text": "from django.conf.urls import url\n\nfrom ventas.reports import lista_ventas, factura_pdf\nfrom ventas.views import anular_venta, VentaDetailView, confirmar_venta\n\nurlpatterns = [\n url(r'^venta_detail/(?P\\d+)/$', VentaDetailView.as_view(), name='venta_detail'),\n url(r'^anular_venta/(?P\\d+)/$', anular_venta, name='anular_venta'),\n url(r'^confirmar_venta/(?P\\d+)/$', confirmar_venta, name='confirmar_venta'),\n url(r'^lista_ventas/$', lista_ventas, name='lista_ventas'),\n url(\n r'^generar_factura/(?P\\w+)/$',\n factura_pdf,\n name='generar_factura',\n ),\n]\n", "repo_name": "atiliopereira/distribution-centralizer", "sub_path": "ventas/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 604, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "ventas.views.VentaDetailView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "ventas.views.VentaDetailView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "ventas.views.anular_venta", "line_number": 8, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "ventas.views.confirmar_venta", "line_number": 9, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "ventas.reports.lista_ventas", "line_number": 10, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "ventas.reports.factura_pdf", "line_number": 13, "usage_type": "argument"}]} +{"seq_id": "894334699", "text": "from itertools import permutations\nK = int(input())\nN = 8\nRC = []\nfor k in range(K):\n r, c = map(int,input().split())\n RC.append((r,c))\n\ndef diag(board):\n for i in range(2*N-1):\n sm = 0\n for j in range(i+1):\n if (i-j>=8 or j>=8):\n continue\n sm += board[i-j][j]\n if sm > 1:\n return False\n return True\n\ndef judge(ls):\n board = [[0]*N for _ in range(N)]\n for r in range(N):\n c = ls[r]\n board[r][c] = 1\n for r, c in RC:\n if board[r][c] == 0:\n return False\n \n if not diag(board):\n return False\n \n if not diag(board[::-1]):\n return False\n return True\n\nfor ls in permutations(range(N)):\n if judge(ls):\n for c in ls:\n s = ['.'] * N\n s[c] = 'Q'\n print (''.join(s))\n exit()\n\n\n\n", "repo_name": "ToshikiShimizu/AOJ", "sub_path": "ALDS1/13a.py", "file_name": "13a.py", "file_ext": "py", "file_size_in_byte": 868, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": 
"itertools.permutations", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "37621313521", "text": "\"\"\"sitefucidi URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom django.urls import path, include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('administrador/', admin.site.urls,name='login'),\n path('', LoginView.as_view(template_name='usuarios/index.html'), name=\"Menu\"),\n path('salir/', LogoutView.as_view(template_name='usuarios/index.html'), name=\"logout\"),\n path('Menu', include('administrador.urls')),\n path('profile/', include(('administrador.urls', 'prof'), namespace='prof'),name='prof'),\n path('academia/', include(('Academico.urls', 'academia'), namespace='academia'),name='academia'),\n path('admision/', include(('Admision.urls', 'admision'), namespace='admision'),name='admision'),# incluimos las urls de admision para generar el sistema\n path('matricula/', include(('Matricula.urls', 'matricula'), namespace='matricula'), name='matricula'), # incluimos las urls de admision para generar el sistema\n path('pagos/', include(('Pago.urls', 'pago'), namespace='pago'), name='pago'), # incluimos las urls de admision para generar el sistema\n\n]+ static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)\n", "repo_name": "ervinPinargote/sisfucidi", "sub_path": "sitefucidi/sitefucidi/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1900, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LoginView.as_view", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LoginView", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView.as_view", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView", "line_number": 25, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 26, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": 
"django.urls.include", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 28, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 29, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 30, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.static.static", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 33, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 33, "usage_type": "attribute"}]} +{"seq_id": "6514828850", "text": "import time\nimport logging\nfrom app.cli import command, arg\n\ndef main():\n \n arg.set_vars_from_conf()\n args = arg.get_args()\n \n if args.verbose:\n logging.basicConfig(format=\"%(levelname)s: %(message)s\", level=logging.DEBUG)\n logging.info(\"Verbose output.\")\n else:\n logging.basicConfig(format=\"%(levelname)s: %(message)s\")\n\n if args.catalog:\n logging.debug(\"Catalog\")\n command.show_catalog()\n\n elif args.repository:\n logging.debug(\"Reposirory : {}\".format(args.repository))\n command.show_tags(args.repository)\n\n elif args.dry_run:\n logging.debug(\"Dry Run:\")\n command.dry_run()\n\n else :\n logging.debug(\"Delete:\")\n command.exec_del(True)\n\n\nif __name__ == \"__main__\":\n s = time.perf_counter()\n main()\n elapsed = time.perf_counter() - s\n logging.debug(f\"{__file__} executed in {elapsed:0.2f} seconds.\")", "repo_name": "Adrcpp/docker-rm", "sub_path": "docker-rm.py", "file_name": "docker-rm.py", "file_ext": "py", "file_size_in_byte": 915, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "app.cli.arg.set_vars_from_conf", "line_number": 7, "usage_type": "call"}, {"api_name": "app.cli.arg", "line_number": 7, "usage_type": "name"}, {"api_name": "app.cli.arg.get_args", "line_number": 8, "usage_type": "call"}, {"api_name": "app.cli.arg", "line_number": 8, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 11, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 17, "usage_type": "call"}, {"api_name": "app.cli.command.show_catalog", "line_number": 18, "usage_type": "call"}, {"api_name": "app.cli.command", "line_number": 18, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 21, "usage_type": "call"}, {"api_name": "app.cli.command.show_tags", "line_number": 22, "usage_type": "call"}, {"api_name": "app.cli.command", "line_number": 22, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 25, "usage_type": "call"}, {"api_name": "app.cli.command.dry_run", "line_number": 26, "usage_type": "call"}, {"api_name": "app.cli.command", "line_number": 26, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 29, "usage_type": "call"}, 
{"api_name": "app.cli.command.exec_del", "line_number": 30, "usage_type": "call"}, {"api_name": "app.cli.command", "line_number": 30, "usage_type": "name"}, {"api_name": "time.perf_counter", "line_number": 34, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "20888281644", "text": "# -*- coding: utf-8 -*-\n# ***********************************************************************\n# ****************** CANADIAN ASTRONOMY DATA CENTRE *******************\n# ************* CENTRE CANADIEN DE DONNÉES ASTRONOMIQUES **************\n#\n# (c) 2018. (c) 2018.\n# Government of Canada Gouvernement du Canada\n# National Research Council Conseil national de recherches\n# Ottawa, Canada, K1A 0R6 Ottawa, Canada, K1A 0R6\n# All rights reserved Tous droits réservés\n#\n# NRC disclaims any warranties, Le CNRC dénie toute garantie\n# expressed, implied, or énoncée, implicite ou légale,\n# statutory, of any kind with de quelque nature que ce\n# respect to the software, soit, concernant le logiciel,\n# including without limitation y compris sans restriction\n# any warranty of merchantability toute garantie de valeur\n# or fitness for a particular marchande ou de pertinence\n# purpose. NRC shall not be pour un usage particulier.\n# liable in any event for any Le CNRC ne pourra en aucun cas\n# damages, whether direct or être tenu responsable de tout\n# indirect, special or general, dommage, direct ou indirect,\n# consequential or incidental, particulier ou général,\n# arising from the use of the accessoire ou fortuit, résultant\n# software. Neither the name de l'utilisation du logiciel. Ni\n# of the National Research le nom du Conseil National de\n# Council of Canada nor the Recherches du Canada ni les noms\n# names of its contributors may de ses participants ne peuvent\n# be used to endorse or promote être utilisés pour approuver ou\n# products derived from this promouvoir les produits dérivés\n# software without specific prior de ce logiciel sans autorisation\n# written permission. préalable et particulière\n# par écrit.\n#\n# This file is part of the Ce fichier fait partie du projet\n# OpenCADC project. OpenCADC.\n#\n# OpenCADC is free software: OpenCADC est un logiciel libre ;\n# you can redistribute it and/or vous pouvez le redistribuer ou le\n# modify it under the terms of modifier suivant les termes de\n# the GNU Affero General Public la “GNU Affero General Public\n# License as published by the License” telle que publiée\n# Free Software Foundation, par la Free Software Foundation\n# either version 3 of the : soit la version 3 de cette\n# License, or (at your option) licence, soit (à votre gré)\n# any later version. toute version ultérieure.\n#\n# OpenCADC is distributed in the OpenCADC est distribué\n# hope that it will be useful, dans l’espoir qu’il vous\n# but WITHOUT ANY WARRANTY; sera utile, mais SANS AUCUNE\n# without even the implied GARANTIE : sans même la garantie\n# warranty of MERCHANTABILITY implicite de COMMERCIALISABILITÉ\n# or FITNESS FOR A PARTICULAR ni d’ADÉQUATION À UN OBJECTIF\n# PURPOSE. See the GNU Affero PARTICULIER. Consultez la Licence\n# General Public License for Générale Publique GNU Affero\n# more details. pour plus de détails.\n#\n# You should have received Vous devriez avoir reçu une\n# a copy of the GNU Affero copie de la Licence Générale\n# General Public License along Publique GNU Affero avec\n# with OpenCADC. 
If not, see OpenCADC ; si ce n’est\n# . pas le cas, consultez :\n# .\n#\n# $Revision: 4 $\n#\n# ***********************************************************************\n#\n\nimport importlib\nimport logging\nimport os\nimport sys\nimport traceback\n\nfrom caom2 import Observation, ProductType, DataProductType\nfrom caom2 import CalibrationLevel\nfrom caom2utils import ObsBlueprint, get_gen_proc_arg_parser, gen_proc\nfrom caom2pipe import manage_composable as mc\nfrom caom2pipe import execute_composable as ec\n\n\n__all__ = ['caom_main', 'update', 'AskapName', 'COLLECTION', 'APPLICATION']\n\n\nAPPLICATION = 'askap2caom2'\nCOLLECTION = 'ASKAP'\n\n\nclass AskapName(ec.StorageName):\n \"\"\"Naming rules:\n - support mixed-case file names and mixed-case obs id values\n \"\"\"\n\n ASKAP_NAME_PATTERN = '*'\n\n def __init__(self, fname_on_disk=None, file_name=None):\n self.fname_in_ad = file_name\n super(AskapName, self).__init__(\n obs_id=None, collection=COLLECTION,\n collection_pattern=AskapName.ASKAP_NAME_PATTERN,\n fname_on_disk=fname_on_disk)\n\n def is_valid(self):\n return True\n\n @property\n def file_uri(self):\n \"\"\"The external URI for the file.\"\"\"\n return '{}:{}/{}'.format(AskapName.scheme(), self.collection,\n self.file_name)\n\n @staticmethod\n def scheme():\n \"\"\"ASKAP schema - guessing.\"\"\"\n return 'casda'\n\n @staticmethod\n def get_obs_id(file_name):\n # based on the file names I've seen so far ....\n if file_name.startswith('image.restored.i'):\n result = file_name.split('.')[3]\n else:\n result = file_name.split('.')[2]\n return result\n\n @staticmethod\n def get_product_id(file_name):\n if file_name.startswith('component'):\n result = 'component_image'\n elif 'cont.taylor.0.restored' in file_name:\n if file_name.endswith('restored.components.csv'):\n result = 'fine_source_catalog'\n elif file_name.endswith('restored.islands.csv'):\n result = 'coarse_source_catalog'\n else:\n result = 'cont_taylor_0_restored'\n elif 'cont.taylor.0' in file_name:\n result = 'cont_taylor_0'\n elif 'cont.taylor.1.restored' in file_name:\n result = 'cont_taylor_1_restored'\n elif 'cont.taylor.1' in file_name:\n result = 'cont_taylor_1'\n elif 'restored' in file_name and 'contcube' in file_name:\n result = 'contcube_restored'\n elif 'contcube' in file_name:\n result = 'contcube'\n else:\n raise mc.CadcException(\n 'Could not guess product ID from file name {}'.format(\n file_name))\n return result\n\n\ndef accumulate_bp(bp, uri):\n \"\"\"Configure the telescope-specific ObsBlueprint at the CAOM model \n Observation level.\"\"\"\n logging.debug('Begin accumulate_bp.')\n # TODO - timezone is Z\n bp.set('Observation.metaRelease', '2018-10-12T03:11:35.015')\n\n bp.set('Plane.dataProductType', '_get_data_product_type(uri)')\n bp.set('Plane.calibrationLevel', '_get_calibration_level(uri)')\n bp.set('Plane.dataRelease', '2018-10-12T03:11:35.015')\n bp.set('Plane.metaRelease', '2018-10-12T03:11:35.015')\n\n # artifact level\n bp.clear('Artifact.productType')\n bp.set('Artifact.productType', '_get_product_type(uri)')\n\n # chunk level\n bp.configure_position_axes((1, 2))\n bp.configure_energy_axis(4)\n bp.configure_polarization_axis(3)\n\n # same as VLASS\n bp.clear('Chunk.position.axis.function.cd11')\n bp.clear('Chunk.position.axis.function.cd22')\n bp.add_fits_attribute('Chunk.position.axis.function.cd11', 'CDELT1')\n bp.set('Chunk.position.axis.function.cd12', 0.0)\n bp.set('Chunk.position.axis.function.cd21', 0.0)\n bp.add_fits_attribute('Chunk.position.axis.function.cd22', 'CDELT2')\n\n 
logging.debug('Done accumulate_bp.')\n\n\ndef update(observation, **kwargs):\n    \"\"\"Called to fill multiple CAOM model elements and/or attributes, must\n    have this signature for import_module loading and execution.\n\n    :param observation A CAOM Observation model instance.\n    :param **kwargs Everything else.\"\"\"\n    logging.debug('Begin update.')\n    mc.check_param(observation, Observation)\n\n    headers = None\n    if 'headers' in kwargs:\n        headers = kwargs['headers']\n    fqn = None\n    if 'fqn' in kwargs:\n        fqn = kwargs['fqn']\n\n    logging.debug('Done update.')\n    return observation\n\n\ndef _get_calibration_level(uri):\n    if 'selavy-image' in uri:\n        result = CalibrationLevel.ANALYSIS_PRODUCT\n    else:\n        result = CalibrationLevel.CALIBRATED\n    return result\n\n\ndef _get_data_product_type(uri):\n    if 'selavy-image' in uri:\n        result = DataProductType.CATALOG\n    else:\n        result = DataProductType.IMAGE\n    return result\n\n\ndef _get_product_type(uri):\n    if 'esidual' in uri:\n        result = ProductType.AUXILIARY\n    elif 'weights' in uri:\n        result = ProductType.WEIGHT\n    else:\n        result = ProductType.SCIENCE\n    return result\n\n\ndef _build_blueprints(uri):\n    \"\"\"This application relies on the caom2utils fits2caom2 ObsBlueprint\n    definition for mapping FITS file values to CAOM model element\n    attributes. This method builds the ASKAP blueprint for a single\n    artifact.\n\n    The blueprint handles the mapping of values with cardinality of 1:1\n    between the blueprint entries and the model attributes.\n\n    :param uri The artifact URI for the file to be processed.\"\"\"\n    module = importlib.import_module(__name__)\n    blueprint = ObsBlueprint(module=module)\n    accumulate_bp(blueprint, uri)\n    blueprints = {uri: blueprint}\n    return blueprints\n\n\ndef _get_uri(args):\n    result = None\n    if args.local:\n        if args.local[0].endswith('.jpg'):\n            pass\n        else:\n            result = args.local[0]\n    elif args.lineage:\n        temp_product_id, temp_uri = mc.decompose_lineage(args.lineage[0])\n        if temp_uri.endswith('.jpg'):\n            pass\n        else:\n            result = temp_uri\n    else:\n        raise mc.CadcException(\n            'Could not define uri from these args {}'.format(args))\n    return result\n\n\ndef caom_main():\n    args = get_gen_proc_arg_parser().parse_args()\n    try:\n        uri = _get_uri(args)\n        blueprints = _build_blueprints(uri)\n        gen_proc(args, blueprints)\n    except Exception as e:\n        logging.error('Failed {} execution for {}.'.format(APPLICATION, args))\n        tb = traceback.format_exc()\n        logging.error(tb)\n        sys.exit(-1)\n\n    logging.debug('Done {} processing.'.format(APPLICATION))\n", "repo_name": "opencadc/askap2caom2", "sub_path": "askap2caom2/main_app.py", "file_name": "main_app.py", "file_ext": "py", "file_size_in_byte": 10509, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "caom2pipe.execute_composable.StorageName", "line_number": 90, "usage_type": "attribute"}, {"api_name": "caom2pipe.execute_composable", "line_number": 90, "usage_type": "name"}, {"api_name": "caom2pipe.manage_composable.CadcException", "line_number": 149, "usage_type": "call"}, {"api_name": "caom2pipe.manage_composable", "line_number": 149, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 158, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 184, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 193, "usage_type": "call"}, {"api_name": "caom2pipe.manage_composable.check_param", "line_number": 194, "usage_type": "call"}, {"api_name": "caom2.Observation", "line_number": 194, 
"usage_type": "argument"}, {"api_name": "caom2pipe.manage_composable", "line_number": 194, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 203, "usage_type": "call"}, {"api_name": "caom2.CalibrationLevel.ANALYSIS_PRODUCT", "line_number": 209, "usage_type": "attribute"}, {"api_name": "caom2.CalibrationLevel", "line_number": 209, "usage_type": "name"}, {"api_name": "caom2.CalibrationLevel.CALIBRATED", "line_number": 211, "usage_type": "attribute"}, {"api_name": "caom2.CalibrationLevel", "line_number": 211, "usage_type": "name"}, {"api_name": "caom2.DataProductType.CATALOG", "line_number": 217, "usage_type": "attribute"}, {"api_name": "caom2.DataProductType", "line_number": 217, "usage_type": "name"}, {"api_name": "caom2.DataProductType.IMAGE", "line_number": 219, "usage_type": "attribute"}, {"api_name": "caom2.DataProductType", "line_number": 219, "usage_type": "name"}, {"api_name": "caom2.ProductType.AUXILIARY", "line_number": 225, "usage_type": "attribute"}, {"api_name": "caom2.ProductType", "line_number": 225, "usage_type": "name"}, {"api_name": "caom2.ProductType.WEIGHT", "line_number": 227, "usage_type": "attribute"}, {"api_name": "caom2.ProductType", "line_number": 227, "usage_type": "name"}, {"api_name": "caom2.ProductType.SCIENCE", "line_number": 229, "usage_type": "attribute"}, {"api_name": "caom2.ProductType", "line_number": 229, "usage_type": "name"}, {"api_name": "importlib.import_module", "line_number": 243, "usage_type": "call"}, {"api_name": "caom2utils.ObsBlueprint", "line_number": 244, "usage_type": "call"}, {"api_name": "caom2pipe.manage_composable.decompose_lineage", "line_number": 258, "usage_type": "call"}, {"api_name": "caom2pipe.manage_composable", "line_number": 258, "usage_type": "name"}, {"api_name": "caom2pipe.manage_composable.CadcException", "line_number": 264, "usage_type": "call"}, {"api_name": "caom2pipe.manage_composable", "line_number": 264, "usage_type": "name"}, {"api_name": "caom2utils.get_gen_proc_arg_parser", "line_number": 270, "usage_type": "call"}, {"api_name": "caom2utils.gen_proc", "line_number": 274, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 276, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 277, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 278, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 279, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 281, "usage_type": "call"}]} +{"seq_id": "74393981906", "text": "from pyrogram import filters\n\nfrom config import HANDLER, OWNER_ID\nfrom NekoUserBot import neko\n\n\n@neko.on_message(filters.command(\"cinfo\", prefixes=HANDLER) & filters.user(OWNER_ID))\nasync def cinfo(_, m):\n reply = m.reply_to_message\n if not reply:\n await m.reply_text(\"yoo! baka reply to channel\")\n return\n if not reply.sender_chat:\n await m.reply_text(\"yoo! 
baka reply to channel\")\n return\n if reply.sender_chat:\n message = await m.reply_text(\"information gathering!!!\")\n id = reply.sender_chat.id\n reply.sender_chat.type\n name = reply.sender_chat.title\n username = reply.sender_chat.username\n pfp = reply.sender_chat.photo\n if not pfp:\n text = f\"✪ **TYPE:** Channel\\n\\n\"\n text += f\"✪ **ID:** {id}\\n\\n\"\n text += f\"✪ **NAME:** {name}\\n\\n\"\n text += f\"✪ **USERNAME:** @{username}\\n\\n\"\n text += f\"✪ **MENTION:** [link](t.me/{username})\"\n await m.reply_text(text)\n await message.delete()\n return\n image = reply.sender_chat.photo\n if image:\n photo = await neko.download_media(image.big_file_id)\n text = f\"✪ **TYPE:** Channel\\n\\n\"\n text += f\"✪ **ID:** {id}\\n\\n\"\n text += f\"✪ **NAME:** {name}\\n\\n\"\n text += f\"✪ **USERNAME:** @{username}\\n\\n\"\n text += f\"✪ **MENTION:** [link](t.me/{username})\"\n await m.reply_photo(photo=photo, caption=(text))\n await message.delete()\n\n\nno_reply_user = \"\"\" ╒═══「 Appraisal results:」\n\n**ɪᴅ**: `{}`\n**ᴅᴄ**: `{}`\n**ғɪʀsᴛ ɴᴀᴍᴇ**: {}\n**ᴜsᴇʀɴᴀᴍᴇ**: @{}\n**ᴘᴇʀᴍᴀʟɪɴᴋ**: {}\n**ᴜsᴇʀʙɪᴏ**: {}\n\n**Meet Me Here🙈 @Besties_XD ✨🥀**\n\"\"\"\n\n\n@neko.on_message(filters.command(\"info\", prefixes=HANDLER) & filters.user(OWNER_ID))\nasync def info(_, m):\n m.reply_to_message\n if len(m.command) < 2:\n await m.reply_text(\"ɢɪᴠᴇ ᴍᴇ ɪᴅ\")\n return\n id_user = m.text.split(\" \")[1]\n msg = await m.reply_text(\"ɪɴғᴏʀᴍᴀᴛɪᴏɴ ɢᴀᴛʜᴇʀɪɴɢ!\")\n info = await neko.get_chat(id_user)\n if info.photo:\n file_id = info.photo.big_file_id\n photo = await neko.download_media(file_id)\n user_id = info.id\n first_name = info.first_name\n username = info.username\n user_bio = info.bio\n dc_id = info.dc_id\n user_link = f\"[link](tg://user?id={user_id})\"\n await m.reply_photo(\n photo=photo,\n caption=no_reply_user.format(\n user_id, dc_id, first_name, username, user_link, user_bio\n ),\n )\n elif not info.photo:\n user_id = info.id\n first_name = info.first_name\n username = info.username\n user_bio = info.bio\n dc_id = info.dc_id\n user_link = f\"[link](tg://user?id={user_id})\"\n await m.reply_text(\n text=no_reply_user.format(\n user_id, dc_id, first_name, username, user_link, user_bio\n )\n )\n await msg.delete()\n", "repo_name": "Awesome-Prince/NekoUserBot", "sub_path": "NekoUserBot/plugins/userinfo.py", "file_name": "userinfo.py", "file_ext": "py", "file_size_in_byte": 3058, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "48", "api": [{"api_name": "NekoUserBot.neko.download_media", "line_number": 34, "usage_type": "call"}, {"api_name": "NekoUserBot.neko", "line_number": 34, "usage_type": "name"}, {"api_name": "NekoUserBot.neko.on_message", "line_number": 7, "usage_type": "call"}, {"api_name": "NekoUserBot.neko", "line_number": 7, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 7, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 7, "usage_type": "name"}, {"api_name": "config.HANDLER", "line_number": 7, "usage_type": "name"}, {"api_name": "pyrogram.filters.user", "line_number": 7, "usage_type": "call"}, {"api_name": "config.OWNER_ID", "line_number": 7, "usage_type": "argument"}, {"api_name": "NekoUserBot.neko.get_chat", "line_number": 65, "usage_type": "call"}, {"api_name": "NekoUserBot.neko", "line_number": 65, "usage_type": "name"}, {"api_name": "NekoUserBot.neko.download_media", "line_number": 68, "usage_type": "call"}, {"api_name": "NekoUserBot.neko", "line_number": 68, "usage_type": 
"name"}, {"api_name": "NekoUserBot.neko.on_message", "line_number": 57, "usage_type": "call"}, {"api_name": "NekoUserBot.neko", "line_number": 57, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 57, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 57, "usage_type": "name"}, {"api_name": "config.HANDLER", "line_number": 57, "usage_type": "name"}, {"api_name": "pyrogram.filters.user", "line_number": 57, "usage_type": "call"}, {"api_name": "config.OWNER_ID", "line_number": 57, "usage_type": "argument"}]} +{"seq_id": "17575907024", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n\n\"\"\"\n@version: 0.1\n@author: Yang Reid\n@license: Apache Licence \n@contact: yangtao584@126.com\n@site: https://github.com/yangr5/python\n@software: PyCharm Community Edition\n@file: sqlite_test.py\n@time: 2018/8/23 9:43\n\"\"\"\n\nimport sqlite3\n\nconn = sqlite3.connect('labmangement.sqlite3')\n\n\ndef createTable():\n print(\"Opened database successfully\")\n c = conn.cursor()\n c.execute('''CREATE TABLE work\n (id INTEGER PRIMARY KEY NOT NULL,\n name TEXT NOT NULL,\n ar_found INT NOT NULL,\n ar_fix INT NOT NULL,\n script INT NOT NULL,\n cases INT NOT NULL,\n version INT NOT NULL);''')\n conn.commit()\n conn.close()\n print(\"Table created successfully\")\n\n\ndef insertData():\n c = conn.cursor()\n c.execute(\"INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \\\n VALUES (1, 'Paul', 32, 'California', 20000.00 )\")\n\n c.execute(\"INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \\\n VALUES (2, 'Allen', 25, 'Texas', 15000.00 )\")\n\n c.execute(\"INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \\\n VALUES (3, 'Teddy', 23, 'Norway', 20000.00 )\")\n\n c.execute(\"INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \\\n VALUES (4, 'Mark', 25, 'Rich-Mond ', 65000.00 )\")\n conn.commit()\n conn.close()\n print( \"Records created successfully\")\n\n\ndef selectData():\n c = conn.cursor()\n cursor = c.execute(\"SELECT id, name, address, salary from COMPANY\")\n for row in cursor:\n print (\"ID = \", row[0])\n print (\"NAME = \", row[1])\n print (\"ADDRESS = \", row[2])\n print (\"SALARY = \", row[3], \"\\n\")\n\n conn.commit()\n conn.close()\n print (\"Operation select done successfully\")\n\n\nif __name__ == \"__main__\":\n print ('This is main of module')\n createTable()\n\n\n", "repo_name": "A432-git/flask", "sub_path": "Lab_Redis_CRUD/app/sqlite_test.py", "file_name": "sqlite_test.py", "file_ext": "py", "file_size_in_byte": 1923, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "sqlite3.connect", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "30222433616", "text": "import os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException, StaleElementReferenceException\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.service import Service as ChromeService\nfrom concurrent.futures import ThreadPoolExecutor\nfrom datetime import datetime\nimport time\nimport math\nimport sys\n\n\ndef process_urls(url_chunk, start_index):\n options = webdriver.ChromeOptions()\n options.add_argument(\"--disable-media-source\")\n options.add_argument(\"--blink-settings=imagesEnabled=false\")\n 
options.add_argument(\"--disable-javascript\")\n    service = ChromeService(executable_path=ChromeDriverManager().install())\n    driver = webdriver.Chrome(service=service, options=options)\n\n    for url in url_chunk:\n        driver.get(url.strip())\n        champion = WebDriverWait(driver, 10).until(\n            EC.presence_of_element_located((By.XPATH, '//*[@id=\"champInfo\"]/div/div[1]/div[3]'))\n        )\n        champion_name = champion.text\n        champion_name = str(champion_name).split(',')[0]\n\n        print('Champion name : ', end='')\n        print(champion_name)\n        \n        driver.execute_script(\"var elems = document.querySelectorAll('.primisslate, .vm-placement'); for (var i = 0; i < elems.length; i++) { elems[i].remove(); }\")\n\n        button_xpath = \"/html/body/div[4]/div[1]/section/article/section[2]/div[2]/div/div/div[3]/div[2]/div[3]/span/span[3]/a\"\n        current_last_comment = 40\n        texts = []\n        continueable = True\n\n        counter = 0\n        j = 0\n\n        while continueable:\n            for i in range(1, current_last_comment + 1):\n                try:\n                    time.sleep(0.1)\n                    comment_xpath = f'/html/body/div[4]/div[1]/section/article/section[2]/div[2]/div/div/div[3]/div[2]/div[2]/table/tbody/tr[{i}]/td[2]/span/span'\n                    element = driver.find_element(By.XPATH, comment_xpath)\n                    texts.append(str(element.text).replace('\\n', ' '))\n                except NoSuchElementException:\n                    break\n            j=j+i\n            try:\n                counter += 1\n                # try to click the button\n                button = driver.find_element(By.XPATH, button_xpath)\n                if not button.get_attribute(\"href\"): # if the button has no href attribute, we cannot move to the next page\n                    continueable = False\n                else:\n                    button.click()\n                    time.sleep(2) # wait for the page to load after the click\n            except NoSuchElementException:\n                # if there is no button, we cannot move to the next page, so end the loop\n                print(f'{champion_name} click counter = {counter} / comments : {j}')\n                continueable = False\n        \n        # after collecting the comments, save them to a file\n        file_path = f\"C:\\\\anaconda\\\\aiProject\\\\lol_project\\\\inven_champ_url\\\\{start_index + url_chunk.index(url):03}_{champion_name}_repl.txt\"\n        with open(file_path, 'w', encoding='utf-8') as file:\n            for text in texts:\n                file.write(text + ' ')\n\n    driver.quit()\n\nif __name__ == \"__main__\":\n    # Get the list of all files in the directory.\n    files_in_directory = os.listdir(\"C:\\\\anaconda\\\\aiProject\\\\lol_project\\\\opgg_champ_url\\\\\")\n    # Keep only files named in the three-digit-number_character-name_repl.txt format.\n    filtered_files = [file for file in files_in_directory if file.endswith(\"_repl.txt\") and file.split(\"_\")[0].isdigit() and len(file.split(\"_\")[0]) == 3]\n\n    # Extract the index numbers.\n    existing_indices = [int(file.split(\"_\")[0]) for file in filtered_files]\n\n    with open(\"C:\\\\anaconda\\\\aiProject\\\\lol_project\\\\opgg_champ_url\\\\lol_addresses.txt\", \"r\") as file:\n        urls = [url.strip() for url in file.readlines()]\n\n    # Check for missing index numbers.\n    missing_indices = [i for i in range(len(urls)) if i not in existing_indices]\n    print(f'Missing indices {missing_indices}')\n\n    # Take only the URLs corresponding to the missing indices.\n    missing_urls = [urls[i] for i in missing_indices]\n\n    num_threads = 5\n    chunk_size = math.ceil(len(missing_urls) / num_threads)\n\n    # Split the missing-index URL list into equally sized chunks\n    url_chunks = [missing_urls[i:i + chunk_size] for i in range(0, len(missing_urls), chunk_size)]\n    \n    print('Starting process')\n    with ThreadPoolExecutor(max_workers=num_threads) as executor:\n        for index, chunk in enumerate(url_chunks):\n            # The start index of each chunk is the start index among the missing numbers.\n            executor.submit(process_urls, chunk, missing_indices[index * chunk_size])\n\n    print(f\"Program finished at {datetime.now()}\")\n", "repo_name": "begace/lolChampRecommendation", "sub_path": "lol_champ_recommendation_01_multi_inven.py", "file_name": "lol_champ_recommendation_01_multi_inven.py", "file_ext": "py", 
"file_size_in_byte": 5033, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "selenium.webdriver.ChromeOptions", "line_number": 17, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 17, "usage_type": "name"}, {"api_name": "selenium.webdriver.chrome.service.Service", "line_number": 21, "usage_type": "call"}, {"api_name": "webdriver_manager.chrome.ChromeDriverManager", "line_number": 21, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 22, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 22, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 26, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 27, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 27, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 27, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 27, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 50, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 50, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 52, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 58, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 58, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 63, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 64, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 79, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 97, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 108, "usage_type": "name"}]} +{"seq_id": "11736941993", "text": "from bs4 import BeautifulSoup\nimport requests\n\nurl = \"https://finance.naver.com/item/sise_day.nhn?code=068270&page=1\"\nresponse = requests.get(url, headers={'User-agent':'Mozilla/5.0'})\nsource = response.text\n#print(source)\n\nsoup = BeautifulSoup(source, 'lxml') #body > table.type2 > tbody > tr:nth-child(3) > td:nth-child(4) > span\nspan_today = soup.find('span', class_='tah p11')\n#print(span_today) #183,500\n#print(span_today.text) #183,500\n\n#(1) Get the last page\ntd_pgRR = soup.find('td', class_=\"pgRR\")\n#print(td_pgRR) #whole content including the td tag\n#print(td_pgRR.text) #prints the 'last' link text (맨뒤)\na_href = td_pgRR.a['href']\n#print(a_href) #/item/sise_day.nhn?code=068270&page=421\na_href_split_list = a_href.split(\"=\")\n#print(a_href_split_list) #['/item/sise_day.nhn?code', '068270&page', '421']\nlast_page = a_href_split_list[-1]\n#print(last_page)\n\n#(2) Read all the pages\nimport pandas as pd\ndf = pd.DataFrame()\nbase_url = \"https://finance.naver.com/item/sise_day.nhn?code=068270\"\nfor page in range(1, int(last_page)+1): #for page in range(1, 10): #up to page 10\n    url = \"{}&page={}\".format(base_url, page)\n    response = requests.get(url, headers={'User-agent':'Mozilla/5.0'})\n    source = 
response.text\n    html = pd.read_html(source, header=0)[0]\n    #print(html)\n    df = pd.concat([df, html])\n#print(df.to_string())\n\n#(3) Process the DataFrame\ndf = df.dropna()\n#print(df.to_string())\ndf = df.iloc[0:3000] # take only rows 0 through n\n#print(df.to_string())\ndf = df.sort_values(by='날짜') # ascending order by date\nprint(df.to_string())\n\n#(4) Draw the chart\nimport matplotlib.pyplot as plt\nplt.title(\"Celltrion Close\")\nplt.xticks(rotation=45)\nplt.plot(df['날짜'], df['종가'], 'ro-')\nplt.grid(color='gray', linestyle='--')\nplt.show()", "repo_name": "pjs845/test", "sub_path": "day06_crawling/D.py", "file_name": "D.py", "file_ext": "py", "file_size_in_byte": 1794, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 9, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.read_html", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "40291646931", "text": "import base64\nimport uuid\nfrom fastapi import APIRouter, Body\nfrom Project.Server.Utils.Image_Handler import Image_Converter\nfrom Project.Server.Controller.Levels import Add_Level,Delete_Old_Image,Check_Level, delete_Level_data, retrieve_all_Levels, retrieve_Level_by_id, update_Level\nfrom Project.Server.Models.Levels import Levels\nfrom fastapi.encoders import jsonable_encoder\n\nrouter = APIRouter()\n\n\n@router.post(\"/Add_Levels\", response_description=\"Add Level\")\nasync def add_Levels_data(schema: Levels = Body(...)):\n    schema = jsonable_encoder(schema)\n    Level =await Check_Level(schema)\n    if Level==False:\n        return {\"code\": 200, \"Msg\":\"Levels already exists\"}\n    if len(schema['IMAGE'])>0: \n        img_path= await Image_Converter(schema['IMAGE'])\n    else:\n        img_path=\"\"\n    schema['IMAGE'] = str(img_path)\n    Output = await Add_Level(schema)\n    return {\"code\": 200, \"Msg\": Output}\n\n\n@router.get(\"/Get_all_Levels\", response_description=\"Get all Levels\")\nasync def get_all_Levels():\n    Levels = await retrieve_all_Levels()\n    if Levels:\n        return {\"code\": 200, \"Data\": Levels}\n    return {\"Data\": Levels, \"Msg\": \"Empty list return\"}\n\n\n@router.get(\"/Get_Level_Data/{id}\", response_description=\"Get Level data by id\")\nasync def get_Level_data(id):\n    data = await retrieve_Level_by_id(id)\n    if data:\n        return {\"code\": 200, \"Data\": data}\n    return {\"Msg\": \"Id may not exist\"}\n\n\n@router.delete(\"/Delete/{id}\", response_description=\"Delete Level data by id\")\nasync def 
delete_Level(id: str):\n data = await delete_Level_data(id)\n if data:\n return {\"code\": 200, \"Msg\": data}\n return {\"Msg\": \"Id may not exist\"}\n\n\n@router.put(\"/Update/{id}\")\nasync def update_Level_data(id: str, req: Levels = Body(...)):\n req = jsonable_encoder(req)\n data = {}\n for i, j in req.items():\n\n if (type(j) == str or type(j) == int) and (len(str(j)) > 0):\n data[i] = j\n\n if \"IMAGE\" in data:\n if len(data[\"IMAGE\"]) != 0:\n # Del_img= await Delete_Old_Image(id)\n imagepath = await Image_Converter(data[\"IMAGE\"])\n data[\"IMAGE\"] = imagepath\n updated_Level = await update_Level(id, data)\n if updated_Level:\n return {\"code\": 200, \"Data\": \"Data updated Successfully\"}\n\n return {\n \"code\": 404, \"Data\": \"Something Went Wrong\"\n }\n", "repo_name": "PARTH370/fitness", "sub_path": "Project/Server/Views/Levels.py", "file_name": "Levels.py", "file_ext": "py", "file_size_in_byte": 2346, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "fastapi.APIRouter", "line_number": 9, "usage_type": "call"}, {"api_name": "Project.Server.Models.Levels.Levels", "line_number": 13, "usage_type": "name"}, {"api_name": "fastapi.Body", "line_number": 13, "usage_type": "call"}, {"api_name": "fastapi.encoders.jsonable_encoder", "line_number": 14, "usage_type": "call"}, {"api_name": "Project.Server.Controller.Levels.Check_Level", "line_number": 15, "usage_type": "call"}, {"api_name": "Project.Server.Utils.Image_Handler.Image_Converter", "line_number": 19, "usage_type": "call"}, {"api_name": "Project.Server.Controller.Levels.Add_Level", "line_number": 23, "usage_type": "call"}, {"api_name": "Project.Server.Models.Levels.Levels", "line_number": 29, "usage_type": "name"}, {"api_name": "Project.Server.Controller.Levels.retrieve_all_Levels", "line_number": 29, "usage_type": "call"}, {"api_name": "Project.Server.Models.Levels.Levels", "line_number": 30, "usage_type": "name"}, {"api_name": "Project.Server.Models.Levels.Levels", "line_number": 31, "usage_type": "name"}, {"api_name": "Project.Server.Models.Levels.Levels", "line_number": 32, "usage_type": "name"}, {"api_name": "Project.Server.Controller.Levels.retrieve_Level_by_id", "line_number": 37, "usage_type": "call"}, {"api_name": "Project.Server.Controller.Levels.delete_Level_data", "line_number": 45, "usage_type": "call"}, {"api_name": "Project.Server.Models.Levels.Levels", "line_number": 52, "usage_type": "name"}, {"api_name": "fastapi.Body", "line_number": 52, "usage_type": "call"}, {"api_name": "fastapi.encoders.jsonable_encoder", "line_number": 53, "usage_type": "call"}, {"api_name": "Project.Server.Utils.Image_Handler.Image_Converter", "line_number": 63, "usage_type": "call"}, {"api_name": "Project.Server.Controller.Levels.update_Level", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "6032595094", "text": "#!/usr/bin/python\n\nimport fileinput\nimport math\nimport collections\n\n\ndef getOreCount(reactions, target, amount):\n created = collections.defaultdict(int) \n return getOreRequirement(reactions, target, amount, created)\n \ndef getOreRequirement(reactions, target, amount, created):\n generatedOre = 0\n if target == \"ORE\":\n created[\"ORE\"] += amount\n return amount\n else:\n # Do we already have the amount required?\n if created[target] >= amount:\n return 0\n\n # How much extra stuff do we need\n recipe = reactions[target]\n required = amount - created[target]\n multiplier = math.ceil(required / recipe[0]) \n \n for part in 
recipe[1]:\n            newAmount = part[0]*multiplier\n            generatedOre += getOreRequirement(reactions, part[1], newAmount, created)\n            # Remember that we've consumed whatever we've needed before next req\n            created[part[1]] -= (part[0]*multiplier)\n        \n        # And we produce our stuff\n        created[target] += (recipe[0]*multiplier)\n        return generatedOre\n    \ndef main():\n    reactions = {}\n    rstr = list(fileinput.input())\n    for r in rstr:\n        r = r.strip().split(' => ')\n\n        ingredients = []\n        for ingredient in r[0].split(', '):\n            q, w = ingredient.split(' ')\n            ingredients.append((int(q), w))\n        \n        # Resultant\n        q, w = r[1].split(' ')\n        reactions[w] = (int(q), ingredients)\n\n    # Part 1\n    print(getOreCount(reactions, \"FUEL\", 1))\n\n    # Part 2\n    # Take some extreme bounds and binary search, checking if too much ore is used\n    lower = 1\n    upper = 1000000000000\n\n    while True:\n        middle = int((lower+upper) // 2)\n        \n        if getOreCount(reactions, \"FUEL\", middle) <= 1000000000000: \n            # Success. Let middle be our new lower, but check if done first\n            if lower == middle:\n                # Solved\n                print(lower)\n                break\n\n            lower = middle\n        else:\n            # Oh dear. Let middle be our new upper\n            upper = middle\n        \nif __name__ == \"__main__\":\n    main()\n", "repo_name": "Dox5/advent-of-code", "sub_path": "treg/day14.py", "file_name": "day14.py", "file_ext": "py", "file_size_in_byte": 2194, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "collections.defaultdict", "line_number": 9, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 25, "usage_type": "call"}, {"api_name": "fileinput.input", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "32303651721", "text": "import json\nfrom typing import Any, Dict\n\n\n_EVENT_S3_OBJECT_CREATED: Dict[str, Any] = {\n    \"version\": \"0\",\n    \"id\": \"17793124-05d4-b198-2fde-7ededc63b103\",\n    \"detail-type\": \"Object Created\",\n    \"source\": \"aws.s3\",\n    \"account\": \"123456789012\",\n    \"time\": \"2021-11-12T00:00:00Z\",\n    \"region\": None,\n    \"resources\": [],\n    \"detail\": None,\n}\n\n\ndef send_notification(\n    source: str, event_name: str, region: str, resources: Any, detail: Any\n) -> None:\n    try:\n        _send_safe_notification(source, event_name, region, resources, detail)\n    except: # noqa\n        # If anything goes wrong, we should never fail\n        pass\n\n\ndef _send_safe_notification(\n    source: str, event_name: str, region: str, resources: Any, detail: Any\n) -> None:\n    from .models import events_backends\n\n    event = None\n    if source == \"aws.s3\" and event_name == \"CreateBucket\":\n        event = _EVENT_S3_OBJECT_CREATED.copy()\n        event[\"region\"] = region\n        event[\"resources\"] = resources\n        event[\"detail\"] = detail\n\n    if event is None:\n        return\n\n    for account_id, account in events_backends.items():\n        for backend in account.values():\n            applicable_targets = []\n            for event_bus in backend.event_buses.values():\n                for rule in event_bus.rules.values():\n                    if rule.state != \"ENABLED\":\n                        continue\n                    pattern = rule.event_pattern.get_pattern()\n                    if source in pattern.get(\"source\", []):\n                        if event_name in pattern.get(\"detail\", {}).get(\"eventName\", []):\n                            applicable_targets.extend(rule.targets)\n\n            for target in applicable_targets:\n                if target.get(\"Arn\", \"\").startswith(\"arn:aws:lambda\"):\n                    _invoke_lambda(account_id, target.get(\"Arn\"), event=event)\n\n\ndef _invoke_lambda(account_id: str, fn_arn: str, event: Any) -> None:\n    from moto.awslambda import lambda_backends\n\n    lmbda_region = fn_arn.split(\":\")[3]\n\n    body = 
json.dumps(event)\n lambda_backends[account_id][lmbda_region].invoke(\n function_name=fn_arn,\n qualifier=None,\n body=body,\n headers=dict(),\n response_headers=dict(),\n )\n", "repo_name": "getmoto/moto", "sub_path": "moto/events/notifications.py", "file_name": "notifications.py", "file_ext": "py", "file_size_in_byte": 2247, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7174, "dataset": "github-code", "pt": "48", "api": [{"api_name": "typing.Dict", "line_number": 5, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 5, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 29, "usage_type": "name"}, {"api_name": "models.events_backends.items", "line_number": 43, "usage_type": "call"}, {"api_name": "models.events_backends", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 60, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 65, "usage_type": "call"}, {"api_name": "moto.awslambda.lambda_backends", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "783210881", "text": "# -*- coding: utf-8 -*-\n'''\n.. _module_mc_autoupgrade:\n\nmc_autoupgrade / packages autoupgrade\n============================================\n\n\n'''\n# Import python libs\nimport logging\nimport mc_states.utils\n\n__name = 'autoupgrade'\n\nlog = logging.getLogger(__name__)\n\n\ndef settings():\n '''\n autoupgrade registry\n\n '''\n @mc_states.utils.lazy_subregistry_get(__salt__, __name)\n def _settings():\n _s = __salt__\n grains = __grains__\n pillar = __pillar__\n locations = __salt__['mc_locations.settings']()\n origins = []\n if grains['os'] in ['Debian']:\n origins.append(\"Debian:stable\")\n origins.append(\"${distro_id}:${distro_codename}-security\")\n data = _s['mc_utils.defaults'](\n 'makina-states.localsettings.autoupgrade', {\n 'enable': True,\n \"unattended\": {\n \"activated\": \"1\",\n \"autoclean\": \"7\",\n \"DownloadUpgradeablePackages\": \"1\",\n \"UpdatePackageLists\": \"1\",\n \"mail_on_error\": \"true\",\n \"remove_unused\": \"false\",\n \"mail\": \"root\",\n \"autofix\": \"true\",\n 'blacklist': [\n ],\n 'origins': origins,\n }\n }\n )\n return data\n return _settings()\n\n\ndef dump():\n return mc_states.utils.dump(__salt__,__name)\n\n#\n", "repo_name": "jpcw/makina-states", "sub_path": "mc_states/modules/mc_autoupgrade.py", "file_name": "mc_autoupgrade.py", "file_ext": "py", "file_size_in_byte": 1464, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "mc_states.utils.utils.lazy_subregistry_get", "line_number": 24, "usage_type": "call"}, {"api_name": "mc_states.utils.utils", "line_number": 24, "usage_type": "attribute"}, {"api_name": "mc_states.utils", "line_number": 24, "usage_type": "name"}, {"api_name": "mc_states.utils.utils.dump", "line_number": 57, "usage_type": "call"}, {"api_name": "mc_states.utils.utils", "line_number": 57, "usage_type": "attribute"}, {"api_name": "mc_states.utils", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "29305194802", "text": "import pytest\n\nfrom HartreeParticleDSL.Particle_IR.nodes.literal import Literal\nfrom HartreeParticleDSL.Particle_IR.datatypes.datatype import FLOAT_TYPE, INT_TYPE, BOOL_TYPE\n\ndef test_literal_init_bad_inputs():\n\n with pytest.raises(TypeError) as excinfo:\n Literal(123, INT_TYPE)\n assert (\"Literal value 
must be a string but supplied.\" in\n str(excinfo.value))\n\n with pytest.raises(TypeError) as excinfo:\n Literal(\"123\", 123)\n assert(\"Literal datatype must be a ScalarType but \"\n \"supplied.\" in str(excinfo.value))\n\ndef test_literal_init_bad_int():\n with pytest.raises(ValueError) as excinfo:\n Literal(\"abc\", INT_TYPE)\n assert(\"Constructing integer Literal but got a value of 'abc' instead \"\n \"of an integer value.\" in str(excinfo.value))\n with pytest.raises(ValueError) as excinfo:\n Literal(\"012\", INT_TYPE)\n assert(\"Constructing integer Literal but got a value of '012' instead \"\n \"of an integer value.\" in str(excinfo.value))\n\ndef test_literal_init_bad_float():\n with pytest.raises(ValueError) as excinfo:\n Literal(\"123.45F19\", FLOAT_TYPE)\n assert(\"Constructing float Literal but got a value of '123.45F19' \"\n \"instead of a float value.\" in str(excinfo.value))\n\ndef test_literal_init_bad_bool():\n with pytest.raises(ValueError) as excinfo:\n Literal(\"notTrue\", BOOL_TYPE)\n assert(\"Constructing boolean Literal but got a value of 'notTrue' \"\n \"instead of True or False.\" in str(excinfo.value))\n\ndef test_valid_literals():\n Literal(\"True\", BOOL_TYPE)\n Literal(\"False\", BOOL_TYPE)\n\n Literal(\"1\", INT_TYPE)\n Literal(\"-1\", INT_TYPE)\n Literal(\"12345\", INT_TYPE)\n Literal(\"67890\", INT_TYPE)\n\n Literal(\"1\", FLOAT_TYPE)\n x = Literal(\"1E10\", FLOAT_TYPE)\n assert x.value == \"1e10\"\n Literal(\"1.234\", FLOAT_TYPE)\n Literal(\"-0.255\", FLOAT_TYPE)\n Literal(\"123.45678e12345\", FLOAT_TYPE)\n Literal(\"-12.985e-13\", FLOAT_TYPE)\n\ndef test_literal_nodestr():\n x = Literal(\"1e10\", FLOAT_TYPE)\n correct = \"Literal['1e10', Scalar]\"\n assert correct == x.node_str()\n", "repo_name": "stfc/HartreeParticleDSL", "sub_path": "src/HartreeParticleDSL/test/Particle_IR/nodes/test_literal.py", "file_name": "test_literal.py", "file_ext": "py", "file_size_in_byte": 2168, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "48", "api": [{"api_name": "pytest.raises", "line_number": 8, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 9, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.INT_TYPE", "line_number": 9, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 13, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 14, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 19, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 20, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.INT_TYPE", "line_number": 20, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 23, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 24, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.INT_TYPE", "line_number": 24, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 29, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 30, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.FLOAT_TYPE", "line_number": 30, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 35, "usage_type": "call"}, {"api_name": 
"HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 36, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.BOOL_TYPE", "line_number": 36, "usage_type": "argument"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 41, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.BOOL_TYPE", "line_number": 41, "usage_type": "argument"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 42, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.BOOL_TYPE", "line_number": 42, "usage_type": "argument"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 44, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.INT_TYPE", "line_number": 44, "usage_type": "argument"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 45, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.INT_TYPE", "line_number": 45, "usage_type": "argument"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 46, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.INT_TYPE", "line_number": 46, "usage_type": "argument"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 47, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.INT_TYPE", "line_number": 47, "usage_type": "argument"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 49, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.FLOAT_TYPE", "line_number": 49, "usage_type": "argument"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 50, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.FLOAT_TYPE", "line_number": 50, "usage_type": "argument"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 52, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.FLOAT_TYPE", "line_number": 52, "usage_type": "argument"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 53, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.FLOAT_TYPE", "line_number": 53, "usage_type": "argument"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 54, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.FLOAT_TYPE", "line_number": 54, "usage_type": "argument"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 55, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.FLOAT_TYPE", "line_number": 55, "usage_type": "argument"}, {"api_name": "HartreeParticleDSL.Particle_IR.nodes.literal.Literal", "line_number": 58, "usage_type": "call"}, {"api_name": "HartreeParticleDSL.Particle_IR.datatypes.datatype.FLOAT_TYPE", "line_number": 58, "usage_type": "argument"}]} +{"seq_id": "73300863504", "text": "import json\r\n\r\n#saglabāto datu izmantošana katrai lietošanas reizei\r\nwith open(\"putnudati.json\", \"r\", encoding = 'utf-8') as fails:\r\n dati = fails.read()\r\n\r\nputnudati = json.loads(dati)\r\n#datu nolasīšanas beigas\r\n\r\ndef izvelne():\r\n 
print(\"\\nSveiciens, putnu vērotāj!\\nKādu darbību veiksi?\")\r\n print(\"1: parādīt visus dienasgrāmatas ierakstus\")\r\n print(\"2: pievienot jaunu ierakstu\")\r\n print(\"3: meklēt dienasgrāmatas ierakstu\")\r\n print(\"4: saglabāt un beigt darbu\")\r\n\r\n #sākas datu ievade\r\n darbiba = input(\"\\nIevadi skaitli!\")\r\n #beidzas datu ievade\r\n\r\n if darbiba == \"1\":\r\n ieraksti()\r\n elif darbiba == \"2\":\r\n pievienot()\r\n elif darbiba == \"3\":\r\n meklesana()\r\n elif darbiba == \"4\":\r\n beigas()\r\n else:\r\n print(\"\\nIzvēlies vienu no minētajiem skaitļiem!\\n\")\r\n izvelne()\r\n\r\ndef ieraksti():\r\n\r\n #visi dati tiek parādīti\r\n print(\"\\nVisi ieraksti:\\n\")\r\n for putns in putnudati:\r\n for p in putns:\r\n print(f'{p}: {putns[p]}')\r\n print()\r\n #koda beigas\r\n\r\n izvelne()\r\n\r\n\r\ndef pievienot():\r\n putns = {} \r\n\r\n #sākas datu ievade \r\n nosaukumsLV = input(\"\\nIevadi putna latvisko nosaukumu!\")\r\n nosaukumsLAT = input(\"Ievadi putna latīnisko nosaukumu!\") \r\n datums = input(\"Ievadi novērojuma datumu!\")\r\n laiks = input(\"Ievadi novērojuma laiku!\")\r\n vieta = input(\"Ievadi novērojuma vietu!\")\r\n piezimes = input(\"Ievadi novērojuma piezīmes!\")\r\n #beidzas datu ievade\r\n\r\n #dati tiek saglabāti masīvā\r\n putns[\"nosaukumsLV\"] = nosaukumsLV\r\n putns[\"nosaukumsLAT\"] = nosaukumsLAT\r\n putns[\"datums\"] = datums\r\n putns[\"laiks\"] = laiks\r\n putns[\"vieta\"] = vieta\r\n putns[\"piezimes\"] = piezimes\r\n\r\n putnudati.append(putns)\r\n #beidzas datu saglabāšana\r\n\r\n print(\"\\nNovērojums ir pievienots dienasgrāmatai\\n\")\r\n\r\n meklesana(nosaukumsLV)\r\n\r\n izvelne()\r\n\r\n\r\n#meklētie dati tiek parādīti\r\ndef meklesana(*args):\r\n if len(args)==0:\r\n\r\n #sākas datu ievade\r\n nosaukums = input(\"\\nIevadi putna latvisko nosaukumu!\")\r\n #beidzas datu ievade\r\n\r\n else:\r\n nosaukums = args[0]\r\n nosaukums = nosaukums.capitalize()\r\n atrasts = False\r\n for p in putnudati:\r\n if nosaukums in p[\"nosaukumsLV\"]:\r\n print(\"\\nPutns ir atrasts\\n\")\r\n atrasts = True\r\n for putns in p:\r\n print(f\"{putns}: {p[putns]}\")\r\n#koda beigas\r\n\r\n if not atrasts:\r\n print(\"\\nIeraksti par putnu nav atrasti\\n\")\r\n izvelne()\r\n\r\n\r\ndef beigas():\r\n\r\n #dati tiek saglabāti JSON failā\r\n dati = json.dumps(putnudati, ensure_ascii = False)\r\n with open(\"putnudati.json\", \"w\", encoding = 'utf-8') as fails:\r\n fails.write(dati)\r\n #saglabāšana beidzas\r\n\r\n print(\"\\nIeraksti ir saglabāti dienasgrāmatā.\\nUz tikšanos, putnu vērotāj!\\n\")\r\n\r\n\r\nizvelne()", "repo_name": "xtarnation/nosleguma-darbs-putnu-verosana", "sub_path": "42-P-nosleguma-darbs.py", "file_name": "42-P-nosleguma-darbs.py", "file_ext": "py", "file_size_in_byte": 2917, "program_lang": "python", "lang": "lv", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "json.loads", "line_number": 7, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "23514090027", "text": "\"\"\"Commonly used utilities\"\"\"\n\nfrom multiprocessing.pool import ThreadPool\nfrom PIL import ImageFile\nimport re\nfrom typing import List, Optional\nfrom urllib.request import urlopen\n\nfrom bs4 import BeautifulSoup\nimport requests\n\nfrom article import Image\n\n\ndef normalize_text(text: str) -> str:\n return ' '.join(text.split('\\n'))\n\n\ndef fetch_image_dimensions(url: str) -> Optional[Image]:\n \"\"\"Fetches the dimensions of an image without downloading 
it.\"\"\"\n    try:\n        file = urlopen(url)\n    except Exception as e:\n        pass\n    else:\n        p = ImageFile.Parser()\n        while True:\n            data = file.read(1024)\n            if not data:\n                break\n            p.feed(data)\n            if p.image:\n                file.close()\n                return Image(url, p.image.size)\n        file.close()\n\n\ndef fetch_images_dimensions(image_urls: List[str]) -> List[Optional[Image]]:\n    \"\"\"Fetches the dimensions for a given batch of image urls\"\"\"\n    return [\n        result for result in ThreadPool(20).imap_unordered(\n            fetch_image_dimensions, image_urls) if result\n    ]\n\n\ndef filter_images(images: List[Image]) -> List[Image]:\n    \"\"\"Filters images\"\"\"\n    filtered_by_size = size_filter(images)\n    return filtered_by_size\n\n\ndef size_filter(images: List[Image],\n                widht: int = 25,\n                height: int = 25) -> List[Image]:\n    \"\"\"Keeps only images whose width and height are at least the specified minimums\"\"\"\n    return [\n        image for image in images\n        if image.dimensions[0] >= widht and image.dimensions[1] >= height\n    ]\n\n\ndef sort_by_dims(images: List[Image]) -> List[Image]:\n    \"\"\"Sorts the list of images by dimensions\"\"\"\n    return sorted(images,\n                  key=lambda image:\n                  (image.dimensions[0] * image.dimensions[1]),\n                  reverse=True)\n\n\ndef scrap_image_urls(url: str) -> List[str]:\n    \"\"\"Fetches all supported image formats (JPG, PNG and JPEG) present on a URL\"\"\"\n    response = requests.get(url)\n    soup = BeautifulSoup(response.content, 'html.parser')\n    images = soup.find_all('img')\n    regex = \"(?P<url>https?://[^\\\s]+)\"\n    urls = [\n        re.search(regex, str(url)).group(\"url\").rstrip('\"') for url in images\n        if re.search(regex, str(url))\n    ]\n    return list(\n        filter(lambda url: 'jpg' in url or 'jpeg' in url or 'png' in url,\n               urls))\n", "repo_name": "usamajamil43/Summerizing-API", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2390, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "urllib.request.urlopen", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.ImageFile.Parser", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.ImageFile", "line_number": 26, "usage_type": "name"}, {"api_name": "article.Image", "line_number": 34, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 19, "usage_type": "name"}, {"api_name": "article.Image", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 38, "usage_type": "name"}, {"api_name": "multiprocessing.pool.ThreadPool", "line_number": 41, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 38, "usage_type": "name"}, {"api_name": "article.Image", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 46, "usage_type": "name"}, {"api_name": "article.Image", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 52, "usage_type": "name"}, {"api_name": "article.Image", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 54, "usage_type": "name"}, {"api_name": "article.Image", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 62, "usage_type": "name"}, {"api_name": "article.Image", "line_number": 62, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 72, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 73, "usage_type": "call"}, {"api_name": "re.search", "line_number": 77, "usage_type": "call"}, {"api_name": "re.search", "line_number": 78, "usage_type": 
"call"}, {"api_name": "typing.List", "line_number": 70, "usage_type": "name"}]} +{"seq_id": "40902839142", "text": "import argparse\nimport os\n\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder, MinMaxScaler\nfrom sklearn.compose import make_column_transformer\nfrom sklearn.impute import SimpleImputer\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Processing configuration\nNUMERICAL_COLUMNS = ['Pickup_longitude', 'Pickup_latitude', 'Dropoff_longitude', \n 'Dropoff_latitude', 'Passenger_count', 'Total_amount', 'Trip_distance']\n\nCOLUMNS_TO_DROP = ['Payment_type', 'Trip_type ', 'Fare_amount', \n 'Extra', 'MTA_tax','Tip_amount', 'Tolls_amount', \n 'Ehail_fee', 'Store_and_fwd_flag', 'RateCodeID', \n 'lpep_pickup_datetime', 'Lpep_dropoff_datetime'] # columns in original dataset which will be dropped\n\nCATEGORICAL_COLUMNS = ['VendorID'] # columns which will be replaced with \"one-hot encoded\" columns\n\nINPUT_PATH = \"/opt/ml/processing/input/data\"\nOUTPUT_PATH = \"/opt/ml/processing/output/data\" \n\n\ndef _get_data_files(extension=\".csv\", input_path=INPUT_PATH):\n \"\"\"\n Get individual files uploaded to processing nodes.\n Files are stored in INPUT_PATH by default.\n \"\"\"\n \n files = []\n \n print(os.listdir(INPUT_PATH))\n \n for file in os.listdir(input_path):\n print(file)\n if file.endswith(extension):\n files.append(os.path.join(input_path, file))\n \n return files\n \n \n\ndef _process_file(fpath):\n \"\"\"\n - read file into Pandas dataframe;\n - drop undesired columns;\n - perform one-hot encoding on categorical features;\n - standartize numerical features\n \"\"\"\n print(fpath)\n \n # read input file\n dfcolumns = pd.read_csv(fpath, nrows=1)\n df = pd.read_csv(fpath,header = None, skiprows = 1, \n usecols = list(range(len(dfcolumns.columns))), \n names = dfcolumns.columns)\n \n # Process individual file\n df = df.drop(COLUMNS_TO_DROP, axis=1)\n \n preprocess = make_column_transformer(\n (CATEGORICAL_COLUMNS, OneHotEncoder()),\n (NUMERICAL_COLUMNS, StandardScaler())\n ) \n processed_np = preprocess.fit_transform(df)\n \n # Create a new DataFrame with processed values\n new_columns = ['Vendor_1', 'Vendor_2'] + NUMERICAL_COLUMNS\n processed_df = pd.DataFrame(processed_np, columns=new_columns)\n \n # Saving processed dataframe locally\n fname = os.path.basename(fpath)\n processed_fname = f\"processed_{fname}\" # adding prefix to identify processed files\n processed_fpath = os.path.join(OUTPUT_PATH, processed_fname)\n processed_df.to_csv(processed_fpath)\n print(f\"File {fname} has been processed and saved.\")\n \n\ndef main():\n \"\"\"\n Main processing method\n \"\"\" \n input_files = _get_data_files()\n total_files = len(input_files)\n skipped_files = 0\n processed_files = 0\n print(f\"{total_files} are queued for processing.\")\n \n for counter, file in enumerate(input_files):\n try:\n print(f\"Processing file {file}\")\n _process_file(file)\n processed_files += 1\n except Exception as e:\n print(e)\n print(f\"File {file} cannot be processed. Skipping it...\")\n skipped_files += 1\n \n print(f\"{processed_files} file(s) out of {total_files} total number are processed. 
{skipped_files} files were skipped due to processing errors.\") \n\nif __name__==\"__main__\":\n    print(\"Starting processing.\")\n    main()\n    print(\"Processing completed.\")\n    \n    \n", "repo_name": "vdabravolski/ml_immersion_day", "sub_path": "Lab 4/feature_processing.py", "file_name": "feature_processing.py", "file_ext": "py", "file_size_in_byte": 3540, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "warnings.filterwarnings", "line_number": 10, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 57, "usage_type": "call"}, {"api_name": "sklearn.compose.make_column_transformer", "line_number": 64, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 65, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 66, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}]} +{"seq_id": "10445626320", "text": "#!/usr/bin/env python3\n# Simple photo booth software\n# using Polaroid PoGo, webcam (opencv), and input buttons (via MakeyMakey)\n\nimport cv2\nimport os, sys\nimport time, datetime\n\nimport polaroid\n\nWEBCAM_DEVICE = 1\nSTORAGE_DIR = 'storage'\n\n# NOTE: printing ratio may be different; may need to crop\n\n\ndef putText(img, text, location, positive=True):\n    \"\"\"UI helper function for adding text to image\"\"\"\n    font = cv2.FONT_HERSHEY_DUPLEX\n    fsize = 2\n    colour = (0, 255, 0) if positive else (255, 0, 0)\n    if location == 'left_button':\n        cv2.putText(img, text, (0, img.shape[0] - 10),\n                    font, fsize, colour)\n    elif location == 'right_button':\n        cv2.putText(\n            img, text,\n            (int(img.shape[1] - 40*len(text)), int(img.shape[0] - 10)),\n            font, fsize, colour)\n    elif location == 'centre':\n        cv2.putText(\n            img, text,\n            (int(img.shape[1] / 2 - 20*len(text)), int(img.shape[0] / 2)),\n            font, fsize, colour)\n\n\nif __name__ == '__main__':\n    # open bluetooth connection (keep open)\n    printer = polaroid.Polaroid()\n    printer.connect()\n\n    # open webcam and UI\n    cap = cv2.VideoCapture(WEBCAM_DEVICE)\n    screen = 'photo_booth'\n    cv2.namedWindow(screen, cv2.WND_PROP_FULLSCREEN)\n\n    # show webcam and wait for user input\n    while True:\n        ret, frame = cap.read()\n        img = frame\n        img_ui = img.copy()\n        putText(img_ui, \"TAKE\", 'left_button', True)\n        cv2.imshow(screen, img_ui)\n        keypress = cv2.waitKey(10)\n\n        if keypress == 32: # space: take photo\n            # take snapshot, wait for user input\n            img_ui = img.copy()\n            putText(img_ui, \"PRINT\", 'left_button', True)\n            putText(img_ui, \"RETAKE\", 'right_button', False)\n            cv2.imshow(screen, img_ui)\n            keypress = cv2.waitKey(0)\n\n            if keypress == 32: # space: print\n                # save snapshot, send to printer\n                datestr = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n                filename = os.path.join(os.getcwd(), STORAGE_DIR, datestr + 
\".jpg\")\n cv2.imwrite(filename, img)\n\n # send to printer\n printer.send_image(filename)\n\n # countdown while printing (scientifically proven waiting time)\n for i in range(48, 0, -1):\n time.sleep(1)\n img_ui = img.copy()\n putText(img_ui, \"PRINTING ({})\".format(i), 'centre', True)\n cv2.imshow(screen, img_ui)\n cv2.waitKey(1)\n else:\n continue # retake\n\n if keypress == 27: # ESC: exit\n break\n", "repo_name": "turiphro/photobooth", "sub_path": "src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2719, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "48", "api": [{"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 31, "usage_type": "call"}, {"api_name": "polaroid.Polaroid", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.WND_PROP_FULLSCREEN", "line_number": 45, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 68, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 75, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 78, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "26892087182", "text": "#Some general functions\nimport random\nimport numpy as np\nfrom scipy.signal import fftconvolve\nfrom consts import *\nfrom classes import *\n\ndef drive(t, param):\n return param.hdc + param.amp * np.cos(param.omega * t)\n\ndef sample(param, sampling, seed):\n \"\"\"\n Different phase space sampling schemes for the initial state,\n hardcoded as a fully polarized product state\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n N = param.latsize\n sx_init = np.ones(N)\n if sampling == \"spr\":\n #According to Schachenmayer, the wigner function of the quantum\n #state generates the below initial conditions classically\n sy_init = 2.0 * np.random.randint(0,2, size=N) - 1.0\n sz_init = 2.0 * np.random.randint(0,2, size=N) - 1.0\n #Set initial conditions for the dynamics locally to vector\n #s_init and store it as [s^x,s^x,s^x, .... 
s^y,s^y,s^y ...,\n    #s^z,s^z,s^z, ...]\n        s_init_spins = np.concatenate((sx_init, sy_init, sz_init))\n    elif sampling == \"1-0\":\n        spin_choices = np.array([(1, 1,0),(1, 0,1),(1, -1,0),(1, 0,-1)])\n        spins = np.array([random.choice(spin_choices) for i in xrange(N)])\n        s_init_spins = spins.T.flatten()\n    elif sampling == \"all\":\n        spin_choices_spr = np.array([(1, 1,1),(1, 1,-1),(1, -1,1),(1, -1,-1)])\n        spin_choices_10 = np.array([(1, 1,0),(1, 0,1),(1, -1,0),(1, 0,-1)])\n        spin_choices = np.concatenate((spin_choices_10, spin_choices_spr))\n        spins = np.array([random.choice(spin_choices) for i in xrange(N)])\n        s_init_spins = spins.T.flatten()\n    else:\n        pass\n    # Set initial correlations to 0.\n    s_init_corrs = np.zeros(9*N*N)\n    return s_init_spins, s_init_corrs\n\ndef bbgky_observables(t_output, s, params):\n    N = params.latsize\n    \"\"\"\n    Compute expectations <s> and \\sum_{ij} <s_i s_j> - <s>^2 with\n    wigner func at t_output values LOCALLY for each initcond and\n    return them as an 'OutData' object. This assumes bbgky routine.\n    For dtwa only, the observables are coded inline\n    \"\"\"\n    sx_expct = np.sum(s[:, 0:N], axis=1)\n    sy_expct = np.sum(s[:, N:2*N], axis=1)\n    sz_expct = np.sum(s[:, 2*N:3*N], axis=1)\n\n    #svec is the tensor s^l_\\mu\n    #G = s[3*N:].reshape(3,3,N,N) is the tensor g^{ab}_{\\mu\\nu}.\n    sview = s.view()\n    gt = sview[:, 3*N:].reshape(s.shape[0], 3, 3, N, N)\n    gt[:,:,:,range(N),range(N)] = 0.0 #Set the diagonals of g_munu to 0\n    #Quantum spin variance\n    sx_var = np.sum(gt[:,0,0,:,:], axis=(-1,-2))\n    sx_var += (np.sum(s[:, 0:N], axis=1)**2 \\\n        - np.sum(s[:, 0:N]**2, axis=1))\n\n    sy_var = np.sum(gt[:,1,1,:,:], axis=(-1,-2))\n    sy_var += (np.sum(s[:, N:2*N], axis=1)**2 \\\n        - np.sum(s[:, N:2*N]**2, axis=1))\n\n    sz_var = np.sum(gt[:,2,2,:,:], axis=(-1,-2))\n    sz_var += (np.sum(s[:, 2*N:3*N], axis=1)**2 \\\n        - np.sum(s[:, 2*N:3*N]**2, axis=1))\n\n    sxy_var = np.sum(gt[:,0,1,:,:], axis=(-1,-2))\n    sxy_var += np.sum([fftconvolve(s[m, 0:N], s[m, N:2*N]) \\\n        for m in xrange(t_output.size)], axis=1)\n    #Remove the diagonal parts\n    sxy_var -= np.sum(s[:, 0:N] * s[:, N:2*N], axis=1)\n\n    sxz_var = np.sum(gt[:,0,2,:,:], axis=(-1,-2))\n    sxz_var += np.sum([fftconvolve(s[m, 0:N], s[m, 2*N:3*N]) \\\n        for m in xrange(t_output.size)], axis=1)\n    #Remove the diagonal parts\n    sxz_var -= np.sum(s[:, 0:N] * s[:, 2*N:3*N], axis=1)\n\n    syz_var = np.sum(gt[:,1,2,:,:], axis=(-1,-2))\n    syz_var += np.sum([fftconvolve(s[m, N:2*N], s[m, 2*N:3*N]) \\\n        for m in xrange(t_output.size)], axis=1)\n    #Remove the diagonal parts\n    syz_var -= np.sum(s[:, N:2*N] * s[:, 2*N:3*N], axis=1)\n\n    localdata = OutData(t_output, sx_expct, sy_expct,\\\n      sz_expct, sx_var, sy_var, sz_var, sxy_var, sxz_var, \\\n        syz_var, params)\n\n    return localdata\n\ndef t_deriv(quantities, times):\n    \"\"\"\n    Computes the time derivative of quantities wrt times\n    \"\"\"\n    dt = np.gradient(times)\n    return np.gradient(quantities, dt)\n\ndef weyl_hamilt(s,times,param):\n    \"\"\"\n    Evaluates the Weyl Symbols of the Hamiltonian, H_w\n    Does this at all times\n    If |s^a> = (s^a_0, s^a_1 ... 
s^a_N), and\n H_w = -(1/2) * \\sum_{nm} J_{nm} (J_x s^n_x s^m_x + J_y s^n_y s^m_y\n + J_z s^n_z s^m_z) - h(t) * \\sum_n (h_x s^n_x +h_y s^n_y\n + h_z s^n_z)\n \"\"\"\n N = param.latsize\n #s[:, 0:N] = sx , s[:, N:2*N] = sy, s[:, 2*N:3*N] = sz\n hw = param.jx * np.dot(s[:,0*N:1*N],param.jmat.dot(s[:,0*N:1*N].T))\n hw += param.jy * np.dot(s[:,1*N:2*N],param.jmat.dot(s[:,1*N:2*N].T))\n hw += param.jz * np.dot(s[:,2*N:3*N],param.jmat.dot(s[:,2*N:3*N].T))\n hw = hw /(2.0 * param.norm)\n hw += (param.hx * np.sum(s[:, 0:N]) +\\\n param.hy * np.sum(s[:, N:2*N]) + param.hz * np.sum(s[:, 2*N:3*N]))\n return -hw\n", "repo_name": "hariseldon99/dtwa_quantum_spins", "sub_path": "dtwa_quantum_spins/funcs.py", "file_name": "funcs.py", "file_ext": "py", "file_size_in_byte": 4755, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "48", "api": [{"api_name": "numpy.cos", "line_number": 9, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 76, "usage_type": "call"}, {"api_name": "scipy.signal.fftconvolve", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 82, "usage_type": "call"}, {"api_name": 
"scipy.signal.fftconvolve", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 88, "usage_type": "call"}, {"api_name": "scipy.signal.fftconvolve", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.gradient", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.gradient", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 122, "usage_type": "call"}]} +{"seq_id": "321394225", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n File Name: fetch\n Description :\n Author : seger\n date: 2018/4/2\n-------------------------------------------------\n Change Activity:\n 2018/4/2:\n-------------------------------------------------\n\"\"\"\nimport threading\n\n__author__ = 'seger'\nimport requests\nimport re\nfrom scrapy.selector import Selector\nfrom utils.db import DataBase\ndb = DataBase()\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0\"\n}\n\nclass Fetch:\n\n def get_html(self,url):\n return requests.get(url,headers=headers).text\n\n def fetch_66ip(self):\n print(\"fetch 66ip-------\")\n url = 'http://www.66ip.cn/nmtq.php?getnum=1000&isp=0&anonymoustype=3&start=&ports=&export=&ipaddress=&area=0&proxytype=1&api=66ip'\n html = requests.get(url)\n p = re.compile('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{2,5}')\n text = p.findall(html)\n ips = []\n for t in text:\n s = t.split(':')\n one = {}\n one['ip'] = s[0]\n one['port'] = s[1]\n one['protocol'] = 'https'\n if self.judge_ip(one['ip'], one['protocol']):\n db.insert_item(one)\n print(\"66ip has fetched--------\")\n\n # 西刺代理\n def fetch_xici(self, page=1):\n print(\"fetch xici--------\")\n for i in range(100):\n selector = Selector(text=self.get_html(url=\"http://www.xicidaili.com/nn/{0}\".format(i)))\n all_trs = selector.css('#ip_list tr')\n for tr in all_trs[1:]:\n all_texts = tr.css(\"td::text\").extract()\n match_obj1 = re.match(\".*'HTTPS'.*\", str(all_texts))\n match_obj2 = re.match(\".*'HTTP'.*\", str(all_texts))\n proxy_type = \"\"\n if match_obj1:\n proxy_type = \"https\"\n elif match_obj2:\n proxy_type = \"http\"\n ip = all_texts[0]\n port = all_texts[1]\n if self.judge_ip(ip, port):\n db.insert_item(\n {\n 'ip':ip,\n 'port':port,\n 'protocol':proxy_type\n }\n )\n print(\"xici has fetched------\")\n\n\n\n\n # ip海\n def fetch_iphai(self):\n print('fetching iphai---------')\n url = 'http://www.iphai.com/free/ng'\n selector = Selector(text=self.get_html(url))\n trs = selector.xpath('/html/body/div[2]/div[2]/table/tr').extract()[1:]\n for tr in trs:\n tr = Selector(text=tr)\n one = {}\n one['ip'] = tr.xpath('//tr/td[1]/text()').extract()[0].strip()\n one['port'] = tr.xpath('//tr/td[2]/text()').extract()[0].strip()\n one['protocol'] = 'https' if 'https' in tr.xpath('//tr/td[4]/text()').extract()[\n 0].strip().lower() else 'http' # 对于支持双协议的http,https,保守一点,只选择http\n if self.judge_ip(one['ip'], one['protocol']):\n db.insert_item(one)\n\n print(\"iphai has fetched!\")\n\n\n # 云代理\n def 
fetch_ip3366(self):\n        print('fetching ip3366---------')\n        urls = ['http://www.ip3366.net/?page=%d' % i for i in range(1, 8)]\n        for url in urls:\n            selector = Selector(text=self.get_html(url))\n            trs = selector.xpath('//*[@id=\"list\"]/table/tbody/tr').extract()\n            for tr in trs:\n                tr = Selector(text=tr)\n                one = {}\n                one['ip'] = tr.xpath('//tr/td[1]/text()').extract()[0]\n                one['port'] = tr.xpath('//tr/td[2]/text()').extract()[0]\n                one['protocol'] = tr.xpath('//tr/td[4]/text()').extract()[0]\n                if self.judge_ip(one['ip'], one['port']):\n                    db.insert_item(one)\n        print('ip3366 has fetched')\n\n\n    def main(self):\n        self.fetch_ip3366()\n        self.fetch_iphai()\n        self.fetch_xici()\n        self.fetch_66ip()\n        ip_list = db.get_item()\n        pass\n        # for ip in ip_list:\n        #     t = ProxyConnectionTest(ip['ip'], ip['port'])\n        #     t.setDaemon(True)\n        #     t.start()\n        #     t.join()\n\n    def judge_ip(self, ip, port):\n        http_url = 'https://www.baidu.com'\n        proxy_url = \"http://{0}:{1}\".format(ip, port)\n        try:\n            proxy_dict = {\n                'http': proxy_url\n            }\n            response = requests.get(http_url, proxies=proxy_dict)\n        except Exception as e:\n            print(\"Exception: invalid ip and port---{0}:{1}\".format(ip, port))\n            db.remove_item(ip)\n            return False\n        else:\n            code = response.status_code\n            if code >= 200 and code < 300:\n                print('valid ip---{0}:{1}'.format(ip, port))\n                return True\n            else:\n                print(\"invalid ip and port---{0}:{1}\".format(ip, port))\n                db.remove_item(ip)\n                return False\n\nclass ProxyConnectionTest(threading.Thread):\n    def __init__(self, ip, port):\n        threading.Thread.__init__(self)\n        self.ip = ip\n        self.http_url = 'https://www.baidu.com'\n        self.proxy_url = \"http://{0}:{1}\".format(ip, port)\n\n    def run(self):\n        try:\n            proxy_dict = {\n                'http': self.proxy_url\n            }\n            response = requests.get(self.http_url, proxies=proxy_dict)\n        except Exception as e:\n            print(\"invalid ip and port\")\n            db.remove_item(self.ip)\n            return False\n        else:\n            code = response.status_code\n            if code >= 200 and code < 300:\n                print('valid ip')\n                return True\n            else:\n                db.remove_item(self.ip)\n                return False\nif __name__ == '__main__':\n    obj = Fetch()\n    obj.main()", "repo_name": "segerLin/RecruitmentInfoAnalysis", "sub_path": "spider/ProxyFetch/fetch/fetch.py", "file_name": "fetch.py", "file_ext": "py", "file_size_in_byte": 6018, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "utils.db.DataBase", "line_number": 20, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 33, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 34, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 51, "usage_type": "call"}, {"api_name": "re.match", "line_number": 55, "usage_type": "call"}, {"api_name": "re.match", "line_number": 56, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 81, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 84, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 101, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 104, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 134, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 149, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 151, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 151, "usage_type": 
"attribute"}, {"api_name": "requests.get", "line_number": 161, "usage_type": "call"}]} +{"seq_id": "26637548627", "text": "import torch\nimport json\nfrom typing import Union\nfrom dataset_utilities import create_mnist_train_dataloader, create_adv_mnist_test_dataloader_preprocessed, create_synthetic_dataloader\nfrom dataset_utilities import create_imagenet_test_loader\nfrom dataset_utilities import create_adversarial_cifar10_dataloaders\nfrom dataset_utilities import create_mnist_dataloaders\nfrom dataset_utilities import create_tensor_dataloader\n# from dataset_utilities import create_svhn_dataloaders\nfrom models.mpl import Net, Net_800_400_100, MNISTClassifier, PnmlModel, NetSynthetic\nfrom models.wide_resnet_original import WideResNet\nfrom models.madry_wide_resnet import MadryWideResNet\nfrom models.model_utils import load_pretrained_imagenet_model, load_pretrained_model, ImagenetModel\nfrom adversarial.attacks import get_attack\nfrom dataset_utilities import get_dataset_min_max_val\n\n\nclass Experiment:\n def __init__(self, args: dict, cli_params: Union[dict, None] = None):\n \"\"\"\n\n :param args: General arguments detailing the experiment such as: output_root, path to parameters.json (param_file_path)\n and experiment_type\n :param cli_params:\n \"\"\"\n if args['experiment_type'] not in [\n 'out_of_dist_svhn',\n 'synthetic',\n 'pnml_mnist',\n 'imagenet_adversarial',\n 'cifar_adversarial',\n 'mnist_adversarial']:\n raise NameError('No experiment type: %s' % type)\n self.exp_type = args['experiment_type']\n self.params = self.__load_params_from_file(args, self.exp_type)\n if self.params.get(\"num_classes\") is None:\n self.params[\"num_classes\"] = 10\n if cli_params is None:\n cli_params = dict()\n self.__update_params_from_cli(cli_params)\n self.output_dir = args['output_root']\n\n @staticmethod\n def __load_params_from_file(args, exp_type):\n \"\"\"\n Load parameters for exp_type\n :param args: a dict containing a path to parameters file containing parameters for different experiments\n :param exp_type:\n :return:\n \"\"\"\n param_file_path = args['param_file_path']\n with open(param_file_path) as f: # Load the params for all experiments from param_file_path\n params = json.load(f)\n assert(exp_type == params['exp_type'])\n return params\n\n def __update_params_from_cli(self, cli_params):\n for key, inner_dict in cli_params.items():\n for inner_key, val in inner_dict.items():\n if val is not None:\n print(\"Update: params[{}][{}] = {}\".format(key, inner_key, val))\n self.params[key][inner_key] = val\n\n def get_params(self):\n return self.params\n\n def get_dataloaders(self) -> dict:\n \"\"\"\n :return: Non adversarial dataloaders\n \"\"\"\n if self.params['adv_attack_test'][\"white_box\"]:\n return self.get_adv_dataloaders(datafolder='./data', p=None, model=None)\n else:\n adv = torch.load(self.params['adv_attack_test'][\"black_box_adv_path\"])\n return self.get_blackbox_dataloader(adv, self.params[\"batch_size\"], self.params[\"num_workers\"],\n self.params[\"adv_attack_test\"]['test_start_idx'],\n self.params[\"adv_attack_test\"]['test_end_idx'],\n self.params[\"adv_attack_test\"]['idx_step_size'], self.params.get('num_classes'))\n\n\n def get_blackbox_dataloader(self, adv, batch_size=128, num_workers: int = 4, start_idx: int = 0,\n end_idx: Union[int, None] = None, idx_step_size: int = 1, labels_to_test: int = 10):\n if end_idx is None:\n end_idx = len(adv.adversarial_sample)-1\n dataloader = dict()\n dataloader['test'], dataloader['classes'] = 
create_tensor_dataloader(adv.adversarial_sample, adv.true_label,\n batch_size=batch_size, num_workers=num_workers,\n start_idx=start_idx, end_idx=end_idx,\n idx_step_size=idx_step_size, labels_to_test=labels_to_test)\n dataloader['dataset_name'] = self.exp_type\n dataloader[\"black_box_attack_params\"] = adv.attack_params\n return dataloader\n\n def get_adv_dataloaders(self, datafolder: str = './data', p=None, model=None):\n \"\"\"\n :param datafolder: location of the data\n :param p: (dict) the adversarial attack parameters\n :param model: the black/white-box model on which the attack will work, if None no attack will run\n :return: dataloaders dict\n \"\"\"\n if p is None:\n p = {'attack_type': \"no_attack\"}\n if model is None or p['attack_type'] == \"no_attack\":\n attack = get_attack(p)\n else:\n model.eval()\n attack = get_attack(p, model, get_dataset_min_max_val(self.exp_type))\n return self._create_dataloaders(datafolder, attack)\n\n def _create_dataloaders(self, data_folder: str = './data', attack=None):\n if self.exp_type == 'pnml_mnist':\n trainloader, testloader, classes, bounds = create_mnist_dataloaders(data_folder,\n self.params['batch_size'],\n self.params['num_workers'])\n dataloaders = {'train': trainloader,\n 'test': testloader,\n 'classes': classes,\n 'bounds': bounds}\n # elif self.exp_type == 'out_of_dist_svhn':\n # trainloader, testloader_svhn, classes_svhn, classes_cifar10 = create_svhn_dataloaders(data_folder,\n # self.params[\n # 'batch_size'],\n # self.params[\n # 'num_workers'])\n # dataloaders = {'train': trainloader,\n # 'test': testloader_svhn,\n # 'classes': classes_cifar10,\n # 'classes_svhn': classes_svhn}\n elif self.exp_type == 'synthetic':\n trainloader, classes = create_synthetic_dataloader(shuffle=True)\n testloader, _ = create_synthetic_dataloader(shuffle=False)\n dataloaders = {'train': trainloader, 'test': testloader, 'classes': classes, 'adv_test_flag': False}\n elif self.exp_type == 'imagenet_adversarial':\n assert (attack is not None)\n testloader, classes, bounds = create_imagenet_test_loader(data_folder,\n self.params['batch_size'], self.params['num_workers'],\n self.params['adv_attack_test']['test_start_idx'],\n self.params['adv_attack_test']['test_end_idx'],\n self.params['adv_attack_test'][\"idx_step_size\"],\n self.params['num_classes'])\n dataloaders = {'test': testloader,\n 'classes': classes,\n 'bounds': bounds}\n elif self.exp_type == 'cifar_adversarial':\n assert(attack is not None)\n trainloader, testloader, classes, bounds = create_adversarial_cifar10_dataloaders(attack, data_folder,\n self.params['batch_size'], self.params['num_workers'],\n self.params['adv_attack_test']['test_start_idx'],\n self.params['adv_attack_test']['test_end_idx'])\n adv_test_flag = True if attack.name != \"NoAttack\" else False # This flag indicates whether the testset is already adversarial\n dataloaders = {'train': trainloader,\n 'test': testloader,\n 'adv_test_flag': adv_test_flag, # This flag indicates whether the testset is already adversarial\n 'classes': classes, 'bounds': bounds}\n\n elif self.exp_type == 'mnist_adversarial':\n assert(attack is not None)\n dataloaders = dict()\n dataloaders['train'], dataloaders['classes'], bounds_train = create_mnist_train_dataloader(data_folder,\n self.params['batch_size'], self.params['num_workers'])\n\n dataloaders['adv_test_flag'] = True if attack.name != \"NoAttack\" else False # This flag indicates whether the testset is already adversarial\n dataloaders['test'], _, bounds_test = 
create_adv_mnist_test_dataloader_preprocessed(attack, data_folder,\n self.params['batch_size'], self.params['num_workers'],\n self.params['adv_attack_test']['test_start_idx'],\n self.params['adv_attack_test']['test_end_idx'])\n assert(bounds_train == bounds_test)\n dataloaders['bounds'] = bounds_train\n else:\n raise NameError('No experiment type: %s' % self.exp_type)\n\n dataloaders['dataset_name'] = self.exp_type\n return dataloaders\n\n def get_model(self, model_arch: str, ckpt_path: str, pnml_model_flag: bool = False, pnml_model_keep_grad: bool =True):\n \"\"\"\n Load a untrained or trained model according to the experiment type and if a ckpt_path is given.\n :param model_arch: the architecture of the model\n :param ckpt_path: the path to the model .ckpt file. If no ckpt_path is given then the initial model is loaded\n :param pnml_model_flag: If true, return PnmlModel of the loaded model\n :return: A NN model\n \"\"\"\n ckpt_path = None if ckpt_path == \"None\" else ckpt_path\n if self.exp_type == \"mnist_adversarial\":\n if model_arch == 'Net':\n model = Net()\n elif model_arch == 'Net_800_400_100':\n model = Net_800_400_100()\n elif model_arch == 'MNISTClassifier':\n model = MNISTClassifier()\n elif model_arch == 'PnmlModel':\n model = MNISTClassifier()\n else:\n raise NameError('No model_arch type %s for %s experiment' % (str(model_arch), self.exp_type))\n elif self.exp_type == \"cifar_adversarial\":\n if model_arch == 'wide_resnet':\n model = MadryWideResNet(depth=34, num_classes=10, widen_factor=10, dropRate=0.0)\n elif model_arch == \"RST\": # Model used in \"Unlabeled Data Improves Adversarial Robustness\" paper, which has the same architecture as in the original WideResNet\n model = WideResNet(depth=28, num_classes=10, widen_factor=10)\n else:\n raise NameError('No model_arch type %s for %s experiment' % (str(model_arch), self.exp_type))\n elif self.exp_type == \"imagenet_adversarial\":\n if model_arch == 'resnet50':\n model = load_pretrained_imagenet_model(\"resnet50\")\n elif self.exp_type == \"synthetic\":\n model = NetSynthetic()\n else:\n raise NameError('No experiment type: %s' % self.exp_type)\n\n model = load_pretrained_model(model, ckpt_path) if ckpt_path is not None else model\n model = ImagenetModel(model, self.params[\"num_classes\"]) if self.exp_type == \"imagenet_adversarial\" else model\n if pnml_model_flag:\n model = self.get_pnml_model(model, pnml_model_keep_grad)\n model.ckpt_path = ckpt_path\n return model\n\n def get_pnml_model(self, base_model, pnml_model_keep_grad: bool = True):\n return PnmlModel(base_model, self.params['fit_to_sample'], get_dataset_min_max_val(self.exp_type),\n self.params[\"num_classes\"], pnml_model_keep_grad)\n\n def get_exp_name(self):\n if self.exp_type == 'out_of_dist_svhn':\n name = 'out_of_dist_svhn'\n elif self.exp_type == 'out_of_dist_noise':\n name = 'out_of_dist_noise'\n elif self.exp_type == 'pnml_mnist':\n name = 'pnml_mnist'\n elif self.exp_type == 'imagenet_adversarial':\n name = 'imagenet_adversarial'\n elif self.exp_type == 'cifar_adversarial':\n name = 'cifar_adversarial'\n elif self.exp_type == 'mnist_adversarial':\n name = 'mnist_adversarial'\n elif self.exp_type == 'synthetic':\n name = 'synthetic'\n else:\n raise NameError('No experiment type: %s' % self.exp_type)\n\n return name\n\n def get_attack_for_model(self, model):\n return get_attack(self.params[\"adv_attack_test\"], model, get_dataset_min_max_val(self.exp_type))\n", "repo_name": "uriyapes/pnml_adv", "sub_path": "src/experimnet_utilities.py", "file_name": 
"experimnet_utilities.py", "file_ext": "py", "file_size_in_byte": 13621, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "typing.Union", "line_number": 19, "usage_type": "name"}, {"api_name": "json.load", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 74, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 82, "usage_type": "name"}, {"api_name": "dataset_utilities.create_tensor_dataloader", "line_number": 86, "usage_type": "call"}, {"api_name": "adversarial.attacks.get_attack", "line_number": 104, "usage_type": "call"}, {"api_name": "adversarial.attacks.get_attack", "line_number": 107, "usage_type": "call"}, {"api_name": "dataset_utilities.get_dataset_min_max_val", "line_number": 107, "usage_type": "call"}, {"api_name": "dataset_utilities.create_mnist_dataloaders", "line_number": 112, "usage_type": "call"}, {"api_name": "dataset_utilities.create_synthetic_dataloader", "line_number": 130, "usage_type": "call"}, {"api_name": "dataset_utilities.create_synthetic_dataloader", "line_number": 131, "usage_type": "call"}, {"api_name": "dataset_utilities.create_imagenet_test_loader", "line_number": 135, "usage_type": "call"}, {"api_name": "dataset_utilities.create_adversarial_cifar10_dataloaders", "line_number": 146, "usage_type": "call"}, {"api_name": "dataset_utilities.create_mnist_train_dataloader", "line_number": 159, "usage_type": "call"}, {"api_name": "dataset_utilities.create_adv_mnist_test_dataloader_preprocessed", "line_number": 163, "usage_type": "call"}, {"api_name": "models.mpl.Net", "line_number": 186, "usage_type": "call"}, {"api_name": "models.mpl.Net_800_400_100", "line_number": 188, "usage_type": "call"}, {"api_name": "models.mpl.MNISTClassifier", "line_number": 190, "usage_type": "call"}, {"api_name": "models.mpl.MNISTClassifier", "line_number": 192, "usage_type": "call"}, {"api_name": "models.madry_wide_resnet.MadryWideResNet", "line_number": 197, "usage_type": "call"}, {"api_name": "models.wide_resnet_original.WideResNet", "line_number": 199, "usage_type": "call"}, {"api_name": "models.model_utils.load_pretrained_imagenet_model", "line_number": 204, "usage_type": "call"}, {"api_name": "models.mpl.NetSynthetic", "line_number": 206, "usage_type": "call"}, {"api_name": "models.model_utils.load_pretrained_model", "line_number": 210, "usage_type": "call"}, {"api_name": "models.model_utils.ImagenetModel", "line_number": 211, "usage_type": "call"}, {"api_name": "models.mpl.PnmlModel", "line_number": 218, "usage_type": "call"}, {"api_name": "dataset_utilities.get_dataset_min_max_val", "line_number": 218, "usage_type": "call"}, {"api_name": "adversarial.attacks.get_attack", "line_number": 242, "usage_type": "call"}, {"api_name": "dataset_utilities.get_dataset_min_max_val", "line_number": 242, "usage_type": "call"}]} +{"seq_id": "25214398743", "text": "import pygame as pg\n\npg.init()\npg.time.set_timer(pg.USEREVENT, 1000)\n\nfrom espacial.entities import Planeta, Nave, Asteroide, Astronauta\nfrom espacial import niveles, FPS, vel_nivel\nimport random\nimport sqlite3\n\n\nclass Escena:\n def __init__(self, pantalla):\n self.pantalla = pantalla\n self.reloj = pg.time.Clock()\n\n def bucle_ppal():\n pass\n\nclass Intro(Escena):\n def __init__(self, pantalla):\n super().__init__(pantalla)\n self.fuente = pg.font.Font(\"resources/fonts/AGENCYR.TTF\", 100)\n self.fuente2 = pg.font.Font(\"resources/fonts/AGENCYR.TTF\", 30)\n self.fuente3 = 
pg.font.Font(\"resources/fonts/AGENCYR.TTF\", 25)\n self.background = pg.image.load(\"./resources/img/intro.png\")\n self.user_text = ''\n\n self.input_rect = pg.Rect(self.pantalla.get_width()//2-70,self.pantalla.get_height()//2+100,150,50)\n self.color_active = pg.Color('lightskyblue3')\n self.color_passive = pg.Color('gray15')\n self.color = self.color_passive\n self.active = False\n\n def bucle_ppal(self, retorno):\n pg.mixer.music.load(f\"./resources/sounds/Sound_intro.mp3\")\n pg.mixer.music.play()\n\n while True:\n for evento in pg.event.get():\n if evento.type == pg.QUIT:\n return False\n \n if evento.type == pg.KEYDOWN:\n if evento.key == pg.K_SPACE:\n return [True, self.user_text]\n \n\n if evento.type == pg.MOUSEBUTTONDOWN:\n if self.input_rect.collidepoint(evento.pos):\n self.active = True\n else:\n self.active = False\n\n if evento.type == pg.KEYDOWN:\n if self.active == True:\n if evento.key == pg.K_BACKSPACE:\n self.user_text = self.user_text[:-1]\n else:\n self.user_text += evento.unicode\n\n self.pantalla.fill((0,0,0))\n self.pantalla.blit(self.background, (0,0))\n\n if self.active:\n self.color = self.color_active\n else:\n self.color = self.color_passive\n \n pg.draw.rect(self.pantalla,self.color,self.input_rect,2)\n\n texto = self.fuente.render(\"THE QUEST\", True, (102, 204, 102))\n texto2 = self.fuente3.render(\"TU MISIÓN: Encuentra y coloniza nuevos planetas para salvar a la especie humana de su propio exterminio.\", True, (102, 204, 102))\n texto3 = self.fuente3.render(\"Introduce tu nombre en el cuadro inferior y pulsa la tecla ESPACIO cuando estés listo.\", True, (102, 204, 102))\n texto4 = self.fuente2.render(\"BUENA SUERTE!\", True, (102, 204, 102))\n texto5 = self.fuente2.render(\"PLAYER: \", True, (102, 204, 102))\n text_surface = self.fuente2.render(self.user_text,True,(255,255,255))\n \n self.pantalla.blit(texto, (self.pantalla.get_width()//2 - texto.get_width()//2 - 200, self.pantalla.get_height()//2 - texto.get_height()//2 -200))\n self.pantalla.blit(texto2, (self.pantalla.get_width()//2 - texto.get_width()//2 - 200, self.pantalla.get_height()//2 - texto.get_height()//2-50))\n self.pantalla.blit(texto3, (self.pantalla.get_width()//2 - texto.get_width()//2 - 200, self.pantalla.get_height()//2 - texto.get_height()//2))\n self.pantalla.blit(texto4, (self.pantalla.get_width()//2 - texto.get_width()//2+100, self.pantalla.get_height()//2 + texto.get_height()//2-40))\n self.pantalla.blit(texto5, (self.pantalla.get_width()//2 - texto.get_width()//2, self.pantalla.get_height()//2 + 112))\n self.pantalla.blit(text_surface,(self.input_rect.x +10,self.input_rect.y + 10))\n self.input_rect.w=max(150,text_surface.get_width())\n\n pg.display.flip()\n \n\nclass Partida(Escena):\n def __init__(self, pantalla):\n super().__init__(pantalla)\n self.fuente = pg.font.Font(\"resources/fonts/AGENCYR.TTF\", 30)\n self.fuente2 = pg.font.Font(\"resources/fonts/AGENCYR.TTF\", 60)\n self.planeta = Planeta(self.pantalla, 1000, 100, 0)\n self.nave = Nave(self.pantalla, 20, -200, 2)\n \n def reset(self, nivel):\n self.counter = 20\n self.asteroides = []\n self.astronautas = []\n self.todos = []\n self.todos.append(self.planeta)\n self.todos.append(self.nave)\n self.crea_astronautas()\n self.crea_asteroides(nivel)\n\n def crea_astronautas(self):\n for l in range (0, 3):\n l = Astronauta(self.pantalla, random.randint(1200,2400), random.randint(0,600), 5)\n self.astronautas.append(l)\n self.todos = self.todos + self.astronautas\n \n def crea_asteroides(self, nivel):\n for l, coor in 
enumerate(niveles[nivel]):\n asteroide = Asteroide(self.pantalla, coor[0], coor[1], vel_nivel[nivel], l)\n self.asteroides.append(asteroide)\n self.todos = self.todos + self.asteroides\n\n def bucle_ppal(self, retorno): \n nivel = 0\n self.contador_vidas = 3\n self.contador_frames = 0\n self.puntuacion = 0\n self.cuenta = ''\n self.player = retorno[1]\n self.nave.viva = retorno[2]\n \n self.reset(nivel)\n self.nave.reset()\n\n while self.contador_vidas > 0 and nivel < len(niveles)-1:\n pg.mixer.music.load(f\"./resources/sounds/Sound_game.mp3\")\n pg.mixer.music.play()\n \n while self.counter >= 0 and self.contador_vidas > 0 and self.nave.viva and nivel <= len(niveles)-1:\n\n self.reloj.tick(FPS)\n\n eventos = pg.event.get()\n for evento in eventos:\n if evento.type == pg.QUIT:\n return False\n elif evento.type == pg.USEREVENT:\n self.cuenta = str(self.counter).rjust(3)\n self.counter -= 1\n\n self.pantalla.fill((0, 0, 0)) \n\n for objeto in self.todos:\n objeto.mover()\n\n for asteroide in self.asteroides:\n\n if asteroide.comprobarToque(self.nave):\n self.asteroides.remove(asteroide)\n self.todos.remove(asteroide)\n self.nave.viva = False\n\n for astronauta in self.astronautas:\n \n if astronauta.comprobarToque(self.nave):\n self.puntuacion += 50\n self.astronautas.remove(astronauta)\n self.todos.remove(astronauta)\n\n\n for objeto in self.todos:\n objeto.dibujar()\n\n self.contador_frames +=1\n if self.contador_frames == 300:\n self.puntuacion += 25\n self.contador_frames = 0\n\n Marcador = self.fuente.render(\"Puntuacion: \" + str(self.puntuacion) + \" | Tiempo restante: \" + self.cuenta + \"s | Vidas: \" + str(self.contador_vidas) + \" | Nivel \" + str(nivel), True, (102, 204, 102))\n self.pantalla.blit(Marcador, (650, 10))\n pg.display.flip()\n\n if self.counter == 0 and self.nave.viva and nivel < len(niveles)-1:\n self.nave.aterriza = True\n self.nave.y = self.pantalla.get_height()//2\n i = 1\n contador_frames = 0\n while self.nave.x <= 1000:\n self.pantalla.fill((0, 0, 0)) \n self.nave.avanzar()\n if self.nave.x > 500:\n contador_frames += 1\n self.nave.rotar(i)\n if contador_frames == 63:\n i += 1\n contador_frames = 0\n self.planeta.dibujar()\n self.nave.dibujar()\n Mensaje= self.fuente2.render(\"Bien hecho! Pasas al nivel \" + str(nivel+1), True, (102, 204, 102))\n #Mensaje2 = self.fuente.render(\"Presiona tecla ESPACIO para continuar\", True, (102, 204, 102))\n self.pantalla.blit(Mensaje, (self.pantalla.get_width()//2 - Mensaje.get_width()//2, self.pantalla.get_height()//2 - 2*Mensaje.get_height()//2))\n #self.pantalla.blit(Mensaje2, (self.pantalla.get_width()//2 - Mensaje2.get_width()//2, self.pantalla.get_height()//2 - Mensaje2.get_height()//2 + 75))\n pg.display.flip()\n nivel += 1\n self.reset(nivel)\n self.nave.reset()\n pg.event.clear()\n\n elif not self.nave.viva:\n pg.mixer.music.load(f\"./resources/sounds/Sound_explosion.mp3\")\n pg.mixer.music.play()\n self.contador_vidas -=1\n if self.contador_vidas > 0:\n while not self.nave.viva:\n self.pantalla.fill((0, 0, 0)) \n Mensaje = self.fuente2.render(\"BOOM!!!! 
Te quedan \" + str(self.contador_vidas) + \" vidas\", True, (102, 204, 102))\n Mensaje2 = self.fuente.render(\"Presiona tecla ESPACIO para continuar\", True, (102, 204, 102))\n self.pantalla.blit(Mensaje, (self.pantalla.get_width()//2 - Mensaje.get_width()//2, self.pantalla.get_height()//2 - Mensaje.get_height()//2))\n self.pantalla.blit(Mensaje2, (self.pantalla.get_width()//2 - Mensaje2.get_width()//2, self.pantalla.get_height()//2 - Mensaje2.get_height()//2 + 75))\n self.planeta.dibujar()\n self.nave.dibujar()\n pg.display.flip()\n\n eventos = pg.event.get()\n for evento in eventos:\n if evento.type == pg.QUIT:\n return False\n if evento.type == pg.KEYDOWN:\n if evento.key == pg.K_SPACE:\n self.nave.reset()\n self.reset(nivel)\n pg.mixer.music.load(f\"./resources/sounds/Sound_game.mp3\")\n pg.mixer.music.play()\n else:\n conn = sqlite3.connect('score.db')\n conn.execute(\"INSERT INTO puntuaciones (Player,Puntos) \\\n VALUES (?,?)\", (self.player, self.puntuacion,))\n conn.commit()\n conn.close() \n \n return [True, self.puntuacion, self.nave.viva]\n\n\nclass GameOver(Escena):\n def __init__(self, pantalla):\n super().__init__(pantalla)\n self.fuente = pg.font.Font(\"resources/fonts/AGENCYR.TTF\", 100)\n self.fuente2 = pg.font.Font(\"resources/fonts/AGENCYR.TTF\", 50)\n self.fuente3 = pg.font.Font(\"resources/fonts/AGENCYR.TTF\", 25)\n self.background = pg.image.load(\"./resources/img/intro.png\")\n\n def bucle_ppal(self, retorno):\n while True:\n for evento in pg.event.get():\n if evento.type == pg.QUIT:\n return False\n \n if evento.type == pg.KEYDOWN:\n if evento.key == pg.K_SPACE:\n return True\n if retorno[2] == True:\n Mensaje = \"JUEGO COMPLETO\"\n else:\n Mensaje = \"GAME OVER\"\n \n puntuacion = str(retorno[1])\n texto = self.fuente.render(Mensaje, True, (102, 204, 102))\n texto2 = self.fuente2.render(\"Tu puntuacion fue \" + puntuacion, True, (102, 204, 102))\n texto3 = self.fuente3.render(\"TOP PUNTUACIONES\", True, (102, 204, 102))\n texto4 = self.fuente3.render(\"Pulsa tecla ESPACIO para jugar de nuevo\", True, (102, 204, 102))\n\n self.pantalla.blit(self.background, (0,0))\n self.pantalla.blit(texto, (self.pantalla.get_width()//2 - texto.get_width()//2, self.pantalla.get_height()//2 - texto.get_height()//2-125))\n self.pantalla.blit(texto2, (self.pantalla.get_width()//2 - texto2.get_width()//2, self.pantalla.get_height()//2 - texto2.get_height()//2-50))\n self.pantalla.blit(texto3, (self.pantalla.get_width()//2 - texto3.get_width()//2, self.pantalla.get_height()//2 - texto3.get_height()//2+25))\n self.pantalla.blit(texto4, (self.pantalla.get_width()//2 - texto4.get_width()//2, self.pantalla.get_height()-100))\n \n con = sqlite3.connect('score.db')\n cur = con.cursor()\n cur.execute(\"\"\"\n SELECT Player,Puntos \n FROM puntuaciones \\\n ORDER BY Puntos DESC\n \"\"\"\n )\n datos = cur.fetchall()\n cur.close()\n \n i = 0\n for dato in datos:\n if i <= 2:\n player = self.fuente3.render(str(dato[0]), True, (102, 204, 102))\n separator = self.fuente3.render(str(40*'.'), True, (102, 204, 102))\n score = self.fuente3.render(str(dato[1]), True, (102, 204, 102))\n self.pantalla.blit(player, (self.pantalla.get_width()//2-125, self.pantalla.get_height()//2 +50+ 25*i))\n self.pantalla.blit(separator, (self.pantalla.get_width()//2-75, self.pantalla.get_height()//2 +50+ 25*i))\n self.pantalla.blit(score, (self.pantalla.get_width()//2+100, self.pantalla.get_height()//2 +50+ 25*i)) \n i += 1\n \n pg.display.flip()\n\n", "repo_name": "alegocon/PFB_pygame", "sub_path": "espacial/escenes.py", 
"file_name": "escenes.py", "file_ext": "py", "file_size_in_byte": 14094, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "pygame.init", "line_number": 3, "usage_type": "call"}, {"api_name": "pygame.time.set_timer", "line_number": 4, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 4, "usage_type": "attribute"}, {"api_name": "pygame.USEREVENT", "line_number": 4, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.mixer.music.load", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.K_BACKSPACE", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 87, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 93, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 94, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 94, "usage_type": "attribute"}, {"api_name": "espacial.entities.Planeta", "line_number": 95, "usage_type": "call"}, {"api_name": "espacial.entities.Nave", "line_number": 96, "usage_type": "call"}, {"api_name": "espacial.entities.Astronauta", "line_number": 110, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 110, "usage_type": "call"}, {"api_name": "espacial.niveles", "line_number": 115, "usage_type": "name"}, {"api_name": "espacial.entities.Asteroide", "line_number": 116, "usage_type": "call"}, {"api_name": "espacial.vel_nivel", "line_number": 116, 
"usage_type": "name"}, {"api_name": "espacial.niveles", "line_number": 132, "usage_type": "argument"}, {"api_name": "pygame.mixer.music.load", "line_number": 133, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 134, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 134, "usage_type": "attribute"}, {"api_name": "espacial.niveles", "line_number": 136, "usage_type": "argument"}, {"api_name": "espacial.FPS", "line_number": 138, "usage_type": "argument"}, {"api_name": "pygame.event.get", "line_number": 140, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 142, "usage_type": "attribute"}, {"api_name": "pygame.USEREVENT", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 178, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 178, "usage_type": "attribute"}, {"api_name": "espacial.niveles", "line_number": 180, "usage_type": "argument"}, {"api_name": "pygame.display.flip", "line_number": 200, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 200, "usage_type": "attribute"}, {"api_name": "pygame.event.clear", "line_number": 204, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 204, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 207, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 207, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 208, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 208, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 219, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 219, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 221, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 221, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 223, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 225, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 226, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 229, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 229, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 230, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 230, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 232, "usage_type": "call"}, {"api_name": "pygame.font.Font", "line_number": 244, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 244, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 245, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 245, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 246, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 246, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 247, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 247, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 251, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 251, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 252, "usage_type": "attribute"}, {"api_name": 
"pygame.KEYDOWN", "line_number": 255, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 256, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 275, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 297, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 297, "usage_type": "attribute"}]} +{"seq_id": "35248854715", "text": "# pip install pyperclip # 어떤 문장을 클립보드로 저장 \n# -> pyautogui.write()는 한글 불가, pyperclip를 사용하여 클립보드에 저장하여 붙여넣기 방식으로 한글 사용.\n\nimport pyautogui\n\nw = pyautogui.getWindowsWithTitle(\"제목 없음\")[0] # 메모장 1개 띄운 상태에서 창 정보 가져옴\nw.activate()\n\n# pyautogui.write(\"12345\")\n# pyautogui.write(\"NadoCoding\", interval=0.25) # interval = 글자 쓰는 속도\n# pyautogui.write([\"t\",\"e\",\"s\",\"t\",\"left\",\"left\",\"right\",\"l\",\"a\",\"enter\"], interval = 0.1)\n\n# 특수문자\n# ex. shift + 4 -> $ \npyautogui.keyDown(\"shift\") # shift 키를 누른 상태에서\npyautogui.press(\"4\") # 숫자 4를 입력하고\npyautogui.keyUp(\"shift\") # shift 키를 뗀다\n\n# 간편한 조합키\npyautogui.hotkey(\"ctrl\",\"alt\",\"shift\",\"a\")\n# ctrl 누르고 > alt 누르고 > shift 누르고 > a 누르고 > a 떼고 > shift 떼고 > alt 떼고 > ctrl 떼고 \n\n# 한글 사용하기\nimport pyperclip\npyperclip.copy(\"나도코딩\") # \"나도코딩\" 글자를 클립보드에 저장\npyautogui.hotkey(\"ctrl\",\"v\") # 클립보드에 있는 내용을 붙여넣기\n\n# 함수로 만들어서 쉽게 한글 사용하기\ndef my_write(text):\n pyperclip.copy(text)\n pyautogui.hotkey(\"ctrl\",\"v\")\n\nmy_write(\"쉬운한글\")\n\n# 자동화 프로그램 종료\n# win : ctrl + alt + del\n# mac : cmd + chift + option + q", "repo_name": "OatLager/Study", "sub_path": "Python/Pyautogui/RPA_example/5.keyboard.py", "file_name": "5.keyboard.py", "file_ext": "py", "file_size_in_byte": 1382, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "pyautogui.getWindowsWithTitle", "line_number": 6, "usage_type": "call"}, {"api_name": "pyautogui.keyDown", "line_number": 15, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 16, "usage_type": "call"}, {"api_name": "pyautogui.keyUp", "line_number": 17, "usage_type": "call"}, {"api_name": "pyautogui.hotkey", "line_number": 20, "usage_type": "call"}, {"api_name": "pyperclip.copy", "line_number": 25, "usage_type": "call"}, {"api_name": "pyautogui.hotkey", "line_number": 26, "usage_type": "call"}, {"api_name": "pyperclip.copy", "line_number": 30, "usage_type": "call"}, {"api_name": "pyautogui.hotkey", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "4421438494", "text": "# _*_ coding:utf-8 _*_\n\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torch.utils.data as Data\nimport matplotlib.pyplot as plt\n\n\ntorch.manual_seed(1)\n\nEPOCH = 2\nBATCH_SIZE = 64\nTIME_STEP = 28\nINPUT_SIZE = 28\nLR = 0.001\nDOWNLOAD_MNIST = False\n\n\n# 包含traindata和trianlabel两个数据\ntrain_data = torchvision.datasets.MNIST(\n root='./mnist/',\n train=True, # this is training data\n transform=torchvision.transforms.ToTensor(), # Converts a PIL.Image or numpy.ndarray to\n # torch.FloatTensor of shape (C x H x W) and normalize in the range [0.0, 1.0]\n download=DOWNLOAD_MNIST,\n)\n\n\n# 包含testdata和testlabel两个数据\ntest_data = torchvision.datasets.MNIST(\n root='./mnist/',\n train=False,\n transform=torchvision.transforms.ToTensor(),\n download=DOWNLOAD_MNIST,\n)\n\n# 批训练\ntrain_loader = Data.DataLoader(\n dataset=train_data,\n batch_size=BATCH_SIZE,\n shuffle=True\n)\n\n\ntest_x = test_data.test_data.type(torch.FloatTensor)[:2000]/255\nprint(train_data.train_data.size())\ntest_y = 
test_data.test_labels[:2000]\n\n\nclass RNN(nn.Module):\n    def __init__(self):\n        super(RNN, self).__init__()\n\n        self.rnn = nn.LSTM(  # if you use nn.RNN(), it hardly learns\n            input_size=INPUT_SIZE,\n            hidden_size=64,  # rnn hidden unit\n            num_layers=1,  # number of rnn layer\n            batch_first=True,  # input & output will have batch size as 1st dimension. e.g. (batch, time_step, input_size)\n        )\n\n        self.out = nn.Linear(64, 10)\n\n    def forward(self, x):\n        # x shape (batch, time_step, input_size)\n        # r_out shape (batch, time_step, output_size)\n        # h_n shape (n_layers, batch, hidden_size)\n        # h_c shape (n_layers, batch, hidden_size)\n        r_out, (h_n, h_c) = self.rnn(x, None)  # None represents zero initial hidden state\n\n        # choose r_out at the last time step\n        out = self.out(r_out[:, -1, :])\n        return out\n\nrnn = RNN()\nprint(rnn)\n# test_yy = rnn(test_x[:10].view(-1, 28, 28))\n# print(torch.max(test_yy,1))  # torch.max(a,0) returns the largest element in each column, together with its index (the row index of that largest element within its column)\n\noptimizer = torch.optim.Adam(rnn.parameters(), lr=LR)\nloss_func = torch.nn.CrossEntropyLoss()\n\nlosses = []\nfor epoch in range(EPOCH):\n    for step, (x, b_y) in enumerate(train_loader):\n        b_x = x.view(-1, 28, 28)\n        output = rnn(b_x)\n\n        loss = loss_func(output,b_y)\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n        losses.append(loss.data.numpy())\n\n\n        if step % 50 == 0:\n            test_output = rnn(test_x)  # (samples, time_step, input_size)\n            pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()\n            accuracy = float((pred_y == test_y.data.numpy()).sum()) / float(test_y.size(0))\n            print('Epoch', epoch, 'step', step, '*********** Train_Loss:%.4f' % loss.data.numpy(),\n                  '*********Test_accuracy:%.4f' % accuracy)\n\n\ntest_yy = rnn(test_x[:10].view(-1, 28, 28))\n# print(torch.max(test_yy, 1))\nopt_y = torch.max(test_yy, 1)[1].data.numpy().squeeze()\npred_y = test_y[:10]\n\nprint(opt_y)\nprint(pred_y.data.numpy())\n\nfor i, loss in enumerate(losses):\n    plt.plot(losses, 'r-')\nplt.xlabel('steps')\nplt.ylabel('loss')\nplt.title('The RNN classifier loss on the MNIST dataset')\nplt.show()\n", "repo_name": "MrRenQIANG/Touch_course", "sub_path": "rnn_classifier.py", "file_name": "rnn_classifier.py", "file_ext": "py", "file_size_in_byte": 3505, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "48", "api": [{"api_name": "torch.manual_seed", "line_number": 10, "usage_type": "call"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 21, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 31, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 31, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 34, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 34, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 46, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 55, "usage_type": 
"call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 80, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "attribute"}, {"api_name": "torch.max", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}]} +{"seq_id": "8622844585", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport math\nfrom tqdm import tqdm\nimport networkx as nx\nimport numpy as np\nimport os\nfrom IPython.display import clear_output\nfrom collections import Counter, namedtuple\nimport multiprocessing\nfrom gensim.test.utils import common_texts, get_tmpfile\nfrom gensim.models import Word2Vec, FastText\nimport logging # Setting up the loggings to monitor gensim\nlogging.basicConfig(format=\"%(levelname)s - %(asctime)s: %(message)s\", datefmt= '%H:%M:%S', level=logging.INFO)\n\n\n# In[25]:\n\n\n#Below is the code for loading the dataset, parsing, and preprocessing it and in the end saving each sentence on a file.\n#The script also does a check for senses included in the mapping, dropping them if not in mapping file. \nimport xml.etree.ElementTree as ET\ncount=0\nanncount=0\nsentence=[]\nannotation=[]\nannotations=[]\nbabelnet=[]\nprovisore=[]\nflag=0\nlogg=0\nv=0\n\n#open the mapping file\nwith open('bn2wn_mapping.txt', 'r') as fp:\n for line in fp:\n provisore=line.split()\n babelnet.append(provisore[0])\n \n\n#Create a file that will store the sentences\n \nwith open('corpus.txt', 'wb+') as f:\n #parse through each line in the XML dataset file. The file has been renamed for simplicity of use. \n for event, elem in ET.iterparse(\"eurosensehp.xml\"):\n #this code checks if the sentences are in english. If true, extract the info needed\n if elem.get('lang')=='en':\n \n if elem.tag=='text':\n count+=1\n print(count,'/1.9 mil')\n clear_output()\n \n if elem.text==None:\n flag=1\n \n \n else:\n flag=0\n strg=elem.text\n \n if flag==0:\n if elem.tag=='annotation':\n \n if elem.text not in babelnet:\n logg+=1\n \n else:\n \n v+=1\n temp=[]\n temp=elem.get('lemma').split()\n temp='_'.join(temp)\n annotation.extend([[elem.get('anchor'), temp, elem.text]])\n \n if elem.tag=='sentence':\n if v==0:\n #a check for sentences without annotations is done. 
If so, drop the sentence\n                    annotation=[]\n                    \n                    \n                else:\n                    \n                    annotations.append(annotation)\n                    for ann in annotations[-1]:\n                        strg=strg.replace(ann[0]+' ',ann[1]+'_'+ann[2]+' ')\n                    line=(strg+'\\n').encode('utf-8')\n                    f.write(line)\n                    annotation=[]\n                    sentence.append(strg)\n                    v=0\n        elem.clear()\n        \n\n\n# In[2]:\n\n\n#Loading the sentences from the file and some further preprocessing, for example lowering every uppercase character\n\n\n# In[79]:\n\n\nwordList=[]\n\n\n# In[80]:\n\n\ncount=0\nwith open('corpus.txt', encoding='utf8') as f:\n    \n    for entry in f:\n        if count==0:\n            \n            count+=1\n        wordList.append(entry.lower().split())\n\n\n# In[81]:\n\n\n#remove every word that is not alphanumeric\nfor i in wordList:\n    for j in list(i): #iterate over a copy, since removing from i while iterating would skip words\n        if not j[0].isalpha() and not j[0].isdigit(): #<3\n            \n            \n            i.remove(j)\n            \n            \n\n\n# In[8]:\n\n\n#saving the final preprocessed corpus\nimport pickle\nwith open(\"loweredcorpus.pickle\", \"wb+\") as v:\n    pickle.dump(wordList, v)\n\n\n# In[153]:\n\n\n\n\n\n# In[83]:\n\n\n#Time to build the model\nw2v_model = FastText(min_count=1,workers=multiprocessing.cpu_count()-1, window=10,size=300)\n\n\n# In[84]:\n\n\n#Getting the number of CPU Threads\ncores = multiprocessing.cpu_count()\n\n\n# In[128]:\n\n\n#Building the vocabulary\nw2v_model.build_vocab(wordList)\n\n\n# In[131]:\n\n\n#getting the keys from the vocabulary\nkeys= w2v_model.wv.vocab.keys()\n\n\n# In[129]:\n\n\n#time to train the model\nw2v_model.train(wordList, total_examples=w2v_model.corpus_count, epochs=50)\n\n\n# In[17]:\n\n\n#this function basically extracts only the sense embeddings from the vocabulary; it takes a word as a parameter and returns the list of senses\ndef wordcheck(W):\n    \n    words=[]\n    for word in keys:\n        temp=word.split(\":\")\n        wordC=temp[0].split(\"_\")\n        if wordC[-1]=='bn':\n            wordC=wordC[:-1] #drop the trailing 'bn' tag of the sense key\n        if len(temp)>1 and W.lower()==\"_\".join(wordC).lower():\n            words.append(word)\n    return words\n\n\n# In[ ]:\n\n\n#The second method is to get the senses from WordNet itself\n\n\n# In[27]:\n\n\nfrom nltk.corpus import wordnet as wn\nimport nltk\nnltk.download('wordnet')\n\n\n# In[46]:\n\n\nbabelnet=[]\nwordn=[]\nwith open('bn2wn_mapping.txt', 'r') as fp:\n    for line in fp:\n        provisore=line.split()\n        babelnet.append(provisore[0])\n        wordn.append(provisore[1])\n\n\n# In[100]:\n\n\ndef wordcheck2(W1):\n    lemmas=[]\n\n    offset=[]\n    synsets=wn.synsets(W1)\n    for syn in synsets:\n        offset.append(str(syn.offset()).zfill(8)+'n')\n    for sys in synsets:\n        lemmas.append(sys.lemma_names())\n    newsyn=[]\n\n    countoff=0\n    for elem in offset:\n        count=0\n        countoff+=1\n        for elem2 in wordn:\n            count+=1\n            if elem==elem2:\n                for lem in lemmas[countoff-1]:\n                    newsyn.append(str(lem)+'_'+str(babelnet[count-1]))\n    return newsyn\n\n\n# In[ ]:\n\n\n### END OF WORDNET SYNSET EXTRACTION#####\n\n\n# In[117]:\n\n\n#The function to check for similarity\ndef checksimilarity(bank,money):\n    x=0\n    temp=[]\n    if bank==[] or money==[]:\n        temp.append(-1)\n    else:\n        for elem in bank:\n            for elem2 in money:\n                \n                temp.append(w2v_model.wv.similarity(elem, elem2))\n                if w2v_model.wv.similarity(elem, elem2)==None:\n                    temp.append(-1)\n    return temp \n    \n\n\n# In[134]:\n\n\n#This uses the second method of sense extraction\ndef checksimilarity2(bank,money):\n    x=0\n    temp=[]\n    if bank==[] or money==[]:\n        temp.append(-1)\n    else:\n        for elem in bank:\n            for elem2 in money:\n                if elem in keys and elem2 in keys:\n                    \n                    \n                    temp.append(w2v_model.wv.similarity(elem, elem2))\n                    if w2v_model.wv.similarity(elem, elem2)==None:\n                        temp.append(-1)\n    return temp \n    \n\n\n# In[157]:\n\n\n#getting the testing data from the 
file\ncontent=[]\n\nwith open('combined.tab') as f:\n linez=f.readlines()\n#print(linez)\nfor elem in linez:\n content.append(elem.replace('\\n','').split('\\t'))\n\ncontent.pop(0)\n\n\n# In[163]:\n\n\n#time to check for similarity\nsim=[]\nc=0\nfor comb in content:\n c+=1\n a=wordcheck(comb[0])\n b=wordcheck(comb[1])\n x=checksimilarity(a,b)\n if x==[]:\n sim.append(-1)\n else:\n sim.append(np.max(x))\n\n\n# In[164]:\n\n\nfor i in range(len(content)):\n content[i].append(str(sim[i]))\n\n\n# In[165]:\n\n\n#append everything to the testing data\na1=[]\na2=[]\nfor comb in content:\n a1.extend(comb[2].split())\n a2.extend(comb[3].split())\n\n\n# In[166]:\n\n\nfrom scipy.stats import spearmanr\n\n\n# In[170]:\n\n\n#Calculate the spearman correlation\nprint(spearmanr(a1,a2)[0])\n\n\n# In[23]:\n\n\n#Save model and weights\nw2v_model.wv.save_word2vec_format('embeddings.vec', binary=False)\n\n\n# In[24]:\n\n\nw2v_model.save(\"word2vec.model\")\n\n\n# In[ ]:\n\n\n#this function is used in case an existing embeddings.vec file is present. \nfrom gensim.models import KeyedVectors\nw2v_model = KeyedVectors.load_word2vec_format('embeddings.vec', binary=False)\n\n", "repo_name": "jorisdemiraj/sense-embeddings", "sub_path": "code/Script.py", "file_name": "Script.py", "file_ext": "py", "file_size_in_byte": 7682, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "48", "api": [{"api_name": "logging.basicConfig", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 18, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.iterparse", "line_number": 49, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 49, "usage_type": "name"}, {"api_name": "IPython.display.clear_output", "line_number": 56, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 145, "usage_type": "call"}, {"api_name": "gensim.models.FastText", "line_number": 158, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 165, "usage_type": "call"}, {"api_name": "nltk.download", "line_number": 217, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet.synsets", "line_number": 239, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 239, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 335, "usage_type": "call"}, {"api_name": "scipy.stats.spearmanr", "line_number": 366, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 387, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors", "line_number": 387, "usage_type": "name"}]} +{"seq_id": "71999120787", "text": "import re\nfrom bson import json_util\nfrom flask import Flask, json, jsonify\nimport database\nimport json\nfrom bson.json_util import loads, dumps\n\n\napp = Flask(__name__)\n\n\ndef parseJson(data):\n return json.loads(json_util.dumps(data))\n\n\n@app.route(\"/notes\")\ndef getNotes():\n data = database.FindAllNotes()\n parsed = parseJson(data)\n return jsonify(parsed)\n\n@app.route(\"/notes/\")\ndef deleteNote(note):\n database.DeleteFromNotes(note)\n\n@app.route('/todo')\ndef getTodo():\n data = database.FindAllTodo()\n parsed = parseJson(data)\n return jsonify(parsed)\n\n@app.route('/todo/')\ndef deleteTodo(todo):\n database.DeleteFromTodo(todo)\n\n@app.route('/calendar')\ndef getCalendar():\n data = database.FindAllCalendar()\n parsed = parseJson(data)\n return jsonify(parsed)\n\n@app.route('/calendar/')\ndef 
deleteCalendar(reminder):\n database.DeleteFromCalandar(reminder)\n\n# @app.route(\"/notes\")\n# def getNotes():\n# return {\"notes\": [ {\"notename\" : \"name\", \"note\" : \"test\"}]}\n\nif __name__ == '__main__':\n app.run(debug=True)", "repo_name": "masonbrad831/CoeusWebsite", "sub_path": "server/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1088, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 13, "usage_type": "call"}, {"api_name": "bson.json_util.dumps", "line_number": 13, "usage_type": "call"}, {"api_name": "bson.json_util", "line_number": 13, "usage_type": "name"}, {"api_name": "database.FindAllNotes", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 20, "usage_type": "call"}, {"api_name": "database.DeleteFromNotes", "line_number": 24, "usage_type": "call"}, {"api_name": "database.FindAllTodo", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 30, "usage_type": "call"}, {"api_name": "database.DeleteFromTodo", "line_number": 34, "usage_type": "call"}, {"api_name": "database.FindAllCalendar", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 40, "usage_type": "call"}, {"api_name": "database.DeleteFromCalandar", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "15227104011", "text": "r\"\"\" Directive for dynamic generation of tables from code\n\nExample::\n\n .. dynamic-math-table:: **Title of table**\n :header: \"\", \"First\", \"Second\"\n :raw-cols: 0\n\n # raw-cols above specifies columns that don't have math symbols\n import sympy\n a, b, c, d = sympy.symbols(r'\\alpha, \\beta, \\gamma, \\delta')\n [[\"one\", a, b], [\"two\", c, d]]\n\"\"\"\n\nfrom docutils import nodes, statemachine\nfrom docutils.parsers.rst.directives.tables import CSVTable\nfrom docutils.parsers.rst import directives\n\nfrom docutils.utils import SystemMessagePropagation\n\nfrom texext.mathcode import eval_code\n\ndef nonnegative_int_list(argument):\n \"\"\"\n Converts a space- or comma-separated list of values into a Python list\n of integers.\n\n (Directive option conversion function.)\n\n Raises ValueError for values that aren't non-negative integers.\n \"\"\"\n if ',' in argument:\n entries = argument.split(',')\n else:\n entries = argument.split()\n return [directives.nonnegative_int(entry) for entry in entries]\n\n\nclass DynamicTable(CSVTable):\n option_spec = {'header-rows': directives.nonnegative_int,\n 'stub-columns': directives.nonnegative_int,\n 'header': directives.unchanged,\n 'widths': directives.positive_int_list,\n 'file': directives.path,\n 'url': directives.uri,\n 'encoding': directives.encoding,\n 'class': directives.class_option,\n 'name': directives.unchanged,\n # field delimiter char\n 'delim': directives.single_char_or_whitespace_or_unicode,\n # treat whitespace after delimiter as significant\n 'keepspace': directives.flag,\n # text field quote/unquote char:\n 'quote': directives.single_char_or_unicode,\n # char used to escape delim & quote as-needed:\n 'escape': directives.single_char_or_unicode,\n 'newcontext': directives.flag,\n }\n\n def get_plot_context(self):\n # First try dyntable_plot_context dictionary\n plot_context = setup.config.dyntable_plot_context\n if plot_context is not None:\n # Plot context is a string naming a module attribute\n parts = 
plot_context.split('.')\n mod_name, el_name = '.'.join(parts[:-1]), parts[-1]\n mod = __import__(mod_name, globals(), locals(), el_name)\n return getattr(mod, el_name)\n # Default to matplotlib plot_context dictionary\n from matplotlib.sphinxext.plot_directive import plot_context\n return plot_context\n\n def get_context(self, newcontext=False):\n if setup.config.dyntable_use_plot_ns:\n plot_context = self.get_plot_context()\n else:\n plot_context = setup.dyntable_code_context\n if newcontext:\n plot_context.clear()\n return plot_context\n\n def run(self):\n self.check_requirements()\n title, messages = self.make_title()\n table_head, max_header_cols = self.process_header_option()\n rows, source = self.get_rows()\n max_cols = max(len(row) for row in rows)\n max_cols = max(max_cols, max_header_cols)\n header_rows = self.options.get('header-rows', 0)\n stub_columns = self.options.get('stub-columns', 0)\n self.check_table_dimensions(rows, header_rows, stub_columns)\n table_head.extend(rows[:header_rows])\n table_body = rows[header_rows:]\n col_widths = self.get_column_widths(max_cols)\n # Deal with differences between docutils 0.12 and 0.13.1\n kwargs = {}\n if isinstance(col_widths, tuple): # 0.13.1\n kwargs['widths'], col_widths = col_widths\n self.extend_short_rows_with_empty_cells(max_cols,\n (table_head, table_body))\n table = (col_widths, table_head, table_body)\n table_node = self.state.build_table(table, self.content_offset,\n stub_columns, **kwargs)\n table_node['classes'] += self.options.get('class', [])\n self.add_name(table_node)\n if title:\n table_node.insert(0, title)\n return [table_node] + messages\n\n def get_rows(self):\n \"\"\"\n Get rows as list of lists or array from the directive content\n \"\"\"\n if not self.content:\n error = self.state_machine.reporter.warning(\n 'The \"%s\" directive requires content; none supplied.'\n % self.name, nodes.literal_block(\n self.block_text, self.block_text), line=self.lineno)\n raise SystemMessagePropagation(error)\n want_new = True if 'newcontext' in self.options else False\n context = self.get_context(want_new)\n source = self.content.source(0)\n output = eval_code('\\n'.join(self.content), context)\n rows = self._process_output(output)\n rows = [] if rows is None else rows\n return self._process_rows(rows, source), source\n\n def _process_rows(self, rows, source):\n \"\"\" Add table cell boilerplace to cells in rows\n \"\"\"\n p_rows = []\n for row in rows:\n p_row = []\n for cell in row:\n cell_content = statemachine.StringList(\n cell.splitlines(),\n source=source)\n p_row.append((0, 0, 0, cell_content))\n p_rows.append(p_row)\n return p_rows\n\n def _process_output(self, output):\n \"\"\" Apply any post-processing to output of code\n \"\"\"\n return output\n\n\nclass DynamicMathTable(DynamicTable):\n option_spec = DynamicTable.option_spec.copy()\n option_spec['raw-cols'] = nonnegative_int_list\n\n def _process_output(self, output):\n \"\"\" Apply sympy.latex and add math role to selected columns\n \"\"\"\n raw_cols = self.options.get('raw-cols', [])\n from sympy import latex\n rows = []\n for row in output:\n cells = []\n for col_no, cell in enumerate(row):\n if col_no not in raw_cols:\n cell = ':math:`{}`'.format(latex(cell))\n cells.append(cell)\n rows.append(cells)\n return rows\n\n\ndef setup(app):\n # Global variables\n setup.app = app\n setup.config = app.config\n setup.confdir = app.confdir\n # Workspace for code run in dyntable blocks\n setup.dyntable_code_context = dict()\n app.add_directive('dynamic-table', 
DynamicTable)\n app.add_directive('dynamic-math-table', DynamicMathTable)\n app.add_config_value('dyntable_use_plot_ns', False, 'env')\n app.add_config_value('dyntable_plot_context', None, 'env')\n", "repo_name": "matthew-brett/teaching", "sub_path": "sphinxext/dyntables.py", "file_name": "dyntables.py", "file_ext": "py", "file_size_in_byte": 6848, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "48", "api": [{"api_name": "docutils.parsers.rst.directives.nonnegative_int", "line_number": 36, "usage_type": "call"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 36, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.tables.CSVTable", "line_number": 39, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.nonnegative_int", "line_number": 40, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 40, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.nonnegative_int", "line_number": 41, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 41, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.unchanged", "line_number": 42, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 42, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.positive_int_list", "line_number": 43, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 43, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 44, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.uri", "line_number": 45, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 45, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.encoding", "line_number": 46, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 46, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.class_option", "line_number": 47, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 47, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.unchanged", "line_number": 48, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 48, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.single_char_or_whitespace_or_unicode", "line_number": 50, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 50, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.flag", "line_number": 52, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 52, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.single_char_or_unicode", "line_number": 54, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 54, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.single_char_or_unicode", "line_number": 56, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 56, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.flag", "line_number": 57, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 57, "usage_type": 
"name"}, {"api_name": "matplotlib.sphinxext.plot_directive.plot_context", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.sphinxext.plot_directive.plot_context", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.sphinxext.plot_directive.plot_context", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.sphinxext.plot_directive.plot_context.clear", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.sphinxext.plot_directive.plot_context", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.sphinxext.plot_directive.plot_context", "line_number": 80, "usage_type": "name"}, {"api_name": "docutils.nodes.literal_block", "line_number": 117, "usage_type": "call"}, {"api_name": "docutils.nodes", "line_number": 117, "usage_type": "name"}, {"api_name": "docutils.utils.SystemMessagePropagation", "line_number": 119, "usage_type": "call"}, {"api_name": "texext.mathcode.eval_code", "line_number": 123, "usage_type": "call"}, {"api_name": "docutils.statemachine.StringList", "line_number": 135, "usage_type": "call"}, {"api_name": "docutils.statemachine", "line_number": 135, "usage_type": "name"}, {"api_name": "{'plot_context': 'matplotlib.sphinxext.plot_directive.plot_context'}.option_spec.copy", "line_number": 149, "usage_type": "call"}, {"api_name": "{'plot_context': 'matplotlib.sphinxext.plot_directive.plot_context'}.option_spec", "line_number": 149, "usage_type": "attribute"}, {"api_name": "sympy.latex", "line_number": 162, "usage_type": "call"}]} +{"seq_id": "74799769105", "text": "'''\nCreated on 03.06.2017\n\n@author: Dennis Struhs\n\n'''\n\nimport re\nimport json\n\n#===============================================================================\n# RegEx Pattern to retrieve the info from the file\n# extractFileInfo = re.match('.*X:\\s*(\\d+.\\d+).*Y:\\s*(\\d+.\\d+', line)\n#===============================================================================\n\n#===============================================================================\n# Help Classes\n#===============================================================================\n\nclass Coordinate:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n#Offers serialization of the Coordinate custom Object\nclass CustomJSONEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, Coordinate):\n return [o.x, o.y]\n return CustomJSONEncoder(self, o)\n\n#===============================================================================\n# Main class\n#===============================================================================\nclass CameraGCodeExtraction:\n\n desiredExtruder = ''\n z_stepping = 0.0\n currentLayer = 1\n Z_layer = 0.0\n\n #Z_layer = 1.0 #Define the Z-Layer Threshold where the T1 extruder is working at\n current_extruder = '' #Stores the currently selected Extruder beein T0/T1\n currentExtruderZPos = 0.0 #Stores the last Z Position of the extruder\n lastExtruderZPos = 0.0\n\n shortCoordList = None\n masterCoordList = None\n\n #desiredExtruder = raw_input('Enter your input Extruder: ')\n #Z_layer = float(raw_input('Enter your input Layer: '))\n\n \"\"\"The instantiation function for the incoming values\n :param zSteps: Holds the value how thick a layer is. For example 0.25\n :param targetExtruder: Specifies the monitored Extruder. 
For example T0\"\"\"\n def __init__(self,zSteps,targetExtruder):\n self.desiredExtruder = targetExtruder\n self.z_stepping = float(zSteps)\n self.Z_layer = self.z_stepping * self.currentLayer\n # Initialize lists\n self.shortCoordList = []\n self.masterCoordList = []\n\n \"\"\"The main function to handle the extraction of the GCode information from\n a given testfile\n :param Data: Contains the textdata for processing\"\"\"\n def extractCameraGCode(self, Data):\n zWorkList = self.findAllZValues(Data)\n for eachItem in zWorkList:\n self.Z_layer = eachItem\n self.findAllGCodesInLayer(Data)\n self.swapfirstArrayEntries()\n\n \"\"\"Finds all layers for further processing\n :param Data: Contains the textdata for processing\"\"\"\n def findAllZValues(self,Data):\n zValueList = []\n previousZ = 0.0\n currentZ = 0.0\n for line in Data:\n z_values = re.match('G1 Z(\\d+.\\d+)', line)\n\n if(self.validZValues(z_values)):\n # Check if new Z value is smaller to filter unwanted values\n previousZ = currentZ\n currentZ = z_values.group(1)\n if(not zValueList.__contains__(float(z_values.group(1)))\n and currentZ < previousZ):\n zValueList.append(float(z_values.group(1)))\n return zValueList\n\n \"\"\"Do some RegEx to find the entries of value for us.\n :param Data: Contains the textdata for processing\n \"\"\"\n def findAllGCodesInLayer(self, Data):\n for line in Data:\n self.extruder_state = re.match('T\\d', line)\n z_values = re.match('G1 Z(\\d+.\\d+)', line)\n #Get the currently selected extruder from File (T1 or T0)\n if self.extruder_state != None:\n self.current_extruder = self.extruder_state.group(0)\n #Get the last Z Position value of the T0 exrruder\n if self.properSelectedExtruder(z_values):\n self.currentExtruderZPos = float(z_values.group(1))\n\n #Get the X and Y values of the extruder at the specified layer\n if self.extruder_working(self.desiredExtruder):\n xy_values = re.match('G1 X(\\d+.\\d+) Y(\\d+.\\d+) E\\d+.\\d+', line)\n if xy_values != None:\n newCoord = Coordinate(\n float(xy_values.group(1)),\n float(xy_values.group(2)))\n self.shortCoordList.append(newCoord)\n\n # Make sure the list has enough entries\n if(len(self.shortCoordList) >= 2):\n self.masterCoordList.append(self.shortCoordList)\n\n self.shortCoordList = []\n\n#===============================================================================\n# Help functions\n#===============================================================================\n\n def swapfirstArrayEntries(self):\n mylist = self.masterCoordList\n mylist[0],mylist[1] = mylist[1],mylist[0]\n\n def getCoordList(self):\n return self.masterCoordList\n\n def properSelectedExtruder(self, z_values):\n return self.validZValues(z_values) and self.current_extruder == self.desiredExtruder\n\n def validZValues(self, z_values ):\n return z_values != None\n\n def extruder_working(self, inputExtruder):\n return self.currentExtruderZPos == self.Z_layer and self.current_extruder == inputExtruder\n\n#===============================================================================\n# writeFiles(CoordList, desiredExtruder + \"_ExtruderPositions.txt\")\n# writeFiles(shortCoordList, desiredExtruder + \"_positions.txt\")\n#===============================================================================\n\n#===============================================================================\n# Old legacy Code\n#===============================================================================\n#===============================================================================\n# #Do some RegEx to 
find the entries of value for us.\n# for line in Data:\n# extruder_state = re.match( 'T\\d', line )\n# z_values = re.match( 'G1 Z(\\d+.\\d+)', line )\n#\n# #Get the currently selected extruder from File (T1 or T0)\n# if extruder_state != None:\n# current_extruder = extruder_state.group( 0 )\n#\n# #Get the last Z Position value of the T1 exrruder\n# if validZValues( z_values ) and current_extruder == desiredExtruder:\n# currentExtruderZPos = float( z_values.group( 1 ) )\n#\n# #Get the X and Y values of the extruder at the specified layer\n# if extruder_working(desiredExtruder):\n# xy_values = re.match( 'G1 X(\\d+.\\d+) Y(\\d+.\\d+)', line )\n# if xy_values != None:\n# CoordList.append(desiredExtruder\n# + ' Extruder is at X: {}'.format( xy_values.group(1) )\n# + ', Y: {}'.format( xy_values.group(2) )\n# + ', in Z-Layer: {}'.format( currentExtruderZPos ) + '\\n')\n# newCoord = Coordinate(xy_values.group(1),xy_values.group(2))\n# shortCoordList.append(newCoord)\n#===============================================================================\n", "repo_name": "Fragjacker/OctoCamDox", "sub_path": "octoprint_OctoCamDox/GCode_processor.py", "file_name": "GCode_processor.py", "file_ext": "py", "file_size_in_byte": 6993, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "json.JSONEncoder", "line_number": 26, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 81, "usage_type": "call"}, {"api_name": "re.match", "line_number": 97, "usage_type": "call"}, {"api_name": "re.match", "line_number": 98, "usage_type": "call"}, {"api_name": "re.match", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "27269149802", "text": "\n#____LIBRARIES YOU MIGHT NEED____\n\nimport numpy as np\nimport matplotlib.pyplot as pl\nfrom mpl_toolkits import mplot3d\n\n#____NUMERICALLY REPRESENTING FUCTIONS____\n\n#Discretiseing axies:\n\ndef discretiseAxis1(x_0, x_n, numNodes): #Discretises the axis in the range defined by x_0 and x_n to numNodes number of nodes. (No need to calculate the step size)\n x = np.linspace(x_0,x_n, numNodes)\n return x\n\n\ndef discretiseAxis2(x_0, x_n, stepSize): #Discretises the axis in the range defined by x_0 and x_n with a step size of stepSize. 
(No need to calculate the number of nodes)\n    x = np.arange(x_0, x_n, stepSize)\n    return x\n\n#GENERATING MESHES:\n    \n#For f(x,y):\n    \ndef makeMesh(x_0, x_n, y_0, y_n, stepSize):\n    #Defining the spatial domains of both axes and discretising them:\n    x = np.arange(x_0, x_n, stepSize)\n    y = np.arange(y_0, y_n, stepSize) \n\n    #Creating the grids:\n    (Xg, Yg) = np.meshgrid(x,y) #This function creates the 2d grids as needed\n    return Xg, Yg #IMPORTANT: Use numpy library for the math functions when working with these grids (Only this will do it mesh wise)\n\n#For f(x,y,t)\n\ndef makeMeshWithTime(x_0, x_n, y_0, y_n, t_0, t_n, stepSizeXY, stepSizeT):\n    #Defining the spatial domains of both axes and discretising them:\n    x = np.arange(x_0, x_n, stepSizeXY)\n    y = np.arange(y_0, y_n, stepSizeXY) \n    #Defining the time domain and discretising it:\n    t = np.arange(t_0, t_n, stepSizeT)\n    \n    #Creating the grids:\n    (Xg, Yg, Tg) = np.meshgrid(x, y, t)\n    \n    return Xg, Yg, Tg\n    \n    \n\n\n#FINDING THE \"LENGTHS IN ALL DIRECTIONS\" OF MESHES:\n\ndef lenGrid(Xg): \n    print(np.shape(Xg))\n    \n    \n#PLOTTING RESULTS IN 3D:\n    \ndef threeDPlot(Xg, Yg, Surface): #This function is for a grid that describes a f(x,y)\n    \n    #Plotting S\n    print('S(x,y) is the following in 3D:')\n    ax = pl.axes(projection='3d')\n    ax.plot_surface(Xg,Yg,Surface)\n    \n    #Plotting the contour of S\n    pl.show() # make a new window plot\n    print('S(x,y) has the following contour plot:')\n    pl.contour(Xg,Yg,Surface) # plot contours\n    \ndef threeDPlotAtTimeT(Xg, Yg, Tg, Surface, atT): #This function is for a grid that describes a f(x,y,t)\n    print(f'The 3D plot of R at t={atT}s will be:')\n    ax = pl.axes(projection='3d')\n    ax.plot_surface(Xg[:,:,atT], Yg[:,:,atT], Surface[:,:,atT])\n    \n    \n    #MAKING A 2D VECTOR SPACE\n\ndef twoDVectorInAPlane(x_0, x_n, y_0, y_n, stepSize):\n    #Defining the spatial domains of both axes and discretising them:\n    x = np.arange(x_0, x_n, stepSize)\n    y = np.arange(y_0, y_n, stepSize) \n    #Storing their lengths:\n    LenX = len(x)\n    LenY = len(y)\n\n    #Creating the mesh for the plane\n    Xg, Yg = np.meshgrid(x,y)\n    \n    # allocate an array for the vector field F: size is Nx by Ny by 2 (2 is because the vector has two components, i and j)\n    F = np.ndarray((LenX,LenY, 2))\n    \n    #Calculating the values of the vector field (!Change the expressions!):\n    F[:,:,0] = Xg #i-component, WATCH OUT THE INDEXING IS FROM 0\n    F[:,:,1] = Yg #j-component\n    \n    return Xg, Yg, F #Return the grids and the field so they can be passed to the plotting function below\n    \n#PLOTTING A 2D VECTOR SPACE\n\ndef plot2DVectorSpace(Xg, Yg, F):\n    #Plotting the values:\n\n    print('The vector field in Problem a), plotted, looks like the following:')\n    pl.quiver(Xg, Yg, F[:,:,0], F[:,:,1])\n\n    pl.show()\n    print('The vector field in Problem a), plotted as streamlines, looks like the following:')\n    pl.streamplot(Xg, Yg, F[:,:,0], F[:,:,1])\n    \n    \n    \n    #____GAUSSIAN ELIMINATION____\n    \ndef GaussianElimination(A,b):\n    \n    #The number of equations in the system is\n    n = len(A)\n\n    #Eliminating the matrix so that it becomes upper triangular form\n    for j in range(0,n):\n        for i in range(j+1, n):\n            #Save p so you can use it even after you changed A (p is the pivot)\n            p = (A[i,j] / A[j,j])\n            A[i] = A[i] - p * A[j]\n            #Adjusting vector b as well\n            b[i] = b[i] - p * b[j]\n    \n    #Finding the values of x_i for each case    \n    \n    x = np.zeros(n)\n    for i in range(n-1,-1, -1):\n        x[i] = b[i] / A[i][i]\n        for j in range(i+1,n):\n            x[i] = x[i] - x[j] * A[i][j] / A[i][i]\n    return x\n\n#IMPORTANT: When defining the arrays to use in this function use np.array and set the type to float !!! 
Like this\n    \n    #A = np.array([[8, -2, 1, 3], [1, -5, 2, 1],[-1, 2, 7, 2],[2 ,-1, 3, 8]],dtype=float)\n    #b = np.array([9, -7, -1, 5],dtype=float)\n\n    #____READING DATA FROM FILES____\n\n#READING IN EACH LINE INTO AN ARRAY\ndef ReadFileToArray (FileName):\n    f = open(FileName, \"r\")\n    FileArr = []\n    for line in f:\n        FileArr.append(float(line.rstrip())) #In case you need int modify this line, float is the most general so I went with that one here :D\n    f.close()\n    return FileArr\n\n    #____NUMERICAL METHODS____\n\n#FORWARDS NUMERICAL DIFFERENTIATION\n\ndef forwardsDifferentiation(xn,yn):\n    \n    dydx = []\n    \n    for i in range(len(xn)-1):\n        dydx.append((yn[i+1] - yn[i]) / (xn[i+1] - xn[i]))\n    \n    return dydx\n\n#FORWARDS NUMERICAL DIFFERENTIATION K TIMES (THIS USES THE FUNCTION ABOVE) NOTE: THIS IS VERY EXPENSIVE COMPUTATIONALLY BUT WILL WORK, THE BETTER VERSION BELOW WILL USE THE BINOMIAL SERIES TO QUICKEN THINGS IF THE STEP SIZE IS EQUAL EVERYWHERE\n\ndef forwardsDifferentiationKTimes(xn,yn, k):\n    dydx = forwardsDifferentiation(xn,yn)\n    for i in range(1, k):\n        dydx = forwardsDifferentiation(xn[:-i], dydx) #The x-s have to be split since we lose a node with each order of differentiation\n    return dydx\n\n#FORWARDS NUMERICAL DIFFERENTIATION K TIMES EFFICIENT WAY WITH EQUIDISTANT NODES\n#We will be using binomial coefficients to speed up the process and this way we will be able to jump to the right derivative at first:\n#For this we will need a few functions that help to find the coefficients\n\ndef factorial(n):\n    if n==0:\n        return 1\n    else:\n        return n*factorial(n-1)\n    \ndef binomialCoefficients(n, k):\n    nUnderK = factorial(n) / (factorial(k) * factorial(n - k))\n    return nUnderK\n    \n#Now for the actual function:\n    \ndef forwardsDifferentiationKTimesEfficient(xn, yn, k):\n    dydx = []\n    \n    for n in range(len(xn)-k):\n        y_ = 0\n        for i in range(k+1):\n            y_ += (-1)**i * binomialCoefficients(k, i) * yn[n+k-i]  \n        dydx.append(y_ / (xn[1] - xn[0])**k)\n    return dydx\n\n#BACKWARDS NUMERICAL DIFFERENTIATION\n\ndef backwardsDifferentiation(xn,yn):\n    \n    dydx = []\n    \n    for i in range(1,len(xn)):\n        dydx.append((yn[i] - yn[i-1]) / (xn[i] - xn[i-1]))\n    \n    return dydx\n\n#BACKWARDS NUMERICAL DIFFERENTIATION K TIMES (THIS USES THE FUNCTION ABOVE) NOTE: THIS IS VERY EXPENSIVE COMPUTATIONALLY BUT WILL WORK, THE BETTER VERSION BELOW WILL USE THE BINOMIAL SERIES TO QUICKEN THINGS IF THE STEP SIZE IS EQUAL EVERYWHERE\n\ndef backwardsDifferentiationKTimes(xn,yn, k):\n    dydx = backwardsDifferentiation(xn,yn)\n    for i in range(1, k):\n        dydx = backwardsDifferentiation(xn[i:], dydx) #The x-s have to be split since we lose a node with each order of differentiation\n    return dydx\n\n#BACKWARDS NUMERICAL DIFFERENTIATION K TIMES EFFICIENT WAY WITH EQUIDISTANT NODES. 
NOTE: this uses the factorial and binomialCoefficients functions as well\n \ndef backwardsDifferentiationKTimesEfficient(xn, yn, k):\n    dydx = []\n    \n    for n in range(k, len(xn)):\n        y_ = 0\n        for i in range(k+1):\n            y_ += (-1)**i * binomialCoefficients(k, i) * yn[n-i]  \n        dydx.append(y_ / (xn[1] - xn[0])**k)\n    return dydx\n\n#RIGHT RIEMANN SUM\n\ndef rightRiemannSum(x,y):\n    #Finding the step sizes between x-s\n    h = []\n    for i in range(len(x)-1):\n        h.append(x[i+1]-x[i])\n    #Calculating the right Riemann sum:\n    rRSum = 0\n    for i in range(len(x)-1):\n        rRSum += y[i+1] * h[i]\n    \n    return rRSum\n\n#LEFT RIEMANN SUM:\n    \ndef leftRiemannSum(x,y):\n    #Finding the step sizes between x-s\n    h = []\n    for i in range(len(x)-1):\n        h.append(x[i+1]-x[i])\n    #Calculating the left Riemann sum:\n    lRSum = 0\n    for i in range(len(x)-1):\n        lRSum += y[i] * h[i]\n    \n    return lRSum\n\n#UPPER RIEMANN SUM\n    \ndef upperRiemannSum(x,y):\n    #Finding the step sizes between x-s\n    h = []\n    for i in range(len(x)-1):\n        h.append(x[i+1]-x[i])\n    #Calculating the upper Riemann sum:\n    upperRSum = 0\n    for i in range(len(x)-1):\n        rRSum = y[i+1] * h[i]\n        lRSum = y[i] * h[i]\n        if rRSum > lRSum:\n            upperRSum += rRSum\n        else:\n            upperRSum += lRSum\n    return upperRSum\n\n#LOWER RIEMANN SUM  \n    \ndef lowerRiemannSum(x,y):\n    #Finding the step sizes between x-s\n    h = []\n    for i in range(len(x)-1):\n        h.append(x[i+1]-x[i])\n    #Calculating the lower Riemann sum:\n    upperRSum = 0\n    for i in range(len(x)-1):\n        rRSum = y[i+1] * h[i]\n        lRSum = y[i] * h[i]\n        if rRSum < lRSum:\n            upperRSum += rRSum\n        else:\n            upperRSum += lRSum\n    return upperRSum\n\n\n\n#TRAPEZIUM RULE FOR EQUIDISTANT NODES\n\ndef trapeziumRuleWithEquidistantNodes(x,y):\n    \n    #Finding the number of nodes (n) and the subinterval (h) (here assumed to be the same everywhere)\n    n = len(x)\n    h = x[1] - x[0]\n    \n    #Adding the first and last datapoints \n    integral = h * (y[0] / 2 + y[-1]/2)\n    \n    #Adding all other datapoints, (the sum part of the formula)\n    for i in range(1,n-1):\n        integral += h * y[i] #IMPORTANT NOTE: Here I multiply each value by h and thus open the bracket; this is different from not opening the bracket and multiplying by h at the very end\n    \n    return integral\n\n#TRAPEZIUM RULE FOR NON-EQUIDISTANT NODES\n\ndef trapeziumRuleGeneral(x,y):\n    \n    #Finding the number of nodes (n) and the subinterval (h) between each pair of neighbouring nodes (Here the nodes might not be equidistant!!)\n    n = len(x)\n    h = []\n    for i in range(n-1):\n        h.append(x[i+1] - x[i])  \n    \n    integral = 0\n    \n    for i in range(n-1):\n        \n        integral += ((y[i+1]+y[i]) * h[i]) / 2\n    \n    return integral\n\n#TRAPEZIUM RULE FOR NON-EQUIDISTANT NODES, DOUBLE INTEGRAL:\n\ndef trapeziumRuleDoubleIntegral(x_0, x_n, stepSizeXY):\n    # set the x range, not including the boundaries\n    x = np.arange(x_0 + stepSizeXY, x_n, stepSizeXY)\n    N = len(x)\n    # the y range depends on the various values of x, and cannot be fixed here\n    \n    # integrate in dy, for all the values of x, i.e. 
find G(x)\n    \n    G = np.zeros(N)\n    # for every x\n    for i in range(0,N):\n        # determine the boundaries m and p for this x, NOTE: here I calculate these to be where the function slices the \n        mx = 423 # CHANGE THIS TO THE NUMBER NEEDED OR IF YOU WANT VOLUME UNDER A DOME JUST THE FUNCTION z(x,y) with y =0 !!!\n        px = 423 # CHANGE THIS TO THE NUMBER NEEDED OR IF YOU WANT VOLUME UNDER A DOME JUST THE FUNCTION z(x,y) with y =0 !!!\n        # set the y points for this x, not including the boundaries\n        y = np.arange(-mx+stepSizeXY,px,stepSizeXY)\n        z = np.zeros(len(y))\n        # determine the values of the function z(x,y)\n        for j in range(0,len(y)):\n            z[j] = np.sqrt(25-x[i]**2-y[j]**2)  # CHANGE THIS TO THE FUNCTION z(x,y) !!!!\n        \n        # integrate in dy from cx to dx (for this specific x)\n        G[i] = trapeziumRuleGeneral(y,z)   # G(x)\n    \n    # integrate G(x) in dx\n    I = trapeziumRuleGeneral(x,G)\n    return I\n\n#LAGRANGIAN INTERPOLATION (USES BOTH FUNCTIONS BELOW)\n    \ndef Lagrangian(j, xp, xn):\n    Lj = 1\n    for k in range(len(xn)):\n        if k != j:\n            Lj = Lj * (xp - xn[k]) / (xn[j] - xn[k])\n        else:\n            pass\n    return Lj\n\ndef LagrangianInterpolation(xn, yn, x):\n    y = []\n    for i in range(len(x)):\n        a = 0\n        xp = x[i]\n        for j in range(len(yn)):\n            a += yn[j] * Lagrangian(j,xp, xn)\n        y.append(a)\n    return y\n\n#NEWTON FORWARD DIVIDED DIFFERENCE (USES BOTH FUNCTIONS BELOW)\n    \n#Defining the function that will come up with a_i from the expression. (This will take it to the base case every time, but by giving it different inputs we will be able to come to \"different levels\")\n\ndef newtonDevidedDifference(xn, yn):\n    if len(xn) == 1:\n        return yn[0]\n    else:\n        return (newtonDevidedDifference(xn[1:],yn[1:]) - newtonDevidedDifference(xn[0:-1],yn[0:-1])) / (xn[-1]-xn[0])\n    \n#Defining the function that will do the actual interpolation for us:\n    \ndef newtonInterpolation(xn, yn, x):\n    \n    y = [] #making the target array for the interpolation values\n    \n    for k in range(len(x)):\n        y_ = 0\n        for i in range(len(xn)):\n            y__ = newtonDevidedDifference(xn[0:i+1],yn[0:i+1])\n            for j in range(i):\n                y__ = y__ * (x[k] - xn[j])\n            y_ += y__\n        y.append(y_)\n    return y\n\n#Creating cubic splines with the clamped boundary conditions and then interpolating on a given array of x (NOTE: USES THE GAUSSIAN ELIMINATION FUNCTION):\n    \ndef splines(xn,yn, boundaryConditionLower, boundaryConditionUpper, x):\n    \n    #Create the arrays into which the coefficients will go:\n    \n    aj = np.ndarray(len(xn)-1)\n    bj = np.ndarray(len(xn)-1)\n    cj = np.ndarray(len(xn)-1)\n    dj = np.ndarray(len(xn)-1)\n    \n    #Create the matrix A and column vector b that will help find the first derivatives v_j-s\n\n    A = np.zeros((len(xn),len(xn)))\n    b = np.zeros(len(xn))\n    \n    #Inputting the information we know into these matrices:\n    b[0] = boundaryConditionLower\n    b[-1] = boundaryConditionUpper\n    \n    A[0,0] = 1\n    A[-1,-1] = 1\n    \n    #Filling up the rest of the matrix and column vector:\n    \n    for i in range(1,len(xn)-1):\n        A[i,i-1] = 1 / (xn[i]-xn[i-1])\n        A[i,i] = 2 / (xn[i]-xn[i-1]) + 2 / (xn[i+1]-xn[i])\n        A[i,i+1] = 1 / (xn[i+1]-xn[i])\n\n        b[i] = 3 * ((yn[i]-yn[i-1]) / (xn[i]-xn[i-1])**2 + (yn[i+1]-yn[i]) / (xn[i+1]-xn[i])**2 )\n\n    \n    #Solving the resulting system of linear equations:\n    \n    v = GaussianElimination(A,b)\n    \n    #Determining the coefficients using all this:\n    \n    for i in range(len(xn)-1):\n        aj[i] = yn[i]\n        bj[i] = v[i]\n        cj[i] = 3*(yn[i+1]-yn[i])/(xn[i+1]-xn[i])**2 - (v[i+1]+2*v[i])/(xn[i+1]-xn[i])\n        dj[i] = -2 * (yn[i+1] - yn[i]) / (xn[i+1] - xn[i]) ** 3 + (v[i+1] + v[i]) / (xn[i+1] - xn[i]) ** 
2\n    \n    \n    #Interpolate with these:\n    \n    y = np.zeros(len(x))\n    \n    for j in range(len(xn)-1):\n        y[(xn[j]<=x) & (x<=xn[j+1])] = aj[j] + bj[j]*(x[(xn[j]<=x) & (x<=xn[j+1])]-xn[j]) + \\\n        cj[j]*(x[(xn[j]<=x) & (x<=xn[j+1])]-xn[j])**2 + dj[j]*(x[(xn[j]<=x) & (x<=xn[j+1])]-xn[j])**3\n    \n    return y\n\n\n\n    #____WORKING WITH IMAGES____\n\n#Reading in the image into a 3 layered matrix:\n\ndef readImage(fileName):\n    \n    Picture = pl.imread(fileName)\n    \n    return Picture #Remember that the JPG file is just 3 layers of a matrix, each layer represents the RGB code for the pixel it represents\n\n#Displaying a 3 layer matrix as an image:\n\ndef showImage(Picture):\n    \n    pl.imshow(Picture)\n    pl.show() #Make a new kernel for the next thing to display \n    \n#Saving an image (3 layered matrix) to a file:\n    \ndef saveImage(fileName, Picture):\n    \n    pl.imsave(fileName, Picture)\n    \ndef compressingAnImage(Picture, CompressionRatio):\n    smallPicture = np.zeros((int(Picture.shape[1] / CompressionRatio),int(Picture.shape[0] / CompressionRatio),3)) #Making the new target 3 layered array\n\n    smallPicture = Picture[0: Picture.shape[1] : CompressionRatio, 0:Picture.shape[0] : CompressionRatio, :] #Entering only every n-th pixel into the new small image\n\n    pl.imshow(smallPicture) #Showing the new image\n    pl.show() #Make a new kernel for the next thing to display \n\n    \n#RESIZING AN IMAGE WITH INTERPOLATION\n    \n    #Using lagrangian interpolation\n    \ndef imageResizeWithLagrangian(smallPicture, resizeRatio):\n\n    xn = np.ndarray(smallPicture.shape[1]) #Defining the points we know, xn \n    for i in range(smallPicture.shape[1]):\n        xn[i] = i\n    \n    N = resizeRatio #Defining the size increase\n    \n    x = np.linspace(0, smallPicture.shape[1], smallPicture.shape[1] * N) #Defining the x range where we want interpolated values\n    largeImage1 = np.ndarray((smallPicture.shape[0], smallPicture.shape[1] * N, 3)) #Defining the matrix for the new image (only re-sized in the x direction)\n    \n    #Finding the values for the missing points\n    \n    for i in range(3): #There are three layers for RGB\n        for j in range(smallPicture.shape[1]): #Going over each part of the matrix row by row\n            yn = smallPicture[j,:,i]\n            y = LagrangianInterpolation(xn,yn,x)\n            largeImage1[j,:,i] = y\n    \n    pl.imshow(largeImage1.astype(int))\n    pl.show()\n    \n    \n    #Next, in the y direction:\n    \n    xn = np.ndarray(smallPicture.shape[0]) #Defining the points we know, xn \n    for i in range(smallPicture.shape[0]):\n        xn[i] = i\n    \n    \n    \n    x = np.linspace(0, smallPicture.shape[0], smallPicture.shape[0] * N) #Defining the x range where we want interpolated values\n    largeImage2 = np.ndarray((smallPicture.shape[0] * N, smallPicture.shape[1] * N, 3)) #Defining the matrix for the new image (now re-sized in both directions)\n    \n    #Finding the values for the missing points\n    \n    for i in range(3): #There are three layers for RGB\n        for j in range(smallPicture.shape[1] * N): #Going over each part of the matrix row by row\n            yn = largeImage1[:,j,i]\n            y = LagrangianInterpolation(xn,yn,x)\n            largeImage2[:,j,i] = y\n    \n    \n    largeImage2 = np.trunc(largeImage2)\n    largePicture = (largeImage2.astype(int))\n    return largePicture\n    \n    #Using spline interpolation\n    \n\n#First in the x direction\ndef imageResizeWithSplines(smallPicture, resizeRatio):\n\n    xn = np.ndarray(smallPicture.shape[1]) #Defining the points we know, xn \n    for i in range(smallPicture.shape[1]):\n        xn[i] = i\n    \n    N = resizeRatio #Defining the size increase\n    \n    x = np.linspace(0, smallPicture.shape[1], smallPicture.shape[1] * N) #Defining the x range 
where we want interpolated values\n    largeImage1 = np.ndarray((smallPicture.shape[0], smallPicture.shape[1] * N, 3)) #Defining the matrix for the new image (only re-sized in the x direction)\n    \n    #Finding the values for the missing points\n    \n    for i in range(3): #There are three layers for RGB\n        for j in range(smallPicture.shape[1]): #Going over each part of the matrix row by row\n            yn = smallPicture[j,:,i]\n            y = splines(xn,yn,0,0,x)\n            largeImage1[j,:,i] = y\n    \n    pl.imshow(largeImage1.astype(int))\n    pl.show()\n    \n    \n    #Next, in the y direction:\n    \n    xn = np.ndarray(smallPicture.shape[0]) #Defining the points we know, xn \n    for i in range(smallPicture.shape[0]):\n        xn[i] = i\n    \n    \n    \n    x = np.linspace(0, smallPicture.shape[0], smallPicture.shape[0] * N) #Defining the x range where we want interpolated values\n    largeImage2 = np.ndarray((smallPicture.shape[0] * N, smallPicture.shape[1] * N, 3)) #Defining the matrix for the new image (now re-sized in both directions)\n    \n    #Finding the values for the missing points\n    \n    for i in range(3): #There are three layers for RGB\n        for j in range(smallPicture.shape[1] * N): #Going over each part of the matrix row by row\n            yn = largeImage1[:,j,i]\n            y = splines(xn,yn,0,0,x)\n            largeImage2[:,j,i] = y\n    \n    \n    largeImage2 = np.trunc(largeImage2)\n    largePicture = (largeImage2.astype(int))\n    return largePicture\n\n    \n\n    #____INTERPOLATION WITH UNSTRUCTURED GRIDS____\n\n#Testing if 4 points are co-planar\n    \ndef CoPlanarTest (r1,r2,r3,r4): #Enter the 4 points as position vectors\n\n    A = [[r4[0]-r1[0],r4[1]-r1[1], r4[2]-r1[2]],[r4[0]-r2[0],r4[1]-r2[1], r4[2]-r2[2]],[r4[0]-r3[0],r4[1]-r3[1], r4[2]-r3[2]]]\n    print(A)\n    b = np.linalg.det(A)\n    print(b)\n    if b != 0:\n        print(\"The fourth point is not co-planar with the 3 known points\")\n    else:\n        print(\"You good, the fourth is also coplanar\")\n    \n    \n#Interpolation over a triangle with the nearest neighbour method:\n\n\ndef triangleInterponaltionNearestNeighbour(r1,r2,r3,f1,f2,f3,r4): #r1, r2, r3 are the position vectors of the 3 nodes that define the triangle. f1, f2, f3 are the values the function takes at that point. r4 is the position vector of the point of interest. The result will be the interpolated value f4 at r4.\n    \n    #Calculating the weights for each direction based on the distance between the point of interest and the points known\n    w = []\n    w.append(1/(np.sqrt((r1[0] - r4[0]) ** 2 + (r1[1] - r4[1]) ** 2)))\n    w.append(1/(np.sqrt((r2[0] - r4[0]) ** 2 + (r2[1] - r4[1]) ** 2)))  \n    w.append(1/(np.sqrt((r3[0] - r4[0]) ** 2 + (r3[1] - r4[1]) ** 2)))\n    \n    #Calculating the weighted average at the point of interest\n    f4 = (w[0] * f1 + w[1] * f2 + w[2] * f3) / (w[0] + w[1] + w[2])\n    \n    return f4\n\n#Interpolation over a triangle with the Barycentric coordinates method:\n\ndef triangleInterponaltionBarycentric (r1,r2,r3,f1,f2,f3,r4):#r1, r2, r3 are the position vectors of the 3 nodes that define the triangle. f1, f2, f3 are the values the function takes at that point. r4 is the position vector of the point of interest. 
The result will be the interponlated value f4 at r4.\n \n #Instead of doing the matrix manipulations, I just put in the algebraic result to find the lambda values:\n lamda1 = (((r2[1] - r3[1]) * (r4[0] -r3[0]) + (r3[0]-r2[0]) * (r4[1] - r3[1])) / (((r2[1] - r3[1]) * (r1[0] - r3[0])) + (r3[0] - r2[0]) * (r1[1] - r3[1])))\n lamda2 = (((r3[1] - r1[1]) * (r4[0] -r3[0]) + (r1[0]-r3[0]) * (r4[1] - r3[1])) / (((r2[1] - r3[1]) * (r1[0] - r3[0])) + (r3[0] - r2[0]) * (r1[1] - r3[1])))\n lamda3 = 1 - lamda1 - lamda2\n \n #Caclulating the weighted average at the point of interest\n f4 = lamda1 * f1 + lamda2 * f2 + lamda3 * f3\n \n return f4\n", "repo_name": "AtiHaas/ME2_Numerical-Methods", "sub_path": "ME2 All Numerical Methods so far.py", "file_name": "ME2 All Numerical Methods so far.py", "file_ext": "py", "file_size_in_byte": 22033, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "numpy.linspace", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.contour", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.quiver", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.streamplot", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 335, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 341, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 348, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 349, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 414, 
"usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 415, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 416, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 417, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 421, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 422, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 456, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imread", "line_number": 472, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 472, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 480, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 480, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 481, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 481, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imsave", "line_number": 487, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 487, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 490, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 494, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 494, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 495, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 495, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 504, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 510, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 511, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 521, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 521, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 522, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 522, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 527, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 533, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 534, "usage_type": "call"}, {"api_name": "numpy.trunc", "line_number": 545, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 555, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 561, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 562, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 572, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 572, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 573, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 573, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 578, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 584, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 585, "usage_type": "call"}, {"api_name": "numpy.trunc", "line_number": 596, "usage_type": "call"}, {"api_name": "numpy.linalg.det", "line_number": 610, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 610, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 625, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 626, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 627, "usage_type": "call"}]} +{"seq_id": "19935922036", "text": 
"###\n# Running simple simulations to check 'catastrophe' probabilities\n#\n# +eps is good baseline, -eps is bad baseline\n#\n###\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom utils import *\n\n## Various decay rates for the prob of taking the good action\ndef rec_sigmoid(x, alpha, epsilon=1):\n # epsilon is how much the baseline underestimates the optimal one\n clip_tolerance = 1e-10\n # x = np.clip(x, 1e-9, 1-1e-9)\n return sigmoid(sigmoid_inv(x) + alpha * (1 - epsilon / (1-np.clip(x, clip_tolerance, 1-clip_tolerance))))\n\ndef rec_logit_sigmoid_constant_baseline(x, alpha, baseline):\n clip_tolerance = 1e-10\n return sigmoid(sigmoid_inv(x) + alpha * baseline / (1 - np.clip(x, clip_tolerance, 1-clip_tolerance)))\n\ndef rec_logistic(x, alpha):\n return x*(1-alpha*x)\n\ndef rec_exponential(x, alpha):\n return x * math.exp(-alpha)\n\n\n\n\n## checking decay rate\nx_lst = []\n\nx = 0.5\nx_lst.append(x)\nfor i in range(100):\n x = rec_logistic(x, 0.3)\n x_lst.append(x)\n print(x)\n\nx_lst = np.log(1 - np.array(x_lst))\n# y_lst = np.log(1 - x_lst\nplt.plot(x_lst)\n# plt.show()\n\n### check catastrophe prob for each x_0\nx_check = np.arange(0, 1, 0.01) # initial prob of taking the optimal action\nm_check = [1, 2, 3, 4, 5, 10, 100, 1000]\nalpha = 0.3\nepsilon = -1\n\nplt.figure()\nplt.title(\"alpha {} eps {}\".format(alpha, epsilon))\nfor m in m_check:\n results = []\n for x_0 in x_check:\n\n prod = 1 - x_0\n x = x_0\n for i in range(m-1):\n x = rec_sigmoid(x, alpha, epsilon)\n prod *= (1-x)\n\n results.append(prod)\n\n plt.plot(results)\n\n\ndef get_cat_prob(x_0, alpha, epsilon, T):\n prod = 1 - x_0\n x = x_0\n for i in range(T-1):\n # x = rec_logit_sigmoid_constant_baseline(x, alpha, epsilon)\n x = rec_logit_sigmoid_constant_baseline(x, alpha, epsilon)\n prod *= (1-x)\n return prod\n\ndef cat_prob_bound1(alpha, baseline):\n # this is the one Nicolas derived on prob for infinite left\n # assume we start at x=0.5\n return 0.5 * np.exp( np.log(1- np.exp(alpha*baseline) ) / (1-np.exp(alpha*baseline)))\n\ndef cat_prob_bound12(alpha, baseline):\n return 0.5 * (1 - np.exp(alpha*baseline) )**(-1/(alpha*baseline))\n\ndef cat_prob_bound2(p_0, alpha, T):\n # assumes baseline is the optimal one perturbed by -1\n return ( (1-p_0) / (1 - p_0 + alpha * p_0 * T))**(1 / alpha)\n## comparing empirical probabilities to theoretical ones\n# p_0 = 0.3\nalpha = 0.2\nbaseline = -2\nprint(get_cat_prob(0.5, alpha, baseline, 1000), cat_prob_bound1(alpha, baseline), cat_prob_bound12(alpha, baseline))\n# print(get_cat_prob(p_0, alpha, baseline, 100), cat_prob_bound2(p_0, alpha, 100))\n\n# ### Try to define reasonable one step values\n# x_check = np.arange(0, 1, 0.01)\n# def value(x, alpha, epsilon, cat_probs, x_check):\n# # return of x + return improvement in 1 step\n# return (x + alpha - alpha**2 * (1 + epsilon**2 / (x*(1-x)))) * (1 - cat_probs[list(x_check).index(x)])\n#\n# def value2(x, alpha, epsilon, cat_prob):\n# # return of x + return improvement in 1 step\n# return (x + alpha - alpha**2 * (1 + epsilon**2 / (x*(1-x)))) * (1 - cat_prob)\n#\n#\n# values = []\n# i=0\n# for x in x_check:\n# values.append(value(x, 0.1, 1, results, i))\n# i+=1\n#\n# plt.figure()\n# plt.plot(values)\n#\n# ### Check epsilon curve\n# x = 0.5\n# eps_check = np.arange(-6, 6, 0.1)\n# values = []\n#\n# for eps in eps_check:\n# values.append(value2(x, 0.3, eps, get_cat_prob(x, 0.3, eps, 100)))\n#\n# plt.figure()\n# plt.plot(values)\n#\n\n\n### Check sequences of good and bad actions\n# check sequences of 16 actions\nsettings = [[1, 
2]*8,\n [1, 1, 2, 2]*4,\n [1, 1, 1, 1, 2, 2, 2, 2]*2,\n [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2],\n [2, 1]*8,\n [2, 2, 1, 1]*4,\n [2, 2, 2, 2, 1, 1, 1, 1]*2,\n [2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1]]\n\n\nfrom bandits.TwoArmedBandit import *\ninit_param = 0\nperturb = 1\nstep_size = 0.3\n\nresults = [] # (log_probs_lst, performance)\nfor act_seq in settings:\n env = Bandit(1, 0, init_param, perturb_baseline=perturb)\n\n log_probs_lst= []\n for act in act_seq:\n prob = env.do_sgd_step_action(step_size, act)\n log_probs_lst.append(np.log(prob))\n results.append([log_probs_lst, env.get_prob()])\n\nprint(\"epsilon\", perturb, [round(x[1], 3) for x in results])\nfinal_perfs = np.array([x[1] for x in results])\nprobs = np.array([np.exp(np.sum(x[0])) for x in results])\n\n\n\n### Get full distribution of parameter values over k steps\nfrom bandits.TwoArmedBandit import *\nfrom utils import *\nnum_steps = 16\nstep_size = 0.2\nperturb = -1.1\ninit_param = sigmoid_inv(0.5)\noptimizer = 'natural'\nparameterization = 'sigmoid'\n\ndef get_distribution(num_steps, step_size, perturb, init_param, optimizer, parameterization, include_actions=False):\n ''' Returns the distribution over parameters after num_steps steps as a list of tuples (param, prob)\n include_actions: If true, also returns the sequence of actions leading to the parameter value in the tuple (param, prob, actions) '''\n env = Bandit(1, 0, init_param, perturb_baseline=perturb, optimizer=optimizer, parameterization=parameterization)\n # Note we can change both rewards to 0 to obtain a martingale\n\n distribution = [(init_param, 0.0)]\n if include_actions:\n distribution = [(init_param, 0.0, '')]\n\n for i in range(num_steps):\n new_distribution = []\n\n for entry in distribution:\n x = entry[0]\n log_prob = entry[1]\n updates = env.get_possible_gradients(x, return_next_params=True, step_size=step_size)\n\n # computes the next parameters and probability for each possible update\n if include_actions:\n action_seq = entry[2]\n act = 1\n for new_x, new_prob in updates:\n new_distribution.append((new_x, log_prob + np.log(new_prob), action_seq + str(act)))\n act += 1\n else:\n for new_x, new_prob in updates:\n new_distribution.append((new_x, log_prob + np.log(new_prob)))\n\n distribution = new_distribution\n\n # distribution.sort(key= lambda x: x[0])\n\n # temp = np.exp([x[1] for x in distribution])\n #\n #\n # distribution = np.array(distribution).astype('float64')\n # distribution[:, 2] = distribution[:, 2].astype('int32')\n if include_actions:\n distribution = [(a, np.exp(b), c) for a, b, c in distribution]\n else:\n distribution = np.array(distribution)\n distribution[:, 1] = np.exp(distribution[:,1]) # uses log probs to avoid numerical issues\n # distribution.astype('str')\n return distribution\n\n\n## Check distribution\n# here we compare the outcomes of all sequences of increases/decreases for pos/neg baselines\n#\ndist = get_distribution(num_steps, step_size, perturb, init_param, optimizer, parameterization, include_actions=True)\nfrom collections import OrderedDict\ndist = OrderedDict((c, (a,b)) for a, b, c in dist) # contains +eps\n\ndist2 = get_distribution(num_steps, step_size, -perturb, init_param, optimizer, parameterization, include_actions=True)\nfrom collections import OrderedDict\ndist2 = OrderedDict((c, (a,b)) for a, b, c in dist2) # contains -eps\n\n# we need to invert the action sequences because action 1 either increases or decreases theta depending on whether the baseline was pos or neg\n# so, in the 
\n\n# we need to invert the action sequences because action 1 either increases or decreases theta depending on whether the baseline was pos or neg\n# so, in the comparisons, to match the number of increases, we need to swap the actions\ndef invert(action_seq):\n    # switches 1 and 2\n    s = \"\"\n    for a in action_seq:\n        if a == '1':\n            s += '2'\n        else:\n            s += '1'\n    return s\nlst = []\nfor k in dist.keys(): # compare end parameter value for each sequence\n    lst.append(dist[k][0] > dist2[invert(k)][0])\n    if dist[k][0] < dist2[invert(k)][0]:\n        print(k, dist[k][0], dist2[invert(k)][0])\n\nprint(np.mean(lst), np.sum(lst))\n\n# just check the most probable action sequences (dist and dist2 are OrderedDicts, so sort their items by path probability)\ndist_sorted = sorted(dist.items(), key=lambda kv: kv[1][1], reverse=True)\ndist2_sorted = sorted(dist2.items(), key=lambda kv: kv[1][1], reverse=True)\n\n## Check distribution after k steps\n# this gets the full distribution\n\n# gets the average return after k steps for a grid of values of x\nresults = [] # this is the mean return\nxs = np.arange(0.01, 1, 0.01)\nfor x in xs:\n    distribution = get_distribution(num_steps, step_size, perturb, sigmoid_inv(x), optimizer, parameterization, False)\n    results.append(np.sum(sigmoid(distribution[:,0])*distribution[:,1]))\nplt.plot(xs, results)\nprint(results)\n\n# plots the distribution after k steps starting at some initial parameter value\ndistribution = get_distribution(num_steps, step_size, perturb, 0, optimizer, parameterization, False)\nplt.figure()\nplt.xlim((0,1))\nplt.ylim((0,0.5))\nplt.title(\"num steps {} epsilon {}\".format(num_steps, perturb))\nplt.hist(sigmoid(distribution[:,0]), weights=distribution[:,1], bins=20) # plot the resulting probs\nplt.ylabel('Probability')\nplt.xlabel('Prob. of right action')\n# plt.hist(np.log(np.abs(distribution[:,0])), weights=distribution[:,1], bins=50) # plot the raw parameter values\nprint(\"mean\", np.sum(sigmoid(distribution[:,0])*distribution[:,1]))\n\n\n\n\n#### baseline", "repo_name": "wechu/baseline_project", "sub_path": "bandits/BanditCheckProbs.py", "file_name": "BanditCheckProbs.py", "file_ext": "py", "file_size_in_byte": 9174, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "numpy.clip", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 158, "usage_type": "call"}, 
{"api_name": "numpy.log", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 214, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 224, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 262, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 267, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 268, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 269, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 269, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 270, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 270, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 272, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 272, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 273, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 273, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 275, "usage_type": "call"}]} +{"seq_id": "26866916786", "text": "# Logging\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom TopEFT.Analysis.Region import Region\nfrom TopEFT.Tools.u_float import u_float\nfrom TopEFT.Analysis.SystematicEstimator import SystematicEstimator\nfrom TopEFT.Analysis.SetupHelpers import trilepChannels, quadlepChannels, singlelepChannels, channel\n\n\nclass MCBasedEstimate(SystematicEstimator):\n def __init__(self, name, sample, cacheDir=None):\n super(MCBasedEstimate, self).__init__(name, cacheDir=cacheDir)\n self.sample = sample\n #self.short = short #this removes filters and triggers from the cutstring\n\n # FastSim and 76X only for the MCBasedEstimate. Dirty. 
Looks whether one of the samples is fastsim.\n self.isFastSim = getattr(sample, \"isFastSim\", False) \n \n def _estimate(self, region, channel, setup):\n\n ''' Concrete implementation of abstract method 'estimate' as defined in Systematic\n '''\n\n logger.debug( \"MC prediction for %s channel %s\" %(self.name, channel.name) )\n\n if channel.name=='all':\n # 'all' is the total of all contributions\n if setup.nLeptons == 1: channels = singlelepChannels\n elif setup.nLeptons == 3: channels = trilepChannels\n elif setup.nLeptons == 4: channels = quadlepChannels\n else: raise NotImplementedError\n return sum([self.cachedEstimate(region, c, setup) for c in channels])\n\n #elif channel=='SF':\n # # 'all' is the total of all contributions\n # return sum([self.cachedEstimate(region, c, setup) for c in ['MuMu', 'EE']])\n\n else:\n preSelection = setup.preselection('MC', nElectrons=channel.nE, nMuons=channel.nM, isFastSim = self.isFastSim)\n cut = \"&&\".join([region.cutString(setup.sys['selectionModifier']), preSelection['cut']])\n weight = preSelection['weightStr']\n logger.debug( \"Using cut %s and weight %s\"%(cut, weight) )\n return setup.lumi/1000.*u_float(**self.sample.getYieldFromDraw(selectionString = cut, weightString = weight) )\n", "repo_name": "HephyAnalysisSW/TopEFT", "sub_path": "Analysis/python/MCBasedEstimate.py", "file_name": "MCBasedEstimate.py", "file_ext": "py", "file_size_in_byte": 2075, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "48", "api": [{"api_name": "logging.getLogger", "line_number": 3, "usage_type": "call"}, {"api_name": "TopEFT.Analysis.SystematicEstimator.SystematicEstimator", "line_number": 11, "usage_type": "name"}, {"api_name": "TopEFT.Analysis.SetupHelpers.channel.name", "line_number": 25, "usage_type": "attribute"}, {"api_name": "TopEFT.Analysis.SetupHelpers.channel", "line_number": 25, "usage_type": "name"}, {"api_name": "TopEFT.Analysis.SetupHelpers.channel.name", "line_number": 27, "usage_type": "attribute"}, {"api_name": "TopEFT.Analysis.SetupHelpers.channel", "line_number": 27, "usage_type": "name"}, {"api_name": "TopEFT.Analysis.SetupHelpers.singlelepChannels", "line_number": 29, "usage_type": "name"}, {"api_name": "TopEFT.Analysis.SetupHelpers.trilepChannels", "line_number": 30, "usage_type": "name"}, {"api_name": "TopEFT.Analysis.SetupHelpers.quadlepChannels", "line_number": 31, "usage_type": "name"}, {"api_name": "TopEFT.Analysis.SetupHelpers.channel.nE", "line_number": 40, "usage_type": "attribute"}, {"api_name": "TopEFT.Analysis.SetupHelpers.channel", "line_number": 40, "usage_type": "name"}, {"api_name": "TopEFT.Analysis.SetupHelpers.channel.nM", "line_number": 40, "usage_type": "attribute"}, {"api_name": "TopEFT.Tools.u_float.u_float", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "26653083310", "text": "#!/usr/bin/env python\n''' Parse command line arguments '''\n\n# IMPORTS\nimport skj_std\n\n# AUTHOR\n__author__ = skj_std.__author__\n__email__ = skj_std.__email__\n__status__ = skj_std.__status__\n__version__ = skj_std.__version__\n__license__ = skj_std.__license__\n__year__ = skj_std.__year__\n__maintainer__ = skj_std.__maintainer__\n#\n# ARGUMENTS PARSING (START)\n#\n\n\ndef alter_args(parser_):\n ''' Do some most basic edits to user input and check for config file\n status: finished\n return: None\n '''\n # Store default values into a dict\n for argument in skj_std.arguments_values:\n skj_std.arguments_defaults[argument] = parser_.get_default(argument)\n\n # Verbose needs to 
be int\n if skj_std.arguments_values['verbose'] == skj_std.arguments_defaults['verbose']:\n skj_std.arguments_values['verbose'] = 0\n\n # Check if configuration file exists and is readable\n if skj_std.arguments_values['config'] != skj_std.arguments_defaults['config']:\n from skj_checker_common import check_file\n skj_std.arguments_values['config'] = check_file(skj_std.arguments_values['config']) # raise IOError\n\n\ndef parse_args():\n ''' Parse all command line arguments\n status: finished\n return: dictionary\n '''\n import argparse\n\n parser = argparse.ArgumentParser( description='''\\\nAnimation creation script. Requires Python (http://python.org/).\nFor more information please see the documentation.''',\n epilog='''\\\nSemestral work for BI-SKJ (B122) @ CTU (https://www.cvut.cz/)\nPls report bugs (bugs, what's that?) to ''' + __email__ + '''\nCreated by: ''' + __author__ + '''; License: ''' + __license__ + '''; Year: ''' + __year__,\n add_help=False,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n data = parser.add_argument_group(\"Data files\")\n glob = parser.add_argument_group(\"Global options\")\n animation = parser.add_argument_group(\"Animation options\")\n\n # -e EFFECT\n animation.add_argument('-e', '--effect', type=str, action='append', dest='effectparams',\n help='''Effects to use in animation. \\\n See user doc for more info on this. (type: %(type)s, default: %(default)s)''')\n\n # source [source...]\n data.add_argument('source', type=str, nargs='+',\n help='''Path to files with data to process. \\\n Can be a filesystem path or URL accessible using http protocol.''')\n\n # -t timeformat\n glob.add_argument('-t', '--time-format', type=str, default='[%Y-%m-%d %H:%M:%S]', dest='timeformat',\n help='''Date && time format in source files. See \\\"man -s 3 strftime\\\" for syntax. \\\n (type: %(type)s, default: %(default)s)''')\n\n # -X auto/max/timeformat\n animation.add_argument('-X', '--x-max', type=str, default='max', dest='xmax',\n help='''Maximum value for X-axis. Use \\\"auto\\\"/\\\"max\\\"/timeformat. \\\n (type: %(type)s, default: %(default)s)''')\n\n # -x auto/min/timeformat\n animation.add_argument('-x', '--x-min', type=str, default='min', dest='xmin',\n help='''Minimum value for X-axis. Use \\\"auto\\\"/\\\"min\\\"/timeformat. \\\n (type: %(type)s, default: %(default)s)''')\n\n # -Y auto/max/float\n animation.add_argument('-Y', '--y-max', type=str, default='auto', dest='ymax',\n help='''Maximum value for Y-axis. Use \\\"auto\\\"/\\\"max\\\"/float. \\\n (type: %(type)s, default: %(default)s)''')\n\n # -y auto/min/float\n animation.add_argument('-y', '--y-min', type=str, default='auto', dest='ymin',\n help='''Minimum value for Y-axis. Use \\\"auto\\\"/\\\"min\\\"/float. \\\n (type: %(type)s, default: %(default)s)''')\n\n # -S SPEED\n animation.add_argument('-S', '--speed', type=float, default=1, dest='speed',\n help='''Number of records used to create one frame. \\\n See user doc for more info on this. (type: %(type)s, default: %(default)s)''')\n\n # -T TIME\n animation.add_argument('-T', '--time', type=float, dest='time',\n help='''Animation duration. \\\n See user doc for more info on this. (type: %(type)s, default: %(default)s)''')\n\n # -F FPS\n animation.add_argument('-F', '--fps', type=float, default=25, dest='fps',\n help='''Number of frames/second. \\\n See user doc for more info on this. 
(type: %(type)s, default: %(default)s)''')\n\n # -c CRITICAL\n animation.add_argument('-c', '--critical', type=str, action='append', dest='criticalvalue',\n help='''Highlighted value in animation. \\\n See user doc for more info on this. (type: %(type)s, default: %(default)s)''')\n\n # -l LEGEND\n animation.add_argument('-l', '--legend', type=str, dest='legend',\n help='''Animation legend. \\\n (type: %(type)s, default: %(default)s)''')\n\n # -g GNUPLOT\n animation.add_argument('-g', '--gnuplot', type=str, action='append', dest='gnuplotparams',\n help='''Arguments passed to gnuplot. Syntax is YOUR responsibility. \\\n (type: %(type)s, default: %(default)s)''')\n\n # -f CONFIG\n glob.add_argument('-f', '--config', type=str, dest='config',\n help='''Path to configuration file. \\\n See user doc for more info on this. (type: %(type)s, default: %(default)s)''')\n\n # -n NAME\n glob.add_argument('-n', '--name', type=str, dest='name',\n help='''Name of the animation. \\\n See user doc for more info on this. (type: %(type)s, default: %(default)s)''')\n\n # -E\n glob.add_argument('-E', '--ignore-errors', action='store_true', default=False, dest='ignoreerrors',\n help='''Try to ignore non-fatal errors, just print warnings. \\\n (type: bool, default: %(default)s)''')\n\n # -h\n glob.add_argument('-h', '--help', action='help',\n help='''Show this help and exit. See doc for more help.''')\n # -v\n glob.add_argument('-v', '--verbose', action='count',\n help='''Be verbose. Use multiple times to be more verbose.''')\n # -V\n glob.add_argument('-V', '--version', action='version', version='%(prog)s v' + __version__,\n help='''Print program version and exit.''')\n\n # Now we know about all possible params, so parse them from command line\n skj_std.arguments_values = vars(parser.parse_args())\n\n # Most basic edits/checks which need to be passed before parsing optional config file\n alter_args(parser) # raise IOError\n#\n# ARGUMENTS PARSING (END)\n#\n", "repo_name": "nzt4567/animator", "sub_path": "skj_parser_cmdline.py", "file_name": "skj_parser_cmdline.py", "file_ext": "py", "file_size_in_byte": 7038, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "skj_std.__author__", "line_number": 8, "usage_type": "attribute"}, {"api_name": "skj_std.__email__", "line_number": 9, "usage_type": "attribute"}, {"api_name": "skj_std.__status__", "line_number": 10, "usage_type": "attribute"}, {"api_name": "skj_std.__version__", "line_number": 11, "usage_type": "attribute"}, {"api_name": "skj_std.__license__", "line_number": 12, "usage_type": "attribute"}, {"api_name": "skj_std.__year__", "line_number": 13, "usage_type": "attribute"}, {"api_name": "skj_std.__maintainer__", "line_number": 14, "usage_type": "attribute"}, {"api_name": "skj_std.arguments_values", "line_number": 26, "usage_type": "attribute"}, {"api_name": "skj_std.arguments_defaults", "line_number": 27, "usage_type": "attribute"}, {"api_name": "skj_std.arguments_values", "line_number": 30, "usage_type": "attribute"}, {"api_name": "skj_std.arguments_defaults", "line_number": 30, "usage_type": "attribute"}, {"api_name": "skj_std.arguments_values", "line_number": 31, "usage_type": "attribute"}, {"api_name": "skj_std.arguments_values", "line_number": 34, "usage_type": "attribute"}, {"api_name": "skj_std.arguments_defaults", "line_number": 34, "usage_type": "attribute"}, {"api_name": "skj_std.arguments_values", "line_number": 36, "usage_type": "attribute"}, {"api_name": 
"skj_checker_common.check_file", "line_number": 36, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 46, "usage_type": "call"}, {"api_name": "argparse.RawDescriptionHelpFormatter", "line_number": 54, "usage_type": "attribute"}, {"api_name": "skj_std.arguments_values", "line_number": 150, "usage_type": "attribute"}]} +{"seq_id": "20594544321", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 20 15:58:57 2020\n\n@author: panpanhuang\n\"\"\"\nimport os\nimport shutil\nimport sys\nimport datetime\nimport numpy as np\nimport h5py\nfrom mpi4py import MPI\nimport xraylib as xlib\nimport xraylib_np as xlib_np\n\nimport torch as tc\ntc.set_default_tensor_type(tc.FloatTensor)\nimport torch.nn as nn\nfrom tqdm import tqdm\nimport time\nfrom util import rotate, MakeFLlinesDictionary_manual, intersecting_length_fl_detectorlet_3d_mpi_write_h5_3_manual, find_lines_roi_idx_from_dataset\nfrom standard_calibration import calibrate_incident_probe_intensity\nfrom array_ops import initialize_guess_3d\nfrom forward_model import PPM\nfrom misc import print_flush_root, print_flush_all\n\nimport matplotlib.pyplot as plt\nimport matplotlib \nmatplotlib.rcParams['pdf.fonttype'] = 'truetype'\nfontProperties = {'family': 'sans-serif', 'sans-serif': ['Helvetica'], 'weight': 'normal', 'size': 12}\nplt.rc('font', **fontProperties)\nfrom matplotlib import gridspec\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport matplotlib.ticker as mtick\n\nimport dxchange\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nfl = {\"K\": np.array([xlib.KA1_LINE, xlib.KA2_LINE, xlib.KA3_LINE, xlib.KB1_LINE, xlib.KB2_LINE,\n xlib.KB3_LINE, xlib.KB4_LINE, xlib.KB5_LINE]),\n \"L\": np.array([xlib.LA1_LINE, xlib.LA2_LINE, xlib.LB1_LINE, xlib.LB2_LINE, xlib.LB3_LINE,\n xlib.LB4_LINE, xlib.LB5_LINE, xlib.LB6_LINE, xlib.LB7_LINE, xlib.LB9_LINE,\n xlib.LB10_LINE, xlib.LB15_LINE, xlib.LB17_LINE]), \n \"M\": np.array([xlib.MA1_LINE, xlib.MA2_LINE, xlib.MB_LINE]) \n }\n\n \ndef reconstruct_jXRFT_tomography(\n # ______________________________________\n # |Raw data and experimental parameters|________________________________\n sample_size_n, sample_height_n, sample_size_cm, probe_energy=None,\n # Set sample_size_n (sample_height_n) to the number of pixels along the direction \n # perpendicular (parallel) to # the rotational axis of the sample;\n # sample_size_cm is the size of the sample size (in unit cm) along the direction \n # perpendicular to the rotational axis of the sample\n probe_intensity=None,\n probe_att = True,\n # Set the value of incident probe fluence for simulation data\n # Not required for exp. data unless the calibration data is missing. In that case,\n # Set the value to some estimated probe intensity\n \n manual_det_coord=True, \n # True when using a exp. data; \n # False when using a simulation data; \n # For a simulation data, auto-distribute detecting points on the ciucualr sening area\n # given det_dia_cm and det_ds_spacing_cm;\n set_det_coord_cm=None,\n # Set to None for simulation data;\n # For exp. data, a np array with dimension (# of detecing points, 3) \n # 3 for (z, x, y) coordinates, probe propagate along +y axis; sample rotates about +z axis;\n # The sign of x determines which side the detector locates relative to the sample;\n\n det_on_which_side=\"negative\", \n # Choose from 'positive' or 'negative' depending on the side that the detector locates \n # relative the sample;\n \n manual_det_area=True,\n # True when using a exp. 
\n    \n    manual_det_area=True,\n    # True when using exp. data;\n    # False when using simulation data;\n    # If set to True, the program calculates the signal collecting solid angle of XRF using det_area_cm2;\n    # If set to False, the program uses the provided det_dia_cm and det_from_sample_cm;\n    #### Note ####\n    # For exp. data, the factor of the signal collecting solid angle is included in probe_intensity,\n    # which is calculated from the calibration data;\n    \n    det_area_cm2=None,\n    # For exp. data only. Set the value of the total sensing area;\n    # for simulation data, set to None (the program calculates the sensing area with the given det_dia_cm)\n    \n    det_dia_cm=None, \n    # For simulation data only. Diameter of the sensor assuming a circular sensing area.\n    # Only used when manual_det_area is False.\n    # Use the same diameter setting that was used to generate the simulation data.\n    det_ds_spacing_cm=None,\n    # For simulation data only. Used to distribute detecting points on the XRF detecting plane.\n    \n    det_from_sample_cm=None,\n    # For simulation data only.\n    # For exp. data, the distance between the detector and the sample is given in set_det_coord_cm\n    \n    # __________________________________\n    # |Probe Intensity calibration data|____________________________________ \n    use_std_calibation=False, std_path=None, f_std=None, std_element_lines_roi=None, density_std_elements=None, fitting_method=None,\n    # Set use_std_calibation to True if the calibration measurement exists, otherwise set it to False.\n    # density_std_elements unit in g/cm^2\n    # Set fitting_method to 'XRF_fits', 'XRF_roi' or 'XRF_roi_plus'\n    \n    # ___________________________\n    # |Reconstruction parameters|___________________________________________\n    n_epochs=50, save_every_n_epochs=10, minibatch_size=None,\n    f_recon_parameters=\"recon_parameters.txt\", dev=None,\n    selfAb=False, cont_from_check_point=False, use_saved_initial_guess=False, \n    ini_kind='const', init_const=0.5, ini_rand_amp=0.1,\n    # Set ini_kind to 'const', 'rand' or 'randn'\n    \n    recon_path='./', f_initial_guess=None, f_recon_grid=None, data_path=None, f_XRF_data=None, f_XRT_data=None,\n    # f_recon_grid is the name of the file that saves the most recent reconstructed result \n    scaler_counts_us_ic_dataset_idx=None,\n    # the index of us_ic in the dataset MAPS/scaler_names\n    scaler_counts_ds_ic_dataset_idx=None,\n    # the index of ds_ic in the dataset MAPS/scaler_names\n    XRT_ratio_dataset_idx=None,\n    # the index of abs_ic in the dataset MAPS/scaler_names\n    theta_ls_dataset='exchange/theta', channel_names='exchange/elements',\n    this_aN_dic=None, element_lines_roi=None, n_line_group_each_element=None,\n    b1=None, b2=None, lr=None,\n    P_folder=None, f_P=None, fl_K=fl[\"K\"], fl_L=fl[\"L\"], fl_M=fl[\"M\"], **kwargs,):\n    \n    comm = MPI.COMM_WORLD\n    n_ranks = comm.Get_size()\n    rank = comm.Get_rank()\n    \n    loss_fn = nn.MSELoss()\n    dia_len_n = int(1.2 * (sample_height_n**2 + sample_size_n**2 + sample_size_n**2)**0.5) #dev\n    n_voxel_minibatch = minibatch_size * sample_size_n #dev\n    n_voxel = sample_height_n * sample_size_n**2 #dev\n    \n    #### create the file handles for the experimental data; y1: channel data, y2: scalers data ####\n    y1_true_handle = h5py.File(os.path.join(data_path, f_XRF_data), 'r')\n    y2_true_handle = h5py.File(os.path.join(data_path, f_XRT_data), 'r') \n    ####----------------------------------------------------------------------------------####\n    \n    #### Calculate the number of elements in the reconstructed object and list the atomic numbers ####\n    n_element = len(this_aN_dic)\n    aN_ls = np.array(list(this_aN_dic.values()))
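\n    #### Example (editor's hedged sketch; the element choices and shapes are hypothetical, inferred from how these arguments are used below):\n    # this_aN_dic = {'Ca': 20, 'Fe': 26}                       # element name -> atomic number, so aN_ls == np.array([20, 26])\n    # element_lines_roi = np.array([['Ca', 'K'], ['Fe', 'K']])  # one (element, line-group) row per fitted ROI\n    # n_line_group_each_element = np.array([1, 1])             # number of line groups kept for each element\n    # aN_ls feeds the xraylib_np cross-section lookups below, and element_lines_roi selects channels from the XRF file.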
\n    ####--------------------------------------------------------------####\n    \n    #### Make the lookup table of the fluorescence lines of interest ####\n    fl_all_lines_dic = MakeFLlinesDictionary_manual(element_lines_roi, \n                                                    n_line_group_each_element, probe_energy, \n                                                    sample_size_n, sample_size_cm,\n                                                    fl_line_groups = np.array([\"K\", \"L\", \"M\"]), fl_K = fl_K, fl_L = fl_L, fl_M = fl_M) #cpu\n    \n    stdout_options = {'root':0, 'output_folder': recon_path, 'save_stdout': True, 'print_terminal': False}\n    \n    FL_line_attCS_ls = tc.as_tensor(xlib_np.CS_Total(aN_ls, fl_all_lines_dic[\"fl_energy\"])).float().to(dev) #dev\n    detected_fl_unit_concentration = tc.as_tensor(fl_all_lines_dic[\"detected_fl_unit_concentration\"]).float().to(dev)\n    n_line_group_each_element = tc.IntTensor(fl_all_lines_dic[\"n_line_group_each_element\"]).to(dev)\n    n_lines = fl_all_lines_dic[\"n_lines\"] #scalar\n    ####--------------------------------------------------------------####\n    \n    #### Calculate the MAC of the probe ####\n    probe_attCS_ls = tc.as_tensor(xlib_np.CS_Total(aN_ls, probe_energy).flatten()).to(dev)\n    ####----------------------------####\n    \n    #### Load all object angles ####\n    theta_ls = tc.from_numpy(y1_true_handle[theta_ls_dataset][...] * np.pi / 180).float() #unit: rad #cpu\n    n_theta = len(theta_ls) \n    ####------------------------####\n    \n    element_lines_roi_idx = find_lines_roi_idx_from_dataset(data_path, f_XRF_data, element_lines_roi, std_sample = False)\n    \n    #### pick only the element lines of interest from the channel data and flatten the data to strips\n    #### original dim = (n_lines_roi, n_theta, sample_height_n, sample_size_n)\n    y1_true = tc.from_numpy(y1_true_handle['exchange/data'][element_lines_roi_idx]).view(len(element_lines_roi_idx), n_theta, sample_height_n * sample_size_n).to(dev)\n#     #### pick the probe photon counts after the ion chamber from the scalers data as the transmission data\n#     y2_true = tc.from_numpy(y2_true_handle['exchange/data'][scaler_counts_ds_ic_dataset_idx]).view(n_theta, sample_height_n * sample_size_n).to(dev)\n    \n    ## Use this y2_true if using the attenuation exponent in the XRT loss calculation\n    y2_true = tc.from_numpy(y2_true_handle['exchange/data'][XRT_ratio_dataset_idx]).view(n_theta, sample_height_n * sample_size_n).to(dev)\n    y2_true = - tc.log(y2_true)\n    \n    #### pick the probe photon counts calibrated for all optics and detectors\n    if use_std_calibation:\n        probe_cts = calibrate_incident_probe_intensity(std_path, f_std, fitting_method, std_element_lines_roi, density_std_elements, probe_energy)\n    else:\n        probe_cts = probe_intensity\n\n    minibatch_ls_0 = tc.arange(n_ranks).to(dev) #dev\n    n_batch = (sample_height_n * sample_size_n) // (n_ranks * minibatch_size) #scalar\n    \n    \n    if manual_det_area == True:\n#         det_solid_angle_ratio = det_area_cm2 / (4 * np.pi * det_from_sample_cm**2)\n        det_solid_angle_ratio = 1.0\n        signal_attenuation_factor = 1.0\n    \n    else:\n        #### det_solid_angle_ratio is used only for a simulated dataset (use_std_calibation: False, manual_det_area: False, manual_det_coord: False)\n        #### in which the incident probe intensity is not calibrated with the axo_std file.\n        #### The simulated collected XRF photon number is estimated by multiplying the generated\n        #### fluorescence photon number by \"det_solid_angle_ratio\" to account for the limited solid angle and the detecting efficiency of the detector\n        \n#         #### Calculate the detecting solid angle covered by the area of the spherical cap covered by the detector #### \n#         #### OPTION A: estimate the solid angle by the curved surface\n#         # The distance from the sample to the boundary of the detector\n#         r = (det_from_sample_cm**2 + (det_dia_cm/2)**2)**0.5 \n#         # The height of the cap\n#         h = r - det_from_sample_cm\n#         # The area of the cap\n#         fl_sig_collecting_cap_area = np.pi*((det_dia_cm/2)**2 + h**2)\n#         # The ratio of the detecting solid angle / full solid angle\n#         det_solid_angle_ratio = fl_sig_collecting_cap_area / (4*np.pi*r**2)\n\n        #### OPTION B: estimate the solid angle by the flat surface\n        det_solid_angle_ratio = (np.pi * (det_dia_cm/2)**2) / (4*np.pi * det_from_sample_cm**2)
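\n\n        # Worked example (editor's note; the numbers are hypothetical, chosen only to illustrate the formula above):\n        # with det_dia_cm = 2.4 and det_from_sample_cm = 2.0,\n        # det_solid_angle_ratio = (pi * 1.2**2) / (4 * pi * 2.0**2) = 1.44 / 16 = 0.09,\n        # i.e. the flat detector face subtends roughly 9% of the full sphere around the emitting voxel.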
\n    \n        #### signal_attenuation_factor is used to account for other factors that cause attenuation of the XRF\n        #### except the limited solid angle and self-absorption\n        signal_attenuation_factor = 1.0\n    checkpoint_path = os.path.join(recon_path, \"checkpoint\")\n    if rank == 0: \n        if not os.path.exists(recon_path):\n            os.makedirs(recon_path) \n        if not os.path.exists(checkpoint_path):\n            os.makedirs(checkpoint_path) \n    \n    P_save_path = os.path.join(P_folder, f_P)\n    \n    # Check whether the P array exists; if it does not, call the function to calculate the P array and store it as a .h5 file.\n    if not os.path.isfile(P_save_path + \".h5\"): \n        intersecting_length_fl_detectorlet_3d_mpi_write_h5_3_manual(n_ranks, minibatch_size, rank,\n                                                                    manual_det_coord, set_det_coord_cm, det_on_which_side,\n                                                                    manual_det_area, det_dia_cm, det_from_sample_cm, det_ds_spacing_cm,\n                                                                    sample_size_n, sample_size_cm,\n                                                                    sample_height_n, P_folder, f_P) #cpu\n    \n    comm.Barrier()\n    P_handle = h5py.File(P_save_path + \".h5\", 'r')\n\n\n    if cont_from_check_point == False: \n        \n        # load the saved initial guess to the rank-0 cpu\n        if use_saved_initial_guess:\n            if rank == 0:\n                with h5py.File(os.path.join(recon_path, f_initial_guess + '.h5'), \"r\") as s:\n                    X = s[\"sample/densities\"][...].astype(np.float32)\n                    X = tc.from_numpy(X)\n                shutil.copy(os.path.join(recon_path, f_initial_guess +'.h5'), os.path.join(recon_path, f_recon_grid +'.h5'))\n            \n            else:\n                X = None\n        \n        # create the initial guess on the rank-0 cpu\n        else:\n            if rank == 0:\n                X = initialize_guess_3d(\"cpu\", ini_kind, n_element, sample_size_n, sample_height_n, recon_path, f_recon_grid, f_initial_guess, init_const) #cpu \n                ## Save the initial guess for future reference\n                with h5py.File(os.path.join(recon_path, f_initial_guess +'.h5'), 'w') as s:\n                    sample = s.create_group(\"sample\")\n                    sample_v = sample.create_dataset(\"densities\", shape=(n_element, sample_height_n, sample_size_n, sample_size_n), dtype=\"f4\")\n                    sample_e = sample.create_dataset(\"elements\", shape=(n_element,), dtype='S5')\n                    sample_v[...] = X\n                    sample_e[...] 
= np.array(list(this_aN_dic.keys())).astype('S5')\n            \n            ## Save the initial guess which will be used in the reconstruction and will be updated with the current reconstruction result \n            shutil.copy(os.path.join(recon_path, f_initial_guess +'.h5'), os.path.join(recon_path, f_recon_grid +'.h5'))\n\n        else:\n            X = None\n        \n        comm.Barrier()\n        \n        if rank == 0:\n            XRF_loss_whole_obj = tc.zeros(n_epochs * n_theta)\n            XRT_loss_whole_obj = tc.zeros(n_epochs * n_theta)\n            loss_whole_obj = tc.zeros(n_epochs * n_theta)\n            with open(os.path.join(recon_path, f_recon_parameters), \"w\") as recon_params:\n                recon_params.write(\"starting_epoch = 0\\n\")\n                recon_params.write(\"n_epochs = %d\\n\" %n_epochs)\n                recon_params.write(\"n_ranks = %d\\n\" %n_ranks)\n                recon_params.write(\"element_line:\\n\" + str(element_lines_roi)+\"\\n\") \n                recon_params.write(\"b1 = %.9f\\n\" %b1)\n                recon_params.write(\"b2 = %.9f\\n\" %b2)\n                recon_params.write(\"learning rate = %f\\n\" %lr)\n                recon_params.write(\"theta_st = %.2f\\n\" %theta_ls[0])\n                recon_params.write(\"theta_end = %.2f\\n\" %theta_ls[-1])\n                recon_params.write(\"n_theta = %d\\n\" %n_theta)\n                recon_params.write(\"sample_size_n = %d\\n\" %sample_size_n)\n                recon_params.write(\"sample_height_n = %d\\n\" %sample_height_n)\n                recon_params.write(\"sample_size_cm = %.2f\\n\" %sample_size_cm)\n                recon_params.write(\"probe_energy_keV = %.2f\\n\" %probe_energy[0])\n                recon_params.write(\"incident_probe_cts = %.2e\\n\" %probe_cts)\n                \n                if not manual_det_area:\n                    recon_params.write(\"det_dia_cm = %.2f\\n\" %det_dia_cm)\n                \n                if not manual_det_coord:\n                    recon_params.write(\"det_from_sample_cm = %.2f\\n\" %det_from_sample_cm)\n                    recon_params.write(\"det_ds_spacing_cm = %.2f\\n\" %det_ds_spacing_cm)\n        comm.Barrier() \n        \n        for epoch in range(n_epochs):\n            t0_epoch = time.perf_counter()\n            if rank == 0:\n                rand_idx = tc.randperm(n_theta)\n                theta_ls_rand = theta_ls[rand_idx] \n            else:\n                rand_idx = tc.ones(n_theta)\n                theta_ls_rand = tc.ones(n_theta)\n\n            comm.Barrier() \n            rand_idx = comm.bcast(rand_idx, root=0).to(dev) \n            theta_ls_rand = comm.bcast(theta_ls_rand, root=0).to(dev) \n            comm.Barrier() \n            \n            stdout_options = {'root':0, 'output_folder': recon_path, 'save_stdout': True, 'print_terminal': True}\n            timestr = str(datetime.datetime.today()) \n            print_flush_root(rank, val=f\"epoch: {epoch}, time: {timestr}\", output_file='', **stdout_options)\n            \n            for idx, theta in enumerate(theta_ls_rand):\n                this_theta_idx = rand_idx[idx] \n                \n                # The updated X is read by all ranks only at each new obj. angle,\n                # because updating the remaining slices at the current obj. angle doesn't require the info of the previously updated slices. \n                ## Calculate lac using the current X. lac (linear attenuation coefficient) has the dimension of [n_element, n_lines, n_voxel_minibatch, n_voxel]\n                with h5py.File(os.path.join(recon_path, f_recon_grid +'.h5'), \"r\") as s:\n                    X = s[\"sample/densities\"][...].astype(np.float32)\n                X = tc.from_numpy(X).to(dev) #dev \n                \n                if selfAb == True: \n                    X_ap_rot = rotate(X, theta, dev) #dev\n                    lac = X_ap_rot.view(n_element, 1, 1, n_voxel) * FL_line_attCS_ls.view(n_element, n_lines, 1, 1) #dev\n                    lac = lac.expand(-1, -1, n_voxel_minibatch, -1).float() #dev\n                \n                else:\n                    lac = 0.\n                \n                if rank == 0:\n                    XRF_loss_n_batch = tc.zeros(n_batch)\n                    XRT_loss_n_batch = tc.zeros(n_batch)\n                    total_loss_n_batch = tc.zeros(n_batch)\n                \n                for m in range(n_batch): \n                    minibatch_ls = n_ranks * m + minibatch_ls_0 #dev, e.g. [5,6,7,8]\n                    p = minibatch_ls[rank]
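\n                    # Worked example (editor's note; the numbers are illustrative only): with n_ranks = 4 and m = 1,\n                    # minibatch_ls = 4*1 + [0, 1, 2, 3] = [4, 5, 6, 7], so rank 2 owns strip index p = 6.\n                    # Each rank therefore updates a disjoint strip of minibatch_size pixels per iteration,\n                    # which is why the h5 write below can proceed without overlap between ranks.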
\n                    \n                    if selfAb == True:\n                        P_minibatch = tc.from_numpy(P_handle['P_array'][:,:, p * dia_len_n * minibatch_size * sample_size_n:\\\n                                                                        (p+1) * dia_len_n * minibatch_size * sample_size_n]).to(dev)\n                        n_det = P_minibatch.shape[0] \n                    \n                    else:\n                        P_minibatch = 0\n                        n_det = 0\n                    \n#                     stdout_options = {'output_folder': recon_path, 'save_stdout': True, 'print_terminal': False}\n#                     print_flush_all(rank, val=p * dia_len_n * minibatch_size * sample_size_n, output_file=f'P_start_idx_{rank}.csv', **stdout_options)\n#                     print_flush_all(rank, val=(p+1) * dia_len_n * minibatch_size * sample_size_n, output_file=f'P_end_idx_{rank}.csv', **stdout_options)\n                    \n#                     stdout_options = {'root':0, 'output_folder': recon_path, 'save_stdout': True, 'print_terminal': False}\n#                     print_flush_root(rank, val=minibatch_ls, output_file='minibatch_ls.csv', **stdout_options)\n                    \n                    ## Load us_ic as the incoming probe count in this minibatch\n                    model = PPM(dev, selfAb, lac, X, p, n_element, n_lines, FL_line_attCS_ls,\n                                detected_fl_unit_concentration, n_line_group_each_element,\n                                sample_height_n, minibatch_size, sample_size_n, sample_size_cm,\n                                probe_energy, probe_cts, probe_att, probe_attCS_ls,\n                                theta, signal_attenuation_factor,\n                                n_det, P_minibatch, det_dia_cm, det_from_sample_cm, det_solid_angle_ratio)\n                    \n                    optimizer = tc.optim.Adam(model.parameters(), lr=lr) \n                    \n                    ## load the true data, y1: XRF data, y2: XRT data\n                    #dev #Take all lines_roi, this_theta_idx, and strips in this minibatch\n                    y1_hat, y2_hat = model()\n                    \n                    XRF_loss = loss_fn(y1_hat, y1_true[:, this_theta_idx, minibatch_size * p : minibatch_size * (p+1)])\n                    XRT_loss = loss_fn(y2_hat, b2 * y2_true[this_theta_idx, minibatch_size * p : minibatch_size * (p+1)])\n                    loss = XRF_loss + b1 * XRT_loss\n\n                    optimizer.zero_grad()\n                    loss.backward()\n                    optimizer.step()\n                    \n                    updated_minibatch = model.xp.detach().cpu()\n                    updated_minibatch = tc.clamp(updated_minibatch, 0, float('inf'))\n                    comm.Barrier()\n                    \n                    XRF_loss = XRF_loss.detach().item()\n                    XRF_loss_sum = comm.reduce(XRF_loss, op=MPI.SUM, root=0)\n                    \n                    XRT_loss = XRT_loss.detach().item() \n                    XRT_loss_sum = comm.reduce(XRT_loss, op=MPI.SUM, root=0) \n                    \n                    loss = loss.detach().item() \n                    loss_sum = comm.reduce(loss, op=MPI.SUM, root=0)\n                    comm.Barrier() \n                    \n                    with h5py.File(os.path.join(recon_path, f_recon_grid +'.h5'), 'r+', driver='mpio', comm=comm) as s:\n                        s[\"sample/densities\"][:, minibatch_size * p // sample_size_n : minibatch_size * (p + 1) // sample_size_n, :, :] = updated_minibatch.numpy()\n                    \n                    comm.Barrier()\n                    if rank == 0: \n                        XRF_loss_n_batch[m] = XRF_loss_sum/n_ranks\n                        XRT_loss_n_batch[m] = XRT_loss_sum/n_ranks\n                        total_loss_n_batch[m] = loss_sum/n_ranks\n                    \n                    del model \n                    tc.cuda.empty_cache()\n\n                if rank == 0:\n                    loss_whole_obj[n_theta * epoch + idx] = tc.mean(total_loss_n_batch)\n                    XRF_loss_whole_obj[n_theta * epoch + idx] = tc.mean(XRF_loss_n_batch)\n                    XRT_loss_whole_obj[n_theta * epoch + idx] = tc.mean(XRT_loss_n_batch)\n                \n            comm.Barrier() \n            del lac\n\n            stdout_options = {'root':0, 'output_folder': recon_path, 'save_stdout': True, 'print_terminal': False}\n            per_epoch_time = time.perf_counter() - t0_epoch\n            print_flush_root(rank, val=per_epoch_time, output_file=f'per_epoch_time_mb_size_{minibatch_size}.csv', **stdout_options)\n            comm.Barrier()\n            \n            if rank == 0:\n                with h5py.File(os.path.join(recon_path, f_recon_grid +'.h5'), \"r\") as s:\n                    X_cpu = s[\"sample/densities\"][...].astype(np.float32)\n            \n            if rank == 0 and epoch != 0:\n                epsilon = np.mean((X_cpu - X_previous)**2)\n                print_flush_root(rank, val=epsilon, output_file=f'model_change_mse_epoch.csv', **stdout_options)
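\n                # Editor's note: epsilon is the voxel-wise mean-squared change of the whole reconstruction between\n                # two consecutive epochs; the 1e-12 threshold below is the author's hard-coded stopping criterion,\n                # at which point an '_ending_condition' snapshot is written and the epoch loop is exited.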
\n            \n            if epsilon < 10**(-12): \n                if rank == 0:\n                    with h5py.File(os.path.join(recon_path, f_recon_grid +\"_\"+str(epoch)+\"_ending_condition\" +'.h5'), \"w\") as s:\n                        sample = s.create_group(\"sample\")\n                        sample_v = sample.create_dataset(\"densities\", shape=(n_element, sample_height_n, sample_size_n, sample_size_n), dtype=\"f4\")\n                        sample_e = sample.create_dataset(\"elements\", shape=(n_element,), dtype='S5')\n                        s[\"sample/densities\"][...] = X_cpu\n                        s[\"sample/elements\"][...] = np.array(list(this_aN_dic.keys())).astype('S5')\n                    dxchange.write_tiff(X_cpu, os.path.join(recon_path, f_recon_grid)+\"_\"+str(epoch)+\"_ending_condition\", dtype='float32', overwrite=True) \n                    break\n                else:\n                    pass\n            \n            else:\n                pass\n            \n            comm.Barrier()\n            if rank == 0:\n                X_previous = X_cpu\n            \n            comm.Barrier()\n            if rank == 0 and ((epoch+1)%save_every_n_epochs == 0 and (epoch+1)//save_every_n_epochs !=0 or epoch+1 == n_epochs):\n                with h5py.File(os.path.join(checkpoint_path, f_recon_grid +\"_\"+str(epoch) +'.h5'), \"w\") as s:\n                    sample = s.create_group(\"sample\")\n                    sample_v = sample.create_dataset(\"densities\", shape=(n_element, sample_height_n, sample_size_n, sample_size_n), dtype=\"f4\")\n                    sample_e = sample.create_dataset(\"elements\", shape=(n_element,), dtype='S5')\n                    s[\"sample/densities\"][...] = X_cpu\n                    s[\"sample/elements\"][...] = np.array(list(this_aN_dic.keys())).astype('S5')\n#                 dxchange.write_tiff(X_cpu, os.path.join(recon_path, f_recon_grid)+\"_\"+str(epoch), dtype='float32', overwrite=True) \n            \n\n    ## It's important to close the hdf5 file handles at the end of the reconstruction.\n    P_handle.close()\n    y1_true_handle.close()\n    y2_true_handle.close()\n    comm.Barrier()\n    \n    if rank == 0: \n        fig6 = plt.figure(figsize=(10,15))\n        gs6 = gridspec.GridSpec(nrows=3, ncols=1, width_ratios=[1])\n\n        fig6_ax1 = fig6.add_subplot(gs6[0,0])\n        fig6_ax1.plot(loss_whole_obj.numpy())\n        fig6_ax1.set_xlabel('theta_iteration')\n        fig6_ax1.set_ylabel('loss')\n        fig6_ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n\n        fig6_ax2 = fig6.add_subplot(gs6[1,0])\n        fig6_ax2.plot(XRF_loss_whole_obj.numpy())\n        fig6_ax2.set_xlabel('theta_iteration')\n        fig6_ax2.set_ylabel('XRF loss')\n        fig6_ax2.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n\n        fig6_ax3 = fig6.add_subplot(gs6[2,0])\n        fig6_ax3.plot(XRT_loss_whole_obj.numpy())\n        fig6_ax3.set_xlabel('theta_iteration')\n        fig6_ax3.set_ylabel('XRT loss')\n        fig6_ax3.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n    \n        plt.savefig(os.path.join(recon_path, 'loss_signal.pdf'))\n    \n    np.save(os.path.join(recon_path, 'XRF_loss_signal.npy'), XRF_loss_whole_obj.numpy())\n    np.save(os.path.join(recon_path, 'XRT_loss_signal.npy'), XRT_loss_whole_obj.numpy())\n    np.save(os.path.join(recon_path, 'loss_signal.npy'), loss_whole_obj.numpy())\n    \n    comm.Barrier()\n    \n    if cont_from_check_point == True:\n        if rank == 0: \n            with h5py.File(os.path.join(recon_path, f_recon_grid + \".h5\"), \"r\") as s:\n                X = s[\"sample/densities\"][...].astype(np.float32)\n                X = tc.from_numpy(X)\n        \n        else:\n            X = None\n        \n        if rank == 0:\n            XRF_loss_whole_obj = tc.from_numpy(np.load(os.path.join(recon_path, 'XRF_loss_signal.npy')).astype(np.float32))\n            XRT_loss_whole_obj = tc.from_numpy(np.load(os.path.join(recon_path, 'XRT_loss_signal.npy')).astype(np.float32))\n            loss_whole_obj = tc.from_numpy(np.load(os.path.join(recon_path, 'loss_signal.npy')).astype(np.float32))\n            \n            XRF_loss_whole_obj_cont = tc.zeros(n_epochs * n_theta)\n            XRT_loss_whole_obj_cont = tc.zeros(n_epochs * n_theta)\n            loss_whole_obj_cont = tc.zeros(n_epochs * n_theta)
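\n        \n        # Editor's note on the offset arithmetic used below (an inferred invariant, reconstructed from the writes above):\n        # every 'starting_epoch = ...' line is followed by 14 fixed lines plus len(element_lines_roi) ROI rows, so\n        # params_list[n_ending - 15 - len(element_lines_roi)] points at the most recent 'starting_epoch' entry.\n        # This only holds when the optional det_* lines were not written, i.e. manual_det_area and manual_det_coord\n        # were both True on the previous run.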
\n        with open(os.path.join(recon_path, f_recon_parameters), \"r\") as recon_params:\n            params_list = []\n            for line in recon_params.readlines():\n                params_list.append(line.rstrip(\"\\n\"))\n            n_ending = len(params_list)\n\n        with open(os.path.join(recon_path, f_recon_parameters), \"a\") as recon_params:\n            n_start_last = n_ending - 15 - len(element_lines_roi)\n\n            previous_starting_epoch = int(params_list[n_start_last][params_list[n_start_last].find(\"=\")+1:])\n            previous_n_epoch = int(params_list[n_start_last+1][params_list[n_start_last+1].find(\"=\")+1:])\n            starting_epoch = previous_starting_epoch + previous_n_epoch\n            recon_params.write(\"\\n\")\n            recon_params.write(\"###########################################\\n\")\n            recon_params.write(\"starting_epoch = %d\\n\" %starting_epoch)\n            recon_params.write(\"n_epochs = %d\\n\" %n_epochs)\n            recon_params.write(\"n_ranks = %d\\n\" %n_ranks)\n            recon_params.write(\"element_line:\\n\" + str(element_lines_roi)+\"\\n\") \n            recon_params.write(\"b1 = %.9f\\n\" %b1)\n            recon_params.write(\"b2 = %.9f\\n\" %b2)\n            recon_params.write(\"learning rate = %f\\n\" %lr)\n            recon_params.write(\"theta_st = %.2f\\n\" %theta_ls[0])\n            recon_params.write(\"theta_end = %.2f\\n\" %theta_ls[-1])\n            recon_params.write(\"n_theta = %d\\n\" %n_theta)\n            recon_params.write(\"sample_size_n = %d\\n\" %sample_size_n)\n            recon_params.write(\"sample_height_n = %d\\n\" %sample_height_n)\n            recon_params.write(\"sample_size_cm = %.2f\\n\" %sample_size_cm)\n            recon_params.write(\"probe_energy_keV = %.2f\\n\" %probe_energy[0])\n            recon_params.write(\"incident_probe_cts = %.2e\\n\" %probe_cts) \n            if not manual_det_area:\n                recon_params.write(\"det_dia_cm = %.2f\\n\" %det_dia_cm)\n            \n            if not manual_det_coord:\n                recon_params.write(\"det_from_sample_cm = %.2f\\n\" %det_from_sample_cm)\n                recon_params.write(\"det_ds_spacing_cm = %.2f\\n\" %det_ds_spacing_cm)\n        comm.Barrier() \n        \n        for epoch in range(n_epochs):\n            t0_epoch = time.perf_counter()\n            if rank == 0:\n                rand_idx = tc.randperm(n_theta)\n                theta_ls_rand = theta_ls[rand_idx] \n            else:\n                rand_idx = tc.ones(n_theta)\n                theta_ls_rand = tc.ones(n_theta)\n\n            comm.Barrier() \n            rand_idx = comm.bcast(rand_idx, root=0).to(dev) \n            theta_ls_rand = comm.bcast(theta_ls_rand, root=0).to(dev) \n            comm.Barrier() \n            \n            \n            stdout_options = {'root':0, 'output_folder': recon_path, 'save_stdout': True, 'print_terminal': True}\n            timestr = str(datetime.datetime.today())\n            print_flush_root(rank, f\"epoch: {epoch}, time: {timestr}\", output_file='', **stdout_options)\n            \n            for idx, theta in enumerate(theta_ls_rand):\n                this_theta_idx = rand_idx[idx]\n\n                # The updated X is read by all ranks only at each new obj. angle,\n                # because updating the remaining slices at the current obj. angle doesn't require the info of the previously updated slices. \n                ## Calculate lac using the current X. lac (linear attenuation coefficient) has the dimension of [n_element, n_lines, n_voxel_minibatch, n_voxel]\n                with h5py.File(os.path.join(recon_path, f_recon_grid +'.h5'), \"r\") as s:\n                    X = s[\"sample/densities\"][...].astype(np.float32)\n                X = tc.from_numpy(X).to(dev) #dev\n                \n                ## Calculate lac using the current X. lac (linear attenuation coefficient) has the dimension of [n_element, n_lines, n_voxel_minibatch, n_voxel]
\n                if selfAb == True:\n                    X_ap_rot = rotate(X, theta, dev) #dev\n                    lac = X_ap_rot.view(n_element, 1, 1, n_voxel) * FL_line_attCS_ls.view(n_element, n_lines, 1, 1) #dev\n                    lac = lac.expand(-1, -1, n_voxel_minibatch, -1).float() #dev\n                \n                else:\n                    lac = 0.\n                \n                if rank == 0:\n                    XRF_loss_n_batch = tc.zeros(n_batch)\n                    XRT_loss_n_batch = tc.zeros(n_batch)\n                    total_loss_n_batch = tc.zeros(n_batch)\n                \n                for m in range(n_batch):\n                    minibatch_ls = n_ranks * m + minibatch_ls_0 #dev\n                    p = minibatch_ls[rank]\n\n                    if selfAb == True:\n                        P_minibatch = tc.from_numpy(P_handle['P_array'][:,:, p * dia_len_n * minibatch_size * sample_size_n: (p+1) * dia_len_n * minibatch_size * sample_size_n]).to(dev)\n                        n_det = P_minibatch.shape[0] \n                    \n                    else:\n                        P_minibatch = 0\n                        n_det = 0 \n                    \n                    model = PPM(dev, selfAb, lac, X, p, n_element, n_lines, FL_line_attCS_ls,\n                                detected_fl_unit_concentration, n_line_group_each_element,\n                                sample_height_n, minibatch_size, sample_size_n, sample_size_cm,\n                                probe_energy, probe_cts, probe_att, probe_attCS_ls,\n                                theta, signal_attenuation_factor,\n                                n_det, P_minibatch, det_dia_cm, det_from_sample_cm, det_solid_angle_ratio)\n\n                    optimizer = tc.optim.Adam(model.parameters(), lr=lr) \n                    \n                    ## load the true data, y1: XRF data, y2: XRT data\n                    #dev #Take all lines_roi, this_theta_idx, and strips in this minibatch \n                    y1_hat, y2_hat = model()\n                    XRF_loss = loss_fn(y1_hat, y1_true[:, this_theta_idx, minibatch_size * p : minibatch_size * (p+1)])\n                    XRT_loss = loss_fn(y2_hat, b2 * y2_true[this_theta_idx, minibatch_size * p : minibatch_size * (p+1)])\n                    loss = XRF_loss + b1 * XRT_loss\n                    \n                    optimizer.zero_grad()\n                    loss.backward() \n                    optimizer.step()\n                    \n                    updated_minibatch = model.xp.detach().cpu()\n                    updated_minibatch = tc.clamp(updated_minibatch, 0, float('inf'))\n                    comm.Barrier()\n                    \n                    \n                    XRF_loss = XRF_loss.detach().item() \n                    XRF_loss_sum = comm.reduce(XRF_loss, op=MPI.SUM, root=0)\n                    \n                    XRT_loss = XRT_loss.detach().item() \n                    XRT_loss_sum = comm.reduce(XRT_loss, op=MPI.SUM, root=0) \n                    \n                    loss = loss.detach().item() \n                    loss_sum = comm.reduce(loss, op=MPI.SUM, root=0)\n                    comm.Barrier() \n                    \n                    with h5py.File(os.path.join(recon_path, f_recon_grid +'.h5'), 'r+', driver='mpio', comm=comm) as s:\n                        s[\"sample/densities\"][:, minibatch_size * p // sample_size_n : minibatch_size * (p + 1) // sample_size_n, :, :] = updated_minibatch.numpy()\n                    \n                    if rank == 0:\n                        XRF_loss_n_batch[m] = XRF_loss_sum/n_ranks\n                        XRT_loss_n_batch[m] = XRT_loss_sum/n_ranks\n                        total_loss_n_batch[m] = loss_sum/n_ranks \n                    \n                    # Note that we need to detach the voxels in the updated_batch of the current iteration.\n                    # Otherwise Pytorch will keep calculating the gradient of the updated_batch of the current iteration in the NEXT iteration\n\n                    del model \n                \n                if rank == 0: \n                    loss_whole_obj_cont[n_theta * epoch + idx] = tc.mean(total_loss_n_batch)\n                    XRF_loss_whole_obj_cont[n_theta * epoch + idx] = tc.mean(XRF_loss_n_batch)\n                    XRT_loss_whole_obj_cont[n_theta * epoch + idx] = tc.mean(XRT_loss_n_batch)\n\n                \n            comm.Barrier() \n            del lac\n#             tc.cuda.empty_cache()\n            \n            \n            stdout_options = {'root':0, 'output_folder': recon_path, 'save_stdout': True, 'print_terminal': False}\n            per_epoch_time = time.perf_counter() - t0_epoch\n            print_flush_root(rank, val=per_epoch_time, output_file=f'per_epoch_time_mb_size_{minibatch_size}.csv', **stdout_options)\n            comm.Barrier()\n\n            if rank == 0:\n                with h5py.File(os.path.join(recon_path, f_recon_grid +'.h5'), \"r\") as s:\n                    X_cpu = s[\"sample/densities\"][...].astype(np.float32)
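\n            # Editor's note -- a hedged usage sketch for inspecting the saved reconstruction offline\n            # (file/dataset names follow the writes above; the element list is whatever this_aN_dic contained):\n            # with h5py.File(os.path.join(recon_path, f_recon_grid + '.h5'), 'r') as f:\n            #     rho = f['sample/densities'][...]      # shape: (n_element, sample_height_n, sample_size_n, sample_size_n)\n            #     elements = f['sample/elements'][...]  # byte strings, e.g. [b'Ca', b'Fe']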
\n            \n            if rank == 0 and epoch != 0:\n                epsilon = np.mean((X_cpu - X_previous)**2)\n                print_flush_root(rank, val=epsilon, output_file=f'model_change_mse_epoch.csv', **stdout_options)\n            \n            if epsilon < 10**(-12): \n                if rank == 0:\n                    with h5py.File(os.path.join(recon_path, f_recon_grid +\"_\"+str(epoch)+\"_ending_condition\" +'.h5'), \"w\") as s:\n                        sample = s.create_group(\"sample\")\n                        sample_v = sample.create_dataset(\"densities\", shape=(n_element, sample_height_n, sample_size_n, sample_size_n), dtype=\"f4\")\n                        sample_e = sample.create_dataset(\"elements\", shape=(n_element,), dtype='S5')\n                        s[\"sample/densities\"][...] = X_cpu\n                        s[\"sample/elements\"][...] = np.array(list(this_aN_dic.keys())).astype('S5')\n                    dxchange.write_tiff(X_cpu, os.path.join(recon_path, f_recon_grid)+\"_\"+str(epoch)+\"_ending_condition\", dtype='float32', overwrite=True) \n                    break\n                else:\n                    pass\n            \n            else:\n                pass\n            \n            comm.Barrier()\n            if rank == 0:\n                X_previous = X_cpu \n            comm.Barrier() \n            checkpoint_path = os.path.join(recon_path, \"checkpoint\")\n            if rank == 0 and ((epoch+1)%save_every_n_epochs == 0 and (epoch+1)//save_every_n_epochs !=0 or epoch+1 == n_epochs):\n                with h5py.File(os.path.join(checkpoint_path, f_recon_grid +\"_\"+str(starting_epoch + epoch) +'.h5'), \"w\") as s:\n                    sample = s.create_group(\"sample\")\n                    sample_v = sample.create_dataset(\"densities\", shape=(n_element, sample_height_n, sample_size_n, sample_size_n), dtype=\"f4\")\n                    sample_e = sample.create_dataset(\"elements\", shape=(n_element,), dtype='S5')\n                    s[\"sample/densities\"][...] = X_cpu\n                    s[\"sample/elements\"][...] = np.array(list(this_aN_dic.keys())).astype('S5')\n#                 dxchange.write_tiff(X_cpu, os.path.join(recon_path, f_recon_grid)+\"_\"+str(epoch), dtype='float32', overwrite=True) \n            \n        ## It's important to close the hdf5 file handles at the end of the reconstruction.\n        P_handle.close() \n        y1_true_handle.close()\n        y2_true_handle.close()\n        comm.Barrier()\n        \n        if rank == 0: \n            loss_whole_obj = tc.cat((loss_whole_obj, loss_whole_obj_cont))\n            XRF_loss_whole_obj = tc.cat((XRF_loss_whole_obj, XRF_loss_whole_obj_cont))\n            XRT_loss_whole_obj = tc.cat((XRT_loss_whole_obj, XRT_loss_whole_obj_cont))\n            \n            fig6 = plt.figure(figsize=(10,15))\n            gs6 = gridspec.GridSpec(nrows=3, ncols=1, width_ratios=[1])\n\n            fig6_ax1 = fig6.add_subplot(gs6[0,0])\n            fig6_ax1.plot(loss_whole_obj.numpy())\n            fig6_ax1.set_xlabel('theta_iteration')\n            fig6_ax1.set_ylabel('loss')\n            fig6_ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n\n            fig6_ax2 = fig6.add_subplot(gs6[1,0])\n            fig6_ax2.plot(XRF_loss_whole_obj.numpy())\n            fig6_ax2.set_xlabel('theta_iteration')\n            fig6_ax2.set_ylabel('XRF loss')\n            fig6_ax2.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n\n            fig6_ax3 = fig6.add_subplot(gs6[2,0])\n            fig6_ax3.plot(XRT_loss_whole_obj.numpy())\n            fig6_ax3.set_xlabel('theta_iteration')\n            fig6_ax3.set_ylabel('XRT loss')\n            fig6_ax3.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n            \n            plt.savefig(os.path.join(recon_path, 'loss_signal.pdf'))\n            \n            np.save(os.path.join(recon_path, 'XRF_loss_signal.npy'), XRF_loss_whole_obj.numpy())\n            np.save(os.path.join(recon_path, 'XRT_loss_signal.npy'), XRT_loss_whole_obj.numpy())\n            np.save(os.path.join(recon_path, 'loss_signal.npy'), loss_whole_obj.numpy())\n    \n", "repo_name": "hpphappy/XRF_tomography", "sub_path": "3D/XRF_tomography.py", "file_name": "XRF_tomography.py", "file_ext": "py", "file_size_in_byte": 42128, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 
1, "dataset": "github-code", "pt": "48", "api": [{"api_name": "torch.set_default_tensor_type", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 19, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 31, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "warnings.filterwarnings", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "xraylib.KA1_LINE", "line_number": 43, "usage_type": "attribute"}, {"api_name": "xraylib.KA2_LINE", "line_number": 43, "usage_type": "attribute"}, {"api_name": "xraylib.KA3_LINE", "line_number": 43, "usage_type": "attribute"}, {"api_name": "xraylib.KB1_LINE", "line_number": 43, "usage_type": "attribute"}, {"api_name": "xraylib.KB2_LINE", "line_number": 43, "usage_type": "attribute"}, {"api_name": "xraylib.KB3_LINE", "line_number": 44, "usage_type": "attribute"}, {"api_name": "xraylib.KB4_LINE", "line_number": 44, "usage_type": "attribute"}, {"api_name": "xraylib.KB5_LINE", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "xraylib.LA1_LINE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "xraylib.LA2_LINE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "xraylib.LB1_LINE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "xraylib.LB2_LINE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "xraylib.LB3_LINE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "xraylib.LB4_LINE", "line_number": 46, "usage_type": "attribute"}, {"api_name": "xraylib.LB5_LINE", "line_number": 46, "usage_type": "attribute"}, {"api_name": "xraylib.LB6_LINE", "line_number": 46, "usage_type": "attribute"}, {"api_name": "xraylib.LB7_LINE", "line_number": 46, "usage_type": "attribute"}, {"api_name": "xraylib.LB9_LINE", "line_number": 46, "usage_type": "attribute"}, {"api_name": "xraylib.LB10_LINE", "line_number": 47, "usage_type": "attribute"}, {"api_name": "xraylib.LB15_LINE", "line_number": 47, "usage_type": "attribute"}, {"api_name": "xraylib.LB17_LINE", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "xraylib.MA1_LINE", "line_number": 48, "usage_type": "attribute"}, {"api_name": "xraylib.MA2_LINE", "line_number": 48, "usage_type": "attribute"}, {"api_name": "xraylib.MB_LINE", "line_number": 48, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI.COMM_WORLD", "line_number": 133, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 133, "usage_type": "name"}, {"api_name": "torch.nn.MSELoss", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 137, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path", "line_number": 144, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 149, "usage_type": "call"}, {"api_name": "util.MakeFLlinesDictionary_manual", "line_number": 153, "usage_type": 
"call"}, {"api_name": "numpy.array", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 160, "usage_type": "call"}, {"api_name": "xraylib_np.CS_Total", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.IntTensor", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 167, "usage_type": "call"}, {"api_name": "xraylib_np.CS_Total", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 171, "usage_type": "attribute"}, {"api_name": "util.find_lines_roi_idx_from_dataset", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 179, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 185, "usage_type": "call"}, {"api_name": "standard_calibration.calibrate_incident_probe_intensity", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 220, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path", "line_number": 225, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 227, "usage_type": "call"}, {"api_name": "os.path", "line_number": 227, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 230, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 232, "usage_type": "call"}, {"api_name": "os.path", "line_number": 232, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path", "line_number": 235, "usage_type": "attribute"}, {"api_name": "util.intersecting_length_fl_detectorlet_3d_mpi_write_h5_3_manual", "line_number": 236, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 243, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 251, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 251, "usage_type": "call"}, {"api_name": "os.path", "line_number": 251, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 252, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 253, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 254, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 254, "usage_type": "call"}, {"api_name": "os.path", "line_number": 254, "usage_type": "attribute"}, {"api_name": "array_ops.initialize_guess_3d", "line_number": 262, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 264, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 264, "usage_type": "call"}, {"api_name": "os.path", "line_number": 264, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 269, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 272, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 272, "usage_type": "call"}, {"api_name": "os.path", "line_number": 272, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 280, 
"usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 281, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 283, "usage_type": "call"}, {"api_name": "os.path", "line_number": 283, "usage_type": "attribute"}, {"api_name": "time.perf_counter", "line_number": 309, "usage_type": "call"}, {"api_name": "torch.randperm", "line_number": 311, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 314, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 315, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 323, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 323, "usage_type": "attribute"}, {"api_name": "misc.print_flush_root", "line_number": 324, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 332, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 332, "usage_type": "call"}, {"api_name": "os.path", "line_number": 332, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 333, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 334, "usage_type": "call"}, {"api_name": "util.rotate", "line_number": 337, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 345, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 346, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 347, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 354, "usage_type": "call"}, {"api_name": "forward_model.PPM", "line_number": 370, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 377, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 377, "usage_type": "attribute"}, {"api_name": "torch.clamp", "line_number": 392, "usage_type": "call"}, {"api_name": "mpi4py.MPI.SUM", "line_number": 396, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 396, "usage_type": "name"}, {"api_name": "mpi4py.MPI.SUM", "line_number": 399, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 399, "usage_type": "name"}, {"api_name": "mpi4py.MPI.SUM", "line_number": 402, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 402, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 405, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 405, "usage_type": "call"}, {"api_name": "os.path", "line_number": 405, "usage_type": "attribute"}, {"api_name": "torch.cuda.empty_cache", "line_number": 415, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 415, "usage_type": "attribute"}, {"api_name": "torch.mean", "line_number": 418, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 419, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 420, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 426, "usage_type": "call"}, {"api_name": "misc.print_flush_root", "line_number": 427, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 431, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 431, "usage_type": "call"}, {"api_name": "os.path", "line_number": 431, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 432, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 435, "usage_type": "call"}, {"api_name": "misc.print_flush_root", "line_number": 436, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 440, 
"usage_type": "call"}, {"api_name": "os.path.join", "line_number": 440, "usage_type": "call"}, {"api_name": "os.path", "line_number": 440, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 445, "usage_type": "call"}, {"api_name": "dxchange.write_tiff", "line_number": 446, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 446, "usage_type": "call"}, {"api_name": "os.path", "line_number": 446, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 460, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 460, "usage_type": "call"}, {"api_name": "os.path", "line_number": 460, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 465, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 476, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 476, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 477, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 477, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 483, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 483, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 489, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 489, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 495, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 495, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 497, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 497, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 497, "usage_type": "call"}, {"api_name": "os.path", "line_number": 497, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 499, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 499, "usage_type": "call"}, {"api_name": "os.path", "line_number": 499, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 500, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 500, "usage_type": "call"}, {"api_name": "os.path", "line_number": 500, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 501, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 501, "usage_type": "call"}, {"api_name": "os.path", "line_number": 501, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 507, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 507, "usage_type": "call"}, {"api_name": "os.path", "line_number": 507, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 508, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 509, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 515, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 515, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 515, "usage_type": "call"}, {"api_name": "os.path", "line_number": 515, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 515, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 516, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 516, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 516, "usage_type": "call"}, {"api_name": "os.path", "line_number": 516, "usage_type": 
"attribute"}, {"api_name": "numpy.float32", "line_number": 516, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 517, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 517, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 517, "usage_type": "call"}, {"api_name": "os.path", "line_number": 517, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 517, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 519, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 520, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 521, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 523, "usage_type": "call"}, {"api_name": "os.path", "line_number": 523, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 529, "usage_type": "call"}, {"api_name": "os.path", "line_number": 529, "usage_type": "attribute"}, {"api_name": "time.perf_counter", "line_number": 561, "usage_type": "call"}, {"api_name": "torch.randperm", "line_number": 563, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 566, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 567, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 576, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 576, "usage_type": "attribute"}, {"api_name": "misc.print_flush_root", "line_number": 577, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 585, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 585, "usage_type": "call"}, {"api_name": "os.path", "line_number": 585, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 586, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 587, "usage_type": "call"}, {"api_name": "util.rotate", "line_number": 591, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 599, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 600, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 601, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 608, "usage_type": "call"}, {"api_name": "forward_model.PPM", "line_number": 615, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 622, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 622, "usage_type": "attribute"}, {"api_name": "torch.clamp", "line_number": 636, "usage_type": "call"}, {"api_name": "mpi4py.MPI.SUM", "line_number": 641, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 641, "usage_type": "name"}, {"api_name": "mpi4py.MPI.SUM", "line_number": 644, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 644, "usage_type": "name"}, {"api_name": "mpi4py.MPI.SUM", "line_number": 647, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 647, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 650, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 650, "usage_type": "call"}, {"api_name": "os.path", "line_number": 650, "usage_type": "attribute"}, {"api_name": "torch.mean", "line_number": 664, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 665, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 666, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 675, "usage_type": "call"}, {"api_name": "misc.print_flush_root", "line_number": 676, "usage_type": "call"}, 
{"api_name": "h5py.File", "line_number": 680, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 680, "usage_type": "call"}, {"api_name": "os.path", "line_number": 680, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 681, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 684, "usage_type": "call"}, {"api_name": "misc.print_flush_root", "line_number": 685, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 689, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 689, "usage_type": "call"}, {"api_name": "os.path", "line_number": 689, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 694, "usage_type": "call"}, {"api_name": "dxchange.write_tiff", "line_number": 695, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 695, "usage_type": "call"}, {"api_name": "os.path", "line_number": 695, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 707, "usage_type": "call"}, {"api_name": "os.path", "line_number": 707, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 709, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 709, "usage_type": "call"}, {"api_name": "os.path", "line_number": 709, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 714, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 724, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 725, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 726, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 728, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 728, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 729, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 729, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 735, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 735, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 741, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 741, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 747, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 747, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 749, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 749, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 749, "usage_type": "call"}, {"api_name": "os.path", "line_number": 749, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 751, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 751, "usage_type": "call"}, {"api_name": "os.path", "line_number": 751, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 752, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 752, "usage_type": "call"}, {"api_name": "os.path", "line_number": 752, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 753, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 753, "usage_type": "call"}, {"api_name": "os.path", "line_number": 753, "usage_type": "attribute"}]} +{"seq_id": "71374675667", "text": "import os\nimport argparse\n\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport numpy as np\nfrom tqdm 
import tqdm\n\n\ndef get_mnist():\n transform = transforms.Compose([\n transforms.ToTensor()\n ])\n\n trainset = torchvision.datasets.MNIST(\n root='./data/mnist/archive', train=True, download=True, transform=transform)\n testset = torchvision.datasets.MNIST(\n root='./data/mnist/archive', train=False, download=True, transform=transform)\n\n trainld = torch.utils.data.DataLoader(\n trainset, batch_size=1, shuffle=False, num_workers=0)\n testld = torch.utils.data.DataLoader(\n testset, batch_size=1, shuffle=False, num_workers=0)\n\n return trainld, testld\n\n\ndef save_data(arr: np.ndarray, name: str, path: str):\n size = arr.size * arr.itemsize / 1e6\n tqdm.write(f\"Saving {name} ndarray [{size} MB]\")\n np.save(os.path.join(path, name), arr)\n\n\nCLASSES = (\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\")\nDATA_ROOT = \"./data/mnist\"\nTRAIN_DIR = os.path.join(DATA_ROOT, \"train\")\nTEST_DIR = os.path.join(DATA_ROOT, \"test\")\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Build MNIST dataset.\")\n parser.add_argument('-d', type=str, action='store',\n default='./data/mnist', help='Dataset root directory.')\n args = parser.parse_args()\n\n # read DATA_ROOT\n DATA_ROOT = args.d\n\n # make dirs\n if not os.path.exists(TRAIN_DIR):\n os.makedirs(TRAIN_DIR)\n if not os.path.exists(TEST_DIR):\n os.makedirs(TEST_DIR)\n\n # build loaders\n train_dl, test_dl = get_mnist()\n\n # train data\n train_img_arr = []\n train_lab_arr = []\n\n tqdm.write(\"Reading train data\")\n for data in tqdm(iter(train_dl)):\n image, label = data\n img_class = CLASSES[label.item()]\n train_img_arr.append(image.squeeze(0).numpy())\n train_lab_arr.append(int(img_class))\n\n save_data(np.asarray(train_img_arr), \"images\", TRAIN_DIR)\n save_data(np.asarray(train_lab_arr), \"labels\", TRAIN_DIR)\n\n # test data\n test_img_arr = []\n test_lab_arr = []\n\n tqdm.write(\"Reading test data\")\n for data in tqdm(iter(test_dl)):\n image, label = data\n img_class = CLASSES[label.item()]\n test_img_arr.append(image.squeeze(0).numpy())\n test_lab_arr.append(int(img_class))\n\n save_data(np.asarray(test_img_arr), \"images\", TEST_DIR)\n save_data(np.asarray(test_lab_arr), \"labels\", TEST_DIR)\n", "repo_name": "FilippoVajana/master-degree", "sub_path": "utils/mnist-data.py", "file_name": "mnist-data.py", "file_ext": "py", "file_size_in_byte": 2484, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "torchvision.transforms.Compose", "line_number": 12, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 12, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 13, "usage_type": "name"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 16, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 18, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", 
"line_number": 29, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm.write", "line_number": 31, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 31, "usage_type": "name"}, {"api_name": "numpy.save", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 53, "usage_type": "call"}, {"api_name": "tqdm.tqdm.write", "line_number": 62, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 62, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 70, "usage_type": "call"}, {"api_name": "tqdm.tqdm.write", "line_number": 76, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 76, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "17766423581", "text": "from __future__ import division\n# do not use multithreading; it causes racing conditions\n#from concurrent.futures import ProcessPoolExecutor\nimport multiprocessing\nimport logging\n\nfrom .image_ import Image_\nimport numpy as np\nimport time\nimport pickle\nimport os\nimport sys\n\n\nclass String_(object):\n \"\"\"\n see Maragliano et al. (2006) J. Chem. Phys. 
for the algorithm\n \"\"\"\n \n js= None\n \n def __init__(self, img_list, root_dir):\n self.itr= 0\n self.img_list= img_list\n self.root_dir= root_dir\n self.num_img= len(img_list)\n self.new_cntr= None\n \n self.two_pi_per_lst= self.img_list[0].two_pi_per_lst\n self.shift= self.img_list[0].shift\n\n def evolve(self):\n \"\"\"\n Evolve the string\n \"\"\"\n\n # first check if the output directory exists\n itr_dir= String_.js['output_dir']+'/iter_%d' %self.itr\n if os.path.isdir(itr_dir):\n os.system('rm -r %s' %itr_dir)\n sys.stderr.write('Output folder already exists and will be overwritten, itr: %d\\n' %self.itr)\n os.mkdir(itr_dir)\n \n # evolve the images in parallel with multiprocess\n jobs= []\n result_queue= multiprocessing.Queue()\n multiprocessing.log_to_stderr(logging.DEBUG)\n logger= multiprocessing.get_logger()\n logger.setLevel(logging.WARNING)\n \n for img in self.img_list:\n proc= multiprocessing.Process(target= img.evolve, args= (result_queue, self.itr))\n jobs.append(proc)\n proc.start()\n \n # fetch results from the queue\n new_img_list= []\n\n for proc in jobs:\n new_img= result_queue.get()\n # exceptions are passed back to the main process as the result\n if isinstance(new_img, Exception):\n sys.exit('%s (found in %s)' %(new_img, proc))\n new_img_list.append(new_img)\n\n for proc in jobs:\n proc.join()\n \n # unscramble the returned images\n new_img_order= [img.index for img in new_img_list]\n assert sorted(new_img_order) == list(range(self.num_img)), 'Some images are not returned by multiprocessing!'\n new_img_sorted= sorted(zip(new_img_order, new_img_list))\n self.img_list= [img[1] for img in new_img_sorted]\n\n\n # the following code is for concurrent.futures, which somehow doesn't work on Wynton\n # evolve the images in parallel with multiprocess\n #pool= ProcessPoolExecutor(self.num_img)\n #futures= [pool.submit(img.evolve, (self.itr,)) for img in self.img_list]\n # \n #while True:\n # complete= [future.done() for future in futures]\n # if np.all(complete):\n # self.img_list= [future.result() for future in futures]\n # break\n # else:\n # time.sleep(60)\n \n # extract simulation results\n current_cntr= Image_.cntr_list[-1]\n self.new_cntr= np.zeros_like(current_cntr)\n current_gradient= [img.gradient for img in self.img_list]\n current_M= [img.M for img in self.img_list]\n dt= String_.js['euler_time']\n \n # compute the projection operator for each image (except for the two end points)\n # for the two end points, insert identity matrix (for indexing purpose only)\n project=[]\n num_cv= len(self.new_cntr[0])\n project.append(np.eye(num_cv))\n for ind in range(1, self.num_img - 1):\n operator=np.eye(num_cv)\n \n disp_vec= current_cntr[ind + 1] - current_cntr[ind]\n if self.two_pi_per_lst.size == 0:\n disp_proj= np.dot(disp_vec, np.dot(current_M[ind], current_gradient[ind]))\n else:\n min_disp_vec= Image_.min_abs(disp_vec, disp_vec + self.shift, disp_vec - self.shift)\n disp_proj= np.dot(min_disp_vec, np.dot(current_M[ind], current_gradient[ind]))\n \n sigma= 1 if disp_proj >= 0 else -1\n \n disp_vec= current_cntr[ind + sigma] - current_cntr[ind]\n if self.two_pi_per_lst.size != 0:\n disp_vec= Image_.min_abs(disp_vec, disp_vec + self.shift, disp_vec - self.shift)\n \n disp_vec_unit= disp_vec/np.linalg.norm(disp_vec)\n disp_vec_outer= np.outer(disp_vec_unit, disp_vec_unit)\n \n operator-= disp_vec_outer\n project.append(operator)\n project.append(np.eye(num_cv))\n \n \n # Evolve the string using forward Euler\n # the two end points evolve by gradient descent\n 
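        # In symbols: interior image i moves by x_i <- x_i - dt * P_i M_i g_i, where g_i is the\n        # mean-force gradient, M_i the metric tensor, and P_i = I - t t^T the projector built above\n        # (t: unit displacement toward a neighboring image). E.g. in 2D, t = (1, 0) gives\n        # P = [[0, 0], [0, 1]], so the component of the step along the string is removed.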
self.new_cntr[0]= current_cntr[0] - dt*current_gradient[0]\n self.new_cntr[-1]= current_cntr[-1] - dt*current_gradient[-1]\n for ind in range(1, self.num_img - 1):\n self.new_cntr[ind]= current_cntr[ind] - dt*np.dot(np.dot(project[ind], current_M[ind]), current_gradient[ind])\n \n # write out the displacement vector (w/o the dt)\n if String_.js['clean_up'] == False:\n displacement= []\n displacement.append(current_gradient[0])\n for ind in range(1, self.num_img - 1):\n displacement.append(np.dot(np.dot(project[ind], current_M[ind]), current_gradient[ind]))\n displacement.append(current_gradient[-1])\n \n np.save(itr_dir+'/displacement.npy', np.asarray(displacement))\n np.save(itr_dir+'/current_cntr.npy', np.asarray(current_cntr))\n \n def smooth(self):\n \"\"\"\n Smooth the string\n \"\"\"\n \n s= String_.js['smooth']\n \n temp_cntr= np.zeros_like(self.new_cntr)\n \n for ind in range(1, self.num_img - 1):\n if self.two_pi_per_lst.size == 0:\n temp_cntr[ind]= (1 - s)*self.new_cntr[ind] + (s/2.)*(self.new_cntr[ind + 1] + self.new_cntr[ind - 1])\n else:\n dist_next_img= self.new_cntr[ind + 1] - self.new_cntr[ind]\n dist_prev_img= self.new_cntr[ind - 1] - self.new_cntr[ind]\n \n next_img= Image_.min_abs(dist_next_img, dist_next_img + self.shift, dist_next_img - self.shift) + self.new_cntr[ind]\n prev_img= Image_.min_abs(dist_prev_img, dist_prev_img + self.shift, dist_prev_img - self.shift) + self.new_cntr[ind]\n \n temp_cntr[ind]= (1 - s)*self.new_cntr[ind] + (s/2.)*(next_img + prev_img)\n \n self.new_cntr[1:-1]= temp_cntr[1:-1]\n \n if String_.js['clean_up'] == False:\n itr_dir= String_.js['output_dir']+'/iter_%d' %self.itr\n np.save(itr_dir+'/smoothed_cntr.npy', np.asarray(self.new_cntr))\n \n def reparametrize(self):\n \"\"\"\n Reparametrization of the string\n \"\"\"\n \n img_disp_vec= np.array([self.new_cntr[ind + 1] - self.new_cntr[ind] for ind in range(self.num_img - 1)])\n if self.two_pi_per_lst.size != 0: img_disp_vec= np.array([Image_.min_abs(disp, disp + self.shift, disp - self.shift) for disp in img_disp_vec])\n \n segment_lengths= np.array([np.linalg.norm(img_disp_vec[ind]) for ind in range(self.num_img - 1)])\n segment_length_upto= np.add.accumulate(segment_lengths)\n segment_length_upto= np.insert(segment_length_upto, 0, 0.)\n img_disp_vec_unit= img_disp_vec/(np.asarray([segment_lengths]).T)\n \n new_img_length_upto= np.array([ind*segment_length_upto[-1]/(self.num_img - 1) for ind in range(1, self.num_img - 1)])\n \n temp_cntr= np.zeros_like(self.new_cntr)\n for ind in range(1, self.num_img - 1):\n old_img_ind= int(np.argwhere(segment_length_upto < new_img_length_upto[ind - 1])[-1])\n temp_cntr[ind]= self.new_cntr[old_img_ind] + (new_img_length_upto[ind - 1] - segment_length_upto[old_img_ind])*img_disp_vec_unit[old_img_ind]\n \n if self.two_pi_per_lst.size != 0:\n old_temp_disp= temp_cntr[ind] - self.new_cntr[ind]\n temp_cntr[ind]= Image_.min_abs(old_temp_disp, old_temp_disp + self.shift, old_temp_disp - self.shift) + self.new_cntr[ind]\n \n self.new_cntr[1:-1] = temp_cntr[1:-1]\n # write new centers to image\n Image_.set_cntr_list(self.new_cntr)\n \n def replica_exchange(self):\n \"\"\"\n Perform replica exchange\n \"\"\"\n \n bias_cntr= Image_.cntr_list[-2] # not -1, since we assume that the cntr_list has already been updated for the next iteration\n structure_cntr= [img.current_cntr for img in self.img_list]\n kappa= np.asarray([res.kappa for res in Image_.restraint_list])\n \n ind_after_exchange= list(range(self.num_img))\n \n # randomize the neighbor list for each 
iteration\n neighbor_list= np.asarray(String_.js['RE_NL'])\n np.random.shuffle(neighbor_list)\n \n success_exchange= 0\n for pair in neighbor_list:\n # the first image in the neighbor list is labeled i; the second j\n i, j= [ind_after_exchange[ind] for ind in pair]\n \n Eixi= np.sum(0.5*kappa*(bias_cntr[i]-structure_cntr[i])**2)\n Ejxj= np.sum(0.5*kappa*(bias_cntr[j]-structure_cntr[j])**2)\n Eixj= np.sum(0.5*kappa*(bias_cntr[i]-structure_cntr[j])**2)\n Ejxi= np.sum(0.5*kappa*(bias_cntr[j]-structure_cntr[i])**2)\n \n ln_prob= -String_.js['RE_temp']*((Eixj - Eixi) - (Ejxj - Ejxi))/String_.js['RT']\n ln_r= np.log(np.random.random())\n \n if ln_r <= ln_prob:\n ind_after_exchange[i], ind_after_exchange[j]= ind_after_exchange[j], ind_after_exchange[i]\n success_exchange+= 1\n \n accept_ratio= float(success_exchange)/len(neighbor_list)\n \n # update the exchanged_index attribute for each image\n for ind, exchanged_ind in enumerate(ind_after_exchange):\n self.img_list[ind].exchanged_index= exchanged_ind\n \n # write log file\n out_str_list= ['%d' %(ind) for ind in ind_after_exchange] + ['%.2f' %accept_ratio]\n out_str='\\t'.join(out_str_list) + '\\n'\n \n output_file= String_.js['output_dir']+'/RE_log.txt'\n with open(output_file, 'a') as outfile:\n outfile.write(out_str)\n \n # save the parameters used to do replica exchange\n itr_dir= String_.js['output_dir']+'/iter_%d' %self.itr\n if String_.js['clean_up'] == False:\n np.save(itr_dir+'/bias_cntr.npy', bias_cntr)\n np.save(itr_dir+'/structure_cntr.npy', structure_cntr)\n np.save(itr_dir+'/kappa.npy', kappa)\n \n def dump_string(self):\n \"\"\"\n Write out the latest string iteration as an npy file for analysis\n \"\"\"\n \n outfile= '%s/string_%d.npy' %(String_.js['output_dir'], self.itr)\n np.save(outfile, Image_.cntr_list[-1])\n \n def dump_status(self):\n \"\"\"\n Write out the current state for restart; use pickle\n \"\"\"\n \n pickle.dump(self, open('%s/string.p' %String_.js['output_dir'], 'wb'))\n \n def umbrella(self):\n \"\"\"\n A mode for running umbrella sampling calculations, starting from a completed iteration\n \"\"\"\n \n itr_for_US= String_.js['US_iter']\n assert 0 <= itr_for_US <= self.itr, 'US_iter = %d is invalid!' 
%itr_for_US\n US_continuation= False\n \n # first check if the output directory exists\n US_dir= String_.js['output_dir']+'/iter_%d_US' %itr_for_US\n if os.path.isdir(US_dir):\n US_continuation= True\n sys.stdout.write('Output folder already exists and existing simulations will be continued: %s\\n' %US_dir)\n else: os.mkdir(US_dir)\n \n \n jobs= []\n \n multiprocessing.log_to_stderr(logging.DEBUG)\n logger= multiprocessing.get_logger()\n logger.setLevel(logging.INFO)\n \n for img in self.img_list:\n p= multiprocessing.Process(target= img.umbrella, args= (itr_for_US, US_continuation))\n jobs.append(p)\n p.start()\n \n for p in jobs: p.join()\n \n # the following code is for concurrent.futures, which somehow doesn't work on Wynton\n #pool= ProcessPoolExecutor(self.num_img)\n #futures= [pool.submit(img.umbrella, (itr_for_US, US_continuation)) for img in self.img_list]\n \n #while True:\n # complete= [future.done() for future in futures]\n # if np.all(complete):\n # break\n # else:\n # time.sleep(60)\n \n \n \n", "repo_name": "luhong88/string_method", "sub_path": "string_method/string_.py", "file_name": "string_.py", "file_ext": "py", "file_size_in_byte": 12762, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "os.path.isdir", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 41, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 42, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 46, "usage_type": "call"}, {"api_name": "multiprocessing.log_to_stderr", "line_number": 47, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 47, "usage_type": "attribute"}, {"api_name": "multiprocessing.get_logger", "line_number": 48, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 49, "usage_type": "attribute"}, {"api_name": "multiprocessing.Process", "line_number": 52, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 63, "usage_type": "call"}, {"api_name": "image_.Image_.cntr_list", "line_number": 90, "usage_type": "attribute"}, {"api_name": "image_.Image_", "line_number": 90, "usage_type": "name"}, {"api_name": "numpy.zeros_like", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 106, "usage_type": "call"}, {"api_name": "image_.Image_.min_abs", "line_number": 108, "usage_type": "call"}, {"api_name": "image_.Image_", "line_number": 108, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 109, "usage_type": "call"}, {"api_name": "image_.Image_.min_abs", "line_number": 115, "usage_type": "call"}, {"api_name": "image_.Image_", "line_number": 115, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.outer", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 137, "usage_type": "call"}, {"api_name": 
"numpy.save", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 150, "usage_type": "call"}, {"api_name": "image_.Image_.min_abs", "line_number": 159, "usage_type": "call"}, {"api_name": "image_.Image_", "line_number": 159, "usage_type": "name"}, {"api_name": "image_.Image_.min_abs", "line_number": 160, "usage_type": "call"}, {"api_name": "image_.Image_", "line_number": 160, "usage_type": "name"}, {"api_name": "numpy.save", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 176, "usage_type": "call"}, {"api_name": "image_.Image_.min_abs", "line_number": 176, "usage_type": "call"}, {"api_name": "image_.Image_", "line_number": 176, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 178, "usage_type": "attribute"}, {"api_name": "numpy.add.accumulate", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 179, "usage_type": "attribute"}, {"api_name": "numpy.insert", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 187, "usage_type": "call"}, {"api_name": "image_.Image_.min_abs", "line_number": 192, "usage_type": "call"}, {"api_name": "image_.Image_", "line_number": 192, "usage_type": "name"}, {"api_name": "image_.Image_.set_cntr_list", "line_number": 196, "usage_type": "call"}, {"api_name": "image_.Image_", "line_number": 196, "usage_type": "name"}, {"api_name": "image_.Image_.cntr_list", "line_number": 203, "usage_type": "attribute"}, {"api_name": "image_.Image_", "line_number": 203, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 205, "usage_type": "call"}, {"api_name": "image_.Image_.restraint_list", "line_number": 205, "usage_type": "attribute"}, {"api_name": "image_.Image_", "line_number": 205, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 211, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 224, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 257, 
"usage_type": "call"}, {"api_name": "image_.Image_.cntr_list", "line_number": 257, "usage_type": "attribute"}, {"api_name": "image_.Image_", "line_number": 257, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 264, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 277, "usage_type": "call"}, {"api_name": "os.path", "line_number": 277, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 279, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 279, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 280, "usage_type": "call"}, {"api_name": "multiprocessing.log_to_stderr", "line_number": 285, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 285, "usage_type": "attribute"}, {"api_name": "multiprocessing.get_logger", "line_number": 286, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 287, "usage_type": "attribute"}, {"api_name": "multiprocessing.Process", "line_number": 290, "usage_type": "call"}]} +{"seq_id": "40058243843", "text": "import asyncio\nfrom pyppeteer import launch\n\nasync def main():\n browser = await launch(headless=True, args=['--no-sandbox'])\n page = await browser.newPage()\n await page.goto('https://google.com')\n await page.screenshot({'path': 'example.png'})\n\n dimensions = await page.evaluate('''() => {\n return {\n width: document.documentElement.clientWidth,\n height: document.documentElement.clientHeight,\n deviceScaleFactor: window.devicePixelRatio,\n }\n }''')\n\n print(dimensions)\n # >>> {'width': 800, 'height': 600, 'deviceScaleFactor': 1}\n await browser.close()\n\nasyncio.get_event_loop().run_until_complete(main())", "repo_name": "Davidchristy/house-research-project", "sub_path": "houseSearchEmailScanner/test_headless_browser.py", "file_name": "test_headless_browser.py", "file_ext": "py", "file_size_in_byte": 679, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "pyppeteer.launch", "line_number": 5, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "36719851538", "text": "import requests, bs4\nimport sqlite3\nfrom sklearn import tree\nimport numpy as np\n\nconn = sqlite3.connect('matches.sqlite3')\nc = conn.cursor()\n\nallp = []\n\nc.execute('SELECT * FROM players WHERE tier=-1')\nfor row in c: allp.append(row[0])\n\nfor player in allp:\n\tcounter = 0\n\tc.execute('SELECT * FROM matches WHERE winner=(?)', (player, ))\n\tfor row in c: counter += 1\n\tif counter > 15:\n\t\tprint(\"{}: {} games and tier -1\". format(player, counter))\n\tcounter = 0\n", "repo_name": "ambisinister/ambistats", "sub_path": "ahhhh.py", "file_name": "ahhhh.py", "file_ext": "py", "file_size_in_byte": 455, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "sqlite3.connect", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "23361048002", "text": "from django.urls import path\r\nfrom .views import login_view,admin_view,customer_view,logout_view,home_view\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n path('login/', login_view, name='login_view'),\r\n path('admin_view',admin_view,name='admin_view'),\r\n path('customer_view',customer_view,name='customer_view'),\r\n path('logout/', logout_view, name='logout'),\r\n path('',home_view,name='home_view'),\r\n path('createorder/', views.createorder, name='createorder'),\r\n \r\n \r\n]", "repo_name": "Cheshta1828/Task", "sub_path": "task/accounts/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 495, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "views.login_view", "line_number": 6, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "views.admin_view", "line_number": 7, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "views.customer_view", "line_number": 8, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.logout_view", "line_number": 9, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "views.home_view", "line_number": 10, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.createorder", "line_number": 11, "usage_type": "attribute"}]} +{"seq_id": "6826857948", "text": "import os\nimport signal\nimport socket\nimport logging\n\nfrom work.protocol import Feeder, Packet\nfrom work.models import cmd\nfrom work.utils import configure_logging\nfrom work.cmdargs import get_cmd_args\nfrom work.exceptions import ClientFinishException\n\n\ndef shutdown_handler(signum, frame):\n raise ClientFinishException()\n\n\nclass CommandClient:\n\n session_id = None\n TIMEOUT = 1.0\n CHUNK_SIZE = 1024\n commands = [cmd.CONNECTED, cmd.PONG, cmd.PONGD, cmd.ACKQUIT, cmd.ACKFINISH]\n\n def __init__(self, host, port):\n self.socket = socket.socket(socket.AF_INET,\n socket.SOCK_STREAM)\n self.socket.settimeout(self.TIMEOUT)\n self.socket.connect((host, port))\n\n @classmethod\n def run_client(cls, host, port):\n client = cls(host, port)\n try:\n handler = signal.signal(signal.SIGINT, shutdown_handler)\n client.run()\n except (OSError, socket.timeout, ClientFinishException):\n client.shutdown()\n finally:\n signal.signal(signal.SIGINT, handler)\n\n def run(self):\n self.feeder = Feeder(self.commands)\n while True:\n command = input().split()\n kwargs = {}\n cmd_input = getattr(cmd, command[0].upper())\n if cmd_input == cmd.PINGD:\n kwargs['data'] = command[1]\n packet = eval('{}(**kwargs).pack()'.format(command[0]))\n self.socket.sendall(packet)\n self.recv_response()\n\n def recv_response(self):\n tail = bytes()\n while True:\n chunk = tail + self.socket.recv(self.CHUNK_SIZE)\n packet, tail = self.feeder.feed(chunk)\n if not packet:\n continue\n else:\n getattr(self, packet.__class__.__name__.lower())(packet)\n break\n\n def connected(self, packet):\n self.session = packet.session\n print('{} {}'.format(packet.cmd, packet.session))\n\n def pong(self, packet):\n print(packet.cmd)\n\n def pongd(self, packet):\n print('{} {}'.format(packet.cmd, packet.data))\n\n def ackquit(self, packet):\n print('{} {}'.format(packet.cmd, packet.session))\n self.shutdown()\n\n def ackfinish(self, packet):\n print(packet.cmd)\n 
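        # ACKFINISH ends the exchange, so after printing it the client closes its own\n        # socket and exits (shutdown() below raises SystemExit).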
self.shutdown()\n\n    def shutdown(self):\n        self.socket.close()\n        logging.info('socket closed')\n        raise SystemExit()\n\n\nif __name__ == '__main__':\n    configure_logging('Client')\n    args = get_cmd_args()\n    CommandClient.run_client(args.host, args.port)", "repo_name": "learnpython/advanced-01", "sub_path": "lud4ik/command_client.py", "file_name": "command_client.py", "file_ext": "py", "file_size_in_byte": 2580, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "48", "api": [{"api_name": "work.exceptions.ClientFinishException", "line_number": 14, "usage_type": "call"}, {"api_name": "work.models.cmd.CONNECTED", "line_number": 22, "usage_type": "attribute"}, {"api_name": "work.models.cmd", "line_number": 22, "usage_type": "name"}, {"api_name": "work.models.cmd.PONG", "line_number": 22, "usage_type": "attribute"}, {"api_name": "work.models.cmd.PONGD", "line_number": 22, "usage_type": "attribute"}, {"api_name": "work.models.cmd.ACKQUIT", "line_number": 22, "usage_type": "attribute"}, {"api_name": "work.models.cmd.ACKFINISH", "line_number": 22, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 25, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 25, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 26, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 34, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 34, "usage_type": "attribute"}, {"api_name": "socket.timeout", "line_number": 36, "usage_type": "attribute"}, {"api_name": "work.exceptions.ClientFinishException", "line_number": 36, "usage_type": "name"}, {"api_name": "signal.signal", "line_number": 39, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 39, "usage_type": "attribute"}, {"api_name": "work.protocol.Feeder", "line_number": 42, "usage_type": "call"}, {"api_name": "work.models.cmd", "line_number": 46, "usage_type": "argument"}, {"api_name": "work.models.cmd.PINGD", "line_number": 47, "usage_type": "attribute"}, {"api_name": "work.models.cmd", "line_number": 47, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 84, "usage_type": "call"}, {"api_name": "work.utils.configure_logging", "line_number": 89, "usage_type": "call"}, {"api_name": "work.cmdargs.get_cmd_args", "line_number": 90, "usage_type": "call"}]}
+{"seq_id": "23446727046", "text": "from collections import deque\nimport sys\n\ndR = [0, 1, 0, -1]\ndC = [1, 0, -1, 0]\n\ndef bfs(dq) :\n    while dq :\n        row, col = dq.popleft()\n        chk = visited[row][col]\n        for i in range(4) :\n            next_row = row + dR[i];\n            next_col = col + dC[i];\n\n            if next_row < 0 or next_row >=h or next_col < 0 or next_col >= w :\n                if chk != -2: # if this cell is not fire\n                    return visited[row][col] + 1\n            else :\n                if visited[next_row][next_col] == -1 and (building[next_row][next_col] == '.' or building[next_row][next_col] == '@') : # if unvisited and the cell is empty or the start point (also empty)\n                    if chk == -2: # if the current cell is fire\n                        visited[next_row][next_col] = -2\n                    else :\n                        visited[next_row][next_col] = visited[row][col] + 1\n                    dq.append((next_row, next_col))\n    return \"IMPOSSIBLE\"\n\n\ntest_size = int(sys.stdin.readline()) # use this to read many lines of input in a loop\n\nfor test in range(test_size) :\n    w, h = map(int, sys.stdin.readline().split(\" \"))\n    building = []\n    start = []\n    fire = []\n    visited = [[-1] * w for _ in range(h)]\n\n    for row in range(h) :\n        tmp = sys.stdin.readline()\n        building.append(tmp)\n        for col in range(w) :\n            if tmp[col] == '*' :\n                fire.append((row, col))\n                visited[row][col] = -2\n            elif tmp[col] == '@' :\n                start.append((row, col))\n                visited[row][col] = 0\n\n    dq = deque()\n    dq.extend(fire)\n    dq.extend(start)\n\n    print(bfs(dq))\n", "repo_name": "JoungMinJu/PyCodingTest", "sub_path": "Reis/bfs/first/BOJ_5427.py", "file_name": "BOJ_5427.py", "file_ext": "py", "file_size_in_byte": 1668, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "sys.stdin.readline", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 31, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 38, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 38, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 48, "usage_type": "call"}]}
+{"seq_id": "6843183563", "text": "import tensorflow as tf\nimport tensorflow_datasets as tfds\nimport argparse\nfrom contextlib import redirect_stdout\nfrom tf_nndct.optimization import IterativePruningRunner\nimport os\n\ndef add_normalized_values(img, label):\n    \"\"\"Normalizes images\"\"\"\n    norm_img = tf.cast(img, dtype=tf.float32) / 255.0\n    return tf.image.resize(norm_img, [224, 224]), label\n\n\ndef load_dataset(batch_size):\n    (ds_train, ds_validation) = tfds.load('imagenet2012', split=['train', 'validation'], as_supervised=True, shuffle_files=True)\n    # map data\n    ds_train = ds_train.map(add_normalized_values, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n    ds_validation = ds_validation.map(add_normalized_values, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n    # use batching\n    ds_validation = ds_validation.batch(batch_size)\n    ds_train = ds_train.batch(batch_size)\n\n    return ds_train, ds_validation\n\n\ndef load_model(workspace, prefix, network) -> tf.keras.models.Model:\n    return tf.keras.models.load_model(workspace + '/' + prefix + '/trained/{}_model'.format(network))\n\n\ndef evaluate(model):\n    \"\"\"Function used by Pruner to evaluate pruning performance\"\"\"\n    model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n    score = model.evaluate(ds_test, verbose=0)\n    return score[1]\n\n\ndef app(epochs, workspace, ratio, prefix, network):\n    input_shape = (224, 224, 3)\n\n    # model to use\n    model = load_model(workspace, prefix, network)\n    model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\n    # create pruning runner and analyze model\n    input_spec = tf.TensorSpec((1, *input_shape), tf.float32)\n    pruning_runner = IterativePruningRunner(model, input_spec)\n    pruning_runner.ana(evaluate)\n\n    # prune\n    sparse_model = pruning_runner.prune(ratio=ratio)\n\n    # fine-tuning process\n
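    # A short retrain of the sparse model follows: the surviving weights adapt to the\n    # pruned topology, which typically recovers much of the accuracy lost to pruning\n    # (the epoch count and optimizer here are this script's defaults, not a general rule).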
sparse_model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n sparse_model.fit(ds_train, epochs=epochs)\n\n # evaluate\n with open(workspace + '/' + prefix + '/pruned/{}_pruned_evaluate.txt'.format(network), 'w+') as f:\n with redirect_stdout(f):\n loss, accuracy = sparse_model.evaluate(ds_test, verbose=2)\n print('Loss {}, accuracy {}'.format(loss, accuracy))\n\n\n # save model\n filename = \"/tmp/vai_benchmark/data/pruned/{}_model_sparse\".format(network)\n sparse_model.save_weights(filename, save_format=\"tf\")\n model.load_weights(filename)\n\n runner = IterativePruningRunner(model, input_spec)\n pruned_slim_model = runner.get_slim_model()\n pruned_slim_model.save(workspace + '/' + prefix + '/pruned/{}_model'.format(network))\n\n # save init summary\n with open(workspace + '/' + prefix + '/pruned/{}_pruned_summary.txt'.format(network), 'w+') as f:\n with redirect_stdout(f):\n pruned_slim_model.summary()\n\n print('Finished pruning and saving information')\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-r', '--ratio', type=float, default='0.9',\n help='Ratio to use for pruning. Float value from 0 to 1, 0.9 being default')\n parser.add_argument('-b', '--batch_size', type=int, default='32',\n help='Batch size to use for training. Default is 32')\n parser.add_argument('-e', '--epochs', type=int, default='10',\n help='Epoch number to finetune network. Default is 10')\n parser.add_argument('-w', '--workspace', type=str, default='/workspace/results',\n help='Path to folder to write pruned model summary, evaluate and h5')\n parser.add_argument('-p', '--prefix', type=str, default='default',\n help='Prefix to folder where all information will be written')\n parser.add_argument('-n', '--network', type=str, default='custom',\n help='Name of network. App uses this prop to save and load files')\n\n args = parser.parse_args()\n print('Command line options:')\n print(' --ratio : ', args.ratio)\n print(' --batch_size : ', args.batch_size)\n print(' --epochs : ', args.epochs)\n print(' --workspace : ', args.workspace)\n print(' --prefix : ', args.prefix)\n print(' --network : ', args.network)\n\n # load dataset. 
Needs to be done earlier for evaluate function\n ds_train, ds_test = load_dataset(args.batch_size)\n\n # create dir\n os.makedirs(args.workspace + '/' + args.prefix + '/pruned', exist_ok=True)\n\n app(args.epochs, args.workspace, args.ratio, args.prefix, args.network)\n", "repo_name": "savicci/vai_benchmark", "sub_path": "pruning/imagenet224_custom_prune.py", "file_name": "imagenet224_custom_prune.py", "file_ext": "py", "file_size_in_byte": 4657, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "tensorflow.cast", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tensorflow.image.resize", "line_number": 11, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 11, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.load", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorSpec", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tf_nndct.optimization.IterativePruningRunner", "line_number": 47, "usage_type": "call"}, {"api_name": "contextlib.redirect_stdout", "line_number": 59, "usage_type": "call"}, {"api_name": "tf_nndct.optimization.IterativePruningRunner", "line_number": 69, "usage_type": "call"}, {"api_name": "contextlib.redirect_stdout", "line_number": 75, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 83, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "34445609876", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nCalculate several reconstruction error metrics for spherical harmonics \nrepresentations of a given dataset.\n\nInput: results from cvapipe_analysis computefeatures step\nOutput: csv file with measured errors for each CellId\n'''\n\nimport argparse\nimport ast\nfrom pathlib import Path\n\nfrom aicsimageio import AICSImage\nfrom aicsshparam import shtools\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport vtk\nfrom vtk.util.numpy_support import vtk_to_numpy\n\nfrom neuromast3d.visualization.plotting_tools import get_features_data, get_matrix_of_shcoeffs_for_pca, reconstruct_mesh_from_shcoeffs_array\n\n\ndef get_distances_between_meshes(original, reconstructed):\n coords_orig = vtk_to_numpy(original.GetPoints().GetData())\n coords_rec = vtk_to_numpy(reconstructed.GetPoints().GetData())\n\n # Calculate forward distances\n Tree = vtk.vtkKdTreePointLocator()\n Tree.SetDataSet(original)\n Tree.BuildLocator()\n\n forward_dists = []\n for i in range(coords_rec.shape[0]):\n j = Tree.FindClosestPoint(coords_rec[i])\n dist = np.linalg.norm(coords_rec[i]-coords_orig[j])\n forward_dists.append(dist)\n\n forward_dists = np.array(forward_dists)\n\n # Calculate reverse distances\n Tree = vtk.vtkKdTreePointLocator()\n Tree.SetDataSet(reconstructed)\n Tree.BuildLocator()\n\n reverse_dists = []\n for i in range(coords_orig.shape[0]):\n j = 
Tree.FindClosestPoint(coords_orig[i])\n dist = np.linalg.norm(coords_orig[i]-coords_rec[j])\n reverse_dists.append(dist)\n\n reverse_dists = np.array(reverse_dists)\n return forward_dists, reverse_dists\n\n\ndef calculate_hausdorff_distances(forward_dists, reverse_dists):\n forward_hd = np.max(forward_dists)\n reverse_hd = np.max(reverse_dists)\n max_hd = max(forward_hd, reverse_hd)\n return forward_hd, reverse_hd, max_hd\n\n\ndef calculate_mean_dist_error(forward_dists, reverse_dists):\n forward_mde = forward_dists.mean()\n reverse_mde = reverse_dists.mean()\n return forward_mde, reverse_mde\n\n\ndef calculate_std_dist_error(forward_dists, reverse_dists):\n forward_sde = forward_dists.std()\n reverse_sde = reverse_dists.std()\n return forward_sde, reverse_sde\n\n\ndef measure_reconstruction_error(original, reconstructed, scale):\n forward_dists, reverse_dists = get_distances_between_meshes(original, reconstructed)\n forward_hd, reverse_hd, max_hd = calculate_hausdorff_distances(forward_dists, reverse_dists)\n forward_mde, reverse_mde = calculate_mean_dist_error(forward_dists, reverse_dists)\n forward_sde, reverse_sde = calculate_std_dist_error(forward_dists, reverse_dists)\n errors = {\n 'forward_hd': forward_hd * scale,\n 'reverse_hd': reverse_hd * scale,\n 'max_hd': max_hd * scale,\n 'forward_mde': forward_mde * scale,\n 'reverse_mde': reverse_mde * scale,\n 'forward_sde': forward_sde * scale,\n 'reverse_sde': reverse_sde * scale,\n }\n return errors\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('local_staging_dir')\n parser.add_argument('alias')\n\n args = parser.parse_args()\n ls_dir = Path(args.local_staging_dir)\n alias = str(args.alias)\n\n features_data = get_features_data(ls_dir)\n shcoeffs, feat_names = get_matrix_of_shcoeffs_for_pca(features_data, alias=alias)\n\n rec_errors = []\n for row_ind, cell_shcoeffs in tqdm(enumerate(shcoeffs)):\n coeffs, mesh_rec, grid_rec = reconstruct_mesh_from_shcoeffs_array(\n cell_shcoeffs,\n feat_names,\n alias,\n 32,\n save_path=None\n )\n seg_path = features_data['crop_seg'][row_ind]\n\n reader = AICSImage(seg_path)\n\n if alias == 'MEM':\n channel = features_data['cell_seg'][row_ind]\n elif alias == 'NUC':\n channel = features_data['nuc_seg'][row_ind]\n\n seg_img = reader.get_image_data('ZYX', C=channel, S=0, T=0)\n mesh, _, _ = shtools.get_mesh_from_image(seg_img)\n\n pixel_size = ast.literal_eval(features_data['pixel_size_xyz'][row_ind])\n scale_factor = pixel_size[0]*10**6\n errors = measure_reconstruction_error(mesh, mesh_rec, scale_factor)\n errors['CellId'] = features_data.index[row_ind]\n rec_errors.append(errors)\n\n rec_errors = pd.DataFrame.from_dict(rec_errors)\n\n rec_errors.to_csv(ls_dir / f'rec_errors_{alias}.csv')\n\nif __name__ == '__main__':\n main()\n", "repo_name": "raible-lab/neuromast3d", "sub_path": "neuromast3d/visualization/rec_error.py", "file_name": "rec_error.py", "file_ext": "py", "file_size_in_byte": 4500, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "48", "api": [{"api_name": "vtk.util.numpy_support.vtk_to_numpy", "line_number": 28, "usage_type": "call"}, {"api_name": "vtk.util.numpy_support.vtk_to_numpy", "line_number": 29, "usage_type": "call"}, {"api_name": "vtk.vtkKdTreePointLocator", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 42, 
"usage_type": "call"}, {"api_name": "vtk.vtkKdTreePointLocator", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 52, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 61, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 96, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 101, "usage_type": "call"}, {"api_name": "neuromast3d.visualization.plotting_tools.get_features_data", "line_number": 104, "usage_type": "call"}, {"api_name": "neuromast3d.visualization.plotting_tools.get_matrix_of_shcoeffs_for_pca", "line_number": 105, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 108, "usage_type": "call"}, {"api_name": "neuromast3d.visualization.plotting_tools.reconstruct_mesh_from_shcoeffs_array", "line_number": 109, "usage_type": "call"}, {"api_name": "aicsimageio.AICSImage", "line_number": 118, "usage_type": "call"}, {"api_name": "aicsshparam.shtools.get_mesh_from_image", "line_number": 126, "usage_type": "call"}, {"api_name": "aicsshparam.shtools", "line_number": 126, "usage_type": "name"}, {"api_name": "ast.literal_eval", "line_number": 128, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 134, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 134, "usage_type": "attribute"}]} +{"seq_id": "30718539485", "text": "import tensorflow as tf\nimport numpy as np\nimport gensim\nimport os\nimport pickle\nfrom rslutils.embeding import WordsVect, TagsOneHot\nfrom rslutils.calc_f1 import calc_f1\nclass SRLModel():\n def __init__(self, model_file, labels, embedding_size, hidden_layer, nlabels, tag_size, pad_tok):\n # labels, all label impossible\n self.label_to_tag = {idx: tag for tag, idx in labels.items()}\n self.embedding_size = embedding_size\n self.hidden_layer = hidden_layer\n self.nlabels = nlabels\n self.tag_size = tag_size\n self.pad_tok = pad_tok\n self._cursor = 0\n self.data_size = 0\n self.labels = 0\n self.test_data = []\n self.test_raw_data = []\n self.test_raw_tags = []\n self.test_label = 0\n self.dropout = 0\n self.unknown_words = dict()\n self.restore = 0\n self.learn_rate = 0.002\n self.alg_optim = \"adam\"\n self.clip_val = 1.25\n # 'D:/project/bilstm/model/word2vec_from_weixin/word2vec_wx'\n model = gensim.models.Word2Vec.load(model_file)\n self.model_vectors = model.wv\n del model\n\n\n def next_batch(self, batch_size, data, tags, rel_loc, labels=None ):\n # data, tags, labels, sequences of words, tag or labels are int\n batches = []\n rlabels = []\n\n if self._cursor == 0:\n self.data_size = len(data)\n segment = self.data_size // batch_size\n self._cursor = [offset * segment for offset in range(batch_size)]\n\n for b in range(batch_size):\n wv, un = WordsVect(data[self._cursor[b]], self.model_vectors, self.unknown_words)\n self.unknown_words = dict(self.unknown_words, **un)\n tgs = TagsOneHot(tags[self._cursor[b]], self.tag_size)\n st, un = WordsVect(rel_loc[self._cursor[b]], self.model_vectors, self.unknown_words) #[0] * len(wv)\n self.unknown_words = dict(self.unknown_words, **un)\n st = st * len(wv)\n arr = np.concatenate((np.array(wv) , np.array(st), np.array(tgs)),axis=1)\n batches += [arr.tolist()]\n if labels:\n rlabels.append(labels[self._cursor[b]])\n self._cursor[b] = (self._cursor[b] + 1) % 
self.data_size\n\n return batches, rlabels\n\n\n def load_test(self, test, tags, rel_loc, labels=None, raw_tags=None):\n # data, tags, labels, sequences of words, tag or labels are int\n batches = []\n rlabels = []\n if len(self.test_data) == 0:\n for b in range(len(test)):\n wv, un = WordsVect(test[b], self.model_vectors, self.unknown_words)\n self.unknown_words = dict(self.unknown_words, **un)\n tgs = TagsOneHot(tags[b], self.tag_size)\n st, un = WordsVect(rel_loc[b], self.model_vectors, self.unknown_words) # [0] * len(wv)\n self.unknown_words = dict(self.unknown_words, **un)\n st = st * len(wv)\n arr = np.concatenate((np.array(wv), np.array(st), np.array(tgs)), axis=1)\n if labels:\n rlabels.append(labels[b])\n batches += [arr.tolist()]\n self.test_data = batches\n self.test_label = rlabels\n self.test_raw_data = test\n self.test_raw_tags = raw_tags\n return batches, rlabels\n else:\n return self.test_data, self.test_label\n\n\n def _seq_padding(self, pad_tok, data, max_length, nlevels=1):\n\n sequence_padded, sequence_length = [], []\n if nlevels == 1:\n for seq in data:\n seq = list(seq)\n seq_ = seq[:max_length] + [pad_tok] * max(max_length - len(seq), 0)\n sequence_padded += [seq_]\n sequence_length += [min(len(seq), max_length)]\n else:\n length_list = len(data[0][0])\n for seq in data:\n seq = list(seq)\n seq_ = seq[:max_length] + [ [pad_tok] * length_list ] * max(max_length - len(seq), 0)\n sequence_padded += [seq_]\n sequence_length += [min(len(seq), max_length)]\n\n return sequence_padded, sequence_length\n\n\n def get_feed_dict(self, pad_tok, batch_seq, labels=None):\n feed = None\n max_length = max(map(lambda x: len(x), batch_seq))\n if labels:\n label_seq, _ = self._seq_padding(pad_tok, labels, max_length)\n feed = {\n self.labels: label_seq,\n self.dropout: 0.5\n }\n else:\n feed = {\n self.dropout: 1.0\n }\n\n sequence_padded, sequence_length = self._seq_padding(pad_tok, batch_seq, max_length, nlevels=2)\n feed[self.sequence_lengths] = sequence_length\n feed[self.word_data] = sequence_padded\n return feed, sequence_length\n\n\n def _build_tf(self, batch_size, lr_method, lr, clip):\n # shape = (batch size, max length of sentence in batch)\n self.word_data = tf.placeholder(tf.float32, shape=[None, None, 256 * 2 + self.tag_size],\n name=\"word_data\")\n\n # shape = (batch size)\n self.sequence_lengths = tf.placeholder(tf.int32, shape=[None],\n name=\"sequence_lengths\")\n\n # shape = (batch size, max length of sentence in batch)\n self.labels = tf.placeholder(tf.int32, shape=[None, None],\n name=\"labels\")\n # hyper parameters\n self.dropout = tf.placeholder(dtype=tf.float32, shape=[],\n name=\"dropout\")\n\n cell_fw = tf.contrib.rnn.LSTMCell(self.hidden_layer)\n cell_bw = tf.contrib.rnn.LSTMCell(self.hidden_layer)\n (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, self.word_data, sequence_length=self.sequence_lengths, dtype=tf.float32)\n output = tf.concat([output_fw, output_bw], axis=-1)\n # TODO: dropout\n output = tf.nn.dropout(output, self.dropout)\n\n # crf-loss\n # TODO: Logits\n W = tf.get_variable(\"W\", dtype=tf.float32,\n shape=[2*self.hidden_layer, self.nlabels])\n b = tf.get_variable(\"b\", shape=[self.nlabels],\n dtype=tf.float32, initializer=tf.zeros_initializer())\n\n nsteps = tf.shape(output)[1]\n output = tf.reshape(output, [-1, 2 * self.hidden_layer])\n pred = tf.matmul(output, W) + b\n self.logits = tf.reshape(pred, [-1, nsteps, self.nlabels])\n log_likelihood, trans_params = tf.contrib.crf.crf_log_likelihood(self.logits, 
self.labels, self.sequence_lengths)\n self.trans_params = trans_params # need to evaluate it for decoding\n self.loss = tf.reduce_mean(-log_likelihood)\n\n # Optimizer.\n _lr_m = lr_method.lower()\n # global_step = tf.Variable(0)\n learning_rate = lr\n # learning_rate = tf.train.exponential_decay(\n # lr, global_step, 5000, 0.1, staircase=True)\n\n if _lr_m == 'adam': # sgd method\n optimizer = tf.train.AdamOptimizer(learning_rate)\n elif _lr_m == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(learning_rate)\n elif _lr_m == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n elif _lr_m == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(learning_rate)\n else:\n raise NotImplementedError(\"Unknown method {}\".format(_lr_m))\n\n # clip 1.25\n if clip < 0:\n self.opt = optimizer.minimize(self.loss)\n else:\n gradients, v = zip(*optimizer.compute_gradients(self.loss))\n gradients, _ = tf.clip_by_global_norm(gradients, clip)\n self.opt = optimizer.apply_gradients(zip(gradients, v))\n\n # Predictions.\n\n\n def train(self, train_data, tags, label, rel_loc, nepochs, batch_size, with_val_file=None):\n self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True))\n with tf.device(\"/gpu:10\"):\n self._build_tf(batch_size, self.alg_optim, self.learn_rate, self.clip_val)\n self.sess.run(tf.global_variables_initializer())\n self.saver = tf.train.Saver()\n # batch_size = batch_size\n # nbatches = (len(train) + batch_size - 1) // batch_size\n\n print('Initialized')\n mean_loss = 0\n for epchs in range(nepochs):\n batch_seq, labels = self.next_batch(batch_size, train_data, tags, rel_loc, label)\n feed_dict, _ = self.get_feed_dict(self.pad_tok, batch_seq, labels)\n _, train_loss = self.sess.run(\n [self.opt, self.loss], feed_dict=feed_dict)\n mean_loss += train_loss\n if (epchs ) % 400 == 0 and epchs != 0:\n print(epchs, \" \", \"mean train loss: \", mean_loss / (epchs + 1))\n if with_val_file:\n self.run_evaluate(100, with_val_file)\n\n if epchs % 50 == 0:\n print(epchs, \" \", \"train loss: \", train_loss)\n\n if ( epchs ) % 400 == 0 and epchs != 0:\n self.save_session(\"tmp_200_lr0.02_model_\" + str(epchs))\n\n\n def retrain(self, dir_model, train_data, tags, label, rel_loc, nepochs, batch_size, with_val_file=None):\n self._build_tf(batch_size, self.alg_optim, self.learn_rate, self.clip_val)\n if self.restore == 0:\n self.restoreModel(dir_model)\n self.sess.run(tf.global_variables_initializer())\n # batch_size = batch_size\n # nbatches = (len(train) + batch_size - 1) // batch_size\n\n print('Initialized')\n mean_loss = 0\n for epchs in range(nepochs):\n batch_seq, labels = self.next_batch(batch_size, train_data, tags, rel_loc, label)\n feed_dict, _ = self.get_feed_dict(self.pad_tok, batch_seq, labels)\n _, train_loss = self.sess.run(\n [self.opt, self.loss], feed_dict=feed_dict)\n mean_loss += train_loss\n if epchs % 500 == 0:\n print(\"mean train loss: \", mean_loss / (epchs + 1))\n if with_val_file:\n self.run_evaluate(100, with_val_file+ \"_\" + str(epchs))\n if epchs % 50 == 0:\n print(\"train loss: \", train_loss)\n\n # predict\n def predict(self, test, tags, rel_loc, batch_size):\n # assert self.unknown_words is not None\n with tf.device(\"/cpu:0\"):\n self._build_tf(batch_size, self.alg_optim, self.learn_rate, self.clip_val)\n feed_dict = dict()\n batch_test = self.load_test(test, tags, rel_loc)\n nbatches = len(test) // batch_size\n viterbi_sequences, seq_lengths = self._predict(nbatches, batch_size, batch_test)\n return 
viterbi_sequences, seq_lengths\n\n\n def _predict_batch(self, batch_data):\n viterbi_sequences = []\n feed_dict, sequence_lengths = self.get_feed_dict(self.pad_tok, batch_data)\n logits, trans_params = self.sess.run(\n [self.logits, self.trans_params], feed_dict=feed_dict)\n # return decodeing\n for logit, sequence_length in zip(logits, sequence_lengths):\n logit = logit[:sequence_length] # keep only the valid steps\n viterbi_seq, viterbi_score = tf.contrib.crf.viterbi_decode(\n logit, trans_params)\n viterbi_sequences += [viterbi_seq]\n\n return viterbi_sequences, sequence_lengths\n\n\n def _predict(self, nbatches, batch_size, data):\n viterbi_sequences = []\n seq_lengths = []\n begin = 0\n for it in range(nbatches):\n begin = it * batch_size\n batch_data = data[begin: begin + batch_size]\n vs, sl = self._predict_batch(batch_data)\n seq_lengths.extend(sl)\n viterbi_sequences.extend(vs)\n\n if begin + batch_size < len(data):\n batch_data = data[begin + batch_size: len(data)]\n vs, sl = self._predict_batch(batch_data)\n seq_lengths.extend(sl)\n viterbi_sequences.extend(vs)\n\n return viterbi_sequences, seq_lengths\n\n\n def restoreModel(self, dir_model):\n self.saver = tf.train.import_meta_graph(dir_model + \".meta\")\n self.sess = tf.Session()\n with open(dir_model + \".unknown\", 'rb') as f:\n self.unknown_words = pickle.load(f)\n self.saver.restore(self.sess, dir_model)\n self.restore = 1\n\n\n def save_session(self, dir_model):\n \"\"\"Saves session = weights\"\"\"\n with open(dir_model + \".unknown\", 'wb') as f:\n pickle.dump(self.unknown_words, f)\n self.saver.save(self.sess, dir_model)\n\n\n def close_session(self):\n \"\"\"Closes the session\"\"\"\n self.sess.close()\n\n # run_evaluate\n def run_evaluate(self, batch_size, filename):\n assert len(self.test_data) > 0 and len(self.test_raw_data) > 0\n print(\"Begin Run evaluate: \")\n nbatches = len(self.test_data) // batch_size\n viterbi_sequences, seq_lengths = self._predict(nbatches, batch_size, self.test_data)\n self.evaluate(\"pred_200_lr0.02\", filename, viterbi_sequences,\n seq_lengths, self.test_raw_data, self.test_raw_tags)\n\n\n def evaluate(self, filename, dev_file, viterbi_seq, seq_length, val_data, val_tags, ref_seq=None):\n vit_dic = self.label_to_tag\n assert len(viterbi_seq) == len(seq_length) == len(val_data) == len(val_tags)\n with open(filename, 'w', encoding=\"utf-8\") as f:\n for num in range(len(viterbi_seq)):\n seq = viterbi_seq[num]\n sl = seq_length[num]\n vt = val_tags[num]\n vd = val_data[num]\n if ref_seq:\n rel = ref_seq[num]\n str = \"\"\n for it in range(sl - 1):\n if ref_seq and vit_dic[seq[it]] == 'rel' :\n if vd[it] != rel:\n vd[it] = 'O'\n if ref_seq and vd[it] == rel:\n if vit_dic[seq[it]] != 'rel':\n seq[it] = 1\n str += \"%s/%s/%s \" % (vd[it], vt[it], vit_dic[seq[it]])\n str += \"%s/%s/%s\\n\" % (vd[sl - 1], vt[sl - 1], vit_dic[seq[sl - 1]])\n f.write(str)\n\n # print(calc_f1(filename, dev_file)) F: 0.6530078465562337\n\n\n\n\n", "repo_name": "slh0302/bilstm", "sub_path": "tmodel/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 14593, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "gensim.models.Word2Vec.load", "line_number": 31, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 31, "usage_type": "attribute"}, {"api_name": "rslutils.embeding.WordsVect", "line_number": 47, "usage_type": "call"}, {"api_name": "rslutils.embeding.TagsOneHot", "line_number": 49, "usage_type": 
"call"}, {"api_name": "rslutils.embeding.WordsVect", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "rslutils.embeding.WordsVect", "line_number": 68, "usage_type": "call"}, {"api_name": "rslutils.embeding.TagsOneHot", "line_number": 70, "usage_type": "call"}, {"api_name": "rslutils.embeding.WordsVect", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 129, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 133, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 133, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 137, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 137, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 140, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 140, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.rnn.LSTMCell", "line_number": 143, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 143, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.rnn.LSTMCell", "line_number": 144, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 144, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.bidirectional_dynamic_rnn", "line_number": 145, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 145, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 146, "usage_type": "attribute"}, {"api_name": "tensorflow.concat", "line_number": 147, "usage_type": "call"}, {"api_name": "tensorflow.nn.dropout", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 149, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 153, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 153, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 155, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 156, "usage_type": "attribute"}, {"api_name": "tensorflow.zeros_initializer", "line_number": 156, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 159, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 160, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 161, "usage_type": "call"}, {"api_name": "tensorflow.contrib.crf.crf_log_likelihood", "line_number": 162, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 162, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 164, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 174, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 174, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdagradOptimizer", "line_number": 176, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 176, "usage_type": "attribute"}, {"api_name": "tensorflow.train.GradientDescentOptimizer", 
"line_number": 178, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 178, "usage_type": "attribute"}, {"api_name": "tensorflow.train.RMSPropOptimizer", "line_number": 180, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 180, "usage_type": "attribute"}, {"api_name": "tensorflow.clip_by_global_norm", "line_number": 189, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 196, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 196, "usage_type": "call"}, {"api_name": "tensorflow.device", "line_number": 197, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 199, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 200, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 200, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 228, "usage_type": "call"}, {"api_name": "tensorflow.device", "line_number": 250, "usage_type": "call"}, {"api_name": "tensorflow.contrib.crf.viterbi_decode", "line_number": 267, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 267, "usage_type": "attribute"}, {"api_name": "tensorflow.train.import_meta_graph", "line_number": 295, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 295, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 296, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 298, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 306, "usage_type": "call"}]} +{"seq_id": "38633062871", "text": "from selenium import webdriver\nimport time\nimport telegram\n\n\ndef daily_point(login_id, login_pw):\n print(login_id, login_pw, ' RUN !!!')\n options = webdriver.ChromeOptions()\n #options.add_argument('--headless') # 헤드리스모드\n options.add_argument('--disable-gpu') # 호환성용 (필요없는 경우도 있음)\n options.add_argument('--window-size=1920x1080') # (가상)화면 크기 조절\n options.add_argument('--no-sandbox')\n driver = webdriver.Chrome(chrome_options=options)\n # driver = webdriver.Chrome('chromedriver')\n driver.set_page_load_timeout(60) # selenium timeout 60초\n login_url = 'https://memberssl.auction.co.kr/Authenticate/default.aspx?url=http%3A//promotion.auction.co.kr/promotion/MD/eventview.aspx%3FtxtMD%3D05F804C1E8'\n try:\n driver.get(url=login_url)\n\n time.sleep(5)\n driver.find_element_by_css_selector('#id').send_keys(login_id)\n driver.find_element_by_css_selector('#password').send_keys(login_pw)\n time.sleep(1)\n driver.find_element_by_css_selector('#Image1').click()\n time.sleep(5)\n driver.get('http://eventv2.auction.co.kr/event3/Regular/EverydayPoint/IfrmMainContents.aspx')\n time.sleep(5)\n\n for index in range(1,10):\n event_list = driver.find_elements_by_css_selector('div.swiper-slide-visible a.btn_point')\n for bt in event_list:\n if '적립하러' in bt.text:\n time.sleep(1)\n bt.click()\n time.sleep(1)\n alert = driver.find_element_by_css_selector('.ly_msg_box p.txt')\n if '지급된' in alert.text:\n driver.refresh()\n time.sleep(3)\n break\n driver.find_element_by_css_selector('.btn_type').click()\n time.sleep(3)\n tabs = driver.window_handles\n driver.switch_to.window(tabs[1])\n driver.close()\n driver.switch_to.window(tabs[0])\n time.sleep(3)\n\n driver.find_element_by_css_selector('.swiper-button-next').click()\n time.sleep(2)\n except Exception as e:\n print(e)\n finally:\n driver.close()\n driver.quit()\n\n\ndef gmarket_daily_point(login_id, login_pw):\n 
print(login_id, login_pw, ' RUN !!!')\n    options = webdriver.ChromeOptions()\n    #options.add_argument('--headless') # headless mode\n    options.add_argument('--disable-gpu') # for compatibility (not always needed)\n    options.add_argument('--window-size=1920x1080') # set the (virtual) window size\n    options.add_argument('--no-sandbox')\n    driver = webdriver.Chrome(chrome_options=options)\n    # driver = webdriver.Chrome('chromedriver')\n    driver.set_page_load_timeout(60) # selenium timeout: 60 seconds\n    login_url = 'https://memberssl.auction.co.kr/Authenticate/default.aspx?url=http%3A//promotion.auction.co.kr/promotion/MD/eventview.aspx%3FtxtMD%3D05F804C1E8'\n    try:\n        driver.get(url=login_url)\n\n        time.sleep(5)\n        driver.find_element_by_css_selector('#id').send_keys(login_id)\n        driver.find_element_by_css_selector('#password').send_keys(login_pw)\n        time.sleep(1)\n        driver.find_element_by_css_selector('#Image1').click()\n        time.sleep(5)\n        driver.get('http://eventv2.auction.co.kr/event3/Regular/EverydayPoint/IfrmMainContents.aspx')\n        time.sleep(5)\n\n        for index in range(1,10):\n            event_list = driver.find_elements_by_css_selector('div.swiper-slide-visible a.btn_point')\n            for bt in event_list:\n                if '적립하러' in bt.text:  # page button label meaning \"earn points\"\n                    time.sleep(1)\n                    bt.click()\n                    time.sleep(1)\n                    alert = driver.find_element_by_css_selector('.ly_msg_box p.txt')\n                    if '지급된' in alert.text:  # notice meaning \"already granted\"\n                        driver.refresh()\n                        time.sleep(3)\n                        break\n                    driver.find_element_by_css_selector('.btn_type').click()\n                    time.sleep(3)\n                    tabs = driver.window_handles\n                    driver.switch_to.window(tabs[1])\n                    driver.close()\n                    driver.switch_to.window(tabs[0])\n                    time.sleep(3)\n\n            driver.find_element_by_css_selector('.swiper-button-next').click()\n            time.sleep(2)\n    except Exception as e:\n        print(e)\n    finally:\n        driver.close()\n        driver.quit()\n\n\ndef gmarket_daily_point(login_id, login_pw):\n    print(login_id, login_pw, ' RUN !!!')\n    options = webdriver.ChromeOptions()\n    #options.add_argument('--headless') # headless mode\n    options.add_argument('--disable-gpu') # for compatibility (not always needed)\n    options.add_argument('--window-size=1920x1080') # set the (virtual) window size\n    options.add_argument('--no-sandbox')\n    driver = webdriver.Chrome(chrome_options=options)\n    # driver = webdriver.Chrome('chromedriver')\n    driver.set_page_load_timeout(60) # selenium timeout: 60 seconds\n    login_url = 'https://signinssl.gmarket.co.kr/login/login?prmtdisp=Y&url=http://promotion.gmarket.co.kr/Event/PlusZone.asp'\n    try:\n        driver.get(url=login_url)\n\n        time.sleep(5)\n        driver.find_element_by_css_selector('#id').send_keys(login_id)\n        time.sleep(1)\n        driver.find_element_by_css_selector('#pwd').send_keys(login_pw)\n        time.sleep(1)\n        driver.find_element_by_css_selector('#mem_login div.btn-login > a').click()\n        time.sleep(5)\n        driver.get(url=\"http://promotion.gmarket.co.kr/Event/AttendRoulette_none.asp\")\n        time.sleep(5)\n        driver.find_element_by_css_selector('#wrapper a.button_start').click()\n        time.sleep(5)\n\n    except Exception as e:\n        print(e)\n    finally:\n        driver.close()\n        driver.quit()\n\n\nif '__main__' == __name__:\n\n    # send a message via the Telegram Bot\n    my_token = '602824143:AAEjqPKSe95ncMH9lDluEKwR_J7BorJUbWE' # store the token in a variable\n    bot = telegram.Bot(token=my_token) # create the bot instance\n    print(bot)\n    updates = bot.getUpdates()\n    print(updates)\n    for u in updates:\n        print(u.message)\n\n    # chat_id = bot.getUpdates()[-1].message.chat.id\n    chat_id = '568182246'\n    print(chat_id)\n    #bot.sendMessage(chat_id=chat_id, text='TEST')\n\n    # Gmarket points\n    gmarket_daily_point('', '')\n    gmarket_daily_point('', '')\n\n    # Auction points\n    daily_point('','')\n    daily_point('','')\n\n\n\n", "repo_name": "nikerun23/python", "sub_path": "rnd_crawler/auction/daily_point.py", "file_name": "daily_point.py", "file_ext": "py", "file_size_in_byte": 4370, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "48", "api": [{"api_name": "selenium.webdriver.ChromeOptions", "line_number": 8, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 8, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 13, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 27, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 33, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 35, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 50, "usage_type": "call"}, {"api_name": "selenium.webdriver.ChromeOptions", "line_number": 60, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 60, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 65, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 65, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 72, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 74, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 76, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 78, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 80, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}, {"api_name": "telegram.Bot", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "71088338705", "text": "import asyncio\nfrom inspect import getfullargspec\n\nfrom pyrogram import Client, filters\n\nfrom MusicAndVideo.config import SUDO_USERS\nfrom MusicAndVideo.helpers.filters import command\n\n\nasync def eor(msg, **kwargs):\n    func = (\n        (msg.edit_text if msg.from_user.is_self else msg.reply)\n        if msg.from_user\n        else msg.reply\n    )\n    spec = getfullargspec(func.__wrapped__).args\n    return await func(**{k: v for k, v in kwargs.items() if k in spec})\n\n\n@Client.on_message(filters.user(SUDO_USERS) & command([\"del\"]))\nasync def del_user(_, message):\n    rep = message.reply_to_message\n    await message.delete()\n    await rep.delete()\n\n\n@Client.on_message(command([\"linkoceanAntiTinjauDisini\"]))\nasync def wdel_user(_, message):\n    await message.delete()\n\n\n@Client.on_message(filters.user(SUDO_USERS) & command([\"purgeme\"]))\nasync def purge_me_func(client, message):\n    if len(message.command) != 2:\n        return await message.delete()\n    n = (\n        message.reply_to_message.text\n        if message.reply_to_message\n        else message.text.split(None, 1)[1].strip()\n    )\n    if not n.isnumeric():\n        return await eor(message, text=\"Argumen Tidak Valid\")  # \"Invalid argument\"\n    n = int(n)\n    if n < 1:\n        return await eor(message, text=\"Butuh nomor >=1-999\")  # \"Need a number >=1-999\"\n    chat_id = message.chat.id\n    message_ids = [\n        m.message_id\n        async for m in client.search_messages(\n            chat_id,\n            from_user=int(message.from_user.id),\n            limit=n,\n        )\n    ]\n    if not message_ids:\n        return await eor(message, text=\"Tidak ada pesan yang ditemukan.\")  # \"No messages found.\"\n    to_delete = [message_ids[i : i + 999] for i in range(0, len(message_ids), 999)]\n    for hundred_messages_or_less in to_delete:\n        await client.delete_messages(\n            chat_id=chat_id,\n            message_ids=hundred_messages_or_less,\n            revoke=True,\n        )\n    mmk = await message.reply(f\"✅ {n} Pesan Telah Di Hapus\")  # \"n messages have been deleted\"\n    await asyncio.sleep(2)\n    await mmk.delete()\n\n\n@Client.on_message(filters.user(SUDO_USERS) & command([\"purge\"]))\nasync def purgefunc(client, message):\n    await message.delete()\n    if not message.reply_to_message:\n        return await message.reply_text(\"Reply to a message to purge from.\")\n    chat_id = message.chat.id\n    message_ids = []\n    for message_id in range(\n        message.reply_to_message.message_id,\n        message.message_id,\n    ):\n        message_ids.append(message_id)\n        if len(message_ids) == 100:\n            await client.delete_messages(\n                chat_id=chat_id,\n                message_ids=message_ids,\n                revoke=True,  # For both sides\n            )\n            message_ids = []\n    if len(message_ids) > 0:\n        await client.delete_messages(\n            chat_id=chat_id,\n            message_ids=message_ids,\n            revoke=True,\n        )\n", "repo_name": "XtomiX/userbot-music-video", "sub_path": "MusicAndVideo/module/delete.py", "file_name": "delete.py", "file_ext": "py", "file_size_in_byte": 2853, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "inspect.getfullargspec", "line_number": 16, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 20, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 20, "usage_type": "name"}, {"api_name": "pyrogram.filters.user", "line_number": 20, "usage_type": "call"}, {"api_name": "MusicAndVideo.config.SUDO_USERS", "line_number": 20, "usage_type": "argument"}, {"api_name": 
"pyrogram.filters", "line_number": 20, "usage_type": "name"}, {"api_name": "MusicAndVideo.helpers.filters.command", "line_number": 20, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 27, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 27, "usage_type": "name"}, {"api_name": "MusicAndVideo.helpers.filters.command", "line_number": 27, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 65, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 32, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 32, "usage_type": "name"}, {"api_name": "pyrogram.filters.user", "line_number": 32, "usage_type": "call"}, {"api_name": "MusicAndVideo.config.SUDO_USERS", "line_number": 32, "usage_type": "argument"}, {"api_name": "pyrogram.filters", "line_number": 32, "usage_type": "name"}, {"api_name": "MusicAndVideo.helpers.filters.command", "line_number": 32, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 69, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 69, "usage_type": "name"}, {"api_name": "pyrogram.filters.user", "line_number": 69, "usage_type": "call"}, {"api_name": "MusicAndVideo.config.SUDO_USERS", "line_number": 69, "usage_type": "argument"}, {"api_name": "pyrogram.filters", "line_number": 69, "usage_type": "name"}, {"api_name": "MusicAndVideo.helpers.filters.command", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "21411970846", "text": "from django.db.models import Avg, Count, Max, Min, Q, Sum\n\nfrom authors.models import Author\nfrom books.models import Book\n\n\ndef defer():\n \"\"\"exclude the fields mentioned in defer()\"\"\"\n\n instance = Book.objects.last()\n print(\"Instance\")\n print(vars(instance))\n\n defered_instance = Book.objects.defer(\"description\").last()\n print(\"\\nDefered instance\")\n\n print(vars(defered_instance))\n print(\"\\ndescription -->\", defered_instance.description)\n\n\ndef only():\n \"\"\"get the fields mentioned in only()\"\"\"\n\n only_instance = Book.objects.only(\"title\").last()\n\n print(\"\\nOnly instance\")\n print(vars(only_instance))\n\n print(\"\\nauthor -->\", only_instance.author)\n\n\ndef intersection():\n book_price_less_than_500 = Book.objects.filter(price__lt=500)\n book_price_greater_than_300 = Book.objects.filter(price__gt=300)\n\n # 300 > price < 500\n\n print(\"< 500 count -->\", book_price_less_than_500.count())\n print(\"> 300 count -->\", book_price_greater_than_300.count())\n\n books_intersection = book_price_less_than_500.intersection(\n book_price_greater_than_300\n )\n\n books_intersection_method2 = book_price_less_than_500 & book_price_greater_than_300\n print(\"Intersection count -->\", books_intersection.count())\n print(\"Intersection count -->\", books_intersection_method2.count())\n\n\ndef difference():\n books_price_less_than_500 = Book.objects.filter(price__lt=500)\n books_price_less_than_300 = Book.objects.filter(price__lt=300)\n\n print(\"< 500 count -->\", books_price_less_than_500.count())\n print(\"< 300 count -->\", books_price_less_than_300.count())\n\n difference = books_price_less_than_500.difference(books_price_less_than_300)\n print(\"Difference count -->\", difference.count())\n\n\ndef annotate():\n author = Author.objects.annotate(Avg(\"book__price\")).last()\n print(\"Author -->\", vars(author))\n\n # total = 0\n # books_of_author = Book.objects.filter(author=author)\n # total_books = books_of_author.count()\n # for i in 
books_of_author:\n # total += i.price\n # print(\"price -->\", i.price)\n\n # print(\"average ->\", total / total_books)\n\n\ndef alias():\n \"\"\"\n annotate 'authors_count' added to instance\n alias 'authors_count' is not added to instance,instead cached. used in filtering/sorting\n \"\"\"\n\n books_annotate = Book.objects.annotate(authors_count=Count(\"author\"))\n print(\"annotate -->\", vars(books_annotate[0]))\n\n books_alias = Book.objects.alias(avg_price=Avg(\"price\"))\n\n # alias \"avg_price\" not added, avg_price : filtering,sorting in the cache\n print(\"\\n\\nalias -->\", vars(books_alias[0]))\n\n authors_alias = Author.objects.alias(books=Count(\"book\")).filter(books__gt=2)\n print(\"authors -->\", vars(authors_alias[0]))\n\n author_annotate = Author.objects.annotate(books=Count(\"book\")).filter(books__gt=2)\n print(\"authors -->\", vars(author_annotate[0]))\n\n\ndef dates():\n from datetime import date\n\n date = date(2020, 1, 1)\n books = Book.objects.filter(published_date__date=date)\n print(\"books\", books)\n\n\ndef q_object():\n book_title_starts_with_a_or_e = Book.objects.filter(\n Q(title__startswith=\"a\") | Q(title__startswith=\"e\")\n )\n\n\n# select_related : onetoone, foreignkey\n# prefetch_related : manytomany\n", "repo_name": "pvfarooq/labglo-django-tut", "sub_path": "core/queries.py", "file_name": "queries.py", "file_ext": "py", "file_size_in_byte": 3270, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "books.models.Book.objects.last", "line_number": 10, "usage_type": "call"}, {"api_name": "books.models.Book.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "books.models.Book", "line_number": 10, "usage_type": "name"}, {"api_name": "books.models.Book.objects.defer", "line_number": 14, "usage_type": "call"}, {"api_name": "books.models.Book.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "books.models.Book", "line_number": 14, "usage_type": "name"}, {"api_name": "books.models.Book.objects.only", "line_number": 24, "usage_type": "call"}, {"api_name": "books.models.Book.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "books.models.Book", "line_number": 24, "usage_type": "name"}, {"api_name": "books.models.Book.objects.filter", "line_number": 33, "usage_type": "call"}, {"api_name": "books.models.Book.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "books.models.Book", "line_number": 33, "usage_type": "name"}, {"api_name": "books.models.Book.objects.filter", "line_number": 34, "usage_type": "call"}, {"api_name": "books.models.Book.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "books.models.Book", "line_number": 34, "usage_type": "name"}, {"api_name": "books.models.Book.objects.filter", "line_number": 51, "usage_type": "call"}, {"api_name": "books.models.Book.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "books.models.Book", "line_number": 51, "usage_type": "name"}, {"api_name": "books.models.Book.objects.filter", "line_number": 52, "usage_type": "call"}, {"api_name": "books.models.Book.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "books.models.Book", "line_number": 52, "usage_type": "name"}, {"api_name": "authors.models.Author.objects.annotate", "line_number": 62, "usage_type": "call"}, {"api_name": "authors.models.Author.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "authors.models.Author", 
"line_number": 62, "usage_type": "name"}, {"api_name": "django.db.models.Avg", "line_number": 62, "usage_type": "call"}, {"api_name": "books.models.Book.objects.annotate", "line_number": 81, "usage_type": "call"}, {"api_name": "books.models.Book.objects", "line_number": 81, "usage_type": "attribute"}, {"api_name": "books.models.Book", "line_number": 81, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 81, "usage_type": "call"}, {"api_name": "books.models.Book.objects.alias", "line_number": 84, "usage_type": "call"}, {"api_name": "books.models.Book.objects", "line_number": 84, "usage_type": "attribute"}, {"api_name": "books.models.Book", "line_number": 84, "usage_type": "name"}, {"api_name": "django.db.models.Avg", "line_number": 84, "usage_type": "call"}, {"api_name": "authors.models.Author.objects.alias", "line_number": 89, "usage_type": "call"}, {"api_name": "authors.models.Author.objects", "line_number": 89, "usage_type": "attribute"}, {"api_name": "authors.models.Author", "line_number": 89, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 89, "usage_type": "call"}, {"api_name": "authors.models.Author.objects.annotate", "line_number": 92, "usage_type": "call"}, {"api_name": "authors.models.Author.objects", "line_number": 92, "usage_type": "attribute"}, {"api_name": "authors.models.Author", "line_number": 92, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 92, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 99, "usage_type": "name"}, {"api_name": "books.models", "line_number": 100, "usage_type": "name"}, {"api_name": "books.models.Book.objects.filter", "line_number": 100, "usage_type": "call"}, {"api_name": "books.models.Book.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "books.models.Book", "line_number": 100, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 100, "usage_type": "name"}, {"api_name": "books.models", "line_number": 101, "usage_type": "argument"}, {"api_name": "books.models.Book.objects.filter", "line_number": 105, "usage_type": "call"}, {"api_name": "books.models.Book.objects", "line_number": 105, "usage_type": "attribute"}, {"api_name": "books.models.Book", "line_number": 105, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "13601098545", "text": "from django.http import HttpRequest\nfrom django.template.context_processors import csrf\n\n\ndef get_csrf_form_element(request: HttpRequest):\n csrf_token = \"\"\n if request is not None:\n csrf_token = str(csrf(request)['csrf_token']) # , 'utf-8')\n return '
'\n", "repo_name": "Technikradio/C3FOCSite", "sub_path": "c3shop/frontpage/uitools/dataforge.py", "file_name": "dataforge.py", "file_ext": "py", "file_size_in_byte": 390, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "48", "api": [{"api_name": "django.http.HttpRequest", "line_number": 5, "usage_type": "name"}, {"api_name": "django.template.context_processors.csrf", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "28564766847", "text": "import urllib\n\nfrom eventlet import tpool\n\ntry:\n import rados\n import rbd\nexcept ImportError:\n rados = None\n rbd = None\n\nfrom oslo_log import log as logging\nfrom oslo_serialization import jsonutils\nfrom oslo_service import loopingcall\nfrom oslo_utils import excutils\nfrom oslo_utils import units\n\nfrom nova.compute import task_states\nfrom nova import exception\nfrom nova.i18n import _\nfrom nova.i18n import _LE\nfrom nova.i18n import _LW\nfrom nova import utils\nfrom nova.virt.libvirt import utils as libvirt_utils\n\nLOG = logging.getLogger(__name__)\n\n\nclass RBDVolumeProxy(object):\n \"\"\"Context manager for dealing with an existing rbd volume.\n\n This handles connecting to rados and opening an ioctx automatically, and\n otherwise acts like a librbd Image object.\n\n The underlying librados client and ioctx can be accessed as the attributes\n 'client' and 'ioctx'.\n \"\"\"\n def __init__(self, driver, name, pool=None, snapshot=None,\n read_only=False):\n client, ioctx = driver._connect_to_rados(pool)\n try:\n snap_name = snapshot.encode('utf8') if snapshot else None\n self.volume = rbd.Image(ioctx, name.encode('utf8'),\n snapshot=snap_name,\n read_only=read_only)\n except rbd.ImageNotFound:\n with excutils.save_and_reraise_exception():\n LOG.debug(\"rbd image %s does not exist\", name)\n driver._disconnect_from_rados(client, ioctx)\n except rbd.Error:\n with excutils.save_and_reraise_exception():\n LOG.exception(_LE(\"error opening rbd image %s\"), name)\n driver._disconnect_from_rados(client, ioctx)\n\n self.driver = driver\n self.client = client\n self.ioctx = ioctx\n\n def __enter__(self):\n return self\n\n def __exit__(self, type_, value, traceback):\n try:\n self.volume.close()\n finally:\n self.driver._disconnect_from_rados(self.client, self.ioctx)\n\n def __getattr__(self, attrib):\n return getattr(self.volume, attrib)\n\n\nclass RADOSClient(object):\n \"\"\"Context manager to simplify error handling for connecting to ceph.\"\"\"\n def __init__(self, driver, pool=None):\n self.driver = driver\n self.cluster, self.ioctx = driver._connect_to_rados(pool)\n\n def __enter__(self):\n return self\n\n def __exit__(self, type_, value, traceback):\n self.driver._disconnect_from_rados(self.cluster, self.ioctx)\n\n @property\n def features(self):\n features = self.cluster.conf_get('rbd_default_features')\n if ((features is None) or (int(features) == 0)):\n features = rbd.RBD_FEATURE_LAYERING\n return int(features)\n\n\nclass RBDDriver(object):\n\n def __init__(self, pool, ceph_conf, rbd_user):\n self.pool = pool.encode('utf8')\n # NOTE(angdraug): rados.Rados fails to connect if ceph_conf is None:\n # https://github.com/ceph/ceph/pull/1787\n self.ceph_conf = ceph_conf.encode('utf8') if ceph_conf else ''\n self.rbd_user = rbd_user.encode('utf8') if rbd_user else None\n if rbd is None:\n raise RuntimeError(_('rbd python libraries not found'))\n\n def _connect_to_rados(self, pool=None):\n client = rados.Rados(rados_id=self.rbd_user,\n conffile=self.ceph_conf)\n try:\n 
client.connect()\n pool_to_open = pool or self.pool\n ioctx = client.open_ioctx(pool_to_open.encode('utf-8'))\n return client, ioctx\n except rados.Error:\n # shutdown cannot raise an exception\n client.shutdown()\n raise\n\n def _disconnect_from_rados(self, client, ioctx):\n # closing an ioctx cannot raise an exception\n ioctx.close()\n client.shutdown()\n\n def ceph_args(self):\n \"\"\"List of command line parameters to be passed to ceph commands to\n reflect RBDDriver configuration such as RBD user name and location\n of ceph.conf.\n \"\"\"\n args = []\n if self.rbd_user:\n args.extend(['--id', self.rbd_user])\n if self.ceph_conf:\n args.extend(['--conf', self.ceph_conf])\n return args\n\n def get_mon_addrs(self):\n args = ['ceph', 'mon', 'dump', '--format=json'] + self.ceph_args()\n out, _ = utils.execute(*args)\n lines = out.split('\\n')\n if lines[0].startswith('dumped monmap epoch'):\n lines = lines[1:]\n monmap = jsonutils.loads('\\n'.join(lines))\n addrs = [mon['addr'] for mon in monmap['mons']]\n hosts = []\n ports = []\n for addr in addrs:\n host_port = addr[:addr.rindex('/')]\n host, port = host_port.rsplit(':', 1)\n hosts.append(host.strip('[]'))\n ports.append(port)\n return hosts, ports\n\n def parse_url(self, url):\n prefix = 'rbd://'\n if not url.startswith(prefix):\n reason = _('Not stored in rbd')\n raise exception.ImageUnacceptable(image_id=url, reason=reason)\n pieces = map(urllib.unquote, url[len(prefix):].split('/'))\n if '' in pieces:\n reason = _('Blank components')\n raise exception.ImageUnacceptable(image_id=url, reason=reason)\n if len(pieces) != 4:\n reason = _('Not an rbd snapshot')\n raise exception.ImageUnacceptable(image_id=url, reason=reason)\n return pieces\n\n def get_fsid(self):\n with RADOSClient(self) as client:\n return client.cluster.get_fsid()\n\n def is_cloneable(self, image_location, image_meta):\n url = image_location['url']\n try:\n fsid, pool, image, snapshot = self.parse_url(url)\n except exception.ImageUnacceptable as e:\n LOG.debug('not cloneable: %s', e)\n return False\n\n if self.get_fsid() != fsid:\n reason = '%s is in a different ceph cluster' % url\n LOG.debug(reason)\n return False\n\n if image_meta.get('disk_format') != 'raw':\n reason = (\"rbd image clone requires image format to be \"\n \"'raw' but image {0} is '{1}'\").format(\n url, image_meta.get('disk_format'))\n LOG.debug(reason)\n return False\n\n # check that we can read the image\n try:\n return self.exists(image, pool=pool, snapshot=snapshot)\n except rbd.Error as e:\n LOG.debug('Unable to open image %(loc)s: %(err)s' %\n dict(loc=url, err=e))\n return False\n\n def clone(self, image_location, dest_name, dest_pool=None):\n _fsid, pool, image, snapshot = self.parse_url(\n image_location['url'])\n LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to '\n '%(dest_pool)s/%(dest_name)s',\n dict(pool=pool, img=image, snap=snapshot,\n dest_pool=dest_pool, dest_name=dest_name))\n with RADOSClient(self, str(pool)) as src_client:\n with RADOSClient(self, dest_pool) as dest_client:\n try:\n rbd.RBD().clone(src_client.ioctx,\n image.encode('utf-8'),\n snapshot.encode('utf-8'),\n dest_client.ioctx,\n str(dest_name),\n features=src_client.features)\n except rbd.PermissionError:\n raise exception.Forbidden(_('no write permission on '\n 'storage pool %s') % dest_pool)\n\n def size(self, name):\n with RBDVolumeProxy(self, name) as vol:\n return vol.size()\n\n def resize(self, name, size):\n \"\"\"Resize RBD volume.\n\n :name: Name of RBD object\n :size: New size in bytes\n \"\"\"\n 
LOG.debug('resizing rbd image %s to %d', name, size)\n with RBDVolumeProxy(self, name) as vol:\n vol.resize(size)\n\n def parent_info(self, volume, pool=None):\n \"\"\"Returns the pool, image and snapshot name for the parent of an\n RBD volume.\n\n :volume: Name of RBD object\n :pool: Name of pool\n \"\"\"\n try:\n with RBDVolumeProxy(self, str(volume), pool=pool) as vol:\n return vol.parent_info()\n except rbd.ImageNotFound:\n raise exception.ImageUnacceptable(_(\"no usable parent snapshot \"\n \"for volume %s\") % volume)\n\n def flatten(self, volume, pool=None):\n \"\"\"\"Flattens\" a snapshotted image with the parents' data,\n effectively detaching it from the parent.\n\n :volume: Name of RBD object\n :pool: Name of pool\n \"\"\"\n LOG.debug('flattening %(pool)s/%(vol)s', dict(pool=pool, vol=volume))\n with RBDVolumeProxy(self, str(volume), pool=pool) as vol:\n tpool.execute(vol.flatten)\n\n def exists(self, name, pool=None, snapshot=None):\n try:\n with RBDVolumeProxy(self, name,\n pool=pool,\n snapshot=snapshot,\n read_only=True):\n return True\n except rbd.ImageNotFound:\n return False\n\n def remove_image(self, name):\n \"\"\"Remove RBD volume\n\n :name: Name of RBD volume\n \"\"\"\n with RADOSClient(self, self.pool) as client:\n try:\n rbd.RBD().remove(client.ioctx, name)\n except rbd.ImageNotFound:\n LOG.warning(_LW('image %(volume)s in pool %(pool)s can not be '\n 'found, failed to remove'),\n {'volume': name, 'pool': self.pool})\n except rbd.ImageHasSnapshots:\n LOG.error(_LE('image %(volume)s in pool %(pool)s has '\n 'snapshots, failed to remove'),\n {'volume': name, 'pool': self.pool})\n\n def import_image(self, base, name):\n \"\"\"Import RBD volume from image file.\n\n Uses the command line import instead of librbd since rbd import\n command detects zeroes to preserve sparseness in the image.\n\n :base: Path to image file\n :name: Name of RBD volume\n \"\"\"\n args = ['--pool', self.pool, base, name]\n # Image format 2 supports cloning,\n # in stable ceph rbd release default is not 2,\n # we need to use it explicitly.\n args += ['--image-format=2']\n args += self.ceph_args()\n utils.execute('rbd', 'import', *args)\n\n def _destroy_volume(self, client, volume, pool=None):\n \"\"\"Destroy an RBD volume, retrying as needed.\n \"\"\"\n def _cleanup_vol(ioctx, volume, retryctx):\n try:\n rbd.RBD().remove(ioctx, volume)\n raise loopingcall.LoopingCallDone(retvalue=False)\n except rbd.ImageHasSnapshots:\n self.remove_snap(volume, libvirt_utils.RESIZE_SNAPSHOT_NAME,\n ignore_errors=True)\n except (rbd.ImageBusy, rbd.ImageHasSnapshots):\n LOG.warning(_LW('rbd remove %(volume)s in pool %(pool)s '\n 'failed'),\n {'volume': volume, 'pool': self.pool})\n retryctx['retries'] -= 1\n if retryctx['retries'] <= 0:\n raise loopingcall.LoopingCallDone()\n\n # NOTE(danms): We let it go for ten seconds\n retryctx = {'retries': 10}\n timer = loopingcall.FixedIntervalLoopingCall(\n _cleanup_vol, client.ioctx, volume, retryctx)\n timed_out = timer.start(interval=1).wait()\n if timed_out:\n # NOTE(danms): Run this again to propagate the error, but\n # if it succeeds, don't raise the loopingcall exception\n try:\n _cleanup_vol(client.ioctx, volume, retryctx)\n except loopingcall.LoopingCallDone:\n pass\n\n def cleanup_volumes(self, instance):\n with RADOSClient(self, self.pool) as client:\n\n def belongs_to_instance(disk):\n # NOTE(nic): On revert_resize, the cleanup steps for the root\n # volume are handled with an \"rbd snap rollback\" command,\n # and none of this is needed (and is, in fact, 
harmful) so\n # filter out non-ephemerals from the list\n if instance.task_state == task_states.RESIZE_REVERTING:\n return (disk.startswith(instance.uuid) and\n disk.endswith('disk.local'))\n else:\n return disk.startswith(instance.uuid)\n\n volumes = rbd.RBD().list(client.ioctx)\n for volume in filter(belongs_to_instance, volumes):\n self._destroy_volume(client, volume)\n\n def get_pool_info(self):\n with RADOSClient(self) as client:\n stats = client.cluster.get_cluster_stats()\n return {'total': stats['kb'] * units.Ki,\n 'free': stats['kb_avail'] * units.Ki,\n 'used': stats['kb_used'] * units.Ki}\n\n def create_snap(self, volume, name, pool=None, protect=False):\n \"\"\"Create a snapshot of an RBD volume.\n\n :volume: Name of RBD object\n :name: Name of snapshot\n :pool: Name of pool\n :protect: Set the snapshot to \"protected\"\n \"\"\"\n LOG.debug('creating snapshot(%(snap)s) on rbd image(%(img)s)',\n {'snap': name, 'img': volume})\n with RBDVolumeProxy(self, str(volume), pool=pool) as vol:\n tpool.execute(vol.create_snap, name)\n if protect and not vol.is_protected_snap(name):\n tpool.execute(vol.protect_snap, name)\n\n def remove_snap(self, volume, name, ignore_errors=False, pool=None,\n force=False):\n \"\"\"Removes a snapshot from an RBD volume.\n\n :volume: Name of RBD object\n :name: Name of snapshot\n :ignore_errors: whether or not to log warnings on failures\n :pool: Name of pool\n :force: Remove snapshot even if it is protected\n \"\"\"\n with RBDVolumeProxy(self, str(volume), pool=pool) as vol:\n if name in [snap.get('name', '') for snap in vol.list_snaps()]:\n if vol.is_protected_snap(name):\n if force:\n tpool.execute(vol.unprotect_snap, name)\n elif not ignore_errors:\n LOG.warning(_LW('snapshot(%(name)s) on rbd '\n 'image(%(img)s) is protected, '\n 'skipping'),\n {'name': name, 'img': volume})\n return\n LOG.debug('removing snapshot(%(name)s) on rbd image(%(img)s)',\n {'name': name, 'img': volume})\n tpool.execute(vol.remove_snap, name)\n elif not ignore_errors:\n LOG.warning(_LW('no snapshot(%(name)s) found on rbd '\n 'image(%(img)s)'),\n {'name': name, 'img': volume})\n\n def rollback_to_snap(self, volume, name):\n \"\"\"Revert an RBD volume to its contents at a snapshot.\n\n :volume: Name of RBD object\n :name: Name of snapshot\n \"\"\"\n with RBDVolumeProxy(self, volume) as vol:\n if name in [snap.get('name', '') for snap in vol.list_snaps()]:\n LOG.debug('rolling back rbd image(%(img)s) to '\n 'snapshot(%(snap)s)', {'snap': name, 'img': volume})\n tpool.execute(vol.rollback_to_snap, name)\n else:\n raise exception.SnapshotNotFound(snapshot_id=name)\n\n def destroy_volume(self, volume, pool=None):\n \"\"\"A one-shot version of cleanup_volumes()\n \"\"\"\n with RADOSClient(self, pool) as client:\n self._destroy_volume(client, volume)\n", "repo_name": "BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova", "sub_path": "nova/virt/libvirt/storage/rbd_utils.py", "file_name": "rbd_utils.py", "file_ext": "py", "file_size_in_byte": 16159, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "48", "api": [{"api_name": "oslo_log.log.getLogger", "line_number": 26, "usage_type": "call"}, {"api_name": "oslo_log.log", "line_number": 26, "usage_type": "name"}, {"api_name": "rbd.Image", "line_number": 43, "usage_type": "call"}, {"api_name": "rbd.ImageNotFound", "line_number": 46, "usage_type": "attribute"}, {"api_name": "oslo_utils.excutils.save_and_reraise_exception", "line_number": 47, "usage_type": "call"}, {"api_name": 
"oslo_utils.excutils", "line_number": 47, "usage_type": "name"}, {"api_name": "rbd.Error", "line_number": 50, "usage_type": "attribute"}, {"api_name": "oslo_utils.excutils.save_and_reraise_exception", "line_number": 51, "usage_type": "call"}, {"api_name": "oslo_utils.excutils", "line_number": 51, "usage_type": "name"}, {"api_name": "nova.i18n._LE", "line_number": 52, "usage_type": "call"}, {"api_name": "rbd.RBD_FEATURE_LAYERING", "line_number": 88, "usage_type": "attribute"}, {"api_name": "nova.i18n._", "line_number": 101, "usage_type": "call"}, {"api_name": "rados.Rados", "line_number": 104, "usage_type": "call"}, {"api_name": "rados.Error", "line_number": 111, "usage_type": "attribute"}, {"api_name": "nova.i18n._", "line_number": 135, "usage_type": "name"}, {"api_name": "nova.utils.execute", "line_number": 135, "usage_type": "call"}, {"api_name": "nova.utils", "line_number": 135, "usage_type": "name"}, {"api_name": "oslo_serialization.jsonutils.loads", "line_number": 139, "usage_type": "call"}, {"api_name": "oslo_serialization.jsonutils", "line_number": 139, "usage_type": "name"}, {"api_name": "nova.i18n._", "line_number": 153, "usage_type": "call"}, {"api_name": "nova.exception.ImageUnacceptable", "line_number": 154, "usage_type": "call"}, {"api_name": "nova.exception", "line_number": 154, "usage_type": "name"}, {"api_name": "urllib.unquote", "line_number": 155, "usage_type": "attribute"}, {"api_name": "nova.i18n._", "line_number": 157, "usage_type": "call"}, {"api_name": "nova.exception.ImageUnacceptable", "line_number": 158, "usage_type": "call"}, {"api_name": "nova.exception", "line_number": 158, "usage_type": "name"}, {"api_name": "nova.i18n._", "line_number": 160, "usage_type": "call"}, {"api_name": "nova.exception.ImageUnacceptable", "line_number": 161, "usage_type": "call"}, {"api_name": "nova.exception", "line_number": 161, "usage_type": "name"}, {"api_name": "nova.exception.ImageUnacceptable", "line_number": 172, "usage_type": "attribute"}, {"api_name": "nova.exception", "line_number": 172, "usage_type": "name"}, {"api_name": "rbd.Error", "line_number": 191, "usage_type": "attribute"}, {"api_name": "rbd.RBD", "line_number": 206, "usage_type": "call"}, {"api_name": "rbd.PermissionError", "line_number": 212, "usage_type": "attribute"}, {"api_name": "nova.exception.Forbidden", "line_number": 213, "usage_type": "call"}, {"api_name": "nova.exception", "line_number": 213, "usage_type": "name"}, {"api_name": "nova.i18n._", "line_number": 213, "usage_type": "call"}, {"api_name": "rbd.ImageNotFound", "line_number": 240, "usage_type": "attribute"}, {"api_name": "nova.exception.ImageUnacceptable", "line_number": 241, "usage_type": "call"}, {"api_name": "nova.exception", "line_number": 241, "usage_type": "name"}, {"api_name": "nova.i18n._", "line_number": 241, "usage_type": "call"}, {"api_name": "eventlet.tpool.execute", "line_number": 253, "usage_type": "call"}, {"api_name": "eventlet.tpool", "line_number": 253, "usage_type": "name"}, {"api_name": "rbd.ImageNotFound", "line_number": 262, "usage_type": "attribute"}, {"api_name": "rbd.RBD", "line_number": 272, "usage_type": "call"}, {"api_name": "rbd.ImageNotFound", "line_number": 273, "usage_type": "attribute"}, {"api_name": "nova.i18n._LW", "line_number": 274, "usage_type": "call"}, {"api_name": "rbd.ImageHasSnapshots", "line_number": 277, "usage_type": "attribute"}, {"api_name": "nova.i18n._LE", "line_number": 278, "usage_type": "call"}, {"api_name": "nova.utils.execute", "line_number": 297, "usage_type": "call"}, {"api_name": 
"nova.utils", "line_number": 297, "usage_type": "name"}, {"api_name": "rbd.RBD", "line_number": 304, "usage_type": "call"}, {"api_name": "oslo_service.loopingcall.LoopingCallDone", "line_number": 305, "usage_type": "call"}, {"api_name": "oslo_service.loopingcall", "line_number": 305, "usage_type": "name"}, {"api_name": "rbd.ImageHasSnapshots", "line_number": 306, "usage_type": "attribute"}, {"api_name": "nova.virt.libvirt.utils.RESIZE_SNAPSHOT_NAME", "line_number": 307, "usage_type": "attribute"}, {"api_name": "nova.virt.libvirt.utils", "line_number": 307, "usage_type": "name"}, {"api_name": "rbd.ImageBusy", "line_number": 309, "usage_type": "attribute"}, {"api_name": "rbd.ImageHasSnapshots", "line_number": 309, "usage_type": "attribute"}, {"api_name": "nova.i18n._LW", "line_number": 310, "usage_type": "call"}, {"api_name": "oslo_service.loopingcall.LoopingCallDone", "line_number": 315, "usage_type": "call"}, {"api_name": "oslo_service.loopingcall", "line_number": 315, "usage_type": "name"}, {"api_name": "oslo_service.loopingcall.FixedIntervalLoopingCall", "line_number": 319, "usage_type": "call"}, {"api_name": "oslo_service.loopingcall", "line_number": 319, "usage_type": "name"}, {"api_name": "oslo_service.loopingcall.LoopingCallDone", "line_number": 327, "usage_type": "attribute"}, {"api_name": "oslo_service.loopingcall", "line_number": 327, "usage_type": "name"}, {"api_name": "nova.compute.task_states.RESIZE_REVERTING", "line_number": 338, "usage_type": "attribute"}, {"api_name": "nova.compute.task_states", "line_number": 338, "usage_type": "name"}, {"api_name": "rbd.RBD", "line_number": 344, "usage_type": "call"}, {"api_name": "oslo_utils.units.Ki", "line_number": 351, "usage_type": "attribute"}, {"api_name": "oslo_utils.units", "line_number": 351, "usage_type": "name"}, {"api_name": "oslo_utils.units.Ki", "line_number": 352, "usage_type": "attribute"}, {"api_name": "oslo_utils.units", "line_number": 352, "usage_type": "name"}, {"api_name": "oslo_utils.units.Ki", "line_number": 353, "usage_type": "attribute"}, {"api_name": "oslo_utils.units", "line_number": 353, "usage_type": "name"}, {"api_name": "eventlet.tpool.execute", "line_number": 366, "usage_type": "call"}, {"api_name": "eventlet.tpool", "line_number": 366, "usage_type": "name"}, {"api_name": "eventlet.tpool.execute", "line_number": 368, "usage_type": "call"}, {"api_name": "eventlet.tpool", "line_number": 368, "usage_type": "name"}, {"api_name": "eventlet.tpool.execute", "line_number": 384, "usage_type": "call"}, {"api_name": "eventlet.tpool", "line_number": 384, "usage_type": "name"}, {"api_name": "nova.i18n._LW", "line_number": 386, "usage_type": "call"}, {"api_name": "eventlet.tpool.execute", "line_number": 393, "usage_type": "call"}, {"api_name": "eventlet.tpool", "line_number": 393, "usage_type": "name"}, {"api_name": "nova.i18n._LW", "line_number": 395, "usage_type": "call"}, {"api_name": "eventlet.tpool.execute", "line_number": 409, "usage_type": "call"}, {"api_name": "eventlet.tpool", "line_number": 409, "usage_type": "name"}, {"api_name": "nova.exception.SnapshotNotFound", "line_number": 411, "usage_type": "call"}, {"api_name": "nova.exception", "line_number": 411, "usage_type": "name"}]} +{"seq_id": "368948114", "text": "import time\nfrom appium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom urllib3.exceptions import ProtocolError\n\ndesired_caps = {\n 'platformName': 
'Android',\n 'automationName': 'UiAutomator2',\n 'deviceName': 'DemoApp',\n 'app': r'C:/Users/Abdallah/PycharmProjects/AppiumProject2/Base/calculator.apk',\n 'appPackage': 'com.google.android.calculator',\n 'appActivity': 'com.android.calculator2.Calculator'\n}\n\ntry:\n driver = webdriver.Remote(\"http://127.0.0.1:4723/wd/hub\", desired_caps)\nexcept ProtocolError as e:\n raise ProtocolError(\"Connection aborted.\") from e\n\nwait = WebDriverWait(driver, 20)\nbutton_7 = wait.until(EC.element_to_be_clickable((By.XPATH, \"//android.widget.ImageButton[@index='11']\")))\nbutton_7.click()\nmultiply_button = wait.until(EC.element_to_be_clickable((By.XPATH, \"//android.widget.ImageButton[@index='14']\")))\nmultiply_button.click()\nbutton_9 = wait.until(EC.element_to_be_clickable((By.XPATH, \"//android.widget.ImageButton[@index='13']\")))\nbutton_9.click()\nleft_parenthesis_button = wait.until(EC.element_to_be_clickable((By.XPATH, \"//android.widget.ImageButton[@index='8']\")))\nleft_parenthesis_button.click()\nbutton_1 = wait.until(EC.element_to_be_clickable((By.XPATH, \"//android.widget.ImageButton[@index='19']\")))\nbutton_1.click()\nbutton_2 = wait.until(EC.element_to_be_clickable((By.XPATH, \"//android.widget.ImageButton[@index='20']\")))\nbutton_2.click()\nbutton_8 = wait.until(EC.element_to_be_clickable((By.XPATH, \"//android.widget.ImageButton[@index='12']\")))\nbutton_8.click()\ndivide_button = wait.until(EC.element_to_be_clickable((By.XPATH, \"//android.widget.ImageButton[@index='10']\")))\ndivide_button.click()\nbutton_2 = wait.until(EC.element_to_be_clickable((By.XPATH, \"//android.widget.ImageButton[@index='20']\")))\nbutton_2.click()\nright_parenthesis_button = wait.until(EC.element_to_be_clickable((By.XPATH, \"//android.widget.ImageButton[@index='8']\")))\nright_parenthesis_button.click()\nminus_button = wait.until(EC.element_to_be_clickable((By.XPATH, \"//android.widget.ImageButton[@index='18']\")))\nminus_button.click()\nbutton_4 = wait.until(EC.element_to_be_clickable((By.XPATH, \"//android.widget.ImageButton[@index='15']\")))\nbutton_4.click()\nequals_button = wait.until(EC.element_to_be_clickable((By.XPATH, \"//android.widget.ImageButton[@index='26']\")))\nequals_button.click()\ntime.sleep(6)", "repo_name": "mounayousef100/Calculator-Application-With-Appium-Android-and-Selenium-On-Python-", "sub_path": "Base/App.py", "file_name": "App.py", "file_ext": "py", "file_size_in_byte": 2485, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "48", "api": [{"api_name": "appium.webdriver.Remote", "line_number": 18, "usage_type": "call"}, {"api_name": "appium.webdriver", "line_number": 18, "usage_type": "name"}, {"api_name": "urllib3.exceptions.ProtocolError", "line_number": 19, "usage_type": "name"}, {"api_name": "urllib3.exceptions.ProtocolError", "line_number": 20, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 22, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 23, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 23, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 23, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 23, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 25, "usage_type": "call"}, 
{"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 25, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 25, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 25, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 27, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 27, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 27, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 27, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 29, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 29, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 29, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 29, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 31, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 31, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 31, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 31, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 33, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 33, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 33, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 33, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 35, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 35, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 35, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 35, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 37, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 37, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 37, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 37, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 39, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 39, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 39, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 39, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 41, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 41, "usage_type": "name"}, {"api_name": 
"selenium.webdriver.common.by.By.XPATH", "line_number": 41, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 41, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 43, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 43, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 43, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 43, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 45, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 45, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 45, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 45, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 47, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 47, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 47, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 47, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "5763310106", "text": "import unittest\nimport logging as log\nimport os\nimport astroid\n\nfrom log_quality.retrieve_logs.retriever_py_ast import LogRetrieverPyAST\nfrom tests.helpers import *\n\nbase_path = os.path.dirname(__file__)\ntest_file_path = os.path.join(base_path, \"test_files\")\n\nclass TestLogRetriverPyAST(unittest.TestCase):\n\n def setUp(self):\n configure_logging()\n\n def get_default_node(self):\n return astroid.parse(\n \"\"\"\n import string\n print(\"Helo world\")\n \"\"\"\n )\n\n def test_walk_for_crash(self):\n node = self.get_default_node()\n lr = LogRetrieverPyAST()\n try:\n lr.walk(node)\n except Exception:\n self.fail(\"LogRetrieverPyAST.walk() raised an exception unexpectedly!\")\n\n def test_visit(self):\n node = self.get_default_node()\n lr = LogRetrieverPyAST()\n try:\n lr.visit(node)\n except Exception:\n self.fail(\"LogRetrieverPyAST.visit() raised an exception unexpectedly!\") \n\n def test_visit_import(self):\n node = astroid.parse(\n \"\"\"\n import logging\n import logging as log\n import logging as banana\n \"\"\"\n )\n expected = sorted(list({\"logging\", \"log\", \"banana\"}))\n lr = LogRetrieverPyAST()\n try:\n for n in node.body:\n lr.visit_import(n)\n except Exception:\n self.fail(\"LogRetrieverPyAST.visit_import() raised an exception unexpectedly!\")\n self.assertListEqual(sorted(list(lr._logging_module_aliases)), expected)\n\n def test_visit_importfrom(self):\n node = astroid.parse(\n \"\"\"\n from logging import info\n from logging import log\n from logging import warning as cucumber\n from logging import log as l\n \"\"\"\n )\n lr = LogRetrieverPyAST()\n expected_lma = sorted(list(lr.log_method_aliases) + [\"l\"])\n expected_lla = sorted(list(lr.log_level_aliases) + [\"cucumber\"])\n try:\n for n in node.body:\n lr.visit_importfrom(n)\n except Exception:\n self.fail(\"LogRetrieverPyAST.visit_importfrom() raised an exception unexpectedly!\")\n \n lma = sorted(list(lr.log_method_aliases)) \n self.assertListEqual(lma, expected_lma)\n\n lla 
= sorted(list(lr.log_level_aliases))\n        self.assertListEqual(lla, expected_lla)\n\n\n    def test__infer_log_level(self):\n        node = astroid.parse(\n            \"\"\"\n            logging.log(logging.DEBUG, \"Test1\")\n            logging.log(logging.WARNING, \"Test2\")\n            logging.log(10, \"Test3\")\n            \"\"\"\n        )\n        expected = [\"debug\", \"warning\", \"debug\"]\n        lr = LogRetrieverPyAST()\n        levels = []\n        try:\n            for n in node.body:\n                print(n.value.repr_tree())\n                levels.append(lr._infer_log_level(n.value.args[0]))\n        except Exception as e:\n            log.exception(e)\n            self.fail(\"LogRetrieverPyAST._infer_log_level() raised an exception unexpectedly!\")\n        self.assertListEqual(levels, expected)\n\n    def test__clone_node(self):\n        node1 = astroid.extract_node(\n            \"\"\"\n            print(\"Hello world\")\n            \"\"\"\n        )\n        node2 = astroid.extract_node(\n            \"\"\"\n            # Comment\n            print(\"Hello world\")\n            \"\"\"\n        )\n        lr = LogRetrieverPyAST()\n        try:\n            node = lr._clone_node(node1, node2)\n        except Exception as e:\n            log.exception(e)\n            self.fail(\"LogRetrieverPyAST._clone_node() raised an exception unexpectedly!\")\n        \n        self.assertEqual(node, node2)\n\n\n    def _get_walk_test_cases(self):\n        c1_msg = \"Testing basic logging\"\n        c1 = astroid.parse(\n            '''\n            import logging\n\n            logging.warn(\"Warning\")\n            logging.warning(\"Warning\")\n            logging.error(\"Error\")\n            logging.exception(\"Exception\")\n            '''\n        )\n        c1_log_message = [\"Warning\", \"Warning\", \"Error\", \"Exception\"]\n        c1_level = [\"warning\", \"warning\", \"error\", \"error\"]\n\n        c2_msg = \"Testing logging alias\"\n        c2 = astroid.parse(\n            '''\n            import logging as l\n\n            l.warn(\"Warning\")\n            l.warning(\"Warning\")\n            '''\n        )\n        c2_log_message = [\"Warning\", \"Warning\"]\n        c2_level = [\"warning\", \"warning\"]\n\n        c3_msg = \"Testing logging attribute import\"\n        c3 = astroid.parse(\n            '''\n            from logging import warning\n\n            warning(\"Warning\")\n            '''\n        )\n        c3_log_message = [\"Warning\"]\n        c3_level = [\"warning\"]\n\n        c4_msg = \"Testing logging attribute import alias\"\n        c4 = astroid.parse(\n            '''\n            from logging import warning as w\n\n            w(\"Warning\")\n            '''\n        )\n        c4_log_message = [\"Warning\"]\n        c4_level = [\"warning\"]\n\n        c5_msg = \"Testing string concatenation in logging statement\"\n        c5 = astroid.parse(\n            '''\n            import logging\n\n            logging.warning(\"Warning \" + \"W\" + \" W\")\n            '''\n        )\n        c5_log_message = [\"Warning W W\"]\n        c5_level = [\"warning\"]\n\n        c6_msg = \"Testing string concatenation in logging statement with indirection (variable, call)\"\n        c6 = astroid.parse(\n            '''\n            import logging\n\n            i_w = input(\"Enter your name: \")\n            def w_f():\n                return \"W\"\n            w = \"W\"\n            logging.warning(\"Warning \" + \"W\" + \" W\")\n            logging.warning(\"Warning \" + w_f() + \" \" + w)\n            logging.warning(\"Warning \" + i_w)\n            '''\n        )\n        c6_log_message = [\"Warning W W\", \"Warning W W\", \"Warning *\"]\n        c6_level = [\"warning\", \"warning\", \"warning\"]\n\n        c7_msg = \"Testing logging internal string formatting\"\n        c7 = astroid.parse(\n            '''\n            import logging\n\n            i_w = input(\"Enter your name: \")\n\n            logging.warning(\"Warning %s %d\", \"W\", 5)\n            logging.warning(\"Warning %s %s\", \"W\", i_w)\n            logging.warning(\"%s %s %s\", \"Warning\", \"W\", \"W\", exc_info=\"\")\n            '''\n        )\n        c7_log_message = [\"Warning W 5\", \"Warning W *\", \"Warning W W\"]\n        c7_level = [\"warning\", \"warning\", \"warning\"]\n\n        c8_msg = \"Testing log method call with level as parameter\"\n        c8 = astroid.parse(\n            '''\n            import logging\n\n            level = logging.WARNING\n            def get_level():\n                return logging.WARNING\n            logging.log(logging.WARNING, \"Warning\")\n            logging.log(level, 
\"Warning\")\n logging.log(get_level(), \"Warning\")\n logging.log(10, \"Debug\")\n '''\n )\n c8_log_message = [\"Warning\", \"Warning\", \"Warning\", \"Debug\"]\n c8_level = [\"warning\", \"warning\", \"warning\", \"debug\"]\n\n c9_msg = \"Testing string formating with f\"\n c9 = astroid.parse(\n '''\n import logging\n\n i_w = input(\"Enter your name: \")\n w = \"W\"\n\n logging.warning(f\"{w}\")\n logging.warning(f\"Warning {w}\")\n logging.warning(f\"Warning {i_w}\")\n '''\n )\n c9_log_message = [\"W\", \"Warning W\", \"Warning *\"]\n c9_level = [\"warning\", \"warning\", \"warning\"]\n\n c10_msg = \"Testing string formating format method\"\n c10 = astroid.parse(\n '''\n import logging\n\n i_w = input(\"Enter your name: \")\n w = \"W\"\n\n logging.warning(\"{}\".format(\"Warning\"))\n logging.warning(\"Warning {}\".format(w))\n logging.warning(\"Warning {} {}\".format(w, i_w))\n '''\n )\n c10_log_message = [\"Warning\", \"Warning W\", \"Warning W *\"]\n c10_level = [\"warning\", \"warning\", \"warning\"]\n\n c11_msg = \"Testing string formating with %\"\n c11 = astroid.parse(\n '''\n import logging\n\n i_w = input(\"Enter your name: \")\n w = \"W\"\n\n logging.warning(\"%s\" % (\"Warning\"))\n logging.warning(\"Warning %s %d\" % (w, 5))\n logging.warning(\"Warning %s %%\" % (w, i_w))\n '''\n )\n c11_log_message = [\"Warning\", \"Warning W 5\", \"Warning W *\"]\n c11_level = [\"warning\", \"warning\", \"warning\"]\n\n c12_msg = \"Testing get logger method call\"\n c12 = astroid.parse(\n '''\n import logging\n\n a2 = logging.getLogger('aa')\n\n a2.debug('A debug message')\n a2.warn('A warning message')\n a4.info('This should not be parsed')\n '''\n )\n c12_log_message = [\"A debug message\", \"A warning message\"]\n c12_level = [\"debug\", \"warning\"]\n\n test_cases = {\n c1: (c1_log_message, c1_level, c1_msg),\n c2: (c2_log_message, c2_level, c2_msg),\n c3: (c3_log_message, c3_level, c3_msg),\n c4: (c4_log_message, c4_level, c4_msg),\n c5: (c5_log_message, c5_level, c5_msg),\n c6: (c6_log_message, c6_level, c6_msg),\n c7: (c7_log_message, c7_level, c7_msg),\n c8: (c8_log_message, c8_level, c8_msg),\n c9: (c9_log_message, c9_level, c9_msg),\n c10: (c10_log_message, c10_level, c10_msg),\n c11: (c11_log_message, c11_level, c11_msg),\n c12: (c12_log_message, c12_level, c12_msg)\n }\n return test_cases\n\n def test_walk(self):\n test_cases = self._get_walk_test_cases()\n for node, (expct_msg, expct_level, msg) in test_cases.items():\n lr = LogRetrieverPyAST()\n with self.subTest(msg=msg):\n try:\n lr.walk(node)\n except Exception as e:\n log.exception(e)\n self.fail(\"LogRetrieverPyAST.walk() raised an exception unexpectedly!\")\n self.assertListEqual(lr.log_messages, expct_msg)\n self.assertListEqual(lr.log_levels, expct_level)\n \n\nif __name__ == '__main__':\n unittest.main()", "repo_name": "aiops/check-log-quality", "sub_path": "log_quality/retrieve_logs/tests/test_retriever_py_ast.py", "file_name": "test_retriever_py_ast.py", "file_ext": "py", "file_size_in_byte": 10233, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 12, "usage_type": "attribute"}, {"api_name": "astroid.parse", 
"line_number": 18, "usage_type": "call"}, {"api_name": "log_quality.retrieve_logs.retriever_py_ast.LogRetrieverPyAST", "line_number": 27, "usage_type": "call"}, {"api_name": "log_quality.retrieve_logs.retriever_py_ast.LogRetrieverPyAST", "line_number": 35, "usage_type": "call"}, {"api_name": "astroid.parse", "line_number": 42, "usage_type": "call"}, {"api_name": "log_quality.retrieve_logs.retriever_py_ast.LogRetrieverPyAST", "line_number": 50, "usage_type": "call"}, {"api_name": "astroid.parse", "line_number": 59, "usage_type": "call"}, {"api_name": "log_quality.retrieve_logs.retriever_py_ast.LogRetrieverPyAST", "line_number": 67, "usage_type": "call"}, {"api_name": "astroid.parse", "line_number": 84, "usage_type": "call"}, {"api_name": "log_quality.retrieve_logs.retriever_py_ast.LogRetrieverPyAST", "line_number": 92, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 99, "usage_type": "call"}, {"api_name": "astroid.extract_node", "line_number": 104, "usage_type": "call"}, {"api_name": "astroid.extract_node", "line_number": 109, "usage_type": "call"}, {"api_name": "log_quality.retrieve_logs.retriever_py_ast.LogRetrieverPyAST", "line_number": 115, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 119, "usage_type": "call"}, {"api_name": "astroid.parse", "line_number": 127, "usage_type": "call"}, {"api_name": "astroid.parse", "line_number": 141, "usage_type": "call"}, {"api_name": "astroid.parse", "line_number": 153, "usage_type": "call"}, {"api_name": "astroid.parse", "line_number": 164, "usage_type": "call"}, {"api_name": "astroid.parse", "line_number": 175, "usage_type": "call"}, {"api_name": "astroid.parse", "line_number": 186, "usage_type": "call"}, {"api_name": "astroid.parse", "line_number": 203, "usage_type": "call"}, {"api_name": "astroid.parse", "line_number": 218, "usage_type": "call"}, {"api_name": "astroid.parse", "line_number": 235, "usage_type": "call"}, {"api_name": "astroid.parse", "line_number": 251, "usage_type": "call"}, {"api_name": "astroid.parse", "line_number": 267, "usage_type": "call"}, {"api_name": "astroid.parse", "line_number": 283, "usage_type": "call"}, {"api_name": "log_quality.retrieve_logs.retriever_py_ast.LogRetrieverPyAST", "line_number": 316, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 321, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 328, "usage_type": "call"}]} +{"seq_id": "2587902296", "text": "# -*- coding: utf-8 -*-\n\n'''\nCreated on 2016-12-30 18:37:08\nSQLAlchemy ORM支持性模块\n@author: zhoujiagen\n'''\n\nimport sqlalchemy\n\nfrom com.spike.env.log import SpikeConsoleLogger\nlogger = SpikeConsoleLogger('SQLAlchemy-ORM-Tutorial-Support').native()\n\n\ndef show_version():\n \"\"\"检查版本\"\"\"\n logger.info('Framework version is: %s' % sqlalchemy.__version__)\n\ndef create_engine():\n \"\"\"建立连接\"\"\"\n engine = sqlalchemy.create_engine('mysql://root:@localhost/test', echo = True)\n logger.info('create engine: %s' % engine)\n return engine\n\nfrom sqlalchemy.ext.declarative import declarative_base\nSQLAlchemy_Base = declarative_base() # 单个实体\nSQLAlchemy_Relationship_Base = declarative_base() # 多个实体, 带关系\n\ndef create_schema(engine, base = SQLAlchemy_Base):\n \"\"\"创建/检查Schema\"\"\"\n logger.info('create schema with engine[%s], base[%s]' % (engine, base))\n\n base.metadata.create_all(engine)\n\nfrom sqlalchemy.orm.session import sessionmaker\n\ndef create_session(engine):\n \"\"\"创建Session\"\"\"\n Session = sessionmaker(bind = engine) # 使用session工厂\n session = 
Session() # session instance\n    return session\n\ndef create_mysql_fulltext_index(engine):\n    \"\"\"\n    Create a MySQL full-text index\n    !WRONG! REF http://stackoverflow.com/questions/14971619/proper-use-of-mysql-full-text-search-with-sqlalchemy\n    The reason is probably that SQLAlchemy changed significantly between versions.\n    \"\"\"\n    connect = engine.connect()\n    connect.execute('CREATE FULLTEXT INDEX first_name_fulltext ON EMPLOYEE (FIRST_NAME ASC)')\n\n\n\nif __name__ == '__main__':\n    pass\n", "repo_name": "King12138/python-playground", "sub_path": "src/com/spike/application/database/mysql/tutorial/sqlalchemy_orm_support.py", "file_name": "sqlalchemy_orm_support.py", "file_ext": "py", "file_size_in_byte": 1595, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "com.spike.env.log.SpikeConsoleLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlalchemy.__version__", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sqlalchemy.create_engine", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.session.sessionmaker", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "8666855612", "text": "from __future__ import unicode_literals\n\n\"\"\"Utilities for testing Motor with Tornado.\"\"\"\n\nimport concurrent.futures\nimport datetime\nimport functools\n\ntry:\n    # Python 2.6.\n    from unittest2 import SkipTest\n    import unittest2 as unittest\nexcept ImportError:\n    from unittest import SkipTest  # If this fails you need unittest2.\n    import unittest\n\nfrom mockupdb import MockupDB\nfrom tornado import gen, testing\n\nimport motor\nfrom test.test_environment import env, CLIENT_PEM\nfrom test.assert_logs_backport import AssertLogsMixin\nfrom test.version import padded, _parse_version_string\n\n\n@gen.coroutine\ndef version(client):\n    info = yield client.server_info()\n    raise gen.Return(_parse_version_string(info[\"version\"]))\n\n\n@gen.coroutine\ndef at_least(client, min_version):\n    client_version = yield version(client)\n    raise gen.Return(client_version >= tuple(padded(min_version, 4)))\n\n\n@gen.coroutine\ndef get_command_line(client):\n    command_line = yield client.admin.command('getCmdLineOpts')\n    assert command_line['ok'] == 1, \"getCmdLineOpts() failed\"\n    raise gen.Return(command_line)\n\n\n@gen.coroutine\ndef server_is_mongos(client):\n    ismaster_response = yield client.admin.command('ismaster')\n    raise gen.Return(ismaster_response.get('msg') == 'isdbgrid')\n\n\n@gen.coroutine\ndef skip_if_mongos(client):\n    is_mongos = yield server_is_mongos(client)\n    if is_mongos:\n        raise SkipTest(\"connected to mongos\")\n\n\n@gen.coroutine\ndef remove_all_users(db):\n    version_check = yield at_least(db.connection, (2, 5, 4))\n    if version_check:\n        yield db.command({\"dropAllUsersFromDatabase\": 1})\n    else:\n        yield db.system.users.remove({})\n\n\n@gen.coroutine\ndef skip_if_mongos(client):\n    is_mongos = yield server_is_mongos(client)\n    if is_mongos:\n        raise SkipTest(\"connected to mongos\")\n\n\n@gen.coroutine\ndef remove_all_users(db):\n    version_check = yield at_least(db.connection, (2, 5, 4))\n    if version_check:\n        yield db.command({\"dropAllUsersFromDatabase\": 1})\n    else:\n        yield db.system.users.remove({})\n\n\nclass PauseMixin(object):\n    @gen.coroutine\n    def pause(self, seconds):\n        yield gen.Task(\n            self.io_loop.add_timeout, 
datetime.timedelta(seconds=seconds))\n\n\nclass MotorTest(PauseMixin, AssertLogsMixin, testing.AsyncTestCase):\n longMessage = True # Used by unittest.TestCase\n ssl = False # If True, connect with SSL, skip if mongod isn't SSL\n\n def setUp(self):\n super(MotorTest, self).setUp()\n\n if self.ssl and not env.mongod_started_with_ssl:\n raise SkipTest(\"mongod doesn't support SSL, or is down\")\n\n if env.auth:\n self.cx = self.motor_client(env.uri, ssl=self.ssl)\n else:\n self.cx = self.motor_client(ssl=self.ssl)\n\n self.db = self.cx.motor_test\n self.collection = self.db.test_collection\n\n @gen.coroutine\n def make_test_data(self):\n yield self.collection.remove()\n yield self.collection.insert([{'_id': i} for i in range(200)])\n\n make_test_data.__test__ = False\n\n def get_client_kwargs(self, **kwargs):\n kwargs.setdefault('io_loop', self.io_loop)\n ssl = env.mongod_started_with_ssl\n kwargs.setdefault('ssl', ssl)\n if kwargs['ssl'] and env.mongod_validates_client_cert:\n kwargs.setdefault('ssl_certfile', CLIENT_PEM)\n\n return kwargs\n\n def motor_client(self, uri=None, *args, **kwargs):\n \"\"\"Get a MotorClient.\n\n Ignores self.ssl, you must pass 'ssl' argument. You'll probably need to\n close the client to avoid file-descriptor problems after AsyncTestCase\n calls self.io_loop.close(all_fds=True).\n \"\"\"\n return motor.MotorClient(\n uri or env.uri,\n *args,\n **self.get_client_kwargs(**kwargs))\n\n def motor_rsc(self, uri=None, *args, **kwargs):\n \"\"\"Get an open MotorReplicaSetClient. Ignores self.ssl, you must pass\n 'ssl' argument. You'll probably need to close the client to avoid\n file-descriptor problems after AsyncTestCase calls\n self.io_loop.close(all_fds=True).\n \"\"\"\n return motor.MotorReplicaSetClient(\n uri or env.rs_uri,\n *args,\n **self.get_client_kwargs(**kwargs))\n\n @gen.coroutine\n def check_optional_callback(self, fn, *args, **kwargs):\n \"\"\"Take a function and verify that it accepts a 'callback' parameter\n and properly type-checks it. If 'required', check that fn requires\n a callback.\n\n NOTE: This method can call fn several times, so it should be relatively\n free of side-effects. 
Otherwise you should test fn without this method.\n\n :Parameters:\n - `fn`: A function that accepts a callback\n - `required`: Whether `fn` should require a callback or not\n - `callback`: To be called with ``(None, error)`` when done\n \"\"\"\n partial_fn = functools.partial(fn, *args, **kwargs)\n self.assertRaises(TypeError, partial_fn, callback='foo')\n self.assertRaises(TypeError, partial_fn, callback=1)\n\n # Should not raise\n yield partial_fn(callback=None)\n\n # Should not raise\n (result, error), _ = yield gen.Task(partial_fn)\n if error:\n raise error\n\n def tearDown(self):\n env.sync_cx.motor_test.test_collection.remove()\n self.cx.close()\n super(MotorTest, self).tearDown()\n\n\nclass MotorReplicaSetTestBase(MotorTest):\n def setUp(self):\n super(MotorReplicaSetTestBase, self).setUp()\n if not env.is_replica_set:\n raise SkipTest(\"Not connected to a replica set\")\n\n self.rsc = self.motor_rsc()\n self.rsc = self.motor_rsc()\n\n\nclass MotorMockServerTest(MotorTest):\n\n executor = concurrent.futures.ThreadPoolExecutor(1)\n\n def server(self, *args, **kwargs):\n server = MockupDB(*args, **kwargs)\n server.run()\n self.addCleanup(server.stop)\n return server\n\n def client_server(self, *args, **kwargs):\n server = self.server(*args, **kwargs)\n client = motor.motor_tornado.MotorClient(server.uri,\n io_loop=self.io_loop)\n\n return client, server\n\n def run_thread(self, fn, *args, **kwargs):\n return self.executor.submit(fn, *args, **kwargs)\n", "repo_name": "everhide/ex-motor", "sub_path": "test/tornado_tests/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 6341, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "tornado.gen.Return", "line_number": 29, "usage_type": "call"}, {"api_name": "tornado.gen", "line_number": 29, "usage_type": "name"}, {"api_name": "test.version._parse_version_string", "line_number": 29, "usage_type": "call"}, {"api_name": "tornado.gen.coroutine", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 26, "usage_type": "name"}, {"api_name": "tornado.gen.Return", "line_number": 35, "usage_type": "call"}, {"api_name": "tornado.gen", "line_number": 35, "usage_type": "name"}, {"api_name": "test.version.padded", "line_number": 35, "usage_type": "call"}, {"api_name": "tornado.gen.coroutine", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 32, "usage_type": "name"}, {"api_name": "tornado.gen.Return", "line_number": 42, "usage_type": "call"}, {"api_name": "tornado.gen", "line_number": 42, "usage_type": "name"}, {"api_name": "tornado.gen.coroutine", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 38, "usage_type": "name"}, {"api_name": "tornado.gen.Return", "line_number": 48, "usage_type": "call"}, {"api_name": "tornado.gen", "line_number": 48, "usage_type": "name"}, {"api_name": "tornado.gen.coroutine", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 45, "usage_type": "name"}, {"api_name": "unittest.SkipTest", "line_number": 55, "usage_type": "call"}, {"api_name": "tornado.gen.coroutine", "line_number": 51, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 51, "usage_type": "name"}, {"api_name": "tornado.gen.coroutine", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 58, "usage_type": "name"}, {"api_name": "unittest.SkipTest", 
"line_number": 71, "usage_type": "call"}, {"api_name": "tornado.gen.coroutine", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 67, "usage_type": "name"}, {"api_name": "tornado.gen.coroutine", "line_number": 74, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 74, "usage_type": "name"}, {"api_name": "tornado.gen.Task", "line_number": 86, "usage_type": "call"}, {"api_name": "tornado.gen", "line_number": 86, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 87, "usage_type": "call"}, {"api_name": "tornado.gen.coroutine", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 84, "usage_type": "name"}, {"api_name": "test.assert_logs_backport.AssertLogsMixin", "line_number": 90, "usage_type": "name"}, {"api_name": "tornado.testing.AsyncTestCase", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tornado.testing", "line_number": 90, "usage_type": "name"}, {"api_name": "test.test_environment.env.mongod_started_with_ssl", "line_number": 97, "usage_type": "attribute"}, {"api_name": "test.test_environment.env", "line_number": 97, "usage_type": "name"}, {"api_name": "unittest.SkipTest", "line_number": 98, "usage_type": "call"}, {"api_name": "test.test_environment.env.auth", "line_number": 100, "usage_type": "attribute"}, {"api_name": "test.test_environment.env", "line_number": 100, "usage_type": "name"}, {"api_name": "test.test_environment.env.uri", "line_number": 101, "usage_type": "attribute"}, {"api_name": "test.test_environment.env", "line_number": 101, "usage_type": "name"}, {"api_name": "tornado.gen.coroutine", "line_number": 108, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 108, "usage_type": "name"}, {"api_name": "test.test_environment.env.mongod_started_with_ssl", "line_number": 117, "usage_type": "attribute"}, {"api_name": "test.test_environment.env", "line_number": 117, "usage_type": "name"}, {"api_name": "test.test_environment.env.mongod_validates_client_cert", "line_number": 119, "usage_type": "attribute"}, {"api_name": "test.test_environment.env", "line_number": 119, "usage_type": "name"}, {"api_name": "test.test_environment.CLIENT_PEM", "line_number": 120, "usage_type": "argument"}, {"api_name": "motor.MotorClient", "line_number": 131, "usage_type": "call"}, {"api_name": "test.test_environment.env.uri", "line_number": 132, "usage_type": "attribute"}, {"api_name": "test.test_environment.env", "line_number": 132, "usage_type": "name"}, {"api_name": "motor.MotorReplicaSetClient", "line_number": 142, "usage_type": "call"}, {"api_name": "test.test_environment.env.rs_uri", "line_number": 143, "usage_type": "attribute"}, {"api_name": "test.test_environment.env", "line_number": 143, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 161, "usage_type": "call"}, {"api_name": "tornado.gen.Task", "line_number": 169, "usage_type": "call"}, {"api_name": "tornado.gen", "line_number": 169, "usage_type": "name"}, {"api_name": "tornado.gen.coroutine", "line_number": 147, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 147, "usage_type": "name"}, {"api_name": "test.test_environment.env.sync_cx.motor_test.test_collection.remove", "line_number": 174, "usage_type": "call"}, {"api_name": "test.test_environment.env.sync_cx", "line_number": 174, "usage_type": "attribute"}, {"api_name": "test.test_environment.env", "line_number": 174, "usage_type": "name"}, {"api_name": 
"test.test_environment.env.is_replica_set", "line_number": 182, "usage_type": "attribute"}, {"api_name": "test.test_environment.env", "line_number": 182, "usage_type": "name"}, {"api_name": "unittest.SkipTest", "line_number": 183, "usage_type": "call"}, {"api_name": "concurrent.futures.futures.ThreadPoolExecutor", "line_number": 191, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 191, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 191, "usage_type": "name"}, {"api_name": "mockupdb.MockupDB", "line_number": 194, "usage_type": "call"}, {"api_name": "motor.motor_tornado.MotorClient", "line_number": 201, "usage_type": "call"}, {"api_name": "motor.motor_tornado", "line_number": 201, "usage_type": "attribute"}]} +{"seq_id": "43517182892", "text": "# 지민이는 N개의 원소를 포함하고 있는 양방향 순환 큐를 가지고 있다. 지민이는 이 큐에서 몇 개의 원소를 뽑아내려고 한다.\r\n#\r\n# 지민이는 이 큐에서 다음과 같은 3가지 연산을 수행할 수 있다.\r\n#\r\n# 첫 번째 원소를 뽑아낸다. 이 연산을 수행하면, 원래 큐의 원소가 a1, ..., ak이었던 것이 a2, ..., ak와 같이 된다.\r\n# 왼쪽으로 한 칸 이동시킨다. 이 연산을 수행하면, a1, ..., ak가 a2, ..., ak, a1이 된다.\r\n# 오른쪽으로 한 칸 이동시킨다. 이 연산을 수행하면, a1, ..., ak가 ak, a1, ..., ak-1이 된다.\r\n# 큐에 처음에 포함되어 있던 수 N이 주어진다. 그리고 지민이가 뽑아내려고 하는 원소의 위치가 주어진다. (이 위치는 가장 처음 큐에서의 위치이다.) 이때, 그 원소를 주어진 순서대로 뽑아내는데 드는 2번, 3번 연산의 최솟값을 출력하는 프로그램을 작성하시오.\r\n\r\nfrom collections import deque\r\n\r\ndata = deque([])\r\nN, M = map(int, input().split())\r\nfor i in range(N):\r\n data.append(i + 1)\r\n\r\nli = list(map(int, input().split()))\r\n\r\ncnt = 0\r\nfor i in li:\r\n while True:\r\n t = data.index(i)\r\n mid = len(data) // 2\r\n if t == 0:\r\n data.remove(i)\r\n break\r\n if t <= mid:\r\n temp = data.popleft()\r\n data.append(temp)\r\n cnt += 1\r\n else:\r\n temp = data.pop()\r\n data.appendleft(temp)\r\n cnt += 1\r\nprint(cnt)\r\n\r\n", "repo_name": "dnwls16071/PS_Baekjoon", "sub_path": "1000~1999/1021.py", "file_name": "1021.py", "file_ext": "py", "file_size_in_byte": 1548, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "collections.deque", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "14524301085", "text": "from flask import request\nfrom flask_restplus import Resource\nfrom ..util.courier_dto import CourierDto\nfrom ..services.couriers_services import save_new_couriers, get_courier, update_courier\n\napi = CourierDto.api\n\n_courier_item = CourierDto.courier_item\n_courier_post_request = CourierDto.courier_post_request\n_courier_get_response = CourierDto.courier_get_response\n_courier_update_request = CourierDto.courier_update_request\n\n\n@api.route(\"/\")\n@api.response(201, \"Created\")\n@api.response(400, \"Bad request\")\nclass CouriersHandler(Resource):\n @api.doc(\"Import couriers\")\n @api.expect(_courier_post_request, validate=True)\n def post(self):\n \"\"\" Import couriers \"\"\"\n data = request.json[\"data\"]\n return save_new_couriers(data)\n\n\n@api.route(\"/\")\n@api.param(\"courier_id\", \"The courier ID\")\n@api.response(404, \"Not found\")\n@api.response(200, \"OK\")\nclass Courier(Resource):\n @api.doc(\"Get courier info\")\n def get(self, courier_id):\n \"\"\" Get courier info \"\"\"\n return get_courier(courier_id)\n\n @api.doc(\"Update courier by id\")\n @api.expect(_courier_update_request, validate=True)\n @api.response(400, \"Bad Request\")\n def patch(self, courier_id):\n \"\"\" Update courier by id \"\"\"\n data = request.json\n return update_courier(courier_id, data)\n", "repo_name": "nooblose/CandyDeliveryApp", "sub_path": 
"app/main/controller/couriers_controller.py", "file_name": "couriers_controller.py", "file_ext": "py", "file_size_in_byte": 1328, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "util.courier_dto.CourierDto.api", "line_number": 6, "usage_type": "attribute"}, {"api_name": "util.courier_dto.CourierDto", "line_number": 6, "usage_type": "name"}, {"api_name": "util.courier_dto.CourierDto.courier_item", "line_number": 8, "usage_type": "attribute"}, {"api_name": "util.courier_dto.CourierDto", "line_number": 8, "usage_type": "name"}, {"api_name": "util.courier_dto.CourierDto.courier_post_request", "line_number": 9, "usage_type": "attribute"}, {"api_name": "util.courier_dto.CourierDto", "line_number": 9, "usage_type": "name"}, {"api_name": "util.courier_dto.CourierDto.courier_get_response", "line_number": 10, "usage_type": "attribute"}, {"api_name": "util.courier_dto.CourierDto", "line_number": 10, "usage_type": "name"}, {"api_name": "util.courier_dto.CourierDto.courier_update_request", "line_number": 11, "usage_type": "attribute"}, {"api_name": "util.courier_dto.CourierDto", "line_number": 11, "usage_type": "name"}, {"api_name": "flask_restplus.Resource", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "services.couriers_services.save_new_couriers", "line_number": 23, "usage_type": "call"}, {"api_name": "flask_restplus.Resource", "line_number": 30, "usage_type": "name"}, {"api_name": "services.couriers_services.get_courier", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "services.couriers_services.update_courier", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "33796865194", "text": "from rest_framework.decorators import api_view\nfrom django.http.response import JsonResponse\nfrom rest_framework import status\nfrom ..business.fyersStreamDataBusiness import FyersStreamBusiness\nimport json, asyncio\n\nfyersStream = FyersStreamBusiness()\n\n@api_view(['GET'])\ndef StartStream(request):\n fyersStream.StartStream()\n return JsonResponse({'status' : 'success'},status=status.HTTP_200_OK, safe=False)\n\n@api_view(['GET'])\ndef AddStreamSymbol(request):\n stockName = request.GET['StockSymbol']\n fyersStream.AddSymbol(stockName)\n return JsonResponse({'status' : 'success'},status=status.HTTP_200_OK, safe=False)\n\n@api_view(['GET'])\ndef RemoveStreamSymbol(request):\n stockName = request.GET['StockSymbol']\n asyncio.run(fyersStream.RemoveSymbol(stockName))\n return JsonResponse({'status' : 'success'},status=status.HTTP_200_OK, safe=False)\n\n@api_view(['GET'])\ndef Stop(request):\n fyersStream.StopStream()\n return JsonResponse({'status' : 'success'},status=status.HTTP_200_OK, safe=False)\n\n@api_view(['GET'])\ndef RefreshStockData(request):\n fyersStream.RefreshStockData()\n return JsonResponse({'status' : 'success'},status=status.HTTP_200_OK, safe=False)\n\n@api_view(['GET'])\ndef GetData(request):\n try:\n stockKey = request.GET['StockKey']\n res = fyersStream.GetData(stockKey)\n response_data = {}\n if not (res is None):\n response_data['status'] = 'success'\n response_data['data'] = json.loads(res.reset_index().to_json(orient='records'))\n return JsonResponse(response_data, 
status=status.HTTP_200_OK, safe=False)\n else:\n return JsonResponse({'status':'failure', 'message' :'No Response'},status=status.HTTP_404_NOT_FOUND)\n except Exception as ex:\n message = ''\n if hasattr(ex, 'message'):\n message = ex.message\n else:\n message = ex\n return JsonResponse({'status' : 'failure', 'message' : f'{message}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "repo_name": "Karthickkoppaka/TradeAlertSys", "sub_path": "backend/fyers/controllers/fyersStreamDataHandler.py", "file_name": "fyersStreamDataHandler.py", "file_ext": "py", "file_size_in_byte": 2018, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "48", "api": [{"api_name": "business.fyersStreamDataBusiness.FyersStreamBusiness", "line_number": 7, "usage_type": "call"}, {"api_name": "django.http.response.JsonResponse", "line_number": 12, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 12, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 12, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 9, "usage_type": "call"}, {"api_name": "django.http.response.JsonResponse", "line_number": 18, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 18, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 18, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 14, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 23, "usage_type": "call"}, {"api_name": "django.http.response.JsonResponse", "line_number": 24, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 24, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 24, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 20, "usage_type": "call"}, {"api_name": "django.http.response.JsonResponse", "line_number": 29, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 29, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 29, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 26, "usage_type": "call"}, {"api_name": "django.http.response.JsonResponse", "line_number": 34, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 34, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 34, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 31, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 44, "usage_type": "call"}, {"api_name": "django.http.response.JsonResponse", "line_number": 45, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 45, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 45, "usage_type": "name"}, {"api_name": "django.http.response.JsonResponse", "line_number": 47, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 47, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 47, "usage_type": "name"}, {"api_name": "django.http.response.JsonResponse", "line_number": 54, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 54, "usage_type": "attribute"}, {"api_name": "rest_framework.status", 
"line_number": 54, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "41437535878", "text": "# Structured code after this resource: https://github.com/desh2608/dnn-hmm-asr/blob/master/submission.py\n# And this resource: https://github.com/raminnakhli/HMM-DNN-Speech-Recognition\n\n# implementing a HMM from scratch: https://towardsdatascience.com/hidden-markov-model-implemented-from-scratch-72865bda430e\n\n# import mlp\n# import numpy as np\n\n\n# helpful HMM from scratch: https://towardsdatascience.com/hidden-markov-model-implemented-from-scratch-72865bda430e\nimport numpy as np\nimport pandas as pd\n\n\nclass ProbabilityVector:\n def __init__(self, probabilities: dict):\n states = probabilities.keys()\n probs = probabilities.values()\n \n assert len(states) == len(probs), \"The probabilities must match the states.\"\n assert len(states) == len(set(states)), \"The states must be unique.\"\n assert abs(sum(probs) - 1.0) < 1e-12, \"Probabilities must sum up to 1.\"\n assert len(list(filter(lambda x: 0 <= x <= 1, probs))) == len(probs), \"Probabilities must be numbers from [0, 1] interval.\"\n self.states = sorted(probabilities)\n self.values = np.array(list(map(lambda x: probabilities[x], self.states))).reshape(1, -1)\n \n @classmethod\n def initialize(cls, states: list):\n size = len(states)\n rand = np.random.rand(size) / (size**2) + 1 / size\n rand /= rand.sum(axis=0)\n return cls(dict(zip(states, rand)))\n \n @classmethod\n def from_numpy(cls, array: np.ndarray, states: list):\n return cls(dict(zip(states, list(array))))\n\n @property\n def dict(self):\n return {k:v for k, v in zip(self.states, list(self.values.flatten()))}\n\n @property\n def df(self):\n return pd.DataFrame(self.values, columns=self.states, index=['probability'])\n\n def __repr__(self):\n return \"P({}) = {}.\".format(self.states, self.values)\n\n def __eq__(self, other):\n if not isinstance(other, ProbabilityVector):\n raise NotImplementedError\n if (self.states == other.states) and (self.values == other.values).all():\n return True\n return False\n\n def __getitem__(self, state: str):\n if state not in self.states:\n raise ValueError(\"Requesting unknown probability state from vector.\")\n index = self.states.index(state)\n return float(self.values[0, index])\n\n def __mul__(self, other):\n if isinstance(other, ProbabilityVector):\n return self.values * other.values\n elif isinstance(other, (int, float)):\n return self.values * other\n else:\n NotImplementedError\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def __matmul__(self, other):\n if isinstance(other, ProbabilityMatrix):\n return self.values @ other.values\n\n def __truediv__(self, number):\n if not isinstance(number, (int, float)):\n raise NotImplementedError\n x = self.values\n return x / number if number != 0 else x / (number + 1e-12)\n\n def argmax(self):\n index = self.values.argmax()\n return self.states[index]\n\n# A group of ProbabilityVectors to form a Matrix\nclass ProbabilityMatrix:\n def __init__(self, prob_vec_dict: dict):\n \n assert len(prob_vec_dict) > 1, \\\n \"The numebr of input probability vector must be greater than one.\"\n assert len(set([str(x.states) for x in prob_vec_dict.values()])) == 1, \\\n \"All internal states of all the vectors must be indentical.\"\n assert len(prob_vec_dict.keys()) == len(set(prob_vec_dict.keys())), \\\n \"All observables must be unique.\"\n\n self.states = sorted(prob_vec_dict)\n self.observables = 
prob_vec_dict[self.states[0]].states\n self.values = np.stack([prob_vec_dict[x].values \\\n for x in self.states]).squeeze() \n\n @classmethod\n def initialize(cls, states: list, observables: list):\n size = len(states)\n rand = np.random.rand(size, len(observables)) \\\n / (size**2) + 1 / size\n rand /= rand.sum(axis=1).reshape(-1, 1)\n aggr = [dict(zip(observables, rand[i, :])) for i in range(len(states))]\n pvec = [ProbabilityVector(x) for x in aggr]\n return cls(dict(zip(states, pvec)))\n\n @classmethod\n def from_numpy(cls, array: \n np.ndarray, \n states: list, \n observables: list):\n p_vecs = [ProbabilityVector(dict(zip(observables, x))) \\\n for x in array]\n return cls(dict(zip(states, p_vecs)))\n\n @property\n def dict(self):\n return self.df.to_dict()\n\n @property\n def df(self):\n return pd.DataFrame(self.values, \n columns=self.observables, index=self.states)\n\n def __repr__(self):\n return \"PM {} states: {} -> obs: {}.\".format(\n self.values.shape, self.states, self.observables)\n\n def __getitem__(self, observable: str) -> np.ndarray:\n if observable not in self.observables:\n raise ValueError(\"Requesting unknown probability observable from the matrix.\")\n index = self.observables.index(observable)\n return self.values[:, index].reshape(-1, 1)\n\nfrom itertools import product\nfrom functools import reduce\n\n\nclass HiddenMarkovChain:\n def __init__(self, T, E, pi):\n self.T = T # transmission matrix A\n self.E = E # emission matrix B\n self.pi = pi\n self.states = pi.states\n self.observables = E.observables\n \n def __repr__(self):\n return \"HML states: {} -> observables: {}.\".format(\n len(self.states), len(self.observables))\n \n @classmethod\n def initialize(cls, states: list, observables: list):\n T = ProbabilityMatrix.initialize(states, states)\n E = ProbabilityMatrix.initialize(states, observables)\n pi = ProbabilityVector.initialize(states)\n return cls(T, E, pi)\n \n def _create_all_chains(self, chain_length):\n return list(product(*(self.states,) * chain_length))\n \n def score(self, observations: list):\n def mul(x, y): return x * y\n \n score = 0\n all_chains = self._create_all_chains(len(observations))\n for idx, chain in enumerate(all_chains):\n expanded_chain = list(zip(chain, [self.T.states[0]] + list(chain)))\n expanded_obser = list(zip(observations, chain))\n \n p_observations = list(map(lambda x: self.E.df.loc[x[1], x[0]], expanded_obser))\n p_hidden_state = list(map(lambda x: self.T.df.loc[x[1], x[0]], expanded_chain))\n p_hidden_state[0] = self.pi[chain[0]]\n \n score += reduce(mul, p_observations) * reduce(mul, p_hidden_state)\n return score\n\nclass HiddenMarkovChain_FP(HiddenMarkovChain):\n def _alphas(self, observations: list):\n alphas = np.zeros((len(observations), len(self.states)))\n alphas[0, :] = self.pi.values * self.E[observations[0]].T\n for t in range(1, len(observations)):\n alphas[t, :] = (alphas[t - 1, :].reshape(1, -1) \n @ self.T.values) * self.E[observations[t]].T\n return alphas\n \n def score(self, observations: list):\n alphas = self._alphas(observations)\n return float(alphas[-1].sum())\n\nclass HiddenMarkovChain_Simulation(HiddenMarkovChain):\n def run(self, length: int):\n assert length >= 0, \"The chain needs to be a non-negative number.\"\n s_history = [0] * (length + 1)\n o_history = [0] * (length + 1)\n \n prb = self.pi.values\n obs = prb @ self.E.values\n s_history[0] = np.random.choice(self.states, p=prb.flatten())\n o_history[0] = np.random.choice(self.observables, p=obs.flatten())\n \n for t in range(1, length 
+ 1):\n prb = prb @ self.T.values\n obs = prb @ self.E.values\n s_history[t] = np.random.choice(self.states, p=prb.flatten())\n o_history[t] = np.random.choice(self.observables, p=obs.flatten())\n \n return o_history, s_history\n\nclass HiddenMarkovChain_Uncover(HiddenMarkovChain_Simulation):\n def _alphas(self, observations: list):\n alphas = np.zeros((len(observations), len(self.states)))\n alphas[0, :] = self.pi.values * self.E[observations[0]].T\n for t in range(1, len(observations)):\n alphas[t, :] = (alphas[t - 1, :].reshape(1, -1) @ self.T.values) \\\n * self.E[observations[t]].T\n return alphas\n \n def _betas(self, observations: list):\n betas = np.zeros((len(observations), len(self.states)))\n betas[-1, :] = 1\n for t in range(len(observations) - 2, -1, -1):\n betas[t, :] = (self.T.values @ (self.E[observations[t + 1]] \\\n * betas[t + 1, :].reshape(-1, 1))).reshape(1, -1)\n return betas\n \n def uncover(self, observations: list):\n alphas = self._alphas(observations)\n betas = self._betas(observations)\n maxargs = (alphas * betas).argmax(axis=1)\n return list(map(lambda x: self.states[x], maxargs))\n\nclass HiddenMarkovLayer(HiddenMarkovChain_Uncover):\n def _digammas(self, observations: list):\n L, N = len(observations), len(self.states)\n digammas = np.zeros((L - 1, N, N))\n\n alphas = self._alphas(observations)\n betas = self._betas(observations)\n score = self.score(observations)\n for t in range(L - 1):\n P1 = (alphas[t, :].reshape(-1, 1) * self.T.values)\n P2 = self.E[observations[t + 1]].T * betas[t + 1].reshape(1, -1)\n digammas[t, :, :] = P1 * P2 / score\n return digammas\n\nclass HiddenMarkovModel:\n def __init__(self, hml: HiddenMarkovLayer):\n self.layer = hml\n self._score_init = 0\n self.score_history = []\n\n @classmethod\n def initialize(cls, states: list, observables: list):\n layer = HiddenMarkovLayer.initialize(states, observables)\n return cls(layer)\n\n def update(self, observations: list):\n alpha = self.layer._alphas(observations)\n beta = self.layer._betas(observations)\n digamma = self.layer._digammas(observations)\n score = alpha[-1].sum()\n gamma = alpha * beta / score\n\n L = len(alpha)\n obs_idx = [self.layer.observables.index(x) \\\n for x in observations]\n capture = np.zeros((L, len(self.layer.states), len(self.layer.observables)))\n for t in range(L):\n capture[t, :, obs_idx[t]] = 1.0\n\n pi = gamma[0]\n T = digamma.sum(axis=0) / gamma[:-1].sum(axis=0).reshape(-1, 1)\n E = (capture * gamma[:, :, np.newaxis]).sum(axis=0) / gamma.sum(axis=0).reshape(-1, 1)\n\n self.layer.pi = ProbabilityVector.from_numpy(pi, self.layer.states)\n self.layer.T = ProbabilityMatrix.from_numpy(T, self.layer.states, self.layer.states)\n self.layer.E = ProbabilityMatrix.from_numpy(E, self.layer.states, self.layer.observables)\n \n return score\n\n def train(self, observations: list, epochs: int, tol=None):\n self._score_init = 0\n self.score_history = (epochs + 1) * [0]\n early_stopping = isinstance(tol, (int, float))\n\n for epoch in range(1, epochs + 1):\n score = self.update(observations)\n print(\"Training... epoch = {} out of {}, score = {}.\".format(epoch, epochs, score))\n if early_stopping and abs(self._score_init - score) / score < tol:\n print(\"Early stopping.\")\n break\n self._score_init = score\n self.score_history[epoch] = score\n\nif __name__ == \"__main__\":\n np.random.seed(42)\n\n observations = ['3L', '2M', '1S', '3L', '3L', '3L']\n\n states = ['1H', '2C']\n observables = ['1S', '2M', '3L']\n\n hml = HiddenMarkovLayer.initialize(states, observables)\n hmm = HiddenMarkovModel(hml)\n\n hmm.train(observations, 20)\n\n RUNS = 100000\n T = 5\n\n chains = RUNS * [0]\n for i in range(len(chains)):\n chain = hmm.layer.run(T)[0]\n chains[i] = '-'.join(chain)\n df = pd.DataFrame(pd.Series(chains).value_counts(), columns=['counts']).reset_index().rename(columns={'index': 'chain'})\n df = pd.merge(df, df['chain'].str.split('-', expand=True), left_index=True, right_index=True)\n\n s = []\n for i in range(T + 1):\n s.append(df.apply(lambda x: x[i] == observations[i], axis=1))\n\n df['matched'] = pd.concat(s, axis=1).sum(axis=1)\n df['counts'] = df['counts'] / RUNS * 100\n df = df.drop(columns=['chain'])\n print(df.head(30))\n# class HMM():\n# def __init__(self, num_states):\n# self.pi = np.zeros(num_states)\n# self.pi[0] = 1\n# # next state?\n# self.num_states = num_states\n# # chose 16000 for 16kHz sample rate. That is 1 second of audio and 44 for number of phonemes in english language.\n# self.states = [mlp.MLP(16000, 44) for i in range(num_states)]", "repo_name": "TrevorAshby/VALL-E", "sub_path": "src/models/phoneme_conversion/tmp/hmm.py", "file_name": "hmm.py", "file_ext": "py", "file_size_in_byte": 13012, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "48", "api": [{"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 106, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 135, "usage_type": "attribute"}, {"api_name": "itertools.product", "line_number": 165, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 204, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 205, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 210, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 211, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 279, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 302, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 321, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 321, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 322, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 328, "usage_type": "call"}]}
+{"seq_id": "13255775025", "text": "import locale\nfrom enum import Enum\n\nclass EntityObjEnum(Enum):\n ENTITY_ID = 0\n ENTITY_NAME = 1\n STREET_NUMBER = 2\n STREET_NAME = 3\n CITY = 4\n STATE = 5\n ZIP = 6\n COUNTRY = 7\n IS_ACTIVE = 8\n\nclass EntityObj:\n locale.setlocale(locale.LC_ALL, 'en_US')\n\n def __init__(self, id=-1, name='', street_number='', street_name='', city='', state='', zip='', country='', is_active=1, *, entityList=None):\n if entityList is None:\n self.id = id\n self.name = name\n self.street_name = street_name\n self.street_number = street_number\n self.city = city\n self.state = state\n self.zip = zip\n self.country = country\n self.is_active = is_active\n else:\n self.id = entityList[EntityObjEnum.ENTITY_ID.value]\n self.name = entityList[EntityObjEnum.ENTITY_NAME.value]\n self.street_number = entityList[EntityObjEnum.STREET_NUMBER.value]\n self.street_name = entityList[EntityObjEnum.STREET_NAME.value]\n self.city = entityList[EntityObjEnum.CITY.value]\n self.state = entityList[EntityObjEnum.STATE.value]\n self.zip = entityList[EntityObjEnum.ZIP.value]\n self.country = entityList[EntityObjEnum.COUNTRY.value]\n self.is_active = entityList[EntityObjEnum.IS_ACTIVE.value]\n\n #print(\"######################Entity Object created######################\"+'\\n'+self.__repr__())\n\n def __repr__(self) -> str:\n entity_representation = '\\n' + \"Entity ID: \" + str(self.id) + '\\n' + \"Name: \" + str(self.name) + '\\n' + \"Street Number: \" + str(self.street_number) + '\\n' + \"Street Name: \" + str(self.street_name) + '\\n' + \"City: \" + str(self.city) + '\\n' + \"State: \" + str(self.state) + '\\n' + \"Zip Code: \" + str(self.zip) + '\\n' + \"Country: \" + str(self.country) + '\\n' + \"Is Active: \" + str(self.is_active)\n \n return entity_representation\n\n def asList(self):\n list = []\n list.insert(EntityObjEnum.ENTITY_ID.value, self.id)\n list.insert(EntityObjEnum.ENTITY_NAME.value, self.name)\n list.insert(EntityObjEnum.STREET_NUMBER.value, self.street_number)\n list.insert(EntityObjEnum.STREET_NAME.value, self.street_name)\n list.insert(EntityObjEnum.CITY.value, self.city)\n list.insert(EntityObjEnum.STATE.value, self.state)\n list.insert(EntityObjEnum.ZIP.value, self.zip)\n list.insert(EntityObjEnum.COUNTRY.value, self.country)\n list.insert(EntityObjEnum.IS_ACTIVE.value, self.is_active)\n return list\n\n def asListForDBInsertion(self):\n list = []\n list.insert(EntityObjEnum.ENTITY_ID.value, self.id)\n list.insert(EntityObjEnum.ENTITY_NAME.value, self.name)\n list.insert(EntityObjEnum.STREET_NUMBER.value, self.street_number)\n list.insert(EntityObjEnum.STREET_NAME.value, self.street_name)\n list.insert(EntityObjEnum.CITY.value, self.city)\n list.insert(EntityObjEnum.STATE.value, self.state)\n list.insert(EntityObjEnum.ZIP.value, self.zip)\n list.insert(EntityObjEnum.COUNTRY.value, self.country)\n list.insert(EntityObjEnum.IS_ACTIVE.value, self.is_active)\n return list\n \n def asListForDBUpdate(self):\n list = []\n list.append(self.name)\n list.append(self.street_number)\n list.append(self.street_name)\n list.append(self.city)\n list.append(self.state)\n list.append(self.zip)\n list.append(self.country)\n list.append(self.is_active)\n list.append(self.id)\n return list\n\n def toString(self):\n return_str = str(self.id) + \" \" + self.name + \" \" + self.street_number + \" \" + self.street_name + \" \" + self.city + \" \" + self.state + \" \" + self.zip + \" \" + self.country\n return return_str\n\n def getAsAddressString(self):\n #print(str(self.street_number) + \" \" + str(self.street_name) + \" \" + str(self.city) + \" \" + str(self.state) + \" \" + str(self.zip) + \" \" + str(self.country))\n return_str = str(self.street_number) + \" \" + self.street_name + \" \" + self.city + \", \" + self.state + \" \" + str(self.zip) + \" \" + self.country\n return return_str\n\n def getAsCustomerWidgetDisplay(self):\n return_str = self.name + \"\\n\" + str(self.street_number) + \" \" + self.street_name + \" \" + self.city + \" \" + self.state + \" \" + str(self.zip) + \" \" + self.country\n return return_str\n\n def addEntityAsTuple(self, EntityTuple=None):\n if EntityTuple is None:\n print(\"Entity tuple is None\")\n else:\n self.id = EntityTuple[EntityObjEnum.ENTITY_ID.value]\n self.name = EntityTuple[EntityObjEnum.ENTITY_NAME.value]\n self.street_number = EntityTuple[EntityObjEnum.STREET_NUMBER.value]\n self.street_name = EntityTuple[EntityObjEnum.STREET_NAME.value]\n self.city = EntityTuple[EntityObjEnum.CITY.value]\n self.state = EntityTuple[EntityObjEnum.STATE.value]\n self.zip = EntityTuple[EntityObjEnum.ZIP.value]\n self.country = EntityTuple[EntityObjEnum.COUNTRY.value]\n self.is_active = EntityTuple[EntityObjEnum.IS_ACTIVE.value]\n\n def getName(self):\n return str(self.name)\n\n def getID(self):\n return self.id\n\n def getStreetName(self):\n return self.street_name\n\n def getStreetNumber(self):\n return self.street_number\n\n def getCity(self):\n return self.city\n\n def getState(self):\n return self.state\n\n def getIsActive(self):\n return self.is_active\n\n def getCountry(self):\n return self.country\n\n def getZip(self):\n return self.zip\n", "repo_name": "mower003/GPF", "sub_path": "Entity.py", "file_name": "Entity.py", "file_ext": "py", "file_size_in_byte": 5707, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "enum.Enum", "line_number": 4, "usage_type": "name"}, {"api_name": "locale.setlocale", "line_number": 16, "usage_type": "call"}, {"api_name": "locale.LC_ALL", "line_number": 16, "usage_type": "attribute"}]}
+{"seq_id": "11889594113", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 3 11:10:03 2020\n\n@author: Reuel D'silva (@reuelrds)\n\"\"\"\n\nfrom pathlib import Path\n\n\ndef printLines(file_path, n=10):\n \"\"\"Print first n lines from the file.\n\n Args:\n file_path - A string with Absolute Path to the file.\n n - Number of lines to be printed.\n \"\"\"\n\n with open(file_path, \"rb\") as datafile:\n for _ in range(n):\n print(datafile.readline())\n\n\nif __name__ == \"__main__\":\n path = Path.cwd()\n fp = path / \"data\" / \"cornell movie-dialogs corpus\" / \"movie_line.txt\"\n print(fp)\n printLines(fp)\n", "repo_name": "eipa-project/eipa", "sub_path": "eipa/chatbot/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 608, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "48", "api": [{"api_name": "pathlib.Path.cwd", "line_number": 25, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 25, "usage_type": "name"}]}
+{"seq_id": "5280121542", "text": "import pandas as pd\nimport enum\nimport math\nimport time\nimport os\nimport sys\nimport plot\nimport multiprocessing\nimport itertools\nfrom numba import njit\nimport utils\nfrom numpy import ndarray\nimport indicators\nimport collections\nimport numpy as np\n\n\nsimulations = None\nglobal_settings = None\nglobal_data = None\n\n\nclass GlobalSettings:\n precision = 5\n skip = 0\n amount = 1000000\n step_output = False\n order_output = False\n record_orders = False\n neg_balance = True\n lot_size = 100000\n record_balance = True\n\n\nclass GlobalData:\n price_data = None\n ask_data = None\n bid_data = None\n dayofweek_data = None\n hour_data = None\n minute_data = None\n prop_list = []\n\n\ndef init():\n global simulations, global_settings, global_data\n simulations = []\n indicators.indicators = collections.OrderedDict()\n global_settings = GlobalSettings()\n global_data = GlobalData()\n\n\n@njit\ndef _to_curr(lst, precision):\n return [int(round(x * 10**precision)) for x in lst]\n\n\ndef to_curr(object):\n if isinstance(object, float):\n return int(round(object * 10**global_settings.precision))\n elif isinstance(object, list):\n return _to_curr(utils.to_typed_list(object), global_settings.precision)\n else:\n obj_type = type(object)\n raise Exception(f\"{obj_type} is not allowed in to_curr\")\n\n\ndef from_curr(object):\n if isinstance(object, int):\n return object / 10**global_settings.precision\n elif isinstance(object, list) or isinstance(object, ndarray):\n return [x / 10**global_settings.precision for x in object]\n else:\n raise Exception(f\"{type(object)} should not be in from_curr\")\n\n\ndef calculate_margin(amount, price, leverage):\n return int(amount * from_curr(price)) // leverage\n\n\ndef generate_file(path, df):\n print(\"Generating file\")\n # NOTE: the \"<CLOSE>\", \"<SPREAD>\" and \"<DATE>\" column names below are assumed MT4/MT5-style CSV headers, reconstructed because the bracketed names were stripped from this copy of the file.\n price_data = to_curr(list(df[\"<CLOSE>\"]))\n ask_data = []\n bid_data = []\n for bar_i in range(len(price_data)):\n if(bar_i % 1000 == 0):\n percent = round(bar_i / len(price_data) * 100, 1)\n print(f\"{bar_i}/{len(price_data)} ({percent}%)\", end='\\r')\n price = price_data[bar_i]\n spread = df[\"<SPREAD>\"][bar_i]\n spread_half = math.ceil(spread / 2)\n ask_data.append(price + spread_half)\n bid_data.append(price - spread_half)\n print()\n print(\"Converting timestamps\")\n combined_datetime = df[\"<DATE>\"] + \" \" + df[\"