seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
280801201 | import re
from os import path, mkdir, makedirs, remove, rmdir
from glob import glob
from shutil import copyfile
from src.fonts_sampler.html import HTML
# Import the needed sub-dir's, if needed
__all__ = [ path.basename(f)[:3] for f in glob(path.dirname(__file__) + "/*py")
if path.isfile(f) and not f.endswith("__init__.py") ]
APP_ROOT = path.abspath(path.dirname(__file__))
FONT_PATHS = [ (path.abspath(f), path.basename(f), path.splitext(path.basename(f))[0]) for f in glob(path.join(path.join("C:\\Windows\\Fonts"), '*'))
if '.ttf' in path.splitext(f)[1:] ]
DIST_DIR = path.join("dist")
CACHE_DIR = path.join(DIST_DIR, "fonts")
def build(stdin):
# create font cache, if it doesn't exist
if not path.isdir(CACHE_DIR):
makedirs(CACHE_DIR)
# setup pre-requisites and initialize objects
html = HTML(title="Font Sample Generator")
html.dependencies("script", "https://cdnjs.cloudflare.com/ajax/libs/jquery/3.4.1/jquery.js")
html.styles("""
td.font-name { text-transform: capitalize; }
td.font-sample.capitalize { text-transform: capitalize; }
td.font-sample.uppercase { text-transform: uppercase; }
td.font-sample.lowercase { text-transform: lowercase; }
""")
# generating font styles
html.styles("\n".join( [ "".ljust(2, '\t') + '@font-face { font-family: ' + fn + '; src: url(fonts/' + ff + '); }' for (_, ff, fn) in FONT_PATHS] ))
html.styles("\n".join( [ "".ljust(2, '\t') + 'td.font-sample[aria-font="' + fn + '"] { font-family: ' + fn + '; }' for (_, ff, fn) in FONT_PATHS] ))
# all script are pushed to the tail of the body, so this will be
# loaded after the document has been loaded.
html.scripts("""
function delay(t,n){var e=0;return function(){var i=this,u=arguments;clearTimeout(e),e=setTimeout(function(){t.apply(i,u)},n||0)}};
$(".case-type").unbind('change').change(function() {
let val = this.value;
$(".font-sample").addClass(val).removeClass(["uppercase", "lowercase", "capitalize"].filter(_=> _ !== val))
});
$("input.font-size").keyup(delay(function() {
let value = ((!this.value) ? "" : this.value.toString()).match(/[0-9]{1,}/g)[0],
currSize = (!value) ? 16 : value,
fontSize = currSize + "px";
this.value = fontSize;
$(".font-sample").attr("style", "font-size: " + fontSize);
}, 1e3));
$("#sample-text").keyup(delay(function() {
$(".font-sample").text(this.value);
}, 1e3));
""")
# add the user interactives
html.body.append("""
<div class="prefills">
<select class="case-type">
<option value="capitalize">Capitalize</option>
<option value="uppercase">Uppercase</option>
<option value="lowercase">Lowercase</option>
</select>
<label>Font-size: <input class="font-size" value="16px"></label>
<label>Sample Text: <input id="sample-text" value="Hello World!"></label>
</div>
""")
# create the head of the table
table = ["<table>"]
table.extend([
"".ljust(1, '\t') + "<thead>",
"".ljust(2, '\t') + "<tr>",
"".ljust(3, '\t') + "<th>Name</th>",
"".ljust(3, '\t') + "<th>Text</th>",
"".ljust(2, '\t') + "</tr>",
"".ljust(1, '\t') + "</thead>",
"".ljust(1, '\t') + "<tbody>"
])
# parse over all fonts cached and create entries to the table
# and copy the file from the fonts dir to the target dir
# default: './dist/fonts'
print(f"Caching Fonts [{ CACHE_DIR }].")
for (fontDir, fontFile, fontName) in FONT_PATHS:
CACHE_FILE = path.join(CACHE_DIR, fontFile)
# copy font file if the file doesn't already exist
if not path.isfile(CACHE_FILE):
copyfile(fontDir, CACHE_FILE)
if path.isfile(CACHE_FILE):
print(f" - Caching [{ CACHE_FILE }]. SUCCESS.")
else:
print(f" - Caching [{ CACHE_FILE }]. FAILED.")
else:
print(f" - Cached Already [{ CACHE_FILE }]. Skipped.")
# add new table row for the font
table.extend([
"".ljust(2, '\t') + "<tr>",
"".ljust(3, '\t') + f"<td class=\"font-name\">{ fontName }</td>",
"".ljust(3, '\t') + f"<td class=\"font-sample\" aria-font=\"{ fontName }\" style=\"font-size: 16px;\">Hello World!</td>",
"".ljust(2, '\t') + "</tr>"
])
# close the tbody and table
table.extend(["".ljust(1, '\t') + "</tbody>", "</table>"])
html.body.append(table)
print("Generating HTML.")
# compile the html object into an .html file
html.compile()
html.write(DIST_DIR, "index.html")
print(f"HTML generated [{ path.join(DIST_DIR, 'index.html') }]")
print("Re-run script with flag:'--run' to run a localized server to view the file.")
def clean(stdin):
html_file = path.join(DIST_DIR, "index.html")
if path.isfile(html_file):
remove(html_file,)
print(f"Removed HTML Generated file [{ html_file }].")
# parse over each fonts that are cached and attempt to remove them
# from the directory.
print(f"Cleaning [{ DIST_DIR }].")
if path.isdir(CACHE_DIR):
for CACHE_FILE in glob(path.join(CACHE_DIR, '*')):
remove(CACHE_FILE, )
if not path.isfile(CACHE_FILE):
print(f"Remove [{ CACHE_FILE }]. SUCCESS.")
else:
print(f"Remove [{ CACHE_FILE }]. FAILED.")
rmdir(CACHE_DIR,)
if not path.isdir(CACHE_DIR):
print(f"Remove [{ CACHE_DIR }]. SUCCESS.")
else:
print(f"Remove [{ CACHE_DIR }]. FAILED.")
print("Cleaned")
def run(stdin):
import webbrowser
import http.server
import socketserver
PORT = 8000
DIRECTORY = DIST_DIR
class Handler(http.server.SimpleHTTPRequestHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, directory=DIRECTORY, **kwargs)
with socketserver.TCPServer(("", PORT), Handler) as httpd:
print(f"Serving at port: { PORT }")
webbrowser.open(f'http://localhost:{ PORT }')
httpd.serve_forever()
server() | xransum/fonts-sampler | src/fonts_sampler/__init__.py | __init__.py | py | 5,708 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.basename",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "glob.glob",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number... |
37971391788 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/5/14/014 15:09
# @Author : 刘登攀
# @Site :
# @File : group_purchase.py
# @Software: PyCharm
import requests
from Daily_city_group.group_port.get_token import get_token
import json
import threading
session = requests.Session()
def get_money():
# ogLGp5Z8THB-t1rPk53QRc-i1Qn0
login_url = 'http://39.108.195.100:8088/groupApi/wechatLogin'
login_date = {'openId': 'ogLGp5Z8THB-t1rPk53QRc-i1Qn',
'version': '1.0'}
log = session.post(login_url, login_date).json()
# print(log)
url = 'http://39.108.195.100:8088/groupApi/profit/get?version=1.0'
ppp = session.get(url).json()
profit_sum_money = ppp['data']['profitSumMoney']
freeze_sum_money = ppp['data']['freezeSumMoney']
print('已解冻收益:', profit_sum_money/100, '未解冻收益:', freeze_sum_money/100)
# 购买下单
def purchase_1(conten):
i = 1
while i <= conten:
# 登陆 自己修改为自己账户的openId
login_url = 'http://39.108.195.100:8088/groupApi/wechatLogin'
login_date = {'openId': 'sadsadsadsadsa',
'version': '1.0'}
log = session.post(login_url, login_date).json()
# print(log['message'])
tj_url = 'http://39.108.195.100:8088/groupApi/order/batchAddOrder?terminal=3&version=1.0&token=%s' % get_token()
headers = {'content-type': "application/json"}
tj_data = {"orderType": 0,
"orderFrom": 3,
"remark": "",
"products": [{"phone": "14857477373",
"contact": "就是就是",
"province": "辽宁省",
"city": "丹东市",
"area": "宽甸满族自治县",
"detail": "不到好多活到九十九",
"buyerRemark": "",
"skus": [{"skuId": "e436b580f7054b19ab16be8b89b992c1",
"quantity": "1"}]
}]
}
tj = session.put(tj_url, data=json.dumps(tj_data), headers=headers).json()
print(tj['message'])
order_code = tj['data'][0]['orderCode']
print('订单:'+tj['data'][0]['orderCode']+tj['message'])
zf_url = 'http://39.108.195.100:8088/groupApi/order/payPwd'
zf_data = {'orderCode': order_code,
'password': 'e10adc3949ba59abbe56e057f20f883e',
'version': '1.0',
'terminal': '4',
'payType': '0'}
tj = session.post(zf_url, zf_data).json()
print('刘登攀:', tj['message'], i)
i += 1
threading.Thread(target=purchase_1(5))
# threading.Thread(target=purchase_2(100))
# if __name__ == '__main__':
# get_money()
#
while 1:
pass | LDPGG/dm | ZXYJ_GG-master/Daily_city_group/group_port/group_purchase.py | group_purchase.py | py | 2,963 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.Session",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "Daily_city_group.group_port.get_token.get_token",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 59,
"usage_type": "call"
},
{
"api_n... |
37777523664 | # -*- coding:utf-8 -*-
from django.http import HttpResponse
from django.shortcuts import render
from pymongo import MongoClient
from django.shortcuts import render_to_response
from django.template import RequestContext
import logging
import json
import time
logging.basicConfig(
level = logging.DEBUG,
format = '%(asctime)s %(levelname)s %(message)s',
)
con = MongoClient('localhost', 27017)
db = con.myblog
collection = db.users
def all(request):
return HttpResponse(collection.find())
def login(request):
return render(request, 'login.html', {})
def do_login(request):
account = request.POST['account']
pwd = request.POST['pwd']
info = collection.find_one({"account": account})
if info == None or info['pwd'] != pwd:
return HttpResponse("账号或密码不正确")
else:
return HttpResponse("登陆成功")
def register(request):
return render(request, 'register.html', {})
lastRegTime = 0
def do_register(request):
global lastRegTime
account = request.POST['account']
pwd = request.POST['pwd']
if pwd == "" or account == "":
return HttpResponse("账号或密码不能为空")
info = collection.find_one({"account": account})
if info != None:
return HttpResponse("该账号已被注册")
now = time.time()
logging.debug(str(now) + " " + str(lastRegTime))
if now - lastRegTime < 30:
return HttpResponse("当前注册人数太多了 请稍后再试")
lastRegTime = now
user = {'account': account, "pwd": pwd}
collection.insert(user)
return HttpResponse("注册成功")
| Runnyu/website | website/website/view.py | view.py | py | 1,529 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.htt... |
4458860862 | import logging.config
import os
from flask import Flask, Blueprint
import settings
from api.restplus import api
from api.auth.endpoints.register import ns as authentication_namespace
from database.models import mysql as db
app = Flask(__name__)
logging_conf_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'logging.conf'))
logging.config.fileConfig(logging_conf_path)
log = logging.getLogger(__name__)
# mysql = MySQL()
# MySQL configurations
# app.config['MYSQL_DATABASE_USER'] = 'jay'
# app.config['MYSQL_DATABASE_PASSWORD'] = 'jay'
# app.config['MYSQL_DATABASE_DB'] = 'BucketList'
# app.config['MYSQL_DATABASE_HOST'] = 'localhost'
def configure_app(flask_app):
flask_app.config['SERVER_NAME'] = settings.FLASK_SERVER_NAME
flask_app.config['SQLALCHEMY_DATABASE_URI'] = settings.SQLALCHEMY_DATABASE_URI
flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = settings.SQLALCHEMY_TRACK_MODIFICATIONS
flask_app.config['SWAGGER_UI_DOC_EXPANSION'] = settings.RESTPLUS_SWAGGER_UI_DOC_EXPANSION
flask_app.config['RESTPLUS_VALIDATE'] = settings.RESTPLUS_VALIDATE
flask_app.config['RESTPLUS_MASK_SWAGGER'] = settings.RESTPLUS_MASK_SWAGGER
flask_app.config['ERROR_404_HELP'] = settings.RESTPLUS_ERROR_404_HELP
#===
flask_app.config['MYSQL_DATABASE_HOST'] = 'localhost'
flask_app.config['MYSQL_DATABASE_USER'] = 'root'
flask_app.config['MYSQL_DATABASE_PASSWORD'] = 'admin'
flask_app.config['MYSQL_DATABASE_DB'] = 'mydatabase'
def initialize_app(flask_app):
configure_app(flask_app)
db.init_app(flask_app)
blueprint = Blueprint('api', __name__, url_prefix='/api')
api.init_app(blueprint)
api.add_namespace(authentication_namespace)
flask_app.register_blueprint(blueprint)
def main():
# login_manager.init_app(app)
print('asasd')
initialize_app(app)
log.info('>>>>> Starting development example at http://{}/api/ <<<<<'.format(app.config['SERVER_NAME']))
app.run(debug=settings.FLASK_DEBUG)
if __name__ == "__main__":
main()
| mani144/flask_blueprint_swagger_mysql | server.py | server.py | py | 2,094 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.normpath",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_nu... |
23854850311 | """Store global variables, in this modual by default"""
"""defaults for prodigal"""
from pathlib import Path
PRODIGAL_MODE_DFT = "meta"
PRODIGAL_MODE_CHOICES = ["train", "meta", "single"]
PRODIGAL_TRANS_TABLE_CHOICES = [str(i) for i in range(1, 26)]
PRODIGAL_TRANS_TABLE_DFT = 11
MIN_CONTIG_SIZE_DFT = 2500
BIT_SCORE_THRESHOLD_DFT = 60
RBH_BIT_SCORE_THRESHOLD_DFT = 350
GENOMES_PER_PRODUCT = 1000
# all piplines
DEFAULT_FORCE: bool = False
DEFAULT_OUTPUT_DIR: Path = Path(".")
FASTAS_CONF_TAG = "fastas"
| rmFlynn/collection_of_typical_ocoli_samples | dram2/utils/globals.py | globals.py | py | 510 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 19,
"usage_type": "name"
}
] |
5811212464 | from tpdb2xbvr import handle_request, search, get_xtras, get_scene
from main import get_config, calculate_age
from pathlib import Path
import json
from datetime import (date)
import unittest
class MyTestCase(unittest.TestCase):
def setUp(self) -> None:
Path(Path.cwd() / 'data').mkdir(exist_ok=True)
config = get_config('../config.json')
api_key = config['key']
self.headers = {
'Authorization': f'Bearer {api_key}',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
# self.url = 'https://api.metadataapi.net/scenes/wankz-vr-hello-neighbor'
# self.url = 'https://api.metadataapi.net/scenes/wankz-vr-wankzvr-car-wash-1'
self.url = 'https://api.metadataapi.net/scenes/wankz-vr-paddys-pub'
def test_handle_request(self):
res = handle_request(self.url, headers=self.headers)
print(f'result: {res}')
if res is not None:
with open('data/test_handle_request.json', 'w') as outfile:
json.dump(res, outfile)
with open('data/test_handle_request.json') as request_file:
req = json.load(request_file)
self.assertEqual('Paddys Pub', req['data']['title'])
def test_get_scene(self):
# test using requests response
res = handle_request(self.url, headers=self.headers)
print(f'result: {res}')
if res is not None:
scene = get_scene(url_or_response=res, headers=self.headers)
self.assertEqual('Paddys Pub', scene['scenes'][0]['title'])
else:
self.fail(f'Failed to get result from "handle_request()" with {self.url}')
# test using url
scene = get_scene(url_or_response=self.url, headers=self.headers)
if scene is not None:
self.assertEqual('Paddys Pub', scene['scenes'][0]['title'])
else:
self.fail(f'Failed to get scene from "get_scene()" with {self.url}')
def test_get_xtras(self):
xtras = get_xtras(self.url, headers=self.headers)
for name, vals in xtras.items():
age = calculate_age(vals['dob'])
dob = vals['dob']
print(f'{name} was born on {dob} and is currently {age}')
self.assertEqual(date.fromisoformat('1990-02-03'), xtras['Trinity St Clair']['dob'])
self.assertEqual(date.fromisoformat('1997-04-20'), xtras['Alex Grey']['dob'])
self.assertEqual(
'https://thumb.metadataapi.net/unsafe/1000x1500/smart/filters:sharpen():upscale()/https%3A%2F%2Fcdn.metadataapi.net%2Fperformer%2Ffe%2F73%2F9b%2F6c623e650bdc7590ca091f32bdb1621%2Fposter%2Falex-grey.jpg',
xtras['Alex Grey']['artist_posters'][3])
self.assertEqual(
'https://thumb.metadataapi.net/unsafe/1000x1500/smart/filters:sharpen():upscale()/https%3A%2F%2Fcdn.metadataapi.net%2Fperformer%2F39%2F6f%2Fb6%2Fe8d455f18438eb462a64125d83bb5cd%2Fposter%2Ftrinity-st-clair.jpg',
xtras['Trinity St Clair']['artist_posters'][1])
def test_search(self):
query = 'czech vr dellai'
res = search(query, headers=self.headers)
print(res)
if res is not None:
with open('data/search.json', 'w') as outfile:
json.dump(res, outfile)
if __name__ == '__main__':
unittest.main()
| make-it-fun/GUI-for-tpdb2xbvr | tests/test_tpdb2xbvr.py | test_tpdb2xbvr.py | py | 3,352 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.cwd",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "main.get_config"... |
28588370599 | from django.db import models
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
import chimera.settings as settings
import random
from hashlib import sha256
from PIL import Image, ImageDraw, ImageFont
# Create your models here.
class UserManager(BaseUserManager):
use_in_migrations = True
def create_user(self, userName, email, password=None):
user = self.model(
userName=userName,
email=self.normalize_email(email),
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, userName, email, password=None):
superUser = self.create_user(
userName,
email,
password=password,
)
superUser.is_admin = True
superUser.is_staff = True
superUser.is_superuser = True
superUser.save(using=self._db)
return superUser
class User(AbstractBaseUser, PermissionsMixin):
userId = models.AutoField(primary_key=True)
userName = models.CharField(
verbose_name='user name', max_length=20, unique=True)
email = models.EmailField(verbose_name='email address', max_length=20)
is_admin = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'userName'
REQUIRED_FIELDS = ['email']
def __str__(self):
return self.userName
# The number list, lower case character list and upper case character list are used to generate ccode text.
NUMBER_LIST = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
ALPHABET_LOWERCASE = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
ALPHABET_UPPERCASE = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
SYMBOLS = ['!', '@', '#', '$', '%', '^', '&',
'*', '(', ')', '-', '_', '+', '=', ',', '.', ':']
class Chimera(models.Model):
# chimera_code = {(start, end), hashing}
# tempname_list = [order num, name of the capt imgs]
id = models.AutoField(primary_key=True)
ip = models.CharField(max_length=20, default='0.0.0.0')
chimera_code = models.TextField()
tempname_list = models.CharField(max_length=100)
def __str__(self):
return self.ip
# This function will create a random ccode string text based on above three list.
# The input parameter is the ccode text length.
def create_random_ccode_text(self, text_length):
base_char = ALPHABET_LOWERCASE + ALPHABET_UPPERCASE + NUMBER_LIST + SYMBOLS
# create a 5 char random strin and sha hash it, note that there is no big i
imgtext = ''.join([random.choice(base_char)
for i in range(text_length)])
# create hash
return imgtext
def create_hash(self, ccode_text):
salt = settings.SECRET_KEY[:20]
# create hash
imghash = sha256((salt+ccode_text).encode('utf-8')).hexdigest()
return imghash
# Create an image ccode with special text.
def create_image_ccode(self, request, num, ccode_text):
W, H = (150, 70)
image = Image.new("RGB", (W, H), (248, 152, 7))
draw = ImageDraw.Draw(image)
font = ImageFont.truetype(settings.STATIC_DIR + "/chimera_core/font/Verdana Pro W01 Light.ttf", 40)
w, h = draw.textsize(ccode_text, font=font)
draw.text(((W-w)/2,(H-h)/2), ccode_text, font=font, fill=(255, 255, 255))
# Save the image to a png file.
temp = settings.CC_IMAGES_DIR_URL + \
request.META['REMOTE_ADDR'] + "_" + str(num) + '.png'
image.save(temp, "PNG")
tempname = request.META['REMOTE_ADDR'] + "_" + str(num) + '.png'
self.ip = request.META['REMOTE_ADDR']
return tempname
def generate_chimera_codes(self, request):
self.chimera_code = {}
self.tempname_list = []
order = random.sample(range(0, 8), 4)
order.sort()
lenadd = 0
for i in range(3):
text_length = random.randint(1, 3)
text = self.create_random_ccode_text(text_length)
self.chimera_code[(order[i] + lenadd, order[i] +
text_length + lenadd)] = self.create_hash(text)
self.tempname_list.append(
(order[i], self.create_image_ccode(request, i, text)))
lenadd += text_length | kadenkan/Project-Chimera | chimera_core/models.py | models.py | py | 4,699 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.contrib.auth.base_user.BaseUserManager",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.base_user.AbstractBaseUser",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.PermissionsMixin",
"l... |
7100733902 | import argparse
import sys
from post_timestamp_app_poc.commands.deploy import Deploy
from post_timestamp_app_poc.commands.destroy import Destroy
from post_timestamp_app_poc.commands.post import Post
class CLI:
"""Coordinating class to run the CLI
"""
def __init__(self, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
"""Initialiser
Keyword Args:
stdin (file): file IO object to use for stdin when running commands. (Default sys.stdin)
stdout (file): file IO object to use for stdout when running commands. (Default sys.stdout)
stderr (file): file IO object to use for stderr when running commands. (Default sys.stderr)
"""
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
def run(self, args=sys.argv[1:]):
"""Execute the cli action
Keyword Args:
args (list): List of strings to parse for arguments (Default sys.argv[1:])
Returns:
None
"""
parser = argparse.ArgumentParser(description="Timestamp POST proof of concept by Jonathan Harden")
subparsers = parser.add_subparsers(dest="command_name", required=True)
self.__setup_deploy_parser(subparsers)
self.__setup_destroy_parser(subparsers)
self.__setup_post_parser(subparsers)
parsed_args = parser.parse_args(args)
if parsed_args.command_name == "deploy":
self.__deploy(parsed_args)
elif parsed_args.command_name == "destroy":
self.__destroy(parsed_args)
elif parsed_args.command_name == "post":
self.__post(parsed_args)
def __setup_deploy_parser(self, subparsers):
deploy_parser = subparsers.add_parser(
"deploy", description="Deploy the solution into AWS. Will run terraform apply"
)
deploy_parser.add_argument(
"-n", "--app-name",
default="jfharden-poc",
help="Name to use for most resources (for some it will be used as a prefix separated with a '-'",
)
deploy_parser.add_argument(
"-r", "--resource-group-tag-name",
default="project",
help="Name of a tag (value will be APP_NAME) to add to all resources to allow for grouping",
)
deploy_parser.add_argument(
"--region",
default="eu-west-2",
help="AWS region to deploy into (default eu-west-2).",
)
def __setup_destroy_parser(self, subparsers):
destroy_parser = subparsers.add_parser(
"destroy", description="Destroy the solution in AWS (will run terraform destroy)"
)
destroy_parser.add_argument(
"--region",
default="eu-west-2",
help="AWS region to destroy in (default eu-west-2).",
)
def __setup_post_parser(self, subparsers):
post_parser = subparsers.add_parser(
"post", description="Perform a POST request to the deployed solution"
)
post_parser.add_argument(
"-e", "--endpoint",
dest="endpoint",
help="Endpoint URL to POST to. If not provided will be read from the terraform state file",
)
def __deploy(self, parsed_args):
deploy_command = Deploy(self.stdin, self.stdout, self.stderr)
deploy_command.execute(parsed_args.app_name, parsed_args.resource_group_tag_name, parsed_args.region)
def __destroy(self, parsed_args):
destroy_command = Destroy(self.stdin, self.stdout, self.stderr)
destroy_command.execute(parsed_args.region)
def __post(self, parsed_args):
post_command = Post(self.stdin, self.stdout, self.stderr)
post_command.execute(parsed_args.endpoint)
| jfharden/post-timestamp-app-poc | post_timestamp_app_poc/cli.py | cli.py | py | 3,786 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.stdin",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_n... |
8909909632 | # -*-coding:utf-8-*-
from selenium.webdriver.chrome.options import Options
from lianxi2 import BaseOperator
from selenium import webdriver
import time
from selenium.webdriver.common.action_chains import ActionChains
class lianxi:
def __init__(self):
chrome_options=Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
self.driver=webdriver.Chrome(chrome_options=chrome_options)
self.driver.get("https://arcb.com/tools/rate-quote.html#/new")
self.zentao=BaseOperator(self.driver)
self.loc1=("xpath", "//div[4]/div/div[1]/abt-party/div/div[2]/div[1]/abt-city-coding-input//input")
self.loc3=("xpath", "//ul[@role='listbox']/li")
self.loc=""
def open_file(self):
f=open("Untitled-1.txt", "r")
list_f=eval(str((f.readlines())).replace("\\n", ""))
return list_f
#
# def for_list_f(self):
# for n in self.open_file():
# self.zentao.sendKeys(self.loc1, str(n))
# return len(self.zentao.findElements(self.loc3))
def len_list_f(self):
return self.zentao.get_att(self.loc1, "value")
def test01(self):
time.sleep(20)
for n in self.open_file():
print(n)
self.zentao.sendKeys(self.loc1, str(n))
time.sleep(2)
if str.isdigit(self.zentao.get_att(self.loc1, "value")):
for i in range(1, len(self.zentao.findElements(self.loc3)) + 1):
loc2=("xpath", '//ul[@role="listbox"]/li[{num}]'.format(num=i))
self.zentao.ActionChains_move(loc2)
with open("a.txt", "a") as f:
f.write(self.zentao.get_ele_text(loc2) + "\n")
f.close()
print(str(self.open_file().index(n) + 1))
print(str(self.open_file().index(n) + 1) + "---------" + self.zentao.get_ele_text(loc2))
self.zentao.clear(self.loc1)
else:
with open("a.txt", "a") as f:
f.write(self.zentao.get_att(self.loc1, "value") + "\n")
f.close()
print(str(self.open_file().index(n) + 1))
print(str(self.open_file().index(n) + 1) + "---------" + self.zentao.get_att(self.loc1, "value"))
self.zentao.clear(self.loc1)
pass
if __name__ == '__main__':
print(lianxi().test01())
| cjc598033763/Climb-to-the-U.S.-Postal-Code | runmain.py | runmain.py | py | 2,500 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 14,
"usage_type": "name"
},
{... |
8490794184 | import pyposeidon
import pyposeidon.model as pm
import pyposeidon.mesh as pmesh
from pyposeidon.utils.get_value import get_value
import numpy as np
import errno
import datetime
import sys
import os, errno
from shutil import copy2
import glob
import pandas as pd
import pathlib
import json
import logging
logger = logging.getLogger(__name__)
def set(solver_name: str, **kwargs):
if solver_name == "d3d":
instance = D3DCast(**kwargs)
elif solver_name == "schism":
instance = SchismCast(**kwargs)
else:
raise ValueError(f"Don't know how to handle solver: {solver_name}")
return instance
def copy_files(rpath: str, ppath: str, filenames: list[str]) -> None:
for filename in filenames:
src = os.path.join(ppath, filename)
dst = os.path.join(rpath, filename)
if os.path.exists(src):
os.makedirs(os.path.dirname(dst), exist_ok=True)
copy2(src, dst)
logger.debug("copied src -> dst: %s -> %s", src, dst)
def symlink_files(rpath: str, ppath: str, filenames: list[str]) -> None:
for filename in filenames:
src = os.path.join(ppath, filename)
dst = os.path.join(rpath, filename)
if os.path.exists(src):
os.makedirs(os.path.dirname(dst), exist_ok=True)
if os.path.exists(dst):
os.remove(dst)
os.symlink(src, dst)
logger.debug("symlinked src -> dst: %s -> %s", src, dst)
class D3DCast:
def __init__(self, **kwargs):
for attr, value in kwargs.items():
setattr(self, attr, value)
def run(self, **kwargs):
if isinstance(self.model, str):
self.model = pyposeidon.model.read(self.model)
for attr, value in self.model.__dict__.items():
if not hasattr(self, attr):
setattr(self, attr, value)
execute = get_value(self, kwargs, "execute", False)
pwd = os.getcwd()
files = [
self.tag + "_hydro.xml",
self.tag + ".enc",
self.tag + ".obs",
self.tag + ".bnd",
self.tag + ".bca",
"run_flow2d3d.sh",
]
files_sym = [self.tag + ".grd", self.tag + ".dep"]
self.origin = self.model.rpath
self.rdate = self.model.rdate
if not os.path.exists(self.origin):
sys.stdout.write("Initial folder not present {}\n".format(self.origin))
sys.exit(1)
ppath = self.ppath
cf = [glob.glob(ppath + "/" + e) for e in files]
cfiles = [item.split("/")[-1] for sublist in cf for item in sublist]
# create the folder/run path
rpath = self.cpath
if not os.path.exists(rpath):
os.makedirs(rpath)
copy2(ppath + self.tag + "_model.json", rpath) # copy the info file
# load model
with open(rpath + self.tag + "_model.json", "rb") as f:
data = json.load(f)
data = pd.json_normalize(data, max_level=0)
info = data.to_dict(orient="records")[0]
try:
args = set(kwargs.keys()).intersection(info.keys()) # modify dic with kwargs
for attr in list(args):
info[attr] = kwargs[attr]
except:
pass
# update the properties
info["rdate"] = self.rdate
info["start_date"] = self.start_date
info["time_frame"] = self.time_frame
info["meteo_source"] = self.meteo
info["rpath"] = rpath
if self.restart_step:
info["restart_step"] = self.restart_step
m = pm.set(**info)
# copy/link necessary files
logger.debug("copy necessary files")
for filename in cfiles:
ipath = glob.glob(ppath + filename)
if ipath:
try:
copy2(ppath + filename, rpath + filename)
except:
dir_name, file_name = os.path.split(filename)
if not os.path.exists(rpath + dir_name):
os.makedirs(rpath + dir_name)
copy2(ppath + filename, rpath + filename)
logger.debug(".. done")
# symlink the big files
logger.debug("symlink model files")
for filename in files_sym:
ipath = glob.glob(os.path.join(self.origin, filename))
if ipath:
try:
os.symlink(pathlib.Path(ipath[0]).resolve(strict=True), rpath + filename)
except OSError as e:
if e.errno == errno.EEXIST:
logger.warning("Restart link present\n")
logger.warning("overwriting\n")
os.remove(rpath + filename)
os.symlink(
pathlib.Path(ipath[0]).resolve(strict=True),
rpath + filename,
)
logger.debug(".. done")
copy2(ppath + m.tag + ".mdf", rpath) # copy the mdf file
# copy restart file
inresfile = "tri-rst." + m.tag + "." + datetime.datetime.strftime(self.rdate, "%Y%m%d.%H%M%M")
outresfile = "restart." + datetime.datetime.strftime(self.rdate, "%Y%m%d.%H%M%M")
# copy2(ppath+inresfile,rpath+'tri-rst.'+outresfile)
try:
os.symlink(
pathlib.Path(ppath + "/" + inresfile).resolve(strict=True),
rpath + "tri-rst." + outresfile,
)
logger.debug("symlink {} to {}".format(ppath + "/" + inresfile, rpath + "tri-rst." + outresfile))
except OSError as e:
if e.errno == errno.EEXIST:
logger.warning("Restart symlink present\n")
logger.warning("overwriting\n")
os.remove(rpath + "tri-rst." + outresfile)
os.symlink(
pathlib.Path(ppath + "/" + inresfile).resolve(strict=True),
rpath + "tri-rst." + outresfile,
)
else:
raise e
# get new meteo
logger.info("process meteo\n")
flag = get_value(self, kwargs, "update", ["meteo"])
check = [os.path.exists(rpath + f) for f in ["u.amu", "v.amv", "p.amp"]]
if (np.any(check) == False) or ("meteo" in flag):
m.force()
m.to_force(m.meteo.Dataset, vars=["msl", "u10", "v10"], rpath=rpath) # write u,v,p files
else:
logger.info("meteo files present\n")
# modify mdf file
m.config(
config_file=ppath + m.tag + ".mdf",
config={"Restid": outresfile},
output=True,
)
m.config_file = rpath + m.tag + ".mdf"
os.chdir(rpath)
m.save()
if execute:
m.run()
# cleanup
os.remove(rpath + "tri-rst." + outresfile)
logger.info("done for date :" + datetime.datetime.strftime(self.rdate, "%Y%m%d.%H"))
os.chdir(pwd)
class SchismCast:
files = [
"launchSchism.sh",
"sflux/sflux_inputs.txt",
"outputs/flux.out",
]
model_files = [
"bctides.in",
"hgrid.gr3",
"hgrid.ll",
"manning.gr3",
"vgrid.in",
"drag.gr3",
"rough.gr3",
"station.in",
"stations.json",
"windrot_geo2proj.gr3",
]
station_files = [
"outputs/staout_1",
"outputs/staout_2",
"outputs/staout_3",
"outputs/staout_4",
"outputs/staout_5",
"outputs/staout_6",
"outputs/staout_7",
"outputs/staout_8",
"outputs/staout_9",
]
def __init__(self, **kwargs):
for attr, value in kwargs.items():
setattr(self, attr, value)
def run(self, **kwargs):
if isinstance(self.model, str):
self.model = pyposeidon.model.read(self.model)
for attr, value in self.model.__dict__.items():
if not hasattr(self, attr):
setattr(self, attr, value)
execute = get_value(self, kwargs, "execute", True)
copy = get_value(self, kwargs, "copy", False)
pwd = os.getcwd()
self.origin = self.model.rpath
self.rdate = self.model.rdate
ppath = self.ppath
# ppath = pathlib.Path(ppath).resolve()
# ppath = str(ppath)
ppath = os.path.realpath(ppath)
# control
if not isinstance(self.rdate, pd.Timestamp):
self.rdate = pd.to_datetime(self.rdate)
if not os.path.exists(self.origin):
sys.stdout.write(f"Initial folder not present {self.origin}\n")
sys.exit(1)
# create the new folder/run path
rpath = self.cpath
# rpath = pathlib.Path(rpath).resolve()
# rpath = str(rpath)
rpath = os.path.realpath(rpath)
if not os.path.exists(rpath):
os.makedirs(rpath)
model_definition_filename = f"{self.tag}_model.json"
copy2(os.path.join(ppath, model_definition_filename), rpath) # copy the info file
# load model
with open(os.path.join(rpath, model_definition_filename), "rb") as f:
data = json.load(f)
data = pd.json_normalize(data, max_level=0)
info = data.to_dict(orient="records")[0]
try:
args = info.keys() & kwargs.keys() # modify dic with kwargs
for attr in list(args):
if isinstance(info[attr], dict):
info[attr].update(kwargs[attr])
else:
info[attr] = kwargs[attr]
setattr(self, attr, info[attr])
except Exception as e:
logger.exception("problem with kwargs integration\n")
raise e
# add optional additional kwargs
for attr in kwargs.keys():
if attr not in info.keys():
info[attr] = kwargs[attr]
info["config_file"] = os.path.join(ppath, "param.nml")
# update the properties
info["rdate"] = self.rdate
info["start_date"] = self.sdate
info["time_frame"] = self.time_frame
info["end_date"] = self.sdate + pd.to_timedelta(self.time_frame)
info["meteo_source"] = self.meteo
info["rpath"] = rpath
m = pm.set(**info)
# copy/link necessary files
logger.debug("Copy necessary + station files")
copy_files(rpath=rpath, ppath=ppath, filenames=self.files + self.station_files)
if copy:
logger.debug("Copy model files")
copy_files(rpath=rpath, ppath=ppath, filenames=self.model_files)
else:
logger.debug("Symlink model files")
symlink_files(rpath=rpath, ppath=ppath, filenames=self.model_files)
logger.debug(".. done")
# create restart file
logger.debug("create restart file")
# check for combine hotstart
hotout = int((self.sdate - self.rdate).total_seconds() / info["params"]["core"]["dt"])
logger.debug("hotout_it = {}".format(hotout))
# link restart file
inresfile = os.path.join(ppath, f"outputs/hotstart_it={hotout}.nc")
outresfile = os.path.join(rpath, "hotstart.nc")
logger.debug("hotstart_file: %s", inresfile)
if not os.path.exists(inresfile):
logger.info("Generating hotstart file.\n")
# load model model from ppath
with open(os.path.join(ppath, self.tag + "_model.json"), "rb") as f:
data = json.load(f)
data = pd.json_normalize(data, max_level=0)
ph = data.to_dict(orient="records")[0]
p = pm.set(**ph)
p.hotstart(it=hotout)
else:
logger.info("Hotstart file already existing. Skipping creation.\n")
if copy:
logger.info("Copying: %s -> %s", inresfile, outresfile)
copy2(inresfile, outresfile)
else:
logger.info("Symlinking`: %s -> %s", inresfile, outresfile)
try:
os.symlink(inresfile, outresfile)
except OSError as e:
if e.errno == errno.EEXIST:
logger.warning("Restart link present\n")
logger.warning("overwriting\n")
os.remove(outresfile)
os.symlink(inresfile, outresfile)
else:
raise e
# get new meteo
logger.info("process meteo\n")
flag = get_value(self, kwargs, "update", [])
check = [os.path.exists(os.path.join(rpath, "sflux", f)) for f in ["sflux_air_1.0001.nc"]]
if (np.any(check) == False) or ("meteo" in flag):
m.force(**info)
if hasattr(self, "meteo_split_by"):
times, datasets = zip(*m.meteo.Dataset.resample(time=f"{self.meteo_split_by}"))
mpaths = ["sflux_air_1.{:04d}.nc".format(t + 1) for t in np.arange(len(times))]
for das, mpath in list(zip(datasets, mpaths)):
m.to_force(
das,
vars=["msl", "u10", "v10"],
rpath=rpath,
filename=mpath,
date=self.rdate,
)
else:
m.to_force(
m.meteo.Dataset,
vars=["msl", "u10", "v10"],
rpath=rpath,
date=self.rdate,
)
else:
logger.warning("meteo files present\n")
# modify param file
rnday_new = (self.sdate - self.rdate).total_seconds() / (3600 * 24.0) + pd.to_timedelta(
self.time_frame
).total_seconds() / (3600 * 24.0)
hotout_write = int(rnday_new * 24 * 3600 / info["params"]["core"]["dt"])
info["parameters"].update(
{
"ihot": 2,
"rnday": rnday_new,
"start_hour": self.rdate.hour,
"start_day": self.rdate.day,
"start_month": self.rdate.month,
"start_year": self.rdate.year,
}
)
m.config(output=True, **info) # save param.nml
m.config_file = os.path.join(rpath, "param.nml")
m.save()
if execute:
m.run()
logger.info("done for date : %s", self.sdate.strftime("%Y%m%d.%H"))
os.chdir(pwd)
| ec-jrc/pyPoseidon | pyposeidon/utils/cast.py | cast.py | py | 14,505 | python | en | code | 17 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_... |
6477205354 | import requests
def address_to_coordinates(address):
try :
# requesting from Geopunt
geo_resp = requests.get("http://loc.geopunt.be/geolocation/location?q="+address+"&c=25")
geo_loc = geo_resp.json()['LocationResult'][0]
# Storing value into variables
lat = geo_loc['Location']['X_Lambert72']
lon = geo_loc['Location']['Y_Lambert72']
return lat,lon
except IndexError as error :
print(error)
print('Wrong address')
print('Try format : "Street number, postcode city"')
| WimChristiaansen/testapp | coordinates.py | coordinates.py | py | 575 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
}
] |
29376927959 | from trellowarrior.clients.taskwarrior import TaskwarriorClient
from trellowarrior.clients.trello import TrelloClient
from trellowarrior.config import config
import logging
logger = logging.getLogger(__name__)
class TrelloWarriorClient:
def __init__(self, config):
self.taskwarrior_client = TaskwarriorClient(config.taskwarrior_taskrc_location, config.taskwarrior_data_location)
self.trello_client = TrelloClient(config.trello_api_key, config.trello_api_secret, config.trello_token, config.trello_token_secret)
def upload_taskwarrior_task(self, project, taskwarrior_task, trello_list):
"""
Upload all contents of Taskwarrior task to a Trello list creating a new card and storing cardid and url
:param project: TrelloWarrior project object
:param taskwarrior_task: Taskwarrior task object
:param trello_list: Trello list object
"""
new_trello_card = trello_list.add_card(taskwarrior_task['description'])
if taskwarrior_task['due']:
new_trello_card.set_due(taskwarrior_task['due'])
for tag in taskwarrior_task['tags']:
trello_label = self.trello_client.get_board_label(tag)
new_trello_card.add_label(trello_label)
if project.only_my_cards:
new_trello_card.assign(self.trello_client.whoami)
taskwarrior_task['trelloid'] = new_trello_card.id
taskwarrior_task.save()
taskwarrior_task.add_annotation('[Trello URL] {}'.format(new_trello_card.short_url))
logger.info('Taskwarrior task with ID {} saved as new card in Trello with ID {}'.format(taskwarrior_task['id'], new_trello_card.id))
def fetch_trello_card(self, project, list_name, trello_card):
"""
Fetch contents of a Trello card to a new Taskwarrior task
:param project: TrelloWarrior project object
:list_name: name of the Trello list where the card is stored
:param trello_card: Trello card object
"""
new_taskwarrior_task = self.taskwarrior_client.new_task()
new_taskwarrior_task['project'] = project.taskwarrior_project_name
new_taskwarrior_task['description'] = trello_card.name
if trello_card.due_date:
new_taskwarrior_task['due'] = trello_card.due_date
if trello_card.labels:
for label in trello_card.labels:
new_taskwarrior_task['tags'].add(label.name)
new_taskwarrior_task['trelloid'] = trello_card.id
new_taskwarrior_task['trellolistname'] = list_name
new_taskwarrior_task.save()
new_taskwarrior_task.add_annotation('[Trello URL] {}'.format(trello_card.short_url))
if trello_card.description:
new_taskwarrior_task.add_annotation('[Trello Description] {}'.format(trello_card.description))
logger.info('Trello card with ID {} saved as new task in Taskwarrior with ID {}'.format(trello_card.id, new_taskwarrior_task['id']))
if list_name == project.trello_doing_list:
new_taskwarrior_task.start()
logger.info('New task {} kicked to doing list'.format(new_taskwarrior_task['id']))
if list_name == project.trello_done_list:
new_taskwarrior_task.done()
logger.info('New task {} kicked to done list'.format(new_taskwarrior_task['id']))
def sync_task_card(self, project, list_name, trello_card, taskwarrior_task):
"""
Sync an existing Taskwarrior task with an existing Trello card
:param project: TrelloWarrior project object
:param list_name: name of the Trello list where the card is stored
:param trello_card: Trello card object
:param taskwarrior_task: Taskwarrior task object
"""
taskwarrior_task_modified = False # Change to true to save modification
# Task description <> Trello card name
if taskwarrior_task['description'] != trello_card.name:
if taskwarrior_task['modified'] > trello_card.date_last_activity:
# Taskwarrior data is newer
trello_card.set_name(taskwarrior_task['description'])
else:
# Trello data is newer
taskwarrior_task['description'] = trello_card.name
taskwarrior_task_modified = True
logger.info('Name of task {} synchronized'.format(taskwarrior_task['id']))
# Task due <> Trello due
if taskwarrior_task['due']:
if not trello_card.due_date or taskwarrior_task['modified'] > trello_card.date_last_activity:
# No due data in Trello or Taskwarrior data is newer
trello_card.set_due(taskwarrior_task['due'])
else:
# Trello data is newer
taskwarrior_task['due'] = trello_card.due_date
taskwarrior_task_modified = True
logger.info('Due date of task {} synchronized'.format(taskwarrior_task['id']))
elif trello_card.due_date:
# No due data in Taskwarrior
taskwarrior_task['due'] = trello_card.due_date
taskwarrior_task_modified = True
logger.info('Due date of task {} synchronized'.format(taskwarrior_task['id']))
# Task tags <> Trello labels
trello_card_labels_set = set(trello_card.labels) if trello_card.labels else set()
trello_card_labels_name_set = set([label.name for label in trello_card_labels_set])
if taskwarrior_task['tags'] != trello_card_labels_name_set:
if taskwarrior_task['modified'] > trello_card.date_last_activity:
# Taskwarrior data is newer
for tag in taskwarrior_task['tags']:
# Get or create label in board
trello_label = self.trello_client.get_board_label(tag)
if not trello_label in trello_card_labels_set:
trello_card.add_label(trello_label) # Assign label to card
for label in trello_card_labels_set:
if not label.name in taskwarrior_task['tags']:
trello_card.remove_label(label) # Remove labels that are not present in tag list
else:
# Trello data is newer
taskwarrior_task['tags'] = trello_card_labels_name_set # Copy tags from Trello labels
taskwarrior_task_modified = True
logger.info('Tags of task {} synchronized'.format(taskwarrior_task['id']))
# Task list name and status <> Trello list name
if taskwarrior_task.pending and not taskwarrior_task.active and taskwarrior_task['trellolistname'] in [project.trello_doing_list, project.trello_done_list] and taskwarrior_task['modified'] > trello_card.date_last_activity:
# Task kicked to To Do in Taskwarrior and not synchronized
trello_card.change_list(self.trello_client.get_list(project.trello_todo_list).id)
taskwarrior_task['trellolistname'] = list_name = project.trello_todo_list
taskwarrior_task_modified = True
logger.info('Task {} kicked to todo list in Trello'.format(taskwarrior_task['id']))
if taskwarrior_task.active and taskwarrior_task['trellolistname'] != project.trello_doing_list and taskwarrior_task['modified'] > trello_card.date_last_activity:
# Task kicked to doing in Taskwarrior and not synchronized
trello_card.change_list(self.trello_client.get_list(project.trello_doing_list).id)
taskwarrior_task['trellolistname'] = list_name = project.trello_doing_list
taskwarrior_task_modified = True
logger.info('Task {} kicked to doing list in Trello'.format(taskwarrior_task['id']))
if taskwarrior_task.completed and taskwarrior_task['trellolistname'] != project.trello_done_list and taskwarrior_task['modified'] > trello_card.date_last_activity:
# Task kicked to doing in Taskwarrior and not synchronized
trello_card.change_list(self.trello_client.get_list(project.trello_done_list).id)
taskwarrior_task['trellolistname'] = list_name = project.trello_done_list
taskwarrior_task_modified = True
logger.info('Task {} kicked to done list in Trello'.format(taskwarrior_task['id']))
if taskwarrior_task['trellolistname'] != list_name:
if taskwarrior_task['modified'] > trello_card.date_last_activity:
# Taskwarrior data is newer
trello_card.change_list(self.trello_client.get_list(taskwarrior_task['trellolistname']).id)
logger.info('Task {} kicked to {} list in Trello'.format(taskwarrior_task['id'], taskwarrior_task['trellolistname']))
else:
# Trello data is newer
taskwarrior_task['trellolistname'] = list_name
if list_name == project.trello_done_list and not taskwarrior_task.completed:
taskwarrior_task.save() # Must save before a status change to avoid data loss
taskwarrior_task.done()
logger.info('Task {} kicked to done list in Taskwarrior'.format(taskwarrior_task['id']))
elif list_name == project.trello_doing_list:
if taskwarrior_task.completed:
taskwarrior_task['status'] = 'pending'
taskwarrior_task.save()
taskwarrior_task.start()
elif not taskwarrior_task.active:
taskwarrior_task.save()
taskwarrior_task.start()
else:
taskwarrior_task.save()
logger.info('Task {} kicked to doing list in Taskwarrior'.format(taskwarrior_task['id']))
else:
if taskwarrior_task.completed:
taskwarrior_task['status'] = 'pending'
taskwarrior_task.save()
elif taskwarrior_task.active:
taskwarrior_task.save()
taskwarrior_task.stop()
else:
taskwarrior_task.save()
logger.info('Task {} kicked to {} list in Taskwarrior'.format(taskwarrior_task['id'], taskwarrior_task['trellolistname']))
taskwarrior_task_modified = False # Avoid save again
logger.info('All changes in Taskwarrior task {} saved'.format(taskwarrior_task['id']))
# Save Taskwarrior changes (if any)
if taskwarrior_task_modified:
taskwarrior_task.save()
logger.info('All changes in Taskwarrior task {} saved'.format(taskwarrior_task['id']))
# Task annotations <> Trello url and description
# WARNING: The task must be saved before play with annotations https://tasklib.readthedocs.io/en/latest/#working-with-annotations
taskwarrior_task_annotation_trello_url = None
taskwarrior_task_annotation_trello_description = [None, '']
if taskwarrior_task['annotations']:
for annotation in taskwarrior_task['annotations']:
# Look for Trello url
if annotation['description'][0:13].lower() == '[trello url] ':
taskwarrior_task_annotation_trello_url = annotation
# Look for Trello description
if annotation['description'][0:21].lower() == '[trello description] ':
taskwarrior_task_annotation_trello_description = [annotation, annotation['description'][21:]]
if taskwarrior_task_annotation_trello_url is None:
# No previous url annotated
taskwarrior_task.add_annotation('[Trello URL] {}'.format(trello_card.short_url))
logger.info('URL of task {} added'.format(taskwarrior_task['id']))
elif taskwarrior_task_annotation_trello_url['description'][13:] != trello_card.short_url:
# Cannot update annotations (see https://github.com/robgolding/tasklib/issues/91)
# Delete old URL an add the new one
taskwarrior_task_annotation_trello_url.remove()
taskwarrior_task.add_annotation('[Trello URL] {}'.format(trello_card.short_url))
logger.info('URL of task {} synchronized'.format(taskwarrior_task['id']))
if taskwarrior_task_annotation_trello_description[1] != trello_card.description:
if taskwarrior_task['modified'] > trello_card.date_last_activity:
# Taskwarrior data is newer
trello_card.set_description(taskwarrior_task_annotation_trello_description[1])
else:
# Trello data is newer (delete old description and add new one)
if taskwarrior_task_annotation_trello_description[0] is not None:
taskwarrior_task_annotation_trello_description[0].remove()
if trello_card.description != '':
taskwarrior_task.add_annotation('[Trello Description] {}'.format(trello_card.description))
logger.info('Description of task {} synchronized'.format(taskwarrior_task['id']))
def sync_project(self, project):
"""
Sync a Taskwarrior project with a Trello board
:param project: TrelloWarrior project object
"""
# Initialize clients
self.taskwarrior_client.project(project)
self.trello_client.project(project)
# Get all Taskwarrior deleted tasks and seek for ones that have trelloid (deleted in Taskwarrior)
logger.info('Syncing project {} step 1: delete Trello cards that already deleted in Taskwarrior'.format(project.name))
taskwarrior_deleted_tasks = self.taskwarrior_client.get_deleted_tasks()
for taskwarrior_deleted_task in taskwarrior_deleted_tasks:
if taskwarrior_deleted_task['trelloid']:
logger.info('Deleting previously deleted Taskwarrior task with ID {} from Trello'.format(taskwarrior_deleted_task['trelloid']))
self.trello_client.delete_card(taskwarrior_deleted_task['trelloid'])
taskwarrior_deleted_task['trelloid'] = None
taskwarrior_deleted_task.save()
# Compare and sync Taskwarrior with Trello
logger.info('Syncing project {} step 2: syncing changes between Taskwarrior and Trello'.format(project.name))
trello_cards_dict = self.trello_client.get_cards_dict()
trello_cards_ids = [] # List to store cards IDs to compare later with local trelloid
for trello_list_name in trello_cards_dict:
for trello_card in trello_cards_dict[trello_list_name]:
# Fech all data from card
trello_card.fetch(False) # Pass False to fetch to avoid download attachments
trello_cards_ids.append(trello_card.id)
taskwarrior_task = self.taskwarrior_client.get_task_by_trello_id(trello_card.id)
if taskwarrior_task is None:
# Download new Trello card that not present in Taskwarrior
logger.info('Downloading Trello card with ID {} as new task in Taskwarrior'.format(trello_card.id))
self.fetch_trello_card(project, trello_list_name, trello_card)
else:
# Sync Taskwarrior task with Trello card
self.sync_task_card(project, trello_list_name, trello_card, taskwarrior_task)
# Compare Trello and Taskwarrior tasks for remove deleted Trello tasks in Taskwarrior
logger.info('Syncing project {} step 3: delete Takswarrior tasks that already deleted in Trello'.format(project.name))
taskwarrior_tasks_ids = self.taskwarrior_client.get_tasks_ids_set()
taskwarrior_tasks_ids.discard(None) # Remove None element if present (new tasks created with Taskwarrior)
trello_cards_ids = set(trello_cards_ids) # Convert trello_cards_ids list in a set
for deleted_trello_task_id in taskwarrior_tasks_ids - trello_cards_ids:
taskwarrior_task_to_delete = self.taskwarrior_client.get_task_by_trello_id(deleted_trello_task_id)
taskwarrior_task_to_delete['trelloid'] = None
taskwarrior_task_to_delete.save()
taskwarrior_task_to_delete.delete()
logger.info('Deleting previously deleted Trello task with ID {} from Taskwarrior'.format(deleted_trello_task_id))
# Upload new Taskwarrior tasks that never uploaded before
logger.info('Syncing project {} step 4: upload new Takswarrior tasks'.format(project.name))
for taskwarrior_pending_task in self.taskwarrior_client.get_pending_tasks():
logger.info('Uploading new pending Taskwarrior task with ID {} to Trello'.format(taskwarrior_pending_task['id']))
if taskwarrior_pending_task.active:
# Upload new pending active task to doing list
self.upload_taskwarrior_task(project, taskwarrior_pending_task, self.trello_client.get_list(project.trello_doing_list))
taskwarrior_pending_task['trellolistname'] = project.trello_doing_list
taskwarrior_pending_task.save()
else:
if taskwarrior_pending_task['trellolistname']:
# Upload new pending task to user provided list
self.upload_taskwarrior_task(project, taskwarrior_pending_task, self.trello_client.get_list(taskwarrior_pending_task['trellolistname']))
else:
# Upload new pending task to default todo list
self.upload_taskwarrior_task(project, taskwarrior_pending_task, self.trello_client.get_list(project.trello_todo_list))
taskwarrior_pending_task['trellolistname'] = project.trello_todo_list
taskwarrior_pending_task.save()
for taskwarrior_completed_task in self.taskwarrior_client.get_completed_tasks():
logger.info('Uploading new completed Taskwarrior task to Trello')
self.upload_taskwarrior_task(project, taskwarrior_completed_task, self.trello_client.get_list(project.trello_done_list))
taskwarrior_completed_task['trellolistname'] = project.trello_done_list
taskwarrior_completed_task.save()
logger.info('Project {} synchronized'.format(project.name))
| ogarcia/trellowarrior | trellowarrior/clients/trellowarrior.py | trellowarrior.py | py | 18,357 | python | en | code | 101 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "trellowarrior.clients.taskwarrior.TaskwarriorClient",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "trellowarrior.config.config.taskwarrior_taskrc_location",
"line_number":... |
70171476835 | #! /usr/bin/env python3
"""
plot the time vs miRNA ratio box plot
"""
import os, sys, re
import json
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
fn = '/data/cqs/chenh19/findadapt/findadapt_bench/2023_realdata/findadapt_res.time_mem_summary.txt'
def get_miRNA_ratio(lb):
pw = '/data/cqs/chenh19/findadapt/findadapt_bench/2023_realdata/findadapt_res'
fn_log = f'{pw}/{lb}.log'
# total matched reads for inferring = 601, n_processed = 90000, matched reads density = 149.8
with open(fn_log) as f:
ratio = None
for i in f:
p = r'.*total matched reads for inferring = (\d+), n_processed = (\d+)'
m = re.match(p, i)
if m:
matched, total_parsed = m.groups()
total_parsed = int(total_parsed)
matched = int(matched)
ratio = matched / total_parsed
if ratio is None:
print(f'invalid miRNA ratio value found: {fn_log}')
return ratio
fn = '/data/cqs/chenh19/findadapt/findadapt_bench/fq_size_vs_memory/findadapt_res.time_mem_summary.txt'
with open(fn) as f:
header = f.readline().strip().split('\t')
idx_fq = header.index('fq')
idx_time = header.index('wall_time')
idx_lb = header.index('lb')
ratio_l = []
ratio_d = {}
time_l = []
for i in f:
line = i.strip().split('\t')
time = line[idx_time]
time = float(time)
fq = line[idx_fq]
lb = line[idx_lb]
ratio_value = get_miRNA_ratio(lb)
if not ratio_value:
continue
# so it is right open, (right border value is not included in the class)
if ratio_value < 0.005:
ratio_class = '0-0.5%'
elif ratio_value < 0.01:
ratio_class = '0.5-1%'
elif ratio_value < 0.05:
ratio_class = '1-5%'
elif ratio_value < 0.1:
ratio_class = '5-10%'
elif ratio_value >= 0.1:
ratio_class = '>10%'
ratio_d[lb] = ratio_value
ratio_l.append(ratio_class)
time_l.append(time)
with open(f'findadapt_fq_files_miRNA_ratio.json', 'w') as f:
json.dump(ratio_d, f)
# Wall Time
order = ['0-0.5%', '0.5-1%', '1-5%', '5-10%', '>10%']
flierprops = dict(marker='o', markersize=1, linestyle='none')
fig = plt.figure(figsize=(4.5, 3),dpi=300)
a = sns.boxplot(x=ratio_l, y=time_l, linewidth=0.4, flierprops=flierprops, order=order)
a.set_xlabel('')
a.set_ylabel('Time (s)', fontsize=12)
a.set_yscale('log')
a.set_yticks([0.1, 0.5, 1, 5, 10, 50, 100])
a.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
plt.xticks(fontsize=7, rotation=30, ha='right')
plt.yticks(fontsize=10)
fig.savefig(f"findadapt_time_vs_realdata_miRNA_ratio.png", bbox_inches='tight')
| chc-code/findadapt | utils/simulation_and_benchmark/plot_realdata_time_vs_miRNA_ratio.py | plot_realdata_time_vs_miRNA_ratio.py | py | 2,843 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.match",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"... |
37176069079 | import bpy
import bmesh
import json
def clean():
for o in bpy.data.objects:
if o.type == 'MESH':
o.select = True
else:
o.select = False
# call the operator once
bpy.ops.object.delete()
clean()
file_obj = r'C:\Users\Christian\Dropbox\Arbejde\DTU BYG\Livestock\livestock\tests\test_data\drainage_flow\drain_mesh.obj'
bpy.ops.import_scene.obj(filepath=file_obj, axis_forward='X', axis_up='Z', )
imported_mesh = bpy.context.selected_objects[-1]
me = imported_mesh.data
bm = bmesh.new()
# Get a BMesh representation
bm.from_mesh(me)
lowest_neighbour = []
for face in bm.faces:
linked_faces = set(f
for v in face.verts
for f in v.link_faces)
centers = [[linked_face.index, tuple(linked_face.calc_center_median())]
for linked_face in linked_faces]
sorted_centers = sorted(centers, key=lambda v: v[1][2])
if face.calc_center_median().z <= sorted_centers[0][1][2]:
lowest_neighbour.append(None)
else:
lowest_neighbour.append(sorted_centers[0])
#print(lowest_neighbour)
def get_curve_points(start_index, point_list):
curve_points = []
next_index = start_index
while True:
pt = point_list[next_index]
if pt:
next_index = pt[0]
curve_points.append(pt[1])
else:
return curve_points
curves = []
for face_index in range(len(bm.faces)):
curves.append(get_curve_points(face_index, lowest_neighbour))
outfile = r'C:\Users\Christian\Dropbox\Arbejde\DTU BYG\Livestock\livestock\tests\test_data\drainage_flow\result.json'
with open(outfile, 'w') as file:
json.dump(curves, file)
| livestock3d/livestock | tests/archive/local_test/blender_drainge_mesh.py | blender_drainge_mesh.py | py | 1,707 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "bpy.data",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.object.delete",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.import_scene.... |
3682685578 | #!/usr/bin/env python
# coding=UTF-8
import sys
import os
import time
import reportlab
import StringIO
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfgen.canvas import Canvas
#import reportlab
from reportlab.pdfgen import canvas
##from pdfmetrics import standardEncodings
#
from reportlab.pdfbase import pdfmetrics
from reportlab.lib.pagesizes import letter
from reportlab.lib.pagesizes import A4
from datetime import datetime
from datetime import timedelta
class Invoice( object ):
canvas = None
invoice = None
size = (0,0)
col = 80
row = 600
default_font = ( 'Helvetica', 10 )
def __init__( self, invoice, output, size=A4 ):
self.canvas = canvas.Canvas(output, pagesize=size)
self.invoice = invoice
self.size = size
def save( self ):
self.getHeader()
self.getBillingLines()
self.getFooter()
return self.canvas.save()
def getBillingLines( self ):
font, size = self.default_font
self.canvas.setFont(font, size)
cost, total = 0, 0
self.setRow(365)
for line in self.invoice.billing_introduction().splitlines():
self.canvas.drawString(self.getCol(), self.getRow(), line );
self.nextRow()
self.nextRow()
self.nextRow()
for line in self.invoice.invoice_lines():
#dot = unicode('•')
total = total + line.amount
self.canvas.drawString( 435, self.getRow(), "€%s" % str.rjust("%0.2f" % line.amount,17) )
self.canvas.drawString( self.getCol(), self.getRow(), "%s" % line.name )
self.nextRow()
self.canvas.line(435,self.getRow(),505,self.getRow())
self.canvas.drawString(self.getCol(), self.nextRow(), "Subtotaal")
self.canvas.drawString(435, self.getRow(),
"€%s" % str.rjust("%0.2f" % total,17) )
self.canvas.drawString(self.getCol(), self.nextRow(),
"%0.2f%% BTW" % (self.invoice.get_vat() * 100))
self.canvas.drawString(435, self.getRow(),
"€%s" % str.rjust("%0.2f" % (total*self.invoice.get_vat()),17))
total = total + (total*self.invoice.get_vat())
self.nextRow(10)
self.canvas.line(435,self.getRow(),505,self.getRow())
self.nextRow(20)
self.canvas.setFont( 'Helvetica-Bold', 10 )
self.canvas.drawString(self.getCol(), self.getRow(), "Totaal" )
self.canvas.drawString(435, self.getRow(),
"€%s" % str.rjust("%0.2f" % total,17))
def getHeader( self ):
r,g,b = (0.4, 0.7, 0.3)
width, height = self.size
font, size = self.default_font
if self.invoice.company.logo:
image = canvas.ImageReader(StringIO.StringIO(self.invoice.company.logo))
self.canvas.drawImage( image, 435, height-180, 130, 165 )
self.canvas.setFont( '%s-Bold' % font, 16 )
self.canvas.drawString( 435, height - 265, 'Factuur' )
self.canvas.setFont( font, size )
self.setCol(80)
self.setRow(140)
customer = self.invoice.customer
self.canvas.drawString(self.getCol(), self.getRow(),
"%s %s" % (customer.firstname, customer.surname) );
self.nextRow()
address = customer.address.splitlines()
for line in address:
self.canvas.drawString( self.getCol(), self.getRow(), line.strip() );
self.nextRow()
# plaats, datum
self.setRow(260)
self.canvas.setFillColorRGB(r,g,b);
self.canvas.drawString( self.getCol(), self.getRow(), "plaats, datum" )
self.canvas.drawString( self.getCol()+200, self.getRow(), "factuur nummer" )
self.nextRow()
self.canvas.setFillColorRGB(0,0,0);
billed = datetime.now()
if self.invoice.billed:
billed = self.invoice.billed
self.canvas.drawString( self.getCol(), self.getRow(),
"%s, %s" % (self.invoice.company.city, billed.strftime('%e %B %Y')) )
self.canvas.drawString( self.getCol()+200, self.getRow(), "%s" % self.invoice.billing_number() )
self.nextRow()
self.nextRow()
self.canvas.setFillColorRGB(r,g,b);
self.canvas.drawString( self.getCol(), self.getRow(), "betreft" )
self.nextRow()
self.canvas.setFillColorRGB(0,0,0);
self.canvas.drawString( self.getCol(), self.getRow(), self.invoice.description )
#
#self.canvas.drawString(self.getCol(), self.getRow(),
# "%s, %s" % ( self.invoice.company.city, self.invoice.billing_date(format)) )
#
def getBillingDate(self, format="%e %B %Y"):
self.canvas.drawString(self.getCol(), self.getRow(),
"%s, %s" % ( self.invoice.company.city, self.invoice.billing_date(format)) )
self.canvas.drawString(self.getCol(), self.getRow(),
"F-%s" % "HOEPLA" )
#self.canvas.drawString(self.getCol()+200, self.getRow(),
# "F-%s" % self.invoice.billing_number() )
self.nextRow()
self.nextRow()
self.canvas.setFillColorRGB(r,g,b);
self.canvas.drawString(self.getCol(), self.getRow(), "betreft" )
self.nextRow()
self.canvas.setFillColorRGB(0,0,0);
self.canvas.drawString(self.getCol(), self.getRow(), "Hosting" )
self.nextRow(45)
def getFooter( self ):
#w, height = self.size
self.canvas.setFont( 'Helvetica-Bold', 9 )
self.setRow( 780 )
self.setCol( 80 )
lines = self.invoice.company.footer.splitlines() #str.split( self.invoice.footer, "\n")
for line in lines:
self.canvas.drawString( self.getCol(), self.getRow(), str.center( str(line), 100 ));
self.nextRow()
def getCol(self):
return self.col
def setCol(self,num):
self.col = num
return self.col
def getRow(self):
return self.row
def setRow( self, num ):
width, height = self.size
self.row = height - num
return self.row
def nextRow(self, increase=15):
    """Advance the cursor down the page by *increase* points; return the new row."""
    self.row -= increase
    return self.row
| abelenki/Billy | common/render.py | render.py | py | 6,375 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "reportlab.pdfgen.canvas",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "reportlab.lib.pagesizes.A4",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "reportlab.pdfgen.canvas.Canvas",
"line_number": 37,
"usage_type": "call"
},
{
... |
15274724012 | """Session helper."""
import traceback
from functools import wraps
from ilswbot.db import get_session
def job_session_wrapper():
    """Create a session and handle exceptions for jobs.

    The decorated job receives ``(context, session)``.  The session is
    committed on success, failures are printed, and the session is always
    closed afterwards.
    """
    def real_decorator(func):
        """Parametrized decorator closure."""
        @wraps(func)
        def wrapper(context):
            session = get_session()
            try:
                func(context, session)
                session.commit()
            # Bug fix: a bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit, making the bot impossible to stop cleanly.
            except Exception:
                traceback.print_exc()
            finally:
                session.close()
        return wrapper
    return real_decorator
def session_wrapper(send_message=True):
    """Allow specification whether a debug message should be sent to the user.

    The decorated handler receives ``(bot, update, session)``; the session is
    committed on success and released afterwards.
    """
    def real_decorator(func):
        """Create a database session and handle exceptions."""
        @wraps(func)
        def wrapper(update, context):
            # Bug fix: guard before opening a session — the original created
            # the session first and leaked it when update.message was None.
            if update.message is None:
                return
            session = get_session()
            try:
                func(context.bot, update, session)
                session.commit()
            # Bug fix: bare `except:` also caught KeyboardInterrupt/SystemExit.
            except Exception:
                if send_message:
                    context.bot.sendMessage(
                        chat_id=update.message.chat_id,
                        text='An unknown error occurred.',
                    )
                traceback.print_exc()
            finally:
                # NOTE(review): the job wrapper calls session.close();
                # .remove() only exists on a scoped_session — confirm what
                # get_session() actually returns.
                session.remove()
        return wrapper
    return real_decorator
| Nukesor/ilswbot | ilswbot/session.py | session.py | py | 1,464 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "ilswbot.db.get_session",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "traceback.print_exc",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ilswbot.db... |
3275459846 | from typing import List, Optional, Set
from PyQt5 import QtCore, QtGui
from commandbar.api import cmdutils
from mainwindow.canvas import CanvasContentManager, Canvas, CanvasPainter
from mainwindow.painters.painter import CustomPainter
from mainwindow.painters import tools
from commandbar.utils import objreg
ANNOTATE_PAINTER = 'annotate-painter'
ANNOTATIONS = 'annotations'
@cmdutils.register(name='annotate', instance='canvas', scope='canvas')
def annotate(self: Canvas):
    """Switch the canvas to the annotation painter, creating it on first use."""
    registry = self.registry
    if ANNOTATIONS not in registry:
        self.add_content_manager(ANNOTATIONS, AnnotationContentManager(self))
    painter_table = registry['painters']
    painter = painter_table.get(ANNOTATE_PAINTER)
    if painter is None:
        painter = AnnotatePainter(self.qpainter)
        painter_table[ANNOTATE_PAINTER] = painter
    self.set_painter(painter)
class AnnotationContentManager(CanvasContentManager):
    """Stores the line segments drawn on a canvas by the annotate painter."""

    def __init__(self, canvas: Canvas) -> None:
        super().__init__(canvas)
        # Every completed drag segment is kept so the canvas can be repainted.
        self.lines: List[QtCore.QLine] = []
class AnnotatePainter(CanvasPainter):
    """Freehand annotation painter: left-click drags draw colored segments."""

    def __init__(self, painter: CustomPainter) -> None:
        super().__init__(painter)
        self.content: AnnotationContentManager = objreg.get(
            'annotations', scope='canvas', canvas='current'
        )
        self.pen, self.brush = tools.make_pen_and_brush('white')
        # No segment can be drawn until the first press records a start point.
        self.last_point: Optional[QtCore.QPoint] = None

    def on_mouse_press(self, event: QtGui.QMouseEvent):
        """Remember where the drag started."""
        self.last_point = event.pos()

    def on_mouse_left_click_drag(self, event: QtGui.QMouseEvent):
        """Draw a segment from the previous point to the cursor and record it."""
        point = event.pos()
        self.painter.set(self.pen, self.brush)
        self.painter.drawLine(self.last_point, point)
        self.content.lines.append(QtCore.QLine(self.last_point, point))
        self.last_point = point

    @cmdutils.register(name='set-color', instance='annotate-painter', scope='canvas')
    def set_color(self, color: str):
        """Change the pen/brush color used for subsequent annotations."""
        qcolor = QtGui.QColor(color)
        self.pen = QtGui.QPen(qcolor)
        self.brush = QtGui.QBrush(qcolor, QtCore.Qt.SolidPattern)
{
"api_name": "mainwindow.canvas.Canvas",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "commandbar.api.cmdutils.register",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "commandbar.api.cmdutils",
"line_number": 15,
"usage_type": "name"
},
{
... |
13596239415 | import json
from flask import Blueprint, jsonify
from redis import Redis
# Blueprint under which the queue endpoints are registered by the app factory.
redis_endpoint = Blueprint("redis_endpoint", __name__)
# Shared Redis client; "redis-container" is presumably the docker-compose
# service name -- confirm against the deployment config.
r = Redis(host="redis-container", port=6379)
# Endpoint
def ping_service():
    """Health check: report whether the Redis server answers a PING."""
    try:
        if r.ping():
            response = jsonify({'Connection': "OK"})
            response.status_code = 200
            return response
        response = jsonify({'Connection': "NOT OK"})
        response.status_code = 500
        return response
    except Exception as e:
        # Bug fix: jsonify(obj, 404) serialized a JSON *array* with HTTP 200;
        # a (body, status) tuple lets Flask set the status code.
        return jsonify({'message': str(e)}), 404
def validatePush(data_object):
    """Return True when *data_object* carries a string 'msg' payload."""
    # isinstance instead of a type() comparison: idiomatic, and it also
    # accepts str subclasses.
    return 'msg' in data_object and isinstance(data_object['msg'], str)
# Endpoint
def push_service(data):
    """Validate *data* and push its 'msg' payload onto the Redis queue."""
    try:
        if validatePush(data_object=data):
            push_redis(r, "Queue", data["msg"])
            response = jsonify({'status': 'ok'})
            response.status_code = 200
            return response
        response = jsonify({"message": "invalid message input"})
        response.status_code = 400
        return response
    except Exception as e:
        # Bug fix: jsonify(obj, 404) serialized a JSON array with HTTP 200;
        # return a (body, status) tuple instead.
        return jsonify({'message': str(e)}), 404
def push_redis(redis: Redis, key, msg):
    """Push *msg* onto the head of the Redis list *key* (LPUSH) and return its result."""
    return redis.lpush(key, msg)
# Endpoint
def pop_service():
    """Pop the head message from the queue, or report that it is empty."""
    try:
        if count_redis(r, 'Queue') != 0:
            deleted = pop_redis(r, "Queue")
            response = jsonify({"message": deleted})
            response.status_code = 200
            return response
        response = jsonify({"message": "Queue is empty"})
        response.status_code = 400
        return response
    except Exception as e:
        # Bug fix: jsonify(obj, 404) serialized a JSON array with HTTP 200;
        # return a (body, status) tuple instead.
        return jsonify({'message': str(e)}), 404
def pop_redis(redis: Redis, key):
    """Pop the head element of list *key* (LPOP) and decode it as UTF-8.

    NOTE(review): if the list is empty lpop returns None and .decode raises
    AttributeError; callers currently guard with count_redis first — confirm
    that is always the case.
    """
    return redis.lpop(key).decode("utf-8")
# Endpoint
def count_service():
    """Report the current length of the Redis queue."""
    try:
        response = jsonify({"status": "ok", "count": count_redis(r, "Queue")})
        response.status_code = 200
        return response
    except Exception as e:
        # Bug fix: jsonify(obj, 404) serialized a JSON array with HTTP 200;
        # return a (body, status) tuple instead.
        return jsonify({'message': str(e)}), 404
def count_redis(redis: Redis, key):
    """Return the length of the Redis list *key* (LLEN)."""
    return redis.llen(key)
| henry-geary/JwtRestApiFlask | Services/queue.py | queue.py | py | 2,140 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "redis.Redis",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_num... |
14111108103 |
# This script creates TFRecord files for the testing data set.
import tensorflow as tf
import cv2
import glob2
import math
def _bytes_feature(value):
    """Wrap raw bytes *value* in a tf.train.Feature for Example serialization."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
# Path to the directory containing test TIFF images.
image_paths = glob2.glob('../test-tif-v2/*')
temp = image_paths[0].split('/')
print(temp)
# Path for the directory to store TFRecord files for test data.
tfrecord_path_prefix = '../tfrecords/test/'
num_shards = 6
records_per_file = math.ceil(len(image_paths)/num_shards)
# One output path per shard; the file name encodes the record range it holds,
# with the last shard taking the remainder of the list.
tfrecord_paths = [tfrecord_path_prefix+'planet-'+str(i*records_per_file)+'-'+str((i+1)*records_per_file)+'.tfrecord'
                  if i!=num_shards-1
                  else tfrecord_path_prefix+'planet-'+str(i*records_per_file)+'-'+str(len(image_paths))+'.tfrecord'
                  for i in range(num_shards)]
print(tfrecord_paths)
tfrecord_writer = None
num_processed = 0
# NOTE(review): indentation was reconstructed from a flattened source; the
# writer is assumed to be opened, filled and closed once per shard — confirm.
for i in range(num_shards):
    tfrecord_writer = tf.io.TFRecordWriter(tfrecord_paths[i])
    image_paths_shard = image_paths[i*records_per_file: (i+1)*records_per_file] if i!=num_shards-1 \
        else image_paths[i*records_per_file: len(image_paths)]
    for image_path in image_paths_shard:
        image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
        # Scale pixel values into [0, 1] as float32 before serializing.
        image = cv2.normalize(image, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
        #encode
        image_string = image.tostring()
        feature_dict = {'image_string':_bytes_feature(image_string)}
        example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
        tfrecord_writer.write(example.SerializeToString())
    num_processed = num_processed + len(image_paths_shard)
    print('Processed: '+str(num_processed)+ ' files.')
    tfrecord_writer.close()
| aayushARM/planet-cv | create_tfrecord_test.py | create_tfrecord_test.py | py | 1,821 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tensorflow.train.Feature",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.BytesList",
"line_number": 10,
"usage_type": "call"
},
{
"api_nam... |
17453309279 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 12 15:04:07 2019
@author: admin1
"""
from flask import Flask, render_template, request
import sqlite3 as sql
from datetime import datetime
import pandas as pd
from matplotlib import pyplot as plt
import io
import re
import base64
app = Flask(__name__)
def pie_chart():
    """Build the machine-status pie chart from the latest MOTORVIB readings.

    Reads the 80 most recent rows, keeps the newest row per pump tag,
    classifies each as normal / alert high / danger high using fixed
    vibration thresholds, and returns the chart as a base64 data URI.
    """
    con = sql.connect('database_vib3.db')
    df = pd.read_sql_query('SELECT * FROM MOTORVIB ORDER BY DATETIME(date) DESC LIMIT 80',con)
    tags = ['220-PM-1A','220-PM-1B','220-PM-2A','220-PM-2B','220-PM-3A']
    series_list = []
    # Newest reading per tag (rows are already sorted newest-first).
    for tag in tags:
        df1 = df[df['tag']==tag].iloc[0]
        series_list.append(df1)
    df_series = pd.DataFrame(series_list)
    df2 = df_series
    # Classify each pump. NOTE(review): the thresholds (velocity 6/3.25,
    # envelope & acceleration 5/3) are hard-coded — confirm against the
    # applicable vibration standard.
    for i in df2.index:
        if df2['NDE_V_VEL'][i]<6 and df2['NDE_H_VEL'][i]<6 and df2['NDE_H_ENV'][i]<5 and df2['NDE_H_ACC'][i]<5 and df2['DE_V_VEL'][i]<6 and df2['DE_H_VEL'][i]<6 and df2['DE_H_ENV'][i]<5 and df2['DE_H_ACC'][i]<5:
            if df2['NDE_V_VEL'][i]<3.25 and df2['NDE_H_VEL'][i]<3.25 and df2['NDE_H_ENV'][i]<3 and df2['NDE_H_ACC'][i]<3 and df2['DE_V_VEL'][i]<3.25 and df2['DE_H_VEL'][i]<3.25 and df2['DE_H_ENV'][i]<3 and df2['DE_H_ACC'][i]<3:
                df2.at[i,'STATUS'] = 'normal'
            else :
                df2.at[i,'STATUS'] = 'alert high'
        else :
            df2.at[i,'STATUS'] = 'danger high'
    df3 = df2
    normal = len(df3[df3['STATUS']=='normal'])
    alert_high = len(df3[df3['STATUS']=='alert high'])
    danger_high = len(df3[df3['STATUS']=='danger high'])
    labels = ['Normal','Alert High','Danger High']
    sizes = [normal, alert_high, danger_high]
    colors = ['#ff9999','#66b3ff','#99ff99','#ffcc99']
    fig1, ax1 = plt.subplots()
    patches, texts, autotexts = ax1.pie(sizes, colors = colors, labels=labels, autopct='%1.1f%%', startangle=90)
    for text in texts:
        text.set_color('grey')
    for autotext in autotexts:
        autotext.set_color('grey')
    # Equal aspect ratio ensures that pie is drawn as a circle
    ax1.axis('equal')
    plt.tight_layout()
    # Render to an in-memory buffer and embed the image as a base64 data URI.
    img = io.BytesIO()
    plt.savefig(img, format='jpg')
    img.seek(0)
    graph_url = base64.b64encode(img.getvalue()).decode()
    plt.close()
    return 'data:image/png;base64,{}'.format(graph_url)
@app.route('/')
def home():
    """Render the dashboard page with the machine-status pie chart."""
    chart = pie_chart()
    return render_template('home.html', graph1=chart)
@app.route('/student')
def student():
    """Render the data-entry form page."""
    page = render_template('student.html')
    return page
@app.route('/addrec',methods = ['POST', 'GET'])
def addrec():
    """Insert a vibration reading posted from the form into MOTORVIB.

    Bug fixes: `con` was referenced in the except clause before it was ever
    bound (NameError if reading the form failed), and `con.close()` sat after
    a `return` inside `finally`, so it was unreachable and the connection
    leaked.
    """
    if request.method == 'POST':
        con = None
        try:
            tag = request.form['tag']
            nde_v_vel = request.form['nde_v_vel']
            nde_h_vel = request.form['nde_h_vel']
            nde_h_env = request.form['nde_h_env']
            nde_h_acc = request.form['nde_h_acc']
            de_v_vel = request.form['de_v_vel']
            de_h_vel = request.form['de_h_vel']
            de_h_env = request.form['de_h_env']
            de_h_acc = request.form['de_h_acc']
            rekom = request.form['rekom']
            date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            con = sql.connect("database_vib3.db")
            cur = con.cursor()
            cur.execute("INSERT INTO motorvib (date,tag, NDE_V_VEL, NDE_H_VEL, NDE_H_ENV, NDE_H_ACC, DE_V_VEL, DE_H_VEL, DE_H_ENV, DE_H_ACC, REKOMENDASI) VALUES (?,?,?,?,?,?,?,?,?,?,?)",(date,tag, nde_v_vel, nde_h_vel, nde_h_env, nde_h_acc, de_v_vel, de_h_vel, de_h_env, de_h_acc,rekom) )
            con.commit()
            msg = "Record successfully added"
        except Exception:
            # con is still None if reading the form or connecting failed.
            if con is not None:
                con.rollback()
            msg = "error in insert operation"
        finally:
            if con is not None:
                con.close()
        return render_template("result.html",msg = msg)
@app.route('/list')
def list():
    """Show every stored MOTORVIB row."""
    con = sql.connect("database_vib3.db")
    try:
        con.row_factory = sql.Row
        cur = con.cursor()
        cur.execute("select * from motorvib")
        rows = cur.fetchall()
        return render_template("list.html",rows = rows)
    finally:
        # Bug fix: the connection was never closed, leaking a handle per request.
        con.close()
if __name__ == '__main__':
    # debug=True enables the reloader/debugger; not suitable for production.
    app.run(debug = True)
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql_query",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
... |
34529181883 | import os
from knack.log import get_logger
from knack.util import CLIError
logger = get_logger(__name__)
def filter_by_git_diff(selected_modules, git_source, git_target, git_repo):
    """Drop modules under *git_repo* that show no diff between the two branches.

    Returns *selected_modules* unchanged when no git arguments are supplied.
    """
    if not any([git_source, git_target, git_repo]):
        return selected_modules
    if not all([git_target, git_repo]):
        raise CLIError('usage error: [--src NAME] --tgt NAME --repo PATH')
    changed_files = diff_branches(git_repo, git_target, git_source)
    changed_mods = summarize_changed_mods(changed_files)
    repo_prefix = str(os.path.abspath(git_repo)).lower()
    to_remove = {'mod': [], 'core': [], 'ext': []}
    for category in selected_modules:
        for mod_name, mod_path in selected_modules[category].items():
            inside_repo = mod_path.lower().startswith(repo_prefix)
            if inside_repo and mod_name in changed_mods:
                # The module changed in the diff, so keep it.
                continue
            # Outside the repo, or unchanged: filter it out.
            to_remove[category].append(mod_name)
    for category, names in to_remove.items():
        for mod_name in names:
            selected_modules[category].pop(mod_name)
    logger.info('Filtered out: %s', to_remove)
    return selected_modules
def summarize_changed_mods(files_changed):
    """Map changed file paths to the unique module names they belong to."""
    from azdev.utilities import extract_module_name
    mods = set()
    for changed_file in files_changed:
        try:
            mods.add(extract_module_name(changed_file))
        except CLIError:
            # some files aren't part of a module
            continue
    return list(mods)
def diff_branches(repo, target, source):
    """Return the list of file paths that differ between two branches of *repo*.

    When *source* is falsy the repo's current HEAD commit is used instead.
    """
    try:
        import git  # pylint: disable=unused-import,unused-variable
        import git.exc as git_exc
        import gitdb
    except ImportError as ex:
        raise CLIError(ex)
    from git import Repo
    try:
        git_repo = Repo(repo)
    except (git_exc.NoSuchPathError, git_exc.InvalidGitRepositoryError):
        raise CLIError('invalid git repo: {}'.format(repo))

    def resolve_commit(branch):
        try:
            return git_repo.commit(branch)
        except gitdb.exc.BadName:
            raise CLIError('usage error, invalid branch: {}'.format(branch))

    source_commit = resolve_commit(source) if source else git_repo.head.commit
    target_commit = resolve_commit(target)
    logger.info('Filtering down to modules which have changed based on:')
    logger.info('cd %s', repo)
    logger.info('git --no-pager diff %s..%s --name-only -- .\n', target_commit, source_commit)
    return [entry.b_path for entry in target_commit.diff(source_commit)]
def diff_branches_detail(repo, target, source):
    """Return the raw DiffIndex between *target* and *source* in *repo*.

    Unlike diff_branches (which returns just the changed paths), this returns
    git's Diff objects so callers can inspect each change in detail.
    NOTE(review): the original docstring claimed the result is limited to
    _params.py / commands.py / test_*.py, but no such filtering happens here —
    any filtering must be done by the caller.  This function also duplicates
    diff_branches almost entirely; consider consolidating.
    """
    try:
        import git  # pylint: disable=unused-import,unused-variable
        import git.exc as git_exc
        import gitdb
    except ImportError as ex:
        raise CLIError(ex)
    from git import Repo
    try:
        git_repo = Repo(repo)
    except (git_exc.NoSuchPathError, git_exc.InvalidGitRepositoryError):
        raise CLIError('invalid git repo: {}'.format(repo))
    def get_commit(branch):
        try:
            return git_repo.commit(branch)
        except gitdb.exc.BadName:
            raise CLIError('usage error, invalid branch: {}'.format(branch))
    # Default the source side to the current HEAD when not given.
    if source:
        source_commit = get_commit(source)
    else:
        source_commit = git_repo.head.commit
    target_commit = get_commit(target)
    logger.info('Filtering down to modules which have changed based on:')
    logger.info('cd %s', repo)
    logger.info('git --no-pager diff %s..%s --name-only -- .\n', target_commit, source_commit)
    diff_index = target_commit.diff(source_commit)
    return diff_index
| Azure/azure-cli-dev-tools | azdev/utilities/git_util.py | git_util.py | py | 4,005 | python | en | code | 71 | github-code | 1 | [
{
"api_name": "knack.log.get_logger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "knack.util.CLIError",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
4948004910 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Advent of code 2021 day 14 - https://adventofcode.com/2021/day/14
"""Goals:
1) What do you get if you take the quantity of the most common element and subtract the quantity
of the least common element?
2) Same as 1 but instead of 10 iterations, 40, which exponential growth makes
harder.
"""
# Programmed by CoolCat467
__title__ = 'Advent of Code 2021 - Day 14'
__author__ = 'CoolCat467'
__version__ = '0.0.0'
__ver_major__ = 0
__ver_minor__ = 0
__ver_patch__ = 0
from collections import Counter
def get_pairs(template):
    """Return every adjacent two-character pair of *template* as a tuple."""
    pairs = []
    for left, right in zip(template, template[1:]):
        pairs.append(left + right)
    return tuple(pairs)
def insert_pairs(template, rules):
    """Apply one round of pair-insertion rules to the polymer string."""
    chars = []
    for pair in get_pairs(template):
        chars.append(pair[0])
        inserted = rules.get(pair)
        if inserted is not None:
            chars.append(inserted)
    # The last character is never the left side of a pair, so add it back.
    chars.append(template[-1])
    return ''.join(chars)
def preform_steps(step_count, template, rules):
    """Run *step_count* insertion rounds and return the resulting polymer."""
    polymer = template
    step = 0
    while step < step_count:
        polymer = insert_pairs(polymer, rules)
        step += 1
    return polymer
def insert_pairs_counts(template, rules, types):
    """Apply one insertion round to pair counts in place.

    *template* maps letter pairs to occurrence counts and *types* maps single
    letters to counts; both Counters are mutated and returned.
    """
    snapshot = list(template.items())
    for pair, count in snapshot:
        if count == 0 or pair not in rules:
            continue
        inserted = rules[pair]
        types[inserted] += count
        # Splitting AB on C replaces `count` AB pairs with AC and CB pairs.
        template[pair[0] + inserted] += count
        template[inserted + pair[1]] += count
        template[pair] -= count
    return template, types
def preform_steps_counts(step_count, template, rules):
    """Run *step_count* insertion rounds on pair counts; return (pairs, letters)."""
    letter_counts = Counter(template)
    pair_counts = Counter(get_pairs(template))
    for _ in range(step_count):
        pair_counts, letter_counts = insert_pairs_counts(pair_counts, rules, letter_counts)
    return pair_counts, letter_counts
def run():
    """Solve both puzzle parts from adv14.txt and print the answers.

    Fixes: the redundant rfile.close() inside the `with` block is gone (the
    context manager already closes the file), and the large commented-out
    sample-data / superseded part-1 code was removed.
    """
    with open('adv14.txt', 'r', encoding='utf-8') as rfile:
        data = rfile.read().splitlines()
    # First line is the polymer template; the rules follow a blank line.
    template = data[0]
    pair_insert_rules = {}
    for item in data[2:]:
        key, value = item.split(' -> ')
        pair_insert_rules[key] = value
    # Part 1: 10 insertion steps.
    _, types = preform_steps_counts(10, template, pair_insert_rules)
    values = sorted(types.values())
    print(values[-1] - values[0])
    # Part 2: 40 steps — counting pairs keeps the exponential growth tractable.
    _, types = preform_steps_counts(40, template, pair_insert_rules)
    values = sorted(types.values())
    print(values[-1] - values[0])
if __name__ == '__main__':
    # Print the banner, then solve both parts.
    print(f'{__title__} v{__version__}\nProgrammed by {__author__}.')
    run()
| CoolCat467/Advent-Of-Code | 2021/adv14.py | adv14.py | py | 3,094 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.Counter",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 60,
"usage_type": "call"
}
] |
24877260133 | """pytest configuration."""
import pathlib
from typing import Generator, Optional, Tuple
import pytest
from sqlalchemy.orm import Session
from remote_command_server.database import Base, database_connection
@pytest.fixture()
def db() -> Generator[Session, None, None]:
    """Yield a session bound to a fresh in-memory test database.

    All tables are created, but they have no entries.
    """
    session: Optional[Session] = None
    try:
        connection = database_connection("sqlite:///:memory:")
        Base.metadata.create_all(bind=connection.engine)
        session = connection.LocalSession()
        yield session
    finally:
        if session:
            session.close()
@pytest.fixture()
def file_based_db(
    tmp_path: pathlib.Path,
) -> Generator[Tuple[Session, pathlib.Path], None, None]:
    """Yield (session, db_file) for a fresh test database in a temporary file.

    All tables are created, but they have no entries.
    """
    session: Optional[Session] = None
    db_file = tmp_path.absolute() / "test.sqlite3"
    try:
        connection = database_connection(f"sqlite:///{db_file}")
        Base.metadata.create_all(bind=connection.engine)
        session = connection.LocalSession()
        yield session, db_file
    finally:
        if session:
            session.close()
| saltastroops/remote-command-server | conftest.py | conftest.py | py | 1,287 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.Optional",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "remote_command_server.database.database_connection",
"line_number": 21,
"usage_type": "call"
},
... |
16120973803 | import pytest
import os
from conflow import from_env
from conflow.manager import Config
# Baseline configuration layer used by the merge tests below.
DEFAULT_SETTINGS = {
    'db': {
        'master': {
            'host': 'localhost',
            'port': 5432,
        },
        'slave': {
            'host': 'localhost',
            'port': 5433,
        }
    }
}
# Overriding layer with the same shape but different hosts, used to verify
# that a later merge takes precedence over earlier layers.
ENV_SETTINGS = {
    'db': {
        'master': {
            'host': 'env_master_host',
            'port': 5432,
        },
        'slave': {
            'host': 'env_slave_host',
            'port': 5433,
        }
    }
}
@pytest.fixture
def config():
    # Fresh, empty Config per test so merged layers don't leak between tests.
    return Config()
def test_config_first_merge(config):
    """The very first merge must initialize the internal layer."""
    config.merge(DEFAULT_SETTINGS)
    assert config.layer is not None
def test_config_merge(config):
    """A later merge must override values from earlier layers."""
    defaults = config.merge(DEFAULT_SETTINGS)
    assert defaults.layer.tree().db.master.host == 'localhost'
    overridden = defaults.merge(ENV_SETTINGS)
    assert overridden.layer.tree().db.master.host == 'env_master_host'
def test_config_get_attr(config):
    """Merged values are reachable through attribute access."""
    merged = config.merge(DEFAULT_SETTINGS)
    assert merged.db.master.host == 'localhost'
def test_config_get_item(config):
    """Merged values are reachable through item (subscript) access."""
    merged = config.merge(DEFAULT_SETTINGS)
    assert merged['db']['master']['host'] == 'localhost'
def test_config_with_froms(config):
    """Environment variables read via from_env override merged defaults.

    Bug fix: the original left APP_* variables set in os.environ, leaking
    state into any test that runs afterwards; they are now restored.
    """
    keys = ('APP_DB__MASTER__HOST', 'APP_DB__SLAVE__HOST')
    saved = {key: os.environ.get(key) for key in keys}
    os.environ['APP_DB__MASTER__HOST'] = 'env_host'
    os.environ['APP_DB__SLAVE__HOST'] = 'env_host'
    try:
        merged = config.merge(DEFAULT_SETTINGS).merge(from_env('APP'))
        assert merged.db.master.host() == 'env_host'
        assert merged.db.slave.host() == 'env_host'
    finally:
        for key, value in saved.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value
| singulared/conflow | tests/unit/manager/config_test.py | config_test.py | py | 1,513 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "conflow.manager.Config",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "os.environ... |
37232639874 | from config import *
import pygame
from bullet import Bullet
class Player(pygame.sprite.Sprite):
    """Animated side-scroller character used for both the player and enemies.

    Size, speed and boundary constants (PLAYER_SIZE, ENEMY_SPEED,
    SCREEN_HEIGHT, FLOOR, ROOF, BULLET_*) come from `config` via a star
    import.  NOTE(review): indentation of this class was reconstructed from a
    flattened source — confirm branch nesting in update()/update_animations().
    """
    def __init__(self, x, y, file_paths: list, lives,speed_x, sound_bullet, jumping_path, dead_path, character_type) -> None:
        """Load and scale all animation frames and initialize state.

        file_paths: paths of the running-animation frames.
        character_type: "player" or "enemy"; selects the movement logic.
        """
        pygame.sprite.Sprite.__init__(self)
        self.frame_index = 0
        # Load and scale every running-animation frame up front.
        self.file_paths = []
        for i in file_paths:
            image = pygame.image.load(i).convert_alpha()
            image = pygame.transform.scale(image, PLAYER_SIZE)
            self.file_paths.append(image)
        self.image = self.file_paths[self.frame_index]
        self.x = x
        self.y = y
        self.lives = lives
        self.rect = self.image.get_rect()
        self.rect.center = (x, y)
        self.speed_x = speed_x
        self.speed_y = 0
        self.sound_bullet = pygame.mixer.Sound(sound_bullet)
        self.run = True
        # Milliseconds between animation-frame advances.
        self.cooldown_animation = 80
        self.current_time_animations = pygame.time.get_ticks()
        self.jumping = False
        self.jumping_img = pygame.image.load(jumping_path).convert_alpha()
        self.jumping_img = pygame.transform.scale(self.jumping_img, PLAYER_SIZE)
        self.character_type = character_type
        self.death_img = pygame.image.load(dead_path).convert_alpha()
        self.death_img = pygame.transform.scale(self.death_img, PLAYER_SIZE)
        self.action = 0
        self.dead = False
    def update(self):
        """Advance the animation and move the sprite according to its type."""
        self.update_animations()
        if self.character_type == "enemy":
            if self.lives == 0:
                self.kill()
            # Enemies scroll right-to-left at a speed scaled by ENEMY_SPEED.
            self.rect.x -= self.speed_x * ENEMY_SPEED
            self.rect.y += self.speed_y
            # Clamp vertically so the sprite stays between floor and roof.
            if self.rect.bottom >= SCREEN_HEIGHT - FLOOR:
                self.rect.bottom = SCREEN_HEIGHT - FLOOR
            elif self.rect.top <= ROOF:
                self.rect.top = ROOF
        if self.character_type == "player":
            self.rect.x += self.speed_x
            # Stop the intro slide-in once the player reaches x == 180.
            if self.rect.x == 180:
                self.speed_x = 0
    def shoot(self, sprites, bullets):
        """Spawn a bullet at the sprite's right edge and play the shot sound."""
        # Create the projectile instance slightly below the mid-right point.
        bullet = Bullet(BULLET_PATH, BULLET_SIZE, (self.rect.midright[0], self.rect.midright[1] +5),BULLET_COLLITION, BULLET_SPEED)
        pygame.mixer.Sound.play(self.sound_bullet)
        sprites.add(bullet)
        bullets.add(bullet)
    def update_animations(self):
        """Pick the sprite image: jumping, running frame, or death image."""
        if self.jumping == True:
            self.image = self.jumping_img
        else:
            self.jumping = False
            self.image = self.file_paths[self.frame_index]
        # Advance the running animation once per cooldown interval.
        if pygame.time.get_ticks() - self.current_time_animations > self.cooldown_animation:
            self.current_time_animations = pygame.time.get_ticks()
            self.frame_index += 1
            if self.frame_index == len(self.file_paths):
                self.frame_index = 0
        # The death image overrides everything else.
        if self.dead == True:
            self.image = self.death_img
| Behrens0/BehrensTomasLab1AJuegoPython | source/PlayerClass.py | PlayerClass.py | py | 3,507 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.sprite",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pyga... |
5408299059 | import copy
import sys
# for linux env.
sys.path.insert(0, '..')
import time
import pickle
import argparse
from utils import *
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
import torch
import torch.nn.functional as F
import random
import pandas as pd
import json
import matplotlib.pyplot as plt
from model import lstm, ml
import itertools
import functools
from dataset_lstm import LSTM_Dataset
from Vocab import *
print = functools.partial(print, flush=True)
def parse_args():
    """Parse command-line arguments and derive runtime settings.

    Adds derived fields to the namespace: `cuda`, `device`, and
    `save_model_filename` (which encodes dataset and seed).
    NOTE(review): when --random_seed is negative, args.random_seed is set to a
    datetime object — downstream seeding (np.random.seed etc.) would reject
    that; confirm intended behavior.
    """
    parser = argparse.ArgumentParser(description='process parameters')
    # Input
    parser.add_argument('--dataset', type=str, choices=['apcd', 'hidd', 'khin'], default='khin')
    parser.add_argument("--random_seed", type=int, default=0)
    parser.add_argument('--feature_space', type=str, choices=['combined', 'local'], default='combined')
    parser.add_argument('--code_topk', type=int, default=300)
    # parser.add_argument('--run_model', choices=['LSTM', 'MLP'], default='MLP')
    # Deep PSModels
    parser.add_argument('--batch_size', type=int, default=256)  # 768) # 64)
    parser.add_argument('--learning_rate', type=float, default=1e-3)  # 0.001
    parser.add_argument('--weight_decay', type=float, default=1e-6)  # )0001)
    parser.add_argument('--epochs', type=int, default=10)  # 30
    # LSTM
    parser.add_argument('--diag_emb_size', type=int, default=128)
    # parser.add_argument('--med_emb_size', type=int, default=128)
    # parser.add_argument('--med_hidden_size', type=int, default=64)
    parser.add_argument('--diag_hidden_size', type=int, default=64)
    parser.add_argument('--lstm_hidden_size', type=int, default=100)
    # Output
    parser.add_argument('--output_dir', type=str, default='output/lstm/')
    args = parser.parse_args()
    # Modifying args: pick GPU when available.
    args.cuda = torch.cuda.is_available()
    args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if args.random_seed >= 0:
        rseed = args.random_seed
    else:
        from datetime import datetime
        rseed = datetime.now()
    args.random_seed = rseed
    # Model checkpoint path encodes the dataset and the seed.
    args.save_model_filename = os.path.join(args.output_dir, '{}/lstm_S{}.model'.format(args.dataset,
                                                                                        args.random_seed))
    check_and_mkdir(args.save_model_filename)
    return args
if __name__ == '__main__':
    # python main_lstm.py --dataset hidd --random_seed 0 2>&1 | tee log/lstm_hidd_r0.txt
    start_time = time.time()
    args = parse_args()
    print('args: ', args)
    print('random_seed: ', args.random_seed)
    print('device: ', args.device)
    # reproducibility: seed every RNG the pipeline touches
    np.random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.random_seed)
    random.seed(args.random_seed)
    torch.backends.cudnn.deterministic = True

    # ---- data loading ------------------------------------------------------
    print('Dataset:', args.dataset)
    encode2namefile = r'dataset/icd9encode2name.pkl'
    majordatafile = r'dataset/final_pats_1st_neg_triples_{}-{}_new_labeled.pkl'.format(args.dataset, 'icd9')
    with open(encode2namefile, 'rb') as f:
        dx_name = pickle.load(f)
    print('len(dx_name):', len(dx_name))
    with open(majordatafile, 'rb') as f:
        data_1st_neg = pickle.load(f)
    print('len(data_1st_neg):', len(data_1st_neg))
    if args.feature_space == 'combined':
        # Load combined features: union of the per-site selected vocabularies.
        with open('Pre_train/selected_features_apcd.obj', 'rb') as f:
            vocab_apcd = pickle.load(f)
        with open('Pre_train/selected_features_hidd.obj', 'rb') as f:
            vocab_hidd = pickle.load(f)
        with open('Pre_train/selected_features_khin.obj', 'rb') as f:
            vocab_khin = pickle.load(f)
        vocab_combined = copy.deepcopy(vocab_apcd)
        vocab_combined.extend_vocab(vocab_hidd)
        vocab_combined.extend_vocab(vocab_khin)
        print('Using combined feature space')
        my_dataset = LSTM_Dataset(data_1st_neg, diag_name=dx_name, diag_code_vocab=vocab_combined)
    else:
        print('Using local feature space, top k:', args.code_topk)
        my_dataset = LSTM_Dataset(data_1st_neg, diag_name=dx_name, diag_code_topk=args.code_topk)
    n_feature = my_dataset.DIM_OF_CONFOUNDERS
    feature_name = my_dataset.FEATURE_NAME
    print('n_feature: ', n_feature, ':')

    # ---- train / val / test split ------------------------------------------
    train_ratio = 0.7  # 0.5
    val_ratio = 0.1
    print('train_ratio: ', train_ratio,
          'val_ratio: ', val_ratio,
          'test_ratio: ', 1 - (train_ratio + val_ratio))
    dataset_size = len(my_dataset)
    indices = list(range(dataset_size))
    train_index = int(np.floor(train_ratio * dataset_size))
    val_index = int(np.floor(val_ratio * dataset_size))
    # data shuffle in here
    np.random.shuffle(indices)
    train_indices, val_indices, test_indices = indices[:train_index], \
                                               indices[train_index:train_index + val_index], \
                                               indices[train_index + val_index:]
    train_sampler = SubsetRandomSampler(train_indices)
    val_sampler = SubsetRandomSampler(val_indices)
    test_sampler = SubsetRandomSampler(test_indices)
    train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=args.batch_size, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(my_dataset, batch_size=args.batch_size, sampler=val_sampler)
    test_loader = torch.utils.data.DataLoader(my_dataset, batch_size=args.batch_size, sampler=test_sampler)
    data_loader = torch.utils.data.DataLoader(my_dataset, batch_size=args.batch_size, sampler=SubsetRandomSampler(indices))

    # ---- hyper-parameter grid search ---------------------------------------
    print("**************************************************")
    print("**************************************************")
    print('LSTM-Attention PS PSModels learning:')
    paras_grid = {
        'hidden_size': [32, 64],  # 64, 128
        'diag_hidden_size': [32, 64],
        'diag_embedding_size': [32, 64],
        'lr': [1e-3, 1e-4],
        'weight_decay': [1e-4, 1e-5,],
        'batch_size': [1024],  # 50
    }
    hyper_paras_names, hyper_paras_v = zip(*paras_grid.items())
    hyper_paras_list = list(itertools.product(*hyper_paras_v))
    print('Model lstm Searching Space N={}: '.format(len(hyper_paras_list)), paras_grid)
    best_hyper_paras = None
    best_model = None
    best_auc = float('-inf')
    best_model_epoch = -1
    best_result_9 = None
    best_result_95 = None
    results = []
    i = -1
    i_iter = -1
    for hyper_paras in tqdm(hyper_paras_list):
        i += 1
        hidden_size, diag_hidden_size, diag_embedding_size, lr, weight_decay, batch_size = hyper_paras
        print('In hyper-paras space [{}/{}]...'.format(i, len(hyper_paras_list)))
        print(hyper_paras_names)
        print(hyper_paras)
        train_loader_shuffled = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, sampler=train_sampler)
        print('len(train_loader_shuffled): ', len(train_loader_shuffled),
              'train_loader_shuffled.batch_size: ', train_loader_shuffled.batch_size)
        model_params = dict(
            diag_hidden_size=diag_hidden_size,  # 64
            hidden_size=hidden_size,  # 100,
            bidirectional=True,
            diag_vocab_size=len(my_dataset.diag_code_vocab),
            diag_embedding_size=diag_embedding_size,  # args.diag_emb_size, # 128
        )
        print('Model: LSTM')
        print(model_params)
        model = lstm.LSTMModel(**model_params)
        if args.cuda:
            model = model.to('cuda')
        print(model)
        # BUG FIX: use the grid-searched lr / weight_decay.  The original
        # passed args.learning_rate / args.weight_decay here, so the 'lr' and
        # 'weight_decay' dimensions of paras_grid had no effect at all.
        optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
        for epoch in tqdm(range(args.epochs)):
            i_iter += 1
            epoch_losses = []
            uid_list = []
            for confounder, labels, outcome, uid in train_loader_shuffled:
                model.train()
                # train IPW
                optimizer.zero_grad()
                uid_list.extend(uid)
                if args.cuda:  # confounder = (diag, med, sex, age)
                    for ii in range(len(confounder)):
                        confounder[ii] = confounder[ii].to('cuda')
                    # NOTE(review): `flag` is only defined on the CUDA path;
                    # running on CPU would raise NameError below -- confirm
                    # this script is CUDA-only.
                    flag = labels.to('cuda').float()
                treatment_logits, _ = model(confounder)
                loss = F.binary_cross_entropy_with_logits(treatment_logits, flag)
                loss.backward()
                optimizer.step()
                epoch_losses.append(loss.item())
            # just finished 1 epoch: evaluate on validation and test splits
            epoch_losses = np.mean(epoch_losses)
            auc_val, loss_val, Y_val, Y_pred_val, uid_val = transfer_data(model, val_loader, cuda=args.cuda,
                                                                          normalized=False)
            auc_test, loss_test, Y_test, Y_pred_test, uid_test = transfer_data(model, test_loader, cuda=args.cuda,
                                                                               normalized=False)
            results.append((i_iter, i, epoch, hyper_paras, auc_val, auc_test))
            print('', flush=True)
            print('HP-i:{}, epoch:{}, loss:{}, val-auc:{}, test-auc:{}'.format(
                i, epoch, epoch_losses, auc_val, auc_test)
            )
            print('Validation set, len:', len(Y_val))
            result_9_val = ml.MLModels._performance_at_specificity_or_threshold(Y_pred_val, Y_val, specificity=0.9)
            result_95_val = ml.MLModels._performance_at_specificity_or_threshold(Y_pred_val, Y_val, specificity=0.95)
            print('Test set, len:', len(Y_test))
            result_9_test = ml.MLModels._performance_at_specificity_or_threshold(Y_pred_test, Y_test, specificity=0.9)
            result_95_test = ml.MLModels._performance_at_specificity_or_threshold(Y_pred_test, Y_test, specificity=0.95)
            # Model selection on validation AUC only.
            if auc_val > best_auc:
                best_model = model
                best_hyper_paras = hyper_paras
                best_auc = auc_val
                best_model_epoch = epoch
                best_result_9 = result_9_val
                best_result_95 = result_95_val
                print('Save Best PSModel at Hyper-iter[{}/{}]'.format(i, len(hyper_paras_list)),
                      'Epoch: ', epoch, 'val-auc:', best_auc, "test-auc", auc_test)
                print(hyper_paras_names)
                print(hyper_paras)
                save_model(model, args.save_model_filename, model_params=model_params)

    # ---- final reporting ---------------------------------------------------
    col_name = ['i', 'ipara', 'epoch', 'paras', 'val_auc', 'test_auc']
    results = pd.DataFrame(results, columns=col_name)
    print('Model selection finished! Save Global Best PSModel at Hyper-iter [{}/{}], Epoch: {}'.format(
        i, len(hyper_paras_list), best_model_epoch), 'val-auc:', best_auc)
    print(hyper_paras_names)
    print(best_hyper_paras)
    results.to_csv(args.save_model_filename + '_ALL-model-select.csv')
    auc_test, loss_test, Y_test, Y_pred_test, uid_test = transfer_data(best_model, test_loader, cuda=args.cuda,
                                                                       normalized=False)
    print('Final results', 'loss_test', loss_test, "test-auc", auc_test)
    result_1 = ml.MLModels._performance_at_specificity_or_threshold(Y_pred_test, Y_test, specificity=0.9)
    print('......Results at specificity 0.95:')
    result_2 = ml.MLModels._performance_at_specificity_or_threshold(Y_pred_test, Y_test, specificity=0.95)
    df1 = pd.DataFrame([result_1 + (best_hyper_paras,), result_2 + (best_hyper_paras,)],
                       columns=["AUC", "threshold", "Specificity", "Sensitivity/recall", "PPV/precision",
                                "n_negative", "n_positive", "precision_recall_fscore_support", 'best_hyper_paras'],
                       index=['r_9', 'r_95'])
    df1.to_csv(os.path.join(os.path.dirname(args.save_model_filename),
                            'test_results_{}r{}.csv'.format('lstm', args.random_seed)))
    print('Done! Total Time used:', time.strftime("%H:%M:%S", time.gmtime(time.time() - start_time)))
| calvin-zcx/hidd-sui | main_lstm.py | main_lstm.py | py | 13,250 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.insert",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "functools.partial",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser... |
7469955324 | '''
Mini-Project1 - COMP 551 - Winter 2019
Mahyar Bayran
Luis Pinto
Rebecca Salganik
'''
import json # we need to use the JSON package to load the data, since the data is stored in JSON format
import numpy as np
import matplotlib.pyplot as pt
from proj1_task1 import splitData
from proj1_task2 import closed_form
import time
with open("proj1_data.json") as fp:
data = json.load(fp)
def bias(x):
    """Append a column of ones (the bias/intercept term) to the feature matrix x."""
    intercept = np.ones(len(x))
    return np.column_stack((x, intercept))
def error_print(y_train,y_val,):
    """Print the train/validation mean-squared errors for the given predictions.

    y_train / y_val are the *predicted* values; the ground-truth targets are
    read from the module-level globals y_training and y_validation.
    NOTE(review): relying on globals makes this fragile -- consider passing
    the targets explicitly.
    """
    error_training = np.square(np.subtract(y_train,y_training)).mean()
    error_validation = np.square(np.subtract(y_val, y_validation)).mean()
    print('The mean-squared error on the training set is:', error_training)
    print('The mean-squared error on the validation set is:', error_validation)
#for Task 3.2
# Build the training split (examples 0-9999) for the three feature sets and
# append a bias column to each design matrix.
[x1_t,x2_t,x3_t,y_training] = splitData(data,0,10000,'Task3.2')
x1training = bias(x1_t)
x2training = bias(x2_t)
x3training = bias(x3_t)
# Validation split: the next 1000 examples (10000-10999).
[x1_v, x2_v, x3_v, y_validation] = splitData(data,10000,11000,'Task3.2')
x1validation = bias(x1_v)
x2validation = bias(x2_v)
x3validation = bias(x3_v)
def task32(xt, xv, y):
    """Fit closed-form linear regression on (xt, y) and report train/val MSE.

    xt / xv are the (bias-augmented) train and validation design matrices.
    """
    weights = closed_form(xt, y)
    error_print(np.matmul(xt, weights), np.matmul(xv, weights))
print('Task 3.2: Linear regression using closed-form approach')
# Time and evaluate each of the three feature sets against the shared target.
start = time.time()
print('Errors for set with no text features:')
task32(x1training,x1validation,y_training)
end = time.time()
print('Time elapsed using no text features: ', end-start)
start = time.time()
print('Errors for set with top 60 words:')
task32(x2training,x2validation,y_training)
end = time.time()
print('Time elapsed using top 60 words: ', end-start)
start = time.time()
print('Errors for set with top 160 words:')
task32(x3training,x3validation,y_training)
end = time.time()
print('Time elapsed using top 160 words: ', end-start)
| luispintoc/Task-1 | proj1_task3.2.py | proj1_task3.2.py | py | 1,938 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.column_stack",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_numb... |
32810013472 | """
This code uses the onnx model to detect faces from live video or cameras.
"""
import os
import time
import random
import cv2
import numpy as np
import onnx
import sys
import pickle as pkl
sys.path.append("..")
from caffe2.python.onnx import backend
import vision.utils.box_utils_numpy as box_utils
from vision.ssd.config import fd_config
from vision.ssd.config.fd_config import define_img_size
import torch
# onnx runtime
import onnxruntime as ort
#input_img_size = 320
input_img_size = 640
define_img_size(input_img_size)
def predict(width, height, confidences, boxes, prob_threshold, iou_threshold=0.3, top_k=-1):
    """Turn raw SSD outputs into final face detections for one image.

    Args:
        width, height: original image size; boxes are rescaled from
            normalized [0, 1] coordinates to pixels.
        confidences: per-class scores; assumes a batch of 1, i.e. shape
            (1, num_priors, num_classes) -- the batch dim is dropped below.
        boxes: corner-form boxes; assumes shape (1, num_priors, 4).
        prob_threshold: minimum class score for a box to be kept.
        iou_threshold: IoU threshold used by hard NMS.
        top_k: keep at most this many boxes per class (-1 = unlimited).

    Returns:
        (boxes_int32, labels, probs); three empty arrays when nothing passes.
    """
    # Drop the batch dimension (only the first image is processed).
    boxes = boxes[0]
    confidences = confidences[0]
    picked_box_probs = []
    picked_labels = []
    # Class 0 is skipped -- presumably background; confirm against the model.
    for class_index in range(1, confidences.shape[1]):
        probs = confidences[:, class_index]
        mask = probs > prob_threshold
        probs = probs[mask]
        if probs.shape[0] == 0:
            continue
        subset_boxes = boxes[mask, :]
        # Append the score as a 5th column so NMS can sort/select by it.
        box_probs = np.concatenate([subset_boxes, probs.reshape(-1, 1)], axis=1)
        box_probs = box_utils.hard_nms(box_probs,
                                       iou_threshold=iou_threshold,
                                       top_k=top_k,
                                       )
        picked_box_probs.append(box_probs)
        picked_labels.extend([class_index] * box_probs.shape[0])
    if not picked_box_probs:
        return np.array([]), np.array([]), np.array([])
    picked_box_probs = np.concatenate(picked_box_probs)
    # Scale normalized corner coordinates back to pixel space.
    picked_box_probs[:, 0] *= width
    picked_box_probs[:, 1] *= height
    picked_box_probs[:, 2] *= width
    picked_box_probs[:, 3] *= height
    return picked_box_probs[:, :4].astype(np.int32), np.array(picked_labels), picked_box_probs[:, 4]
label_path = "../models/objdetect/voc-model-labels.txt"
onnx_path = "../models/onnx/sim.onnx"
class_names = [name.strip() for name in open(label_path).readlines()]
predictor = onnx.load(onnx_path)
onnx.checker.check_model(predictor)
onnx.helper.printable_graph(predictor.graph)
predictor = backend.prepare(predictor, device="CPU") # default CPU
ort_session = ort.InferenceSession(onnx_path)
input_name = ort_session.get_inputs()[0].name
result_path = "./detect_results"
threshold = 0.8
path = "./imgs"
sum = 0
if not os.path.exists(result_path):
os.makedirs(result_path)
listdir = os.listdir(path)
sum = 0
colors = pkl.load(open("pallete", "rb"))
for file_path in listdir:
img_path = os.path.join(path, file_path)
orig_image = cv2.imread(img_path)
image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)
#image = cv2.resize(image, (320, 240))
image = cv2.resize(image, (640, 480))
image_mean = np.array([127, 127, 127])
image = (image - image_mean) / 128
image = np.transpose(image, [2, 0, 1])
image = np.expand_dims(image, axis=0)
image = image.astype(np.float32)
# confidences, boxes = predictor.run(image)
time_time = time.time()
confidences, boxes = ort_session.run(None, {input_name: image})
############
boxes = box_utils.convert_locations_to_boxes(
boxes, fd_config.priors, fd_config.center_variance, fd_config.size_variance
#torch.from_numpy(boxes), fd_config.priors, fd_config.center_variance, fd_config.size_variance
)
boxes = box_utils.center_form_to_corner_form(boxes)
############
print("cost time:{}".format(time.time() - time_time))
boxes, labels, probs = predict(orig_image.shape[1], orig_image.shape[0], confidences, boxes, threshold)
for i in range(boxes.shape[0]):
box = boxes[i, :]
label = f"{class_names[labels[i]]}: {probs[i]:.2f}"
color = random.choice(colors)
c1 = (box[0], box[1])
c2 = (box[2], box[3])
cv2.rectangle(orig_image, c1, c2,color, 2)
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]
c2 = c1[0] + t_size[0] + 220, c1[1] + t_size[1] + 25
cv2.rectangle(orig_image, c1, c2,color, -1)
cv2.putText(orig_image, label, (c1[0], c1[1] + t_size[1] + 24), cv2.FONT_HERSHEY_PLAIN, 3, [225,255,255], 2);
cv2.imwrite(os.path.join(result_path, file_path), orig_image)
sum += boxes.shape[0]
print("sum:{}".format(sum))
| deep-phd/rfb-detector | test/detect_imgs_onnx_without_nms.py | detect_imgs_onnx_without_nms.py | py | 4,231 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "vision.ssd.config.fd_config.define_img_size",
"line_number": 26,
"usage_type": "call"
},
{
"api_nam... |
73737553955 | # -*- coding: utf-8 -*-
"""Config for the Trunk.
"""
__authors__ = "emenager, tnavez"
__contact__ = "etienne.menager@inria.fr, tanguy.navez@inria.fr"
__version__ = "1.0.0"
__copyright__ = "(c) 2020, Inria"
__date__ = "Jun 29 2022"
import sys
import pathlib
sys.path.insert(0, str(pathlib.Path(__file__).parent.absolute())+"/../")
sys.path.insert(0, str(pathlib.Path(__file__).parent.absolute()))
from Libraries.BaseConfig import BaseConfig
import Mesh.Constants as Const
class Config(BaseConfig):
    """Scene configuration for the two-finger soft robot."""

    def __init__(self):
        super().__init__("Finger")
        self.scene_name = "2Finger"
        self.n_robot = 2
        self.set_scene_config({"source": [-400.0, -50, 100],
                               "target": [30, -25, 100],
                               "goalPos": [0, Const.Height/2, -3.0*Const.Length]})

    def get_actuators_variables(self):
        """Per-cable actuation values used by the controller."""
        return {"cable1": [20, 0, 20],
                "cable2": [10, 0, 20]}

    def get_n_dt(self):
        return 30

    def get_n_eq_dt(self):
        return 2

    def get_trajectory(self):
        """Goal positions swept along the x axis in front of the fingers."""
        import numpy as np
        n_samples, max_x_pos = 5, 30
        start = np.array([-max_x_pos, Const.Height/2, -3.0*Const.Length])
        step = np.array([max_x_pos/n_samples, 0, 0])
        return [start + k*step for k in range(2*n_samples)]
| SofaDefrost/CondensedFEMModel | Models/2Finger/Config.py | Config.py | py | 1,372 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sys.path.insert",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.path.insert",
"lin... |
73011436194 | import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import torch
import json
import pycocotools._mask as _mask
import wandb
class DynamicsEvaluator():
    """Evaluates multi-step keypoint dynamics predictions on CLEVRER videos.

    Per horizon step, success for a frame is the fraction of ground-truth
    objects that contain at least one predicted keypoint; per-video means are
    accumulated and written to an Excel report (and logged to wandb).
    """

    def __init__(self, config):
        # config keys used: 'interaction_history', 'num_keypoints', 'width',
        # 'height', 'prediciton_horizon' (key name kept as spelled upstream).
        # list of the annotation files for the whole dataset
        proposals=os.listdir('CLEVRER/derender_proposals')
        # sort the annotations
        self.proposals=sorted([proposal for proposal in proposals])
        self.skip=config['interaction_history']
        self.num_keypoints=config['num_keypoints']
        self.width=config['width']
        self.height=config['height']
        self.horizon=config['prediciton_horizon']
        # One success-rate list per horizon step.
        # NOTE(review): 4 lists are allocated, but only self.horizon of them
        # are ever filled -- confirm horizon <= 4.
        self.success_rates=[]
        for i in range(4):
            self.success_rates.append([])

    def collect(self, video_idx, coords, status, predicted_coords, predicted_status):
        """Accumulate success rates for one video.

        coords/status: observed keypoint positions and activity flags;
        predicted_coords/predicted_status: model outputs per horizon step.
        All are torch tensors with a leading batch dim of 1 (dropped below);
        presumably coords is (1, frames, K, 2) -- TODO confirm against caller.
        """
        predicted_coords=predicted_coords.detach().cpu().numpy().astype(np.int32)[0]
        predicted_status=predicted_status.detach().cpu().numpy().astype(np.int32)[0]
        coords= coords.detach().cpu().numpy().astype(np.int32)[0]
        status=status.detach().cpu().numpy().astype(np.int32)[0]
        # for multistep prediction
        for h in range(self.horizon):
            # shift the prediction one time step to compare to the ground truth
            predicted_coords[:,:,h]=np.roll(predicted_coords[:,:,h],-(h+1), axis=0)
        with open('CLEVRER/derender_proposals/{0}'.format(self.proposals[video_idx])) as f:
            data=json.load(f)
        # objects in the scene, keyed by "color_material_shape"
        objects={}
        num_frames=len(data['frames'])-1
        # NOTE(review): these two array inits are dead stores -- both names
        # are rebound per-frame inside the loop below.
        num_objects=np.zeros(num_frames)
        detected_objects=np.zeros(num_frames)
        # one list of per-frame success values per horizon step
        success_rate=[]
        for i in range(self.horizon):
            success_rate.append([])
        # iterate over the frames
        for i,frame in enumerate(data['frames']):
            if i>=126:
                continue
            # get the number of objects
            num_objects=len(frame['objects'])
            detected_objects=np.zeros(self.horizon)
            # iterate over objects
            for obj in frame['objects']:
                # object name
                object_name="{0}_{1}_{2}".format(obj['color'], obj['material'], obj['shape'])
                # create an entry in the dictionary if it's not already there
                if object_name not in objects.keys():
                    # kp: keypoint/object association history (index 0 = most
                    # recent); pred: predicted association per horizon step
                    objects[object_name]={'kp':np.zeros((self.horizon+1,self.num_keypoints)), 'pred':np.zeros((self.horizon,self.num_keypoints))}
                # decode the segmentation mask for this object (time step t)
                mask=_mask.decode([obj['mask']])
                # update the keypoints in the object
                for k in range(self.num_keypoints):
                    # consider only active keypoints for predictions
                    if status[i,k,0]>0.9:
                        if i>self.skip and i+self.skip < num_frames:
                            for p in range(self.horizon):
                                x=predicted_coords[i,k,p,0]
                                y=predicted_coords[i,k,p,1]
                                # hit if the predicted pixel falls inside the mask
                                if x<self.width and y<self.height and mask[y,x]==1:
                                    objects[object_name]['pred'][p][k]=1
                                else:
                                    objects[object_name]['pred'][p][k]=0
                                # keep the hit only if this keypoint was on the
                                # object when the prediction was made (t-p)
                                objects[object_name]['pred'][p][k]*=objects[object_name]['kp'][p+1][k]
                    # update the keypoint/object association for time step t;
                    # compared against predictions in later frames
                    if coords[i,k,0]<self.width and coords[i,k,1]<self.height and mask[coords[i,k,1],coords[i,k,0]]==1:
                        objects[object_name]['kp'][0][k]=1
                    else:
                        objects[object_name]['kp'][0][k]=0
                # shift the kp history to make room for the next frame
                objects[object_name]['kp']=np.roll(objects[object_name]['kp'],1,axis=0)
                if i>self.skip:
                    for p in range(self.horizon):
                        # an object is detected if a prediction lies in it
                        if np.sum(objects[object_name]['pred'][p])>0:
                            detected_objects[p]+=1
            if i>self.skip+self.horizon and i+self.skip+self.horizon < num_frames:
                for p in range(self.horizon):
                    # success for this frame: share of objects hit by predictions
                    success_rate[p].append(detected_objects[p]/num_objects)
        for p in range(self.horizon):
            # mean success rate over all evaluated frames of this video
            mean_success_rate=np.array(success_rate[p]).mean()
            self.success_rates[p].append(mean_success_rate)

    def save_to_file(self, model_name=None):
        """Write per-video success rates and summary stats to results/<model_name>.xlsx,
        log both tables to wandb, and return the per-video DataFrame.

        NOTE(review): only horizon steps 1-3 are exported, regardless of
        self.horizon; ExcelWriter.save() was removed in pandas 2.x --
        close() alone suffices there.
        """
        # create a folder to store the dataset
        os.makedirs('results',exist_ok=True)
        file_path="results/{0}.xlsx".format(model_name)
        writer=pd.ExcelWriter(file_path, engine='openpyxl')
        data_frame=pd.DataFrame({"Success rate - 1 step prediction" : self.success_rates[0],
                                 "Success rate - 2 steps prediction" : self.success_rates[1],
                                 "Success rate - 3 steps prediction" : self.success_rates[2],
                                 })
        table=wandb.Table(dataframe=data_frame)
        wandb.log({"Results" : table})
        data_frame.to_excel(writer, sheet_name='per-video results', index=False)
        result_stats=data_frame.describe()
        table=wandb.Table(dataframe=result_stats)
        wandb.log({"Statistics over all videos" : table})
        result_stats.to_excel(writer, sheet_name='Statistics')
        writer.save()
        writer.close()
        return data_frame
| iROSA-lab/MINT | src/downstream/dynamics_evaluator.py | dynamics_evaluator.py | py | 6,425 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.int32",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "numpy.int32",
"line_... |
16878545135 | # -*- coding: utf-8 -*-
import datetime
from openerp import models, fields, api
from openerp.exceptions import Warning
from openerp.tools.translate import _
import logging
class hr_contract(models.Model):
    """Extension of hr.contract: adds a 'No Renewal' flag and scheduled
    e-mail reminders about contracts that are about to expire."""
    _inherit = "hr.contract"

    # When set, the contract is excluded from expiry reminder e-mails.
    no_renewal = fields.Boolean('No Renewal', default=False)

    @api.multi
    def get_email_to_send(self):
        """
        Get list emails of users in group HR officer.
        Returns a comma-separated string (with a trailing ', ' when
        non-empty); users without an e-mail are skipped.
        """
        group = self.env.ref('base.group_hr_user')
        mail_list = ''
        for user in group.users:
            mail_list += user.email and user.email + ', ' or ''
        return mail_list

    @api.multi
    def _get_contract_link(self):
        """
        Build a link to the contract form view (expects a single record).
        """
        param_obj = self.env['ir.config_parameter']
        base_url = param_obj.get_param('web.base.url', default='')
        # Example link: http://0.0.0.0:8069/?#id=4
        # &view_type=form&model=hr.contract&action=...
        act_window = self.env.ref('hr_contract.action_hr_contract')
        link = base_url + '/?#id=%s&view_type=form&model=hr.contract&action=%s'\
            % (self.id, act_window.id)
        return link

    @api.model
    def _get_expired_contracts_by_department(self, date):
        """
        Get the contracts which will be expired before the inputed date,
        without a next contract created and without the No Renewal checkbox
        set.  Returns {department_name: [row_dict, ...]}.
        """
        # NOTE(review): the date is interpolated with %-formatting; it is
        # generated internally (strftime) here, but a parameterized query
        # (cr.execute(sql, (date,))) would be safer.
        self._cr.execute("""
            SELECT con.id, emp.name_related as employee,
                con.name, con.date_end,
                dept.name as department, job.name as job
            FROM hr_contract con
            JOIN hr_employee emp ON con.employee_id = emp.id
            LEFT JOIN hr_department dept ON emp.department_id = dept.id
            LEFT JOIN hr_job job ON con.job_id = job.id
            WHERE con.date_end <= '%s'
            AND con.no_renewal = False
            AND NOT EXISTS(
                SELECT 1 FROM hr_contract
                WHERE date_start > con.date_end
                AND employee_id = con.employee_id
            )
            ORDER BY date_end
        """ % date)
        res = self._cr.dictfetchall()
        # Group the raw rows by department name.
        expired_contracts_department = {}
        for line in res:
            department = line['department'] or 'Undefined'
            if department not in expired_contracts_department:
                expired_contracts_department[department] = []
            expired_contracts_department[department].append(line)
        return expired_contracts_department

    @api.model
    def send_email_contract_end_next_x_days(self):
        """
        Send email weekly to remind about the contracts expiring within the
        next N days (N read from the 'contract_expiring_days' parameter,
        default 30).  Intended to be called by a scheduled action.
        """
        context = dict(self._context)
        param_obj = self.env['ir.config_parameter']
        days_param = param_obj.get_param('contract_expiring_days', default='30')
        try:
            next_x_date = (datetime.date.today()
                           + datetime.timedelta(int(days_param))).strftime('%Y-%m-%d')
        except Exception as exc:
            logging.error(exc)
            raise Warning(_('Wrong value defined in parameter expiring_days.'))
        email_data = self._get_expired_contracts_by_department(next_x_date)
        if not email_data:
            return True
        context.update({'email_data': email_data})
        template = self.env.ref(
            'trobz_hr_mail_contract_end.email_template_contract_end_next_x_days'
        )
        if not template:
            return True
        # send_mail needs a record id; any contract from the data will do
        # since the template renders the whole email_data from context.
        template.with_context(context).send_mail(
            email_data.values()[0][0]['id'], True
        )
        return True

    @api.model
    def send_email_contract_end(self):
        """
        Send email daily to remind about the contracts already expired.
        Intended to be called by a scheduled action.
        """
        context = dict(self._context)
        date = datetime.date.today().strftime('%Y-%m-%d')
        email_data = self._get_expired_contracts_by_department(date)
        if not email_data:
            return True
        context.update({'email_data': email_data})
        template = self.env.ref(
            'trobz_hr_mail_contract_end.email_template_contract_end'
        )
        if not template:
            return True
        first_contract_id = email_data.values()[0][0]['id']
        template.with_context(context).send_mail(
            first_contract_id, True
        )
        return True

    @api.multi
    def get_email_information(self):
        """
        Build the HTML body of the renewal-reminder e-mail: one section per
        department with a list of expiring contracts (called from templates).
        """
        email_data = self._context.get('email_data', False)
        # NOTE(review): this assignment is dead -- `department` is rebound
        # immediately by the loop below.
        department = email_data.keys()
        str_email_info = ''
        for department in email_data:
            str_email_info += """<p style="padding-top: 10px; padding-left: 20px;"> <b>Department %s</b>
            """ % department
            str_email_info += '<ul>'
            for contract in email_data[department]:
                link = self._get_contract_link()
                str_email_info += '<li>%s - <a href=\"%s\">%s</a> - %s</li>'\
                    % (contract['date_end'], link,
                       contract['name'], contract['job'] or 'Undefined Job')
            str_email_info += '</p>'
            str_email_info += '</ul>'
        return str_email_info

    @api.model
    def get_contract_expiring_days(self):
        """Return the 'contract_expiring_days' system parameter (default '30')."""
        param_obj = self.env['ir.config_parameter']
        return param_obj.get_param('contract_expiring_days', default='30')

# Legacy old-API style instantiation; harmless under the new API.
hr_contract()
| TinPlusIT05/tms | addons/app-trobz-hr/trobz_hr_mail_contract_end/model/hr_contract.py | hr_contract.py | py | 5,520 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "openerp.models.Model",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "openerp.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Boolean",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "openerp... |
41108550582 | import time
import pytest
from gitlabform.gitlabform import GitLabForm
from gitlabform.gitlabform.test import (
create_group,
create_project_in_group,
create_readme_in_project,
get_gitlab,
GROUP_NAME,
)
PROJECT_NAME = "archive_project"
GROUP_AND_PROJECT_NAME = GROUP_NAME + "/" + PROJECT_NAME
@pytest.fixture(scope="function")
def gitlab(request):
gl = get_gitlab()
create_group(GROUP_NAME)
create_project_in_group(GROUP_NAME, PROJECT_NAME)
create_readme_in_project(GROUP_AND_PROJECT_NAME) # in main branch
def fin():
gl.delete_project(GROUP_AND_PROJECT_NAME)
# TODO: find some smarter way to avoid 400 when trying to create project while it is still being deleted
time.sleep(15)
request.addfinalizer(fin)
return gl # provide fixture value
archive_project = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/archive_project:
project:
archive: true
"""
unarchive_project = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/archive_project:
project:
archive: false
"""
edit_archived_project = """
gitlab:
api_version: 4
# the project has to be configured as archived
# for other configs for it to be ignored
project_settings:
gitlabform_tests_group/archive_project:
project:
archive: true
group_settings:
gitlabform_tests_group:
files:
README.md:
overwrite: true
branches:
- main
content: |
Some other content that the default one
"""
class TestArchiveProject:
    """End-to-end checks for the `project: archive` setting."""

    @staticmethod
    def _apply(config_string, target):
        """Run gitlabform with the given config against the given project/group."""
        GitLabForm(config_string=config_string, project_or_group=target).main()

    @staticmethod
    def _is_archived(gitlab):
        return gitlab.get_project(GROUP_AND_PROJECT_NAME)["archived"]

    def test__archive_project(self, gitlab):
        self._apply(archive_project, GROUP_AND_PROJECT_NAME)
        assert self._is_archived(gitlab) is True

    def test__unarchive_project(self, gitlab):
        self._apply(archive_project, GROUP_AND_PROJECT_NAME)
        assert self._is_archived(gitlab) is True

        self._apply(unarchive_project, GROUP_AND_PROJECT_NAME)
        assert self._is_archived(gitlab) is False

    def test__dont_edit_archived_project(self, gitlab):
        self._apply(archive_project, GROUP_AND_PROJECT_NAME)
        assert self._is_archived(gitlab) is True

        # Running the group-level config must skip the archived project;
        # gitlabform would raise here if it tried to edit it.
        self._apply(edit_archived_project, GROUP_NAME)
| Pigueiras/gitlabform | gitlabform/gitlabform/test/test_archive_project.py | test_archive_project.py | py | 2,990 | python | en | code | null | github-code | 1 | [
{
"api_name": "gitlabform.gitlabform.test.GROUP_NAME",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "gitlabform.gitlabform.test.get_gitlab",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "gitlabform.gitlabform.test.create_group",
"line_number": 22,
... |
18941046063 | from time import sleep
from typing import List
from utils.PageObject import PageObject
PRODUCTS_LINK_SELECTOR = ".product .title > h2 > a"
class WallpapersPage(PageObject):
    """Page object for the wallpapers category listing (infinite scroll)."""

    page_url = "https://avi-home.co.il/product-category/wallpapers/"

    def scroll_down(self):
        """Jump the viewport to the bottom of the document."""
        self.driver.execute_script("window.scrollTo(0,document.body.scrollHeight);")

    def load_all_products(self, time_out=15, max_products=None):
        """Keep scrolling until no new products appear for `time_out` seconds
        (or until more than `max_products` are loaded); return the links."""
        idle_seconds = 0
        known_count = 0
        while True:
            current_count = len(self.getElements(PRODUCTS_LINK_SELECTOR))
            if current_count > known_count:
                # New items appeared: scroll further and reset the idle timer.
                self.scroll_down()
                known_count = current_count
                idle_seconds = 0
            else:
                sleep(0.1)
                idle_seconds += 0.1
                if idle_seconds >= time_out:
                    return self.products_list
            if max_products and current_count > max_products:
                return self.products_list

    @property
    def products_list(self) -> List[str]:
        """Hrefs of all product links currently present in the DOM."""
        return [element.get_attribute("href")
                for element in self.getElements(PRODUCTS_LINK_SELECTOR)]
| solomonBoltin/AviDesignScrapping | pages/WallpapersPage.py | WallpapersPage.py | py | 1,233 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils.PageObject.PageObject",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 36,
"usage_type": "name"
}
] |
24522986960 | # -*- coding: utf-8 -*-
import time
from fastapi import FastAPI,Request
from .routers import setu
from .db import on_shutdown, on_start
# Application assembly: create the FastAPI app, mount the setu router and
# register the database lifecycle hooks.
app = FastAPI()
app.include_router(setu.router)
app.on_event("startup")(on_start)
app.on_event("shutdown")(on_shutdown)
@app.middleware("http")
async def add_process_time_header(request: Request, call_next):
start_time = time.time()
response = await call_next(request)
process_time = time.time() - start_time
response.headers["X-Process-Time"] = str(process_time)
return response
| synodriver/asgi-server-benchmark | app/__init__.py | __init__.py | py | 542 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "routers.setu.router",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "routers.setu",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "db.on_start",
... |
33752525086 | # read the dataset
# divide it train test splet 60 to 40
# classfication using svm
# kernel linear and kerneal rbf
# acuuracy and presetion and f1 measure
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
df = pd.read_csv('./classficationData/classifier.csv') # dataframe
X = df.drop('outcome', 1)
y = df['outcome']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=10)
svc_model = SVC(C=.1, kernel='linear', gamma=1)
svc_model.fit(X_train, y_train)
prediction = svc_model.predict(X_test)
from sklearn.metrics import accuracy_score
acc = accuracy_score(y_test, prediction)
print("accuracy ", acc)
# precision
from sklearn.metrics import precision_score
pre = precision_score(y_test, prediction)
print("precision ", pre)
from sklearn.metrics import f1_score
f1 = f1_score(y_test, prediction)
print("f1-measure ", f1)
print("--------------------kernel rbf-------------------------------")
svc_model = SVC(kernel='rbf')
svc_model.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
acc = accuracy_score(y_test, prediction)
print("accuracy ", acc)
# precision
from sklearn.metrics import precision_score
pre = precision_score(y_test, prediction)
print("precision ", pre)
from sklearn.metrics import f1_score
f1 = f1_score(y_test, prediction)
print("f1-measure ", f1)
| tawfik-s/ML-Algo | Quiz/main.py | main.py | py | 1,381 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 16,
"usage_type": "call"
},
{
"api_name... |
19676075192 | from stl import STL, Signal
time_begin = 0 # global begin time
signal = Signal(py_dict={"0": {"content": {"x": 1, "y": 2}},
"1": {"content": {"x": 2, "y": 1}}})
#stl_spec = STL("G[0, 1](0 < x < 1)")
stl_eval = stl_spec.eval(time_begin, signal)
print()
#print("original STL expr: ")
#print(stl_spec)
print()
weakened_stl = stl_spec.weaken("ap-range", 2, 3)
#print(weakened_stl_spec_ap_range_1)
# G[0, 1](0-2 < x < 1+3)
# G[0, 1](-2 < x < 4)
print(weakened_stl.eval(time_begin, signal))
| sychoo/STL-API | stl/example/api/stl/weaken2.py | weaken2.py | py | 517 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "stl.Signal",
"line_number": 4,
"usage_type": "call"
}
] |
6493781224 | import socket
import struct
import time
import pytest
from brain_computer_interface.utils import Connection
_DATA = b'Hello, world!'
_HOST = '127.0.0.1'
_PORT = 1234
@pytest.fixture
def server():
server = socket.socket()
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('0.0.0.0', _PORT))
server.listen(1000)
try:
time.sleep(0.1)
yield server
finally:
server.close()
@pytest.fixture
def connection(server):
sock = socket.socket()
sock.connect((_HOST, _PORT))
return Connection(sock)
def test_repr(connection: Connection):
_, other_port = connection._socket.getsockname()
assert connection.__repr__() == \
f'<Connection from {_HOST}:{other_port} to {_HOST}:{_PORT}>'
def test_close(connection: Connection):
assert not connection._socket._closed
connection.close()
assert connection._socket._closed
def test_send(server):
connection = Connection.connect(_HOST, _PORT)
with connection:
client, _ = server.accept()
connection.send(_DATA)
chunks = []
while True:
chunk = client.recv(4096)
if not chunk:
break
chunks.append(chunk)
assert b''.join(chunks) == _DATA
def test_send_length_follow_by_value(server):
connection = Connection.connect(_HOST, _PORT)
with connection:
client, _ = server.accept()
connection.send_length_follow_by_value(_DATA)
chunks = []
while True:
chunk = client.recv(4096)
if not chunk:
break
chunks.append(chunk)
assert b''.join(chunks)[Connection.length_size:] == _DATA
def test_receive(server):
with Connection.connect(_HOST, _PORT) as connection:
client, _ = server.accept()
client.sendall(_DATA)
assert connection.receive(len(_DATA)) == _DATA
def test_receive_length_follow_by_value(server):
with Connection.connect(_HOST, _PORT) as connection:
client, _ = server.accept()
client.sendall(struct.pack(Connection.length_format, len(_DATA)))
client.sendall(_DATA)
assert connection.receive_length_follow_by_value() == _DATA
def test_incomplete_data(server, connection: Connection):
with connection:
client, _ = server.accept()
client.close()
with pytest.raises(RuntimeError, match='Incomplete data'):
connection.receive(1)
| sahargavriely/the-unbearable-ease-of-programming | tests/test_connection.py | test_connection.py | py | 2,426 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "socket.socket",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "socket.SOL_SOCKET",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "socket.SO_REUSEADDR",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "time.sl... |
33949911726 | # needed imports
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
import scipy.cluster.hierarchy as sch
import numpy as np
# This determines the clustering method. Check https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html#scipy.cluster.hierarchy.linkage for details
method = 'ward' # average, ward, single...
class cluster():
def __init__(self, cluster, projections):
self.cluster = cluster
self.bundles = []
self.n = 0
self.summation = []
for factor in range(len(projections)):
self.summation.append([])
for projection in range(len(projections[factor])):
self.summation[factor].append(0)
def add_bundle(self, bundle):
self.bundles.append(bundle)
self.n += 1
# adds 1 to one projection - specified in bundle - for each factor
for i in range(len(self.summation)):
self.summation[i][bundle[i]-1] += 1
def get_mixture(self):
mixture = []
for factor in range(len(self.summation)):
mixture.append([])
for projection in range(len(self.summation[factor])):
mixture[factor].append(
self.summation[factor][projection]/self.n)
return mixture
def create_distance_matrix(bundles):
# Need to convert the class projection bundle in a list of bundles
bundles_list = []
for object in bundles:
bundles_list.append(object.bundle)
bundles = np.array(bundles_list)
# Hamming metric: 0 if same, 1 if different
d = sch.distance.pdist(bundles, 'hamming')
# generate the linkage matrix
return d
def create_linkage_matrix(distances):
Z = linkage(distances, method)
return Z
def fancy_dendrogram(*args, **kwargs):
max_d = kwargs.pop('max_d', None)
if max_d and 'color_threshold' not in kwargs:
kwargs['color_threshold'] = max_d
annotate_above = kwargs.pop('annotate_above', 0)
ddata = dendrogram(*args, **kwargs)
if not kwargs.get('no_plot', False):
plt.title('Hierarchical Clustering Dendrogram (truncated)')
plt.xlabel('sample index or (cluster size)')
plt.ylabel('distance')
for i, d, c in zip(ddata['icoord'], ddata['dcoord'], ddata['color_list']):
x = 0.5 * sum(i[1:3])
y = d[1]
if y > annotate_above:
plt.plot(x, y, 'o', c=c)
plt.annotate("%.3g" % y, (x, y), xytext=(0, -5),
textcoords='offset points',
va='top', ha='center')
if max_d:
plt.axhline(y=max_d, c='k')
return ddata
def show_dendrogram(Z):
fancy_dendrogram(Z, truncate_mode='lastp',
p=12,
leaf_rotation=90.,
leaf_font_size=12.,
show_contracted=True,
annotate_above=10, # useful in small plots so annotations don't overlap
)
plt.show()
def show_elbow(Z):
# showing the elobow diagram
last = Z[-10:, 2]
last_rev = last[::-1]
idxs = np.arange(1, len(last) + 1)
plt.plot(idxs, last_rev)
acceleration = np.diff(last, 2) # 2nd derivative of the distances
acceleration_rev = acceleration[::-1]
plt.plot(idxs[:-2] + 1, acceleration_rev)
plt.show()
# if idx 0 is the max of this we want 2 clusters
k = acceleration_rev.argmax() + 2
print("Recommend clusters from elbow diagram:", k)
return k
def get_clusters(Z, k):
return sch.fcluster(Z, k, criterion='maxclust')
| Sockenschlauch/Scenario-Software | clustering.py | clustering.py | py | 3,672 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "scipy.cluster.hierarchy.distance.pdist",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "scipy.cluster.hierarchy.distance",
"line_number": 49,
"usage_type": "attribute"
},
... |
72301148514 | from iota.crypto.types import Seed
from iota.crypto.addresses import AddressGenerator
from iota import Iota
from datetime import datetime
import requests, json, zmq
class Node:
def __init__(self, seed=None):
if seed != None:
self.seed = seed
else:
self.seed = str(Seed.random())
def __repr__(self):
return self.seed
# Connects to IRI
def api(self):
api = Iota(url(), self.seed)
return api
@staticmethod
def url():
# Grabs available IOTA nodes urls
r = requests.get('https://api.iota-nodes.net/').json()
# Filters nodes that are at 100% health and are on the latest version of IRI
nodes = [r[node]['hostname'] for node in range(len(r))\
if (r[node]['health'] == 5)\
and (r[node]['version'] == '1.8.5-RELEASE')\
and (r[node]['hasZMQ'] == 1)]
# Returns first node url in list
return nodes[0]
# Gets 1st index address from designated seed
def address(self):
addresses = AddressGenerator(seed=self.seed, checksum=False, security_level=3)
main_address = addresses.get_addresses(0,1)
main = str(main_address[0])
return main
@staticmethod
def gen_seed():
return Seed.random()
# Create a RTC to the Tangle and listen for the ricipient address
class Tangle:
def __init__(self, connect, address=None):
config = Node()
self.node = config.url()
self.connect_status = connect
self.rec_address = config.address()
def connect(self):
if self.connect_status == False:
pass
else:
recipient = bytes(self.rec_address, 'utf-8')
context = zmq.Context()
socket = context.socket(zmq.SUB)
# Prepares socket to istens to 1st indexed address
# of seed for tx information
zmq_node = f'tcp://zmq.{self.node}'
socket.setsockopt(zmq.SUBSCRIBE, recipient)
# Initates connections
socket.connect(zmq_node)
print(socket)
connected = True
while connected:
address, data = socket.recv().decode().split(' ', 1)
if address:
hash_data = dict.fromkeys([
tx_hash,
address,
value,
obs_tag,
ts,
index,
last_index,
bundle_hash,
trunk_hash,
branch_hash,
received_ts,
tag,
])
for i, j in data.items():
hash_data[i] = j
if hash_data:
print(hash_data)
connected = False
return connected
test = Tangle(True, None)
test.connect() | 0xCozart/IOTA-CLI | IotaCli/core/api.py | api.py | py | 3,082 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "iota.crypto.types.Seed.random",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "iota.crypto.types.Seed",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "iota.Iota",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "reques... |
17290104296 | from django.urls import path
from . import views
from .views import Register
urlpatterns = [
path('cart/', views.view_cart, name='cart'),
path('add_to_cart/<int:product_id>/', views.add_to_cart, name='add_to_cart'),
path('remove_from_cart/<int:cart_item_id>/', views.remove_from_cart, name='remove_from_cart'),
path('<int:product_id>/', views.product_detail, name='product_detail'),
path('update_cart_item/', views.update_cart_item_quantity, name='update_cart_item_quantity'),
path('payment_result/', views.payment, name='payment_result'),
path('checkout/', views.checkout, name='checkout'),
path('order_success/', views.order_success, name='order_success'),
path('register/', Register.as_view(), name='register'),
path('products/', views.product_list, name='product_list'),
path('', views.home, name='home'),
path('products/<int:category_id>/', views.product_list, name='product_list_by_category'),
]
| MAA8007/ecommerce_django | core/urls.py | urls.py | py | 957 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "views.view_cart",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "views.add_to_cart... |
10390872903 |
from sklearn.decomposition import PCA, TruncatedSVD, SparsePCA
from sklearn.feature_extraction.text import CountVectorizer
from mpl_toolkits import mplot3d
from matplotlib import pyplot as plt
import d2v_model
import iterate_docs
import numpy as np
from itertools import accumulate
def get_pca(docs):
pca = PCA(n_components=3)
d2vm = d2v_model.get_d2v_model()
ret = pca.fit_transform([d2vm.infer_vector(doc.split(' ')) for doc in docs])
return ret, list(accumulate(pca.explained_variance_ratio_))
def get_tsvd(docs):
tsvd = TruncatedSVD(n_components=3)
cts = CountVectorizer().fit_transform(docs)
ret = tsvd.fit_transform(cts)
return ret, list(accumulate(tsvd.explained_variance_ratio_))
def plot_points(dim_red_func, docs):
points, freq_ = dim_red_func(docs)
print('total variance preserved: ' + str(freq_))
fig = plt.figure()
ax = plt.axes(projection ='3d')
dim_tuples = [(min(points.T[i])-1, max(points.T[i])+1) for i in range(3)]
ax.set_xlim(*dim_tuples[0])
ax.set_ylim(*dim_tuples[1])
ax.set_zlim(*dim_tuples[2])
print(dim_tuples)
ax.scatter3D(points.T[0], points.T[1], points.T[2], cmap ='black')
plt.show()
print(get_pca(iterate_docs.get_docs()[0:40])[1])
print(get_tsvd(iterate_docs.get_docs()[0:40])[1])
| mihir-b-shah/time-analyzer | analyze/doc_visualizer.py | doc_visualizer.py | py | 1,264 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "d2v_model.get_d2v_model",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "itertools.accumulate",
"line_number": 16,
"usage_type": "call"
},
{
"api_name":... |
73915914915 | #!/usr/bin/python3
# a simple script, meant to get the 1st initial shell on openAdmin machine from HTB
# Exploit Title: OpenNetAdmin 18.1.1 - Remote Code Execution
# Origin exploit @mattpascoe
# python version @m3dsec
# Software Link: https://github.com/opennetadmin/ona
# Version: v18.1.1
import requests
import json
import re
import sys
if len(sys.argv) != 2:
print("USAGE : python3 exploit.py URL")
#print("USAGE : python3 exploit.py http://10.10.10.171/ona/")
exit()
url = sys.argv[1]
while True:
try:
userCmd = input('$ ')
headers = {"Accept": "*/*", "Content-Type": "application/x-www-form-urlencoded"}
data1 = "xajax=window_submit&xajaxr=1574117726710&xajaxargs[]=tooltips&xajaxargs[]=ip%3D%3E;echo 'BEGIN';{};echo 'END'&xajaxargs[]=ping".format(userCmd)
r = requests.post(url, data=data1, headers=headers)
result = re.search(r"BEGIN(.*)END", r.text, re.S)
print(result.group(1))
except KeyboardInterrupt:
exit(0)
| m3dsec/openNetAdmin18.1.1-SemiShell | openNetAdmin18.1.1_SemiShell.py | openNetAdmin18.1.1_SemiShell.py | py | 948 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "requests.post",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_numbe... |
71228823393 | import numpy as np
import cv2
'''include if you want some more information about the needed memory'''
#from memory_profiler import profile
#@profile
def NC():
'''Put file name here'''
input_name = "demo.png"
pic = cv2.imread(input_name,1)
gray = cv2.imread(input_name,0)
height, width, _ = pic.shape
'''for less extreme values set it lower
value should be between 0 and 1'''
bmp_percent = 0.5
nrm_percent = 0.7
'''blur on or off?'''
blur_switch = 1
'''Fast and rough Ambient Occlusion?'''
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,31,21)
cv2.imwrite(input_name[:len(input_name)-4]+"_notfinished_AO.png",thresh)
'''the next part is for blurring image a bit,
but leaving the edged sharp. '''
if blur_switch == 1:
not_thresh = cv2.bitwise_not(thresh)
th1 = cv2.GaussianBlur(gray, (11,11), 0)
th1 = cv2.bitwise_and(th1, th1, mask = thresh)
th2 = cv2.bitwise_and(gray, gray, mask = not_thresh)
bump = cv2.add(th1, th2)
else:
bump = gray
'''reduces contrast'''
bump = (np.ones((height,width),dtype = np.uint16)*200*(1-bmp_percent) + bump*bmp_percent)
'''bmp pic can be modified for bump or roughness'''
cv2.imwrite(input_name[:len(input_name)-4]+"_notfinished_bmp.png",bump)
'''starting edge detection with sobel horizontal and vertical'''
sobel_x = cv2.Sobel(bump, cv2.CV_16S,1,0, ksize =-1)
sobel_y = cv2.Sobel(bump, cv2.CV_16S,0,1, ksize =-1)
'''Using absolute units if there is an (horizental or vertical) edge, and 0 if there is not'''
abs_x = cv2.convertScaleAbs(sobel_x) - np.ones((height,width),dtype = np.uint8)*255
abs_y = cv2.convertScaleAbs(sobel_y) - np.ones((height,width),dtype = np.uint8)*255
'''Converting into a value between -1 and 1 to be ready to convert into normalmap values'''
pixel_x = (abs_x*(2/255)-1)
pixel_y = (abs_y*(2/255)-1)
'''Array of ones (fastens the calculation)'''
ones = np.ones((height,width),dtype = np.uint8)
'''1 color for each pixel (3 dimensional array per pixel)
x/y value is 1 z value is pixel_x/pixel_y
this leads to a color change only if the is an horizontal/vertical edge'''
dx = np.zeros((height,width,3),dtype = np.float64)
dx[:,:,0] = ones
dx[:,:,2] = pixel_x
dy = np.zeros((height,width,3),dtype = np.float64)
dy[:,:,1] = ones
dy[:,:,2] = pixel_y
'''norm values are the lenght of each array(treated like vector's)'''
dx_norm = np.sqrt(dx[:,:,0]**2+dx[:,:,1]**2+dx[:,:,2]**2)**-1
dy_norm = np.sqrt(dy[:,:,0]**2+dy[:,:,1]**2+dy[:,:,2]**2)**-1
'''normalizing the arrays (treated like vector's)'''
dx[:,:,0] = np.multiply(dx[:,:,0],dx_norm)
dx[:,:,1] = np.multiply(dx[:,:,1],dx_norm)
dx[:,:,2] = np.multiply(dx[:,:,2],dx_norm)
dy[:,:,0] = np.multiply(dy[:,:,0],dy_norm)
dy[:,:,1] = np.multiply(dy[:,:,1],dy_norm)
dy[:,:,2] = np.multiply(dy[:,:,2],dy_norm)
'''cross product of x and y is the final normalmap(with extreme values)'''
dcross = np.zeros((height,width,3),dtype = np.float)
dcross[:,:,2] = (np.multiply(dx[:,:,1],dy[:,:,2]) - np.multiply(dx[:,:,2],dy[:,:,1]) + ones)*(256/2)
dcross[:,:,1] = (np.multiply(dx[:,:,2],dy[:,:,0]) - np.multiply(dx[:,:,0],dy[:,:,2]) + ones)*(256/2)
dcross[:,:,0] = (np.multiply(dx[:,:,0],dy[:,:,1]) - np.multiply(dx[:,:,1],dy[:,:,0]) + ones)*(128/2) + ones * 128
'''calc normals to Normalmap
z is blue between 128 and 256
x is red between 0 and 256
y is green between 0 and 256'''
blue = np.array([255,127,127])
'''flatens normal'''
dcross = (dcross*nrm_percent + blue*(1-nrm_percent))
color_matrix = dcross.astype(np.uint8)
cv2.imwrite(input_name[:len(input_name)-4]+"_nrm.png",color_matrix)
print ("ready")
return ("ready")
if __name__ == '__main__':
NC()
end = input("end")
| Jacobcrease/Normalmap-Calculator | NC.py | NC.py | py | 3,977 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.adaptiveThreshold",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.ADAPTIVE_THRESH_GAUSS... |
19511530507 | """
Created on Jul 24, 2018
@author: ionut
"""
import logging
import datetime
from reach.base import Reach
class ReachGPS(Reach):
"""
GPS client implementation for Reach
"""
def __init__(self, host, port, queue):
Reach.__init__(self, host, port, queue, message_delimiter="\n$GNRMC")
self.conn_buf = 4096
self.tcp_buf_len = 64000
@staticmethod
def parse_coordinate(coord, hemi):
"""
Parse ddmm.mmmm coordinate into dd.ddddd format
:param coord: ddmm.mmmm coordinate data
:param hemi: hemisphere data E,W,N or S
:returns dd.ddddd coordinate
"""
dot_position = coord.find(".")
degrees = coord[:dot_position-2]
minutes = coord[dot_position-2:]
degrees = float(degrees) + float(minutes) / 60.0
if hemi in ["S", "W"]:
degrees = degrees * -1
return degrees
def parse_data(self, data):
sentences = data.split("\n")
position = {}
for sentence in sentences:
if sentence.startswith("$GNRMC"):
parts = sentence.split(",")
if parts[2] != "A":
logging.warning("invalid GNRMC data: %s", sentence)
continue
position["ts"] = datetime.datetime.strptime(parts[9]+parts[1], "%d%m%y%H%M%S.%f")
position["ts"] = position["ts"].replace(tzinfo=datetime.timezone.utc)
position["lat"] = self.parse_coordinate(parts[3], parts[4])
position["lng"] = self.parse_coordinate(parts[5], parts[6])
position["speed"] = float(parts[7]) * 1.852 # knots to km/h
# position["heading"] = float(parts[8]) #0-360
elif sentence.startswith("$GNGST"):
parts = sentence.split(",")
position["acc"] = max(float(parts[6]), float(parts[7])) # meters
elif sentence.startswith("$GNGGA"):
parts = sentence.split(",")
position["alt"] = float(parts[9]) + float(parts[11]) # meters
position["fix"] = float(parts[6])
return position
| BWiebe1/openexcavator | openexcavator/reach/gps.py | gps.py | py | 2,161 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "reach.base.Reach",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "reach.base.Reach.__init__",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "reach.base.Reach",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "logging.w... |
44376704296 | #-------------------------------------------------------------------
#
# Written by Phillip Grabovsky 03/20/2022
# This code is distributed under the GNU LGPL license.
#
# This code graphs the output produced by the Fortran code
# in the Stieltjes project. Fortran writes text files containing numbers
# This grapher reads these files and builds the plots. See README files
# for this project.
#
# ------------------------------------------------------------------
import sys
import csv
import numpy as np
import PySide2
import PySide2.QtWidgets as Qt
import PySide2.QtGui as Gui
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT as NavigationToolbar
from matplotlib.gridspec import GridSpec
from matplotlib.figure import Figure
#Same Curve should be set to true if the input data lies on the same curve as the
#extrapolation data. This is only needed in Nyquist mode, it is true always in
#Voigt mode regardless the value below.
SameCurve = True
class MplCanvas(FigureCanvasQTAgg):
def __init__(self, parent, mode, status, dpi=100):
if status:
self.initializeNormally(parent, mode, dpi=100)
else:
self.disableCaprinis(parent, mode, dpi=100)
super(MplCanvas, self).__init__(self.fig)
#initialize normally with caprinis.
def initializeNormally(self, parent, mode, dpi=100):
self.dataLabels = []
self.lw = 1
self.fig = Figure(dpi=dpi, constrained_layout=True)
gs = GridSpec(2, 2, width_ratios=[3,2], figure=self.fig) #2x2 grid, more space alotted for left column.
self.topCapriniAxes = self.fig.add_subplot(gs[0,1]) #top caprini is top left
self.bottomCapriniAxes = self.fig.add_subplot(gs[1,1]) #bottom caprini is bottom left
self.axisDict = {"TC":self.topCapriniAxes,
"BC":self.bottomCapriniAxes}
if(mode == "Voigt"):
self.topVoigtAxes = self.fig.add_subplot(gs[0,0])
self.bottomVoigtAxes = self.fig.add_subplot(gs[1,0])
self.axisDict["TV"] = self.topVoigtAxes
self.axisDict["BV"] = self.bottomVoigtAxes
self.setPlotInfo(self.topVoigtAxes, "Real Part", "Freq/Hz", "Z Real/Ohm")
self.setPlotInfo(self.bottomVoigtAxes, "Imaginary Part", "Freq/Hz", "Z Imag/Ohm)")
self.setPlotInfo(self.topCapriniAxes,"Caprini Function", "Freq/Hz", "")
self.setPlotInfo(self.bottomCapriniAxes,"Zeros of the Caprini Function", "Freq/Hz", "")
elif(mode == "Nyquist"):
self.nyquistAxes = self.fig.add_subplot(gs[:,0]) #nyquist takes up whole left column
self.axisDict["N"] = self.nyquistAxes
self.setPlotInfo(self.nyquistAxes, "Nyquist Plot", "Re", "Im")
self.setPlotInfo(self.topCapriniAxes,"Caprini Function", "t/S", "")
self.setPlotInfo(self.bottomCapriniAxes,"Zeros of the Caprini Function", "t/S", "")
#initialize without caprinis because they shouldn't be shown.
def disableCaprinis(self, parent, mode, dpi=100):
self.dataLabels = []
self.lw = 1
self.axisDict = {}
self.fig = Figure(dpi=dpi, constrained_layout=True)
if(mode == "Voigt"):
gs = GridSpec(2, 1, figure=self.fig)
self.topVoigtAxes = self.fig.add_subplot(gs[0,0])
self.bottomVoigtAxes = self.fig.add_subplot(gs[1,0])
self.axisDict["TV"] = self.topVoigtAxes
self.axisDict["BV"] = self.bottomVoigtAxes
self.setPlotInfo(self.topVoigtAxes, "Real Part", "Freq/Hz", "Z Real/Ohm")
self.setPlotInfo(self.bottomVoigtAxes, "Imaginary Part", "Freq/Hz", "Z Imag/Ohm)")
elif(mode == "Nyquist"):
self.nyquistAxes = self.fig.add_subplot(111) #nyquist takes up whole left column
self.axisDict["N"] = self.nyquistAxes
self.setPlotInfo(self.nyquistAxes, "Nyquist Plot", "Re", "Im")
def setGraphScale(self, axes, xMin, xMax, yMin, yMax):
self.axisDict[axes].set_xlim(xMin, xMax)
self.axisDict[axes].set_ylim(yMin, yMax)
def drawSemiLogX(self, axes, x, y, dataLabel, curveColor):
if not (dataLabel in self.dataLabels):
self.dataLabels.append(dataLabel)
self.axisDict[axes].semilogx(x,y, label=dataLabel, color=curveColor, linewidth=self.lw)
else:
self.axisDict[axes].semilogx(x,y,color=curveColor,linewidth=self.lw)
if(axes == "TV" or axes == "N"):
self.axisDict[axes].legend()
def drawSemiLogXDots(self, axes, x, y, dataLabel, curveColor):
if not (dataLabel in self.dataLabels):
self.dataLabels.append(dataLabel)
self.axisDict[axes].semilogx(x,y, '.',label=dataLabel, color=curveColor, linewidth=self.lw)
else:
self.axisDict[axes].semilogx(x,y,'.',color=curveColor,linewidth=self.lw)
if(axes == "TV" or axes == "N"):
self.axisDict[axes].legend()
def drawLinLin(self, axes, x, y, dataLabel, curveColor):
if not (dataLabel in self.dataLabels):
self.dataLabels.append(dataLabel)
self.axisDict[axes].plot(x,y, label=dataLabel, color=curveColor,linewidth=self.lw)
else:
self.axisDict[axes].plot(x,y, color = curveColor,linewidth=self.lw)
if(axes == "TV" or axes == "N"):
self.axisDict[axes].legend()
def drawLinLinDots(self, axes, x, y, dataLabel, curveColor):
if not (dataLabel in self.dataLabels):
self.dataLabels.append(dataLabel)
self.axisDict[axes].plot(x,y, '.',label=dataLabel, color=curveColor,linewidth=self.lw)
else:
self.axisDict[axes].plot(x,y, '.',color = curveColor,linewidth=self.lw)
if(axes == "TV" or axes == "N"):
self.axisDict[axes].legend()
def setPlotInfo(self, axes, title, xTitle, yTitle):
axes.set_title(title)
axes.set_xlabel(xTitle)
axes.set_ylabel(yTitle)
class StieltjesGraphWindow(Qt.QDialog):
def __init__(self, sameCurveArg):
super().__init__()#set up Qt
self.topLevelLayout = Qt.QHBoxLayout()
self.graphData = [[],[]] #set by populateData()
self.mode, zetaFZeta = self.detectMode() #is set by populateData()
self.sameCurve = sameCurveArg #set by user. whether the data lies on the extrapolation curve.
self.status, fZeta = self.detectStatus() #whether to show caprini functions
#setup graphs!
self.graph = Qt.QFrame()
graphLayout = Qt.QVBoxLayout()
self.graphCanvas = MplCanvas(self,self.mode, self.status, dpi=100)
toolbar = NavigationToolbar(self.graphCanvas, self)
graphLayout.addWidget(toolbar)
graphLayout.addWidget(self.graphCanvas)
self.graph.setLayout(graphLayout)
self.graph.setFrameShape(Qt.QFrame.Panel)
self.graph.setFrameShadow(Qt.QFrame.Sunken)
self.graph.setLineWidth(2)
graphLayout = Qt.QVBoxLayout() #update topLevelLayout to include graphs!
graphLayout.addWidget(self.graph)
self.topLevelLayout.addItem(graphLayout)
self.populateData(zetaFZeta, fZeta) #take in txt file data and add it to graphs.
self.setLayout(self.topLevelLayout)
self.setWindowState(PySide2.QtCore.Qt.WindowState.WindowMaximized)
def populateData(self, zetaFZeta, fZeta): #figure out graph contents
#populate data for correct graph
if self.mode == "Nyquist":
self.populateNyquist(zetaFZeta, fZeta)
elif self.mode == "Voigt":
self.populateVoigt(zetaFZeta, fZeta)
if(self.status):
self.populateCaprinis()
def populateNyquist(self, zetaFZeta, fZeta): #update nyquist plot
#Spectral rep and Extrapolation curves
Z, W = [], []
for row in zetaFZeta:
Z.append(complex(row[0], row[1])) #needed for spectral rep
W.append(complex(row[2], row[3])) #extrapolation
fZ = self.computeSpectralRep(Z, fZeta) #finish computing spectral rep
#Uncertainty curves
dataSizes = self.loadFileIntoNestedArrays("data_sizes.txt")
NMC = int(dataSizes[2][0])
nZeta = int(dataSizes[1][0])
count = 0
if NMC > 0:
MonteCarlo = self.loadFileIntoNestedArrays("WMC.txt")
WMC = []
for k in range(nZeta): #iteration is not row->col to suit WMC.txt format.
for j in range(NMC):
if(k==0):
WMC.append([])
WMC[j].append(complex(MonteCarlo[count][0], MonteCarlo[count][1]))
count=count+1
#actually plot the results
for j in range(NMC):
self.graphCanvas.drawLinLin("N",np.real(WMC[j]),np.imag(WMC[j]),"Uncertainty", "silver")
self.graphCanvas.drawLinLin("N",np.real(W), np.imag(W), "Extrapolation", "orange") #graph extrapolation
self.graphCanvas.drawLinLin("N",np.real(fZ), np.imag(fZ), "Spectral Rep", "cyan") #graph spectral rep
#plot original points if extrap is on same curve is on original data.
if self.sameCurve:
expData = self.loadFileIntoNestedArrays("exp_data.txt")
if self.status:
wfix = np.transpose(self.loadFileIntoNestedArrays("wfix.txt"))
self.graphCanvas.drawLinLinDots("N", wfix[0], wfix[1], "Alternative Data", "red")
wOrig = []
for row in expData:
wOrig.append(complex(row[2], row[3]))
self.graphCanvas.drawLinLinDots("N", np.real(wOrig), np.imag(wOrig), "W Original", "black")
def computeSpectralRep(self, Z, fZeta):
sk = [] #sk is the 2nd column of fzeta.
tk = [] #tk is the 1st column of fzeta, w/out the 1st element
for i in range(len(fZeta)):
sk.append(fZeta[i][1])
if i != 0:
tk.append(fZeta[i][0])
M=[] #M = 1/(tk - Z), tk is a column, Z is a row.
for i in range(len(tk)):
M.append([])
for j in range(len(Z)):
M[i].append( 1/(tk[i] - Z[j]) )
Mt = np.transpose(M)
fZ = np.matmul(Mt,sk[1:]) #fZ=[] #fZ=sk[0] + M.'* sk[1:]
fZ = [sk[0] + i for i in fZ]
return fZ
def populateVoigt(self, zetaFZeta, fZeta):
    """Draw the Voigt-mode plots on the "TV" (real) and "BV" (imaginary) panels:
    measured data, extrapolation, the Voigt-circuit model, Monte-Carlo
    uncertainty bands and (optionally) alternative data.

    zetaFZeta: rows of [freq, re(Z), im(Z)] extrapolation points.
    fZeta: spectral-measure rows; row 0 supplies sk[0], later rows supply
           (tk, sk) pairs.
    """
    # fZ exp data [freq,re(Z),im(Z)]; wfix=alternative data
    # f=all frequencies, F=Z(f) is the model
    # Z(n_zeta,2) is the extrapolation
    # EIS(f)=F_spectral(f), WZ(n_zeta,Nr)=monte-carlo
    dataSizes=self.loadFileIntoNestedArrays('data_sizes.txt');
    fZ1Split = self.loadFileIntoNestedArrays("Voigt_data.txt")
    Nr = int(dataSizes[2][0])  # number of Monte-Carlo realisations
    #unpack data
    freq, fZ1, f1, sk, tk, zetaFZetaCombined = [],[],[],[],[],[]
    for row in fZ1Split:
        freq.append(row[0])
        fZ1.append(complex(row[1], row[2]))
    for row in zetaFZeta:
        f1.append(row[0])
    nZeta = len(f1)
    # first row of fZeta only contributes sk[0]; remaining rows give (tk, sk)
    for i in range(len(fZeta)):
        if i > 0:
            tk.append(fZeta[i][0])
        sk.append(fZeta[i][1])
    for row in zetaFZeta:
        zetaFZetaCombined.append(complex(row[1], row[2]))
    #compute M and EIS - to show voigt circuit on graphs.
    M=[] #M=1./(tk+2i*pi*f1');
    for i in range(len(tk)):
        M.append([])
        for j in range(len(f1)):
            M[i].append( 1/(complex(tk[i], f1[j]*2*np.pi)) )
    Mt = np.transpose(M)
    EIS = sk[0] + np.matmul(Mt, sk[1:])
    #uncertainty lines retrieved and computed
    if Nr > 0:
        WZ = []
        WMC = self.loadFileIntoNestedArrays("WMC.txt");
        count=0;
        # WMC.txt holds nZeta*Nr complex samples in row-major (k, j) order;
        # WZ[j] collects the nZeta values of Monte-Carlo realisation j.
        for k in range(nZeta):
            for j in range(Nr):
                if(k==0):
                    WZ.append([])
                WZ[j].append( complex(WMC[count][0], WMC[count][1]) )
                count+=1
        #uncertainty
        for i in range(Nr):
            self.graphCanvas.drawSemiLogX("TV", f1, np.real(WZ[i]), "Uncertainty", "silver")
            self.graphCanvas.drawSemiLogX("BV", f1, np.imag(WZ[i]), "Uncertainty", "silver")
    if self.status: #alternative data is only printed if status is true.
        wfix = self.loadFileIntoNestedArrays("wfix.txt")
        wfix = np.transpose(wfix)
        self.graphCanvas.drawSemiLogXDots("TV", freq, wfix[0], "Alternative Data","red")
        self.graphCanvas.drawSemiLogXDots("BV", freq, wfix[1], "Alternative Data","red")
    #upper graph - real part
    self.graphCanvas.drawSemiLogXDots("TV", freq, np.real(fZ1), "Data","black")
    self.graphCanvas.drawSemiLogX("TV", f1, np.real(zetaFZetaCombined), "Extrapolation", "black")
    self.graphCanvas.drawSemiLogX("TV", f1, np.real(EIS), "Voigt Circuit", "cyan")
    #lower graph - imaginary part
    self.graphCanvas.drawSemiLogXDots("BV", freq, np.imag(fZ1), "Data","black")
    self.graphCanvas.drawSemiLogX("BV", f1, np.imag(zetaFZetaCombined), "Extrapolation", "black")
    self.graphCanvas.drawSemiLogX("BV", f1, np.imag(EIS), "Voigt Circuit", "cyan")
def populateCaprinis(self):
    """Plot the Caprini function on the top ("TC") and bottom ("BC") panels.

    Reads [t(k), C(t(k))] pairs from tC.txt, draws a zero baseline plus the
    curve on both panels, then rescales the bottom panel so the zeros of C
    become visible.
    """
    rows = self.loadFileIntoNestedArrays("tC.txt")  # The Caprini function [t(k), C(t(k))]
    tC = [row[0] for row in rows]  # x-axis data
    C = [row[1] for row in rows]   # y-axis data
    baseline_x = [tC[0], tC[-1]]
    for panel in ("TC", "BC"):
        self.graphCanvas.drawSemiLogX(panel, baseline_x, [0, 0], "", "red")  # zero baseline
        self.graphCanvas.drawSemiLogX(panel, tC, C, "", "blue")              # Caprini curve
    # scale the bottom graph to show zeros
    CMin, CMax = self.getgraphScale(C)
    tMin, tMax = min(tC), max(tC)
    self.graphCanvas.setGraphScale("BC", tMin, tMax, CMin, CMax)
    self.graphCanvas.axisDict["TC"].set_xlim(tMin, tMax)
def getgraphScale(self, C):
    """Find the local minima of C and derive (CMin, CMax) y-limits that make
    the minima/zeros visible on the bottom panel.

    Returns a tuple (CMin, CMax). CMin is a small negative margin; CMax is
    large enough to show both the deepest and shallowest local minimum.
    """
    diffs = np.diff(C)
    local_mins = []
    last_negative = False
    for i in range(len(diffs)):
        this_positive = diffs[i] > 0
        # a sign change (falling then rising) marks a local minimum at C[i]
        if last_negative and this_positive:
            local_mins.append(C[i])
        last_negative = diffs[i] < 0
    if not local_mins:
        # BUG FIX: monotonic data has no interior minimum; the original
        # max()/min() calls raised ValueError here. Fall back to the global
        # minimum so a sensible scale is still produced.
        local_mins = [min(C)]
    biggestMin = max(local_mins)
    smallestMin = min(local_mins)
    CMax = max([-10*smallestMin, 1.1*biggestMin, 1.e-9*max(C)])
    CMin = -1.e-10*max(C)
    return CMin, CMax
def detectMode(self):
    """Classify the extrapolation file by its column count.

    Returns (mode, data): data is the parsed contents of W_extr.txt and mode
    is "Nyquist" for 4 columns, "Voigt" for 3, "UnknownData" otherwise.
    """
    data = self.loadFileIntoNestedArrays('W_extr.txt')
    modes = {4: "Nyquist", 3: "Voigt"}
    return modes.get(len(data[0]), "UnknownData"), data
def detectStatus(self):
    """Read spectral_measure.txt and return (show_caprinis, data).

    A leading value of 0 in the file means the Caprini plots are disabled.
    """
    data = self.loadFileIntoNestedArrays("spectral_measure.txt")
    # determine if caprinis should be shown
    return data[0][0] != 0, data
def loadFileIntoNestedArrays(self, filename): #load a data file into a 2d array
    """Parse a space-delimited numeric file into a list of float rows.

    Runs of spaces produce empty cells, which are skipped, so rows may have
    differing lengths.
    """
    with open(filename, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=" ")
        return [[float(cell) for cell in row if cell != ""] for row in reader]
if __name__ == '__main__':
    # Create the Qt Application
    app = Qt.QApplication(sys.argv)
    # Create and show the form
    # NOTE(review): StieltjesGraphWindow and SameCurve are defined earlier in
    # this file, outside this excerpt.
    form = StieltjesGraphWindow(SameCurve)
    form.show()
    # Run the main Qt loop
    sys.exit(app.exec_())
| YuryGrabovsky/Stieltjes | PythonGrapher.py | PythonGrapher.py | py | 16,120 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "matplotlib.figure.Figure",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.gridspec.GridSpec",
"line_number": 43,
"usage_ty... |
8366309057 | #!/usr/bin/python3
"""new view for State objects"""
from api.v1.views import app_views
from models import storage
from models.state import State
from flask import jsonify, abort, request
@app_views.route(
    '/states',
    methods=['GET', 'POST'],
    strict_slashes=False
)
def states():
    """List all State objects (GET) or create a new one (POST) on /states."""
    if request.method == "GET":
        return jsonify([s.to_dict() for s in storage.all('State').values()])
    # POST: body must be JSON and must contain a "name" key
    body = request.get_json()
    if body is None:
        abort(400, 'Not a JSON')
    if 'name' not in body.keys():
        abort(400, 'Missing name')
    new_state = State(**body)
    new_state.save()
    return jsonify(new_state.to_dict()), 201
@app_views.route(
    '/states/<state_id>',
    methods=['GET', 'DELETE', 'PUT'],
    strict_slashes=False
)
def state(state_id):
    """Fetch (GET), delete (DELETE) or update (PUT) one State; 404 if absent."""
    target = storage.get(State, state_id)
    if target is None:
        abort(404)
    if request.method == "GET":
        return jsonify(target.to_dict())
    if request.method == "DELETE":
        target.delete()
        storage.save()
        return jsonify({})
    # PUT: apply every JSON field except the immutable bookkeeping keys
    body = request.get_json()
    if body is None:
        abort(400, 'Not a JSON')
    immutable = ('id', 'created_at', 'updated_at')
    for key, value in body.items():
        if key not in immutable:
            setattr(target, key, value)
    target.save()
    return jsonify(target.to_dict())
| scan3ls/AirBnB_clone_v3 | api/v1/views/states.py | states.py | py | 1,524 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.request.method",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "models.storage.all",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.sto... |
2125778137 | from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from pgportfolio.marketdata.coinlist import CoinList
import numpy as np
import pandas as pd
from pgportfolio.tools.data import panel_fillna
from pgportfolio.constants import *
import sqlite3
from datetime import datetime
import logging
from pandas_datareader import data as pdr
from datetime import date
from ta import add_all_ta_features
from ta.utils import dropna
class StockHistoryManager:
    """Fetches stock price history (via yahoo / pandas-datareader) plus
    technical indicators and exposes it as a [feature, coin, time] matrix or
    a (ticker, feature) MultiIndex DataFrame."""
    # if offline ,the coin_list could be None
    # NOTE: return of the sqlite results is a list of tuples, each tuple is a row
    def __init__(self, coin_number, end, stocks, volume_average_days=1, volume_forward=0, online=True):
        self.__storage_period = FIVE_MINUTES  # keep this as 300
        self._coin_number = coin_number
        self.__volume_forward = volume_forward
        self.__volume_average_days = volume_average_days
        self.__coins = stocks

    def coins(self):
        """Return the configured list of ticker symbols."""
        return self.__coins

    def get_global_data_matrix(self, start, end, features=('close',)):
        """
        :return a numpy ndarray whose axis is [feature, coin, time]
        """
        # BUG FIX: the original call omitted the required `stocks` argument
        # and always raised TypeError; `stocks` now defaults to the
        # configured ticker list inside get_global_dataframe.
        return self.get_global_dataframe(start, end, features).values

    def get_global_dataframe(self, start, end, features, stocks=None):
        """Build a MultiIndex (ticker, feature) DataFrame of price data and
        technical indicators between the unix timestamps `start` and `end`.

        `stocks` defaults to the tickers supplied at construction time, which
        keeps existing four-argument callers working unchanged.
        """
        if stocks is None:
            stocks = self.__coins
        startdt = datetime.fromtimestamp(start)  # convert timestamp to date and time
        enddt = datetime.fromtimestamp(end)
        df_list = []  # one DataFrame per ticker
        for ticker in stocks:
            print("getting stock data from: " + ticker)
            # get the security data from yahoo finance
            data = pdr.DataReader(ticker, start=startdt, end=enddt, data_source='yahoo')
            df = pd.DataFrame(data, columns=['Close', 'High', 'Low', 'Open', 'Volume'])
            # add all technical indicators, back-filling NaN values
            df = add_all_ta_features(df, open="Open", high="High", low="Low", close="Close", volume="Volume", fillna='bfill')
            df.fillna(method='ffill', inplace=True)  # forward fill NaN if necessary
            df.columns = df.columns.str.lower()  # lowercase columns for proper input
            df_list.append(df[features])  # keep only the requested features
        # concatenate per-ticker frames column-wise into one wide frame
        df = pd.concat(df_list, axis=1, join='outer')
        df.fillna(method='bfill', inplace=True)
        df.fillna(method='ffill', inplace=True)
        df.index.name = None  # remove the index name
        index = pd.MultiIndex.from_product([df.index])
        columns = pd.MultiIndex.from_product([stocks, features])
        # final dataframe in the proper format for the neural network
        panel = pd.DataFrame(df.values, index=index, columns=columns, dtype="float64")
        print(panel)
        return panel

    def select_coins(self, start, end):
        # select top coin_number of coins by volume from start to end
        pass
| dgeorge1000/portfolio_management_senior_capstone | pgportfolio/marketdata/stockglobaldatamatrix.py | stockglobaldatamatrix.py | py | 3,670 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 48,
"usage_type": "call"
},
{
... |
40329203690 | import cvxpy as cvp
import numpy as np
from collections import namedtuple
from mayavi import mlab
from mayavi.mlab import points3d, plot3d, quiver3d
import matplotlib.pyplot as plt
"""
http://www.larsblackmore.com/iee_tcst13.pdf
carrying the same assumption here that X is up (i.e. normal to land)
TODO:
- fix q definition to be the y,z coordinate instead of xyz
- figure out how to best pick an initial tf
- make alpha a parameter istead of isp
"""
GFoldConfig = namedtuple('GFoldConfig', ['isp', 'm', 'm_fuel',
'p1', 'p2',
'g', 'pointing_lim', 'landing_cone',
'q', 'v_max', 'w'])
_e1 = np.array([1, 0 ,0]).T
_e2 = np.array([0, 1 ,0]).T
_e3 = np.array([0, 0 ,1]).T
_E = np.array([[0, 1, 0], [0, 0, 1]])
def create_S(w):
    """Return the 3x3 skew-symmetric cross-product matrix S(w),
    i.e. S(w) @ v == cross(w, v)."""
    S = np.array([[0, -w[2], w[1]],
                  [w[2], 0, -w[0]],
                  [-w[1], w[0], 0]])
    return S


def create_A(w):
    """Build the 6x6 state matrix A for x = [position; velocity] in a frame
    rotating with angular velocity w.

    Lower-left block is the centrifugal term -S(w)S(w); lower-right block is
    the Coriolis term -2 S(w).
    """
    S = create_S(w)
    A = np.zeros((6, 6))
    A[0:3, 3:6] = np.eye(3)
    # BUG FIX: np.square(S) squares elementwise; the rotating-frame dynamics
    # require the matrix square S @ S (centrifugal term -S(S x)).
    A[3:6, 0:3] = -(S @ S)
    A[3:6, 3:6] = -2*S
    return A
def create_B():
    """Return the 6x3 input matrix B with identity in the top 3x3 block.

    NOTE(review): for x = [position; velocity] one would expect the control to
    enter the velocity rows (B[3:6]); confirm against the caller — the solver
    integrates u explicitly, so B appears unused there.
    """
    B = np.zeros((6, 3))
    B[:3, :] = np.identity(3)
    return B
def get_cone(cone_angle):
    """Glide-slope cone normal: the vertical unit vector e1 scaled by
    1/tan(cone_angle)."""
    # cone angle between 0 and pi/2
    return np.array([1, 0, 0]) / np.tan(cone_angle)
class GFOLDSolverPosition:
    """Convex "minimum landing error" G-FOLD problem: minimise the distance
    between the final position and the target config.q, subject to the
    relaxed rocket dynamics and thrust constraints."""
    def __init__(self, config, iterations):
        N = iterations  # number of discretisation steps
        self.N = N
        self.config = config
        # Parameters (set per solve, so the compiled problem can be reused)
        self.dt = cvp.Parameter()
        self.x_0 = cvp.Parameter(6)  # initial [position; velocity]
        # Solved variables
        self.x = cvp.Variable((6, N))   # state trajectory
        self.u = cvp.Variable((3, N))   # mass-normalised thrust acceleration
        self.gam = cvp.Variable(N)      # thrust-magnitude slack
        self.z = cvp.Variable(N)        # log(mass)
        # Problem 3
        self.constr = []
        self.constr = set_initial_constraints(self.constr, config, self.x, self.u, self.gam, self.z, N, self.x_0)
        self.constr = running_constraints(self.constr, config, self.x, self.u, self.gam, self.z, self.dt, N)
        # objective: final miss distance from the landing target q
        self.obj = cvp.norm(self.x[0:3, N-1] - config.q[:])
        self.problem = cvp.Problem(cvp.Minimize(self.obj), self.constr)

    def initialize_dt(self, start_vel):
        """Pick a heuristic time step: dt = midpoint of rough max/min
        flight-time estimates, divided by N."""
        alpha = 1.0 / (self.config.isp * 9.8)  # mass flow per unit thrust
        t_max = self.config.m / (alpha * self.config.p2) # this differs from the paper, but using p1 results in cvp.log to return nan
        t_min = (self.config.m - self.config.m_fuel) * np.linalg.norm(start_vel) / self.config.p2
        print("Full time: ", (0.5*t_max + 0.5*t_min))
        self.dt.value = (0.5*t_max + 0.5*t_min) / self.N

    def solve(self, start_pos, start_vel):
        """Solve from (start_pos, start_vel); returns the (x, u, gam, z)
        value arrays."""
        self.initialize_dt(start_vel)
        self.x_0.value = np.array([*start_pos, *start_vel])
        self.problem.solve(solver=cvp.ECOS, verbose=True)
        return self.x.value, self.u.value, self.gam.value, self.z.value
class GFOLDSolverMass(GFOLDSolverPosition):
    """Second G-FOLD stage: maximise the final log-mass z[N-1] subject to
    landing no farther from the target than the position solution d_p3 did."""
    def __init__(self, config, iterations):
        super().__init__(config, iterations)
        self.d_p3 = cvp.Parameter(3)  # final position from the position-optimal solve
        self.constr += [cvp.norm(self.x[:3, self.N-1] - config.q) <= cvp.norm(self.d_p3 - config.q)]
        self.problem = cvp.Problem(cvp.Maximize(self.z[self.N-1]), self.constr)

    def solve(self, d_p3, start_pos, start_vel):
        """Solve the mass-optimal problem twice (the second time with a
        slightly stretched horizon) and report the mass difference.

        Returns the (x, u, gam, z) value arrays of the second solve.
        """
        self.initialize_dt(start_vel)
        self.d_p3.value = d_p3
        self.x_0.value = np.array([*start_pos, *start_vel])
        self.problem.solve(solver=cvp.ECOS, verbose=False, max_iters=400)
        first_mass = self.z[-1].value
        print("Start dt: ", self.dt.value)
        # stretch the total flight time by 0.1 s and re-solve
        self.dt.value = (self.N * self.dt.value + 1e-1) / self.N
        print("end dt: ", self.dt.value)
        self.problem.solve(solver=cvp.ECOS, verbose=False, max_iters=400)
        second_mass = self.z[-1].value
        print("Mass change: ", first_mass - second_mass)
        return self.x.value, self.u.value, self.gam.value, self.z.value
class GFOLDSolver:
    """Convenience wrapper that runs the G-FOLD position problem (and,
    eventually, the mass-optimal refinement)."""

    def __init__(self, config, iterations):
        self.config = config
        self.position_solver = GFOLDSolverPosition(config, iterations)
        self.mass_solver = GFOLDSolverMass(config, iterations)

    def solve(self, start_pos, start_vel):
        """Solve from the given state; returns (x, u, gam, z, dt)."""
        print("Solving position problem")
        trajectory = self.position_solver.solve(start_pos, start_vel)
        # TODO: solve the mass problem efficiently!
        # x2, u2, gam2, z2 = self.mass_solver.solve(trajectory[0][0:3, -1], start_pos, start_vel)
        return (*trajectory, self.position_solver.dt.value)
def set_initial_constraints(constr, config, x, u, gam, z, N, x_0):
    """Append boundary-condition constraints (initial state, touchdown state,
    vertical thrust at both ends, initial mass, altitude >= 0) to constr and
    return it."""
    constr += [x[:, 0] == x_0[:]] # Initial velocity and position
    constr += [x[3:6, N-1] == np.array([0, 0, 0])] # Final velocity == 0
    # TODO (make initial thrust direction a parameter)
    constr += [u[:, 0] == gam[0]*_e1] # Initial thrust is vertical
    constr += [u[:, N-1] == gam[N-1]*_e1] # final thrust is vertical (or 0)
    constr += [z[0] == cvp.log(config.m)] # Initial mass
    constr += [x[0, N-1] == 0] # Final altitude should be 0
    constr += [x[0, 0:N-1] >= 0] # All altitudes during flight should be above the ground
    return constr
def running_constraints(constr, config, x, u, gam, z, dt, N):
    """Append the per-step dynamics, velocity, pointing, glide-cone and
    (convexified) thrust/mass constraints for every Euler step; return constr."""
    A_w = create_A(config.w)
    alpha = 1.0 / (config.isp * 9.8)  # mass flow per unit thrust
    pointing_angle = np.cos(config.pointing_lim)
    p1 = config.p1  # minimum thrust
    p2 = config.p2  # maximum thrust
    v_max = config.v_max
    c = get_cone(config.landing_cone)
    g = config.g
    # Simple Euler integration
    for k in range(N-1):
        # Rocket dynamics constraints
        constr += [x[0:3, k+1] == x[0:3, k] + dt*(A_w@x[:, k])[0:3]]
        constr += [x[3:6, k+1] == x[3:6, k] + dt*(g + u[:, k])]
        constr += [z[k+1] == z[k] - dt*alpha*gam[k]]
        constr += [cvp.norm(x[3:6, k]) <= v_max] # Velocity remains below maximum
        constr += [cvp.norm(u[:,k]) <= gam[k]] # Helps enforce the magnitude of thrust vector == thrust magnitude
        constr += [u[0,k] >= pointing_angle*gam[k]] # Rocket can only point away from vertical by so much
        constr += [cvp.norm(_E@(x[:3,k] - x[:3,-1])) - c.T@(x[:3, k] - x[:3,-1]) <= 0] # Stay inside the glide cone
        if k > 0:
            # convexified thrust bounds: linearise around the log-mass
            # envelope [z_0, z_1] reachable at step k
            z_0 = cvp.log(config.m - alpha * p2 * (k) * dt)
            z_1 = cvp.log(config.m - alpha * p1 * (k) * dt)
            sigma_lower = p1 * cvp.exp(-z_0) * (1 - (z[k] - z_0) + (z[k] - z_0))
            sigma_upper = p2 * cvp.exp(-z_0) * (1 - (z[k] - z_0))
            # Minimimum and maximum thrust constraints
            constr += [gam[k] <= sigma_upper]
            constr += [gam[k] >= sigma_lower]
            # Minimum and maximum mass constraints
            constr += [z[k] >= z_0]
            constr += [z[k] <= z_1]
    return constr
def solve_gfold(config, start_pos, start_vel, iterations=100):
    """Build a GFOLDSolver and return (x, u, gam, z, dt) for the given
    start state."""
    return GFOLDSolver(config, iterations).solve(start_pos, start_vel)
if __name__ == "__main__":
config = GFoldConfig(isp=350,
m=12000,
m_fuel=1000,
p1=0.001*250000,
p2=0.5*250000,
g=np.array([-3, 0, 0]),
pointing_lim=np.deg2rad(45),
landing_cone=np.deg2rad(30),
q=np.array([0, 0, 0]),
v_max=1000,
w=np.array([0, 0, 0])
)
start_pos = np.array([25000, 10000, 10000])
start_vel = np.array([-300, -300, 300])
x, u, gam, z = solve_gfold(config, start_pos, start_vel)
print(f"final values:\nx: {x[:,-1]}\nu: {u[:,-1]}", )
f = mlab.figure(bgcolor=(0, 0, 0))
points3d([0], [0], [0], scale_factor=200.0, resolution=128, color=(0, 0.5, 0.5))
s = plot3d(x[0,:], x[1,:], x[2,:], tube_radius=5.0, colormap='Spectral')
v = quiver3d(x[0,:], x[1,:], x[2,:], u[0,:], u[1,:], u[2,:])
mlab.axes()
mlab.show() | heidtn/ksp_autopilot | gfold_test.py | gfold_test.py | py | 7,980 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"lin... |
30340972856 | #!/usr/bin/env python
# Requires PyQt5 and compiltion of GUI files via pyuic
from setuptools import setup, Extension
from setuptools.command.build_py import build_py
try:
from pyqt_distutils.build_ui import build_ui
except ImportError:
print("Please install pyqt_distutils")
print( "(sudo) pip(3) install pyqt-distutils")
exit()
class custom_build_py(build_py):
    """build_py that regenerates the Qt .ui files (via build_ui) first."""
    def run(self):
        # compile .ui -> .py before the normal build copies sources
        self.run_command('build_ui')
        build_py.run(self)
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name='Akvo',
version='1.7.3',
python_requires='>3.7.0', # due to pyLemma
description='Surface nuclear magnetic resonance workbench',
long_description=long_description,
long_description_content_type='text/markdown',
author='Trevor P. Irons',
author_email='Trevor.Irons@lemmasoftware.org',
url='https://akvo.lemmasoftware.org/',
#setup_requires=['PyQt5'],
setup_requires=[
# Setuptools 18.0 properly handles Cython extensions.
#'PyQt',
'pyqt_distutils',
'PyQt5',
'setuptools>=18.0',
],
# ext_modules = cythonise("akvo/tressel/*.pyx"),
# build_requires=['cython'],
install_requires=[
# 'cython',
# 'rpy2',
'matplotlib',
'scipy',
'padasip',
'seaborn',
'numpy',
'pyqt5',
'pyyaml',
'ruamel.yaml',
'pandas',
'pyqt-distutils',
'cmocean',
'pyLemma >= 0.4.0'
],
packages=['akvo', 'akvo.tressel', 'akvo.gui'],
license='GPL 4.0',
entry_points = {
'console_scripts': [
'akvo = akvo.gui.akvoGUI:main',
'akvoK0 = akvo.tressel.calcAkvoKernel:main',
'akvoQT = akvo.tressel.invertTA:main',
],
},
#cmdclass = cmdclass,
# for forced build of pyuic
cmdclass={
'build_ui': build_ui,
'build_py': custom_build_py,
},
# Mechanism to include auxiliary files
# commenting out may be necessary?
#include_package_data=True,
package_data={
'akvo.gui': ['*.png'],
'akvo.gui': ['*.ui'],
'': ['*.png'],
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)",
"Operating System :: OS Independent",
],
)
| LemmaSoftware/akvo | setup.py | setup.py | py | 2,591 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "setuptools.command.build_py.build_py",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "setuptools.command.build_py.build_py.run",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "setuptools.command.build_py.build_py",
"line_number": 17,
"... |
18481376081 | """
Extract activations from a model, almost completely and shamelessly copied from
https://github.com/dieuwkehupkes/diagnosing_lms/tree/interventions.
"""
# STD
from argparse import ArgumentParser
# EXT
from diagnnose.config.setup import ConfigSetup
from diagnnose.extractors.base_extractor import Extractor
from diagnnose.models.language_model import LanguageModel
from diagnnose.typedefs.corpus import LabeledCorpus, LabeledSentence
from diagnnose.models.import_model import import_model_from_json
from diagnnose.corpora.import_corpus import convert_to_labeled_corpus
def init_argparser() -> ArgumentParser:
    """Build the commandline parser: one group for a json config file, one
    group mirroring the same experiment options as individual flags."""
    parser = ArgumentParser()

    # Full setup can come from a single json config file...
    config_group = parser.add_argument_group(
        'From config file', 'Provide full experiment setup via config file')
    config_group.add_argument('-c', '--config',
                              help='Path to json file containing extraction config.')

    # ...or from individual flags. Required options are not marked required
    # here because they may be supplied through --config instead.
    cmd_group = parser.add_argument_group(
        'From commandline', 'Specify experiment setup via commandline arguments')
    cmd_group.add_argument('--model', help='Path to model parameters')
    cmd_group.add_argument('--vocab', help='Path to model vocabulary')
    cmd_group.add_argument('--lm_module', help='Path to folder containing model module')
    cmd_group.add_argument('--corpus_path', help='Path to labeled corpus')
    cmd_group.add_argument('--activation_names', nargs='*',
                           help='Activations to be extracted')
    cmd_group.add_argument('--output_dir',
                           help='Path to folder to which extracted embeddings will be written.')
    cmd_group.add_argument('--device',
                           help='(optional) Torch device name on which model will be run.'
                                'Defaults to cpu.')
    cmd_group.add_argument('--init_lstm_states_path',
                           help='(optional) Location of initial lstm states of the model. '
                                'If no path is provided zero-initialized states will be used at the'
                                'start of each sequence.')
    cmd_group.add_argument('--print_every', type=int,
                           help='(optional) Print extraction progress every n steps.'
                                'Defaults to 20.')
    cmd_group.add_argument('--cutoff', type=int,
                           help='(optional) Stop extraction after n sentences. '
                                'Defaults to -1 to extract entire corpus.')

    return parser
def subj_selection_func(pos: int, token: str, sentence: LabeledSentence):
    """Keep only activations that occur at the sentence's subject position."""
    return sentence.misc_info["subj_pos"] == pos
def pos_4_selection_func(pos: int, token: str, sentence: LabeledSentence):
    """Keep only activations at token position 4."""
    target_position = 4
    return pos == target_position
if __name__ == "__main__":
required_args = {'model', 'vocab', 'lm_module', 'corpus_path', 'activation_names', 'output_dir'}
arg_groups = {
'model': {'model', 'vocab', 'lm_module', 'device'},
'corpus': {'corpus_path'},
'init_extract': {'activation_names', 'output_dir', 'init_lstm_states_path'},
'extract': {'cutoff', 'print_every'},
}
argparser = init_argparser()
config_object = ConfigSetup(argparser, required_args, arg_groups)
config_dict = config_object.config_dict
model: LanguageModel = import_model_from_json(**config_dict['model'])
corpus: LabeledCorpus = convert_to_labeled_corpus(**config_dict['corpus'])
extractor = Extractor(model, corpus, **config_dict['init_extract'])
extractor.extract(**config_dict['extract'], selection_func=pos_4_selection_func)
# In case you want to extract average eos activations as well, uncomment this line
# extractor.extract_average_eos_activations(print_every=config_dict['extract']['print_every'])
| Kaleidophon/tenacious-toucan | src/replication/extract.py | extract.py | py | 4,218 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "diagnnose.typedefs.corpus.LabeledSentence",
"line_number": 60,
"usage_type": "name"
},
... |
18444114202 | import os
import sys
os.environ["PYTHONDONTWRITEBYTECODE"] = 'stobbit'
import time
import logging
import datetime
import argparse
import threading
from array import array, typecodes
try:
import pygame
except ImportError as e:
print("Can't import `pygame`. Did you remember to activate the virtual environment?")
sys.exit(5)
def get_args():
    """Parse commandline options for the game client/server and return the
    populated argparse namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--solo', action='store_true', help='Launch level with no network capabilities. Overrides server and client.')
    parser.add_argument('--server', action='store_true', help='Launch level with server capabilities. Overrides client.')
    parser.add_argument('--useserver', help='Launch level as client connecting to specified server IP address.')
    parser.add_argument('--debug', action='store_true', help='Display variables from the player')
    parser.add_argument('-u', '--username', required=True, help='Username')
    # BUG FIX: the --color help text was copy-pasted from --username.
    parser.add_argument('-c', '--color', required=True, help='Player color (e.g. RED, BLUE)')
    parser.add_argument('-r', '--resolution', help='resolution to run game at')
    return parser.parse_args()
def test(args):
    """Validate args (username, color) then launch the game.

    Despite the name this is the normal entry point, not a unit test.
    Raises ValueError when the requested color is not in CN.COLOR_DICT.
    """
    # Testing basic movement, weapons, environment here.
    os.system("cls")  # clear the console (Windows-only command)
    logging.error("Beginning the pygame test\n\n")
    logging.info(f"pygame version: {pygame.__version__}")
    global USERNAME
    USERNAME = args.username
    color = args.color.upper()
    if color not in CN.COLOR_DICT:
        raise ValueError("Must be one of BLACK, WHITE, RED, GREEN, BLUE, YELLOW, CYAN, MAGENTA")
    start_game(args)
def make_text(text, color, bgcolor, top, left):
    '''
    Create rough text box to blit onto screen
    '''
    rendered = CN.BASICFONT.render(text, True, color, bgcolor)
    bounds = rendered.get_rect()
    bounds.topleft = (top, left)
    return (rendered, bounds)
def get_screen_size(res):
    '''
    Parse a "WIDTHxHEIGHT" resolution string into an (int, int) tuple,
    falling back to the default CN.SCREEN_REZ when res is missing or
    malformed.
    '''
    try:
        w, h = res.split('x')
        return (int(w), int(h))
    except (AttributeError, ValueError):
        # AttributeError: res is None (flag omitted); ValueError: wrong
        # number of 'x'-separated parts or non-numeric dimensions.
        # (BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit.)
        return CN.SCREEN_REZ
def get_proper_screen_size(w, h):
    '''
    Shrink (w, h) to the largest exact 16:9 size that fits inside it.
    '''
    # BUG FIX (per the original TODO): the old implementation looped until a
    # float ratio compared exactly equal, which could spin forever when no
    # iterate hit the stored ratio bit-for-bit. Integer arithmetic gives an
    # exact 16:9 result in one step.
    unit = min(int(w) // 16, int(h) // 9)
    return 16 * unit, 9 * unit
def start_server():
    '''
    Run the server for the game. Eventually build the game loop here.
    '''
    from MMX_Combat.network.server import MMXServer
    from time import sleep
    server = MMXServer(localaddr=('', 12000))
    logging.debug(server)
    pygame.init()
    server_clock = pygame.time.Clock()
    # Pump network traffic at the game frame rate, forever.
    while True:
        server.CalcAndPump()
        server_clock.tick(CN.FPS)
def start_game(args):
    '''
    The actual game function. Imports necessary modules, builds level (should
    move this to server), makes player, and houses main game loop.
    '''
    # Import game-specific modules
    from MMX_Combat.player import BasePlayerObj, LocalPlayerObj, DummyJoystick
    from MMX_Combat.environment import BaseEnvironmentObj, TestLevel, ServerTestLevel
    from MMX_Combat.camera import Camera, complex_camera
    from MMX_Combat.network.server import MMXServer

    DEBUG = args.debug

    # Initialize key program elements
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    SCREEN = pygame.display.set_mode(get_screen_size(args.resolution), pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE)
    # fixed-size render target; scaled up to the actual window every frame
    CAMERA_SCREEN = pygame.Surface(CN.SCREEN_REZ)
    pygame.display.set_caption(f'MMX Combat: {args.username}')
    BASICFONT = pygame.font.Font(None, CN.BASICFONTSIZE)

    # Use the first attached joystick, or a keyboard-only stand-in
    if not pygame.joystick.get_init():
        pygame.joystick.init()
    attached_joysticks = pygame.joystick.get_count()
    if attached_joysticks:
        jstick = pygame.joystick.Joystick(0)
        jstick.init()
    else:
        jstick = DummyJoystick()

    # Build the level in solo / hosting / client mode
    if args.solo:
        level = TestLevel(CN.DEFAULT_SERVER_IP, CN.DEFAULT_SERVER_PORT, args.username, network=False)
    elif args.server:
        threading.Thread(target=start_server, daemon=True, name='server_thread').start()
        # time.sleep(3)
        level = ServerTestLevel(CN.DEFAULT_SERVER_IP, CN.DEFAULT_SERVER_PORT, args.username)
    elif args.useserver:
        level = ServerTestLevel(args.useserver, CN.DEFAULT_SERVER_PORT, args.username)
    # NOTE(review): if none of --solo/--server/--useserver is given, `level`
    # is unbound and the next line raises UnboundLocalError.

    MINIMAP_SCREEN = pygame.Surface((level.width, level.height))
    show_minimap = False
    all_players = []
    local_player = LocalPlayerObj(USERNAME, level, jstick, args.color)
    # local_player = LocalPlayerObj(USERNAME, level, jstick, player_color, {"MAX_SHOTS":2})
    all_players.append(local_player)
    level.player_names.add(USERNAME)
    # for remote_player in get_all_remote_players(level):
    #     all_players.append(remote_player)
    for player in all_players:
        player.walls = level.wall_list
        level.all_sprite_list.add(player)
        level.players.add(player)

    longest_proc_frame = 0
    frame_history = array('f')  # per-frame processing times, for the exit report
    player_camera = Camera(complex_camera, level.width, level.height)
    in_focus = True

    # main game loop
    while True:
        # os.system("cls")
        pressed_keys = []      # NOTE(review): appears unused
        pressed_keys_str = ''  # NOTE(review): appears unused

        # --- continuous (held) input: keyboard arrows or joystick hat ---
        cur_keys = pygame.key.get_pressed()
        cur_hat = jstick.get_hat(0)
        if cur_keys[pygame.K_LEFT] or cur_keys[pygame.K_RIGHT] or cur_hat[0] != 0:
            if cur_keys[pygame.K_LEFT] or cur_hat[0] == -1:
                local_player.go_left()
            if cur_keys[pygame.K_RIGHT] or cur_hat[0] == 1:
                local_player.go_right()
        else:
            local_player.stop()
        if local_player._check_control_down():
            local_player.duck()
        if local_player._check_control_jump():
            local_player.jump()
        if local_player._check_control_fire():
            local_player.charge()
        if local_player._check_control_dash():
            local_player.dash()
        # +/- keys adjust the global frame rate within [1, 30]
        if (cur_keys[pygame.K_KP_MINUS] or cur_keys[pygame.K_MINUS]) and CN.FPS > 1:
            CN.FPS -= 1
        if (cur_keys[pygame.K_KP_PLUS] or cur_keys[pygame.K_PLUS])and CN.FPS < 30:
            CN.FPS += 1
        show_minimap = True if cur_keys[pygame.K_TAB] else False

        # --- edge-triggered input: pygame event queue ---
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # Print frame-timing statistics before exiting
                logging.info(f" Longest frame process: {longest_proc_frame} seconds")
                frame_update = 1.0/CN.FPS
                logging.info(f" Average frame process: {sum(frame_history)/len(frame_history)} seconds")
                logging.info(f"Single frame update max: {frame_update} seconds")
                print("")
                logging.info(" Code status: {}".format("All good\n" if frame_update > longest_proc_frame else "Too heavy\n"))
                sys.exit()
            # elif event.type == pygame.ACTIVEEVENT:
            #     (window focus / minimize diagnostics — left disabled)
            elif event.type == pygame.VIDEORESIZE:
                # logging.debug(f"Resizing to {event.dict['size']}!")
                # keep the window at an exact 16:9 aspect ratio
                SCREEN = pygame.display.set_mode(get_proper_screen_size(*event.dict['size']), pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE)
            # Check for keys being released.
            elif event.type == pygame.KEYUP:
                # releasing one horizontal key: stop, or keep moving toward
                # the other key if it is still held (not while dashing)
                if event.key == pygame.K_LEFT and local_player.x_velocity < 0 and local_player.dashing < 0:
                    if not pygame.key.get_pressed()[pygame.K_RIGHT]:
                        local_player.stop()
                    else:
                        local_player.go_right()
                if event.key == pygame.K_RIGHT and local_player.x_velocity > 0 and local_player.dashing < 0:
                    if not pygame.key.get_pressed()[pygame.K_LEFT]:
                        local_player.stop()
                    else:
                        local_player.go_left()
                if event.key == pygame.K_DOWN:
                    local_player.standup()
                if event.key == pygame.K_SPACE:
                    local_player.allow_jump()
                if (event.key == pygame.K_LSHIFT and not cur_keys[pygame.K_RSHIFT]) or (event.key == pygame.K_RSHIFT and not cur_keys[pygame.K_LSHIFT]):
                    local_player.allow_dash()
                if event.key == pygame.K_LCTRL and not cur_keys[pygame.K_RCTRL]:
                    # releasing fire launches the charged shot
                    if local_player.charge_level > 2:
                        local_player.fire()
            # Check for keys being pressed this frame.
            elif event.type == pygame.KEYDOWN:
                if event.key in (pygame.K_LCTRL, pygame.K_RCTRL):
                    local_player.fire()
                elif event.key == pygame.K_z:
                    local_player.alt_fire()
                elif event.key == pygame.K_i:
                    level.print_data_cache()
            elif event.type == pygame.JOYBUTTONDOWN:
                # logging.debug(f"Joystick button {event.button} pressed.")
                if event.button == 2:
                    local_player.fire()
            elif event.type == pygame.JOYBUTTONUP:
                # logging.debug(f"Joystick button {event.button} released.")
                if event.button == 0:
                    local_player.allow_jump()
                if event.button == 1:
                    local_player.allow_dash()
                if event.button == 2:
                    if local_player.charge_level > 5:
                        local_player.fire()
                    local_player.discharge()
            # Now we check for whether the hat is being pressed/released
            elif event.type == pygame.JOYHATMOTION:
                if event.value[0] == 0:
                    local_player.stop()
                if event.value[1] == 0:
                    local_player.standup()

        CAMERA_SCREEN.fill(CN.BLACK)
        MINIMAP_SCREEN.fill(CN.BLACK)

        # Update info on all players (timed, for the frame-budget report)
        proc_start_time = datetime.datetime.now()
        if local_player not in level.all_sprite_list:
            local_player.update()
        level.chat_client.Loop()
        level.update_data()
        # TODO: check for conflicts between local_player local data and server data
        level.all_sprite_list.update()
        player_camera.update(local_player)
        proc_end_time = (datetime.datetime.now() - proc_start_time).total_seconds()
        frame_history.append(proc_end_time)
        if proc_end_time > longest_proc_frame:
            longest_proc_frame = proc_end_time

        # Apply camera
        for s in level.all_sprite_list:
            CAMERA_SCREEN.blit(s.image, player_camera.apply(s))
            MINIMAP_SCREEN.blit(s.image, s.rect)
        SCREEN.blit(pygame.transform.scale(CAMERA_SCREEN, [*SCREEN.get_size()], SCREEN), [0,0])
        if show_minimap:
            # translucent minimap in the bottom-right corner while TAB is held
            MINIMAP_SCREEN.set_alpha(128)
            SCREEN.blit(pygame.transform.scale(MINIMAP_SCREEN, [level.block_width*4, level.block_height*4]),
                        [SCREEN.get_size()[0]-(level.block_width*4), SCREEN.get_size()[1]-(level.block_height*4)])

        # Display debug and/or controls
        if DEBUG:
            text_surf, text_rect = make_text("{}: {}".format('id',str(local_player.__dict__['id'])), CN.WHITE, CN.BLACK, 50, 10)
            SCREEN.blit(text_surf, text_rect)
            i = 1
            text_surf, text_rect = make_text("{}: {}".format('FPS',str(CN.FPS)), CN.WHITE, CN.BLACK, 50, 10+(i*1.1*CN.BASICFONTSIZE))
            SCREEN.blit(text_surf, text_rect)
            i+=1
            # Dump simple, public, non-callable player attributes one per line
            for k,v in sorted(local_player.__dict__.items()):
                if k != 'id' and not callable(v) and not k.startswith("_") and k[0] != k[0].upper() and isinstance(v, (int, bool, list)):
                    text_surf, text_rect = make_text("{}: {} ({})".format(k,str(v), type(v)), CN.WHITE, CN.BLACK, 50, 10+(i*1.1*CN.BASICFONTSIZE))
                    SCREEN.blit(text_surf, text_rect)
                    i += 1
        else:
            text = """Controls:
Movement: Arrow Keys or Controller D-Pad
Jump: Spacebar or bottom Controller button (X)
Dash: Shift or right Controller button (O)
Fire: Control or left Controller button ([])"""
            for i, line in enumerate(text.split("\n")):
                text_surf, text_rect = make_text(line.strip(), CN.WHITE, CN.BLACK, 50, 10+(i*1.1*CN.BASICFONTSIZE))
                SCREEN.blit(text_surf, text_rect)

        pygame.display.flip()
        # logging.debug(f'{threading.activeCount()} active threads!')
        FPSCLOCK.tick(CN.FPS)
if __name__ == "__main__":
    # Make the package root (two directory levels above this file) importable
    # before pulling in the game's constants module.
    mmx_main_path = os.path.normpath(os.path.join(os.path.realpath(__file__), "..", ".."))
    if mmx_main_path not in sys.path:
        sys.path.append(mmx_main_path)
    import MMX_Combat.constants as CN
    logging.basicConfig(level=logging.DEBUG, format=CN.LOG_FMT)
    # Parse CLI arguments and hand off to the test entry point.
    args = get_args()
    # logging.debug(args)
    test(args)
| joshuatbadger/mmx_combat | __init__.py | __init__.py | py | 14,360 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.system",
"line... |
27801575276 | #!/usr/bin/python3
# This software is distributed under the GNU Lesser General Public License (https://www.gnu.org/licenses/lgpl-3.0.en.html)
# WARNING: This is not a software to be used in production. I write this software for teaching purposes.
# TO DO: There lots of things to be done. This is experimental software and will never finish. But I will do the followings first:
# 1) Save SQL Query as view - done 2017/04/11
# 2) Modify Table is not implemented. Will implement whenever I find time
# 3) Editing SQLite parameters
# 4) Creating Index
# 5) SQL syntax highlight
# 6) Currently all changes done on database. Write Changes, Revert Changes
# 7) ….
# 8) Those features queried by users
import sys
import os
from PyQt5 import QtWidgets, QtCore, QtGui, Qt, QtSql, uic
from apps.pyqt_sqlite.ui import ModifyTableDialog,TYPE_DICT,addUi
from functools import partial
ui_path = os.path.dirname(os.path.abspath(__file__))
mainwindowUi = uic.loadUiType(os.path.join(ui_path, "mainwindow.ui"))[0]
#mainwindowUi = uic.loadUiType("main_window.ui")[0]
app = QtWidgets.QApplication(sys.argv)
__appname__='PyQt SqLite Database Browser'
class SqliteWindow(QtWidgets.QMainWindow, mainwindowUi):
    """Main window of the SQLite browser.

    Shows the database's tables/views in a tree, a data grid backed by an
    editable QSqlTableModel, and a free-form SQL tab. A single QSQLITE
    connection (``current_database``) backs everything.
    """
    # (type, name) of the table/view currently selected in the tree, or None.
    selectedTable=None
    # Starting directory for open/save file dialogs.
    work_directory=os.path.dirname(os.path.realpath(__file__))
    # Basename of the currently opened database file.
    current_database_file=''
    # QSqlDatabase handle; created once in __init__.
    current_database=None
    def __init__(self, *args):
        """Set application metadata, build the UI and restore recent files."""
        super(SqliteWindow, self).__init__(*args)
        QtCore.QCoreApplication.setOrganizationName("mbaser")
        QtCore.QCoreApplication.setOrganizationDomain("foo.org")
        QtCore.QCoreApplication.setApplicationVersion("0.0.1")
        self.current_database = QtSql.QSqlDatabase.addDatabase('QSQLITE')
        self.setupUi(self)
        self.tree_model=QtGui.QStandardItemModel()
        self.treeView.setModel(self.tree_model)
        # Recent-file list persists across runs via QSettings.
        self.settings=QtCore.QSettings()
        self.recent_files=self.settings.value('recent_files',[])
        self.update_recent_files()
    def setTitle(self, title=None):
        """Set the window title, appending the database path when given."""
        t_str="PyQt SqLite"
        if title:
            t_str += ' [%s]' % title
        self.setWindowTitle(t_str)
    @QtCore.pyqtSlot()
    def on_actionAbout_triggered(self):
        """Show the About box."""
        QtWidgets.QMessageBox.about(self, "About this software","This software is written for teaching purposes. Don't use in production!!! My purpose is to demonstrate how PyQt widgets can be used to develop software.\n\n Main developer: Mustafa Baser <mbaser@mail.com>")
    @QtCore.pyqtSlot()
    def on_commandLinkButton_clicked(self):
        """Open the table-modification dialog."""
        dialog=ModifyTableDialog(self)
        dialog.exec_()
    @QtCore.pyqtSlot()
    def on_commandLinkButton_3_clicked(self):
        """Drop the selected table/view after user confirmation."""
        if self.selectedTable:
            # NOTE(review): selectedTable is (type, name); the message below
            # interpolates (name, type, type) -- the argument order looks
            # swapped relative to the wording. Confirm the intended text.
            result=QtWidgets.QMessageBox.question(self, "Warning",
                "You are just deleting %s %s. You will lost all the data in %s. Dou you want to delete?" % (
                    self.selectedTable[1],
                    self.selectedTable[0],
                    self.selectedTable[0]
                ))
            if result==QtWidgets.QMessageBox.Yes:
                # Executes e.g. DROP table `name`; SQL keywords are
                # case-insensitive, so the lowercase type works.
                self.current_database.exec("DROP %s `%s`" % (self.selectedTable[0], self.selectedTable[1]))
                if not self.error_check(self.current_database):
                    self.update_tables_table()
    @QtCore.pyqtSlot('QModelIndex', int)
    def on_treeView_expanded(self, ind):
        # NOTE(review): the slot signature declares two arguments but the
        # method accepts one; currently an intentional no-op placeholder.
        pass
    @QtCore.pyqtSlot('QModelIndex')
    def on_treeView_clicked(self, ind):
        """Track the clicked tree item; enable modify/delete buttons only
        when a table or view row (one tagged with ``tableType``) is hit."""
        item=self.tree_model.itemFromIndex(ind.sibling(0,0))
        if hasattr(item,"tableType"):
            self.selectedTable=(item.tableType,ind.sibling(ind.row(),0).data())
            self.commandLinkButton_2.setEnabled(True)
            self.commandLinkButton_3.setEnabled(True)
        else:
            self.commandLinkButton_2.setEnabled(False)
            self.commandLinkButton_3.setEnabled(False)
    def update_tables_table(self):
        """Rebuild the tree model and the table combo box from sqlite_master."""
        self.tree_model.clear()
        self.tree_model.setHorizontalHeaderLabels(['Name', 'Type','Schema'])
        for typ in ('table','view'):
            q=self.current_database.exec("SELECT name FROM sqlite_master WHERE type = '%s'" % typ)
            self.error_check(self.current_database)
            tables=[]
            while q.next():
                tables.append(q.value(0))
            # Parent node, e.g. "Tables (3)" / "Views (1)".
            tab_par = QtGui.QStandardItem('%ss (%d)' % (typ.title(),len(tables)))
            for tb in tables:
                if typ=='table': c_icon="sc_inserttable.png"
                else: c_icon="sc_dbviewtablenames.png"
                self.comboBox.addItem(QtGui.QIcon("icons/"+c_icon), tb)
                tb_name=QtGui.QStandardItem(tb)
                # Tag the item so on_treeView_clicked can recognise it.
                tb_name.tableType=typ
                tb_type=QtGui.QStandardItem(typ.title())
                # Fetch the CREATE statement and flatten it to one line.
                q=self.current_database.exec("SELECT sql FROM sqlite_master WHERE tbl_name = '%s' AND type = '%s'" % (tb,typ))
                tb_schema_str=''
                if q.next():
                    tb_schema_str=q.value(0)
                    tb_schema_str=tb_schema_str.replace("\n", " ")
                    tb_schema_str=tb_schema_str.replace("\t", " ")
                tb_schema=QtGui.QStandardItem(tb_schema_str)
                # Child rows: one (column name, column type) pair per field.
                driver=self.current_database.driver()
                rec=driver.record(tb)
                for i in range(rec.count()):
                    col_name=QtGui.QStandardItem(rec.field(i).name())
                    type_id=rec.field(i).type()
                    if type_id in TYPE_DICT: type_str=TYPE_DICT[type_id]
                    else: type_str=str(type_id)
                    col_type=QtGui.QStandardItem(type_str)
                    tb_name.appendRow([col_name, col_type])
                tab_par.appendRow([tb_name, tb_type, tb_schema])
            self.tree_model.appendRow(tab_par)
    def error_check(self, model):
        """Show a warning and return True when *model*'s last DB operation
        failed; returns None (falsy) otherwise."""
        error = model.lastError()
        if error.isValid():
            self.show_warning(error.text())
            return True
    @QtCore.pyqtSlot()
    def on_queryExecButton_pressed(self):
        """Run the SQL in the query editor and show the result set."""
        self.queryTableView.setModel(QtSql.QSqlQueryModel())
        query = self.queryTextEdit.toPlainText()
        model = self.queryTableView.model()
        model.setQuery(query)
        self.error_check(model)
    @QtCore.pyqtSlot(str)
    def on_comboBox_currentIndexChanged(self,tbl_name):
        """Attach an editable model for the chosen table to the data grid."""
        if tbl_name:
            model = QtSql.QSqlTableModel()
            # Quote the name so tables with special characters still load.
            model.setTable('"'+tbl_name+'"')
            model.setEditStrategy(QtSql.QSqlTableModel.OnFieldChange)
            model.select()
            self.error_check(model)
            self.tableView.setModel(model)
    def show_warning(self, text):
        """Display a database error message to the user."""
        QtWidgets.QMessageBox.warning(self, "Info", "Could not execute query. Error message from database engine is:\n"+ text)
    @QtCore.pyqtSlot()
    def on_newRecordButton_pressed(self):
        """Commit pending edits, then append an empty row to the grid."""
        model = self.tableView.model()
        model.submitAll()
        result=model.insertRows(model.rowCount(), 1)
        if not result:
            self.error_check(model)
    @QtCore.pyqtSlot()
    def on_reloadTableButton_pressed(self):
        """Re-run the current table's SELECT to refresh the grid."""
        self.tableView.model().select()
    @QtCore.pyqtSlot()
    def on_deleteRecordButton_pressed(self):
        """Delete the row under the cursor and refresh the grid."""
        model = self.tableView.model()
        model.removeRow(self.tableView.currentIndex().row())
        model.select()
    @QtCore.pyqtSlot()
    def on_actionClose_triggered(self):
        """Menu action: close the current database."""
        self.closeDatabase()
    @QtCore.pyqtSlot()
    def on_actionExit_triggered(self):
        """Menu action: quit the application."""
        self.close()
    @QtCore.pyqtSlot()
    def on_actionNew_triggered(self):
        """Menu action: create (and open) a new database file."""
        save_file_dialog=QtWidgets.QFileDialog.getSaveFileName(self, "Name of new database", self.work_directory)
        if save_file_dialog[0]:
            self.loadDatabase(save_file_dialog[0])
    @QtCore.pyqtSlot()
    def on_actionOpen_triggered(self):
        """Menu action: pick and open an existing database file."""
        self.fileDialog = QtWidgets.QFileDialog(self)
        self.fileDialog.setDirectory(self.work_directory)
        result=self.fileDialog.getOpenFileName()
        if result[0]:
            self.loadDatabase(result[0])
    @QtCore.pyqtSlot()
    def on_saveQueryAsView_pressed(self):
        """Persist the last successful query as a database VIEW."""
        model = self.queryTableView.model()
        if model:
            if model.rowCount():
                view_name, result = QtWidgets.QInputDialog.getText(self, __appname__, 'Enter vieww name:')
                if result:
                    query = self.queryTextEdit.toPlainText()
                    query = 'CREATE VIEW {0} AS\n{1}'.format(view_name, query)
                    if not self.execute_query(query):
                        self.update_tables_table()
    def closeDatabase(self):
        """Close the connection and reset all dependent widgets/models."""
        if self.current_database:
            if self.current_database.isOpen():
                self.tree_model.clear()
                self.comboBox.clear()
                # NOTE(review): tableView.model() may be None if no table was
                # ever selected; tbm.clear() would then raise -- confirm.
                tbm=self.tableView.model()
                tbm.clear()
                self.current_database.close()
                self.setTitle()
                self.commandLinkButton.setEnabled(False)
                self.actionClose.setEnabled(False)
                print('Clearing "Execute SQL Widgets"')
                #clear "Execute SQL Widgets"
                self.queryTextEdit.clear()
                tableModel=self.queryTableView.model()
                if tableModel:
                    tableModel.clear()
                    self.queryTableView.setModel(None)
                self.queryResultText.clear()
    def loadDatabase(self, db_file, *args):
        """Open *db_file*, populate the UI and promote it in recent files."""
        self.closeDatabase()
        self.tree_model.removeRows(0, self.tree_model.rowCount())
        self.work_directory=os.path.dirname(db_file)
        self.current_database_file=os.path.basename(db_file)
        self.current_database.setDatabaseName(db_file)
        if self.current_database.open():
            self.commandLinkButton.setEnabled(True)
            self.actionClose.setEnabled(True)
            self.setTitle(db_file)
            self.update_tables_table()
            # Move (or insert) the file to the front of the MRU list.
            if db_file in self.recent_files:
                self.recent_files.remove(db_file)
            self.recent_files.insert(0,db_file)
            self.update_recent_files()
    def update_recent_files(self):
        """Rebuild the "Open Recent" menu from the MRU list."""
        self.menuOpen_Recent.clear()
        for i, rc in enumerate(self.recent_files):
            # partial() binds the concrete path to the triggered handler.
            recent_file_action=QtWidgets.QAction('&%d %s' % (i+1, rc), self, triggered=partial(self.loadDatabase, rc))
            self.menuOpen_Recent.addAction(recent_file_action)
    def execute_query(self, query):
        """Run *query* on the open database; truthy return means failure."""
        self.current_database.exec(query)
        return self.error_check(self.current_database)
    def closeEvent(self, event):
        """Persist the recent-file list and close the DB on window close."""
        self.settings.setValue('recent_files', self.recent_files)
        self.closeDatabase()
main = SqliteWindow()
main.show()
sys.exit(app.exec_())
| esmallah/data_assistant | config_pyqt/ui/pyqt_sqlite.py | pyqt_sqlite.py | py | 11,277 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PyQt5.uic.loadUiType",
... |
35423259791 | # Author: Xinyi Wang
# Date: 2021/10/05
import cv2
from PIL import Image
import numpy as np
import pandas as pd
import os
import torch
import torch.nn.functional as F
import torchvision.models as models
import scipy.io as io
import sys
sys.path.append('..')
from utils import *
from cam.scorecam import *
def fsam(video_name, framerate):
    """
    Compute the mean Score-CAM saliency map over sampled frames of a video.

    Every ``framerate``-th frame is decoded, written to a temporary JPEG,
    scored with a pretrained VGG16 Score-CAM model, and the per-frame
    saliency maps are averaged.

    Args:
        video_name: path of the input video file.
        framerate: sample one frame out of every ``framerate`` frames.

    Returns:
        2-D numpy array with the mean saliency map; also saved to
        ../../tmp/tempmat_path/samp.mat under the key 'mean_samp'.

    Raises:
        ValueError: if no frame could be sampled from the video.
    """
    torch.cuda.empty_cache()
    # VGG16 Score-CAM over the last conv layer (features_29), 224x224 input.
    vgg = models.vgg16(pretrained=True).eval()
    vgg_model_dict = dict(type='vgg16', arch=vgg, layer_name='features_29', input_size=(224, 224))
    vgg_scorecam = ScoreCAM(vgg_model_dict)

    cap = cv2.VideoCapture(video_name)
    print('framerate:', framerate)

    frame_total = 0    # total frames read (renamed from `sum`, which shadowed the builtin)
    image_num = 0      # frames actually sampled and scored
    sum_samp = 0       # running sum of per-frame saliency maps
    # BUG FIX: the original bound `cap.isOpened` without calling it, so the
    # loop condition was always truthy; call the method instead.
    while cap.isOpened():
        frame_total += 1
        frame_state, frame = cap.read()
        if frame_state and frame_total % framerate == 0:
            # BGR -> RGB -> PIL -> numpy -> BGR round trip mirrors the
            # original preprocessing before writing the JPEG to disk.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = Image.fromarray(np.uint8(frame))
            frame = np.array(frame)
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            image_num += 1
            file_name = '../../tmp/video_image_temp/' + str(image_num) + '.jpg'  # temporary storage path
            cv2.imwrite(file_name, frame, [cv2.IMWRITE_JPEG_QUALITY, 100])
            input_image = load_image(file_name)
            input_ = apply_transforms(input_image)
            if torch.cuda.is_available():
                input_ = input_.cuda()
            scorecam_map = vgg_scorecam(input_)
            smap = scorecam_map.cpu().numpy()  # tensor -> numpy
            sum_samp += smap
            os.remove(file_name)  # clean up the temporary frame image
            torch.cuda.empty_cache()
        elif not frame_state:
            break
    cap.release()  # release the capture handle (missing in the original)
    print('Complete the extraction of video frames!')
    # Guard the division: the original raised ZeroDivisionError for videos
    # shorter than `framerate` frames.
    if image_num == 0:
        raise ValueError('No frames were sampled from %r' % video_name)
    mean_samp = np.squeeze(sum_samp / image_num)
    io.savemat('../../tmp/tempmat_path/samp.mat', {'mean_samp': mean_samp})
    torch.cuda.empty_cache()
    return mean_samp
if __name__ == "__main__":
    torch.cuda.empty_cache()
    # Demo run: sample one frame in every 24 of this clip and print the
    # averaged saliency map.
    video = "C://Users//um20242//OneDrive - University of Bristol//Documents//PycharmProjects//UoB//RVS-resize//tmp//temp_resize//tmp.avi"
    mean_samp = fsam(video, 24)
    print(mean_samp)
{
"api_name": "sys.path.append",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.empty_cache",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
... |
22351360896 | """
Module to call all API endpoints
Author: Moises Gonzalez
Date: 02/Jul/2023
"""
import requests
import json
import logging
from pathlib import Path
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Base URL of the locally served API.
URL = "http://127.0.0.1:8000"
# Test-data and model-output locations come from config.json.
with open('config.json', 'r') as f:
    config = json.load(f)
test_data_path = Path(config['test_data_path'])
output_model_path = Path(config['output_model_path'])
test_data_file = test_data_path / "testdata.csv"
def call_api_endpoint(url: str = URL):
    """
    Call every API endpoint and persist the combined responses.

    Each endpoint's status code and decoded body are concatenated in the
    same layout as before and written to ``apireturns.txt`` under the
    model output directory.

    Args:
        url: base URL of the running API service.
    """
    logger.info("Calling all functions")
    # Endpoint label -> path; the prediction endpoint needs the test data file.
    endpoints = {
        "predictions": f"/prediction?data={str(test_data_file)}",
        "scoring": "/scoring",
        "summarystats": "/summarystats",
        "diagnostics": "/diagnostics",
    }
    sections = []
    for name, path in endpoints.items():
        # timeout added so a hung service cannot block the script forever.
        response = requests.get(url + path, timeout=60)
        sections.append(
            f"\n{name} status_code-> {response.status_code}\n"
            f"{response.content.decode('utf-8')}"
        )
    responses = "\n".join(sections)
    api_returns = output_model_path / "apireturns.txt"
    with open(api_returns, "w") as txt:
        txt.write(responses)
    logger.info(f"API responses saved to -> {api_returns}")
if __name__ == "__main__":
    # Run directly: hit every endpoint of the locally served API.
    call_api_endpoint()
| moicesc/mldevops-dynamic-risk-assessment | apicalls.py | apicalls.py | py | 1,662 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.load",
... |
34189932533 | import numpy as np
import sys
import scipy.spatial.distance as dist
import pyparticles.forces.force as fr
class VanDerWaals(fr.Force):
    """Placeholder Van der Waals force between particles.

    Holds a Hamaker-style constant and per-particle values; ``update_force``
    currently only refreshes the pairwise distance matrix and returns the
    (still zero) acceleration array.
    """

    def __init__(self, size, dim=3, m=None, Consts=1.0):
        self.__dim = dim
        self.__size = size
        self.__C = Consts  # Hamaker coefficient (A)
        self.__A = np.zeros((size, dim))    # per-particle accelerations
        self.__F = np.zeros((size, dim))    # per-particle forces
        self.__Fm = np.zeros((size, size))  # pairwise force magnitudes
        self.__R = np.zeros((size, 1))
        # BUG FIX: the pairwise distance matrix was never allocated, so
        # update_force() raised AttributeError on first use.
        self.__D = np.zeros((size, size))
        # BUG FIX: the original tested `m != None` and then called the
        # misspelled `set_messes`, which does not exist.
        if m is not None:
            self.set_masses(m)

    def set_masses(self, m):
        """Broadcast per-particle values into the internal (size, 1) array."""
        self.__R[:] = m

    def update_force(self, p_set):
        """Refresh the pairwise Euclidean distance matrix from ``p_set.X``
        and return the acceleration array (force law not yet implemented)."""
        self.__D[:] = dist.squareform(dist.pdist(p_set.X, 'euclidean'))
        return self.__A

    def getA(self):
        return self.__A

    A = property(getA)

    def getF(self):
        return self.__F

    F = property(getF)
| simon-r/PyParticles | pyparticles/forces/van_der_waals_force.py | van_der_waals_force.py | py | 987 | python | en | code | 77 | github-code | 1 | [
{
"api_name": "pyparticles.forces.force.Force",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pyparticles.forces.force",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 15,
"usage_type": "call"
},
{
"api_name":... |
24761517333 | from .serializers import *
import logging
from rest_framework import generics, status
from rest_framework.response import Response
from rest_framework_simplejwt.tokens import RefreshToken
logger = logging.getLogger(__name__)
def get_tokens_for_user(user):
    """Return a fresh JWT pair (refresh + access) for *user*."""
    refresh = RefreshToken.for_user(user)
    return {
        'refresh': str(refresh),
        'access': str(refresh.access_token),
    }
class RegisterUserAPIView(generics.CreateAPIView):
    """POST endpoint that registers a user and returns profile data + JWTs."""
    serializer_class = RegisterUserSerializer
    def post(self, request, *args, **kwargs):
        """Validate the payload, create the user and attach a token pair.

        Returns 201 with the serialized user and tokens, 400 on validation
        errors, 500 on unexpected failures.
        """
        try:
            serializer = RegisterUserSerializer(data=request.data)
            if serializer.is_valid():
                self.perform_create(serializer)
                return_data = serializer.data
                return_data['token'] = get_tokens_for_user(serializer.instance)
                response = Response(return_data, status=status.HTTP_201_CREATED)
                return response
            return Response({"message": serializer.errors}, status=status.HTTP_400_BAD_REQUEST)
        except Exception as e:
            # NOTE(review): e.__cause__ is often None, so both the cause and
            # the exception itself are logged; the 500 body may carry None.
            logger.error(e.__cause__)
            logger.error(e)
            return Response({"message": e.__cause__}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class UserLoginAPIView(generics.GenericAPIView):
    """POST endpoint that authenticates a user and returns profile + JWTs."""
    serializer_class = UserLoginSerializer
    def post(self, request, *args, **kwargs):
        """Validate credentials and return the serialized user with tokens.

        Returns 200 on success, 400 on validation errors, 500 on
        unexpected failures.
        """
        try:
            serializer = UserLoginSerializer(data=request.data)
            if serializer.is_valid():
                ret_data = UserSerializers(serializer.instance, many=False).data
                ret_data['token'] = get_tokens_for_user(serializer.instance)
                resp = Response(ret_data, status=status.HTTP_200_OK)
                return resp
            return Response({"message": serializer.errors}, status=status.HTTP_400_BAD_REQUEST)
        except Exception as e:
            # NOTE(review): e.__cause__ is often None -- see the register view.
            logger.error(e.__cause__)
            logger.error(e)
            return Response({"message": e.__cause__}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
| slalit360/DRF-assignment | account/views.py | views.py | py | 2,047 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "rest_framework_simplejwt.tokens.RefreshToken.for_user",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "rest_framework_simplejwt.tokens.RefreshToken",
"line_number": 11,
... |
import numpy as np
import argparse
import cv2

# Parse the required image path from the command line.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True, help = "Image pathname")
args = vars(ap.parse_args())

# Load the image; cv2.imread returns None (no exception) for unreadable
# paths, which would previously surface as a cryptic cvtColor error.
image = cv2.imread(args["image"])
if image is None:
    raise SystemExit("Could not read image: {}".format(args["image"]))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Histogram equalization spreads the grayscale intensities over the range.
eq = cv2.equalizeHist(image)

# Show the original and equalized images side by side.
cv2.imshow("Histogram Equalisation", np.hstack([image, eq]))
cv2.waitKey(0)
| muhsinali/opencv-book | chapter7/equalize.py | equalize.py | py | 372 | python | en | code | 27 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
... |
from django.contrib import admin
from django.urls import path, include
from Crud import views
# Project-level routing: the admin site, one include per app, and the
# bare root served by the project's index view.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('company/', include('company.urls')),
    path('department/', include('department.urls')),
    path('employee/',include('employee.urls')),
    path('project/',include('project.urls')),
    path('', views.index),
]
| zala49/CRUD-Django | Crud/urls.py | urls.py | py | 371 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "dja... |
import curses
import sys
import time

# Two progress-bar demos; the curses variant is kept but disabled.
if False:

    def pbar(window):
        """Render a 10-step progress bar inside a curses window."""
        for i in range(10):
            j = str(i * 10)
            window.addstr(
                0,
                0,
                "Time to download "
                + "["
                + ("#" * i)
                + ("-" * (9 - i))
                + "]"
                + " " * len(j)
                + j
                + "%",
            )
            window.addstr(10, 10, "[" + ("#" * i) + ("-" * (9 - i)) + "]")
            window.refresh()
            time.sleep(0.5)

    curses.wrapper(pbar)
else:
    # Plain-terminal bar: '\r' rewinds to the start of the line so each
    # update overwrites the previous one.
    for i in range(11):
        sys.stdout.write("\r[{0}{1}] {2}%".format("#" * i, "-" * (10 - i), i * 10))
        sys.stdout.flush()
        time.sleep(0.5)
    # BUG FIX: a bare `print` is a no-op expression in Python 3; call it
    # to emit the trailing newline after the bar completes.
    print()
| Peilonrayz/dota_binds | src/dota_binds/progressbar.py | progressbar.py | py | 756 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "time.sleep",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "curses.wrapper",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_num... |
73352650273 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 11 16:03:50 2022
@author: ethan
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 19 14:05:58 2022
@author: ethan
"""
from datetime import date
from datetime import datetime as dt
from time import time
import math
import numpy as np
import pandas as pd
from scipy import stats
from bokeh.io import export_png, output_file, show
from bokeh.plotting import figure
from bokeh.models import NumeralTickFormatter, LabelSet, ColumnDataSource
from bokeh.models.tickers import FixedTicker
from bokeh.layouts import row, column
# Daily series: sheet values are percentages, divided by 100 into fractions.
df1 = (
    pd.read_excel(
        "data.xlsx", sheet_name="Sheet1", index_col="date_daily", parse_dates=True
    ).dropna()
    / 100
)
df2 = pd.read_excel("data.xlsx", sheet_name="Sheet2").dropna()
# MTGDATE digits are split as month / day / 2-digit year; assumes all
# years are 19xx -- TODO confirm against the source data.
date_convert = lambda x: dt(
    year=int(x[-2:]) + 1900, day=int(x[-4:-2]), month=int(x[:-4])
)
df2.index = df2["MTGDATE"].astype(str).apply(date_convert)
df2 = df2 / 100
print(df2)
# NIU brand colors as RGB tuples.
NIUred = (200, 16, 46)
NIUpantone = (165, 167, 168)
#%%
def set_up(x, y, truncated=True, margins=None):
    """
    Prepare plot data and axis ranges for the chart helpers.

    Args:
        x, y: pandas Series (or Index) of x/y data.
        truncated: when True, push the bottom of the y-range well below the
            data minimum so the series does not sit on the axis.
        margins: optional symmetric padding added to both ranges.

    Returns:
        Tuple ``(x, y, xrng, yrng)`` with NaNs dropped from x and y.
    """
    # Bottom of the y-range: the data minimum, or a point below it.
    if truncated:
        bottom = (3 * y.min() - y.max()) / 2
    else:
        bottom = y.min()
    if margins is None:  # fixed: `== None` replaced with the identity test
        xrng = (x.min(), x.max())
        yrng = (bottom, y.max())
    else:
        xrng = (x.min() - margins, x.max() + margins)
        yrng = (bottom - margins, y.max() + margins)
    # Ranges are computed before dropping NaNs; pandas min/max skip NaNs,
    # so the result is unchanged.
    x = x.dropna()
    y = y.dropna()
    return (x, y, xrng, yrng)
# Chart of an approximately stationary time series, e.g. PCE-Core inflation from 2008 to 2020
def chart1(df, series, title, name):
    """Line chart of one column of *df* against its datetime index.

    The figure is exported as a PNG to *name* and returned so the caller
    can compose it into a layout.
    """
    xdata, ydata, xrng, yrng = set_up(df.index, df[series], truncated=False)
    scale = 1
    p = figure(
        width= int(1000 * scale),
        height=int(666 * scale),
        title=title,
        x_axis_label="Date",
        x_axis_type="datetime",
        y_range=yrng,
        x_range=xrng,
        toolbar_location=None,
    )
    # Zero reference line plus the series itself in NIU red.
    p.line(xrng, [0, 0], color="black", width=1)
    p.line(xdata, ydata, color=NIUred, width=2)
    p.xaxis[0].ticker.desired_num_ticks = 10
    # p.legend.location = "bottom_right"
    p.xgrid.grid_line_color = None
    p.ygrid.grid_line_color = None
    p.yaxis.formatter = NumeralTickFormatter(format="0.0%")
    #p.xaxis.major_label_orientation = math.pi/4
    p.title.text_font_size = "16pt"
    p.xaxis.axis_label_text_font_size = "14pt"
    p.yaxis.axis_label_text_font_size = "14pt"
    # p.legend.label_text_font_size = "14pt"
    export_png(p, filename=name)
    return p
# Chart of a regression e.g. inflation vs money supply
def chart2(df):
    """Scatter plot of RGDP growth against the federal funds rate with a
    fitted OLS regression line; exports imgs/ffrRgdp.png and returns the
    figure."""
    xdata, ydata, xrng, yrng = set_up(
        df["DFF"], df["GDPC1_PCA"], truncated=False, margins=0.005
    )
    xrng = (0, xrng[1])
    p = figure(
        width=700,
        height=500,
        title="Do Rate Hikes Decrease Real GDP? 1954 to 2019",
        x_axis_label="Federal Funds Effective Rate (Quarterly Average)",
        y_axis_label="RGDP Growth (Annualized)",
        y_range=yrng,
        x_range=xrng,
    )
    # Axis reference lines through the origin.
    p.line(xrng, [0, 0], color="black", width=3)
    p.line([0, 0], yrng, color="black", width=3)
    # OLS fit; correlation and slope are surfaced in the legend label.
    slope, intercept, r_value, p_value, std_err = stats.linregress(xdata, ydata)
    leg = "R = {:.4f}, Slope = {:.4f}".format(r_value, slope)
    p.line(xdata, xdata * slope + intercept, legend=leg, color=NIUpantone, width=4)
    p.circle(xdata, ydata, color=NIUred, size=5)
    p.xaxis[0].ticker.desired_num_ticks = 10
    p.xgrid.grid_line_color = None
    p.ygrid.grid_line_color = None
    p.xaxis.formatter = NumeralTickFormatter(format="0.0%")
    p.yaxis.formatter = NumeralTickFormatter(format="0.0%")
    p.title.text_font_size = "16pt"
    p.xaxis.axis_label_text_font_size = "14pt"
    p.yaxis.axis_label_text_font_size = "14pt"
    p.legend.label_text_font_size = "14pt"
    export_png(p, filename="imgs/ffrRgdp.png")
    return p
# Render both indicator charts stacked in one column (each call also
# exports its own PNG as a side effect).
show(
    column(
        chart1(df1, "FFR_shock", "Gertler and Karadi Baseline Indicator", "gk15.png"),
        chart1(df2, "RESIDF", "Romer and Romer Indicator", "rr04.png"),
    )
)
| ethanr2/thesis | presentation/charts/charts.py | charts.py | py | 4,003 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pandas.read_excel",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.... |
16239791104 | import os
import time
import logging
import os.path as osp
import torch.distributed as dist
def setup_logger(logpth):
    """Configure root logging to a timestamped file under *logpth* and
    mirror records to the console via an extra StreamHandler."""
    logfile = 'Deeplab_v3plus-{}.log'.format(time.strftime('%Y-%m-%d-%H-%M-%S'))
    logfile = osp.join(logpth, logfile)
    FORMAT = '%(levelname)s %(filename)s(%(lineno)d): %(message)s'
    log_level = logging.INFO
    # In a distributed run, quieten every worker except rank 0.
    if dist.is_initialized() and dist.get_rank()!=0:
        log_level = logging.WARNING
    logging.basicConfig(level=log_level, format=FORMAT, filename=logfile)
    logging.root.addHandler(logging.StreamHandler())
class Logger(object):
    """Minimal logger that echoes messages to stdout and a text file."""

    def __init__(self, args, logger_str):
        """Open ``<args.save_path>/<logger_str>.txt`` for writing."""
        self._logger_name = args.save_path
        self._logger_str = logger_str
        self._save_path = os.path.join(self._logger_name, self._logger_str + '.txt')
        self._file = open(self._save_path, 'w')

    def log(self, string, save=True):
        """Print *string*; when *save* is true, also append it to the file."""
        print(string)
        if not save:
            return
        self._file.write(f'{string}\n')
        self._file.flush()

    def close(self):
        """Release the underlying file handle."""
        self._file.close()
| NoamRosenberg/autodeeplab | utils/logger.py | logger.py | py | 1,133 | python | en | code | 306 | github-code | 1 | [
{
"api_name": "time.strftime",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "logging.INFO",
"line_number": ... |
72593883553 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/3/28 8:39 下午
# @Author : 宋继贤
# @Description :
# @File : utils.py
# @Software: PyCharm
import torch
import gensim
def build_optimizer(args, model):
    """Instantiate the torch optimizer named by ``args.optim`` for *model*.

    ``args`` must also provide ``lr``, ``weight_decay`` and ``amsgrad``.
    """
    optimizer_cls = getattr(torch.optim, args.optim)
    return optimizer_cls(
        model.parameters(),
        lr=args.lr,
        weight_decay=args.weight_decay,
        amsgrad=args.amsgrad,
    )
def set_lr(optimizer, lr):
    """Assign *lr* to every parameter group of *optimizer*."""
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def load_embedding(w2v_file, vocab_size=859, embedding_size=256):
    """Load a trained gensim Word2Vec model and pack it into a tensor.

    Row 0 is left as zeros (presumably the padding index -- confirm with
    the vocabulary builder); row ``i`` holds the vector of word
    ``str(i - 1)`` in the Word2Vec vocabulary.
    """
    w2v = gensim.models.Word2Vec.load(w2v_file).wv
    embedding = torch.zeros((vocab_size, embedding_size), dtype=torch.float32)
    for i in range(1, vocab_size):
        embedding[i] = torch.from_numpy(w2v[str(i - 1)].copy())
    return embedding
| behome/tianchi | code/utils.py | utils.py | py | 837 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.optim",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "gensim.models.Word2Vec.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "gensim.models",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "torch... |
14908687554 | import discord
from discord import app_commands
from discord.ext import commands
from discord.ext import tasks
from discord import ui
import requests
from bs4 import BeautifulSoup
import json
import sqlite3
import os
import asyncio
import twitchio
from twitchio.ext import commands as cmd
import datetime
from typing import List
from discord.interactions import Interaction
bot = commands.Bot(command_prefix="!", intents = discord.Intents.all())
con = sqlite3.connect("C:/Users/nelso/OneDrive/Desktop/HenryBot/Stats.db")
cur = con.cursor()
@tasks.loop(minutes=1.0)
async def auto_stream_start():
    """Poll the Stats DB once a minute and post (or refresh) the go-live embed."""
    global ISLIVE
    channel = bot.get_channel(1112906850090893362)  # live-notifications channel
    guild = bot.get_guild(1112902755426783394)
    role = guild.get_role(1114393370744340621)      # notification ping role
    # Latest stream state rows written by the Twitch-side process.
    cur.execute("SELECT Live FROM Live_Info ORDER BY Entry DESC LIMIT 1")
    liveStatus = cur.fetchone()[0]
    cur.execute("SELECT Title FROM Live_Info ORDER BY Entry DESC LIMIT 1")
    title = cur.fetchone()[0]
    cur.execute("SELECT Game FROM Live_Info ORDER BY Entry DESC LIMIT 1")
    gameName = cur.fetchone()[0]
    cur.execute("SELECT Noti FROM Live_Info ORDER BY Entry DESC LIMIT 1")
    ISLIVE = cur.fetchone()[0]
    if liveStatus == "True" and ISLIVE == 'False':
        # First tick after going live: post the embed, remember its message
        # id in notiID.txt, and ping the notification role.
        message = await channel.send(embed=EmbedSections.twitch_noti(title, gameName.upper()))
        with open('notiID.txt', "r+") as file:
            file.truncate(0)
            file.writelines(str(message.id))
        await channel.send(role.mention)
        # NOTE(review): only the in-memory flag flips here; the DB Noti
        # column is not updated in this branch -- confirm who writes it.
        ISLIVE = 'True'
    elif liveStatus == "True" and ISLIVE == 'True':
        # Still live: keep the existing embed's title/game up to date.
        with open('notiID.txt', "r+") as file:
            id = file.readline()
        message = await channel.fetch_message(int(id))
        await message.edit(embed=EmbedSections.twitch_noti(title, gameName.upper()))
    else:
        pass
@tasks.loop(minutes=1.0)
async def auto_stream_end():
    """Once the stream ends, clear the notification channel and reset Noti."""
    channel = bot.get_channel(1112906850090893362)  # live-notifications channel
    guild = bot.get_guild(1112902755426783394)
    role = guild.get_role(1114393370744340621)      # fetched but unused here
    cur.execute("SELECT Live FROM Live_Info ORDER BY Entry DESC LIMIT 1")
    liveStatus = cur.fetchone()[0]
    cur.execute("SELECT Noti FROM Live_Info ORDER BY Entry DESC LIMIT 1")
    ISLIVE = cur.fetchone()[0]
    if liveStatus == "False" and ISLIVE == 'True':
        # Remove the live embed/ping messages and mark Noti as consumed.
        await channel.purge(limit=10)
        cur.execute(f"UPDATE Live_Info SET Noti = '{False}' WHERE Noti = 'True'")
        con.commit()
@bot.event
async def on_ready():
    """Start the polling loops and sync slash commands once connected."""
    print("Bot is up")
    auto_stream_start.start()
    auto_stream_end.start()
    try:
        synced = await bot.tree.sync()
        print(f"Synced {len(synced)} commands")
    except Exception as e:
        # Sync failures are printed rather than crashing startup.
        print(e)
#Choices testing
@bot.tree.command(name="test", description="This is used for testing!")
@app_commands.choices(choices=[
    app_commands.Choice(name="Yes", value="yep"),
    # BUG FIX: "No" previously carried the same value as "Yes" ("yep"),
    # which made the else branch below unreachable.
    app_commands.Choice(name="No", value="nope")
])
async def test(interaction: discord.Interaction, choices: app_commands.Choice[str]):
    """Testing slash command: acknowledge the user's Yes/No choice."""
    if (choices.value == 'yep'):
        await interaction.response.send_message(content="Alright!", ephemeral=True)
    else:
        await interaction.response.send_message(content="Nope!")
# ACTUAL BOT #
#-------------------------#
class Variables():
    """Mutable module-level state shared by the UI views."""
    alternate = False  # Button alternate functions. (Off by default)
    notiamount = 0     # notification counter
class EmbedSections():
def title_card():
embed = discord.Embed(title="Tatox3 Menu", color=0x6FFF7B)
embed.set_image(url="https://media.giphy.com/media/pYy1pETzRveSzWQmEz/giphy.gif")
return embed
def help_body(name, avatar_url):
embed=discord.Embed(color=0x6FFF7B)
embed.set_author(name=f'\n{name}', icon_url=avatar_url)
embed.add_field(name="🟣 Subscriber roles 🟣", value="To get access to your twitch roles if you are subscribed please ensure you have the twitch connection enabled in your personal account settings and we do the rest.", inline=False)
embed.add_field(name="🟣 Twitch Live Notifications 🟣", value=f"To get notified when Tatox3 goes live on twitch head over to #live-notifications in order to get the notification role")
embed.add_field(name="🚧 More helpful tips to be added soon...🚧\nIf you have any suggestions notify any of the admins", value="", inline=False)
embed.set_footer(text="If you require further assistance, click on the option you are confused about using the buttons below.")
return embed
def twitch_body(name, avatar_url):
embed = discord.Embed(color=0x6FFF7B)
embed.set_author(name=f'\n{name}', icon_url=avatar_url)
embed.set_image(url="https://media.giphy.com/media/v1.Y2lkPTc5MGI3NjExMjZjZTZiZWQxOGY5YWE0NzIwOThkZDM0YjQxZDJhOTUwN2ZmZjU1ZiZlcD12MV9pbnRlcm5hbF9naWZzX2dpZklkJmN0PWc/gli9wNZ5gt2gy9m2Zg/giphy.gif")
embed.add_field(name="Further assistance:", value="Below you can find a gif of where you should be looking in your settings to enable your twitch connection.", inline=False)
embed.add_field(name="", value="1. Go to your settings\n2. Click on the 'Connections' tab\n3. Click on the twitch Icon\n4. Authorize the connection through twitch")
embed.set_footer(text="If you do not find it useful you can contact a moderator or admin.")
return embed
def twitch_noti_body(name, avatar_url, channel):
embed = discord.Embed(color=0x6FFF7B)
embed.set_author(name=f'\n{name}', icon_url=avatar_url)
embed.add_field(name="Further assistance:", value="", inline=False)
embed.add_field(name="", value=f"1. Go to {channel.mention} at the top of the discord\n2. Click on the reaction for which you want to get notified for!")
embed.set_footer(text="If you do not find it useful you can contact a moderator or admin.")
return embed
def twitch_noti(title, game):
    """Build the "going live" announcement embed for the Twitch stream.

    Args:
        title: Stream title, shown as the embed title.
        game: Game name, interpolated into the description.

    Returns:
        A discord.Embed linking to the Twitch channel.
    """
    # The original body also called bot.get_channel/bot.get_guild but never
    # used the results; those dead lookups have been removed.
    embed = discord.Embed(title=f"{title}", description=f"TATOX3 IS LIVE ON TWITCH PLAYING {game} COME JOIN!", url="https://www.twitch.tv/tatox3_", color=0x6441a5)
    # Promo banner; alternative art kept for reference:
    # https://i.pinimg.com/originals/da/99/60/da99605920778b7b85b4fbb96cbacb78.gif
    # https://media.giphy.com/media/v1.Y2lkPTc5MGI3NjExNmFiY2ZlZjUxMmJiYmU0Yzc1ZTY4NGNhNTBkODQ2MDhhODcwODczMyZlcD12MV9pbnRlcm5hbF9naWZzX2dpZklkJmN0PWc/SMoMrhoSQvPBXhxqzj/giphy.gif
    embed.set_image(url="https://cdn.discordapp.com/attachments/1112902756022358039/1120788781268750437/20230620_145403.jpg")
    return embed
def leaderboard_body(result):
    """Build the potato-leaderboard embed.

    Args:
        result: Sequence of (twitch_name, potato_count) rows already sorted
            descending by potato count; row 0 is the leader.

    Returns:
        A discord.Embed with the leader highlighted and the remaining rows
        numbered from #2.
    """
    embed = discord.Embed(title="POTATO LEADERBOARD", color=0x6FFF7B)
    embed.set_thumbnail(url="https://media.giphy.com/media/v1.Y2lkPTc5MGI3NjExdWZvbXVvYmdjNW92ZTNua2EwZWRobnV1N2VqMHBlemM2aWk3eWVjcCZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9Zw/24vTxomgaewVz5ZwPx/giphy.gif")
    embed.add_field(name=f"LEADER: {result[0][0]} -> {result[0][1]} potatoes", value=f"· • —–—– ٠✤ ٠—–—– • ·")
    # enumerate replaces the original manual counter + result[count] indexing.
    for rank, (twitch_name, potatoes) in enumerate(result[1:], start=2):
        embed.add_field(name=f"#{rank} {twitch_name}: {potatoes}", value="", inline=False)
    embed.set_footer(text="Chat in Henry's twitch chat to earn more")
    return embed
class MenuButtons(discord.ui.View):
    """Persistent button view attached to the /discord-help menu.

    Buttons "1" and "2" toggle between the main help page and a detail page
    (Twitch linking / Twitch notifications); "Exit" removes the menu.
    """

    def __init__(self):
        # timeout=None keeps the view alive indefinitely.
        super().__init__(timeout=None)

    @discord.ui.button(label="1", style=discord.ButtonStyle.green)
    async def One(self, interaction: discord.Interaction, button: discord.Button):
        # Disable the sibling button while this detail page is open.
        self.Two.disabled = True
        try:
            # NOTE(review): Variables.alternate is a module-level toggle shared
            # by both buttons and all users -- concurrent users can flip each
            # other's state; confirm this is intended.
            if Variables.alternate == False:
                button.label = "Back"
                await interaction.response.edit_message(embeds=[EmbedSections.title_card(), EmbedSections.twitch_body(interaction.user.name, interaction.user.avatar.url)], view=self)
                Variables.alternate = True
            else:
                # Second press: restore the main help page with a fresh view.
                await interaction.response.edit_message(embeds=[EmbedSections.title_card(), EmbedSections.help_body(interaction.user.name, interaction.user.avatar.url)], view=MenuButtons())
                Variables.alternate = False
        except Exception as e:
            print(e)

    @discord.ui.button(label='2', style=discord.ButtonStyle.green)
    async def Two(self, interaction: discord.Interaction, button: discord.Button):
        # Mirror of One: show the notification how-to, disable the sibling.
        self.One.disabled = True
        try:
            if Variables.alternate == False:
                button.label = "Back"
                await interaction.response.edit_message(embeds=[EmbedSections.title_card(), EmbedSections.twitch_noti_body(interaction.user.name, interaction.user.avatar.url, channel=bot.get_channel(1114394449657745488))], view=self)
                Variables.alternate = True
            else:
                await interaction.response.edit_message(embeds=[EmbedSections.title_card(), EmbedSections.help_body(interaction.user.name, interaction.user.avatar.url)], view=MenuButtons())
                Variables.alternate = False
        except Exception as e:
            print(e)

    @discord.ui.button(label="Exit", style=discord.ButtonStyle.danger)
    async def Exit(self, interaction: discord.Interaction, Button: discord.Button):
        # Replace the menu with a plain confirmation and drop the view.
        await interaction.response.edit_message(view=None, content="Exited.", embed=None)
class ClearModal(ui.Modal, title='Clear Command'):
    """Modal backing /clear: asks how many messages to purge, optionally
    filtered to a single member, with an optional audit-log reason."""

    amount = ui.TextInput(label='How much do you want to delete?', style=discord.TextStyle.short,required=True)
    member = ui.TextInput(label="Member name (Optional)", style=discord.TextStyle.short, required=False, default=None)
    reason = ui.TextInput(label="Reason (Optional)", style=discord.TextStyle.paragraph, required=False, default=None)

    async def on_submit(self, interaction: discord.Interaction):
        try:
            if self.member.value == "":
                # No member filter: purge the newest <amount> messages.
                await interaction.channel.purge(limit=int(self.amount.value), reason=self.reason.value)
                await interaction.response.send_message(f"Chat was cleared by: {interaction.user.mention} ({int(self.amount.value)} messages cleared!)", delete_after=30)
            else:
                # Filter by exact (case-sensitive) username match.
                await interaction.channel.purge(limit=int(self.amount.value), reason=self.reason.value, check=lambda m: m.author.name == self.member.value)
                await interaction.response.send_message(f"{self.member.value}'s chat was cleared by: {interaction.user.mention} ({int(self.amount.value)} messages cleared!)", delete_after=30)
        except Exception:
            # Most likely int() failed on a non-numeric amount, though any
            # purge failure lands here too.
            await interaction.response.send_message("Hmmm... Something went wrong! Please make sure you put a valid integer as an amount.", ephemeral=True, delete_after=10)
@bot.event
async def on_member_join(person):
    # Auto-assign the default member role (hard-coded role id) on join.
    await person.add_roles(person.guild.get_role(1112902914160197724))
@bot.event
async def on_raw_reaction_add(payload):
    """Reaction-role handler: grant a role when a user reacts to the roles message."""
    message = await bot.get_channel(payload.channel_id).fetch_message(payload.message_id)
    # NOTE(review): this global is read by on_raw_reaction_remove and holds
    # whichever member reacted *most recently* anywhere -- with concurrent
    # users the remove handler can act on the wrong member; confirm/fix.
    global member
    member = payload.member
    guild = bot.get_guild(1112902755426783394)
    twitch_role = guild.get_role(1114393370744340621)
    music_role = guild.get_role(1114396915400986734)
    # Only the dedicated reaction-roles message is handled.
    if message.id == 1114942064321376256:
        if str(payload.emoji) == "🟣":
            await payload.member.add_roles(twitch_role)
        if str(payload.emoji) == "🎶":
            await payload.member.add_roles(music_role)
@bot.event
async def on_raw_reaction_remove(payload):
    """Remove the role matching the reaction the user withdrew.

    Resolves the acting member from ``payload.user_id`` instead of the old
    module-level ``member`` global: that global pointed at whoever reacted
    *last* anywhere, so with concurrent users the wrong member could lose a
    role. (``payload.member`` itself is not populated for removal events.)
    """
    message = await bot.get_channel(payload.channel_id).fetch_message(payload.message_id)
    guild = bot.get_guild(1112902755426783394)
    twitch_role = guild.get_role(1114393370744340621)
    music_role = guild.get_role(1114396915400986734)
    if message.id == 1114942064321376256:
        reacting_member = guild.get_member(payload.user_id)
        if reacting_member is None:
            # Member not in cache (e.g. left the guild) -- nothing to do.
            return
        if str(payload.emoji) == "🟣":
            await reacting_member.remove_roles(twitch_role)
        if str(payload.emoji) == "🎶":
            await reacting_member.remove_roles(music_role)
@bot.tree.command(name="discord-help")
async def help(interaction: discord.Interaction):
    # Ephemeral help menu with navigation buttons; auto-deletes after 2 minutes.
    await interaction.response.send_message(embeds=[EmbedSections.title_card(), EmbedSections.help_body(interaction.user.name, interaction.user.avatar.url)], view=MenuButtons(), ephemeral=True, delete_after=120)
@bot.tree.command(name="clear")
@app_commands.default_permissions(manage_messages=True)
async def clear(interaction: discord.Interaction):
    # Gated to members with Manage Messages; the modal collects the details.
    await interaction.response.send_modal(ClearModal())
@bot.tree.command(name="whiffs")
async def Whiff(interaction: discord.Interaction):
    # Read the global whiff counter. NOTE(review): assumes the Whiff table
    # holds (at least) one row; presumably incremented by the twitch-side
    # !whiff command mentioned in the reply -- confirm.
    point = cur.execute("SELECT Amount From Whiff")
    result = point.fetchone()
    await interaction.response.send_message(f"Henry has been caught whiffing {result[0]} times on stream!\n\nIf you catch him whiffing do !whiff on his stream", ephemeral=False)
@bot.tree.command(name="link", description="Link the discord bot to your twitch account **CASE SENSITIVE**")
async def link(interaction: discord.Interaction, twitchname: str = None):
    """Link the caller's Discord account to an existing Twitch chatter row.

    The Economy row must already exist (created when the user chats on
    Twitch) and must not be claimed by another Discord account
    (DiscordID == 0 means unclaimed).
    """
    channel = bot.get_channel(1119398653468082270)
    if twitchname is None:
        await interaction.response.send_message("Please specify your twitch username...")
        return
    try:
        # Parameterized query: twitchname is user input -- never interpolate
        # it into SQL with f-strings (SQL injection).
        cur.execute("SELECT DiscordID FROM Economy WHERE TwitchName = ?", (twitchname,))
        discord_result = cur.fetchone()
        if int(discord_result[0]) != 0:
            # Already claimed. Return here: previously the code fell through
            # and tried to respond to the interaction a second time.
            await interaction.response.send_message(f"This twitch name has already been registered to a user with the ID {discord_result[0]}", ephemeral=True)
            return
        cur.execute("UPDATE Economy SET DiscordID = ? WHERE TwitchName = ?", (interaction.user.id, twitchname))
        con.commit()
        await interaction.response.send_message(f"{twitchname} and {interaction.user.name} have been linked succesfully!")
        # Audit-log the link in the staff channel.
        await channel.send(
            f"""- - - LINK INFO:
Discord Name: {interaction.user.name}
Twitch Name: {twitchname}
Discord ID: {interaction.user.id}
LINK SUCCESSFUL""")
        # TODO: format the audit log as an embed.
    except Exception:
        # Unknown twitch name -> fetchone() returned None -> indexing raised.
        await interaction.response.send_message("Please enter a valid twitch username, make sure you have sent at least 1 message in twitch chat", ephemeral=True)
@bot.tree.command(name="balance", description="Gets the balance of twitch potatoes")
async def bal(interaction: discord.Interaction):
    """Report the caller's potato balance, looked up by their Discord id.

    Unlinked users (no Economy row) get a prompt to run /link instead.
    """
    user_id = interaction.user.id
    try:
        # Parameterized query for consistency/injection safety; the original
        # also ran a preliminary SELECT whose result was never used -- removed.
        cur.execute("SELECT Potatoes FROM Economy WHERE DiscordID = ?", (user_id,))
        result = cur.fetchone()
        await interaction.response.send_message(f"Your current potato balance is {result[0]}", ephemeral=True)
    except Exception:
        # No linked row: fetchone() returned None and result[0] raised.
        await interaction.response.send_message("Please use /link (twitch username) to link this bot to your twitch account and access your potatoes!")
@bot.tree.command(name="leaderboard", description="Displays the top 5 potato owners in Henry's stream!")
async def leaderboard(interaction: discord.Interaction):
    """Show the five richest potato holders as an embed."""
    top_five_sql = 'SELECT TwitchName, Potatoes FROM Economy ORDER BY Potatoes DESC LIMIT 5'
    cur.execute(top_five_sql)
    rows = cur.fetchall()
    await interaction.response.send_message(embed=EmbedSections.leaderboard_body(result=rows))
@bot.tree.command(name="rank", description="Shows your ranking on the potato leaderboard")
async def rank(interaction: discord.Interaction):
    """Tell the caller their position on the potato leaderboard."""
    cur.execute('SELECT DiscordID, Potatoes FROM Economy ORDER BY Potatoes DESC')
    rows = cur.fetchall()
    # enumerate replaces the manual counter; return after the (unique) match.
    for position, (discord_id, potatoes) in enumerate(rows, start=1):
        if discord_id == interaction.user.id:
            await interaction.response.send_message(f"{interaction.user.mention} you are rank #{position} on the potato leaderboard with {potatoes} potatoes")
            return
    # Previously an unlinked user got no response at all, leaving the slash
    # command to time out with "application did not respond".
    await interaction.response.send_message("You are not on the leaderboard yet - use /link to connect your twitch account!", ephemeral=True)
bot.run("MTExMjQ2OTY4MDU5NTE1MjkxNw.Gkw5Br.rrW9d2huMMaDruE2lzL33cchKH3xq7S12ilDp8")
#-------------------------------------------------------------------# | NelsonD2004/Neonbot | main.py | main.py | py | 16,614 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "discord.ext.commands.Bot",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "discord.Intents.all",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "dis... |
39563984867 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/12/24 13:57
# @Author : Tian Hao
# @Email : hao.tian@intcolon.cn
# @File : tools.py
# @Software: PyCharm
# @Desc : 时间处理
import calendar
import datetime
from BaiduIndex.tools.DBHelper import DBHelper
def get_time_range_list(start_date, end_date):
    """Split [start_date, end_date) into per-month (first-day, last-day) pairs.

    Args:
        start_date: Start date as a 'YYYY-MM-DD' string.
        end_date: End date as a 'YYYY-MM-DD' string; a month whose last day
            falls on or after this date is excluded.

    Returns:
        List of (month_start, month_end) 'YYYY-MM-DD' string tuples.
    """
    fmt = '%Y-%m-%d'
    cursor = datetime.datetime.strptime(start_date, fmt)
    limit = datetime.datetime.strptime(end_date, fmt)
    ranges = []
    while True:
        # Jump forward by the length of the current month, then step back one
        # day to land on that month's last day.
        days_in_month = calendar.monthrange(cursor.year, cursor.month)[1]
        next_start = cursor + datetime.timedelta(days=days_in_month)
        month_end = next_start - datetime.timedelta(days=1)
        if month_end >= limit:
            return ranges
        ranges.append((cursor.strftime(fmt), month_end.strftime(fmt)))
        cursor = next_start
class QueryData(object):
    """Read-only query helpers around DBHelper for region ids and crawl keywords."""

    def __init__(self):
        self.db_helper = DBHelper()

    def get_region_id(self):
        # Fetch all province ids, ascending.
        query_sql = "SELECT id FROM province_id ORDER BY id"
        return self.db_helper.query_task(query_sql)

    def get_keyword(self, table_name):
        # NOTE(review): table_name is interpolated directly into the SQL --
        # only call with trusted, hard-coded table names.
        keywords_list = []
        sql = 'SELECT distinct(keyword) FROM {}'.format(table_name)
        keywords = self.db_helper.query_task(sql)
        for keyword in keywords:
            # Normalize: lowercase and strip surrounding whitespace.
            keywords_list.append(str.lower(str(keyword[0])).strip())
        # NOTE(review): skips the first 2400 keywords -- presumably a manual
        # resume point for an interrupted crawl; confirm before reusing.
        return keywords_list[2400:]
if __name__ == '__main__':
    # Quick manual check: print the per-month ranges for a sample window.
    startDate = "2020-03-01"
    endDate = "2020-09-01"
    print(get_time_range_list(startDate, endDate))
{
"api_name": "datetime.datetime.strptime",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 25,
"usage_type": "call"
},
{
"api_... |
195087124 | # encoding: utf-8
"""Test keras.layers.core.Layer.__call__"""
from __future__ import print_function
import unittest
import numpy as np
from numpy.testing import assert_allclose
from keras import backend as K
from keras.layers.core import Dense
from keras.models import Sequential
class TestCall(unittest.TestCase):
    """Test __call__ methods"""

    def test_layer_call(self):
        """Test keras.layers.core.Layer.__call__"""
        nb_samples, input_dim, output_dim = 3, 10, 5
        layer = Dense(output_dim, input_dim=input_dim)
        # Snapshot the randomly-initialized kernel so the expected output can
        # be recomputed with plain numpy below.
        W = np.asarray(K.eval(layer.W)).astype(K.floatx())
        X = K.placeholder(ndim=2)
        Y = layer(X)
        F = K.function([X], [Y])
        x = np.ones((nb_samples, input_dim)).astype(K.floatx())
        y = F([x])[0].astype(K.floatx())
        # Expected output x.W -- presumably the bias contributes nothing at
        # init time; confirm against the Dense initializer.
        t = np.dot(x, W).astype(K.floatx())
        # NOTE(review): rtol=.2 is very loose -- presumably to tolerate
        # backend/precision differences; confirm.
        assert_allclose(t, y, rtol=.2)

    def test_sequential_call(self):
        """Test keras.models.Sequential.__call__"""
        nb_samples, input_dim, output_dim = 3, 10, 5
        model = Sequential()
        model.add(Dense(output_dim=output_dim, input_dim=input_dim))
        model.compile('sgd', 'mse')
        # Calling the model like a layer should produce the same outputs as
        # model.predict on identical input.
        X = K.placeholder(ndim=2)
        Y = model(X)
        F = K.function([X], [Y])
        x = np.ones((nb_samples, input_dim)).astype(K.floatx())
        y1 = F([x])[0].astype(K.floatx())
        y2 = model.predict(x)
        # results of __call__ should match model.predict
        assert_allclose(y1, y2)
if __name__ == '__main__':
    # verbosity=2 prints each test name as it runs.
    unittest.main(verbosity=2)
| jem0101/BigSwag-SQA2022-AUBURN | TestOrchestrator4ML-main/resources/Data/supervised/GITLAB_REPOS/mynameisfiber@keras/tests/keras/layers/test_call.py | test_call.py | py | 1,532 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "keras.layers.core.Dense",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "keras.ba... |
31697017903 | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, validators
class CategoryForm(FlaskForm):
    """Form for creating or editing a category (CSRF disabled via Meta)."""

    # Minimum length is 1 because some languages (e.g. Japanese) can have
    # meaningful single-character category names.
    name = StringField(
        "Category",
        [
            validators.Length(min=1, max=64),
            # DataRequired is the current WTForms spelling; the lowercase
            # data_required alias is deprecated.
            validators.DataRequired(),
        ]
    )
    description = TextAreaField(
        "Description",
        [validators.Length(max=1024)]
    )

    class Meta:
        # CSRF protection intentionally disabled for this form.
        csrf = False
| CrescentKohana/keiji | application/categories/forms.py | forms.py | py | 566 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask_wtf.FlaskForm",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "wtforms.StringField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.Length",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "wtfor... |
32166415006 | """Tool for sorting imports alphabetically, and automatically separated into sections."""
import argparse
import functools
import json
import os
import sys
from gettext import gettext as _
from io import TextIOWrapper
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union
from warnings import warn
from . import __version__, api, files, sections
from .exceptions import FileSkipped, ISortError, UnsupportedEncoding
from .format import create_terminal_printer
from .logo import ASCII_ART
from .profiles import profiles
from .settings import VALID_PY_TARGETS, Config, find_all_configs
from .utils import Trie
from .wrap_modes import WrapModes
DEPRECATED_SINGLE_DASH_ARGS = {
"-ac",
"-af",
"-ca",
"-cs",
"-df",
"-ds",
"-dt",
"-fas",
"-fass",
"-ff",
"-fgw",
"-fss",
"-lai",
"-lbt",
"-le",
"-ls",
"-nis",
"-nlb",
"-ot",
"-rr",
"-sd",
"-sg",
"-sl",
"-sp",
"-tc",
"-wl",
"-ws",
}
QUICK_GUIDE = f"""
{ASCII_ART}
Nothing to do: no files or paths have have been passed in!
Try one of the following:
`isort .` - sort all Python files, starting from the current directory, recursively.
`isort . --interactive` - Do the same, but ask before making any changes.
`isort . --check --diff` - Check to see if imports are correctly sorted within this project.
`isort --help` - In-depth information about isort's available command-line options.
Visit https://pycqa.github.io/isort/ for complete information about how to use isort.
"""
class SortAttempt:
    """Outcome of attempting to sort (or check) a single file."""

    def __init__(self, incorrectly_sorted: bool, skipped: bool, supported_encoding: bool) -> None:
        # True when the file's imports were found out of order.
        self.incorrectly_sorted = incorrectly_sorted
        # True when the file was skipped (raised FileSkipped).
        self.skipped = skipped
        # False when the file's encoding could not be handled.
        self.supported_encoding = supported_encoding
def sort_imports(
    file_name: str,
    config: Config,
    check: bool = False,
    ask_to_apply: bool = False,
    write_to_stdout: bool = False,
    **kwargs: Any,
) -> Optional[SortAttempt]:
    """Sort (or, with check=True, only check) one file, mapping exceptions
    to a SortAttempt result.

    Returns None when the file could not be parsed at all; exits the process
    on known internal isort errors; re-raises unknown exceptions after
    printing a hard-fail message.
    """
    incorrectly_sorted: bool = False
    skipped: bool = False
    try:
        if check:
            # Check-only mode: report, never modify the file.
            try:
                incorrectly_sorted = not api.check_file(file_name, config=config, **kwargs)
            except FileSkipped:
                skipped = True
            return SortAttempt(incorrectly_sorted, skipped, True)

        try:
            incorrectly_sorted = not api.sort_file(
                file_name,
                config=config,
                ask_to_apply=ask_to_apply,
                write_to_stdout=write_to_stdout,
                **kwargs,
            )
        except FileSkipped:
            skipped = True
        return SortAttempt(incorrectly_sorted, skipped, True)
    except (OSError, ValueError) as error:
        # Unreadable/unparsable file: warn and drop it from the results.
        warn(f"Unable to parse file {file_name} due to {error}")
        return None
    except UnsupportedEncoding:
        if config.verbose:
            warn(f"Encoding not supported for {file_name}")
        return SortAttempt(incorrectly_sorted, skipped, False)
    except ISortError as error:
        # Known isort failure: print a friendly message and hard-exit.
        _print_hard_fail(config, message=str(error))
        sys.exit(1)
    except Exception:
        # Unknown failure: point the user at the issue tracker, then re-raise.
        _print_hard_fail(config, offending_file=file_name)
        raise
def _print_hard_fail(
    config: Config, offending_file: Optional[str] = None, message: Optional[str] = None
) -> None:
    """Fail on unrecoverable exception with custom message."""
    if not message:
        # No custom message supplied: fall back to the generic bug-report text.
        message = (
            f"Unrecoverable exception thrown when parsing {offending_file or ''}! "
            "This should NEVER happen.\n"
            "If encountered, please open an issue: https://github.com/PyCQA/isort/issues/new"
        )
    printer = create_terminal_printer(
        color=config.color_output, error=config.format_error, success=config.format_success
    )
    printer.error(message)
def _build_arg_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description="Sort Python import definitions alphabetically "
"within logical sections. Run with no arguments to see a quick "
"start guide, otherwise, one or more files/directories/stdin must be provided. "
"Use `-` as the first argument to represent stdin. Use --interactive to use the pre 5.0.0 "
"interactive behavior."
" "
"If you've used isort 4 but are new to isort 5, see the upgrading guide: "
"https://pycqa.github.io/isort/docs/upgrade_guides/5.0.0.html",
add_help=False, # prevent help option from appearing in "optional arguments" group
)
general_group = parser.add_argument_group("general options")
target_group = parser.add_argument_group("target options")
output_group = parser.add_argument_group("general output options")
inline_args_group = output_group.add_mutually_exclusive_group()
section_group = parser.add_argument_group("section output options")
deprecated_group = parser.add_argument_group("deprecated options")
general_group.add_argument(
"-h",
"--help",
action="help",
default=argparse.SUPPRESS,
help=_("show this help message and exit"),
)
general_group.add_argument(
"-V",
"--version",
action="store_true",
dest="show_version",
help="Displays the currently installed version of isort.",
)
general_group.add_argument(
"--vn",
"--version-number",
action="version",
version=__version__,
help="Returns just the current version number without the logo",
)
general_group.add_argument(
"-v",
"--verbose",
action="store_true",
dest="verbose",
help="Shows verbose output, such as when files are skipped or when a check is successful.",
)
general_group.add_argument(
"--only-modified",
"--om",
dest="only_modified",
action="store_true",
help="Suppresses verbose output for non-modified files.",
)
general_group.add_argument(
"--dedup-headings",
dest="dedup_headings",
action="store_true",
help="Tells isort to only show an identical custom import heading comment once, even if"
" there are multiple sections with the comment set.",
)
general_group.add_argument(
"-q",
"--quiet",
action="store_true",
dest="quiet",
help="Shows extra quiet output, only errors are outputted.",
)
general_group.add_argument(
"-d",
"--stdout",
help="Force resulting output to stdout, instead of in-place.",
dest="write_to_stdout",
action="store_true",
)
general_group.add_argument(
"--overwrite-in-place",
help="Tells isort to overwrite in place using the same file handle. "
"Comes at a performance and memory usage penalty over its standard "
"approach but ensures all file flags and modes stay unchanged.",
dest="overwrite_in_place",
action="store_true",
)
general_group.add_argument(
"--show-config",
dest="show_config",
action="store_true",
help="See isort's determined config, as well as sources of config options.",
)
general_group.add_argument(
"--show-files",
dest="show_files",
action="store_true",
help="See the files isort will be run against with the current config options.",
)
general_group.add_argument(
"--df",
"--diff",
dest="show_diff",
action="store_true",
help="Prints a diff of all the changes isort would make to a file, instead of "
"changing it in place",
)
general_group.add_argument(
"-c",
"--check-only",
"--check",
action="store_true",
dest="check",
help="Checks the file for unsorted / unformatted imports and prints them to the "
"command line without modifying the file. Returns 0 when nothing would change and "
"returns 1 when the file would be reformatted.",
)
general_group.add_argument(
"--ws",
"--ignore-whitespace",
action="store_true",
dest="ignore_whitespace",
help="Tells isort to ignore whitespace differences when --check-only is being used.",
)
general_group.add_argument(
"--sp",
"--settings-path",
"--settings-file",
"--settings",
dest="settings_path",
help="Explicitly set the settings path or file instead of auto determining "
"based on file location.",
)
general_group.add_argument(
"--cr",
"--config-root",
dest="config_root",
help="Explicitly set the config root for resolving all configs. When used "
"with the --resolve-all-configs flag, isort will look at all sub-folders "
"in this config root to resolve config files and sort files based on the "
"closest available config(if any)",
)
general_group.add_argument(
"--resolve-all-configs",
dest="resolve_all_configs",
action="store_true",
help="Tells isort to resolve the configs for all sub-directories "
"and sort files in terms of its closest config files.",
)
general_group.add_argument(
"--profile",
dest="profile",
type=str,
help="Base profile type to use for configuration. "
f"Profiles include: {', '.join(profiles.keys())}. As well as any shared profiles.",
)
general_group.add_argument(
"--old-finders",
"--magic-placement",
dest="old_finders",
action="store_true",
help="Use the old deprecated finder logic that relies on environment introspection magic.",
)
general_group.add_argument(
"-j",
"--jobs",
help="Number of files to process in parallel. Negative value means use number of CPUs.",
dest="jobs",
type=int,
nargs="?",
const=-1,
)
general_group.add_argument(
"--ac",
"--atomic",
dest="atomic",
action="store_true",
help="Ensures the output doesn't save if the resulting file contains syntax errors.",
)
general_group.add_argument(
"--interactive",
dest="ask_to_apply",
action="store_true",
help="Tells isort to apply changes interactively.",
)
general_group.add_argument(
"--format-error",
dest="format_error",
help="Override the format used to print errors.",
)
general_group.add_argument(
"--format-success",
dest="format_success",
help="Override the format used to print success.",
)
general_group.add_argument(
"--srx",
"--sort-reexports",
dest="sort_reexports",
action="store_true",
help="Automatically sort all re-exports (module level __all__ collections)",
)
target_group.add_argument(
"files", nargs="*", help="One or more Python source files that need their imports sorted."
)
target_group.add_argument(
"--filter-files",
dest="filter_files",
action="store_true",
help="Tells isort to filter files even when they are explicitly passed in as "
"part of the CLI command.",
)
target_group.add_argument(
"-s",
"--skip",
help="Files that isort should skip over. If you want to skip multiple "
"files you should specify twice: --skip file1 --skip file2. Values can be "
"file names, directory names or file paths. To skip all files in a nested path "
"use --skip-glob.",
dest="skip",
action="append",
)
target_group.add_argument(
"--extend-skip",
help="Extends --skip to add additional files that isort should skip over. "
"If you want to skip multiple "
"files you should specify twice: --skip file1 --skip file2. Values can be "
"file names, directory names or file paths. To skip all files in a nested path "
"use --skip-glob.",
dest="extend_skip",
action="append",
)
target_group.add_argument(
"--sg",
"--skip-glob",
help="Files that isort should skip over.",
dest="skip_glob",
action="append",
)
target_group.add_argument(
"--extend-skip-glob",
help="Additional files that isort should skip over (extending --skip-glob).",
dest="extend_skip_glob",
action="append",
)
target_group.add_argument(
"--gitignore",
"--skip-gitignore",
action="store_true",
dest="skip_gitignore",
help="Treat project as a git repository and ignore files listed in .gitignore."
"\nNOTE: This requires git to be installed and accessible from the same shell as isort.",
)
target_group.add_argument(
"--ext",
"--extension",
"--supported-extension",
dest="supported_extensions",
action="append",
help="Specifies what extensions isort can be run against.",
)
target_group.add_argument(
"--blocked-extension",
dest="blocked_extensions",
action="append",
help="Specifies what extensions isort can never be run against.",
)
target_group.add_argument(
"--dont-follow-links",
dest="dont_follow_links",
action="store_true",
help="Tells isort not to follow symlinks that are encountered when running recursively.",
)
target_group.add_argument(
"--filename",
dest="filename",
help="Provide the filename associated with a stream.",
)
target_group.add_argument(
"--allow-root",
action="store_true",
default=False,
help="Tells isort not to treat / specially, allowing it to be run against the root dir.",
)
output_group.add_argument(
"-a",
"--add-import",
dest="add_imports",
action="append",
help="Adds the specified import line to all files, "
"automatically determining correct placement.",
)
output_group.add_argument(
"--append",
"--append-only",
dest="append_only",
action="store_true",
help="Only adds the imports specified in --add-import if the file"
" contains existing imports.",
)
output_group.add_argument(
"--af",
"--force-adds",
dest="force_adds",
action="store_true",
help="Forces import adds even if the original file is empty.",
)
output_group.add_argument(
"--rm",
"--remove-import",
dest="remove_imports",
action="append",
help="Removes the specified import from all files.",
)
output_group.add_argument(
"--float-to-top",
dest="float_to_top",
action="store_true",
help="Causes all non-indented imports to float to the top of the file having its imports "
"sorted (immediately below the top of file comment).\n"
"This can be an excellent shortcut for collecting imports every once in a while "
"when you place them in the middle of a file to avoid context switching.\n\n"
"*NOTE*: It currently doesn't work with cimports and introduces some extra over-head "
"and a performance penalty.",
)
output_group.add_argument(
"--dont-float-to-top",
dest="dont_float_to_top",
action="store_true",
help="Forces --float-to-top setting off. See --float-to-top for more information.",
)
output_group.add_argument(
"--ca",
"--combine-as",
dest="combine_as_imports",
action="store_true",
help="Combines as imports on the same line.",
)
output_group.add_argument(
"--cs",
"--combine-star",
dest="combine_star",
action="store_true",
help="Ensures that if a star import is present, "
"nothing else is imported from that namespace.",
)
output_group.add_argument(
"-e",
"--balanced",
dest="balanced_wrapping",
action="store_true",
help="Balances wrapping to produce the most consistent line length possible",
)
output_group.add_argument(
"--ff",
"--from-first",
dest="from_first",
action="store_true",
help="Switches the typical ordering preference, "
"showing from imports first then straight ones.",
)
output_group.add_argument(
"--fgw",
"--force-grid-wrap",
nargs="?",
const=2,
type=int,
dest="force_grid_wrap",
help="Force number of from imports (defaults to 2 when passed as CLI flag without value) "
"to be grid wrapped regardless of line "
"length. If 0 is passed in (the global default) only line length is considered.",
)
output_group.add_argument(
"-i",
"--indent",
help='String to place for indents defaults to " " (4 spaces).',
dest="indent",
type=str,
)
output_group.add_argument(
"--lbi", "--lines-before-imports", dest="lines_before_imports", type=int
)
output_group.add_argument(
"--lai", "--lines-after-imports", dest="lines_after_imports", type=int
)
output_group.add_argument(
"--lbt", "--lines-between-types", dest="lines_between_types", type=int
)
output_group.add_argument(
"--le",
"--line-ending",
dest="line_ending",
help="Forces line endings to the specified value. "
"If not set, values will be guessed per-file.",
)
output_group.add_argument(
"--ls",
"--length-sort",
help="Sort imports by their string length.",
dest="length_sort",
action="store_true",
)
output_group.add_argument(
"--lss",
"--length-sort-straight",
help="Sort straight imports by their string length. Similar to `length_sort` "
"but applies only to straight imports and doesn't affect from imports.",
dest="length_sort_straight",
action="store_true",
)
output_group.add_argument(
"-m",
"--multi-line",
dest="multi_line_output",
choices=list(WrapModes.__members__.keys())
+ [str(mode.value) for mode in WrapModes.__members__.values()],
type=str,
help="Multi line output (0-grid, 1-vertical, 2-hanging, 3-vert-hanging, 4-vert-grid, "
"5-vert-grid-grouped, 6-deprecated-alias-for-5, 7-noqa, "
"8-vertical-hanging-indent-bracket, 9-vertical-prefix-from-module-import, "
"10-hanging-indent-with-parentheses).",
)
output_group.add_argument(
"-n",
"--ensure-newline-before-comments",
dest="ensure_newline_before_comments",
action="store_true",
help="Inserts a blank line before a comment following an import.",
)
inline_args_group.add_argument(
"--nis",
"--no-inline-sort",
dest="no_inline_sort",
action="store_true",
help="Leaves `from` imports with multiple imports 'as-is' "
"(e.g. `from foo import a, c ,b`).",
)
output_group.add_argument(
"--ot",
"--order-by-type",
dest="order_by_type",
action="store_true",
help="Order imports by type, which is determined by case, in addition to alphabetically.\n"
"\n**NOTE**: type here refers to the implied type from the import name capitalization.\n"
' isort does not do type introspection for the imports. These "types" are simply: '
"CONSTANT_VARIABLE, CamelCaseClass, variable_or_function. If your project follows PEP8"
" or a related coding standard and has many imports this is a good default, otherwise you "
"likely will want to turn it off. From the CLI the `--dont-order-by-type` option will turn "
"this off.",
)
output_group.add_argument(
"--dt",
"--dont-order-by-type",
dest="dont_order_by_type",
action="store_true",
help="Don't order imports by type, which is determined by case, in addition to "
"alphabetically.\n\n"
"**NOTE**: type here refers to the implied type from the import name capitalization.\n"
' isort does not do type introspection for the imports. These "types" are simply: '
"CONSTANT_VARIABLE, CamelCaseClass, variable_or_function. If your project follows PEP8"
" or a related coding standard and has many imports this is a good default. You can turn "
"this on from the CLI using `--order-by-type`.",
)
output_group.add_argument(
"--rr",
"--reverse-relative",
dest="reverse_relative",
action="store_true",
help="Reverse order of relative imports.",
)
output_group.add_argument(
"--reverse-sort",
dest="reverse_sort",
action="store_true",
help="Reverses the ordering of imports.",
)
output_group.add_argument(
"--sort-order",
dest="sort_order",
help="Specify sorting function. Can be built in (natural[default] = force numbers "
"to be sequential, native = Python's built-in sorted function) or an installable plugin.",
)
inline_args_group.add_argument(
"--sl",
"--force-single-line-imports",
dest="force_single_line",
action="store_true",
help="Forces all from imports to appear on their own line",
)
output_group.add_argument(
"--nsl",
"--single-line-exclusions",
help="One or more modules to exclude from the single line rule.",
dest="single_line_exclusions",
action="append",
)
output_group.add_argument(
"--tc",
"--trailing-comma",
dest="include_trailing_comma",
action="store_true",
help="Includes a trailing comma on multi line imports that include parentheses.",
)
output_group.add_argument(
"--up",
"--use-parentheses",
dest="use_parentheses",
action="store_true",
help="Use parentheses for line continuation on length limit instead of slashes."
" **NOTE**: This is separate from wrap modes, and only affects how individual lines that "
" are too long get continued, not sections of multiple imports.",
)
output_group.add_argument(
"-l",
"-w",
"--line-length",
"--line-width",
help="The max length of an import line (used for wrapping long imports).",
dest="line_length",
type=int,
)
output_group.add_argument(
"--wl",
"--wrap-length",
dest="wrap_length",
type=int,
help="Specifies how long lines that are wrapped should be, if not set line_length is used."
"\nNOTE: wrap_length must be LOWER than or equal to line_length.",
)
output_group.add_argument(
"--case-sensitive",
dest="case_sensitive",
action="store_true",
help="Tells isort to include casing when sorting module names",
)
output_group.add_argument(
"--remove-redundant-aliases",
dest="remove_redundant_aliases",
action="store_true",
help=(
"Tells isort to remove redundant aliases from imports, such as `import os as os`."
" This defaults to `False` simply because some projects use these seemingly useless "
" aliases to signify intent and change behaviour."
),
)
output_group.add_argument(
"--honor-noqa",
dest="honor_noqa",
action="store_true",
help="Tells isort to honor noqa comments to enforce skipping those comments.",
)
output_group.add_argument(
"--treat-comment-as-code",
dest="treat_comments_as_code",
action="append",
help="Tells isort to treat the specified single line comment(s) as if they are code.",
)
output_group.add_argument(
"--treat-all-comment-as-code",
dest="treat_all_comments_as_code",
action="store_true",
help="Tells isort to treat all single line comments as if they are code.",
)
output_group.add_argument(
"--formatter",
dest="formatter",
type=str,
help="Specifies the name of a formatting plugin to use when producing output.",
)
output_group.add_argument(
"--color",
dest="color_output",
action="store_true",
help="Tells isort to use color in terminal output.",
)
output_group.add_argument(
"--ext-format",
dest="ext_format",
help="Tells isort to format the given files according to an extensions formatting rules.",
)
output_group.add_argument(
"--star-first",
help="Forces star imports above others to avoid overriding directly imported variables.",
dest="star_first",
action="store_true",
)
output_group.add_argument(
"--split-on-trailing-comma",
help="Split imports list followed by a trailing comma into VERTICAL_HANGING_INDENT mode",
dest="split_on_trailing_comma",
action="store_true",
)
section_group.add_argument(
"--sd",
"--section-default",
dest="default_section",
help="Sets the default section for import options: " + str(sections.DEFAULT),
)
section_group.add_argument(
"--only-sections",
"--os",
dest="only_sections",
action="store_true",
help="Causes imports to be sorted based on their sections like STDLIB, THIRDPARTY, etc. "
"Within sections, the imports are ordered by their import style and the imports with "
"the same style maintain their relative positions.",
)
section_group.add_argument(
"--ds",
"--no-sections",
help="Put all imports into the same section bucket",
dest="no_sections",
action="store_true",
)
section_group.add_argument(
"--fas",
"--force-alphabetical-sort",
action="store_true",
dest="force_alphabetical_sort",
help="Force all imports to be sorted as a single section",
)
section_group.add_argument(
"--fss",
"--force-sort-within-sections",
action="store_true",
dest="force_sort_within_sections",
help="Don't sort straight-style imports (like import sys) before from-style imports "
"(like from itertools import groupby). Instead, sort the imports by module, "
"independent of import style.",
)
section_group.add_argument(
"--hcss",
"--honor-case-in-force-sorted-sections",
action="store_true",
dest="honor_case_in_force_sorted_sections",
help="Honor `--case-sensitive` when `--force-sort-within-sections` is being used. "
"Without this option set, `--order-by-type` decides module name ordering too.",
)
section_group.add_argument(
"--srss",
"--sort-relative-in-force-sorted-sections",
action="store_true",
dest="sort_relative_in_force_sorted_sections",
help="When using `--force-sort-within-sections`, sort relative imports the same "
"way as they are sorted when not using that setting.",
)
section_group.add_argument(
"--fass",
"--force-alphabetical-sort-within-sections",
action="store_true",
dest="force_alphabetical_sort_within_sections",
help="Force all imports to be sorted alphabetically within a section",
)
section_group.add_argument(
"-t",
"--top",
help="Force specific imports to the top of their appropriate section.",
dest="force_to_top",
action="append",
)
section_group.add_argument(
"--combine-straight-imports",
"--csi",
dest="combine_straight_imports",
action="store_true",
help="Combines all the bare straight imports of the same section in a single line. "
"Won't work with sections which have 'as' imports",
)
section_group.add_argument(
"--nlb",
"--no-lines-before",
help="Sections which should not be split with previous by empty lines",
dest="no_lines_before",
action="append",
)
section_group.add_argument(
"--src",
"--src-path",
dest="src_paths",
action="append",
help="Add an explicitly defined source path "
"(modules within src paths have their imports automatically categorized as first_party)."
" Glob expansion (`*` and `**`) is supported for this option.",
)
section_group.add_argument(
"-b",
"--builtin",
dest="known_standard_library",
action="append",
help="Force isort to recognize a module as part of Python's standard library.",
)
section_group.add_argument(
"--extra-builtin",
dest="extra_standard_library",
action="append",
help="Extra modules to be included in the list of ones in Python's standard library.",
)
section_group.add_argument(
"-f",
"--future",
dest="known_future_library",
action="append",
help="Force isort to recognize a module as part of Python's internal future compatibility "
"libraries. WARNING: this overrides the behavior of __future__ handling and therefore"
" can result in code that can't execute. If you're looking to add dependencies such "
"as six, a better option is to create another section below --future using custom "
"sections. See: https://github.com/PyCQA/isort#custom-sections-and-ordering and the "
"discussion here: https://github.com/PyCQA/isort/issues/1463.",
)
section_group.add_argument(
"-o",
"--thirdparty",
dest="known_third_party",
action="append",
help="Force isort to recognize a module as being part of a third party library.",
)
section_group.add_argument(
"-p",
"--project",
dest="known_first_party",
action="append",
help="Force isort to recognize a module as being part of the current python project.",
)
section_group.add_argument(
"--known-local-folder",
dest="known_local_folder",
action="append",
help="Force isort to recognize a module as being a local folder. "
"Generally, this is reserved for relative imports (from . import module).",
)
section_group.add_argument(
"--virtual-env",
dest="virtual_env",
help="Virtual environment to use for determining whether a package is third-party",
)
section_group.add_argument(
"--conda-env",
dest="conda_env",
help="Conda environment to use for determining whether a package is third-party",
)
section_group.add_argument(
"--py",
"--python-version",
action="store",
dest="py_version",
choices=tuple(VALID_PY_TARGETS) + ("auto",),
help="Tells isort to set the known standard library based on the specified Python "
"version. Default is to assume any Python 3 version could be the target, and use a union "
"of all stdlib modules across versions. If auto is specified, the version of the "
"interpreter used to run isort "
f"(currently: {sys.version_info.major}{sys.version_info.minor}) will be used.",
)
# deprecated options
deprecated_group.add_argument(
"--recursive",
dest="deprecated_flags",
action="append_const",
const="--recursive",
help=argparse.SUPPRESS,
)
deprecated_group.add_argument(
"-rc", dest="deprecated_flags", action="append_const", const="-rc", help=argparse.SUPPRESS
)
deprecated_group.add_argument(
"--dont-skip",
dest="deprecated_flags",
action="append_const",
const="--dont-skip",
help=argparse.SUPPRESS,
)
deprecated_group.add_argument(
"-ns", dest="deprecated_flags", action="append_const", const="-ns", help=argparse.SUPPRESS
)
deprecated_group.add_argument(
"--apply",
dest="deprecated_flags",
action="append_const",
const="--apply",
help=argparse.SUPPRESS,
)
deprecated_group.add_argument(
"-k",
"--keep-direct-and-as",
dest="deprecated_flags",
action="append_const",
const="--keep-direct-and-as",
help=argparse.SUPPRESS,
)
return parser
def parse_args(argv: Optional[Sequence[str]] = None) -> Dict[str, Any]:
    """Parse command line arguments into a dict of explicitly-set options.

    Deprecated single-dash flags are translated to their double-dash form
    before parsing, negated ``dont_*`` options are folded into their positive
    counterparts, and ``multi_line_output`` is resolved to a WrapModes member.
    Only truthy values survive into the returned dict.
    """
    args = list(sys.argv[1:]) if argv is None else list(argv)

    # Translate deprecated single-dash flags (e.g. "-ac" -> "--ac") while
    # remembering which ones were used so a warning can be emitted later.
    translated = [flag for flag in args if flag in DEPRECATED_SINGLE_DASH_ARGS]
    args = [f"-{flag}" if flag in DEPRECATED_SINGLE_DASH_ARGS else flag for flag in args]

    parsed = vars(_build_arg_parser().parse_args(args))
    arguments = {name: value for name, value in parsed.items() if value}
    if translated:
        arguments["remapped_deprecated_args"] = translated

    # Fold the "dont_*" negations into their positive option names.
    for negated, positive in (
        ("dont_order_by_type", "order_by_type"),
        ("dont_follow_links", "follow_links"),
    ):
        if negated in arguments:
            del arguments[negated]
            arguments[positive] = False

    if arguments.pop("dont_float_to_top", False):
        if arguments.get("float_to_top", False):
            sys.exit("Can't set both --float-to-top and --dont-float-to-top.")
        arguments["float_to_top"] = False

    # multi_line_output arrives as a string: either a numeric mode or a name.
    wrap_mode = arguments.get("multi_line_output", None)
    if wrap_mode:
        arguments["multi_line_output"] = (
            WrapModes(int(wrap_mode)) if wrap_mode.isdigit() else WrapModes[wrap_mode]
        )

    return arguments
def _preconvert(item: Any) -> Union[str, List[Any]]:
"""Preconverts objects from native types into JSONifyiable types"""
if isinstance(item, (set, frozenset)):
return list(item)
if isinstance(item, WrapModes):
return str(item.name)
if isinstance(item, Path):
return str(item)
if callable(item) and hasattr(item, "__name__"):
return str(item.__name__)
raise TypeError(f"Unserializable object {item} of type {type(item)}")
def identify_imports_main(
    argv: Optional[Sequence[str]] = None, stdin: Optional[TextIOWrapper] = None
) -> None:
    """Entry point for the import-identification CLI.

    Parses *argv* (defaulting to the process arguments), finds imports in the
    given files — or in *stdin* when the single file argument is ``-`` — and
    prints one line per identified import, formatted according to the
    requested uniqueness mode.
    """
    parser = argparse.ArgumentParser(
        # Fixed: the two concatenated fragments previously rendered as
        # "given file.Use `-` ..." with no separating space.
        description="Get all import definitions from a given file. "
        "Use `-` as the first argument to represent stdin."
    )
    parser.add_argument(
        "files", nargs="+", help="One or more Python source files that need their imports sorted."
    )
    parser.add_argument(
        "--top-only",
        action="store_true",
        default=False,
        # Fixed grammar: previously read "occur in before functions".
        help="Only identify imports that occur before functions or classes.",
    )

    target_group = parser.add_argument_group("target options")
    target_group.add_argument(
        "--follow-links",
        action="store_true",
        default=False,
        help="Tells isort to follow symlinks that are encountered when running recursively.",
    )

    # The mutually-exclusive "uniqueness" options all share the `unique` dest:
    # a bool for --unique, or an ImportKey selecting the output granularity.
    uniqueness = parser.add_mutually_exclusive_group()
    uniqueness.add_argument(
        "--unique",
        action="store_true",
        default=False,
        help="If true, isort will only identify unique imports.",
    )
    uniqueness.add_argument(
        "--packages",
        dest="unique",
        action="store_const",
        const=api.ImportKey.PACKAGE,
        default=False,
        help="If true, isort will only identify the unique top level modules imported.",
    )
    uniqueness.add_argument(
        "--modules",
        dest="unique",
        action="store_const",
        const=api.ImportKey.MODULE,
        default=False,
        help="If true, isort will only identify the unique modules imported.",
    )
    uniqueness.add_argument(
        "--attributes",
        dest="unique",
        action="store_const",
        const=api.ImportKey.ATTRIBUTE,
        default=False,
        help="If true, isort will only identify the unique attributes imported.",
    )

    arguments = parser.parse_args(argv)

    file_names = arguments.files
    if file_names == ["-"]:
        identified_imports = api.find_imports_in_stream(
            sys.stdin if stdin is None else stdin,
            unique=arguments.unique,
            top_only=arguments.top_only,
            follow_links=arguments.follow_links,
        )
    else:
        identified_imports = api.find_imports_in_paths(
            file_names,
            unique=arguments.unique,
            top_only=arguments.top_only,
            follow_links=arguments.follow_links,
        )

    # `unique` doubles as the output-format selector: each ImportKey prints a
    # progressively more specific portion of the identified import.
    for identified_import in identified_imports:
        if arguments.unique == api.ImportKey.PACKAGE:
            print(identified_import.module.split(".")[0])
        elif arguments.unique == api.ImportKey.MODULE:
            print(identified_import.module)
        elif arguments.unique == api.ImportKey.ATTRIBUTE:
            print(f"{identified_import.module}.{identified_import.attribute}")
        else:
            print(str(identified_import))
def main(argv: Optional[Sequence[str]] = None, stdin: Optional[TextIOWrapper] = None) -> None:
    """Entry point for the `isort` command line tool.

    Parses the CLI arguments, builds a Config, then either prints the
    requested information (version / config / files) or sorts the given
    files (or stdin when the single file argument is `-`).  Exits with
    status 1 when a check fails, when every given path is broken, or when
    no file had a valid encoding.
    """
    arguments = parse_args(argv)
    if arguments.get("show_version"):
        print(ASCII_ART)
        return

    show_config: bool = arguments.pop("show_config", False)
    show_files: bool = arguments.pop("show_files", False)
    if show_config and show_files:
        sys.exit("Error: either specify show-config or show-files not both.")

    # A settings_path pointing at a file is treated as an explicit settings
    # file; otherwise the path is used as the settings search root.
    if "settings_path" in arguments:
        if os.path.isfile(arguments["settings_path"]):
            arguments["settings_file"] = os.path.abspath(arguments["settings_path"])
            arguments["settings_path"] = os.path.dirname(arguments["settings_file"])
        else:
            arguments["settings_path"] = os.path.abspath(arguments["settings_path"])

    if "virtual_env" in arguments:
        venv = arguments["virtual_env"]
        arguments["virtual_env"] = os.path.abspath(venv)
        if not os.path.isdir(arguments["virtual_env"]):
            warn(f"virtual_env dir does not exist: {arguments['virtual_env']}")

    file_names = arguments.pop("files", [])
    if not file_names and not show_config:
        print(QUICK_GUIDE)
        if arguments:
            sys.exit("Error: arguments passed in without any paths or content.")
        return
    # Default the settings search root from the input location.
    if "settings_path" not in arguments:
        arguments["settings_path"] = (
            arguments.get("filename", None) or os.getcwd()
            if file_names == ["-"]
            else os.path.abspath(file_names[0] if file_names else ".")
        )
        if not os.path.isdir(arguments["settings_path"]):
            arguments["settings_path"] = os.path.dirname(arguments["settings_path"])

    # Split CLI-only options away from the options that feed Config().
    config_dict = arguments.copy()
    ask_to_apply = config_dict.pop("ask_to_apply", False)
    jobs = config_dict.pop("jobs", None)
    check = config_dict.pop("check", False)
    show_diff = config_dict.pop("show_diff", False)
    write_to_stdout = config_dict.pop("write_to_stdout", False)
    deprecated_flags = config_dict.pop("deprecated_flags", False)
    remapped_deprecated_args = config_dict.pop("remapped_deprecated_args", False)
    stream_filename = config_dict.pop("filename", None)
    ext_format = config_dict.pop("ext_format", None)
    allow_root = config_dict.pop("allow_root", None)
    resolve_all_configs = config_dict.pop("resolve_all_configs", False)
    wrong_sorted_files = False
    all_attempt_broken = False
    no_valid_encodings = False

    config_trie: Optional[Trie] = None
    if resolve_all_configs:
        config_trie = find_all_configs(config_dict.pop("config_root", "."))

    if "src_paths" in config_dict:
        config_dict["src_paths"] = {
            Path(src_path).resolve() for src_path in config_dict.get("src_paths", ())
        }

    config = Config(**config_dict)
    if show_config:
        print(json.dumps(config.__dict__, indent=4, separators=(",", ": "), default=_preconvert))
        return
    if file_names == ["-"]:
        # Streaming mode: sort stdin to stdout.
        file_path = Path(stream_filename) if stream_filename else None
        if show_files:
            sys.exit("Error: can't show files for streaming input.")
        input_stream = sys.stdin if stdin is None else stdin
        if check:
            incorrectly_sorted = not api.check_stream(
                input_stream=input_stream,
                config=config,
                show_diff=show_diff,
                file_path=file_path,
                extension=ext_format,
            )

            wrong_sorted_files = incorrectly_sorted
        else:
            try:
                api.sort_stream(
                    input_stream=input_stream,
                    output_stream=sys.stdout,
                    config=config,
                    show_diff=show_diff,
                    file_path=file_path,
                    extension=ext_format,
                    raise_on_skip=False,
                )
            except FileSkipped:
                # A skipped stream is passed through to stdout unchanged.
                sys.stdout.write(input_stream.read())
    elif "/" in file_names and not allow_root:
        printer = create_terminal_printer(
            color=config.color_output, error=config.format_error, success=config.format_success
        )
        printer.error("it is dangerous to operate recursively on '/'")
        printer.error("use --allow-root to override this failsafe")
        sys.exit(1)
    else:
        if stream_filename:
            printer = create_terminal_printer(
                color=config.color_output, error=config.format_error, success=config.format_success
            )
            printer.error("Filename override is intended only for stream (-) sorting.")
            sys.exit(1)
        skipped: List[str] = []
        broken: List[str] = []

        if config.filter_files:
            filtered_files = []
            for file_name in file_names:
                if config.is_skipped(Path(file_name)):
                    skipped.append(file_name)
                else:
                    filtered_files.append(file_name)
            file_names = filtered_files

        file_names = files.find(file_names, config, skipped, broken)
        if show_files:
            for file_name in file_names:
                print(file_name)
            return
        num_skipped = 0
        num_broken = 0
        num_invalid_encoding = 0
        if config.verbose:
            print(ASCII_ART)

        if jobs:
            # Sort files in parallel across a process pool.
            import multiprocessing

            executor = multiprocessing.Pool(jobs if jobs > 0 else multiprocessing.cpu_count())
            # NOTE(review): unlike the sequential branch below, show_diff is
            # not forwarded to sort_imports here — confirm this is intended.
            attempt_iterator = executor.imap(
                functools.partial(
                    sort_imports,
                    config=config,
                    check=check,
                    ask_to_apply=ask_to_apply,
                    write_to_stdout=write_to_stdout,
                    extension=ext_format,
                    config_trie=config_trie,
                ),
                file_names,
            )
        else:
            # https://github.com/python/typeshed/pull/2814
            attempt_iterator = (
                sort_imports(  # type: ignore
                    file_name,
                    config=config,
                    check=check,
                    ask_to_apply=ask_to_apply,
                    show_diff=show_diff,
                    write_to_stdout=write_to_stdout,
                    extension=ext_format,
                    config_trie=config_trie,
                )
                for file_name in file_names
            )

        # If any files passed in are missing considered as error, should be removed
        is_no_attempt = True
        any_encoding_valid = False
        for sort_attempt in attempt_iterator:
            if not sort_attempt:
                continue  # pragma: no cover - shouldn't happen, satisfies type constraint
            incorrectly_sorted = sort_attempt.incorrectly_sorted
            if arguments.get("check", False) and incorrectly_sorted:
                wrong_sorted_files = True
            if sort_attempt.skipped:
                num_skipped += (
                    1  # pragma: no cover - shouldn't happen, due to skip in iter_source_code
                )

            if not sort_attempt.supported_encoding:
                num_invalid_encoding += 1
            else:
                any_encoding_valid = True

            is_no_attempt = False

        num_skipped += len(skipped)
        if num_skipped and not config.quiet:
            if config.verbose:
                for was_skipped in skipped:
                    print(
                        f"{was_skipped} was skipped as it's listed in 'skip' setting, "
                        "matches a glob in 'skip_glob' setting, or is in a .gitignore file with "
                        "--skip-gitignore enabled."
                    )
            print(f"Skipped {num_skipped} files")

        num_broken += len(broken)
        if num_broken and not config.quiet:
            if config.verbose:
                for was_broken in broken:
                    warn(f"{was_broken} was broken path, make sure it exists correctly")
            print(f"Broken {num_broken} paths")

        if num_broken > 0 and is_no_attempt:
            all_attempt_broken = True
        if num_invalid_encoding > 0 and not any_encoding_valid:
            no_valid_encodings = True

    # Emit deprecation warnings once, after the work has been done.
    if not config.quiet and (remapped_deprecated_args or deprecated_flags):
        if remapped_deprecated_args:
            warn(
                "W0502: The following deprecated single dash CLI flags were used and translated: "
                f"{', '.join(remapped_deprecated_args)}!"
            )
        if deprecated_flags:
            warn(
                "W0501: The following deprecated CLI flags were used and ignored: "
                f"{', '.join(deprecated_flags)}!"
            )
        warn(
            "W0500: Please see the 5.0.0 Upgrade guide: "
            "https://pycqa.github.io/isort/docs/upgrade_guides/5.0.0.html"
        )

    if wrong_sorted_files:
        sys.exit(1)
    if all_attempt_broken:
        sys.exit(1)
    if no_valid_encodings:
        printer = create_terminal_printer(
            color=config.color_output, error=config.format_error, success=config.format_success
        )
        printer.error("No valid encodings.")
        sys.exit(1)
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| PyCQA/isort | isort/main.py | main.py | py | 46,907 | python | en | code | 6,145 | github-code | 1 | [
{
"api_name": "logo.ASCII_ART",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "settings.Config",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "exceptions.FileSkipped",
... |
535490489 | import requests
from bs4 import BeautifulSoup
import smtplib
import email.message
# Product page to monitor on the PlayStation Store.
Req = "https://store.playstation.com/pt-br/product/UP0082-PPSA10664_00-FF16SIEA00000002"
# Replace "browserA" with your own browser user agent >
# https://www.whatismybrowser.com/detect/what-is-my-user-agent/
headers = {'User-Agent': "browserA"}
store = requests.get(Req, headers=headers)
soup = BeautifulSoup(store.content, 'html.parser')
# Game title heading (fetched but not used further down).
ff = soup.find('h1', class_ = 'psw-m-b-5').get_text()
# Raw price text scraped from the page.
valor = soup.find('span', class_= 'psw-l-line-left psw-l-line-wrap').get_text()
# NOTE(review): assumes the price digits always sit at characters 2-5 of the
# scraped text — fragile if the store changes its price formatting; verify.
nvalor = valor[2:6]
nvalor = nvalor.replace(',','')
nvalor = float(nvalor)
def gomail():
    """Send a price-alert e-mail via SMTP with STARTTLS."""
    # Body of the alert message (kept in the original Portuguese on purpose —
    # it is user-facing text, not a comment).
    email_content = """
Final Fantasy XVI acaba de entrar no valor desejado, aproveite! >>>
https://store.playstation.com/pt-br/product/UP0082-PPSA10664_00-FF16SIEA00000002
"""
    cont = email.message.Message()
    cont['Subject'] = 'FFXVI BARATINHO!'
    # Replace 'email' with your e-mail address
    cont['From'] = 'email'
    # Replace 'email' with your e-mail address
    cont['To'] = 'email'
    # Replace 'pw' with your e-mail password
    senha = 'pw'
    cont.add_header('Content-Type', 'text/html')
    cont.set_payload(email_content)
    # Replace "outlook.com" with your e-mail provider's SMTP domain.
    # NOTE(review): the host string contains a space after ':' — confirm
    # smtplib accepts this "host: port" form on the target interpreter.
    envio = smtplib.SMTP('smtp.outlook.com: 587')
    envio.ehlo()
    envio.starttls()
    envio.login(cont['From'], senha)
    envio.sendmail(cont['From'], [cont['To']], cont.as_string())
    envio.quit()
# 200 BRL is the most the author would pay for this game; replace 200 with
# whatever price threshold you want the alert to trigger at.
if (nvalor < 200):
    gomail()
| Marcos-SL/FFXVI-price-monitor | FFXVImonitor.py | FFXVImonitor.py | py | 1,685 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "email.message.message.Message",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "email.me... |
16149837506 | from pygame import mixer # for audio playing
from tkinter import * # for gui
import os
from mutagen.mp3 import MP3 # extracting metadata from file
import tkinter.messagebox # for messages showing error for example
from tkinter import filedialog
from tkinter import ttk
from ttkthemes import themed_tk as tk
import time
import threading
# create a window for the mp3 player
root = tk.ThemedTk()
root.get_themes()
root.set_theme("keramik")
# Status bar pinned to the bottom edge of the window.
status_bar = ttk.Label(root, text="Welcome to Acoustic!", relief=SUNKEN, anchor=W, font='Arial 10 bold')
status_bar.pack(side=BOTTOM, fill=X)
# Create the menu bar
menu_bar = Menu(root)
subMenu = Menu(menu_bar, tearoff=0)
root.config(menu=menu_bar)
root.title("Acoustic")  # change the title displayed on top of the window
root.iconbitmap(r'images/acoustic_logo.ico')  # change the logo
mixer.init()  # initiate the mixer module
# Root Window contains the status bar, left frame and right frame
# Left frame contains the list box
# Right frame contains the top, middle and bottom frames
left_frame = ttk.Frame(root)
left_frame.pack(side=LEFT, padx=30)
right_frame = ttk.Frame(root)
right_frame.pack()
top_frame = ttk.Frame(right_frame)
top_frame.pack()
# Labels that show the track's total length and the elapsed play time.
length_label = ttk.Label(top_frame, text='Total Length: --:--', font='Arial 10 bold')
length_label.pack(pady=10)
current_duration_label = ttk.Label(top_frame, text='Current Duration: --:--', font='Arial 10 bold')
current_duration_label.pack()
# Playlist contains the full path + filename
# Playlist box contains just the filename
# Full path + filename is required to play music inside play_music function
playlist = []
# function to choose file from directory and adding functionality to the 'open' cascade
def browse_file():
    """Open a file-chooser dialog and add the selected song to the playlist."""
    global song_path  # remembered globally; add_to_playlist reads it for the full path
    song_path = filedialog.askopenfilename()
    add_to_playlist(song_path)
def add_to_playlist(song_name):
    """Insert a song at the top of both the visible listbox and the playlist.

    *song_name* is the full path to the audio file; only its basename is
    shown in the listbox, while the full path is kept in `playlist` so
    play_music can load it later.
    """
    # Fixed: previously the full path stored in `playlist` came from the
    # global `song_path` instead of the parameter (and a dead `index += 1`
    # followed the inserts).  Using the parameter keeps the function usable
    # from any caller, not just browse_file.
    index = 0
    playlist_box.insert(index, os.path.basename(song_name))
    playlist.insert(index, song_name)
def about_us():
    """Show the 'About Acoustic' information dialog."""
    tkinter.messagebox.showinfo('About Acoustic', 'This is a music player build using tkinter on Python')
def show_details(play_song):
    """Display the total length of *play_song* and start the elapsed counter.

    mp3 lengths are read from the file's metadata (mutagen); any other
    format is loaded by pygame to compute the length.
    """
    file_data = os.path.splitext(play_song)
    if file_data[1] == '.mp3':
        audio = MP3(play_song)
        total_length = audio.info.length
    else:
        a = mixer.Sound(play_song)
        total_length = a.get_length()
    # div - total_length/60, mod - total_length%60
    mins, secs = divmod(total_length, 60)
    mins = round(mins)
    secs = round(secs)
    # proper format to display the length
    time_format = '{:02d}:{:02d}'.format(mins, secs)
    length_label['text'] = "Total Length: " + ' - ' + time_format
    # Threading is necessary because the while loop in start_count will stop the other parts of the program until it is finished
    t1 = threading.Thread(target=start_count, args=(total_length,))
    t1.start()
def start_count(t):
    """Count elapsed playback seconds up to *t*, updating the duration label.

    Runs on a worker thread (started by show_details).  The loop exits when
    playback stops: mixer.music.get_busy() turns false after the stop button.
    While the global `paused` flag is set, the counter freezes.
    """
    global paused
    current_time = 0
    while current_time <= t and mixer.music.get_busy():
        if paused:
            # Fixed: the old `continue` with no sleep busy-spun a full CPU
            # core for the whole time playback was paused.  Sleep briefly
            # instead; the elapsed counter stays intentionally frozen.
            time.sleep(0.25)
            continue
        mins, secs = divmod(current_time, 60)
        mins = round(mins)
        secs = round(secs)
        time_format = '{:02d}:{:02d}'.format(mins, secs)
        current_duration_label['text'] = "Current Duration: " + ' - ' + time_format
        time.sleep(1)
        current_time += 1
def play_music(event):
    """Play (or resume) the song currently selected in the playlist box."""
    global paused
    if paused:
        mixer.music.unpause()
        status_bar['text'] = "Music resumed."
        paused = FALSE
    else:
        try:
            # when switching the song in the playlist we need to stop the song that is already playing because
            # if it is not stopped another thread will be started and the duration label will not display
            # the time correctly
            stop_music()
            time.sleep(1)
            selected_song = playlist_box.curselection()
            selected_song = int(selected_song[0])
            play_song = playlist[selected_song]
            mixer.music.load(play_song)
            mixer.music.play()
            status_bar['text'] = "Playing music " + ' ' + os.path.basename(play_song)
            show_details(play_song)
        # NOTE(review): bare `except:` also swallows KeyboardInterrupt and
        # masks bugs (e.g. an empty selection) as "File not found" — consider
        # narrowing to the specific exceptions expected here.
        except:
            tkinter.messagebox.showerror("File not found", "Acoustic could not find the file, please check again.")
def stop_music():
    """Stop playback entirely and clear the shared pause flag."""
    # Fixed: without `global`, the `paused = FALSE` below only created a dead
    # local variable, so after stopping a paused track the module-level flag
    # stayed True and the next play_music call wrongly took the unpause path.
    global paused
    mixer.music.stop()
    status_bar['text'] = "Music stopped."
    paused = FALSE
# function that pauses the music
def pause_music(event):
    """Pause playback and set the shared pause flag read by start_count."""
    global paused
    paused = TRUE
    mixer.music.pause()
    status_bar['text'] = "Music paused."
def rewind_music(event):
    """Restart the selected song from the beginning by replaying it."""
    play_music(event)
    status_bar['text'] = "Music rewound."
muted = FALSE  # tracks whether sound is currently muted
# function for the mute and unmute functionality
def mute_music(event):
    """Toggle mute: restore a 50% volume or silence, syncing icon and slider."""
    global muted
    if muted:  # Unmute the music
        mixer.music.set_volume(0.5)
        volume_btn.configure(image=volume_photo)
        scale_vol.set(50)
        muted = FALSE
    else:  # Mute the music
        mixer.music.set_volume(0)
        volume_btn.configure(image=mute_photo)
        scale_vol.set(0)
        muted = TRUE
# set_volume function of mixer takes value only from 0 to 1 exp: 0.1, 0.25
def set_vol(val):
    """Scale callback: map the 0-100 slider value onto mixer's 0.0-1.0 range."""
    mixer.music.set_volume(float(val) / 100)
def del_song():
    """Remove the selected song from both the visible listbox and the playlist."""
    selected_song = playlist_box.curselection()
    selected_song = int(selected_song[0])
    playlist_box.delete(selected_song)
    playlist.pop(selected_song)
# center frame for play,stop,pause buttons and status bar
center_frame = ttk.Frame(right_frame)
center_frame.pack(pady=30, padx=30)
# Create the sub-menu
menu_bar.add_cascade(label="File", menu=subMenu)
subMenu.add_command(label="Open", command=browse_file)
subMenu.add_command(label="Exit", command=root.destroy)
subMenu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="Help", menu=subMenu)
subMenu.add_command(label="About us", command=about_us)
# Listbox showing the song names of the current playlist.
playlist_box = Listbox(left_frame)
playlist_box.pack()
# Add / remove playlist buttons under the listbox.
add_photo = PhotoImage(file='images/add_button.png')
add_btn = ttk.Button(left_frame, image=add_photo, command=browse_file)
add_btn.pack(side=LEFT)
del_photo = PhotoImage(file='images/remove_button.png')
del_btn = ttk.Button(left_frame, image=del_photo, command=del_song)
del_btn.pack(side=RIGHT)
play_photo = PhotoImage(file='images/play_button.png')  # specify what is the image to be inserted
play_btn = ttk.Button(center_frame, image=play_photo)  # convert the image into a button
play_btn.bind("<Button-1>", play_music)
play_btn.grid(row=0, column=0, padx=10)
stop_photo = PhotoImage(file='images/stop_button.png')  # specify what is the image to be inserted
stop_btn = ttk.Button(center_frame, image=stop_photo, command=stop_music)  # convert the image into a button
stop_btn.grid(row=0, column=1, padx=10)
pause_photo = PhotoImage(file='images/pause_button.png')
pause_btn = ttk.Button(center_frame, image=pause_photo)
pause_btn.bind("<Button-1>", pause_music)
pause_btn.grid(row=0, column=2, padx=10)
# bottom frame for mute,rewind,scale of volume
bottom_frame = ttk.Frame(right_frame)
bottom_frame.pack()
rewind_photo = PhotoImage(file='images/rewind_button.png')
rewind_btn = ttk.Button(bottom_frame, image=rewind_photo)
rewind_btn.bind("<Button-1>", rewind_music)
rewind_btn.grid(row=0, column=0)
mute_photo = PhotoImage(file='images/mute_btn.png')
volume_photo = PhotoImage(file='images/sound_btn.png')
volume_btn = ttk.Button(bottom_frame, image=volume_photo)
volume_btn.bind("<Button-1>", mute_music)
volume_btn.grid(row=0, column=1, padx=10, pady=10)
# adding a scale for the sound level
scale_vol = ttk.Scale(bottom_frame, from_=0, to=100, orient=HORIZONTAL, command=set_vol)
scale_vol.set(50)  # implement the default value of scale when music player starts
mixer.music.set_volume(0.5)
scale_vol.grid(row=0, column=2, pady=15, padx=30)
def on_close():
    """Stop playback before destroying the window so the counter thread exits."""
    stop_music()
    root.destroy()
# Ensures that closing the window while music is playing doesn't raise an
# error from the duration-counter thread function.
root.protocol("WM_DELETE_WINDOW", on_close)
root.mainloop()
| MartinKalchev/Music-Player | main.py | main.py | py | 8,410 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "ttkthemes.themed_tk.ThemedTk",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "ttkthemes.themed_tk",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "tkinter.ttk.Label",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tk... |
14760350365 | try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
import pandas as pd
from worldcereal import resources
def load_refid_lut():
with pkg_resources.open_text(resources, 'CIB_RefIdLUT.csv') as LUTfile:
LUT = pd.read_csv(LUTfile, delimiter=";")
return LUT
def load_refidweights():
LUT = load_refid_lut()[['CIB', 'LC', 'CT', 'IRR']]
LUT = LUT[LUT.isnull().sum(axis=1) == 0]
LUT['CIB'] = ['_'.join(x.split('_')[:3]) for x in
LUT['CIB'].values]
LUT = LUT.drop_duplicates().set_index('CIB')
return LUT.to_dict(orient='index')
def get_refid_weight(ref_id, label, refidweights=None):
'''
Function to get the weight to be put on
a particular ref_id.
'''
refid_weights = refidweights or load_refidweights()
weight = refid_weights.get(ref_id, None)
if weight is not None:
weight = weight[label]
else:
weight = 90
return weight
| WorldCereal/worldcereal-classification | src/worldcereal/classification/weights.py | weights.py | py | 1,057 | python | en | code | 12 | github-code | 1 | [
{
"api_name": "importlib_resources.open_text",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "worldcereal.resources",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
}
] |
71496463393 | import numpy as np
from scipy.optimize import root_scalar
from scipy.optimize import fsolve
class sieplasma(object):
    """Perturbative lens model: singular isothermal ellipsoid (SIE) plus an
    exponential plasma potential.

    All quantities (effective Einstein radius, image angles, magnification
    ratio ``R``, time delays ``t``/``dt``, ...) are computed eagerly in
    ``__init__`` and exposed as attributes.

    NOTE(review): parameter semantics below are inferred from how the
    values are used -- confirm against the accompanying paper/notebook.
    """

    def __init__(self, theta_E_g, eta, zl, c, Dl, Ds, Dls, psi0_plasma_num, theta_0_num, B, C, delta_rs, deltab_10, deltab_20):
        # theta_E_g: gravitational Einstein radius; eta: ellipticity-like
        # parameter; zl: lens redshift; c: speed of light; Dl/Ds/Dls:
        # angular-diameter distances (lens / source / lens-source);
        # psi0_plasma_num, theta_0_num, B, C: amplitude, angular scale and
        # shape of the plasma potential; deltab_10/deltab_20: presumably
        # source-plane offsets -- TODO confirm.
        self.theta_E_g = theta_E_g
        self.eta = eta
        self.psi0_plasma_num = psi0_plasma_num
        self.theta_0_num = theta_0_num
        self.B = B
        self.C = C
        self.delta_rs = delta_rs
        self.deltab_10 = deltab_10
        self.deltab_20 = deltab_20

        # Radial lens equation of the circular part; its root is the
        # plasma-corrected Einstein radius.
        def f(r):
            tmp_f = r - theta_E_g + C/r * (r/B/theta_0_num)**C * psi0_plasma_num * np.exp(-(r/B/theta_0_num)**C)
            return tmp_f

        # bisection on a bracket around the gravitational Einstein radius
        zero = root_scalar(f, bracket=[theta_E_g*.1, theta_E_g*1.9], method='bisect')
        self.theta_E = zero.root
        self.r = zero.root
        r = self.r

        # Angular equation; its zeros give the position angles of the images.
        def g(phi_k):
            tmp_g = theta_E_g*eta*np.sin(2.*phi_k)/np.sqrt(1.-eta*np.cos(2.*phi_k)) - deltab_10*np.sin(phi_k) + deltab_20*np.cos(phi_k)
            return tmp_g

        # two initial guesses, one per expected image
        phi_arr = np.array([np.pi/4, 5/4*np.pi])
        zeros_g = fsolve(g, phi_arr)
        self.zeros_phi = zeros_g
        zeros_phi = self.zeros_phi

        # Potential and its radial/angular derivatives evaluated at the
        # zeroth-order image positions (r, zeros_phi).
        tmp_psi = theta_E_g*r*np.sqrt(1.-eta*np.cos(2.*zeros_phi)) + \
            psi0_plasma_num*np.exp(-(r/B/theta_0_num)**C)
        self.psi = tmp_psi
        tmp_dpsi = theta_E_g*r*(np.sqrt( 1. - eta*np.cos(2*zeros_phi)) - 1)
        self.dpsi = tmp_dpsi
        tmp_psi0 = theta_E_g*r + psi0_plasma_num*np.exp(-(r/B/theta_0_num)**C)
        self.psi0 = tmp_psi0
        tmp_psi_plasma = psi0_plasma_num*np.exp(-(r/B/theta_0_num)**C)
        self.psi_plasma = tmp_psi_plasma
        tmp_ddpsi_dr = theta_E_g*(np.sqrt( 1. - eta*np.cos(2*zeros_phi)) - 1)
        self.ddpsi_dr = tmp_ddpsi_dr
        tmp_ddpsi_dphi = theta_E_g*r*eta*np.sin(2.*zeros_phi)/np.sqrt(1.-eta*np.cos(2.*zeros_phi))
        self.ddpsi_dphi = tmp_ddpsi_dphi
        tmp_d2dpsi_dphi2 = theta_E_g*r*eta*( 2*np.cos(2.*zeros_phi)/np.sqrt(1.-eta*np.cos(2.*zeros_phi)) - (1.-eta*np.cos(2.*zeros_phi))**(-3/2)*eta*np.sin(2*zeros_phi)**2)
        self.d2dpsi_dphi2 = tmp_d2dpsi_dphi2
        tmp_d2psi0 = self.psi_plasma * ( - C*(C-1)/r**2*(r/B/theta_0_num)**C + (C/r*(r/B/theta_0_num)**C)**2 )
        self.d2psi0_dr2 = tmp_d2psi0

        # First-order (perturbative) radial displacement of each image.
        delta_r = 1/(1 - self.d2psi0_dr2 )*(self.ddpsi_dr + deltab_10*np.cos(zeros_phi) + deltab_20*np.sin(zeros_phi) )
        self.delta_r = delta_r
        r_ = r + delta_r

        # Re-evaluate the potential and its derivatives at the corrected
        # radius r_ (per-image arrays from here on).
        psi = theta_E_g*r_*np.sqrt(1.-eta*np.cos(2.*zeros_phi)) + psi0_plasma_num*np.exp(-(r_/B/theta_0_num)**C)
        psi_plasma = psi0_plasma_num*np.exp(-(r_/B/theta_0_num)**C)
        d2psi_dr2 = psi_plasma * ( - C*(C-1)/r_**2*(r_/B/theta_0_num)**C + (C/r_*(r_/B/theta_0_num)**C)**2 )
        dpsi_dr = theta_E_g*np.sqrt(1.-eta*np.cos(2.*zeros_phi)) - C/r_*(r_/B/theta_0_num)**C*psi0_plasma_num*np.exp(-(r_/B/theta_0_num)**C)
        d2psi_dphi2 = theta_E_g*r_*eta*( 2*np.cos(2.*zeros_phi)/np.sqrt(1.-eta*np.cos(2.*zeros_phi)) - (1.-eta*np.cos(2.*zeros_phi))**(-3/2)*eta*np.sin(2*zeros_phi)**2)
        dpsi_dphi = theta_E_g*r_*eta*np.sin(2.*zeros_phi)/np.sqrt(1.-eta*np.cos(2.*zeros_phi))
        d2psi_dphidr = theta_E_g*eta*np.sin(2.*zeros_phi)/np.sqrt(1.-eta*np.cos(2.*zeros_phi))

        # Magnification of each image and the absolute flux ratio between
        # the two solutions found above.
        mu = r_*( (1 - d2psi_dr2)*(r_ - dpsi_dr - 1/r_*d2psi_dphi2) - 1/r_*(1/r_*dpsi_dphi - d2psi_dphidr )**2 )**(-1)
        R = np.abs(mu[0]/mu[1])
        self.R = R

        # Time delay per image and the inter-image delay difference.
        t = (1 + zl)/c*Dl*Ds/Dls*(1/2* ( (r_*np.cos(zeros_phi) - deltab_10)**2 + (r_*np.sin(zeros_phi) - deltab_20)**2 ) - psi )
        self.t = t/24/60/60*0.000004848136811095**2
        dt = np.abs(t[0]-t[1])/24/60/60*0.000004848136811095**2 #convert seconds to days and arcsec^2 to rad
        self.dt = dt
| everettiantomi/plasmalens | perturbative/validity_class.py | validity_class.py | py | 3,938 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.exp",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "scipy.optimize.root_scalar",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line... |
28443403720 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2021/8/6 上午10:04
# @Author : PeiP Liu
# @FileName: BertModel.py
# @Software: PyCharm
import torch
import torch.nn as nn
from torch.nn import LayerNorm as BertLayerNorm
import sys
sys.path.append("..")
import torch.nn.functional as F
class BERT_SC(nn.Module):
    """BERT-based sentence classifier.

    Represents each sentence either by BERT's pooled [CLS] vector or by a
    sum-pooled combination of tf-idf-selected token embeddings, then maps
    the representation through a linear layer to ``len(idx2label)``
    classes.
    """

    def __init__(self, bert_model, idx2label, hidden_size=768, device='cpu'):
        # bert_model: a HuggingFace BertModel instance
        # idx2label: index-to-label mapping; its length fixes the number
        #            of output classes
        super(BERT_SC, self).__init__()
        self.bert_model = bert_model
        self.hidden_size = hidden_size
        self.num_label = len(idx2label)
        self.device = device
        self.dropout = nn.Dropout(0.5)
        self.bert_sigmod = nn.Sigmoid()
        self.softmax = nn.Softmax(dim=-1)
        self.hid2label = nn.Linear(self.hidden_size, self.num_label)
        # init the weight and bias of feature-emission layer
        nn.init.xavier_uniform_(self.hid2label.weight)
        nn.init.constant_(self.hid2label.bias, 0.0)
        self.apply(self.init_bert_weight)

    def init_bert_weight(self, module):
        """BERT-style weight initialisation applied to every submodule."""
        # cf https://github.com/Louis-udm/NER-BERT-CRF/blob/master/NER_BERT_CRF.py
        # rf https://www.cnblogs.com/BlueBlueSea/p/12875517.html
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def get_bert_features(self, input_ids, seg_ids, atten_mask):
        """Run BERT and return (pooled [CLS] feature, per-token features)."""
        # rf https://huggingface.co/transformers/model_doc/bert.html#bertmodel
        outputs = self.bert_model(input_ids, token_type_ids=seg_ids,
                                  attention_mask=atten_mask, output_hidden_states=True, output_attentions=True)
        # last_hidden_states = outputs[0]
        last_hidden_states = outputs.last_hidden_state  # (batch_size, seq_length, hidden_size)
        # pooler_outputs = outputs[1]
        # the feature of [CLS], and it represents the feature of whole sentence
        # We can better average or pool the sequence of hidden-states for the whole sequence.
        pooler_outputs = outputs.pooler_output  # (batch_size, hidden_size)
        return pooler_outputs, last_hidden_states  # the feature of [CLS] and all the tokens

    def tfidf_seq(self, bert_seq_features, tfidf_token_masks):
        """Sum-pool the token embeddings selected by the tf-idf mask.

        NOTE(review): masks appear to be per-token 0/1 weights -- confirm
        against the caller that produces tfidf_token_masks.
        """
        batch_size, seq_len, feature_dim = bert_seq_features.shape
        seq_reps = torch.zeros((batch_size, feature_dim), dtype=torch.float32).to(self.device)
        for i_seq in range(batch_size):
            i_seq_feature = bert_seq_features[i_seq]
            i_seq_mask = tfidf_token_masks[i_seq]
            assert len(i_seq_feature) == len(i_seq_mask)
            extended_i_seq_mask = i_seq_mask.unsqueeze(1)  # (seq_len, 1)
            masked_seq_feature = i_seq_feature * extended_i_seq_mask  # (seq_len, feature_dim)
            i_seq_rep = masked_seq_feature.sum(0)  # sum-pool over all tf-idf-selected token embeddings
            seq_reps[i_seq] = i_seq_rep
        return seq_reps

    def forward(self, input_ids, input_mask, seg_ids, tfidf_token_masks, sents_labels, mode):
        """Classify a batch; returns the loss in 'train' mode, otherwise
        the predicted class indices as a Python list."""
        bert_cls_features, bert_seq_features = self.get_bert_features(input_ids, seg_ids, input_mask)
        if tfidf_token_masks.count_nonzero().detach().item() == 0:  # no non-zero mask entries at all: fall back to [CLS] as the sequence representation
            seq_rep = self.bert_sigmod(self.dropout(bert_cls_features))
        else:
            seq_rep = self.tfidf_seq(bert_seq_features, tfidf_token_masks)  # tf-idf-selected tokens exist: use them to represent the sequence
            seq_rep = self.bert_sigmod(self.dropout(seq_rep))
        class_result = self.softmax(self.hid2label(seq_rep))
        if mode == 'train':
            try:
                object_score = F.cross_entropy(class_result, sents_labels, ignore_index=2)
                return object_score
            except:
                # NOTE(review): on failure this path implicitly returns
                # None after printing -- callers must tolerate that.
                print('There is something wrong with the prediction result!')
        else:
            return torch.argmax(class_result, dim=-1).detach().cpu().tolist()
| LiuPeiP-CS/BertSeqC4Vul | Bert/BertModel.py | BertModel.py | py | 4,238 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"li... |
17847458843 | from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from epsilon.extime import Time
from nevow import loaders, rend
from nevow.testutil import renderPage, renderLivePage
from axiom.store import Store
from axiom.dependency import installOn
from xmantissa.people import Person, EmailAddress
from xquotient.exmess import Message, MessageDetail, PartDisplayer
from xquotient.inbox import Inbox, InboxScreen
from xquotient import compose
from xquotient.test.util import MIMEReceiverMixin, PartMaker, ThemedFragmentWrapper
from xquotient.qpeople import MessageList, MessageLister
from xquotient.test.util import DummyMessageImplementation
from xquotient.test.test_inbox import testMessageFactory
def makeMessage(receiver, parts, impl):
    """
    Create a new L{exmess.Message}, either by parsing C{parts} or by wrapping
    one around C{impl}.

    @param receiver: a MIME receiver used to parse C{parts}; only consulted
        when C{impl} is C{None}.
    @param parts: the raw MIME text to feed to C{receiver}.
    @param impl: an existing message implementation to wrap, or C{None} to
        parse C{parts} instead.
    """
    if impl is None:
        return receiver.feedStringNow(parts)
    else:
        # wrap the given implementation in a fresh, non-spam test message
        return testMessageFactory(store=impl.store,
                                  receivedWhen=Time(),
                                  sentWhen=Time(),
                                  spam=False,
                                  subject=u'',
                                  impl=impl)
class RenderingTestCase(TestCase, MIMEReceiverMixin):
    """
    Rendering smoke tests that exercise the message-detail, inbox,
    composer, and people message-list pages against a deliberately
    complex multipart message.
    """

    # A multipart/related message with 100 HTML parts and 100 (empty)
    # GIF parts -- built once at class-definition time.
    aBunchOfRelatedParts = PartMaker(
        'multipart/related', 'related',
        *(list(PartMaker('text/html', '<p>html-' + str(i) + '</p>')
               for i in xrange(100)) +
          list(PartMaker('image/gif', '')
               for i in xrange(100)))).make()

    def setUp(self):
        """
        Make a copy of the very minimal database for a single test method to
        mangle.
        """
        receiver = self.setUpMailStuff()
        makeMessage(receiver, self.aBunchOfRelatedParts, None)

    def test_messageRendering(self):
        """
        Test rendering of message detail for an extremely complex message.
        """
        msg = self.substore.findUnique(Message)
        msg.classifyClean()
        return renderLivePage(
            ThemedFragmentWrapper(
                MessageDetail(msg)))

    def test_inboxRendering(self):
        """
        Test rendering of the inbox with a handful of extremely complex
        messages in it.
        """
        def deliverMessages():
            for i in xrange(5):
                makeMessage(
                    self.createMIMEReceiver(), self.aBunchOfRelatedParts, None)
        self.substore.transact(deliverMessages)
        inbox = self.substore.findUnique(Inbox)
        composer = compose.Composer(store=self.substore)
        installOn(composer, self.substore)
        return renderLivePage(
            ThemedFragmentWrapper(
                InboxScreen(inbox)))

    def test_inboxComposeFragmentRendering(self):
        """
        Test rendering of the L{xquotient.compose.ComposeFragment} returned
        from L{xquotient.inbox.InboxScreen.getComposer}
        """
        installOn(compose.Composer(store=self.substore), self.substore)
        inbox = self.substore.findUnique(Inbox)
        inboxScreen = InboxScreen(inbox)
        composeFrag = inboxScreen.getComposer()
        return renderLivePage(
            ThemedFragmentWrapper(composeFrag))

    def test_peopleMessageListRendering(self):
        """
        Test rendering of the per-person message list after delivering five
        messages from a known sender.
        """
        mlister = MessageLister(store=self.substore)
        installOn(mlister, self.substore)
        p = Person(store=self.substore, name=u'Bob')
        EmailAddress(store=self.substore, person=p, address=u'bob@internet')
        for i in xrange(5):
            testMessageFactory(
                store=self.substore, subject=unicode(str(i)),
                receivedWhen=Time(), spam=False, sender=u'bob@internet')
        self.assertEqual(len(list(mlister.mostRecentMessages(p))), 5)
        return renderPage(
            rend.Page(docFactory=loaders.stan(MessageList(mlister, p))))
class MockPart(object):
    """
    A stand-in for L{xquotient.mimestorage.Part} implementing just the
    accessors L{xquotient.exmess.PartDisplayer} needs.
    """
    def __init__(self, unicodeBody, contentType='text/plain'):
        """
        @param unicodeBody: the textual body this part should report.
        @type unicodeBody: C{unicode}

        @param contentType: the MIME content type to report; defaults to
        text/plain.
        @type contentType: C{str}
        """
        self.contentType = contentType
        self.unicodeBody = unicodeBody

    def getUnicodeBody(self):
        """Return the body exactly as supplied at construction time."""
        return self.unicodeBody

    def getBody(self, decode=False):
        """Return the body coerced via C{str}; C{decode} is accepted for
        interface compatibility but ignored."""
        return str(self.unicodeBody)

    def getContentType(self):
        """Return the content type supplied at construction time."""
        return self.contentType
class PartDisplayerTestCase(TestCase):
    """
    Tests for L{xquotient.exmess.PartDisplayer}
    """
    def setUp(self):
        # The fragment argument is not touched by the methods under test,
        # so None is sufficient here.
        self.partDisplayer = PartDisplayer(None)

    def test_scrubbingInvalidDocument(self):
        """
        Pass a completely malformed document to L{PartDisplayer.scrubbedHTML}
        and assert that it returns C{None} instead of raising an exception.
        """
        self.assertIdentical(None, self.partDisplayer.scrubbedHTML(''))

    def test_scrubbingSimpleDocument(self):
        """
        Pass a trivial document to L{PartDisplayer.scrubbedHMTL} and make sure
        it comes out the other side in-tact.
        """
        self.assertEquals('<div></div>', self.partDisplayer.scrubbedHTML('<div></div>'))

    def test_renderablePartReplacesInvalidCharsinHTML(self):
        """
        Test that L{xquotient.exmess.PartDisplayer.renderablePart} replaces
        XML-illegal characters in the body of the text/html part it is passed
        """
        # \x00 and \x01 are illegal in XML and should be escaped to 0x0/0x1
        part = MockPart(u'<div>\x00 hi \x01</div>', 'text/html')
        tag = self.partDisplayer.renderablePart(part)
        self.assertEquals(tag.content, '<div>0x0 hi 0x1</div>')

    def test_renderablePartDoesntReplaceInvalidCharsElsewhere(self):
        """
        Test that L{xquotient.exmess.PartDisplayer.renderablePart} doesn't
        replace XML-illegal characters if the content-type of the part isn't
        text/html
        """
        part = MockPart(u'\x00', 'text/plain')
        tag = self.partDisplayer.renderablePart(part)
        self.assertEquals(tag.content, '\x00')
| rcarmo/divmod.org | Quotient/xquotient/test/test_rendering.py | test_rendering.py | py | 6,301 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "xquotient.test.test_inbox.testMessageFactory",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "epsilon.extime.Time",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "epsilon.extime.Time",
"line_number": 33,
"usage_type": "call"
},
{
... |
670609188 | import pandas as pd
import argparse
import json
# Creates json file with the number of times each hot topic appears in the annotaed file
# -o: optional path for the JSON summary; -i: required annotated TSV input
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--outfile')
parser.add_argument('-i', '--coded_file', required=True)
# parsed at import time -- this module is intended to be run as a script
args = parser.parse_args()
# "Hot Topics"
# Counts start as integer zero so that every category serialises with a
# consistent type (previously unmatched categories kept the string '0'
# while matched ones became ints, producing mixed types in the JSON).
result = {'course-related': 0, 'food-related': 0, 'residence-related': 0, 'other': 0}


def get_count(coded_file):
    """Count how often each hot-topic category appears in the coded file.

    The 'coding' column holds single-letter codes matched against the
    first letter of each category name ('c', 'f', 'r', 'o').

    Args:
        coded_file: path to a tab-separated file with a 'coding' column.
    Returns:
        The module-level ``result`` dict, updated in place.
    """
    df = pd.read_csv(coded_file, sep='\t', header=0)  # convert infile to dataframe
    count = df['coding'].value_counts()
    for category in result:
        letter = category[0]  # course-related -> c
        if letter in count.index:
            result[category] = int(count[letter])
    return result
def main():
    # Count categories in the file given via -i; optionally dump to -o.
    count_result = get_count(args.coded_file)
    if args.outfile:
        with open(args.outfile,'w') as outfile:
            json.dump(count_result,outfile)
    # prints the module-level dict, which get_count mutates in place
    print(result)


if __name__ == '__main__':
    main()
| BrendaNamuh/COMP598-UniversityHotTopics | src/analyze.py | analyze.py | py | 988 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 29,
"usage_type": "call"
}
] |
18461666541 | from flask import Flask, jsonify, request
from calculation import Calc
app = Flask(__name__)


@app.route('/get_time', methods=['POST'])
def give_res():
    """Handle a POST of test cases and return their results as JSON.

    Expects the request body to be a JSON list of objects each carrying a
    'data' field; the response maps 1-based test numbers to results.
    """
    do = Calc()
    answer_dict = {}
    n = 1  # counter used to number the answers in answer_dict
    for i, test in enumerate(request.json):  # iterate over the test cases in the request payload
        test_answer = do.appearance(test['data'])  # the computation itself lives in the Calc class (calculation module)
        # assert test_answer == test['answer'], f'Error on test case {i}, got {test_answer}, expected {test["answer"]}'
        answer_dict.setdefault(n, test_answer)
        n += 1
    # print(answer_dict)
    return jsonify(answer_dict)


if __name__ == '__main__':
    app.run()
| Dortov/Web-API | app.py | app.py | py | 921 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "calculation.Calc",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
... |
20567576086 | import matplotlib.pyplot as plt
import numpy as np
def loadData(fileName):
    """加载数据:解析以tab键分隔的文件中的浮点数
    (Load data: parse floats from a tab-separated file.)

    Each line holds the feature values followed by the target value in the
    last column; the number of features is inferred from the first line.

    Args:
        fileName : 数据集文件 (path to the data file)
    Returns:
        dataMat : feature rows as lists of floats
        labelMat : target values (last column of each line)
    """
    dataMat = []
    labelMat = []
    # 'with' guarantees the handle is closed -- the original opened the
    # file twice and never closed either handle.
    with open(fileName) as fr:
        lines = fr.readlines()
    if not lines:
        return dataMat, labelMat
    # number of feature columns = total columns minus the target column
    numFeat = len(lines[0].split('\t')) - 1
    for line in lines:
        curLine = line.strip().split('\t')
        dataMat.append([float(curLine[i]) for i in range(numFeat)])
        labelMat.append(float(curLine[-1]))
    return dataMat, labelMat
# Ridge regression
def ridgeRegres(xMat, yMat, lam = 0.2):
    """Compute ridge-regression coefficients.

    Solves ws = (X^T X + lam * I)^-1 X^T y.  When the regularised Gram
    matrix is singular, a warning is printed and None is returned.

    Parameters:
        xMat - feature matrix (np.matrix)
        yMat - target column vector (np.matrix)
        lam - regularisation strength
    Returns:
        ws - coefficient column vector, or None when singular
    """
    gram = xMat.T * xMat
    regularized = gram + np.eye(np.shape(xMat)[1]) * lam
    if np.linalg.det(regularized) == 0.0:
        print("矩阵为奇异矩阵,不能转置")
        return
    return regularized.I * (xMat.T * yMat)
def ridgeTest(xArr, yArr):
    """Run ridge regression over 30 exponentially spaced lambdas.

    The target is centred and every feature column is standardised
    (mean removed, divided by the population variance) before fitting;
    lambda takes the values exp(i - 10) for i = 0..29.

    Args:
        xArr - feature rows
        yArr - target values
    Returns:
        wMat - (30, n_features) matrix, one coefficient row per lambda
    """
    xMat = np.mat(xArr)
    yMat = np.mat(yArr).T
    # centre the target column
    yMat = yMat - np.mean(yMat, axis=0)
    # standardise each feature column using its own mean and variance
    xMat = (xMat - np.mean(xMat, axis=0)) / np.var(xMat, axis=0)
    numTestPts = 30
    wMat = np.zeros((numTestPts, np.shape(xMat)[1]))
    for i in range(numTestPts):
        # lambda grows exponentially, starting from a very small value
        wMat[i, :] = ridgeRegres(xMat, yMat, np.exp(i - 10)).T
    return wMat
def plotwMat():
    """Plot how each ridge coefficient shrinks as log(lambda) grows."""
    # fit 30 lambdas on the abalone dataset (see ridgeTest)
    abX, abY = loadData('data/abalone.txt')
    redgeWeights = ridgeTest(abX, abY)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # one curve per feature; x axis is the lambda index (log scale by construction)
    ax.plot(redgeWeights)
    ax_title_text = ax.set_title('The relationship between log(lambada) and regression coefficient')
    ax_xlabel_text = ax.set_xlabel('log(lambada)')
    ax_ylabel_text = ax.set_ylabel('regression coefficient')
    plt.setp(ax_title_text, size = 10, weight = 'bold', color = 'red')
    plt.setp(ax_xlabel_text, size = 10, weight = 'bold', color = 'black')
    plt.setp(ax_ylabel_text, size = 10, weight = 'bold', color = 'black')
    plt.show()


if __name__ == '__main__':
    plotwMat()
{
"api_name": "numpy.eye",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.det",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_numbe... |
8742949407 | #!/usr/bin/env env/bin/python
import argparse
import mido
from timeit import default_timer as timer
PROG_NAME = 'rmidi'
PROG_DESCRIP = 'record midi data from given input to given file'
DEFAULT_INPUT = 'KeyLab mkII 61:KeyLab mkII 61 MIDI'
DEFAULT_BPM = 120
SECONDS_PER_MIN = 60


def get_cmd_args():
    """Parse command-line options.

    Options: -b/--bpm (int tempo, default 120), -i/--input (MIDI port
    name), -f/--file (output path), -s/--silence (trim leading silence
    off), -v/--verbose (echo messages).
    """
    parser = argparse.ArgumentParser(prog=PROG_NAME, description=PROG_DESCRIP)
    # type=int so '-b 90' yields an int; previously a CLI-supplied BPM
    # stayed a string and corrupted the ticks-per-second arithmetic.
    parser.add_argument('-b', '--bpm', type=int, default=DEFAULT_BPM)
    parser.add_argument('-i', '--input')
    parser.add_argument('-f', '--file')
    parser.add_argument('-s', '--silence', action='store_true')
    parser.add_argument('-v', '--verbose', action='store_true')
    return parser.parse_args()
def get_midi_in():
    """Interactively choose a MIDI input port name.

    Lists every port mido can see, pre-selecting the last one whose name
    contains DEFAULT_INPUT, and keeps prompting until a valid index is
    chosen (non-numeric or empty input accepts the default).
    """
    inputs = mido.get_input_names()
    default_i = 0
    entry = -1
    while(entry < 0 or entry >= len(inputs)):
        for i, name in enumerate(inputs):
            if DEFAULT_INPUT in name:
                default_i = i
            print('%d: %s' % (i, name))
        raw_entry = input('Input (default: %d): ' % default_i)
        try:
            entry = int(raw_entry)
        except ValueError:
            # non-numeric (including empty) input falls back to the default
            entry = default_i
    return inputs[entry]
def get_appending_handler(appendable, ticks_per_second, print_msgs):
    """Build a message handler that appends each message to *appendable*.

    When print_msgs is true, each message is also echoed to stdout.
    ticks_per_second is accepted for interface compatibility but unused
    here (timing is stamped by process_msgs).
    """
    if print_msgs:
        def handler(msg):
            appendable.append(msg)
            print(msg)
    else:
        def handler(msg):
            appendable.append(msg)
    return handler
def print_input(midi_in):
    """Echo every incoming MIDI message to stdout until interrupted."""
    # NOTE(review): the midi_in parameter is unused -- process_msgs reads
    # the module-level `midi_in` name instead; confirm before refactoring.
    process_msgs(lambda x: print(x))
def accumulate_input(midi_in, appendable, ticks_per_second, print_msgs=True):
    """Collect incoming MIDI messages into *appendable* until interrupted.

    Message times are stamped in ticks by process_msgs.
    NOTE(review): like print_input, the midi_in argument here is shadowed
    by the module-level name inside process_msgs -- confirm.
    """
    handler = get_appending_handler(appendable, ticks_per_second, print_msgs)
    process_msgs(handler, ticks_per_second=ticks_per_second)
def process_msgs(handler, ticks_per_second=0):
    """Read messages from the module-level ``midi_in`` port until Ctrl-C.

    Each message's ``time`` attribute is overwritten with the elapsed
    wall-clock time since the previous message, converted to MIDI ticks
    via ticks_per_second, before being passed to *handler*.
    """
    last_time = timer()
    try:
        with mido.open_input(midi_in) as inport:
            for msg in inport:
                end = timer()
                seconds = end - last_time
                last_time = end
                # wall-clock delta -> MIDI ticks (rounded)
                msg.time = round(ticks_per_second * seconds)
                handler(msg)
    except KeyboardInterrupt:
        # 'print_msgs' is only bound when this module runs as a script;
        # a guarded lookup prevents a NameError if the module is imported
        # and interrupted without that global in place.
        if globals().get('print_msgs', False):
            print()  # Just so the "^C" ends up on a separate line from prompt on exit
def record_to_file(midi_in, filename, bpm, print_msgs, trim_init_silence):
    """Record incoming MIDI into a type-0 MIDI file until interrupted.

    bpm converts wall-clock seconds to MIDI ticks; when trim_init_silence
    is true, the delay before the first recorded message is zeroed out.
    """
    mid = mido.MidiFile(type=0)
    track = mido.MidiTrack()
    mid.tracks.append(track)
    # ticks/second = beats/min * ticks/beat / seconds/min
    ticks_per_second = bpm * mid.ticks_per_beat / SECONDS_PER_MIN
    accumulate_input(midi_in, track, ticks_per_second, print_msgs)
    if trim_init_silence:
        track[0].time = 0
    mid.save(filename)
    print('MIDI data written to', filename)
if __name__ == '__main__':
    args = get_cmd_args()
    midi_in = args.input
    filename = args.file
    bpm = args.bpm
    print_msgs = args.verbose
    trim_init_silence = not args.silence
    # fall back to the interactive port chooser when no -i/--input is given
    if not midi_in:
        midi_in = get_midi_in()
    print('Receiving from "%s"' % midi_in)
    # with -f record to a file; otherwise with -v just echo messages
    if filename:
        record_to_file(midi_in, filename, bpm, print_msgs, trim_init_silence)
    elif print_msgs:
        print_input(midi_in)
| erickak/audiotools | rmidi/src/main.py | main.py | py | 3,066 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mido.get_input_names",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "mid... |
75059026592 | import triton
import triton.language as tl
def conv2d_forward_config(
    BLOCK_SIZE_BATCH_HEIGHT_WIDTH: int,
    BLOCK_SIZE_IN_FEAT: int,
    BLOCK_SIZE_OUT_FEAT: int,
    n_warps: int = 4,
    n_stages: int = 2,
    ) -> triton.Config:
    """
    Builds a triton.Config for conv2d_forward_kernel from a set of
    auto-tuning meta-parameters.

    Args:
        BLOCK_SIZE_BATCH_HEIGHT_WIDTH: Block size across the flattened
            batch/height/width dimensions.
        BLOCK_SIZE_IN_FEAT: Block size across the input feature dimension.
        BLOCK_SIZE_OUT_FEAT: Block size across the output feature dimension.
        n_warps: Number of warps the compiled kernel uses on GPUs.
        n_stages: Number of software-pipelining stages.

    Returns:
        Kernel configuration.
    """
    block_sizes = {
        'BLOCK_SIZE_BATCH_HEIGHT_WIDTH': BLOCK_SIZE_BATCH_HEIGHT_WIDTH,
        'BLOCK_SIZE_IN_FEAT': BLOCK_SIZE_IN_FEAT,
        'BLOCK_SIZE_OUT_FEAT': BLOCK_SIZE_OUT_FEAT,
    }
    return triton.Config(block_sizes, num_warps=n_warps, num_stages=n_stages)
@triton.autotune(
    configs=[
        conv2d_forward_config(128, 32, 128, n_warps=8, n_stages=2),
        conv2d_forward_config(256, 32, 64, n_warps=8, n_stages=2),
        conv2d_forward_config(256, 32, 32, n_warps=4, n_stages=4),
        conv2d_forward_config(256, 64, 32, n_warps=4, n_stages=4),
        conv2d_forward_config(256, 32, 16, n_warps=2, n_stages=4),
        conv2d_forward_config(64, 32, 128, n_warps=8, n_stages=4),
        conv2d_forward_config(128, 32, 64, n_warps=4, n_stages=4),
        conv2d_forward_config(64, 32, 64, n_warps=4, n_stages=4),
        conv2d_forward_config(128, 32, 16, n_warps=4, n_stages=4),
        conv2d_forward_config(128, 128, 128, n_warps=8, n_stages=3),
        conv2d_forward_config(256, 128, 64, n_warps=8, n_stages=3),
        conv2d_forward_config(256, 128, 32, n_warps=4, n_stages=4),
        conv2d_forward_config(64, 128, 128, n_warps=4, n_stages=4),
        conv2d_forward_config(128, 128, 64, n_warps=4, n_stages=4),
        conv2d_forward_config(128, 64, 32, n_warps=2, n_stages=4),
        conv2d_forward_config(64, 64, 64, n_warps=2, n_stages=4),
    ],
    key=['batch_dim', 'in_feat_dim', 'in_height', 'in_width',
         'out_feat_dim', 'out_height', 'out_width',
         'kernel_height', 'kernel_width',
         'stride_height', 'stride_width',
         'padding_height', 'padding_width',
         'groups'],
)
@triton.jit
def conv2d_forward_kernel(
    input_pointer, weight_pointer, output_pointer,
    batch_dim, in_feat_dim, in_height, in_width,
    out_feat_dim, out_height, out_width,
    input_batch_stride, input_in_feat_stride, input_height_stride, input_width_stride,
    weight_out_feat_stride, weight_in_feat_stride, weight_height_stride, weight_width_stride,
    output_batch_stride, output_out_feat_stride, output_height_stride, output_width_stride,
    kernel_height: tl.constexpr, kernel_width: tl.constexpr,
    stride_height: tl.constexpr, stride_width: tl.constexpr,
    padding_height: tl.constexpr, padding_width: tl.constexpr,
    groups: tl.constexpr,
    BLOCK_SIZE_BATCH_HEIGHT_WIDTH: tl.constexpr, BLOCK_SIZE_IN_FEAT: tl.constexpr,
    BLOCK_SIZE_OUT_FEAT: tl.constexpr,
):
    """
    2D-convolves over the input using weights.

    Args:
        input_pointer: Pointer to the input to convolve over.
            The input must be of shape [batch_dim, in_feat_dim, in_height, in_width].
        weight_pointer: Pointer to the weights input is convolved over by.
            The weights must be of shape [out_feat_dim, in_feat_dim, kernel_height, kernel_width].
        output_pointer: Pointer to a container the result is written to.
            The container must be of shape [batch_dim, out_feat_dim, out_height, out_width].
        batch_dim: Batch dimension of the input and output.
        in_feat_dim: Dimensionality of the input features.
        in_height: Input height.
        in_width: Input width.
        out_feat_dim: Dimensionality of the output features.
        out_height: Output height.
        out_width: Output width.
        input_batch_stride: Stride necessary to jump one element along the
            input's batch dimension.
        input_in_feat_stride: Stride necessary to jump one element along the
            input's feature dimension.
        input_height_stride: Stride necessary to jump one element along the
            input's height dimension.
        input_width_stride: Stride necessary to jump one element along the
            input's width dimension.
        weight_out_feat_stride: Stride necessary to jump one element along the
            weights' output feature dimension.
        weight_in_feat_stride: Stride necessary to jump one element along the
            weights' input feature dimension.
        weight_height_stride: Stride necessary to jump one element along the
            weights' height dimension.
        weight_width_stride: Stride necessary to jump one element along the
            weights' width dimension.
        output_batch_stride: Stride necessary to jump one element along the
            output's batch dimension.
        output_out_feat_stride: Stride necessary to jump one element along the
            output's feature dimension.
        output_height_stride: Stride necessary to jump one element along the
            output's height dimension.
        output_width_stride: Stride necessary to jump one element along the
            output's width dimension.
        kernel_height: Kernel height.
        kernel_width: Kernel width.
        stride_height: Stride of kernel across the height dimension.
        stride_width: Stride of kernel across the width dimension.
        padding_height: Padding applied to the input across the height dimension.
        padding_width: Padding applied to the input across the width dimension.
        groups: Number of groups for the convolution.
        BLOCK_SIZE_BATCH_HEIGHT_WIDTH: Block size across the batch, height, and
            width dimensions.
        BLOCK_SIZE_IN_FEAT: Block size across the input feature dimension.
        BLOCK_SIZE_OUT_FEAT: Block size across the output feature dimension.
    """
    # One program instance per (flattened batch*height*width tile,
    # output-feature tile, convolution group).
    batch_height_width_pid = tl.program_id(0)
    out_feat_pid = tl.program_id(1)
    group_pid = tl.program_id(2)

    # per-group slice sizes of the feature dimensions
    in_group_dim = in_feat_dim // groups
    out_group_dim = out_feat_dim // groups

    # Decompose the flattened batch*out_height*out_width index into its
    # batch / output-row / output-column components.
    batch_height_width_offset = (batch_height_width_pid * BLOCK_SIZE_BATCH_HEIGHT_WIDTH +
                                 tl.arange(0, BLOCK_SIZE_BATCH_HEIGHT_WIDTH))
    batch_height_offset = batch_height_width_offset // out_width
    batch_offset = batch_height_offset // out_height

    output_feat_offset = (out_feat_pid * BLOCK_SIZE_OUT_FEAT +
                          tl.arange(0, BLOCK_SIZE_OUT_FEAT))
    output_height_offset = batch_height_offset % out_height
    output_width_offset = batch_height_width_offset % out_width

    # Advance the base pointers to this batch element and feature group.
    input_pointer += (input_batch_stride * batch_offset +
                      input_in_feat_stride * group_pid * in_group_dim)[:, None]
    weight_pointer += (weight_out_feat_stride * output_feat_offset +
                       weight_out_feat_stride * group_pid * out_group_dim)[None, :]

    # fp32 accumulator for the output tile
    accum = tl.zeros((BLOCK_SIZE_BATCH_HEIGHT_WIDTH, BLOCK_SIZE_OUT_FEAT),
                     dtype=tl.float32)

    # Walk the kernel window and, inside it, the input-feature dimension
    # in tiles of BLOCK_SIZE_IN_FEAT, accumulating one matmul per tile.
    for h in range(kernel_height):
        for w in range(kernel_width):
            for c in range(0, in_group_dim, BLOCK_SIZE_IN_FEAT):
                input_feat_offset = c + tl.arange(0, BLOCK_SIZE_IN_FEAT)
                # map output coordinates back to (possibly padded) input
                # coordinates for this kernel tap
                input_height_offset = (h - padding_height +
                                       stride_height * output_height_offset)
                input_width_offset = (w - padding_width +
                                      stride_width * output_width_offset)

                curr_input_pointer = (input_pointer +
                                      (input_in_feat_stride * input_feat_offset)[None, :] +
                                      (input_height_stride * input_height_offset)[:, None] +
                                      (input_width_stride * input_width_offset)[:, None])
                curr_weight_pointer = (weight_pointer +
                                       (weight_in_feat_stride * input_feat_offset)[:, None] +
                                       (weight_height_stride * h) +
                                       (weight_width_stride * w))

                # mask off out-of-range batch entries, feature lanes, and
                # padded input coordinates
                input_mask = ((batch_offset < batch_dim)[:, None] &
                              (input_feat_offset < in_group_dim)[None, :] &
                              (0 <= input_height_offset)[:, None] &
                              (input_height_offset < in_height)[:, None] &
                              (0 <= input_width_offset)[:, None] &
                              (input_width_offset < in_width)[:, None])
                weight_mask = ((input_feat_offset < in_group_dim)[:, None] &
                               (output_feat_offset < out_group_dim)[None, :])

                # NOTE(review): these masked loads pass no explicit `other`;
                # Triton documents masked-off lanes as undefined in that
                # case -- confirm zero-fill is guaranteed before relying on
                # tl.dot over padded lanes.
                input_block = tl.load(curr_input_pointer, mask=input_mask)
                weight_block = tl.load(curr_weight_pointer, mask=weight_mask)

                accum += tl.dot(input_block, weight_block)

    # Scatter the finished tile back to the output tensor, masking the
    # out-of-range lanes.
    output_pointer += ((output_batch_stride * batch_offset)[:, None] +
                       (output_out_feat_stride * (group_pid * out_group_dim + output_feat_offset))[None, :] +
                       (output_height_stride * output_height_offset)[:, None] +
                       (output_width_stride * output_width_offset)[:, None])
    output_mask = ((batch_offset < batch_dim)[:, None] &
                   (output_feat_offset < out_group_dim)[None, :] &
                   (output_height_offset < out_height)[:, None] &
                   (output_width_offset < out_width)[:, None])
    tl.store(output_pointer, accum, mask=output_mask)
| BobMcDear/attorch | attorch/conv_kernels.py | conv_kernels.py | py | 9,935 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "triton.Config",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "triton.Config",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "triton.language.constexpr",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "trito... |
38018655263 | import os
import h5py
import torch
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from collections import Counter
def write(data, gts, outfile):
    '''
    Dump pre-processed features and their ground truths to an HDF5 file

    Args:
        data: numpy.array, feature data
        gts: numpy.array, ground-truth labels aligned with data
        outfile: string, path of the HDF5 file to create
    '''
    for banner in ("---------------------------------------",
                   "Saving data",
                   "---------------------------------------\n"):
        print(banner)
    with h5py.File(outfile, "w") as h5:
        h5.create_dataset("features", data=data, dtype=data.dtype)
        h5.create_dataset("gts", data=gts, dtype=gts.dtype)
def load_data(config):
    """Load the seven experiment recordings from the four behaviour CSVs.

    For experiment i (1..7), concatenates the Feeding/Swimming/Resting/NDM
    25 Hz CSVs (indexed and parsed by Date_Time), keeps columns 1..8, and
    normalises the "Non directed motion" label to "NDM".

    Returns a list of seven DataFrames, one per experiment.
    """
    root = config['DATA_PATH']
    prefixes = [root + '/feeding/csv/Feeding_25Hz_',
                root + '/swimming/csv/Swimming_25Hz_',
                root + '/resting/csv/Resting_25Hz_',
                root + '/ndm/csv/NDM_25Hz_']
    dfs = []
    for experiment in range(1, 8):
        frames = [pd.read_csv(prefix + str(experiment) + '.csv',
                              index_col=['Date_Time'],
                              parse_dates=['Date_Time'],
                              infer_datetime_format=True)
                  for prefix in prefixes]
        combined = pd.concat(frames, ignore_index=False, sort=False).iloc[:, 1:9]
        dfs.append(combined.replace(to_replace={"Non directed motion": "NDM"}))
    return dfs
def split_data(dfs, config):
    """Split the seven experiment DataFrames into train/val/test sets.

    'experiment' keeps whole experiments apart (train: 1,2,3,4,7; val: 6;
    test: 5); 'full' pools everything and splits randomly (75/25, then
    80/20 of train) with fixed seeds, re-sorting each split by its time
    index.  Exits the process on an unknown split type.
    """
    df_dict = {}
    split_type = config['SPLIT']
    if split_type == 'experiment':
        # Train: 1, 2, 3, 4, 7
        # Val: 6
        # Test: 5
        df_dict['train'] = pd.concat([dfs[0], dfs[1], dfs[2], dfs[3], dfs[6]])
        df_dict['val'] = dfs[5]
        df_dict['test'] = dfs[4]
    elif split_type == 'full':
        pooled = pd.concat(dfs)
        df_dict['train'], df_dict['test'] = train_test_split(pooled,
                                                             test_size=0.25,
                                                             random_state=33)
        df_dict['train'], df_dict['val'] = train_test_split(df_dict['train'],
                                                            test_size=0.2,
                                                            random_state=33)
        # restore chronological order within each random split
        for split in ('train', 'val', 'test'):
            df_dict[split] = df_dict[split].sort_index()
    else:
        print("Not a valid split type (full/experiment)")
        exit()

    print("*"*44)
    print('Train shape:', df_dict['train'].shape)
    print('Val shape:', df_dict['val'].shape)
    print('Test shape:', df_dict['test'].shape)
    print("="*44)
    print()

    return df_dict
def normalize(df_dict, features):
    """Z-score every feature column using the train split's statistics.

    Mean and (population) standard deviation are computed from the train
    split only and applied to train, val and test alike, so the held-out
    splits never leak into the normalisation.
    """
    for column in features:
        mu = np.mean(df_dict['train'][column])
        sigma = np.std(df_dict['train'][column])
        for split in ('train', 'val', 'test'):
            df_dict[split][column] = df_dict[split][column].map(
                lambda value: (value - mu) / sigma)
    return df_dict
def group_times(df):
    """Tag contiguous recordings with a 'Group' id.

    A new group starts wherever consecutive timestamps in the index are
    more than one second apart; the id is the running count of such gaps.
    The DataFrame is modified in place and also returned.
    """
    gaps = df.index.to_series().diff() > pd.Timedelta('1s')
    df['Group'] = gaps.cumsum()
    return df
def sample_sequences(df, features, num_samples=None, seq_len=50, dims=1, test=False):
    """Sample fixed-length feature sequences per behaviour class.

    :param df: DataFrame with 'Label' and 'Group' columns (see group_times)
    :param features: feature column names to extract
    :param num_samples: sequences to draw per class (train mode only)
    :param seq_len: length of each sampled sequence
    :param dims: number of feature dimensions per timestep
    :param test: if True, tile every contiguous group deterministically
                 instead of drawing random windows
    :return: (X, Y) arrays of sequences and integer class labels
    """
    X = []
    Y = []
    # Position in this list defines the integer class label.
    label_list = ['Feeding', 'Swimming', 'Resting', 'NDM']
    for idx, label in enumerate(label_list):
        print(str(idx) + ": " + label)
        class_df = df.loc[df['Label'] == label]
        groups = class_df['Group'].unique()
        if not test:
            X_class = np.zeros((num_samples, seq_len, dims), dtype=np.float32)
            Y_class = np.full((num_samples, 1), idx, dtype=np.int64)
            for i in range(num_samples):
                # Redraw random contiguous groups until one is strictly
                # longer than seq_len, so a full window always fits.
                chunk_idx = groups[np.random.randint(len(groups))]
                data = class_df.loc[class_df['Group'] == chunk_idx][features].to_numpy()
                while(len(data) <= seq_len):
                    chunk_idx = groups[np.random.randint(len(groups))]
                    data = class_df.loc[class_df['Group'] == chunk_idx][features].to_numpy()
                # Random window start inside the chosen group.
                rand = np.random.randint(len(data)-seq_len)
                X_class[i] = data[rand:rand+seq_len]
        else:
            X_class = []
            Y_class = []
            for group in groups:
                data = class_df.loc[class_df['Group'] == group][features].to_numpy()
                # NOTE(review): this rebinds the num_samples parameter and
                # hard-codes 50 instead of seq_len -- presumably it should be
                # len(data)//seq_len; confirm before changing.
                num_samples = len(data)//50
                X_group = np.zeros((num_samples, seq_len, dims), dtype=np.float32)
                Y_group = np.full((num_samples, 1), idx, dtype=np.int64)
                for i in range(num_samples):
                    # Non-overlapping windows tiled over the group.
                    X_group[i] = data[seq_len*i:seq_len*(i+1)]
                X_class.append(X_group)
                Y_class.append(Y_group)
            X_class, Y_class = np.concatenate(X_class), np.concatenate(Y_class)
            print(label + " -- num test points: " + str(Y_class.shape[0]))
        X.append(X_class)
        Y.append(Y_class)
return np.concatenate(X), np.concatenate(Y) | buchholzmd/SharkBehaviorClassification | datasets/utils.py | utils.py | py | 5,524 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "h5py.File",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_num... |
16988033518 | from django.http import JsonResponse
from jam.spotify_api import url_argument_parse
import pitchfork
import re
import urllib
try:
import urllib.request as urllib2
except ImportError:
import urllib2
#######################
# PITCHFORK API WRAPPER
#######################
def search(request, artist, album):
    """Look up a Pitchfork review for the given artist/album and return the
    essentials (artist, album, editorial text, label, score) as JSON."""
    review = pitchfork.search(url_argument_parse(artist), url_argument_parse(album))
    payload = {
        "artist": review.artist(),
        "album": review.album(),
        "editorial": repair_editorial(review.editorial()),
        "label": review.label(),
        "score": review.score(),
    }
    return JsonResponse(payload)
def repair_editorial(editorial):
    """Re-insert line breaks into scraped editorial text whose sentence
    boundaries were lost (e.g. 'end.Next' fused together)."""
    print(editorial)
    # Heuristic: a token containing any character immediately followed by an
    # uppercase letter is treated as two fused sentences.
    regex = re.compile(r'.[A-Z]')
    repaired_editorial = ""
    for token in editorial.split():
        if regex.search(token) is not None:
            # Break after every period inside the fused token.
            repaired_editorial += token.replace(".", ".\n ")
            repaired_editorial += " "
        else:
            repaired_editorial += token
            repaired_editorial += " "
    print(repaired_editorial)
return repaired_editorial | cartev/Jam | jam/pitchfork_api.py | pitchfork_api.py | py | 1,079 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pitchfork.search",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "jam.spotify_api.url_argument_parse",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 19,
"usage_type": "call"
},
{
"api_... |
22674612731 | # -*- coding: utf-8 -*-
import sys
from nltk.tag import StanfordNERTagger
from nltk.tokenize import word_tokenize
from sklearn.metrics import precision_recall_fscore_support as score
def computeStatistics(expected_tags, generated_tags):
    """Write per-tag precision/recall/F-measure to precision_recall_fscore.txt.

    :param expected_tags: gold tag sequence
    :param generated_tags: predicted tag sequence (same length/order)
    """
    floatFormat = "{:.2f}"
    # Sorted union of all tags seen in either sequence. Passing labels= pins
    # the row order returned by precision_recall_fscore_support: the original
    # zipped sklearn's sorted-label output with the first-appearance order of
    # generated tags only, so metric rows could be paired with the wrong tag
    # (and tags present only in expected_tags were dropped entirely).
    unique_tags = sorted(set(expected_tags) | set(generated_tags))
    precision, recall, fscore, support = score(expected_tags, generated_tags,
                                               labels=unique_tags)
    # Context manager guarantees the report file is closed even on error.
    with open("precision_recall_fscore.txt", "w") as fw:
        fw.write('{:20} {:10} {:10} {:10}'.format("Tag", "Precision", "Recall", "F-Measure"))
        fw.write("\n")
        for (t, p, r, f) in zip(unique_tags, precision, recall, fscore):
            fw.write('{:20} {:10} {:10} {:10}'.format(str(t), str(floatFormat.format(p)), str(floatFormat.format(r)), str(floatFormat.format(f))))
            fw.write("\n")
def getTags(filename):
    """Read a tagged corpus file and return its flat tag sequence.

    Tokens shaped like word_TAG contribute TAG; all other tokens contribute
    the outside label 'O'.
    """
    tags = []
    with open(filename, 'r') as source:
        for line in source:
            for token in word_tokenize(line):
                tags.append(token.split('_')[1] if '_' in token else 'O')
    return tags
def getOutputAsString(classified_text):
    """Render (word, tag) pairs as a single string.

    Entity words become 'word_TAG '; outside ('O') words stay bare, each
    followed by a trailing space.
    """
    pieces = []
    for word, tag in classified_text:
        if tag == 'O':
            pieces.append(word + " ")
        else:
            pieces.append(word + "_" + tag + " ")
    return "".join(pieces)
def main():
    """Tag the test corpus with the Stanford NER model and report per-tag
    precision/recall/F-measure against the expected tags."""
    if len(sys.argv) != 3:
        print("Usage: python3 111508041_Assign5-Code.py <pretrained_file> <test_file>")
        exit(1)
    expected_tags = getTags(sys.argv[1])
    print(expected_tags)
    try:
        st = StanfordNERTagger('/home/dell/Practicals/NLP/111508041_Assign5/stanford-ner-2018-02-27/classifiers/english.muc.7class.distsim.crf.ser.gz', '/home/dell/Practicals/NLP/111508041_Assign5/stanford-ner-2018-02-27/stanford-ner.jar', encoding="utf-8")
    except LookupError:
        # Fixed: the original caught the non-existent name `LookUpError`,
        # which would itself raise NameError instead of reporting the problem
        # (NLTK raises LookupError when the jar/model path is wrong).
        print("Please change the path to the jar file")
        exit(1)
    fr = open(sys.argv[2], 'r')
    fw = open('NER_labelled_Corpus_111508041.txt', 'w')
    for lines in fr:
        tokenized_text = word_tokenize(lines)
        classified_text = st.tag(tokenized_text)
        output_str = getOutputAsString(classified_text)
        fw.write(output_str)
        fw.write('\n')
        fw.flush()
    fw.close()
    fr.close()
    # NOTE(review): the tagged corpus above is written to
    # 'NER_labelled_Corpus_111508041.txt' but the evaluation reads
    # 'output.txt' -- confirm output.txt is produced elsewhere, otherwise
    # this should read the file written above.
    generated_tags = getTags('output.txt')
    print(generated_tags)
    computeStatistics(expected_tags, generated_tags)
# Run the full tagging/evaluation pipeline only when executed as a script.
if __name__ == '__main__':
    main()
| YashashreeKolhe/Natural-Language-Processing | 111508041_Assign5/111508041_Assign5-Code.py | 111508041_Assign5-Code.py | py | 2,372 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sklearn.metrics.precision_recall_fscore_support",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.word_tokenize",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 52,
"usage_type": "attribute"
},
... |
71345725154 | from .models import BookModel, AuthorModel
from django import forms
import re
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import ValidationError
import datetime
class BookCreateForm(forms.Form):
    """Form for creating a book.

    Collects comma-separated author names, the title, publication year,
    language, description and an optional cover image. The clean_* hooks
    below enforce per-field character whitelists via regular expressions.
    Placeholders and validation messages are user-facing Russian strings.
    """
    # Comma-separated list of author names (parsed downstream).
    authors = forms.CharField(max_length=100, required=True, widget=forms.TextInput(attrs={
        'id': 'authors_field',
        'class': 'input-book',
        'placeholder': 'Введите название авторов через запятую'
    }))
    # Book title.
    name = forms.CharField(max_length=100, required=True, widget=forms.TextInput(attrs={
        'class': 'input-book',
        'placeholder': 'Введите название книги'
    }))
    # Publication year.
    # NOTE(review): max_value is evaluated once at import time, so it goes
    # stale after a calendar-year rollover until the process restarts;
    # clean_year below re-checks against the current year at request time.
    year = forms.IntegerField(max_value=datetime.datetime.now().year, required=False, widget=forms.NumberInput(attrs={
        'class': 'input-book',
        'placeholder': 'Введите год издания книги'
    }))
    # Book language.
    language = forms.CharField(max_length=30, required=False, widget=forms.TextInput(attrs={
        'class': 'input-book',
        'placeholder': 'Введите язык книги'
    }))
    # Free-text description (rendered as a textarea).
    description = forms.CharField(max_length=400, required=False, widget=forms.Textarea(attrs={
        'class': 'input-book-description-textarea',
        'placeholder': 'Введите описание книги'
    }))
    # Optional cover image upload.
    image = forms.ImageField(required=False, widget=forms.FileInput(attrs={
        'class': 'file-input',
        'placeholder': 'Описание...',
        'name': "file",
        'id': "choose-file-container",
        'oninvalid': "this.setCustomValidity('Enter User Name Here')",
        'oninput': "this.setCustomValidity('')"
    }))

    def clean_authors(self):
        """Reject author strings containing characters outside the
        Russian/Latin letters and -,.'<space> whitelist."""
        authors = self.cleaned_data['authors']
        if re.search(r'[^а-яА-Яa-zA-Z-,.\' ]', authors):
            raise forms.ValidationError(
                "Имя автора может состоять из букв русского и английского алфавита, символов -,.' ")
        return authors

    def clean_name(self):
        """Validate the title against its (wider) character whitelist, which
        additionally allows digits and common punctuation."""
        name = self.cleaned_data['name']
        if re.search(r'[^а-яА-Яa-zA-Z-,1234567890.+!?#$%()/@\' ]', name):
            raise forms.ValidationError(
                "Название книги может состоять из букв русского и английского алфавита, цифр, символов -,.'+!?#$%()/@")
        return name

    def clean_description(self):
        """Validate the description against its character whitelist."""
        description = self.cleaned_data['description']
        if re.search(r'[^а-яА-Яa-zA-Z-,.()%$#@!&*?+=/;:"0123456789\' ]', description):
            raise forms.ValidationError(
                "Описание может состоять из букв русского и английского алфавита, цифр, символов -,.'()%$#@!&*?+=/;:")
        return description

    def clean_language(self):
        """Validate the language: letters plus -,. and spaces only."""
        language = self.cleaned_data['language']
        if re.search(r'[^а-яА-Яa-zA-Z-,. ]', language):
            raise forms.ValidationError(
                "Язык может состоять из букв русского и английского алфавита, -,.")
        return language

    def clean_year(self):
        """Validate the optional year: at most 4 digits, positive and not in
        the future (checked against the current year at request time)."""
        year = self.cleaned_data['year']
        if year:
            cur_year = datetime.datetime.now().year
            # Five consecutive digits means more than four digits were entered.
            if re.search(r'\d\d\d\d\d', str(year)):
                raise forms.ValidationError(
                    "Год может состоять только из цифр (максимум 4 цифры)!")
            if year > cur_year or year <= 0:
                raise forms.ValidationError(
                    "Год не может быть больше текущего и меньше 0")
        return year
| cyber-tatarin/booka | books/forms.py | forms.py | py | 3,799 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.Form",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.forms... |
26582542421 | import subprocess
import warnings
import sys
import numpy as np
from datetime import datetime
#---------------------------------------------------------------------------------------#
# Dics and Colors
#---------------------------------------------------------------------------------------#
# Maps the quantity index used throughout this module to the corresponding
# cbsmodel command keyword.
Dic_Keys = {0:'energy',1:'BE2',2:'ME2',3:'rho2E0'}
class bcolors:
    """ANSI escape sequences for colored/styled terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
#---------------------------------------------------------------------------------------#
# Read input file
#---------------------------------------------------------------------------------------#
def read_input(self):
    '''Read and parse the cbsmodel input file.

    Populates self.name_fit_params, self.cbs_file and self.cbs_path from the
    `fit` command and collects the requested observables. A and Z lines are
    cross-checked against self.A / self.Z (mismatch only warns).

    :return: tuple of numpy arrays (energies, BE2, ME2, rho2E0)
    :raises ValueError: on a malformed `fit` or `energy` command
    '''
    out_cbs_energies = []
    out_cbs_BE2 = []
    out_cbs_ME2 = []
    out_cbs_rho2E0 = []
    # Context manager guarantees the handle is closed (the original file
    # object was opened but never closed).
    with open('%s%s' % (self.input_path, self.input_file)) as cbs_data_file:
        lines_cbs_data_file = cbs_data_file.readlines()
    for line in lines_cbs_data_file:
        if len(line) <= 1:
            continue  # skip empty lines (bare '\n')
        elements_line = line.split()
        if elements_line[0] == 'A':
            if self.A != int(elements_line[1]):
                warnings.warn('Mass numbers do not coincide! Using A = %i' % self.A, UserWarning)
        elif elements_line[0] == 'Z':
            if self.Z != int(elements_line[1]):
                warnings.warn('Charge numbers do not coincide! Using Z = %i' % self.Z, UserWarning)
        elif elements_line[0] == 'fit':
            if len(elements_line) < 3:
                raise ValueError('cbsmodel fit command should specify the data file '
                                 'and at least one fit parameter!')
            self.name_fit_params = elements_line[2:]
            # Split the data-file argument into directory part and file name.
            splitted_cbs_file = elements_line[1].split('/')
            self.cbs_file = splitted_cbs_file[-1]
            if len(splitted_cbs_file) == 1:
                self.cbs_path = ''
            else:
                self.cbs_path = '/'.join(elements_line[1].split('/')[:-1]) + '/'
        elif elements_line[0] == 'energy':
            if len(elements_line) != 3:
                raise ValueError('cbsmodel input for energy not correct in %s! '
                                 'Must be `energy L s` not `%s`!' % (self.cbs_file, ' '.join(elements_line)))
            out_cbs_energies.append([int(elements_line[1]), int(elements_line[2])])
        elif elements_line[0] == 'BE2':
            if int(elements_line[1]) == 0 and int(elements_line[3]) == 0:
                warnings.warn('No E2 gamma transition between two states with J=0 possible. '
                              'Ignoring the transition %s.' % ' '.join(elements_line), UserWarning)
            else:
                out_cbs_BE2.append([int(elements_line[1]), int(elements_line[2]),
                                    int(elements_line[3]), int(elements_line[4])])
        elif elements_line[0] == 'ME2':
            if int(elements_line[1]) == 0 and int(elements_line[3]) == 0:
                warnings.warn('No E2 gamma transition between two states with J=0 possible. '
                              'Ignoring the transition %s.' % ' '.join(elements_line), UserWarning)
            else:
                out_cbs_ME2.append([int(elements_line[1]), int(elements_line[2]),
                                    int(elements_line[3]), int(elements_line[4])])
        elif elements_line[0] == 'rho2E0':
            out_cbs_rho2E0.append([int(elements_line[1]), int(elements_line[2]),
                                   int(elements_line[3]), int(elements_line[4])])
    return np.array(out_cbs_energies), np.array(out_cbs_BE2), np.array(out_cbs_ME2), np.array(out_cbs_rho2E0)
#---------------------------------------------------------------------------------------#
# Fit CBS to input data
#---------------------------------------------------------------------------------------#
def cbs_fit_data(self, *args):
    '''Fit CBS to data as indicated in the cbsmodel input file.

    Extra cbsmodel commands (e.g. a starting value such as "rb 0.3") can be
    passed via *args; they are inserted before the fit command.

    :return: raw stdout bytes of the cbsmodel run
    '''
    commands = ['A %i Z %i' % (self.A, self.Z), 'Wu']
    commands.extend(str(arg) for arg in args)
    commands.append('fit %s%s %s' % (self.cbs_path, self.cbs_file,
                                     ' '.join(self.name_fit_params)))
    commands.append('exit')
    # The joined command stream mirrors an interactive cbsmodel session.
    return subprocess.run('cbsmodel %s' % ' '.join(commands),
                          shell=True, capture_output=True).stdout
#---------------------------------------------------------------------------------------#
# Extract CBS parameters
#---------------------------------------------------------------------------------------#
def extract_params(self):
    '''Extract structural parameters (r_beta etc.) from cbsmodel output.

    Tries a grid of starting values for r_beta until a cbsmodel fit
    converges; on success stores the reduced chi-square in self.red_chi and
    alternating (value, uncertainty) entries for every fit parameter in
    self.fit_params. Exits the process if no starting value converges.
    '''
    #Perform fits to data with different r_beta until solution is found
    for r_beta in np.arange(0.1,1,0.2):
        output_cbs = cbs_fit_data(self,'rb %s'% r_beta)
        if b'Fit successful' in output_cbs:
            self.cbs_fit_success = True
            if self.verbose:
                print('Fit successful!')
            break
        if self.verbose:
            print('Fit with starting value r_beta = %.2f not successful. Continuing...'% r_beta)
    else:
        # for/else: reached only when the loop finished without `break`,
        # i.e. none of the starting values converged.
        sys.exit(f'{bcolors.FAIL}Error: Fits in cbsmodel did not converge.{bcolors.ENDC}')
    #Split output by linebreaks
    output_cbs = output_cbs.split(b'\n')
    #extract reduced chisquare and fit parameters
    # NOTE(review): line offsets 16 and 7+i assume a fixed cbsmodel output
    # layout -- confirm against the cbsmodel version in use.
    self.red_chi = float(output_cbs[16].split(b':')[1])
    # Flat array of alternating (value, uncertainty) per fit parameter.
    self.fit_params = np.zeros((2*len(self.name_fit_params)))
    for i in range(len(self.name_fit_params)):
        string_param = output_cbs[7+i].split(b':')[1].split(b'+-')
        self.fit_params[2*i] = float(string_param[0])
        self.fit_params[2*i+1] = float(string_param[1])
    return
#---------------------------------------------------------------------------------------#
# Calculate quantities of interest
#---------------------------------------------------------------------------------------#
def calculate_cbs_quantities(self, in_list, in_keyword):
    '''Use the fitted structural parameters to compute CBS predictions for the
    quantities requested in the input file.

    :param in_list: numpy array of quantum-number rows (one per quantity)
    :param in_keyword: cbsmodel keyword ('energy', 'BE2', 'ME2' or 'rho2E0')
    :return: raw stdout bytes of the cbsmodel run
    '''
    run_string = 'A %i Z %i ' % (self.A, self.Z)
    run_string += 'Wu simpleoutput '
    # Fix the fitted parameters to their best-fit values.
    for num_param, param in enumerate(self.name_fit_params):
        run_string += '%s %.5f ' % (param, self.fit_params[2*num_param])
    for num_quantity, quantity in enumerate(in_list):
        if in_keyword == 'energy':
            run_string += '%s %i %i ' % (in_keyword, in_list[num_quantity, 0], in_list[num_quantity, 1])
        elif in_keyword in ['BE2', 'ME2', 'rho2E0']:
            run_string += '%s %i %i %i %i ' % (in_keyword, in_list[num_quantity, 0], in_list[num_quantity, 1],
                                               in_list[num_quantity, 2], in_list[num_quantity, 3])
        else:
            # Fixed: the original called `warning.warn`, a non-existent name
            # that would raise NameError instead of emitting the warning.
            warnings.warn('keyword %s not known in cbsmodel!' % in_keyword, UserWarning)
    run_string += 'exit'
    output_cbs = subprocess.run('cbsmodel %s' % run_string, shell=True, capture_output=True).stdout
    return output_cbs
#---------------------------------------------------------------------------------------#
# Extract calculated CBS quantities
#---------------------------------------------------------------------------------------#
def extract_cbs_quantities(self, in_output_cbs):
    '''Extract the requested quantities from cbsmodel's simpleoutput.

    The first two whitespace-separated tokens are skipped; every remaining
    token is parsed as a float.
    '''
    tokens = in_output_cbs.split()
    return np.array([float(token) for token in tokens[2:]])
#---------------------------------------------------------------------------------------#
# Write results to output file
#---------------------------------------------------------------------------------------#
def write_output(self):
    '''Assemble the human-readable results summary for the fitted nucleus.

    Contains a header with nucleus name and timestamp, the fitted parameters
    with uncertainties, and one line per computed observable.

    :return: the complete summary as a single string
    '''
    chunks = ['#############################\n',
              '# Results CBSplot #\n',
              '# %i%s #\n' % (self.A, self.nucl_name),
              '# %s #\n' % datetime.now().strftime('%d-%m-%Y %H:%M:%S'),
              '#############################\n',
              '\n']
    # Fitted CBS parameters as value +/- uncertainty.
    for idx, param_name in enumerate(self.name_fit_params):
        chunks.append('%s\t%.5f +/- %.5f\n' % (param_name, self.fit_params[2*idx], self.fit_params[2*idx+1]))
    chunks.append('\n')
    # One section per observable type that was actually computed (only
    # computed observables are numpy arrays; the rest stay as lists).
    quantities = [self.cbs_energies, self.cbs_BE2, self.cbs_ME2, self.cbs_rho2E0]
    for kind_idx, quantity in enumerate(quantities):
        if not isinstance(quantity, np.ndarray):
            continue
        for element in quantity:
            if kind_idx == 0:
                chunks.append('%s %i %i %.2f\n' % (Dic_Keys[kind_idx], element[0],
                                                   element[1], element[2]))
            else:
                chunks.append('%s %i %i %i %i %.2f\n' % (Dic_Keys[kind_idx], element[0],
                                                         element[1], element[2], element[3], element[4]))
        chunks.append('\n')
    return ''.join(chunks)
#---------------------------------------------------------------------------------------#
# Main CBS calculations
#---------------------------------------------------------------------------------------#
def main_cbs_calculations(self):
    '''Perform the complete CBS calculation as specified in the cbsmodel
    input file.

    Reads the requested observables, fits the CBS parameters to data and
    attaches the computed quantities to `self` (cbs_energies, cbs_BE2,
    cbs_ME2, cbs_rho2E0). Optionally writes and/or prints a result summary.
    '''
    # extract quantities to be calculated
    cbs_energies, cbs_BE2, cbs_ME2, cbs_rho2E0 = read_input(self)
    # perform CBS fit to data and extract parameters (r_beta, etc.)
    extract_params(self)
    for num_quantity, quantity in enumerate([cbs_energies, cbs_BE2, cbs_ME2, cbs_rho2E0]):
        # read_input returns numpy arrays; the original `quantity != []`
        # comparison is ambiguous/deprecated for arrays, so test the length.
        if len(quantity) > 0:
            output_cbs_quantities = calculate_cbs_quantities(self, quantity, Dic_Keys[num_quantity])
            # Store the quantum numbers alongside the computed value(s).
            if num_quantity == 0:
                self.cbs_energies = np.zeros((len(cbs_energies), 3))
                self.cbs_energies[:, :2] = cbs_energies
                self.cbs_energies[:, 2] = extract_cbs_quantities(self, output_cbs_quantities)
            elif num_quantity == 1:
                self.cbs_BE2 = np.zeros((len(cbs_BE2), 5))
                self.cbs_BE2[:, :4] = cbs_BE2
                self.cbs_BE2[:, 4] = extract_cbs_quantities(self, output_cbs_quantities)
            elif num_quantity == 2:
                self.cbs_ME2 = np.zeros((len(cbs_ME2), 5))
                self.cbs_ME2[:, :4] = cbs_ME2
                self.cbs_ME2[:, 4] = extract_cbs_quantities(self, output_cbs_quantities)
            elif num_quantity == 3:
                self.cbs_rho2E0 = np.zeros((len(cbs_rho2E0), 5))
                self.cbs_rho2E0[:, :4] = cbs_rho2E0
                self.cbs_rho2E0[:, 4] = extract_cbs_quantities(self, output_cbs_quantities)
    # create output and save/print it if requested
    output = write_output(self)
    # NOTE(review): `self.write_output` is expected to be a boolean attribute
    # on the caller object; it shadows the module-level write_output()
    # function name -- confirm this is intentional.
    if self.write_output:
        with open('%sresults_%i%s.txt' % (self.out_path, self.A, self.nucl_name), 'w') as out_file:
            out_file.write(output)
    if self.verbose:
        print(output)
    return
| TB-IKP/CBSplot | CBSplot/CBS_commands.py | CBS_commands.py | py | 9,902 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "warnings.warn",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_n... |
14366329848 | '''
@autor Simone Lima
'''
import pandas as pd
from .files import AppFilesPath
from domain.app.models import VersionModel, ReleaseNoteModel, ReleaseNoteType
from domain.utils.check_value_utils import CheckValuesUtils
from domain.utils.file_utils import CSVColumnsName, CSV_EXTENSION
from django.db import transaction
class VersionService:
    """Installs application versions (and their release notes) listed in the
    bundled CSV files, skipping versions already present in the database."""

    def install(self):
        """Read the versions CSV, prepare every not-yet-installed version and
        persist them together with their release notes."""
        installed_versions = VersionModel.objects.find_all()
        versions:dict = {}
        df = pd.read_csv(AppFilesPath.VERSIONS)
        for index, row in df.iterrows():
            version_instance = self.__create_version_instance(row)
            if self.__is_installed(version_instance, installed_versions):
                # Already installed -> skip (runtime messages kept in Portuguese).
                print('Ignorando a versao: {} porque ja foi instalada! '.format(version_instance.version))
            else:
                print('Preparando a versao: {} {}'.format(version_instance.version, version_instance.name))
                release_notes = self.__create_release_notes_instances(version_instance)
                # Keyed by version number; value pairs the unsaved model with
                # its (also unsaved) release-note instances.
                versions[version_instance.version] = (version_instance,release_notes)
        self.__create_version_with_related_items(versions)

    # Build a VersionModel instance from one CSV row.
    def __create_version_instance(self, row):
        version_name = row[CSVColumnsName.NAME.value]
        version_number = row[CSVColumnsName.NUMBER.value]
        CheckValuesUtils.non_empties([version_name,version_number])
        return VersionModel.factory(version_name,version_number)

    # Build the ReleaseNoteModel instances for one version from its
    # per-version CSV file (named <version><extension>).
    def __create_release_notes_instances(self, version_instance, ):
        rn_df = pd.read_csv('{}{}{}'.format(AppFilesPath.REALESE_NOTES,version_instance.version,CSV_EXTENSION))
        release_notes = []
        for rn_index, rn_row in rn_df.iterrows():
            description = rn_row[CSVColumnsName.DESCRIPTION.value]
            note_type = rn_row[CSVColumnsName.TYPE.value]
            CheckValuesUtils.non_empties([description,note_type])
            ReleaseNoteType.validate(note_type)
            release_notes.append(ReleaseNoteModel.factory(description,note_type))
        return release_notes

    def __is_installed(self,version_instance:VersionModel, installed_versions:list[VersionModel]):
        """Return True if a version with the same numeric value is already
        installed.

        NOTE(review): versions are compared as floats, so '1.10' == '1.1' --
        confirm version numbers never need more than one dot segment.
        """
        if not installed_versions:
            return False
        for instaled_version in installed_versions:
            if float(instaled_version.version) == float(version_instance.version):
                return True
        return False

    @transaction.atomic
    def __create_version_with_related_items(self, versions:dict):
        """Persist every prepared version and attach its release notes inside
        a single database transaction (all-or-nothing)."""
        if versions:
            for key, values in versions.items():
                version_instance = values[0]
                print('Instalando a versao: {} {}'.format(version_instance.version, version_instance.name))
                # Save the parent first so the related release notes have a
                # foreign-key target before .set() persists them (bulk=False).
                version_instance.save()
                version_instance.release_notes.set(values[1],bulk=False )
                print('Versao: {} {} instalada com sucesso!'.format(version_instance.version, version_instance.name))
| slimaElixir/restaurante | domain/app/services/versions.py | versions.py | py | 3,239 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "domain.app.models.VersionModel.objects.find_all",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "domain.app.models.VersionModel.objects",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "domain.app.models.VersionModel",
"line_number": 1... |
27177815209 | """
datasets.py
"""
import numpy as np
import pandas as pd
from collections import namedtuple
from gensim.corpora import Dictionary
# Load two dictionary
# Module-level gensim Dictionary, loaded once at import time from vocab.txt
# (raises at import if the file is missing).
dct = Dictionary.load_from_text("vocab.txt")
def doc2bow(morphemes):
    """Convert a whitespace-separated morpheme string to a BOW vector.

    :param morphemes: morphemes as one space-delimited string
    :return: BOW vector as a list of (token_id, frequency) pairs
    """
    # `global dct` was dropped: it is only needed when assigning to the
    # module-level name, not for read access.
    return dct.doc2bow(morphemes.split())
def load_dataset():
""" Load dataset
:return: data and label
"""
global dct
# Load training dataset
df = pd.read_csv("livedoor_news.csv")
Dataset = namedtuple("Dataset", ["news", "data", "target", "target_names", "dct"])
news = [doc for doc in df["news"]]
data = [doc2bow(doc) for doc in df["news"]]
target = np.array([label for label in df["class"]]).astype(np.int64)
target_names = ["dokujo-tsushin","it-life-hack","kaden-channel","livedoor-homme",
"movie-enter","peachy","smax","sports-watch","topic-news"]
ldcc_dataset = Dataset(news, data, target, target_names, dct)
return ldcc_dataset
| pytry3g/pytorch-example | nlp/classification/ldcc/datasets.py | datasets.py | py | 1,190 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gensim.corpora.Dictionary.load_from_text",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "gensim.corpora.Dictionary",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 29,
"usage_type": "call"
},
{
... |
2378264148 | import cv2
from matplotlib import pyplot as plt
import numpy as np
# Source image used by every transformation demo below.
input_image = "Prasanna.png"
def display_save(display_name, file_name, img):
    """Show `img` in a window titled `display_name`, wait for a key press,
    close all windows, then write the image to `file_name`."""
    cv2.imshow(display_name, img)
    cv2.waitKey(0)  # block until any key is pressed
    cv2.destroyAllWindows()
    cv2.imwrite(file_name, img)
def grayscale():
    """Load the demo image as grayscale and display/save it."""
    img = cv2.imread(input_image, cv2.IMREAD_GRAYSCALE)
    display_save("Gray", "prasanna_gray.png", img)
def blur():
    """Smooth the demo image with a 20x20 normalized box filter."""
    src = cv2.imread(input_image)
    box_kernel = np.ones((20, 20), np.float32) / 400  # 1/(20*20) per tap
    smoothed = cv2.filter2D(src, -1, box_kernel)
    display_save("Blurred", "prasanna_blurred.png", smoothed)
def change_color_space():
    """Convert the demo image from BGR to HSV and display/save it."""
    bgr = cv2.imread(input_image)
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    display_save("BGR2HSV", "prasanna_hsv.png", hsv)
def mix_color_scheme():
    """Apply a different color-space reinterpretation to vertical slices of
    the image for a mixed-palette effect.

    Fixes vs. the original:
    - cv2.COLOR_BGR2RBG does not exist (AttributeError); the intended
      constant is cv2.COLOR_BGR2RGB.
    - the middle-third assignment had a stray ':' in its source slice
      (`:int(cols/3):2*int(cols/3)`), producing a shape mismatch at runtime.
    """
    img = cv2.imread(input_image)
    rows, cols, channels = img.shape
    # Left two-thirds: swap red/blue channels.
    img[:int(rows), :2*int(cols/3), :] = cv2.cvtColor(img[:int(rows), :2*int(cols/3), :], cv2.COLOR_BGR2RGB)
    # Middle third: reinterpret the values as LAB and convert back to BGR.
    img[:int(rows), int(cols/3):2*int(cols/3), :] = cv2.cvtColor(img[:int(rows), int(cols/3):2*int(cols/3), :], cv2.COLOR_LAB2BGR)
    # Right third: reinterpret as RGB and map to YCrCb.
    img[:int(rows), 2*int(cols/3):, :] = cv2.cvtColor(img[:int(rows), 2*int(cols/3):, :], cv2.COLOR_RGB2YCrCb)
    display_save("Mix Color", "prasanna_mix_color.png", img)
def erosion():
    """Erode the demo image once with an 8x8 structuring element."""
    src = cv2.imread(input_image)
    structuring_element = np.ones((8, 8), np.uint8)
    eroded = cv2.erode(src, structuring_element, iterations=1)
    display_save("erosion", "prasanna_eroded.png", eroded)
def morph_gradient():
    """Compute the morphological gradient of the demo image with a 10x10
    structuring element."""
    src = cv2.imread(input_image)
    structuring_element = np.ones((10, 10), np.uint8)
    outline = cv2.morphologyEx(src, cv2.MORPH_GRADIENT, structuring_element)
    display_save("gradient", "prasanna_gradient.png", outline)
def mask_face():
    """Keep only the rectangle rows 200:780 / cols 300:750 of the grayscale
    demo image; everything outside is blacked out."""
    gray = cv2.imread(input_image, cv2.IMREAD_GRAYSCALE)
    mask = np.zeros(gray.shape[:2], np.uint8)
    mask[200:780, 300:750] = 255  # white rectangle marks the kept region
    masked_img = cv2.bitwise_and(gray, gray, mask=mask)
    display_save("Masked Image", "masked_img.png", masked_img)
def histogram_equalization():
    """Equalize the grayscale histogram of the demo image."""
    gray = cv2.imread(input_image, cv2.IMREAD_GRAYSCALE)
    equalized = cv2.equalizeHist(gray)
    display_save("Histogram Equalization", "prasanna_gray_histequ.png", equalized)
def affine_transformation():
    """Warp the grayscale demo image with an affine map defined by three
    point correspondences."""
    gray = cv2.imread(input_image, cv2.IMREAD_GRAYSCALE)
    rows, cols = map(int, gray.shape)
    src_points = np.float32([[70, 50], [200, 70], [70, 200]])
    dst_points = np.float32([[100, 100], [200, 50], [100, 250]])
    warp_matrix = cv2.getAffineTransform(src_points, dst_points)
    warped = cv2.warpAffine(gray, warp_matrix, (cols, rows))
    display_save("Affline Transformation", "prasanna_affline1.png", warped)
def edge_detection():
    """Detect edges with the Canny detector (hysteresis thresholds 100/200)."""
    img = cv2.imread(input_image, cv2.IMREAD_GRAYSCALE)
    edges = cv2.Canny(img,100,200)
    display_save("Edges", "prasanna_edges.png", edges)
def image_thresholding():
    """Binary-threshold the grayscale demo image at 127 (max value 255)."""
    gray = cv2.imread(input_image, cv2.IMREAD_GRAYSCALE)
    _, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
    display_save("IMG_THRESHOLDING", "prasanna_thresholding.png", binary)
'''
grayscale()
blur() #1
change_color_space() #2
switch_color_scheme() #3
erosion() #4
morph_gradient() #5
mask_face() #6
histogram_equalization() #7
affine_transformation() #8
edge_detection() #9
image_thresholding() #10
'''
blur() | ppartha2018/ComputerVision---Projects | BasicTransformations/Transformations.py | Transformations.py | py | 3,122 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "cv2.imshow",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_nu... |
27894651886 | import streamlit as st
from PIL import Image
import random
import pandas as pd
def app():
    """Render lesson 18: a jigsaw exercise where pupils put nine labelled
    crops of Van Gogh's "Fields near the Alpilles" back into order (a
    list-ordering puzzle built with Streamlit)."""
    image = Image.open('./picture/khtn.PNG')
    st.image(image, width=500)
    st.markdown("------")
    # CSS class used by the big lesson title below.
    st.markdown("""
    <style>
    .big-font {
    font-size:80px !important;
    }
    </style>
    """, unsafe_allow_html=True)
    st.markdown('<center><p class="big-font"><font color="darkblue">Bài 15: List (mở rộng)</center></p>', unsafe_allow_html=True)
    st.markdown("## Hãy xắp xếp hình theo hình mẫu")
    st.image(Image.open('./picture/van.jpg'), width=800)
    st.write("### Hình cần xắp xếp: ")
    col1, col2, col3, col4 = st.columns(4)
    # left = 5
    # top = height / 4
    # right = 164
    # bottom = 3 * height / 4
    # im1 = im.crop((left, top, right, bottom))
    # Source picture dimensions and the size of one third in each direction
    # (the picture is cut into a 3x3 grid of tiles).
    imwidth = 1920
    imheight = 1604
    onethid_imw = imwidth/3 # = 640
    onethid_height = imheight/3 # = 534.6
    # Shuffled tiles, each labelled a..i for the pupil to reference.
    with col1:
        pic6 = st.image(Image.open('./picture/van.jpg').crop((onethid_imw*2, onethid_height, onethid_imw*3, onethid_height*2)), width=350)
        #st.write("## a")
        st.markdown('<h2 id ="a"><center><strong> a </center></a>', unsafe_allow_html=True)
        pic5 = st.image(Image.open('./picture/van.jpg').crop((onethid_imw, onethid_height, onethid_imw*2, onethid_height*2)), width=350)
        st.markdown('<h2 id ="a"><center><strong> d </center></a>', unsafe_allow_html=True)
        pic3 = st.image(Image.open('./picture/van.jpg').crop((onethid_imw*2, 0, onethid_imw*3, onethid_height)), width=350)
        st.markdown('<h2 id ="a"><center><strong> g </center></a>', unsafe_allow_html=True)
        #st.markdown('<p><center><strong> d </center></span></p>', unsafe_allow_html=True)
        #st.markdown('<div data-testid="caption" class="css-1b0udgb etr89bj0" style="width: 500px;"><span style="font-size: 20px"><center><strong> g </center></span></div>', unsafe_allow_html=True)
    with col2:
        pic9 = st.image(Image.open('./picture/van.jpg').crop((onethid_imw*2, onethid_height*2, onethid_imw*3, onethid_height*3)), width=350)
        st.markdown('<h2 id ="a"><center><strong> b </center></a>', unsafe_allow_html=True)
        pic7 = st.image(Image.open('./picture/van.jpg').crop((0, onethid_height*2, onethid_imw, onethid_height*3)), width=350)
        st.markdown('<h2 id ="a"><center><strong> e </center></a>', unsafe_allow_html=True)
        pic1 = st.image(Image.open('./picture/van.jpg').crop((0, 0, onethid_imw, onethid_height)), width=350)
        st.markdown('<h2 id ="a"><center><strong> h </center></a>', unsafe_allow_html=True)
    with col3:
        pic4 = st.image(Image.open('./picture/van.jpg').crop((0, onethid_height, onethid_imw, onethid_height*2)), width=350)
        st.markdown('<h2 id ="a"><center><strong> c </center></a>', unsafe_allow_html=True)
        pic8 = st.image(Image.open('./picture/van.jpg').crop((onethid_imw, onethid_height*2, onethid_imw*2, onethid_height*3)), width=350)
        st.markdown('<h2 id ="a"><center><strong> f </center></a>', unsafe_allow_html=True)
        pic2 = st.image(Image.open('./picture/van.jpg').crop((onethid_imw, 0, onethid_imw*2, onethid_height)), width=350)
        st.markdown('<h2 id ="a"><center><strong> i </center></a>', unsafe_allow_html=True)
    with col4:
        pass
    st.write("## Hãy xắp xếp hình trên vào thứ tự trong khung để hoàn thiện")
    st.image(Image.open('./picture/frame.PNG'), width=400)
    selection = st.multiselect(" ", options=["a", "b", "c", "d", "e", "f", "g", "h", "i"])
    if st.button("Xem kết quả: "):
        # Correct answer: the tile labels in reading order of the 3x3 grid
        # (selection order matters, matching the crop coordinates above).
        if selection == ["h", "i", "g", "c", "d", "a", "e", "f", "b"]:
            st.write("#### Chính xác chúc mừng bạn :tada:")
            # Re-assemble the solved picture column by column.
            col5, col6, col7, col8, col9, col10 = st.columns(6)
            with col5:
                pic1 = st.image(Image.open('./picture/van.jpg').crop((0, 0, onethid_imw, onethid_height)), width=200)
                pic4 = st.image(Image.open('./picture/van.jpg').crop((0, onethid_height, onethid_imw, onethid_height*2)), width=200)
                pic7 = st.image(Image.open('./picture/van.jpg').crop((0, onethid_height*2, onethid_imw, onethid_height*3)), width=200)
            with col6:
                pic2 = st.image(Image.open('./picture/van.jpg').crop((onethid_imw, 0, onethid_imw*2, onethid_height)), width=200)
                pic5 = st.image(Image.open('./picture/van.jpg').crop((onethid_imw, onethid_height, onethid_imw*2, onethid_height*2)), width=200)
                pic8 = st.image(Image.open('./picture/van.jpg').crop((onethid_imw, onethid_height*2, onethid_imw*2, onethid_height*3)), width=200)
            with col7:
                pic3 = st.image(Image.open('./picture/van.jpg').crop((onethid_imw*2, 0, onethid_imw*3, onethid_height)), width=200)
                pic6 = st.image(Image.open('./picture/van.jpg').crop((onethid_imw*2, onethid_height, onethid_imw*3, onethid_height*2)), width=200)
                pic9 = st.image(Image.open('./picture/van.jpg').crop((onethid_imw*2, onethid_height*2, onethid_imw*3, onethid_height*3)), width=200)
            with col8:
                pass
            with col9:
                pass
            with col10:
                pass
            st.write("----")
            st.write("#### Bức ảnh hoàn thiện ")
            st.image(Image.open('./picture/van.jpg'), width=800)
            st.write("""##### Thông tin tranh vẽ
            Tên: Fields near the Alpilles
            Tác giả: Vincent van Gogh’s
            Năm: 1889
            Địa điểm: Pháp""")
        else:
            st.write("#### Bạn sai rồi hãy xắp xếp lại nhé :cry:")
| johnluk0092/leesson1 | webs/lesson18.py | lesson18.py | py | 5,335 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PIL.Image.open",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "streamlit.image",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_... |
12039630568 | # -*- coding: utf-8 -*-
"""
Created on 02/17/2019
NSC - AD440 CLOUD PRACTICIUM
@author: Dao Nguyen
Changed ownership on 03/01/2019
@author: Michael Leon
"""
import urllib.request
import re
import os
import json
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import selenium.webdriver.chrome.service as service
from bs4 import BeautifulSoup
import bs4 as bs
import time
from dateutil.parser import parse
from datetime import datetime
import datetime as dt
import boto3
import uuid
# Shared resources for the scraper.
# DynamoDB handle used by to_dynamo() below.
dynamodb = boto3.resource('dynamodb', 'us-east-1')
# NOTE(review): this log file handle stays open for the life of the process
# and is never explicitly closed.
f = open("sslog.log", "w")
# Accumulates every scraped event dict (appended to in main()).
OUTPUT = []
# Known venue addresses; location tokens scraped from the page are matched
# against these strings.
ADDRESS = ['Renton: Lindbergh HS Pool: 16740 128th Ave SE Renton, WA 98058',
           'Shoreline Pool: 19030 1st Ave NE Shoreline, WA 98155',
           'Juanita Aquatic Center: 01 NE 132nd St Kirkland, WA 98034 425-936-1627',
           'Hazen High School: 1101 Hoquiam Ave NE Renton, WA 98059 425-204-4230']
#This script scrapes a website and pulls specific data.
def main():
    """Scrape the Shadow Seals calendar page and push each event to DynamoDB.

    Every table row on the page becomes a dict with Title, Date, Time,
    Location and Description keys, gets a deterministic UUID, and is
    written through to_dynamo().  Progress is logged to the module-level
    log file handle ``f``.
    """
    print("Starting SS Scraper; " + str(datetime.now()), file=f)
    try:
        # NOTE(review): "success" is logged before the request is attempted;
        # a failed fetch still logs this line first.
        print("Connecting to http://www.shadowsealsswimming.org/Calendar.html; success", file=f)
        source = urllib.request.urlopen('http://www.shadowsealsswimming.org/Calendar.html').read()
        soup = bs.BeautifulSoup(source, 'html.parser')
        # (dropped a redundant duplicate lookup of the same first table)
        table = soup.find('table')
        table_rows = table.find_all('tr')
        for tr in table_rows:
            td = tr.find_all('td')
            row = [i.text for i in td]
            data = {}
            # Normalise e.g. "Jul 13 - 15, 2019" into ["Jul", "13-15", "2019"].
            find_date = row[0].replace(' - ', '-').replace(',', '').split(' ')
            find_location = row[2].split(' ')
            # The title is every token of the description column before the
            # first "-" separator.
            target_title = ""
            for find_title in find_location:
                if find_title != "-":
                    target_title = target_title + find_title + " "
                else:
                    break
            data["Title"] = target_title.strip('\xa0').strip('\n')
            if len(find_date) >= 3:
                date_string = find_date[0] + ' ' + find_date[1] + ' ' + find_date[2].strip('\n')
                if '-' in find_date[1]:
                    # Day ranges such as "13-15": emit one record per day.
                    date_operation = find_date[1].split('-')
                    for x in range(int(date_operation[0]), int(date_operation[1]) + 1):
                        new_date_string = find_date[0] + ' ' + str(x) + ' ' + find_date[2].strip('\n')
                        date_object = validate_date(new_date_string)
                        if date_object:
                            data["Date"] = date_object.strftime('%Y-%m-%d')
                        # Renamed from "time" to stop shadowing the imported
                        # time module; dropped a redundant per-cell loop that
                        # recomputed the same value for every <td>.
                        time_text = ""
                        if ("pm" in row[1].lower() or "am" in row[1].lower()) and any(c.isdigit() for c in row[1]):
                            time_text += row[1]
                        data["Time"] = time_text.replace('\n', '')
                        for location in find_location:
                            for address in ADDRESS:
                                if location in address:
                                    data["Location"] = address
                        # BUG FIX: this key was misspelled "Desription", which
                        # made to_dynamo() fail with KeyError for every
                        # date-range event.
                        data["Description"] = row[2].replace('\n', '').replace('\xa0', '')
                        # NOTE(review): the same dict object is appended for
                        # every day of the range, so earlier entries end up
                        # sharing the final day's Date.
                        OUTPUT.append(data)
                else:
                    date_object = validate_date(date_string)
                    if date_object:
                        data["Date"] = date_object.strftime('%Y-%m-%d')
                    else:
                        # Fall back to the raw cell text when parsing fails.
                        data["Date"] = row[0].strip('\xa0')
                    time_text = ""
                    if ("pm" in row[1].lower() or "am" in row[1].lower()) and any(c.isdigit() for c in row[1]):
                        time_text += row[1]
                    data["Time"] = time_text.replace('\n', '')
                    for location in find_location:
                        for address in ADDRESS:
                            if location in address:
                                data["Location"] = address
                    data["Description"] = row[2].replace('\n', '').replace('\xa0', '')
            if "Location" not in data:
                data["Location"] = "Unknown"
            if "Time" not in data:
                data["Time"] = "Unknown"
            data["URL"] = "http://www.shadowsealsswimming.org/Calendar.html"
            # Deterministic ID so re-running the scraper overwrites rather
            # than duplicates events.
            # NOTE(review): rows without a parsable Date raise KeyError here
            # and abort the whole scrape via the outer except.
            data["ID"] = str(uuid.uuid3(uuid.NAMESPACE_DNS, data["Title"] + data["Date"]))
            print("Found event " + data["Title"], file=f)
            to_dynamo(data)
    except Exception:
        # Was a bare "except:"; narrowed so KeyboardInterrupt/SystemExit are
        # no longer swallowed.
        print("Connecting to http://www.shadowsealsswimming.org/Calendar.html; failed", file=f)
    print("Ending SS Scraper; " + str(datetime.now()), file=f)
def to_dynamo(data):
    """Persist one scraped event dict into the DynamoDB 'events' table."""
    item = {
        'ID': data['ID'],
        'URL': data['URL'],
        'Title': data['Title'],
        'Description': data['Description'],
        'Location': data['Location'],
        'Date': data['Date'],
    }
    dynamodb.Table('events').put_item(Item=item)
#s3.Object('mjleontest', 'browser_event_data.json').put(Body=open('browser_event_data.json', 'rb'))
# This function to check the date in string and return if date is valid or not
def validate_date(date_text):
    """Parse a loosely formatted date string into a datetime.

    *date_text* is expected to look like "<Month> <day> <year>" (the month
    may be a full name or an abbreviation).  The month token is normalised
    to its three-letter abbreviation and the result is tried against
    several common formats.

    Returns the parsed ``datetime`` on success, ``None`` otherwise.

    Improvements over the original: removed an unreachable ``break`` after
    ``return``, dropped a duplicated format string, and malformed input
    (too few tokens / unknown month) now returns ``None`` instead of
    raising NameError/IndexError.
    """
    fmts = ('%Y', '%b %d, %Y', '%B %d, %Y', '%B %d %Y', '%m/%d/%Y',
            '%m/%d/%y', '%b %Y', '%B%Y', '%b %d,%Y', '%b %d %Y')
    months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    parts = date_text.split(' ')
    if len(parts) < 3:
        return None
    # Match "January", "Jan", etc. down to the canonical abbreviation.
    set_month = next((m for m in months if m in parts[0]), None)
    if set_month is None:
        return None
    new_date_string = set_month + ' ' + parts[1] + ' ' + parts[2]
    for fmt in fmts:
        try:
            return dt.datetime.strptime(new_date_string, fmt)
        except ValueError:
            pass
    return None
# Script entry point: run the scraper once when invoked directly.
if __name__ == '__main__':
    main()
| ActoKids/web-crawler | scripts/browserCrawler/SSScraper.py | SSScraper.py | py | 6,625 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "boto3.resource",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "urllib.request... |
14932230430 | """Unit tests for metrics module."""
from ldp.utils import linalg
import torch
import torch.linalg
def test_effective_rank():
    """A diagonal matrix with one near-zero entry has effective rank ~2."""
    singular_values = torch.tensor([100, 100, 1e-6])
    rank = linalg.effective_rank(torch.diag(singular_values))
    assert torch.allclose(torch.tensor(rank), torch.tensor(2.))
def test_truncate():
    """Truncating the 3x3 identity to rank 2 yields a rank-2 matrix."""
    identity = torch.eye(3)
    truncated = linalg.truncate(identity, 2)
    assert torch.linalg.matrix_rank(truncated) == 2
def test_rowspace():
    """rowspace returns the orthogonal projection onto the row space."""
    matrix = torch.tensor([[3., 1., 0.], [0., 2., 0.], [1., 0., 0.]])
    projection = linalg.rowspace(matrix)
    # The rows span the xy-plane, so the projection zeroes the z axis.
    expected = torch.eye(3)
    expected[-1, -1] = 0
    assert projection.allclose(expected, atol=1e-7)
    # Projections are idempotent: P @ P == P.
    assert projection.mm(projection).allclose(projection, atol=1e-7)
def test_rowspace_close_to_zero():
    """A numerically-zero matrix projects onto an empty row space."""
    tiny = torch.ones(10, 10) / 1e10
    projection = linalg.rowspace(tiny)
    assert projection.equal(torch.zeros_like(projection))
def test_nullspace():
    """nullspace returns the orthogonal projection onto the null space."""
    matrix = torch.tensor([[3., 1., 0.], [0., 2., 0.], [1., 0., 0.]])
    projection = linalg.nullspace(matrix)
    # The rows span the xy-plane, so only the z axis survives.
    expected = torch.zeros(3, 3)
    expected[-1, -1] = 1
    assert projection.allclose(expected, atol=1e-6)
    # Projections are idempotent: P @ P == P.
    assert projection.mm(projection).allclose(projection, atol=1e-6)
| evandez/low-dimensional-probing | tests/utils/linalg_test.py | linalg_test.py | py | 1,459 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "torch.diag",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "ldp.utils.linalg.effective_rank",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "ldp.utils.lin... |
74541216032 | import os
import tornado
import importlib
from ..base.handlers import default_handlers as default_base_handlers
from ..services.kernels.pool import ManagedKernelPool
from .cell.parser import APICellParser
from .swagger.handlers import SwaggerSpecHandler
from .handlers import NotebookAPIHandler, parameterize_path, NotebookDownloadHandler
from notebook.utils import url_path_join
from traitlets import Bool, Unicode, Dict, default
from traitlets.config.configurable import LoggingConfigurable
class NotebookHTTPPersonality(LoggingConfigurable):
    """Personality for notebook-http support, creating REST endpoints
    based on the notebook's annotated cells
    """
    # Environment variable that can override the cell-parser module.
    cell_parser_env = 'KG_CELL_PARSER'
    cell_parser = Unicode('kernel_gateway.notebook_http.cell.parser',
        config=True,
        help="""Determines which module is used to parse the notebook for endpoints and
documentation. Valid module names include 'kernel_gateway.notebook_http.cell.parser'
and 'kernel_gateway.notebook_http.swagger.parser'. (KG_CELL_PARSER env var)
"""
    )

    @default('cell_parser')
    def cell_parser_default(self):
        # Fall back to the annotation-comment parser unless the env var is set.
        return os.getenv(self.cell_parser_env, 'kernel_gateway.notebook_http.cell.parser')

    # Intentionally not defining an env var option for a dict type.
    # Maps a kernel language name to its line-comment prefix; the None key is
    # the fallback used for unknown languages.
    comment_prefix = Dict({
        'scala': '//',
        None: '#'
    }, config=True, help='Maps kernel language to code comment syntax')

    allow_notebook_download_env = 'KG_ALLOW_NOTEBOOK_DOWNLOAD'
    allow_notebook_download = Bool(config=True,
        help="Optional API to download the notebook source code in notebook-http mode, defaults to not allow"
    )

    @default('allow_notebook_download')
    def allow_notebook_download_default(self):
        # Env var is compared literally: only the string 'True' enables it.
        return os.getenv(self.allow_notebook_download_env, 'False') == 'True'

    static_path_env = 'KG_STATIC_PATH'
    static_path = Unicode(None, config=True, allow_none=True,
        help="Serve static files on disk in the given path as /public, defaults to not serve"
    )

    @default('static_path')
    def static_path_default(self):
        return os.getenv(self.static_path_env)

    def __init__(self, *args, **kwargs):
        """Resolve the configured cell-parser module and build the API parser
        from the seed notebook's cells, using the comment prefix that matches
        the notebook's kernel language."""
        super(NotebookHTTPPersonality, self).__init__(*args, **kwargs)
        # Import the module to use for cell endpoint parsing
        cell_parser_module = importlib.import_module(self.cell_parser)
        # Build the parser using the comment syntax for the notebook language
        func = getattr(cell_parser_module, 'create_parser')
        try:
            kernel_language = self.parent.seed_notebook['metadata']['language_info']['name']
        except (AttributeError, KeyError):
            # No seed notebook or no language metadata; the default '#'
            # comment prefix is used below.
            kernel_language = None
        prefix = self.comment_prefix.get(kernel_language, '#')
        self.api_parser = func(parent=self, log=self.log,
                               comment_prefix=prefix,
                               notebook_cells=self.parent.seed_notebook.cells)
        self.kernel_language = kernel_language

    def init_configurables(self):
        """Create a managed kernel pool"""
        self.kernel_pool = ManagedKernelPool(
            self.parent.prespawn_count,
            self.parent.kernel_manager
        )

    def create_request_handlers(self):
        """Create handlers and redefine them off of the base_url path. Assumes
        init_configurables() has already been called, and that the seed source
        was available there.
        """
        handlers = []
        # Register the NotebookDownloadHandler if configuration allows
        if self.allow_notebook_download:
            path = url_path_join('/', self.parent.base_url, r'/_api/source')
            self.log.info('Registering resource: {}, methods: (GET)'.format(path))
            handlers.append((
                path,
                NotebookDownloadHandler,
                {'path': self.parent.seed_uri}
            ))
        # Register a static path handler if configuration allows
        if self.static_path is not None:
            path = url_path_join('/', self.parent.base_url, r'/public/(.*)')
            self.log.info('Registering resource: {}, methods: (GET)'.format(path))
            handlers.append((
                path,
                tornado.web.StaticFileHandler,
                {'path': self.static_path}
            ))
        # Discover the notebook endpoints and their implementations
        endpoints = self.api_parser.endpoints(self.parent.kernel_manager.seed_source)
        response_sources = self.api_parser.endpoint_responses(self.parent.kernel_manager.seed_source)
        if len(endpoints) == 0:
            raise RuntimeError('No endpoints were discovered. Check your notebook to make sure your cells are annotated correctly.')
        # Cycle through the (endpoint_path, source) tuples and register their handlers
        for endpoint_path, verb_source_map in endpoints:
            # Convert :param markers into regex groups, then anchor on base_url.
            parameterized_path = parameterize_path(endpoint_path)
            parameterized_path = url_path_join('/', self.parent.base_url, parameterized_path)
            self.log.info('Registering resource: {}, methods: ({})'.format(
                parameterized_path,
                list(verb_source_map.keys())
            ))
            response_source_map = response_sources[endpoint_path] if endpoint_path in response_sources else {}
            handler_args = { 'sources' : verb_source_map,
                'response_sources' : response_source_map,
                'kernel_pool' : self.kernel_pool,
                'kernel_name' : self.parent.kernel_manager.seed_kernelspec,
                'kernel_language' : self.kernel_language or ''
            }
            handlers.append((parameterized_path, NotebookAPIHandler, handler_args))
        # Register the swagger API spec handler
        path = url_path_join('/', self.parent.base_url, r'/_api/spec/swagger.json')
        handlers.append((
            path,
            SwaggerSpecHandler, {
                'notebook_path' : self.parent.seed_uri,
                'source_cells': self.parent.seed_notebook.cells,
                'cell_parser' : self.api_parser
            }))
        self.log.info('Registering resource: {}, methods: (GET)'.format(path))
        # Add the 404 catch-all last
        handlers.append(default_base_handlers[-1])
        return handlers

    def should_seed_cell(self, code):
        """Determines whether the given code cell source should be executed when
        seeding a new kernel."""
        # seed cells that are uninvolved with the presented API
        return (not self.api_parser.is_api_cell(code) and not self.api_parser.is_api_response_cell(code))

    def shutdown(self):
        """Stop all kernels in the pool."""
        self.kernel_pool.shutdown()
def create_personality(*args, **kwargs):
    """Factory entry point: build the notebook-http personality."""
    personality = NotebookHTTPPersonality(*args, **kwargs)
    return personality
| jupyter-server/kernel_gateway | kernel_gateway/notebook_http/__init__.py | __init__.py | py | 6,877 | python | en | code | 459 | github-code | 1 | [
{
"api_name": "traitlets.config.configurable.LoggingConfigurable",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "traitlets.Unicode",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 29,
"usage_type": "call"
},
{
"api... |
73981839393 | from django.urls import path
from .views import (case_detail, add_case, update_case,
delete_case, add_task,upload_files,
new_case, pending_list, completed_list, CaseList, CaseCreateView, CategoryListView,
delete_cat, cat_update, cat_add, add_argument, cat_detail, file_list,
delete_files, task_view, task_list_view, completed, delete_task, argument_list, argument_detail, arg_update, arg_delete, case_filter, complete_case,
court_list, add_court, court_detail, update_court, court_delete,
add_session, update_session, detail_session, delete_session,bill_case,
add_process, edit_task,generate_process,generate_process1,list_processes,process_detail,update_process,delete_process,add_to_archives,
upload_archives,update_archive,archive_detail,
add_action_report,list_action_reports,update_action,post_action_detail,delete_action,
add_rep,rep_detail,update_rep,delete_rep,rep_list,
case_history,
request_file,request_list,approve_request,make_request,all_files,start_timer,case_hours,
add_expense,ExpenseDetailView,ExpenseListView,update_expense,delete_expense,
RenumerationList,approve_renumeration,request_renumeration,
request_client_payment,update_client_payment,client_payment_list,client_payment_detail,edit_start_timer, addpercharge, PendingExpenseListView, accept_expense,
decline_expense,resend_expense,generate_invoice_total, generate_invoice_total_save,calendar,all_user_logs,all_user_logs_per_case,upload_terms_files,terms_files_list,terms_edit
)
# URL configuration for the "cases" app.  Patterns are grouped by feature
# area; order matters for Django's first-match resolution, so entries are
# documented in place rather than reorganised.
app_name = "cases"

urlpatterns = [
    path('cases/list/', CaseList.as_view(), name='case_list'),
    path('calendar/', calendar, name='calendar'),
    path('cases/list/filter-cases', case_filter, name='case_filter'),
    path("cases/pending-list", pending_list, name="pending_list"),
    path("cases/completed-list", completed_list, name="completed_list"),
    ############################### ARCHIVED CASES #####################################################
    path("cases/archived_cases", add_to_archives, name="add_archive"),
    path("cases/archives/<int:pk>/archive-details", archive_detail, name="archive_detail"),
    path("cases/archives/<int:pk>/update-archive", update_archive, name="archive_update"),
    path("cases/archives/<int:pk>/upload-files", upload_archives, name="archive_upload"),
    path("cases/<int:pk>/case-detail", case_detail, name="case_detail"),
    path("cases/add-case", add_case, name="add_case"),
    path("cases/<int:pk>/case-update", update_case, name="case_update"),
    path("cases/<int:pk>/case-delete", delete_case, name="case_delete"),
    path("cases/tasks/<int:pk>/add-task", add_task, name="add_task"),
    path("cases/files/<int:pk>/add-files", upload_files, name="upload_files"),
    ############################### TERMS OF ENGAGEMENT ########################################################
    path("cases/termsofengagement/<int:pk>/add-files", upload_terms_files, name="upload_terms_files"),
    path("cases/termsofengagement/<int:pk>/list-files", terms_files_list, name="terms_files_list"),
    path("cases/termsofengagement/terms_edit", terms_edit, name="terms_edit"),
    ############################### CATEGORY ########################################################
    path("cases/category/list", CategoryListView.as_view(), name="cat_list"),
    path("cases/category/<int:pk>/update",
         cat_update, name="cat_update"),
    path("cases/category/<int:pk>/delete", delete_cat, name="cat_delete"),
    path("cases/category/add", cat_add, name="cat_add"),
    path("cases/argument/<int:pk>/add", add_argument, name="add_arg"),
    path("cases/category/<int:pk>/detail", cat_detail, name="cat_detail"),
    path("cases/<int:pk>/case-files", file_list, name="file_list"),
    path("cases/<int:pk>/delete_file", delete_files, name="delete_file"),
    path("cases/tasks/<int:pk>/list", task_view, name="task_list"),
    path("cases/tasks/<int:pk>/list-tasks", task_list_view, name="list_tasks"),
    path("cases/tasks/<int:pk>/completed", completed, name="completed"),
    path("cases/tasks/<int:pk>/delete", delete_task, name="delete_task"),
    path("cases/<int:pk>/legal_arguments/list",
         argument_list, name="argument_list"),
    path("cases/<int:pk>/legal_arguments/detail",
         argument_detail, name="argument_detail"),
    path("cases/<int:pk>/legal_arguments/update",
         arg_update, name="argument_update"),
    path("cases/<int:pk>/legal_arguments/delete",
         arg_delete, name="argument_delete"),
    path("cases/<int:pk>/case-update-status",
         complete_case, name="complete_case"),
    ########################################################## COURT ######################################################################
    path('cases/courts/list', court_list, name='court_list'),
    path('cases/courts/<int:pk>/court-detail',
         court_detail, name="court_detail"),
    path('cases/courts/<int:pk>/court-update',
         update_court, name="court_update"),
    path('cases/courts/add', add_court, name="court_add"),
    path('cases/courts/<int:pk>/court-delete',
         court_delete, name="court_delete"),
    #################################################### COURT SESSIONS #################################################################
    path('cases/court-sessions/<int:pk>/add', add_session, name="session_add"),
    path('cases/court-sessions/<int:pk>/details',
         detail_session, name="session_detail"),
    path('cases/court-sessions/<int:pk>/update',
         update_session, name="session_update"),
    path('cases/court-sessions/<int:pk>/delete',
         delete_session, name="session_delete"),
    ############################################### PROCESSES ####################################
    path('cases/processes/add', add_process, name="process_add"),
    path('cases/processes/list', list_processes, name="list_process"),
    path('cases/processes/<int:pk>/generate-process', generate_process1, name="generate_process"),
    path('cases/processes/<int:pk>/process-detail', process_detail, name="process_detail"),
    path('cases/processes/<int:pk>/process-update', update_process, name="process_update"),
    path('cases/processes/<int:pk>/delete-process', delete_process, name="process_delete"),
    ############################################### BILL CASE ####################################
    path('cases/<int:pk>/bill-case', bill_case, name='bill_case'),
    path('cases/tasks/<int:pk>/edit-task', edit_task, name='edit_task'),
    ############################################### POST ACTION REPORT ##########################
    path('cases/post-action-report/<int:pk>/add-new', add_action_report, name="add_action"),
    path('cases/post-action-report/<int:pk>/report-list', list_action_reports, name='list_report'),
    path('cases/post-action-report/<int:pk>/report-detail', post_action_detail, name="action_detail"),
    path('cases/post-action-report/<int:pk>/update-report', update_action, name='action_update'),
    path('cases/post-action-report/<int:pk>/delete-report', delete_action, name='action_delete'),
    ##################################### REPRESENTATION ############################################
    path('cases/representation/add', add_rep, name="add_rep"),
    path('cases/representation/list', rep_list, name="rep_list"),
    path('cases/representation/<int:pk>/details', rep_detail, name="rep_detail"),
    path('cases/representation/<int:pk>/update', update_rep, name="update_rep"),
    path('cases/representation/<int:pk>/delete', delete_rep, name="delete_rep"),
    path("cases/<int:pk>/case-history", case_history, name='case_history'),
    ################################### ARCHIVE REQUESTS #########################################
    path('cases/archives/request', request_file, name='request_file'),
    path('cases/archives/requests/list', request_list, name='request_list'),
    path('cases/archives/<int:pk>/request', make_request, name='make_request'),
    path('cases/archives/<int:pk>/request/approve', approve_request, name='approve_request'),
    ################################### CASE FILES ###############################################
    path('cases/files/all-files', all_files, name="all_files"),
    # NOTE(review): route name "start_timers" (plural) looks like a typo for
    # "start_timer"; renaming would break existing reverse() callers.
    path('cases/start-timer', start_timer, name="start_timers"),
    path('cases/edit_start_timer', edit_start_timer, name="edit_start_timer"),
    path('cases/<int:pk>/timer/elapsed-time', case_hours, name="case_hours"),
    path('cases/addpercharge', addpercharge, name="addpercharge"),
    # NOTE(review): delete_case is also routed above as "cases/<int:pk>/case-delete";
    # this second route with a distinct name appears intentional but is redundant.
    path('cases/delete_case/<int:pk>', delete_case, name="delete_case"),
    ################################## EXPENSES #################################################
    path('cases/expenses/', ExpenseListView.as_view(), name='expenses'),
    path('cases/pending_expenses/', PendingExpenseListView.as_view(), name='pending_expenses'),
    path('cases/expenses/<int:pk>/detail', ExpenseDetailView.as_view(), name='expense_detail'),
    path('cases/expenses/<int:pk>/update', update_expense, name='update_expense'),
    path('cases/expenses/<int:pk>/delete', delete_expense, name='delete_expense'),
    path('cases/expenses/add-expense', add_expense, name='add_expense'),
    path('cases/accept_expense/<int:pk>', accept_expense, name='accept_expense'),
    path('cases/decline_expense/<int:pk>', decline_expense, name='decline_expense'),
    path('cases/resend_expense/<int:pk>', resend_expense, name='resend_expense'),
    ################################### Renumeration #############################################
    path('case/renumeration/list', RenumerationList.as_view(), name='renumerations'),
    path('case/renumeration/<int:pk>/approve', approve_renumeration, name='approve_renumeration'),
    path('case/renumeration/<int:pk>/request', request_renumeration, name='request_renumeration'),
    ################################## REQUEST CLIENT PAYMENT INFORMATION ########################
    path('cases/<int:pk>/client-payment', request_client_payment, name='request_payment_info'),
    path('cases/<int:pk>/update-client-payment', update_client_payment, name='update_payment_info'),
    path('cases/client-payment', client_payment_list, name='client_payment_list'),
    path('cases/<int:pk>/client-payment-details', client_payment_detail, name='client_payment_detail'),
    ################################## INVOICE ########################
    path('cases/generate_invoice_total/<int:pk>', generate_invoice_total, name='generate_invoice_total'),
    path('cases/generate_invoice_total_save/<int:pk>', generate_invoice_total_save, name='generate_invoice_total_save'),
    ############################################### USERLOGS ####################################
    path('userlogs/', all_user_logs, name='all_user_logs'),
    path('caselogs/<int:pk>', all_user_logs_per_case, name='all_user_logs_per_case'),
]
| succeed98/Lawyer | cases/urls.py | urls.py | py | 11,114 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "views.CaseList.as_view",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "views.CaseList",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.urls.pa... |
70645888353 | import os
import csv
from PIL import Image
import torch
from torch.utils.data import Dataset
from typing import Any, Callable, Optional, Tuple
class Emotions(Dataset):
    """FER-style facial-emotion dataset backed by a CSV of pixel strings."""

    def __init__(self, cvs_file, root_dir, transform=None):
        """Load every row of *cvs_file* (located under *root_dir*) into memory.

        Each row's " pixels" column (note the leading space in the header)
        holds 48*48 grey values; "emotion", when present, is the integer
        label (None otherwise).
        """
        self.root_dir = root_dir
        self.transform = transform
        csv_path = os.path.join(root_dir, cvs_file)
        samples = []
        with open(csv_path, 'r', newline='') as handle:
            for row in csv.DictReader(handle):
                pixels = torch.tensor(
                    [int(value) for value in row[' pixels'].split()],
                    dtype=torch.uint8,
                ).reshape(48, 48)
                label = int(row['emotion']) if 'emotion' in row else None
                samples.append((pixels, label))
        self._samples = samples

    def __len__(self):
        """Number of rows loaded from the CSV."""
        return len(self._samples)

    def __getitem__(self, idx: int):
        """Return ``(image, label)`` for row *idx*, applying ``transform`` if set."""
        pixel_tensor, label = self._samples[idx]
        image = Image.fromarray(pixel_tensor.numpy())
        if self.transform:
            image = self.transform(image)
        return image, label
| jump-orange/coms453-project | data.py | data.py | py | 1,167 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
... |
38418811597 | #!/usr/bin/env python
# coding: utf-8
# Data Analysis of Unemployment in India
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
# In[2]:
# importing the given dataset
# Load the raw dataset (local path from the original notebook export).
df = pd.read_csv("C:/Users/ASUS/Downloads/Unemployment in India.csv")
df  # notebook display artifact; no effect when run as a script

# Inspect missing values before cleaning.
df.isnull().sum()

# Replace missing values with 0.
# NOTE(review): the second fillna with column means is a no-op here, because
# every NaN was already replaced by 0 on the previous line.
df.fillna(0, inplace=True)
df.fillna(df.mean(), inplace=True)

df.isnull().sum()

# Normalise column headers to short snake-case-ish names.
df = df.rename(columns={df.columns[0]: 'Region', df.columns[3]: 'Unemployment_rate', df.columns[4]: 'Employed', df.columns[5]: 'labour_participation_rate', df.columns[6]: 'area'})
df.head()

df["Region"].unique()

# Second dataset: monthly unemployment rates up to 11/2020 (with coordinates).
df2 = pd.read_csv("C:/Users/ASUS/Downloads/Unemployment_Rate_upto_11_2020.csv")
df2

df2 = df2.rename(columns={df2.columns[0]: 'Region', df2.columns[3]: 'Unemployment_rate', df2.columns[4]: 'Employed', df2.columns[5]: 'labour_participation_rate', df2.columns[6]: 'area'})
df2.head()

# Correlation heatmap of the three numeric indicators.
heat_maps = df[['Unemployment_rate', 'Employed', 'labour_participation_rate']]
heat_maps = heat_maps.corr()
plt.figure(figsize=(12, 7))
sns.set_context('notebook', font_scale=1)
sns.heatmap(heat_maps, annot=True, cmap='winter');

# Histogram of unemployment rate, coloured by region.
df2.columns = ["Region", "Date", "Frequency", "Unemployment_rate", "Employed", "labour_participation_rate", "area", "longitude", "latitude"]
plt.figure(figsize=(14, 18))
plt.title("Unemployment_rate")
sns.histplot(x="Unemployment_rate", hue="Region", data=df)
plt.show()

# Bar chart: average number employed per region.
region = df2.groupby(["Region"])[['Unemployment_rate', "Employed", "labour_participation_rate"]].mean()
region = pd.DataFrame(region).reset_index()
fig = px.bar(region, x="Region", y="Employed", color="Region", title="Average Employed Rate by Region")
fig.update_layout(xaxis={'categoryorder': 'total descending'})
fig.show()

# Bar chart: average unemployment rate per region.
# NOTE(review): the chart title still says "Employed Rate" for this plot too.
region = df2.groupby(["Region"])[['Unemployment_rate', "Employed", "labour_participation_rate"]].mean()
region = pd.DataFrame(region).reset_index()
fig = px.bar(region, x="Region", y="Unemployment_rate", color="Region", title="Average Employed Rate by Region")
fig.update_layout(xaxis={'categoryorder': 'total descending'})
fig.show()

# Sunburst: unemployment rate broken down by area (inner ring) and state.
unemployment = df2[["Region", "area", "Unemployment_rate"]]
fig = px.sunburst(unemployment, path=['area', 'Region'], values='Unemployment_rate',
                  title='Unemployment rate in every State and Region', height=700)
fig.show()
| shrutiiiyadav/UNEMPLOYMENT-IN-INDIA-ANALYSIS-TASK2 | Unemployment in India Analysis.py | Unemployment in India Analysis.py | py | 2,761 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "matplotlib.p... |
71919922594 | # Read text from a file, and count the occurence of words in that text
# Example:
# count_words("The cake is done. It is a big cake!")
# --> {"cake":2, "big":1, "is":2, "the":1, "a":1, "it":1}
from collections import Counter
import re
def read_file_content(filename):
    """Open *filename* for reading and return its entire contents."""
    with open(filename, "r") as source:
        contents = source.read()
    return contents
def count_words(filename="./story.txt"):
    """Count word occurrences in the text file at *filename*.

    The text is lowercased and stripped of punctuation before counting, so
    "Cake!" and "cake" are tallied together.

    Example: for a file containing "The cake is done. It is a big cake!"
    the result is Counter({"cake": 2, "is": 2, "the": 1, ...}).

    The path parameter defaults to ./story.txt, preserving the original
    zero-argument call while allowing any file to be counted.
    """
    with open(filename, "r") as source:
        text = source.read()
    # Lowercase, then drop every character that is not a word char or space.
    text = re.sub(r'[^\w\s]', '', text.lower())
    return Counter(text.split())
print(count_words()) | Hephzihub/Python | Reading-Text-Files/main2.py | main2.py | py | 610 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.sub",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 23,
"usage_type": "call"
}
] |
43684655986 | import math
import string
from nltk.corpus import stopwords
from collections import Counter
from nltk.stem.porter import *
from sklearn.feature_extraction.text import TfidfVectorizer
import jieba
def tfidf_calc(text, corpus, k, file_path):
    """Pick k candidate keywords from *corpus* and write synonym-substituted
    variants of *text* to *file_path*, one variant per line.

    NOTE(review): get_feature_names() returns the vocabulary in alphabetical
    order, not ranked by tf-idf weight, so reversing it and taking the first
    k entries does not actually select the highest-scoring terms — confirm
    whether this is intended.
    """
    # Example corpus shape:
    #   corpus = ['This is the first document.',
    #             'This is the second second document.', ...]
    tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
    # tv_fit (the document-term matrix) is unused; fitting the vocabulary is
    # the side effect needed here.
    tv_fit = tv.fit_transform(corpus)
    new_corpus = tv.get_feature_names()[-1::-1]
    print(new_corpus)
    with open(file_path, 'w', encoding='utf_8') as f:
        for i in range(k):
            # For each candidate keyword, write one variant of the text per
            # synonym found; keywords with no synonyms are skipped.
            undetermined_words = synonym_replace(new_corpus[i])
            if len(undetermined_words) == 0:
                continue
            for j in range(len(undetermined_words)):
                f.write(text.replace(
                    new_corpus[i], undetermined_words[j]) + '\n')
def divide_text(text):
    """Segment Chinese *text* with jieba and join the tokens with spaces."""
    return ' '.join(jieba.cut(text, cut_all=False))
def synonym_replace(word):
    """Look *word* up in synonym.txt and return its synonyms.

    Each line of synonym.txt is "<group-code> <word> <word> ...".  The first
    group containing *word* wins, and every other member of that group is
    returned in file order.  An empty list means no group matched.

    Rewritten from a flag-and-triple-break ladder (with duplicated
    ``if flag: break`` checks) into a straightforward early return.
    """
    with open('synonym.txt', 'r', encoding='utf_8') as f:
        # Drop the leading group code from every line.
        synonym_groups = [line.strip().split()[1:] for line in f]
    for group in synonym_groups:
        if word in group:
            # Return all group members except the queried word itself.
            return [candidate for candidate in group if candidate != word]
    return []
if __name__ == '__main__':
    # Read the query sentences (one per line), segment each with jieba, and
    # write synonym-substituted variants of the first query to new_query.txt.
    with open('query.txt', 'r', encoding='utf_8') as f:
        text = f.readlines()
    corpus = list(map(divide_text, text))
    tfidf_calc(text[0], corpus, 3, 'new_query.txt')
| henry-nju/- | main-match.py | main-match.py | py | 2,000 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "jieba.cut",
"line_number": 30,
"usage_type": "call"
}
] |
6484753583 | """
@Time : 2021/1/31 18:23
@Author : Steven Chen
@File : 8.selenium_cookies.py
@Software: PyCharm
"""
# 目标:
# 方法:
from selenium import webdriver
# Open Baidu in a Selenium-driven Chrome browser and collect the session
# cookies into a {name: value} dict.
url = 'https://www.baidu.com'
driver = webdriver.Chrome()  # requires a chromedriver binary on PATH
driver.get(url)
# get_cookies() returns a list of cookie dicts; keep just name -> value.
cookies = {data["name"]: data["value"] for data in driver.get_cookies()}
print(cookies)
| PandaCoding2020/pythonProject | SpiderLearning/3.selenium/8.selenium_cookies.py | 8.selenium_cookies.py | py | 325 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 13,
"usage_type": "name"
}
] |
21701900747 | from decimal import Decimal
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.util import SpatialFunction
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.utils import six
from sql_server.pyodbc.operations import DatabaseOperations
from .models import SpatialRefSys
class MSSqlBoolMethod(SpatialFunction):
    """SQL Server (non-static) spatial functions are treated as methods,
    for eg g.STContains(p)"""
    # SQL Server spatial predicates return a bit, so the template compares
    # against 1 to form a boolean expression usable in a WHERE clause.
    sql_template = '%(geo_col)s.%(function)s(%(geometry)s) = 1'

    def __init__(self, function, **kwargs):
        super(MSSqlBoolMethod, self).__init__(function, **kwargs)
class MSSqlDistanceFunc(SpatialFunction):
    """Implements distance comparison lookups, eg distance_lte"""
    # The comparison operator and right-hand value are interpolated around the
    # STDistance() call, e.g. "col.STDistance(geom) <= %s".
    sql_template = ('%(geo_col)s.%(function)s(%(geometry)s) '
                    '%(operator)s %(result)s')

    def __init__(self, op):
        # *op* is the SQL comparison operator (e.g. '<='); the distance value
        # itself is bound later through the '%s' result placeholder.
        super(MSSqlDistanceFunc, self).__init__('STDistance',
                                                operator=op,
                                                result='%s')
class MSSqlBBBoolMethod(MSSqlBoolMethod):
    """SQL Server has no native bounding-box methods, but we can emulate
    them with a slightly more complicated expression.  The call will
    be translated into something like

        col.STEnvelope().STOverlaps(geom.STEnvelope())

    where STEnvelope() first simplifies the geometries to their
    bounding rectangles."""
    sql_template = '%(geo_col)s.STEnvelope().%(function)s(%(geometry)s.STEnvelope()) = 1'

    def __init__(self, function, **kwargs):
        # FIX: the original called super(MSSqlBoolMethod, self), a copy-paste
        # slip from the parent class that skips MSSqlBoolMethod in the MRO.
        # Harmless today (the parent __init__ only forwards), but it would
        # silently bypass any logic later added to MSSqlBoolMethod.__init__.
        super(MSSqlBBBoolMethod, self).__init__(function, **kwargs)
class MSSqlAdapter(str):
    """This adapter works around an apparent bug in the pyodbc driver
    itself.  We only require the wkt adapter, but if we use
    django.contrib.gis.db.backends.adapter.WKTAdapter then
    cursor.execute() fails because it doesn't call str() on unrecognised
    types.  So we make sure that our adapter *is* a string.

    The instance's text is the geometry's WKT; the SRID rides along as an
    attribute.
    """

    def __new__(self, geom):
        # str is immutable, so the value must be supplied in __new__.
        geostr = str.__new__(self, geom.wkt)
        geostr.srid = geom.srid
        return geostr

    def __eq__(self, other):
        # Equal only to another adapter with identical WKT text AND SRID.
        if not isinstance(other, MSSqlAdapter):
            return False
        return super(MSSqlAdapter, self).__eq__(other) and \
            self.srid == other.srid

    # FIX: defining __eq__ sets __hash__ to None on Python 3, which made
    # adapters unhashable (e.g. unusable as dict keys / set members).
    # Restore the inherited str hash explicitly.
    __hash__ = str.__hash__

    def prepare_database_save(self, unused):
        # Django hook: the adapter is already in its database-ready form.
        return self
# Valid distance types and substitutions: the second element of a distance
# lookup tuple may be a Decimal, a Distance object, a float, or any int type.
dtypes = (Decimal, Distance, float) + six.integer_types
class MSSqlOperations(DatabaseOperations, BaseSpatialOperations):
    """Spatial operations for the SQL Server backend.

    Maps GeoDjango's geographic lookup names onto SQL Server's method-style
    spatial functions (e.g. ``col.STContains(geom) = 1``) and supplies the
    aggregate and conversion hooks GeoDjango expects.
    """
    name = 'SQL Server'
    # SELECT template used to return a geometry column as WKT text.
    select = '%s.STAsText()'
    Adapter = MSSqlAdapter
    Adaptor = Adapter  # Backwards-compatibility alias.
    compiler_module = 'django_pyodbc_gis.compiler'
    geometry = True
    geography = True

    # Clearly a bald-faced lie, but the limitations of SQL Server are
    # actually similar to those of mysql, and coupled with some
    # baked-in mysql checks in the framework this lie enables the
    # admin to run.  See geometry_columns() and spatial_ref_sys().
    mysql = True
    # NOTE: we should get a default added for core code
    mssql = True

    # Full set of lookup types GeoDjango defines; the subset implemented
    # for this backend appears in the dicts below.
    # 'bbcontains'
    # 'bboverlaps'
    # 'contained'
    # 'contains'
    # 'contains_properly'
    # 'coveredby'
    # 'covers'
    # 'crosses'
    # 'disjoint'
    # 'distance_gt'
    # 'distance_gte'
    # 'distance_lt'
    # 'distance_lte'
    # 'dwithin'
    # 'equals'
    # 'exact'
    # 'intersects'
    # 'overlaps'
    # 'relate'
    # 'same_as'
    # 'touches'
    # 'within'
    # 'left'
    # 'right'
    # 'overlaps_left'
    # 'overlaps_right'
    # 'overlaps_above'
    # 'overlaps_below'
    # 'strictly_above'
    # 'strictly_below'
    geometry_functions = {
        'bbcontains': MSSqlBBBoolMethod('STContains'),
        'bboverlaps': MSSqlBBBoolMethod('STOverlaps'),
        'contained': MSSqlBBBoolMethod('STWithin'),
        'contains': MSSqlBoolMethod('STContains'),
        'crosses': MSSqlBoolMethod('STCrosses'),
        'disjoint': MSSqlBoolMethod('STDisjoint'),
        'equals': MSSqlBoolMethod('STEquals'),  # can we also implement exact, same_as like this?
        'intersects': MSSqlBoolMethod('STIntersects'),
        'overlaps': MSSqlBoolMethod('STOverlaps'),
        'touches': MSSqlBoolMethod('STTouches'),
        'within': MSSqlBoolMethod('STWithin'),
    }

    # Distance lookups map to (function, accepted-parameter-types) tuples;
    # spatial_lookup_sql detects the tuple form and validates accordingly.
    distance_functions = {
        'distance_gt': (MSSqlDistanceFunc('>'), dtypes),
        'distance_gte': (MSSqlDistanceFunc('>='), dtypes),
        'distance_lt': (MSSqlDistanceFunc('<'), dtypes),
        'distance_lte': (MSSqlDistanceFunc('<='), dtypes),
    }
    geometry_functions.update(distance_functions)

    geography_functions = {
        'contains': MSSqlBoolMethod('STContains'),
        'disjoint': MSSqlBoolMethod('STDisjoint'),
        'equals': MSSqlBoolMethod('STEquals'),
        'intersects': MSSqlBoolMethod('STIntersects'),
        'overlaps': MSSqlBoolMethod('STOverlaps'),
        'within': MSSqlBoolMethod('STWithin'),
    }
    geography_functions.update(distance_functions)

    gis_terms = set(geometry_functions) | set(['isnull'])

    # SQL Server aggregate function names; looked up via getattr() in
    # spatial_aggregate_sql (attribute name = lowercased aggregate name).
    collect = 'CollectionAggregate'
    extent = 'EnvelopeAggregate'
    unionagg = 'UnionAggregate'
    distance = 'STDistance'
    valid_aggregates = dict([(k, None) for k in
                             ('Collect', 'Extent', 'Union')])

    def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
        # Render one spatial lookup into SQL.  `lvalue` is the (alias, column,
        # db_type) triple; `qn` is the name-quoting function.
        alias, col, db_type = lvalue

        geo_col = '%s.%s' % (qn(alias), qn(col))

        # Validate the lookup against whichever function map applies.
        if field.geography:
            if lookup_type not in self.geography_functions:
                raise TypeError("Got invalid lookup_type for geography: %s" %
                                lookup_type)
        else:
            if lookup_type not in self.geometry_functions:
                raise TypeError("Got invalid lookup_type for geometry: %s" %
                                lookup_type)

        # NOTE(review): 'isnull' is in gis_terms but absent from both function
        # maps, so the validation above appears to raise before this branch
        # can ever run -- verify intended behaviour.
        if lookup_type == 'isnull':
            return "%s IS %sNULL" % (geo_col, ('' if value else 'NOT ')), []
        else:
            if field.geography:
                op = self.geography_functions[lookup_type]
            else:
                op = self.geometry_functions[lookup_type]
            # if lookup_type is a tuple then we expect the value to be
            # a tuple as well:
            if isinstance(op, tuple):
                dist_op, arg_type = op
                # Ensuring that a tuple _value_ was passed in from the user
                if not isinstance(value, tuple):
                    raise ValueError('Tuple required for `%s` lookup type.' %
                                     lookup_type)
                if len(value) != 2:
                    raise ValueError('2-element tuple required for %s lookup type.' %
                                     lookup_type)
                # Ensuring the argument type matches what we expect.
                if not isinstance(value[1], arg_type):
                    raise ValueError('Argument type should be %s, got %s instead.' %
                                     (arg_type, type(value[1])))
                geom = value[0]
                return dist_op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
            return op.as_sql(geo_col, self.get_geom_placeholder(field, value))

    def check_aggregate_support(self, aggregate):
        """
        Checks if the given aggregate name is supported (that is, if it's
        in `self.valid_aggregates`).
        """
        agg_name = aggregate.__class__.__name__
        return agg_name in self.valid_aggregates

    def spatial_aggregate_sql(self, agg):
        """
        Returns the spatial aggregate SQL template and function for the
        given Aggregate instance.
        """
        agg_name = agg.__class__.__name__
        if not self.check_aggregate_support(agg):
            # NOTE(review): "implmented" typo in this runtime message.
            raise NotImplementedError('%s spatial aggregate is not implmented '
                                      'for this backend.' % agg_name)
        agg_name = agg_name.lower()
        if agg_name == 'union':
            agg_name += 'agg'
        # We need to namespace the function depending on whether it's
        # for a geography or geometry (which requires digging into the
        # Aggregate), but the function name is the same for both:
        ns = 'geography' if agg.source.geography else 'geometry'
        sql_template = ns + '::%(function)s(%(field)s).ToString()'
        sql_function = getattr(self, agg_name)
        return sql_template, sql_function

    def convert_extent(self, poly):
        """
        Returns a 4-tuple extent for the `Extent` aggregate by converting
        the bounding box text returned by SQL Server (`poly` argument), for
        example: "POLYGON ((0 0, 2 0, 2 3, 0 3, 0 0))".
        """
        # Strip "POLYGON ((" and "))", then read the 1st (min) and 3rd (max)
        # corners of the ring; assumes axis-aligned envelope output.
        crnrs = poly[10:-2].split(',')
        xmin, ymin = map(float, crnrs[0].strip().split(' '))
        xmax, ymax = map(float, crnrs[2].strip().split(' '))
        return xmin, ymin, xmax, ymax

    def convert_geom(self, hex, geo_field):
        """
        Converts the geometry returned from aggregate queries.
        """
        # NOTE(review): parameter name shadows the builtin hex().
        if hex:
            return Geometry(hex)
        else:
            return None

    # GeometryField operations
    def geo_db_type(self, f):
        # We only have the one geometry type (especially since we
        # don't currently support geography):
        return 'geometry'

    def get_distance(self, f, value, lookup_type):
        """
        Returns the distance parameters for the given geometry field,
        lookup value, and lookup type.  This is based on the Spatialite
        backend, since we don't currently support geography operations.
        """
        if not value:
            return []
        value = value[0]
        if isinstance(value, Distance):
            if f.geodetic(self.connection):
                raise ValueError('The SQL Server backend does not support '
                                 'distance queries on geometry fields with '
                                 'a geodetic coordinate system. Distance '
                                 'objects; use a numeric value of your '
                                 'distance in degrees instead.')
            else:
                # Convert the Distance to the units of the field's SRS.
                dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
        else:
            dist_param = value
        return [dist_param]

    def get_geom_placeholder(self, f, value):
        """
        Because SQL Server does not support spatial transformations,
        there is no need to modify the placeholder based on the
        contents of the given value.  We do need to specify the SRID
        however, since this argument is required.
        """
        if hasattr(value, 'expression'):
            placeholder = self.get_expression_column(value)
        else:
            ns = 'geography' if f.geography else 'geometry'
            placeholder = '%s::STGeomFromText(%%s,%s)' % (ns, f.srid)
        return placeholder

    # Routines for getting the OGC-compliant models --- SQL Server
    # does not have OGC-compliant tables
    def geometry_columns(self):
        raise NotImplementedError

    def spatial_ref_sys(self):
        return SpatialRefSys
| condense/django-pyodbc-gis | django_pyodbc_gis/operations.py | operations.py | py | 11,306 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "django.contrib.gis.db.backends.util.SpatialFunction",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.contrib.gis.db.backends.util.SpatialFunction",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "decimal.Decimal",
"line_number": 72,
... |
24666495868 | # coding:utf-8
import serial
import time
import atexit
import signal
class Laser(object):
    """Driver for a serial-attached LiDAR unit.

    Sends single-byte start/stop commands and parses the device's binary
    protocol: one revolution is 60 packets of 42 bytes, each packet covering
    6 degrees with six 6-byte per-degree readings.
    """

    def __init__(self, com):
        self.rate = 230400          # fixed baud rate for this device
        self.com = com              # serial port name, e.g. 'COM3' or '/dev/ttyUSB0'
        self.port = serial.Serial(port=self.com, baudrate=self.rate, timeout=2)
        self.stop_cmd = b'e'        # device command: stop streaming
        self.start_cmd = b'b'       # device command: begin streaming
        self.range = 42             # bytes per packet
        self.data_len = 42 * 60     # bytes per full 360-degree revolution
        self.item_len = 6           # bytes per single-degree reading
        self.rpm = 0                # rotation speed from the latest good packet
        self.avr_rpm = 0            # average rpm over the latest revolution
        # Make sure the device is stopped on interpreter exit or termination.
        atexit.register(self.stop)
        signal.signal(signal.SIGINT, lambda x, y: self.stop())
        signal.signal(signal.SIGTERM, lambda x, y: self.stop())

    def start(self):
        # Begin streaming; reopen the port if it was closed by stop().
        if not self.port.is_open:
            self.port.open()
        self.port.write(self.start_cmd)

    def stop(self):
        # Stop streaming and release the port.
        if not self.port.is_open:
            self.port.open()
        self.port.write(self.stop_cmd)
        self.port.close()

    def main_loop(self):
        """Continuously read revolutions and print every per-degree reading."""
        print("Ctrl+c to stop!")
        while self.port.is_open:
            for data in self.read_data():
                print(data)
            time.sleep(0.02)
            print("-" * 30)

    def read_data(self):
        """Generator: yield (deg, length_m, intensity, rpm, rpms) tuples for
        one full revolution, skipping packets that fail the checksum."""
        sync_flag = False
        head_flag = False
        # Scan for the start-of-revolution packet: the 0-degree packet begins
        # 0xfa 0xa0; subsequent packets are 0xfa 0xa1, 0xfa 0xa2, ...
        while not sync_flag or not head_flag:
            b = self.port.read(1)
            if b and b[0] == 0xfa:
                sync_flag = True
                head_flag = False
            elif b and sync_flag and b[0] == 0xa0:
                head_flag = True
            else:
                sync_flag = False
                head_flag = False
        # Read the rest of the revolution and re-prepend the consumed header.
        data = self.port.read(self.data_len - 2)
        data = b"\xfa\xa0" + data
        rpms = 0
        frame_ok = 0
        for index, split in enumerate(range(0, self.data_len, self.range)):
            seg = data[split:split + self.range]  # one 42-byte packet
            check_sum = (0xff - sum(seg[:40])) & 0xff  # checksum over first 40 bytes
            # Bytes 40 and 41 both carry the checksum; all three must agree.
            if seg[40] != seg[41] or seg[40] != check_sum or seg[41] != check_sum:
                print(
                    "bad frame.checksum:{}, num1:{} num2:{}".format(
                        check_sum, seg[40], seg[41]))
                continue
            if seg[0] == 0xfa and seg[1] == (
                    0xa0 + index):  # seg[0] start marker 0xfa; seg[1] angle index: degrees = (seg[1]-0xa0)*6 + offset
                # Bytes 2-3: little-endian rotation speed in 0.1 rpm units.
                self.rpm = ((seg[3] << 8) + seg[2]) / 10.0
                rpms += self.rpm
                frame_ok += 1
                for r_index, deg_split in enumerate(
                        range(4, 40, self.item_len)):
                    deg_data = seg[deg_split: deg_split +
                                   self.item_len]  # one 6-byte single-degree reading
                    intensity = (deg_data[1] << 8) + deg_data[0]  # reflection intensity
                    length = (
                        (deg_data[3] << 8) + deg_data[2]) / 1000.0  # distance, mm converted to m
                    deg = (seg[1] - 0XA0) * 6 + r_index  # angle in degrees
                    yield deg, length, intensity, self.rpm, rpms
            else:
                print("Data miss!")
        # NOTE(review): if every packet in the revolution is bad, frame_ok is 0
        # and this raises ZeroDivisionError -- confirm whether that can occur.
        self.avr_rpm = rpms / frame_ok
| reece15/hls_flcd2_python | Laser.py | Laser.py | py | 3,261 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "serial.Serial",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "atexit.register",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "signal.signal",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "signal.SIGINT",
"line... |
2929396805 | from PyQt5 import QtCore, QtGui, QtWidgets
import os
import ctypes
from PyQt5.QtCore import QCoreApplication
import threading
from time import sleep
from PyQt5.QtGui import QCursor, QWindow
from PyQt5.QtCore import Qt, QPoint
from PyQt5.QtWidgets import (QMessageBox,QApplication, QWidget, QToolTip, QPushButton,
QDesktopWidget, QMainWindow, QAction, qApp, QToolBar, QVBoxLayout,
QComboBox,QLabel,QLineEdit,QGridLayout,QMenuBar,QMenu,QStatusBar,
QTextEdit,QDialog,QFrame,QProgressBar
)
from SystemDatabaseLogin import Ui_OtherWindow
class Ui_MainWindow(object):
    """Qt-Designer-style builder for a frameless login window.

    Widgets: red close dot, green minimise dot, username/password fields,
    a Login button and a title label.  Credentials are checked against
    hard-coded values in openwindow().
    """

    def setupUi(self, MainWindow):
        # Frameless, translucent top-level window; the inner frame supplies
        # the visible rounded rectangle.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1099, 786)
        MainWindow.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowSystemMenuHint | Qt.WindowMinimizeButtonHint | Qt.WindowMaximizeButtonHint)
        MainWindow.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.frame = QtWidgets.QFrame(self.centralwidget)
        self.frame.setGeometry(QtCore.QRect(0, 0, 1101, 791))
        self.frame.setStyleSheet("background-color: rgb(39, 68, 114);\n"
                                 "border-radius:3px;")
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        # Red dot: quits the whole application.
        self.pushButton = QtWidgets.QPushButton(self.frame)
        self.pushButton.clicked.connect(QCoreApplication.instance().quit)
        self.pushButton.setGeometry(QtCore.QRect(1050, 20, 21, 21))
        self.pushButton.setStyleSheet("QPushButton{\n"
                                      "\n"
                                      "background-color: rgb(255, 52, 34);\n"
                                      "border-radius:10px;\n"
                                      "}\n"
                                      "\n"
                                      "QPushButton::hover{\n"
                                      "background-color: rgb(204, 65, 22);\n"
                                      "}")
        self.pushButton.setText("")
        self.pushButton.setObjectName("pushButton")
        # Green dot: minimises the window (see min()).
        self.pushButton_2 = QtWidgets.QPushButton(self.frame)
        self.pushButton_2.clicked.connect(self.min)
        self.pushButton_2.setGeometry(QtCore.QRect(1010, 20, 21, 21))
        self.pushButton_2.setStyleSheet("QPushButton{\n"
                                        "background-color: rgb(19, 221, 5);\n"
                                        "border-radius:10px;\n"
                                        "}\n"
                                        "\n"
                                        "QPushButton::hover{\n"
                                        "\n"
                                        " background-color: rgb(118, 182, 21);\n"
                                        "}")
        self.pushButton_2.setText("")
        self.pushButton_2.setObjectName("pushButton_2")
        # Username entry.
        self.lineEdit = QtWidgets.QLineEdit(self.frame)
        self.lineEdit.setGeometry(QtCore.QRect(310, 340, 521, 41))
        self.lineEdit.setStyleSheet("background-color: rgb(203, 203, 203);\n"
                                    "border-radius:2px;\n"
                                    "font: 12pt \"Segoe UI\";\n"
                                    "color: rgb(131, 131, 131);")
        self.lineEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.lineEdit.setObjectName("lineEdit")
        # Password entry (masked input).
        self.lineEdit_2 = QtWidgets.QLineEdit(self.frame)
        self.lineEdit_2.setGeometry(QtCore.QRect(310, 400, 521, 41))
        self.lineEdit_2.setStyleSheet("background-color: rgb(203, 203, 203);\n"
                                      "border-radius:2px;\n"
                                      "font: 12pt \"Segoe UI\";\n"
                                      "color: rgb(131, 131, 131);")
        self.lineEdit_2.setEchoMode(QtWidgets.QLineEdit.Password)
        self.lineEdit_2.setAlignment(QtCore.Qt.AlignCenter)
        self.lineEdit_2.setObjectName("lineEdit_2")
        # Login button: triggers credential check in openwindow().
        self.pushButton_3 = QtWidgets.QPushButton(self.frame)
        self.pushButton_3.setGeometry(QtCore.QRect(310, 460, 521, 41))
        self.pushButton_3.clicked.connect(self.openwindow)
        self.pushButton_3.setStyleSheet("QPushButton{\n"
                                        "background-color: rgb(65, 114, 159);\n"
                                        "border-radius:3px;\n"
                                        "color: rgb(235, 235, 235);\n"
                                        "font: 11pt \"Segoe UI\";\n"
                                        "}\n"
                                        "QPushButton::hover{\n"
                                        "border:2px solid rgb(39, 68, 114);\n"
                                        "border-radius:3px;\n"
                                        "}")
        self.pushButton_3.setObjectName("pushButton_3")
        # Title label.
        self.label = QtWidgets.QLabel(self.frame)
        self.label.setGeometry(QtCore.QRect(280, 240, 591, 61))
        self.label.setStyleSheet("color: rgb(226, 226, 226);\n"
                                 "font: 75 24pt \"Segoe UI\";\n"
                                 "")
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        MainWindow.setCentralWidget(self.centralwidget)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (kept separate for Qt translation)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "System"))
        self.lineEdit.setPlaceholderText(_translate("MainWindow", "Username . ."))
        self.lineEdit_2.setPlaceholderText(_translate("MainWindow", "Passsword . ."))
        self.pushButton_3.setText(_translate("MainWindow", "Login"))
        self.label.setText(_translate("MainWindow", "Database System Local"))

    def min(self):
        # Relies on the module-level MainWindow created in the __main__ block.
        MainWindow.showMinimized()

    def openwindow(self):
        """Check the hard-coded credentials; on success open the database
        window and hide this one, otherwise show an error on the button."""
        # NOTE(review): credentials are hard-coded in source -- acceptable for
        # a demo, not for production use.
        if str(self.lineEdit.text()) == "admin" and str(self.lineEdit_2.text()) == "1234":
            self.window = QtWidgets.QMainWindow()
            self.ui = Ui_OtherWindow()
            self.ui.setupUi(self.window)
            MainWindow.hide()
            self.window.show()
        else:
            _translate = QtCore.QCoreApplication.translate
            self.pushButton_3.setText(_translate("MainWindow", "Username or Password Incorrect!!"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| ipys/jopmanage | SystemDatabase.py | SystemDatabase.py | py | 5,703 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyQt5.QtCore.Qt.FramelessWindowHint",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.WindowSystemMenuHint",
"line_number": 19,
"usage_type": "attribu... |
7611745222 | import gui
import pygame, sys
from pygame.locals import *
from tkinter import filedialog
# Window size in pixels and background fill colour.
WINDOW_DIMENSIONS = (700, 500)
BLACK = (0, 0, 0)

# Module-level pygame setup shared by every menu function below.
main_clock = pygame.time.Clock()
pygame.init()
pygame.display.set_caption('Play Aid!')
window = pygame.display.set_mode(WINDOW_DIMENSIONS, 0, 32)
font = pygame.font.SysFont("comicsansms", 20)
def exit_manager(events, sub_menu=False):
    """Handle quit/escape events for a menu loop.

    At the top level (sub_menu=False) a window-close event or the Escape
    key shuts pygame down and exits the process.  In a sub-menu the same
    events instead signal the caller to leave the current screen.

    Returns False when a sub-menu should close, True otherwise; callers
    use the result directly as their loop condition.
    """
    for event in events:
        if event.type == QUIT:
            if not sub_menu:
                pygame.quit()
                sys.exit()
            else:
                return False
        if event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                if not sub_menu:
                    pygame.quit()
                    sys.exit()
                else:
                    # FIX: was a bare `return` (None).  It worked by accident
                    # because None is falsy, but now both exit paths return
                    # the same documented value.
                    return False
    return True
def main_menu():
    """Top-level menu loop: draws the "New Toy" button and opens the
    new-toy screen when pressed.  Never returns; quitting the program is
    handled inside exit_manager()."""
    add_button = gui.Button(window, name="New Toy", pos=(10, 10))
    button_group = pygame.sprite.Group(add_button)
    while True:
        window.fill(BLACK)
        event_list = pygame.event.get()
        button_group.update(event_list)
        pygame.display.update()
        main_clock.tick(60)  # cap at 60 FPS
        exit_manager(event_list)
        if add_button.pressed:
            # Reset the latch so one click opens the sub-menu exactly once.
            add_button.pressed = False
            new_toy_menu()
def new_toy_menu():
    """Sub-menu for creating a toy: a name text-input box plus a
    "Pick Image" button.  Runs until exit_manager() reports the user
    backed out (Escape or window close returns False here)."""
    run = True
    text_input_box = gui.TextInputBox(10, 10, 400, font, default_text="New toy name?")
    group = pygame.sprite.Group(text_input_box)
    browser_button = gui.Button(window, name="Pick Image", pos=(10, 60))
    button_group = pygame.sprite.Group(browser_button)
    while run:
        window.fill(BLACK)
        event_list = pygame.event.get()
        # NOTE(review): the button group is only update()d, never draw()n here;
        # presumably gui.Button renders itself onto `window` inside update() --
        # confirm in the gui module.
        button_group.update(event_list)
        run = exit_manager(event_list, sub_menu=True)
        group.update(event_list)
        main_clock.tick(60)
        group.draw(window)
        pygame.display.flip()
if __name__ == "__main__":
main_menu()
| daveymclain/play_aid_app | main.py | main.py | py | 1,890 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.time.Clock",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pygame.init",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_capti... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.