seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
30461892433 | import csv
import base64
import pprint
import mysql.connector
from time import sleep as s
from functions import files, getLinesFromFile, getIPs, nmapScan, toLogFile
#fromLogs
#+---------------+--------------+------+-----+-------------------+-------------------+
#| Field | Type | Null | Key | Default | Extra |
#+---------------+--------------+------+-----+-------------------+-------------------+
#| id | int | NO | PRI | NULL | auto_increment |
#| logFile | varchar(100) | NO | | NULL | |
#| ipAddr | varchar(30) | NO | | NULL | |
#| timeSubmitted | timestamp | NO | | CURRENT_TIMESTAMP | DEFAULT_GENERATED |
#| dateSubmitted | datetime | NO | | CURRENT_TIMESTAMP | DEFAULT_GENERATED |
#+---------------+--------------+------+-----+-------------------+-------------------+
def c1():
file = "/home/sam/pull/test.txt"
with open(file, 'r') as f:
coded = f.readline()
f.close()
temp1 = base64.b64decode(coded)
temp2 = temp1.decode('utf-8')
db = mysql.connector.connect(
host="localhost",
passwd = temp2,
user="localUser1",
database="main",
auth_plugin='mysql_native_password'
)
return db
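# A sketch (assumption) of how the credentials file read by c1() could have been produced,
# i.e. the inverse of the base64 decode above:
#   with open("/home/sam/pull/test.txt", "w") as f:
#       f.write(base64.b64encode("the-db-password".encode("utf-8")).decode("utf-8"))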
def getAllIPs():
db = c1()
cursor = db.cursor()
cursor.execute("SELECT ipAddr FROM fromLogs;")
temp1 = cursor.fetchall()
    final = []
    for row in temp1:
        # each row is a one-element tuple like ('1.2.3.4',); strip the tuple punctuation
        ip = str(row).strip("[(',')]")
        final.append(ip)
    db.close()
    return final
def getMultiples(list1):
db = c1()
cursor = db.cursor()
size = len(list1)
dups = []
x = 0
while(x != size):
temp1 = str(list1[x])
cursor.execute("SELECT ipAddr FROM fromLogs WHERE ipAddr = (%s);", (temp1,))
t2 = cursor.fetchall()
if(len(t2) != 1):
print("is a dup ", t2)
getFiles(str(t2.pop()))
else:
print("single entry ", t2)
x += 1
def getFiles(ip):
db = c1()
cursor = db.cursor()
ip = ip.strip("[(',')]")
cursor.execute("SELECT logFile FROM fromLogs WHERE ipAddr = (%s);", (ip,))
t2 = cursor.fetchall()
pprint.pprint(t2)
def toCSV(ips):
    with open("/home/sam/Documents/ips.csv", 'w') as f:
        wr = csv.writer(f, delimiter=',')
        # wrap each IP in a list so it lands in a single column instead of one character per cell
        wr.writerows([[ip] for ip in ips])
###########################################
# created by Samuel Schatz #
# github: https://github.com/sschatz1997 #
# email: sjschatz@captechu.edu #
########################################### | sschatz1997/Sams_website | py_MySQL/IPcount.py | IPcount.py | py | 2,677 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "base64.b64decode",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector.connect",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 27,
"usage_type": "attribute"
},
{
... |
3225854902 | #!/usr/bin/python2.7
from functools import wraps
from flask import Flask, request, jsonify, Response, abort, json
import MySQLdb, collections
app = Flask(__name__)
MYSQL_DATABASE_HOST = "127.0.0.1"
MYSQL_DATABASE_USER = "twitter"
MYSQL_DATABASE_PASSWORD = "DF7U7q2yy6pUPSn3"
MYSQL_DATABASE_DB = "twitter"
db = MySQLdb.connect(host=MYSQL_DATABASE_HOST, user=MYSQL_DATABASE_USER, passwd=MYSQL_DATABASE_PASSWORD, db=MYSQL_DATABASE_DB)
def run_query(q):
c = db.cursor()
size = c.execute(q)
return size, c
@app.route('/twitter.py')
def ret_twitter_source():
return "<pre>" + open('twitter.py', 'r').read() + "</pre>"
@app.route('/testsuite.py')
def ret_testsuite_source():
return "<pre>" + open('testsuite.py', 'r').read() + "</pre>"
@app.route('/schema.sql')
def ret_schema_source():
return "<pre>" + open('schema.sql', 'r').read() + "</pre>"
@app.route('/')
def hello():
out = """
<pre>URLS
<a href=/user_timeline.json>User Timeline</a> params: token, username (optional, defaults to users token)
<a href=/friendslist.json>Friends List</a> params: token, username (optional, defaults to users token)
<a href=/followerslist.json>Followers List</a> params: token, username (optional, defaults to users token)
<a href=/createfriend.json>Add Friend</a> params: token, username
<a href=/destroyfriend.json>Destroy Friend</a> params: token, username
<a href=/tweet.json>Add tweet</a> (not tested) params: token, message
append query string token=<token> for user-context token
append query string username=<desired user> to query parameters about
i.e. /friendslist.json?token=1b43ef1e0618de6d&username=brian
<a href=/twitter.py>twitter.py source</a>
<a href=/testsuite.py>testsuite.py source</a>
<a href=/schema.sql>database schema</a>
"""
query = "SELECT username, token FROM users"
size, ret = run_query(query)
rows = ret.fetchall()
for row in rows:
out += "User: %s, Token: %s\n" % (row[0], row[1])
out += "</pre>"
return out
@app.route('/user_timeline.json')
def get_tweets():
"""Returns JSON-encoded list of tweets belonging to the specified username, and their friends
If no username is specified, default to the authenticating user
Returns HTTP Error 400 if given username doesn't exist"""
auth_user = verify_token(get_req_args_or_fail('token', 401))
target_user = ""
if check_None_or_empty(request.args.get('username', None)):
target_user = auth_user
else:
target_user = get_req_args_or_fail('username')
get_userid(target_user)
query = "SELECT timestamp, users.username, messageId, message FROM tweets LEFT JOIN users ON users.id = tweets.userId WHERE tweets.userId = ANY (SELECT id FROM users WHERE username = '%s' UNION SELECT friends.followingId FROM users JOIN friends ON friends.userId = users.id WHERE users.username = '%s') ORDER BY timestamp DESC" % (target_user, target_user)
size, ret = run_query(query)
tweets = []
rows = ret.fetchall()
for row in rows:
d = collections.OrderedDict()
d['timestamp'] = row[0].isoformat()
d['username'] = row[1]
d['messageId'] = row[2]
d['tweet'] = row[3]
tweets.append(d)
return jsonify(tweets=tweets)
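# Example response shape for /user_timeline.json (illustrative values only, built from the
# OrderedDict keys above):
# {"tweets": [{"timestamp": "2014-01-01T00:00:00", "username": "brian", "messageId": 1, "tweet": "hello world"}]}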
@app.route('/friendslist.json')
def get_friends():
"""Returns a list of users the specified username is friends with
If no username is specified, default to the authenticating user
Returns HTTP Error 400 if given username doesn't exist"""
auth_user = verify_token(get_req_args_or_fail('token', 401))
target_user = ""
if check_None_or_empty(request.args.get('username', None)):
target_user = auth_user
else:
target_user = get_req_args_or_fail('username')
get_userid(target_user)
query = "SELECT id, username FROM users WHERE id IN (SELECT friends.followingId FROM users JOIN friends ON friends.userId = users.id WHERE users.username = '%s')" % target_user
size, ret = run_query(query)
friends = []
rows = ret.fetchall()
for row in rows:
d = collections.OrderedDict()
d['id'] = row[0]
d['username'] = row[1]
friends.append(d)
return jsonify(users=friends)
@app.route('/followerslist.json')
def get_followers():
"""Returns a list of users who follow the specified username
If no username is specified, default to the authenticating user
Returns HTTP Error 400 if given username doesn't exist"""
auth_user = verify_token(get_req_args_or_fail('token', 401))
target_user = ""
if check_None_or_empty(request.args.get('username', None)):
target_user = auth_user
else:
target_user = get_req_args_or_fail('username')
get_userid(target_user)
query = "SELECT id, username FROM users WHERE id IN (SELECT friends.userId FROM users JOIN friends ON friends.followingId = users.id WHERE users.username = '%s')" % target_user
size, ret = run_query(query)
followers = []
rows = ret.fetchall()
for row in rows:
d = collections.OrderedDict()
d['id'] = row[0]
d['username'] = row[1]
followers.append(d)
return jsonify(users=followers)
@app.route('/tweet.json')
def add_tweet():
"""EXPERIMENTAL Add tweet for the authenticating user"""
auth_user = verify_token(get_req_args_or_fail('token', 401))
message = get_req_args_or_fail('message')
userid = get_userid(auth_user)
query = "INSERT into tweets (userId, message) VALUES ('%i', '%s')" % (userid, message)
try:
size, ret = run_query(query)
except:
abort(400)
return jsonify(tweet={'status': "Success!"})
@app.route('/createfriend.json')
def add_friend():
"""Adds the specified username to the authenticating users friends list
Returns HTTP Erorr 400 if username is None, an empty string, the authenticating user or a non-existant user
Returns the user id and username upon successful friending"""
auth_user = verify_token(get_req_args_or_fail('token', 401))
target_user = get_req_args_or_fail('username')
if target_user == auth_user: abort(400)
userid = get_userid(auth_user)
target_userid = get_userid(target_user)
query = "INSERT into friends (userId, followingId) VALUES ('%i', '%i')" % (userid, target_userid)
try:
size, ret = run_query(query)
except:
abort(400)
d = collections.OrderedDict()
d['id'] = target_userid
d['username'] = target_user
return jsonify(user=d)
@app.route('/destroyfriend.json')
def remove_friend():
"""Removes the specified username from the authenticating users friends list
Returns HTTP Erorr 400 if username is None, an empty string, the authenticating user or a non-existant user
Returns the user id and username upon successful removing"""
auth_user = verify_token(get_req_args_or_fail('token', 401))
target_user = get_req_args_or_fail('username')
if target_user == auth_user: abort(400)
userid = get_userid(auth_user)
target_userid = get_userid(target_user)
query = "DELETE FROM friends where userId = '%i' AND followingId = '%i'" % (userid, target_userid)
size, ret = run_query(query)
if size == 0: abort(400)
d = collections.OrderedDict()
d['id'] = target_userid
d['username'] = target_user
return jsonify(user=d)
def get_userid(user):
"""Simple statement to retrieve a given users id
Throw an HTTP Error 400 if the user doesn't exist"""
query = "SELECT id FROM users WHERE username = '%s'" % user
size, ret = run_query(query)
if size != 1:
abort(400)
uid = ret.fetchone()[0]
return uid
def get_req_args_or_fail(attribute, abortCode=400):
"""Retrieves query string parameters and verifies they are not None/empty
    Returns the value if successful, or throws an HTTP Error code (400 by default)"""
value = request.args.get(attribute, None)
if check_None_or_empty(value): abort(abortCode)
return MySQLdb.escape_string(value)
def check_None_or_empty(string):
"""Returns True if the string is None or empty, False otherwise"""
if string is None or string == "": return True
return False
def verify_token(token):
"""Verifies a user API token is valid
Returns the authenticating username, or throws an HTTP Error 401 Authorization Denied"""
query = "SELECT username FROM users WHERE token = '%s'" % token
size, ret = run_query(query)
if size == 0: abort(401)
if size == 1:
return ret.fetchone()[0]
abort(401)
if __name__ == "__main__":
app.debug = True
app.run(host="0.0.0.0")
| brianfife/twitterapi | twitter.py | twitter.py | py | 8,268 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "MySQLdb.connect",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "flask.request.args"... |
20629891320 | import argparse
import json
from collections import OrderedDict
import kernel_tuner as kt
import common
# Parse command line arguments
def parse_command_line():
parser = argparse.ArgumentParser(description='Tuning script for add_fluxes_kernel kernel')
parser.add_argument('--tune', default=False, action='store_true')
parser.add_argument('--run', default=False, action='store_true')
parser.add_argument('--best_configuration', default=False, action='store_true')
parser.add_argument('--block_size_x', type=int, default=96)
parser.add_argument('--block_size_y', type=int, default=1)
parser.add_argument('--block_size_z', type=int, default=1)
return parser.parse_args()
# Run one instance of the kernel and test output
def run_and_test(params: OrderedDict):
print(f"Running {kernel_name} [{params['block_size_x']}, {params['block_size_y']}, {params['block_size_z']}]")
result = kt.run_kernel(kernel_name, kernels_src, problem_size, args, params, compiler_options=common.cp)
common.compare_fields(flux_up + radn_up, result[6], "flux_up")
common.compare_fields(flux_dn + radn_dn, result[7], "flux_dn")
common.compare_fields(flux_up_jac + radn_up_jac, result[8], "flux_up_jac")
# Tuning
def tune():
tune_params = OrderedDict()
tune_params["block_size_x"] = [2**i for i in range(0, 11)]
tune_params["block_size_y"] = [2**i for i in range(0, 11)]
tune_params["block_size_z"] = [2**i for i in range(0, 7)]
restrictions = [f"block_size_x <= {ncol}", f"block_size_y <= {nlev}", f"block_size_z <= {ngpt}"]
print(f"Tuning {kernel_name}")
answer = [None for _ in range(0, len(args))]
answer[6] = flux_up + radn_up
answer[7] = flux_dn + radn_dn
answer[8] = flux_up_jac + radn_up_jac
result, env = kt.tune_kernel(kernel_name, kernels_src, problem_size, args, tune_params, answer=answer,
compiler_options=common.cp, verbose=True, restrictions=restrictions)
with open("timings_add_fluxes_kernel.json", "w") as fp:
json.dump(result, fp)
if __name__ == '__main__':
command_line = parse_command_line()
kernels_src = common.dir_name + "../src_kernels_cuda/rte_solver_kernels.cu"
# Input
ncol = common.type_int(512)
nlay = common.type_int(140)
nlev = common.type_int(nlay + 1)
ngpt = common.type_int(224)
flux_size = ncol * nlev * ngpt
radn_up = common.random(flux_size, common.type_float)
radn_dn = common.random(flux_size, common.type_float)
radn_up_jac = common.random(flux_size, common.type_float)
# Output
flux_up = common.random(flux_size, common.type_float)
flux_dn = common.random(flux_size, common.type_float)
flux_up_jac = common.random(flux_size, common.type_float)
kernel_name = f"add_fluxes_kernel<{common.str_float}>"
problem_size = (ncol, nlev, ngpt)
args = [ncol, nlev, ngpt, radn_up, radn_dn, radn_up_jac, flux_up, flux_dn, flux_up_jac]
if command_line.tune:
tune()
elif command_line.run:
parameters = OrderedDict()
if command_line.best_configuration:
best_configuration = common.best_configuration("timings_add_fluxes_kernel.json")
parameters["block_size_x"] = best_configuration["block_size_x"]
parameters["block_size_y"] = best_configuration["block_size_y"]
parameters["block_size_z"] = best_configuration["block_size_z"]
else:
parameters["block_size_x"] = command_line.block_size_x
parameters["block_size_y"] = command_line.block_size_y
parameters["block_size_z"] = command_line.block_size_z
run_and_test(parameters)
| earth-system-radiation/rte-rrtmgp-cpp | tuning_kernels_cuda/add_fluxes_kernel.py | add_fluxes_kernel.py | py | 3,673 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "kernel_tuner.run_kernel",
"line_number": 23,
"usage_type": "call"
},
{
"api_name"... |
26626842826 | from django.shortcuts import render_to_response
from django.template import RequestContext
from product.models import Product
from satchmo_store.shop import signals
from signals_ahoy.signals import application_search
def search_view(request, template="shop/search.html"):
"""Perform a search based on keywords and categories in the form submission"""
if request.method=="GET":
data = request.GET
else:
data = request.POST
keywords = data.get('keywords', '').split(' ')
category = data.get('category', None)
keywords = filter(None, keywords)
results = {}
# this signal will usually call listeners.default_product_search_listener
application_search.send(Product, request=request,
category=category, keywords=keywords, results=results)
context = RequestContext(request, {
'results': results,
'category' : category,
'keywords' : keywords})
return render_to_response(template, context_instance=context)
| dokterbob/satchmo | satchmo/apps/satchmo_store/shop/views/search.py | search.py | py | 1,013 | python | en | code | 30 | github-code | 6 | [
{
"api_name": "signals_ahoy.signals.application_search.send",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "product.models.Product",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "signals_ahoy.signals.application_search",
"line_number": 22,
"usa... |
70101306109 | """Control api connections and information gathering."""
import os
import requests
from constants.endpoints import endpoints
from typing import Tuple, Optional
class Binance:
"""Class to manage connection with Binance and data retrieving and inputting."""
def __init__(self, api_type: str = 'prod', endpoints = endpoints):
self.auth: Tuple[Optional[str], Optional[str]]
self.endpoints = endpoints
self.api_type = api_type
if self.api_type == 'test':
self.main_endpoint = 'https://testnet.binance.vision'
self.auth_dict = {
'key' : os.environ.get('TEST_KEY'),
'skey' : os.environ.get('TEST_SKEY'),
}
elif self.api_type == 'prod':
self.main_endpoint = 'https://api1.binance.com'
self.options_endpoint = 'https://vapi.binance.com'
self.auth_dict = {
'key' : os.environ.get('SPOT_KEY'),
'skey' : os.environ.get('SPOT_SKEY'),
}
# Complete endpoints strings.
for i in self.endpoints:
if i[0:7] == 'options':
self.endpoints[i] = self.options_endpoint + self.endpoints[i]
else:
self.endpoints[i] = self.main_endpoint + self.endpoints[i]
print(self.endpoints)
        try:
            r = requests.get(self.endpoints['test'])
            print('ping: ' + str(r) + '\nConnection successful')
        except requests.exceptions.RequestException:
            print('Could not ping Binance API.')
    def get_tickers(self, market: str = 'USDT') -> list[str]:
        r1 = requests.get(self.endpoints['exchange_info'], auth=(self.auth_dict['key'], self.auth_dict['skey']))
        symbols = r1.json()['symbols']
        tickers: list[str] = []
        for symbol in symbols:
            # keep only pairs that are actively trading, filtered by quote asset when one is given
            if symbol['status'] == 'TRADING':
                if market is None or symbol['quoteAsset'] == market:
                    tickers.append(symbol['symbol'])
        print(tickers)
return tickers | cthadeufaria/redesigned-pas-trading | src/utils/api.py | api.py | py | 2,165 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "constants.endpoints.endpoints",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "constants.e... |
17396933652 | from django.http import HttpResponse
def hello(req):
return HttpResponse('Hello, World !!')
def hello_html(req):
src = []
src.append('<!doctype html>')
src.append('<html>')
src.append('<head>')
src.append('<meta charset="utf-8">')
src.append('<title>Hello, World</title>')
src.append('</head>')
src.append('<body>')
src.append('<h1 style="color:#F4A346;">Hello, World!!</h1>')
src.append('</body>')
src.append('</html>')
return HttpResponse('\n'.join(src))
from django.shortcuts import render
from django.views.generic import TemplateView
class HelloTemplateView(TemplateView):
template_name = 'hello.html'
def get(self, request, *args, **kwargs):
        context = super(HelloTemplateView, self).get_context_data(**kwargs)
return render(self.request, self.template_name, context) | RyoJ/hellopython | hello/views.py | views.py | py | 853 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.http.HttpResponse",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.views.generic.TemplateView",
"line_number": 23,
"usage_type": "name"
},
{
... |
34670400006 | import json
from app_logic_extractor import app_logic_extractor
from lib.entity import App, Device, Net
from lib.util import convert_to_prolog_symbol, convert_to_prolog_var
from vul_analyzer import vul_analyzer
from vul_scanner import vul_scanner
def translate_vul_exists(prolog_dev_name, cve_id):
"""
Given a device full name and the list of CVE-IDs on that device, translate each CVE ID to
Prolog's `vulExistsV2` predicate
:param prolog_dev_name: a string of the device full name, in Prolog camel format
:param cve_id: a CVE-ID on that device
:return: a converted string of Prolog `vulExistsV2` predicate
"""
return 'vulExistsV2(' + prolog_dev_name + ', \'' + cve_id + '\').\n'
def translate_vul_property(exploit_model_tuple):
"""
Translate the app_logic_tuple returned by vul_analyzer() function to Prolog's `vulPropertyV2` predicate
:param exploit_model_tuple: a tuple of exploit model for a CVE ID: (cve_id, precondition, effect, probability, impact_score)
:return: a converted string of Prolog `vulPropertyV2` predicate
"""
(cve_id, precondition, effect, probability, impact_score) = exploit_model_tuple
return 'vulPropertyV2(\'' + cve_id + '\', ' + precondition + ', ' + effect + ', ' + str(probability) + ', ' + str(impact_score) + ').\n'
def parse_app_config(app_config_file):
"""
Parse `app_config.json` file and return a list of App objects
:param app_config_file: path to `app_config.json` file
:return: a list of App objects
"""
f_app_config = open(app_config_file)
app_json = json.load(f_app_config)
app_list = []
for app in app_json['apps']:
app_name = app['App name']
app_desc = app['description']
app_dev_map = app['device map']
app_list.append(App(app_name, app_desc, app_dev_map))
f_app_config.close()
return app_list
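# Illustrative shape of app_config.json (an assumption inferred from the keys parsed above):
# {"apps": [{"App name": "ExampleApp",
#            "description": "If motion and the door is open, turn the bulb on",
#            "device map": {"motion sensor": "Motion Sensor 1", "bulb": "Bulb 1"}}]}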
def parse_sys_config(sys_config_file):
"""
Parse `sys_config.json` file and return a list of Device objects
:param sys_config_file: path to `sys_config.json` file
:return: a tuple of (a list of Device objects, a tuple of Network objects)
"""
f_dev_config = open(sys_config_file)
dev_json = json.load(f_dev_config)
dev_list = []
for dev in dev_json['devices']:
dev_name = dev['name']
dev_type = dev['type']
dev_net_list = dev['network']
cur_dev_obj = Device(dev_name, dev_type, dev_net_list)
dev_list.append(cur_dev_obj)
if 'outdoor' in dev.keys():
outdoor = dev['outdoor']
cur_dev_obj.outdoor = outdoor
if 'plug into' in dev.keys():
plug_into = dev['plug into']
cur_dev_obj.plug_into = plug_into
net_list = []
for net in dev_json['networks']:
net_name = net['name']
net_type = net['type']
net_list.append(Net(net_name, net_type))
f_dev_config.close()
return dev_list, net_list
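# Illustrative shape of sys_config.json (an assumption inferred from the keys parsed above;
# "outdoor" and "plug into" are optional per-device fields):
# {"devices": [{"name": "Bulb 1", "type": "bulb", "network": ["zigbee1"], "plug into": "Outlet 1"}],
#  "networks": [{"name": "zigbee1", "type": "zigbee"}]}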
def translate_device_predicates(device_list):
"""
Given the device objects list, generate Prolog facts about device type, device inNetwork, plugInto, outdoor,
vulExistsV2, and vulPropertyV2
:param device_list: a list of Device objects
:return: a string of translated Prolog predicates
"""
res = ''
for device in device_list:
prolog_dev_type = convert_to_prolog_symbol(device.type)
prolog_dev_name = convert_to_prolog_symbol(device.name)
# translate facts about device type declaration
res += prolog_dev_type + '(' + prolog_dev_name + ').\n'
# translate facts about device outdoor declaration
if device.outdoor:
res += 'outdoor(' + prolog_dev_name + ').\n'
# translate facts about device plug into declaration
if device.plug_into:
prolog_outlet = convert_to_prolog_symbol(device.plug_into)
res += 'plugInto(' + prolog_dev_name + ', ' + prolog_outlet + ').\n'
# translate facts about device in network
for net in device.net_list:
prolog_net_name = convert_to_prolog_symbol(net)
res += 'inNetwork(' + prolog_dev_name + ', ' + prolog_net_name + ').\n'
# Translate facts about vulnerability existence and property
# run vul_scanner to get CVEIDs for the given device
cve_list = vul_scanner(device.name)
for cve_id in cve_list:
res += translate_vul_exists(prolog_dev_name, cve_id)
# run vul_analyzer to get the exploit model for that CVE-ID
exploit_model_tuple = vul_analyzer(cve_id, device.type)
res += translate_vul_property(exploit_model_tuple)
res += '\n'
return res
def translate_sys_config(sys_config_file):
"""
Translate IoT system configuration to Prolog facts
:param sys_config_file: path to `sys_config.json` file
:return: a converted string of Prolog rules for the app logic
"""
# Parse sys config JSON file
dev_list, net_list = parse_sys_config(sys_config_file)
# Translate facts about: device type, device inNetwork, plugInto, outdoor, vulExistsV2, and vulPropertyV2
res = translate_device_predicates(dev_list)
# Translate facts about: network type declaration, e.g., `wifi(wifi1).` `zigbee(zigbee1).`
for network in net_list:
res += convert_to_prolog_symbol(network.type) + '(' + convert_to_prolog_symbol(network.name) + ').\n'
return res
def translate_app_logic(app_config_file):
"""
Translate app logic to Prolog rules based on app configuration file and device configuration file
IMPORTANT: An IoT app in proper form always has one action in the [[main clause]],
and the [[conditional clause]] should have NONE or multiple conditions connected by AND.
:param app_config_file: path to `app_config.json` file
:return: a converted string of Prolog rules for the app logic
"""
# Parse app config JSON file
app_list = parse_app_config(app_config_file)
# Translate app logic to Prolog rules
res = ''
for app in app_list:
# convert app description to Python tuple
# app_logic_tuple = app_logic_extractor(app.desc)
app_logic_tuple = ('AND', ['motion sensor', 'door contact sensor'], ['motion', 'open'], 'NONE', ['bulb'], ['on'])
# app_logic_tuple = ('NONE', ['motion sensor'], ['motion'], 'NONE', ['bulb'], ['on'])
if app_logic_tuple is None:
print('error: the input app logic tuple is None\n')
return ''
cond_relation, cond_np_list, cond_vp_list, main_relation, main_np_list, main_vp_list = app_logic_tuple
# Convert the app logic
return translate_app_logic_AND_cond_clause(app.dev_map, cond_np_list, cond_vp_list, main_np_list, main_vp_list)
def translate_app_logic_AND_cond_clause(app_dev_map, cond_np_list, cond_vp_list, main_np_list, main_vp_list):
"""
The cond lists can have one or multiple elements. But if they multiple elements, they must be in logical AND relationship
E.g., app_logic_tuple = ('AND', ['motion sensor', 'door contact sensor'], ['motion', 'open'], 'NONE', ['bulb'], ['on'])
:return: a string of Prolog rules
"""
# Convert main clause
action = main_vp_list[0]
actuator_type = main_np_list[0]
res = action + '(' + convert_to_prolog_symbol(app_dev_map[actuator_type]) + ') :-\n'
# Convert conditional clause
for trigger_dev, trigger_act in zip(cond_np_list, cond_vp_list):
if trigger_dev == 'motion sensor':
if trigger_act == 'motion':
res += '\treportsMotion(' + convert_to_prolog_symbol(app_dev_map[trigger_dev]) + '),\n'
if trigger_dev == 'door contact sensor':
if trigger_act == 'open':
res += '\treportsOpen(' + convert_to_prolog_symbol(app_dev_map[trigger_dev]) + '),\n'
return res[:-2] + '.\n'
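# For the example tuple in the docstring above, with an assumed device map such as
# {'motion sensor': 'Motion Sensor 1', 'door contact sensor': 'Door Sensor 1', 'bulb': 'Bulb 1'},
# the returned rule would look like this (illustrative, assuming convert_to_prolog_symbol camel-cases names):
#   on(bulb1) :-
#       reportsMotion(motionSensor1),
#       reportsOpen(doorSensor1).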
def test_translate_vul_exists():
return translate_vul_exists('Nest Cam IQ indoor', 'CVE-2019-5035')
# should return:
# vulExistsV2(nestCamIQIndoor, 'CVE-2019-5035').\n
def test_translate_vul_analyzer():
exploit_tuple = ('CVE-2019-5035', 'network', 'rootPrivilege', 0.55, 10.0)
return translate_vul_property(exploit_tuple)
# should return:
# vulPropertyV2('CVE-2019-5035', network, rootPrivilege, 0.55, 10.0).
def test_translate_app_logic():
# app_logic_tuple = ('AND', ['motion sensor', 'door contact sensor'], ['motion', 'open'], 'NONE', ['light'], ['on'])
app_config_file = 'YOUR_IOTA_ROOT/python/test/app_config.json'
dev_config_file = 'YOUR_IOTA_ROOT/python/test/dev_config.json'
return translate_app_logic(app_config_file)
| pmlab-ucd/IOTA | python/translator.py | translator.py | py | 8,669 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "lib.entity.App",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "lib.entity.Device",
"line_numb... |
7935100756 | # -*- coding: utf-8 -*-
#import sys
#reload(sys)
#sys.setdefaultencoding('utf-8') #gb2312
import codecs
import random
import numpy as np
from tflearn.data_utils import pad_sequences
from collections import Counter
import os
import pickle
import json
import jieba
from predictor.data_util_test import pad_truncate_list
PAD_ID = 0
UNK_ID=1
_PAD="_PAD"
_UNK="UNK"
def load_data_multilabel(traning_data_path, valid_data_path, test_data_path, vocab_word2index, accusation_label2index,
sentence_len, name_scope='cnn', test_mode=False):
"""
convert data as indexes using word2index dicts.
:param traning_data_path:
:param vocab_word2index:
:param vocab_label2index:
:return:
"""
# 1. use cache file if exist
cache_data_dir = 'cache' + "_" + name_scope
cache_file =cache_data_dir+"/"+'train_valid_test_shiwan_3w_high.pik'
print("cache_path:",cache_file,"train_valid_test_file_exists:",os.path.exists(cache_file))
if os.path.exists(cache_file):
with open(cache_file, 'rb') as data_f:
print("going to load cache file from file system and return")
return pickle.load(data_f)
# 2. read source file
train_file_object = codecs.open(traning_data_path, mode='r', encoding='utf-8')
valid_file_object = codecs.open(valid_data_path, mode='r', encoding='utf-8')
test_data_obejct = codecs.open(test_data_path, mode='r', encoding='utf-8')
train_lines = train_file_object.readlines()
valid_lines=valid_file_object.readlines()
test_lines=test_data_obejct.readlines()
random.shuffle(train_lines)
random.shuffle(valid_lines)
random.shuffle(test_lines)
if test_mode:
train_lines=train_lines[0:1000]
# 3. transform to train/valid data to standardized format
train = transform_data_to_index(train_lines, vocab_word2index, accusation_label2index, sentence_len,'train',name_scope)
valid = transform_data_to_index(valid_lines, vocab_word2index, accusation_label2index, sentence_len,'valid',name_scope)
test = transform_data_to_index(test_lines, vocab_word2index, accusation_label2index, sentence_len,'test',name_scope)
# 4. save to file system if vocabulary of words not exists
if not os.path.exists(cache_file):
with open(cache_file, 'ab') as data_f:
print("going to dump train/valid/test data to file system.")
pickle.dump((train,valid,test),data_f, protocol=4)
return train, valid, test
splitter = ':'
num_mini_examples=1900
def transform_data_to_index(lines, vocab_word2index, accusation_label2index, sentence_len, data_type, name_scope):
"""
transform data to index using vocab and label dict.
:param lines:
:param vocab_word2index:
:param accusation_label2index:
:param article_label2index:
:param deathpenalty_label2index:
:param lifeimprisonment_label2index:
:param sentence_len: max sentence length
:return:
"""
X = []
Y_accusation = [] # discrete
accusation_label_size=len(accusation_label2index)
    # load frequency of each accusation (and relevant articles) so that examples with rare labels can be copied. ADD 2018-05-29
accusation_freq_dict = load_accusation_freq_dict(accusation_label2index, name_scope)
for i, line in enumerate(lines):
if i % 10000 == 0:
print("i:", i)
json_string = json.loads(line.strip())
# 1. transform input x.discrete
facts = json_string['fact']
input_list = token_string_as_list(facts) # tokenize
x = [vocab_word2index.get(x, UNK_ID) for x in input_list] # transform input to index
x = pad_truncate_list(x, sentence_len)
# 2. transform accusation.discrete
accusation_list = json_string['meta']['accusation']
accusation_list = [accusation_label2index[label] for label in accusation_list]
y_accusation = transform_multilabel_as_multihot(accusation_list, accusation_label_size)
# OVER-SAMPLING:if it is training data, copy labels that are few based on their frequencies.
# num_copy = 1
# if data_type == 'train': #set specially weight and copy some examples when it is training data.
# freq_accusation = accusation_freq_dict[accusation_list[0]]
# if freq_accusation <= num_mini_examples:
# freq = freq_accusation
# num_copy=max(1,num_mini_examples/freq)
# if i%1000==0:
# print("####################freq_accusation:", freq_accusation, ";num_copy:", num_copy)
#
# for k in range(int(num_copy)):
# X.append(x)
# Y_accusation.append(y_accusation)
#### no oversampling
X.append(x)
Y_accusation.append(y_accusation)
#shuffle
number_examples = len(X)
X_ = []
Y_accusation_ = []
permutation = np.random.permutation(number_examples)
for index in permutation:
X_.append(X[index])
Y_accusation_.append(Y_accusation[index])
X_ = np.array(X_)
data = (X_, Y_accusation_)
return data
def transform_multilabel_as_multihot(label_list,label_size):
"""
convert to multi-hot style
:param label_list: e.g.[0,1,4], here 4 means in the 4th position it is true value(as indicate by'1')
:param label_size: e.g.199
:return:e.g.[1,1,0,1,0,0,........]
"""
result=np.zeros(label_size)
#set those location as 1, all else place as 0.
result[label_list] = 1
return result
def transform_mulitihot_as_dense_list(multihot_list):
length = len(multihot_list)
result_list = [i for i in range(length) if multihot_list[i] > 0]
return result_list
#use pretrained word embedding to get word vocabulary and labels, and its relationship with index
def create_or_load_vocabulary(data_path, predict_path, training_data_path, vocab_size, name_scope='cnn', test_mode=False):
"""
create vocabulary
:param training_data_path:
:param vocab_size:
:param name_scope:
:return:
"""
cache_vocabulary_label_pik='cache'+"_"+name_scope # path to save cache
if not os.path.isdir(cache_vocabulary_label_pik): # create folder if not exists.
os.makedirs(cache_vocabulary_label_pik)
#0.if cache exists. load it; otherwise create it.
cache_path =cache_vocabulary_label_pik+"/"+'vocab_label_shiwan_high.pik'
print("cache_path:",cache_path,"file_exists:",os.path.exists(cache_path))
if os.path.exists(cache_path):
with open(cache_path, 'rb') as data_f:
print("going to load cache file.vocab of words and labels")
return pickle.load(data_f)
else:
vocab_word2index = {}
vocab_word2index[_PAD] = PAD_ID
vocab_word2index[_UNK] = UNK_ID
accusation_label2index = {}
#1.load raw data
file_object = codecs.open(training_data_path, mode='r', encoding='utf-8')
lines = file_object.readlines()
random.shuffle(lines)
if test_mode:
lines=lines[0:10000]
#2.loop each line,put to counter
c_inputs = Counter()
c_accusation_labels = Counter()
for i,line in enumerate(lines):
if i % 10000 == 0:
print(i)
json_string = json.loads(line.strip())
facts = json_string['fact']
input_list = token_string_as_list(facts)
c_inputs.update(input_list)
accusation_list = json_string['meta']['accusation']
c_accusation_labels.update(accusation_list)
#3.get most frequency words
vocab_list = c_inputs.most_common(vocab_size)
word_vocab_file = predict_path+"/"+'word_freq_shiwan_3w_high.txt'
if os.path.exists(word_vocab_file):
print("word vocab file exists.going to delete it.")
os.remove(word_vocab_file)
word_freq_file = codecs.open(word_vocab_file,mode='a',encoding='utf-8')
for i, tuplee in enumerate(vocab_list):
word,freq = tuplee
word_freq_file.write(word+":"+str(freq)+"\n")
vocab_word2index[word] = i+2
#4.1 accusation and its frequency.
accusation_freq_file = codecs.open(cache_vocabulary_label_pik+"/"+'accusation_freq_shiwan_3w_high.txt',mode='a',encoding='utf-8')
accusation_label_list = c_accusation_labels.most_common()
for i, tuplee in enumerate(accusation_label_list):
label,freq = tuplee
accusation_freq_file.write(label+":"+str(freq)+"\n")
#4.2 accusation dict, code the accusation with number
accusation_voc_file = data_path+"/accu.txt"
accusation_voc_object = codecs.open(accusation_voc_file,mode='r',encoding='utf-8')
accusation_voc_lines = accusation_voc_object.readlines()
for i, accusation_name in enumerate(accusation_voc_lines):
accusation_name=accusation_name.strip()
accusation_label2index[accusation_name] = i
#6.save to file system if vocabulary of words not exists.
if not os.path.exists(cache_path):
with open(cache_path, 'ab') as data_f:
print("going to save cache file of vocab of words and labels")
pickle.dump((vocab_word2index, accusation_label2index), data_f, protocol=4)
#7.close resources
word_freq_file.close()
accusation_freq_file.close()
print("create_vocabulary.ended")
return vocab_word2index, accusation_label2index
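# Minimal usage sketch (paths and sizes below are placeholders/assumptions, not project defaults):
#   vocab_word2index, accusation_label2index = create_or_load_vocabulary(
#       data_path, predict_path, training_data_path, vocab_size=80000)
#   train, valid, test = load_data_multilabel(
#       training_data_path, valid_data_path, test_data_path,
#       vocab_word2index, accusation_label2index, sentence_len=400)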
def token_string_as_list(string,tokenize_style='word'):
#string=string.decode("utf-8")
string = replace_money_value(string) #TODO add normalize number ADD 2018.06.11
length = len(string)
if tokenize_style == 'char':
listt = [string[i] for i in range(length)]
elif tokenize_style == 'word':
listt = jieba.lcut(string)
listt = [x for x in listt if x.strip()]
return listt
def get_part_validation_data(valid, num_valid=6000):
valid_X, valid_Y_accusation = valid
number_examples = len(valid_X)
permutation = np.random.permutation(number_examples)[0:num_valid]
valid_X2, valid_Y_accusation2 = [], []
for index in permutation:
valid_X2.append(valid_X[index])
valid_Y_accusation2.append(valid_Y_accusation[index])
return valid_X2, valid_Y_accusation2
def load_accusation_freq_dict(accusation_label2index, name_scope):
cache_vocabulary_label_pik = 'cache'+"_"+name_scope # path to save cache
#load dict of accusations
accusation_freq_file = codecs.open(cache_vocabulary_label_pik + "/" + 'accusation_freq_shiwan_3w_high.txt', mode='r',encoding='utf-8')
accusation_freq_lines = accusation_freq_file.readlines()
accusation_freq_dict = {}
for i, line in enumerate(accusation_freq_lines):
        acc_label, freq = line.strip().split(splitter)  # each line is "<accusation label>:<frequency>", e.g. 编造、故意传播虚假恐怖信息:122
accusation_freq_dict[accusation_label2index[acc_label]] = int(freq)
return accusation_freq_dict
import re
def replace_money_value(string):
#print("string:")
#print(string)
moeny_list = [1,2,5,7,10, 20, 30,50, 100, 200, 500, 800,1000, 2000, 5000,7000, 10000, 20000, 50000, 80000,100000,200000, 500000, 1000000,3000000,5000000,1000000000]
double_patten = r'\d+\.\d+'
int_patten = r'[\u4e00-\u9fa5,,.。;;]\d+[元块万千百十余,,。.;;]'
doubles=re.findall(double_patten,string)
ints=re.findall(int_patten,string)
ints=[a[1:-1] for a in ints]
#print(doubles+ints)
sub_value=0
for value in (doubles+ints):
for money in moeny_list:
if money >= float(value):
sub_value=money
break
string=re.sub(str(value),str(sub_value),string)
return string
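# Illustrative examples of the substitution above (assumption): an amount is replaced by the first
# bucket in moeny_list that is >= the amount, e.g. the 3500 in "现金3500元" becomes 5000, and
# 123456.78 becomes 200000.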
| 201520815029009/Text-classification-augmented-with-label-definitions | cnn_classification/data_util.py | data_util.py | py | 11,736 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
71362243707 | import networkx as nx
import config.config as config
def graph_kernel_map_to_nodetypes(_graph):
"""
NOT SUPPORTED AFTER GRAPH ENCODING CHANGE.
A pre-processing step to collapse nodes to their model types.
:param _graph:
:return:
"""
graph_relabelled = nx.relabel_nodes(_graph, {node: _graph.nodes[node]['model'].node_acronym for node in _graph.nodes})
all_node_relabels = []
for node_subtypes in config.NODE_TYPES_ALL.values():
for node_subtype in node_subtypes.values():
all_node_relabels.append(node_subtype.node_acronym)
graph_relabelled.add_nodes_from(all_node_relabels)
return graph_relabelled
def similarity_full_ged(g1, g2):
"""
    Measures the graph edit distance similarity between two graphs exactly. This can be slow,
    so using the approximate (reduced) method is suggested instead.
    :param g1: Graph object
    :param g2: Graph object
    :return: similarity (number of edit steps needed to transform Graph 1 into Graph 2)
"""
sim = nx.algorithms.similarity.graph_edit_distance(g1, g2,
edge_subst_cost=edge_match,
node_subst_cost=node_match,
upper_bound=30.0,
timeout=10.0,
)
return sim
def similarity_reduced_ged(g1, g2):
"""
    Approximates the graph edit distance similarity between two graphs.
    :param g1: Graph object
    :param g2: Graph object
    :return: similarity (number of edit steps needed to transform Graph 1 into Graph 2)
"""
ged_approx = nx.algorithms.similarity.optimize_graph_edit_distance(g1, g2,
edge_subst_cost=edge_match,
node_subst_cost=node_match,
upper_bound=30.0,
)
sim = next(ged_approx) # faster, but less accurate
return sim
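# Minimal usage sketch (assumption: graphs are plain networkx Graphs whose nodes and edges
# carry a 'model' attribute, since node_match/edge_match below compare the types of 'model').
def _example_reduced_ged():
    g_a = nx.Graph()
    g_a.add_node('n0', model=1.0)
    g_a.add_node('n1', model='acronym')
    g_a.add_edge('n0', 'n1', model=0.5)
    g_b = g_a.copy()
    # identical graphs should yield an (approximate) edit distance of zero
    return similarity_reduced_ged(g_a, g_b)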
def edge_match(e1, e2):
# provides the comparison for the cost of substituting two edges or two nodes in the GED calculation
if type(e1['model']) == type(e2['model']):
cost = 0.0
else:
cost = 1.0
return cost
def node_match(e1, e2):
# provides the comparison for the cost of substituting two edges or two nodes in the GED calculation
if type(e1['model']) == type(e2['model']):
cost = 0.0
else:
cost = 0.5
return cost | benjimaclellan/aesop | algorithms/assets/graph_edit_distance.py | graph_edit_distance.py | py | 2,807 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "networkx.relabel_nodes",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "config.config.NODE_TYPES_ALL.values",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "config.config.NODE_TYPES_ALL",
"line_number": 13,
"usage_type": "attribute"
... |
18705309300 | from time import sleep
import csv
import requests
from lxml import etree
from datetime import datetime
import pytz
urls = ['https://www.eclatparis.com/produits',
'https://www.eclatparis.com/produits?offset=200',
'https://www.eclatparis.com/produits?offset=400',
'https://www.eclatparis.com/produits?offset=600'
]
url_header = 'https://www.eclatparis.com'
# list that stores the URLs of all products
product_urls = []
# Because this is deployed to the cloud and PythonAnywhere restricts selenium too heavily,
# running selenium there directly would keep the program from working.
# The cookies below were obtained from the local run, which uses selenium;
# reusing them directly lets us bypass the login page.
cookies= {'hasCart': 'true',
'_ga_ZMDKD43H01': 'GS1.1.1677141224.1.1.1677141248.0.0.0',
'siteUserCrumb': '91m8yMQracXHvtn3hp_zqJZ29UAbVo6aaNclbA8xsq_qVyEboCKRsEBv3EqT4dQmzImPIrdRSieZfpx1drxkGFQAvslwA5temqFq29j_XcmIbFuE51bxgA1TRcZFYz1o',
'SiteUserInfo': '%7B%22authenticated%22%3Atrue%2C%22lastAuthenticatedOn%22%3A%222023-02-23T08%3A34%3A03.898Z%22%2C%22siteUserId%22%3A%2263dc38e82b1d5869bf4988e2%22%2C%22firstName%22%3A%22Wenjie%22%7D',
'crumb': 'BaRQABCJ44v5MTBhMjM1YWRhODA1ZDUxMWU5Y2JhYjY3MmYyNjU5',
'ss_cvt': '1677141232051',
'ss_cvr': 'cb592531-5038-4c16-bff7-3c4d1ae0cf97|1677141232051|1677141232051|1677141232051|1',
'CART': '-B8ztComuxoy8mZfh6NOEGGjxgnUzIdW4JGacILa',
'_ga': 'GA1.1.1986290368.1677141224',
'SiteUserSecureAuthToken': 'MXw1OTBmNTNlNy0xZWMyLTQzODctYWMzZS01NjAzZTIwYjEzOWJ8V0w0UE1BQlN3SFYwOFY0WWQyRmtsQmVDR1ktSVV1SVltTVVkZkdGcy1oel9yT21odDY4OXFZUm1IeElMWkRWQg',
'_fbp': 'fb.1.1677141224123.561848947'}
for url in urls:
    # the cookies argument passed to get() is a dict
    # get() blocks the thread: the code below only runs after the whole page has been fetched
response =requests.get(url,cookies=cookies)
    sleep(2)  # not waiting for the page to load; this throttles requests so the site does not suspend our IP
body = etree.HTML(response.content)
links = body.xpath( '//a[contains(@class, "grid-item-link")]')
for link in links:
product_urls.append(url_header + link.get('href'))
sleep(2)
francetime = pytz.timezone("Europe/Paris")
dt = datetime.now(francetime)
### date string in English/Western format
timenow =str(dt.year)+' '+str(dt.month)+' '+str(dt.day)
filename = timenow + 'product_urls_1.csv'
# write all product URLs to a CSV file
headers = ['URL', 'Sold out']
with open(filename, mode='w', newline='') as file:
writer = csv.writer(file)
writer.writerow(headers)
for url in product_urls:
writer.writerow([url, ''])
# check whether each product is sold out and add the result to the CSV file
with open(filename, mode='r', newline='') as file:
reader = csv.reader(file)
    next(reader)  # skip the header row
rows = []
for row in reader:
try:
response = requests.get(row[0], cookies=cookies)
html = etree.HTML(response.content)
sold_out = 'Yes' if html.xpath('//div[@class="ProductItem-details-checkout"]//div[@class="product-mark sold-out"]') else ''
row[1] = sold_out
except Exception as e:
print(f"Failed to check product {row[0]}: {e}")
rows.append(row)
with open(filename, mode='w', newline='') as file:
writer = csv.writer(file)
writer.writerow(headers)
for row in rows:
writer.writerow(row)
print('done')
| qingyi-er-san/aprizo_codes | eclat数据爬取/eclat_云端版.py | eclat_云端版.py | py | 3,460 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "lxml.etree.HTML",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number... |
42663404209 | import copy
import math #needed for calculation of weight and bias initialization
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
import torch, torch.nn as nn, torch.nn.functional as F
import torchvision
from torchvision import transforms, models, utils
#Set seeds
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.cuda.manual_seed_all(0)
#Import components
from . import components as cts
#TODOS FOR ALL MODELS:
#TODO try with Sigmoid on the attention instead of Softmax
#TODO try with self-attention instead of fixed learned attention weights
class BodyAvgDiseaseFeatureAttn(nn.Module): #7/1/2020
"""(1) ResNet18 [slices, 512, 14, 14]
(2) conv_final to [slices, 16, 6, 6]
(3) Make a 'copy representation': create n_outputs number of copies
by tiling: [slices, n_outputs, 16, 6, 6]
(4) Element wise multiply the 'copy representation' by a learned
weight vector of shape [1, n_outputs, 16, 1, 1]. This learned
weight vector re-weights the features for each disease separately.
Out shape: [slices, n_outputs, 16, 6, 6] (unchanged because we used
element-wise multiplication with broadcasting).
(5) Apply disease-specific FC layers which for each of the n_outputs
diseases will transform the 16*6*6 representation into a single
disease score. This step is analogous to the final FC layer in
the baseline model, except that in the baseline model we can
implement it easily with Conv2d whereas here because we have
separate disease representations we have to do something
trickier to implement disease-specific FC layers.
Out shape: [slices, n_outputs]
(6) Avg pooling over slices to get [n_outputs]"""
def __init__(self, n_outputs):
super(BodyAvgDiseaseFeatureAttn, self).__init__()
self.slices = 15 #9 projections
self.n_outputs = n_outputs
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
self.dzfeatweights = nn.Parameter(torch.ones((1,n_outputs,16,1,1), dtype=torch.float32),requires_grad=True)
self.softmax = nn.Softmax(dim=2) #make the 16 feature weights per disease add to 1
self.fclayers_weights, self.fclayers_biases = init_stacked_fc_layers(total_independent_fc_layers = n_outputs, in_features = 16*6*6)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
x = cts.reshape_x(x, self.slices)
x = self.features(x) #out shape [slices,512,14,14]
x = self.conv2d(x) #out shape [slices, 16, 6, 6]
#Copy the representation n_outputs number of times, so that we can
#calculate disease-specific intermediate representations, in which
#the features have been reweighted for each disease separately:
x = x.repeat(self.n_outputs,1,1,1,1) #out shape [83, slices, 16, 6, 6]
x = x.transpose(0,1) #out shape [slices, 83, 16, 6, 6]
#Element wise multiply the copy representation by the learned weights
#The learned weights perform the feature reweighting per disease.
#The softmax makes the features for one disease "compete against each other"
x = torch.mul(x,self.softmax(self.dzfeatweights)) #out shape [slices, 83, 16, 6, 6]
#Flatten
x = x.flatten(start_dim=2,end_dim=4) #out shape [slices, 83, 16*6*6] = [slices, 83, 576]
#Apply disease-specific FC layers
slice_preds = apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
#Final steps are the same as for baseline model:
x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, 83, slices]
x = self.avgpool_1d(x) #out shape [1, 83, 1]
x = torch.squeeze(x, dim=2) #out shape [1, 83]
return x
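# Illustrative sketch (not part of the model): step (4) in the docstring above is a broadcasted
# element-wise multiply; the shapes below are for illustration only.
def _feature_reweighting_broadcast_example():
    slices, n_outputs = 15, 83
    copies = torch.randn(slices, n_outputs, 16, 6, 6)    # the tiled 'copy representation'
    dzfeatweights = torch.ones(1, n_outputs, 16, 1, 1)   # learned per-disease feature weights
    # softmax over the 16 features makes each disease's feature weights sum to 1
    reweighted = copies * torch.softmax(dzfeatweights, dim=2)
    return reweighted.shape                              # torch.Size([15, 83, 16, 6, 6])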
class BodyAvg_Testing(nn.Module): #7/2/2020
"""BodyAvg model, implemented using the 'copy representation' and
disease-specific FC layers of BodyAvgDiseaseFeatureAttn. The only purpose
of this model is code testing: to figure out if the performance is exactly
the same as for the BodyAvg model."""
def __init__(self, n_outputs):
super(BodyAvg_Testing, self).__init__()
self.slices = 15 #9 projections
self.n_outputs = n_outputs
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
self.fclayers_weights, self.fclayers_biases = init_stacked_fc_layers(total_independent_fc_layers = n_outputs, in_features = 16*6*6)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
x = cts.reshape_x(x, self.slices)
x = self.features(x) #out shape [slices,512,14,14]
x = self.conv2d(x) #out shape [slices, 16, 6, 6]
x = x.repeat(self.n_outputs,1,1,1,1) #out shape [83, slices, 16, 6, 6]
x = x.transpose(0,1) #out shape [slices, 83, 16, 6, 6]
x = x.flatten(start_dim=2,end_dim=4) #out shape [slices, 83, 16*6*6] = [slices, 83, 576]
slice_preds = apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, 83, slices]
x = self.avgpool_1d(x) #out shape [1, 83, 1]
x = torch.squeeze(x, dim=2) #out shape [1, 83]
return x
class BodyAvgDiseaseFeatureAttn2(nn.Module): #7/2/2020, updated 7/7/2020
"""See BodyAvgDiseaseFeatureAttn for more documentation including code comments.
Difference from BodyAvgDiseaseFeatureAttn: in step (4) this model shares
    the learned feature weights between the right lung and the left lung."""
def __init__(self, n_outputs_lung, n_outputs_heart, nonlinearity):
super(BodyAvgDiseaseFeatureAttn2, self).__init__()
self.slices = 15 #9 projections
self.n_outputs = (2*n_outputs_lung)+n_outputs_heart
self.n_outputs_lung = n_outputs_lung
self.n_outputs_heart = n_outputs_heart
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
self.dzfeatweights_lung = nn.Parameter(torch.ones((1,n_outputs_lung,16,1,1), dtype=torch.float32),requires_grad=True)
self.dzfeatweights_heart = nn.Parameter(torch.ones((1,n_outputs_heart,16,1,1), dtype=torch.float32),requires_grad=True)
#Nonlinearity that gets applied to the feature weighting:
if nonlinearity == 'softmax':
self.nonlinearity = nn.Softmax(dim=2) #make the 16 feature weights per disease add to 1
elif nonlinearity == 'sigmoid':
self.nonlinearity = nn.Sigmoid()
self.fclayers_weights, self.fclayers_biases = init_stacked_fc_layers(total_independent_fc_layers = self.n_outputs, in_features = 16*6*6)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
x = cts.reshape_x(x, self.slices)
x = self.features(x) #out shape [slices,512,14,14]
x = self.conv2d(x) #out shape [slices, 16, 6, 6]
x = x.repeat(self.n_outputs,1,1,1,1) #out shape [83, slices, 16, 6, 6]
x = x.transpose(0,1) #out shape [slices, 83, 16, 6, 6]
#Apply the feature weights.
#Must follow ground truth label order, which is heart, left_lung, right_lung
x_heart = torch.mul(x[:,0:self.n_outputs_heart,:,:,:],self.nonlinearity(self.dzfeatweights_heart))
x_left_lung = torch.mul(x[:,self.n_outputs_heart:self.n_outputs_heart+self.n_outputs_lung,:,:,:],self.nonlinearity(self.dzfeatweights_lung))
x_right_lung = torch.mul(x[:,-1*self.n_outputs_lung:,:,:,:],self.nonlinearity(self.dzfeatweights_lung))
x = torch.cat((x_heart,x_left_lung,x_right_lung),dim=1) #out shape [slices, 83, 16, 6, 6]
x = x.flatten(start_dim=2,end_dim=4) #out shape [slices, 83, 16*6*6] = [slices, 83, 576]
slice_preds = apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, 83, slices]
x = self.avgpool_1d(x) #out shape [1, 83, 1]
x = torch.squeeze(x, dim=2) #out shape [1, 83]
return x
class BodyLocationAttn3(nn.Module): #7/2/2020, updated 7/7/2020
"""See BodyAvgDiseaseFeatureAttn for more documentation including code comments.
Difference from BodyAvgDiseaseFeatureAttn: uses spatial attention instead of
feature attention. Specifically there is right lung, heart, and left lung
spatial attention. Also, instead of being fixed weights every time, the
weights are learned based on using the center slices (since the center
slices are most indicative of where the right lung, heart, and left
lung are located.) So this is trainable soft self-attention."""
def __init__(self, n_outputs_lung, n_outputs_heart, nonlinearity):
super(BodyLocationAttn3, self).__init__()
self.slices = 15 #9 projections
self.n_outputs = (2*n_outputs_lung)+n_outputs_heart
self.n_outputs_lung = n_outputs_lung
self.n_outputs_heart = n_outputs_heart
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
#Calculate the spatial attention based on center slices
if nonlinearity == 'softmax':
chosen_nonlinearity = nn.Softmax()
elif nonlinearity == 'sigmoid':
chosen_nonlinearity = nn.Sigmoid()
self.heart_attn_fc = nn.Sequential(nn.Linear(3*16*6*6, 6*6),
chosen_nonlinearity)
self.left_lung_attn_fc = nn.Sequential(nn.Linear(3*16*6*6, 6*6),
chosen_nonlinearity)
self.right_lung_attn_fc = nn.Sequential(nn.Linear(3*16*6*6, 6*6),
chosen_nonlinearity)
self.fclayers_weights, self.fclayers_biases = init_stacked_fc_layers(total_independent_fc_layers = self.n_outputs, in_features = 16*6*6)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
x = cts.reshape_x(x, self.slices)
x = self.features(x) #out shape [slices,512,14,14]
x = self.conv2d(x) #out shape [slices, 16, 6, 6]
#Calculate the attention maps based on the center slices
#Use slices 6, 7, and 8 because these are in the exact center and
#also have the highest attention weight when you do height attention.
center_slices = x[6:9,:,:,:] #out shape [3, 16, 6, 6]
center_slices_flat = center_slices.flatten().unsqueeze(dim=0) #out shape [1,1728]
self.heart_spatial = self.heart_attn_fc(center_slices_flat).reshape(1,1,1,6,6) #out shape [1,1,1,6,6]
self.left_lung_spatial = self.left_lung_attn_fc(center_slices_flat).reshape(1,1,1,6,6) #out shape [1,1,1,6,6]
self.right_lung_spatial = self.right_lung_attn_fc(center_slices_flat).reshape(1,1,1,6,6) #out shape [1,1,1,6,6]
#Repeat x
x = x.repeat(self.n_outputs,1,1,1,1) #out shape [83, slices, 16, 6, 6]
x = x.transpose(0,1) #out shape [slices, 83, 16, 6, 6]
#Apply the attention maps
#Must follow ground truth label order, which is heart, left_lung, right_lung
x_heart = torch.mul(x[:,0:self.n_outputs_heart,:,:,:],self.heart_spatial)
x_left_lung = torch.mul(x[:,self.n_outputs_heart:self.n_outputs_heart+self.n_outputs_lung,:,:,:],self.left_lung_spatial)
x_right_lung = torch.mul(x[:,-1*self.n_outputs_lung:,:,:,:],self.right_lung_spatial)
x = torch.cat((x_heart,x_left_lung,x_right_lung),dim=1) #out shape [slices, 83, 16, 6, 6]
x = x.flatten(start_dim=2,end_dim=4) #out shape [slices, 83, 16*6*6] = [slices, 83, 576]
slice_preds = apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, 83, slices]
x = self.avgpool_1d(x) #out shape [1, 83, 1]
x = torch.squeeze(x, dim=2) #out shape [1, 83]
return x
class BodyDiseaseSpatialAttn4(nn.Module): #7/7/2020 #TODO test this
"""See BodyAvgDiseaseFeatureAttn for more documentation including code comments.
Difference from BodyLocationAttn3: while 4 also uses spatial
attention (like 3), 4 does spatial attention per disease instead of per
location."""
def __init__(self, n_outputs, nonlinearity):
super(BodyDiseaseSpatialAttn4, self).__init__()
self.slices = 15 #9 projections
self.n_outputs = n_outputs
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
#Calculate the spatial attention based on center slices
if nonlinearity == 'softmax':
self.nonlinearity = nn.Softmax(dim=2)
elif nonlinearity == 'sigmoid':
self.nonlinearity = nn.Sigmoid()
#FC layers for calculating the disease-specific spatial attention
#For each disease and each element of the 6x6 I learn a different FC layer:
self.fcattns_weights, self.fcattns_biases = init_stacked_fc_layers(total_independent_fc_layers = n_outputs*6*6, in_features = 16)
#FC layers for calculating the final disease predictions
self.fclayers_weights, self.fclayers_biases = init_stacked_fc_layers(total_independent_fc_layers = n_outputs, in_features = 16*6*6)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
x = cts.reshape_x(x, self.slices)
x = self.features(x) #out shape [slices,512,14,14]
x = self.conv2d(x) #out shape [slices, 16, 6, 6]
x = x.repeat(self.n_outputs,1,1,1,1) #out shape [83, slices, 16, 6, 6]
x = x.transpose(0,1) #out shape [slices, 83, 16, 6, 6]
#Calculate the disease-specific spatial attention:
attn_raw_list = []
for slice_num in range(self.slices):
slice_data = x[slice_num,:,:,:,:] #out shape [83, 16, 6, 6]
slice_data = slice_data.flatten(start_dim=2,end_dim=3).transpose(1,2) #out shape [83, 6*6, 16]
slice_data = slice_data.flatten(start_dim=0,end_dim=1) #out shape [83*6*6, 16]
temp1 = torch.mul(slice_data,self.fcattns_weights) #out shape [83*6*6, 16]
temp2 = torch.sum(temp1,dim=1) #out shape [83*6*6]
temp3 = (temp2+self.fcattns_biases).unsqueeze(0) #out shape [83*6*6]
attn_raw_list.append(temp3)
attn_raw = torch.cat(attn_raw_list,dim=0) #out shape [slices, 83*6*6]
attn_raw = torch.reshape(attn_raw,(self.slices,self.n_outputs,6*6)) #out shape [slices, 83, 6*6]
attn = self.nonlinearity(attn_raw) #out shape [slices, 83, 6*6]
attn = torch.reshape(attn,(self.slices,self.n_outputs,6,6)).unsqueeze(2) #out shape [slices, 83, 1, 6, 6]
#Apply the attention
        x = torch.mul(x, attn) #apply the attention; out shape [slices, 83, 16, 6, 6]
        #Disease predictions (use the attention-weighted representation computed above)
        x = x.flatten(start_dim=2,end_dim=4) #out shape [slices, 83, 16*6*6] = [slices, 83, 576]
slice_preds = apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, 83, slices]
x = self.avgpool_1d(x) #out shape [1, 83, 1]
x = torch.squeeze(x, dim=2) #out shape [1, 83]
return x
class BodyDiseaseSpatialAttn5(nn.Module): #7/7/2020 #TODO test this
"""See BodyAvgDiseaseFeatureAttn for more documentation including code comments.
Difference from BodyDiseaseSpatialAttn4: whereas 4 learns a different
mapping of 16 features -> 1 spatial attn value for each element of the 6x6
square, 5 uses a convolution layer such that the mapping of 16 -> 1 is
the same for each element of the 6x6 square"""
def __init__(self, n_outputs, nonlinearity):
super(BodyDiseaseSpatialAttn5, self).__init__()
self.slices = 15 #9 projections
self.n_outputs = n_outputs
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
#Calculate the spatial attention based on center slices
if nonlinearity == 'softmax':
self.nonlinearity = nn.Softmax(dim=2)
elif nonlinearity == 'sigmoid':
self.nonlinearity = nn.Sigmoid()
#Conv layer for calculating the disease-specific spatial attention
#For each disease and each element of the 6x6 I learn a different FC layer:
self.attn_conv = nn.Sequential(
            nn.Conv2d(16, n_outputs, kernel_size = (1,1), stride=(1,1), padding=0),
self.nonlinearity)
#FC layers for calculating the final disease predictions
self.fclayers_weights, self.fclayers_biases = init_stacked_fc_layers(total_independent_fc_layers = n_outputs, in_features = 16*6*6)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
x = cts.reshape_x(x, self.slices)
x = self.features(x) #out shape [slices,512,14,14]
x = self.conv2d(x) #out shape [slices, 16, 6, 6]
#Calculate the disease-specific spatial attention:
attn = self.attn_conv(x).unsqueeze(2) #out shape [slices, 83, 1, 6, 6]
#Apply the attention
x = x.repeat(self.n_outputs,1,1,1,1) #out shape [83, slices, 16, 6, 6]
x = x.transpose(0,1) #out shape [slices, 83, 16, 6, 6]
        x_times_attn = torch.mul(x, attn) #out shape [slices, 83, 16, 6, 6]
        #Disease predictions, computed from the attention-weighted features
        x = x_times_attn.flatten(start_dim=2,end_dim=4) #out shape [slices, 83, 16*6*6] = [slices, 83, 576]
slice_preds = apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, 83, slices]
x = self.avgpool_1d(x) #out shape [1, 83, 1]
x = torch.squeeze(x, dim=2) #out shape [1, 83]
return x
#############
# Functions #-------------------------------------------------------------------
#############
def init_stacked_fc_layers(total_independent_fc_layers, in_features):
"""Return the weights and biases of <total_independent_fc_layers>
fully connected layers.
Let's say there are 83 <total_independent_fc_layers> and there are
16*6*6 in_features. Then the produced fclayers_weights will have shape
83 x 576 and the produced fclayers_biases will have shape 83.
Each row corresponds to one FC layer that goes from a 1 x 576 representation
to a 1."""
    #fclayers_weights holds the weights for each disease-specific fc layer.
#https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/conv.py#L40
fclayers_weights_list = []
fclayers_biases_list = []
out_features = 1
for layernum in range(total_independent_fc_layers):
#kaiming uniform init following https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/linear.py
#for the case where we are doing disease-specific FC layers (i.e.
#where total_independent_fc_layers = 83 and in_features = 16*6*6)
#in order to be equivalent to the initialization of the final
#conv2d layer in the baseline model, the fan_in used should be 576.
#That is what we'll get in the calculation because in_features
#is 16*6*6=576, and the weights are defined as weight = Parameter(torch.Tensor(out_features, in_features))
#>>> nn.init._calculate_fan_in_and_fan_out(torch.rand(1,16*6*6))
#(576, 1)
#weight:
weight = torch.Tensor(out_features, in_features)
nn.init.kaiming_uniform_(weight, a=math.sqrt(5))
#bias:
bias = torch.Tensor(out_features)
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(weight)
assert fan_in == in_features #e.g. 576 for in_features = 16*6*6. sanity check based on my calculations
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(bias, -bound, bound)
fclayers_weights_list.append(weight)
fclayers_biases_list.append(bias)
fclayers_weights = nn.Parameter(torch.cat(fclayers_weights_list,dim=0)) #e.g. shape [83, 576]
fclayers_biases = nn.Parameter(torch.cat(fclayers_biases_list,dim=0)) #e.g. shape [83]
return fclayers_weights, fclayers_biases
def apply_disease_fc_layers(x, fclayers_weights, fclayers_biases):
"""Apply the disease-specific fully connected layers"""
slice_preds_list = []
for slice_num in range(x.shape[0]):
slice_data = x[slice_num,:,:] #out shape [83, 576]
#apply all the disease-specific FC layers at once
#Weight multiplication
#element-wise multiply and then sum over the columns (because this
#is equivalent to doing vector-vector multiplication between
#the rows of slice_data and the corresponding rows of self.fclayers_weights)
temp1 = torch.mul(slice_data,fclayers_weights) #out shape [83, 576]
temp2 = torch.sum(temp1,dim=1) #out shape [83]
#Bias addition
temp3 = (temp2+fclayers_biases).unsqueeze(0) #out shape [1,83]
#Now we have our 83 disease predictions for this slice.
#Append these slice predictions to our list:
slice_preds_list.append(temp3)
slice_preds = torch.cat(slice_preds_list,dim=0) #out shape [slices, 83]
return slice_preds
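# A minimal sanity-check sketch (added for illustration, not part of the original
# module): it verifies that apply_disease_fc_layers() with the stacked weights from
# init_stacked_fc_layers() matches applying each row as an independent
# 576 -> 1 linear layer. The sizes below (5 slices, 83 diseases) are assumptions.
if __name__ == '__main__':
    n_slices, n_diseases, in_features = 5, 83, 16*6*6
    weights, biases = init_stacked_fc_layers(total_independent_fc_layers = n_diseases, in_features = in_features)
    x = torch.rand(n_slices, n_diseases, in_features)
    stacked = apply_disease_fc_layers(x, weights, biases) #out shape [n_slices, n_diseases]
    #Reference result: one matrix-vector product per disease
    reference = torch.stack([x[:,d,:] @ weights[d] + biases[d] for d in range(n_diseases)], dim=1)
    assert torch.allclose(stacked, reference, atol=1e-5)
    print('stacked FC layers match per-disease linear layers:', stacked.shape)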
| rachellea/explainable-ct-ai | src/models/custom_models_diseasereps.py | custom_models_diseasereps.py | py | 21,347 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.manual_seed",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.cuda.manu... |
74187907709 | #Richard Janssen <richardnjanssen@gmail.com>
#28/07/2023
#CS50 Introduction to Programming with Python
#File Input/Output
#This program expects two command-line arguments: the first is an input image file and the second
#is the output file name or path. The program overlays a "shirt image" on the given input file.
#Input and output are expected to have the same format (.jpeg, .jpg, or .png)
#you can use befor1.jpg, before2.jpg or before3.jpg to test this script
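#Example invocation (illustrative; before2.jpg is one of the sample images mentioned above):
#   python shirt.py before2.jpg after2.jpg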
# ------------------------------------------------
import sys
import PIL.ImageOps #ImageOps must be imported explicitly; "import PIL" alone does not load the submodule used below
from PIL import Image
def main():
check_argv(sys.argv)
shirt = Image.open("shirt.png")
with Image.open(sys.argv[1]) as input:
resized = PIL.ImageOps.fit(input, shirt.size)
resized.paste(shirt,shirt)
resized.save(sys.argv[2])
def check_argv(argv):
if len(argv) > 3:
sys.exit("Too many command-line arguments.")
elif len(argv) < 3:
sys.exit("Too few command-line arguments.")
elif get_extension(argv[1]) not in ["jpg", "jpeg", "png"]:
sys.exit("Not a valid format file.")
elif get_extension(argv[1]) != get_extension(argv[2]):
sys.exit("Input and output must have the same format")
try:
open(sys.argv[1],"r")
except FileNotFoundError:
sys.exit("Can't find the file.")
def get_extension(str):
return str.rsplit(".",1)[1]
if __name__ == "__main__":
main() | richardnj14/CS50_python | file_input_output/shirt/shirt.py | shirt.py | py | 1,411 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_num... |
39747449584 | """Training and Predicting Cifar10 with Mutant Networks.
The networks mutate their architecture using genetic algorithms.
Author: Lucas David -- <ld492@drexel.edu>
Licence: MIT License 2016 (c)
"""
import logging
import artificial as art
import numpy as np
import tensorflow as tf
from artificial.utils.experiments import arg_parser, ExperimentSet, Experiment
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
import mutant
class Cifar10MutantEnvironment(mutant.Environment):
def build(self):
tf.logging.info('building environment...')
tf.logging.info('|-loading data...')
(X, y), (X_test, y_test) = cifar10.load_data()
X = X.astype('float32') / 255
X_test = X_test.astype('float32') / 255
g = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=15,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True)
tf.logging.info('|-fitting image generator...')
g.fit(X)
tf.logging.info('|-defining data sets...')
self.dataset_ = g.flow(X, y, batch_size=self.consts.batch_size,
shuffle=self.consts.shuffle_train)
self.test_dataset_ = self.val_dataset_ = (X_test, y_test)
tf.logging.info('building complete')
return self
class ClimbOverCifar10Experiment(Experiment):
env_ = None
def setup(self):
consts = self.consts
# Settings for logging.
verbosity_level = logging.INFO if consts.verbose else logging.WARNING
for m in ('artificial', 'tensorflow', 'connoisseur'):
logger = logging.getLogger(m)
logger.setLevel(verbosity_level)
logger.addHandler(logging.FileHandler(consts.log_file))
np.random.seed(consts.seed)
# Create mutation environment.
e = Cifar10MutantEnvironment(optimizer='adam', consts=consts)
e.agents = [
mutant.Agent(search=art.searches.local.HillClimbing,
environment=e,
**consts.agent_params)
]
initial_architecture = e.architect_.validate({
mutant.Codes.Conv2D: [
e.architect_.random_layer(mutant.Codes.Conv2D)
for _ in range(4)
],
mutant.Codes.Dense: [
e.architect_.random_layer(mutant.Codes.Dense)
for _ in range(2)
],
})
initial_state = mutant.MutantNetwork(initial_architecture)
e.current_state = e.initial_state = initial_state
self.env_ = e
def run(self):
try:
self.env_.live(n_cycles=1)
finally:
answer = self.env_.current_state
if answer:
tf.logging.info('train and validation loss after %i epochs: '
'(%s, %s)', self.consts.n_epochs,
answer.loss_, answer.validation_loss_)
if __name__ == '__main__':
print(__doc__, flush=True)
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('tensorflow').propagate = False
(ExperimentSet(ClimbOverCifar10Experiment)
.load_from_json(arg_parser.parse_args().constants)
.run())
| lucasdavid/unicamp-ia004-neural-networks-2 | mutant-networks/experiments/cifar-hill-climbing/experiment.py | experiment.py | py | 3,372 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "mutant.Environment",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.logging.info",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tensorflow.logging",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name"... |
72453926907 | # -*- coding: utf-8 -*-
import pymysql
from pymongo import MongoClient
class MiddleTable(object):
def __init__(self):
self.mysql_host = "192.168.10.121"
self.mysql_user = 'hzyg'
self.mysql_password = '@hzyq20180426..'
self.MONGO_HOST = '127.0.0.1'
self.MONGO_PORT = 27017
# self.MONGO_USER = ''
# self.PSW = ''
def open_sql(self, ms_db, mo_db, mo_coll):
self.link = pymysql.connect(self.mysql_host, self.mysql_user, self.mysql_password, ms_db)
self.link.set_charset('utf8')
self.cursor = self.link.cursor()
self.client = MongoClient(host=self.MONGO_HOST, port=self.MONGO_PORT)
self.mo_db = self.client[mo_db]
self.coll = self.mo_db[mo_coll]
def input_sql(self):
producer_list = self.coll.distinct('corpName', {})
seller_list = self.coll.distinct('corpNameBy', {})
        for name in set(producer_list) | set(seller_list): #iterate over the union of producer and seller names
if name != '/':
                detail_list = self.coll.find({'$or': [{'corpName': name}, {'corpNameBy': name}]}) #match the name in either role
for detail in detail_list:
inspection_id = detail['_id']
produce_name = detail['corpName']
if produce_name != '/':
sql = "select id from sys_organization where name='%s'" % produce_name
self.cursor.execute(sql)
produce_id = self.cursor.fetchone()
if not produce_id:
break
else:
produce_id = produce_id[0]
else:
produce_id = None
seller_name = detail['corpNameBy']
if seller_name != '/':
sql = "select id from sys_organization where name='%s'" % seller_name
self.cursor.execute(sql)
seller_id = self.cursor.fetchone()
if not seller_id:
break
else:
seller_id = seller_id[0]
sql = "select supervise_id from sys_organization_ascription where organization_id='%s'" % seller_id
self.cursor.execute(sql)
supervise_id = self.cursor.fetchone()
supervise_id = supervise_id[0]
else:
seller_id = None
supervise_id = None
security_results = detail['newsDetailType']
if security_results >= 54 and security_results <= 76 or security_results == 100:
security_results = 1
elif security_results >= 77 and security_results <= 99 or security_results ==101:
security_results = 2
data_type = detail['rwly']
if '省抽' in data_type:
data_type = 521
elif '国抽' in data_type:
data_type = 520
else:
data_type = 526
status = detail['status']
notice_date = detail['ggrq']
sql = """INSERT INTO organization_inspection_relation(inspection_id, producer_id, seller_id, security_results, source, data_type, status, notice_date) VALUES("%s","%d", "%d", "%s", "%d", "%d", "%d", "%s")""" % (inspection_id, produce_id, seller_id, security_results, supervise_id, data_type, status, notice_date)
self.cursor.execute(sql)
self.link.commit()
def close_sql(self):
self.link.close()
mt = MiddleTable()
mt.open_sql('yfhunt', 'zhejiang', 'sheng')
mt.input_sql()
mt.close_sql()
| cyndi088/MiddleTables | mongo_to_mysql.py | mongo_to_mysql.py | py | 3,882 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pymysql.connect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 21,
"usage_type": "call"
}
] |
19416857297 | """Visualises the range of potentials relative to demand in each municipality."""
from itertools import chain, repeat
import click
import pandas as pd
import geopandas as gpd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pycountry
from src.vis import RED, GREEN, BLUE
SORT_QUANTILE = 0.5
@click.command()
@click.argument("path_to_results")
@click.argument("path_to_plot")
def visualise_normed_potentials(path_to_results, path_to_plot):
"""Visualises the range of potentials relative to demand in each municipality."""
sns.set_context('paper')
units = pd.DataFrame(gpd.read_file(path_to_results))
units = units[["country_code", "population_sum", "normed_potential"]]
units["country"] = units["country_code"].map(lambda country_code: pycountry.countries.lookup(country_code).name)
units["country"].replace("Macedonia, Republic of", value="Macedonia", inplace=True) # too long
units["country"].replace("Bosnia and Herzegovina", value="Bosnia", inplace=True) # too long
people = pd.DataFrame(
data={
"country": list(chain(*[
(repeat(unit[1].country, round(unit[1].population_sum / 100)))
for unit in units.iterrows()
])),
"normed_potential": list(chain(*[
(repeat(unit[1].normed_potential, round(unit[1].population_sum / 100)))
for unit in units.iterrows()
]))
}
)
people_eu = people.copy()
people_eu["country"] = "Europe"
people = pd.concat([people, people_eu])
fig = plt.figure(figsize=(8, 10), constrained_layout=True)
ax = fig.add_subplot(111)
sns.boxplot(
data=people,
x="normed_potential",
y="country",
order=people.groupby("country").normed_potential.quantile(SORT_QUANTILE).sort_values().index,
ax=ax,
color=GREEN,
whis=[2.5, 97.5],
saturation=0.85,
linewidth=1.3,
width=0.7,
boxprops=dict(linewidth=1.3, edgecolor=GREEN),
whiskerprops=dict(linewidth=1, color=GREEN),
flierprops=dict(markerfacecolor="k", markeredgecolor="k", markersize=0, marker="o"),
capprops=dict(color=GREEN)
)
ax.axvline(1, color=RED, linewidth=1.5)
ax.set_xlabel("potential relative to demand")
ax.set_ylabel("country")
ax.set_xscale('log')
ax.set_xlim(0.08, 100)
ax.set_xticklabels(["{:.0f}%".format(tick * 100) for tick in ax.get_xticks()])
eu_position = list(
people.groupby("country").normed_potential.quantile(SORT_QUANTILE).sort_values().index
).index("Europe")
eu_patch = [child for child in ax.get_children() if isinstance(child, matplotlib.patches.PathPatch)][eu_position]
eu_patch.set_facecolor(BLUE)
eu_patch.set_edgecolor(BLUE)
eu_patch.set_zorder(2)
if path_to_plot[-3:] == "png":
fig.savefig(path_to_plot, dpi=600, transparent=False)
else:
fig.savefig(path_to_plot, dpi=600, transparent=False, pil_kwargs={"compression": "tiff_lzw"})
if __name__ == "__main__":
visualise_normed_potentials()
| timtroendle/possibility-for-electricity-autarky | src/vis/potentials_normed_boxplot.py | potentials_normed_boxplot.py | py | 3,114 | python | en | code | 10 | github-code | 6 | [
{
"api_name": "seaborn.set_context",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "geopandas.read_file",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pycountry.co... |
42673179666 | import graphene
from graphene_django.types import DjangoObjectType
from graphql import GraphQLError
from .models import *
from django.contrib.auth.models import User
class user(DjangoObjectType):
class Meta:
model = User
class task(DjangoObjectType):
class Meta:
model = Task
class Query(object):
all_tasks = graphene.List(task)
profile = graphene.NonNull(user)
def resolve_all_tasks(self, info, **kwargs):
if(not info.context.user.is_authenticated):
raise GraphQLError('Please log in')
return Task.objects.filter(user=info.context.user)
def resolve_profile(self, info, **kwargs):
if(not info.context.user.is_authenticated):
raise GraphQLError('Please log in')
return info.context.user
# def resolve_all_users(self, info, **kwargs):
# return User.objects.all()
| neelansh/Todo_app_graphql | todo/app/schema.py | schema.py | py | 879 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "graphene_django.types.DjangoObjectType",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "graphene_django.types.DjangoObjectType",
"line_number": 14,
"usage... |
10754618870 | from nltk.corpus import stopwords
import pandas as pd
from nltk.stem.snowball import SnowballStemmer
import re
import nltk
class ngrams:
def __init__(self, df,column,n=10):
texto = " ".join(str(x) for x in df[column].values)
tokens = texto.split()
tokens=[x.lower() for x in tokens]
stopset = set(stopwords.words('english')) # dictionary of stop words
tokens = [w for w in tokens if not w in stopset]
stemmer=SnowballStemmer("english")
stemm_words=[]
tokens_clean=[]
for j in tokens:
sa=re.sub('[^A-Za-z]+', '', j)
tokens_clean.append(sa)
for s in tokens_clean:
try:
stem= stemmer.stem(s)
if s!='':
stemm_words.append(str(stem))
except:
pass
cuenta = len(tokens_clean)
bigrams = nltk.bigrams(tokens_clean)
trigrams=nltk.trigrams(tokens_clean)
fdist = nltk.FreqDist(bigrams)
fdist1 = nltk.FreqDist(trigrams)
#for i,j in fdist.items():
# print i,j
frecuentbigrams=fdist.most_common(n)
frecuenttrigrams=fdist1.most_common(10)
bigramslist=[]
trigramslist=[]
for x in frecuentbigrams:
a,b=x
l,m=a
if l !='' and m !='' and l!=m:
bigramslist.append(a)
bigramsduplicates=[]
for idx, x in enumerate(bigramslist):
for idy, y in enumerate(bigramslist):
if idx!=idy:
if x[0]==y[1]:
duplicate=(x[1],x[0])
#print bigramsduplicates
#print x
if x not in bigramsduplicates:
bigramslist.pop(idx)
bigramsduplicates.append(x)
bigramsduplicates.append(duplicate)
for x in frecuenttrigrams:
a,b=x
trigramslist.append(a)
self.bigrams=bigramslist
self.trigrams=trigramslist
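# Minimal usage sketch (added for illustration, not part of the original file).
# The DataFrame and its "text" column are invented purely for this example, and
# the NLTK stopwords corpus is assumed to be available (nltk.download('stopwords')).
if __name__ == '__main__':
    df = pd.DataFrame({"text": ["the quick brown fox jumps over the lazy dog",
                                "the quick brown dog sleeps under the old tree"]})
    ng = ngrams(df, "text", n=5)
    print(ng.bigrams)   # most frequent bigrams
    print(ng.trigrams)  # most frequent trigrams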
| omedranoc/ThesisPreprocessing | model/ngrams.py | ngrams.py | py | 1,850 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "nltk.stem.snowball.SnowballStemmer",
"line_number": 14,
"usage_type": "call"
},
{
... |
39870180773 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import imageio
import scipy.misc
import numpy as np
import videostyletransfer as vst
video_id = 'temple_2'
content_path = os.getcwd() + '/input/' + video_id + '/'
style_path = os.getcwd() + '/style-images/starry_night.jpg'
flow_path = os.getcwd() + '/flow/' + video_id + '/'
height = 384#192#96#384#436
width = 512#256#128#512#1024
num_frames = 5
fps = 30
content = []
for i in range(1, num_frames + 1):
content_image = imageio.imread(content_path + ('frame_%04d.png' % i))
content.append(content_image[:height,:width,:])
style = imageio.imread(style_path)
style = scipy.misc.imresize(style, [height, width])
style = np.array(style)
vst_module = vst.VideoStyleTransferModule(content, style, flow_path)
styled_frames = vst_module.optimize_images()
vid_id = os.getcwd() + '/output/' + video_id + '.mp4'
writer = imageio.get_writer(vid_id, fps=fps)
for f in styled_frames:
writer.append_data(f)
writer.close()
| tomstrident/Video-Style-Transfer | video_style_transfer_demo.py | video_style_transfer_demo.py | py | 986 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.getcwd",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "imageio.imread",
"line_number": 25,... |
26986931486 | # -*- coding: utf-8 -*-
import itertools
import struct
import pytest
from mock import Mock, call, patch
from nameko_grpc.errors import GrpcError
from nameko_grpc.streams import (
STREAM_END,
ByteBuffer,
ReceiveStream,
SendStream,
StreamBase,
)
class TestByteBuffer:
def test_peek(self):
buffer = ByteBuffer()
buffer.write(b"abcdefghi")
assert buffer.peek(slice(0, 1)) == b"a"
assert buffer.peek(slice(3, 6)) == b"def"
assert buffer.peek(slice(-2, -1)) == b"h"
assert buffer.read() == b"abcdefghi"
def test_peek_all(self):
buffer = ByteBuffer()
buffer.write(b"abcdefghi")
assert buffer.peek() == b"abcdefghi"
assert buffer.read() == b"abcdefghi"
def test_discard(self):
buffer = ByteBuffer()
buffer.write(b"abcdefghi")
assert buffer.discard(3) is None
assert buffer.read() == b"defghi"
def test_discard_all(self):
buffer = ByteBuffer()
buffer.write(b"abcdefghi")
assert buffer.discard() is None
assert buffer.read() == b""
def test_read(self):
buffer = ByteBuffer()
buffer.write(b"abcdefghi")
assert buffer.read(3) == b"abc"
assert buffer.read() == b"defghi"
def test_read_all(self):
buffer = ByteBuffer()
buffer.write(b"abcdefghi")
assert buffer.read() == b"abcdefghi"
assert buffer.read() == b""
def test_write(self):
buffer = ByteBuffer()
buffer.write(b"abc")
assert buffer.peek() == b"abc"
buffer.write(b"def")
assert buffer.peek() == b"abcdef"
def test_empty(self):
buffer = ByteBuffer()
assert buffer.empty() is True
buffer.write(b"abc")
assert buffer.empty() is False
buffer.discard()
assert buffer.empty() is True
def test_len(self):
buffer = ByteBuffer()
assert len(buffer) == 0
buffer.write(b"abc")
assert len(buffer) == 3
class TestStreamBase:
def test_exhausted(self):
stream = StreamBase(1)
stream.buffer.write(b"abc")
assert not stream.exhausted
stream.close()
assert stream.closed
assert not stream.exhausted
stream.queue.get()
assert stream.queue.empty()
assert not stream.exhausted
stream.buffer.discard()
assert stream.buffer.empty()
assert stream.exhausted
def test_close(self):
stream = StreamBase(1)
stream.close()
assert stream.closed
assert stream.queue.get() == STREAM_END
def test_close_with_error(self):
stream = StreamBase(1)
error = GrpcError("boom", "details")
stream.close(error)
assert stream.closed
assert stream.queue.get() == error
def test_close_with_non_error(self):
stream = StreamBase(1)
error = Exception("boom")
with pytest.raises(AssertionError):
stream.close(error)
class TestReceiveStream:
def test_write_to_closed_stream(self):
stream = ReceiveStream(1)
assert stream.buffer.empty()
stream.close()
stream.write(b"\x00\x00\x00")
assert stream.buffer.empty()
def test_write_less_bytes_than_header(self):
stream = ReceiveStream(1)
stream.write(b"\x00\x00\x00")
assert stream.queue.empty()
assert stream.buffer.peek() == b"\x00\x00\x00"
def test_write_less_bytes_than_one_message(self):
stream = ReceiveStream(1)
stream.write(b"\x00\x00\x00\x01\x00\xff\xff\xff")
assert stream.queue.empty()
assert stream.buffer.peek() == b"\x00\x00\x00\x01\x00\xff\xff\xff"
def test_write_more_bytes_than_one_message(self):
stream = ReceiveStream(1)
        # uncompressed single byte message, followed by two more bytes of \xff
stream.write(b"\x00\x00\x00\x00\x01\xff\xff\xff")
# single byte message is queued
assert stream.queue.get() == (False, b"\xff")
# following two bytes remain in the buffer
assert stream.buffer.peek() == b"\xff\xff"
def test_write_multiple_messages(self):
stream = ReceiveStream(1)
for _ in range(10):
stream.write(b"\x00\x00\x00\x00\x01\xff") # 10 single byte messages
assert stream.queue.qsize() == 10
assert len(stream.buffer) == 0
def test_consume_grpc_error(self):
stream = ReceiveStream(1)
error = GrpcError("boom", "details")
stream.queue.put(error)
message_type = Mock()
with pytest.raises(GrpcError):
next(stream.consume(message_type))
def test_consume_end_of_stream(self):
stream = ReceiveStream(1)
stream.close()
message_type = Mock()
assert list(stream.consume(message_type)) == []
def test_consume_uncompressed_message(self):
stream = ReceiveStream(1)
message_data = b"x"
message_type = Mock()
message = message_type()
stream.queue.put((False, message_data))
stream.close() # close stream so that consume exits
assert list(stream.consume(message_type)) == [message]
assert message.ParseFromString.call_args_list == [call(message_data)]
@patch("nameko_grpc.streams.decompress")
def test_consume_compressed_message(self, decompress):
stream = ReceiveStream(1)
message_data = b"x"
message_type = Mock()
message = message_type()
stream.queue.put((True, message_data))
stream.close() # close stream so that consume exits
assert list(stream.consume(message_type)) == [message]
assert message.ParseFromString.call_args_list == [
call(decompress(message_data))
]
@patch("nameko_grpc.streams.decompress")
def test_consume_multiple_messages(self, decompress):
stream = ReceiveStream(1)
message_data = b"x"
message_type = Mock()
message = message_type()
stream.queue.put((False, message_data))
stream.queue.put((True, message_data))
stream.close() # close stream so that consume exits
assert list(stream.consume(message_type)) == [message, message]
assert message.ParseFromString.call_args_list == [
call(message_data),
call(decompress(message_data)),
]
class TestSendStream:
def test_populate(self):
stream = SendStream(1)
stream.populate(range(10))
assert stream.closed
assert stream.queue.qsize() == 11
def test_populate_closed_stream(self):
stream = SendStream(1)
stream.close()
assert stream.closed
stream.populate(range(10))
assert stream.queue.qsize() == 1
class TestSendStreamHeadersToSend:
def test_no_headers(self):
stream = SendStream(1)
assert len(stream.headers) == 0
assert stream.headers_to_send(False) is False
def test_empty_queue(self):
stream = SendStream(1)
stream.headers.set(("foo", "bar"))
assert stream.queue.qsize() == 0
assert stream.headers_to_send(True) is False
assert stream.headers_to_send(False) == [(b"foo", b"bar")]
def test_mark_as_sent(self):
stream = SendStream(1)
stream.headers.set(("foo", "bar"))
assert stream.headers_to_send(False) == [(b"foo", b"bar")] # marks as sent
assert stream.headers_to_send(False) is False # previously sent
def test_defer_until_data(self):
stream = SendStream(1)
stream.headers.set(("foo", "bar"))
assert stream.headers_to_send(True) is False # defer until data
stream.queue.put(Mock())
assert stream.queue.qsize() == 1
assert stream.headers_to_send(True) == [(b"foo", b"bar")]
class TestSendStreamTrailersToSend:
def test_no_trailers(self):
stream = SendStream(1)
assert len(stream.trailers) == 0
assert stream.trailers_to_send() is False
def test_send_trailers(self):
stream = SendStream(1)
stream.trailers.set(("foo", "bar"))
assert stream.trailers_to_send() == [(b"foo", b"bar")]
@pytest.fixture
def generate_messages():
with patch("nameko_grpc.streams.compress") as compress:
compress.side_effect = lambda body, _: (False, body)
def generate(count, length):
"""Generate a series of mock messages.
If `count` is 2 and `length` is 4, when passed to `stream.populate`,
two messages with the following payload will be added to the stream's
queue.
#1. b`\x00\x00\x00\x00`
#2. b`\x01\x01\x01\x01`
"""
messages = []
for index in range(count):
message = Mock()
message.SerializeToString.return_value = bytes([index] * length)
messages.append(message)
return messages
yield generate
class TestSendStreamFlushQueueToBuffer:
def test_empty_queue(self):
stream = SendStream(1)
assert stream.queue.qsize() == 0
stream.flush_queue_to_buffer()
assert stream.buffer.empty()
def test_messages_on_queue(self, generate_messages):
stream = SendStream(1)
stream.populate(generate_messages(count=2, length=20))
header = struct.pack("?", False) + struct.pack(">I", 20)
stream.flush_queue_to_buffer()
assert stream.buffer.peek() == header + b"\x00" * 20 + header + b"\x01" * 20
def test_stream_closed(self, generate_messages):
stream = SendStream(1)
stream.populate(generate_messages(count=2, length=20))
header = struct.pack("?", False) + struct.pack(">I", 20)
stream.flush_queue_to_buffer()
assert stream.buffer.peek() == header + b"\x00" * 20 + header + b"\x01" * 20
stream.flush_queue_to_buffer() # stream closed; no-op
assert stream.buffer.peek() == header + b"\x00" * 20 + header + b"\x01" * 20
def test_error_on_queue(self, generate_messages):
stream = SendStream(1)
error = GrpcError("boom", "details")
messages = itertools.chain(generate_messages(count=2, length=20), [error])
stream.populate(messages)
with pytest.raises(GrpcError):
stream.flush_queue_to_buffer()
class TestSendStreamRead:
def test_no_data(self):
stream = SendStream(1)
max_bytes = 10
chunk_size = 10
assert stream.buffer.empty()
assert list(stream.read(max_bytes, chunk_size)) == []
def test_less_than_one_chunk_of_data(self):
stream = SendStream(1)
stream.buffer.write(b"abc")
max_bytes = 10
chunk_size = 5
assert list(stream.read(max_bytes, chunk_size)) == [b"abc"]
assert stream.buffer.empty()
def test_more_than_one_chunk_of_data(self):
stream = SendStream(1)
stream.buffer.write(b"abcdefghijklm")
max_bytes = 10
chunk_size = 5
assert list(stream.read(max_bytes, chunk_size)) == [b"abcde", b"fghij"]
assert stream.buffer.peek() == b"klm"
def test_less_than_max_bytes_of_data(self):
stream = SendStream(1)
stream.buffer.write(b"abcdefghijklm")
max_bytes = 20
chunk_size = 5
assert list(stream.read(max_bytes, chunk_size)) == [b"abcde", b"fghij", b"klm"]
assert stream.buffer.empty()
def test_more_than_max_bytes_of_data(self):
stream = SendStream(1)
stream.buffer.write(b"abcdefghijklm")
max_bytes = 10
chunk_size = 5
assert list(stream.read(max_bytes, chunk_size)) == [b"abcde", b"fghij"]
assert stream.buffer.peek() == b"klm"
def test_chunk_greater_than_max_bytes(self):
stream = SendStream(1)
stream.buffer.write(b"abcdefghijklm")
max_bytes = 5
chunk_size = 10
assert list(stream.read(max_bytes, chunk_size)) == [b"abcde"]
assert stream.buffer.peek() == b"fghijklm"
def test_stream_closed(self):
stream = SendStream(1)
max_bytes = 10
chunk_size = 5
stream.close()
assert list(stream.read(max_bytes, chunk_size)) == []
def test_stream_closed_with_error(self):
stream = SendStream(1)
error = GrpcError("boom", "details")
stream.close(error)
max_bytes = 10
chunk_size = 5
with pytest.raises(GrpcError):
next(stream.read(max_bytes, chunk_size))
def test_multiple_small_messages(self, generate_messages):
stream = SendStream(1)
stream.populate(generate_messages(count=100, length=1))
header = struct.pack("?", False) + struct.pack(">I", 1)
max_bytes = 20
chunk_size = 10
chunks = list(stream.read(max_bytes, chunk_size))
expected = [
# 5 bytes header + 1 byte payload + 4 bytes of next header
header + b"\x00" + header[:4],
# remaining 1 byte of header + 1 byte payload
# + 5 bytes header + 1 byte payload + 2 bytes of next header
header[4:] + b"\x01" + header + b"\x02" + header[:2],
]
assert chunks == expected
assert sum(map(len, chunks)) == max_bytes
# queue is emptied into buffer
assert len(stream.buffer) == 100 * (5 + 1) - max_bytes # 580 bytes left
assert stream.queue.qsize() == 0
def test_multiple_large_messages(self, generate_messages):
stream = SendStream(1)
stream.populate(generate_messages(count=100, length=200))
header = struct.pack("?", False) + struct.pack(">I", 200)
max_bytes = 50
chunk_size = 10
chunks = list(stream.read(max_bytes, chunk_size))
expected = [
header + b"\x00\x00\x00\x00\x00", # 5 bytes header + 5 bytes payload
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", # 10 bytes payload
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", # 10 bytes payload
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", # 10 bytes payload
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", # 10 bytes payload
]
assert chunks == expected
assert sum(map(len, chunks)) == max_bytes
# queue is emptied into buffer
assert len(stream.buffer) == 100 * (5 + 200) - max_bytes # 20450 bytes left
assert stream.queue.qsize() == 0
def test_data_in_buffer_and_messages_in_queue(self, generate_messages):
stream = SendStream(1)
stream.buffer.write(b"\xff\xff\xff\xff\xff")
stream.populate(generate_messages(count=10, length=10))
header = struct.pack("?", False) + struct.pack(">I", 10)
max_bytes = 10
chunk_size = 10
chunks = list(stream.read(max_bytes, chunk_size))
expected = [b"\xff\xff\xff\xff\xff" + header]
assert chunks == expected
assert sum(map(len, chunks)) == max_bytes
# queue is emptied into buffer
assert len(stream.buffer) == 5 + 10 * (5 + 10) - max_bytes # 145 bytes left
assert stream.queue.qsize() == 0
| nameko/nameko-grpc | test/test_streams.py | test_streams.py | py | 15,281 | python | en | code | 57 | github-code | 6 | [
{
"api_name": "nameko_grpc.streams.ByteBuffer",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.streams.ByteBuffer",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.streams.ByteBuffer",
"line_number": 37,
"usage_type": "call"
... |
9087413248 | import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import os
import json
import math
import random, os
import shutil
# Extract and save MFCCs from audiofiles
def save_mfcc(dataset_path, json_path, n_mfcc=20, n_fft=2048, hop_length=1024, num_segments=1):
data = {
"mapping": [],
"mfcc": [],
"labels": []
}
samples_per_segment = int(SAMPLES_PER_FILE / num_segments)
expected_num_mfcc_vectors = math.ceil(samples_per_segment / hop_length)
for i, (dirpath, dirnames ,filenames) in enumerate(os.walk(dataset_path)):
if dirpath is not dataset_path:
dirpath_components = dirpath.split("\\")
semantic_label = dirpath_components[-1]
data["mapping"].append(semantic_label)
print("\nProcessing {}".format(semantic_label))
for file in filenames:
file_path = os.path.join(dirpath, file)
signal, sr = librosa.load(file_path, sr=SAMPLE_RATE)
for j in range(num_segments):
start_sample = samples_per_segment*j
finish_sample = start_sample + samples_per_segment
mfccs = librosa.feature.mfcc(signal[start_sample:finish_sample],
n_fft=n_fft,
hop_length=hop_length,
n_mfcc=n_mfcc, sr=SAMPLE_RATE)
mfccs = mfccs.T
#print(type(mfccs), np.shape(mfccs))
if len(mfccs) == expected_num_mfcc_vectors:
data["mfcc"].append(mfccs.tolist())
data["labels"].append(i-1)
print("{}, segment:{}".format(file_path, j+1))
    with open(json_path, 'w') as fp: #use the json_path argument rather than the module-level constant
json.dump(data, fp, indent=4)
return 0
# for mfcc
DATASET_PATH = "../Database_500"
JSON_PATH = "../Features/mfcc_500.json"
SAMPLE_RATE = 44100 # Sample rate of the audio signals (frequency)
DURATION = 10 # Duration of each audio data (seconds)
SAMPLES_PER_FILE = SAMPLE_RATE * DURATION
# -------------------------------------------------
save_mfcc(DATASET_PATH, JSON_PATH)
| NOR2R/ML_miniproject | pythonScripts/MFCCExtraction_SaveJson.py | MFCCExtraction_SaveJson.py | py | 2,349 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "math.ceil",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"... |
1073007549 | import traceback
from selenium.webdriver.common.by import By
from traceback import print_stack
import utilities.logger as log
import logging
class SeleniumDriver():
log = log.myLogger(logging.DEBUG)
def __init__(self, driver):
self.driver = driver
def getByType(self, locatorType):
locatorType = locatorType.lower()
if locatorType == "id":
return By.ID
elif locatorType == "name":
return By.NAME
elif locatorType == "xpath":
return By.XPATH
elif locatorType == "css":
return By.CSS_SELECTOR
elif locatorType == "class":
return By.CLASS_NAME
elif locatorType == "link":
return By.LINK_TEXT
else:
self.log.info("Locator type " + locatorType +
" not correct/supported")
return False
def getElement(self, locator, locatorType="id"):
element = None
try:
locatorType = locatorType.lower()
byType = self.getByType(locatorType)
element = self.driver.find_element(byType, locator)
self.log.info("Element found with locator: " + locator +
" and locatorType: " + locatorType)
except:
self.log.info("Element not found with locator: " + locator +
" and locatorType: " + locatorType)
self.log.error("Exception Caught: {}".format(traceback.format_exc()))
self.log.error("".join(traceback.format_stack()))
return element
def elementClick(self, locator="", locatorType = "xpath", element=None):
try:
if locator: # This means if locator is not empty
element = self.getElement(locator, locatorType)
element.click()
self.log.info("Clicked on element with locator: " + locator +
" locatorType: " + locatorType)
except:
self.log.info("Cannot click on the element with locator: " + locator +
" locatorType: " + locatorType)
print_stack()
def getText(self, locator="", locatorType = "xpath", element=None, info=""):
try:
if locator:
element = self.getElement(locator, locatorType)
text = element.text
if len(text) != 0:
self.log.info("Getting text on element :: " + info)
self.log.info("The text is :: '" + text + "'")
text = text.strip()
except:
self.log.error("Failed to get text on element " + info)
print_stack()
text = None
return text
def isElementPresent(self, locator="", locatorType = "xpath", element=None):
try:
if locator:
element = self.getElement(locator, locatorType)
if len(element) > 0:
self.log.info("Element present with locator: " + locator +
" locatorType: " + locatorType)
return True
else:
self.log.info("Element not present with locator: " + locator +
" locatorType: " + locatorType)
return False
except:
print("Element not found")
return False
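# Minimal usage sketch (added for illustration, not part of the original file).
# The URL and locator below are placeholders, and a chromedriver available on
# PATH is assumed.
if __name__ == '__main__':
    from selenium import webdriver
    driver = webdriver.Chrome()
    driver.get("https://example.com")
    sd = SeleniumDriver(driver)
    print(sd.isElementPresent(locator="//h1", locatorType="xpath"))
    print(sd.getText(locator="//h1", locatorType="xpath", info="page heading"))
    driver.quit()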
| rchroy/SamsungPhoneTest | base/my_selenium_driver.py | my_selenium_driver.py | py | 3,365 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "utilities.logger",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "utilities.logger.myLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "selenium.w... |
75234645628 | from odoo import models, fields, api, exceptions, _
from odoo.exceptions import Warning, ValidationError
import datetime
from dateutil.relativedelta import relativedelta
class ExtraContractInherit(models.Model):
_inherit = 'hr.contract'
date_of_birth = fields.Date(string='تاريخ ميلاد الموظف', compute='cal_contract_birth_from_emp', store=True)
# current_emp_age = fields.Integer(string='عمر الموظف', compute='get_age_for_alloc_by_birth')
current_emp_age = fields.Integer(string='عمر الموظف', )
now_date = fields.Date(default=fields.Date.today())
form_registration_date = fields.Date(string='تاريخ تسجيل الاستمارة')
form_six_date = fields.Date(string='تاريخ استمارة 6')
social_insurances = fields.Selection([
('insured', "مؤمن عليه"),
('not_insured', "غير مؤمن عليه"),
], string='التأمينات الاجتماعية', default='not_insured', related='employee_id.social_insurances', readonly=False)
non_insurance_reason = fields.Char(string='سبب عدم التأمين')
insurance_number = fields.Char(string='الرقم التأميني', related='employee_id.insurance_number', readonly=False)
insurances_calculation = fields.Selection([
('insurance_salary', "الراتب التأميني"),
('modified_salary', "راتب معدل"),
], string='طريقة احتساب التأمينات', default='insurance_salary')
register_method = fields.Selection([
('token', "Token"),
('office', "Office"),
], string='طريقة التسجيل', default='token', related='employee_id.register_method', readonly=False)
insurance_status = fields.Selection([
('open', "Open"),
('paid', "Paid"),
], string='حالة التأمين', default='open')
modified_salary = fields.Float(string='الراتب المعدل')
company_percentage = fields.Float(string='نسبة الشركة', readonly=True, compute='calc_emp_co_percentage')
employee_percentage = fields.Float(string='نسبة الموظف', readonly=True, compute='calc_emp_co_percentage')
over_age = fields.Float(string='عمر فوق السن', compute='calc_emp_co_percentage')
insurance_date_start = fields.Date('تاريخ بداية احتساب التأمينات', default=fields.Date.today, copy=True)
total_insurance = fields.Float(string='Total Insurance', )
total_insurance_company = fields.Float()
# total_insurance = fields.Float(string='Total Insurance', compute='cal_total_insurance')
# total_insurance_company = fields.Float(compute='cal_total_insurance')
insurance_table = fields.One2many('insurance.monthly', 'inv_history')
struct_id = fields.Many2one('hr.payroll.structure', string='Salary Structure', compute='cal_all_struct')
work_overtime = fields.Float()
bounce = fields.Float()
annual_raise = fields.Float()
retroactive_raise = fields.Float()
total_salary = fields.Float(compute='calculate_basic_salary', store=True, readonly=False)
@api.depends('wage', 'work_overtime', 'bounce', 'annual_raise', 'retroactive_raise')
def calculate_basic_salary(self):
for rec in self:
rec.total_salary = rec.wage + rec.work_overtime + rec.annual_raise + rec.bounce + rec.retroactive_raise
@api.depends('social_insurances')
def cal_all_struct(self):
for rec in self:
if rec.social_insurances == 'insured':
asd = self.env['hr.payroll.structure'].search([('is_insured', '=', True)], limit=1)
if asd:
rec.struct_id = asd.id
else:
rec.struct_id = False
elif rec.social_insurances == 'not_insured':
asd = self.env['hr.payroll.structure'].search([('not_insured', '=', True)], limit=1)
if asd:
rec.struct_id = asd.id
else:
rec.struct_id = False
else:
rec.struct_id = False
@api.depends('employee_id.birthday')
def cal_contract_birth_from_emp(self):
for rec in self:
if rec.employee_id:
print('heloo emp')
if rec.employee_id.birthday:
print('hello birth')
print(rec.date_of_birth)
rec.date_of_birth = rec.employee_id.birthday
print(rec.date_of_birth)
else:
print('no birth')
else:
print('no emp')
@api.onchange('social_insurances', 'wage')
def check_insuurance_range(self):
asd = self.env['emp.insurance'].search([('active', '=', True)])
for line in self:
if line.social_insurances == 'insured':
if line.wage:
if (line.wage < asd.min_insurance_salary):
                        raise ValidationError('The wage of this employee is outside the insurance range')
# (line.wage > asd.max_insurance_salary) or
@api.depends('wage', 'modified_salary', 'insurances_calculation', 'over_age')
def calc_emp_co_percentage(self):
asd = self.env['emp.insurance'].search([('active', '=', True)])
if asd:
for rec in self:
rec.over_age = asd.over_age
if rec.current_emp_age <= rec.over_age:
if rec.insurances_calculation == 'insurance_salary':
rec.company_percentage = (asd.company_percentage / 100) * rec.wage
rec.employee_percentage = (asd.employee_percentage / 100) * rec.wage
elif rec.insurances_calculation == 'modified_salary':
rec.company_percentage = (asd.company_percentage / 100) * rec.modified_salary
rec.employee_percentage = (asd.employee_percentage / 100) * rec.modified_salary
else:
if asd.is_over_age == True:
if rec.insurances_calculation == 'insurance_salary':
rec.company_percentage = (asd.over_age_company_percentage / 100) * rec.wage
rec.employee_percentage = (asd.over_age_employee_percentage / 100) * rec.wage
elif rec.insurances_calculation == 'modified_salary':
rec.company_percentage = (asd.over_age_company_percentage / 100) * rec.modified_salary
rec.employee_percentage = (asd.over_age_employee_percentage / 100) * rec.modified_salary
else:
raise ValidationError(
                            'There is no insurance configuration for over-age employees. Please configure it and try again.')
else:
            raise ValidationError('There is no insurance configuration for employees. Please configure it and try again.')
@api.depends("date_of_birth", "now_date")
def get_age_for_alloc_by_birth(self):
for rec in self:
if rec.now_date and rec.date_of_birth:
fmt = '%Y-%m-%d'
d1 = datetime.datetime.strptime(str(rec.now_date).strip(' \t\r\n').split(".")[0], fmt)
d2 = datetime.datetime.strptime(str(rec.date_of_birth).strip(' \t\r\n').split(".")[0], fmt)
years_between_dates = str((d1 - d2).days / 365)
rec.current_emp_age = int(float(years_between_dates))
print(years_between_dates)
@api.depends('insurance_table')
def cal_total_insurance(self):
for line in self:
if line.insurance_table:
for rec in line.insurance_table:
line.total_insurance += rec.emp_amount
line.total_insurance_company += rec.company_amount
@api.onchange('name', 'employee_id')
def cal_name_from_emp_number(self):
for rec in self:
if rec.employee_id and rec.employee_id.hiring_date:
rec.name = str(rec.employee_id.internal_number)
rec.date_start = rec.employee_id.hiring_date
rec.date_end = rec.date_start + relativedelta(years=1)
rec.trial_date_end = rec.date_start + relativedelta(months=3)
@api.onchange('name', 'state', 'form_registration_date', 'insurance_number', 'wage', 'company_percentage',
'employee_percentage', 'insurance_status',
'social_insurances', 'register_method')
def move_employee_fields(self):
for rec in self:
if rec.state == 'open':
check_emp = self.env['hr.employee'].search([('id', '=', rec.employee_id.id)])
if check_emp:
check_emp.write(
{
'contract_end_date': rec.date_end,
'form_registration_date': rec.form_registration_date,
'social_insurances': rec.social_insurances,
'insurance_number': rec.insurance_number,
'register_method': rec.register_method,
'insurance_status': rec.insurance_status,
'company_percentage': rec.company_percentage,
'employee_percentage': rec.employee_percentage,
})
class InsuranceMonthlyRecords(models.Model):
_name = 'insurance.monthly'
date = fields.Date('Date')
emp_amount = fields.Float('Employee Percentage')
company_amount = fields.Float('Company Percentage')
inv_history = fields.Many2one('hr.contract')
class HREmployee(models.Model):
_inherit = 'hr.employee.public'
    hiring_date = fields.Date(string='Hiring Date', store=True, copy=True)
internal_number = fields.Char(string="Tawzef Number")
employee_number = fields.Char(string="Client Number", store=True)
contract_end_date = fields.Date('Contract End Date')
medic_exam = fields.Char()
form_registration_date = fields.Date(string='تاريخ تسجيل الاستمارة', )
social_insurances = fields.Selection([
('insured', "مؤمن عليه"),
('not_insured', "غير مؤمن عليه"),
], string='التأمينات الاجتماعية', default='not_insured')
insurance_number = fields.Char(string='الرقم التأميني')
register_method = fields.Selection([
('token', "Token"),
('office', "Office"),
], string='طريقة التسجيل', default='token')
insurance_status = fields.Selection([
('open', "Open"),
('paid', "Paid"),
], string='حالة التأمين', default='open')
company_percentage = fields.Float(string='نسبة الشركة', readonly=True)
employee_percentage = fields.Float(string='نسبة الموظف', readonly=True)
company_period = fields.Float(string='نسبة الشركة خلال الفترة', readonly=True, store=True)
employee_period = fields.Float(string='نسبة الموظف خلال الفترة', readonly=True, store=True)
working_schedule = fields.Many2one('work.schedule')
service_id = fields.Many2one('product.product', domain="[('type','=','service')]", string="Current Service",
tracking=True)
branch_id = fields.Many2one('res.branch')
class HREmployeeContractInsurance(models.Model):
_inherit = 'hr.employee'
hiring_date = fields.Date(string='Hiring Date', store=True, copy=True)
internal_number = fields.Char(string="Tawzef Number")
employee_number = fields.Char(string="Client Number", store=True)
contract_end_date = fields.Date('Contract End Date')
medic_exam = fields.Char()
form_registration_date = fields.Date(string='تاريخ تسجيل الاستمارة', )
social_insurances = fields.Selection([
('insured', "مؤمن عليه"),
('not_insured', "غير مؤمن عليه"),
], string='التأمينات الاجتماعية', default='not_insured')
insurance_number = fields.Char(string='الرقم التأميني')
register_method = fields.Selection([
('token', "Token"),
('office', "Office"),
], string='طريقة التسجيل', default='token')
insurance_status = fields.Selection([
('open', "Open"),
('paid', "Paid"),
], string='حالة التأمين', default='open')
company_percentage = fields.Float(string='نسبة الشركة', readonly=True)
employee_percentage = fields.Float(string='نسبة الموظف', readonly=True)
company_period = fields.Float(string='نسبة الشركة خلال الفترة', readonly=True, store=True)
employee_period = fields.Float(string='نسبة الموظف خلال الفترة', readonly=True, store=True)
@api.onchange('name', 'insurance_number', 'social_insurances', 'register_method')
def cal_emp_insurance_data_to_contract(self):
for rec in self:
print('hello everybody')
print(self._origin.id)
contr = self.env['hr.contract'].search([('state', '=', 'open'), ('employee_id', '=', self._origin.id)])
if contr:
contr.write(
{
'social_insurances': rec.social_insurances,
'insurance_number': rec.insurance_number,
'register_method': rec.register_method,
'name': rec.internal_number,
})
# print('yes')
# for line in contr:
# line.insurance_number = rec.insurance_number
# line.social_insurances = rec.social_insurances
# line.register_method = rec.register_method
# print(line.social_insurances,rec.social_insurances)
# print(line.insurance_number, rec.insurance_number)
# print(line.register_method, rec.register_method)
else:
print('nooo')
class HRPayrollContractInsurance(models.Model):
_inherit = 'hr.payroll.structure'
is_insured = fields.Boolean('مؤمن عليه')
not_insured = fields.Boolean('غيرمؤمن عليه')
| emadraafatgad/visoneer | hr_insurance/models/employee_contract.py | employee_contract.py | py | 14,406 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "odoo.models.Model",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Date",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"... |
10417120713 | from __future__ import annotations
import argparse
from argparse import ArgumentParser
def render_region_graph_logic(args):
import hashlib
import re
import graphviz
from randovania.game_description import default_database
from randovania.game_description.db.dock_node import DockNode
from randovania.game_description.db.pickup_node import PickupNode
from randovania.game_description.requirements.base import Requirement
from randovania.games.game import RandovaniaGame
gd = default_database.game_description_for(RandovaniaGame(args.game))
regions = list(gd.region_list.regions)
single_image: bool = args.single_image
added_edges = set()
vulnerabilities_colors = {
"Normal Door": None,
"Morph Ball Door": None,
"Other Door": None,
"Scan Portal": None,
"Missile": "#ff1919",
"Super Missile": "#38c914",
"Seeker Launcher": "#b233e8",
"Power Bomb": "#dfe833",
"Wave Door": "#a30af5",
"Ice Door": "#7cdede",
"Plasma Door": "#870f0f",
"Light Door": "#bfd9d9",
"Dark Door": "#3b3647",
"Annihilator Door": "#616969",
"Light Portal": "#bfd9d9",
"Dark Portal": "#3b3647",
}
def _weakness_name(s: str):
return re.sub(r"\([^)]*\)", "", s).replace(" Blast Shield", "").strip()
def _hash_to_color(s: str) -> str:
h = hashlib.blake2b(s.encode("utf-8"), digest_size=3).digest()
return "#{:06x}".format(int.from_bytes(h, "big"))
def _add_connection(dot: graphviz.Digraph, dock_node: DockNode):
the_region = gd.region_list.nodes_to_region(dock_node)
source_area = gd.region_list.nodes_to_area(dock_node)
target_node = gd.region_list.node_by_identifier(dock_node.default_connection)
target_area = gd.region_list.nodes_to_area(target_node)
if dock_node.default_dock_weakness.requirement == Requirement.impossible():
return
if dock_node.identifier in added_edges:
return
weak_name = _weakness_name(dock_node.default_dock_weakness.name)
direction = None
if isinstance(target_node, DockNode) and _weakness_name(target_node.default_dock_weakness.name) == weak_name:
direction = "both"
added_edges.add(target_node.identifier)
color = vulnerabilities_colors.get(weak_name, _hash_to_color(weak_name))
dot.edge(
f"{the_region.name}-{source_area.name}",
f"{the_region.name}-{target_area.name}",
weak_name,
dir=direction,
color=color,
fontcolor=color,
)
added_edges.add(dock_node.identifier)
def _add_teleporter(dot: graphviz.Digraph, teleporter_node: DockNode):
source_region = gd.region_list.nodes_to_region(teleporter_node)
source_area = gd.region_list.nodes_to_area(teleporter_node)
target_node = gd.region_list.node_by_identifier(teleporter_node.default_connection)
target_region = gd.region_list.nodes_to_region(target_node)
target_area = gd.region_list.nodes_to_area(target_node)
weak_name = _weakness_name(teleporter_node.default_dock_weakness.name)
color = vulnerabilities_colors.get(weak_name, _hash_to_color(weak_name))
dot.edge(
f"{source_region.name}-{source_area.name}",
f"{target_region.name}-{target_area.name}",
weak_name,
color=color,
fontcolor=color,
)
def _cross_region_dock(node: DockNode):
return node.default_connection.region_name != node.identifier.region_name
per_game_colors = {
RandovaniaGame.METROID_PRIME_ECHOES: {
"Agon Wastes": "#ffc61c",
"Torvus Bog": "#20ff1c",
"Sanctuary Fortress": "#3d62ff",
"Temple Grounds": "#c917ff",
"Great Temple": "#c917ff",
},
}
colors = per_game_colors.get(gd.game)
if colors is None:
colors = {region.name: _hash_to_color(region.name) for region in gd.region_list.regions}
dark_colors = {
"Agon Wastes": "#a88332",
"Torvus Bog": "#149612",
"Sanctuary Fortress": "#112991",
"Temple Grounds": "#7d2996",
"Great Temple": "#7d2996",
}
if single_image:
full_dot = graphviz.Digraph(name=gd.game.short_name, comment=gd.game.long_name)
else:
full_dot = None
per_region_dot = {}
for region in regions:
if single_image:
this_dot = full_dot
else:
this_dot = graphviz.Digraph(name=region.name)
per_region_dot[region.name] = this_dot
for area in region.areas:
shape = None
if any(isinstance(node, DockNode) and _cross_region_dock(node) for node in area.nodes):
shape = "polygon"
c = (dark_colors if area.in_dark_aether else colors)[region.name]
fillcolor = "".join(f"{max(0, int(c[i * 2 + 1:i * 2 + 3], 16) - 64):02x}" for i in range(3))
this_dot.node(
f"{region.name}-{area.name}",
area.name,
color=c,
fillcolor=f"#{fillcolor}",
style="filled",
fontcolor="#ffffff",
shape=shape,
penwidth="3.0",
)
for node in area.nodes:
if args.include_pickups and isinstance(node, PickupNode):
this_dot.node(
str(node.pickup_index), re.search(r"Pickup [^(]*\(([^)]+)\)", node.name).group(1), shape="house"
)
this_dot.edge(f"{region.name}-{area.name}", str(node.pickup_index))
for region in regions:
print(f"Adding docks for {region.name}")
for area in region.areas:
for node in area.nodes:
if isinstance(node, DockNode) and not _cross_region_dock(node):
_add_connection(per_region_dot[region.name], node)
elif isinstance(node, DockNode) and _cross_region_dock(node) and args.include_teleporters:
_add_teleporter(per_region_dot[region.name], node)
if single_image:
full_dot.render(format="png", view=True, cleanup=True)
else:
for name, this_dot in per_region_dot.items():
this_dot.render(format="png", view=True, cleanup=True)
def render_regions_graph(sub_parsers):
parser: ArgumentParser = sub_parsers.add_parser(
"render-region-graph",
help="Renders an image with all area connections",
formatter_class=argparse.MetavarTypeHelpFormatter,
)
parser.add_argument("--include-teleporters", action="store_true")
parser.add_argument("--include-pickups", action="store_true")
parser.add_argument("--single-image", action="store_true")
parser.set_defaults(func=render_region_graph_logic)
| randovania/randovania | randovania/cli/commands/render_regions.py | render_regions.py | py | 6,961 | python | en | code | 165 | github-code | 6 | [
{
"api_name": "randovania.game_description.default_database.game_description_for",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.default_database",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "randovania.games.game.RandovaniaGa... |
34381391905 | """
Compile QEMU Version 5.1.0 or newer. 5.1.0 is when AVR support was introduced.
.. code-block:: console
$ wget https://download.qemu.org/qemu-6.1.0.tar.xz
$ tar xvJf qemu-6.1.0.tar.xz
$ cd qemu-6.1.0
$ ./configure --target-list="avr-softmmu"
$ make -j $(($(nproc)*4))
Change directory to this file's parent directory and run using unittest
.. code-block:: console
$ cd python/cmd_msg_test/
$ python -u -m unittest discover -v
test_connect (test_cmd_msg.TestSerial) ... qemu-system-avr: -chardev socket,id=serial_port,path=/tmp/tmpuuq3oqvj/socket,server=on: info: QEMU waiting for connection on: disconnected:unix:/tmp/tmpuuq3oqvj/socket,server=on
reading message from arduino
b''
b''
qemu-system-avr: terminating on signal 2 from pid 90395 (python)
ok
----------------------------------------------------------------------
Ran 1 test in 4.601s
OK
"""
import os
import sys
import signal
import pathlib
import tempfile
import unittest
import subprocess
import contextlib
import dataclasses
import main
# top level directory in this git repo is three levels up
REPO_ROOT = pathlib.Path(__file__).parents[2].resolve()
@contextlib.contextmanager
def start_qemu(bios):
with tempfile.TemporaryDirectory() as tempdir:
socket_path = pathlib.Path(tempdir, "socket")
qemu_cmd = [
"qemu-system-avr",
"-mon",
"chardev=none",
"-chardev",
f"null,id=none",
"-serial",
"chardev:serial_port",
"-chardev",
f"socket,id=serial_port,path={socket_path},server=on",
"-nographic",
"-machine",
"arduino-uno",
"-cpu",
"avr6-avr-cpu",
"-bios",
str(bios),
]
qemu_proc = subprocess.Popen(qemu_cmd, start_new_session=True)
serial_port_path = pathlib.Path(tempdir, "ttyACM0")
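        # socat bridges QEMU's unix socket to a pseudo-terminal so the test can
        # open it like a real Arduino serial device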
socat_cmd = [
"socat",
f"PTY,link={serial_port_path},rawer,wait-slave",
f"UNIX:{socket_path}",
]
socat_proc = subprocess.Popen(socat_cmd, start_new_session=True)
try:
while not serial_port_path.exists():
pass
yield str(serial_port_path)
finally:
# Kill the whole process group (for problematic processes like qemu)
os.killpg(qemu_proc.pid, signal.SIGINT)
os.killpg(socat_proc.pid, signal.SIGINT)
qemu_proc.wait()
socat_proc.wait()
class RunQEMU(unittest.TestCase):
"""
Base class which will start QEMU to emulate an Arduino Uno machine using the
BIOS (the .elf output of arduino-cli compile) provided.
qemu-system-avr from QEMU Version 5.1.0 or newer is required.
Starts a new virtual machine for each test_ function.
"""
BIOS = REPO_ROOT.joinpath("build", "serial_cmd_test.ino.elf")
def setUp(self):
self.qemu = start_qemu(self.BIOS)
        # __enter__ is called at the beginning of a `with` block. __exit__ is
# called at the end of a `with` block. By calling these functions
# explicitly within setUp() and tearDown() we ensure a new VM is created
# and destroyed each time.
self.serial_port = self.qemu.__enter__()
def tearDown(self):
self.qemu.__exit__(None, None, None)
del self.qemu
class TestSerial(RunQEMU, unittest.TestCase):
def test_connect(self):
main.main(self.serial_port)
| sedihglow/braccio_robot_arm | python/archive/QEMU_arduino_serial_testing/test_cmd_msg.py | test_cmd_msg.py | py | 3,529 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "tempfile.TemporaryDirectory",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "subprocess.Pope... |
72532766909 | """
Functions and models to query scicrunch service REST API (https://scicrunch.org/api/)
- http client for API requests
- Error handling:
- translates network errors
- translates request error codes
Free functions with raw request scicrunch.org API
- client request context
- raise_for_status=True -> Raise an aiohttp.ClientResponseError if the response status is 400 or higher
- validates response and prunes using pydantic models
SEE test_scicrunch_service_api.py
"""
import logging
from typing import Any
from aiohttp import ClientSession
from pydantic import BaseModel, Field
from yarl import URL
from .models import ResourceHit
from .settings import SciCrunchSettings
logger = logging.getLogger(__name__)
# MODELS --
#
# NOTE: These models are a truncated version of the data payload for a scicrunch response.
# NOTE: Examples of complete responses can be found in test_scicrunch.py::mock_scicrunch_service_api
#
class FieldItem(BaseModel):
field_name: str = Field(..., alias="field")
required: bool
value: str | None | list[Any] = None
class ResourceView(BaseModel):
resource_fields: list[FieldItem] = Field([], alias="fields")
version: int
curation_status: str
last_curated_version: int
scicrunch_id: str
@classmethod
def from_response_payload(cls, payload: dict):
assert payload["success"] == True # nosec
return cls(**payload["data"])
@property
def is_curated(self) -> bool:
return self.curation_status.lower() == "curated"
def _get_field(self, fieldname: str):
for field in self.resource_fields:
if field.field_name == fieldname:
return field.value
        raise ValueError(f"Cannot find expected field {fieldname}")
def get_name(self):
return str(self._get_field("Resource Name"))
def get_description(self):
return str(self._get_field("Description"))
def get_resource_url(self):
return URL(str(self._get_field("Resource URL")))
class ListOfResourceHits(BaseModel):
__root__: list[ResourceHit]
# REQUESTS
async def get_all_versions(
unprefixed_rrid: str, client: ClientSession, settings: SciCrunchSettings
) -> list[dict[str, Any]]:
async with client.get(
f"{settings.SCICRUNCH_API_BASE_URL}/resource/versions/all/{unprefixed_rrid}",
params={"key": settings.SCICRUNCH_API_KEY.get_secret_value()},
raise_for_status=True,
) as resp:
body = await resp.json()
output: list[dict[str, Any]] = body.get("data") if body.get("success") else []
return output
async def get_resource_fields(
rrid: str, client: ClientSession, settings: SciCrunchSettings
) -> ResourceView:
async with client.get(
f"{settings.SCICRUNCH_API_BASE_URL}/resource/fields/view/{rrid}",
params={"key": settings.SCICRUNCH_API_KEY.get_secret_value()},
raise_for_status=True,
) as resp:
body = await resp.json()
assert body.get("success") # nosec
return ResourceView(**body.get("data", {}))
async def autocomplete_by_name(
guess_name: str, client: ClientSession, settings: SciCrunchSettings
) -> ListOfResourceHits:
async with client.get(
f"{settings.SCICRUNCH_API_BASE_URL}/resource/fields/autocomplete",
params={
"key": settings.SCICRUNCH_API_KEY.get_secret_value(),
"field": "Resource Name",
"value": guess_name.strip(),
},
raise_for_status=True,
) as resp:
body = await resp.json()
assert body.get("success") # nosec
return ListOfResourceHits.parse_obj(body.get("data", []))
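# --- Usage sketch (illustrative only, not part of the original module) ---
# A minimal example of how one of the request helpers above might be called.
# It assumes a configured SciCrunchSettings instance; the search term is an
# arbitrary placeholder.
async def _example_autocomplete(settings: SciCrunchSettings) -> None:
    async with ClientSession() as client:
        hits = await autocomplete_by_name("neuron", client, settings)
        for hit in hits.__root__:
            logger.debug("autocomplete hit: %s", hit)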
| ITISFoundation/osparc-simcore | services/web/server/src/simcore_service_webserver/scicrunch/_rest.py | _rest.py | py | 3,706 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "pydantic.Field",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "typing.Any",
... |
6358696084 | #!/usr/bin/env python3
import json
from statistics import median
from datetime import datetime, timedelta
SENT_MESSAGE = "Sent message"
RECEIVED_MESSAGE = "Received message"
TRANSACTION_INIT = "Initialising transaction"
TRANSACTION_COMMIT = "Delivered transaction"
WITNESS_SET_SELECTED = "Witness set selected"
WITNESS_SET_SELECTION = "Witness set selection"
SIMULATION_STARTED = "Simulation started"
RELIABLE_ACCOUNTABILITY = "reliable_accountability"
CONSISTENT_ACCOUNTABILITY = "consistent_accountability"
LOG_PREFIXES = {
SENT_MESSAGE,
RECEIVED_MESSAGE,
TRANSACTION_INIT,
TRANSACTION_COMMIT,
WITNESS_SET_SELECTED,
WITNESS_SET_SELECTION,
SIMULATION_STARTED
}
class TransactionInitInfo:
def __init__(self, process_id, init_timestamp):
self.process_id = process_id
self.init_timestamp = init_timestamp
class TransactionCommitInfo:
def __init__(self, process_id, received_messages_cnt, commit_timestamp):
self.process_id = process_id
self.received_messages_cnt = received_messages_cnt
self.commit_timestamp = commit_timestamp
def drop_date(line):
start = 0
while start < len(line):
if line[start].isalpha():
break
start += 1
return line[start::]
def parse_data_from_logged_line(line):
return list(map(
lambda elem: elem.split(': ')[1],
line.split(', ')
))
def get_log_line_prefix(line):
prefix = ""
for log_prefix in LOG_PREFIXES:
if line.startswith(log_prefix):
prefix = log_prefix
break
return prefix
def parse_data_from_files(directory, n):
sent_messages = {}
received_messages = {}
transaction_inits = {}
transaction_commit_infos = {}
transaction_histories = {}
transaction_witness_sets = {
"own": {},
"pot": {}
}
simulation_start = None
simulation_end = None
for process_id in range(n):
f = open(f"{directory}/process{process_id}.txt", "r")
for line in f:
line = drop_date(line.strip(" \n"))
prefix = get_log_line_prefix(line)
if prefix == "":
continue
data = parse_data_from_logged_line(line)
timestamp = int(data[-1])
if simulation_end is None or timestamp > simulation_end:
simulation_end = timestamp
if prefix == SIMULATION_STARTED:
if simulation_start is None or timestamp < simulation_start:
simulation_start = timestamp
elif prefix == SENT_MESSAGE:
sent_messages[data[0]] = timestamp
elif prefix == RECEIVED_MESSAGE:
received_messages[data[0]] = timestamp
elif prefix == TRANSACTION_INIT:
transaction_inits[data[0]] = \
TransactionInitInfo(process_id=process_id, init_timestamp=timestamp)
elif prefix == TRANSACTION_COMMIT:
transaction = data[0]
received_messages_cnt = int(data[2])
if transaction_commit_infos.get(transaction) is None:
transaction_commit_infos[transaction] = []
transaction_commit_infos[transaction].append(
TransactionCommitInfo(
process_id=process_id,
received_messages_cnt=received_messages_cnt,
commit_timestamp=timestamp)
)
elif prefix == WITNESS_SET_SELECTION:
transaction = data[0]
assert data[2][0] == '[' and data[2][-1] == ']'
history_str = data[2][1:-1]
history = set()
if len(history_str) != 0:
history = set(history_str.split(' '))
if transaction_histories.get(transaction) is None:
transaction_histories[transaction] = []
transaction_histories[transaction].append(history)
elif prefix == WITNESS_SET_SELECTED:
ws_type = data[0]
transaction = data[1]
assert data[2][0] == '[' and data[2][-1] == ']'
pids_str = data[2][1:-1]
pids = set()
if len(pids_str) != 0:
pids = set(pids_str.split(' '))
if transaction_witness_sets[ws_type].get(transaction) is None:
transaction_witness_sets[ws_type][transaction] = []
transaction_witness_sets[ws_type][transaction].append(pids)
return {
"sent_messages": sent_messages,
"received_messages": received_messages,
"transaction_inits": transaction_inits,
"transaction_commit_infos": transaction_commit_infos,
"transaction_histories": transaction_histories,
"transaction_witness_sets": transaction_witness_sets,
"simulation_start": simulation_start,
"simulation_end": simulation_end
}
def calc_message_latencies(sent_messages, received_messages):
sum_latency = 0
message_cnt = 0
for message, send_timestamp in sent_messages.items():
receive_timestamp = received_messages.get(message)
if receive_timestamp is None:
continue
latency = receive_timestamp - send_timestamp
sum_latency += latency
message_cnt += 1
if message_cnt == 0:
return 0
return sum_latency / message_cnt
def calc_transaction_stat(n, transaction_inits, transaction_commit_infos, simulation_start, simulation_end):
sum_latency = 0
sum_messages_exchanged = 0
transaction_cnt = 0
latencies = []
throughput_distribution = {}
for transaction, init_info in transaction_inits.items():
commit_infos = transaction_commit_infos.get(transaction)
if commit_infos is None:
# print(f"Transaction {transaction} was not committed")
continue
# if len(commit_infos) != n:
# committed_pids = set(map(lambda commit_info: commit_info.process_id, commit_infos))
# not_committed_pids = set(range(n)).difference(committed_pids)
# print(f"Transaction {transaction} wasn't committed by processes {not_committed_pids}")
commit_timestamp = None
messages_exchanged = 0
for commit_info in commit_infos:
if commit_info.process_id == init_info.process_id:
commit_timestamp = commit_info.commit_timestamp
messages_exchanged += commit_info.received_messages_cnt
if commit_timestamp is None:
# print(f"Transaction {transaction} wasn't committed by source")
continue
commit_date_time = datetime.fromtimestamp(commit_timestamp // 1e9)
throughput_distribution[commit_date_time] = \
throughput_distribution.get(commit_date_time, 0) + 1
latency = commit_timestamp - init_info.init_timestamp
latencies.append(latency)
sum_latency += latency
sum_messages_exchanged += messages_exchanged
transaction_cnt += 1
first_commit = datetime.max
last_commit = datetime.min
for commit_date_time, _ in throughput_distribution.items():
if commit_date_time < first_commit:
first_commit = commit_date_time
if commit_date_time > last_commit:
last_commit = commit_date_time
while first_commit < last_commit:
throughput_distribution[first_commit] = throughput_distribution.get(first_commit, 0)
first_commit = first_commit + timedelta(seconds=1)
avg_latency = 0
avg_messages_exchanged = 0
median_latency = -1
if transaction_cnt > 0:
avg_latency = sum_latency / transaction_cnt
median_latency = median(latencies)
avg_messages_exchanged = int(sum_messages_exchanged / transaction_cnt)
throughput = transaction_cnt * 1e9 / (simulation_end - simulation_start)
return avg_latency, median_latency, avg_messages_exchanged, throughput, \
median(list(throughput_distribution.values())), transaction_cnt
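# Largest pairwise difference between the given sets, measured as |union| - |intersection|.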
def get_distance_metrics(sets):
max_diff = 0
for i in range(len(sets)):
for j in range(i + 1, len(sets)):
intersection_size = len(sets[i].intersection(sets[j]))
union_size = len(sets[i].union(sets[j]))
max_diff = max(max_diff, union_size - intersection_size)
return max_diff
def get_witness_sets_diff_metrics(transaction_witness_sets, n, ws_type):
metrics = []
for transaction, witness_sets in transaction_witness_sets[ws_type].items():
if len(witness_sets) != n:
continue
metrics.append(get_distance_metrics(witness_sets))
return metrics
def get_histories_diff_metrics(transaction_histories, n):
metrics = []
for transaction, histories in transaction_histories.items():
if len(histories) != n:
continue
metrics.append(get_distance_metrics(histories))
return metrics
def calculate_stat(directory, n):
data = parse_data_from_files(directory, n)
avg_message_latency = \
calc_message_latencies(
sent_messages=data["sent_messages"],
received_messages=data["received_messages"]
)
avg_transaction_latency, median_latency, avg_messages_exchanged, throughput, median_throughput, transaction_cnt = \
calc_transaction_stat(
n=n,
transaction_inits=data["transaction_inits"],
transaction_commit_infos=data["transaction_commit_infos"],
simulation_start=data["simulation_start"],
simulation_end=data["simulation_end"]
)
own_ws_diff_metrics = get_witness_sets_diff_metrics(
transaction_witness_sets=data["transaction_witness_sets"],
n=n,
ws_type="own"
)
pot_ws_diff_metrics = get_witness_sets_diff_metrics(
transaction_witness_sets=data["transaction_witness_sets"],
n=n,
ws_type="pot"
)
histories_diff_metrics = get_histories_diff_metrics(
transaction_histories=data["transaction_histories"],
n=n
)
return {
"avg_message_latency": avg_message_latency / 1e9,
"avg_transaction_latency": avg_transaction_latency / 1e9,
"median_transaction_latency": median_latency / 1e9,
"avg_messages_exchanged": avg_messages_exchanged,
"throughput": throughput,
"median_throughput": median_throughput,
"transaction_cnt": transaction_cnt,
"own_witness_sets_diff_metrics": own_ws_diff_metrics,
"pot_witness_sets_diff_metrics": pot_ws_diff_metrics,
"histories_diff_metrics": histories_diff_metrics
}
if __name__ == "__main__":
input_file = open("input.json")
input_json = json.load(input_file)
protocol = input_json["protocol"]
process_cnt = input_json["parameters"]["n"]
print(f"Protocol: {protocol}, {process_cnt} processes")
print()
stat = calculate_stat(directory="outputs", n=process_cnt)
avg_message_latency = stat["avg_message_latency"]
avg_transaction_latency, median_transaction_latency, avg_messages_exchanged = \
stat["avg_transaction_latency"], stat["median_transaction_latency"], stat["avg_messages_exchanged"]
throughput, median_throughput, transaction_cnt = \
stat["throughput"], stat["median_throughput"], stat["transaction_cnt"]
own_witness_sets_diff_metrics = stat["own_witness_sets_diff_metrics"]
pot_witness_sets_diff_metrics = stat["pot_witness_sets_diff_metrics"]
histories_diff_metrics = stat["histories_diff_metrics"]
print("Message latencies:")
print(f"\tAverage: {avg_message_latency}")
print()
print("Transaction latency statistics:")
print(f"\tAverage: {avg_transaction_latency}")
print(f"\tMedian: {median_transaction_latency}")
print()
print(f"Average number of exchanged messages per one transaction: {avg_messages_exchanged}")
print()
print("Throughput per second:")
print(f"\tAverage: {throughput}")
print(f"\tMedian: {median_throughput}")
print()
print(f"Transactions committed: {transaction_cnt}")
print()
if len(own_witness_sets_diff_metrics) != 0:
print(f"Difference metrics for own witness sets: {own_witness_sets_diff_metrics}")
print()
if len(pot_witness_sets_diff_metrics) != 0:
print(f"Difference metrics for pot witness sets: {pot_witness_sets_diff_metrics}")
print()
if len(histories_diff_metrics) != 0:
print(f"Difference metrics of histories: {histories_diff_metrics}")
print()
| interestIngc/simulation-analyzer | logs_analyzer.py | logs_analyzer.py | py | 12,715 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.max",
"line_number": 210,
"usage_type": "attribute"
},
{
"a... |
30791829002 | from scipy.fftpack import dct, idct
# implement 2D DCT
def dct2(a):
return dct(dct(a.T, norm='ortho').T, norm='ortho')
# implement 2D IDCT
def idct2(a):
return idct(idct(a.T, norm='ortho').T, norm='ortho')
import cv2
import numpy as np
import matplotlib.pylab as plt
# read lena RGB image and convert to grayscale
im =cv2.imread("G:/Classical Object Detection/1.jpg",0)
imF = dct2(im)
rows, cols = imF.shape
imF2=np.array([[0 for i in range(cols)] for j in range(rows)])
print(imF2.shape)
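# keep only the top-left 10% x 10% block of DCT coefficients (the lowest
# frequencies); every other coefficient stays zero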
for i in range(imF.shape[0]//10):
for j in range(imF.shape[1]//10):
imF2[i][j]=imF[i][j]
im1 = idct2(imF2)
# check if the reconstructed image is nearly equal to the original image
print(np.allclose(im, im1))
# prints False: most DCT coefficients were discarded above, so the reconstruction is only approximate
# plot original and reconstructed images with matplotlib.pylab
plt.gray()
plt.subplot(121), plt.imshow(im), plt.axis('off'), plt.title('original image', size=10)
plt.subplot(122), plt.imshow(im1), plt.axis('off'), plt.title('reconstructed image (DCT+IDCT)', size=10)
plt.show() | NadiaFaramarzi/ClassicalObjectDetection | Codes/DCT(discrete cosine transform).py | DCT(discrete cosine transform).py | py | 1,048 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scipy.fftpack.dct",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "scipy.fftpack.idct",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line... |
75224158908 | from asyncio import run
import discord
from discord import app_commands
from discord import Interaction
from discord import Intents
from discord.ext import commands, tasks
import os
import asqlite
# Import the Guild ID
from misc.load import TEST_SERVER_ID
# Import the bot token
from Secure import BOT_TOKEN
# Bot intents are set to default
intents = Intents.default()
intents.message_content = True
intents.members = True
# Create the bot
bot = commands.Bot(command_prefix='!', intents=intents, activity=discord.Activity(type=discord.ActivityType.playing, name="with myself, cause these commands are amazing!"))
# Create the on_ready event
@bot.event
async def on_ready():
print('----------------------')
print(f'Logged in as {bot.user.name}#{bot.user.discriminator}')
print('----------------------')
async def first_start_db():
# Database setup
connection = await asqlite.connect('database.db')
cursor = await connection.cursor()
await cursor.execute("""CREATE TABLE IF NOT EXISTS user_data (
id INTEGER PRIMARY KEY,
user_id INTEGER,
name TEXT,
class TEXT,
zone INTEGER,
max_zone INTEGER,
level INTEGER,
xp INTEGER,
xp_cap INTEGER,
gold INTEGER,
hp INTEGER,
max_hp INTEGER,
attack INTEGER,
defense INTEGER,
agility INTEGER,
luck INTEGER,
intelligence INTEGER
)""")
await cursor.execute("""CREATE TABLE IF NOT EXISTS monster_data (
id INTEGER PRIMARY KEY,
name TEXT,
zone INTEGER,
is_boss BOOLEAN,
level INTEGER,
hp INTEGER,
max_hp INTEGER,
attack INTEGER,
defense INTEGER,
dodge_chance INTEGER,
give_xp INTEGER,
give_gold INTEGER
)""")
await cursor.execute("""CREATE TABLE IF NOT EXISTS shop_data (
item_id INTEGER PRIMARY KEY,
name TEXT,
price INTEGER,
description TEXT,
sellback_price INTEGER
)""")
await cursor.execute("""CREATE TABLE IF NOT EXISTS inventory_data (
user_id INTEGER,
item_id INTEGER,
item_name TEXT,
item_amount INTEGER,
item_sell_price INTEGER,
item_sellable BOOLEAN
)""")
await cursor.execute("""CREATE TABLE IF NOT EXISTS zone_data (
zone_id INTEGER PRIMARY KEY,
name TEXT,
description TEXT
)""")
await connection.commit()
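# Seed the shop, zone and monster tables with their starting rows.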
async def add_db_items():
connection = await asqlite.connect('database.db')
cursor = await connection.cursor()
# Add items to the shop
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (1, 'Small Health Potion', 10, 'Heals 25 HP', 5)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (2, 'Medium Health Potion', 20, 'Heals 50 HP' , 10)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (3, 'Large Health Potion', 30, 'Heals 75 HP', 15)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (4, 'Perfect Health Potion', 50, 'Heals 100 HP', 25)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (100, 'Wheat seed', 100, 'Used to farm wheat', 0)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (101, 'Carrot seed', 300, 'Used to farm carrots', 0)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (102, 'Potato seed', 500, 'Used to farm potatoes', 0)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (103, 'Beetroot seed', 750, 'Used to farm beetroot', 0)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (104, 'Melon seed', 1000, 'Used to farm melons', 0)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (105, 'Pumpkin seed', 1500, 'Used to farm pumpkins', 0)")
await connection.commit()
# Add zones to the zone database
await cursor.execute("INSERT INTO zone_data (zone_id, name, description) VALUES (1, 'The young forest', 'A small bright forest full of life')")
await connection.commit()
await cursor.execute("INSERT INTO zone_data (zone_id, name, description) VALUES (2, 'The deep forest', 'A deep dark forest roamed only by ferocious animals')")
await connection.commit()
await cursor.execute("INSERT INTO zone_data (zone_id, name, description) VALUES (3, 'The Adventurer Road', 'The road that leads to the Town of Beginnings!')")
await connection.commit()
await cursor.execute("INSERT INTO zone_data (zone_id, name, description) VALUES (4, 'The Town of Beginnings', 'The town where everything starts! Fight good adventurers to grab the attention of the Adventurers Guild leader!')")
await connection.commit()
# Add monsters to the monster database
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Slimes', 1, 0, 1, 15, 15, 5, 5, 0, 5, 1)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Spider', 1, 0, 1, 20, 20, 7, 5, 0, 8, 3)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Rabbit', 1, 0, 1, 10, 10, 5, 5, 0, 3, 1)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Giant Spider', 1, 1, 1, 30, 30, 25, 10, 0, 15, 10)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Wolf', 2, 0, 1, 25, 25, 20, 15, 25, 15, 15)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Brown Bear', 2, 0, 1, 40, 40, 15, 30, 0, 15, 15)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Wolf Pack Leader', 2, 1, 1, 50, 50, 30, 15, 30, 25, 25)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Bandit Archer', 3, 0 , 1, 30, 30, 40, 0, 20, 25, 25)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Bandit Thug', 3, 0, 1, 50, 50, 25, 20, 0, 25, 25)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Armored Bandit', 3, 0, 1, 60, 60, 20, 30, 0, 25, 25)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Rogue Bandit', 3, 0, 1, 35, 35, 40, 0, 40, 25, 25)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Bandit Leader', 3, 1, 1, 70, 70, 30, 20, 20, 40, 50)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Adventurer Fighter', 4, 0, 1, 100, 100, 15, 25, 0, 30, 75)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Adventurer Mage', 4, 0, 1, 50, 50, 50, 10, 20, 30, 75)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Adventurer Archer', 4, 0, 1, 75, 75, 30, 5, 40, 30, 75)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Adventurer Guild Leader', 4, 1, 1, 150, 150, 25, 30, 20, 50, 100)")
await connection.commit()
tree = bot.tree
@tree.command(name='test', description='testing command to make sure everything is working')
@app_commands.checks.cooldown(1, 20)
async def test(interaction: Interaction):
await interaction.response.send_message(f'The command is working properly, {interaction.user.mention}!')
# Sync the tree commands
def check_if_its_me(interaction: discord.Interaction) -> bool:
return interaction.user.id == 263628384674775040
@tree.command(name='sync', description='Sync all the commands')
@app_commands.checks.cooldown(1, 100)
@app_commands.check(check_if_its_me)
async def sync(interaction: Interaction):
await tree.sync()
await interaction.response.send_message('Synced all the commands', ephemeral=True)
@tree.command(name="launchdb", description="launches the database")
@app_commands.check(check_if_its_me)
async def launchdb(interaction: Interaction):
await first_start_db()
await interaction.response.send_message("Database launched successfully", ephemeral=True)
@tree.command(name='adddbitems', description='Adds items to the database')
@app_commands.check(check_if_its_me)
async def adddbitems(interaction: Interaction):
await add_db_items()
await interaction.response.send_message("Items added successfully", ephemeral=True)
# Error checks
@test.error
async def on_test_error(interaction: Interaction, error: app_commands.AppCommandError):
if isinstance(error, app_commands.errors.CommandOnCooldown):
embed = discord.Embed(title='Error', description=f'You are on cooldown, please wait **{error.retry_after:.2f} seconds**', color=0xff0000)
await interaction.response.send_message(embed=embed, ephemeral=True)
# Check errors for all app commands
@tree.error
async def on_app_command_error(interaction: Interaction, error: app_commands.AppCommandError):
if isinstance(error, app_commands.errors.CommandOnCooldown):
embed = discord.Embed(title='Error', description=f'You are on cooldown, please wait {error.retry_after:.2f} seconds')
await interaction.response.send_message(embed=embed)
elif isinstance(error, app_commands.errors.MissingPermissions):
embed = discord.Embed(title='Error', description=f'You are missing permissions to use this command, please contact the owner or the bot developer if you believe this is an issue')
await interaction.response.send_message(embed=embed, ephemeral=True)
elif isinstance(error, app_commands.errors.MissingRole):
embed = discord.Embed(title='Error', description=f'You are missing the role to use this command, please contact the owner or the bot developer if you believe this is an issue')
await interaction.response.send_message(embed=embed, ephemeral=True)
elif isinstance(error, app_commands.errors.BotMissingPermissions):
embed = discord.Embed(title='Error', description=f'The bot is missing the permission to do the command, please contact the owner or the bot developer if you believe this is an issue')
await interaction.response.send_message(embed=embed, ephemeral=True)
else:
raise error
# Load all cogs
async def load():
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
await bot.load_extension(f'cogs.{filename[:-3]}')
print("All cogs loaded successfully")
print('----------------------')
for filename in os.listdir('./rpgcogs'):
if filename.endswith('.py'):
await bot.load_extension(f'rpgcogs.{filename[:-3]}')
print("All rpgcogs loaded successfully")
print('----------------------')
async def main():
await load()
await bot.start(BOT_TOKEN)
run(main()) | Alexici/discordpybot | main.py | main.py | py | 12,874 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "discord.Intents.default",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "discord.Intents",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "disc... |
31841821973 | from rest_framework.views import APIView
from app.super_admin.controller import SuperAdminController
from common.django_utility import send_response
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework.permissions import IsAuthenticated
from dateutil.parser import parse
superAdminController = SuperAdminController()
class SuperAdminView(APIView):
authentication_classes = [JWTAuthentication]
permission_classes = [IsAuthenticated]
def get(self, request):
response_data = []
description = None
total_records = None
try:
exported_date = None
if request.query_params.get('exported_date'):
exported_date = parse(request.query_params.get('exported_date'))
response_data = superAdminController.export(exported_date)
exception_occured = False
except Exception as error_msg:
description = error_msg
exception_occured = True
finally:
return send_response(exception_occured=exception_occured, custom_description=description, request=request, total_records=total_records, response_data=response_data)
| ayush431/m_56studios | m56studios_be/app/super_admin/views.py | views.py | py | 1,194 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "app.super_admin.controller.SuperAdminController",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "rest_framework_simplejwt.authentication.JWTAuthentication",
"line... |
19026984837 | from django.shortcuts import render,redirect
import json
from django.conf import settings
import redis
from rest_framework.response import Response
from django.http import HttpResponse
from django.http import JsonResponse
import requests
from .forms import SomeForm
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
# Connect to our Redis instance
redis_instance = redis.StrictRedis(host=settings.REDIS_HOST,
port=settings.REDIS_PORT, db=0)
#view for get and post request
@csrf_exempt
def manage_items(request, *args, **kwargs):
if request.method == 'POST':
form = SomeForm(request.POST)
if form.is_valid():
item=(form.cleaned_data['name'])
item = eval(item)
key = list(item.keys())[0]
value = item[key]
if redis_instance.exists(key):
return redirect('failure')
else:
redis_instance.set(key, value)
return redirect('success')
else:
form = SomeForm()
return render(request, 'app1/Home.html')
return render(request,'app1/Home.html')
#view for post request success message
def success(request):
return render(request,'app1/success.html')
#view for post request failure message
def failure(request):
return render(request,'app1/failure.html')
#view for get request
def get_single_key(request):
if request.method == 'GET':
keyword = request.GET.get('search')
value = redis_instance.get(keyword)
if value:
data = {'key': keyword,'value': value.decode('utf-8'),'msg': 'success'}
else:
data = {'key': keyword,'value': None,'msg': 'Key Not found'}
return render(request,'app1/Home.html',{"data":data})
#view for delete request
def delete_key(request):
if request.method == 'GET':
keyword = request.GET.get('delete')
result = redis_instance.delete(keyword)
if result == 1:
response = {'msg': f"{keyword} successfully deleted"}
else:
response = {'key': keyword,'value': None,'msg': 'Key Not found'}
return render(request,'app1/Home.html',{"response":response}) | nischithmk/freshworks_assignment | app1/views.py | views.py | py | 2,317 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "redis.StrictRedis",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.REDIS_HOST",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 16,
"usage_type": "name"
},
{
"api_n... |
73203288508 | from konlpy.tag import Kkma
from konlpy.tag import Twitter
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import normalize
import numpy as np
import pymongo
from pymongo import MongoClient
import pymysql
class SentenceTokenizer(object):
def __init__(self):
self.kkma = Kkma()
self.twitter = Twitter()
self.stopwords = []
def text_to_sentence(self, text):
sentences = self.kkma.sentences(text)
        # if a sentence is too short (<= 10 characters), append it to the previous sentence
for i in range(0, len(sentences)):
if len(sentences[i]) <= 10:
sentences[i-1] += (' ' + sentences[i])
sentences[i] = ''
return sentences
def get_words(self, sentences):
words = []
for s in sentences:
            if s != '':
                # extract nouns with the twitter tagger's nouns(string) function, then keep
                # only words that are not stop words and are at least two characters long
words.append(' '.join([word for word in self.twitter.nouns(str(s)) if word not in self.stopwords and len(word) > 1]))
return words
class GraphMatrix(object):
def __init__(self):
self.tfidf = TfidfVectorizer()
self.cnt_vec = CountVectorizer()
self.graph_sentence = []
    # build a sentence-term matrix using the TF-IDF model
def create_sentence_graph(self, sentence):
tfidf_matrix = self.tfidf.fit_transform(sentence).toarray()
        # multiply the sentence-term matrix by its transpose to build the sentence correlation matrix (weight calculation)
self.graph_sentence = np.dot(tfidf_matrix, tfidf_matrix.T)
return self.graph_sentence
    # build a sentence-term matrix using raw term counts
def create_words_graph(self, sentence):
cnt_vec_mat = normalize(self.cnt_vec.fit_transform(sentence).toarray().astype(float), axis=0)
# sentence 배열에서 추출한 vocabulary
vocab = self.cnt_vec.vocabulary_
return np.dot(cnt_vec_mat.T, cnt_vec_mat), {vocab[word] : word for word in vocab}
# TextRank score computation
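# The PageRank-style scores are obtained in closed form by solving the linear
# system (I - d*M) r = (1 - d) * 1, where M is the column-normalised adjacency
# matrix and d is the damping factor.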
class TextRank(object):
def get_rank(self, graph, d=0.85):
A = graph
matrix_size = A.shape[0]
for i in range(matrix_size):
A[i, i] = 0 # 대각선 부분 = 0
link_sum = np.sum(A[:,i]) # A[:, i] = A[:][i]
if link_sum != 0:
A[:,i] /= link_sum
A[:,i] *= -d
A[i,i] = 1
B = (1-d) * np.ones((matrix_size, 1))
ranks = np.linalg.solve(A, B) # solve Ax = B
return {idx: r[0] for idx, r in enumerate(ranks)}
class Ranking(object):
def __init__(self, doc):
self.sentence_tokenize = SentenceTokenizer()
self.sentences = []
for text in doc:
self.sentences += self.sentence_tokenize.text_to_sentence(text)
self.words = self.sentence_tokenize.get_words(self.sentences)
self.graph_matrix = GraphMatrix()
self.sentence_graph = self.graph_matrix.create_sentence_graph(self.words)
self.words_graph, self.idx2word = self.graph_matrix.create_words_graph(self.words)
self.textRank = TextRank()
self.sentence_rank_idx = self.textRank.get_rank(self.sentence_graph)
self.sorted_sent_rank_idx = sorted(self.sentence_rank_idx, key=lambda k: self.sentence_rank_idx[k], reverse=True)
self.word_rank_idx = self.textRank.get_rank(self.words_graph)
self.sorted_word_rank_idx = sorted(self.word_rank_idx, key=lambda k: self.word_rank_idx[k], reverse=True)
def keywords(self, word_num=20):
keywords = []
index = []
for idx in self.sorted_word_rank_idx[:word_num]:
index.append(idx)
for idx in index:
keywords.append(self.idx2word[idx])
return keywords
# MongoDB connection & querying data
username = 'hs'
password = '12345'
client = MongoClient('mongodb://%s:%s@localhost:27017/allreview'%(username, password))
db = client['allreview']
document = []
for review in db.review.find({'category':'beauty'}):
document.append(review['context'])
# Top20 keywords extraction
rank = Ranking(document)
print(rank.keywords())
# MySQL connection
conn = pymysql.connect(host='localhost', user='root', password='12345', db='allreview', charset='utf8')
curs = conn.cursor()
sql = 'insert into keyword(word, category) values(%s, %s)'
for keyword in rank.keywords():
curs.execute(sql,(keyword,8))
conn.commit() | hanseul1/Text-summarize-with-TextRank | main.py | main.py | py | 4,729 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "konlpy.tag.Kkma",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "konlpy.tag.Twitter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 42,
"usage_type": "call"
},
{
... |
16782914255 | import torch
import random
import gc
import optuna
import pandas as pd
import numpy as np
from utils import map3, compute_metrics, set_seed
from config3 import CFG
from config import CFG1
from datasets import Dataset
from torch.optim import AdamW
from pathlib import Path
from transformers import AutoModelForMultipleChoice, TrainingArguments, Trainer, AutoModel, \
get_cosine_schedule_with_warmup
from data import preprocess, DataCollatorForMultipleChoice, tokenizer, EarlyStoppingCallback, RemoveOptimizerCallback
from peft import LoraConfig, prepare_model_for_kbit_training, get_peft_model, PeftModel, PeftConfig, TaskType, \
PeftModelForSequenceClassification
from colorama import Fore, Back, Style
from sklearn.model_selection import StratifiedKFold
from eda import augment_fn, augmentation_data, eda
def model_init(trial):
return AutoModelForMultipleChoice.from_pretrained("model/deberta-v3-large-hf-weights")
###############################################################
################## Train/Valid Dataset ###################
###############################################################
def get_datasets(df, ext_df, fold):
train_df = ext_df
# valid_ext_df = ext_df.query("fold==@fold")
# valid_df = pd.concat([df, valid_ext_df], axis=0).reset_index(drop=True)
valid_df = df
valid_labels = valid_df['answer']
train_dataset = Dataset.from_pandas(train_df)
train_dataset = train_dataset.map(preprocess, remove_columns=['prompt', 'A', 'B', 'C', 'D', 'E', 'answer'])
valid_dataset = Dataset.from_pandas(valid_df)
valid_dataset = valid_dataset.map(preprocess, remove_columns=['prompt', 'A', 'B', 'C', 'D', 'E', 'answer'])
return train_dataset, valid_dataset, valid_labels
###############################################################
################## Hyperparameter Search #################
###############################################################
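# Stand-alone Optuna objective: sample a hyperparameter set, train one model on
# the external data, and return the validation MAP@3 that the study maximises.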
def optuna_hp_space(trial):
    # define the hyperparameter search space to tune
hyperparameters = {
# Floating point parameter (log)
"learning_rate": trial.suggest_float("learning_rate", 5e-6, 1e-2, log=True),
# Floating point parameter (log)
"weight_decay": trial.suggest_float("weight_decay", 0.001, 0.01, log=True),
# Floating point parameter (log)
"warm_up_radio": trial.suggest_float("warmup_ratio", 0.1, 0.8, log=True),
# Integer parameter(step)
"gradient_accumulation_steps": trial.suggest_int("gradient_accumulation_steps", 2, 10, step=2)
}
train_df = pd.read_csv("./data/train_context.csv")
ext_df = pd.read_csv("./data/ext_train_context.csv")[:1500]
    ext_df["prompt"] = ext_df["context"].str[:100] + " #### " + ext_df["prompt"]  # truncate each context to 100 chars (assumed intent of the original [:100] slice)
ext_df = ext_df.sample(frac=1, random_state=CFG.seed).reset_index(drop=True)
train_dataset, valid_dataset, valid_label = get_datasets(train_df, ext_df, fold=0)
model = AutoModelForMultipleChoice.from_pretrained(CFG.model_path)
training_args = TrainingArguments(
warmup_ratio=hyperparameters["warm_up_radio"],
learning_rate=hyperparameters["learning_rate"],
weight_decay=hyperparameters["weight_decay"],
per_device_train_batch_size=CFG.per_device_train_batch_size,
per_device_eval_batch_size=CFG.per_device_eval_batch_size,
num_train_epochs=CFG.epochs,
report_to='none',
gradient_accumulation_steps=hyperparameters["gradient_accumulation_steps"],
output_dir=CFG.output_dir,
evaluation_strategy="steps",
save_strategy="steps",
save_total_limit=1,
load_best_model_at_end=True,
seed=CFG.seed,
fp16=True,
lr_scheduler_type='cosine'
)
trainer = Trainer(
model=model,
args=training_args,
tokenizer=tokenizer,
data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer),
train_dataset=train_dataset,
eval_dataset=valid_dataset,
compute_metrics=compute_metrics,
callbacks=[RemoveOptimizerCallback()],
)
trainer.train()
valid_pred = trainer.predict(valid_dataset).predictions
valid_pred_ids = np.argsort(-valid_pred, 1)
valid_pred_letters = np.array(list('ABCDE'))[valid_pred_ids][:, :3]
valid_map3 = map3(valid_label, valid_pred_letters)
return valid_map3
def optuna_hp_space_train1(trial):
    # define the hyperparameter search space to tune
hyperparameters = {
# Floating point parameter (log)
"learning_rate": trial.suggest_float("learning_rate", 5e-6, 1e-2, log=True),
# Floating point parameter (log)
"weight_decay": trial.suggest_float("weight_decay", 0.001, 0.01, log=True),
# Floating point parameter (log)
"warm_up_radio": trial.suggest_float("warmup_ratio", 0.1, 0.8, log=True),
# Integer parameter(step)
"gradient_accumulation_steps": trial.suggest_int("gradient_accumulation_steps", 2, 10, step=2)
}
return hyperparameters
def optuna_hp_space_way2(trial):
return {
# Floating point parameter (log)
"learning_rate": trial.suggest_float("learning_rate", 5e-6, 1e-2, log=True),
# Floating point parameter (log)
"weight_decay": trial.suggest_float("weight_decay", 0.001, 0.01, log=True),
# Floating point parameter (log)
"warm_up_radio": trial.suggest_float("warmup_ratio", 0.1, 0.8, log=True),
# Integer parameter(step)
"gradient_accumulation_steps": trial.suggest_int("gradient_accumulation_steps", 2, 10, step=2)
}
def main0():
study = optuna.create_study(direction="maximize")
study.optimize(optuna_hp_space, n_trials=10)
    # print the best hyperparameter combination and its score
print('Best hyperparameters: {}'.format(study.best_params))
print('Best performance: {:.4f}'.format(study.best_value))
best_params = study.best_params
def main1():
set_seed(CFG.seed)
df_train = pd.read_csv("./data/train.csv")
df_train = df_train.drop(columns="id")
df_train.dropna(inplace=True)
df_train = df_train.reset_index(drop=True)
stem_df = pd.read_csv("./data/stem_1k_v1.csv")
stem_df = stem_df.drop(columns="id")
ext_df = pd.concat([
pd.read_csv("data/6000_train_examples.csv"), # 6000
pd.read_csv("data/extra_train_set.csv"),
pd.read_csv("data/llm-science-3k-data-test.csv"), # 3000
stem_df # 1000
])
ext_len = len(ext_df) // 3
ext_df = ext_df[:ext_len]
del stem_df
ext_df = ext_df.drop_duplicates()
    # drop rows from ext_df whose prompt already appears in df_train
values_to_exclude = df_train['prompt'].values
mask = ext_df['prompt'].isin(values_to_exclude)
ext_df = ext_df[~mask]
del values_to_exclude, mask
if CFG1.use_shuffle_options:
shuffle_df_train = augment_fn(df_train)
df_train = pd.concat([df_train, shuffle_df_train], axis=0)
shuffle_ext_df = augment_fn(ext_df)
ext_df = pd.concat([ext_df, shuffle_ext_df], axis=0)
ext_df = ext_df.sample(frac=1, random_state=CFG1.seed).reset_index(drop=True)
df_train = df_train.sample(frac=1, random_state=CFG1.seed).reset_index(drop=True)
train_dataset, valid_dataset, valid_label = get_datasets(df_train, ext_df, fold=0)
# model = AutoModelForMultipleChoice.from_pretrained(CFG1.model_path)
#
# if CFG1.is_freezingEmbedding:
# # Freeze the embeddings
# for param in model.base_model.embeddings.parameters():
# param.requires_grad = False
#
# if CFG1.use_self_optimizer:
# # Create optimizer and learning rate scheduler
# # Define different learning rates and weight decay for different layers
# optimizer_grouped_parameters = [
# {
# "params": [p for n, p in model.named_parameters() if "base_model.embeddings" not in n],
# "lr": 1e-5, # Example learning rate for top layers
# "weight_decay": 0.01, # Example weight decay
# },
#
# {
# "params": [p for n, p in model.named_parameters() if "base_model.embeddings" in n],
# "lr": 1e-4, # Example learning rate for bottom layers
# "weight_decay": 0.001, # Example weight decay
# },
# ]
# optimizer = AdamW(optimizer_grouped_parameters, lr=CFG1.learning_rate,
# weight_decay=CFG1.weight_decay)
#
# # Create a cosine learning rate scheduler
# num_training_steps = CFG1.epochs * (ext_len // (CFG1.per_device_train_batch_size * 2))
# scheduler = get_cosine_schedule_with_warmup(optimizer,
# num_warmup_steps=CFG1.warmup_ratio * num_training_steps,
# num_training_steps=num_training_steps)
training_args = TrainingArguments(
learning_rate=CFG1.learning_rate,
weight_decay=CFG1.weight_decay,
warmup_ratio=CFG1.warmup_ratio,
per_device_train_batch_size=CFG1.per_device_train_batch_size,
per_device_eval_batch_size=CFG1.per_device_eval_batch_size,
num_train_epochs=CFG1.epochs,
report_to='none',
output_dir=CFG1.output_dir,
evaluation_strategy="steps",
save_strategy="steps",
save_total_limit=1,
load_best_model_at_end=True,
seed=CFG1.seed
)
trainer = Trainer(
model_init=model_init,
args=training_args,
tokenizer=tokenizer,
data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer),
train_dataset=train_dataset,
eval_dataset=valid_dataset,
compute_metrics=compute_metrics,
callbacks=[RemoveOptimizerCallback()],
)
trainer.hyperparameter_search(
direction="maximize",
backend="optuna",
hp_space=optuna_hp_space_way2,
n_trials=10,
compute_objective=compute_metrics
)
def main2():
train_df = pd.read_csv("./data/train_context.csv")
ext_df = pd.read_csv("./data/ext_train_context.csv")[:1500]
ext_df["prompt"] = ext_df["context"] + " #### " + ext_df["prompt"]
ext_df = ext_df.sample(frac=1, random_state=CFG.seed).reset_index(drop=True)
train_dataset, valid_dataset, valid_label = get_datasets(train_df, ext_df, fold=0)
training_args = TrainingArguments(
learning_rate=CFG.learning_rate,
weight_decay=CFG.weight_decay,
warmup_ratio=CFG.warmup_ratio,
per_device_train_batch_size=CFG.per_device_train_batch_size,
per_device_eval_batch_size=CFG.per_device_eval_batch_size,
num_train_epochs=CFG.epochs,
report_to='none',
output_dir=CFG.output_dir,
evaluation_strategy="steps",
save_strategy="steps",
save_total_limit=1,
load_best_model_at_end=True,
metric_for_best_model='eval_loss',
seed=CFG.seed
)
trainer = Trainer(
model_init=model_init,
args=training_args,
tokenizer=tokenizer,
data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer),
train_dataset=train_dataset,
eval_dataset=valid_dataset,
compute_metrics=compute_metrics,
callbacks=[RemoveOptimizerCallback()],
)
trainer.hyperparameter_search(
direction="maximize",
backend="optuna",
hp_space=optuna_hp_space_way2,
n_trials=10,
compute_objective=compute_metrics
)
if __name__ == "__main__":
main2()
| zdhdream/LLM-Science-Exam | Hyperparameter-Search.py | Hyperparameter-Search.py | py | 11,550 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "transformers.AutoModelForMultipleChoice.from_pretrained",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "transformers.AutoModelForMultipleChoice",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "datasets.Dataset.from_pandas",
"line_number":... |
13083798365 | # -*- coding: utf-8 -*-
import re
from setuptools import setup
version = re.search(
    r'^__version__\s*=\s*"(.*)"',
open('pushscreeps/pushscreeps.py').read(),
re.M
).group(1)
with open("README.rst", "rb") as f:
long_description = f.read().decode("utf-8")
setup(
name="pushscreeps",
packages=["pushscreeps"],
entry_points={
"console_scripts": ['pushscreeps = pushscreeps.pushscreeps:main']
},
version=version,
description="Python3 script for pushing code to screeps",
long_description=long_description,
author="Mathias Bøhn Grytemark",
author_email="mathias@grytemark.no",
url="https://github.com/mboehn/pushscreeps",
install_requires=[
"requests",
],
)
| mboehn/pushscreeps | setup.py | setup.py | py | 744 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.search",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "re.M",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "setuptools.setup",
"line_number": 16,
"usage_type": "call"
}
] |
8293013280 | import cv2
import math
path = "img/angle.jpg"
img = cv2.imread(path)
pointsList = []
def mousePoints(event, x, y, flags, params):
if event == cv2.EVENT_LBUTTONDOWN:
size = len(pointsList)
if size != 0 and size % 3 != 0:
cv2.line(img, tuple(pointsList[round((size-1)/3)*3]), (x,y), (0,0,255), 2)
cv2.circle(img, (x,y), 5, (0,0,255), cv2.FILLED)
pointsList.append([x,y])
def gradient(p1,p2):
return (p2[1] - p1[1])/(p2[0]- p1[0])
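# Angle at the first clicked point: for two lines with slopes m1 and m2,
# tan(theta) = (m2 - m1) / (1 + m1*m2).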
def getAngle(pointList):
p1, p2, p3 = pointList[-3:]
m1 = gradient(p1, p2)
m2 = gradient(p1, p3)
angle_radiance = math.atan((m2-m1)/(1+(m2*m1)))
angle_degrees = round(math.degrees(angle_radiance))
cv2.putText(img, str(angle_degrees), (p1[0]-40, p1[1]-20), cv2.FONT_HERSHEY_COMPLEX, 1, (0,0,255), 2)
while True:
if len(pointsList) % 3 == 0 and len(pointsList) != 0:
getAngle(pointsList)
cv2.imshow('Image', img)
cv2.setMouseCallback('Image', mousePoints)
if cv2.waitKey(1) & 0xFF == ord('q'):
pointsList = []
img = cv2.imread(path)
| Demohack2022/hacktoberfest2022 | Contributors/angle-finder.py | angle-finder.py | py | 1,095 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.EVENT_LBUTTONDOWN",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "cv2.line",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_n... |
3715152381 | """
A simple event-sourced user service
"""
import datetime
import functools
import logging
import typing
import aiohttp.web
import faust
import strawberry
import strawberry.asgi.http
import strawberry.asgi.utils
import strawberry.graphql
import strawberry.types.datetime
from faust_avro import App, Record
class UserExistsError(Exception):
pass
class UserDoesNotExistError(Exception):
pass
##############################################################################
# Models
##############################################################################
class User(Record, coerce=True):
"""The main data model for a user, stored in our table."""
email: str
name: str
joined: datetime.datetime
def __str__(self):
        return f"{self.name} <{self.email}>"
class UserKey(Record):
key: str
class UserCreated(Record, coerce=True):
user: User
class NameChanged(Record, coerce=True):
email: str
name: str
class UpdatedEmail(Record, coerce=True):
old_email: str
new_email: str
class UserDeleted(Record, coerce=True):
email: str
class UserRequest(Record, coerce=True):
update: typing.Union[UserCreated, NameChanged, UpdatedEmail, UserDeleted]
##############################################################################
# App
##############################################################################
app = App(
"users", broker="kafka://localhost", reply_create_topic=True, topic_partitions=1
)
users_requests = app.topic(
"_users_requests", key_type=UserKey, value_type=UserRequest, internal=True
)
cleaned_users_requests = app.topic(
"users", key_type=UserKey, value_type=UserRequest, internal=True
)
users_table = app.Table("users_table", partitions=1)
##############################################################################
# Business logic
##############################################################################
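# Each event type below gets its own handler, selected via single dispatch on
# the message class.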
@functools.singledispatch
async def update_handler(msg: typing.Any):
raise NotImplementedError(f"No handler for {msg}")
@update_handler.register
async def user_created(msg: UserCreated):
email = msg.user.email
if email in users_table:
raise UserExistsError(f"User with {email} already exists.")
users_table[email] = msg.user
@update_handler.register
async def name_changed(msg: NameChanged):
user = users_table[msg.email]
user.name = msg.name
users_table[msg.email] = user
@update_handler.register
async def updated_email(msg: UpdatedEmail):
    if msg.old_email == msg.new_email:
        return  # nothing to change for a no-op email update
if msg.old_email not in users_table:
raise UserDoesNotExistError(f"User with {msg.old_email} does not exist.")
if msg.new_email in users_table:
raise UserExistsError(f"User with {msg.new_email} already exists.")
user = users_table[msg.old_email]
user.email = msg.new_email
users_table[msg.new_email] = user
# This is subtle. We jump from the agent for partition new_email over to
# the agent for partition old_email and request a delete there. For a
# short time, the user will exist under both email addresses.
await users_requests.send(
key=UserKey(msg.old_email), value=UserRequest(UserDeleted(msg.old_email))
)
@update_handler.register
async def deleted_email(msg: UserDeleted):
if msg.email not in users_table:
raise UserDoesNotExistError(f"User with {msg.email} does not exist.")
del users_table[msg.email]
##############################################################################
# Agent
##############################################################################
@app.agent(users_requests)
async def users_svc(requests):
async for key, value in requests.items():
try:
await update_handler(value.update)
await cleaned_users_requests.send(key=key, value=value)
yield 200 # OK
except UserExistsError:
yield 409 # Conflict
except UserDoesNotExistError:
yield 404 # Not Found
except NotImplementedError as e:
logging.error(e)
yield 501 # Not Implemented
except Exception as e:
logging.error(e)
yield 500 # Internal Server Error
@app.agent(cleaned_users_requests)
async def cleaned_users_requests(requests):
async for value in requests:
# Silly, but faust-avro uses the agent to do topic-schema registration
pass
##############################################################################
# RESTish
##############################################################################
@app.page("/users")
class users(faust.web.View):
async def get(self, request: faust.web.Request) -> faust.web.Response:
"""List all users"""
return self.json(dict(users=dict(users_table.items())))
async def post(self, request: faust.web.Request) -> faust.web.Response:
"""Create a new user"""
data = await request.json()
key = UserKey(data["email"])
user = User(**data, joined=datetime.datetime.now())
value = UserRequest(UserCreated(user))
response = await users_svc.ask(key=key, value=value)
if response == 200:
return self.json(dict(user=user.asdict()))
elif response == 409:
raise aiohttp.web.HTTPConflict()
else:
raise aiohttp.web.HTTPInternalServerError()
@app.page("/users/{email}")
class users_update(faust.web.View):
@app.table_route(table=users_table, match_info="email")
async def get(
self, request: faust.web.Request, *, email: str
) -> faust.web.Response:
"""Get a specific user"""
try:
return self.json(dict(user=users_table[email].asdict()))
except KeyError:
raise aiohttp.web.HTTPNotFound()
@app.table_route(table=users_table, match_info="email")
async def patch(
self, request: faust.web.Request, *, email: str = None
) -> faust.web.Response:
"""Update a specific user"""
data = await request.json()
if "name" in data:
update = NameChanged(email, data["name"])
elif "new_email" in data:
update = UpdatedEmail(email, data["new_email"])
# Note this re-routes what partition we'll send on
email = data["new_email"]
else:
raise aiohttp.web.HTTPBadRequest()
response = await users_svc.ask(key=UserKey(email), value=UserRequest(update))
if response == 200:
return self.json(dict(user=users_table[email].asdict()))
elif response == 404:
raise aiohttp.web.HTTPNotFound()
elif response == 409:
raise aiohttp.web.HTTPConflict()
else:
raise aiohttp.web.HTTPInternalServerError()
##############################################################################
# GraphQLish
##############################################################################
@strawberry.type
class UserType:
email: str
name: str
joined: strawberry.types.datetime.DateTime
@strawberry.type
class Query:
@strawberry.field
def users(self, info, email: str = None) -> typing.List[UserType]:
if email is not None:
return [users_table[email]]
else:
return list(users_table.values())
@strawberry.input
class CreateUserInput:
email: str
name: str
@strawberry.input
class ChangeUserNameInput:
email: str
name: str
@strawberry.input
class ChangeUserEmailInput:
old_email: str
new_email: str
@strawberry.type
class Mutation:
@staticmethod
async def ask(email, message):
response = await users_svc.ask(key=UserKey(email), value=UserRequest(message))
if response == 200:
return
else:
raise Exception("Failure")
@strawberry.mutation
async def create_user(self, info, input: CreateUserInput) -> UserType:
user = User(email=input.email, name=input.name, joined=datetime.datetime.now())
await Mutation.ask(input.email, UserCreated(user))
return user
@strawberry.mutation
async def change_user_name(self, info, input: ChangeUserNameInput) -> UserType:
await Mutation.ask(input.email, NameChanged(input.email, input.name))
return users_table[input.email]
@strawberry.mutation
async def change_user_email(self, info, input: ChangeUserEmailInput) -> UserType:
await Mutation.ask(
input.new_email, UpdatedEmail(input.old_email, input.new_email)
)
return users_table[input.new_email]
schema = strawberry.Schema(query=Query, mutation=Mutation)
# TODO -- routing! Currently this abuses partitions=1 and workers=1 to have consistency.
#
# Routing is a lot harder in graphql. It potentially needs to happen at the mutation level?
# It'd be worth investigating if the response could be the user object itself and/or an
# exception object. Serializing them with pickle would be okay since it is python/faust
# internal and not intended for outside consumption.
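# One possible direction (untested sketch, not from the original repo): have the
# agent yield the updated user object or the caught exception instead of a bare
# status code, and let the REST/GraphQL views rebuild a response from it, e.g.
#
#     try:
#         await update_handler(value.update)
#         yield ("ok", value)
#     except Exception as exc:
#         yield ("err", exc)
#
# As the note above says, pickling such replies would be acceptable because they
# never leave the Faust app.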
@app.page("/graphql")
class graphql(faust.web.View):
async def get(self, request: faust.web.Request) -> faust.web.Response:
html = strawberry.asgi.utils.get_playground_html(
"http://localhost:6066/graphql"
)
return aiohttp.web.Response(body=html, content_type="text/html")
async def execute(self, query, variables=None, context=None, operation_name=None):
return await strawberry.graphql.execute(
schema,
query,
variable_values=variables,
operation_name=operation_name,
context_value=context,
)
async def post(self, request: faust.web.Request) -> faust.web.Response:
response = await strawberry.asgi.http.get_http_response(request, self.execute)
return aiohttp.web.Response(
body=response.body, content_type=response.media_type
)
| trauter/faust-avro | examples/event_sourced_user.py | event_sourced_user.py | py | 9,993 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "faust_avro.Record",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "faust_avro.Record",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "faust_avro... |
23055518013 | """
Creation:
Author: Martin Grunnill
Date: 2023-01-25
Description: Derivation of Basic Reproductive Number (R0) and beta given R0 for model described in manuscript.
For methods see:
Diekmann, O., Heesterbeek, J. A. P., & Roberts, M. G. (2010). The construction of next-generation matrices
for compartmental epidemic models. Journal of the Royal Society Interface, 7(47), 873–885.
https://doi.org/10.1098/rsif.2009.0386
"""
import sympy
all_params = ['epsilon_1', 'gamma_A_1', 'p_s', 'gamma_I_1', 'epsilon_2', 'gamma_I_2', 'alpha',
'p_h_s', 'epsilon_H', 'epsilon_3', 'N', 'theta', 'gamma_A_2', 'gamma_H', 'beta']
all_states = ['S', 'E', 'G_I', 'G_A', 'P_I', 'P_A', 'M_H', 'M_I', 'M_A', 'F_H', 'F_I', 'F_A', 'R']
for list_of_symbols in [all_params, all_states]:
for symbol in list_of_symbols:
exec(symbol + ' = sympy.symbols("'+symbol +'")')
odes = sympy.Matrix([[R*alpha - S*beta*(F_A*theta + F_I + M_A*theta + M_H + M_I + P_A*theta + P_I*theta)/N],
[-E*epsilon_1*p_s - E*epsilon_1*(1 - p_s) + S*beta*(F_A*theta + F_I + M_A*theta + M_H + M_I + P_A*theta + P_I*theta)/N],
[E * epsilon_1 * (1 - p_s) - G_A * epsilon_2],
[E*epsilon_1*p_s - G_I*epsilon_2],
[G_A*epsilon_2 - P_A*epsilon_3],
[G_I * epsilon_2 - P_I * epsilon_3 * p_h_s - P_I * epsilon_3 * (1 - p_h_s)],
[-M_A*gamma_A_1 + P_A*epsilon_3],
[-M_I*gamma_I_1 + P_I*epsilon_3*(1 - p_h_s)],
[-M_H*epsilon_H + P_I*epsilon_3*p_h_s],
[-F_A * gamma_A_2 + M_A * gamma_A_1],
[-F_I * gamma_I_2 + M_I * gamma_I_1],
[-F_H*gamma_H + M_H*epsilon_H],
[F_A*gamma_A_2 + F_H*gamma_H + F_I*gamma_I_2 - R*alpha]])
infecteds = sympy.Matrix(odes[1:-1])
infecteds = infecteds.subs(S, N)
infecteds_jacobian = infecteds.jacobian(X=[E,
G_A, G_I,
P_A, P_I,
M_A, M_I, M_H,
F_A, F_I, F_H
])
# e.g. removing people becoming infected from the jacobian above.
Sigma = infecteds_jacobian.subs(beta, 0)
Sigma
# Obtaining the matrix of transmissions into infectious stages (T),
# e.g. removing the transition terms from the jacobian above.
# Suggest not using T as a variable name, as it could be confused with the transpose of a matrix.
T_inf_births_subs = {eval(param):0
for param in all_params
if param not in ['beta', 'theta', 'kappa']}
T_inf_births = infecteds_jacobian.subs(T_inf_births_subs)
T_inf_births
# Obtaining the Next Generation Matrix
Sigma_inv = Sigma**-1 # note for powers in python it is ** not ^.
neg_Sigma_inv = -Sigma_inv
K_L = T_inf_births*neg_Sigma_inv
K_L
# Finally the Basic Reproductive Number
eigen_values = K_L.eigenvals()
eigen_values
none_zero_eigen_values = [item for item in eigen_values.keys() if item !=0]
eq_R0 = none_zero_eigen_values[0]
#%%
eq_R0 = sympy.simplify(eq_R0)
#%%
# Deriving beta
R0 = sympy.symbols('R0')
eq_R0 = sympy.Eq(eq_R0, R0)
beta_eq = sympy.solve(eq_R0, beta)
beta_eq = beta_eq[0]
#%%
beta_eq = sympy.simplify(beta_eq) | LIAM-COVID-19-Forecasting/Modelling-Disease-Mitigation-at-Mass-Gatherings-A-Case-Study-of-COVID-19-at-the-2022-FIFA-World-Cup | meta_population_models/reproductive_numbers/MGE_single_population_derivation.py | MGE_single_population_derivation.py | py | 3,417 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sympy.Matrix",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sympy.Matrix",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sympy.simplify",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "sympy.symbols",
"line_nu... |
37319818 | from IPython import get_ipython
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring
import mamo
@magics_class
class MamoMagics(Magics):
@cell_magic
@magic_arguments()
@argument("name", type=str, default=None, help="Name of the cell.")
def mamo(self, line, cell_code):
"""mamo cell wrapper, only tracks global stores!"""
assert isinstance(line, str)
assert isinstance(cell_code, str)
args = parse_argstring(self.mamo, line)
mamo.run_cell(args.name, cell_code, self.shell.user_ns)
get_ipython().register_magics(MamoMagics)
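# Rough usage sketch (illustrative cell name; assumes this module was imported in
# an IPython/Jupyter session so the magic above is registered):
#
#   %%mamo my_expensive_cell
#   result = expensive_computation()
#
# Per the docstring above, only assignments to globals in the cell are tracked.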
| BlackHC/mamo | mamo/support/ipython.py | ipython.py | py | 678 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "IPython.core.magic.Magics",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "IPython.core.magic_arguments.parse_argstring",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "mamo.run_cell",
"line_number": 18,
"usage_type": "call"
},
{
... |
19637216652 | import pickle
import lasagne
import numpy as np
import theano as th
import theano.tensor as T
import lasagne.layers as ll
from data_reader import load
from settings import DATA_DIR
from inception_v3 import build_network, preprocess
def extract(data, layer, batch_size):
nr_batches_train = int(data.shape[0]/batch_size)
x_temp = T.tensor4()
features = ll.get_output(layer, x_temp , deterministic=True)
extract_features = th.function(inputs=[x_temp ], outputs=features)
output_features = []
for t in range(nr_batches_train):
train_temp = data[t*batch_size:(t+1)*batch_size]
tx_resized = []
for n in range(batch_size):
tx_resized.append(preprocess(np.transpose(train_temp[n],(1,2,0))))
tx_resized = np.concatenate(tx_resized, axis=0)
output_features.append(extract_features(tx_resized))
return np.concatenate(output_features, axis=0)
with open('inception_v3.pkl', 'rb') as f:
params = pickle.load(f)
net = build_network()
lasagne.layers.set_all_param_values(net['softmax'], params['param values'])
trainx, _ = load(DATA_DIR, subset='train')
testx, _ = load(DATA_DIR, subset='test')
minibatch_size = 10
feature_layer = net['pool3']
print("Extracting features from train data...")
train_features = extract(trainx, feature_layer, minibatch_size)
print("Extracting features from test data...")
test_features = extract(testx, feature_layer, minibatch_size)
print(train_features.shape)
print(test_features.shape)
np.savez_compressed('cifar_train_x', train_features)
np.savez_compressed('cifar_test_x', test_features) | maciejzieba/svmCIFAR10 | extract_inception.py | extract_inception.py | py | 1,602 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "theano.tensor.tensor4",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "theano.tensor",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "lasagne.layers.get_output",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "lasagne... |
10876951886 | import yaml
import json
import os
import subprocess
class MLOps(object):
spool_dir = "/tmp/ta"
agent_dir = "/opt/mlops-agent"
mlops_dir_name = "datarobot_mlops_package-8.1.2"
total_dir_path = agent_dir + "/" + mlops_dir_name
def __init__(self, api_token, path):
self.token = api_token
if os.path.exists(path):
with open(path) as f:
mlops_config = json.load(f)
self.endpoint = mlops_config['datarobot_mlops_service_url']
self.model_id = mlops_config['model_id']
self.deployment_id = mlops_config['deployment_id']
self.mlops_name = mlops_config.get('mlops_dir_name', 'datarobot_mlops_package-8.1.2')
if "MLOPS_SERVICE_URL" in os.environ:
self.endpoint = os.environ['MLOPS_SERVICE_URL']
if "MODEL_ID" in os.environ:
self.model_id = os.environ['MODEL_ID']
if "DEPLOYMENT_ID" in os.environ:
self.deployment_id = os.environ['DEPLOYMENT_ID']
if not os.path.exists(self.agent_dir):
raise Exception("environment is not configured for mlops.\nPlease select a valid mlops enabled environment.")
if self.endpoint is None:
raise Exception("'no endpoint found, please add 'MLOPS_SERVICE_URL' environment variable, or create an "
"mlops.json file")
if self.model_id is None:
raise Exception("no model_id found, please add 'MODEL_ID' environment variable, or create an mlops.json "
"file")
if self.deployment_id is None:
raise Exception("no deployment_id found, please add 'DEPLOYMENT_ID' environment variable, or create an "
"mlops.json file")
def init(self):
os.environ['MLOPS_DEPLOYMENT_ID'] = self.deployment_id
os.environ['MLOPS_MODEL_ID'] = self.model_id
os.environ['MLOPS_SPOOLER_TYPE'] = "FILESYSTEM"
os.environ['MLOPS_FILESYSTEM_DIRECTORY'] = self.spool_dir
with open(self.total_dir_path + '/conf/mlops.agent.conf.yaml') as f:
documents = yaml.load(f, Loader=yaml.FullLoader)
documents['mlopsUrl'] = self.endpoint
documents['apiToken'] = self.token
with open(self.total_dir_path + '/conf/mlops.agent.conf.yaml', 'w') as f:
yaml.dump(documents, f)
subprocess.call(self.total_dir_path + '/bin/start-agent.sh')
check = subprocess.Popen([self.total_dir_path + '/bin/status-agent.sh'], stdout=subprocess.PIPE)
output = check.stdout.readlines()[0]
check.terminate()
if b"DataRobot MLOps-Agent is running as a service." in output:
return True
else:
raise Exception(output)
| algorithmiaio/algorithmia-adk-python | adk/mlops.py | mlops.py | py | 2,766 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number":... |
25032770232 | #pip install dash dash-renderer dash-html-components dash-core-components plotly
import dash
import dash_core_components as dcc
import dash_html_components as html
app = dash.Dash()
app.layout = html.Div(children=[
html.H1("Consumo dos clientes"), #separando elementos dos childs por virgula
dcc.Dropdown(
options=[
{'label': 'Fulano', 'value': "Fulano" }, #value = valor de id
{'label': 'Sicrano', 'value': "Sicrano"} #value = valor de id
],
value= '' #id do escolhido
),
dcc.Graph(id="Fulano",
              figure = { # Monthly consumption x product names
"data": [{"x": ["pale ale", "weissbier", "itaipava", "skol"], "y": [0, 5, 4, 2], "type":"bar", "name": "Cervejas"},
{"x": ["expresso", "cappuccino", "mocaccino", "cafe4"], "y": [0, 0, 2, 1], "type":"line", "name": "Cafés"},
],
"layout": {
"title": "Fulano"
}
}),
dcc.Graph(id="Sicrano",
              figure = { # Monthly consumption x product names
"data": [{"x": ["pale ale", "weissbier", "itaipava", "skol"], "y": [0, 1, 1, 0], "type":"bar", "name": "Cervejas"},
{"x": ["expresso", "cappucino", "mocaccino", "cafe4"], "y": [7, 0, 3, 2], "type":"line", "name": "Cafés"}
],
"layout": {
"title": "Sicrano"
}
})
])
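# The dropdown above is not wired to anything yet. A minimal callback sketch
# (untested; the Dropdown would also need an id such as "cliente-dropdown",
# and dash.dependencies ships with dash):
#
# from dash.dependencies import Input, Output
#
# @app.callback([Output("Fulano", "style"), Output("Sicrano", "style")],
#               [Input("cliente-dropdown", "value")])
# def mostrar_cliente(cliente):
#     return ({} if cliente == "Fulano" else {"display": "none"},
#             {} if cliente == "Sicrano" else {"display": "none"})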
if __name__ == '__main__':
app.run_server(debug=True)
| grupoflux/dashboard | dashboard.py | dashboard.py | py | 1,527 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dash.Dash",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "dash_html_components.H1",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "dash_core_co... |
23007800095 | import numpy as np
from keras.utils import to_categorical
def create_labels(train_positives, train_negatives=None, flag=False):
''' This function creates labels for model training '''
if flag == False :
# only positive data in trainings
labels = np.zeros(train_positives.shape[0])
labels[:] = 1
else:
# negatives & positives data in training
labels = np.zeros(train_positives.shape[0] + train_negatives.shape[0])
labels[:train_positives.shape[0]] = 1
return np.expand_dims(labels, axis=1)
def reshape_X(data, nrows, ncols):
data_t = np.zeros((data.shape[0], nrows, ncols))
data_cols = data[0].shape[0]-1
ctr = 0
for i, j in zip(range(0, data_cols//2, 2), range(data_cols//2, data_cols, 2)):
data_t[:, ctr, :] = np.hstack([data[:, i:i+2], data[:, j:j+2]])
ctr += 1
return data, data_t
def reshape_y(y, nrows):
y = to_categorical(y)
print("\ny shape : ", y.shape)
y_ = np.zeros((nrows, y.shape[0], y.shape[1]))
for i in range(nrows):
y_[i, :, :] = y
return y_
def split_train_validation(x, y, val_split=0.1):
m = x.shape[0]
    val_size = int(val_split * m)
return x[:-val_size], y[:, :-val_size, :], x[-val_size:], y[:, -val_size:, :]
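if __name__ == '__main__':
    # Illustrative shapes only (made-up data): positives-only vs. mixed labels.
    pos, neg = np.zeros((10, 4)), np.zeros((6, 4))
    print(create_labels(pos).shape)                  # (10, 1), all ones
    print(create_labels(pos, neg, flag=True).shape)  # (16, 1), first 10 rows are 1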
| nikitamalviya/user-authentication-using-siamese | features_and_labels.py | features_and_labels.py | py | 1,281 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_numb... |
35176426735 | '''
Organisation Model.py file
'''
import uuid
from django.db import models
class Organisation(models.Model):
'''
Organisation Table
id - Organisations ID
name - Organisations Name (Max length of 255 characters)
'''
id = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False
)
Organisation_Name = models.CharField(
max_length=255,
name="Organisation_Name"
)
def __str__(self):
'''
Returns the Organisation's Name
'''
return self.Organisation_Name
| Code-Institute-Submissions/Support-Software-Inc | organisations/models.py | models.py | py | 590 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.models.Model",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.db.models.UUIDField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name"... |
19528086110 | # -*- coding: utf-8 -*-
__author__='zhaicao'
from PyQt5 import QtCore, QtGui, QtWidgets
from frameUI.CreateControls import TraceControlsUI
from frameUI.CreateTextUI import TraceCreateTextUI
from frameUI.MainData import TraceObjItems
from eventAction.DefinedActions import TraceActions
from eventAction.DefinedSolot import TraceSolot
from eventAction.Utils import ObjRepository
from frameUI import resoure_rc
class TraceMainWidget(TraceControlsUI, TraceCreateTextUI, TraceObjItems):
def setupUi(self, MainWindow):
        # Main window setup
MainWindow.setWindowTitle("追溯分析部署配置工具 V2.0.6(Bate)")
MainWindow.setObjectName("MainWindow")
MainWindow.resize(500, 660)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icon/logo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
MainWindow.setStyleSheet(".QGroupBox {border-radius: 3px;border: 1px solid #BFBFBF;}")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.stackedWidget = QtWidgets.QStackedWidget(self.centralwidget)
self.stackedWidget.setObjectName("stackedWidget")
self.verticalLayout.addWidget(self.stackedWidget)
MainWindow.setCentralWidget(self.centralwidget)
        # Set up the menu bar
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 23))
        self.menubar.setObjectName("menubar")
        # Deploy menu
        self.menuDeploy = QtWidgets.QMenu(self.menubar)
        self.menuDeploy.setObjectName("menuDeploy")
        # Update menu
        self.menuUpdate = QtWidgets.QMenu(self.menubar)
        self.menuUpdate.setObjectName("menuUpdate")
        MainWindow.setMenuBar(self.menubar)
        # Sub-item of the deploy menu
        self.firstDeploy = QtWidgets.QAction(MainWindow)
        self.firstDeploy.setObjectName("firstDeploy")
        self.menuDeploy.addAction(self.firstDeploy)
        # Sub-items of the upgrade menu
self.updateDB = QtWidgets.QAction(MainWindow)
self.updateDB.setObjectName("updateDB")
self.menuUpdate.addAction(self.updateDB)
self.updateNifi = QtWidgets.QAction(MainWindow)
self.updateNifi.setObjectName("updateNifi")
self.menuUpdate.addAction(self.updateNifi)
self.menubar.addAction(self.menuDeploy.menuAction())
self.menubar.addAction(self.menuUpdate.menuAction())
        # Create the controls
        super().initControls(self.stackedWidget)
        self.stackedWidget.setCurrentIndex(0)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # Create the control display texts
        super().initControlTexts()
        # Build the dict linking control names to objects
        super().initObjItems()
        # Initialise the signal/slot for menu switching
        self.connSignalMenu()
        # Initialise the signals/slots for the first page's controls
        self.connSignalPage_1(MainWindow)
        # Initialise the signals/slots for the second page's controls
        self.connSignalPage_2(MainWindow)
        # Initialise the signals/slots for the third page's controls
        self.connSignalPage_3(MainWindow)
        # Initialise the control repository
        self._objsDict = ObjRepository(MainWindow, self.deployConfItem, self.manifestConfItem, self.dbConfItem, self.nifiConfItem)
        # Initialise the actions
        self._action = TraceActions()
        # Initialise the slot functions
        self._solot = TraceSolot()
    # Shared signals
    def connSignalMenu(self):
        self.firstDeploy.triggered.connect(lambda: self._solot.changeMenuPage(self.stackedWidget, 0))
        self.updateDB.triggered.connect(lambda: self._solot.changeMenuPage(self.stackedWidget, 1))
        self.updateNifi.triggered.connect(lambda: self._solot.changeMenuPage(self.stackedWidget, 2))
    # Control signals for page 1
    def connSignalPage_1(self, mainWidget):
        # Bind the signal/slot for tab switching
        self.tabWidget.currentChanged.connect(
            lambda: self._solot.buttonChange(self.tabWidget, self.dep_confirmBtn, self.dep_copyDepBtn, self.dep_copyManBtn))
        # Signal/slot for the Next button
self.dep_confirmBtn.clicked.connect(
lambda: self._solot.nextClicked(mainWidget, self.tabWidget, self.dep_confirmBtn, self.dep_copyDepBtn,
self.dep_copyManBtn, self.deployConfItem, self.manifestConfItem,
self._objsDict))
        # Signal/slot for the "copy deployment config" button
self.dep_copyDepBtn.clicked.connect(
lambda: self._solot.copyConfClipboard(mainWidget, 'deploy', self.deployConfItem, self.manifestConfItem,
self._objsDict))
        # Signal/slot for the "copy customization config" button
self.dep_copyManBtn.clicked.connect(
lambda: self._solot.copyConfClipboard(mainWidget, 'manifest', self.deployConfItem, self.manifestConfItem,
self._objsDict))
        # Bind the Cancel button signal to the close slot
        self.dep_cancelBtn.clicked.connect(mainWidget.close)
        # Signal/slot for the "extract history DB" checkbox
        self.dep_input_6.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'his'))
        # Signal/slot for the "extract process parameters" checkbox
        self.dep_input_24.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'pp'))
        # Signal/slot for the "process parameters reachable over the network" checkbox
        self.dep_input_25.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'ppNet'))
        # Signal/slot for the "enable single sign-on" checkbox
        self.dep_input_45.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'login'))
        # Signal/slot for the "enable Nifi login" checkbox
        self.dep_input_53.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'nifiLogin'))
        # Fetch the business DB (linked widgets)
        # Signal/slot for the business DB test button
self.getDBBtn_1.clicked.connect(lambda: self._solot.setDBNameList(mainWidget,
{
'ip': self.dep_input_1.text(),
'port': self.dep_input_2.text(),
'user': self.dep_input_3.text(),
'pwd': self.dep_input_4.text(),
},
self.dep_input_5))
        # Re-initialise the dropdown whenever an input field changes
self.dep_input_1.textChanged.connect(lambda: self._action.initComboBoxDB(self.dep_input_5, '请选择业务库'))
self.dep_input_2.textChanged.connect(lambda: self._action.initComboBoxDB(self.dep_input_5, '请选择业务库'))
self.dep_input_3.textChanged.connect(lambda: self._action.initComboBoxDB(self.dep_input_5, '请选择业务库'))
self.dep_input_4.textChanged.connect(lambda: self._action.initComboBoxDB(self.dep_input_5, '请选择业务库'))
        # Fetch the history DB (linked widgets)
        # Signal/slot for the history DB test button
self.getDBBtn_2.clicked.connect(lambda: self._solot.setDBNameList(mainWidget,
{
'ip': self.dep_input_7.text(),
'port': self.dep_input_8.text(),
'user': self.dep_input_9.text(),
'pwd': self.dep_input_10.text(),
},
self.dep_input_11))
        # Re-initialise the dropdown whenever an input field changes
self.dep_input_7.textChanged.connect(lambda: self._action.initComboBoxDB(self.dep_input_11, '请选择历史库'))
self.dep_input_8.textChanged.connect(lambda: self._action.initComboBoxDB(self.dep_input_11, '请选择历史库'))
self.dep_input_9.textChanged.connect(lambda: self._action.initComboBoxDB(self.dep_input_11, '请选择历史库'))
self.dep_input_10.textChanged.connect(lambda: self._action.initComboBoxDB(self.dep_input_11, '请选择历史库'))
    # Control signals for page_2
    def connSignalPage_2(self, mainWidget):
        # Whether to update the system DB
        self.db_input_1.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'db_das'))
        # Whether to update the BI DB
        self.db_input_7.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'db_bi'))
        # Whether to update the process parameters
        self.db_input_13.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'db_pp'))
        # Signal for upgrading the DB
        self.db_comfirmBtn.clicked.connect(lambda: self._solot.createFullDB(mainWidget))
    # Control signals for page_3
    def connSignalPage_3(self, mainWidget):
        # Whether to extract the history DB
        self.nifi_input_11.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'nifi_history'))
        # Whether to extract the process parameters
        self.nifi_input_17.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'nifi_pp'))
        # Whether to enable login
self.nifi_input_23.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'nifi_islogin'))
        # Signal/slot for the business DB test button
self.getDBBtn_3.clicked.connect(lambda: self._solot.setDBNameList(mainWidget,
{
'ip': self.nifi_input_1.text(),
'port': self.nifi_input_2.text(),
'user': self.nifi_input_3.text(),
'pwd': self.nifi_input_4.text(),
},
self.nifi_input_5))
        # Re-initialise the business DB dropdown whenever an input field changes
self.nifi_input_1.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_5, '请选择业务库'))
self.nifi_input_2.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_5, '请选择业务库'))
self.nifi_input_3.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_5, '请选择业务库'))
self.nifi_input_4.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_5, '请选择业务库'))
        # Signal/slot for the BI DB test button
self.getDBBtn_4.clicked.connect(lambda: self._solot.setDBNameList(mainWidget,
{
'ip': self.nifi_input_6.text(),
'port': self.nifi_input_7.text(),
'user': self.nifi_input_8.text(),
'pwd': self.nifi_input_9.text(),
},
self.nifi_input_10))
        # Re-initialise the BI DB dropdown whenever an input field changes
self.nifi_input_6.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_10, '请选择BI库'))
self.nifi_input_7.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_10, '请选择BI库'))
self.nifi_input_8.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_10, '请选择BI库'))
self.nifi_input_9.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_10, '请选择BI库'))
        # Signal/slot for the history DB test button
self.getDBBtn_5.clicked.connect(lambda: self._solot.setDBNameList(mainWidget,
{
'ip': self.nifi_input_12.text(),
'port': self.nifi_input_13.text(),
'user': self.nifi_input_14.text(),
'pwd': self.nifi_input_15.text(),
},
self.nifi_input_16))
        # Re-initialise the history DB dropdown whenever an input field changes
self.nifi_input_12.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_16, '请选择历史库'))
self.nifi_input_13.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_16, '请选择历史库'))
self.nifi_input_14.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_16, '请选择历史库'))
self.nifi_input_15.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_16, '请选择历史库'))
        # Signal/slot for the process parameter DB test button
self.getDBBtn_6.clicked.connect(lambda: self._solot.setDBNameList(mainWidget,
{
'ip': self.nifi_input_18.text(),
'port': self.nifi_input_19.text(),
'user': self.nifi_input_20.text(),
'pwd': self.nifi_input_21.text(),
},
self.nifi_input_22))
        # Re-initialise the process parameter DB dropdown whenever an input field changes
self.nifi_input_18.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_22, '请选择工艺参数库'))
self.nifi_input_19.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_22, '请选择工艺参数库'))
self.nifi_input_20.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_22, '请选择工艺参数库'))
self.nifi_input_21.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_22, '请选择工艺参数库'))
        # Choose the Nifi template file path
self.getFile.clicked.connect(lambda: self._solot.getNifiTemplate(mainWidget, self._objsDict))
        # Signal for the Nifi upgrade confirm button
self.nifi_confirmBtn.clicked.connect(lambda: self._solot.updateNifiTemplate(mainWidget, self.nifiConfItem, self._objsDict)) | zhaicao/pythonWorkspace | DeployTool/frameUI/mainUI.py | mainUI.py | py | 15,888 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "frameUI.CreateControls.TraceControlsUI",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "frameUI.CreateTextUI.TraceCreateTextUI",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "frameUI.MainData.TraceObjItems",
"line_number": 14,
"usage_... |
38882380606 | from django.db import models
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFill
class ImageList(models.Model):
def __str__(self):
return self.file_name
file_path = models.CharField(
verbose_name='ファイルパス',
max_length=1000,
blank=False,
null=False,
)
file_name = models.CharField(
verbose_name='ファイル名',
max_length=100,
)
class ImageListDetail(models.Model):
imageList = models.ForeignKey(ImageList, on_delete=models.CASCADE)
def __str__(self):
return self.file_path
    # File path (path of the original image file)
file_path = models.CharField(
verbose_name='ファイルパス',
max_length=500,
blank=False,
null=False,
)
    # Image data
image_data = models.ImageField(
verbose_name='画像データ',
upload_to='images/',
)
    # Thumbnail
thumbnail = ImageSpecField(source="image_data",
processors=[ResizeToFill(150,150)],
format='JPEG',
options={'quality': 60}
)
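    # Note: ImageSpecField does not add a database column; django-imagekit renders
    # (and caches) the 150x150 JPEG when the field is accessed, e.g.
    # instance.thumbnail.url in a view or template.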
    # Display order
disp_order = models.IntegerField(
verbose_name='表示順',
blank=False,
null=False,
) | hogendan/SuzuImage | imagelist/models.py | models.py | py | 1,294 | python | ja | code | 0 | github-code | 6 | [
{
"api_name": "django.db.models.Model",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": ... |
27143309324 | #!/usr/bin/env python3
from PIL import Image
import os
path = os.getenv('HOME') + '/supplier-data/images/'
for file in os.listdir(path):
if (file.endswith('.tiff')):
shortFileName = file.rstrip('.tiff')
with Image.open(path + file) as im:
im.resize((600, 400)).convert('RGB').save(path + shortFileName + ".jpeg", "JPEG")
| Mark-C-Hall/Google-IT-Automate-Final | changeImage.py | changeImage.py | py | 357 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.getenv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 12,
... |
1008840332 | '''Problem 62 cubic permutations'''
import time
from itertools import permutations
t1 = time.time()
cubes = [x**3 for x in range(1001,10000)]
def make_list(cube):
cubestring = str(cube)
#print(cubestring)
cubelist = [int(x) for x in cubestring]
cubelist.sort()
return cubelist#.sort()
cube_lists = {x:make_list(x) for x in cubes}
#print(cube_lists)
#print(make_list(1234**3))
for cube in cube_lists.values():
sames = []
for cube2 in cube_lists.values():
if cube == cube2:
sames.append(cube)
sames.append(cube2)
if len(sames) == 5:
print(sames)
def fact(n):
if n<=1: return 1
return n*fact(n-1)
def permut(n):
tot = 0
'''returns a list of all the permutations of n'''
plist = set()
nstr = str(n)
p = permutations(nstr)
for i in range(fact(len(nstr))):
tempstr = ''
t = next(p)
#print("next p:",t)
for digit in t:
tempstr += digit
#print(tempstr,"plist:",plist)
if int(tempstr) in cubes:
print("Found cube:",tempstr,"plist:",plist)
plist.add(int(tempstr))
tot += 1
if len(plist) == 5:
return plist
return
#plist.append(int(tempstr))
'''for n in plist:
if n in cubes:
print("Found cube:",n)
tot += 1
return tot'''
#permut(1234**3)
'''for c in cubes[:5]:
if permut(c) == 5:
print("solution:",c)
break'''
t2 = time.time()
print(t2-t1)
#
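# A more direct approach than the exploration above (added sketch, not from the
# original author): bucket the cubes by their sorted digit string and take the
# smallest cube among the buckets that hold exactly five cubes.
def cubic_permutations(limit=10000):
    from collections import defaultdict
    groups = defaultdict(list)
    for n in range(1, limit):
        cube = n**3
        groups["".join(sorted(str(cube)))].append(cube)
    return min(min(group) for group in groups.values() if len(group) == 5)
#print(cubic_permutations())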
| hackingmath/Project-Euler | euler62.py | euler62.py | py | 1,623 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "time.time",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "itertools.permutations",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 71,
"usage_type": "call"
}
] |
39583332735 | # -*- coding:utf-8 -*-
from flask import json
from DataSet import db
from DataSet.fastdfs.view import fun
from DataSet.models import Image, Label
def storage(up_file, collection, file_name):
    image_status = None
    try:
image_status = fun.upload(up_file, file_ext_name='jpg')
image = Image()
image.name = file_name
image.site = image_status.get('file_id')
image.collection_id = collection.id
db.session.add(image)
db.session.commit()
except Exception as e:
        if image_status:
            fun.remove(image_status['filename'])
db.session.rollback()
return '{"err_no": "1", "err_desc": "数据保存失败", "data": %s}' % e
images = Image.query.filter_by(collection_id=collection.id, site=image_status['filename']).first()
return images
class ChangeJsonFile(object):
def __init__(self):
self.join_images = '{' + '"images": {'
self.join_images += '"data_uploaded": "%s",' + '"file_name": "%s",'
self.join_images += '"height": "%s",' + '"width": "%s",' + '"id": %s},'
self.json_label_dict = ["background", "person", "bicycle", "car", "motorcycle", "airplane", "bus",
"train", "truck", "boat", "traffic_light", "fire_hydrant", "stop_sign", "parking_meter",
"bench",
"bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe",
"backpack",
"umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports_ball",
"kite",
"baseball_bat", "baseball_glove", "skateboard", "surfboard", "tennis_racket", "bottle",
"wine_glass",
"cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange",
"broccoli", "carrot",
"hot_dog", "pizza", "donut", "cake", "chair", "couch", "potted_plant", "bed",
"dining_table", "toilet",
"tv", "laptop", "mouse", "remote", "keyboard", "cell_phone", "microwave", "oven",
"toaster", "sink",
"refrigerator", "book", "clock", "vase", "scissors", "teddy_bear", "hair_drier",
"toothbrush"]
def segmentation(self, images, size, file_name):
str_data = file_name
json_dict_data = json.loads(str_data)
annotation = json_dict_data.get('annotation')
data_list = []
for i in annotation:
category_id = i.get('category_id') # 标签label_id
try:
label_name = self.json_label_dict[category_id]
# print( label_name )
labels = Label.query.filter_by(name=label_name, collection_id=images.collection_id).first()
except:
continue
# labels = Label.query.filter_by(label_id=category_id, collection_id=images.collection_id).first()
if not labels:
continue
a = '{"bbox": ' + str(i['bbox']) + ','
a += '"category_id": ' + str(labels.label_id) + ',' + '"category_name": ' + '"{}"'.format(labels.name) + ','
a += '"segmentation": [' + str(i['segmentation']) + ']}' + ','
data_list.append(a)
next_join = '"classification": ' + '[],'
next_join += '"annotation": [' + ''.join(data_list)[:-1] + ']}'
str_list = [self.join_images, next_join]
data = ''.join(str_list)
up_file = data % (images.create_time, images.site[10:], size.get('height'), size.get('width'), images.id)
file_json = fun.upload(up_file, file_ext_name='json')
images.status = 3
images.label_path = file_json.get('file_id')
db.session.add(images)
db.session.commit()
cjf = ChangeJsonFile()
| limingzhang513/lmzrepository | data_module/src/Data_Processing/DataSet/utils/change_json_file.py | change_json_file.py | py | 4,014 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "DataSet.fastdfs.view.fun.upload",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "DataSet.fastdfs.view.fun",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "DataSet.models.Image",
"line_number": 12,
"usage_type": "call"
},
{
"api... |
35145658447 | import pytest
from .task import in_component
class Case:
def __init__(self, name: str, n: list, vertices: list, edges: list,
answer: bool):
self._name = name
self.n = n
self.vertices = vertices
self.edges = edges
self.answer = answer
def __str__(self) -> str:
return 'task4_test_{}'.format(self._name)
TEST_CASES = [
Case(
name='base1',
n=4,
vertices=[1, 2, 3],
edges=[
(1, 2),
(2, 3),
(1, 3),
],
answer=True,
),
Case(
name='base2',
n=4,
vertices=[1, 2, 3],
edges=[
(1, 2),
(3, 4),
],
answer=False,
),
Case(
name='base3',
n=4,
vertices=[4, 2, 3, 1],
edges=[
(1, 2),
],
answer=True,
),
Case(
name='base4',
n=2,
vertices=[1],
edges=[
(1, 2),
],
answer=False,
),
]
@pytest.mark.parametrize('case', TEST_CASES, ids=str)
def test_task4(case: Case) -> None:
answer = in_component(
n=case.n,
vertices=case.vertices,
edges=case.edges,
)
assert answer == case.answer
| renesat/Base-Graph-Contest | tasks/task4/test_public.py | test_public.py | py | 1,283 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "task.in_component",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 62,
"usage_type": "attribute"
}
] |
39441351801 | import re
import spacy
from bpemb import BPEmb
from mlearn import base
from string import punctuation
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.classes.preprocessor import TextPreProcessor
class Preprocessors(object):
"""A class to contain preprocessors and wrap preprocessing functions and their requirements."""
def __init__(self, liwc_dir: str = None):
"""Initialise cleaner class."""
self.tagger = spacy.load('en_core_web_sm', disable = ['ner', 'parser'])
self.liwc_dict = None
self.slurs = None
self.slur_window = None
if liwc_dir is None:
self.liwc_path = None
else:
self.liwc_path = liwc_dir + 'liwc-2015.csv'
def select_experiment(self, exp: str, slur_window: int = None) -> base.Callable:
"""
Select experiment to run.
:exp (str): The experiment to run.
        :returns experiment: Return the experiment to run.
"""
if exp == 'word':
experiment = self.word_token
elif exp == 'liwc':
experiment = self.compute_unigram_liwc
elif exp in ['ptb', 'pos']:
experiment = self.ptb_tokenize
elif exp == 'length':
experiment = self.word_length
elif exp == 'syllable':
experiment = self.syllable_count
elif exp == 'slur':
self.slur_window = slur_window
experiment = self.slur_replacement
return experiment
def word_length(self, doc: base.DocType) -> base.List[int]:
"""
Represent sentence as the length of each token.
:doc (base.DocType): Document to be processed.
:returns: Processed document.
"""
return [len(tok) for tok in doc]
def syllable_count(self, doc: base.DocType) -> base.List[int]:
"""
Represent sentence as the syllable count for each word.
:doc (base.DocType): Document to be processed.
:returns: Processed document.
"""
return [self._syllable_counter(tok) for tok in doc]
def _syllable_counter(self, tok: str) -> int:
"""
Calculate syllables for each token.
:tok (str): The token to be analyzed.
:returns count (int): The number of syllables in the word.
"""
count = 0
vowels = 'aeiouy'
exceptions = ['le', 'es', 'e']
prev_char = '<s>'
for i, char in enumerate(tok):
            # NOTE: enumerate() never yields i == len(tok), so this end-of-word
            # exception branch (silent 'e'/'es'/'le' endings) currently has no effect.
            if i == len(tok) and (prev_char + char in exceptions or char in exceptions):
                pass
if (char in vowels) and (prev_char not in vowels and char != prev_char):
count += 1
prev_char = char
return count
def load_slurs(self):
"""Load slurs file."""
self.slurs = None
# TODO Update this with a slur list
def slur_replacement(self, doc: base.DocType):
"""
Produce documents where slurs are replaced.
:doc (base.List[str]): Document to be processed.
:returns doc: processed document
"""
if self.slurs is None:
self.slurs = self.load_slurs()
slur_loc = [i for i, tok in enumerate(doc) if tok in self.slurs]
pos = [tok for tok in self.tagger(" ".join(doc))]
for ix in slur_loc: # Only look at the indices where slurs exist
min_ix = 0 if ix - self.slur_window < 0 else ix - self.slur_window
max_ix = len(doc) - 1 if ix + self.slur_window > len(doc) - 1 else ix + self.slur_window
for i in range(min_ix, max_ix, 1): # Do replacements within the window
doc[i] = pos[i]
return doc
def word_token(self, doc: base.DocType) -> base.DocType:
"""
Produce word tokens.
:doc (base.List[str]): Document to be processed.
:returns: processed document
"""
return doc
def ptb_tokenize(self, document: base.DocType, processes: base.List[str] = None) -> base.DocType:
"""
Tokenize the document using SpaCy, get PTB tags and clean it as it is processed.
:document: Document to be parsed.
:processes: The cleaning processes to engage in.
:returns toks: Document that has been passed through spacy's tagger.
"""
self.processes = processes if processes else self.processes
toks = [tok.tag_ for tok in self.tagger(" ".join(document))]
return toks
def read_liwc(self) -> dict:
"""Read LIWC dict."""
with open(self.liwc_path, 'r') as liwc_f:
liwc_dict = {}
for line in liwc_f:
k, v = line.strip('\n').split(',')
if k in liwc_dict:
liwc_dict[k] += [v]
else:
liwc_dict.update({k: [v]})
return liwc_dict
def _compute_liwc_token(self, tok: str, kleene_star: base.List[str]) -> str:
"""
Compute LIWC categories for a given token.
        :tok (str): Token to identify LIWC categories for.
        :kleene_star: List of kleene-star (wildcard) prefixes.
        :returns (str): Token represented in terms of LIWC categories.
"""
if tok in self.liwc_dict:
term = self.liwc_dict[tok]
else:
liwc_cands = [r for r in kleene_star if tok.startswith(r)]
num_cands = len(liwc_cands)
if num_cands == 0:
term = 'NUM' if re.findall(r'[0-9]+', tok) else 'UNK'
elif num_cands == 1:
term = liwc_cands[0] + '*'
elif num_cands > 1:
sorted_cands = sorted(liwc_cands, key=len, reverse = True) # Longest first
term = sorted_cands[0] + '*'
if term not in ['UNK', 'NUM']:
liwc_term = self.liwc_dict[term]
if isinstance(liwc_term, list):
term = "_".join(liwc_term)
else:
term = liwc_term
if isinstance(term, list):
term = "_".join(term)
return term
def compute_unigram_liwc(self, doc: base.DocType) -> base.DocType:
"""
Compute LIWC for each document document.
:doc (base.DocType): Document to operate on.
:returns liwc_doc (base.DocType): Document represented as LIWC categories.
"""
if not self.liwc_dict:
self.liwc_dict = self.read_liwc()
kleene_star = [k[:-1] for k in self.liwc_dict if k[-1] == '*']
parse_doc = []
doc = doc.split() if isinstance(doc, str) else doc
for w in doc:
if all(c in punctuation for c in w):
parse_doc.append(w)
elif any(c in punctuation for c in w):
parse_doc.append(w.strip(punctuation))
else:
parse_doc.append(w)
liwc_doc = [self._compute_liwc_token(tok, kleene_star) for tok in parse_doc]
assert(len(liwc_doc) == len(parse_doc))
return liwc_doc
# TODO Othering language:
# Parse the document to see if there are us/them, we/them/ i/you
# Consider looking at a window that are 2-5 words before/after a slur.
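# Rough usage sketch for the Preprocessors class above (paths and tokens are
# illustrative; the LIWC experiment needs a liwc-2015.csv under liwc_dir):
#
#   prep = Preprocessors(liwc_dir='data/liwc/')
#   length_fn = prep.select_experiment('length')
#   length_fn(['you', 'are', 'great'])   # -> [3, 3, 5]
#   liwc_fn = prep.select_experiment('liwc')
#   liwc_fn(['you', 'are', 'great'])     # tokens mapped to LIWC categories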
class Cleaner(object):
"""A class for methods for cleaning."""
def __init__(self, processes: base.List[str] = None, ekphrasis_base: bool = False):
"""
Initialise cleaner class.
:processes (base.List[str]): Cleaning operations to be taken.
:ekprhasis_base (bool, default = False): Use ekphrasis to pre-process data in cleaner.
"""
self.processes = processes if processes is not None else []
self.tagger = spacy.load('en_core_web_sm', disable = ['ner', 'parser', 'textcats'])
self.bpe = BPEmb(lang = 'en', vs = 200000).encode
self.ekphrasis_base = ekphrasis_base
self.ekphrasis = None
self.liwc_dict = None
def clean_document(self, text: base.DocType, processes: base.List[str] = None, **kwargs):
"""
Clean document.
:text (types.DocType): The document to be cleaned.
:processes (List[str]): The cleaning processes to be undertaken.
:returns cleaned: Return the cleaned text.
"""
if processes is None:
processes = self.processes
cleaned = str(text)
if 'lower' in processes:
cleaned = cleaned.lower()
if 'url' in processes:
cleaned = re.sub(r'https?:/\/\S+', 'URL', cleaned)
if 'hashtag' in processes:
cleaned = re.sub(r'#[a-zA-Z0-9]*\b', 'HASHTAG', cleaned)
if 'username' in processes:
cleaned = re.sub(r'@\S+', 'USER', cleaned)
return cleaned
def tokenize(self, document: base.DocType, processes: base.List[str] = None, **kwargs):
"""
Tokenize the document using SpaCy and clean it as it is processed.
:document: Document to be parsed.
:processes: The cleaning processes to engage in.
:returns toks: Document that has been passed through spacy's tagger.
"""
toks = [tok.text for tok in self.tagger(self.clean_document(document, processes = processes, **kwargs))]
return toks
def bpe_tokenize(self, document: base.DocType, processes: base.List[str] = None, **kwargs):
"""
Tokenize the document using BPE and clean it as it is processed.
:document: Document to be parsed.
:processes: The cleaning processes to engage in.
:returns toks: Document that has been passed through spacy's tagger.
"""
toks = self.bpe(self.clean_document(document, processes = processes, **kwargs))
return toks
def _load_ekphrasis(self, annotate: set, filters: base.List[str] = None, normalize: base.List[str] = None,
segmenter: str = 'twitter', corrector: str = 'twitter', hashtags: bool = False,
contractions: bool = True, elong_spell: bool = False,
**kwargs) -> None:
"""
Set up ekphrasis tokenizer.
:annotate (set): Set of annotations to use (controls corrections).
:filters (base.List[str], default = None): List of tokens to remove from documents.
:normalize (base.List[str], default = None): List of normalisations.
:segmenter (str, default = 'twitter'): Choose which ekphrasis segmenter to use.
:corrector (str, default = 'twitter'): Choose which ekphrasis spell correction to use.
:hashtags (bool, default = False): Unpack hashtags into multiple tokens (e.g. #PhDLife -> PhD Life).
:contractions (bool, default = True): Unpack contractions into multiple tokens (e.g. can't -> can not)
        :elong_spell (bool, default = False): Spell correct elongations.
"""
self.ekphrasis = TextPreProcessor(normalize = normalize if normalize is not None else [],
annotate = annotate,
fix_html = True,
segmenter = segmenter,
corrector = corrector,
unpack_hashtags = hashtags,
unpack_contractions = contractions,
spell_correct_elong = elong_spell,
tokenizer = SocialTokenizer(lowercase = True).tokenize)
self.filters = filters
def _filter_ekphrasis(self, document: base.DocType, **kwargs) -> base.List[str]:
"""
Remove Ekphrasis specific tokens.
:document (base.DocType): The document to process.
:returns document: Document filtered for ekphrasis specific tokens.
"""
if isinstance(document, list):
document = " ".join(document)
if self.filters is not None:
for filtr in self.filters:
document = document.replace(filtr, '')
document = document.split(" ")
return document
def ekphrasis_tokenize(self, document: base.DocType, processes: base.List[str] = None, **kwargs
) -> base.DocType:
"""
Tokenize the document using BPE and clean it as it is processed.
:document: Document to be parsed.
:processes: The cleaning processes to engage in.
:returns toks: Document that has been passed through spacy's tagger.
"""
if isinstance(document, list):
document = " ".join(document)
document = self.clean_document(document, processes, **kwargs)
document = self.ekphrasis.pre_process_doc(document)
return self._filter_ekphrasis(document, **kwargs)
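# Rough usage sketch for the Cleaner class above (illustrative only; constructing
# Cleaner downloads BPEmb vectors and needs spaCy's en_core_web_sm model):
#
#   cleaner = Cleaner(processes=['lower', 'url', 'username'])
#   cleaner.tokenize("@user check https://example.com")   # -> ['USER', 'check', 'URL']
#   cleaner._load_ekphrasis(annotate={'hashtag', 'elongated'})
#   cleaner.ekphrasis_tokenize("soooo #excited right now")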
| zeeraktalat/mlearn | mlearn/data/clean.py | clean.py | py | 12,772 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "spacy.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mlearn.base.Callable",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "mlearn.base",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "mlearn.base.DocType"... |
22075010385 | from flask import Flask, request, jsonify
from flask_cors import CORS
import sqlite3
import base64
app = Flask(__name__)
CORS(app)
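# Assumed layout of the Users table, inferred from the queries below (the column
# order is a guess; there is no schema file in this script):
#   Users(id, name, picture, role_id, password, username)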
@app.route('/')
def index():
return 'Index Page'
@app.route('/deleteUser', methods=['DELETE'])
def delete_user():
print("Petición DELETE")
try:
data = request.get_json()
username = data['username']
conn = sqlite3.connect('../Back-end/Database/MABG.db')
cursor = conn.cursor()
        # Check that the user exists before deleting it
cursor.execute("SELECT * FROM Users WHERE username=?", (username,))
user = cursor.fetchone()
if user:
cursor.execute("DELETE FROM Users WHERE username=?", (username,))
            conn.commit()  # Persist the changes to the database
conn.close()
return jsonify({"status": "Elemento eliminado"})
else:
conn.close()
return jsonify({"error": "Usuario no encontrado"}), 404
except sqlite3.Error as e:
return jsonify({'error': str(e)}), 500
except Exception as e:
return jsonify({'error': str(e)}), 500
@app.route('/users')
def users():
print("peticion de users")
conn = sqlite3.connect('../Back-end/Database/MABG.db')
cursor = conn.cursor()
try:
cursor.execute("SELECT username, role_id, picture FROM Users")
users = cursor.fetchall()
roles = ["Administrador" if user[1] <=
2 else "Usuario" for user in users]
user_info = []
for user, role in zip(users, roles):
username, _, picture_path = user
if user[2] is None:
picture_path = "/home/eliezercode/Documents/VSC/Proyecto MABG/Back-end/pictures/user/user.png"
with open(picture_path, "rb") as image_file:
picture_base64 = base64.b64encode(
image_file.read()).decode("utf-8")
user_info.append({
"username": username,
"role": role,
"picture": picture_base64
})
return jsonify({"data": user_info})
except sqlite3.Error as e:
return jsonify({'error': str(e)}), 500
except Exception as e:
return jsonify({'error': str(e)}), 500
finally:
conn.close()
@app.route('/login', methods=['POST'])
def login():
print("peticion de login")
conn = sqlite3.connect('../Back-end/Database/MABG.db')
cursor = conn.cursor()
try:
print("peticion de login")
data = request.get_json()
username = data['usuario']
password = data['password']
cursor.execute("SELECT * FROM Users WHERE username=?", (username,))
user = cursor.fetchone()
if not user:
return jsonify({'mensaje': 'No existe ese usuario'}), 404
rol = "Administrador" if user[3] <= 2 else "Usuario"
print("imagen:",user[2])
if user[2] is None:
image_binary = "/home/eliezercode/Documents/VSC/Proyecto MABG/Back-end/pictures/user/user.png"
with open(image_binary, "rb") as image_file:
image_binary = base64.b64encode(
image_file.read()).decode("utf-8")
else:
with open(user[2], "rb") as image_file:
image_binary = base64.b64encode(
image_file.read()).decode("utf-8")
user_data = {'name': user[1], 'pictureUrl': image_binary,
'role': rol, 'username': user[5]}
if user[4] == password:
            return jsonify({'user_data': user_data, 'mensaje': 'Inicio de sesion correctamente'}), 200
return jsonify({'mensaje': 'Inicio de sesion fallido'}), 401
except sqlite3.Error as e:
print("sqlite: ", e)
return jsonify({'error': str(e)}), 500
except Exception as e:
print("exeption: ", e)
return jsonify({'error': str(e)}), 500
finally:
conn.close()
@app.route('/addUsers', methods=['POST'])
def addUsers():
print("peticion de addUsers")
conn = sqlite3.connect('../Back-end/Database/MABG.db')
cursor = conn.cursor()
try:
data = request.get_json()
name = data['usuario']
username = data['username']
password = data['password']
rol = data['rol']
print("data: ", name, username, password, rol)
cursor.execute("INSERT INTO Users(name, role_id, password, username) VALUES (?, ?, ?, ?)",
(name, rol, password, username))
conn.commit()
conn.close()
print("Finished correctly")
        return jsonify({'mensaje': 'Registro de usuario correcto'}), 200
except sqlite3.Error as e:
print("error", e)
return jsonify({'error': str(e)}), 500
except Exception as e:
return jsonify({'error': str(e)}), 500
finally:
conn.close()
if __name__ == '__main__':
CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
# cors = CORS(app, resources={r"/login": {"origins": "http://localhost:5173"}})
app.run(debug=True)
| DevEliezerMartinez/PosMABG | Back-end/server.py | server.py | py | 5,235 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask.request",
... |
25893346960 | import pygame
from pygame.draw import *
from random import randint
pygame.init()
FPS = 60 # number of new circles (frames) per second
number_of_balls=4 # number of regular balls
points=0 # score counter
base_points_multiplier=100 # base multiplier for awarding points
x_res,y_res=1920/1.25, 1080/1.25 # resolution
res=[x_res,y_res]
sp_mult=0.01 # speed multiplier
screen = pygame.display.set_mode((x_res,y_res))
'''create the array of ball colours in pygame colour format'''
RED = (255, 0, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
GREEN = (0, 255, 0)
MAGENTA = (255, 0, 255)
CYAN = (0, 255, 255)
BLACK = (0, 0, 0)
COLORS = [RED, BLUE, YELLOW, GREEN, MAGENTA, CYAN]
class ball:
    '''stores the ball coordinates and recomputes them for the next frame; pos=[x,y] and spd=[Vx,Vy], where V is the coordinate change per frame'''
    def __init__(self):
        self.r=randint(y_res//100,y_res//8) # radius
        self.pos=[randint(self.r, int(x_res-self.r)),randint(self.r, int(y_res-self.r))] # position
        self.spd=[randint(-int(x_res*sp_mult), int(x_res*sp_mult)),randint(-int(y_res*sp_mult), int(y_res*sp_mult))] # velocities
        self.color=COLORS[randint(0,len(COLORS)-1)] # colour
    def new_frame(self): # draw a new frame
pygame.draw.circle(screen, self.color,self.pos,self.r)
for i in range(2):
self.pos[i]+=self.spd[i]
if(self.pos[i]<self.r or self.pos[i]>res[i]-self.r):
self.spd[i]=-self.spd[i]
self.pos[i]+=self.spd[i]
def click_check(event, balls,add):
    '''increments the hit counter and prints it when the click lands inside a circle; returns the counter.
    Takes the click event, the list of circles, and the scoring multiplier; the awarded points are inversely proportional to the circle radius'''
global points
for i in range(len(balls)):
x=balls[i].pos[0]
y=balls[i].pos[1]
if(((event.pos[0]-x)**2+(event.pos[1]-y)**2)<balls[i].r**2):
points += add/balls[i].r
def create_balls(n):
    '''creates the balls; n is how many'''
balls=[]
for i in range(n):
balls.append(ball())
return balls
def draw_balls(balls):
    '''draws the balls from the given list'''
for i in range(len(balls)):
balls[i].new_frame()
def gravitation(balls,a):
    '''changes the velocity of the balls in the given list; a is the acceleration'''
for i in range(len(balls)):
balls[i].spd[1]+=a
'''create the different kinds of balls'''
balls=create_balls(number_of_balls)
gravity_balls=create_balls(int(number_of_balls/2))
pygame.display.update()
clock = pygame.time.Clock()
finished = False
while not finished:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
finished = True
elif event.type == pygame.MOUSEBUTTONDOWN:
            click_check(event,balls,base_points_multiplier) # check whether a ball was hit
click_check(event,gravity_balls,2*base_points_multiplier)
print(points)
    draw_balls(balls) # draw the balls
    gravitation(gravity_balls,2) # apply gravity
draw_balls(gravity_balls)
pygame.display.update()
screen.fill(BLACK)
pygame.quit() | furs-aka-beast/mipt_inf | 1_sem/Lab8/balls.py | balls.py | py | 3,786 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "pygame.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "random.randint... |
43627131674 | import heapq
import collections
class Solution:
def assignBikes(self, workers, bikes):
dist_map = collections.defaultdict(list)
m, n = len(workers), len(bikes)
for i in range(m):
for j in range(n):
w = workers[i]
b = bikes[j]
dist = abs(w[0]-b[0]) + abs(w[1]-b[1])
heap = dist_map[dist]
heapq.heappush(heap, (i, j))
dist_map[dist] = heap
assigned_workers = set()
assigned_bikes = set()
res = [0]*m
distances = sorted(list(dist_map.keys()))
for d in distances:
heap = dist_map[d]
while heap:
pair = heapq.heappop(heap)
if pair[0] not in assigned_workers and pair[1] not in assigned_bikes:
res[pair[0]] = pair[1]
assigned_workers.add(pair[0])
assigned_bikes.add(pair[1])
return res
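if __name__ == "__main__":
    # Example from LeetCode 1057 (illustrative check of assignBikes above):
    # worker 1 takes bike 0 at distance 2 first, then worker 0 takes bike 1.
    print(Solution().assignBikes([[0, 0], [2, 1]], [[1, 2], [3, 3]]))  # [1, 0]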
| MichaelTQ/LeetcodePythonProject | solutions/leetcode_1051_1100/LeetCode1057_CampusBikes.py | LeetCode1057_CampusBikes.py | py | 986 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.defaultdict",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_number": 27,
"usage_type": "call"
}
] |
from __future__ import division
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from .quant_func import fix_quant as quant
### ==============================================================================###
### quant for different data types ###
### ==============================================================================###
act_quant = lambda x : quant(x, 3, 4, "act")
weight_quant = lambda x : quant(x, 2, 5, "weight")
bias_quant = lambda x : quant(x, 7, 8, "weight")
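# NOTE: the numeric arguments to quant() above (3, 4 / 2, 5 / 7, 8) are assumed to
# describe the fixed-point word format (e.g. integer vs. fractional bit widths);
# the authoritative meaning is defined by quant_func.fix_quant.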
### ===============================================================================###
### Quantization Modules ###
### ===============================================================================###
class QReLu(nn.ReLU):
__constants__ = ['inplace']
def __init__(self, inplace=False):
super(nn.ReLU, self).__init__()
self.inplace = inplace
def forward(self, input):
out = F.relu(input, inplace=self.inplace)
out = act_quant(out)
return out
def extra_repr(self):
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
class QLinear(nn.Linear):
def __init__(self, in_features, out_features, bias=True):
super(QLinear, self).__init__(in_features, out_features, bias=bias)
self.ia_quant = lambda x : quant(x, 5, 2, "act")
self.weight_quant = lambda x : quant(x, 3, 4, "weight")
self.bias_quant = lambda x : quant(x, 5, 2, "weight")
self.oa_quant = lambda x : quant(x, 5, 2, "act")
def forward(self, input):
input = self.ia_quant(input)
weight = self.weight_quant(self.weight)
if self.bias is not None :
bias = self.bias_quant(self.bias)
else :
bias = None
output = F.linear(input, weight, None)
output = self.oa_quant(output) # post bias
if self.bias is not None :
output = output + bias
output = self.oa_quant(output)
# output = F.linear(input, self.weight, self.bias)
return output
class QAveragePool2d(nn.AvgPool2d):
__constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None):
        # pass the constructor arguments through instead of resetting them to defaults
        super(QAveragePool2d, self).__init__(kernel_size, stride=stride, padding=padding, ceil_mode=ceil_mode,
                 count_include_pad=count_include_pad, divisor_override=divisor_override)
def forward(self, input):
input = act_quant(input)
out = F.avg_pool2d(input, self.kernel_size, self.stride,
self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override)
out = act_quant(out)
return out
class QConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False,
):
super(QConv2d, self).__init__(in_channels, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias)
def forward(self, input):
weight = weight_quant(self.weight)
if self.bias is not None:
bias = bias_quant(self.bias)
output = F.conv2d(input, weight, None, self.stride,
self.padding, self.dilation, self.groups)
output = act_quant(output)
if self.bias is not None :
output = output + bias.view(1, -1, 1, 1)
output = act_quant(output)
return output
class _QConvBnNd(nn.modules.conv._ConvNd):
_version = 2
def __init__(self,
# ConvNd args
in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding,
groups,
bias,
padding_mode,
# BatchNormNd args
# num_features: out_channels
eps=1e-05, momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=True,
):
nn.modules.conv._ConvNd.__init__(self, in_channels, out_channels, kernel_size,
stride, padding, dilation, transposed,
output_padding, groups, False, padding_mode)
self.freeze_bn = freeze_bn if self.training else True
# if self.training :
norm_layer = nn.BatchNorm2d
# else :
# norm_layer = IdentityBN
self.bn = norm_layer(out_channels, eps, momentum, True, True)
if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_bn_parameters()
# this needs to be called after reset_bn_parameters,
# as they modify the same state
if self.training:
if freeze_bn:
self.freeze_bn_stats()
else:
self.update_bn_stats()
else:
self.freeze_bn_stats()
def reset_running_stats(self):
self.bn.reset_running_stats()
def reset_bn_parameters(self):
self.bn.reset_running_stats()
nn.init.uniform_(self.bn.weight)
nn.init.zeros_(self.bn.bias)
        # note: below is actually for conv, not BN
if self.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def get_params(self):
running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
scale_factor = self.bn.weight / running_std
scaled_weight = self.weight_fake_quant(self.weight * scale_factor.reshape([-1, 1, 1, 1]))
# scaled bias :
if self.bias is not None :
scaled_bias = scale_factor * (self.bias - self.bn.running_mean) + self.bn.bias
else :
scaled_bias = - scale_factor * self.bn.running_mean + self.bn.bias
scaled_bias_q = self.bias_fake_quant(scaled_bias)
return scaled_weight, scaled_bias_q
def reset_parameters(self):
super(_QConvBnNd, self).reset_parameters()
def update_bn_stats(self):
self.freeze_bn = False
self.bn.training = True
return self
def freeze_bn_stats(self):
self.freeze_bn = True
self.bn.training = False
return self
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def _forward(self, input):
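        # Fold the BatchNorm statistics into the convolution: scale the weights by
        # gamma / sqrt(running_var + eps), quantise the folded weight and bias, and
        # only re-run the real BN (on the un-scaled conv output) while training.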
input = act_quant(input)
running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
scale_factor = self.bn.weight / running_std
scaled_weight = weight_quant(self.weight * scale_factor.reshape([-1, 1, 1, 1]))
# scaled bias :
# with torch.no_grad():
if self.bias is not None :
scaled_bias = scale_factor *(self.bias - self.bn.running_mean) + self.bn.bias
else :
scaled_bias = - scale_factor * self.bn.running_mean + self.bn.bias
scaled_bias_q = bias_quant(scaled_bias)
# this does not include the conv bias
conv = self._conv_forward(input, scaled_weight)
conv = act_quant(conv)
conv_bias = conv + scaled_bias_q.reshape([1, -1, 1, 1])
conv_bias = act_quant(conv_bias)
if self.training :
conv_bias_orig = conv_bias - scaled_bias.reshape([1, -1, 1, 1])
conv_orig = conv_bias_orig / scale_factor.reshape([1, -1, 1, 1])
conv_orig = conv / scale_factor.reshape([1, -1, 1, 1])
if self.bias is not None:
conv_orig = conv_orig + self.bias.reshape([1, -1, 1, 1])
conv = self.bn(conv_orig)
return conv
else :
return conv_bias
def extra_repr(self):
# TODO(jerryzh): extend
return super(_QConvBnNd, self).extra_repr()
def forward(self, input):
return act_quant(self._forward(input))
def train(self, mode=True):
"""
Batchnorm's training behavior is using the self.training flag. Prevent
changing it if BN is frozen. This makes sure that calling `model.train()`
on a model with a frozen BN will behave properly.
"""
self.training = mode
if not self.freeze_bn:
for module in self.children():
module.train(mode)
return self
# ===== Serialization version history =====
#
# Version 1/None
# self
# |--- weight : Tensor
# |--- bias : Tensor
# |--- gamma : Tensor
# |--- beta : Tensor
# |--- running_mean : Tensor
# |--- running_var : Tensor
# |--- num_batches_tracked : Tensor
#
# Version 2
# self
# |--- weight : Tensor
# |--- bias : Tensor
# |--- bn : Module
# |--- weight : Tensor (moved from v1.self.gamma)
# |--- bias : Tensor (moved from v1.self.beta)
# |--- running_mean : Tensor (moved from v1.self.running_mean)
# |--- running_var : Tensor (moved from v1.self.running_var)
# |--- num_batches_tracked : Tensor (moved from v1.self.num_batches_tracked)
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
version = local_metadata.get('version', None)
if version is None or version == 1:
# BN related parameters and buffers were moved into the BN module for v2
v2_to_v1_names = {
'bn.weight': 'gamma',
'bn.bias': 'beta',
'bn.running_mean': 'running_mean',
'bn.running_var': 'running_var',
'bn.num_batches_tracked': 'num_batches_tracked',
}
for v2_name, v1_name in v2_to_v1_names.items():
if prefix + v1_name in state_dict:
state_dict[prefix + v2_name] = state_dict[prefix + v1_name]
state_dict.pop(prefix + v1_name)
elif strict:
missing_keys.append(prefix + v2_name)
super(_QConvBnNd, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
class QConvBn2d(_QConvBnNd, nn.Conv2d):
r"""
A ConvBn2d module is a module fused from Conv2d and BatchNorm2d,
attached with FakeQuantize modules for both output activation and weight,
used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv2d` and
:class:`torch.nn.BatchNorm2d`.
Implementation details: https://arxiv.org/pdf/1806.08342.pdf section 3.2.2
Similar to :class:`torch.nn.Conv2d`, with FakeQuantize modules initialized
to default.
Attributes:
freeze_bn:
activation_post_process: fake quant module for output activation
weight_fake_quant: fake quant module for weight
"""
def __init__(self,
# ConvNd args
in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=None,
padding_mode='zeros',
# BatchNorm2d args
# num_features: out_channels
eps=1e-05, momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
_QConvBnNd.__init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, False, _pair(0), groups, bias, padding_mode,
eps, momentum, freeze_bn)
class QConvBnReLU2d(QConvBn2d):
r"""
A ConvBnReLU2d module is a module fused from Conv2d, BatchNorm2d and ReLU,
attached with FakeQuantize modules for both output activation and weight,
used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv2d` and
:class:`torch.nn.BatchNorm2d` and :class:`torch.nn.ReLU`.
Implementation details: https://arxiv.org/pdf/1806.08342.pdf
Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to
default.
Attributes:
observer: fake quant module for output activation, it's called observer
to align with post training flow
weight_fake_quant: fake quant module for weight
"""
def __init__(self,
# Conv2d args
in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=None,
padding_mode='zeros',
# BatchNorm2d args
# num_features: out_channels
eps=1e-05, momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False):
super(QConvBnReLU2d, self).__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias,
padding_mode, eps, momentum,
freeze_bn)
self.relu = nn.ReLU()
def forward(self, input):
return act_quant(self.relu(QConvBn2d._forward(self, input)))
| jmluu/ICAIS_ML.Pytorch | Quantization/modules/qlayers.py | qlayers.py | py | 14,252 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "quant_func.fix_quant",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "quant_func.fix_quant",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "quant_func.fix_quant",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.... |
18483249694 | import pygame
import random
import sys
from pygame.locals import *
from config import (
FPS,
MODIFIER,
WIDTH,
HEIGHT,
LINETHICKNESS,
PADDLESIZE,
PADDLEOFFSET,
BLACK,
GREY,
ORIGIN_X,
ORIGIN_Y,
DIFFICULTY,
MAX_SCORE
)
def drawArena():
DISPLAYSURF.fill((0, 0, 0))
# draw the outline of arena
pygame.draw.rect(DISPLAYSURF, GREY, ((0 ,0), (WIDTH, HEIGHT)), LINETHICKNESS*2)
# draw the middle line
pygame.draw.line(DISPLAYSURF, GREY, (int(WIDTH/2), 0), (int(WIDTH/2), HEIGHT), int(LINETHICKNESS/4))
def drawPaddle(paddle):
# checks boundaries
if paddle.bottom > HEIGHT - LINETHICKNESS:
paddle.bottom = HEIGHT - LINETHICKNESS
elif paddle.top < LINETHICKNESS:
paddle.top = LINETHICKNESS
# draws the paddle
pygame.draw.rect(DISPLAYSURF, GREY, paddle)
def drawBall(ball):
pygame.draw.rect(DISPLAYSURF, GREY, ball)
# moves the ball, returns new position
def moveBall(ball, ballDirX, ballDirY):
ball.x += (ballDirX * MODIFIER)
ball.y += (ballDirY * MODIFIER)
return ball
# checks for a collision with a wall, and 'bounces' off it.
def checkEdgeCollision(ball, ballDirX, ballDirY):
if ball.top == (LINETHICKNESS) or ball.bottom == (HEIGHT - LINETHICKNESS):
ballDirY = ballDirY * -1
if ball.left == (LINETHICKNESS) or ball.right == (WIDTH - LINETHICKNESS):
ballDirX = ballDirX * -1
return ballDirX, ballDirY
# checks if the ball has hit a paddle, and 'bounces' off it.
def checkPaddleCollision(ball, paddle1, paddle2, ballDirX):
if ballDirX == -1 and paddle1.right == ball.left and paddle1.top < ball.top and paddle1.bottom > ball.bottom:
return -1
elif ballDirX == 1 and paddle2.left == ball.right and paddle2.top < ball.top and paddle2.bottom > ball.bottom:
return -1
else:
return 1
# computer "ai"
def computerMove(ball, ballDirX, paddle2):
# if the ball is moving away from the paddle, center
if ballDirX == -1:
if paddle2.centery < (HEIGHT/2):
paddle2.y += MODIFIER - random.choice(DIFFICULTY)
elif paddle2.centery > (HEIGHT/2):
paddle2.y -= MODIFIER - random.choice(DIFFICULTY)
# if the ball moving towards the paddle, track its movement.
elif ballDirX == 1:
if paddle2.centery < ball.centery:
paddle2.y += MODIFIER - random.choice(DIFFICULTY)
else:
paddle2.y -= MODIFIER - random.choice(DIFFICULTY)
return paddle2
# checks to see if a point has been scored, returns new score
def checkScore(ball, p1_score, p2_score):
hit = False
# reset points if left wall is hit
if ball.left == LINETHICKNESS:
p2_score += 1
hit = True
# awards 1 point to the player if the right wall is hit
elif ball.right == WIDTH - LINETHICKNESS:
p1_score += 1
hit = True
# if no points scored, return score unchanged
return p1_score, p2_score, hit
# displays the current score on the screen
def displayScore(p1_score, p2_score):
# player
resultP1Surf = BASICFONT.render('Player %s' %(p1_score), True, GREY)
resultP1Rect = resultP1Surf.get_rect()
resultP1Rect.topright = (100, 25)
DISPLAYSURF.blit(resultP1Surf, resultP1Rect)
# computer
resultP2Surf = BASICFONT.render('Computer %s' %(p2_score), True, GREY)
resultP2Rect = resultP2Surf.get_rect()
resultP2Rect.topleft = (WIDTH - 150, 25)
DISPLAYSURF.blit(resultP2Surf, resultP2Rect)
# displays the end of the game
def gameOver():
finalSurf = BASICFONT.render('GAME OVER', True, GREY)
finalSurfRect = finalSurf.get_rect()
finalSurfRect.topright = (WIDTH/2 + 59, HEIGHT/2 - 50)
DISPLAYSURF.blit(finalSurf, finalSurfRect)
# main function
def main():
pygame.init()
global DISPLAYSURF
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption('Pongame')
# font information
global BASICFONT, BASICFONTSIZE
BASICFONTSIZE = 20
BASICFONT = pygame.font.Font('freesansbold.ttf', BASICFONTSIZE)
# initiate variables and set starting positions
# for any future changes made within rectangles
ballX = ORIGIN_X
ballY = ORIGIN_Y
playerOnePosition = playerTwoPosition = int((HEIGHT - PADDLESIZE) /2)
p1_score = p2_score = 0
game_over = False
# keeps track of the ball's direction
ballDirX = -1 # -1 = left 1 = right
ballDirY = -1 # -1 = up 1 = down
# creates Rectangles for ball and paddles
paddle1 = pygame.Rect(PADDLEOFFSET, playerOnePosition, LINETHICKNESS, PADDLESIZE)
paddle2 = pygame.Rect(WIDTH - PADDLEOFFSET - LINETHICKNESS, playerTwoPosition, LINETHICKNESS, PADDLESIZE)
ball = pygame.Rect(ballX, ballY, LINETHICKNESS, LINETHICKNESS)
# draws the starting position of the Arena
drawArena()
drawPaddle(paddle1)
drawPaddle(paddle2)
drawBall(ball)
pygame.mouse.set_visible(0)
while True:
# main game loop
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
# mouse movement commands
elif event.type == pygame.MOUSEMOTION and not game_over:
mousex, mousey = event.pos
paddle1.y = mousey
if not game_over:
drawArena()
drawPaddle(paddle1)
drawPaddle(paddle2)
drawBall(ball)
ball = moveBall(ball, ballDirX, ballDirY)
ballDirX, ballDirY = checkEdgeCollision(ball, ballDirX, ballDirY)
ballDirX = ballDirX * checkPaddleCollision(ball, paddle1, paddle2, ballDirX)
p1_score, p2_score, hit = checkScore(ball, p1_score, p2_score)
            paddle2 = computerMove(ball, ballDirX, paddle2)
displayScore(p1_score, p2_score)
game_over = p1_score + p2_score == MAX_SCORE
if hit:
ball.x = ballX = ORIGIN_X
ball.y = ballY = ORIGIN_Y
hit = False
pygame.time.wait(1000)
else:
gameOver()
pygame.display.update()
FPSCLOCK.tick(FPS)
if __name__=='__main__':
main() | samuele-mattiuzzo/pongame | pongame.py | pongame.py | py | 6,281 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.draw.rect",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "config.GREY",
"line_number": 26,
"usage_type": "argument"
},
{
"api_name": "pygame.draw",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "config.WIDTH",
... |
38255085940 | from django.shortcuts import reverse
from django.views.generic import TemplateView
from django.utils import timezone
from hknweb.utils import (
method_login_and_permission,
get_semester_bounds,
)
from hknweb.events.constants import ATTR
from hknweb.events.models import Event, EventType
from hknweb.events.utils import format_url
from hknweb.utils import get_access_level
@method_login_and_permission("events.add_rsvp")
class AllRsvpsView(TemplateView):
"""List of rsvp'd and not rsvp'd events."""
template_name = "events/all_rsvps.html"
def get_context_data(self):
# Get the start and end time for event filtering
start_time, end_time = get_semester_bounds(timezone.now())
if self.request.GET.get("option") == "upcoming":
start_time = timezone.now()
# Get the current event type
event_types = EventType.objects.order_by("type").all()
event_types = sorted(event_types, key=lambda e: not (e.type == ATTR.MANDATORY))
event_type = self.request.GET.get("event_type", event_types[0].type)
event_type = EventType.objects.filter(type=event_type).first()
# Get all events
all_events = Event.objects.filter(
start_time__gte=start_time,
start_time__lte=end_time,
access_level__gte=get_access_level(self.request.user),
event_type=event_type,
).order_by("start_time")
rsvpd_data, not_rsvpd_data = [], []
for event in all_events:
if event.rsvp_set.filter(user=self.request.user):
data, url = rsvpd_data, "events:unrsvp"
waitlisted = event.on_waitlist(self.request.user)
else:
data, url = not_rsvpd_data, "events:rsvp"
waitlisted = False
data.append(
{
"event": event,
"action": reverse(url, args=[event.id]),
"location": format_url(event.location),
"waitlisted": waitlisted,
}
)
data = [
{
ATTR.CLASS: "right-half",
ATTR.TITLE: "RSVP'd / Waitlist",
ATTR.EVENTS: rsvpd_data,
ATTR.DISPLAY_VALUE: "un-RSVP",
},
{
ATTR.CLASS: "left-half",
ATTR.TITLE: "Not RSVP'd",
ATTR.EVENTS: not_rsvpd_data,
ATTR.DISPLAY_VALUE: "RSVP",
},
]
context = {
"data": data,
"event_types": event_types,
"event_type": event_type,
}
return context
| Gabe-Mitnick/hknweb | hknweb/events/views/aggregate_displays/tabular.py | tabular.py | py | 2,679 | python | en | code | null | github-code | 6 | [
{
"api_name": "django.views.generic.TemplateView",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "hknweb.utils.get_semester_bounds",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 23,
"usage_type": "call"
... |
22837988470 | import pandas as pd
import networkx as nx
import pickle
import ast
def save_obj(obj, name ):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
df1 = pd.read_csv('../reading_and_cleaning/guest_host_cleaned_podcasts.csv', sep='\t', index_col=0)
split_hosts = pd.read_csv('../reading_and_cleaning/split_hosts.csv', sep='\t', index_col=0)
guest_durations = pd.read_csv('../reading_and_cleaning/guest_durations.csv', sep='\t', index_col=0)
G2 = nx.from_pandas_dataframe(guest_durations, 'guests', 'hosts', edge_attr=['duration'], create_using=nx.Graph())
podcast_info = pd.read_csv('../reading_and_cleaning/meta_podcast_info.csv', sep='\t', index_col=0)
host_list = []
for index1, row1 in podcast_info.iterrows():
hosts = ast.literal_eval(row1['Hosts'])
for host in hosts:
host_list.append(host)
host_list = set(host_list)
top_host_podcast = {}
top_guest_podcast = {}
host_podcasts = {}
guest_podcasts = {}
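# For every person in the guest/host graph, rank the podcasts they hosted and the
# podcasts they appeared on by total duration, keeping both the full ranking and
# the single top podcast.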
for node in G2.nodes():
if node in host_list:
#print(node)
df = split_hosts[split_hosts['hosts']==node]
host_durations = df.groupby(['podcast'])['duration'].sum()
host_durations = host_durations.reset_index()
host_durations = host_durations.sort_values(by='duration', ascending=False)
#print(host_durations['podcast'])
#top_podcast = host_durations['podcast'][0]
        # positional indexing: take the top row after sorting (label 0 would be the pre-sort first row)
        top_host_podcast[node] = host_durations['podcast'].iloc[0]
host_podcasts[node] = host_durations['podcast'].values
#print(host_durations['podcast'].values)
# for index, row in podcast_info.iterrows():
# if(row['Podcast Name']==top_podcast):
# top_cat = ast.literal_eval(row['categories'])[0]
# top_category[node] = top_cat
#print(node, top_cat)
df = df1[df1['guests']==node]
guest_durations = df.groupby(['podcast'])['duration'].sum()
guest_durations = guest_durations.reset_index()
guest_durations = guest_durations.sort_values(by='duration', ascending=False)
#top_podcast = guest_durations['podcast'][0]
if(len(guest_durations)==0):
continue
top_guest_podcast[node] = guest_durations['podcast'].iloc[0]
guest_podcasts[node] = guest_durations['podcast'].values
# for index, row in podcast_info.iterrows():
# if(row['Podcast Name']==top_podcast):
# top_cat = ast.literal_eval(row['categories'])[0]
# top_category[node] = top_cat
save_obj(top_host_podcast, 'top_host_podcast')
save_obj(top_guest_podcast, 'top_guest_podcast')
save_obj(host_podcasts, 'host_podcasts')
save_obj(guest_podcasts, 'guest_podcasts')
| brooksjaredc/podcast_network_analysis | analyzing_functions/set_top_podcast.py | set_top_podcast.py | py | 2,778 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pickle.dump",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pickle.HIGHEST_PROTOCOL",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
... |
4311447480 | from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
import numpy as np
###################################
#### Constants ###################
##################################
def run(X, Y, Xtest = None):
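    # Small 3-block CNN (conv -> relu -> max-pool) with a dense sigmoid head,
    # trained on (X, Y) with a 20% validation split and evaluated on Xtest.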
_,img_width, img_height,_ = X.shape
_, classes = Y.shape
#validation_data_dir = 'data/validation'
#nb_train_samples = 2000
#nb_validation_samples = 800
epochs = 10
batch_size = 16
#if K.image_data_format() == 'channels_first':
# input_shape = (3, img_width, img_height)
#else:
input_shape = (img_width, img_height, 3)
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(classes))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator()
# this is the augmentation configuration we will use for testing:
# only rescaling
model.fit(X, Y, epochs=10, verbose=1, validation_split=0.2, shuffle=True)
#model.fit(X,Y,epochs=25)
#a = model.predict(X)
#exp_scores = np.exp(a)
#probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
#ypred = np.argmax(probs, axis = 1)
#Y = np.argmax(Y, axis = 1)
#from sklearn.metrics import confusion_matrix, accuracy_score
#acc = accuracy_score(Y, ypred)
#print acc
#xval = X[:int(0.2 * len(X))]
#yval = model.predict(xval)
#ytrue = Y[:int(0.2 * len(X))]
return model.predict(Xtest)
| psrikanthm/satellite-image-classification | src/simple_arch.py | simple_arch.py | py | 2,151 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "keras.models.Sequential",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "keras.layers.Activation",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "k... |
38238085136 | import csv
from PIL import Image
import numpy as np
import os
X = []
index = 0
os.makedirs("base224flip", exist_ok=True)  # make sure the output folder exists before saving
for img in os.listdir("base224/"):
if img[-3:] == "jpg":
image = Image.open("base224/" + img)
img2 = image.transpose(Image.FLIP_LEFT_RIGHT)
        # JPEG quality must be in the 0-100 range; the original value (224) appears to
        # be the image size by mistake, so use a high but valid quality instead.
        img2.save("base224flip/" + img, "JPEG", quality=95, optimize=True, progressive=True)
index += 1
if index % 500 == 0:
print(index) | PKUGoodSpeed/FashionAiContest | Kedan/flip_images.py | flip_images.py | py | 395 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "os.listdir",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "PIL.Image.FLIP_LEFT_RIGHT",
"l... |
72013269309 | import numpy as np
from collections import deque
from segment_tree import SumSegmentTree,MinSegmentTree
class ReplayBuff(object):
def __init__(self,max_size,observation_shape):
self.max_size=max_size
self.observations=np.zeros([max_size,observation_shape],dtype=np.float32)
        self.actions=np.zeros([max_size],dtype=np.int64)  # np.int was removed in NumPy 1.24
self.rewards=np.zeros([max_size],dtype=np.float32)
self.next_observations=np.zeros([max_size,observation_shape],dtype=np.float32)
self.terminals=np.zeros(max_size,dtype=np.float32)
self.size=0
self.ptr=0
def append(self,obs,action,reward,next_obs,terminal):
self.observations[self.ptr]=obs
self.actions[self.ptr]=action
self.rewards[self.ptr]=reward
self.next_observations[self.ptr]=next_obs
self.terminals[self.ptr]=terminal
self.ptr=(self.ptr+1)%self.max_size
self.size=min(self.size+1,self.max_size)
def sample(self,batch_size):
if batch_size > self.size:
batch_idxs=np.arange(self.size)
else:
batch_idxs=np.random.choice(self.size, size=batch_size,replace=False)
return dict(obs=self.observations[batch_idxs],
action=self.actions[batch_idxs],
reward=self.rewards[batch_idxs],
next_obs=self.next_observations[batch_idxs],
done=self.terminals[batch_idxs])
def __len__(self):
return self.size
class PrioritizedReplayBuff(ReplayBuff):
def __init__(self,max_size,observation_shape,alpha=0.6):
assert alpha>=0
super(PrioritizedReplayBuff,self).__init__(max_size,observation_shape)
self.max_priority=1.0
self.tree_ptr=0
self.alpha=alpha
tree_capacity=1
while tree_capacity < self.max_size:
tree_capacity*=2
self.sum_tree=SumSegmentTree(tree_capacity)
self.min_tree=MinSegmentTree(tree_capacity)
def append(self,obs,action,reward,next_obs,terminal):
super(PrioritizedReplayBuff,self).append(obs,action,reward,next_obs,terminal)
self.sum_tree[self.tree_ptr]=self.max_priority ** self.alpha
self.min_tree[self.tree_ptr]=self.max_priority ** self.alpha
self.tree_ptr=(self.tree_ptr+1)%self.max_size
def sample(self,batch_size,beta=0.4):
assert beta>0
if batch_size>self.size:
batch_size=self.size
batch_idxs=self._sample_proportional(batch_size)
weights=np.array([self._calculate_weight(i,beta) for i in batch_idxs],dtype=np.float32)
return dict(obs=self.observations[batch_idxs],
action=self.actions[batch_idxs],
reward=self.rewards[batch_idxs],
next_obs=self.next_observations[batch_idxs],
done=self.terminals[batch_idxs],
weights=weights,
indices=batch_idxs)
def update_priorities(self,idxs,priorities):
assert len(idxs) == len(priorities)
for idx,priority in zip(idxs,priorities):
assert priority>0
assert 0<=idx<len(self)
self.sum_tree[idx]=priority**self.alpha
self.min_tree[idx]=priority**self.alpha
self.max_priority=max(self.max_priority,priority)
def _sample_proportional(self,batch_size):
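        # Stratified sampling: divide the total priority mass into batch_size equal
        # segments and draw one index from each segment via the sum-tree.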
batch_idxs=[]
p_total=float(self.sum_tree.sum(0,len(self)-1))
segment=p_total/batch_size
for i in range(batch_size):
upperbound=np.random.uniform(segment*i,segment*(i+1))
batch_idxs.append(self.sum_tree.retrieve(upperbound))
return batch_idxs
def _calculate_weight(self,idx,beta):
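        # Importance-sampling weight (N * P(i))^(-beta), normalised by the largest
        # possible weight (which corresponds to the minimum priority).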
p_min=float(self.min_tree.min())/self.sum_tree.sum()
max_weight=(p_min*len(self))**(-beta)
p_sample=self.sum_tree[idx]/float(self.sum_tree.sum())
weight=(p_sample*len(self))**(-beta)
weight=weight/max_weight
return weight
class multistepReplayBuff(object):
def __init__(self,max_size,observation_shape,n_step,gamma):
self.max_size=max_size
self.observations=np.zeros([max_size,observation_shape],dtype=np.float32)
        self.actions=np.zeros([max_size],dtype=np.int64)  # np.int was removed in NumPy 1.24
self.rewards=np.zeros([max_size],dtype=np.float32)
self.next_observations=np.zeros([max_size,observation_shape],dtype=np.float32)
self.terminals=np.zeros(max_size,dtype=np.float32)
self.size=0
self.ptr=0
# for multi-step dqn
self.multi_step_buffer = deque(maxlen=n_step)
self.n_step=n_step
self.gamma=gamma
def append(self,obs,action,reward,next_obs,done):
transtion = (obs,action,reward,next_obs,done)
self.multi_step_buffer.append(transtion)
if len(self.multi_step_buffer) >= self.n_step:
reward,next_obs,done = self._get_n_step_info()
obs,action = self.multi_step_buffer[0][:2]
self.observations[self.ptr]=obs
self.actions[self.ptr]=action
self.rewards[self.ptr]=reward
self.next_observations[self.ptr]=next_obs
self.terminals[self.ptr]=done
self.ptr=(self.ptr+1)%self.max_size
self.size=min(self.size+1,self.max_size)
def sample(self,batch_size):
if batch_size > self.size:
batch_idxs=np.arange(self.size)
else:
batch_idxs=np.random.choice(self.size, size=batch_size,replace=False)
return dict(obs=self.observations[batch_idxs],
action=self.actions[batch_idxs],
reward=self.rewards[batch_idxs],
next_obs=self.next_observations[batch_idxs],
done=self.terminals[batch_idxs])
def sample_from_indexs(self,batch_idxs):
return dict(obs=self.observations[batch_idxs],
action=self.actions[batch_idxs],
reward=self.rewards[batch_idxs],
next_obs=self.next_observations[batch_idxs],
done=self.terminals[batch_idxs])
def _get_n_step_info(self):
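        # Accumulate the discounted n-step return, cutting the rollout short at the
        # first terminal transition, and return the matching reward/next_obs/done.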
for index in range(self.n_step):
if self.multi_step_buffer[index][-1]:
break
reward, next_obs, done = self.multi_step_buffer[index][-3:]
if index:
for transition in reversed(list(self.multi_step_buffer)[:index]):
r = transition[2]
reward = r + self.gamma * reward
return reward, next_obs, done
def __len__(self):
return self.size
if __name__=='__main__':
rb=ReplayBuff(512,6)
for i in range(50):
rb.append(np.random.randn(6),np.random.randn(),3.7,np.random.randn(6),3.3)
#print("sample test\n sample return type:"+str(type(rb.sample(1))))
#print(rb.sample(128))
| linnaeushuang/RL-pytorch | value-based/rainbow/memory.py | memory.py | py | 6,851 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.int",
"line_number... |
74288480507 | from django.urls import include, path
from rest_framework.routers import DefaultRouter
from seeds.api.views import AudioClipViewSet, BlobViewSet, SuiteViewSet, UserViewSet
from seeds.views import index, register
router = DefaultRouter()
router.register(r"users", UserViewSet, basename="User")
router.register(r"suites", SuiteViewSet, basename="Suite")
router.register(r"blobs", BlobViewSet, basename="Blob")
router.register(r"audioclips", AudioClipViewSet, basename="AudioClip")
urlpatterns = [
path("", index, name="index"),
path("register", register, name="register"),
path("accounts/", include("django.contrib.auth.urls")),
# api
path("api/v1/", include(router.urls)),
]
| jacobshandling/soundseeker | backend/seeds/urls.py | urls.py | py | 697 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "seeds.api.views.UserViewSet",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "seeds.api.views.SuiteViewSet",
"line_number": 9,
"usage_type": "argume... |
37430808138 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: TRS WCM 6.x infoview information disclosure
referer: http://www.wooyun.org/bugs/wooyun-2012-012957
author: Lucifer
description: The infoview.do file leads to an information disclosure.
'''
import sys
import requests
class trs_wcm_infoview_disclosure_BaseVerify:
def __init__(self, url):
self.url = url
def run(self):
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
}
payload = "/wcm/infoview.do?serviceid=wcm6_user&MethodName=getOnlineUsers"
vulnurl = self.url + payload
try:
req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
if r"<USERNAME>" in req.text and r"<Users>" in req.text:
return "[+]存在TRS wcm 6.x版本infoview信息泄露漏洞...(中危)\tpayload: "+vulnurl
except:
return "[-]connect timeout"
if __name__ == "__main__":
testVuln = trs_wcm_infoview_disclosure_BaseVerify(sys.argv[1])
testVuln.run() | iceyhexman/onlinetools | scanner/plugins/cms/trs/trs_wcm_infoview_disclosure.py | trs_wcm_infoview_disclosure.py | py | 1,115 | python | en | code | 1,626 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 34,
"usage_type": "attribute"
}
] |
29564634745 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
from api.thanos_http import xtthanos_user_http, request_data
from api.http_api import ResultBase
from common.logger import logger
from common.get_signature import generate_auth_info
def adjust_leverage(leverage,positionSide,symbol):
'''调整杠杆倍数'''
result = ResultBase()
adjust_leverage = request_data.get('adjust_leverage')
params = {
        'leverage' : leverage, # leverage multiplier
        'positionSide': positionSide, # position side: LONG or SHORT
        'symbol' : symbol # trading pair
}
path = adjust_leverage.get('route') + adjust_leverage.get('path')
method = adjust_leverage.get('method')
headers = generate_auth_info(path=path, method=method,params=params,bodymod='x-www-form-urlencoded')
res = xtthanos_user_http.adjust_leverage(headers=headers, params=params)
result.status_code = res.status_code
result.response = res.json()
logger.info(f"调整杠杆倍数 ==>> 返回结果 ==>> {res.text}")
return result
| shiqilouyang/thanos_test | operation/contract/client/position/adjust_leverage.py | adjust_leverage.py | py | 1,084 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "api.http_api.ResultBase",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "api.thanos_http.request_data.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "api.thanos_http.request_data",
"line_number": 12,
"usage_type": "name"
},
{
... |
41978223591 | from pyspark.sql.types import StructType, StructField, StringType, DateType, FloatType
from pyspark.sql import SparkSession
from datetime import datetime
from task_4 import get_min_or_max_by_ppu
import pytest
# create a spark session
spark = SparkSession.builder.appName("task_0").getOrCreate()
# create a test dataframe
schema = StructType([
StructField('ndc11', StringType()),
StructField('invoice_date', DateType()),
StructField('invoice_cost', FloatType()),
StructField('invoice_quan', FloatType()),
StructField('bu_.customer_name', StringType()),
])
data = [(1, datetime(2020, 1, 15), 40.0, 10.0, 'John'),
(1, datetime(2020, 1, 7), 50.0, 10.0, 'Ann'),
(1, datetime(2020, 1, 22), 40.0, 2.0, 'Ann'),
(1, datetime(2020, 2, 15), 20.0, 10.0, 'John'),
(1, datetime(2020, 2, 7), 50.0, 10.0, 'Ann'),
(1, datetime(2020, 2, 21), 40.0, 20.0, 'Mathew'),
(2, datetime(2020, 2, 22), 50.0, 10.0, 'Carter'),
(2, datetime(2020, 2, 22), 40.0, 8.0, 'Ann')
]
test_trx_df = spark.createDataFrame(data, schema=schema)
@pytest.mark.parametrize('rows, sort, expected_cost, expected_names',
[(1, 'min', [40.0, 20.0, 40.0], ['Ann', 'John', 'John']),
(1, 'max', [40.0, 50.0, 40.0], ['Ann', 'Ann', 'Ann'])])
def test_get_min_or_max_by_ppu(rows, sort, expected_cost, expected_names):
result_df = get_min_or_max_by_ppu(test_trx_df, rows, sort)
actual_result = result_df.collect()
actual_invoice_cost = [row.invoice_cost for row in actual_result]
actual_names = [row['bu_.customer_name'] for row in actual_result]
assert actual_invoice_cost == expected_cost
assert actual_names == expected_names
| rkrvchnk/pyspark_tasks | tests/test_task_4.py | test_task_4.py | py | 1,784 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pyspark.sql.SparkSession.builder.appName",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 10,
"usage_type"... |
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime, date, time
from django.utils import timezone
# Create your models here.
class TipoVehiculo(models.Model):
cod_vehiculo = models.CharField(max_length=100, null=False,verbose_name="Código vehículo")
tipo_vehiculo = models.CharField(max_length=100, null=False,verbose_name="Tipo vehículo")
creado_el = models.DateTimeField(auto_now_add=True, verbose_name='Creado el')
class Meta:
verbose_name = 'Tipo de vehículo'
verbose_name_plural = 'Tipos de vehículos'
ordering = ['-creado_el']
def __str__(self):
return self.tipo_vehiculo
class TipoPoliza(models.Model):
cod_poliza = models.CharField(max_length=100, null=False,verbose_name="Código póliza")
tipo_poliza = models.CharField(max_length=100, null=False,verbose_name="Tipo póliza")
creado_el = models.DateTimeField(auto_now_add=True, verbose_name='Creado el')
class Meta:
verbose_name = 'Tipo de póliza'
verbose_name_plural = 'Tipos de póliza'
ordering = ['-creado_el']
def __str__(self):
return self.tipo_poliza
class TipoSiniestro(models.Model):
cod_siniestro= models.CharField(max_length=100, null=False,verbose_name="Código siniestro")
tipo_siniestro = models.CharField(max_length=100, null=False,verbose_name="Tipo siniestro")
creado_el = models.DateTimeField(auto_now_add=True, verbose_name='Creado el')
class Meta:
verbose_name = 'Tipo Siniestro'
verbose_name_plural = 'Tipos de siniestro'
ordering = ['-creado_el']
def __str__(self):
return self.tipo_siniestro
class Marca(models.Model):
cod_marca= models.CharField(max_length=100, null=False,verbose_name="Código marca")
marca = models.CharField(max_length=100, null=False,verbose_name="Marca")
creado_el = models.DateTimeField(auto_now_add=True, verbose_name='Creado el')
class Meta:
verbose_name = 'Marca'
verbose_name_plural = 'Marcas'
ordering = ['marca']
def __str__(self):
return self.marca
class ModeloVehiculo(models.Model):
marca_modelo = models.OneToOneField(Marca,null=False,on_delete= models.CASCADE)
#tipo_modelo = models.OneToOneField(TipoVehiculo,null=False,on_delete= models.CASCADE)
cod_modelo= models.CharField(max_length=100, null=False,verbose_name="Código modelo")
modelo = models.CharField(max_length=100, null=False,verbose_name="Modelo vehículo")
creado_el = models.DateTimeField(auto_now_add=True, verbose_name='Creado el')
class Meta:
verbose_name = 'Modelo vehículo'
verbose_name_plural = 'Modelos de vehículo'
ordering = ['modelo']
def __str__(self):
return self.modelo
class TablaSiniestros(models.Model):
tipo_de_siniestro = models.OneToOneField(TipoSiniestro,null=False,on_delete=models.CASCADE,default="")
tipo_de_poliza = models.OneToOneField(TipoPoliza,null=False,on_delete=models.CASCADE,default="")
nombre_marca = models.OneToOneField(Marca,null=False,on_delete=models.CASCADE,default="")
nombre_modelo = models.OneToOneField(ModeloVehiculo,null=False,on_delete=models.CASCADE,default="")
tipo_de_vehiculo = models.OneToOneField(TipoVehiculo,null=False,on_delete=models.CASCADE,default="")
nombre_conductor = models.CharField(max_length=100, null=False, verbose_name="Nombre conductor(a)")
apellido_conductor = models.CharField(max_length=100, null=False, verbose_name="Apellido conductor(a)")
    edad_conductor = models.IntegerField(null=False, verbose_name="Edad conductor(a)", default=18)
rut_conductor = models.CharField(max_length=100, null=False, verbose_name="Rut conductor(a)")
fecha_siniestro = models.DateField(null=False, verbose_name="Fecha siniestro")
fecha_registro = models.DateTimeField(auto_now_add=True, verbose_name='Fecha registro')
descripcion_siniestro = models.TextField(max_length=300,null=False,verbose_name="Descripción siniestro")
class Meta:
verbose_name = 'Siniestro'
verbose_name_plural = 'BD Siniestros'
ordering = ['rut_conductor']
def __str__(self):
return self.rut_conductor
INGRESADO= 'Ingresado'
APROBADO = 'Aprobado'
EN_REPARACION = 'En reparación'
EN_ENTREGA = 'En entrega'
CERRADA = 'Cerrada'
INCIDENCIA = 'Incidencia'
ESTADO_CHOICES = (
(INGRESADO,INGRESADO),
(APROBADO,APROBADO),
(EN_REPARACION,EN_REPARACION),
(EN_ENTREGA,EN_ENTREGA),
(CERRADA,CERRADA),
(INCIDENCIA,INCIDENCIA),
)
class TablaDeSiniestros(models.Model):
usuario = models.ForeignKey(
User,
on_delete=models.CASCADE,
null=True,
blank=True)
tipo_de_siniestro = models.ForeignKey(TipoSiniestro,blank=False,on_delete=models.CASCADE,verbose_name="Tipo de siniestro",default="")
tipo_de_modelo = models.ForeignKey(ModeloVehiculo,blank=False,on_delete=models.CASCADE, verbose_name="Modelo vehículo",default="")
nombre_marca = models.ForeignKey(Marca,blank=False,on_delete=models.CASCADE,verbose_name="Marca vehículo",default="")
tipo_de_vehiculo = models.ForeignKey(TipoVehiculo,blank=False,on_delete=models.CASCADE,verbose_name="Tipo de vehículo",default="")
nombre_conductor = models.CharField(max_length=100, blank=False, verbose_name="Nombre conductor(a)",default="")
apellido_conductor = models.CharField(max_length=100, blank=False, verbose_name="Apellido conductor(a)",default="")
edad_conductor = models.IntegerField(blank=False, verbose_name="Edad conductor(a)",default=18)
rut_conductor = models.CharField(max_length=100, blank=False, verbose_name="Rut conductor(a)",default="")
tipo_de_poliza = models.ForeignKey(TipoPoliza,blank=False,on_delete=models.CASCADE,verbose_name="Póliza contratada",default="")
fecha_siniestro = models.DateField(auto_now_add=False,auto_now=False, null=False, blank=False, verbose_name="Fecha siniestro",default=timezone.now)
descripcion_siniestro = models.TextField(max_length=300,blank=False, null=False,verbose_name="Descripción siniestro",default="")
fecha_registro = models.DateTimeField(auto_now_add=True, verbose_name='Fecha registro')
updated_at = models.DateField(auto_now=True,verbose_name="Última actualización")
estado_siniestro = models.CharField(max_length=50,choices=ESTADO_CHOICES,default=INGRESADO)
class Meta:
verbose_name = 'Ingreso de siniestro'
verbose_name_plural = 'Ingreso de siniestros'
#ordering = ['tipo_de_siniestro']
def __str__(self):
datos = f'{self.nombre_conductor} {self.apellido_conductor} / Rut {self.rut_conductor}'
return datos
ADMIN = 'Administrador'
LIQUIDADOR = 'Liquidador'
CLIENTE = 'Cliente'
USUARIO_CHOICES = (
(ADMIN,ADMIN),
(LIQUIDADOR,LIQUIDADOR),
(CLIENTE,CLIENTE),
)
class DataBaseUsuarios(models.Model):
tipo_usuario = models.CharField(max_length=50,choices=USUARIO_CHOICES)
nombre_usuario = models.CharField(max_length=50,blank=False,default='')
apellido_usuario = models.CharField(max_length=50,blank=False,default='')
cargo_usuario = models.CharField(max_length=50,blank=False,default='')
mail_usuario = models.EmailField(max_length=100, default='@liquidaya.com')
class Meta:
ordering = ['tipo_usuario','apellido_usuario']
def __str__(self):
nombre_completo = f'{self.apellido_usuario}, {self.nombre_usuario} / {self.tipo_usuario} / {self.cargo_usuario}'
return nombre_completo
class FormularioContacto(models.Model):
nombre_contacto = models.CharField(max_length=100,verbose_name="Nombre")
apellido_contacto = models.CharField(max_length=100,verbose_name="Apellido")
email_contacto = models.EmailField(max_length=100,verbose_name="Email")
texto_contacto = models.TextField(max_length=400,verbose_name="Mensaje")
def __str__(self):
return self.email_contacto | xpilasi/segundo_liquidadora | web_base/models.py | models.py | py | 8,166 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "django.db.models.Model",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 16,
"usage_type": "call"
},
{
"api_name"... |
14536874544 | # Callbacks
from dash import Input, Output, State
from dash.exceptions import PreventUpdate
def sidebar_callbacks(app, df):
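    # Chained dropdowns (company -> brand -> city -> zone): each callback filters the
    # dataframe by the previous selection and stashes the filtered frame on the
    # callback function itself so the next level can reuse it.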
@app.callback(
[
Output("brand_dropdwn", 'options'),
Output("brand_dropdwn", 'value')
],
[
State("date_picker", "start_date"),
State("date_picker", "end_date"),
],
[
Input("company_dropdwn", 'value')
]
)
def get_brand_options(start_date, end_date, company):
if company is None or start_date is None or end_date is None:
raise PreventUpdate
else:
get_brand_options.df2 = df[df['NombreDeProductor'] == company]
brand_options = [{'label': b, 'value': b} for b in get_brand_options.df2.marca.unique()]
return brand_options, None
@app.callback(
[Output("city_dropdwn", 'options'),
Output("city_dropdwn", 'value')],
[Input("brand_dropdwn", 'value')])
def get_city_list(marca):
if marca is None:
raise PreventUpdate
else:
get_city_list.df3 = get_brand_options.df2[get_brand_options.df2['marca'] == marca]
city_options = [{'label': c, 'value': c} for c in get_city_list.df3.Municipio.unique()]
return city_options, None
@app.callback(
[Output("zone_dropdwn", 'options'),
Output("zone_dropdwn", 'value')],
[Input("city_dropdwn", 'value')])
def get_zone_list(city):
if city is None:
raise PreventUpdate
else:
get_zone_list.df4 = get_city_list.df3[get_city_list.df3['Municipio'] == city]
zone_options = [{'label': z, 'value': z} for z in get_zone_list.df4.Zona.unique()]
return zone_options, None
| jmalcovich21/ds4a_tiendareg | callbks/sidebar_calls.py | sidebar_calls.py | py | 1,794 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dash.exceptions.PreventUpdate",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "dash.Output",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "dash.Output",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "dash.State",
... |
5584993015 | import asyncio
import httpx
from .._utils import chunk_file, format_locations
from .._exceptions import UploadException, LocationRetrieveException
class ConcatenateUploader:
default_concatenate_headers = {
"Tus-Resumable": "1.0.0",
"Upload-Concat": "partial",
}
default_chunk_size = 4 * 1024 * 1024
def __init__(self, client):
self.client = client
def get_creation_concatenate_headers(self, upload_length):
headers = {
"Upload-Length": str(upload_length),
**self.default_concatenate_headers,
}
return headers
def get_upload_concatenate_headers(self, content_length):
headers = {
"Upload-Offset": "0",
"Content-Length": str(content_length),
"Content-Type": "application/offset+octet-stream",
**self.default_concatenate_headers,
}
return headers
def get_concatenate_headers(self, *location_urls):
return {
"Upload-Concat": format_locations(*location_urls),
}
async def get_location(self, upload_url, headers=None):
response: httpx.Response = await self.client.post(
upload_url, headers=headers or {}
)
if not response.is_success:
raise LocationRetrieveException(response.text)
return response.headers["location"]
async def upload_chunk(self, chunk, upload_url):
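        # Create a partial tus upload for this chunk (POST returns its Location),
        # then PATCH the chunk bytes to that location starting at offset 0.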
_chunk_len = len(chunk)
creation_headers = self.get_creation_concatenate_headers(_chunk_len)
location = await self.get_location(upload_url, headers=creation_headers)
concatenate_headers = self.get_upload_concatenate_headers(_chunk_len)
response = await self.client.patch(
location, data=chunk, headers=concatenate_headers
)
if not response.is_success:
raise UploadException(response.text)
return location, response
async def upload_chunks(self, fp, upload_url, chunk_size=None):
chunk_size = chunk_size or self.default_chunk_size
tasks = [
self.upload_chunk(
chunk,
upload_url,
)
for chunk in chunk_file(fp, chunk_size=chunk_size)
]
results = await asyncio.gather(*tasks)
summary = dict(results)
failures = [res for res in summary.values() if not res.is_success]
if failures:
raise UploadException()
return summary
async def perform_concatenate(self, upload_url, *locations):
headers = {
**self.default_concatenate_headers,
**self.get_concatenate_headers(*locations),
}
location = await self.get_location(upload_url, headers=headers)
return location
async def upload(self, fp, upload_url, chunk_size=None):
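        # Upload all chunks concurrently as partial uploads, then ask the server to
        # concatenate them into the final upload and return its location.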
self.client.timeout.write = None
summary = await self.upload_chunks(fp, upload_url, chunk_size=chunk_size)
locations = summary.keys()
location = await self.perform_concatenate(upload_url, *locations)
return location
| LesPrimus/aiotusx | aiotusx/_uploaders/concatenate.py | concatenate.py | py | 3,146 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "_utils.format_locations",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "httpx.Response",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "_exceptions.LocationRetrieveException",
"line_number": 46,
"usage_type": "call"
},
{
... |
1068937513 | import numpy as np
import gymnasium as gym
from pendulum_model import PendulumModel
import cvxpy as cp
dt3g2l = 3 * 10 / (2 * 1) * 0.05
dt = 0.05
dt3ml2 = 3 * 0.05 / (1 * 1 * 1)
class CVX_SQP:
def __init__(self):
self.N = 30
self.theta_cost_weight = 1
self.theta_dot_cost_weight = 0.1
self.u_cost_weight = 0.001
self.theta = np.zeros(self.N)
self.theta_dot = np.zeros(self.N)
self.u = np.zeros(self.N)
self.lambda_vec = np.zeros(self.N*8+2)
self.delta_theta = cp.Variable(self.N)
self.delta_theta_dot = cp.Variable(self.N)
self.delta_u = cp.Variable(self.N)
self.slack_var_1 = cp.Variable(self.N)
self.slack_var_2 = cp.Variable(self.N)
self.target_value=[]
def set_init_traj(self, model_log):
self.theta = model_log['theta']
self.theta_dot = model_log['theta_dot']
self.u = model_log['u']
def solve_once(self):
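        # One SQP iteration: solve a QP in the step variables (delta_theta,
        # delta_theta_dot, delta_u) linearised around the current trajectory, with a
        # curvature term weighted by the stored dual values lambda_vec.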
cost = 0
constr = []
constr += [self.delta_theta[0] == 0]
constr += [self.delta_theta_dot[0] == 0]
for i in range(0, self.N - 1):
cost += self.theta_cost_weight * cp.square(self.delta_theta[i]) + \
self.theta_dot_cost_weight * cp.square(self.delta_theta_dot[i]) + \
self.u_cost_weight * cp.square(self.delta_u[i]) + \
0.5 * self.lambda_vec[2+8*i] * cp.square(self.delta_theta[i]) * (-dt3g2l * np.sin(self.theta[i])) + \
self.theta_cost_weight * self.theta[i] * self.delta_theta[i] + \
self.theta_dot_cost_weight * self.theta_dot[i] * self.delta_theta_dot[i] + \
self.u_cost_weight * self.u[i] * self.delta_u[i]
# 0.1*cp.square(self.slack_var_1[i])+0.1*cp.square(self.slack_var_2[i])
constr += [dt3g2l * np.cos(self.theta[i]) * self.delta_theta[i] +
self.delta_theta_dot[i] + dt3ml2 * self.delta_u[i] - self.delta_theta_dot[i + 1]
== -(
-self.theta_dot[i + 1] + self.theta_dot[i] + dt3g2l * np.sin(self.theta[i]) +
dt3ml2 * self.u[i]
),
self.theta[i + 1] + self.delta_theta[i + 1] == self.theta[i] + self.delta_theta[i] + dt * (
self.theta_dot[i] + self.delta_theta_dot[i]),
self.theta_dot[i] + self.delta_theta_dot[i] <= 8,
self.theta_dot[i] + self.delta_theta_dot[i] >= -8,
self.u[i] + self.delta_u[i] <= 2,
self.u[i] + self.delta_u[i] >= -2,
self.delta_u[i] <= 0.1,
self.delta_u[i] >= -0.1,
]
problem = cp.Problem(cp.Minimize(cost), constr)
problem.solve()
print("status:", problem.status)
print("optimal value", problem.value)
print("optimal var: delta_theta", self.delta_theta.value)
print("optimal var: delta_theta_dot", self.delta_theta_dot.value)
print("optimal var: delta_u", self.delta_u.value)
self.target_value.append(problem.value)
for i in range(len(problem.constraints)):
self.lambda_vec[i] = problem.constraints[i].dual_value
def solve(self):
for i in range(30):
self.solve_once()
self.theta += self.delta_theta.value
self.theta_dot += self.delta_theta_dot.value
self.u += self.delta_u.value
print(self.target_value)
def make_env(name):
gym_env = gym.make(name, render_mode="human")
return gym_env
def main():
env = make_env("Pendulum-v1")
observation, info = env.reset(seed=1)
print(observation)
model = PendulumModel()
model.reset(observation)
print(model.state)
model_log = {'theta': [], 'theta_dot': [], 'u': []}
for i in range(30):
model_log['theta'].append(model.state[0])
model_log['theta_dot'].append(model.state[1])
action = np.random.uniform(-2, 2, 1)
# action=np.array([0])
model.step(action)
model_log['u'].append(action)
model_log['theta'] = np.hstack(model_log['theta'])
model_log['theta_dot'] = np.hstack(model_log['theta_dot'])
model_log['u'] = np.hstack(model_log['u'])
cvx_sqp = CVX_SQP()
cvx_sqp.set_init_traj(model_log)
cvx_sqp.solve()
control = cvx_sqp.u
for i in range(200):
observation, reward, terminated , truncated , info = env.step(control[i].reshape(1,))
print(observation, reward, control[i])
if __name__ == "__main__":
main()
| CarlDegio/SQP_Pendulum | cvx_main.py | cvx_main.py | py | 4,654 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": ... |
35813238442 | import typing
from typing import Optional
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession, create_async_engine, async_sessionmaker
from sqlalchemy.orm import declarative_base
from backend.services.database import db
if typing.TYPE_CHECKING:
pass
class Database:
def __init__(self, url: str):
self._url = url
self._engine: Optional[AsyncEngine] = None
self._db: Optional[declarative_base] = None
self.session: Optional[AsyncSession] = None
async def connect(self, *_: list, **__: dict) -> None:
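        # Lazily create the async engine and bind an async_sessionmaker to it;
        # callers obtain sessions from self.session().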
self._db = db
self._engine = create_async_engine(url=self._url, echo=True)
self.session = async_sessionmaker(bind=self._engine, class_=AsyncSession, expire_on_commit=False)
async def disconnect(self, *_: list, **__: dict) -> None:
if self._engine:
await self._engine.dispose()
| jendox/tg_bot | backend/services/database/database/base.py | base.py | py | 883 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.ext.asyncio.AsyncEngine",
"line_number": 16,
"usage_type": "name"
},
{
"api_n... |
38890746457 | import dialogflow_v2 as dialogflow
import os
path_key = "C:\wilasinee_pj\pj\python\ggthaluangbot-a6aed6caf27a.json"
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = path_key
#*****************************************************************
project_id = "thaluangbot-lhrv"
session_id = "82d12b78-40b4-4028-a8f7-3a6a479cb2f7"
language_code = "Thai-th"
#****************************************************************
from flask import Flask ,request,abort
from linebot import(
LineBotApi , WebhookHandler
)
from linebot.exceptions import(
InvalidSignatureError
)
from linebot.models import *
import json
#**************************************
text = input("let's text : ")
app = Flask(__name__)
def detect_intent_texts(project_id, session_id, texts, language_code):
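    # Send a single text query to Dialogflow for the given session and return the
    # fulfillment text of the matched intent.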
session_client = dialogflow.SessionsClient()
session = session_client.session_path(project_id, session_id)
print('Session path: {}\n'.format(session))
#for text in texts:
text = texts
text_input = dialogflow.types.TextInput(text=text, language_code=language_code)
query_input = dialogflow.types.QueryInput(text=text_input)
response = session_client.detect_intent(session=session, query_input=query_input)
return response.query_result.fulfillment_text
text_re = detect_intent_texts(project_id,session_id,text,language_code)
print(text_re)
| wilasineePE/chatbot | index.py | index.py | py | 1,363 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "dialogflow_v2.SessionsClient",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "dialogflow_v2... |
24354798885 | import os
import torch
import gym
import gym_donkeycar
import time
from env.vae_env import VaeEnv
from vae.vae import VAE
from stable_baselines.sac.policies import MlpPolicy
from stable_baselines import SAC
VARIANTS_SIZE = 32
DONKEY_SIM_PATH = f"/Applications/donkey_sim.app/Contents/MacOS/sdsim"
SIM_HOST="127.0.0.1"
DONKEY_SIM_PORT=9091
image_channels = 3
if __name__ == '__main__':
#model_path = 'vae-gt-80-160-10k-beta25-150.torch'#for 6
#model_path = 'vae-gt-80-160-18k-beta25-50-loss.torch'
model_path = 'vae-gt-30k-50.torch'
torch_device = 'cpu'
vae = VAE(image_channels=image_channels, z_dim=VARIANTS_SIZE)
vae.load_state_dict(torch.load(model_path, map_location=torch.device(torch_device)))
vae.to(torch.device(torch_device))
vae.eval()
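    # Wrap the Donkey simulator so observations are VAE latent codes, then drive the
    # car with the pre-trained SAC policy.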
env = gym.make('donkey-generated-track-v0', exe_path=DONKEY_SIM_PATH, host=SIM_HOST, port=DONKEY_SIM_PORT)
env.viewer.set_car_config("donkey", (128, 128, 128), "masato-ka", 20)
vae_env = VaeEnv(env, vae, device=torch_device)
model = SAC.load('donkey8')
obs = vae_env.reset()
dones=False
    for step in range(10000): # run up to 10000 simulation steps
if step % 10 == 0: print("step: ", step)
#if dones:
# o = env.reset()
# break
action, _states = model.predict(obs)
obs, rewards, dones, info = vae_env.step(action)
# env.render()
env.close() | masato-ka/sac-car-racing | run_donkey.py | run_donkey.py | py | 1,406 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "vae.vae",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "vae.vae.VAE",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "vae.vae.load_state_dict",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "vae.vae",
"line_numbe... |
12569604796 | from rest_framework import permissions
from rest_framework.permissions import BasePermission
# Object-level permission: only the post's owner (or a superuser) may modify it.
class IsOwnerOrReadOnly(BasePermission):
    message = "You must be the owner of this post."
def has_object_permission(self, request, view, obj):
my_safe_method = ['PUT']
print(request.user.is_staff)
print(request.user.is_superuser)
if request.method in my_safe_method:
return True
return request.user.is_superuser or obj.author == request.user
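# --- Illustrative check (added for clarity; not part of the original module) ---
# A self-contained sketch of how has_object_permission behaves, using plain
# stand-in objects instead of real DRF requests/models (names are hypothetical).
if __name__ == "__main__":
    from types import SimpleNamespace
    perm = IsOwnerOrReadOnly()
    owner = SimpleNamespace(is_superuser=False, is_staff=False)
    stranger = SimpleNamespace(is_superuser=False, is_staff=False)
    post = SimpleNamespace(author=owner)
    # the owner may modify their own post -> True
    print(perm.has_object_permission(SimpleNamespace(method="PATCH", user=owner), None, post))
    # a different non-superuser user may not -> False
    print(perm.has_object_permission(SimpleNamespace(method="PATCH", user=stranger), None, post))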
| Maniabhishek/ContentManagementSystem | appcms/api/permissions.py | permissions.py | py | 639 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.permissions.BasePermission",
"line_number": 7,
"usage_type": "name"
}
] |
11735637618 | """Add md5 and sha256 columns to File
Revision ID: d128b94f9a63
Revises: 59d249ebf873
Create Date: 2021-10-24 14:54:30.381535
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd128b94f9a63'
down_revision = '59d249ebf873'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('files', sa.Column('md5', sa.LargeBinary(length=16), nullable=True))
op.add_column('files', sa.Column('sha256', sa.LargeBinary(length=32), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('files', 'sha256')
op.drop_column('files', 'md5')
# ### end Alembic commands ###
| retroherna/rhinventory | alembic/versions/d128b94f9a63_add_md5_and_sha256_columns_to_file.py | d128b94f9a63_add_md5_and_sha256_columns_to_file.py | py | 806 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.LargeBi... |
70865858108 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 18 21:54:40 2019
@author: dingxu
"""
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
from photutils import DAOStarFinder
from astropy.stats import sigma_clipped_stats
from photutils import CircularAperture
import cv2
#import scipy.signal as signal
import os
import math
fitsname1 = 'E:\\BOOTES4\\20181118\\03088\\'+'20181118130518-952-RA.fits'
fitsname2 = 'E:\\BOOTES4\\20181118\\03088\\'+'20181118130621-081-RA.fits'
onehdu = fits.open(fitsname1)
imgdata1 = onehdu[0].data #hdu[0].header
copydata1 = np.copy(imgdata1)
imgdata1 = np.float32(copydata1)
oneimgdata = imgdata1
#oneimgdata = signal.medfilt2d(imgdata1, kernel_size=5)  # 2-D median filter
hang1,lie1 = oneimgdata.shape
twohdu = fits.open(fitsname2)
imgdata2 = twohdu[0].data #hdu[0].header
copydata2 = np.copy(imgdata2)
imgdata2 = np.float32(copydata2)
twoimgdata = imgdata2
#twoimgdata = signal.medfilt2d(imgdata2, kernel_size=5)  # 2-D median filter
hang2,lie2 = twoimgdata.shape
def adjustimage(imagedata, coffe):
mean = np.mean(imagedata)
sigma = np.std(imagedata)
mindata = np.min(imagedata)
maxdata = np.max(imagedata)
Imin = mean - coffe*sigma
Imax = mean + coffe*sigma
mindata = max(Imin,mindata)
maxdata = min(Imax,maxdata)
return mindata,maxdata
def displayimage(img, coff, i):
minimg,maximg = adjustimage(img, coff)
plt.figure(i)
plt.imshow(img, cmap='gray', vmin = minimg, vmax = maximg)
plt.savefig(str(i)+'.jpg')
def findsource(img):
mean, median, std = sigma_clipped_stats(img, sigma=3.0)
daofind = DAOStarFinder(fwhm=8.5, threshold=5.*std)
sources = daofind(img - median)
tezhen = np.transpose((sources['xcentroid'], sources['ycentroid']))
#tezhen = np.transpose((sources['xcentroid'], sources['ycentroid'],sources['sharpness']))
positions = np.transpose((sources['xcentroid'], sources['ycentroid']))
return tezhen,positions
### star detection ###
tezhen1,positions1 = findsource(oneimgdata)
tezhen2,positions2 = findsource(twoimgdata)
apertures1 = CircularAperture(positions1, r=5.)
apertures2 = CircularAperture(positions2, r=5.)
displayimage(oneimgdata,3,0)
apertures1.plot(color='blue', lw=1.5, alpha=0.5)
displayimage(twoimgdata,3,1)
apertures2.plot(color='blue', lw=1.5, alpha=0.5)
lenposition1 = len(positions1)
lenposition2 = len(positions2)
keyimg1 = np.zeros((lenposition1,128),dtype = np.float32)
keyimg2 = np.zeros((lenposition2,128),dtype = np.float32)
i = 0
j = 0
for i in range(lenposition1):
keyimg1[i,0:2] = tezhen1[i,:]
for j in range(lenposition2):
keyimg2[j,0:2] = tezhen2[j,:]
# FLANN matcher parameters
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params,search_params)
matches = flann.knnMatch(keyimg1,keyimg2,k=2)
lenpipei = 0
temp1 = []
temp2 = []
for i, (m1, m2) in enumerate(matches):
    if m1.distance < 0.75 * m2.distance:  # ratio test: a smaller Euclidean distance between feature vectors means a better match
lenpipei = lenpipei+1
temp1.append(m1.queryIdx)
temp2.append(m1.trainIdx)
hmerge = np.hstack((oneimgdata, twoimgdata))  # horizontal concatenation
displayimage(hmerge, 3, 2)
srckp1 = []
srckp2 = []
for i in range(lenpipei):
x = temp1[i]
y = temp2[i]
x10 = positions1[x][0]
y10 = positions1[x][1]
srckp1.append(x10)
srckp1.append(y10)
src_pts = np.float32(srckp1).reshape(-1,2)
x11 = positions2[y][0]
y11 = positions2[y][1]
srckp2.append(x11)
srckp2.append(y11)
dst_pts = np.float32(srckp2).reshape(-1,2)
#plt.plot(x10,y10,'*')
#plt.plot(x11+lie1,y11,'*')
plt.plot([x10,x11+lie1],[y10,y11],linewidth = 0.8)
H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
newimg1 = cv2.warpPerspective(imgdata1, H, (lie1,hang1))
addimg = np.float32(newimg1) + np.float32(imgdata2)
minusimg = np.float32(newimg1) - np.float32(imgdata2)
displayimage(addimg, 3, 3)
displayimage(minusimg, 3, 4)
def witefits(data, name):
    # remove any existing output first so writeto() does not fail on an existing file
    if os.path.exists(name + '.fits'):
        os.remove(name + '.fits')
    grey = fits.PrimaryHDU(data)
    greyHDU = fits.HDUList([grey])
    greyHDU.writeto(name + '.fits')
witefits(newimg1,'one')
witefits(imgdata2,'two')
witefits(minusimg,'minusimg')
tempmatrix = np.zeros((3,1),dtype = np.float64)
tempmatrix[2] = 1
deltemp = []
for j in range(lenpipei):
tempmatrix[0] = src_pts[j][0]
tempmatrix[1] = src_pts[j][1]
result = np.dot(H,tempmatrix)
rx11 = result[0]/result[2]
ry11 = result[1]/result[2]
delcha = math.sqrt((rx11-dst_pts[j][0])**2 + (ry11-dst_pts[j][1])**2)
deltemp.append(delcha)
plt.figure(5)
plt.plot(deltemp)
print(np.mean(deltemp[15:40]))
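# Added for clarity (not part of the original script): the loop above measures the
# reprojection error of the fitted homography. Each matched source point is taken
# in homogeneous form x = (u, v, 1)^T, mapped with x' = H x and normalised by the
# third component; delcha is then the Euclidean distance between x' and the matched
# destination point, so small values indicate a good alignment.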
| dingxu6207/newcode | newB4SIFT.py | newB4SIFT.py | py | 4,808 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "astropy.io.fits.open",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "numpy.copy",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"... |
35571671171 | import urllib.request as urllib2 #import the library used to query a website
from bs4 import BeautifulSoup #import the Beautiful soup functions to parse the data returned from the website
import pandas as pd #import pandas to convert list to data frame
from openpyxl import load_workbook
# INPUT VARIABLES SPECIFIED BY THE USER
input1 = input('Please input the column in excel that you want modified (UPPERCASE ONLY): ')
input2 = input('\nPlease enter the number of the table from the web page that you would like to parse.\nFor example, input 9 if you would like to read from the 9th table listed: ')
input3 = input('\nPlease input the number of the column from the table on the web you would like to parse.\nFor example input 3 if you would like to read from the 3rd column: ')
input4 = input('\nPlease input the number of the excel sheet that you would like to read from.\nFor example, from left to right the sheet tabs would be numbered 1, 2, 3... accordingly: ')
input5 = input('\nPlease input the name of the file you would like to read from (extension included).\n For example Verisk Model_Send_Excel_2.xlsx: ')
input6 = input('\nPlease input the path where this folder is located on your computer (please include a "/" at the end of the path).\nFor Example ~/Documents/Git/Html_scraping_project/: ')
input7 = input('\nPlease input the url containing the table that you want to parse.\nFor example http://www.verisk.com/press-releases/2017/february/verisk-analytics-inc-reports-fourth-quarter-2016-financial-results.html: ')
print('\nThe file "temp.xlsx" has now been created in your directory...')
#Convert user input into proper indexes
def excelColumnToIndex(column):
return ord(column) - 65
def tableFromWebToIndex(index):
return int(index) - 1
def tableColumnFromWebToIndex(index):
return int(index) - 1
def excelSheetToIndex(index):
return int(index) - 1
#Set global variabes to correct values
EXCEL_COLUMN_INDEX = excelColumnToIndex(input1)
TABLE_FROM_WEB_INDEX = tableFromWebToIndex(input2)
TABLE_FROM_WEB_COLUMN_INDEX = tableColumnFromWebToIndex(input3)
EXCEL_SHEET_INDEX = excelSheetToIndex(input4)
FILENAME = input5
PATH = input6
URL = input7
def parseTables(all_tables):
parsed_tables = []
for i in range(len(all_tables)-1):
table_body = all_tables[i].find('tbody')
rows = table_body.find_all('tr')
df_temp = []
for row in rows:
cols =row.find_all('td')
cols = [ele.text.strip() for ele in cols]
            df_temp.append([ele for ele in cols])  # append the stripped cell values for this row
parsed_tables.append(df_temp)
return parsed_tables
def loadExcelDoc(sheet_index):
# Open up Faton Excel file
xl = pd.ExcelFile(PATH + FILENAME)
sheets = xl.sheet_names
## open up the first sheet and print our data
df = xl.parse(sheets[sheet_index],header=None)
#row_index = df.iloc[0][0]
#df = xl.parse(sheets[sheet_index])
#df = df.set_index(row_index)
return df
def main():
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
headers={'User-Agent':user_agent,}
request=urllib2.Request(URL,None,headers) #The assembled request
page = urllib2.urlopen(request)
data = page.read() # The data
#Parse the html in the 'page' variable, and store it in Beautiful Soup format
soup = BeautifulSoup(data,'html.parser')
#####gather all tables from page into array
all_tables = soup.find_all("table", class_="table-blue")
parsed_tables = parseTables(all_tables)
df_from_web = pd.DataFrame(parsed_tables[TABLE_FROM_WEB_INDEX])
df = loadExcelDoc(EXCEL_SHEET_INDEX)
wb = load_workbook(FILENAME, keep_vba = True)
wb.get_sheet_names()
active_sheet = wb.sheetnames[EXCEL_SHEET_INDEX]
ws = wb[active_sheet]
# Lets try to match the row index names from the web to the excel doc
excel_labels = [i.value for i in ws['A']]# we assume that the labels are always in column A
web_labels = df_from_web.loc[:,0] # we assume that the label is always in the first column of the dataframe
for i,excel_label in enumerate(excel_labels):
for j,web_label in enumerate(web_labels):
if excel_label == web_label:
#set the cell value in the excel file to match the value found from the web
ws[i+1][EXCEL_COLUMN_INDEX].value = df_from_web.loc[j,TABLE_FROM_WEB_COLUMN_INDEX]
wb.save("temp.xlsx")
#########################################################################
# Lets run our script
main() | rich-coleman-gh/Html_scraping_project | main.py | main.py | py | 4,466 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.ExcelFile",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "urllib.request.Request",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "urllib.request... |
5707554791 | '''
activation_key module
'''
from dataclasses import dataclass
from sqlalchemy import Integer, Column, String, ForeignKey
from databases.models.user import User
from config.db import db
@dataclass
class ActivationKey(db.Model): # pylint: disable=too-few-public-methods
'''
activation_key model class
'''
id: int # pylint: disable=C0103
hash_key: str
user_id: int
__tablename__ = 'activation_key'
id = Column(Integer, primary_key=True)
hash_key = Column(String(255), unique=True, nullable=False)
user_id = Column(Integer, ForeignKey(User.id), nullable=False)
def __repr__(self):
return f'ActivationKey {self.id}'
| Dolzhenkov-Andrii/api | databases/models/activation_key.py | activation_key.py | py | 681 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "config.db.db.Model",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "config.db.db",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Int... |
18515188174 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 21 16:49:53 2017
@author: mducoffe
appendix : expansion operator for convolutional KFAC
"""
from keras.models import Model, Sequential
from keras.layers import Dense
import keras.backend as K
from keras.layers.merge import Concatenate
from keras.engine import InputSpec, Input, Layer
import numpy as np
def keras_expansion_op(A, delta, input_shape):
if K.image_dim_ordering() == "th":
(_, J, X, Y) = input_shape
else:
(_, X, Y, J) = input_shape
A = A.transpose((0, 3, 2, 1))
d_x = delta[0]/2; d_y = delta[1]/2
var_x = []
for n_x in range(d_x, X-d_x):
var_y = []
for n_y in range(d_y, Y-d_y):
tmp = A[:,:, n_x -d_x:n_x+d_x+1, n_y-d_y:n_y+d_y+1]
tmp = tmp[:,:, ::-1, ::-1, None]
var_y.append(tmp)
var_y = K.concatenate(var_y, axis=4)
var_y = var_y[:,:,:,:,:,None]
var_x.append(var_y)
var_x = K.concatenate(var_x, axis=5)
E_A = var_x.transpose((0, 5, 4, 1, 2, 3))
batch_size = E_A.shape[0]
coeff = 1./((X-2*d_x)*(Y-2*d_y)) # 1/sqrt(tau)
E_A = E_A.reshape((batch_size, (X-2*d_x)*(Y-2*d_y), J*(2*d_x+1)*(2*d_y+1)))
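    # Added for clarity (not part of the original file): with "th" ordering the
    # input A has shape (N, J, X, Y) and delta is the convolution kernel size
    # (kh, kw); under the Python-2 integer division above, d_x = kh // 2 and
    # d_y = kw // 2. The returned expansion therefore has shape
    #   (N, (X - 2*d_x) * (Y - 2*d_y), J * (2*d_x + 1) * (2*d_y + 1)),
    # i.e. one row of flattened receptive-field patches per valid spatial
    # position, scaled by coeff. Hypothetical example (old Keras/Theano backend):
    #   keras_expansion_op(K.variable(np.random.rand(8, 3, 7, 7)), (3, 3),
    #                      (None, 3, 7, 7))  ->  shape (8, 25, 27)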
return coeff*E_A | mducoffe/Active_Learning_Variational_Inference_Deep_Networks | appendix.py | appendix.py | py | 1,264 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "keras.backend.image_dim_ordering",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "keras.backend.concatenate",
"line_number": 35,
"usage_type": "call"
},
{
"api_name... |
34653033371 | import math, os
from dataclasses import dataclass
import pygame
from const import *
@dataclass
class Position:
sector: int
layer: int
index: int
x: float
y: float
def __init__(self, sector, layer, index):
self.sector = sector
self.layer = layer
self.index = index
sector_angle = self.sector * math.pi / 3
pos_angle = sector_angle + 2 * math.pi / 3
self.x = BOARD_CENTER[0] + 2 * (TILE_SIDE + TILE_PADDING) * (self.layer * math.sin(sector_angle) + self.index * math.sin(pos_angle))
self.y = BOARD_CENTER[1] - 2 * (TILE_SIDE + TILE_PADDING) * (self.layer * math.cos(sector_angle) + self.index * math.cos(pos_angle))
def to_map(self, offset: (float, float) = (0, 0)):
return (self.x + offset[0], self.y + offset[1])
def to_coor(self):
return (self.sector, self.layer, self.index)
def circle_intersect(self, radius: float, pos: (float, float)):
return math.sqrt((pos[0] - self.x)**2 + (pos[1] - self.y)**2) <= radius
class Resource:
textures: dict[str, list[pygame.Surface]] = {}
fonts: dict[str, pygame.font.Font] = {}
@staticmethod
def init():
Resource.textures = {
"town": [pygame.image.load(os.path.join("res", "images", "buildings", f"town{i}.png")) for i in range(NUM_PLAYERS)],
"farm": [pygame.image.load(os.path.join("res", "images", "buildings", f"farm{i}.png")) for i in range(NUM_PLAYERS)],
"soldier": [pygame.image.load(os.path.join("res", "images", "units", f"soldier{i}.png")) for i in range(NUM_PLAYERS)]
}
for items in Resource.textures.values():
for index, item in enumerate(items):
items[index] = pygame.transform.scale(item, PIECE_SIZE).convert_alpha()
Resource.fonts = {
"system": pygame.font.SysFont("Calibri", 18),
"systeml": pygame.font.SysFont("Calibri", 24),
}
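# Illustrative usage (added for clarity; not part of the original module).
# Position maps (sector, layer, index) board coordinates to pixel centres and
# supports circular hit-testing; it only relies on BOARD_CENTER, TILE_SIDE and
# TILE_PADDING star-imported from const, so the sketch is left commented out:
#
#   pos = Position(sector=0, layer=1, index=0)
#   pos.to_coor()                                  # (0, 1, 0)
#   pos.to_map()                                   # pixel centre of that tile
#   pos.circle_intersect(TILE_SIDE, pos.to_map())  # True: zero distance <= radius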
| martin-hanekom/persian-silver-2 | src_old/tools.py | tools.py | py | 1,951 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "math.pi",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "math.sin",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 21,
... |
6194000945 | import os
import json
from dotenv import load_dotenv
load_dotenv()
chinput = os.getenv('CHATINPUT')
chinput = '-1001799753250 -1001574745581 -1001322515232 -1001725353361'
channel_input = [int(i) for i in chinput.split(' ')]
choutput = os.getenv('CHATOUTPUT')
choutput = '-1001802541407'
channel_output = [int(i) for i in choutput.split(' ')]
REDIS_URL = os.getenv('REDIS_URL')
session = os.getenv("SESSION")
api_hash = os.getenv("API_HASH")
api_id = os.getenv("API_ID")
sentry_env = os.getenv("SENTRY_ENV")
| Lj6890/Forwarded | config.py | config.py | py | 513 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 1... |
23715644007 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from event.models import Event
# Create your models here.
class Episode(models.Model):
event = models.ForeignKey(
Event,
on_delete=models.CASCADE,
related_name='episode')
session_id = models.CharField(max_length=200)
archive_id = models.CharField(max_length=200)
| emilarran/channelshowdown | channelshowdown/livestream/models.py | models.py | py | 396 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.models.Model",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "event.models",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.m... |
21043710164 | from datetime import datetime, time
from typing import Dict
# getting info from administrator
def getting_payload() -> Dict:
"""
    This function takes no arguments and returns a dictionary of the user's answers.
:return: id, company_name, departure_time, arrival_time
"""
temp_id = int(input("Enter the new plane ID(int):\n"))
temp_company_name = input("Enter the company name:\n")
# making sure time is in the correct format
try:
temp_departure_time = input("Enter the departure time => (hh:mm:ss) example, (14:05:20):\n")
temp_departure_time = datetime.strptime(temp_departure_time, "%H:%M:%S").time()
except ValueError:
print("You have entered time in the wrong Format, the system will assign the time to\n"
"1:1:1 instead. Feel free to modify the departure time in plane modification")
temp_departure_time = time(1, 1, 1)
# making sure time is in the correct format
try:
temp_arrival_time = input("Enter the arrival time => (hh:mm:ss) example, (14:05:20):\n")
temp_arrival_time = datetime.strptime(temp_arrival_time, "%H:%M:%S").time()
except ValueError:
print("You have entered time in the wrong Format, the system will assign the time to\n"
"1:1:1 instead. Feel free to modify the arrival time in plane modification")
temp_arrival_time = time(1, 1, 1)
return {
"id": temp_id,
"company_name": temp_company_name,
"departure_time": temp_departure_time,
"arrival_time": temp_arrival_time
}
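# Illustrative usage (added for clarity; not part of the original module).
# The prompts above are answered interactively, so the values shown are only
# an example of the shape of the returned payload:
#
#   payload = getting_payload()
#   # e.g. {'id': 7, 'company_name': 'Acme Air',
#   #       'departure_time': datetime.time(14, 5, 20),
#   #       'arrival_time': datetime.time(16, 30)}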
| Mohamad-Hachem/Airplane_Booking_System | utils/getting_airplane_information.py | getting_airplane_information.py | py | 1,567 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "datetime.time",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.d... |
3423610291 | import json
from functools import wraps
from flask import request
from flask.ext.restful import reqparse, Api, Resource
from api_json_example import app
# Database? We don't need no stinkin database
db = {}
api = Api(app)
def accept_json(func):
"""
Decorator which returns a 406 Not Acceptable if the client won't accept JSON
"""
@wraps(func)
def wrapper(*args, **kwargs):
accept = api.mediatypes()
if "*/*" in accept or "application/json" in accept:
return func(*args, **kwargs)
return {"message": "Request must accept JSON"}, 406
return wrapper
def require_json(func):
"""
Decorator which returns a 415 Unsupported Media Type if the client sends
something other than JSON
"""
@wraps(func)
def wrapper(*args, **kwargs):
if request.mimetype == "application/json":
return func(*args, **kwargs)
return {"message": "Request must contain JSON"}, 415
return wrapper
class User(Resource):
"""
A simple RESTful API for a user
"""
parser = reqparse.RequestParser()
method_decorators = [accept_json]
def get(self, id):
if not id in db:
return {"message": "User not found"}, 404
return db[id], 200
@require_json
def put(self, id):
args = User.parser.parse_args()
# Validate arguments
if args["name"] and not isinstance(args["name"], basestring):
return {"message": "Name must be a string"}, 422
if args["email"] and not isinstance(args["email"], basestring):
return {"message": "Email address must be a string"}, 422
if (args["roles"] and
not all(isinstance(role, basestring) for role in args["roles"])):
return {"message": "Roles must be a strings"}, 422
if id in db:
# Edit user
# SMELL: Merging could be nicer
if args["name"]:
db[id]["name"] = args["name"]
if args["email"]:
db[id]["email"] = args["email"]
if args["roles"]:
db[id]["roles"] = args["roles"]
return db[id], 201, {"Location": "/api/user/{}".format(id)}
else:
# Create new user
if not args["name"] or not args["email"]:
return {"message": "Must provide name and email"}, 422
db[id] = {
"name": args["name"],
"email": args["email"]
}
if args["roles"]:
db[id]["roles"] = args["roles"]
else:
db[id]["roles"] = []
return db[id], 200
User.parser.add_argument("name", type=str, location="get_json")
User.parser.add_argument("email", type=str, location="get_json")
User.parser.add_argument("roles", type=list, location="get_json")
api.add_resource(User, "/api/user/<int:id>")
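# Example interaction (added for clarity; not part of the original module).
# Host, port and payload are illustrative and assume the app is served locally:
#
#   curl -X PUT http://localhost:5000/api/user/1 \
#        -H "Content-Type: application/json" -H "Accept: application/json" \
#        -d '{"name": "Ada", "email": "ada@example.com"}'
#
# Per the decorators above, a client that does not accept JSON receives 406, a
# request body that is not JSON receives 415, and creating a user without both
# a name and an email receives 422.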
| sjl421/thinkful-python-code-examples | flask/api_json_example/api_json_example/api.py | api.py | py | 2,931 | python | en | code | null | github-code | 6 | [
{
"api_name": "flask.ext.restful.Api",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "api_json_example.app",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "functools.wraps",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.... |
37431367661 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 14:32:30 2020
@author: zjerma1
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
from mlxtend.evaluate import bias_variance_decomp
#data for final plot
x_plot = np.linspace(0,1,num=10)
y_plot = np.sin(2*np.pi*x_plot)
j = 0
#y_avg = np.array[100]
test_error = []
bias = []
variance = []
reg_parameter = []
bias_variance = []
for i in range(8):
reg_parameter.append(10**i)
reg_parameter.append(10**(-i))
reg_parameter.sort()
print(reg_parameter)
for j in reg_parameter:
error_holder = 0
bias_holder = 0
variance_holder = 0
#generate training data
x_data = np.linspace(0,1,num=10).reshape(-1,1)
y_data = np.sin(2.0*np.pi*x_plot) + .1*np.random.randn(10)
#print(x_plot)
#print("\n")
#print(y_data)
#add polynomials to the model
poly_features = PolynomialFeatures(degree=9,include_bias=False)
#print(poly_features)
#x_data is extened by including its powers
x_data_poly = poly_features.fit_transform(x_data)
#print(x_data_poly)
    # Ridge regression (Tikhonov regularization)
ridge_reg = Ridge(alpha = j ,solver="cholesky")
#fit the extended data set
ridge_reg.fit(x_data_poly,y_data)
#generate the test data set
x_new = np.linspace(0,1,num = 100).reshape(100,1)
x_new_poly = poly_features.transform(x_new)
#print(x_new)
#print('\n')
#print(x_new_poly)
#prediction on the test data set
y_new = ridge_reg.predict(x_new_poly)
error_holder, bias_holder, variance_holder = bias_variance_decomp(ridge_reg, x_data, y_data, x_new, y_new, loss = 'mse') #bias-variance decomp
test_error.append(error_holder)
bias.append(bias_holder)
variance.append(variance_holder)
for j in range(len(bias)):
bias[j] = bias[j]**2
for j in range(len(bias)):
bias_variance.append(bias[j] + variance[j])
print(test_error)
print(bias_variance)
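# Added for clarity (not part of the original script): the quantity plotted as
# 'bias + variance' reflects the usual decomposition of the expected squared
# error, E[(y - f_hat(x))^2] = bias^2 + variance + irreducible noise, so it
# should track the test error up to the noise of the generating process
# (Gaussian with standard deviation 0.1 above).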
plt.plot(reg_parameter, test_error, label = 'test error')
plt.plot(reg_parameter, bias, label = 'bias')
plt.plot(reg_parameter,variance, label = 'variance')
plt.plot(reg_parameter, bias_variance, label = 'bias + variance')
plt.xscale('log')
plt.legend()
plt.show() | zjermain/Math-7390-Machine-Learning-Jermain | Homework 2-Bias Variance Decomp.py | Homework 2-Bias Variance Decomp.py | py | 2,428 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.linspace",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_num... |
20843364345 | import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import DataLoader
from torch.utils.data.sampler import WeightedRandomSampler
import math
from network import Network
from losses import WalkerVisitLosses
from input_pipeline import get_datasets
from utils import evaluate, write_logs, make_weights_for_balanced_classes
"""
The purpose of this script is to train a simple
CNN on mnist and svhn using associative domain adaptation.
"""
BATCH_SIZE = 200
NUM_EPOCHS = 15
EMBEDDING_DIM = 64
DELAY = 1000 # number of steps before turning on additional losses
GROWTH_STEPS = 1000 # number of steps of linear growth of additional losses
# so domain adaptation losses are in full strength after `DELAY + GROWTH_STEPS` steps
BETA1, BETA2 = 1.0, 0.5
DEVICE = torch.device('cuda:0')
SOURCE_DATA = 'svhn' # 'svhn' or 'mnist'
SAVE_PATH = 'models/svhn_source'
LOGS_PATH = 'logs/svhn_source.json'
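# Added for clarity (not part of the original script): a plain-Python mirror of
# the ramp the training loop below applies to the associative losses via
# torch.clamp -- zero until DELAY, then linear growth to 1 over GROWTH_STEPS.
def _association_loss_weight(i, delay=DELAY, growth_steps=GROWTH_STEPS):
    return min(max((i - delay) / growth_steps, 0.0), 1.0)
# e.g. _association_loss_weight(1000) == 0.0, (1500) == 0.5, (2000) == 1.0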
def train_and_evaluate():
svhn, mnist = get_datasets(is_training=True)
source_dataset = svhn if SOURCE_DATA == 'svhn' else mnist
target_dataset = mnist if SOURCE_DATA == 'svhn' else svhn
weights = make_weights_for_balanced_classes(source_dataset, num_classes=10)
sampler = WeightedRandomSampler(weights, len(weights))
source_loader = DataLoader(source_dataset, BATCH_SIZE, sampler=sampler, pin_memory=True, drop_last=True)
target_loader = DataLoader(target_dataset, BATCH_SIZE, shuffle=True, pin_memory=True, drop_last=True)
val_svhn, val_mnist = get_datasets(is_training=False)
val_svhn_loader = DataLoader(val_svhn, BATCH_SIZE, shuffle=False, drop_last=False)
val_mnist_loader = DataLoader(val_mnist, BATCH_SIZE, shuffle=False, drop_last=False)
print('\nsource dataset is', SOURCE_DATA, '\n')
num_steps_per_epoch = math.floor(min(len(svhn), len(mnist)) / BATCH_SIZE)
embedder = Network(image_size=(32, 32), embedding_dim=EMBEDDING_DIM).to(DEVICE)
classifier = nn.Linear(EMBEDDING_DIM, 10).to(DEVICE)
model = nn.Sequential(embedder, classifier)
model.train()
optimizer = optim.Adam(lr=1e-3, params=model.parameters(), weight_decay=1e-3)
scheduler = CosineAnnealingLR(optimizer, T_max=num_steps_per_epoch * NUM_EPOCHS - DELAY, eta_min=1e-6)
cross_entropy = nn.CrossEntropyLoss()
association = WalkerVisitLosses()
text = 'e:{0:2d}, i:{1:3d}, classification loss: {2:.3f}, ' +\
'walker loss: {3:.3f}, visit loss: {4:.4f}, ' +\
'total loss: {5:.3f}, lr: {6:.6f}'
logs, val_logs = [], []
i = 0 # iteration
for e in range(NUM_EPOCHS):
model.train()
for (x_source, y_source), (x_target, _) in zip(source_loader, target_loader):
x_source = x_source.to(DEVICE)
x_target = x_target.to(DEVICE)
y_source = y_source.to(DEVICE)
x = torch.cat([x_source, x_target], dim=0)
embeddings = embedder(x)
a, b = torch.split(embeddings, BATCH_SIZE, dim=0)
logits = classifier(a)
usual_loss = cross_entropy(logits, y_source)
walker_loss, visit_loss = association(a, b, y_source)
if i > DELAY:
growth = torch.clamp(torch.tensor((i - DELAY)/GROWTH_STEPS).to(DEVICE), 0.0, 1.0)
loss = usual_loss + growth * (BETA1 * walker_loss + BETA2 * visit_loss)
else:
loss = usual_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i > DELAY:
scheduler.step()
lr = scheduler.get_lr()[0]
log = (e, i, usual_loss.item(), walker_loss.item(), visit_loss.item(), loss.item(), lr)
print(text.format(*log))
logs.append(log)
i += 1
result1 = evaluate(model, cross_entropy, val_svhn_loader, DEVICE)
result2 = evaluate(model, cross_entropy, val_mnist_loader, DEVICE)
print('\nsvhn loss {0:.3f} and accuracy {1:.3f}'.format(*result1))
print('mnist loss {0:.3f} and accuracy {1:.3f}\n'.format(*result2))
val_logs.append((i,) + result1 + result2)
torch.save(model.state_dict(), SAVE_PATH)
write_logs(logs, val_logs, LOGS_PATH)
train_and_evaluate()
| TropComplique/associative-domain-adaptation | train.py | train.py | py | 4,289 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "torch.device",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "input_pipeline.get_datasets",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "utils.make_weights_for_balanced_classes",
"line_number": 42,
"usage_type": "call"
},
{
"... |
35395868914 | from django.db import models
from django.urls import reverse
from phone_field import PhoneField
# Create your models here.
class Department(models.Model):
"""Отдел компании"""
name = models.CharField(max_length=200, verbose_name="Название отдела")
class Meta:
db_table = 'department'
ordering = ['name']
def __str__(self):
return self.name
class Employee(models.Model):
"""Сотрудник компании"""
first_name = models.CharField(max_length=50, verbose_name='Имя')
middle_name = models.CharField(max_length=50, verbose_name='Отчество')
last_name = models.CharField(max_length=50, verbose_name='Фамилия')
birthday = models.DateField(verbose_name='Дата рождения')
email = models.EmailField(verbose_name='e-mail')
phone = PhoneField(verbose_name='Телефон')
begin_work = models.DateField(verbose_name='Начало работы')
end_work = models.DateField(
blank=True,
null=True,
help_text='Введите дату увольнения сотрудника',
verbose_name='Окончание работы'
)
position = models.CharField(max_length=200, verbose_name='Должность')
department = models.ForeignKey(
Department,
on_delete=models.SET_NULL,
blank=True,
null=True,
verbose_name='Отдел')
class Meta:
db_table = 'employee'
ordering = ['last_name', 'first_name', 'middle_name']
def get_absolute_url(self):
return reverse('employee-detail', args=[str(self.id)])
def display_last_name(self):
return '{0} {1} {2}'.format(self.last_name, self.first_name, self.middle_name)
display_last_name.short_description = 'Ф.И.О.'
def __str__(self):
return self.display_last_name()
| zarmoose/eastwood_test | employees/models.py | models.py | py | 1,894 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.models.Model",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": ... |
29702682927 | #!/usr/bin/env python3
import os
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import pandas as pd
from pandas import DataFrame as df
from sklearn import svm
from sklearn.model_selection import KFold, cross_val_score
from scipy import stats
import seaborn as sns
from sklearn.linear_model import LogisticRegression,RidgeClassifier
from sklearn.metrics import (confusion_matrix,cohen_kappa_score,recall_score,
precision_score)
from sklearn.feature_selection import RFECV
#Set wd
os.chdir("D:\\Documentos\\Essex\\Machine Learning\\assignment")
cwd=os.getcwd()
#read data
data=pd.read_csv("data\\train_imp.csv", header=0)
data2=pd.read_csv("data\\val_imp.csv", header=0)
data3=pd.read_csv("data\\test_imp.csv", header=0)
#print(data.head())
#create feature matrix and feature vectors
#Training set
y=data.iloc[:,-1]
x=data.iloc[:,:-1]
names=list(x.columns)
print("Shape X matrix: ", x.shape)
print("prop: ", y.value_counts()/y.shape[0])
#validation set
y_v=data2.iloc[:,-1]
x_v=data2.iloc[:,:-1]
print("Shape X_v matrix: ", x_v.shape)
#test set
y_t=data3.iloc[:,-1]
x_t=data3.iloc[:,:-1]
print("Shape X_t matrix: ", x_t.shape)
#############################
#Feature selection
#############################
#setting up feature selection algorithm
k_fold = KFold(n_splits=10)
est=svm.SVC(kernel="linear", random_state=21)
selector=RFECV(est,cv=k_fold)
selector.fit(x,y)
#keeping selected variables and printing names for control
x=x.loc[:,selector.get_support()]
x_v=x_v.loc[:,selector.get_support()]
print("Optimal number of features : %d" % selector.n_features_)
print("Support", x.columns)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(selector.grid_scores_) + 1), selector.grid_scores_)
plt.savefig("plots\\featlog.pdf", bbox_inches='tight')
plt.close()
#############################
#Logistic Regression
#############################
#Setting up algorithm
clfnb=LogisticRegression()
#Fitting and printing cv accuracy
clfnb.fit(x,y)
print("params: ", clfnb.get_params())
score_2 = cross_val_score(clfnb, x, y, cv=k_fold, n_jobs=-1)
print('Average accuracy:', np.mean(score_2))
#Test accuracy and other measures
y_pred=clfnb.predict(x_v)
kappa=cohen_kappa_score(y_v,y_pred)
print("Kappa: ", kappa)
print("Recall: ", recall_score(y_v,y_pred))
print("Precision: ", precision_score(y_v,y_pred))
print("confussion: ", confusion_matrix(y_v,y_pred))
print("Score: ", clfnb.score(x_v,y_v))
#########################
#Predicting Test File
#########################
# Keep only the features selected by RFECV
x_t=x_t.loc[:,selector.get_support()]
#Predicting classes
y_test=clfnb.predict(x_t)
#Saving Results
pd.DataFrame(y_test, columns=["Class"]).to_csv("data\\test_logistic.csv", index=False)
print("test results shape: ", pd.DataFrame(y_test, columns=["Class"]).shape)
| maybje/Fake-News-Detection | logistic.py | logistic.py | py | 3,056 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.chdir",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number... |
11779793970 | import os
from scripts.util import read_supertopics, SuperTopic, get_spottopics, DateFormat, read_temp_dist, smooth
import numpy as np
from plotly.subplots import make_subplots
import plotly.graph_objects as go
BOOST = ['raw', # 0
'retweets', # 1
'replies', # 2
'likes', # 3
'retweets_likes', # 4
'replies_likes', # 5
'retweets_replies', # 6
'retweets_likes_replies' # 7
][0]
FILE_SUPERTOPICS = f'data/climate2/topics_big2/supertopics.csv'
FILES_TEMP_DIST = {
'keep (majority)': f'data/climate2/topics_big2/temporal_keep_majority/daily/temporal_daily_{BOOST}_abs.json',
'fresh (majority)': f'data/climate2/topics_big2/temporal_fresh_majority/daily/temporal_daily_{BOOST}_abs.json'
}
FILE_TEMP_DIST = FILES_TEMP_DIST[['keep (majority)', 'fresh (majority)'][0]]
BOUNDARY = '2020-03-01'
SMOOTHING = 90
EPS = 1e-12
annotations = read_supertopics(FILE_SUPERTOPICS)
td_groups, td_topics, td_counts = read_temp_dist(FILE_TEMP_DIST)
supertopic_counts = []
st_summed_counts = []
st_topic_counts = []
for st in SuperTopic:
t_counts = td_counts.T[annotations[:, st] > 0].sum(axis=0)
supertopic_counts.append(t_counts)
print(st.name, f'{t_counts.sum():,}')
st_summed_counts.append(t_counts.sum())
st_topic_counts.append(sum(annotations[:, st] > 0))
supertopic_counts = np.array(supertopic_counts)
BOUND = td_groups.index(BOUNDARY)
sts_plot = [SuperTopic.COVID, SuperTopic.Causes, SuperTopic.Impacts, SuperTopic.Solutions,
SuperTopic.POLITICS, SuperTopic.Movements, SuperTopic.Contrarian,
# SuperTopic.Other, # SuperTopic.Interesting, SuperTopic.NotRelevant
]
tweets_per_day = np.sum(td_counts, axis=1)
tweets_per_topic = np.sum(td_counts, axis=0)
st_plot_counts = supertopic_counts[sts_plot]
st_plot_shares = st_plot_counts / tweets_per_day
st_plot_shares_smooth = smooth(st_plot_shares, kernel_size=SMOOTHING)
subplot_titles = [
f'{st.name}: {sum(annotations[:, st] > 0):,} topics with {int(st_summed_counts[sti]):,} tweets'
for sti, st in enumerate(sts_plot)
]
os.makedirs('data/climate2/figures/supertopic_shares_split/', exist_ok=True)
for i, st in enumerate(sts_plot, start=1):
fig = go.Figure(layout={'title': {'text': subplot_titles[i - 1]}})
n_st_tweets = td_counts.T[annotations[:, st] > 0].T
n_st_tweets_per_day = n_st_tweets.sum(axis=1)
subfig = []
subfig_y = smooth(n_st_tweets.T / (n_st_tweets_per_day + EPS), kernel_size=SMOOTHING)
topic_nums = np.arange(annotations.shape[0])[annotations[:, st] > 0]
for ti, (y_, yt) in enumerate(zip(subfig_y, n_st_tweets.T)):
fig.add_trace(go.Scatter(x=td_groups,
y=y_,
mode='lines',
stackgroup='one',
name=f'Topic {topic_nums[ti]} ({int(yt.sum()):,} tweets)'))
fig.update_layout(height=1000, width=1000)
fig.write_html(f'data/climate2/figures/supertopic_shares_split/supertopic_{st.name}.html')
os.makedirs('data/climate2/figures/supertopic_abs_split/', exist_ok=True)
for i, st in enumerate(sts_plot, start=1):
fig = go.Figure(layout={'title': {'text': subplot_titles[i - 1]}})
n_st_tweets = td_counts.T[annotations[:, st] > 0].T
n_st_tweets_per_day = n_st_tweets.sum(axis=1)
subfig_y = smooth(n_st_tweets.T, kernel_size=SMOOTHING)
topic_nums = np.arange(annotations.shape[0])[annotations[:, st] > 0]
for ti, (y_, yt) in enumerate(zip(subfig_y, n_st_tweets.T)):
fig.add_trace(go.Scatter(x=td_groups,
y=y_,
mode='lines',
stackgroup='one',
name=f'Topic {topic_nums[ti]} ({int(yt.sum()):,} tweets)'))
fig.update_layout(height=1000, width=1000)
fig.write_html(f'data/climate2/figures/supertopic_abs_split/supertopic_{st.name}.html')
| TimRepke/twitter-climate | code/figures/supertopics/stacked_area_charts_interactive_separate.py | stacked_area_charts_interactive_separate.py | py | 3,987 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "scripts.util.read_supertopics",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "scripts.util.read_temp_dist",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "scripts.util.SuperTopic",
"line_number": 35,
"usage_type": "name"
},
{
... |
14594650315 | import tensorflow as tf
import pathlib
import os
import cv2
import numpy as np
import tqdm
import argparse
class TFRecordsGAN:
def __init__(self,
image_dir="/volumes2/datasets/horse2zebra/trainA",
tfrecord_path="data.tfrecords",
img_pattern="*.jpgg"):
"""
        :param image_dir: path to the directory containing the dataset images
        :param tfrecord_path: output path of the tfrecords file
        :param img_pattern: glob pattern of the image files to include
"""
self.image_dir = image_dir
self.tfrecord_path = tfrecord_path
self.img_pattern = img_pattern
self.image_feature_description = \
{
'image': tf.io.FixedLenFeature([], tf.string)
}
@staticmethod
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
@staticmethod
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
@staticmethod
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _parse_example_function(self, example_proto):
# Parse the input tf.Example proto using the dictionary above.
return tf.io.parse_example(example_proto, self.image_feature_description)
def image_example(self, image_string):
feature = {
'image': self._bytes_feature(image_string)
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def write_tfrecords(self, training=False, dataset_name=""):
img_paths = sorted(pathlib.Path(self.image_dir).rglob(self.img_pattern))
with tf.io.TFRecordWriter(self.tfrecord_path) as writer:
for img_path in tqdm.tqdm(img_paths):
img_string = open(str(img_path), 'rb').read()
tf_example = self.image_example(img_string)
writer.write(tf_example.SerializeToString())
if training:
import json
if os.path.exists('{}/data_samples.json'.format(os.path.dirname(self.tfrecord_path))):
with open('{}/data_samples.json'.format(os.path.dirname(self.tfrecord_path))) as f:
data = json.load(f)
if dataset_name in list(data.keys()):
print("Dataset {} value was already present but value was updated".format(dataset_name))
else:
data = {}
data[dataset_name] = len(img_paths)
with open('{}/data_samples.json'.format(os.path.dirname(self.tfrecord_path)), 'w') as json_file:
json.dump(data, json_file)
def decode_strings(self, record):
images = tf.io.decode_jpeg(record['image'], 3)
return images
def read_tfrecords(self):
"""
Read iam tfrecords
:return: Returns an image
"""
raw_dataset = tf.data.TFRecordDataset(self.tfrecord_path)
parsed_dataset = raw_dataset.map(self._parse_example_function)
decoded_dataset = parsed_dataset.map(self.decode_strings)
return decoded_dataset
if __name__ == "__main__":
args = argparse.ArgumentParser(description="Create tfrecords with the following settings")
args.add_argument("-d", "--dataset", type=str, default=f"no_name_{str(np.random.randint(0, 20000))}",
help="Name a dataset to be later used with seg_train script, highly recommended to have one")
args.add_argument("--img_dir", "-i", type=str, required=True, help="Directory containing the dataset images")
args.add_argument("--save_dir", "-s", type=str, required=True, help="Directory to save the tfrecords")
args.add_argument("--img_pat", "-i_p", type=str, default="*.jpg", help="Image pattern/extension in directory, "
"glob regex convention")
args.add_argument("--visualize", "-v", action="store_true", help="Show 4 samples after creation. As visual check.")
args.add_argument("--eval", "-e", action="store_true", help="Set to true in case the records are for evaluation")
args = args.parse_args()
dataset_name = args.dataset
os.makedirs(args.save_dir, exist_ok=True)
record_type = "train" if not args.eval else "val"
records = TFRecordsGAN(image_dir=f"{args.img_dir}",
tfrecord_path=f"{args.save_dir}/{dataset_name}_{record_type}.tfrecords",
img_pattern=args.img_pat)
records.write_tfrecords(training=True, dataset_name=dataset_name) if not args.eval else records.write_tfrecords()
if args.visualize:
image_dataset = records.read_tfrecords().batch(1).take(4)
cv2.namedWindow("img", 0)
for image_features in image_dataset:
img = image_features[0, ..., ::-1]
cv2.imshow("img", img.numpy())
cv2.waitKey()
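# Example invocation (added for clarity; not part of the original script).
# Paths and the dataset name are illustrative placeholders:
#
#   python create_gan_tfrecords.py -i /data/horse2zebra/trainA \
#       -s /data/tfrecords -d horse2zebra_A
#
# Without --eval this writes <save_dir>/horse2zebra_A_train.tfrecords and records
# the sample count in <save_dir>/data_samples.json; with --eval it writes
# *_val.tfrecords and skips the json bookkeeping. Add -v to preview four decoded
# images after writing.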
| AhmedBadar512/Badr_AI_Repo | utils/create_gan_tfrecords.py | create_gan_tfrecords.py | py | 5,294 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "tensorflow.io.FixedLenFeature",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tensorflow.io",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.string",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name"... |
24526724853 | import json
from arXivo.models import ArXivoUser
from arXivo.serializers import ArXivoUserSerializer, SearchSerializer
from arXivo.utils import get_tokens_for_user
from django.conf import settings
from django.contrib.auth import authenticate
from django.http.response import JsonResponse
from django.middleware import csrf
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_simplejwt.tokens import RefreshToken
class RegisterView(APIView):
serializer_class = ArXivoUserSerializer
def post(self, request, format=None):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
serializer.save()
csrf.get_token(request)
return JsonResponse(
{"message": "User Created Successfully"}, status=status.HTTP_200_OK
)
else:
return JsonResponse(
{"message": "There was an error!", "error": serializer.errors},
status=status.HTTP_400_BAD_REQUEST,
)
class LoginView(APIView):
def post(self, request, format=None):
data = request.data
response = Response()
username = data.get("username", None)
user = ArXivoUser.objects.filter(username=username)
if not user.exists():
return Response(
{"error": "Username does not exist!!"},
status=status.HTTP_403_FORBIDDEN,
)
password = data.get("password", None)
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
data = get_tokens_for_user(user)
response.set_cookie(
key=settings.SIMPLE_JWT["AUTH_COOKIE"],
value=data["access"],
expires=settings.SIMPLE_JWT["ACCESS_TOKEN_LIFETIME"],
secure=settings.SIMPLE_JWT["AUTH_COOKIE_SECURE"],
httponly=settings.SIMPLE_JWT["AUTH_COOKIE_HTTP_ONLY"],
samesite=settings.SIMPLE_JWT["AUTH_COOKIE_SAMESITE"],
)
response.set_cookie(
key="refresh_token",
value=data["refresh"],
expires=settings.SIMPLE_JWT["REFRESH_TOKEN_LIFETIME"],
secure=settings.SIMPLE_JWT["AUTH_COOKIE_SECURE"],
httponly=settings.SIMPLE_JWT["AUTH_COOKIE_HTTP_ONLY"],
samesite=settings.SIMPLE_JWT["AUTH_COOKIE_SAMESITE"],
)
csrf.get_token(request)
response.data = {"message": "Login successfully", "data": data}
return response
else:
return Response(
{"error": "This account is not active!!"},
status=status.HTTP_403_FORBIDDEN,
)
else:
return Response(
{"error": "Invalid Password!!"},
status=status.HTTP_403_FORBIDDEN,
)
class RefreshView(APIView):
permission_classes = [IsAuthenticated]
def post(self, request, format=None):
refresh = RefreshToken(request.COOKIES.get("refresh_token"))
response = Response(status=status.HTTP_200_OK)
response.set_cookie(
key=settings.SIMPLE_JWT["AUTH_COOKIE"],
value=refresh.access_token,
expires=settings.SIMPLE_JWT["ACCESS_TOKEN_LIFETIME"],
secure=settings.SIMPLE_JWT["AUTH_COOKIE_SECURE"],
httponly=settings.SIMPLE_JWT["AUTH_COOKIE_HTTP_ONLY"],
samesite=settings.SIMPLE_JWT["AUTH_COOKIE_SAMESITE"],
)
response.set_cookie(
key=settings.SIMPLE_JWT["REFRESH_COOKIE"],
value=str(refresh),
expires=settings.SIMPLE_JWT["REFRESH_TOKEN_LIFETIME"],
secure=settings.SIMPLE_JWT["AUTH_COOKIE_SECURE"],
httponly=settings.SIMPLE_JWT["AUTH_COOKIE_HTTP_ONLY"],
samesite=settings.SIMPLE_JWT["AUTH_COOKIE_SAMESITE"],
)
response.data = {"message": "Tokens Refreshed Successfully"}
return response
class LogoutView(APIView):
permission_classes = [IsAuthenticated]
def post(self, request, format=None):
response = Response(status=status.HTTP_200_OK)
response.delete_cookie(settings.SIMPLE_JWT["AUTH_COOKIE"])
response.delete_cookie(settings.SIMPLE_JWT["REFRESH_COOKIE"])
response.delete_cookie(settings.SIMPLE_JWT["CSRF_COOKIE"])
response.data = {"message": "Logged Out Successfully"}
return response
class SearchView(APIView):
permission_classes = [IsAuthenticated]
serializer_class = SearchSerializer
def post(self, request, format=None):
users = ArXivoUser.objects.filter(
username__icontains=request.data["search_term"]
)
serializer = self.serializer_class(users, many=True)
return JsonResponse({"data": serializer.data}, status=status.HTTP_200_OK)
class GetNotificationView(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, format=None):
notif_data = request.user.notification_array
notif_pyobj = json.loads(notif_data)["data"]
for _notif in notif_pyobj:
_notif["seen"] = True
request.user.notification_array = json.dumps({"data": notif_pyobj})
request.user.save()
return JsonResponse(notif_data, status=status.HTTP_200_OK, safe=False)
class SendNotificationView(APIView):
permission_classes = [IsAuthenticated]
def post(self, request, format=None):
other_user = ArXivoUser.objects.get(username=request.data["send_to"])
notif_data = {
"filename": request.data["filename"],
"address": request.data["address"],
"key": request.data["key"],
"file_type": request.data["file_type"],
"seen": False,
"sender": request.user.username,
}
prev_data = json.loads(other_user.notification_array)
prev_data["data"].append(notif_data)
other_user.notification_array = json.dumps(prev_data)
other_user.save()
data = {"reponse": "good_response"}
return JsonResponse(data, status=status.HTTP_200_OK)
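# Added for clarity (not part of the original module): user.notification_array is
# stored as a JSON string of the form below (field values are illustrative).
# GetNotificationView returns it and marks every entry as seen;
# SendNotificationView appends a new entry for the receiving user.
#
#   {"data": [{"filename": "report.pdf", "address": "...", "key": "...",
#              "file_type": "pdf", "seen": false, "sender": "alice"}]}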
| DebadityaPal/arXivo | backend/arXivo/views.py | views.py | py | 6,438 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "rest_framework.views.APIView",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "arXivo.serializers.ArXivoUserSerializer",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.middleware.csrf.get_token",
"line_number": 24,
"usage_type": ... |
33068133857 | import sys
import argparse
import importlib
commands = {
'train': {
'script': 'ocr4all_pixel_classifier.scripts.train',
'main': 'main',
'help': 'Train the neural network. See more via "* train --help"'
},
'predict': {
'script': 'ocr4all_pixel_classifier.scripts.predict',
'main': 'main',
'help': 'Predict a result with the neural network. See more via "* predict --help"'
},
'predict-json': {
'script': 'ocr4all_pixel_classifier.scripts.predict_json',
'main': 'main',
'help': 'Predict a result with the neural network, input via JSON. See more via "* predict --help"'
},
'create-dataset-file': {
'script': 'ocr4all_pixel_classifier.scripts.create_dataset_file',
'main': 'main',
'help': 'Create a dataset file'
},
'compute-image-normalizations': {
'script': 'ocr4all_pixel_classifier.scripts.compute_image_normalizations',
'main': 'main',
'help': 'Compute image normalizations'
},
'compute-image-map': {
'script': 'ocr4all_pixel_classifier.scripts.generate_image_map',
'main': 'main',
'help': 'Generates color map'
},
'migrate-model': {
'script': 'ocr4all_pixel_classifier.scripts.migrate_model',
'main': 'main',
'help': 'Convert old model to new format'
},
}
def main():
    # Pretty-print help for the main program
usage = 'page-segmentation <command> [<args>]\n\nCOMMANDS:'
# Add all commands to help
max_name_length = max(len(name) for name, _ in commands.items())
for name, command in commands.items():
usage += '\n\t{name:<{col_width}}\t{help}'.format(name=name, col_width=max_name_length, help=command["help"])
parser = argparse.ArgumentParser(usage=usage)
parser.add_argument('command', help='The sub command to execute, see COMMANDS')
args = parser.parse_args(sys.argv[1:2])
sys.argv = sys.argv[:1] + sys.argv[2:]
if args.command in commands.keys():
command = commands[args.command]
command_module = importlib.import_module(command['script'])
command_main = getattr(command_module, command['main'])
command_main()
else:
print('Unrecognized command')
parser.print_help()
exit(1)
if __name__ == "__main__":
main()
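# Example invocation (added for clarity; not part of the original module). The
# console-script name below is an assumption based on the usage string above:
#
#   page-segmentation train --help
#
# which dispatches to ocr4all_pixel_classifier.scripts.train:main with the
# remaining arguments.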
| OMMR4all/ommr4all-page-segmentation | ocr4all_pixel_classifier/scripts/main.py | main.py | py | 2,358 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "importlib.import_... |
19304565869 |
# -*- coding: utf-8 -*-
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from time import sleep
from selenium.webdriver.chrome.options import Options
import pandas as pd
import requests
"""
options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
driver.get('https://shifucon.ppihgroup.com/staffpage/')
print(driver)
elemname = driver.find_element_by_id("login_email")
elemname.send_keys('0167938')
elemname = driver.find_element_by_id("login_pass")
elemname.send_keys('3104chalo')
log_in = driver.find_element_by_class_name('btn btn-lg btn-primary btn-block')
log_in.click()
"""
def main():
options = Options()
options.add_argument('--headless')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
#options.add_argument('--disable-features=VizDisplayCompositor')
browser = webdriver.Chrome(ChromeDriverManager().install(), options=options)
browser.get("https://scraping-for-beginner.herokuapp.com/login_page")
username = browser.find_element_by_id('username')
username.send_keys("imanishi")
userpass = browser.find_element_by_id('password')
userpass.send_keys("ki")
log_in = browser.find_element_by_id('login-btn')
log_in.click()
NAME = browser.find_element_by_id("name")
print("名前:", NAME.text)
COM = browser.find_element_by_id("company")
print("所属企業:", COM.text)
birthday = browser.find_element_by_id("birthday")
print("生年月日:", birthday.text)
birthplace = browser.find_element_by_id("come_from")
print("出身地:", birthplace.text)
hobby = browser.find_element_by_id("hobby")
print("趣味:", hobby.text)
    # single element
    elemth = browser.find_element_by_tag_name("th")
    # multiple elements (overwrites the single-element lookup above)
    elemth = browser.find_elements_by_tag_name("th")
print(elemth[0].text)
key = []
for i in elemth:
key.append(i.text)
value = []
elemtd = browser.find_elements_by_tag_name("td")
for i in elemtd:
value.append(i.text)
sleep(5)
browser.quit()
df = pd.DataFrame()
df["項目"] = key
df["値"] = value
print(df)
df.to_csv("講師情報.csv", index = False)
if __name__ == '__main__':
main()
| satoshi-python/109 | train_sele.py | train_sele.py | py | 2,427 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 30,
"usage_type": "name"
},
{... |
38152844484 | from typing import List
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.chrome.options import Options
class PathsParser:
    def __init__(self):  # if the site URL changes, update it in parse_urls below
caps = DesiredCapabilities().CHROME
caps["pageLoadStrategy"] = "eager"
# setting the "eager" parameter so as not to wait for the full download
options = Options()
options.add_argument("--headless")
# setting for hide the browser while running
self.driver = webdriver.Chrome(desired_capabilities=caps,
options=options)
def check_existence(self, page: webdriver) -> bool:
"""
        Check whether the given results page still contains results; returns
        False once the "No results available" marker appears (past the last page).
:param page:
:return flag:
"""
flag = True
xpath = "//strong[text()='No results available']"
try:
page.find_element(By.XPATH, xpath)
flag = False
except Exception:
pass
return flag
def parse_urls(self) -> List:
"""
        Parse every available listing page that contains rows with articles.
:return page_list:
"""
page_list = []
counter_page = 0
flag = True
current_page = 'https://www.aceee.org/news?keys=&field_' \
'authors_target_id=&field_related_programs_target_id' \
'=&field_related_topics_target_id=&' \
'sort_bef_combine=created_DESC&' \
'sort_by=created&sort_order=DESC&page={}'
while flag:
current_url = current_page.format(counter_page)
self.driver.get(current_url)
flag = self.check_existence(self.driver)
if not flag:
break
page_list.extend(self.parse_paths(current_url))
print(current_url, '--- complete!')
counter_page += 1
self.driver.quit()
return page_list
def parse_paths(self, url: str) -> List:
"""
        Collect the article paths from the rows of a single listing page.
:param url:
:return path_list:
"""
path_list = []
self.driver.get(url)
rows = self.driver.find_elements(By.CLASS_NAME, 'views-row')
for row in rows:
path_list.append(
row.find_element(By.TAG_NAME, 'a').get_attribute('href')
)
return path_list
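# Illustrative usage (added for clarity; not part of the original module).
# Requires a local Chrome/chromedriver and network access, so it is left
# commented out rather than run on import:
#
#   if __name__ == "__main__":
#       scraper = PathsParser()
#       article_urls = scraper.parse_urls()  # walks ?page=0,1,2,... until "No results available"
#       print(len(article_urls), "article links collected")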
| stolzor/test_task | models/paths_parser.py | paths_parser.py | py | 2,546 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "selenium.webdriver.common.desired_capabilities.DesiredCapabilities",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"lin... |
42123297938 | from collections import namedtuple
import numpy as np
from ...utils.interpolation.levels import ( # noqa
LevelsDefinition as ConversionLevelsDefinition,
)
INPUT_REQUIRED_FIELDS = dict(
export_format=str,
levels_method=(None, str),
levels_number=(None, int),
levels_dzmin=(None, float),
levels_ztop=(None, float),
comment=(None, str),
campaign=(None, str),
source_domain=(None, str),
reference=(None, str),
# AUTHOR=CREATOR
author=(None, str),
modifications=(None, str),
# CASE=FLIGHT
case=(None, str),
adv_temp=[0, 1],
adv_theta=[0, 1],
adv_thetal=[0, 1],
adv_qv=[0, 1],
adv_qt=[0, 1],
adv_rv=[0, 1],
adv_rt=[0, 1],
rad_temp=[0, 1, "adv"],
rad_theta=[0, 1, "adv"],
rad_thetal=[0, 1, "adv"],
forc_omega=[0, 1],
forc_w=[0, 1],
forc_geo=[0, 1],
nudging_u=(0, np.nan, float),
nudging_v=(0, np.nan, float),
nudging_temp=(0, np.nan, float),
nudging_theta=(0, np.nan, float),
nudging_thetal=(0, np.nan, float),
nudging_qv=(0, np.nan, float),
nudging_qt=(0, np.nan, float),
nudging_rv=(0, np.nan, float),
nudging_rt=(0, np.nan, float),
surfaceType=["ocean", "land", "mixed"],
surfaceForcing=["ts", "Flux", "surfaceFlux"],
surfaceForcingWind=["z0", "ustar", "z0_traj"],
nudging_method_scalar_traj=(None, str),
nudging_time_scalar_traj=(None, float),
nudging_height_scalar_traj=(None, float),
nudging_transition_scalar_traj=(None, float),
nudging_method_momentum_traj=(None, str),
nudging_time_momentum_traj=(None, float),
nudging_height_momentum_traj=(None, float),
nudging_transition_momentum_traj=(None, float),
)
ConversionDefinition = namedtuple(
"ConversionDefinition",
["export_format", "levels", "name", "metadata", "parameters"],
)
ConversionParametersDefinition = namedtuple(
"ConversionParametersDefinition",
[
"adv_temp",
"adv_theta",
"adv_thetal",
"adv_qv",
"adv_qt",
"adv_rv",
"adv_rt",
"rad_temp",
"rad_theta",
"rad_thetal",
"forc_omega",
"forc_w",
"forc_geo",
"nudging_u",
"nudging_v",
"nudging_temp",
"nudging_theta",
"nudging_thetal",
"nudging_qv",
"nudging_qt",
"nudging_rv",
"nudging_rt",
"surfaceType",
"surfaceForcing",
"surfaceForcingWind",
"nudging_parameters_scalar_traj",
"nudging_parameters_momentum_traj",
],
)
ConversionNudgingDefinition = namedtuple(
"ConversionNudgingDefinition",
[
"method",
"time",
"height",
"transition",
],
)
ConversionMetadataDefinition = namedtuple(
"ConversionMetadataDefinition",
[
"comment",
"campaign",
"source_domain",
"reference",
"author",
"modifications",
"case",
],
)
| EUREC4A-UK/lagtraj | lagtraj/forcings/conversion/input_definitions.py | input_definitions.py | py | 2,969 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "numpy.nan",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_nu... |
27577948701 | from django.urls import path
from .views import (HomePageView, MessageView, UserProfile, delete_message,
spam_message, AddReview, AbouUs, ContactUs, ReviewView, SettingsView, EditProfile)
urlpatterns = [
path('', HomePageView.as_view(), name='home'),
path('profile/', UserProfile.as_view(), name='user_profile'),
# path('profile/edit/<int:pk>', EditProfile.as_view(), name='edit_profile'),
path('profile/edit/<str:username>', EditProfile, name='edit_profile'),
path('about/', AbouUs.as_view(), name='about'),
path('contact/', ContactUs.as_view(), name='contact'),
path('review/', ReviewView, name='review'),
path('review/add/', AddReview, name='add_review'),
path('settings/', SettingsView, name='settings'),
path('message/<str:username>/', MessageView, name='message'),
path('delete/<int:m_id>/', delete_message, name='delete'),
path('spam/<int:m_id>/', spam_message, name='spam'),
]
| Afeez1131/Anonymous-v1 | anonymous/urls.py | urls.py | py | 954 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "views.HomePageView.as_view",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "views.HomePageView",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.ur... |
22905238470 | import tensorflow as tf
import argparse
import sys
sys.path.insert(0, "../CycleGAN-TensorFlow")
import model # nopep8
# Transform image bitstring to float tensor
def preprocess_bitstring_to_float_tensor(input_bytes, image_size):
input_bytes = tf.reshape(input_bytes, [])
# Transform bitstring to uint8 tensor
input_tensor = tf.image.decode_png(input_bytes, channels=3)
# Convert to float32 tensor
input_tensor = tf.image.convert_image_dtype(input_tensor,
dtype=tf.float32)
input_tensor = input_tensor / 127.5 - 1.0
# Ensure tensor has correct shape
input_tensor = tf.reshape(input_tensor, [image_size, image_size, 3])
# CycleGAN's inference function accepts a batch of images
# So expand the single tensor into a batch of 1
input_tensor = tf.expand_dims(input_tensor, 0)
return input_tensor
# Transform float tensor to image bitstring
def postprocess_float_tensor_to_bitstring(output_tensor):
# Convert to uint8 tensor
output_tensor = (output_tensor + 1.0) / 2.0
output_tensor = tf.image.convert_image_dtype(output_tensor, tf.uint8)
# Remove the batch dimension
output_tensor = tf.squeeze(output_tensor, [0])
# Transform uint8 tensor to bitstring
output_bytes = tf.image.encode_png(output_tensor)
output_bytes = tf.identity(output_bytes, name="output_bytes")
return output_bytes
# Export graph to ProtoBuf
def export_graph():
graph = tf.Graph()
with graph.as_default():
# Instantiate a CycleGAN
cycle_gan = model.CycleGAN(ngf=64,
norm="instance",
image_size=FLAGS.image_size)
# Create placeholder for image bitstring
# This is the first injection layer
input_bytes = tf.placeholder(tf.string, shape=[], name="input_bytes")
# Preprocess input (bitstring to float tensor)
input_tensor = preprocess_bitstring_to_float_tensor(input_bytes,
FLAGS.image_size)
# Get style transferred tensor
output_tensor = cycle_gan.G.sample(input_tensor)
# Postprocess output
output_bytes = postprocess_float_tensor_to_bitstring(output_tensor)
# Instantiate a Saver
saver = tf.train.Saver()
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
# Access variables and weights from last checkpoint
latest_ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
saver.restore(sess, latest_ckpt)
# Export graph to ProtoBuf
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [output_bytes.op.name])
tf.train.write_graph(output_graph_def,
FLAGS.protobuf_dir,
FLAGS.model_name + "_v" + str(FLAGS.version),
as_text=False)
# Wrap a SavedModel around ProtoBuf
# Necessary for using the tensorflow-serving RESTful API
def build_saved_model():
# Instantiate a SavedModelBuilder
# Note that the serve directory MUST have a model version subdirectory
builder = tf.saved_model.builder.SavedModelBuilder(FLAGS.serve_dir +
"/" +
str(FLAGS.version))
# Read in ProtoBuf file
with tf.gfile.GFile(FLAGS.protobuf_dir +
"/" +
FLAGS.model_name +
"_v" +
str(FLAGS.version),
"rb") as protobuf_file:
graph_def = tf.GraphDef()
graph_def.ParseFromString(protobuf_file.read())
# Get input and output tensors from GraphDef
# These are our injected bitstring layers
[inp, out] = tf.import_graph_def(graph_def,
name="",
return_elements=["input_bytes:0",
"output_bytes:0"])
with tf.Session(graph=out.graph) as sess:
# Signature_definition expects a batch
# So we'll turn the output bitstring into a batch of 1 element
out = tf.expand_dims(out, 0)
# Build prototypes of input and output
input_bytes = tf.saved_model.utils.build_tensor_info(inp)
output_bytes = tf.saved_model.utils.build_tensor_info(out)
# Create signature for prediction
signature_definition = tf.saved_model.signature_def_utils.build_signature_def( # nopep8
inputs={"input_bytes": input_bytes},
outputs={"output_bytes": output_bytes},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
# Add meta-information
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={
tf.saved_model.signature_constants.
DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_definition
})
# Create the SavedModel
builder.save()
def main(_):
print("Exporting model to ProtoBuf...")
export_graph()
print("Wrapping ProtoBuf in SavedModel...")
build_saved_model()
print("Exported successfully!")
print("""Run the server with:
tensorflow_model_server --rest_api_port=8501 """
"--model_name=saved_model --model_base_path=$(path)")
if __name__ == "__main__":
# Instantiate an arg parser
parser = argparse.ArgumentParser()
# Establish default arguments
parser.add_argument("--checkpoint_dir",
type=str,
default="../CycleGAN-TensorFlow/"
"checkpoints/20180628-1208",
help="Path to checkpoints directory")
parser.add_argument("--protobuf_dir",
type=str,
default="../CycleGAN-TensorFlow/protobufs",
help="Path to protobufs directory")
parser.add_argument("--model_name",
type=str,
default="model",
help="Model name")
parser.add_argument("--serve_dir",
type=str,
default="serve",
help="Path to serve directory")
parser.add_argument("--version",
type=int,
default=1,
help="Model version number")
parser.add_argument("--image_size",
type=int,
default=64,
help="Image size")
# Parse known arguments
FLAGS, unparsed = parser.parse_known_args()
# Run the tensorflow app
tf.app.run(argv=[sys.argv[0]] + unparsed)
| tmlabonte/tendies | minimum_working_example/export_graph_for_serving.py | export_graph_for_serving.py | py | 6,953 | python | en | code | 37 | github-code | 6 | [
{
"api_name": "sys.path.insert",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.reshape",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tensorflow.image.decod... |
27580622561 | from django.shortcuts import render, HttpResponse, get_object_or_404, HttpResponseRedirect
from .models import fizzURL
from django.views import View
from fiz.utils import create_shortcode
from .forms import SubmitURLForm
class HomeView(View):
'''
for a CBV, post and get function will be written separately, unlike
FBV which handles the two by itself
'''
def get(self, request):
form = SubmitURLForm
return render(request, 'fiz/home.html', {'form': form, 'title': 'Fiz.co'})
def post(self, request):
form = SubmitURLForm(request.POST)
if form.is_valid():
new_url = form.cleaned_data['url']
obj, created = fizzURL.objects.get_or_create(url=new_url)
context = {
'obj': obj,
'created': created,
}
if created:
template = 'fiz/created.html'
else:
template= 'fiz/already-exist.html'
return render(request, template, context)
class FizCBV(View):
def get(self, request, shortcode):
obj = get_object_or_404(fizzURL, shortcode=shortcode)
return HttpResponseRedirect(obj.url)
| Afeez1131/shortener | fiz/views.py | views.py | py | 1,203 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.views.View",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "forms.SubmitURLForm",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "forms.Su... |
34252791072 | import argparse
import json
import os
from flask import Flask, render_template, request
import openai
import requests
import base64
app = Flask(__name__)
# Configure OpenAI API credentials
openai.api_key = 'OPEN_API_KEY'
@app.route('/')
def index():
return render_template('index.html')
@app.route('/search', methods=['POST'])
def search():
search_query = request.form['search']
if("picture" in search_query):
process(search_query)
return render_template('index.html', search_query=False, results=True)
else:
results = search_openai(search_query)
return render_template('index.html', search_query=search_query, results=results)
def search_openai(query):
response = openai.Completion.create(
engine='text-davinci-003', # Use the GPT-3.5 engine
prompt=query,
max_tokens=4000, # Adjust the response length as needed
temperature=0.7, # Adjust the temperature for response randomness
n=1, # Generate a single response
stop=None, # Optional stop sequence to end the response
timeout=10, # Optional timeout for the API request
)
return response.choices[0].text.strip()
def process(prompt):
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--prompt", help="Text to image prompt:", default='an isometric view of a miniature city, tilt shift, bokeh, voxel, vray render, high detail')
parser.add_argument("-n", "--number", help="Number of images generated", default=1)
parser.add_argument("-s", "--size", help="Image size: 256, 512 or 1024", default=256)
args = parser.parse_args()
api_key ="OPEN_API_KEY"
url = 'https://api.openai.com/v1/images/generations'
custom_headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + api_key,
}
reqBody = {
"prompt": prompt,
"n": int(args.number),
"size": f'{args.size}x{args.size}',
"response_format": "b64_json"
}
res = requests.post(url,
data=json.dumps(reqBody),
headers=custom_headers,
)
# print(r)
# print(r.url)
# print(r.status_code)
# print(res.text)
# print(r.content)
res_json = json.loads(res.text)
for i in range(0, len(res_json['data'])):
img_file_name = 'image.jpeg'
folder="static"
file_path = os.path.join(folder, img_file_name)
with open(file_path, 'wb') as f:
f.write(base64.urlsafe_b64decode(res_json['data'][i]['b64_json']))
if __name__ == '__main__':
app.run(debug=True)
| Guhan-jb/HippoGPT | Hippo_GPT/main.py | main.py | py | 2,661 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "openai.api_key",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.request.fo... |
20513626833 | from selenium import webdriver
from time import sleep
# validateText = "Option3"
driver = webdriver.Chrome(executable_path="/home/chaitanya/Documents/software/drivers/chromedriver_linux64/chromedriver")
driver.get("https://rahulshettyacademy.com/AutomationPractice/")
# Positive case
driver.find_element_by_css_selector("input#name").send_keys("Option3")
validateText = driver.find_element_by_xpath("//input[@id='name']").get_attribute("value")  # .text is empty for <input> elements; read the typed value instead
driver.find_element_by_xpath("//input[@value='Alert']").click()
alert = driver.switch_to.alert
assert validateText in alert.text
sleep(5)
alert.accept()
# Negative case
driver.find_element_by_id("confirmbtn").click()
# validateText = driver.find_element_by_xpath("//input[@value='Confirm']").text
# assert validateText in alert.text
sleep(5)
alert.dismiss()
| ChaithanyaRepo/PythonTesting | PythonSelenium/alerts.py | alerts.py | py | 788 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
37204853562 | import logging
import logging.config
import os
import yaml
from commons.path import LOG_PATH, CONFIG_PATH
class MyLog:
def __init__(self, file_name, config_path=CONFIG_PATH, handel_name='server', level=logging.INFO):
"""
        Custom logger object.
        :param config_path: path to the custom logging config file
        :param file_name: name of the log file for this logger
        :param handel_name: name of the custom handler; change this only if you defined your own handler in the config file, otherwise leave it unchanged
        :param level: logging level for this logger
"""
self.config_path = config_path
self.file_name = LOG_PATH + file_name
self.handler = handel_name
self.level = level
def setup_logging(self, env_key='LOG_CFG'):
"""
| **@author:** Prathyush SP
| Logging Setup
"""
value = os.getenv(env_key, None)
if value:
self.config_path = value
if os.path.exists(self.config_path):
with open(self.config_path, 'rt', encoding="utf-8") as f:
try:
config = yaml.safe_load(f.read())
logconfig = config['logConfig']
logconfig['handlers']['file']['filename'] = self.file_name
logging.config.dictConfig(logconfig)
except Exception as e:
print(e)
print('Error in Logging Configuration. Using default configs')
logging.basicConfig(level=self.level)
else:
logging.basicConfig(level=self.level)
print('Failed to load configuration file. Using default configs')
def get_loger(self):
self.setup_logging()
loger = logging.getLogger(self.handler)
return loger
if __name__ == '__main__':
    logger = MyLog('tjk.log', config_path='../config.yaml').get_loger()  # file_name first, config_path second, per __init__
logger.info("testssss")
| tangjikuo/pdfHandlerSite | commons/logs.py | logs.py | py | 1,948 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "commons.path.CONFIG_PATH",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "logging.INFO",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "commons.path.LOG_PATH",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "os.get... |
70472477627 | import numpy as np
import matplotlib.pyplot as plt
# data to plot
n_groups = 5
meso4 = (0.5, 0.65, 0.84, 0.7,0.51)
capsule = (0.84, 0.89, 0.96, 0.95, 0.88)
xception = (0.93, 0.97, 0.98, 0.95, 0.88)
gan = (0.72, 0.73, 0.86, 0.86, 0.72)
spectrum = (0.81, 0.83, 0.98, 0.67, 0.57)
headpose = (0.64, 0.64, 0.64, 0.64, 0.62)
visual = (0.96, 0.96, 0.97, 0.84, 0.69)
# create plot
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.35
opacity = 0.8
rects1 = plt.barh(index, meso4, bar_width,
alpha=opacity,
color='b',
label='meso4')
rects2 = plt.barh(index + bar_width, capsule, bar_width,
alpha=opacity,
color='g',
label='capsule')
rects3 = plt.barh(index + 2*bar_width, xception, bar_width,  # plot the xception scores (was mistakenly re-plotting capsule)
                  alpha=opacity,
                  color='r',
                  label='xception')
# plt.xlabel('Person')
# plt.ylabel('Scores')
plt.title('Scores by person')
plt.yticks(index + bar_width, ('meso4', 'capsule', 'xception', 'gan'))
plt.legend()
plt.tight_layout()
plt.show() | phuc180155/GraduationThesis | dfd_benchmark/plot_image/more_bar_chart.py | more_bar_chart.py | py | 981 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.... |
1068948273 | import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import minimize
import gymnasium as gym
import imageio
from pendulum_model import PendulumModel
# Constants
g = 10.0 # gravitational acceleration
l = 1.0 # length of the pendulum
m = 1.0 # mass of the pendulum
dt = 0.05 # time step
n = 100 # number of time steps
x0 = np.zeros(3 * n)
class ScipySolver:
def __init__(self):
self.theta0 = np.pi
self.theta_dot0 = 0.0
@staticmethod
def objective(x):
theta = x[:n]
theta_dot = x[n:2 * n]
u = x[2 * n:]
cost = np.sum(theta ** 2) + 0.1 * np.sum(theta_dot ** 2) + 0.001 * np.sum(u ** 2)
return cost
def dynamics(self, x):
theta = x[:n]
theta_dot = x[n:2 * n]
u = x[2 * n:]
constraints = []
constraints.append(theta[0] - self.theta0)
constraints.append(theta_dot[0] - self.theta_dot0)
for t in range(n - 1):
constraints.append(theta_dot[t + 1] - (
theta_dot[t] + (3 * g * dt) / (2 * l) * np.sin(theta[t]) + (3 * dt) / (m * l ** 2) * u[t]))
constraints.append(theta[t + 1] - (theta[t] + theta_dot[t+1] * dt))
return np.array(constraints)
@staticmethod
def plot_results(theta, theta_dot, u):
time = np.linspace(0, dt * n, n)
plt.figure(figsize=(10, 8))
# Plot theta
plt.subplot(3, 1, 1)
plt.plot(time, theta)
plt.ylabel('Theta (rad)')
plt.title('Optimal Control Results')
# Plot theta_dot
plt.subplot(3, 1, 2)
plt.plot(time, theta_dot)
plt.ylabel('Theta_dot (rad/s)')
# Plot u
plt.subplot(3, 1, 3)
plt.plot(time, u)
plt.ylabel('Control Input (u)')
plt.xlabel('Time (s)')
plt.tight_layout()
plt.show()
def solve(self):
env = make_env("Pendulum-v1")
observation, info = env.reset(seed=4)
model = PendulumModel()
model.reset(observation)
model_log = []
self.theta0 = model.state[0]
self.theta_dot0 = model.state[1]
x0[:n] = np.linspace(self.theta0, 0, n)
# x0[:n] = self.theta0
x0[n:2 * n] = np.linspace(self.theta_dot0, 0, n)
# x0[n:2] = self.theta_dot0
for i in range(100):
model_log.append(model.state)
action = np.array([0.0])
model.step(action)
model_log.append(action)
model_log = np.hstack(model_log)
# Initial guess
# Bounds
theta_dot_bounds = (-8, 8)
u_bounds = (-2, 2)
bounds = [(None, None)] * n + [theta_dot_bounds] * n + [u_bounds] * n
# Constraints
constraints = {'type': 'eq', 'fun': self.dynamics}
# Optimize
result = minimize(self.objective, x0, method='trust-constr', bounds=bounds, constraints=constraints,
options={'gtol': 1e-5})
print(result)
if result.success:
theta_opt = result.x[:n]
theta_dot_opt = result.x[n:2 * n]
u_opt = result.x[2 * n:]
print(theta_opt)
print(theta_dot_opt)
print(u_opt)
else:
print("Optimization failed.")
theta_opt = result.x[:n]
theta_dot_opt = result.x[n:2 * n]
u_opt = result.x[2 * n:]
print(theta_opt)
print(theta_dot_opt)
print(u_opt)
frames = []
for i in range(100):
observation, reward, terminated, truncated, info = env.step(u_opt[i].reshape(1, ))
print(observation, reward, u_opt[i])
frame = env.render()
frames.append(frame)
imageio.mimsave('pendulum_run.gif', frames, duration=1.0 / 20)
self.plot_results(theta_opt, theta_dot_opt, u_opt)
def make_env(name):
gym_env = gym.make(name, render_mode='rgb_array')
return gym_env
if __name__ == "__main__":
scipy_solve = ScipySolver()
scipy_solve.solve()
| CarlDegio/SQP_Pendulum | scipy_trust.py | scipy_trust.py | py | 4,069 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 43... |