seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
26238944709 | # coding=utf-8
from __future__ import unicode_literals, absolute_import, print_function, division
import errno
import json
import os.path
import sys
from sopel.tools import Identifier
from sqlalchemy import create_engine, Column, ForeignKey, Integer, String
from sqlalchemy.engine.url import URL
from sqlalchemy.exc import OperationalError, SQLAlchemyError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
# On Python 3, alias the builtins removed from Python 2 so the rest of the
# module can refer to `unicode`/`basestring` uniformly on both versions.
if sys.version_info.major >= 3:
    unicode = str
    basestring = str
def _deserialize(value):
    """Convert a raw database string back into the Python value it encodes.

    ``None`` passes through unchanged.  Everything else is coerced to text
    first — sqlite likes to hand back ints for strings that merely *look*
    like ints, even though the column type is string — and then JSON-decoded
    when possible.  Text that is not valid JSON (e.g. someone mucking with
    the DB by hand) is returned as-is rather than raising.
    """
    if value is None:
        return None
    text = unicode(value)
    try:
        return json.loads(text)
    except ValueError:
        return text
# Declarative base shared by every ORM model below.
BASE = declarative_base()
# Table options applied on MySQL so text columns can store the full Unicode
# range (utf8mb4 covers astral-plane characters such as emoji).
MYSQL_TABLE_ARGS = {'mysql_engine': 'InnoDB',
                    'mysql_charset': 'utf8mb4',
                    'mysql_collate': 'utf8mb4_unicode_ci'}
class NickIDs(BASE):
    """
    NickIDs SQLAlchemy Class

    Auto-increment table whose only column provides the shared identifier
    that groups all of a user's nick aliases together.
    """
    __tablename__ = 'nick_ids'
    # NOTE(review): unlike the other models, this one does not set
    # __table_args__ = MYSQL_TABLE_ARGS — presumably intentional since it has
    # no string columns, but worth confirming.
    nick_id = Column(Integer, primary_key=True)
class Nicknames(BASE):
    """
    Nicknames SQLAlchemy Class

    Maps a lowercased nick (``slug``) to its group id and the original
    casing (``canonical``); one row per alias.
    """
    __tablename__ = 'nicknames'
    __table_args__ = MYSQL_TABLE_ARGS
    nick_id = Column(Integer, ForeignKey('nick_ids.nick_id'), primary_key=True)
    slug = Column(String(255), primary_key=True)
    canonical = Column(String(255))
class NickValues(BASE):
    """
    NickValues SQLAlchemy Class

    Key/value store attached to a nick group; values are JSON-serialized
    strings (see _deserialize).
    """
    __tablename__ = 'nick_values'
    __table_args__ = MYSQL_TABLE_ARGS
    nick_id = Column(Integer, ForeignKey('nick_ids.nick_id'), primary_key=True)
    key = Column(String(255), primary_key=True)
    value = Column(String(255))
class ChannelValues(BASE):
    """
    ChannelValues SQLAlchemy Class

    Key/value store attached to a channel name; values are JSON-serialized
    strings (see _deserialize).
    """
    __tablename__ = 'channel_values'
    __table_args__ = MYSQL_TABLE_ARGS
    channel = Column(String(255), primary_key=True)
    key = Column(String(255), primary_key=True)
    value = Column(String(255))
class PluginValues(BASE):
    """
    PluginValues SQLAlchemy Class

    Key/value store attached to a plugin name; values are JSON-serialized
    strings (see _deserialize).
    """
    __tablename__ = 'plugin_values'
    __table_args__ = MYSQL_TABLE_ARGS
    plugin = Column(String(255), primary_key=True)
    key = Column(String(255), primary_key=True)
    value = Column(String(255))
class SopelDB(object):
    """*Availability: 5.0+*

    This defines an interface for basic, common operations on a sqlite
    database. It simplifies those common operations, and allows direct access
    to the database, wherever the user has configured it to be.

    When configured with a relative filename, it is assumed to be in the
    directory set (or defaulted to) in the core setting ``homedir``.
    """

    def __init__(self, config):
        """Build the engine URL from *config*, connect, and create the schema.

        Example connection URLs:
            MySQL  - mysql://username:password@localhost/db
            SQLite - sqlite:////home/sopel/.sopel/default.db

        :param config: a Sopel config object exposing ``core.db_*`` settings.
        :raises OSError: if the SQLite file's directory does not exist.
        :raises Exception: for an unknown ``db_type`` or missing credentials.
        """
        db_type = config.core.db_type

        # Handle SQLite explicitly as a default
        if db_type == 'sqlite':
            path = config.core.db_filename
            if path is None:
                path = os.path.join(config.core.homedir, config.basename + '.db')
            path = os.path.expanduser(path)
            if not os.path.isabs(path):
                path = os.path.normpath(os.path.join(config.core.homedir, path))
            if not os.path.isdir(os.path.dirname(path)):
                # Fail early with a clear message instead of letting SQLAlchemy
                # raise an opaque error when it tries to create the file.
                raise OSError(
                    errno.ENOENT,
                    'Cannot create database file. '
                    'No such directory: "{}". Check that configuration setting '
                    'core.db_filename is valid'.format(os.path.dirname(path)),
                    path
                )
            self.filename = path
            self.url = 'sqlite:///%s' % path
        # Otherwise, handle all other database engines
        else:
            query = {}
            if db_type == 'mysql':
                drivername = config.core.db_driver or 'mysql'
                # utf8mb4 is needed to store the full Unicode range.
                query = {'charset': 'utf8mb4'}
            elif db_type == 'postgres':
                drivername = config.core.db_driver or 'postgresql'
            elif db_type == 'oracle':
                drivername = config.core.db_driver or 'oracle'
            elif db_type == 'mssql':
                drivername = config.core.db_driver or 'mssql+pymssql'
            elif db_type == 'firebird':
                drivername = config.core.db_driver or 'firebird+fdb'
            elif db_type == 'sybase':
                drivername = config.core.db_driver or 'sybase+pysybase'
            else:
                raise Exception('Unknown db_type')

            db_user = config.core.db_user
            db_pass = config.core.db_pass
            db_host = config.core.db_host
            db_port = config.core.db_port  # Optional
            db_name = config.core.db_name  # Optional, depending on DB

            # Ensure we have all our variables defined
            if db_user is None or db_pass is None or db_host is None:
                raise Exception('Please make sure the following core '
                                'configuration values are defined: '
                                'db_user, db_pass, db_host')
            self.url = URL(drivername=drivername, username=db_user,
                           password=db_pass, host=db_host, port=db_port,
                           database=db_name, query=query)

        self.engine = create_engine(self.url)

        # Catch any errors connecting to database
        try:
            self.engine.connect()
        except OperationalError:
            print("OperationalError: Unable to connect to database.")
            raise

        # Create our tables
        BASE.metadata.create_all(self.engine)

        self.ssession = scoped_session(sessionmaker(bind=self.engine))

    def connect(self):
        """Return a raw database connection object."""
        return self.engine.connect()

    def execute(self, *args, **kwargs):
        """Execute an arbitrary SQL query against the database.

        Returns a cursor object, on which things like `.fetchall()` can be
        called per PEP 249."""
        with self.connect() as conn:
            return conn.execute(*args, **kwargs)

    def get_uri(self):
        """Returns a URL for the database, usable to connect with SQLAlchemy."""
        # Fix: use the URL computed in __init__. The previous implementation
        # formatted ``self.filename``, which only exists for SQLite and raised
        # AttributeError for every other back-end. For SQLite the returned
        # string is identical to the old behavior.
        return str(self.url)

    # NICK FUNCTIONS

    def get_nick_id(self, nick, create=True):
        """Return the internal identifier for a given nick.

        This identifier is unique to a user, and shared across all of that
        user's aliases. If create is True, a new ID will be created if one does
        not already exist"""
        session = self.ssession()
        slug = nick.lower()
        try:
            nickname = session.query(Nicknames) \
                .filter(Nicknames.slug == slug) \
                .one_or_none()

            if nickname is None:
                if not create:
                    raise ValueError('No ID exists for the given nick')
                # Generate a new ID
                nick_id = NickIDs()
                session.add(nick_id)
                session.commit()

                # Create a new Nickname
                nickname = Nicknames(nick_id=nick_id.nick_id, slug=slug, canonical=nick)
                session.add(nickname)
                session.commit()
            return nickname.nick_id
        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()

    def alias_nick(self, nick, alias):
        """Create an alias for a nick.

        Raises ValueError if the alias already exists. If nick does not already
        exist, it will be added along with the alias."""
        nick = Identifier(nick)
        alias = Identifier(alias)
        nick_id = self.get_nick_id(nick)
        session = self.ssession()
        try:
            result = session.query(Nicknames) \
                .filter(Nicknames.slug == alias.lower()) \
                .filter(Nicknames.canonical == alias) \
                .one_or_none()
            if result:
                # Fix: the previous message ("Given alias is the only entry in
                # its group.") was copy-pasted from unalias_nick and did not
                # describe this error at all.
                raise ValueError('Alias already exists.')
            nickname = Nicknames(nick_id=nick_id, slug=alias.lower(), canonical=alias)
            session.add(nickname)
            session.commit()
        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()

    def set_nick_value(self, nick, key, value):
        """Sets the value for a given key to be associated with the nick."""
        nick = Identifier(nick)
        # Values are stored JSON-serialized; _deserialize reverses this.
        value = json.dumps(value, ensure_ascii=False)
        nick_id = self.get_nick_id(nick)
        session = self.ssession()
        try:
            result = session.query(NickValues) \
                .filter(NickValues.nick_id == nick_id) \
                .filter(NickValues.key == key) \
                .one_or_none()
            # NickValue exists, update
            if result:
                result.value = value
                session.commit()
            # DNE - Insert
            else:
                new_nickvalue = NickValues(nick_id=nick_id, key=key, value=value)
                session.add(new_nickvalue)
                session.commit()
        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()

    def delete_nick_value(self, nick, key):
        """Deletes the value for a given key associated with a nick."""
        nick = Identifier(nick)
        nick_id = self.get_nick_id(nick)
        session = self.ssession()
        try:
            result = session.query(NickValues) \
                .filter(NickValues.nick_id == nick_id) \
                .filter(NickValues.key == key) \
                .one_or_none()
            # NickValue exists, delete
            if result:
                session.delete(result)
                session.commit()
        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()

    def get_nick_value(self, nick, key):
        """Retrieves the value for a given key associated with a nick."""
        nick = Identifier(nick)
        session = self.ssession()
        try:
            # Implicit join of Nicknames and NickValues on nick_id, so the
            # value is found no matter which alias of the user is given.
            result = session.query(NickValues) \
                .filter(Nicknames.nick_id == NickValues.nick_id) \
                .filter(Nicknames.slug == nick.lower()) \
                .filter(NickValues.key == key) \
                .one_or_none()
            if result is not None:
                result = result.value
            return _deserialize(result)
        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()

    def unalias_nick(self, alias):
        """Removes an alias.

        Raises ValueError if there is not at least one other nick in the group.
        To delete an entire group, use `delete_group`.
        """
        alias = Identifier(alias)
        nick_id = self.get_nick_id(alias, False)
        session = self.ssession()
        try:
            count = session.query(Nicknames) \
                .filter(Nicknames.nick_id == nick_id) \
                .count()
            if count <= 1:
                raise ValueError('Given alias is the only entry in its group.')
            session.query(Nicknames).filter(Nicknames.slug == alias.lower()).delete()
            session.commit()
        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()

    def delete_nick_group(self, nick):
        """Removes a nickname, and all associated aliases and settings."""
        nick = Identifier(nick)
        nick_id = self.get_nick_id(nick, False)
        session = self.ssession()
        try:
            session.query(Nicknames).filter(Nicknames.nick_id == nick_id).delete()
            session.query(NickValues).filter(NickValues.nick_id == nick_id).delete()
            session.commit()
        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()

    def merge_nick_groups(self, first_nick, second_nick):
        """Merges the nick groups for the specified nicks.

        Takes two nicks, which may or may not be registered. Unregistered
        nicks will be registered. Keys which are set for only one of the given
        nicks will be preserved. Where multiple nicks have values for a given
        key, the value set for the first nick will be used.

        Note that merging of data only applies to the native key-value store.
        If modules define their own tables which rely on the nick table, they
        will need to have their merging done separately."""
        first_id = self.get_nick_id(Identifier(first_nick))
        second_id = self.get_nick_id(Identifier(second_nick))
        session = self.ssession()
        try:
            # Get second_id's values
            res = session.query(NickValues).filter(NickValues.nick_id == second_id).all()
            # Update first_id with second_id values if first_id doesn't have that key
            for row in res:
                first_res = session.query(NickValues) \
                    .filter(NickValues.nick_id == first_id) \
                    .filter(NickValues.key == row.key) \
                    .one_or_none()
                if not first_res:
                    self.set_nick_value(first_nick, row.key, _deserialize(row.value))
            # Drop second_id's values, then fold its aliases into first_id.
            session.query(NickValues).filter(NickValues.nick_id == second_id).delete()
            session.query(Nicknames) \
                .filter(Nicknames.nick_id == second_id) \
                .update({'nick_id': first_id})
            session.commit()
        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()

    # CHANNEL FUNCTIONS

    def set_channel_value(self, channel, key, value):
        """Sets the value for a given key to be associated with the channel."""
        channel = Identifier(channel).lower()
        value = json.dumps(value, ensure_ascii=False)
        session = self.ssession()
        try:
            result = session.query(ChannelValues) \
                .filter(ChannelValues.channel == channel)\
                .filter(ChannelValues.key == key) \
                .one_or_none()
            # ChannelValue exists, update
            if result:
                result.value = value
                session.commit()
            # DNE - Insert
            else:
                new_channelvalue = ChannelValues(channel=channel, key=key, value=value)
                session.add(new_channelvalue)
                session.commit()
        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()

    def delete_channel_value(self, channel, key):
        """Deletes the value for a given key associated with a channel."""
        channel = Identifier(channel).lower()
        session = self.ssession()
        try:
            result = session.query(ChannelValues) \
                .filter(ChannelValues.channel == channel)\
                .filter(ChannelValues.key == key) \
                .one_or_none()
            # ChannelValue exists, delete
            if result:
                session.delete(result)
                session.commit()
        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()

    def get_channel_value(self, channel, key):
        """Retrieves the value for a given key associated with a channel."""
        channel = Identifier(channel).lower()
        session = self.ssession()
        try:
            result = session.query(ChannelValues) \
                .filter(ChannelValues.channel == channel)\
                .filter(ChannelValues.key == key) \
                .one_or_none()
            if result is not None:
                result = result.value
            return _deserialize(result)
        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()

    # PLUGIN FUNCTIONS

    def set_plugin_value(self, plugin, key, value):
        """Sets the value for a given key to be associated with a plugin."""
        plugin = plugin.lower()
        value = json.dumps(value, ensure_ascii=False)
        session = self.ssession()
        try:
            result = session.query(PluginValues) \
                .filter(PluginValues.plugin == plugin)\
                .filter(PluginValues.key == key) \
                .one_or_none()
            # PluginValue exists, update
            if result:
                result.value = value
                session.commit()
            # DNE - Insert
            else:
                new_pluginvalue = PluginValues(plugin=plugin, key=key, value=value)
                session.add(new_pluginvalue)
                session.commit()
        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()

    def delete_plugin_value(self, plugin, key):
        """Deletes the value for a given key associated with a plugin."""
        plugin = plugin.lower()
        session = self.ssession()
        try:
            result = session.query(PluginValues) \
                .filter(PluginValues.plugin == plugin)\
                .filter(PluginValues.key == key) \
                .one_or_none()
            # PluginValue exists, delete
            if result:
                session.delete(result)
                session.commit()
        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()

    def get_plugin_value(self, plugin, key):
        """Retrieves the value for a given key associated with a plugin."""
        plugin = plugin.lower()
        session = self.ssession()
        try:
            result = session.query(PluginValues) \
                .filter(PluginValues.plugin == plugin)\
                .filter(PluginValues.key == key) \
                .one_or_none()
            if result is not None:
                result = result.value
            return _deserialize(result)
        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()

    # NICK AND CHANNEL FUNCTIONS

    def get_nick_or_channel_value(self, name, key):
        """Gets the value `key` associated to the nick or channel `name`."""
        name = Identifier(name)
        if name.is_nick():
            return self.get_nick_value(name, key)
        else:
            return self.get_channel_value(name, key)

    def get_preferred_value(self, names, key):
        """Gets the value for the first name which has it set.

        `names` is a list of channel and/or user names. Returns None if none of
        the names have the key set."""
        for name in names:
            value = self.get_nick_or_channel_value(name, key)
            if value is not None:
                return value
| examknow/Exambot-Source | sopel/db.py | db.py | py | 19,385 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "sys.version_info",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.declarative.declarative_base",
"line_number": 37,
"usage_type": "call"
},
{
"api_... |
22916095420 | #Create a gspread class and extract the data from the sheets
#requires:
# 1. Google API credentials json_key file path
# 2. scope e.g. ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
# 3. gspread_url e.g. 'https://docs.google.com/spreadsheets/d/1itaohdPiAeniCXNlntNztZ_oRvjh0HsGuJXUJWET008/edit?usp=sharing'
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pandas as pd
class gspread_obj(object):
    """
    Create a google spreadsheet instance to download sheet(s) and merge them

    Requires spreadsheet url and Google API json key file

    Examples:
    >>>> gc = gspread_obj()
    >>>> gc.login('home/user/google_api_key.json')
    >>>> gc.get_sheets('https://docs.google.com/spreadsheets/d/1itaohdPiAeniCXNlntNztZ_oRvjh0HsGuJXUJWET008/edit?usp=sharing')
    >>>> df = gc.merge_sheets()
    """

    def __init__(self):
        # OAuth scopes required for both the Sheets feed and Drive access.
        self.scope = ['https://spreadsheets.google.com/feeds',
                      'https://www.googleapis.com/auth/drive']
        self.client = None  # gspread.Client object, set by login()
        self.sheets = None  # list of worksheets, set by get_sheets()

    def login(self, credentials_google: str):
        """Authorize against the Google API with a service-account JSON key file.

        :param credentials_google: path to the Google API credentials JSON file.
        """
        credentials = ServiceAccountCredentials.from_json_keyfile_name(
            credentials_google, self.scope)
        self.client = gspread.authorize(credentials)

    def get_sheets(self, gspread_url: str):
        """Open the spreadsheet at *gspread_url* and cache all its worksheets."""
        wks = self.client.open_by_url(gspread_url)
        self.sheets = wks.worksheets()

    @staticmethod
    def _sheet_to_df(sheet):
        """Read one worksheet into a DataFrame, using its first row as header."""
        data = sheet.get_all_values()
        header = data.pop(0)
        return pd.DataFrame(data, columns=header)

    def merge_sheets(self):
        """Concatenate all cached worksheets into a single DataFrame.

        :return: a DataFrame (outer-joined on columns when there are multiple
            sheets), or None when no sheets have been loaded.
        """
        # Fix: an empty sheet list previously fell through to an else branch
        # that printed a misleading "must be a list" message; treat it the
        # same as "never loaded".
        if not self.sheets:
            print('No sheets are found!')
            df = None
        elif len(self.sheets) == 1:
            df = self._sheet_to_df(self.sheets[0])
        else:
            # Read every sheet, then outer-join so differing headers survive.
            frames = [self._sheet_to_df(s) for s in self.sheets]
            df = pd.concat(frames, axis=0, join='outer', sort=False)

        if df is not None:
            print("Columns: ", df.columns)
            print("{} Rows x {} Columns".format(df.shape[0], df.shape[1]))
        return df
| yenlow/utils | apis/google.py | google.py | py | 2,442 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "oauth2client.service_account.ServiceAccountCredentials",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "gspread.auth... |
35970918283 | from __future__ import annotations
__all__: list[str] = []
import argparse
import subprocess
import sys
import cmn
class _LintReturnCodes(cmn.ReturnCodes):
    """Return codes that can be received from pylint.

    Pylint's exit status is a bit-mask (error=2, warning=4, refactor=8,
    convention=16, usage error=32; 1 is reserved for fatal errors), so the
    combined members below enumerate every possible OR-combination.
    """
    SUCCESS = 0
    # Error code 1 means a fatal error was hit
    ERROR = 2
    WARNING = 4
    ERROR_WARNING = 6
    REFACTOR = 8
    ERROR_REFACTOR = 10
    WARNING_REFACTOR = 12
    ERROR_WARNING_REFACTOR = 14
    CONVENTION = 16
    ERROR_CONVENTION = 18
    WARNING_CONVENTION = 20
    ERROR_WARNING_CONVENTION = 22
    REFACTOR_CONVENTION = 24
    ERROR_REFACTOR_CONVENTION = 26
    WARNING_REFACTOR_CONVENTION = 28
    ERROR_WARNING_REFACTOR_CONVENTION = 30
    USAGE_ERROR = 32
    # Not emitted by pylint itself: used by this wrapper when the pylint
    # executable/module cannot be found.
    COMMAND_NOT_FOUND = 200
def _run_lint(args: argparse.Namespace) -> int:
    """Runs pylint on python files in workspace.

    :param args: namespace object with args to run lint with.

    :return: return code from CLI.
    """
    rc = _LintReturnCodes.SUCCESS

    include_files = cmn.get_python_files(args.untracked_files)
    cmd = [cmn.which_python(), "-m", "pylint"] + list(include_files)

    try:
        subprocess.run(cmd, check=True)
    except FileNotFoundError as exc:
        # Fix: the original compared error codes with `is`, an identity check
        # that only works by accident for CPython's cached small ints; compare
        # by value instead.
        if exc.errno == cmn.WinErrorCodes.FILE_NOT_FOUND.value:
            cmn.handle_missing_package_error(exc.filename)
            rc = _LintReturnCodes.COMMAND_NOT_FOUND
        else:
            raise
    except subprocess.CalledProcessError as exc:
        cmn.handle_cli_error(_LintReturnCodes, exc.returncode, exc.cmd, exc)
        rc = _LintReturnCodes.USAGE_ERROR

    return rc
def main() -> None:
    """Main function for pylint CLI. Parses and handles CLI input."""
    parser = argparse.ArgumentParser(description="Run pylint on given files.")
    parser.add_argument(
        "-u",
        "--untracked-files",
        action="store_true",
        default=False,
        help="run on files untracked by git",
    )
    parsed = parser.parse_args()
    # Propagate the lint result as the process exit status.
    sys.exit(_run_lint(parsed))
# Allow the module to be executed directly as a CLI entry point.
if __name__ == "__main__":
    main()
| kiransingh99/gurbani_analysis | tools/lint.py | lint.py | py | 2,043 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cmn.ReturnCodes",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "argparse.Namespace",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "cmn.get_python_files",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cmn... |
# Homework answer script: each exercise stores its result in the designated
# object, and the bare name on the following line displays it (REPL style).
test = 2+3 # store the answer in the designated object `test`
test # last line: type the designated object name so it is displayed
import random
# Draw 12 random integers in [0, 100].
x=[random.randint(0,100) for i in range(0,12)]
x
# First element converted to a string.
x0_str=str(x[0])
x0_str
# Every element converted to a string.
x_str=[str(x[i]) for i in range(0,len(x))]
x_str
# Whether the 7th element (index 6) is below 50.
x6_logi=x[6]<50
x6_logi
# Element-wise "less than 50" flags.
x_logi=[x[i]<50 for i in range(0,len(x))]
x_logi
# Count of elements that are NOT below 50.
num_false=x_logi.count(False)
num_false
import pandas as pd
# Taiwanese government open data: registered companies (CSV over HTTP).
df_business=pd.read_csv("http://data.gcis.nat.gov.tw/od/file?oid=340B4FDD-880E-4287-9289-F32782F792B8")
dict_business=df_business.to_dict()
# Company addresses; count those located in Taoyuan City.
address=list(dict_business['公司所在地'].values())
num_taoyuan=["桃園市" in address[i] for i in range(0,len(address))].count(True)
num_taoyuan
# Registered capital; count companies capitalized above 500,000.
capital=list(dict_business['資本額'].values())
logi_largeCapital=[capital[i]>500000 for i in range(0,len(capital))]
num_largeCapital=logi_largeCapital.count(True)
num_largeCapital
import requests
# Ministry of Culture open data: dance performance listings (JSON API).
response=requests.get("https://cloud.culture.tw/frontsite/trans/SearchShowAction.do?method=doFindTypeJ&category=3")
danceInfo=response.json()
numDance=len(danceInfo)
numDance
# Title / location / time of the first event's first showing.
title1=danceInfo[0]['title']
title1
local1=danceInfo[0]['showInfo'][0]['location']
local1
time1=danceInfo[0]['showInfo'][0]['time']
time1
## Solution 1: when showInfo is not unique, only each record's first showInfo entry is considered
danceInfoList=[{
    'title': danceInfo[i]['title'],
    'time': danceInfo[i]['showInfo'][0]['time'],
    'location': danceInfo[i]['showInfo'][0]['location']
} for i in range(0,len(danceInfo))]
danceInfoList
## Solution 2: flatten every showInfo entry of every record
danceInfoList2=list([])
for i in range(len(danceInfo)):
    title_i=danceInfo[i]['title']
    for j in range(len(danceInfo[i]['showInfo'])):
        time_ij=danceInfo[i]['showInfo'][j]['time']
        location_ij=danceInfo[i]['showInfo'][j]['location']
        danceInfoList2.append({
            'title': title_i,
            'time': time_ij,
            'location': location_ij
        })
## Solution 1: format a display string per event (first showInfo only)
danceInfoStr=['【{title}】將於{time}在{location}演出'.format(
    title=danceInfoList[i]['title'],
    time=danceInfoList[i]['time'],
    location=danceInfoList[i]['location']) for i in range(0,len(danceInfoList))]
danceInfoStr
## Solution 2: format a display string per flattened showing
danceInfoStr2=['【{title}】將於{time}在{location}演出'.format(
    title=danceInfoList2[i]['title'],
    time=danceInfoList2[i]['time'],
    location=danceInfoList2[i]['location']) for i in range(0,len(danceInfoList2))]
danceInfoStr2
| godgodgod11101/course_mathEcon_practice_1081 | hw1_ans.py | hw1_ans.py | py | 2,367 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "random.randint",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
}
] |
43247812084 | import subprocess
import os
import shutil
import pytest
# Scratch directory (created/destroyed per test by the fixture below) and the
# output paths the generator under test is asked to write into.
TEMP_DIRECTORY = os.path.join(os.path.dirname(__file__), '..', 'tmp')
TEMP_HEADER = os.path.join(TEMP_DIRECTORY, 'header.h')
TEMP_SOURCE = os.path.join(TEMP_DIRECTORY, 'source.c')
def set_up():
    """Create the scratch directory used by the test outputs."""
    os.mkdir(TEMP_DIRECTORY)
def tear_down():
    """Remove the scratch directory and everything generated into it."""
    shutil.rmtree(TEMP_DIRECTORY)
@pytest.fixture(autouse=True)
def run_around_tests():
    """Autouse fixture: fresh scratch directory before each test, cleanup after."""
    set_up()
    yield
    tear_down()
def read_file_content(filepath: str) -> str:
    """Return the entire text content of the file at *filepath*."""
    with open(filepath, 'r') as handle:
        content = handle.read()
    return content
def test_integration():
    """End-to-end check: the CLI writes mock files matching the references."""
    # given: the example input header and the expected generated artifacts
    resource_dir = os.path.join(os.path.dirname(__file__), 'resource')
    input_path = os.path.join(resource_dir, 'example_header.h')
    expected_header = os.path.join(resource_dir, 'example_mock.h')
    expected_source = os.path.join(resource_dir, 'example_mock.c')

    # when: the generator is invoked through its CLI entry point
    command = [
        'python', '-m', 'c_mock_generator.generate_mock',
        '-i', input_path,
        '-oh', TEMP_HEADER,
        '-oc', TEMP_SOURCE,
    ]
    subprocess.run(command, check=True)

    # then: both outputs exist and match the references byte-for-byte
    assert os.path.isfile(TEMP_HEADER)
    assert os.path.isfile(TEMP_SOURCE)
    assert read_file_content(TEMP_HEADER) == read_file_content(expected_header)
    assert read_file_content(TEMP_SOURCE) == read_file_content(expected_source)
| BjoernLange/C-Mock-Generator | tests/generate_mock_integration_test.py | generate_mock_integration_test.py | py | 1,290 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_numbe... |
28838106101 | import numpy as np
try:
    from math import prod
except ImportError:
    # Python < 3.8 fallback: math.prod does not exist yet.
    # Fix: the original fallback referenced operator.mul without ever
    # importing `operator`, so calling prod() raised NameError; it also used
    # a bare `except:` that swallowed unrelated errors.
    import operator
    from functools import reduce

    def prod(iterable):
        """Return the product of all elements of *iterable* (1 when empty)."""
        return reduce(operator.mul, iterable, 1)
import zipfile
import pickle
import sys
import ast
import re
from fickling.pickle import Pickled
# ast.unparse only exists on Python >= 3.9; older interpreters fall back to
# the third-party astunparse package, which exposes the same callable.
if sys.version_info >= (3, 9):
    from ast import unparse
else:
    from astunparse import unparse
# Set to True to print verbose tracing while parsing the pickle program.
NO_PICKLE_DEBUG = False
### Unpickling import:
def my_unpickle(fb0):
    """Unpickle a torch checkpoint's data.pkl without importing torch.

    Torch tensor-rebuild callables are replaced with hooks that allocate
    zero-filled numpy arrays and record where each storage file's bytes
    should later be copied.

    :param fb0: binary file-like object positioned at the pickle stream.
    :return: (unpickled object, key_prelookup) where key_prelookup maps a
        storage key to a list of (storage_type, obj_size, ndarray, shape,
        strides) tuples whose arrays still need their data filled in.
    """
    # storage key -> list of placeholder-array records, filled in by callers.
    key_prelookup = {}

    class HackTensor:
        """Stand-in for torch._utils._rebuild_tensor_v2."""
        def __new__(cls, *args):
            #print(args)
            # args[0] is the persistent-load tuple; args[2] is the shape.
            ident, storage_type, obj_key, location, obj_size = args[0][0:5]
            assert ident == 'storage'

            assert prod(args[2]) == obj_size
            # Zero-filled placeholder; real bytes are copied in later by the
            # caller from the zip entry named after obj_key.
            ret = np.zeros(args[2], dtype=storage_type)
            if obj_key not in key_prelookup:
                key_prelookup[obj_key] = []
            key_prelookup[obj_key].append((storage_type, obj_size, ret, args[2], args[3]))
            #print(f"File: {obj_key}, references: {len(key_prelookup[obj_key])}, size: {args[2]}, storage_type: {storage_type}")
            return ret

    class HackParameter:
        """Stand-in for torch._utils._rebuild_parameter; parameters are dropped."""
        def __new__(cls, *args):
            #print(args)
            pass

    class Dummy:
        """Placeholder for any class the unpickler cannot resolve."""
        pass

    class MyPickle(pickle.Unpickler):
        def find_class(self, module, name):
            #print(module, name)
            # Map torch storage types onto the equivalent numpy dtypes.
            if name == 'FloatStorage':
                return np.float32
            if name == 'LongStorage':
                return np.int64
            if name == 'HalfStorage':
                return np.float16
            if module == "torch._utils":
                if name == "_rebuild_tensor_v2":
                    return HackTensor
                elif name == "_rebuild_parameter":
                    return HackParameter
                # NOTE(review): other torch._utils names fall through and
                # return None implicitly — presumably never hit in practice;
                # confirm against real checkpoints.
            else:
                try:
                    return pickle.Unpickler.find_class(self, module, name)
                except Exception:
                    # Anything unresolvable becomes an inert Dummy class.
                    return Dummy

        def persistent_load(self, pid):
            # Hand the raw persistent id straight to HackTensor above.
            return pid

    return MyPickle(fb0).load(), key_prelookup
def fake_torch_load_zipped(fb0, load_weights=True):
    """Load a zip-format torch checkpoint without importing torch.

    :param fb0: path or file-like object for the zipped checkpoint.
    :param load_weights: when False, only the structure is unpickled and the
        tensors remain zero-filled placeholders (faster, less I/O).
    :return: the unpickled checkpoint object, with numpy arrays for tensors.
    :raises ValueError: if the archive contains no ``data.pkl`` entry.
    """
    with zipfile.ZipFile(fb0, 'r') as myzip:
        folder_name = [a for a in myzip.namelist() if a.endswith("/data.pkl")]
        if len(folder_name) == 0:
            # Fix: message previously read "Looke like ..."; now matches the
            # sibling extract_weights_from_checkpoint.
            raise ValueError("Looks like the checkpoints file is in the wrong format")
        folder_name = folder_name[0].replace("/data.pkl", "").replace("\\data.pkl", "")

        with myzip.open(folder_name + '/data.pkl') as myfile:
            ret = my_unpickle(myfile)

        if load_weights:
            # Copy each storage file's raw bytes into every placeholder array
            # that references it (see my_unpickle's key_prelookup).
            for k, v_arr in myzip.NameToInfo and ret[1].items() if False else ret[1].items():
                with myzip.open(folder_name + f'/data/{k}') as myfile:
                    file_data = myfile.read()
                for v in v_arr:
                    if v[2].dtype == "object":
                        print(f"issue assigning object on {k}")
                        continue
                    np.copyto(v[2], np.frombuffer(file_data, v[2].dtype).reshape(v[3]))
    return ret[0]
### No-unpickling import:
def extract_weights_from_checkpoint(fb0):
    """Extract a checkpoint's state_dict without unpickling any code.

    Instead of executing the pickle, the data.pkl program is statically
    analyzed (see examine_pickle) and each tensor is rebuilt directly from
    its storage file in the zip — safe for untrusted checkpoints.

    :param fb0: path or file-like object for the zipped checkpoint.
    :return: dict of the form {'state_dict': {key: array, ...}}.
    :raises ValueError: if the archive contains no ``data.pkl`` entry.
    """
    torch_weights = {}
    torch_weights['state_dict'] = {}
    with zipfile.ZipFile(fb0, 'r') as myzip:
        folder_name = [a for a in myzip.namelist() if a.endswith("/data.pkl")]
        if len(folder_name)== 0:
            raise ValueError("Looks like the checkpoints file is in the wrong format")
        folder_name = folder_name[0].replace("/data.pkl" , "").replace("\\data.pkl" , "")
        with myzip.open(folder_name+'/data.pkl') as myfile:
            load_instructions = examine_pickle(myfile)
        # Materialize each instruction from its backing storage file.
        for sd_key,load_instruction in load_instructions.items():
            with myzip.open(folder_name + f'/data/{load_instruction.obj_key}') as myfile:
                if (load_instruction.load_from_file_buffer(myfile)):
                    torch_weights['state_dict'][sd_key] = load_instruction.get_data()
        # Remnant of the abandoned special-instructions experiment (see
        # examine_pickle's return_special flag):
        #if len(special_instructions) > 0:
        #    torch_weights['state_dict']['_metadata'] = {}
        #    for sd_key,special in special_instructions.items():
        #        torch_weights['state_dict']['_metadata'][sd_key] = special
    return torch_weights
def examine_pickle(fb0, return_special=False):
    """Statically parse a checkpoint's data.pkl into load instructions.

    The pickle program is decompiled to python-ish text via fickling and
    scanned line-by-line — nothing from the pickle is ever executed.

    :param fb0: binary file-like object opened on data.pkl.
    :param return_special: also collect per-key metadata dicts (e.g.
        ``{'version': 1}``).  A rabbit hole chased while debugging a model
        with 1300 metadata statements; if it is still unused by 2023, strip
        it out.
    :return: dict of state_dict key -> LoadInstruction; when *return_special*
        is True, a (instructions, special_instructions) tuple instead.
    """
    decompiled = unparse(Pickled.load(fb0).ast).splitlines()

    ## Lines we care about:
    ## 1: defines a data file and what kind of data is in it
    ##    _var1 = _rebuild_tensor_v2(UNPICKLER.persistent_load(('storage', HalfStorage, '0', 'cpu', 11520)), 0, (320, 4, 3, 3), (36, 9, 3, 1), False, _var0)
    ## 2: a massive line assigning the data variables to dictionary entries
    ##    _var2262 = {'model.diffusion_model.input_blocks.0.0.weight': _var1, ...}
    ## 3: same, but via .update()
    ##    _var2262.update({'cond_stage_model...layer_norm2.bias': _var2001, ...})
    ## 4: pruned models may combine 2/3 into the final `result` variable
    ##    result = {'model.diffusion_model.input_blocks.0.0.weight': _var1, ...}

    # Raw strings so the \d escapes are unambiguous.
    re_rebuild = re.compile(r'^_var\d+ = _rebuild_tensor_v2\(UNPICKLER\.persistent_load\(\(.*\)$')
    re_assign = re.compile(r'^_var\d+ = \{.*\}$')
    re_update = re.compile(r'^_var\d+\.update\(\{.*\}\)$')
    re_ordered_dict = re.compile(r'^_var\d+ = OrderedDict\(\)$')
    re_result = re.compile(r'^result = \{.*\}$')

    load_instructions = {}
    # Fix: return_special was never forwarded, so special_instructions always
    # came back empty even when the caller asked for them.
    assign_instructions = AssignInstructions(collect_special=return_special)

    for line in decompiled:
        line = line.strip()
        if re_rebuild.match(line):
            variable_name = line.split(' = ', 1)[0]
            load_instructions[variable_name] = LoadInstruction(line, variable_name)
        elif re_assign.match(line) or re_result.match(line):
            assign_instructions.parse_assign_line(line)
        elif re_update.match(line):
            assign_instructions.parse_update_line(line)
        elif re_ordered_dict.match(line):
            # Bookkeeping line; nothing to extract.
            continue
        elif NO_PICKLE_DEBUG:
            print(f'unmatched line: {line}')

    if NO_PICKLE_DEBUG:
        print(f"Found {len(load_instructions)} load instructions")
    assign_instructions.integrate(load_instructions)

    if return_special:
        return assign_instructions.integrated_instructions, assign_instructions.special_instructions
    return assign_instructions.integrated_instructions
class AssignInstructions:
    """Accumulates "state_dict key -> _varNNN" assignments parsed from the
    decompiled pickle, then resolves them against LoadInstruction objects."""

    def __init__(self, collect_special=False):
        # state_dict key -> fickling variable name (e.g. '_var12').
        self.instructions = {}
        # state_dict key -> metadata dict; only populated when collect_special.
        self.special_instructions = {}
        # state_dict key -> LoadInstruction, built by integrate().
        self.integrated_instructions = {}
        self.collect_special = collect_special;

    def parse_result_line(self, line):
        """Parse a ``result = {...}`` line (same shape as parse_assign_line)."""
        garbage, huge_mess = line.split(' = {', 1)
        assignments = huge_mess.split(', ')
        del huge_mess
        assignments[-1] = assignments[-1].strip('}')
        #compile RE here to avoid doing it every loop iteration:
        re_var = re.compile('^_var\d+$')
        assignment_count = 0
        for a in assignments:
            if self._add_assignment(a, re_var):
                assignment_count = assignment_count + 1
        if NO_PICKLE_DEBUG:
            print(f"Added/merged {assignment_count} assignments. Total of {len(self.instructions)} assignment instructions")

    def parse_assign_line(self, line):
        """Parse a dict-literal assignment line into key/var instructions."""
        # input looks like this:
        # _var2262 = {'model.diffusion_model.input_blocks.0.0.weight': _var1, 'model.diffusion_model.input_blocks.0.0.bias': _var3,\
        #   ...\
        #   'cond_stage_model.transformer.text_model.encoder.layers.3.layer_norm2.weight': _var1999}
        # input looks like the above, but with 'result' in place of _var2262:
        # result = {'model.diffusion_model.input_blocks.0.0.weight': _var1, ... }
        #
        # or also look like:
        # result = {'state_dict': _var2314}
        # ... which will be ignored later
        garbage, huge_mess = line.split(' = {', 1)
        assignments = huge_mess.split(', ')
        del huge_mess
        assignments[-1] = assignments[-1].strip('}')
        #compile RE here to avoid doing it every loop iteration:
        re_var = re.compile('^_var\d+$')
        assignment_count = 0
        for a in assignments:
            if self._add_assignment(a, re_var):
                assignment_count = assignment_count + 1
        if NO_PICKLE_DEBUG:
            print(f"Added/merged {assignment_count} assignments. Total of {len(self.instructions)} assignment instructions")

    def _add_assignment(self, assignment, re_var):
        """Record one "key: value" pair; returns True when a _var assignment
        was stored (metadata dicts go to special_instructions instead)."""
        # assignment can look like this:
        # 'cond_stage_model.transformer.text_model.encoder.layers.3.self_attn.out_proj.weight': _var2009
        # or assignment can look like this:
        # 'embedding_manager.embedder.transformer.text_model.encoder.layers.6.mlp.fc1': {'version': 1}
        sd_key, fickling_var = assignment.split(': ', 1)
        sd_key = sd_key.strip("'")
        # 'state_dict' is a wrapper entry, not a real tensor key — skip it.
        if sd_key != 'state_dict' and re_var.match(fickling_var):
            self.instructions[sd_key] = fickling_var
            return True
        elif self.collect_special:
            # now convert the string "{'version': 1}" into a dictionary {'version': 1}
            entries = fickling_var.split(',')
            special_dict = {}
            for e in entries:
                e = e.strip("{}")
                k, v = e.split(': ')
                k = k.strip("'")
                v = v.strip("'")
                special_dict[k] = v
            self.special_instructions[sd_key] = special_dict
        return False

    def integrate(self, load_instructions):
        """Resolve stored _var names into their LoadInstruction objects."""
        unfound_keys = {}
        for sd_key, fickling_var in self.instructions.items():
            if fickling_var in load_instructions:
                self.integrated_instructions[sd_key] = load_instructions[fickling_var]
            else:
                if NO_PICKLE_DEBUG:
                    print(f"no load instruction found for {sd_key}")
        if NO_PICKLE_DEBUG:
            print(f"Have {len(self.integrated_instructions)} integrated load/assignment instructions")

    def parse_update_line(self, line):
        """Parse a ``_varN.update({...})`` line into key/var instructions."""
        # input looks like:
        # _var2262.update({'cond_stage_model.transformer.text_model.encoder.layers.3.layer_norm2.bias': _var2001,\
        #   'cond_stage_model.transformer.text_model.encoder.layers.4.self_attn.k_proj.weight': _var2003,\
        #   ...\
        #   'cond_stage_model.transformer.text_model.final_layer_norm.bias': _var2261})
        garbage, huge_mess = line.split('({', 1)
        updates = huge_mess.split(', ')
        del huge_mess
        updates[-1] = updates[-1].strip('})')
        re_var = re.compile('^_var\d+$')
        update_count = 0
        for u in updates:
            if self._add_assignment(u, re_var):
                update_count = update_count + 1
        if NO_PICKLE_DEBUG:
            print(f"Added/merged {update_count} updates. Total of {len(self.instructions)} assignment instructions")
class LoadInstruction:
    """Parses one decompiled `_rebuild_tensor_v2(...)` call and holds its
    metadata (storage dtype, archive key, element count, shape, stride)
    plus a zeroed numpy buffer that load_from_file_buffer() later fills
    from the checkpoint archive.

    Fixes vs. original: removed the unused `prod_size` local and raised
    ValueError (still an Exception subclass, so existing handlers keep
    working) for unknown storage types.
    """

    def __init__(self, instruction_string, variable_name, extra_debugging = False):
        # False doubles as the "not parsed yet" sentinel for every field.
        self.ident = False            # always 'storage' after parsing (asserted)
        self.storage_type = False     # numpy dtype after parsing
        self.obj_key = False          # key of the storage blob inside the checkpoint zip
        self.location = False #unused
        self.obj_size = False         # element count of the backing storage
        self.stride = False #unused
        self.data = False             # numpy array allocated in parse_instruction()
        self.variable_name = variable_name
        self.extra_debugging = extra_debugging
        self.parse_instruction(instruction_string)

    def parse_instruction(self, instruction_string):
        """Extract storage metadata and tensor shape from the call text.

        ## this is the API def for _rebuild_tensor_v2:
        ## _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks)
        #
        ## sample instruction from decompiled pickle:
        # _rebuild_tensor_v2(UNPICKLER.persistent_load(('storage', HalfStorage, '0', 'cpu', 11520)), 0, (320, 4, 3, 3), (36, 9, 3, 1), False, _var0)
        #
        # The comments below show the state of each string as if it started
        # with the sample above.
        """
        if self.extra_debugging:
            print(f"input: '{instruction_string}'")
        garbage, storage_etc = instruction_string.split('((', 1)
        # storage_etc = 'storage', HalfStorage, '0', 'cpu', 11520)), 0, (320, 4, 3, 3), (36, 9, 3, 1), False, _var0)
        if self.extra_debugging:
            print("storage_etc, reference: ''storage', HalfStorage, '0', 'cpu', 11520)), 0, (320, 4, 3, 3), (36, 9, 3, 1), False, _var0)'")
            print(f"storage_etc, actual:    '{storage_etc}'\n")
        storage, etc = storage_etc.split('))', 1)
        # storage = 'storage', HalfStorage, '0', 'cpu', 11520
        # etc = , 0, (320, 4, 3, 3), (36, 9, 3, 1), False, _var0)
        if self.extra_debugging:
            print("storage, reference: ''storage', HalfStorage, '0', 'cpu', 11520'")
            print(f"storage, actual:    '{storage}'\n")
            print("etc, reference: ', 0, (320, 4, 3, 3), (36, 9, 3, 1), False, _var0)'")
            print(f"etc, actual:    '{etc}'\n")
        ## call below maps to: ('storage', HalfStorage, '0', 'cpu', 11520)
        self.ident, self.storage_type, self.obj_key, self.location, self.obj_size = storage.split(', ', 4)
        self.ident = self.ident.strip("'")
        self.obj_key = self.obj_key.strip("'")
        self.location = self.location.strip("'")
        self.obj_size = int(self.obj_size)
        self.storage_type = self._torch_to_numpy(self.storage_type)
        if self.extra_debugging:
            print(f"{self.ident}, {self.obj_key}, {self.location}, {self.obj_size}, {self.storage_type}")
        assert (self.ident == 'storage')
        garbage, etc = etc.split(', (', 1)
        # etc = 320, 4, 3, 3), (36, 9, 3, 1), False, _var0)
        if self.extra_debugging:
            print("etc, reference: '320, 4, 3, 3), (36, 9, 3, 1), False, _var0)'")
            print(f"etc, actual:    '{etc}'\n")
        size, stride, garbage = etc.split('), ', 2)
        # size = 320, 4, 3, 3
        # stride = (36, 9, 3, 1
        stride = stride.strip('(,')
        size = size.strip(',')
        if (size == ''):
            # rare case where there is an empty tuple. SDv1.4 has two of these.
            self.size_tuple = ()
        else:
            self.size_tuple = tuple(map(int, size.split(', ')))
        if (stride == ''):
            self.stride = ()
        else:
            self.stride = tuple(map(int, stride.split(', ')))
        if self.extra_debugging:
            print(f"size: {self.size_tuple}, stride: {self.stride}")
        # Sanity check: the shape must account for every stored element.
        assert prod(self.size_tuple) == self.obj_size
        # zero out the data; the real values arrive via load_from_file_buffer().
        self.data = np.zeros(self.size_tuple, dtype=self.storage_type)

    @staticmethod
    def _torch_to_numpy(storage_type):
        """Map a torch storage class name to the equivalent numpy dtype."""
        if storage_type == 'FloatStorage':
            return np.float32
        if storage_type == 'HalfStorage':
            return np.float16
        if storage_type == 'LongStorage':
            return np.int64
        if storage_type == 'IntStorage':
            return np.int32
        raise ValueError("Storage type not defined!")

    def load_from_file_buffer(self, fb):
        """Fill self.data with the raw bytes read from file buffer `fb`.

        Returns True on success, False when the dtype is 'object'
        (which cannot be reconstructed from a flat byte buffer).
        """
        if self.data.dtype == "object":
            print(f"issue assigning object on {self.obj_key}")
            return False
        else:
            np.copyto(self.data, np.frombuffer(fb.read(), self.data.dtype).reshape(self.size_tuple))
            return True

    def get_data(self):
        """Return the (possibly still zeroed) numpy buffer."""
        return self.data
| divamgupta/diffusionbee-stable-diffusion-ui | backends/model_converter/fake_torch.py | fake_torch.py | py | 15,028 | python | en | code | 11,138 | github-code | 6 | [
{
"api_name": "functools.reduce",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.version_info",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "math.prod",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"li... |
71409709627 | ###############################
####### SETUP (OVERALL) #######
###############################
## Import statements
# Import statements
import os
from flask import Flask, render_template, session, redirect, url_for, flash, request
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, RadioField, ValidationError # Note that you may need to import more here! Check out examples that do what you want to figure out what.
from wtforms.validators import Required, Length # Here, too
from flask_sqlalchemy import SQLAlchemy
import json
import requests
## App setup code
app = Flask(__name__)
app.debug = True
app.use_reloader = True
# NOTE(review): hardcoded secret key -- acceptable for coursework, but a
# deployed app should read this from the environment.
app.config['SECRET_KEY'] = 'pokemonpokemon'
## All app.config values
# Postgres database backing the Name/Height/Weight models below.
app.config["SQLALCHEMY_DATABASE_URI"] = "postgresql://localhost/Midterm-katmazan"
## Provided:
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
## Statements for db setup (and manager setup if using Manager)
db = SQLAlchemy(app)
######################################
######## HELPER FXNS (If any) ########
######################################
##################
##### MODELS #####
##################
class Name(db.Model):
    """A looked-up Pokemon; links to its Height and Weight records."""
    __tablename__ = "names"
    id = db.Column(db.Integer,primary_key=True)
    name = db.Column(db.String)
    # Foreign keys to the Height/Weight rows created for this Pokemon.
    height_value = db.Column(db.Integer, db.ForeignKey('heights.id'))
    weight_value = db.Column(db.Integer, db.ForeignKey('weights.id'))
    def __repr__(self):
        return ('{' + str(self.name) + '} | ID: {' + str(self.id) + '}')
class Height(db.Model):
    """Height record for one Pokemon, as reported by the PokeAPI."""
    __tablename__ = 'heights'
    id = db.Column(db.Integer, primary_key=True)
    poke_height = db.Column(db.Integer)  # raw PokeAPI 'height' value
    poke_name = db.Column(db.String)
    names = db.relationship('Name',backref='Height')
class Weight(db.Model):
    """Weight record for one Pokemon, as reported by the PokeAPI."""
    __tablename__ = 'weights'
    id = db.Column(db.Integer,primary_key=True)
    poke_name = db.Column(db.String)
    poke_weight = db.Column(db.Integer)  # raw PokeAPI 'weight' value
    names = db.relationship('Name',backref='Weight')
###################
###### FORMS ######
###################
class NameForm(FlaskForm):
    """Form for submitting a Pokemon name to look up."""

    name = StringField("Pokemon_name", validators=[Required()])
    submit = SubmitField()

    def validate_name(self, field):
        """Reject names too short to be a real Pokemon."""
        if not len(field.data) > 1:
            raise ValidationError('Pokemon does not exist')
class FavoriteForm(FlaskForm):
    """Form for naming a favorite Pokemon and giving it a nickname."""

    fav_name = StringField("Add one of your favorite Pokemon:")
    nick_name = StringField("Give your favorite a nickname:")
    submit = SubmitField()

    def validate_nick_name(self, field):
        """Require a nickname ending in 'y'.

        Fix: the original indexed field.data[-1] directly, which raised
        IndexError on an empty submission (the field has no Required
        validator) instead of reporting a validation error.
        """
        if not field.data or field.data[-1] != 'y':
            raise ValidationError("Your nickname must end in y!")
class RankForm(FlaskForm):
name = StringField('Enter a Pokemon name:', validators = [Required()])
rate = RadioField('Rate this pokemon in terms of how powerful you think it is', choices = [('1', '1 (low)'), ('2', '2'), ('3', '3 (high)')])
submit = SubmitField('Submit')
#######################
###### VIEW FXNS ######
#######################
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page, preserving the 404 status code."""
    return render_template('404_error.html'), 404
@app.route('/', methods=['GET', 'POST'])
def home():
    """Landing page: submit a Pokemon name; store its height and weight.

    On a valid submission, if the name is not already in the database the
    Pokemon's height and weight are fetched from the PokeAPI and persisted
    (Height and Weight rows first, so their ids exist for the Name row),
    then the user is redirected to the full listing.  Duplicate names are
    only skipped for the API/insert step; the redirect still happens.
    """
    form = NameForm()
    if form.validate_on_submit():
        poke_name = form.name.data
        pokemon = Name.query.filter_by(name=poke_name).first()
        # Only query the API and insert rows for names we have not seen yet.
        if not pokemon:
            response = requests.get(
                'http://pokeapi.co/api/v2/pokemon/' + str(poke_name) + '/')
            # Parse the JSON payload once (the original re-parsed it per field).
            payload = json.loads(response.text)
            # NOTE(review): a non-200 response (unknown Pokemon) will raise
            # here; consider checking response.status_code first.
            new_height = Height(poke_height=int(payload['height']),
                                poke_name=poke_name)
            db.session.add(new_height)
            db.session.commit()
            new_weight = Weight(poke_weight=int(payload['weight']),
                                poke_name=poke_name)
            db.session.add(new_weight)
            db.session.commit()
            # Parents committed above so their autogenerated ids are available.
            newname = Name(name=poke_name, height_value=new_height.id,
                           weight_value=new_weight.id)
            db.session.add(newname)
            db.session.commit()
        return redirect(url_for('all_names'))
    errors = [v for v in form.errors.values()]
    if errors:
        flash("!!!! ERRORS IN FORM SUBMISSION - " + str(errors))
    return render_template('base.html', form=form)
@app.route('/names')
def all_names():
    """List every Pokemon name stored so far."""
    return render_template('name_example.html', names=Name.query.all())
@app.route('/tallest')
def tallest_pokemon():
    """Show the tallest stored Pokemon alongside all height records.

    Fix: the original tracked the maximum in a manual loop and only
    assigned the winning row inside the loop body, raising
    UnboundLocalError when the heights table was empty.  An empty table
    now renders the page with no winner instead of crashing.
    """
    all_heights = Height.query.all()
    if not all_heights:
        return render_template('tallest_pokemon.html', tallest=None,
                               height=0, names=all_heights)
    # max() keeps the first row of a tie, matching the original scan.
    winner = max(all_heights, key=lambda h: h.poke_height)
    return render_template('tallest_pokemon.html', tallest=winner.poke_name,
                           height=winner.poke_height, names=all_heights)
@app.route('/heaviest')
def heaviest_pokemon():
    """Show the heaviest stored Pokemon alongside all weight records.

    Fix: same UnboundLocalError as tallest_pokemon -- the winning row was
    only assigned inside the loop, crashing on an empty weights table.
    """
    all_weights = Weight.query.all()
    if not all_weights:
        return render_template('heaviest.html', heaviest=None,
                               weight=0, names=all_weights)
    # max() keeps the first row of a tie, matching the original scan.
    winner = max(all_weights, key=lambda w: w.poke_weight)
    return render_template('heaviest.html', heaviest=winner.poke_name,
                           weight=winner.poke_weight, names=all_weights)
@app.route('/favorite_pokemon')
def favorite_form():
    """Render the favorite-Pokemon entry form."""
    fav_form = FavoriteForm()
    return render_template('favorite_form.html', form=fav_form)
@app.route('/fav_answers',methods=["GET","POST"])
def show_favs():
    """Display the submitted favorite Pokemon and its nickname."""
    form = FavoriteForm()
    # NOTE(review): this checks request.args (GET query string) but reads
    # the values through the FlaskForm, which normally populates from POST
    # form data -- confirm the submitting form uses method=GET.
    if request.args:
        fav_name = form.fav_name.data
        nickname = form.nick_name.data
        return render_template('fav_results.html',fav_name=fav_name, nick_name=nickname)
    # No query data: surface validation errors and return to the form.
    flash(form.errors)
    return redirect(url_for('favorite_form'))
## Code to run the application...
# Put the code to do so here!
# NOTE: Make sure you include the code you need to initialize the database structure when you run the application!
# Entry point: create any missing tables, then start the Flask dev server.
if __name__ == '__main__':
    db.create_all() # Will create any defined models when you run the application
    app.run(use_reloader=True,debug=True) # The usual
| katmazan/SI364midtermKatmazan | SI364midterm.py | SI364midterm.py | py | 6,662 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "flask_wtf.FlaskForm",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "wtforms.S... |
36273427497 | from collections import namedtuple
import itertools
import torch
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import torch.nn.functional as F
import data_utils
import train_utils
from models import BinaryClassifier, LSTM, CNN
import part2_train_utils
import helpers
##############################################################################
# Settings
##############################################################################
# Run everything on the CPU; set True to move models/batches to the GPU.
CUDA = False
##############################################################################
# Load the dataset
##############################################################################
# Bundles a corpus, its train/dev/test splits, and the shared word
# embeddings + vocabulary used to encode questions.
Data = namedtuple("Data", "corpus train dev test embeddings word_to_index")
data_utils.download_ask_ubuntu_dataset()
EMBEDDINGS, WORD_TO_INDEX = data_utils.load_part2_embeddings()
ASK_UBUNTU_CORPUS = data_utils.load_corpus(WORD_TO_INDEX)
ASK_UBUNTU_TRAIN_DATA = data_utils.load_train_data()
ASK_UBUNTU_DEV_DATA, ASK_UBUNTU_TEST_DATA = data_utils.load_eval_data()
ASK_UBUNTU_DATA = Data(ASK_UBUNTU_CORPUS, ASK_UBUNTU_TRAIN_DATA,\
                       ASK_UBUNTU_DEV_DATA, ASK_UBUNTU_TEST_DATA,\
                       EMBEDDINGS, WORD_TO_INDEX)
# The Android corpus is the transfer target and has no training split,
# hence train=None.
data_utils.download_android_dataset()
ANDROID_CORPUS = data_utils.load_android_corpus(WORD_TO_INDEX)
ANDROID_DEV_DATA, ANDROID_TEST_DATA = data_utils.load_android_eval_data()
ANDROID_DATA = Data(ANDROID_CORPUS, None,\
                    ANDROID_DEV_DATA, ANDROID_TEST_DATA,\
                    EMBEDDINGS, WORD_TO_INDEX)
##############################################################################
# Train and evaluate a baseline TFIDF model
##############################################################################
# TF-IDF baseline: vectorize each Android question (title + body) and
# score duplicate candidates on both the dev and test splits.
TOKENIZED_ANDROID_CORPUS = data_utils.load_tokenized_android_corpus()
TOKENIZED_ANDROID_CORPUS = [
    entry.title + entry.body for entry in TOKENIZED_ANDROID_CORPUS.values()
]
TFIDF_VECTORIZER = TfidfVectorizer()
TFIDF_VECTORS = TFIDF_VECTORIZER.fit_transform(TOKENIZED_ANDROID_CORPUS)
# Map each question id to its row in the TF-IDF matrix.
# NOTE(review): assumes the tokenized corpus dict and ANDROID_DATA.corpus
# iterate in the same order -- confirm both preserve insertion order.
QUERY_TO_INDEX = dict(zip(ANDROID_DATA.corpus.keys(), range(len(ANDROID_DATA.corpus))))
AUC = helpers.evaluate_tfidf_auc(ANDROID_DATA.dev, TFIDF_VECTORS, QUERY_TO_INDEX)
print("AUC", AUC)
AUC = helpers.evaluate_tfidf_auc(ANDROID_DATA.test, TFIDF_VECTORS, QUERY_TO_INDEX)
print("AUC", AUC)
##############################################################################
# Train models by direct transfer and evaluate
##############################################################################
# Direct transfer: train the LSTM encoder on AskUbuntu and evaluate on
# Android without any domain adaptation, sweeping the grid below.
RESULTS = []
MARGINS = [0.2]
MAX_EPOCHS = 50
BATCH_SIZE = 32
FILTER_WIDTHS = [3]        # CNN settings -- unused by the LSTM sweep below
POOL_METHOD = "average"
FEATURE_DIMS = [667]
DROPOUT_PS = [0.1]
NUM_HIDDEN_UNITS = [240]
LEARNING_RATES = [1E-3]
MODELS = []
LSTM_HYPERPARAMETERS = itertools.product(MARGINS, NUM_HIDDEN_UNITS, LEARNING_RATES)
for margin, num_hidden_units, learning_rate in LSTM_HYPERPARAMETERS:
    model = LSTM(EMBEDDINGS, num_hidden_units, POOL_METHOD, CUDA)
    criterion = helpers.MaxMarginLoss(margin)
    # Only optimize trainable parameters (frozen embeddings are excluded).
    parameters = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = torch.optim.Adam(parameters, lr=learning_rate)
    # NOTE(review): the returned metric is bound to `mrr` but written to the
    # filename as "auc=" -- confirm which metric train_model reports here.
    model, mrr = train_utils.train_model(model, optimizer, criterion, ASK_UBUNTU_DATA, \
                                         MAX_EPOCHS, BATCH_SIZE, CUDA, eval_data=ANDROID_DATA)
    # Checkpoint named after the hyperparameters and the achieved score.
    torch.save(model.state_dict(), "./lstm_" +
               str(margin) + "_" +
               str(num_hidden_units) + "_" +
               str(learning_rate) + "_" +
               "auc=" + str(mrr))
    MODELS.append((mrr, margin, num_hidden_units, learning_rate))
##############################################################################
# Train models by adverserial domain adaptation and evaluate
##############################################################################
# Adversarial domain adaptation: the encoder is trained on AskUbuntu
# retrieval while a discriminator tries to tell the two domains apart.
MAX_EPOCHS = 50
BATCH_SIZE = 32
MARGINS = [0.2]
FILTER_WIDTH = 2
POOL_METHOD = "average"
FEATURE_DIM = 240
DIS_NUM_HIDDEN_UNITS = [150, 200]
# NOTE(review): the negative learning rate presumably implements the
# gradient-reversal trick -- confirm part2_train_utils expects this sign.
DIS_LEARNING_RATES = [-1E-3]
ENC_LEARNING_RATE = 1E-3
DIS_TRADE_OFF_RATES = [1E-7, 1E-8, 1E-9]   # weight of the adversarial loss term
DIS_HYPERPARAMETERS = itertools.product(DIS_LEARNING_RATES, DIS_NUM_HIDDEN_UNITS, DIS_TRADE_OFF_RATES, MARGINS)
for dis_lr, dis_hidden_units, trade_off, margin in DIS_HYPERPARAMETERS:
    enc_model = LSTM(EMBEDDINGS, FEATURE_DIM, POOL_METHOD, CUDA)
    dis_model = BinaryClassifier(FEATURE_DIM, dis_hidden_units)
    model, auc = part2_train_utils.train_model(
        enc_model,
        dis_model,
        trade_off,
        ASK_UBUNTU_DATA,
        ANDROID_DATA,
        MAX_EPOCHS,
        BATCH_SIZE,
        ENC_LEARNING_RATE,
        dis_lr,
        margin,
        CUDA,
    )
    print("max auc", auc)
    # Checkpoint named after the hyperparameters and the best Android AUC.
    torch.save(model.state_dict(), "./lstm_" +\
               str(margin) + "_" +\
               str(dis_hidden_units) + "_" +\
               str(trade_off) + "_" +\
               "auc=" + str(auc))
| timt51/question_retrieval | part2.py | part2.py | py | 5,017 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.namedtuple",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "data_utils.download_ask_ubuntu_dataset",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "data_utils.load_part2_embeddings",
"line_number": 26,
"usage_type": "call"
... |
3084393112 | import numpy as np
import pandas as pd
import math
import json
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
import optuna
def create_data(f1, f2, A1, A2, sigma=0.02, ts=1000):
    """Synthesize a noisy two-tone sinusoidal test signal.

    Each sample t is
        A1*sin(f1*t + theta1 + n1) + A2*sin(f2*t + theta2 + n2) + e
    where n1, n2 are per-sample Gaussian phase jitters (std 0.05) and e is
    Gaussian observation noise of std `sigma`.

    Args:
        f1, f2: angular frequencies of the two components (rad/sample).
        A1, A2: component amplitudes.
        sigma: std of the additive observation noise.
        ts: number of samples (new parameter; default 1000 preserves the
            original hard-coded length).

    Returns:
        np.ndarray of shape (ts,).
    """
    theta1 = 1.4  # fixed initial phase, component 1
    theta2 = 1.0  # fixed initial phase, component 2
    outs = []
    for t in range(ts):
        # Draw the jitters in the same order as before so a fixed seed
        # reproduces the original sequence.
        n_f1 = np.random.normal(0.0, 0.05)
        n_f2 = np.random.normal(0.0, 0.05)
        val = (A1 * math.sin(f1 * t + theta1 + n_f1)
               + A2 * math.sin(f2 * t + theta2 + n_f2)
               + np.random.normal(0.0, sigma))
        outs.append(val)
    return np.array(outs)
def sigmoid(x):
    """Logistic function: squashes a real (or array) input into (0, 1)."""
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)
def relu(x):
    """Rectifier: identity for positive inputs, 0.0 otherwise."""
    return x if x > 0 else 0.
### EKF
def predict_phase(x_vec, P_mat, J_s=np.eye(2), dw=np.array([0.01, 0.1]), Q_t=np.ones((2,2))):
    """EKF time update: propagate the phase state and its covariance.

    x_hat = x_vec + dw        (nominal phase increments per step)
    P_hat = J_s P J_s^T + Q_t (state-transition Jacobian + process noise)
    Returns (x_hat, P_hat).
    """
    x_hat = x_vec + dw
    P_hat = J_s @ P_mat @ J_s.T + Q_t
    return x_hat, P_hat
def update_phase(obs, x_hat, P_hat, x_vec, P_mat, w_vec, R_t=np.eye(2)):
    """EKF measurement update with a learned, gated Kalman gain.

    obs: scalar observation; x_hat/P_hat: predicted state/covariance;
    x_vec/P_mat: previous state/covariance; w_vec: 8 learned weights --
    the first 4 build tanh features of the innovation, the last 4 map
    those features to the gating coefficient alpha.
    Returns (new_state, new_covariance, innovation, alpha, gated_gain).
    """
    # Innovation: observation minus the predicted two-component measurement.
    y_error = obs - (np.sin(x_hat[0])+0.3*np.sin(x_hat[1]))
    w_err = np.array([np.tanh(y_error*w_vec[0]), np.tanh(y_error*w_vec[1]), np.tanh(y_error*w_vec[2]), np.tanh(y_error*w_vec[3])])
    # alpha in (0, 1) splits the gain between the two state components.
    alpha = sigmoid(np.dot(w_err, w_vec[4:]))
    J_o = np.array([np.cos(x_hat[0]), 0.3*np.cos(x_hat[1])]) # Jacobian
    # NOTE(review): J_o is 1-D, so the innovation variance is a scalar that
    # broadcasts against the 2x2 R_t -- confirm this is intentional.
    S_t = np.matmul(np.matmul(J_o, P_hat), J_o.T) + R_t
    K_t = np.matmul(np.matmul(P_hat, J_o.T), np.linalg.inv(S_t)) # Kalman Gain
    K_t = K_t*np.array([alpha, 1.-alpha])  # gate each component's gain
    # NOTE(review): the state update starts from x_vec (previous state)
    # rather than the predicted x_hat -- confirm against the intended EKF.
    new_x_vec = x_vec + K_t*y_error
    new_P_mat = np.matmul((np.eye(2) - np.matmul(K_t, J_o)), P_hat)
    return new_x_vec, new_P_mat, y_error, alpha, K_t
# Synthesize the two-tone test signal and run the gated EKF over it,
# dumping per-step diagnostics to JSON for later plotting.
ys = create_data(f1=0.01, f2=0.1, A1=1.0, A2=0.3, sigma=0.05)
# Learned gating weights (selected by an earlier Optuna search); the
# commented line is a previous candidate kept for reference.
# w_dict = {'w1': 0.8654948627671226, 'w2': -1.7444762795695032, 'w3': -1.256158244213108, 'w4': 2.9877172040880846, 'w5': 0.7674940690302532, 'w6': -0.5751565428986629, 'w7': -2.1525316155059886, 'w8': -1.593668210140296}
w_dict = {'w1': 0.8868339845276003, 'w2': -2.4239527390853723, 'w3': 2.5663446991064536, 'w4': -1.835679959314501, 'w5': 2.668697875044799, 'w6': -0.578802425496894, 'w7': -2.3135794565999737, 'w8': -0.9460572459969298}
w1 = w_dict['w1']
w2 = w_dict['w2']
w3 = w_dict['w3']
w4 = w_dict['w4']
w5 = w_dict['w5']
w6 = w_dict['w6']
w7 = w_dict['w7']
w8 = w_dict['w8']
W_1 = np.array([w1, w2, w3, w4, w5, w6, w7, w8])
# Initial EKF state: zero phases, identity covariance.
x_vec = np.array([0.0, 0.0])
P_mat = np.eye(2)
total_err = 0.0
alphas = []
y_errors = []
preds = []
k_gains = []
ttt = 1
# Predict/update over the whole signal (the first sample is skipped).
for _y in ys[1:]:
    x_hat, P_hat = predict_phase(x_vec, P_mat)
    new_x_vec, new_P_mat, y_error, _alpha, K_t = update_phase(_y, x_hat, P_hat, x_vec, P_mat, W_1)
    x_vec = new_x_vec
    P_mat = new_P_mat
    total_err = total_err + np.sqrt(y_error*y_error)
    alphas.append(_alpha)
    y_errors.append(np.abs(y_error))
    preds.append(np.sin(x_vec[0])+0.3*np.sin(x_vec[1]))
    k_gains.append(K_t.tolist())
    # print(ttt, y_error, _alpha)
    ttt = ttt + 1
# Mean absolute innovation over the processed samples.
total_err = total_err/float(len(ys[1:]))
# Persist diagnostics (np.float64 entries are json-serializable as floats).
with open("./data/json/test_ekf_no_alpha5.json", "w") as f:
    out_dict = {
        "k_gain": k_gains,
        # "alphas": alphas,
        "y_errors": y_errors,
        "ys": ys[1:].tolist(),
        "preds": preds
    }
    json.dump(out_dict, f)
# print(alphas)
# print(y_errors)
# print(ys[1:].tolist())
# print(preds)
{
"api_name": "warnings.filterwarnings",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.r... |
29433457016 | #! /usr/bin/env python
#
# Implementation of elliptic curves, for cryptographic applications.
#
# This module doesn't provide any way to choose a random elliptic
# curve, nor to verify that an elliptic curve was chosen randomly,
# because one can simply use NIST's standard curves.
#
# Notes from X9.62-1998 (draft):
# Nomenclature:
# - Q is a public key.
# The "Elliptic Curve Domain Parameters" include:
# - q is the "field size", which in our case equals p.
# - p is a big prime.
# - G is a point of prime order (5.1.1.1).
# - n is the order of G (5.1.1.1).
# Public-key validation (5.2.2):
# - Verify that Q is not the point at infinity.
# - Verify that X_Q and Y_Q are in [0,p-1].
# - Verify that Q is on the curve.
# - Verify that nQ is the point at infinity.
# Signature generation (5.3):
# - Pick random k from [1,n-1].
# Signature checking (5.4.2):
# - Verify that r and s are in [1,n-1].
#
# Version of 2008.11.25.
#
# Revision history:
# 2005.12.31 - Initial version.
# 2008.11.25 - Change CurveFp.is_on to contains_point.
#
# Written in 2005 by Peter Pearson and placed in the public domain.
from __future__ import division
from .six import print_
from . import numbertheory
class CurveFp(object):
    """Elliptic Curve over the field of integers modulo a prime."""

    def __init__(self, p, a, b):
        """The curve of points satisfying y^2 = x^3 + a*x + b (mod p)."""
        self.__p = p
        self.__a = a
        self.__b = b

    def p(self):
        """Return the prime modulus of the underlying field."""
        return self.__p

    def a(self):
        """Return the curve coefficient a."""
        return self.__a

    def b(self):
        """Return the curve coefficient b."""
        return self.__b

    def contains_point(self, x, y):
        """Is the point (x,y) on this curve?"""
        lhs = (y * y) % self.__p
        rhs = (x * x * x + self.__a * x + self.__b) % self.__p
        return lhs == rhs
class Point( object ):
    """A point on an elliptic curve. Altering x and y is forbidden,
    but they can be read by the x() and y() methods."""
    # NOTE(review): __eq__ is defined without __ne__/__hash__; under
    # Python 2 (supported here via six) `!=` falls back to identity
    # comparison -- confirm callers only ever use `==`.

    def __init__( self, curve, x, y, order = None ):
        """curve, x, y, order; order (optional) is the order of this point."""
        self.__curve = curve
        self.__x = x
        self.__y = y
        self.__order = order
        # self.curve is allowed to be None only for INFINITY:
        if self.__curve: assert self.__curve.contains_point( x, y )
        # If an order is claimed, verify it: order * point must be infinity.
        if order: assert self * order == INFINITY

    def __eq__( self, other ):
        """Return True if the points are identical, False otherwise."""
        if self.__curve == other.__curve \
           and self.__x == other.__x \
           and self.__y == other.__y:
            return True
        else:
            return False

    def __add__( self, other ):
        """Add one point to another point."""
        # X9.62 B.3:
        # Infinity is the group identity element.
        if other == INFINITY: return self
        if self == INFINITY: return other
        assert self.__curve == other.__curve
        if self.__x == other.__x:
            # Same x: either inverse points (sum is infinity) or the same
            # point (fall through to the doubling formula).
            if ( self.__y + other.__y ) % self.__curve.p() == 0:
                return INFINITY
            else:
                return self.double()
        # Chord construction: l is the slope of the line through the points.
        p = self.__curve.p()
        l = ( ( other.__y - self.__y ) * \
              numbertheory.inverse_mod( other.__x - self.__x, p ) ) % p
        x3 = ( l * l - self.__x - other.__x ) % p
        y3 = ( l * ( self.__x - x3 ) - self.__y ) % p
        return Point( self.__curve, x3, y3 )

    def __mul__( self, other ):
        """Multiply a point by an integer."""
        def leftmost_bit( x ):
            # Highest power of two <= x.
            assert x > 0
            result = 1
            while result <= x: result = 2 * result
            return result // 2
        e = other
        # Reduce the scalar modulo the point's order when it is known.
        if self.__order: e = e % self.__order
        if e == 0: return INFINITY
        if self == INFINITY: return INFINITY
        assert e > 0
        # From X9.62 D.3.2: double-and-add driven by the bits of 3e.
        e3 = 3 * e
        negative_self = Point( self.__curve, self.__x, -self.__y, self.__order )
        i = leftmost_bit( e3 ) // 2
        result = self
        # print_("Multiplying %s by %d (e3 = %d):" % ( self, other, e3 ))
        while i > 1:
            result = result.double()
            if ( e3 & i ) != 0 and ( e & i ) == 0: result = result + self
            if ( e3 & i ) == 0 and ( e & i ) != 0: result = result + negative_self
            # print_(". . . i = %d, result = %s" % ( i, result ))
            i = i // 2
        return result

    def __rmul__( self, other ):
        """Multiply a point by an integer."""
        return self * other

    def __str__( self ):
        if self == INFINITY: return "infinity"
        return "(%d,%d)" % ( self.__x, self.__y )

    def double( self ):
        """Return a new point that is twice the old."""
        if self == INFINITY:
            return INFINITY
        # X9.62 B.3: tangent-line slope at the point.
        p = self.__curve.p()
        a = self.__curve.a()
        l = ( ( 3 * self.__x * self.__x + a ) * \
              numbertheory.inverse_mod( 2 * self.__y, p ) ) % p
        x3 = ( l * l - 2 * self.__x ) % p
        y3 = ( l * ( self.__x - x3 ) - self.__y ) % p
        return Point( self.__curve, x3, y3 )

    def x( self ):
        return self.__x

    def y( self ):
        return self.__y

    def curve( self ):
        return self.__curve

    def order( self ):
        return self.__order

# This one point is the Point At Infinity for all purposes:
INFINITY = Point( None, None, None )
def __main__():
    """Self-test: exercise point addition, doubling, and multiplication
    against the worked examples in ANSI X9.62 and a NIST P-192 sample
    computation.  Raises FailedTest on any mismatch."""
    class FailedTest(Exception): pass
    def test_add( c, x1, y1, x2, y2, x3, y3 ):
        """We expect that on curve c, (x1,y1) + (x2, y2 ) = (x3, y3)."""
        p1 = Point( c, x1, y1 )
        p2 = Point( c, x2, y2 )
        p3 = p1 + p2
        print_("%s + %s = %s" % ( p1, p2, p3 ), end=' ')
        if p3.x() != x3 or p3.y() != y3:
            raise FailedTest("Failure: should give (%d,%d)." % ( x3, y3 ))
        else:
            print_(" Good.")
    def test_double( c, x1, y1, x3, y3 ):
        """We expect that on curve c, 2*(x1,y1) = (x3, y3)."""
        p1 = Point( c, x1, y1 )
        p3 = p1.double()
        print_("%s doubled = %s" % ( p1, p3 ), end=' ')
        if p3.x() != x3 or p3.y() != y3:
            raise FailedTest("Failure: should give (%d,%d)." % ( x3, y3 ))
        else:
            print_(" Good.")
    def test_double_infinity( c ):
        """We expect that on curve c, 2*INFINITY = INFINITY."""
        p1 = INFINITY
        p3 = p1.double()
        print_("%s doubled = %s" % ( p1, p3 ), end=' ')
        if p3.x() != INFINITY.x() or p3.y() != INFINITY.y():
            raise FailedTest("Failure: should give (%d,%d)." % ( INFINITY.x(), INFINITY.y() ))
        else:
            print_(" Good.")
    def test_multiply( c, x1, y1, m, x3, y3 ):
        """We expect that on curve c, m*(x1,y1) = (x3,y3)."""
        p1 = Point( c, x1, y1 )
        p3 = p1 * m
        print_("%s * %d = %s" % ( p1, m, p3 ), end=' ')
        if p3.x() != x3 or p3.y() != y3:
            raise FailedTest("Failure: should give (%d,%d)." % ( x3, y3 ))
        else:
            print_(" Good.")

    # A few tests from X9.62 B.3:
    c = CurveFp( 23, 1, 1 )
    test_add( c, 3, 10, 9, 7, 17, 20 )
    test_double( c, 3, 10, 7, 12 )
    test_add( c, 3, 10, 3, 10, 7, 12 ) # (Should just invoke double.)
    test_multiply( c, 3, 10, 2, 7, 12 )
    test_double_infinity(c)
    # From X9.62 I.1 (p. 96): walk the cyclic subgroup generated by g.
    g = Point( c, 13, 7, 7 )
    check = INFINITY
    for i in range( 7 + 1 ):
        p = ( i % 7 ) * g
        print_("%s * %d = %s, expected %s . . ." % ( g, i, p, check ), end=' ')
        if p == check:
            print_(" Good.")
        else:
            raise FailedTest("Bad.")
        check = check + g
    # NIST Curve P-192:
    p = 6277101735386680763835789423207666416083908700390324961279
    r = 6277101735386680763835789423176059013767194773182842284081
    #s = 0x3045ae6fc8422f64ed579528d38120eae12196d5L
    c = 0x3099d2bbbfcb2538542dcd5fb078b6ef5f3d6fe2c745de65
    b = 0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1
    Gx = 0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012
    Gy = 0x07192b95ffc8da78631011ed6b24cdd573f977a11e794811
    c192 = CurveFp( p, -3, b )
    p192 = Point( c192, Gx, Gy, r )
    # Checking against some sample computations presented
    # in X9.62:
    d = 651056770906015076056810763456358567190100156695615665659
    Q = d * p192
    if Q.x() != 0x62B12D60690CDCF330BABAB6E69763B471F994DD702D16A5:
        raise FailedTest("p192 * d came out wrong.")
    else:
        print_("p192 * d came out right.")
    k = 6140507067065001063065065565667405560006161556565665656654
    R = k * p192
    if R.x() != 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD \
       or R.y() != 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835:
        raise FailedTest("k * p192 came out wrong.")
    else:
        print_("k * p192 came out right.")
    u1 = 2563697409189434185194736134579731015366492496392189760599
    u2 = 6266643813348617967186477710235785849136406323338782220568
    temp = u1 * p192 + u2 * Q
    if temp.x() != 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD \
       or temp.y() != 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835:
        raise FailedTest("u1 * p192 + u2 * Q came out wrong.")
    else:
        print_("u1 * p192 + u2 * Q came out right.")

if __name__ == "__main__":
    __main__()
| espressif/ESP8266_RTOS_SDK | components/esptool_py/esptool/ecdsa/ellipticcurve.py | ellipticcurve.py | py | 8,609 | python | en | code | 3,148 | github-code | 6 | [
{
"api_name": "six.print_",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "six.print_",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "six.print_",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "six.print_",
"line_number": 2... |
74750167547 | import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer
import re
def title_generation(data):
    """Generate a headline for data["content"] and store it in data["title"].

    Uses the Michau/t5-base-en-generate-headline checkpoint with beam
    search.  Fixes vs. original: the model/tokenizer are loaded once and
    cached on the function object (the original re-downloaded them on
    every call); generation runs under torch.no_grad(); and a missing
    "<pad> ... </s>" wrapper no longer crashes on None.group(0).

    Args:
        data: dict with a "content" key holding the article text; mutated
            in place to add a "title" key.

    Returns:
        The same dict, with data["title"] set to the generated headline.
    """
    print("[!] Server logs: Title generation has started")
    text = data["content"]
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Lazy one-time load; subsequent calls reuse the cached objects.
    if not hasattr(title_generation, "_cached"):
        model = T5ForConditionalGeneration.from_pretrained(
            "Michau/t5-base-en-generate-headline"
        )
        tokenizer = T5Tokenizer.from_pretrained("Michau/t5-base-en-generate-headline")
        title_generation._cached = (model, tokenizer)
    model, tokenizer = title_generation._cached
    model = model.to(device)
    encoding = tokenizer.encode_plus(text, return_tensors="pt")
    input_ids = encoding["input_ids"].to(device)
    attention_masks = encoding["attention_mask"].to(device)
    # Inference only: no autograd bookkeeping needed.
    with torch.no_grad():
        beam_outputs = model.generate(
            input_ids=input_ids,
            attention_mask=attention_masks,
            max_length=64,
            num_beams=3,
            early_stopping=True,
        )
    result = tokenizer.decode(beam_outputs[0])
    print("[!] Server logs: Title generation completed")
    # Strip the "<pad> ... </s>" wrapper emitted by T5 decoding.
    regex_pattern = r"(?<=<pad> )(.*)(?=</s>)"
    match = re.search(regex_pattern, result)
    if match:
        data["title"] = match.group(0)
    else:
        # Fallback: remove the special tokens manually instead of crashing.
        data["title"] = result.replace("<pad>", "").replace("</s>", "").strip()
    return data
| SVijayB/Gist | scripts/title_generation.py | title_generation.py | py | 1,106 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "torch.device",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "transformers.T5ForC... |
40696675203 | import re
from typing import NamedTuple, Optional
from magma.magmad.check import subprocess_workflow
class LscpuCommandParams(NamedTuple):
    """Empty parameter bundle: the lscpu invocation takes no arguments."""
    pass
class LscpuCommandResult(NamedTuple):
    """Parsed result of one `lscpu` run; every field is None on error."""
    error: Optional[str]              # stderr text or parse-failure message; None on success
    core_count: Optional[int]         # physical cores = cores-per-socket * sockets
    threads_per_core: Optional[int]
    architecture: Optional[str]       # e.g. "x86_64"
    model_name: Optional[str]         # CPU model string
def get_cpu_info() -> LscpuCommandResult:
    """Run `lscpu` via the subprocess workflow and return the parsed
    result. Blocks until the command completes."""
    results = list(
        subprocess_workflow.exec_and_parse_subprocesses(
            [LscpuCommandParams()],
            _get_lscpu_command_args_list,
            parse_lscpu_output,
        ),
    )
    # Exactly one params object was submitted, so take the single result.
    return results[0]
def _get_lscpu_command_args_list(_):
return ['lscpu']
def parse_lscpu_output(stdout, stderr, _):
    """
    Parse stdout output from a lscpu command.

    Returns an LscpuCommandResult; any stderr output or parse failure
    yields a result carrying an error message and None for every field.

    Fix: the five per-field regex extractions were copy-pasted boilerplate
    (each wrapping the already-str stdout in a redundant str()); they now
    share one helper.
    """
    def _create_error_result(err_msg):
        return LscpuCommandResult(
            error=err_msg, core_count=None,
            threads_per_core=None, architecture=None,
            model_name=None,
        )

    if stderr:
        return _create_error_result(stderr)
    stdout_decoded = stdout.decode()

    def _field(pattern):
        # Value following a "<label>:" line; AttributeError (match is None)
        # when the label is missing -- handled by the except below.
        return re.search(pattern, stdout_decoded).group(1)

    try:
        cores_per_socket = int(_field(r'Core\(s\) per socket:\s*(.*)\n'))
        num_sockets = int(_field(r'Socket\(s\):\s*(.*)\n'))
        threads_per_core = int(_field(r'Thread\(s\) per core:\s*(.*)\n'))
        architecture = _field(r'Architecture:\s*(.*)\n')
        model_name = _field(r'Model name:\s*(.*)\n')
        return LscpuCommandResult(
            error=None,
            core_count=cores_per_socket * num_sockets,
            threads_per_core=threads_per_core,
            architecture=architecture,
            model_name=model_name,
        )
    except (AttributeError, IndexError, ValueError) as e:
        return _create_error_result(
            'Parsing failed: %s\n%s' % (e, stdout_decoded),
        )
| magma/magma | orc8r/gateway/python/magma/magmad/check/machine_check/cpu_info.py | cpu_info.py | py | 2,341 | python | en | code | 1,605 | github-code | 6 | [
{
"api_name": "typing.NamedTuple",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.NamedTuple",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
... |
36703905624 | import soundfile as sf
import numpy as np
import time
import matplotlib.pyplot as plt
from parameterization import STFT, iSTFT, optimal_synth_window, first_larger_square
DEF_PARAMS = {
"win_len": 25,
"win_ovlap": 0.75,
"blocks": 800,
"max_h_type": "lin-lin",
"min_gain_dry": 0,
"bias": 1.01,
"alpha": 0.1,
"gamma": 0.7,
}
TITLES = ["aula1_12", "kitchen_12", "stairway1_1", "test"]
SAMPLES = ["sploty/aula1/aula1_12.wav", "sploty/kitchen/kitchen_12.wav", "sploty/stairway1/stairway1_1.wav", "deverb_test_samples/test_raw.wav"]
TEST_SCOPE = False
def get_max_h_matrix(type, freqs, blocks):
    """Build the (blocks x freqs) ceiling matrix for the impulse-response
    power estimate, decaying along time blocks and/or frequency bins.

    :param type: decay profile name ('log-log', 'log-lin', 'log-full',
                 'lin-log', 'lin-lin', 'lin-full'); any other value yields
                 a constant all-ones matrix. The parameter name shadows the
                 builtin ``type`` but is kept for caller compatibility.
    :param freqs: number of frequency bins
    :param blocks: number of time blocks (impulse response length)

    NOTE(review): the 'log-log' and 'lin-log' branches are identical except
    for the final divisor (99 vs 9) — confirm that this is intentional.
    """
    if type == "log-log":
        return (np.logspace(np.ones(freqs), np.ones(freqs) * np.finfo(np.float32).eps, blocks) *
                np.logspace(np.ones(blocks), np.ones(blocks) * np.finfo(np.float32).eps, freqs).T - 1) / 99
    elif type == "log-lin":
        return (np.logspace(np.ones(freqs), np.ones(freqs) * np.finfo(np.float32).eps, blocks) *
                np.linspace(np.ones(blocks), np.zeros(blocks), freqs).T) / 9
    elif type == "log-full":
        return (np.logspace(np.ones(freqs), np.ones(freqs) * np.finfo(np.float32).eps, blocks) - 1) / 9
    elif type == "lin-log":
        return (np.logspace(np.ones(freqs), np.ones(freqs) * np.finfo(np.float32).eps, blocks) *
                np.logspace(np.ones(blocks), np.ones(blocks) * np.finfo(np.float32).eps, freqs).T - 1) / 9
    elif type == "lin-lin":
        return np.linspace(np.ones(freqs), np.zeros(freqs), blocks) * \
               np.linspace(np.ones(blocks), np.zeros(blocks), freqs).T
    elif type == "lin-full":
        return np.linspace(np.ones(freqs), np.zeros(freqs), blocks)
    else:
        # Fallback: constant (no decay) ceiling.
        return np.ones((freqs, blocks)).T
def reconstruct(stft, window, overlap):
    """Rebuild a time-domain signal from a one-sided STFT.

    The missing conjugate half of the spectrum is appended before calling
    iSTFT, and only the real part of the inverse transform is returned.
    """
    n_frames, n_bins = stft.shape
    # NOTE(review): np.flipud flips along the *frame* axis here, not the
    # frequency axis — verify this matches iSTFT's expected symmetric layout.
    mirrored = np.flipud(np.conj(stft[:, 0:n_bins - 2]))
    full_stft = np.hstack((stft, mirrored))
    return np.real(iSTFT(full_stft, window, overlap))
def read_impulse_response(path, target_fs, target_bins, win_len, win_ovlap):
    """Load an impulse response from *path*, peak-normalize it, and return
    its one-sided power STFT computed on the target analysis grid.

    :param path: audio file containing the impulse response
    :param target_fs: sampling rate of the signal being processed [Hz]
    :param target_bins: FFT size used for the processed signal
    :param win_len: analysis window length [ms]
    :param win_ovlap: window overlap as a fraction of the window length
    :returns: (one-sided power STFT of the IR, number of STFT frames)
    """
    h, h_fs = sf.read(path)
    h /= np.max(np.abs(h))
    # Scale the FFT size so the bin spacing matches the target sampling rate.
    nfft = int(target_bins * h_fs / target_fs)
    win_len = int(win_len / 1000 * h_fs)
    win_ovlap = int(win_len * win_ovlap)
    window = np.hanning(win_len)
    H = STFT(h, window, win_ovlap, nfft, power=True)
    return H[:, 0:target_bins // 2 + 1], H.shape[0]
def printProgressBar (iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd="\r"):
    """
    Render a single-line terminal progress bar; call once per iteration.
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        length      - Optional  : character length of bar (Int)
        fill        - Optional  : bar fill character (Str)
        printEnd    - Optional  : end character (e.g. "\r", "\r\n") (Str)
    """
    pct_text = f'{100 * (iteration / float(total)):.{decimals}f}'
    filled = int(length * iteration // total)
    meter = fill * filled + '-' * (length - filled)
    print(f'\r{prefix} |{meter}| {pct_text}% {suffix}', end=printEnd)
    if iteration == total:
        # Terminate the carriage-return line once the bar is complete.
        print()
def dereverberate(wave, fs, params=None, estimate_execution_time=True, show_progress_bar=True):
    """
    Estimates the impulse response in a room the recording took place
    :param wave: 1-D ndarray of wave samples
    :param fs: int - sampling frequency
    :param params: dict containing the algorithm parameters - keys:
                   "win_len" (window length, ms), "win_ovlap" (overlap fraction),
                   "blocks" (impulse response length in STFT frames),
                   "min_gain_dry", "bias", "alpha", "gamma" (smoothing factors).
                   Defaults to DEF_PARAMS. NOTE(review): the "max_h_type" key is
                   never read here — the ceiling matrix is hard-coded to 'const'.
    :param estimate_execution_time: should we print estimated execution time for each next frame
    :param show_progress_bar: should we print progress bar of estimation
    :returns: (h_stft_pow) 2-D ndarray power STFT of h_rir,
              (wave_dry) 1-D ndarray of the dry signal,
              (wave_wet) 1-D ndarray of the wet signal
    """
    # estimating execution time: ring buffer of the last 10 frame durations [ms]
    loop_times = np.zeros(10)
    # =================== Parameters ===================
    if params is None:
        params = DEF_PARAMS
    # ==================== Windowing ===================
    win_len_ms = params["win_len"]
    win_ovlap_p = params["win_ovlap"]
    # ================ Times to samples ================
    win_len = int(win_len_ms / 1000 * fs)
    win_ovlap = int(win_len * win_ovlap_p)
    window = np.hanning(win_len)
    # =================== Signal stft ==================
    nfft = first_larger_square(win_len)
    sig_stft = STFT(wave, window, win_ovlap, nfft)
    # keep only the one-sided spectrum (nfft//2 + 1 bins)
    sig_stft = sig_stft[:, 0:nfft // 2 + 1]
    frame_count, frequency_count = sig_stft.shape
    # ==================== Constants ===================
    # length of the impulse response
    blocks = params["blocks"]
    # minimum gain of dry signal per frequency
    min_gain_dry = params["min_gain_dry"]
    # maximum impulse response estimate
    # max_h, blocks = read_impulse_response("deverb_test_samples/stalbans_a_mono.wav", fs, nfft, win_len_ms, win_ovlap_p)
    max_h = get_max_h_matrix('const', frequency_count, blocks)
    # bias used to keep magnitudes from getting stuck on a wrong minimum
    bias = params["bias"]
    # alpha and gamma - smoothing factors for impulse response magnitude and gain
    alpha = params["alpha"]
    gamma = params["gamma"]
    # ==================== Algorithm ===================
    # dry_stft and wet_stft are the estimated dry and reverberant signals in frequency-time domain
    dry_stft = np.zeros((frame_count, frequency_count), dtype=np.csingle)
    wet_stft = np.zeros((frame_count, frequency_count), dtype=np.csingle)
    # h_stft_pow is the estimated impulse response in frequency-time domain
    h_stft_pow = max_h / 2
    # matrices with the information of currently estimated raw and dry signal (power spectra)
    raw_frames = np.ones((blocks, frequency_count))
    dry_frames = np.zeros((blocks, frequency_count))
    # c is a matrix to keep the raw estimated powers of the impulse response
    c = np.zeros((blocks, frequency_count))
    # gain_dry and gain_wet are the frequency gains of the dry and wet signals
    gain_dry = np.ones(frequency_count)
    gain_wet = np.zeros(frequency_count)
    for i in range(frame_count):
        if estimate_execution_time:
            # mean over the ring buffer times the remaining frame count
            remaining = round(np.mean(loop_times) * (frame_count - i))
            loop_times[1:] = loop_times[0:-1]
            loop_times[0] = time.time()
            print("Processing frame {} of {}, estimated time left: {} ms".format(i + 1, frame_count, remaining))
        frame = sig_stft[i, :]
        frame_power = np.power(np.abs(frame), 2)
        # estimate signals based on i-th frame
        for b in range(blocks):
            estimate = frame_power / raw_frames[b, :]
            # where the new estimate exceeds the current one, nudge upward by
            # 'bias' instead of accepting it outright (avoids a stuck minimum)
            np.place(estimate, estimate >= h_stft_pow[b, :], h_stft_pow[b, :] * bias + np.finfo(np.float32).eps)
            # clip to the ceiling matrix and store the raw estimate in c
            np.fmin(estimate, max_h[b, :], out=c[b, :])
            h_stft_pow[b, :] = alpha * h_stft_pow[b, :] + (1 - alpha) * c[b, :]
        # calculating gains
        new_gain_dry = 1 - np.sum(dry_frames * h_stft_pow, axis=0) / frame_power
        np.place(new_gain_dry, new_gain_dry < min_gain_dry, min_gain_dry)
        gain_dry = gamma * gain_dry + (1 - gamma) * new_gain_dry
        new_gain_wet = 1 - gain_dry
        gain_wet = gamma * gain_wet + (1 - gamma) * new_gain_wet
        # calculating signals
        dry_stft[i, :] = gain_dry * frame
        wet_stft[i, :] = gain_wet * frame
        # shifting previous frames
        dry_frames[1:blocks, :] = dry_frames[0:blocks - 1, :]
        dry_frames[0, :] = np.power(np.abs(dry_stft[i, :]), 2)
        raw_frames[1:blocks, :] = raw_frames[0:blocks - 1, :]
        raw_frames[0, :] = frame_power
        if estimate_execution_time:
            loop_times[0] = round(1000 * (time.time() - loop_times[0]))
        if show_progress_bar:
            printProgressBar(i, frame_count, prefix='Progress', suffix='Complete', length=30)
    # synthesis window matched to the analysis window/overlap for the iSTFT
    window = optimal_synth_window(window, win_ovlap)
    if TEST_SCOPE:
        t = (np.arange(frame_count) * (win_len_ms * (1 - win_ovlap_p))).astype(int)
        f = np.linspace(0, fs / 2, frequency_count).astype(int)
        txx, fxx = np.meshgrid(t, f)
        fig, axes = plt.subplots(3, 1, figsize=(10, 10))
        axes[0].pcolormesh(txx, fxx, np.log10(np.power(np.abs(sig_stft.T), 2)), cmap=plt.get_cmap('plasma'))
        axes[0].set_title("Original signal")
        axes[1].pcolormesh(txx, fxx, np.log10(np.power(np.abs(dry_stft.T), 2)), cmap=plt.get_cmap('plasma'))
        axes[1].set_title("Dry signal")
        axes[2].pcolormesh(txx, fxx, np.log10(np.power(np.abs(wet_stft.T), 2)), cmap=plt.get_cmap('plasma'))
        axes[2].set_title("Reverberant signal")
        fig.show()
    wave_dry = reconstruct(dry_stft, window, win_ovlap)
    wave_wet = reconstruct(wet_stft, window, win_ovlap)
    return h_stft_pow, wave_dry, wave_wet
def test_deverb():
for i, item in enumerate(SAMPLES):
# i = 3
# item = SAMPLES[3]
print("Estimating " + item)
wave, fs = sf.read(item)
wave = wave / np.max(np.abs(wave))
H_rir, dry_wav, wet_wav = dereverberate(wave, fs, estimate_execution_time=False)
min_size = np.min([wave.size, dry_wav.size, wet_wav.size])
t = np.linspace(0, min_size / fs, min_size)
fig, axes = plt.subplots(3, 1, figsize=(10, 10))
fig.suptitle("estimated signals - {} reverb".format(TITLES[i]))
axes[0].plot(t, wave[0:min_size])
axes[0].set_title("original")
axes[1].plot(t, dry_wav[0:min_size])
axes[1].set_title("dry")
axes[2].plot(t, wet_wav[0:min_size])
axes[2].set_title("reverberant")
axes[2].set_xlabel(r"time $[s]$")
fig.tight_layout()
fig.show()
frames, freqs = H_rir.shape
hop = DEF_PARAMS["win_len"] * (1 - DEF_PARAMS["win_ovlap"]) / 1000
f = np.linspace(0, fs / 2000, freqs)
t = np.linspace(0, hop * frames, frames)
fxx, txx = np.meshgrid(f, t)
fig, ax = plt.subplots(figsize=(6, 5))
ax.pcolormesh(txx, fxx, np.log10(H_rir), cmap=plt.get_cmap('plasma'))
ax.set_title(r"estimated $H_{rir}$: " + TITLES[i])
ax.set_xlabel(r"time $[s]$")
ax.set_ylabel(r"frequency $[kHz]$")
fig.show()
with open("tmp/dry_{}.wav".format(TITLES[i]), "wb") as f:
sf.write(f, dry_wav, fs)
with open("tmp/wet_{}.wav".format(TITLES[i]), "wb") as f:
sf.write(f, wet_wav, fs)
if __name__ == "__main__":
TEST_SCOPE = True
test_deverb()
| Revzik/AGH-ZTPS_Acoustical-Environment-Classification | deverb.py | deverb.py | py | 10,820 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.logspace",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.finfo",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_numbe... |
74197689149 | import workAssyncFile
from sora.prediction import prediction
from sora.prediction.occmap import plot_occ_map as occmap
import json
import datetime
import restApi
import os
def __clearName(name):
    """Sanitize *name* for use as a filename: keep alphanumerics, spaces,
    hyphens and underscores, then turn the spaces into underscores."""
    kept = [ch for ch in name if ch.isalnum() or ch in ' -_']
    return "".join(kept).replace(' ', '_')
def processRequest(data, output):
    """Generate (or reuse a cached) occultation map for *data* and return
    the image bytes.

    :param data: dict describing the map request (see generateMap)
    :param output: directory where generated maps are cached
    :return: raw bytes of the generated .jpg file
    """
    outputFile = generateMap(data, output, False)
    # 'with' guarantees the file descriptor is closed even if read() raises
    # (the original opened the file and never closed it).
    with open(outputFile, "rb") as f:
        return f.read()
def processFile(input, output, fileName):
    """Read a JSON map request from ``input/fileName`` and force-regenerate
    its map into *output*.

    :param input: directory holding the request file (name kept for caller
                  compatibility although it shadows the builtin ``input``)
    :param output: directory that receives the generated map
    :param fileName: name of the JSON request file
    """
    # Context manager closes the request file even if parsing fails
    # (the original leaked the file descriptor).
    with open(os.path.join(input, fileName), "r") as f:
        data = json.load(f)
    generateMap(data, output, True)
def generateMap(data, output, forced=False):
    """Route a map request to the online or offline generator.

    Requests carrying a 'body' key are resolved through the prediction
    service; all others must already contain the occultation parameters
    and are drawn locally.
    """
    make_map = generateMapWithIternet if 'body' in data else generateMapWithoutIternet
    return make_map(data, output, forced)
def generateMapWithIternet(data, output, forced=False):
body = data['body']
strDate = data['date']
strTime = data['time']
fileName = __clearName(body+" "+strDate.replace("-","")+" "+strTime.replace(":",""))
outputFile = os.path.join(output,fileName+".jpg")
if forced or not os.path.exists(outputFile):
v = (strDate+'-'+strTime).replace(":","-").split('-')
dtRef = datetime.datetime(int(v[0]), int(v[1]), int(v[2]), int(v[3]), int(v[4]), int(v[5]))
time0 = dtRef-datetime.timedelta(hours=4, minutes=0) #fuso 3
time1 = time0+datetime.timedelta(hours=2, minutes=0)
dtRef = dtRef - datetime.timedelta(hours=3, minutes=0)
pred = prediction(body=body, time_beg=time0, time_end=time1, step=10, divs=1, verbose=False)
for p in pred:
p.plot_occ_map(nameimg=fileName, path=output, fmt='jpg')
return outputFile
def generateMapWithoutIternet(data, output, forced=False):
name = data["name"]
radius = data["radius"]
coord = data["coord"]
time = data["time"]
ca = data["ca"]
pa = data["pa"]
vel = data["vel"]
dist = data["dist"]
mag = data["mag"]
longi = data["longi"]
v = time.split("T")
strDate = v[0]
if '.' in v[1]:
v[1] = v[1].split('.')[0]
strTime = v[1]
fileName = __clearName(name+" "+strDate.replace("-","")+" "+strTime.replace(":",""))
outputFile = os.path.join(output,fileName+".jpg")
if forced or not os.path.exists(outputFile):
occmap(name, radius, coord, time, ca, pa, vel, dist, mag=mag, longi=longi, dpi=50, nameimg=fileName, path=output, fmt='jpg')
return outputFile
if __name__ == '__main__':
waf = workAssyncFile.WorkAssyncFile(os.getenv('INPUT_PATH', '~/media/input'),os.getenv('OUTPUT_PATH', '~/media/output'))
waf.setProcessFile(processFile)
waf.start()
api = restApi.RespApi(port=os.getenv('PORT', 8000), cachePath=os.getenv('CACHE_PATH', '~/media/output'))
api.setProcessReponse(processRequest)
api.start()
| linea-it/tno | container-SORA/src/main.py | main.py | py | 2,969 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number"... |
11315559084 | #coding:utf-8
import sys
sys.path.insert(0, "./")
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
from flask import Flask
from flask import render_template, redirect, url_for
from flask import request, session, json
from flask import jsonify
from keywords.keywordExtract import getKeywords
from parser.analysis_doc import parser_doc, basicInfoExtract
from conflict.conflict_detect import Conflict
from retrieval.infoRetrieval import find_policy
from association.asso_analyze import Association
app = Flask(__name__)
app.config["SECRET_KEY"] = "123456"
conflict = Conflict()
asso = Association()
@app.route('/')
def hello_world():
    """Landing endpoint: return a plain greeting identifying this backend."""
    greeting = '欢迎来到政策关联分析系统算法后台!!!'
    return greeting
@app.route('/dataProcess', methods=["POST", "GET"])
def dataProcess():
    '''
    Pre-process a policy document posted to the service and run basic
    information extraction on it before it enters the database.

    Form fields: ``text`` (the policy body) and ``name`` (its source name).
    JSON reply envelope: ``error_code`` 0 on success, 1 for missing input,
    2 for a non-POST request, 3 when extraction raises.
    :return: flask JSON response
    '''
    if request.method == 'POST':
        datax = request.form.get('text',"")
        name = request.form.get("name", "")
        if datax:
            '''
            添加数据处理操作
            '''
            try:
                results = basicInfoExtract(datax, source_name=name)
                return jsonify({"error_code":0, "reason":"", "data":results})
            except:
                # Bare except: any extraction failure is reported as error 3
                # ("input data error, cannot be parsed").
                return jsonify({"error_code":3, "reason":"输入数据错误,无法进行解析", "data":""})
        else:
            return jsonify({"error_code": 1, "reason": "没有输入政策数据", "data": ""})
    else:
        return jsonify({"error_code":2, "reason":"请求方式错误,应使用post请求", "data":""})
@app.route("/keywords", methods=["POST","GET"])
def keywords():
    """Extract keywords from a posted policy text.

    Form fields: ``text`` (the policy body) and ``number`` (how many
    keywords to return, default 3). Replies with the standard
    ``error_code`` envelope: 0 success, 1 missing input, 2 wrong method.
    """
    if request.method != 'POST':
        return jsonify({"error_code":2, "reason":"请求方式错误,应使用post请求", "data":""})
    datax = request.form.get('text',"")
    number = int(request.form.get('number', 3))
    if not datax:
        return jsonify({"error_code": 1, "reason": "没有输入政策数据", "data": ""})
    keyword = getKeywords(datax, num=number, use_value=False)
    results = {
        "keywords": keyword,  # extracted keyword list
    }
    return jsonify({"error_code":0, "reason":"", "data":results})
@app.route("/dataAnalyze", methods=["POST","GET"])
def dataAnalyze():
'''
政策文本结构化解析
:return:
'''
if request.method == 'POST':
datax = request.form.get('text',"")
name = request.form.get('name', "")
if datax:
'''
添加数据处理操作
'''
try:
results = parser_doc(datax)
return jsonify({"error_code":0, "reason":"", "data":results})
except:
return jsonify({"error_code": 3, "reason": "输入数据错误,无法进行解析", "data": ""})
else:
return jsonify({"error_code": 1, "reason": "没有输入政策数据", "data": ""})
else:
return jsonify({"error_code":2, "reason":"请求方式错误,应使用post请求", "data":""})
@app.route("/conflictDetection", methods=["POST", "GET"])
def conflictDetection():
'''
政策文本冲突检测
:return:
'''
if request.method == 'POST':
# datax = request.get_data()
datax = request.form.get('policy',"")
test_policy = request.form.get('test_policy', "")
if datax and test_policy:
'''
添加数据处理操作
'''
try:
datax = json.loads(datax)
print("conflict input: %s"%(datax))
results = conflict.conflict(datax, target_sent=test_policy)
# results = {
# "result":"存在时间类型的冲突",
# "sentence":"到2020年,实现全面建设中国物联网体系平台。"
# }
return jsonify({"error_code":0, "reason":"", "data":results})
except:
return jsonify({"error_code": 3, "reason": "输入数据错误,无法进行解析", "data": ""})
else:
return jsonify({"error_code": 1, "reason": "没有输入政策数据或者是待检测文本", "data": ""})
else:
return jsonify({"error_code":2, "reason":"请求方式错误,应使用post请求", "data":""})
@app.route("/assoAnalyze", methods=["POST", "GET"])
def assoAnalyze():
'''
两个政策关联分析
:return:
'''
if request.method == 'POST':
policy1 = request.form.get('policy1', "")
policy2 = request.form.get('policy2', "")
if policy1 and policy2:
'''
添加数据处理操作
'''
try:
policy1 = json.loads(policy1)
policy2 = json.loads(policy2)
results = asso.analyzeAll(policy1, policy2)
# results = {
# "result":"对于政策A来说,政策B是起到理论指导作用",
# "policy1":{
# "1":["句子", "理论指导"],
# "2":["句子", "理论指导"],
#
# },#第一个政策每句话的分析
# "policy2":{
# "1":["句子", "理论指导"],
# "2":["句子", "理论指导"],
# }#第二个政策每句话的分析
# }
return jsonify({"error_code":0, "reason":"", "data": results})
except:
return jsonify({"error_code": 3, "reason": "输入数据错误,无法进行解析", "data": ""})
else:
return jsonify({"error_code": 1, "reason": "没有输入政策数据", "data": ""})
else:
return jsonify({"error_code":2, "reason":"请求方式错误,应使用post请求", "data":""})
@app.route("/assoSingleAnalyze", methods=["POST", "GET"])
def assoSingleAnalyze():
'''
两个政策关联分析
:return:
'''
if request.method == 'POST':
policy1 = request.form.get('policy1',"")
policy2 = request.form.get('policy2', "")
sentence = request.form.get('sentence', "")
id = request.form.get('id', None)
if policy1 and policy2 and sentence and id is not None:
try:
id = int(id)
'''
添加数据处理操作
'''
policy1 = json.loads(policy1)
policy2 = json.loads(policy2)
results = asso.assoSingleAnalyze(policy1, policy2, sentence, id)
# results = {
# "policy":{
# "1":["句子", "相似"],
# "2":["句子", "不相似"],
# }
# }
return jsonify({"error_code":0, "reason":"", "data":results})
except:
return jsonify({"error_code": 3, "reason": "输入数据错误,无法进行解析", "data": ""})
else:
return jsonify({"error_code": 1, "reason": "没有输入政策数据或者输入信息不完整", "data": ""})
else:
return jsonify({"error_code":2, "reason":"请求方式错误,应使用post请求", "data":""})
@app.route("/policyFind", methods=["POST", "GET"])
def policyFind():
'''
政策查找
:return:
'''
if request.method == 'POST':
policy1 = request.form.get('policy',"")
policy_lis = request.form.get('policy_lis', "")
number = int(request.form.get('number', 10))
if policy1 and policy_lis and number :
'''
添加数据处理操作
'''
try:
print(policy_lis)
if not isinstance(policy_lis, list):
policy_lis = policy_lis.split("#")
res = find_policy(policy1, policy_lis, int(number))
print(res)
results = {
"result":"#".join(res)#"大数据#互联网#人工智能#物联网"
}
return jsonify({"error_code":0, "reason":"", "data":results})
except:
return jsonify({"error_code": 3, "reason": "输入数据错误,无法进行解析", "data": ""})
else:
return jsonify({"error_code": 1, "reason": "没有输入政策数据或者输入信息不完整", "data": ""})
else:
return jsonify({"error_code":2, "reason":"请求方式错误,应使用post请求", "data":""})
if __name__ == '__main__':
app.debug = True
app.run(host="0.0.0.0", port = 5005, debug=True)
| nlp520/policy_web | app.py | app.py | py | 8,806 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.insert",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_nu... |
16413423811 | from datetime import datetime, date, time
import time
from collections import OrderedDict
def parametrized_decor(parameter):
    """Decorator factory: the returned decorator wraps a function so that
    every call prints a timestamp, the wrapped function's name, the call
    arguments, and the result together with its type.

    ``parameter`` is accepted for the parametrized-decorator shape but is
    not used by the wrapper itself.
    """
    def apply_to(foo):
        def logged_call(*args, **kwargs):
            print(datetime.now())
            print(f'Имя функции - {foo.__name__}')
            # args is always a tuple and kwargs a dict, so both branches
            # always print.
            if args is not None:
                print(f'Позиционные аргументы args - {args}')
            if kwargs is not None:
                print(f'Именованные аргументы kwargs - {kwargs}')
            outcome = foo(*args, **kwargs)
            print('result: ', outcome)
            print('result type: ', type(outcome))
            return outcome
        return logged_call
    return apply_to
if __name__ == '__main__':
# foo(1, 2)
documents_list = [{
"type": "passport",
"number": "2207 876234",
"name": "Василий Гупкин"
}, {
"type": "invoice",
"number": "11-2",
"name": "Геннадий Покемонов"
}]
@parametrized_decor(parameter=None)
def give_name(doc_list, num):
for doc_dict in doc_list:
if num == doc_dict['number']:
print(
f"Документ под номером {num} соответствует имени {doc_dict['name']}"
)
give_name(documents_list, '11-2')
print("____" * 15)
@parametrized_decor(parameter=None)
def summator(x, y):
return x + y
three = summator(1, 2)
five = summator(2, 3)
result = summator(three, five)
| Smelkovaalla/4.5-Decorator | main.py | main.py | py | 1,414 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 9,
"usage_type": "name"
}
] |
6606964316 | import sys
from collections import deque
MOVES = [(-1, 0), (0, 1), (1, 0), (0, -1)]
input = sys.stdin.readline
def isrange(x: int, y: int) -> bool:
return 0 <= x < n and 0 <= y < n
def get_lands(x: int, y: int, island: int) -> set[tuple[int, int]]:
lands: set[tuple[int, int]] = set()
que: deque[tuple[int, int]] = deque()
que.append((x, y))
lands.add((x, y))
board[x][y] = island
while que:
x, y = que.popleft()
for movex, movey in MOVES:
nextx: int = x + movex
nexty: int = y + movey
if not isrange(nextx, nexty):
continue
if board[nextx][nexty] == 0:
continue
if (nextx, nexty) in lands:
continue
que.append((nextx, nexty))
lands.add((nextx, nexty))
board[nextx][nexty] = island
return lands
def get_bridge_length(lands: set[tuple[int, int]], island: int) -> int:
length: int = 0
que: deque[tuple[int, int]] = deque()
visited: list[list[bool]] = [[False for _ in range(n)] for _ in range(n)]
for x, y in lands:
que.append((x, y))
visited[x][y] = True
while que:
for _ in range(len(que)):
x, y = que.popleft()
for movex, movey in MOVES:
nextx: int = x + movex
nexty: int = y + movey
if not isrange(nextx, nexty):
continue
if board[nextx][nexty] == island:
continue
if visited[nextx][nexty]:
continue
if board[nextx][nexty] > 0:
return length
que.append((nextx, nexty))
visited[nextx][nexty] = True
length += 1
return -1
def solve() -> int:
    """Label every island on the global ``board`` (ids starting at 2) and
    return the length of the shortest bridge between any two islands.

    Mutates ``board`` in place via get_lands (land cells are overwritten
    with their island id) and reads the module-level global ``board``.
    """
    island: int = 2
    length: int = sys.maxsize
    for x, row in enumerate(board):
        for y, elem in enumerate(row):
            if elem == 1:
                # A cell still holding 1 starts a not-yet-labelled island.
                lands = get_lands(x, y, island)
                length = min(length, get_bridge_length(lands, island))
                island += 1
    return length
if __name__ == "__main__":
n = int(input())
board = [list(map(int, input().split())) for _ in range(n)]
print(solve())
| JeongGod/Algo-study | seonghoon/week06(22.02.01~22.02.07)/b2146.py | b2146.py | py | 2,298 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "sys.stdin",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "collections.deque",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "sys.maxsize",
"... |
28912342142 | import transformers
import torch.nn as nn
import config
import torch
class BERT_wmm(nn.Module):
    """BERT encoder followed by a linear head producing per-token
    vocabulary logits.

    When ``keep_tokens`` is given, the pretrained word-embedding matrix is
    sliced down to those token ids (vocabulary trimming) and re-attached
    to the BERT embedding layer.
    """
    def __init__(self, keep_tokens):
        super(BERT_wmm,self).__init__()
        self.bert=transformers.BertModel.from_pretrained(config.BERT_PATH)
        self.fc=nn.Linear(768,768)
        # normalized_shape (64, 768) ties the model to sequences of exactly
        # 64 tokens.
        self.layer_normalization=nn.LayerNorm((64, 768))
        # self.bert_drop=nn.Dropout(0.2)
        # 6932 is presumably the trimmed vocabulary size — TODO confirm.
        self.out=nn.Linear(768,6932)
        if keep_tokens is not None:
            self.embedding = nn.Embedding(6932, 768)
            weight = torch.load(config.BERT_EMBEDDING)
            # Keep only the rows of the pretrained embedding matrix that
            # correspond to the retained token ids.
            weight = nn.Parameter(weight['weight'][keep_tokens])
            self.embedding.weight = weight
            self.bert.embeddings.word_embeddings = self.embedding
            print(weight.shape)
    def forward(self, ids, mask, token_type_ids):
        # Returns logits of shape (batch, seq, 6932); seq must be 64 to
        # satisfy the LayerNorm above.
        out1, _=self.bert(
            ids,
            attention_mask=mask,
            token_type_ids=token_type_ids,
            return_dict=False
        )
        # mean pooling
        # max pooling
        # concat
        # bert_output=self.bert_drop(out1)
        output=self.fc(out1)
        layer_normalized=self.layer_normalization(output)
        final_output=self.out(layer_normalized)
        return final_output
| Zibo-Zhao/Semantic-Matching | model.py | model.py | py | 1,326 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "transformers.BertModel.from_pretrained",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tra... |
73817307386 | #
# test_ab.py - generic tests for analysis programs
# repagh <rene.vanpaassen@gmail.com, May 2020
import pytest
from slycot import analysis
from slycot.exceptions import SlycotArithmeticError, SlycotResultWarning
from .test_exceptions import assert_docstring_parse
@pytest.mark.parametrize(
'fun, exception_class, erange, checkvars',
((analysis.ab05nd, SlycotArithmeticError, 1, {'p1': 1}),
(analysis.ab07nd, SlycotResultWarning, 2, {'m': 1}),
(analysis.ab09ad, SlycotArithmeticError, 3, {'dico': 'C'}),
(analysis.ab09ad, SlycotArithmeticError, (2,), {'dico': 'D'}),
(analysis.ab09ad, SlycotResultWarning, ((1, 0), ), {'nr': 3,
'Nr': 2}),
(analysis.ab09ax, SlycotArithmeticError, 2, {'dico': 'C'}),
(analysis.ab09ax, SlycotResultWarning, ((1, 0), ), {'nr': 3,
'Nr': 2}),
(analysis.ab09ad, SlycotArithmeticError, 3, {'dico': 'C'}),
(analysis.ab09ad, SlycotResultWarning, ((1, 0), ), {'nr': 3,
'Nr': 2}),
(analysis.ab09md, SlycotArithmeticError, 3, {'alpha': -0.1}),
(analysis.ab09md, SlycotResultWarning, ((1, 0), (2, 0)), {'nr': 3,
'Nr': 2,
'alpha': -0.1}),
(analysis.ab09nd, SlycotArithmeticError, 3, {'alpha': -0.1}),
(analysis.ab09nd, SlycotResultWarning, ((1, 0), (2, 0)), {'nr': 3,
'Nr': 2,
'alpha': -0.1}),
(analysis.ab13bd, SlycotArithmeticError, 6, {'dico': 'C'}),
(analysis.ab13bd, SlycotResultWarning, ((1, 0),), {}),
(analysis.ab13dd, SlycotArithmeticError, 4, {}),
(analysis.ab13ed, SlycotArithmeticError, 1, {}),
(analysis.ab13fd, SlycotArithmeticError, (2,), {}),
(analysis.ab13fd, SlycotResultWarning, (1,), {})))
def test_ab_docparse(fun, exception_class, erange, checkvars):
assert_docstring_parse(fun.__doc__, exception_class, erange, checkvars)
| python-control/Slycot | slycot/tests/test_analysis.py | test_analysis.py | py | 2,436 | python | en | code | 115 | github-code | 6 | [
{
"api_name": "test_exceptions.assert_docstring_parse",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 13,
"usage_type": "attribute"
},
{
"a... |
5654287369 | from django.shortcuts import render
from django.http import Http404, HttpResponse, JsonResponse
from django.template import loader
from catalog.models import *
from django.forms.models import model_to_dict
import random
from django.views.decorators.csrf import csrf_exempt
from django.middleware.csrf import get_token
import json
# Create your views here.
def index(request):
template = loader.get_template('template.html')
context = {}
questions_id = []
related_choices = []
ID = ""
name = ""
# get all available modules and randomly pick one
module = list(modules.objects.all().values('module_name'))
randomed = [i for i in range(len(module))]
random.shuffle(randomed)
context['module'] = module[randomed[0]]
#print(context)
#
# get related questions and pass to html template
module_id = list(modules.objects.filter(module_name=context['module']['module_name']).values("id"))[0]['id']
question = list(questions.objects.all().filter(questions_under_id=module_id))
random.shuffle(question)
context['question'] = question
#print(context)
#
#get related answers and pass to html template
#print(question)
for i in question:
questions_id.append(i.id)
#print(questions_id)
for id in questions_id:
related_choices.append(list(answers.objects.filter(answers_under_id=id)))
context['answer'] = related_choices
#print(context['answer'])
#
# get Id & scores and pass to html template
name = module[randomed[0]]["module_name"]
print(name)
Id = modules.objects.filter(module_name=name).values('id')
for each in Id:
ID = each['id']
print(ID)
Scores = scores.objects.filter(score_under_id=ID).order_by('scores').reverse()
print(Scores)
context['scores'] = Scores
return HttpResponse(template.render(context,request))
def newScore(request):
    """Persist a new score row posted as JSON ({scores, username, module}).

    On POST the module name is resolved to its id and a ``scores`` row is
    saved; a body-less POST gets a JSON error. Any other method (and every
    successful POST) falls through to rendering the page template with an
    empty context.
    """
    print("SUCCESS : AJAX ENTERED!")
    template = loader.get_template('template.html')
    context = {}
    under_ID = ""
    if request.method == "POST" :
        # handle save logic
        if request.body:
            jsonLoad = json.loads(request.body)
            Scores = jsonLoad['scores']
            username = jsonLoad['username']
            module = jsonLoad['module']
        else :
            return JsonResponse({"errors": ["POST object has insufficient parameters!"]})
        ID = modules.objects.filter(module_name=module).values('id')
        for each in ID:
            # Last-write-wins: if several modules share the name, the final
            # row's id is used.
            under_ID = each['id']
        # NOTE(review): despite its name, ``errors`` holds the new scores
        # model instance being saved.
        errors = scores(scores=Scores, gameId=username, score_under_id=under_ID)
        errors.save()
    return HttpResponse(template.render(context,request))
| jng27/Agile | psb_project/locallibrary/catalog/views.py | views.py | py | 2,686 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.template.loader.get_template",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.template.loader",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "random.shuffle",
"line_number": 22,
"usage_type": "call"
},
{
"api_nam... |
10423490633 | from __future__ import annotations
import pytest
from randovania.lib import migration_lib
def test_migrate_to_version_missing_migration() -> None:
data = {
"schema_version": 1,
}
with pytest.raises(
migration_lib.UnsupportedVersion,
match=(
"Requested a migration from something 1, but it's no longer supported. "
"You can try using an older Randovania version."
),
):
migration_lib.apply_migrations(data, [None], version_name="something")
def test_migrate_to_version_data_too_new() -> None:
data = {
"schema_version": 3,
}
with pytest.raises(
migration_lib.UnsupportedVersion,
match=(
"Found version 3, but only up to 2 is supported. This file was created using a newer Randovania version."
),
):
migration_lib.apply_migrations(data, [None])
| randovania/randovania | test/lib/test_migration_lib.py | test_migration_lib.py | py | 899 | python | en | code | 165 | github-code | 6 | [
{
"api_name": "pytest.raises",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "randovania.lib.migration_lib.UnsupportedVersion",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "randovania.lib.migration_lib",
"line_number": 14,
"usage_type": "name"... |
33800228048 | # BFS
from collections import deque
import sys
input = lambda: sys.stdin.readline()
def bfs(i, c): # vertex, start color
    """Two-color the component containing vertex ``i`` via BFS.

    Colors are 1 and 2 (stored in the global ``color``); each neighbour
    receives ``3 - color[parent]``. Returns False as soon as an edge joins
    two same-colored vertices (component not bipartite), True otherwise.
    Relies on the module-level globals ``visited``, ``color`` and ``arr``.
    """
    q = deque([i])
    visited[i] = True
    color[i] = c
    while q:
        i = q.popleft()
        for j in arr[i]:
            if not visited[j]:
                visited[j] = True
                q.append(j)
                color[j] = 3- color[i]
            else:
                # Already-colored neighbour: equal colors break bipartiteness.
                if color[i] == color[j]:
                    return False
    return True
if __name__ == '__main__':
k = int(input())
for _ in range(k): # 테스트 케이스
v,e = map(int, input().split())
color = [0] * (v+1)
arr = [[] for _ in range(v+1)]
for _ in range(e):
a,b = map(int, input().split())
arr[a].append(b)
arr[b].append(a)
answer = True
visited = [False] * (v+1)
for i in range(1, v+1):
if not visited[i]:
if not bfs(i, 1): # return False이면 종료
answer = False
break
print('YES' if answer else 'NO')
# DFS -> 메모리 초과
# from collections import deque
# import sys
# input = lambda: sys.stdin.readline()
# sys.setrecursionlimit(10**6)
# def dfs(i, c): # 정점, 색상
# color[i] = c
# for j in arr[i]:
# if color[j] == 0:
# if not dfs(j, 3-c):
# return False
# elif color[i] == color[j]:
# return False
# return True
# if __name__ == '__main__':
# k = int(input())
# for _ in range(k): # 테스트 케이스
# v,e = map(int, input().split())
# color = [0] * (v)
# arr = [[] for _ in range(v)]
# for _ in range(e):
# a,b = map(int, input().split())
# arr[a-1].append(b-1)
# arr[b-1].append(a-1)
# answer = True
# for i in range(0, v):
# if color[i] == 0:
# if not dfs(i, 1):
# answer = False
# print('YES' if answer else 'NO')
| devAon/Algorithm | BOJ-Python/boj-1707_이분그래프.py | boj-1707_이분그래프.py | py | 2,065 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.stdin.readline",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 7,
"usage_type": "call"
}
] |
23423087794 | import logging
from ab.base import NavTable
from ab.base import Link, Data, Item
class Console (object):
    """Text renderer for the navigation UI: keeps the current indentation
    level and a NavTable mapping displayed index numbers to hrefs."""
    def __init__ (self):
        self._indent = 0
        self._nt = NavTable()
        self.logger = logging.getLogger ('ab')
        # NOTE(review): the ``level`` argument is accepted but ignored —
        # every message is logged at INFO.
        self.log = lambda msg, level=logging.INFO: self.logger.info (msg)
    def reset (self):
        # Start over: drop indentation and forget all numbered nav entries.
        self._indent = 0
        self._nt = NavTable()
    def indent_more (self):
        # Widen indentation by two spaces; returns the new level.
        self._indent += 2
        return self._indent
    def indent_less (self):
        # Narrow indentation by two spaces; returns the new level.
        self._indent -= 2
        return self._indent
    def indent (self):
        # Current indentation width in spaces.
        return self._indent
    # def add_nav_entry (self, **kwa):
    #     href = kwa.get ('href')
    #
    #     if href:
    #         no = self._nt.set (href = href)
    #         return no
    #
    #
    # def nav_table (self):
    #     if not len (self._nt):
    #         raise UserWarning ('empty nav table')
    #
    #     return self._nt
    #
    #
    # def next_target_no (self):
    #     self._target_no += 1
    def draw (self, thing):
        """Recursively render *thing* (list, Item, Data or Link) to text.

        Every rendered href is registered in the NavTable and displayed
        with its assigned index; unknown types fall back to ``<repr>``.
        """
        out = '\n'
        # if type (thing) in [list, tuple]:
        if type (thing) is list:
            self.indent_more()
            for t in thing:
                out += self.draw (t)
            self.indent_less()
        elif isinstance (thing, Item):
            out += '{indent}[{index}] {prompt} ({href})'.format (
                indent = ' ' * self.indent(),
                index = self._nt.set (href = thing.href),
                prompt = 'Permaurl',
                href = 'GET ' + thing.href,
            )
            out += self.draw (thing.data)
            out += self.draw (thing.links)
        elif isinstance (thing, Data):
            out += '{indent}{prompt}: {value}'.format (
                indent = ' ' * self.indent(),
                prompt = thing.prompt,
                value = thing.value,
            )
        elif isinstance (thing, Link):
            out += '{indent}[{index}] {prompt} ({method} {href})'.format (
                indent = ' ' * self.indent(),
                index = self._nt.set (href = thing.href),
                prompt = thing.prompt,
                method = thing.method,
                href = thing.href,
            )
        else:
            out += '<%s>' % thing
        return out
| oftl/ab | ui.py | ui.py | py | 2,324 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ab.base.NavTable",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "ab.base.NavTable... |
import scipy as sci
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import animation
import scipy.integrate
# Non-dimensionalisation reference quantities.
G=6.67408e-11  # gravitational constant [m^3 kg^-1 s^-2]
m_nd=1.989e+30 # reference mass: mass of the Sun [kg]
r_nd=5.326e+12 # reference distance [m]
v_nd=30000     # reference velocity [m/s]
t_nd=79.91*365*24*3600*0.51  # reference time [s]
# Dimensionless coupling constants of the scaled equations of motion.
K1=G*t_nd*m_nd/(r_nd**2*v_nd)
K2=v_nd*t_nd/r_nd
# Masses (in units of the solar mass).
m1=1.1 #Alpha Centauri A
m2=0.907 #Alpha Centauri B
m3=1.0 # third star
# Initial positions (dimensionless).
r1=np.array([-0.5,0,0], dtype="float64")
r2=np.array([0.5,0,0], dtype="float64")
r3=np.array([0,1,0], dtype="float64")
# Initial velocities (dimensionless).
v1=np.array([0.01,0.01,0], dtype="float")  # NOTE(review): dtype "float" resolves to float64 but is inconsistent with the others
v2=np.array([-0.05,0,-0.1], dtype="float64")
v3=np.array([0,-0.01,0], dtype="float64")
# Centre-of-mass position and velocity (computed but not used below).
r_com=(m1*r1+m2*r2+m3*r3)/(m1+m2+m3)
v_com=(m1*v1+m2*v2+m3*v3)/(m1+m2+m3)
# Equations of motion follow.
def ThreeBodyEquations(w, t, G, m1, m2, m3):
    """Right-hand side of the non-dimensionalised three-body ODE system.

    w : flat state vector [r1, r2, r3, v1, v2, v3] (18 entries).
    t : time (unused, required by odeint's callback signature).
    G, m1, m2, m3 : gravitational constant and the three masses.
    Returns the 18-entry derivative vector [dr/dt, dv/dt], using the
    module-level scaling constants K1 and K2.
    """
    r1 = w[:3]
    r2 = w[3:6]
    r3 = w[6:9]
    v1 = w[9:12]
    v2 = w[12:15]
    v3 = w[15:18]
    # Pairwise separations.  Bug fix: use np.linalg.norm -- the original
    # called sci.linalg.norm although scipy.linalg is never imported, which
    # fails on SciPy versions without lazy submodule loading.
    r12 = np.linalg.norm(r2 - r1)
    r13 = np.linalg.norm(r3 - r1)
    r23 = np.linalg.norm(r3 - r2)
    # Accelerations from Newtonian gravity (scaled by K1).
    dv1bydt = K1 * m2 * (r2 - r1) / r12 ** 3 + K1 * m3 * (r3 - r1) / r13 ** 3
    dv2bydt = K1 * m1 * (r1 - r2) / r12 ** 3 + K1 * m3 * (r3 - r2) / r23 ** 3
    dv3bydt = K1 * m1 * (r1 - r3) / r13 ** 3 + K1 * m2 * (r2 - r3) / r23 ** 3
    # Velocities (scaled by K2).
    dr1bydt = K2 * v1
    dr2bydt = K2 * v2
    dr3bydt = K2 * v3
    r_derivs = np.concatenate((dr1bydt, dr2bydt, dr3bydt))
    v_derivs = np.concatenate((dv1bydt, dv2bydt, dv3bydt))
    return np.concatenate((r_derivs, v_derivs))
# Pack the initial conditions into the flat state vector odeint expects.
init_params = np.array([r1, r2, r3, v1, v2, v3])
init_params = init_params.flatten()
time_span = np.linspace(0, 20, 500)  # 20 periods sampled at 500 points
# Integrate the equations of motion.
three_body_sol = sci.integrate.odeint(ThreeBodyEquations, init_params, time_span, args=(G, m1, m2, m3))
r1_sol = three_body_sol[:, :3]
r2_sol = three_body_sol[:, 3:6]
r3_sol = three_body_sol[:, 6:9]
# Create the figure and 3D axes.
fig = plt.figure(figsize=(15, 15))
ax = fig.add_subplot(111, projection="3d")
# Plot the orbits, keeping the line artists so they can be animated.
orbit1, = ax.plot(r1_sol[:, 0], r1_sol[:, 1], r1_sol[:, 2], color="darkblue")
orbit2, = ax.plot(r2_sol[:, 0], r2_sol[:, 1], r2_sol[:, 2], color="tab:red")
orbit3, = ax.plot(r3_sol[:, 0], r3_sol[:, 1], r3_sol[:, 2], color="tab:green")
# Mark the final position of each body.
ax.scatter(r1_sol[-1, 0], r1_sol[-1, 1], r1_sol[-1, 2], color="darkblue", marker="o", s=100, label="Alpha Centauri A")
ax.scatter(r2_sol[-1, 0], r2_sol[-1, 1], r2_sol[-1, 2], color="tab:red", marker="o", s=100, label="Alpha Centauri B")
ax.scatter(r3_sol[-1, 0], r3_sol[-1, 1], r3_sol[-1, 2], color="tab:green", marker="o", s=100, label="Third Star")
# Axis labels, title and legend.
ax.set_xlabel("x-Koordinate", fontsize=14)
ax.set_ylabel("y-Koordinate", fontsize=14)
ax.set_zlabel("z-Kordinate", fontsize=14)
ax.set_title("Visualisierung der Orbits von Objekten im Raum\n", fontsize=14)
ax.legend(loc="upper left", fontsize=14)


def _animate(frame):
    """FuncAnimation callback: redraw each orbit up to sample index *frame*."""
    for line, sol in ((orbit1, r1_sol), (orbit2, r2_sol), (orbit3, r3_sol)):
        line.set_data(sol[:frame, 0], sol[:frame, 1])
        line.set_3d_properties(sol[:frame, 2])
    return orbit1, orbit2, orbit3


# Bug fix: the original handed the ODE right-hand side (ThreeBodyEquations)
# to FuncAnimation as the frame callback, which raises TypeError on the very
# first frame; animate the already-solved trajectories instead, and only for
# as many frames as there are solution samples.
ani = animation.FuncAnimation(fig, _animate, frames=len(time_span), interval=50)
plt.show()
{
"api_name": "numpy.array",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": ... |
44407906870 | import wx
import ResizableRuneTag
'''
Created on 23/lug/2011
@author: Marco
'''
class DrawableFrame(wx.Window):
    '''
    Allows the user to place resizable rune tags in an A4-like white frame.
    The configuration realized on this frame is then replicated
    proportionally at export time.
    '''

    def __init__(self, parent, height, width):
        wx.Window.__init__(self, parent)
        # NOTE(review): wx size tuples are (width, height); the (height, width)
        # order below matches the original call sites -- confirm before changing.
        self.SetSize((height, width))
        self.SetMinSize((height, width))
        self.SetMaxSize((height, width))
        self.SetBackgroundColour(wx.Colour(255, 255, 255))
        self.resizableRuneTags = []

    def DrawRuneTag(self, runeTagName, position, size, originalSize, info):
        """Add a new resizable rune tag widget to the frame."""
        self.resizableRuneTags.append(ResizableRuneTag.ResizableRuneTag(self, runeTagName, size, position, originalSize, info))

    def Clear(self):
        """Destroy every rune tag widget currently on the frame."""
        for resizableRuneTag in self.resizableRuneTags:
            resizableRuneTag.Destroy()

    @staticmethod
    def _tags_overlap(tag1, tag2):
        """Return True when the circular slot areas of the two tags intersect."""
        radius1 = (tag1.GetSize().GetHeight()) / 2 - 5
        radius2 = (tag2.GetSize().GetHeight()) / 2 - 5
        deltax = (tag1.GetPosition().x + radius1) - (tag2.GetPosition().x + radius2)
        deltay = (tag1.GetPosition().y + radius1) - (tag2.GetPosition().y + radius2)
        distance = (deltax * deltax + deltay * deltay) ** 0.5
        return distance <= radius1 + radius2

    def checkSpecificPosition(self, changedRuneTag):
        """Warn when *changedRuneTag* overlaps any other tag on the frame.

        Bug fix: the original updated the warning on every pair, so a later
        non-overlapping tag cleared the message set by an earlier, genuinely
        overlapping one.  Now the first overlap found wins, and the message
        is cleared only when no overlap exists at all.
        """
        for tag in self.resizableRuneTags:
            if changedRuneTag != tag and self._tags_overlap(tag, changedRuneTag):
                self.Parent.Parent.runeTagInfo.UpdateOverlap("In the output pdf file\n some slots of "+changedRuneTag.name+" RuneTag\n may laps over "+tag.name+"RuneTag")
                return
        self.Parent.Parent.runeTagInfo.UpdateOverlap("")

    def checkPosition(self):
        """Warn when any pair of tags on the frame overlaps (first pair wins).

        Same clobbering bug fix as checkSpecificPosition(): report the first
        overlapping pair, clear the warning only when there is none.
        """
        size = len(self.resizableRuneTags)
        for i in range(0, size):
            for j in range(i + 1, size):
                tag1 = self.resizableRuneTags[i]
                tag2 = self.resizableRuneTags[j]
                if self._tags_overlap(tag1, tag2):
                    self.Parent.Parent.runeTagInfo.UpdateOverlap("In the output pdf file some slots of\n"+tag1.name+" RuneTag\n may laps over\n"+tag2.name+" RuneTag")
                    return
        self.Parent.Parent.runeTagInfo.UpdateOverlap("")
| mziccard/RuneTagDrawer | DrawableFrame.py | DrawableFrame.py | py | 2,831 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "wx.Window",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "wx.Window.__init__",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "wx.Window",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "wx.Colour",
"line... |
36008540577 | import sqlite3
import os
import shlex
class Database():
    """Thin convenience wrapper around a sqlite3 connection.

    NOTE(review): most statements are assembled with f-strings, so table
    names, column names and conditions must come from trusted code, never
    from user input (SQL-injection risk).
    """

    def __init__(self, db_file):
        """Connect to the SQLite DB; on failure leave conn/cursor as None."""
        try:
            self.conn = sqlite3.connect(db_file)
            self.cursor = self.conn.cursor()
        except BaseException as err:
            # Deliberate best-effort: callers can detect failure via
            # `db.conn is None`.
            self.conn = None
            self.cursor = None

    def create_table(self, table_name, columns):
        """Create *table_name* from a {column_name: sql_type} mapping."""
        query = f"CREATE TABLE IF NOT EXISTS {table_name} ({', '.join([f'{k} {v}' for k, v in columns.items()])})"
        self.cursor.execute(query)
        self.conn.commit()

    def create_index(self, index_name, table_name, column_list):
        """Create *index_name* on *table_name* over *column_list* if missing.

        Bug fix: the original ignored all three parameters and always
        created idx_hash on file_hash(filepath, filehash).
        """
        query = f"CREATE INDEX IF NOT EXISTS {index_name} ON {table_name} ({column_list})"
        self.cursor.execute(query)
        self.conn.commit()

    def delete_table(self, table_name):
        """Drop *table_name* if it exists."""
        query = f"DROP TABLE IF EXISTS {table_name}"
        self.cursor.execute(query)
        self.conn.commit()

    def add_record(self, table_name, record):
        """Insert a {column: value} mapping using parameterized values."""
        query = f"INSERT INTO {table_name} ({', '.join(record.keys())}) VALUES ({', '.join(['?' for _ in record.values()])})"
        self.cursor.execute(query, list(record.values()))
        self.conn.commit()

    def delete_record(self, table_name, condition):
        """Delete rows matching the raw SQL *condition*."""
        query = f"DELETE FROM {table_name} WHERE {condition}"
        self.cursor.execute(query)
        self.conn.commit()

    def run_query(self, query, args=()):
        """Execute an arbitrary (optionally parameterized) query; return rows.

        Bug fix: the original referenced an undefined name ``args`` and so
        raised NameError on every call; it is now an optional tuple.
        """
        self.cursor.execute(query, args)
        return self.cursor.fetchall()

    def show_all_records(self, table_name):
        """Return every row of *table_name*."""
        query = f"SELECT * FROM {table_name}"
        self.cursor.execute(query)
        return self.cursor.fetchall()

    def show_record(self, table_name, filepath):
        """Return rows of *table_name* whose filepath column equals *filepath*.

        NOTE(review): selects the file_hash schema columns
        (filename/filepath/filehash/timestamp), so it only works on tables
        with that layout.
        """
        file_path = (filepath)
        query = f'SELECT filename,filepath, filehash, timestamp FROM {table_name} WHERE filepath = "{file_path}"'
        self.cursor.execute(query)
        return self.cursor.fetchall()

    def update_record(self, table, filepath, filehash):
        """Set the filehash of the row matching *filepath*.

        Bug fix: the original never committed, so the update was rolled
        back when the connection was closed.
        """
        file_path = filepath
        query = f"UPDATE {table} SET filehash = '{filehash}' WHERE filepath = '{file_path}'"
        self.cursor.execute(query)
        self.conn.commit()
        return self.cursor.fetchall()

    @staticmethod
    def is_rec_modifed(filepath, filehash, timestamp):
        """Check a record for any changes (stub: always False for now).

        Bug fix: this was declared without ``self`` inside the class, so an
        instance call bound ``self`` to *filepath*; it is now a staticmethod.
        """
        return False

    def show_duplicate_records(self, table_name, index_name, value):
        """Return filename/filepath/filehash rows where *index_name* == *value*."""
        query = f"SELECT filename, filepath, filehash FROM {table_name} WHERE {index_name} = '{value}'"
        self.cursor.execute(query)
        return self.cursor.fetchall()

    def show_all_tables(self):
        """Return the names of every table in the database."""
        query = "SELECT name FROM sqlite_master WHERE type='table'"
        self.cursor.execute(query)
        return self.cursor.fetchall()

    def close_connection(self):
        """Close the underlying connection."""
        self.conn.close()
if __name__ == '__main__':
    # Smoke test: exercise the CRUD helpers against a throwaway database.
    db = Database('test.db')
    db.create_table('users', {'id': 'INTEGER PRIMARY KEY', 'name': 'TEXT', 'email': 'TEXT'})
    db.add_record('users', {'name': 'Alice', 'email': 'alice@example.com'})
    db.add_record('users', {'name': 'Bob', 'email': 'bob@example.com'})
    db.add_record('users', {'name': 'Charlie', 'email': 'charlie@example.com'})
    print(db.show_all_records('users'))
    # Bug fix: the original called db.show_record('users', "name='Alice'")
    # here, but show_record() selects file_hash-specific columns, so it
    # raised sqlite3.OperationalError and aborted the demo before cleanup,
    # leaving test.db behind.
    db.delete_record('users', "name='Bob'")
    print(db.show_all_records('users'))
    db.delete_table('users')
    db.close_connection()
    os.remove('test.db')
| echeadle/File_Track | app/sqlite_db.py | sqlite_db.py | py | 3,901 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 101,
"usage_type": "call"
}
] |
39259262942 | #!/usr/bin/env python3
import rclpy
from rclpy.node import Node
import speech_recognition as sr
from custom_if.srv import SendSentence
from functools import partial
import time
### Node class
class SpeechToText(Node):
    """ROS2 node that listens on the microphone and forwards commands.

    Any sentence recognized by Google STT (Italian, it-IT) that contains
    the wake word 'Marvin' is sent to the NLU via the SendSentence service.
    """

    def __init__(self):
        super().__init__("stt_node")
        self.get_logger().info("STT node is up.")
        self.stt = sr.Recognizer()
        # NOTE(review): this call loops forever, so __init__ never returns
        # and rclpy.spin() in main() is never reached -- confirm intended.
        self.listen_to_user()

    def listen_to_user(self):
        """Continuously capture audio and dispatch wake-word sentences."""
        self.call_nlu("Welcome")
        while True:
            with sr.Microphone() as source:
                self.stt.adjust_for_ambient_noise(source, duration=0.2)
                audio = self.stt.listen(source)
                try:
                    sentence = "{0}".format(self.stt.recognize_google(audio, language="it-IT"))
                    # Only forward sentences containing the wake word.
                    if 'Marvin' in sentence.split(" "):
                        self.call_nlu(sentence)
                except sr.UnknownValueError:
                    self.get_logger().warn("Waiting for a command.")
                except sr.RequestError as e:
                    self.get_logger().error("STT Error; {0}".format(e))

    def call_nlu(self, sentence):
        """Send *sentence* to the 'send_command' service asynchronously."""
        client = self.create_client(SendSentence, "send_command")
        while not client.wait_for_service(1.0):
            self.get_logger().warn("Waiting for Server...")
        request = SendSentence.Request()
        request.sentence = sentence
        future = client.call_async(request)
        future.add_done_callback(partial(self.callback_call_nlu, sentence=sentence))

    def callback_call_nlu(self, future, sentence):
        """Log the service response; *sentence* is the request that was sent."""
        try:
            response = future.result()
            self.get_logger().info(f"Request solved: {response}")
        except Exception as e:
            # Bug fix: include the failure reason (the original discarded `e`).
            self.get_logger().error(f"Request failed: {e}")
def main(args=None):
    """Initialise rclpy, spin the SpeechToText node, then shut down."""
    rclpy.init(args=args)
    stt_node = SpeechToText()
    rclpy.spin(stt_node)
    rclpy.shutdown()


if __name__ == "__main__":
    main()
| Alessandro-Scarciglia/VoiceAssistant | speech_to_text/speech_to_text/speech_to_text.py | speech_to_text.py | py | 1,732 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rclpy.node.Node",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "speech_recognition.Recognizer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Microphone",
"line_number": 29,
"usage_type": "call"
},
{
"api_n... |
7920943241 | """
Neural Networks - Deep Learning
Heart Disease Predictor ( Binary Classification )
Author: Dimitrios Spanos Email: dimitrioss@ece.auth.gr
"""
import numpy as np
from cvxopt import matrix, solvers
# ------------
# Kernels
# ------------
def poly(x, z, d=3, coef=1, g=1):
    """Polynomial kernel: (g * <x, z> + coef) ** d."""
    gram = np.dot(x, z.T)
    return (g * gram + coef) ** d
def rbf(x, z, sigma):
    """Gaussian (RBF) kernel; expects row-vector inputs (norm over axis=1)."""
    sq_dist = np.linalg.norm(x - z, axis=1) ** 2
    return np.exp(-sq_dist / (2 * sigma ** 2))
def linear(x, z):
    """Linear kernel: plain inner product x @ z.T."""
    return x @ z.T
def sigmoid(x, z, g=1, coef=0):
    """Sigmoid (tanh) kernel: tanh(g * <x, z> + coef)."""
    return np.tanh(np.dot(x, z.T) * g + coef)
# ------------
# SVM
# ------------
class my_SVM:
    """Soft-margin SVM trained by solving the dual QP with cvxopt.

    Labels must be in {-1, +1}.  ``C`` is the box constraint on the dual
    variables; ``kernel`` selects 'linear', 'poly', 'rbf' or 'sigmoid'
    (``sigma`` applies to the 'rbf' kernel only).
    """

    def __init__(self, C, kernel='linear', sigma=1):
        self.C = C
        self.kernel = kernel
        self.sigma = sigma
        self.sv = 0       # support vectors (filled in by fit)
        self.sv_y = 0     # their labels
        self.alphas = 0   # their dual variables
        self.w = 0        # primal weight vector (meaningful for 'linear')
        self.b = 0        # bias term

    def _kernel_eval(self, x, Z):
        """Evaluate K(x, Z[j]) for every row j using the configured kernel.

        Fixes two issues of the original dispatch code: the four-way
        if/elif chain was duplicated in fit() and predict(), and an unknown
        kernel name silently produced an all-zero kernel matrix.
        """
        if self.kernel == 'rbf':
            return rbf(x, Z, sigma=self.sigma)
        if self.kernel == 'poly':
            return poly(x, Z)
        if self.kernel == 'sigmoid':
            return sigmoid(x, Z)
        if self.kernel == 'linear':
            return linear(x, Z)
        raise ValueError("unknown kernel: %r" % (self.kernel,))

    def fit(self, X, y):
        """Solve the dual QP for training data X (m x n) and labels y."""
        # Gram matrix K[i, j] = kernel(x_i, x_j).
        m, n = X.shape
        K = np.zeros((m, m))
        for i in range(m):
            K[i, :] = self._kernel_eval(X[i, np.newaxis], X)
        # Assemble and solve the dual QP:
        #   min 1/2 a^T P a + q^T a   s.t.  G a <= h,  A a = b.
        P = matrix(np.outer(y, y) * K)
        q = matrix(-np.ones((m, 1)))
        A = matrix(matrix(y.T), (1, m), 'd')
        b = matrix(np.zeros(1))
        G = matrix(np.vstack((np.eye(m) * -1, np.eye(m))))
        h = matrix(np.hstack((np.zeros(m), np.ones(m) * self.C)))
        solvers.options['show_progress'] = False
        solution = solvers.qp(P, q, G, h, A, b)
        # Keep only the support vectors (alpha noticeably > 0).
        alphas = np.array(solution['x'])
        S = (alphas > 1e-4).flatten()
        self.sv = X[S]
        self.sv_y = y[S]
        self.w = np.dot((y.reshape(-1, 1) * alphas).T, X)[0]
        self.alphas = alphas[S]  # discard alphas ~= 0
        self.b = np.mean(self.sv_y - np.dot(self.sv, self.w.T))

    def predict(self, X):
        """Return sign(sum_i alpha_i y_i K(sv_i, X) + b) for sample *X*."""
        K_sv_x = self._kernel_eval(self.sv, X)
        # Renamed from `sum`, which shadowed the builtin.
        decision = 0
        for i in range(len(K_sv_x)):
            decision += self.alphas[i] * self.sv_y[i] * K_sv_x[i]
        return np.sign(decision + self.b)
{
"api_name": "numpy.dot",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number... |
42710543766 | '''
@ Carlos Suarez 2020
'''
import requests
import datetime
import time
import json
from cachetools import TTLCache
import ssl
import sys
class MoodleControlador():
    """Client for the Collaborate REST endpoints used by Moodle.

    Covers both the LTI (context-based) integration and the Moodle plugin
    (session-based) integration.  Every request carries a bearer token and
    verifies TLS against *cert*.
    """

    def __init__(self, domain, token, cert):
        self.domain = domain  # API host name
        self.token = token    # OAuth bearer token
        self.cert = cert      # CA bundle / certificate for TLS verification

    # ---------------- Moodle LTI ----------------
    def getGrabacionesMoodleContextoLTI(self, moodle_id, tiempo):
        """Return the LTI context id for *moodle_id*, or None if absent.

        *tiempo* is currently unused but kept for interface compatibility.
        """
        endpoint = 'https://' + self.domain + '/contexts/?extId=' + moodle_id
        bearer = "Bearer " + self.token
        headers = {
            "Authorization": bearer,
            'Content-Type': 'application/json',
            'Accept': 'application/json'
        }
        r = requests.get(endpoint, headers=headers, verify=self.cert)
        if r.status_code == 200:
            jsonInfo = json.loads(r.text)
            if jsonInfo['size'] > 0:
                contexto_id = jsonInfo['results'][0]['id']
                return contexto_id
            else:
                return None
        else:
            print("Error Moodle ContextoLTI:", str(r))

    def grabacionesMoodleLTI(self, contexto_id):
        """Return the recordings JSON for an LTI context (None on error)."""
        endpoint = 'https://' + self.domain + '/recordings/?contextId=' + contexto_id
        bearer = "Bearer " + self.token
        headers = {
            "Authorization": bearer,
            'Content-Type': 'application/json',
            'Accept': 'application/json'
        }
        # Bug fix: this was the only request issued without verify=self.cert,
        # silently skipping the configured TLS verification.
        r = requests.get(endpoint, headers=headers, verify=self.cert)
        if r.status_code == 200:
            jsonInfo = json.loads(r.text)
            return jsonInfo
        else:
            print("Error GrabacionesLTL: ", str(r))

    def get_moodleLTI_recording_data(self, recording_id):
        """Return the playback data for one LTI recording (None on error)."""
        authStr = 'Bearer ' + self.token
        url = 'https://' + self.domain + '/recordings/' + recording_id + '/data'
        credencial = {
            'Authorization': authStr,
            'Content-Type': 'application/json',
            'Accept': 'application/json'
        }
        r = requests.get(url, headers=credencial, verify=self.cert)
        if r.status_code == 200:
            res = json.loads(r.text)
            return res
        else:
            print(r)

    # ---------------- Moodle plugin ----------------
    def moodleSesionName(self, sesionId):
        """Return the display name of a Collaborate session (None on error)."""
        endpoint = 'https://' + self.domain + '/sessions/' + sesionId
        credencial = {
            "Authorization": "Bearer " + self.token,
            'Content-Type': 'application/json',
            'Accept': 'application/json'
        }
        r = requests.get(endpoint, headers=credencial, verify=self.cert)
        if r.status_code == 200:
            res = json.loads(r.text)
            return res['name']
        else:
            print("Error Session:", str(r))

    def listaCompletaSessiones(self, criteria):
        """Return [{'id', 'name'}] of sessions whose name contains *criteria*."""
        listaFiltrada = []
        endpoint = 'https://' + self.domain + '/sessions'
        credencial = {
            "Authorization": "Bearer " + self.token,
            'Content-Type': 'application/json',
            'Accept': 'application/json'
        }
        r = requests.get(endpoint, headers=credencial, verify=self.cert)
        if r.status_code == 200:
            res = json.loads(r.text)
            resultado = res['results']
            for sesion in resultado:
                if criteria in sesion['name']:
                    listaFiltrada.append({'id': sesion['id'], 'name': sesion['name']})
            return listaFiltrada
        else:
            print("Error Session:", str(r))

    def listaCompletaMoodleGrabaciones(self):
        """Print and return [{'id', 'name'}] for every recording.

        Improvement: the original only printed the list and returned None;
        it is now also returned (callers ignoring the return are unaffected).
        """
        listaGrabaciones = []
        endpoint = 'https://' + self.domain + '/recordings'
        credencial = {
            'Authorization': 'Bearer ' + self.token,
            'Accept': 'application/json'
        }
        r = requests.get(endpoint, headers=credencial, verify=self.cert)
        if r.status_code == 200:
            jsonInfo = json.loads(r.text)
            resultado = jsonInfo['results']
            if len(resultado) == 0:
                print("No recordings found")
            else:
                for grabacion in resultado:
                    listaGrabaciones.append({'id': grabacion['id'], 'name': grabacion['name']})
                print(listaGrabaciones)
        else:
            print("Error listaGrabación Moodle:", str(r))
        return listaGrabaciones

    def listaMoodleGrabaciones(self, sname):
        """Return per-recording metadata for recordings named *sname*.

        Each entry carries recording_id, recording_name, duration,
        storageSize (0 when the API omits it) and created.  Returns None
        when the request fails or yields no results.
        """
        endpoint = 'https://' + self.domain + '/recordings?name=' + sname
        credencial = {
            "Authorization": "Bearer " + self.token,
            'Content-Type': 'application/json',
            'Accept': 'application/json'
        }
        r = requests.get(endpoint, headers=credencial, verify=self.cert)
        if r.status_code != 200:
            return None
        res = json.loads(r.text)
        recording_ids = []
        try:
            resultados = res['results']
            if len(resultados) <= 0:
                return None
            for item in resultados:
                recording_ids.append({
                    'recording_id': item['id'],
                    'recording_name': item['name'],
                    'duration': item['duration'],
                    'storageSize': item.get('storageSize', 0),
                    'created': item['created']
                })
            return recording_ids
        except TypeError:
            # res['results'] was not a sized sequence (e.g. None).
            return None
| sfc-gh-csuarez/PyCollab | controladores/MoodleControlador.py | MoodleControlador.py | py | 6,010 | python | en | code | 15 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": ... |
18680754942 |
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
import pathlib
# NOTE(review): data_dir below is only used for the image count; the
# dataset loaders further down read "./Veriseti" directly -- confirm
# whether the two paths are meant to differ.
data_dir = "./Covid(CNN)/Veriseti"
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.jpeg')))
print(image_count)
'''
roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
PIL.Image.open(str(roses[1]))
tulips = list(data_dir.glob('tulips/*'))
PIL.Image.open(str(tulips[0]))
PIL.Image.open(str(tulips[1]))
'''
batch_size = 32
img_height = 180
img_width = 180
# Use 80% of the images for training and 20% for validation.
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
  "./Veriseti",
  validation_split=0.2,
  subset="training",
  seed=123,
  image_size=(img_height, img_width),
  batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
  "./Veriseti",
  validation_split=0.2,
  subset="validation",
  seed=123,
  image_size=(img_height, img_width),
  batch_size=batch_size)
class_names = train_ds.class_names
print(class_names)
import matplotlib.pyplot as plt
# Visualize the data: the first 9 images of the training dataset.
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
  for i in range(9):
    ax = plt.subplot(3, 3, i + 1)
    plt.imshow(images[i].numpy().astype("uint8"))
    plt.title(class_names[labels[i]])
    plt.axis("off")
# Each image batch is a tensor of 32 images of shape 180x180x3 (the last
# dimension being the RGB channels); label_batch has shape (32,), the
# labels matching those 32 images.
for image_batch, labels_batch in train_ds:
  print(image_batch.shape)
  print(labels_batch.shape)
  break
AUTOTUNE = tf.data.experimental.AUTOTUNE
# Dataset.cache() keeps the images in memory after the first epoch so the
# input pipeline does not become a bottleneck while training the model.
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
# Dataset.prefetch() overlaps data preprocessing with model execution.
# RGB channel values are in the [0, 255] range, which is not ideal for a
# neural network; a Rescaling layer standardizes values into [0, 1].
normalization_layer = layers.experimental.preprocessing.Rescaling(1./255)
# The layer can be applied to the dataset by calling map():
normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
image_batch, labels_batch = next(iter(normalized_ds))
first_image = image_batch[0]
# Notice the pixels values are now in `[0,1]`.
print(np.min(first_image), np.max(first_image))
# Alternatively, the normalization layer can be included inside the model
# definition, which simplifies deployment; that second approach is used here.
num_classes = 4
# Build the model.
model = Sequential([
  layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
  layers.Conv2D(16, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(32, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(64, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Flatten(),
  layers.Dense(128, activation='relu'),
  layers.Dense(num_classes)
])
# Compile the model.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# Model summary.
model.summary()
# Train the model.
epochs=10
history = model.fit(
  train_ds,
  validation_data=val_ds,
  epochs=epochs
)
# Plot loss and accuracy curves for the training and validation sets.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# Data augmentation: random horizontal flip, rotation and zoom layers.
data_augmentation = keras.Sequential(
  [
    layers.experimental.preprocessing.RandomFlip("horizontal",
                                                 input_shape=(img_height,
                                                              img_width,
                                                              3)),
    layers.experimental.preprocessing.RandomRotation(0.1),
    layers.experimental.preprocessing.RandomZoom(0.1),
  ]
)
# Visualize a few augmented examples by applying the augmentation to the
# same image several times.
plt.figure(figsize=(10, 10))
for images, _ in train_ds.take(1):
  for i in range(9):
    augmented_images = data_augmentation(images)
    ax = plt.subplot(3, 3, i + 1)
    plt.imshow(augmented_images[0].numpy().astype("uint8"))
    plt.axis("off")
# Build a new network that uses layers.Dropout for regularization.
model = Sequential([
  data_augmentation,
  layers.experimental.preprocessing.Rescaling(1./255),
  layers.Conv2D(16, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(32, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(64, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Dropout(0.2),
  layers.Flatten(),
  layers.Dense(128, activation='relu'),
  layers.Dense(num_classes)
])
# Compile and train the model.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()
epochs = 15
history = model.fit(
  train_ds,
  validation_data=val_ds,
  epochs=epochs
)
# Training results.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# Use the model to classify an image that is in neither the training nor
# the validation set.
img_path = "./Veriseti/Covid.jpeg"
img = keras.preprocessing.image.load_img(
    img_path, target_size=(img_height, img_width)
)
img_array = keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0) # Create a batch
predictions = model.predict(img_array)
score = tf.nn.softmax(predictions[0])
print(
    "This image most likely belongs to {} with a {:.2f} percent confidence."
    .format(class_names[np.argmax(score)], 100 * np.max(score))
)
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.preprocessing.image_dataset_from_directory",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 34,
"usage_type": "attribute"
... |
# Scraps of paper, each bearing a single digit, are scattered around.  We want
# to know how many distinct prime numbers can be made by gluing scraps
# together.  Given the string `numbers` holding the scattered digits, return
# how many primes can be formed from the paper scraps.
# Constraints:
#  - numbers is a string of length 1 to 7.
#  - numbers consists only of the digits 0-9; "013" means scraps bearing
#    0, 1 and 3 are scattered.
def find_prime(numbers) -> int:
    """Count the distinct primes formed by concatenating permutations of
    the digit characters in *numbers*.

    numbers: string of digits '0'-'9' (length 1..7).
    Returns the number of distinct prime values.
    """
    from itertools import permutations

    def is_prime(n: int) -> bool:
        """Deterministic trial-division primality test."""
        if n == 2:
            return True
        if n < 2 or n % 2 == 0:
            return False
        for i in range(3, int(n ** 0.5) + 1, 2):
            if n % i == 0:
                return False
        return True

    # A set both de-duplicates values (e.g. '011' and '11' both give 11)
    # and replaces the original O(n) list-membership test.
    candidates = set()
    for length in range(1, len(numbers) + 1):
        for perm in permutations(numbers, length):
            candidates.add(int(''.join(perm)))
    return sum(1 for value in candidates if is_prime(value))
{
"api_name": "itertools.permutations",
"line_number": 26,
"usage_type": "call"
}
] |
16053211401 | import os
import sys
import glob
import argparse
from lsdo_viz.problem import Problem
from lsdo_viz.utils import clean, get_viz, get_args, exec_python_file
def main_viz(args=None):
    """CLI entry point: parse flags, configure Problem, run the viz script."""
    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser()
    parser.add_argument('args_file_name', nargs='?', default='viz_args.py')
    # All optional flags share the same shape: absent -> None, bare -> True.
    for long_flag, short_flag in [
        ('--clean_data', '-cd'),
        ('--clean_frames', '-cf'),
        ('--viz_initial', '-vi'),
        ('--viz_final', '-vf'),
        ('--viz_initial_show', '-vis'),
        ('--viz_final_show', '-vfs'),
        ('--viz_all', '-va'),
        ('--movie', '-m'),
    ]:
        parser.add_argument(long_flag, short_flag, nargs='?', default=None, const=True)
    parsed_args = parser.parse_args(args)

    args = get_args(parsed_args.args_file_name)

    # Only open interactive windows when a *_show flag was passed;
    # otherwise force the non-interactive Agg backend.
    show = parsed_args.viz_initial_show or parsed_args.viz_final_show
    if not show:
        import matplotlib
        matplotlib.use('Agg')

    if parsed_args.clean_data:
        clean(args.data_dir)
    if parsed_args.clean_frames:
        clean(args.frames_dir)

    # Collect the requested rendering modes in a fixed order.
    modes = []
    if parsed_args.viz_initial or parsed_args.viz_initial_show:
        modes.append('viz_initial')
    if parsed_args.viz_final or parsed_args.viz_final_show:
        modes.append('viz_final')
    if parsed_args.viz_all:
        modes.append('viz_all')
    if parsed_args.movie:
        modes.append('movie')

    # Hand shared state to Problem, then execute the run script per mode.
    Problem.args = args
    Problem.viz = get_viz(args.viz_file_name)
    Problem.viz.args = args
    Problem.viz.show = show
    for mode in modes:
        Problem.mode = mode
        exec_python_file(args.run_file_name)
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "lsdo_viz.utils.get_args",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplot... |
41815384400 | from urllib import response
import requests
from pprint import pprint
from time import sleep
import os
from sqlalchemy import null
url = "http://10.0.1.10:8080"
# ------------------------ PRINT ------------------------
def menu():
    """Clear the terminal and print the main CRUD menu (users/projects)."""
    os.system('clear') or None  # `or None` only discards the exit status
    print("-------------------:-------------------")
    print("|  1  | Cadastrar Usuario             |")
    print("|  2  | Exibir Usuario                |")
    print("|  3  | Alterar Usuario               |")
    print("|  4  | Excluir Usuario               |")
    print("-------------------:-------------------")
    print("|  5  | Cadastrar Projeto             |")
    print("|  6  | Exibir Projeto                |")
    print("|  7  | Alterar Projeto               |")
    print("|  8  | Excluir Projeto               |")
    print("-------------------:-------------------")
    print("|  9  | SAIR                          |")
    print("-------------------:-------------------")
def menu1():
    """Clear the terminal and print the user-type submenu (person/company)."""
    os.system('clear') or None  # `or None` only discards the exit status
    print("-------------------:-------------------")
    print("|  1  | Pessoa Física                 |")
    print("|  2  | Pessoa Jurídica       Sair[0] |")
    print("-------------------:-------------------")
def menu2():
    """Print the change-confirmation prompt ([1] yes / [2] no)."""
    for row in (
        "-------------------:-------------------",
        "|            Deseja alterar?           |",
        "|           [1] Sim   [2] Não          |",
        "-------------------:-------------------",
    ):
        print(row)
def main():
    """Top-level menu loop: dispatch each option until the user picks 9."""
    opc = None
    while opc != "9":
        menu()
        opc = input("Informe uma opcao: ")
        if opc == "1":  # register user
            cadastroUser()
        elif opc == "2":  # show user
            exibirUser()
        elif opc == "3":  # update user
            alterarUser()
        elif opc == "4":  # delete user
            excluirUser()
        elif opc == "5":  # register project
            cadastroProj()
        elif opc == "6":  # show project
            exibirProj()
        elif opc == "7":  # update project
            alterarProj()
        elif opc == "8":  # delete project
            excluirProj()
        elif opc == "9":
            # Option 9 terminates the whole process before the pause below.
            exit()
        input("Pressione ENTER para continuar!\n")
def jsonPrint(resp):
    """Pretty-print a 200 response body; report a 201 as a deletion;
    echo any other response object as-is."""
    status = resp.status_code
    if status == 200:
        pprint(resp.json())
        return
    if status == 201:
        print("deletado!")
    print(resp)
# ------------------------ USER ------------------------
def cadastroUser():
    """Interactively register a user (pessoa fisica or juridica) via the API.

    Prompts for the person type, collects the matching fields and POSTs
    them to /fisica or /juridica. Option 0 aborts without sending anything.
    """
    opc = None
    # BUG FIX: input() returns a string, so the original comparison against
    # the ints 1/2/0 was always True. Every valid branch breaks, so
    # comparing against strings preserves behavior while making the loop
    # condition meaningful.
    while opc not in ("1", "2", "0"):
        menu1()
        opc = input("Informe uma opcao: ")
        if opc == "1":  # individual (pessoa física)
            nome = input("Informe nome: ")
            idade = input("Informe idade: ")
            cpf = input("Informe cpf: ")
            instEnsino = input("Informe Instuicao de ensino: ")
            data = {"nome": nome, "idade": idade, "cpf": cpf, "instEnsino": instEnsino}
            requests.post(f"{url}/fisica", json=data)
            break
        elif opc == "2":  # company (pessoa jurídica)
            nome = input("Informe nome: ")
            segmento = input("Informe segmento: ")
            cnpj = input("Informe cnpj: ")
            data = {"nome": nome, "segmento": segmento, "cnpj": cnpj}
            requests.post(f"{url}/juridica", json=data)
            break
        elif opc == "0":
            break
        else:
            print("Opção invalida!")
            input("Pressione ENTER para continuar!\n")
def exibirUser():
    """Look up and display one user by CPF (fisica) or CNPJ (juridica)."""
    opc = None
    # BUG FIX: opc is a string; the original int comparison was always True.
    # All valid branches break, so the string comparison is behavior-neutral.
    while opc not in ("1", "2", "0"):
        menu1()
        opc = input("Informe uma opcao: ")
        if opc == "1":  # individual (pessoa física)
            cpf = input("Informe o cpf: ")
            resp = requests.get(f"{url}/fisica/" + cpf)
            jsonPrint(resp)
            break
        elif opc == "2":  # company (pessoa jurídica)
            cnpj = input("Informe o cnpj: ")
            resp = requests.get(f"{url}/juridica/" + cnpj)
            jsonPrint(resp)
            break
        elif opc == "0":
            break
        else:
            print("Opção invalida!")
            input("Pressione ENTER para continuar!\n")
def alterarUser():
    """Interactively update a user: show the current record, confirm,
    collect the new fields and PUT them to the API."""
    opc = None
    # NOTE(review): opc holds a string, so comparing against the ints 1/2/0
    # is always True; the loop exits only through break. Rewriting the
    # condition as "1"/"2"/"0" would change the re-prompt behavior of
    # branch "1" -- confirm the intent before fixing.
    while opc != 1 and opc != 2 and opc != 0:
        menu1()
        opc = input("Informe uma opcao: ")
        if opc == "1":  # individual (pessoa física)
            cpf = input("Informe o cpf: ")
            resp = requests.get(f"{url}/fisica/" + cpf)
            jsonPrint(resp)
            menu2()
            opc1 = input("Informe uma opcao: ")
            if opc1 == "1":
                nome = input("Informe nome: ")
                idade = input("Informe idade: ")
                instEnsino = input("Informe Instuicao de ensino: ")
                data = {"nome": nome, "idade": idade, "cpf": cpf, "instEnsino": instEnsino}
                requests.put(f"{url}/fisica/" + cpf, json=data)
            else:
                break
            # Only the física branch pauses here; the jurídica branch does
            # not -- asymmetry kept as found.
            input("Pressione ENTER para continuar!\n")
        elif opc == "2":  # company (pessoa jurídica)
            cnpj = input("Informe o cnpj: ")
            resp = requests.get(f"{url}/juridica/" + cnpj)
            jsonPrint(resp)
            menu2()
            opc1 = input("Informe uma opcao: ")
            if opc1 == "1":
                nome = input("Informe nome: ")
                segmento = input("Informe segmento: ")
                data = {"nome": nome, "segmento": segmento, "cnpj": cnpj}
                requests.put(f"{url}/juridica/" + cnpj, json=data)
            else:
                break
        elif opc == "0":
            break
        else:
            print("Opção invalida!")
            input("Pressione ENTER para continuar!\n")
def excluirUser():
    """Interactively delete a user by CPF (fisica) or CNPJ (juridica)."""
    opc = None
    # NOTE(review): opc is a string, so comparing with the ints 1/2/0 is
    # always True; branches "1"/"2" have no break, so the menu re-prompts
    # until "0". Fixing the comparison would exit after one deletion --
    # confirm the intended behavior before changing.
    while opc != 1 and opc != 2 and opc != 0:
        menu1()
        opc = input("Informe uma opcao: ")
        if opc == "1":
            cpf = input("Informe o cpf: ")
            resp = requests.delete(f"{url}/fisica/" + cpf)
            jsonPrint(resp)
        elif opc == "2":
            cnpj = input("Informe o cnpj: ")
            resp = requests.delete(f"{url}/juridica/" + cnpj)
            jsonPrint(resp)
        elif opc == "0":
            break
        else:
            print("Opção invalida!")
            input("Pressione ENTER para continuar!\n")
# ------------------------ PROJETO ------------------------
def cadastroProj():
    """Interactively register a project and POST it to /projeto.

    Collects the project fields, then asks whether the owner is an
    individual (CPF) or a company (CNPJ). Option 0 aborts the registration.
    """
    cpf = None
    cnpj = None
    nome = input("Informe nome: ")
    segmento = input("Informe o segmento: ")
    descricao = input("Informe a descrição: ")
    opc = None
    # BUG FIX: input() returns a string, so the original comparison with
    # the ints 1/2/0 was always True (every valid branch breaks anyway).
    while opc not in ("1", "2", "0"):
        menu1()
        opc = input("Informe uma opcao: ")
        if opc == "1":  # individual owner (pessoa física)
            cpf = input("Informe cpf: ")
            cnpj = "-"
            break
        elif opc == "2":  # company owner (pessoa jurídica)
            cnpj = input("Informe cnpj: ")
            cpf = "-"
            break
        elif opc == "0":
            # BUG FIX: the original still POSTed the project with
            # cpf=cnpj=None after the user aborted; abort cleanly instead.
            return
        else:
            print("Opção invalida!")
            input("Pressione ENTER para continuar!\n")
    data = {"nome": nome, "segmento": segmento, "descricao": descricao, "cpf": cpf, "cnpj": cnpj}
    requests.post(f"{url}/projeto", json=data)
def exibirProj():
    """Prompt for a project name and pretty-print the API's answer."""
    project_name = input("Nome do Projeto: ")
    jsonPrint(requests.get(f"{url}/projeto/" + project_name))
def alterarProj():
    """Show a project by name, confirm, and PUT the updated fields."""
    opc = None
    # BUG FIX: opc is a string; the original int comparison was always
    # True. Both branches break, so the loop body runs exactly once either
    # way -- the string comparison just makes the condition meaningful.
    while opc not in ("1", "2", "0"):
        nome = input("Informe o nome: ")
        resp = requests.get(f"{url}/projeto/" + nome)
        jsonPrint(resp)
        menu2()
        opc = input("Informe uma opcao: ")
        if opc == "1":
            newname = input("Informe nome: ")
            segmento = input("Informe o segmento: ")
            descricao = input("Informe a descrição: ")
            data = {"nome": newname, "segmento": segmento, "descricao": descricao}
            requests.put(f"{url}/projeto/" + nome, json=data)
            break
        else:
            break
def excluirProj():
    """Prompt for a project name and delete it through the API."""
    project_name = input("Informe o nome: ")
    jsonPrint(requests.delete(f"{url}/projeto/" + project_name))
if __name__ == "__main__":
main() | hencabral/Python-BoxCode-API | cliente.py | cliente.py | py | 8,346 | python | pt | code | 0 | github-code | 6 | [
{
"api_name": "os.system",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": ... |
14493907058 | # -*- coding: utf-8 -*- #
'''
--------------------------------------------------------------------------
# File Name: PATH_ROOT/utils/signal_vis.py
# Author: JunJie Ren
# Version: v1.1
# Created: 2021/06/15
# Description: — — — — — — — — — — — — — — — — — — — — — — — — — — —
--> DD信号识别(可解释)系列代码 <--
-- 可视化信号输入
— — — — — — — — — — — — — — — — — — — — — — — — — — —
# Module called: <0> PATH_ROOT/config.py
<1> PATH_TOOT/dataset/RML2016.py
— — — — — — — — — — — — — — — — — — — — — — — — — — —
# Function List: <0> drawAllOriSignal():
绘制所有信号输入样本的图像,并保存至相应标签的文件夹下
<1> showOriSignal():
绘制并展示一个样本信号的图像
<2> showImgSignal():
绘制并展示一个信号样本的二维可视化图像
<3> showCamSignal():
叠加信号与CAM图,可视化CAM解释结果,并按类型保存
<4> mask_image():
软阈值擦除CAM对应的判别性特征区域
— — — — — — — — — — — — — — — — — — — — — — — — — — —
# Class List: None
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# History:
| <author> | <version> | <time> | <desc>
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
<0> | JunJie Ren | v1.0 | 2020/06/15 | creat
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
<1> | JunJie Ren | v1.1 | 2020/07/09 | 优化无name的数据集调用问题
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
<2> | JunJie Ren | v1.2 | 2020/07/13 | 增加CAM阈值擦除函数
--------------------------------------------------------------------------
'''
import sys
import os
import cv2
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import matplotlib; matplotlib.use('TkAgg')
from sklearn.metrics import confusion_matrix
sys.path.append("../")
from app.configs import cfgs
from app.dataset.RML2016 import loadNpy
# from app.dataset.RML2016_04c.classes import modName
def t2n(t):
    """Detach a torch tensor and return it as a CPU numpy array of
    (truncated) integers."""
    # BUG FIX: np.int was removed in NumPy 1.24; it was an alias for the
    # builtin int, which astype accepts directly.
    return t.detach().cpu().numpy().astype(int)
def fig2data(fig):
    """
    fig = plt.figure()
    image = fig2data(fig)
    @brief Convert a Matplotlib figure to a 3D numpy array with RGBA channels and return it
    @param fig a matplotlib figure
    @return a numpy 3D array of RGBA values
    """
    import PIL.Image as Image
    # Render the figure so the canvas pixel buffer is populated.
    fig.canvas.draw()
    w, h = fig.canvas.get_width_height()
    # BUG FIX: np.fromstring / ndarray.tostring were removed from NumPy;
    # also the buffer is laid out row-major as (height, width, 4), not
    # (width, height, 4).
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8).copy()
    buf.shape = (h, w, 4)
    # canvas.tostring_argb gives the pixmap in ARGB; roll alpha last -> RGBA.
    buf = np.roll(buf, 3, axis=2)
    image = Image.frombytes("RGBA", (w, h), buf.tobytes())
    image = np.asarray(image)
    return image
def showOriSignal(sample, mod_name, idx):
    ''' Plot one sample's I/Q signal and return it as a BGR image array. '''
    # sample is indexed [0] first -- presumably shaped (1, N, 2); confirm
    # against the loader.
    signal_data = sample[0]
    figure = plt.figure(figsize=(9, 6))
    plt.title(str(idx)+" "+str(mod_name), fontsize=30)
    plt.xlabel('N', fontsize=20)
    plt.ylabel("Value", fontsize=20)
    plt.plot(signal_data[:, 0], label = 'I', linewidth=2.0)
    plt.plot(signal_data[:, 1], color = 'red', label = 'Q', linewidth=2.0)
    plt.legend(loc="upper right", fontsize=30)
    plt.close()
    # Convert the (closed, not shown) figure to an RGB array, then to BGR
    # for OpenCV consumers.
    image = fig2data(figure)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    return image
def showCamSignal(signal, CAM, mod):
    """
    Args:
        signal: numpy.ndarray(size=(1, 128, 2), dtype=np.float)
        CAM: numpy.ndarray(size=(128, 2), dtype=np.float)
    Funcs:
        Overlay the CAM heatmap on the I/Q signal to visualize the
        explanation result; returns the rendered figure as a BGR image.
    """
    # Draw the signal (scaled so it stays visible over the imshow extent).
    signal_data = signal[0]
    sig_len, channel = signal_data.shape
    figure = plt.figure(figsize=(18, 12))
    plt.title(mod, fontsize=26)
    plt.xlabel('N', fontsize=20)
    plt.ylabel("Value", fontsize=20)
    plt.plot(signal_data[:, 0]*(sig_len//10), label = 'I' ,linewidth=4.0)
    plt.plot(signal_data[:, 1]*(sig_len//10), color = 'red', label = 'Q', linewidth=4.0)
    plt.legend(loc="upper right", fontsize=26)
    # Draw the CAM as a heatmap stretched across the plotted signal range.
    sig_min, sig_max = np.min(signal_data), np.max(signal_data)
    CAM = CAM.T  # (2, 128)
    CAM = CAM - np.min(CAM)
    CAM = CAM / np.max(CAM)  # normalize CAM values to [0, 1]
    plt.imshow(CAM, cmap='jet', extent=[0., sig_len, (sig_min-0.5)*(sig_len//10), (sig_max+0.5)*(sig_len//10)]) # jet, rainbow
    # plt.colorbar()
    '''
    save_path = "figs_CAM_ACARS/{}".format(mod_name)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    plt.savefig(save_path + '/' + str(idx+1)+"_CAM")
    plt.close()
    '''
    # plt.savefig("figs/CAM_cur")
    # plt.show()
    image = fig2data(figure)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    plt.close()
    return image
def plot_confusion_matrix(y_true, y_pred, labels, title='Normalized confusion matrix', intFlag = 0):
    ''' Plot a (normalized) confusion matrix, save it to disk and return it
    as a BGR image array. intFlag selects integer counts over percentages. '''
    cmap = plt.cm.Blues
    ''' 颜色参考http://blog.csdn.net/haoji007/article/details/52063168'''
    cm = confusion_matrix(y_true, y_pred)
    tick_marks = np.array(range(len(labels))) + 0.5
    np.set_printoptions(precision=2)
    # Normalize rows only when no row sum is zero; otherwise fall back to
    # raw integer counts to avoid dividing by zero.
    if cm.sum(axis=1)[:, np.newaxis].all() != 0:
        cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    else:
        intFlag = 1
    figure = plt.figure(figsize=(10, 9), dpi=360)
    ind_array = np.arange(len(labels))
    x, y = np.meshgrid(ind_array, ind_array)
    # intFlag = 0  # whether cell text is rendered as integer counts or percentages
    for x_val, y_val in zip(x.flatten(), y.flatten()):
        if (intFlag):
            c = cm[y_val][x_val]
            plt.text(x_val, y_val, "%d" % (c,), color='red', fontsize=12, va='center', ha='center')
        else:
            c = cm_normalized[y_val][x_val]
            if (c > 0.0001):
                # draw the cell value; font size/color can be tuned here
                plt.text(x_val, y_val, "%0.2f" % (c*100,) + "%", color='red', fontsize=10, va='center', ha='center')
            else:
                plt.text(x_val, y_val, "%d" % (0,), color='red', fontsize=10, va='center', ha='center')
    if(intFlag):
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
    else:
        plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)
    # Minor ticks draw the grid lines between cells.
    plt.gca().set_xticks(tick_marks, minor=True)
    plt.gca().set_yticks(tick_marks, minor=True)
    plt.gca().xaxis.set_ticks_position('none')
    plt.gca().yaxis.set_ticks_position('none')
    plt.grid(True, which='minor', linestyle='-')
    plt.gcf().subplots_adjust(bottom=0.15)
    plt.title('Confusion Matrix', fontsize=18)
    plt.colorbar()
    xlocations = np.array(range(len(labels)))
    plt.xticks(xlocations, labels, rotation=90)
    plt.yticks(xlocations, labels)
    plt.ylabel('Index of True Classes')
    plt.xlabel('Index of Predict Classes')
    plt.savefig('./app/figs/confusion_matrix.jpg', dpi=300)
    image = fig2data(figure)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    return image
    # plt.title(title)
    # plt.show()
def drawAllOriSignal(X, Y):
    """
    Args:
        X: numpy.ndarray(size = (bz, 1, 128, 2)), signals to visualize
        Y: numpy.ndarray(size = (bz,)), labels of the signals
    Returns:
        None
    Funcs:
        Plot every input signal and save each figure under a folder named
        after its label.
    """
    for idx in range(len(X)):
        if (idx+1)%50 == 0:
            print("{} complete!".format(idx+1))
        signal_data = X[idx][0]
        # NOTE(review): modName is only defined when the commented-out
        # RML2016_04c.classes import at the top of the file is restored;
        # with other datasets the fallback branch is used.
        mod_name = str(modName[Y[idx]], "utf-8") \
            if cfgs.dataset_name == "RML2016.04c" else "label-"+str(t2n(Y[idx]))
        plt.figure(figsize=(6, 4))
        plt.title(mod_name)
        plt.xlabel('N')
        plt.ylabel("Value")
        plt.plot(signal_data[:, 0], label = 'I')
        plt.plot(signal_data[:, 1], color = 'red', label = 'Q')
        plt.legend(loc="upper right")
        save_path = "../figs/original_signal/{}".format(mod_name)
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        plt.savefig(save_path + '/' + str(idx+1))
        plt.close()
    print(X.shape)
    print(Y.shape)
    print("Complete the drawing of all original signals !!!")
def showImgSignal(sample, label):
    ''' Render one signal sample as a 2-D image (I in the top half, Q in
    the bottom half) and display it with OpenCV. '''
    data = sample[0].T  # 2*128
    # Normalize values to [0, 1] for display.
    data = data - np.min(data)
    data = data / np.max(data)
    # NOTE(review): modName requires the commented-out dataset import at the
    # top of the file; otherwise only the fallback label string is usable.
    mod_name = str(modName[label], "utf-8")\
        if cfgs.dataset_name == "RML2016.04c" else "label-"+str(t2n(label))
    # print(data.shape)
    h, sig_len = data.shape
    # Tile the signal rows into a square image so it is visible on screen.
    img_sig = np.empty([sig_len, sig_len], dtype = float)
    # for row in range(int(sig_len/h)):
    #     img_sig[row*h:row*h+h, :] = data
    for row in range(sig_len):
        if row<sig_len/2:
            img_sig[row:row+1, :] = data[0]
        else:
            img_sig[row:row+1, :] = data[1]
    img_sig = cv2.resize(img_sig, (sig_len*2,sig_len*2))
    cv2.imshow(mod_name, img_sig)
    cv2.waitKey(0)
    return img_sig
def mask_image(cam, image, reserveORerase):
    """
    Args:
        cam: numpy.ndarray(size=(4096, 2), dtype=np.float), 0-1
        image: torch.Tensor, torch.Size([1, 4096, 2])
        reserveORerase: bool, keep (0) or erase (1) the discriminative region
    Funcs:
        Soft-threshold erase/keep of the discriminative regions highlighted
        by the CAM; returns the masked image as a float CUDA tensor.
    """
    cam = torch.from_numpy(cam).cuda()
    # Sigmoid turns the thresholded CAM into a soft 0..1 mask; omega
    # controls the sharpness of the transition around Erase_thr.
    mask = torch.sigmoid(cfgs.CAM_omega * (cam - cfgs.Erase_thr)).squeeze()
    masked_image = image - (image * mask) if reserveORerase else image * mask
    return masked_image.float()
def mask_image_hard(cam, image, reserveORerase, thr):
    ''' Hard-threshold erase: binarize the CAM at `thr` and erase (or keep)
    the region it selects. '''
    mask = np.zeros_like(cam)
    mask[cam >= thr] = 1
    mask[cam < thr] = 0
    mask = torch.from_numpy(mask).cuda()
    # print(mask.shape, image.shape)
    masked_image = image - (image * mask) if reserveORerase else image * mask
    return masked_image.float()
if __name__ == "__main__":
    # Smoke-test the visualizers on the training split.
    x_train, y_train, x_test, y_test = loadNpy(cfgs.train_path, cfgs.test_path)
    print(x_train.shape, y_train.shape)
    # drawAllOriSignal(X=x_train, Y=y_train)
    for idx in range(len(x_train)):
        showImgSignal(x_train[idx], y_train[idx])
        # NOTE(review): showOriSignal takes (sample, mod_name, idx); this
        # two-argument call would raise TypeError -- confirm intended usage.
        showOriSignal(x_train[idx], y_train[idx])
| jjRen-xd/PyOneDark_Qt_GUI | app/utils/signal_vis.py | signal_vis.py | py | 11,107 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "matplotlib.use",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "numpy.int",
"line_nu... |
4534308606 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# REF [site] >> https://scrapy.org/
import scrapy
class BlogSpider(scrapy.Spider):
    """Crawl blog.scrapinghub.com, yielding one {'title': ...} item per
    post and following pagination links."""
    name = 'blogspider'
    start_urls = ['https://blog.scrapinghub.com']

    def parse(self, response):
        # One item per post header title.
        for title in response.css('.post-header>h2'):
            yield {'title': title.css('a ::text').get()}
        # Recurse through the "next posts" pagination with the same callback.
        for next_page in response.css('a.next-posts-link'):
            yield response.follow(next_page, self.parse)
#--------------------------------------------------------------------
# Usage:
# scrapy runspider scrapy_test.py
#if '__main__' == __name__:
# main()
| sangwook236/SWDT | sw_dev/python/ext/test/networking/scrapy_test.py | scrapy_test.py | py | 581 | python | en | code | 17 | github-code | 6 | [
{
"api_name": "scrapy.Spider",
"line_number": 8,
"usage_type": "attribute"
}
] |
24650911393 | import asyncio
import curses
import typing
from curses_tools import draw_frame
class Obstacle:
    """Axis-aligned rectangular obstacle on the curses canvas.

    Positions are (row, column) of the left upper corner; sizes are
    (rows_size, columns_size).
    """

    def __init__(
        self,
        row: int,
        column: int,
        rows_size: int = 1,
        columns_size: int = 1,
        uid: str | None = None,
    ) -> None:
        """Store position, size and an optional identifier."""
        self.row = row
        self.column = column
        self.rows_size = rows_size
        self.columns_size = columns_size
        self.uid = uid

    def get_bounding_box_frame(self) -> str:
        """Get frame of bounding box

        Returns:
            Bounding box frame.
        """
        # increment box size to compensate obstacle movement
        rows, columns = self.rows_size + 1, self.columns_size + 1
        return '\n'.join(_get_bounding_box_lines(rows, columns))

    def get_bounding_box_corner_pos(self) -> tuple[int, int]:
        """Get left upper position of bounding box."""
        # The box is drawn one cell up/left of the obstacle itself.
        return self.row - 1, self.column - 1

    def dump_bounding_box(self) -> tuple[int, int, str]:
        """Get data for drawing the border of an obstacle."""
        row, column = self.get_bounding_box_corner_pos()
        return row, column, self.get_bounding_box_frame()

    def has_collision(
        self,
        obj_corner_row: int,
        obj_corner_column: int,
        obj_size_rows: int = 1,
        obj_size_columns: int = 1,
    ) -> bool:
        """Determine if collision has occurred.

        Args:
            obj_corner_row: Left upper obj corner row;
            obj_corner_column: Left upper obj corner column;
            obj_size_rows: Obj width;
            obj_size_columns: Obj height.
        """
        # Delegates to the module-level geometric test.
        return has_collision(
            (self.row, self.column),
            (self.rows_size, self.columns_size),
            (obj_corner_row, obj_corner_column),
            (obj_size_rows, obj_size_columns),
        )
def _get_bounding_box_lines(
rows: int,
columns: int,
) -> typing.Generator[str, None, None]:
"""Get line of bounding_box frame.
Args:
rows: Box width;
columns: Box height.
"""
yield ' ' + '-' * columns + ' '
for _ in range(rows):
yield '|' + ' ' * columns + '|'
yield ' ' + '-' * columns + ' '
async def show_obstacles(
    canvas: curses.window,
    obstacles: list[Obstacle],
) -> None:
    """Display bounding boxes of every obstacle in a list.

    Args:
        canvas: Main window;
        obstacles: List of obstacles.

    Runs forever: each frame it draws every box, yields one tick to the
    event loop, then erases the boxes (negative draw) before redrawing.
    """
    while True:
        boxes = [obstacle.dump_bounding_box() for obstacle in obstacles]

        for row, column, frame in boxes:
            draw_frame(canvas, row, column, frame)

        await asyncio.sleep(0)

        for row, column, frame in boxes:
            draw_frame(canvas, row, column, frame, negative=True)
def _is_point_inside(
corner_row: int,
corner_column: int,
size_rows: int,
size_columns: int,
point_row: int,
point_row_column: int,
) -> bool:
"""Check if a point is inside a rectangle of a given size.
Args:
corner_row: Left upper rectangle row position;
corner_column: Left upper rectangle column position
size_rows: Rectangle width;
size_columns: Rectangle height;
point_row: Left upper point row position;
point_row_column: Left upper point column position;
"""
rows_flag = corner_row <= point_row < corner_row + size_rows
columns_flag = (
corner_column <= point_row_column < corner_column + size_columns
)
return rows_flag and columns_flag
def has_collision(
    obstacle_corner: tuple[int, int],
    obstacle_size: tuple[int, int],
    obj_corner: tuple[int, int],
    obj_size: tuple[int, int] = (1, 1),
) -> bool:
    """Determine if collision has occurred.

    Args:
        obstacle_corner: Left upper corner obstacle position;
        obstacle_size: Obstacle size (width, height);
        obj_corner: Left upper corner obj position;
        obj_size: Obj size (width, height).
    """
    # BUG FIX: the original only checked whether a *corner* of one rectangle
    # lies inside the other, which misses cross-shaped overlaps (e.g. a tall
    # narrow object crossing a short wide obstacle with no corner inside).
    # Two axis-aligned rectangles intersect iff their projections overlap on
    # both axes; with integer cells ([c, c + size)) that is the strict test
    # below, which agrees with the corner-based semantics in every case the
    # old code handled.
    rows_overlap = (
        obstacle_corner[0] < obj_corner[0] + obj_size[0]
        and obj_corner[0] < obstacle_corner[0] + obstacle_size[0]
    )
    columns_overlap = (
        obstacle_corner[1] < obj_corner[1] + obj_size[1]
        and obj_corner[1] < obstacle_corner[1] + obstacle_size[1]
    )
    return rows_overlap and columns_overlap
| Alex-Men-VL/space_game | src/obstacles.py | obstacles.py | py | 4,841 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Generator",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "curses.window",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "curses_tools.draw_frame",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "asy... |
22368252597 | import os, sys
import numpy as np
import pandas as pd
import pickle
import argparse
from keras import backend
from keras.models import load_model
from keras.optimizers import *
from sklearn.metrics import accuracy_score
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from model import *
from io_data import *
# Configure Keras / TensorFlow before building the model.
backend.set_image_dim_ordering('tf')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
parser = argparse.ArgumentParser()
parser.add_argument('--train', help='train data path')
parser.add_argument('--test', help='test data path')
parser.add_argument('-l', '--log', help='log path')
parser.add_argument('-m', '--model', help='model path')
parser.add_argument('-o', '--output', help='output path')
parser.add_argument('-s', '--sample', type=int, help='novel sample')
parser.add_argument('-e', '--evaluate', type=int, help='novel sample')
parser.add_argument('-r', '--randomseed', type=int, help='randomseed')
args = parser.parse_args()
log_path = args.log
model_path = args.model
train_path = args.train
test_path = args.test
sample = args.sample
output_path = args.output
evaluate = args.evaluate
randomseed = args.randomseed
# NOTE(review): height/width/channel are overwritten from the data below,
# and n_batch/epoch are never used in this script -- presumably leftovers.
width = 32
height= 32
channel = 3
n_batch = 100
epoch = 30
print('Read data')
np.random.seed(randomseed)
train_imgs, label, test_imgs = read_test(train_path, test_path, sample=sample)
height, width, channel = train_imgs.shape[1:]
# training imgs flip horizontally
# Embed both splits with the CNN feature extractor.
model, cnn_model = Recognition()
model.load_weights(model_path)
train_imgs = cnn_model.predict(train_imgs)
test_imgs = cnn_model.predict(test_imgs)
T = train_imgs.shape[0]
# Average the `sample` support embeddings of each of the 20 novel classes
# into one prototype per class.
train_imgs = np.reshape(train_imgs, (20, sample, -1))
train_imgs = np.mean(train_imgs, axis=1)
label = np.array([label[i*sample] for i in range(20)])
test_imgs = np.reshape(test_imgs, (test_imgs.shape[0], -1))
# 1-NN on the raw embeddings.
knc = KNeighborsClassifier(n_neighbors=1)
knc.fit(train_imgs, label)
predict1 = knc.predict(test_imgs)
# 1-NN on 64-dim PCA projections fit on train + test embeddings together.
pca = PCA(n_components=64)
pca.fit(np.vstack([train_imgs, test_imgs]))
train_pca = pca.transform(train_imgs)
test_pca = pca.transform(test_imgs)
knc = KNeighborsClassifier(n_neighbors=1)
knc.fit(train_pca, label)
predict2 = knc.predict(test_pca)
save_predict(predict1, os.path.join(output_path, str(sample)+'_knn_predict.csv'))
save_predict(predict2, os.path.join(output_path, str(sample)+'_PCA_knn_predict.csv'))
del model
| tom6311tom6311/dlcv2018final | task2/knn/code/knn_test.py | knn_test.py | py | 2,377 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "keras.backend.set_image_dim_ordering",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "ar... |
32129181331 | import logging
import pandas as pd
from flask import Flask, request, jsonify
from data_preprocessing import process_data_for_training
import psycopg2
from psycopg2 import sql
# Create a Flask app
app = Flask(__name__)
app.logger.setLevel(logging.DEBUG)
app.logger.addHandler(logging.StreamHandler())
db_params = {
'dbname': 'app_db',
'user': 'app_user',
'password': 'password',
'host': 'db',
'port': '5432'
}
def fetch_warranty_data():
    """Load every row of api.claims into a pandas DataFrame.

    Returns:
        pandas.DataFrame with one row per claim, columns named after the
        table's columns.
    """
    from contextlib import closing

    # BUG FIX: the original left the cursor and connection open if the
    # query raised; context managers guarantee cleanup on every path.
    with closing(psycopg2.connect(**db_params)) as connection:
        with connection.cursor() as cursor:
            select_query = sql.SQL("SELECT * FROM api.claims")
            cursor.execute(select_query)
            rows = cursor.fetchall()
            columns = [desc[0] for desc in cursor.description]

    return pd.DataFrame(rows, columns=columns)
# Define the API endpoint for data preparation
# Define the API endpoint for data preparation
@app.route("/train", methods=["POST"])
def train():
    """POST /train: preprocess the request payload together with warranty
    claims from the DB and trigger model training."""
    data = request.data.decode('utf-8')
    warranty_data = fetch_warranty_data()
    # NOTE(review): this calls the route handler itself with an argument it
    # does not accept (TypeError at runtime). Presumably a training function
    # from the ML module was meant here -- confirm and fix the call target.
    train(process_data_for_training(data, warranty_data))
    return 'New model generated'
# Run the Flask app
if __name__ == "__main__":
    # NOTE(review): debug=True and binding to 0.0.0.0 are development
    # settings; do not ship them to production.
    app.run(host="0.0.0.0", port=5000, debug=True)
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "psycopg2.connect"... |
import networkx as nx
from networkx.generators.degree_seq import expected_degree_graph

# make a random graph of 500 nodes with expected degrees of 50
n = 500  # n nodes
p = 0.1
w = [p * n for i in range(n)]  # w = p*n for all nodes
G = expected_degree_graph(w)  # configuration model
print("Degree Histogram")
print("degree (#nodes) ****")
dh = nx.degree_histogram(G)
# BUG FIX: min(nx.degree(G)) iterates (node, degree) pairs, so it returned
# the smallest node tuple instead of the smallest degree.
low = min(d for _, d in nx.degree(G))
for i in range(low, len(dh)):
    bar = ''.join(dh[i] * ['*'])
    print("%2s (%2s) %s" % (i, dh[i], bar))
{
"api_name": "networkx.generators.degree_seq.expected_degree_graph",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "networkx.degree_histogram",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "networkx.degree",
"line_number": 12,
"usage_type": "call"
... |
5229315790 | from django.http import HttpResponsePermanentRedirect, HttpResponseGone
def redirect_to(request, url, convert_funcs=None, **kwargs):
    """
    A version of django.views.generic.simple.redirect_to which can handle
    argument conversion. The 'convert_funcs' parameter maps 'kwargs' keys to
    callables; each matching 'kwargs' value is passed through its callable
    before being interpolated into the redirect URL.

    Mostly useful for converting a parameter to an int so it can be
    formatted via e.g. %02d in the URL pattern.
    """
    if not url:
        return HttpResponseGone()

    for name, convert in (convert_funcs or {}).items():
        if name in kwargs:
            kwargs[name] = convert(kwargs[name])

    return HttpResponsePermanentRedirect(url % kwargs)
| gboue/django-util | django_util/view_utils.py | view_utils.py | py | 819 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "django.http.HttpResponseGone",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponsePermanentRedirect",
"line_number": 19,
"usage_type": "call"
}
] |
36606021901 | import os
import csv
import queue
import logging
import argparse
import traceback
import itertools
import numpy as np
import tensorflow.compat.v1 as tf
from fedlearner.trainer.bridge import Bridge
from fedlearner.model.tree.tree import BoostingTreeEnsamble
from fedlearner.trainer.trainer_master_client import LocalTrainerMasterClient
from fedlearner.trainer.trainer_master_client import DataBlockInfo
'''
目前不太理解的地方:worker、verbosity、max-bins、ignore-fields
'''
def create_argument_parser():
    """Build the CLI argument parser for the FedLearner tree-model trainer."""
    parser = argparse.ArgumentParser(
        description='FedLearner Tree Model Trainer.')
    # training role: 'local', 'leader' or 'follower'
    parser.add_argument('role', type=str,
                        help="Role of this trainer in {'local', "
                             "'leader', 'follower'}")
    # local bridge listen address, IP:PORT
    parser.add_argument('--local-addr', type=str,
                        help='Listen address of the local bridge, ' \
                             'in [IP]:[PORT] format')
    # peer bridge address, IP:PORT
    parser.add_argument('--peer-addr', type=str,
                        help='Address of peer\'s bridge, ' \
                             'in [IP]:[PORT] format')
    # application id for distributed training (default None)
    parser.add_argument('--application-id', type=str, default=None,
                        help='application id on distributed ' \
                             'training.')
    # rank of the current worker (default 0)
    parser.add_argument('--worker-rank', type=int, default=0,
                        help='rank of the current worker')
    # total number of workers (default 1)
    parser.add_argument('--num-workers', type=int, default=1,
                        help='total number of workers')
    # running mode: train, test or eval (default train)
    parser.add_argument('--mode', type=str, default='train',
                        help='Running mode in train, test or eval.')
    # path to the data file
    parser.add_argument('--data-path', type=str, default=None,
                        help='Path to data file.')
    # path to the validation data file (used during training)
    parser.add_argument('--validation-data-path', type=str, default=None,
                        help='Path to validation data file. ' \
                             'Only used in train mode.')
    # run prediction without data (default False)
    parser.add_argument('--no-data', type=bool, default=False,
                        help='Run prediction without data.')
    # file extension filter
    parser.add_argument('--file-ext', type=str, default='.csv',
                        help='File extension to use')
    # input file type: csv or tfrecord
    parser.add_argument('--file-type', type=str, default='csv',
                        help='input file type: csv or tfrecord')
    # path to load a saved model from
    parser.add_argument('--load-model-path',
                        type=str,
                        default=None,
                        help='Path load saved models.')
    # path to export trained models
    parser.add_argument('--export-path',
                        type=str,
                        default=None,
                        help='Path to save exported models.')
    # path for model checkpoints
    parser.add_argument('--checkpoint-path',
                        type=str,
                        default=None,
                        help='Path to save model checkpoints.')
    # path to save prediction output
    parser.add_argument('--output-path',
                        type=str,
                        default=None,
                        help='Path to save prediction output.')
    # controls log verbosity (default 1)
    parser.add_argument('--verbosity',
                        type=int,
                        default=1,
                        help='Controls the amount of logs to print.')
    # loss function (default logistic)
    parser.add_argument('--loss-type',
                        default='logistic',
                        choices=['logistic', 'mse'],
                        help='What loss to use for training.')
    # learning rate / shrinkage, the gradient step size (default 0.3)
    parser.add_argument('--learning-rate',
                        type=float,
                        default=0.3,
                        help='Learning rate (shrinkage).')
    # number of boosting iterations (default 5)
    parser.add_argument('--max-iters',
                        type=int,
                        default=5,
                        help='Number of boosting iterations.')
    # max decision-tree depth (default 3)
    parser.add_argument('--max-depth',
                        type=int,
                        default=3,
                        help='Max depth of decision trees.')
    # L2 regularization strength (default 1.0)
    parser.add_argument('--l2-regularization',
                        type=float,
                        default=1.0,
                        help='L2 regularization parameter.')
    # max number of histogram bins
    parser.add_argument('--max-bins',
                        type=int,
                        default=33,
                        help='Max number of histogram bins.')
    # number of parallel threads (default 1)
    parser.add_argument('--num-parallel',
                        type=int,
                        default=1,
                        help='Number of parallel threads.')
    # if True, the first data column holds example ids that must match
    # between leader and follower
    parser.add_argument('--verify-example-ids',
                        type=bool,
                        default=False,
                        help='If set to true, the first column of the '
                             'data will be treated as example ids that '
                             'must match between leader and follower')
    # comma-separated field names to ignore (default empty)
    parser.add_argument('--ignore-fields',
                        type=str,
                        default='',
                        help='Ignore data fields by name')
    # categorical feature field names (values must be non-negative ints)
    parser.add_argument('--cat-fields',
                        type=str,
                        default='',
                        help='Field names of categorical features. Feature'
                             ' values should be non-negtive integers')
    # whether to use streaming transmit (default False)
    parser.add_argument('--use-streaming',
                        type=bool,
                        default=False,
                        help='Whether to use streaming transmit.')
    # whether to send prediction scores to the follower (default False)
    parser.add_argument('--send-scores-to-follower',
                        type=bool,
                        default=False,
                        help='Whether to send prediction scores to follower.')
    # whether to send metrics to the follower (default False)
    parser.add_argument('--send-metrics-to-follower',
                        type=bool,
                        default=False,
                        help='Whether to send metrics to follower.')

    return parser
def parse_tfrecord(record):
    """Parse one serialized tf.train.Example into a flat {name: value} dict.

    Every feature must hold exactly one value; float, int64 and bytes lists
    are unpacked to scalars. Raises ValueError for any other feature kind.
    """
    example = tf.train.Example()
    example.ParseFromString(record)
    parsed = {}
    for key, value in example.features.feature.items():
        # WhichOneof reports which of the three typed lists is populated.
        kind = value.WhichOneof('kind')
        if kind == 'float_list':
            assert len(value.float_list.value) == 1, "Invalid tfrecord format"
            parsed[key] = value.float_list.value[0]
        elif kind == 'int64_list':
            assert len(value.int64_list.value) == 1, "Invalid tfrecord format"
            parsed[key] = value.int64_list.value[0]
        elif kind == 'bytes_list':
            assert len(value.bytes_list.value) == 1, "Invalid tfrecord format"
            parsed[key] = value.bytes_list.value[0]
        else:
            raise ValueError("Invalid tfrecord format")

    return parsed
def extract_field(field_names, field_name, required):
    """Return a fresh accumulator list when `field_name` exists in the data.

    Returns None when the field is absent and optional; an absent required
    field is a data error.
    """
    if field_name not in field_names:
        assert not required, \
            "Field %s is required but missing in data"%field_name
        return None
    return []
def read_data(file_type, filename, require_example_ids,
              require_labels, ignore_fields, cat_fields):
    """Read one csv/tfrecord data file into feature matrices and metadata.

    Args:
        file_type: 'csv' or 'tfrecord'.
        filename: path of the data file.
        require_example_ids: whether an example_id column must be present.
        require_labels: whether a label column must be present.
        ignore_fields: comma-separated field names to drop.
        cat_fields: comma-separated categorical field names (values must be
            non-negative integers).

    Returns:
        Tuple (features, cat_features, cont_columns, cat_columns,
        labels, example_ids, raw_ids); labels/example_ids/raw_ids are None
        when the corresponding column is absent.
    """
    logging.debug('Reading data file from %s', filename)

    if file_type == 'tfrecord':
        reader = tf.io.tf_record_iterator(filename)
        # Peek at the first record to discover the field names without
        # consuming the main iterator.
        reader, tmp_reader = itertools.tee(reader)
        first_line = parse_tfrecord(next(tmp_reader))
        field_names = first_line.keys()
    else:
        fin = tf.io.gfile.GFile(filename, 'r')
        reader = csv.DictReader(fin)
        field_names = reader.fieldnames

    example_ids = extract_field(
        field_names, 'example_id', require_example_ids)
    raw_ids = extract_field(
        field_names, 'raw_id', False)
    labels = extract_field(
        field_names, 'label', require_labels)

    ignore_fields = set(filter(bool, ignore_fields.strip().split(',')))
    ignore_fields.update(['example_id', 'raw_id', 'label'])
    cat_fields = set(filter(bool, cat_fields.strip().split(',')))
    for name in cat_fields:
        assert name in field_names, "cat_field %s missing"%name

    cont_columns = list(filter(
        lambda x: x not in ignore_fields and x not in cat_fields, field_names))
    # NOTE: sorts by the *second character* of the column name -- presumably
    # names follow a fixed x<digit> pattern; confirm against the data schema.
    cont_columns.sort(key=lambda x: x[1])
    # BUG FIX: the original filter was "x in cat_fields and x not in
    # cat_fields", which is always False, so categorical features were
    # silently dropped.
    cat_columns = list(filter(lambda x: x in cat_fields, field_names))
    cat_columns.sort(key=lambda x: x[1])

    features = []
    cat_features = []
    for line in reader:
        if file_type == 'tfrecord':
            line = parse_tfrecord(line)
        if example_ids is not None:
            example_ids.append(str(line['example_id']))
        if raw_ids is not None:
            raw_ids.append(str(line['raw_id']))
        if labels is not None:
            labels.append(float(line['label']))
        features.append([float(line[i]) for i in cont_columns])
        cat_features.append([int(line[i]) for i in cat_columns])

    # np.float was removed from NumPy 1.24; np.float64 is the same dtype.
    features = np.array(features, dtype=np.float64)
    cat_features = np.array(cat_features, dtype=np.int32)
    if labels is not None:
        labels = np.asarray(labels, dtype=np.float64)

    return features, cat_features, cont_columns, cat_columns, \
        labels, example_ids, raw_ids
def read_data_dir(file_ext, file_type, path, require_example_ids,
                  require_labels, ignore_fields, cat_fields):
    """Read every matching file under `path` and concatenate the results.

    If `path` is a single file it is read directly.  Returns the same
    7-tuple as read_data(), with rows from all files stacked in walk order.
    """
    if not tf.io.gfile.isdir(path):
        # `path` is a single file, not a directory.
        return read_data(
            file_type, path, require_example_ids,
            require_labels, ignore_fields, cat_fields)

    # Collect candidate files, filtered by extension when one was given.
    files = []
    for dirname, _, filenames in tf.io.gfile.walk(path):
        for filename in filenames:
            _, ext = os.path.splitext(filename)
            if file_ext and ext != file_ext:
                continue
            subdirname = os.path.join(path, os.path.relpath(dirname, path))
            files.append(os.path.join(subdirname, filename))

    # Accumulate all files, asserting an identical column layout throughout.
    features = None
    for fullname in files:
        ifeatures, icat_features, icont_columns, icat_columns, \
            ilabels, iexample_ids, iraw_ids = read_data(
                file_type, fullname, require_example_ids,
                require_labels, ignore_fields, cat_fields
            )
        if features is None:
            # First file: adopt its arrays and column layout as reference.
            features = ifeatures
            cat_features = icat_features
            cont_columns = icont_columns
            cat_columns = icat_columns
            labels = ilabels
            example_ids = iexample_ids
            raw_ids = iraw_ids
        else:
            assert cont_columns == icont_columns, \
                "columns mismatch between files %s vs %s"%(
                    cont_columns, icont_columns)
            assert cat_columns == icat_columns, \
                "columns mismatch between files %s vs %s"%(
                    cat_columns, icat_columns)
            features = np.concatenate((features, ifeatures), axis=0)
            cat_features = np.concatenate(
                (cat_features, icat_features), axis=0)
            if labels is not None:
                labels = np.concatenate((labels, ilabels), axis=0)
            if example_ids is not None:
                example_ids.extend(iexample_ids)
            if raw_ids is not None:
                raw_ids.extend(iraw_ids)
    assert features is not None, "No data found in %s"%path
    return features, cat_features, cont_columns, cat_columns, \
        labels, example_ids, raw_ids
def train(args, booster):
    """Load training (and optional validation) data and fit the booster.

    Labels are required for every role except 'follower'.
    """
    X, cat_X, X_names, cat_X_names, y, example_ids, _ = read_data_dir(
        args.file_ext, args.file_type, args.data_path, args.verify_example_ids,
        args.role != 'follower', args.ignore_fields, args.cat_fields)

    if args.validation_data_path:
        val_X, val_cat_X, val_X_names, val_cat_X_names, val_y, \
            val_example_ids, _ = \
            read_data_dir(
                args.file_ext, args.file_type, args.validation_data_path,
                args.verify_example_ids, args.role != 'follower',
                args.ignore_fields, args.cat_fields)
        assert X_names == val_X_names, \
            "Train data and validation data must have same features"
        assert cat_X_names == val_cat_X_names, \
            "Train data and validation data must have same features"
    else:
        # BUG FIX: the original assigned `X_names = None` here (instead of
        # `val_X_names`), wiping the training feature names that are passed
        # to booster.fit() below whenever no validation set was given.
        val_X = val_cat_X = val_X_names = val_y = val_example_ids = None

    if args.output_path:
        tf.io.gfile.makedirs(os.path.dirname(args.output_path))
    if args.checkpoint_path:
        tf.io.gfile.makedirs(args.checkpoint_path)

    booster.fit(
        X, y,
        cat_features=cat_X,
        checkpoint_path=args.checkpoint_path,
        example_ids=example_ids,
        validation_features=val_X,
        validation_cat_features=val_cat_X,
        validation_labels=val_y,
        validation_example_ids=val_example_ids,
        output_path=args.output_path,
        feature_names=X_names,
        cat_feature_names=cat_X_names)
def write_predictions(filename, pred, example_ids=None, raw_ids=None):
    """Write predictions (plus optional id columns) as CSV.

    Output is written to `<filename>.tmp` first and then atomically renamed,
    so readers never observe a partially written file.
    """
    logging.debug("Writing predictions to %s.tmp", filename)
    header_names = []
    columns = []
    if example_ids is not None:
        header_names.append('example_id')
        columns.append(example_ids)
    if raw_ids is not None:
        header_names.append('raw_id')
        columns.append(raw_ids)
    header_names.append('prediction')
    columns.append(pred)

    tmp_name = filename + '.tmp'
    fout = tf.io.gfile.GFile(tmp_name, 'w')
    fout.write(','.join(header_names) + '\n')
    # zip(*columns) turns the column lists into CSV rows.
    for row in zip(*columns):
        fout.write(','.join(str(value) for value in row) + '\n')
    fout.close()

    logging.debug("Renaming %s.tmp to %s", filename, filename)
    tf.io.gfile.rename(tmp_name, filename, overwrite=True)
def test_one_file(args, bridge, booster, data_file, output_file):
    """Predict on a single data file and optionally write the result.

    `data_file` may be None (leader running with --no_data): prediction then
    relies entirely on values exchanged over the bridge.
    """
    if data_file is None:
        X = cat_X = X_names = cat_X_names = y = example_ids = raw_ids = None
    else:
        # Labels are optional here (require_labels=False): eval may be
        # label-free on one side.
        X, cat_X, X_names, cat_X_names, y, example_ids, raw_ids = \
            read_data(
                args.file_type, data_file, args.verify_example_ids,
                False, args.ignore_fields, args.cat_fields)
    pred = booster.batch_predict(
        X,
        example_ids=example_ids,
        cat_features=cat_X,
        feature_names=X_names,
        cat_feature_names=cat_X_names)

    if y is not None:
        metrics = booster.loss.metrics(pred, y)
    else:
        metrics = {}
    logging.info("Test metrics: %s", metrics)

    # Barrier around the output write: the follower waits for the leader's
    # token before returning, the leader sends it after writing.
    # NOTE(review): presumably this serializes output production between the
    # two roles — confirm against the bridge protocol.
    if args.role == 'follower':
        bridge.start(bridge.new_iter_id())
        bridge.receive(bridge.current_iter_id, 'barrier')
        bridge.commit()

    if output_file:
        tf.io.gfile.makedirs(os.path.dirname(output_file))
        write_predictions(output_file, pred, example_ids, raw_ids)

    if args.role == 'leader':
        bridge.start(bridge.new_iter_id())
        bridge.send(
            bridge.current_iter_id, 'barrier', np.asarray([1]))
        bridge.commit()
class DataBlockLoader(object):
    """Iterates over data blocks for test/eval, coordinating both roles.

    The leader consumes block ids pushed by the peer over the bridge; the
    follower (or the 'local' role) pulls blocks from a
    LocalTrainerMasterClient directly.
    """

    def __init__(self, role, bridge, data_path, ext,
                 worker_rank=0, num_workers=1, output_path=None):
        self._role = role
        self._bridge = bridge
        self._num_workers = num_workers
        self._worker_rank = worker_rank
        self._output_path = output_path
        # The trainer-master role is the opposite of our bridge role.
        self._tm_role = 'follower' if role == 'leader' else 'leader'

        if data_path:
            files = None
            if not tf.io.gfile.isdir(data_path):
                # A single file: serve just that file from its directory.
                files = [os.path.basename(data_path)]
                data_path = os.path.dirname(data_path)
            self._trainer_master = LocalTrainerMasterClient(
                self._tm_role, data_path, files=files, ext=ext)
        else:
            self._trainer_master = None

        self._count = 0
        # Startup barrier: leader announces readiness, follower acks it.
        if self._role == 'leader':
            self._block_queue = queue.Queue()
            self._bridge.register_data_block_handler(self._data_block_handler)
            self._bridge.start(self._bridge.new_iter_id())
            self._bridge.send(
                self._bridge.current_iter_id, 'barrier', np.asarray([1]))
            self._bridge.commit()
        elif self._role == 'follower':
            self._bridge.start(self._bridge.new_iter_id())
            self._bridge.receive(self._bridge.current_iter_id, 'barrier')
            self._bridge.commit()

    def _data_block_handler(self, msg):
        """Bridge callback for announced blocks; returning False rejects."""
        logging.debug('DataBlock: recv "%s" at %d', msg.block_id, msg.count)
        assert self._count == msg.count
        if not msg.block_id:
            block = None  # empty id signals end-of-data
        elif self._trainer_master is not None:
            block = self._trainer_master.request_data_block(msg.block_id)
            # BUG FIX: the original returned False unconditionally here, so a
            # leader with a trainer master never queued any block and never
            # advanced `_count`.  Only reject when the block cannot be
            # resolved locally.
            if block is None:
                return False
        else:
            block = DataBlockInfo(msg.block_id, None)
        self._count += 1
        self._block_queue.put(block)
        return True

    def _request_data_block(self):
        """Round-robin blocks across workers, skipping finished outputs."""
        while True:
            # Discard the blocks assigned to lower-ranked workers, take
            # ours, then discard those of higher-ranked workers.
            for _ in range(self._worker_rank):
                self._trainer_master.request_data_block()
            block = self._trainer_master.request_data_block()
            for _ in range(self._num_workers - self._worker_rank - 1):
                self._trainer_master.request_data_block()
            # Skip blocks whose output file already exists (resumability).
            if block is None or self._output_path is None or \
                    not tf.io.gfile.exists(os.path.join(
                        self._output_path, block.block_id) + '.output'):
                break
        return block

    def get_next_block(self):
        """Return the next DataBlockInfo, or None when data is exhausted."""
        if self._role == 'local':
            return self._request_data_block()
        if self._tm_role == 'leader':
            # We own the data: pick a block and announce it to the peer,
            # retrying until the peer accepts it.
            while True:
                block = self._request_data_block()
                if block is not None:
                    if not self._bridge.load_data_block(
                            self._count, block.block_id):
                        continue
                else:
                    self._bridge.load_data_block(self._count, '')
                break
            self._count += 1
        else:
            # The peer drives: wait for a block queued by the handler.
            block = self._block_queue.get()
        return block
def test(args, bridge, booster):
    """Run prediction/evaluation over every available data block."""
    # --no_data is only valid for the leader: it then participates in the
    # block exchange without any local files.
    if not args.no_data:
        assert args.data_path, "Data path must not be empty"
    else:
        assert not args.data_path and args.role == 'leader'
    data_loader = DataBlockLoader(
        args.role, bridge, args.data_path, args.file_ext,
        args.worker_rank, args.num_workers, args.output_path)

    # Process blocks until the loader signals exhaustion with None.
    while True:
        data_block = data_loader.get_next_block()
        if data_block is None:
            break
        if args.output_path:
            output_file = os.path.join(
                args.output_path, data_block.block_id) + '.output'
        else:
            output_file = None
        test_one_file(
            args, bridge, booster, data_block.data_path, output_file)
def run(args):
    """Entry point: configure logging, build bridge/booster and dispatch."""
    if args.verbosity == 0:
        logging.basicConfig(level=logging.WARNING)
    elif args.verbosity == 1:
        logging.basicConfig(level=logging.INFO)
    else:
        logging.basicConfig(level=logging.DEBUG)

    assert args.role in ['leader', 'follower', 'local'], \
        "role must be leader, follower, or local"
    assert args.mode in ['train', 'test', 'eval'], \
        "mode must be train, test, or eval"

    # A bridge is only needed for the distributed roles (leader/follower).
    if args.role != 'local':
        bridge = Bridge(args.role, int(args.local_addr.split(':')[1]),
                        args.peer_addr, args.application_id, 0,
                        streaming_mode=args.use_streaming)
    else:
        bridge = None

    try:
        # Build the boosting-tree ensemble model.
        booster = BoostingTreeEnsamble(
            bridge,
            learning_rate=args.learning_rate,
            max_iters=args.max_iters,
            max_depth=args.max_depth,
            l2_regularization=args.l2_regularization,
            max_bins=args.max_bins,
            num_parallel=args.num_parallel,
            loss_type=args.loss_type,
            send_scores_to_follower=args.send_scores_to_follower,
            send_metrics_to_follower=args.send_metrics_to_follower)

        # Load a previously saved model, if one was given.
        if args.load_model_path:
            booster.load_saved_model(args.load_model_path)

        if args.mode == 'train':
            # Training reads data locally; peer coordination happens
            # inside fit() rather than through this bridge argument.
            train(args, booster)
        else:  # args.mode == 'test, eval'
            # Test/eval coordinate data blocks over the bridge.
            test(args, bridge, booster)

        # Persist the final model.
        if args.export_path:
            booster.save_model(args.export_path)
    except Exception:
        logging.fatal(
            'Exception raised during training: %s',
            traceback.format_exc())
        # IDIOM FIX: bare `raise` re-raises the active exception with its
        # original traceback; the original `raise e` added a redundant frame.
        raise
    finally:
        # Always tear the bridge down, even on failure.
        if bridge:
            bridge.terminate()
# Script entry point: parse CLI arguments and launch training/evaluation.
if __name__ == '__main__':
    run(create_argument_parser().parse_args())
| rain701/fedlearner-explain | fedlearner/fedlearner/model/tree/trainer.py | trainer.py | py | 21,840 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.train.Example",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.train",
"line_number": 162,
"usage_type": "attribute"
... |
10769330374 | """my_first_django URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.shortcuts import render
from django.urls import include, path
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
TokenVerifyView
)
from rest_framework import routers
from myapp.views.person import PersonViewSet
from myapp.views.user import UserViewSet, GroupViewSet
# DRF router: auto-generates the standard list/detail CRUD routes for each
# registered ViewSet; mounted under the "api/" prefix in urlpatterns.
router = routers.DefaultRouter()
router.register(r'users', UserViewSet)
router.register(r'groups', GroupViewSet)
router.register(r'persons', PersonViewSet)
def index(request):
    """Render the site landing page."""
    return render(request, 'index.html')
urlpatterns = [
    path("", index, name='index'),    # site landing page
    path("admin/", admin.site.urls),  # Django admin
    path("api/", include(router.urls)),   # DRF API (users/groups/persons)
    path("myapp/", include("myapp.urls")),  # app-specific routes
    path("accounts/", include("django.contrib.auth.urls")),  # auth views
    path("api-auth/", include("rest_framework.urls")),  # browsable-API login
    # simplejwt endpoints: obtain, refresh and verify JWT token pairs.
    path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    path('api/token/verify/', TokenVerifyView.as_view(), name='token_verify'),
]
| shine-codestove/my_first_django | my_first_django/urls.py | urls.py | py | 1,750 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "rest_framework.routers",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "myapp.views.user.UserViewSet",
"line_number": 32,
"usage_type": "argument"
... |
7263711725 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QMainWindow, QVBoxLayout, QWidget, QTabWidget
from .movies_view import MoviesTab
from .games_view import GamesTab
from .music_view import MusicTab
class Window(QMainWindow):
    """Main Window."""

    def __init__(self, parent=None):
        """Initializer."""
        super().__init__(parent)
        self.setWindowTitle("Media Library")
        self.resize(720, 360)  # initial window size in pixels (w, h)
        # Central widget holding the Movies/Games/Music tab container.
        self.table_widget = MyTableWidget(self)
        self.setCentralWidget(self.table_widget)
class MyTableWidget(QWidget):
    """Container for all the tabs (Movies / Games / Music)."""

    def __init__(self, parent):
        # BUG FIX: the original called super(QWidget, self).__init__(parent),
        # which starts the MRO lookup *after* QWidget and therefore skips
        # QWidget's own __init__.  The zero-argument form resolves correctly.
        super().__init__(parent)
        self.layout = QVBoxLayout(self)

        # Initialize tabs
        self.tabs = QTabWidget()
        self.moviesTab = MoviesTab(self)
        self.gamesTab = GamesTab(self)
        self.musicTab = MusicTab(self)

        # Add tabs for each media type
        self.tabs.addTab(self.moviesTab, "Movies")
        self.tabs.addTab(self.gamesTab, "Games")
        self.tabs.addTab(self.musicTab, "Music")

        # Add tabs to widget
        self.layout.addWidget(self.tabs)
        self.setLayout(self.layout)
| aisandovalm/media-library | media_library/views/main_view.py | main_view.py | py | 1,186 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 25,
"usage_type": "argument"
},
{
"ap... |
11849550981 |
"""
Created on Thu Dec 10 22:51:52 2020
@author: yzaghir
Image Arthmeric Opeations Add -
We can add two images with the OpenCV function , cv.add()
-Resize the two images and make sur they are exactly the same size before adding
"""
# import cv library
import cv2 as cv
#import numpy as np
# read image from computer
img1 = cv.imread("images/abhi2.jpg")
img2 = cv.imread("images/flower1.jpg")
#macke sur both images are same size before adding
# pickup matrix of number from image
cropped_image1 = img1[60:200 , 50:200]
cropped_image2 = img2[60:200 , 50:200]
cv.imshow("cropped 1" , cropped_image1)
cv.imshow("cropped 2" , cropped_image2)
# adding the images
added_image = cv.add(cropped_image1 , cropped_image2)
cv.imshow("Added Image" , added_image)
# adding the images
subtracted_image = cv.subtract(cropped_image1 , cropped_image2)
cv.imshow("Subtracted Image" , subtracted_image)
| zaghir/python | python-opencv/arithmetic_operations_addition_and_subtraction.py | arithmetic_operations_addition_and_subtraction.py | py | 906 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 31,
... |
28237649684 | import typing
import requests
from requests import Session
from zenora.errors import MissingAccess, AvatarError, InvalidSnowflake
# Request functions
def fetch(
    url: str,
    headers: typing.Dict[str, str],
    params: typing.Dict[str, str] = {},
) -> typing.Dict:
    """GET *url* with query *params* and return the decoded JSON body.

    Raises requests.HTTPError for non-2xx responses.
    """
    response = requests.get(url=url, headers=headers, params=params)
    response.raise_for_status()
    return response.json()
def post(
    url: str,
    headers: typing.Dict[str, str],
    params: typing.Dict[str, str] = {},
) -> typing.Dict:
    """POST *params* as a JSON body to *url* and return the decoded reply.

    Raises requests.HTTPError for non-2xx responses.
    """
    response = requests.post(url=url, headers=headers, json=params)
    response.raise_for_status()
    return response.json()
def patch(
    url: str,
    headers: typing.Dict[str, str],
    params: typing.Dict[str, str] = {},
) -> typing.Dict:
    """PATCH *url* with *params* as a JSON body and return the decoded reply.

    Raises requests.HTTPError for non-2xx responses.
    """
    response = requests.patch(url=url, headers=headers, json=params)
    response.raise_for_status()
    return response.json()
def delete(
    url: str,
    headers: typing.Dict[str, str],
    params: typing.Dict[str, str] = {},
) -> typing.Dict:
    """Send a DELETE request to *url* with *params* as a JSON body.

    NOTE(review): unlike fetch/post/patch this returns the raw Response
    object, not parsed JSON (so the declared typing.Dict return looks
    wrong) -- presumably because DELETE replies may have empty bodies.
    Confirm with callers before changing either the annotation or the
    return value.
    """
    r = requests.delete(url=url, headers=headers, json=params)
    r.raise_for_status()
    return r
# Utility functions
def error_checker(data: typing.Dict) -> None:
    """Inspect an API error payload and raise the matching Zenora error.

    Does nothing when *data* matches none of the known error shapes.
    """
    # Snowflake validation errors arrive as {"user_id": [...]} or
    # {"channel_id": [...]} lists of messages.
    if data.get("user_id") or data.get("channel_id"):
        raise InvalidSnowflake(
            data.get("user_id")[0]
            if data.get("user_id") is not None
            else data.get("channel_id")[0]
        )
    elif data.get("code"):
        # Error code 50001 is mapped to MissingAccess.
        # NOTE(review): every *other* code is reported as InvalidSnowflake,
        # which may mislabel unrelated API errors -- confirm intent.
        if data.get("code") == 50001:
            raise MissingAccess(data.get("message"))
        else:
            raise InvalidSnowflake(data.get("message"))
    elif data.get("avatar"):
        # Avatar validation errors come back as a list of messages.
        if isinstance(data.get("avatar"), list):
            raise AvatarError(data.get("avatar")[0])
def get_file(url):
    """Open a streaming GET request for *url* and return the raw response.

    The body is not downloaded eagerly (stream=True); callers iterate it.
    """
    return requests.get(url=url, stream=True)
| StarrMan303/zenora | zenora/utils/helpers.py | helpers.py | py | 1,778 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Dict",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"lin... |
70075742268 | # -*- encoding:utf-8 -*-
'''
@time: 2019/12/21 9:48 下午
@author: huguimin
@email: 718400742@qq.com
'''
import os
import random
import math
import torch
import argparse
import numpy as np
from util.util_data_gcn import *
from models.word2vec.ecgcn import ECGCN
from models.word2vec.ecgat import ECGAT
from models.word2vec.fssgcn import ECClassifier
from models.word2vec.aggcn import AGClassifier
# from models.ecaggcn_no_dcn import ECClassifier
from sklearn import metrics
import torch.nn as nn
import time
class Model:
    """One train/evaluate cycle of a GCN-based emotion-cause model."""

    def __init__(self, opt, idx):
        self.opt = opt
        self.embedding = load_embedding(opt.embedding_path)
        self.embedding_pos = load_pos_embedding(opt.embedding_dim_pos)
        self.split_size = math.ceil(opt.data_size / opt.n_split)
        self.global_f1 = 0
        # NOTE(review): load_data(split_size, idx, data_size) would always
        # train on a single fixed split; the per-dataset loaders below are
        # used instead.
        if opt.dataset == 'EC':
            self.train, self.test = load_percent_train(opt.per, self.split_size, idx, opt.data_size)
        elif opt.dataset == 'EC_en':
            self.train, self.test = load_data_en()
        else:
            print('DATASET NOT EXIST')
        self.sub_model = opt.model_class(self.embedding, self.embedding_pos, self.opt).to(opt.device)

    def _reset_params(self):
        """Re-initialize every trainable parameter of the sub-model."""
        for p in self.sub_model.parameters():
            if p.requires_grad:
                if len(p.shape) > 1:
                    self.opt.initializer(p)
                else:
                    # 1-D parameters (biases): uniform in +/- 1/sqrt(n).
                    stdv = 1. / math.sqrt(p.shape[0])
                    torch.nn.init.uniform_(p, a=-stdv, b=stdv)

    def _print_args(self):
        """Print parameter counts and all CLI/derived options."""
        n_trainable_params, n_nontrainable_params, model_params = 0, 0, 0
        for p in self.sub_model.parameters():
            n_params = torch.prod(torch.tensor(p.shape)).item()
            model_params += n_params
            if p.requires_grad:
                n_trainable_params += n_params
            else:
                n_nontrainable_params += n_params
        print('n_trainable_params: {0}, n_nontrainable_params: {1}, model_params: {2}'.format(n_trainable_params, n_nontrainable_params, model_params))
        print('> training arguments:')
        for arg in vars(self.opt):
            print('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))

    def _train(self, criterion, optimizer):
        """Train with periodic evaluation; returns the best (pre, rec, f1)."""
        max_test_pre = 0
        max_test_rec = 0
        max_test_f1 = 0
        global_step = 0
        continue_not_increase = 0
        for epoch in range(self.opt.num_epoch):
            print('>' * 100)
            print('epoch: ', epoch)
            increase_flag = False
            for train in get_train_batch_data(self.train, self.opt.batch_size, self.opt.keep_prob1, self.opt.keep_prob2):
                global_step += 1
                self.sub_model.train()
                optimizer.zero_grad()

                inputs = [train[col].to(self.opt.device) for col in self.opt.inputs_cols]
                targets = train['label'].to(self.opt.device)
                doc_len = train['doc_len'].to(self.opt.device)
                # One-hot labels -> class indices, flattened per clause.
                targets = torch.argmax(targets, dim=2)
                targets_flatten = torch.reshape(targets, [-1])
                outputs = self.sub_model(inputs)
                outputs_flatten = torch.reshape(outputs, [-1, self.opt.num_class])
                loss = criterion(outputs_flatten, targets_flatten)
                outputs = torch.argmax(outputs, dim=-1)
                loss.backward()
                optimizer.step()

                if global_step % self.opt.log_step == 0:
                    train_acc, train_pre, train_rec, train_f1 = self._evaluate_prf_binary(targets, outputs, doc_len)
                    print('Train: loss:{:.4f}, train_acc: {:.4f}, train_pre:{:.4f}, train_rec:{:.4f}, train_f1: {:.4f}\n'.format(loss.item(), train_acc, train_pre, train_rec, train_f1))
                    test_acc, test_pre, test_rec, test_f1 = self._evaluate_acc_f1()
                    # Track the best F1 and checkpoint a new global best.
                    if test_f1 > max_test_f1:
                        increase_flag = True
                        max_test_f1 = test_f1
                        max_test_pre = test_pre
                        max_test_rec = test_rec
                        if self.opt.save and test_f1 > self.global_f1:
                            self.global_f1 = test_f1
                            torch.save(self.sub_model.state_dict(), 'state_dict/'+self.opt.model_name+'_'+self.opt.dataset+'_test.pkl')
                            print('>>> best model saved.')
                    print('Test: test_acc: {:.4f}, test_pre:{:.4f}, test_rec:{:.4f}, test_f1: {:.4f}'.format(test_acc, test_pre, test_rec, test_f1))
            # Early stopping after 5 consecutive epochs without improvement.
            if not increase_flag:
                continue_not_increase += 1
                if continue_not_increase >= 5:
                    print('early stop.')
                    break
            else:
                continue_not_increase = 0
        return max_test_pre, max_test_rec, max_test_f1

    def _evaluate_acc_f1(self):
        """Evaluate on the test split; returns (acc, precision, recall, f1)."""
        # switch model to evaluation mode
        self.sub_model.eval()
        targets_all, outputs_all, doc_len_all = None, None, None
        inference_time_list = []
        with torch.no_grad():
            for test in get_test_batch_data(self.test, self.opt.batch_size):
                inputs = [test[col].to(self.opt.device) for col in self.opt.inputs_cols]
                targets = test['label'].to(self.opt.device)
                doc_len = test['doc_len'].to(self.opt.device)
                targets = torch.argmax(targets, dim=2)
                if self.opt.infer_time:
                    # Synchronize around the forward pass so wall-clock time
                    # reflects GPU work; normalized per example.
                    torch.cuda.synchronize()
                    start_time = time.time()
                    outputs = self.sub_model(inputs)
                    torch.cuda.synchronize()
                    end_time = time.time()
                    inference_time = end_time - start_time
                    inference_time_list.append(inference_time/targets.shape[0])
                else:
                    outputs = self.sub_model(inputs)
                outputs = torch.argmax(outputs, dim=2)
                if targets_all is None:
                    targets_all = targets
                    outputs_all = outputs
                    doc_len_all = doc_len
                else:
                    targets_all = torch.cat((targets_all, targets), dim=0)
                    outputs_all = torch.cat((outputs_all, outputs), dim=0)
                    doc_len_all = torch.cat((doc_len_all, doc_len), dim=0)
        test_acc, test_pre, test_rec, test_f1 = self._evaluate_prf_binary(targets_all, outputs_all, doc_len_all)
        # BUG FIX: only report timing when it was actually collected; the
        # original took np.mean of an empty list whenever infer_time was off
        # (the default), printing nan and emitting a RuntimeWarning.
        if inference_time_list:
            infer_time = np.mean(np.array(inference_time_list))
            print('infer_time==================', str(infer_time))
        return test_acc, test_pre, test_rec, test_f1

    def _evaluate_prf_binary(self, targets, outputs, doc_len):
        """Compute acc/precision/recall/F1 over the valid clauses only.

        :param targets: (batch, max_doc_len) gold class indices
        :param outputs: (batch, max_doc_len) predicted class indices
        :param doc_len: per-document clause counts (padding is excluded)
        """
        tmp1, tmp2 = [], []
        for i in range(outputs.shape[0]):
            for j in range(doc_len[i]):
                tmp1.append(outputs[i][j].cpu())
                tmp2.append(targets[i][j].cpu())
        y_pred, y_true = np.array(tmp1), np.array(tmp2)
        acc = metrics.precision_score(y_true, y_pred, average='micro')
        p = metrics.precision_score(y_true, y_pred, average='binary')
        r = metrics.recall_score(y_true, y_pred, average='binary')
        f1 = metrics.f1_score(y_true, y_pred, average='binary')
        return acc, p, r, f1

    def run(self, folder, repeats=1):
        """Train `repeats` times on this folder; returns averaged metrics."""
        print(('-'*50 + 'Folder{}' + '-'*50).format(folder))
        criterion = nn.CrossEntropyLoss()
        _params = filter(lambda p: p.requires_grad, self.sub_model.parameters())
        optimizer = self.opt.optimizer(_params, lr=self.opt.learning_rate, weight_decay=self.opt.l2reg)

        if not os.path.exists('log/'):
            os.mkdir('log/')
        f_out = open('log/' + self.opt.model_name + '_' + str(folder) + '_test.txt', 'a+', encoding='utf-8')

        max_test_pre_avg = 0
        max_test_rec_avg = 0
        max_test_f1_avg = 0
        for i in range(repeats):
            print('repeat: ', (i + 1))
            f_out.write('repeat: ' + str(i + 1))
            self._reset_params()
            max_test_pre, max_test_rec, max_test_f1 = self._train(criterion, optimizer)
            print('max_test_acc: {0} max_test_hf1: {1}'.format(max_test_pre, max_test_f1))
            f_out.write('max_test_acc: {0}, max_test_f1: {1}'.format(max_test_pre, max_test_f1))
            max_test_pre_avg += max_test_pre
            max_test_rec_avg += max_test_rec
            max_test_f1_avg += max_test_f1
        print('#' * 100)
        # BUG FIX: the original passed "{.4f}" literals straight to print(),
        # which printed the raw placeholder next to the number instead of
        # formatting it (and mislabeled the recall line "max_test_acc_rec").
        print("max_test_acc_avg: {:.4f}".format(max_test_pre_avg / repeats))
        print("max_test_rec_avg: {:.4f}".format(max_test_rec_avg / repeats))
        print("max_test_f1_avg: {:.4f}".format(max_test_f1_avg / repeats))
        f_out.write("max_test_pre_avg: {0}, max_test_rec_avg: {1}, max_test_f1_avg: {2}".format(max_test_pre_avg / repeats, max_test_rec_avg / repeats, max_test_f1_avg / repeats))
        f_out.close()
        return max_test_pre_avg / repeats, max_test_rec_avg / repeats, max_test_f1_avg / repeats
if __name__ == '__main__':
    # Hyper Parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default='fssgcn', type=str)
    parser.add_argument('--optimizer', default='adam', type=str)
    parser.add_argument('--initializer', default='xavier_uniform_', type=str)
    parser.add_argument('--learning_rate', default=0.001, type=float)
    parser.add_argument('--input_dropout', default=0.1, type=float)
    parser.add_argument('--gcn_dropout', default=0.1, type=float)
    parser.add_argument('--head_dropout', default=0.1, type=float)
    parser.add_argument('--keep_prob2', default=0.1, type=float)
    parser.add_argument('--keep_prob1', default=0.1, type=float)
    parser.add_argument('--alpha', default=0.3, type=float)
    parser.add_argument('--l2reg', default=0.00001, type=float)
    # parser.add_argument('--l2reg', default=0.000005, type=float)
    parser.add_argument('--num_epoch', default=100, type=int)
    parser.add_argument('--batch_size', default=32, type=int)
    parser.add_argument('--log_step', default=5, type=int)
    parser.add_argument('--embed_dim', default=200, type=int)
    parser.add_argument('--embedding_dim_pos', default=100, type=int)
    ### embedding file for the Chinese dataset
    parser.add_argument('--embedding_path', default='embedding.txt', type=str)
    ### embedding file for the English dataset ###########
    # parser.add_argument('--embedding_path', default='all_embedding_en.txt', type=str)
    #################################################
    parser.add_argument('--pos_num', default=138, type=int)
    parser.add_argument('--hidden_dim', default=100, type=int)
    parser.add_argument('--num_layers', default=3, type=int)
    parser.add_argument('--nheads', default=1, type=int)
    parser.add_argument('--sublayer_first', default=2, type=int)
    parser.add_argument('--sublayer_second', default=4, type=int)
    parser.add_argument('--sublayer', default=1, type=int)
    parser.add_argument('--no_rnn', default=False, type=bool)
    parser.add_argument('--rnn_layer', default=1, type=int)
    parser.add_argument('--rnn_hidden', default=100, type=int)
    parser.add_argument('--rnn_dropout', default=0.5, type=float)
    parser.add_argument('--no_pos', default=False, type=bool)
    parser.add_argument('--n_split', default=10, type=int)
    parser.add_argument('--per', default=1.0, type=float)
    parser.add_argument('--num_class', default=2, type=int)
    parser.add_argument('--save', default=True, type=bool)
    parser.add_argument('--seed', default=776, type=int)
    parser.add_argument('--device', default=None, type=str)
    parser.add_argument('--infer_time', default=False, type=bool)
    #### use the English dataset
    # parser.add_argument('--dataset', default='EC_en', type=str)
    #### use the Chinese dataset
    parser.add_argument('--dataset', default='EC', type=str)
    opt = parser.parse_args()

    # Map the CLI model name to its class and its expected input columns.
    model_classes = {
        'ecgcn': ECGCN,
        'ecgat': ECGAT,
        'aggcn': AGClassifier,
        'fssgcn': ECClassifier
    }
    input_colses = {
        'ecgcn': ['content', 'sen_len', 'doc_len', 'doc_id', 'emotion_id', 'graph'],
        'ecgat': ['content', 'sen_len', 'doc_len', 'doc_id', 'emotion_id', 'graph'],
        'aggcn': ['content', 'sen_len', 'doc_len', 'doc_id', 'emotion_id', 'graph'],
        'fssgcn': ['content', 'sen_len', 'doc_len', 'doc_id', 'emotion_id', 'graph']
    }
    initializers = {
        'xavier_uniform_': torch.nn.init.xavier_uniform_,
        # NOTE(review): `xavier_normal` (no trailing underscore) is a
        # deprecated alias of `xavier_normal_` in torch -- confirm it still
        # exists in the pinned torch version.
        'xavier_normal_': torch.nn.init.xavier_normal,
        'orthogonal_': torch.nn.init.orthogonal_,
    }
    optimizers = {
        'adadelta': torch.optim.Adadelta,  # default lr=1.0
        'adagrad': torch.optim.Adagrad,  # default lr=0.01
        'adam': torch.optim.Adam,  # default lr=0.001
        'adamax': torch.optim.Adamax,  # default lr=0.002
        'asgd': torch.optim.ASGD,  # default lr=0.01
        'rmsprop': torch.optim.RMSprop,  # default lr=0.01
        'sgd': torch.optim.SGD,
    }
    opt.model_class = model_classes[opt.model_name]
    opt.inputs_cols = input_colses[opt.model_name]
    opt.initializer = initializers[opt.initializer]
    opt.optimizer = optimizers[opt.optimizer]

    # Dataset-specific dimensions override the CLI defaults.
    if opt.dataset == 'EC':
        opt.max_doc_len = 75
        opt.max_sen_len = 45
        opt.data_size = 2105
        opt.hidden_dim = 100
        opt.rnn_hidden = 100
        opt.embed_dim = 200
        opt.embedding_path = 'embedding.txt'
    else:
        opt.max_doc_len = 45
        opt.max_sen_len = 130
        opt.data_size = 2105
        opt.hidden_dim = 150
        opt.rnn_hidden = 150
        opt.embed_dim = 300
        opt.embedding_path = 'all_embedding_en.txt'

    opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') \
        if opt.device is None else torch.device(opt.device)

    # Fix all RNG seeds for reproducibility.
    if opt.seed is not None:
        random.seed(opt.seed)
        np.random.seed(opt.seed)
        torch.manual_seed(opt.seed)
        torch.cuda.manual_seed(opt.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    p, r, f1 = [], [], []
    for i in range(1):
        model = Model(opt, i)
        ### print model size and training arguments
        model._print_args()
        p_t, r_t, f1_t = model.run(i)
        p.append(p_t)
        r.append(r_t)
        f1.append(f1_t)
    print("max_test_pre_avg: {:.4f}, max_test_rec_avg: {:.4f}, max_test_f1_avg: {:.4f}".format(np.mean(p), np.mean(r), np.mean(f1)))
{
"api_name": "math.ceil",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.nn.init.uniform_",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numbe... |
42095752382 | import os, settings
from app import myApp
import uuid
from flask import request, render_template
from pdf_core import PdfHelper
from threading import Timer
@myApp.route('/', methods=['GET', 'POST'])
def upload_file():
    """GET: render the upload form.  POST: merge the uploaded PDF files.

    The merged PDF is written under RESULT_PATH with a random (uuid4) name
    and scheduled for deletion after 10 minutes; the result page shows a
    link to it.
    """
    if request.method == 'POST':
        # create a list with all pdf files
        files = []
        for uploadedFile in request.files.getlist('file'):
            if allowed_file(uploadedFile.filename):
                files.append(uploadedFile)

        # join pdf files
        pdfHelper = PdfHelper()
        uniqueFilenamePath = os.path.join(settings.RESULT_PATH, str(uuid.uuid4()) + ".pdf")
        pdfHelper.merge_pdfs(files, uniqueFilenamePath)

        # remove the file after 10 min
        # NOTE(review): the result may disappear while a client is still
        # downloading it -- confirm 10 minutes is an acceptable lifetime.
        t = Timer(60*10, delete, (uniqueFilenamePath,))
        t.start();

        # close the files
        for uploadedFile in files:
            uploadedFile.close()

        return render_template('show_links.html', link=uniqueFilenamePath)
    return render_template('index.html')
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in settings.ALLOWED_EXTENSIONS
def delete(dest):
    """Remove the file at *dest* if it is present; otherwise do nothing."""
    if not os.path.exists(dest):
        return
    os.remove(dest)
{
"api_name": "flask.request.method",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask.request.files.getlist",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "f... |
72940803068 | # This is a sample Python script.
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
import os, requests, json
# python request examples
# https://www.pythonforbeginners.com/requests/using-requests-in-python
def print_hi(name):
    """Print a small greeting that includes *name*."""
    print(f'Hi, {name}')
def restexample01():
    """POST a repo-creation request to the GitHub API and print the reply.

    NOTE: uses placeholder basic-auth credentials ('user'/'*****'); the
    request will be rejected unless they are replaced.
    """
    github_url = "https://api.github.com/user/repos"
    data = json.dumps({'name': 'test', 'description': 'some test repo'})
    r = requests.post(github_url, data, auth=('user', '*****'))
    # BUG FIX: r.json is a bound method; it must be *called* to decode the
    # response body -- the original printed the method object itself.
    print(r.json())
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # NOTE(review): restexample01() performs a live GitHub API call with
    # placeholder credentials; expect an HTTP error unless configured.
    restexample01()
    print_hi("PyCharm. It's end of the code")
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| lean35/python101 | main.py | main.py | py | 915 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.dumps",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 18,
"usage_type": "call"
}
] |
70082164988 |
from routersim.interface import LogicalInterface
from .messaging import FrameType
from .messaging import ICMPType, UnreachableType
from .mpls import MPLSPacket, PopStackOperation
from .observers import Event, EventType
from scapy.layers.inet import IP,ICMP,icmptypes
from copy import copy
import ipaddress
class ForwardingTable:
    """Holds the installed FIB and answers IPv4/MPLS next-hop lookups."""

    def __init__(self, event_manager, parent_logger):
        self.fib = None  # installed via set_fib(); keyed by FrameType
        self.event_manager = event_manager
        self.logger = parent_logger.getChild('forwarding')

    def __str__(self):
        return "Forwarding Table"

    def set_fib(self, fib):
        """Install a freshly computed forwarding table."""
        self.fib = fib
        self.logger.debug("Installed new forwarding table")

    def lookup_ip(self, ip_address):
        """Return a list with the forwarding entry for *ip_address*, or None."""
        as_network = ipaddress.ip_network(ip_address)
        # ASSUMPTION: fib is sorted with highest prefix first
        # so we should always arrive at something more specific first
        # yes, this is very inefficient
        if self.fib is None:
            return None

        for prefix in self.fib[FrameType.IPV4]:
            if as_network.overlaps(prefix):
                self.event_manager.observe(
                    Event(
                        EventType.FORWARDING,
                        self,
                        f"Identified forwarding entry for {ip_address}"
                    )
                )
                return [self.fib[FrameType.IPV4][prefix]]
        return None

    def lookup_label(self, label):
        """Return a list with the forwarding entry for MPLS *label*, or None."""
        # CLEANUP: a single guard covers both "no FIB installed yet" and
        # "no MPLS entries" (the original repeated the `fib is None` check).
        if self.fib is None or FrameType.MPLSU not in self.fib:
            return None

        return [self.fib[FrameType.MPLSU][str(label)]]

    def print_fib(self):
        """Dump the IPv4 and MPLS FIB entries to stdout."""
        print("** IPV4 FIB ***")
        for prefix in self.fib[FrameType.IPV4]:
            entry = self.fib[FrameType.IPV4][prefix]
            print(f"{entry}")
        print("")
        print("** MPLS FIB ***")
        for prefix in self.fib[FrameType.MPLSU]:
            entry = self.fib[FrameType.MPLSU][prefix]
            print(f"{entry}")
class PacketForwardingEngine():
    """Simulated PFE: takes frames from interfaces (or from the router's own
    control plane) and forwards them according to the ForwardingTable."""
    def __init__(self, forwarding_table: ForwardingTable, router):
        self.router = router
        self.forwarding = forwarding_table
        self.arp_cache = router.arp.cache
        self.logger = router.logger.getChild("pfe")
    # Intended for internal communications
    def accept_frame(self, frame, dest_interface=None):
        """Entry point for frames originated by this router itself; emits a
        LOCAL_SEND event and forwards with from_self=True."""
        self.router.event_manager.observe(
            Event(
                EventType.PACKET_SEND,
                self.router, f"PFE Sending {frame.type}", object=frame, target=dest_interface,
                sub_type="LOCAL_SEND")
        )
        # parameter naming was confusing...
        self.process_frame(frame, dest_interface=dest_interface, from_self=True)
    def process_frame(self, frame, source_interface=None, from_self=False, dest_interface=None):
        """Dispatch one frame by frame.type (IPv4 / ARP / CLNS / MPLS).

        source_interface: ingress interface, None for locally generated frames.
        from_self: True when the router itself produced the frame.
        dest_interface: optional explicit egress interface.
        """
        # Inner helper so the MPLS branch can re-enter IP forwarding after a
        # label pop.  Mutates pdu (decrements TTL) on the forwarding path.
        def process_ip(pdu, dest_interface=None):
            if pdu.inspectable() and not from_self:
                self.router.process_packet(source_interface, pdu)
                return
            # should be an IPPacket
            potential_next_hops = self.forwarding.lookup_ip(
                pdu.dst
            )
            if potential_next_hops is not None:
                pdu.ttl -= 1
                # TODO: Fire event?
                hop_action = potential_next_hops[0]
                self.logger.info(f"Will apply action {hop_action.action}")
                # Non-string actions are operation objects (e.g. MPLS label
                # push) that transform the PDU.
                if not isinstance(hop_action.action, str):
                    newpdu = hop_action.action.apply(pdu, self.router, self.router.event_manager)
                    self.logger.info(f"New pdu is {newpdu}")
                    if isinstance(newpdu, MPLSPacket):
                        hop_action.interface.phy.send(FrameType.MPLSU, newpdu)
                    else:
                        # NOTE(review): logger.warn is deprecated; use warning().
                        self.logger.warn("Didn't get back an MPLSPacket")
                else:
                    if hop_action.action == 'FORWARD' or dest_interface is not None:
                        # TODO: If we know the dest_interface should we be blindly sending on it?
                        # I'm not too happy about this quite yet
                        # really the link between the RE and PFE is wonky
                        if dest_interface is None:
                            self.logger.debug(f"Using {potential_next_hops[0].interface} for {pdu}")
                            dest_interface = potential_next_hops[0].interface
                        # NOTE(review): message text is missing its closing ")".
                        self.logger.debug(f"Using {dest_interface} for {pdu} (potential NH: {potential_next_hops[0]}")
                        self.send_encapsulated(
                            potential_next_hops[0].next_hop_ip,
                            FrameType.IPV4,
                            pdu,
                            dest_interface
                        )
                    elif hop_action.action == 'CONTROL':
                        # Destined to this router's control plane.
                        if from_self:
                            self.logger.error(f"Unexpectedly have frame from self we need to forward {pdu}")
                            raise Exception(f"Unexpectedly have frame from self we need to forward {pdu}")
                        self.router.process_packet(source_interface, pdu)
                    elif hop_action.action == 'REJECT' and source_interface is not None:
                        # Send ICMP network-unreachable back to the source.
                        #print(f"Sending reject from {source_interface.name}:{source_interface.address().ip} to {pdu.source_ip}")
                        packet = IP(
                            dst=pdu.src,
                            src=source_interface.address().ip
                        ) / ICMP(
                            type = ICMPType.DestinationUnreachable,
                            code=UnreachableType.NetworkUnreachable
                        ) / (
                            # NOTE(review): scapy payloads are normally bytes or
                            # packets; layering a tuple here looks suspect -- confirm.
                            pdu.dst,
                            pdu.src,
                            pdu.payload.payload # IRL its first 8 bytes
                        )
                        source_interface.send_ip(packet)
                    else:
                        self.logger.info(f"**** Have action {hop_action.action}")
            else:
                self.logger.warn("**** Need to issue ICMP UNREACHABLE")
                pass
                # send unreachable
        # Work on a copy so upstream holders of the frame see the original PDU.
        pdu = copy(frame.pdu)
        if frame.type == FrameType.IPV4:
            self.logger.info("Calling process_ip")
            process_ip(pdu, dest_interface)
        # This means we're supposed to look at it
        # special case of control plane...
        elif frame.type == FrameType.ARP:
            # So, dilemma: Here we PROBABLY want to make sure
            # this only happens on switch interfaces?
            # would is also happen on routed interfaces?
            self.router.process_arp(source_interface, pdu)
            # TODO: If we're switching, we also want to forward it!
        elif frame.type == FrameType.CLNS:
            # IS-IS PDUs go straight to the routing process.
            self.router.process['isis'].process_pdu(source_interface, frame.pdu)
        elif frame.type == FrameType.MPLSU:
            # pdu should be an MPLSPacket
            potential_next_hops = None
            try:
                # Look up the innermost (bottom) label of the stack.
                potential_next_hops = self.forwarding.lookup_label(
                    pdu.label_stack[len(pdu.label_stack)-1]
                )
            # NOTE(review): bare except hides real errors; lookup_label raises
            # KeyError for an unknown label -- catching that explicitly would
            # be safer.
            except:
                # Label '3' is implicit-null: pop and continue as plain IP.
                if pdu.label_stack[0] == '3':
                    newpdu = PopStackOperation().apply(pdu, self.router, event_manager=self.router.event_manager)
                    if isinstance(newpdu, IP):
                        process_ip(newpdu)
                        return
                self.logger.warn(f"Unable to find {pdu.label_stack[0]}")
            if potential_next_hops is not None:
                fibentry = potential_next_hops[0]
                newpdu = fibentry.action.apply(pdu,
                    self.router,
                    event_manager=self.router.event_manager)
                if isinstance(newpdu, MPLSPacket):
                    fibentry.interface.parent.send(
                        FrameType.MPLSU, newpdu, logical=None)
                elif isinstance(newpdu, IP):
                    fibentry.interface.send_ip(newpdu)
                else:
                    print(f"Unknown de-encapsulated packet type!")
            else:
                self.logger.error(f"**** No action found for label {pdu.label_stack[0]}")
    def send_encapsulated(self,
                          next_hop: ipaddress.IPv4Address,
                          type: FrameType,
                          packet,
                          interface: LogicalInterface):
        """Resolve *next_hop* to a hardware address and send *packet* out of
        *interface*; if next_hop is None it must be directly connected.

        When ARP has no entry yet, the packet is dropped and an ARP request
        is issued instead (no retransmit queue).
        """
        if next_hop is None:
            dest_ip = packet.dst
            dest_ip_as_net = ipaddress.ip_network(f"{dest_ip}/32")
            if interface.address().network.overlaps(dest_ip_as_net):
                next_hop = dest_ip
            else:
                raise Exception("Valid IP is required")
        # NOTE(review): assumes arp_cache[...] returns None (not KeyError)
        # for a miss -- confirm the cache type's behavior.
        hw_address = self.arp_cache[next_hop]
        if hw_address is None:
            # TODO: Drop it?
            self.router.arp.request(next_hop, interface)
        else:
            interface.send(hw_address, type, packet)
| jdewald/router-sim | routersim/forwarding.py | forwarding.py | py | 9,267 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "ipaddress.ip_network",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "messaging.FrameType.IPV4",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "messaging.FrameType",
"line_number": 34,
"usage_type": "name"
},
{
"api_name":... |
21797961836 | # make a time series of instantaneous electric power consumption graph from a csv file
import csv
import glob
import re
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from statistics import mean
# define variables
# Sampling period in seconds (apparently 100 Hz logging -- TODO confirm);
# the 99/100-sample window sizes hard-coded below assume this rate.
timestep = 0.01
def csv_to_graph(path):
    """Plot the current trace around its peak and annotate the steady-state mean.

    path: CSV file with a 'current' column (first column is the index).
    Shows the plot interactively; returns nothing.
    """
    data = pd.read_csv(path, index_col=0, skipinitialspace=True)
    # convert csv data to a list format data
    current = np.array(data['current'].values.tolist())
    # find the peak value from the list data
    peak_value_index = np.argmax(current)
    # Window: 100 samples before the peak, 500 after (~6 s at 100 Hz).
    # NOTE(review): if the peak sits within 100 samples of the start (or 500
    # of the end) this slice is truncated or even empty -- confirm inputs.
    arround_peak_value = current[peak_value_index-100:peak_value_index+500]
    # Steady-state ("const") value: mean of the last 400 samples of the window.
    const_value = arround_peak_value[len(arround_peak_value)-400:len(arround_peak_value)]
    avg_const_value = round(mean(const_value),2)
    text_avg_const_value = "mean const value = " + str(avg_const_value)
    # make a time series graph (x axis in seconds)
    count = np.arange(0, len(arround_peak_value)/100, timestep)
    plt.plot(count, arround_peak_value)
    plt.xlim(0.0, 6.0)
    plt.ylim(0.0, 10.0)
    plt.xlabel('t [s]')
    plt.ylabel('current [A]')
    font_dict = dict(style="italic",
                     size=16)
    bbox_dict = dict(facecolor="#ffffff",
                     edgecolor="#000000",
                     fill=True)
    # Annotate the steady-state mean inside the axes.
    plt.text(2.5, 9, text_avg_const_value, font_dict, bbox=bbox_dict)
    plt.grid()
    plt.show()
def make_result_file(path):
    """Summarise every '*.csv' capture under *path* into '<path>result.txt'.

    For each file the peak current and the mean steady-state ("const")
    current are extracted; one line per file plus an overall-mean line are
    appended (mode 'a') to the result file.

    path: directory prefix ending with a path separator.
    """
    peak_value = []
    mean_const_value = []
    file_list = glob.glob(path+'*.csv')
    if not file_list:
        # Robustness fix: the original crashed on mean([]) when the
        # directory contained no CSV files.
        return
    for file in file_list:
        print(file)
        data = pd.read_csv(file, index_col=0, skipinitialspace=True)
        current = np.array(data['current'].values.tolist())
        peak_value_index = np.argmax(current)
        # Window of 100 samples before / 500 after the peak (~6 s at 100 Hz).
        arround_peak_value = current[peak_value_index-100:peak_value_index+500]
        # Steady-state region: last 400 samples of the window.
        const_value = arround_peak_value[len(arround_peak_value)-400:len(arround_peak_value)]
        avg_const_value = round(mean(const_value), 2)
        peak_value.append(np.max(current))
        mean_const_value.append(avg_const_value)
    file_name = path + 'result.txt'
    # Fix: use a context manager so the file is closed even on error
    # (the original used a bare open()/close() pair).
    with open(file_name, 'a') as f:
        for i, (peak, const) in enumerate(zip(peak_value, mean_const_value)):
            f.write("FILE%s: Peak value: %s, Mean const value: %s \n" % (i, peak, const))
        mean_peak = round(mean(peak_value), 2)
        mean_const = round(mean(mean_const_value), 2)
        f.write("Mean peak value: %s, Mean const value: %s\n" % (mean_peak, mean_const))
# analyze the step down experiment data
def analyze_gradation_exp(file_list):
    """Analyse step-down (gradation) experiment captures.

    For every CSV in *file_list*, split the current trace after its peak
    into consecutive 99-sample (~1 s at 100 Hz) windows and print the mean
    current of each window.
    """
    for file in file_list:
        data = pd.read_csv(file, index_col=0, skipinitialspace=True)
        current = np.array(data['current'].values.tolist())
        # Start the window sequence at the peak of the trace.
        start_index = int(np.argmax(current))
        # Bug fix: the original sliced `current[start:start+99*i]`, whose
        # first window (i == 0) was empty -- mean() then raised
        # StatisticsError -- and later windows grew with i. Each window
        # should be a fixed 99 samples.
        n_windows = len(current[start_index:]) // 99
        grad_data = []
        for _ in range(n_windows):
            grad_data.append(current[start_index:start_index + 99])
            start_index += 99
        # Print the mean of each one-second window.
        for window in grad_data:
            mean_grad = mean(window)
            print(mean_grad)
# write results on a text file
if __name__ == "__main__":
# import csv format file
"""
# useage: make a time series of power consumption graph
path = "test.csv"
csv_to_graph(path)
"""
# useage: make result files
path = 'C:/Users/is0232xf/OneDrive - 学校法人立命館/ソースコード/BIWAKO_unit_test/csv/diagonal/25%/'
make_result_file(path)
"""
files = os.listdir(path)
# get subdirectory list
files_dir = [f for f in files if os.path.isdir(os.path.join(path, f))]
for subdir in files_dir:
dir = path + subdir + '/'
make_result_file(dir)
""" | is0232xf/BIWAKO_unit_test | csv_to_graph.py | csv_to_graph.py | py | 4,460 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "statistics.mean",
"line_... |
70132131389 | from typing import Tuple
from sqlalchemy import and_, desc
from quizard_backend import db
from quizard_backend.utils.exceptions import raise_not_found_exception
from quizard_backend.utils.transaction import in_transaction
def dict_to_filter_args(model, **kwargs):
    """Yield SQLAlchemy equality clauses built from a keyword mapping.

    Each ``key=value`` pair becomes ``getattr(model, key) == value`` so the
    result can be splatted into ``and_``:

        User.query.where(and_(*dict_to_filter_args(User, role_id=10,
                                                   location="Singapore")))

    is equivalent to filtering on ``User.role_id == 10`` and
    ``User.location == "Singapore"``.
    """
    for field_name, expected in kwargs.items():
        yield getattr(model, field_name) == expected
async def get_one(model, **kwargs):
    """Fetch the first row of ``model`` matching the keyword filters, or None."""
    filters = and_(*dict_to_filter_args(model, **kwargs))
    query = model.query.where(filters).limit(1)
    return await query.gino.first()
async def get_many(
    model,
    columns=None,
    after_id=None,
    limit=15,
    in_column=None,
    in_values=None,
    order_by="internal_id",
    descrease=False,
    **kwargs,
):
    """Fetch up to *limit* rows of ``model`` filtered by **kwargs, using
    keyset pagination relative to the row whose public id is *after_id*.

    columns: optional list of column names to select instead of full rows.
    in_column / in_values: optional ``column IN (values)`` restriction.
    order_by: column name to sort on (default: insertion order).
    descrease: sort/paginate in descending order (sic -- typo kept because
        it is part of the public keyword API).
    Raises a not-found error when *after_id* matches no row.
    """
    # Get the `internal_id` value from the starting row
    # And use it to query the next page of results
    last_internal_id = 0
    if after_id:
        row_of_after_id = await model.query.where(model.id == after_id).gino.first()
        if not row_of_after_id:
            raise_not_found_exception(model, **kwargs)
        last_internal_id = row_of_after_id.internal_id
    # Get certain columns only
    if columns:
        query = db.select([*(getattr(model, column) for column in columns)])
    else:
        query = model.query
    # Combine: user filters, the pagination cursor, and the optional IN clause.
    # NOTE(review): with descrease=True and no after_id the cursor clause falls
    # back to `internal_id > 0`, which still matches everything -- confirm.
    query = query.where(
        and_(
            *dict_to_filter_args(model, **kwargs),
            model.internal_id < last_internal_id
            if descrease and last_internal_id
            else model.internal_id > last_internal_id,
            getattr(model, in_column).in_(in_values)
            if in_column and in_values
            else True,
        )
    )
    return (
        await query.order_by(
            desc(getattr(model, order_by)) if descrease else getattr(model, order_by)
        )
        .limit(limit)
        .gino.all()
    )
async def get_latest_quiz_attempts(model, user_id, limit=15, after_id=None, **kwargs):
    """Return the newest attempt per (quiz_id, user_id) for *user_id*,
    newest first, paginated by the attempt whose public id is *after_id*.

    model: the QuizAttempt model (used only to resolve *after_id*).
    **kwargs: passed through to the not-found error message.
    """
    # Get the `internal_id` value from the starting row
    # And use it to query the next page of results
    last_internal_id = 0
    if after_id:
        row_of_after_id = await model.query.where(model.id == after_id).gino.first()
        if not row_of_after_id:
            raise_not_found_exception(model, **kwargs)
        last_internal_id = row_of_after_id.internal_id
    # Raw SQL: DISTINCT ON keeps only the newest attempt per (quiz, user);
    # the outer query orders those newest-first and applies the cursor.
    # db.status() returns a (status_string, rows) pair -- [1] keeps the rows.
    return (
        await db.status(
            db.text(
                """SELECT * FROM (
                    SELECT DISTINCT ON (quiz_attempt.quiz_id, quiz_attempt.user_id)
                        quiz_attempt.quiz_id,
                        quiz_attempt.user_id,
                        quiz_attempt.is_finished,
                        quiz_attempt.internal_id
                    FROM quiz_attempt
                    WHERE quiz_attempt.user_id = :user_id {}
                    ORDER BY
                        quiz_attempt.quiz_id,
                        quiz_attempt.user_id,
                        quiz_attempt.internal_id DESC
                ) t
                ORDER By t.internal_id DESC limit :limit;""".format(
                    "and quiz_attempt.internal_id < :last_internal_id"
                    if after_id
                    else ""
                )
            ),
            {"user_id": user_id, "limit": limit, "last_internal_id": last_internal_id},
        )
    )[1]
async def get_one_latest(model, **kwargs):
    """Fetch the most recently inserted row of ``model`` matching the filters,
    or None when nothing matches."""
    filters = and_(*dict_to_filter_args(model, **kwargs))
    newest_first = model.query.where(filters).order_by(desc(model.internal_id))
    return await newest_first.limit(1).gino.first()
async def get_many_with_count_and_group_by(
    model, *, columns, in_column=None, in_values=None
):
    """Group rows of ``model`` by *columns* and count the rows per group.

    in_column / in_values: optional ``column IN (values)`` restriction;
    when omitted, all rows are counted.
    """
    grouped_cols = [getattr(model, column) for column in columns]
    membership = (
        getattr(model, in_column).in_(in_values)
        if in_column and in_values
        else True
    )
    query = db.select([*grouped_cols, db.func.count()]).where(membership)
    return await query.group_by(*grouped_cols).gino.all()
@in_transaction
async def create_one(model, **kwargs):
    """Insert and return a new ``model`` row, inside a transaction."""
    instance = model(**kwargs)
    return await instance.create()
@in_transaction
async def update_one(row, **kwargs):
    """Apply the given column updates to *row* and return it.

    Returns the row untouched when no updates are supplied.
    """
    if kwargs:
        await row.update(**kwargs).apply()
    return row
@in_transaction
async def update_many(model, get_kwargs, update_kwargs):
    """Update every row of ``model`` matching *get_kwargs* with
    *update_kwargs*; return the database status string.
    """
    # Fix: the original wrapped the filter in a redundant nested
    # `and_(*and_(...))`; a single `and_` over the generated clauses is
    # what SQLAlchemy expects and matches delete_many below.
    status: Tuple[str, list] = await model.update.values(**update_kwargs).where(
        and_(*dict_to_filter_args(model, **get_kwargs))
    ).gino.status()
    return status[0]
@in_transaction
async def delete_many(model, **kwargs):
    """Delete every row of ``model`` matching the keyword filters; return
    the database status string."""
    condition = and_(*dict_to_filter_args(model, **kwargs))
    result: Tuple[str, list] = await model.delete.where(condition).gino.status()
    return result[0]
| donjar/quizard | api/quizard_backend/utils/query.py | query.py | py | 5,219 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "sqlalchemy.and_",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "quizard_backend.utils.exceptions.raise_not_found_exception",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "quizard_backend.db.select",
"line_number": 65,
"usage_type": "... |
1883488340 | import sys
import pefile
import re
# Pega os headers de um executável
def get_headers(executable):
    """Return the section names of a PE executable as a list of strings."""
    pe = pefile.PE(executable)
    return [section.Name.decode('utf-8') for section in pe.sections]
# Pega os headers dos argumentos de entrada
# Collect the section headers of both input executables.
sections1 = get_headers(sys.argv[1])
sections2 = get_headers(sys.argv[2])
def _print_section_list(label, sections):
    """Print *label* followed by the sections as a quoted, comma-separated,
    bracketed list (same output shape as the original per-item loop)."""
    quoted = ", ".join("'{}'".format(section) for section in sections)
    print("{}: [{}]\n".format(label, quoted))
# Fix: the original repeated the same print-the-list loop three times;
# the shared helper above removes the duplication.
# Sections present in both executables.
_print_section_list("Seções comuns", list(set(sections1).intersection(sections2)))
# Sections only in the first executable (label = its basename).
_print_section_list(re.sub(".*/", "", sys.argv[1]), list(set(sections1).difference(sections2)))
# Sections only in the second executable.
_print_section_list(re.sub(".*/", "", sys.argv[2]), list(set(sections2).difference(sections1)))
{
"api_name": "pefile.PE",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 28,
... |
3235447487 | from __future__ import annotations
from typing import TYPE_CHECKING
from avilla.core.context import Context
from avilla.core.event import RelationshipCreated, RelationshipDestroyed
from avilla.core.selector import Selector
from avilla.core.trait.context import EventParserRecorder
from cai.client.events.group import (
GroupLuckyCharacterChangedEvent,
GroupLuckyCharacterClosedEvent,
GroupLuckyCharacterInitEvent,
GroupLuckyCharacterNewEvent,
GroupLuckyCharacterOpenedEvent,
GroupMemberJoinedEvent,
GroupMemberLeaveEvent,
GroupMemberMutedEvent,
GroupMemberPermissionChangeEvent,
GroupMemberSpecialTitleChangedEvent,
GroupMemberUnMutedEvent,
GroupNameChangedEvent,
TransferGroupEvent,
)
if TYPE_CHECKING:
from ..account import CAIAccount
from ..protocol import CAIProtocol
# Typed recorder used below as a decorator to register parsers that turn
# raw CAI events into Avilla events.
event = EventParserRecorder["CAIProtocol", "CAIAccount"]
@event("GroupMemberJoinedEvent")
async def group_member_joined_event(
protocol: CAIProtocol, account: CAIAccount, raw: GroupMemberJoinedEvent
):
group = Selector().land(protocol.land.name).group(str(raw.group_id))
member = group.member(str(raw.uin))
context = Context(
account=account,
client=member,
endpoint=group,
scene=group,
selft=group.member(account.id),
)
return RelationshipCreated(context, member, group, context.self), context
@event("GroupMemberLeaveEvent")
async def group_member_leave_event(
protocol: CAIProtocol, account: CAIAccount, raw: GroupMemberLeaveEvent
):
group = Selector().land(protocol.land.name).group(str(raw.group_id))
member = group.member(str(raw.uin))
context = Context(
account=account,
client=member,
endpoint=group,
scene=group,
selft=group.member(account.id),
)
res = RelationshipDestroyed(context, member, group, context.self)
if raw.operator and raw.operator != raw.uin:
res.mediums.append(group.member(str(raw.operator)))
return res, context
| RF-Tar-Railt/Avilla-CAI | avilla/cai/event/group.py | group.py | py | 2,023 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "avilla.core.trait.context.EventParserRecorder",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "protocol.CAIProtocol",
"line_number": 34,
"usage_type": "name"
},
... |
22034975052 | from lib2to3.pgen2 import token
from brownie import Test, accounts, interface
from eth_utils import to_wei
from web3 import Web3
def main():
    """Brownie script entry point: runs the add/remove-liquidity demo."""
    deploy()
def deploy():
    """Deploy the Test contract and demo Uniswap add/removeLiquidity for
    the DAI/WETH pair on a mainnet fork.

    Funds account[0] from whale addresses, approves the contract, adds
    2400 DAI + 1 WETH of liquidity, then removes it, printing the Log
    events of both transactions.
    NOTE(review): assumes a forked-mainnet network with unlocked accounts
    so transfers "from" the whale addresses succeed -- confirm the brownie
    network config.
    """
    # NOTE(review): amount_in is computed but never used below.
    amount_in = Web3.toWei(1000000, "ether")
    # DAI address
    DAI = "0x6B175474E89094C44Da98b954EedeAC495271d0F"
    # DAI whale
    DAI_WHALE = "0xcffad3200574698b78f32232aa9d63eabd290703"
    # WETH
    WETH = "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
    # WETH whale
    WETH_WHALE = "0xeD1840223484483C0cb050E6fC344d1eBF0778a9"
    print("===Transferring gas cost covers===")
    # covering the transaction cost
    # accounts[0].transfer(DAI_WHALE, "1 ether")
    # accounts[0].transfer(WETH_WHALE, "1 ether")
    tokenA = interface.IERC20(DAI)
    tokenB = interface.IERC20(WETH)
    print("===Transferring the tokenA and tokenB amounts from whales to account[0]===")
    tokenA.transfer(accounts[0], Web3.toWei(2400, "ether"), {"from": DAI_WHALE})
    tokenB.transfer(accounts[0], Web3.toWei(1, "ether"), {"from": WETH_WHALE})
    contract = Test.deploy({"from": accounts[0]})
    # Allow the contract to pull the liquidity amounts from account[0].
    tokenA.approve(contract.address, Web3.toWei(2400, "ether"), {"from": accounts[0]})
    tokenB.approve(contract.address, Web3.toWei(1, "ether"), {"from": accounts[0]})
    print("Adding liquidity...")
    tx = contract.addLiquidity(
        DAI,
        WETH,
        Web3.toWei(2400, "ether"),
        Web3.toWei(1, "ether"),
        {"from": accounts[0]},
    )
    tx.wait(1)
    print("Added Liquidity...")
    for i in tx.events["Log"]:
        print(i)
    print("=== Removing Liquidity ===")
    tx = contract.removeLiquidity(DAI, WETH, {"from": accounts[0]})
    tx.wait(1)
    for i in tx.events["Log"]:
        print(i)
| emrahsariboz/DeFi | uniswap/scripts/_deployAndAddLiquidity.py | _deployAndAddLiquidity.py | py | 1,713 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "web3.Web3.toWei",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "web3.Web3",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "brownie.interface.IERC20",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "brownie.interface"... |
35800840346 | import unittest
import numpy as np
from numpy import linalg
from task import img_rescaled, img_array_transposed, U, s, Vt
class TestCase(unittest.TestCase):
    """Checks the student's work: the image must be transposed to
    channels-first and each channel decomposed with SVD."""
    def test_transpose(self):
        expected = np.transpose(img_rescaled, (2, 0, 1))
        np.testing.assert_array_equal(img_array_transposed, expected,
                                      'The transposed array does not look right.')
    def test_svd(self):
        reference = np.transpose(img_rescaled, (2, 0, 1))
        U_ref, s_ref, Vt_ref = linalg.svd(reference)
        msg = 'Your decomposition does not look right. Go back to "SVD on One Matrix" to refresh the topic.'
        np.testing.assert_array_equal(U, U_ref, msg)
        np.testing.assert_array_equal(s, s_ref, msg)
        np.testing.assert_array_equal(Vt, Vt_ref, msg)
| jetbrains-academy/Python-Libraries-NumPy | Projects/SVD/Applying to All Colors/tests/test_task.py | test_task.py | py | 1,073 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.testing.assert_array_equal",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "task.img_array_transposed",
"line_number": 9,
"usage_type": "argument"
},
{
... |
40467350126 | # 1번 풀이
# import sys
# dx = [0,0,-1,1] # 우좌상하
# dy = [1,-1,0,0]
# def dfs(places, x, y,depth):
# if depth == 3: # depth 3까지 찾아봤는데 거리두기 잘 지키는 경우 True
# return True
# for i in range(4):
# nx = x + dx[i]
# ny = y + dy[i]
# if 0<= nx <5 and 0<= ny <5 and visited[nx][ny] == 0 and places[nx][ny] != 'X':
# if places[nx][ny] == 'P':
# return False
# else:
# visited[nx][ny] = 1
# if dfs(places,nx,ny,depth + 1):
# visited[nx][ny] = 0
# else:
# visited[nx][ny] = 0
# return False
# return True
# def solution(places):
# global visited
# answer = []
# for place in places:
# flag = 0
# visited = [[0] * 5 for _ in range(5)]
# for i in range(5):
# if flag == 1: # 이미 거리두기 안지키는 사람을 발견함
# break
# for j in range(5):
# if place[i][j] == 'P' and not visited[i][j]:
# visited[i][j] = 1
# if dfs(place, i, j,1):
# continue
# else: # 거리두기 안지키는게 발견
# answer.append(0)
# flag = 1
# break
# else:
# answer.append(1)
# return answer
#2번 풀이
import sys
dx = [0,0,-1,1] # 우좌상하
dy = [1,-1,0,0]
def dfs(place, x, y,depth):
    """Depth-limited DFS from a person at (x, y) on a 5x5 board.

    Clears the module-level flag ``check`` to 0 when another 'P' is
    reachable within manhattan distance 2 without crossing a partition 'X'.
    Relies on the module-level ``visited`` grid and ``dx``/``dy`` offsets.
    NOTE(review): this DFS solution is shadowed by the BFS-based
    ``solution`` defined later in the file.
    """
    global check
    if depth == 3: # searched up to depth 3 without meeting anyone: distancing kept on this path
        return
    for i in range(4):
        nx = x + dx[i]
        ny = y + dy[i]
        if 0<= nx <5 and 0<= ny <5 and visited[nx][ny] == 0 and place[nx][ny] != 'X':
            if place[nx][ny] == 'P':
                check = 0
                return
            else:
                # Mark, descend, then unmark (backtracking).
                visited[nx][ny] = 1
                dfs(place,nx,ny,depth + 1)
                visited[nx][ny] = 0
    return
def solution(places):
    """Per 5x5 waiting room, return 1 if social distancing is kept else 0
    (DFS version).

    NOTE(review): redefined by the BFS-based ``solution`` further down;
    that later definition is the one that actually runs.
    """
    global visited
    global check
    answer = []
    for place in places:
        flag = 0
        check = 1 # distancing kept so far in this room
        visited = [[0] * 5 for _ in range(5)]
        for i in range(5):
            if flag == 1: # a violation was already found in this room
                break
            for j in range(5):
                if place[i][j] == 'P' and not visited[i][j]:
                    visited[i][j] = 1
                    dfs(place,i,j,1)
                    if check:
                        continue
                    else: # a distancing violation was found
                        answer.append(0)
                        flag = 1
                        break
        # for/else: runs only when the row loop was not broken out of,
        # i.e. the whole room was scanned without a violation.
        else:
            answer.append(1)
    return answer
# 3번 풀이
from collections import deque
def bfs(place):
dx = [0,0,-1,1] # 우좌상하
dy = [1,-1,0,0]
start = []
q = deque()
visited = [[0] * 5 for _ in range(5)]
for i in range(5):
for j in range(5):
if place[i][j] == 'P' and not visited[i][j]:
start.append((i,j))
for s in start:
i,j = s
visited = [[0] * 5 for _ in range(5)]
visited[i][j] = 1
q.append(s)
while q:
x, y = q.popleft()
if visited[x][y] < 3:
for i in range(4):
nx = x + dx[i]
ny = y + dy[i]
if 0 <= nx < 5 and 0<= ny < 5 and place[nx][ny] != 'X' and not visited[nx][ny]:
if place[nx][ny] == 'P':
return 0
else:
visited[nx][ny] = visited[x][y] + 1
q.append((nx,ny))
return 1
def solution(places):
    """Check social distancing for every waiting room: 1 = kept, 0 = violated."""
    return [bfs(place) for place in places]
if __name__ == '__main__':
    # Sample boards from the problem statement; presumably prints
    # [1, 0, 1, 1, 1] -- TODO confirm against the official examples.
    places = [["POOPX", "OXPXP", "PXXXO", "OXXXO", "OOOPP"],
              ["POOOP", "OXXOX", "OPXPX", "OOXOX", "POXXP"],
              ["PXOPX", "OXOXP", "OXPOX", "OXXOP", "PXPOX"],
              ["OOOXX", "XOOOX", "OOOXX", "OXOOX", "OOOOO"],
              ["PXPXP", "XPXPX", "PXPXP", "XPXPX", "PXPXP"]]
    print(solution(places))
{
"api_name": "collections.deque",
"line_number": 102,
"usage_type": "call"
}
] |
25182089444 | # adapated from munch 2.5.0
from collections.abc import Mapping
class Munch(dict):
    """A dictionary that provides attribute-style access.
    >>> b = Munch()
    >>> b.hello = 'world'
    >>> b.hello
    'world'
    >>> b['hello'] += "!"
    >>> b.hello
    'world!'
    >>> b.foo = Munch(lol=True)
    >>> b.foo.lol
    True
    >>> b.foo is b['foo']
    True
    A Munch is a subclass of dict; it supports all the methods a dict does...
    >>> sorted(b.keys())
    ['foo', 'hello']
    Including update()...
    >>> b.update({ 'ponies': 'are pretty!' }, hello=42)
    >>> print (repr(b))
    Munch({'ponies': 'are pretty!', 'foo': Munch({'lol': True}), 'hello': 42})
    As well as iteration...
    >>> sorted([ (k,b[k]) for k in b ])
    [('foo', Munch({'lol': True})), ('hello', 42), ('ponies', 'are pretty!')]
    And "splats".
    >>> "The {knights} who say {ni}!".format(**Munch(knights='lolcats', ni='can haz'))
    'The lolcats who say can haz!'
    See unmunchify/Munch.toDict, munchify/Munch.fromDict for notes about conversion.
    """
    # update() is used instead of dict.__init__ so that a subclass's custom
    # __setitem__ is honoured for the constructor arguments too.
    def __init__(self, *args, **kwargs): # pylint: disable=super-init-not-called
        self.update(*args, **kwargs)
    # only called if k not found in normal places
    def __getattr__(self, k):
        """Gets key if it exists, otherwise throws AttributeError.
        nb. __getattr__ is only called if key is not found in normal places.
        >>> b = Munch(bar='baz', lol={})
        >>> b.foo
        Traceback (most recent call last):
        ...
        AttributeError: foo
        >>> b.bar
        'baz'
        >>> getattr(b, 'bar')
        'baz'
        >>> b['bar']
        'baz'
        >>> b.lol is b['lol']
        True
        >>> b.lol is getattr(b, 'lol')
        True
        """
        try:
            # Throws exception if not in prototype chain
            return object.__getattribute__(self, k)
        except AttributeError:
            try:
                return self[k]
            except KeyError as exc:
                raise AttributeError(k) from exc
    def __setattr__(self, k, v):
        """Sets attribute k if it exists, otherwise sets key k. A KeyError
        raised by set-item (only likely if you subclass Munch) will
        propagate as an AttributeError instead.
        >>> b = Munch(foo='bar', this_is='useful when subclassing')
        >>> hasattr(b.values, '__call__')
        True
        >>> b.values = 'uh oh'
        >>> b.values
        'uh oh'
        >>> b['values']
        Traceback (most recent call last):
        ...
        KeyError: 'values'
        """
        try:
            # Throws exception if not in prototype chain
            object.__getattribute__(self, k)
        except AttributeError:
            try:
                self[k] = v
            except KeyError as exc:
                raise AttributeError(k) from exc
        else:
            # A real attribute (e.g. a method) exists: shadow it on the
            # instance rather than writing a dict key.
            object.__setattr__(self, k, v)
    def __delattr__(self, k):
        """Deletes attribute k if it exists, otherwise deletes key k. A KeyError
        raised by deleting the key--such as when the key is missing--will
        propagate as an AttributeError instead.
        >>> b = Munch(lol=42)
        >>> del b.lol
        >>> b.lol
        Traceback (most recent call last):
        ...
        AttributeError: lol
        """
        try:
            # Throws exception if not in prototype chain
            object.__getattribute__(self, k)
        except AttributeError:
            try:
                del self[k]
            except KeyError as exc:
                raise AttributeError(k) from exc
        else:
            object.__delattr__(self, k)
    def toDict(self):
        """Recursively converts a munch back into a dictionary.
        >>> b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
        >>> sorted(b.toDict().items())
        [('foo', {'lol': True}), ('hello', 42), ('ponies', 'are pretty!')]
        See unmunchify for more info.
        """
        return unmunchify(self)
    # Expose the munch's contents as its __dict__ so vars(obj) mirrors the keys.
    @property
    def __dict__(self):
        return self.toDict()
    def __repr__(self):
        """Invertible* string-form of a Munch.
        >>> b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
        >>> print (repr(b))
        Munch({'ponies': 'are pretty!', 'foo': Munch({'lol': True}), 'hello': 42})
        >>> eval(repr(b))
        Munch({'ponies': 'are pretty!', 'foo': Munch({'lol': True}), 'hello': 42})
        >>> with_spaces = Munch({1: 2, 'a b': 9, 'c': Munch({'simple': 5})})
        >>> print (repr(with_spaces))
        Munch({'a b': 9, 1: 2, 'c': Munch({'simple': 5})})
        >>> eval(repr(with_spaces))
        Munch({'a b': 9, 1: 2, 'c': Munch({'simple': 5})})
        (*) Invertible so long as collection contents are each repr-invertible.
        """
        return f"{self.__class__.__name__}({dict.__repr__(self)})"
    def __dir__(self):
        return list(self.keys())
    def __getstate__(self):
        """Implement a serializable interface used for pickling.
        See https://docs.python.org/3.6/library/pickle.html.
        """
        return {k: v for k, v in self.items()}
    def __setstate__(self, state):
        """Implement a serializable interface used for pickling.
        See https://docs.python.org/3.6/library/pickle.html.
        """
        self.clear()
        self.update(state)
    __members__ = __dir__ # for python2.x compatibility
    @classmethod
    def fromDict(cls, d):
        """Recursively transforms a dictionary into a Munch via copy.
        >>> b = Munch.fromDict({'urmom': {'sez': {'what': 'what'}}})
        >>> b.urmom.sez.what
        'what'
        See munchify for more info.
        """
        # NOTE(review): the module-level munchify() in this adapted copy takes
        # a single argument, so munchify(d, cls) raises TypeError until the
        # original `factory` parameter is restored there.
        return munchify(d, cls)
    def copy(self):
        return type(self).fromDict(self)
    def update(self, *args, **kwargs):
        """
        Override built-in method to call custom __setitem__ method that may
        be defined in subclasses.
        """
        for k, v in dict(*args, **kwargs).items():
            self[k] = v
    def get(self, k, d=None):
        """
        D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.
        """
        if k not in self:
            return d
        return self[k]
    def setdefault(self, k, d=None):
        """
        D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
        """
        if k not in self:
            self[k] = d
        return self[k]
def munchify(x, factory=None):
    """Recursively transforms a dictionary into a Munch via copy.
    >>> b = munchify({'urmom': {'sez': {'what': 'what'}}})
    >>> b.urmom.sez.what
    'what'
    munchify can handle intermediary dicts, lists and tuples (as well as
    their subclasses), but ymmv on custom datatypes.
    >>> b = munchify({ 'lol': ('cats', {'hah':'i win again'}),
    ... 'hello': [{'french':'salut', 'german':'hallo'}] })
    >>> b.hello[0].french
    'salut'
    >>> b.lol[1].hah
    'i win again'
    nb. As dicts are not hashable, they cannot be nested in sets/frozensets.

    factory: mapping class used for converted mappings (defaults to Munch);
    Munch.fromDict relies on passing its ``cls`` here.
    """
    # Bug fix: this adapted copy had dropped munch's `factory` argument,
    # but Munch.fromDict (and therefore Munch.copy) still call
    # munchify(d, cls), which raised TypeError. The default is resolved
    # lazily so the signature does not reference Munch at definition time.
    if factory is None:
        factory = Munch
    # Munchify x, using `seen` to track object cycles
    seen = dict()
    def munchify_cycles(obj):
        # If we've already begun munchifying obj, just return the already-created munchified obj
        try:
            return seen[id(obj)]
        except KeyError:
            pass
        # Otherwise, first partly munchify obj (but without descending into any lists or dicts) and save that
        seen[id(obj)] = partial = pre_munchify(obj)
        # Then finish munchifying lists and dicts inside obj (reusing munchified obj if cycles are encountered)
        return post_munchify(partial, obj)
    def pre_munchify(obj):
        # Here we return a skeleton of munchified obj, which is enough to save for later (in case
        # we need to break cycles) but it needs to filled out in post_munchify
        if isinstance(obj, Mapping):
            return factory({})
        elif isinstance(obj, list):
            return type(obj)()
        elif isinstance(obj, tuple):
            type_factory = getattr(obj, "_make", type(obj))
            return type_factory(munchify_cycles(item) for item in obj)
        else:
            return obj
    def post_munchify(partial, obj):
        # Here we finish munchifying the parts of obj that were deferred by pre_munchify because they
        # might be involved in a cycle
        if isinstance(obj, Mapping):
            partial.update((k, munchify_cycles(obj[k])) for k in obj.keys())
        elif isinstance(obj, list):
            partial.extend(munchify_cycles(item) for item in obj)
        elif isinstance(obj, tuple):
            for item_partial, item in zip(partial, obj):
                post_munchify(item_partial, item)
        return partial
    return munchify_cycles(x)
def unmunchify(x):
    """Recursively convert a Munch (or any Mapping) back into a plain dict.

    Intermediate dicts, lists and tuples (including their subclasses and
    namedtuples) are converted as well; object cycles are preserved rather
    than causing infinite recursion.

    >>> b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
    >>> sorted(unmunchify(b).items())
    [('foo', {'lol': True}), ('hello', 42), ('ponies', 'are pretty!')]

    nb. As dicts are not hashable, they cannot be nested in sets/frozensets.
    """
    # Maps id(original object) -> partially built plain object, so cyclic
    # references map onto the same output object.
    converted = {}

    def _convert(node):
        # Reuse the already-started conversion for this object, if any.
        key = id(node)
        if key in converted:
            return converted[key]
        # Register a shallow skeleton first, then fill it in; registering
        # before descending is what breaks cycles.
        converted[key] = shell = _make_shell(node)
        return _fill_shell(shell, node)

    def _make_shell(node):
        # Shallow skeleton of the converted node.  Tuples are immutable and
        # must be built in full up front (they cannot be patched afterwards).
        if isinstance(node, Mapping):
            return {}
        if isinstance(node, list):
            return type(node)()
        if isinstance(node, tuple):
            factory = getattr(node, "_make", type(node))  # namedtuple support
            return factory(_convert(member) for member in node)
        return node

    def _fill_shell(shell, node):
        # Populate the skeleton produced by _make_shell.
        if isinstance(node, Mapping):
            shell.update((name, _convert(node[name])) for name in node.keys())
        elif isinstance(node, list):
            shell.extend(_convert(member) for member in node)
        elif isinstance(node, tuple):
            # Tuple members already exist; fill their interiors in place.
            for member_shell, member in zip(shell, node):
                _fill_shell(member_shell, member)
        return shell

    return _convert(x)
| SAIL-Labs/AMICAL | amical/externals/munch/__init__.py | __init__.py | py | 11,370 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "collections.abc.Mapping",
"line_number": 262,
"usage_type": "argument"
},
{
"api_name": "collections.abc.Mapping",
"line_number": 275,
"usage_type": "argument"
},
{
"api_name": "collections.abc.Mapping",
"line_number": 324,
"usage_type": "argument"
},
{... |
37568054562 | # import libraries
import sys
import nltk
nltk.download(['punkt', 'wordnet', 'stopwords'])
import re
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.pipeline import Pipeline
import pickle
def load_data(database_filepath):
    """Load the cleaned messages table from a SQLite database.

    Keyword arguments:
    database_filepath -- path (including file name) of the database that
                         holds the messages and categories

    Returns:
    features -- pandas Series with the raw message texts
    labels -- pandas DataFrame with one binary column per category (36)
    category_names -- list with the names of the category columns
    """
    engine = create_engine('sqlite:///' + database_filepath)
    df = pd.read_sql_table('messages_and_categories', engine)
    features = df['message']
    labels = df.drop(columns=['id', 'message', 'original', 'genre'])
    return features, labels, list(labels.columns)
def tokenize(text):
    """Clean, tokenize and lemmatize a message for classification.

    1) find and replace URLs with a placeholder token
    2) replace non-alphanumeric characters with spaces
    3) drop English stop words from the tokenized message
    4) lemmatize, lowercase and strip each remaining token

    Keyword arguments:
    text -- raw message text to be cleaned and tokenized
    """
    # Raw string avoids invalid-escape warnings for \( \) in the pattern.
    url_regex = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    for url in re.findall(url_regex, text):
        text = text.replace(url, "urlplaceholder")
    text = re.sub(r'\W+', ' ', text)
    # Build the stop-word set once; the original rebuilt the whole stop-word
    # list for every token, which is quadratic in the number of tokens.
    stop_words = set(stopwords.words("english"))
    tokens = [t for t in word_tokenize(text) if t not in stop_words]
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(tok).lower().strip() for tok in tokens]
def build_model():
    """Build a grid-search-wrapped text-classification pipeline.

    The pipeline is CountVectorizer -> TfidfTransformer -> multi-output
    AdaBoost; the returned GridSearchCV tunes it over the grid below.
    """
    steps = [
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(estimator=AdaBoostClassifier())),
    ]
    # Hyperparameter search space for the grid search.
    search_space = {
        'vect__ngram_range': ((1, 1), (1, 2), (2, 2)),
        'tfidf__use_idf': (True, False),
        'tfidf__norm': ('l1', 'l2'),
        'clf__estimator__learning_rate': [0.1, 0.5],
        'clf__estimator__n_estimators': [50, 60, 70],
    }
    return GridSearchCV(Pipeline(steps), param_grid=search_space, verbose=216)
def evaluate_model(model, X_test, Y_test, category_names):
    """Predict on the test set and print a classification report per category.

    Keyword arguments:
    model -- fitted model (or grid search) used for prediction
    X_test, Y_test -- message texts and true category values for testing
    category_names -- list of category column names present in Y_test
    """
    Y_pred = model.predict(X_test)
    # Walk the prediction columns alongside their category names.
    for column, predictions in zip(category_names, Y_pred.T):
        print(column)
        print(classification_report(Y_test[column], predictions))
def save_model(model, model_filepath):
    """Serialize the trained classifier to a pickle file.

    Keyword arguments:
    model -- final fitted model to persist
    model_filepath -- destination path (including file name) of the pickle
    """
    # Use a distinct name for the file handle; the original rebound the
    # `model_filepath` parameter to the open file object, shadowing it.
    with open(model_filepath, 'wb') as handle:
        pickle.dump(model, handle)
def main():
    """CLI entry point: load data, train, evaluate and persist the model.

    Usage: python train_classifier.py <database_filepath> <model_filepath>
    """
    if len(sys.argv) == 3:
        database_filepath, model_filepath = sys.argv[1:]
        print('Loading data...\n    DATABASE: {}'.format(database_filepath))
        X, Y, category_names = load_data(database_filepath)
        # Hold out 20% for evaluation; fixed seed keeps runs reproducible.
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state = 42)
        print('Building model...')
        model = build_model()
        print('Training model...')
        model.fit(X_train, Y_train)
        # model is a GridSearchCV: report the best CV score and parameters.
        print(model.best_score_)
        print(model.best_params_)
        print('Evaluating model...')
        evaluate_model(model, X_test, Y_test, category_names)
        print('Saving model...\n    MODEL: {}'.format(model_filepath))
        save_model(model, model_filepath)
        print('Trained model saved!')
    else:
        print('Please provide the filepath of the disaster messages database '\
              'as the first argument and the filepath of the pickle file to '\
              'save the model to as the second argument. \n\nExample: python '\
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main() | goitom/project_2_disaster_response | models/train_classifier.py | train_classifier.py | py | 5,371 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "nltk.download",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql_table",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "re.findal... |
37272423624 |
import sys
from aspartix_parser import Apx_parser
import itertools
def conflict_free(arguments, attacks):
    """Enumerate all conflict-free sets of arguments.

    A set is conflict-free when no attack has both its source and its
    target inside the set.  Candidate subsets are examined largest-first.

    NOTE(review): attacks are inspected via str(att)[2] and str(att)[4],
    which assumes each attack's string form looks like "('a', 'b')" with
    single-character argument names -- confirm against Apx_parser output.
    """
    confl_free_sets = []
    combs = []
    # All non-empty subsets of the arguments, grouped by size.
    for i in range(1, len(arguments) + 1):
        els = [list(x) for x in itertools.combinations(arguments, i)]
        combs.extend(els)
    # Reverse so the largest candidate sets are examined first.
    combs_sorted = [list(combs_sorted) for combs_sorted in combs][::-1]
    for i in combs_sorted:
        att_count = 0
        for att in attacks:
            # Reject the candidate if an attack lies entirely inside it.
            # NOTE(review): `i in item` tests whether the *list* i is an
            # element of item (a list of strings) and is always False here;
            # likely meant set(i).issubset(set(item)) -- confirm.
            if set([str(att)[2], str(att)[4]]).issubset(set(i)) or any([i in item for item in
                                                                       confl_free_sets]):
                break
            else:
                att_count += 1
        # Accepted only if every attack passed the check above.
        if att_count == len(attacks):
            confl_free_sets.append(i)
    return confl_free_sets
def admissible(confl_free, attacks):
    """Filter the conflict-free sets down to the admissible ones.

    A set is admissible when it defends each of its members: for every
    attack whose target is in the set, some member of the set attacks the
    attacker.

    NOTE(review): like conflict_free(), this relies on str(att)[2] being
    the attacker and str(att)[4] the target (single-character names only).
    NOTE(review): an attack with several defenders increments `count` more
    than once, which can make the final comparison succeed spuriously.
    """
    admissible_sets = []
    for ext in confl_free:
        count = 0
        for ext in confl_free if False else [ext]:  # placeholder removed
            pass
        for att in attacks:
            if str(att)[4] not in ext:
                # The attack does not target this set; nothing to defend.
                count += 1
            else:
                # The target is in ext: look for a counter-attack from ext.
                for atr in attacks:
                    if (str(att)[2] == str(atr)[4]) and (str(atr)[2] in ext):
                        count += 1
        # Admissible only if every attack is harmless or defended.
        if count == len(attacks):
            admissible_sets.append(ext)
    return admissible_sets
# def complete(admissible_sets, attacks):
# complete_ext = []
# for adm in admissible_sets:
# for att in attacks:
# if (str(att)[2] in adm) and
#
# return complete_ext
def preferred(admissible_sets):
    """Return the preferred extensions.

    A preferred extension is an admissible set that is maximal with respect
    to set inclusion among the given admissible sets.
    """
    preferred_exts = []
    for candidate in admissible_sets:
        # The candidate survives unless some *other* admissible set is a
        # superset of it.
        has_superset = any(
            set(candidate).issubset(set(other)) and other != candidate
            for other in admissible_sets
        )
        if not has_superset:
            preferred_exts.append(candidate)
    return preferred_exts
def stable_extensions(stable_exts):
    # TODO(review): stable semantics are not implemented; this stub always
    # returns None, so callers print "Stable extensions:  None".
    pass
if __name__ == '__main__':
    # Default input; can be overridden by the first CLI argument.
    filepath = 'example.apx'
    if sys.argv[1:]:
        filepath = sys.argv[1]
    arguments = []
    attacks = []
    # Parse the ASPARTIX (.apx) file into argument and attack lists.
    parser = Apx_parser(filepath)
    arguments, attacks = parser.read_file()
    parser.close()
    print(arguments, attacks)
    print("There are ", len(arguments), " arguments and ", len(attacks), " attacks.")
    # Compute semantics bottom-up: conflict-free -> admissible -> preferred.
    confl_free = conflict_free(arguments, attacks)
    print("Conflict free extensions: ", "[]", sorted(confl_free))
    admissible_sets = admissible(confl_free, attacks)
    print("Admissible extensions: ", "[]", sorted(admissible_sets))
    preferred_exts = preferred(admissible_sets)
    print("Preferred extensions: ", sorted(preferred_exts))
    # NOTE(review): stable_extensions() is a stub and always returns None.
    stable_exts = stable_extensions(preferred_exts)
    print("Stable extensions: ", stable_exts)
| Vladimyr23/aspartix_file_parsing_and_reasoning_with_args | Python_parser_and_reasoning_semantics/semantics.py | semantics.py | py | 3,690 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "itertools.combinations",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "aspartix_parser.Ap... |
25546051885 | import os
import json
import flask
from vrprot.alphafold_db_parser import AlphafoldDBParser
import vrprot
from . import map_uniprot
from . import settings as st
from . import util
from .classes import NodeTags as NT
def get_scales(uniprot_ids=None, mode=st.DEFAULT_MODE):
    """Return the color scale for the given UniProt ids and processing mode.

    Keyword arguments:
    uniprot_ids -- iterable of UniProt accessions (defaults to empty)
    mode -- vrprot processing mode (defaults to st.DEFAULT_MODE)
    """
    # Avoid a mutable default argument; fall back to a fresh empty list.
    if uniprot_ids is None:
        uniprot_ids = []
    return vrprot.overview_util.get_scale(uniprot_ids, mode)
def run_pipeline(proteins: list, parser: AlphafoldDBParser = st.parser, **kwargs):
    """Fetch and process the given proteins and return their color scales.

    proteins -- list of UniProt accessions to process
    parser -- AlphafoldDBParser carrying the processing configuration
    kwargs -- forwarded unchanged to parser.fetch_pipeline
    """
    # create the output directory for the corresponding coloring mode if it does not exist
    output_dir = os.path.join(st._MAPS_PATH)
    parser.update_output_dir(output_dir)
    # initialize the structures dictionary of the parser and check whether some processed files already exist
    parser.init_structures_dict(proteins)
    for protein in proteins:
        parser.update_existence(protein)
    # run the batched process
    try:
        parser.fetch_pipeline(proteins, **kwargs)
    except vrprot.exceptions.ChimeraXException as e:
        # ChimeraX is an external requirement of the pipeline.
        return {"error": "ChimeraX could not be found. Is it installed?"}
    result = get_scales(proteins, parser.processing)
    # update the existence of the processed files
    for protein in proteins:
        parser.update_existence(protein)
    return result
def fetch_from_request(request: flask.Request, parser: AlphafoldDBParser = st.parser):
    """Handle a single-protein fetch request (?id=<accession>).

    Returns an error dict when the id is missing or the request carries an
    unknown processing mode; otherwise delegates to fetch().
    """
    # get information from request
    pdb_id = request.args.get("id")
    if pdb_id is None:
        return {
            "error": "No PDB ID provided.",
            "example": f"{request.host}/vrprot/fetch?id=P69905",
        }
    # extract processing mode and alphafold version from request
    parser = util.parse_request(parser, request)
    # if mode is not part of the list of available modes, an error dict is returned
    if isinstance(parser, dict):
        return parser
    # create a list of proteins to be processed
    proteins = [pdb_id]
    return fetch(proteins, parser)
def fetch(proteins: list[str], parser: AlphafoldDBParser = st.parser):
    """Run the pipeline and retry failed accessions under remapped ids.

    Accessions that could not be fetched are sent to the UniProt id-mapping
    service; any AlphaFoldDB cross-references found are used for a second
    pipeline run, and failures are reported under their original ids.
    """
    # run the batched process
    parser.not_fetched = set()
    parser.already_exists = set()
    result = run_pipeline(proteins, parser)
    # Try whether you can find an updated UniProt id
    second_try = {}  # maps AlphaFoldDB id -> original accession
    if len(parser.not_fetched) > 0:
        try:
            mapped_ac = map_uniprot.main(
                parser.not_fetched,
                source_db=map_uniprot.Databases.uniprot_ac,
                target_db=map_uniprot.Databases.uniprot,
            )
            for re in mapped_ac["results"]:
                a, b = True, True
                # NOTE(review): this while runs at most once -- every path
                # below ends in break; it only guards missing "from"/"to".
                while a and b:
                    a = re.get("from")
                    b = re.get("to")
                    b = b.get("uniProtKBCrossReferences")
                    for entry in b:
                        if entry.get("database") == "AlphaFoldDB":
                            b = entry.get("id")
                            second_try[b] = a
                            if a in parser.not_fetched:
                                parser.not_fetched.remove(a)
                            break
                    break
            result.update(run_pipeline(second_try, parser))
            tmp = parser.not_fetched.copy()
            # Report remaining failures under their original accession.
            for ac in tmp:
                if ac in second_try:
                    parser.not_fetched.remove(ac)
                    parser.not_fetched.add(second_try[ac])
        except Exception as e:
            # Best-effort retry: mapping failures are logged, not fatal.
            print(e)
    return {
        "not_fetched": list(parser.not_fetched),
        # NOTE(review): `parser.already_exists` is reset above but
        # `parser.already_processed` is reported here -- confirm which
        # attribute vrprot actually maintains.
        "already_exists": list(parser.already_processed),
        "results": result,
        "alternative_ids": {v: k for k, v in second_try.items()},
    }
def for_project(
    project: str, request: flask.request, parser: AlphafoldDBParser = st.parser
):
    """Process every protein referenced by a project's nodes.json.

    Returns an error dict when the project is missing/unknown or the
    request carries an unknown processing mode.
    """
    # get information from request
    if project is None:
        return {"error": "No project provided."}
    # extract processing mode and alphafold version from request
    parser = util.parse_request(parser, request)
    # if mode is not part of the list of available modes, an error dict is returned
    if isinstance(parser, dict):
        return parser
    # extract node data from the project's nodes.json file
    nodes_files = os.path.join(st._PROJECTS_PATH, project, "nodes.json")
    if not os.path.isfile(nodes_files):
        return {"error": "Project does not exist."}
    with open(nodes_files, "r") as f:
        nodes = json.load(f)["nodes"]
    # extract the uniprot ids from the nodes (skip nodes without one)
    proteins = [",".join(node[NT.uniprot]) for node in nodes if node.get(NT.uniprot)]
    # run the batched process
    result = run_pipeline(proteins, parser, on_demand=False)
    return {"not_fetched": list(parser.not_fetched), "results": result}
def fetch_list_from_request(
    request: flask.Request, parser: AlphafoldDBParser = st.parser
):
    """Handle a multi-protein fetch request (?ids=<acc1>,<acc2>,...).

    Returns an error dict when the ids are missing or the request carries
    an unknown processing mode; otherwise delegates to fetch_list().
    """
    # get information from request
    pdb_ids = request.args.get("ids")
    if pdb_ids is None:
        return {
            "error": "No PDB IDs provided.",
            "example": f"http://{request.host}/vrprot/list?ids=P02655,At1g58602",
        }
    # extract processing mode and alphafold version from request
    parser = util.parse_request(parser, request)
    # if mode is not part of the list of available modes, an error dict is returned
    if isinstance(parser, dict):
        return parser
    # split() already yields a list; the original wrapped it in a no-op
    # comprehension that also shadowed the builtin `id`.
    proteins = pdb_ids.split(",")
    return fetch_list(proteins, parser)
def fetch_list(proteins: list[str], parser: AlphafoldDBParser = st.parser):
    """Process a list of proteins (no on-demand fetching) and report results."""
    results = run_pipeline(proteins, parser, on_demand=False)
    return {"not_fetched": list(parser.not_fetched), "results": results}
| menchelab/ProteinStructureFetch | src/workflows.py | workflows.py | py | 5,793 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "vrprot.overview_util.get_scale",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "vrprot.overview_util",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "vrprot.alphafold_db_parser.AlphafoldDBParser",
"line_number": 19,
"usage_type": ... |
72473999867 | from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Trajectory of iterates (for plotting) and a global counter of
# objective-function evaluations.
x1_list = []
x2_list = []
y_list = []
counter = 0
def show(x1_list, x2_list):
    """Plot the surface of f and the optimizer's trajectory over it.

    x1_list, x2_list -- coordinates of the iterates; the start point is
    drawn black, intermediate points red, and the final point green.
    Does nothing when the lists are empty.
    """
    N = int(x1_list.__len__())
    if (N <= 0):
        return
    fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
    # Surface grid padded by 1 around the visited region.
    x1_array = np.arange(min(x1_list) - 1, max(x1_list) + 1, 0.01)
    x2_array = np.arange(min(x2_list) - 1, max(x2_list) + 1, 0.01)
    x1_array, x2_array = np.meshgrid(x1_array, x2_array)
    R = f(x1_array, x2_array)
    ax = Axes3D(fig)
    ax.set_xlabel('x1')
    ax.set_ylabel('x2')
    ax.set_zlabel('f(x1,x2)')
    ax.plot_surface(x1_array, x2_array, R, color='b', alpha=0.5)
    x1_list2 = []
    x2_list2 = []
    f_list = []
    # Start point in black.
    ax.scatter(x1_list[0], x2_list[0], f(x1_list[0], x2_list[0]), c='black')
    x1_list2.append(x1_list[0])
    x2_list2.append(x2_list[0])
    f_list.append(f(x1_list[0], x2_list[0]))
    # Intermediate iterates in red.
    for n in range(1, N):
        ax.scatter(x1_list[n], x2_list[n], f(x1_list[n], x2_list[n]), c='red')
        x1_list2.append(x1_list[n])
        x2_list2.append(x2_list[n])
        f_list.append(f(x1_list[n], x2_list[n]))
    # Final iterate in green, plus the connecting path in black.
    ax.scatter(x1_list[N - 1], x2_list[N - 1], f(x1_list[N - 1], x2_list[N - 1]), c='green')
    ax.plot(x1_list2, x2_list2, f_list, color="black")
    plt.show()
def f(x1, x2):
    """Objective function: f(x1, x2) = 3*x1^4 - x1*x2 + x2^4 - 7*x1 - 8*x2 + 2."""
    return (3 * x1 ** 4
            - x1 * x2
            + x2 ** 4
            - 7 * x1
            - 8 * x2
            + 2)
def f_x1(x1, x2):
    """Partial derivative of f with respect to x1: 12*x1^3 - x2 - 7."""
    return 12 * x1 ** 3 - x2 - 7
def f_x2(x1, x2):
    """Partial derivative of f with respect to x2: 4*x2^3 - x1 - 8."""
    return 4 * x2 ** 3 - x1 - 8
def gradient(x1, x2):
    """Gradient vector [df/dx1, df/dx2] of f at the point (x1, x2)."""
    return [f_x1(x1, x2), f_x2(x1, x2)]
def module_of_gradient(grad):
    """Euclidean length of a 2-component gradient vector."""
    x_component = grad[0]
    y_component = grad[1]
    return sqrt(x_component ** 2 + y_component ** 2)
def dichotomy_mehod(a, b, epsilon, x1, x2, d1, d2):
    """Line search by bisection (dichotomy) along direction (d1, d2).

    Finds a step t in [a, b] approximately minimizing
    f(x1 + t*d1, x2 + t*d2); recursion stops once |b - a| < 2*epsilon.
    Each call adds 2 to the global evaluation counter.
    (The "mehod" spelling is kept: callers reference this exact name.)
    """
    x = (a + b) / 2
    global counter
    counter += 2
    # Compare f slightly left and right of the midpoint to choose the half
    # interval that contains the minimizer.
    if (f(x1 + (x - epsilon)*d1, x2 + (x - epsilon)*d2) < f(x1 + (x + epsilon)*d1, x2 + (x + epsilon)*d2)):
        b = x
    else:
        a = x
    if(abs(b - a) >= 2 * epsilon):
        return dichotomy_mehod(a, b, epsilon, x1, x2, d1, d2)
    return x
def the_fletcher_reevse_method(x1, x2, e1, e2, M):
    """Minimize f with the Fletcher-Reeves conjugate gradient method.

    x1, x2 -- starting point
    e1 -- tolerance on the gradient norm (also reused by the line search)
    e2 -- tolerance on the step length and function decrease
    M -- maximum number of iterations

    Returns [(x1, x2, f(x1, x2)) rounded to round_num digits, iterations].
    Appends each iterate to the global x1_list/x2_list and bumps counter.
    """
    global counter
    k = 0
    d_prev = [0, 0]
    grad_prev = 0
    while True:
        counter += 2
        grad = gradient(x1, x2)
        module_grad = module_of_gradient(grad)
        # Stop when the gradient is small enough or the iteration cap hits.
        if ((module_grad < e1) | (k >= M)):
            return [(round(x1, round_num), round(x2, round_num), round(f(x1, x2), round_num)), k]
        # Fletcher-Reeves beta; restarted (B = 0) on every even iteration.
        B = 0
        if k % 2 == 1: B = module_of_gradient(grad)**2 / module_of_gradient(grad_prev)**2
        d = [-grad[0] + B * d_prev[0], -grad[1] + B * d_prev[1]]
        # Step length from the bisection line search along d.
        t = dichotomy_mehod(0, 0.1, e1, x1, x2, d[0], d[1])
        x1_next = x1 + t * d[0]
        x2_next = x2 + t * d[1]
        x1_list.append(x1); x2_list.append(x2)
        counter += 1
        # Second stopping rule: both the step and the decrease are tiny.
        if ((sqrt(abs(x1_next - x1)**2 + abs(x2_next - x2)**2) <= e2)
                & (abs(f(x1_next, x2_next) - f(x1, x2)) <= e2)):
            return [(round(x1_next, round_num),
                     round(x2_next, round_num),
                     round(f(x1_next, x2_next), round_num)),
                    k]
        x1 = x1_next; x2 = x2_next
        d_prev = d; grad_prev = grad
        k += 1
# Experiment parameters: reporting precision, starting point, tolerances
# and the iteration cap.
round_num = 3
x1 = -5
x2 = 3
e1 = 0.001
e2 = 0.001
M = 100
result = the_fletcher_reevse_method(x1, x2, e1, e2, M)
print(f"The Fletcher Reevse method: {result[0]}; count of iteractions = {result[1]}")
print('Count of compute function =', counter)
# Uncomment to visualize the trajectory over the objective surface.
#show(x1_list, x2_list)
| AlexSmirno/Learning | 6 Семестр/Оптимизация/Lab_4_1.py | Lab_4_1.py | py | 3,768 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.arang... |
18308754842 | from tempfile import gettempdir
import urllib.request
import platform
import zipfile
from os.path import join
from os import walk
# Download the mod_spatialite build into the temp folder and unpack it.
pth = "https://github.com/AequilibraE/aequilibrae/releases/download/V0.6.0.post1/mod_spatialite-NG-win-amd64.zip"
outfolder = gettempdir()
dest_path = join(outfolder, "mod_spatialite-NG-win-amd64.zip")
urllib.request.urlretrieve(pth, dest_path)
fldr = join(outfolder, "temp_data")
zipfile.ZipFile(dest_path).extractall(fldr)
if "WINDOWS" in platform.platform().upper():
    # We now set sqlite. Only needed on the Windows server in GitHub CI.
    plats = {
        "x86": "https://sqlite.org/2020/sqlite-dll-win32-x86-3320100.zip",
        "x64": "https://sqlite.org/2020/sqlite-dll-win64-x64-3320100.zip",
    }
    outfolder = "C:/"
    # Fetch the updated 32-bit and 64-bit SQLite DLL archives.
    zip_path64 = join(outfolder, "sqlite-dll-win64-x64-3320100.zip")
    urllib.request.urlretrieve(plats["x64"], zip_path64)
    zip_path86 = join(outfolder, "sqlite-dll-win32-x86-3320100.zip")
    urllib.request.urlretrieve(plats["x86"], zip_path86)
    root = "C:/hostedtoolcache/windows/Python/"
    file = "sqlite3.dll"
    # Replace sqlite3.dll in every hosted-toolcache Python installation,
    # choosing the DLL architecture from the installation path.
    for d, subD, f in walk(root):
        if file in f:
            if "x64" in d:
                zipfile.ZipFile(zip_path64).extractall(d)
            else:
                zipfile.ZipFile(zip_path86).extractall(d)
            print(f"Replaces {d}")
| AequilibraE/aequilibrae | tests/setup_windows_spatialite.py | setup_windows_spatialite.py | py | 1,347 | python | en | code | 140 | github-code | 6 | [
{
"api_name": "tempfile.gettempdir",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.urlretrieve",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "u... |
24072421464 | """
Parser.py
Used to parse URLs into a linked list of dictionaries.
"""
from bs4 import BeautifulSoup
import requests
import re
class Node:  # pragma: no cover
    """
    Creates a Node that contains data, and a next node
    Data holds any object.
    Next points to the next node, and should always be a node (or None at
    the end of the list).
    """

    def __init__(
            self, data):
        """Initialize Node Class"""
        self.data = data
        self.next = None


class LinkedList:  # pragma: no cover
    """
    Creates a Linked List, with a head, and a tail.
    Head only contains the first link in the list, and should be called at the
    beginning of scan.
    Tail only contains the last link in the list, and should not be called.
    """

    def __init__(
            self):
        """Initialize Linked List Class"""
        self.head = None
        self.tail = None

    def add_list_item(
            self, item):
        """Add an item to the Linked List.

        Wraps non-Node items in a Node; an item whose data equals the
        current tail's data is skipped (consecutive de-duplication).
        """
        if not isinstance(item, Node):
            item = Node(item)
        if self.head is None:
            # Bug fix: the tail must also point at the first node; the
            # original left it as None, so the second append crashed on
            # `self.tail.data`.
            self.head = item
            self.tail = item
        elif self.tail.data == item.data:
            # Bug fix: compare payloads.  The original compared the tail's
            # data against the freshly wrapped Node object, which can never
            # be equal, so the de-duplication check never fired.
            return
        else:
            self.tail.next = item
            self.tail = item
def parse_url_feed(
        incoming) -> LinkedList:
    """
    Receives either a list of URLs or a single URL, and returns a Linked
    List of Dictionaries (one LinkedList of {"RSS_String", "Link"} dicts
    per feed).

    Raises Exception for URLs that do not look like RSS/Atom feed URLs.
    """
    total_feed = LinkedList()
    url_list = return_list(incoming)
    for url_entry in url_list:
        if not check_url(url_entry):
            raise Exception("Invalid URL. Must Be a RSS Feed URL ending in "
                            ".rss, .html, or .xml: " + url_entry)
        # Pick the HTML or XML parser based on the URL's extension.
        parse_value = find_parser(url_entry)
        response = requests.get(url_entry)
        soup = BeautifulSoup(response.content, parse_value)
        if soup.rss is not None:
            feed = rss_parse(soup)
            total_feed.add_list_item(feed)
        elif soup.find_all(re.compile("atom.xml")) is not None:
            # NOTE(review): find_all always returns a (possibly empty) list,
            # so this condition is always True for non-RSS documents.
            feed = atom_parse(soup)
            total_feed.add_list_item(feed)
    return total_feed
def check_url(
        url: str) -> bool:
    """Return True when the URL looks like a parseable feed URL.

    A non-empty URL qualifies when it contains one of the feed-like
    fragments below (matching .rss, .xml, .html/.tml, or "feed(s)").
    """
    url = str(url)
    if not url:
        return False
    feed_markers = ("rss?", "xml?", "tml?", "feeds?")
    return any(re.search(marker, url) is not None for marker in feed_markers)
def find_parser(
        response: str) -> str:
    """Choose the BeautifulSoup parser for a feed URL.

    URLs that look like HTML (a "tm(l)" fragment, as in .html) use the
    lxml HTML parser; everything else is parsed as XML.

    Raises Exception for URLs of three characters or fewer.
    """
    if len(response) <= 3:
        raise Exception("Invalid URL Length")
    return "lxml" if re.search("tml?", response) else "lxml-xml"
def return_list(
        incoming) -> list:
    """Normalize the input to a list of URLs.

    A single string becomes a one-element list; a list is returned as-is;
    any other type yields an empty list.
    """
    if isinstance(incoming, str):
        return [incoming]
    if isinstance(incoming, list):
        return incoming
    return []
def rss_parse(
        soup: BeautifulSoup) -> LinkedList:  # pragma: no cover
    """
    When URL is an RSS feed, returns a linked list of dictionaries
    containing the titles and links; the first entry describes the
    channel itself.
    """
    feed = LinkedList()
    tag = soup.rss
    tag = tag.channel
    # Channel-level title/link become the first list entry.
    channel_dict = {"RSS_String": tag.title.string, "Link": tag.link.string}
    feed.add_list_item(channel_dict)
    for item in tag.find_all(re.compile("item?")):
        feed_dict = {}
        for title in item.find_all(re.compile("title?")):
            for entry in title.find_all(string=True):
                feed_dict["RSS_String"] = entry
            # Long titles are shortened for the GUI.
            feed_dict["RSS_String"] = truncate(feed_dict["RSS_String"])
        for link in item.find_all(re.compile("link?")):
            for entry in link.find_all(string=True):
                feed_dict["Link"] = entry
        feed.add_list_item(feed_dict)
    return feed
def atom_parse(
        soup: BeautifulSoup) -> LinkedList:  # pragma: no cover
    """
    When URL is an Atom feed, returns a linked list of dictionaries containing
    the titles and links (one dict per <entry>).
    """
    feed = LinkedList()
    tag = soup.feed
    for entry in tag.find_all("entry"):
        feed_dict = {}
        for title in entry.find_all("title"):
            for string in title.find_all(string=True):
                feed_dict["RSS_String"] = string
            # Long titles are shortened for the GUI.
            feed_dict["RSS_String"] = truncate(feed_dict["RSS_String"])
        # Atom links carry the URL in the href attribute.
        for link in entry.find_all(re.compile("link?")):
            feed_dict["Link"] = link.get('href')
        feed.add_list_item(feed_dict)
    return feed
def truncate(
        input_line: str) -> str:
    """Shorten strings of 80+ characters to 79 plus an ellipsis.

    Keeps rows readable in the GUI window; the trailing "..." marks text
    that was cut off.
    """
    if len(input_line) < 80:
        return input_line
    return input_line[0:79] + "..."
| Jhawk1196/CS3250PythonProject | src/parser.py | parser.py | py | 5,232 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_numbe... |
3229327686 | #!/usr/bin/python
### File Information ###
"""
Rejector
"""
__author__ = 'duanqz@gmail.com'
import os
import fnmatch
from config import Config
class Rejector:
    """ Rejector:
    1. Check whether conflicts happen.
    2. Resolve conflicts automatically.

    Operates on files containing merge conflict markers
    ("<<<<<<<", "=======", ">>>>>>>").
    """

    # Marker prefixes of a merge conflict block.  (The "CONFILCT" spelling
    # is kept: these names are referenced throughout the class.)
    CONFILCT_START = "<<<<<<<"
    CONFLICT_MID = "======="
    CONFILCT_END = ">>>>>>>"

    def __init__(self, target):
        # target: path of the file to inspect for conflict markers.
        self.mTarget = target
        self.mConflictNum = 0

    def getConflictNum(self):
        """Return the number of conflicts found in the target file.

        XML targets are resolved in place; all other files have their
        conflicts collected into a reject file.
        NOTE(review): resolveConflict() never updates mConflictNum, so XML
        targets always report 0 -- confirm this is intended.
        """
        if fnmatch.fnmatch(self.mTarget, "*.xml"):
            self.resolveConflict()
        else:
            self.collectConflict()
        return self.mConflictNum

    def collectConflict(self):
        """ Check whether conflict happen or not in the target.

        Annotates conflict markers with "#Conflict N" tags, writes the
        annotated copy to a reject file, then strips the markers and the
        side after "=======" from the target (keeping the side between
        "<<<<<<<" and "=======").
        """
        self.mConflictNum = 0
        top = 0
        size = 0
        # delLineNumbers records the line numbers scheduled for deletion.
        delLineNumbers = []
        needToDel = False
        targetFile = open(self.mTarget, "r+")
        lineNum = 0
        lines = targetFile.readlines()
        for line in lines:
            size = self.mConflictNum
            if line.startswith(Rejector.CONFILCT_START):
                top += 1
                # Modify the conflict in the original
                lines[lineNum] = "%s #Conflict %d\n" % (line.rstrip(), size)
                self.mConflictNum += 1
                delLineNumbers.append(lineNum)
            elif line.startswith(Rejector.CONFILCT_END):
                # Modify the conflict in the original
                lines[lineNum] = "%s #Conflict %d\n" % (line.rstrip(), size-top)
                delLineNumbers.append(lineNum)
                needToDel = False
                # Unbalanced end marker: stop scanning.
                if top == 0: break;
                top -= 1
            else:
                if top > 0:
                    if line.startswith(Rejector.CONFLICT_MID):
                        # From "=======" on, lines belong to the other side
                        # and are scheduled for deletion.
                        needToDel = True
                if needToDel:
                    delLineNumbers.append(lineNum)
            lineNum += 1
        # Create a reject file if conflict happen
        if self.mConflictNum > 0:
            rejFilename = Rejector.createReject(self.mTarget)
            # NOTE(review): "wb" with str lines raises TypeError on
            # Python 3 -- this code dates from Python 2; confirm.
            rejFile = open(rejFilename, "wb")
            rejFile.writelines(lines)
            rejFile.close()
        # Remove conflict blocks, and write back target.
        for lineNum in delLineNumbers[::-1]:
            del lines[lineNum]
        targetFile.seek(0)
        targetFile.truncate()
        targetFile.writelines(lines)
        targetFile.close()
        return self

    @staticmethod
    def createReject(target):
        """Map target into the reject tree and ensure its directory exists."""
        relTarget = os.path.relpath(target, Config.PRJ_ROOT)
        rejFilename = os.path.join(Config.REJ_ROOT, relTarget)
        dirname = os.path.dirname(rejFilename)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        return rejFilename

    def resolveConflict(self):
        """Strip conflict markers from the target in place.

        Keeps the side after "=======" and deletes the markers plus the
        side between "<<<<<<<" and "=======" (the opposite choice from
        collectConflict()).
        """
        rejFileHandle = open(self.mTarget, "r+")
        top = 0
        lineNum = 0
        delLineNumbers = []
        needToDel = True
        lines = rejFileHandle.readlines()
        for line in lines:
            if line.startswith(Rejector.CONFILCT_START):
                top += 1
                delLineNumbers.append(lineNum)
            elif line.startswith(Rejector.CONFILCT_END):
                top -= 1
                delLineNumbers.append(lineNum)
                needToDel = True
                # Unbalanced end marker: stop scanning.
                if top < 0: break;
            else:
                if top > 0:
                    if needToDel:
                        delLineNumbers.append(lineNum)
                    if line.startswith(Rejector.CONFLICT_MID):
                        # Everything after "=======" is kept.
                        needToDel = False
            lineNum += 1
        for lineNum in delLineNumbers[::-1]:
            del lines[lineNum]
        rejFileHandle.seek(0)
        rejFileHandle.truncate()
        rejFileHandle.writelines(lines)
        rejFileHandle.close()
| baidurom/tools | autopatch/rejector.py | rejector.py | py | 4,416 | python | en | code | 12 | github-code | 6 | [
{
"api_name": "fnmatch.fnmatch",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.relpath",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "config.Config.PRJ_ROOT... |
29214262320 | import os.path
import unittest
from pathlib import Path
from sflkit.analysis.analysis_type import AnalysisType
from sflkit.analysis.spectra import Spectrum
from sflkit.analysis.suggestion import Location
from tests4py import framework
from tests4py.constants import DEFAULT_WORK_DIR
from utils import BaseTest
class TestSFL(BaseTest):
    """Integration test for the tests4py statistical fault localization flow."""

    @unittest.skip
    def test_middle(self):
        """Checkout middle_2, instrument it, run SFL, and verify both the
        instrumented copy's layout and the Ochiai-ranked suggestions.

        Skipped by default: requires network access and the full
        tests4py/sflkit toolchain.
        """
        project_name = "middle"
        bug_id = 2
        report = framework.default.tests4py_checkout(project_name, bug_id)
        if report.raised:
            raise report.raised
        src = Path(report.location)
        dst = DEFAULT_WORK_DIR / "sfl"
        report = framework.sfl.tests4py_sfl_instrument(src, dst)
        if report.raised:
            raise report.raised
        # Expected layout of the instrumented copy (dst) and the original (src).
        dst_src = dst / "src"
        dst_src_middle = dst_src / "middle"
        dst_src_middle___init___py = dst_src_middle / "__init__.py"
        dst_tests = dst / "tests"
        dst_tests_test_middle_py = dst_tests / "test_middle.py"
        dst_gitignore = dst / ".gitignore"
        dst_license = dst / "LICENSE"
        dst_readme_md = dst / "README.md"
        dst_setup_cfg = dst / "setup.cfg"
        dst_setup_py = dst / "setup.py"
        src_src = src / "src"
        src_src_middle = src_src / "middle"
        src_src_middle___init___py = src_src_middle / "__init__.py"
        src_tests = src / "tests"
        src_tests_test_middle_py = src_tests / "test_middle.py"
        src_gitignore = src / ".gitignore"
        src_license = src / "LICENSE"
        src_readme_md = src / "README.md"
        src_setup_cfg = src / "setup.cfg"
        src_setup_py = src / "setup.py"
        exist_files = [
            dst_src_middle___init___py,
            dst_tests_test_middle_py,
            dst_gitignore,
            dst_license,
            dst_readme_md,
            dst_setup_cfg,
            dst_setup_py,
        ]
        exist_dirs = [dst_src, dst_src_middle, dst_tests]
        for d in exist_dirs:
            self.assertTrue(d.exists())
            self.assertTrue(d.is_dir())
        for f in exist_files:
            self.assertTrue(f.exists())
            self.assertTrue(f.is_file())
        # Non-source files must be copied verbatim ...
        for d, s in [
            (dst_tests_test_middle_py, src_tests_test_middle_py),
            (dst_gitignore, src_gitignore),
            (dst_license, src_license),
            (dst_readme_md, src_readme_md),
            (dst_setup_cfg, src_setup_cfg),
            (dst_setup_py, src_setup_py),
        ]:
            with open(d, "r") as fp:
                d_content = fp.read()
            with open(s, "r") as fp:
                s_content = fp.read()
            self.assertEqual(s_content, d_content, f"{d} has other content then {s}")
        # ... while the instrumented source file must differ from the original.
        for d, s in [
            (dst_src_middle___init___py, src_src_middle___init___py),
        ]:
            with open(d, "r") as fp:
                d_content = fp.read()
            with open(s, "r") as fp:
                s_content = fp.read()
            self.assertNotEqual(
                s_content, d_content, f"{d} has the same content then {s}"
            )
        # Collect events, analyze them, and rank lines by Ochiai suspiciousness.
        report = framework.sfl.tests4py_sfl_events(dst)
        if report.raised:
            raise report.raised
        report = framework.sfl.tests4py_sfl_analyze(dst, src, predicates="line")
        if report.raised:
            raise report.raised
        suggestions = report.analyzer.get_sorted_suggestions(
            base_dir=src,
            type_=AnalysisType.LINE,
            metric=Spectrum.Ochiai,
        )
        # Top suggestion: Ochiai score 1/sqrt(2) at a single line, which is
        # the buggy line 6 of middle/__init__.py.
        self.assertAlmostEqual(
            0.7071067811865475, suggestions[0].suspiciousness, delta=0.0000001
        )
        self.assertEqual(1, len(suggestions[0].lines))
        self.assertIn(
            Location(os.path.join("src", "middle", "__init__.py"), 6),
            suggestions[0].lines,
        )
| smythi93/Tests4Py | tests/test_sfl.py | test_sfl.py | py | 3,851 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "utils.BaseTest",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "tests4py.framework.default.tests4py_checkout",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tests4py.framework.default",
"line_number": 19,
"usage_type": "attribute"
}... |
27672884251 | from typing import Dict, Tuple
from copy import deepcopy
import torch
from config import tqc_config
from modules import Actor, TruncatedQuantileEnsembledCritic
class TQC:
    """
    Truncated Quantile Critics agent (Kuznetsov et al., 2020).

    SAC-style actor-critic whose critic predicts return quantiles over an
    ensemble; the largest target quantiles are dropped ("truncated") to
    control overestimation bias.  Temperature alpha is auto-tuned toward a
    fixed target entropy.
    """
    def __init__(self,
                 cfg: tqc_config,
                 actor: Actor,
                 critic: TruncatedQuantileEnsembledCritic) -> None:
        self.cfg = cfg
        self.device = cfg.device
        self.tau = cfg.tau  # Polyak coefficient for the target-critic update
        self.discount = cfg.discount
        self.batch_size = cfg.batch_size
        # SAC heuristic: target entropy = -|action_dim|
        self.target_entropy = -float(actor.action_dim)
        # Temperature optimized in log-space so alpha stays positive.
        self.log_alpha = torch.tensor([0.0], dtype=torch.float32, device=self.device, requires_grad=True)
        self.alpha_optimizer = torch.optim.AdamW([self.log_alpha], lr=cfg.alpha_lr)
        self.alpha = self.log_alpha.exp().detach()
        self.actor = actor.to(self.device)
        self.actor_optim = torch.optim.AdamW(self.actor.parameters(), lr=cfg.actor_lr)
        self.critic = critic.to(self.device)
        self.critic_target = deepcopy(critic).to(self.device)
        self.critic_optim = torch.optim.AdamW(self.critic.parameters(), lr=cfg.critic_lr)
        # Keep the `top` smallest of all ensemble quantiles in the target;
        # the largest `quantiles2drop` are truncated away.
        self.quantiles_total = critic.num_critics * critic.num_quantiles
        self.quantiles2drop = cfg.quantiles_to_drop_per_critic * cfg.num_critics
        self.top = self.quantiles_total - self.quantiles2drop
        # Quantile midpoints used by the huber loss.
        # NOTE(review): arange runs over num_quantiles but is normalized by
        # self.top -- confirm this is intended rather than arange(self.top).
        huber_tau = torch.arange(self.cfg.num_quantiles, device=self.device).float() / self.top + 1 / (2 * self.top)
        self.huber_tau = huber_tau[None, None, :, None]
        self.total_iterations = 0
    def train(self,
              states: torch.Tensor,
              actions: torch.Tensor,
              rewards: torch.Tensor,
              next_states: torch.Tensor,
              dones: torch.Tensor) -> Dict[str, float]:
        """Run one gradient step for critic, actor and alpha; return scalar diagnostics."""
        self.total_iterations += 1
        # critic step
        critic_loss = self.critic_loss(states, actions, rewards, next_states, dones)
        self.critic_optim.zero_grad()
        critic_loss.backward()
        self.critic_optim.step()
        # actor step
        actor_loss, batch_entropy, qz_values = self.actor_loss(states)
        self.actor_optim.zero_grad()
        actor_loss.backward()
        self.actor_optim.step()
        # alpha (temperature) step
        alpha_loss = self.alpha_loss(states)
        self.alpha_optimizer.zero_grad()
        alpha_loss.backward()
        self.alpha_optimizer.step()
        self.alpha = self.log_alpha.exp().detach()
        # Polyak-average the online critic into the target critic.
        self.soft_critic_update()
        return {
            "critic_loss": critic_loss.item(),
            "actor_loss": actor_loss.item(),
            "actor_batch_entropy": batch_entropy,
            "qz_values": qz_values,
            "alpha": self.alpha.item(),
            "alpha_loss": alpha_loss.item()
        }
    def actor_loss(self, states: torch.Tensor) -> Tuple[torch.Tensor, float, float]:
        """Entropy-regularized policy loss; also returns batch entropy and mean Q."""
        actions, log_prob = self.actor(states, need_log_prob=True)
        # Mean over quantiles (dim 2) then over critics (dim 1).
        qz_values = self.critic(states, actions).mean(dim=2).mean(dim=1, keepdim=True)
        loss = self.alpha * log_prob - qz_values
        batch_entropy = -log_prob.mean().item()
        return loss.mean(), batch_entropy, qz_values.mean().item()
    def critic_loss(self,
                    states: torch.Tensor,
                    actions: torch.Tensor,
                    rewards: torch.Tensor,
                    next_states: torch.Tensor,
                    dones: torch.Tensor) -> torch.Tensor:
        """Quantile-huber loss against the truncated, entropy-adjusted target."""
        with torch.no_grad():
            next_actions, next_log_prob = self.actor(next_states, need_log_prob=True)
            next_z = self.critic_target(next_states, next_actions)
            # Pool quantiles across the ensemble, sort ascending, and keep
            # only the smallest `top` -- this is the truncation step.
            sorted_next_z = torch.sort(next_z.reshape(self.batch_size, -1)).values
            sorted_next_z_top = sorted_next_z[:, :self.top]
            # Soft-value correction: subtract alpha * log pi(a'|s').
            sorted_next_z_top = sorted_next_z_top - self.alpha * next_log_prob.unsqueeze(-1)
            quantiles_target = rewards + self.discount * (1.0 - dones) * sorted_next_z_top
        current_z = self.critic(states, actions)
        loss = self.quantile_huber_loss(current_z, quantiles_target)
        return loss
    def quantile_huber_loss(self, quantiles: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """Asymmetric huber loss over every (predicted, target) quantile pair."""
        # Broadcast to (batch, critic, quantile, target_quantile).
        pairwise_diff = target[:, None, None, :] - quantiles[:, :, :, None]
        abs_val = pairwise_diff.abs()
        # Standard huber with kappa = 1.
        huber_loss = torch.where(abs_val > 1.0,
                                 abs_val - 0.5,
                                 pairwise_diff.pow(2) / 2)
        # Tilt by |tau - 1{diff < 0}| to make the loss quantile-asymmetric.
        loss = torch.abs(self.huber_tau - (pairwise_diff < 0).float()) * huber_loss
        return loss.mean()
    def alpha_loss(self, state: torch.Tensor) -> torch.Tensor:
        """Temperature loss driving policy entropy toward target_entropy."""
        with torch.no_grad():
            action, log_prob = self.actor(state, need_log_prob=True)
        loss = -self.log_alpha * (log_prob + self.target_entropy)
        return loss.mean()
    def soft_critic_update(self):
        """Polyak update: target <- tau * online + (1 - tau) * target."""
        for param, tgt_param in zip(self.critic.parameters(), self.critic_target.parameters()):
            tgt_param.data.copy_(self.tau * param.data + (1 - self.tau) * tgt_param.data)
| zzmtsvv/rl_task | offline_tqc/tqc.py | tqc.py | py | 5,082 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "config.tqc_config",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "modules.Actor",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "modules.TruncatedQuantileEnsembledCritic",
"line_number": 12,
"usage_type": "name"
},
{
"api_name... |
19167053066 | """
Common utilities for derp used by various classes.
"""
from collections import namedtuple
import cv2
from datetime import datetime
import heapq
import logging
import pathlib
import numpy as np
import os
import socket
import time
import yaml
import zmq
import capnp
import messages_capnp
# Axis-aligned rectangle: top-left corner (x, y) plus width/height.
Bbox = namedtuple("Bbox", ["x", "y", "w", "h"])
# Capnp message class for every topic that can be published/recorded.
TOPICS = {
    "camera": messages_capnp.Camera,
    "controller": messages_capnp.Controller,
    "action": messages_capnp.Action,
    "imu": messages_capnp.Imu,
    "quality": messages_capnp.Quality,
}
# All project paths live under $DERP_ROOT (must be set in the environment;
# a missing variable raises KeyError at import time).
DERP_ROOT = pathlib.Path(os.environ["DERP_ROOT"])
MODEL_ROOT = DERP_ROOT / "models"
RECORDING_ROOT = DERP_ROOT / "recordings"
CONFIG_ROOT = DERP_ROOT / "config"
# Prefix for zmq ipc endpoints.
MSG_STEM = "/tmp/derp_"
def is_already_running(path):
    """Return True if the PID recorded at *path* refers to a live process.

    A missing file, an unreadable/corrupt PID, or a dead process all
    yield False rather than raising, so callers can use this as a simple
    duplicate-launch guard.
    """
    path = pathlib.Path(path)
    if not path.exists():
        return False
    try:
        with open(str(path)) as pid_file:
            pid = int(pid_file.read())
    except ValueError:
        # Corrupt or empty pid file: the original crashed here with
        # ValueError; treat it as "not running" instead.
        return False
    try:
        # Signal 0 performs existence/permission checks without signaling.
        os.kill(pid, 0)
    except OSError:
        return False
    return True
def write_pid(path):
    """Persist this process's PID to *path* (used by is_already_running)."""
    with open(str(path), 'w') as pid_file:
        pid_file.write(str(os.getpid()))
        pid_file.flush()
def init_logger(name, recording_path, level=logging.INFO):
    """Create a logger that writes both to <recording_path>/<name>.log and to the console."""
    logger = logging.getLogger(name)
    fmt = logging.Formatter('%(asctime)s %(levelname)-5s %(message)s')
    file_handler = logging.FileHandler(recording_path / ('%s.log' % name), mode='w')
    stream_handler = logging.StreamHandler()
    logger.setLevel(level)
    for handler in (file_handler, stream_handler):
        handler.setFormatter(fmt)
        logger.addHandler(handler)
    return logger
def make_recording_path():
    """Create and return RECORDING_ROOT/recording-<utc stamp>-<hostname>."""
    stamp = datetime.utcfromtimestamp(time.time()).strftime("%Y%m%d-%H%M%S")
    name = "recording-%s-%s" % (stamp, socket.gethostname())
    folder = RECORDING_ROOT / name
    folder.mkdir(parents=True)
    return folder
def get_timestamp():
    """Current UNIX time as an integer number of nanoseconds."""
    seconds = time.time()
    return int(seconds * 1e9)
def publisher(path):
    """Bind a zmq PUB socket on the ipc endpoint at *path*; returns (context, socket)."""
    ctx = zmq.Context()
    pub_sock = ctx.socket(zmq.PUB)
    pub_sock.bind("ipc://" + path)
    return ctx, pub_sock
def subscriber(paths):
    """Connect a zmq SUB socket to every ipc endpoint in *paths*, subscribed to all topics."""
    ctx = zmq.Context()
    sub_sock = ctx.socket(zmq.SUB)
    for endpoint in paths:
        sub_sock.connect("ipc://" + endpoint)
    sub_sock.setsockopt(zmq.SUBSCRIBE, b"")
    return ctx, sub_sock
def topic_file_reader(folder, topic):
    """Open the binary log for *topic* inside *folder* for reading."""
    filename = "%s/%s.bin" % (folder, topic)
    return open(filename, "rb")
def topic_exists(folder, topic):
    """Return True when *folder* (a pathlib.Path) contains <topic>.bin."""
    log_path = folder / ("%s.bin" % topic)
    return log_path.exists()
def topic_file_writer(folder, topic):
    """Open the binary log for *topic* inside *folder* for writing (truncates)."""
    filename = "%s/%s.bin" % (folder, topic)
    return open(filename, "wb")
def print_image_config(name, config):
    """Print a one-line summary of a camera config (view edges and pixels-per-degree)."""
    pitch, yaw = config["pitch"], config["yaw"]
    half_vfov = config["vfov"] / 2
    half_hfov = config["hfov"] / 2
    top, bot = pitch + half_vfov, pitch - half_vfov
    left, right = yaw - half_hfov, yaw + half_hfov
    # Horizontal / vertical pixels per degree of field of view.
    hppd = config["width"] / config["hfov"]
    vppd = config["height"] / config["vfov"]
    print(
        "%s top: %6.2f bot: %6.2f left: %6.2f right: %6.2f hppd: %5.1f vppd: %5.1f"
        % (name, top, bot, left, right, hppd, vppd)
    )
def get_patch_bbox(target_config, source_config):
    """
    Return the Bbox of the sub-view matching the target camera's smaller
    hfov/vfov and different yaw/pitch inside the source camera frame, or
    None when the requested patch falls outside the source image.
    """
    width_frac = target_config["hfov"] / source_config["hfov"]
    height_frac = target_config["vfov"] / source_config["vfov"]
    yaw_delta = source_config["yaw"] - target_config["yaw"]
    pitch_delta = source_config["pitch"] - target_config["pitch"]
    patch_width = int(source_config["width"] * width_frac + 0.5)
    patch_height = int(source_config["height"] * height_frac + 0.5)
    # Start from the centered patch, then push it by the yaw/pitch offset
    # converted to pixels.
    x = (source_config["width"] - patch_width) // 2
    x += int(yaw_delta / source_config["hfov"] * source_config["width"] + 0.5)
    y = (source_config["height"] - patch_height) // 2
    y += int(pitch_delta / source_config["vfov"] * source_config["height"] + 0.5)
    if (x >= 0 and x + patch_width <= source_config["width"] and
            y >= 0 and y + patch_height <= source_config["height"]):
        return Bbox(x, y, patch_width, patch_height)
    return None
def crop(image, bbox):
    """Return the region of *image* covered by Bbox(x, y, w, h) as a view (no copy)."""
    rows = slice(bbox.y, bbox.y + bbox.h)
    cols = slice(bbox.x, bbox.x + bbox.w)
    return image[rows, cols]
def resize(image, size):
    """Resize *image* to (w, h), choosing interpolation by up- vs down-scaling."""
    upscaling = size[0] > image.shape[1] or size[1] > image.shape[0]
    # Linear looks best when enlarging; area averaging when shrinking.
    method = cv2.INTER_LINEAR if upscaling else cv2.INTER_AREA
    return cv2.resize(image, size, interpolation=method)
def perturb(frame, camera_config, shift=0, rotate=0):
    """
    Augment a camera frame by rolling each pixel row to simulate a lateral
    shift (*shift*) and/or a yaw rotation (*rotate* degrees).

    Rows above the horizon move only by the rotation; rows below the horizon
    also move by the shift projected onto the visible ground plane.  Vacated
    pixels are zeroed.  Mutates *frame* in place and also returns it.
    # assumes frame is (rows, cols, channels) -- TODO confirm with callers
    """
    # Estimate how many pixels to rotate by, assuming fixed degrees per pixel
    pixels_per_degree = camera_config["width"] / camera_config["hfov"]
    # Figure out where the horizon is in the image
    horizon_frac = ((camera_config["vfov"] / 2) + camera_config["pitch"]) / camera_config["vfov"]
    # For each row in the frame shift/rotate it
    indexs = np.arange(len(frame))
    vertical_fracs = np.linspace(0, 1, len(frame))
    # For each vertical line, apply shift/rotation rolls
    for index, vertical_frac in zip(indexs, vertical_fracs):
        magnitude = rotate * pixels_per_degree
        if vertical_frac > horizon_frac:
            # Below the horizon: convert the metric shift into pixels using
            # the ground width visible at this row's downward angle.
            ground_angle = (vertical_frac - horizon_frac) * camera_config["vfov"]
            ground_distance = camera_config["z"] / np.tan(deg2rad(ground_angle))
            ground_width = 2 * ground_distance * np.tan(deg2rad(camera_config["hfov"]) / 2)
            magnitude += (shift / ground_width) * camera_config["width"]
        # Round away from zero.
        magnitude = int(magnitude + 0.5 * np.sign(magnitude))
        if magnitude > 0:
            frame[index, magnitude:, :] = frame[index, : frame.shape[1] - magnitude]
            frame[index, :magnitude, :] = 0
        elif magnitude < 0:
            frame[index, :magnitude, :] = frame[index, abs(magnitude) :]
            frame[index, frame.shape[1] + magnitude :] = 0
    return frame
def deg2rad(val):
    """Convert degrees to radians (scalar or array)."""
    return np.pi * val / 180
def rad2deg(val):
    """Convert radians to degrees (scalar or array)."""
    return 180 * val / np.pi
def load_image(path):
    """Read the image at *path* as a BGR numpy array (cv2 convention); None on failure."""
    return cv2.imread(str(path))
def save_image(path, image):
    """Write *image* to *path*; format inferred from the extension. Returns cv2's success flag."""
    return cv2.imwrite(str(path), image)
def load_config(config_path):
    """ Load a configuration file, also reading any component configs """
    with open(str(config_path)) as config_fd:
        config = yaml.load(config_fd, Loader=yaml.FullLoader)
    for component in config:
        # A component that names a "path" pulls in a shared config from
        # CONFIG_ROOT; inline keys then override the shared values.
        if isinstance(config[component], dict) and "path" in config[component]:
            component_path = CONFIG_ROOT / config[component]["path"]
            with open(str(component_path)) as component_fd:
                component_config = yaml.load(component_fd, Loader=yaml.FullLoader)
            component_config.update(config[component])
            config[component] = component_config
            if "name" not in config[component]:
                config[component]["name"] = component_path.stem
    if "name" not in config:
        # Default the config's name to its file stem.
        config["name"] = config_path.stem
    return config
def dump_config(config, config_path):
    """Serialize *config* to YAML at *config_path*."""
    with open(str(config_path), 'w') as out_fd:
        yaml.dump(config, out_fd)
def extract_latest(desired_times, source_times, source_values):
    """
    For each desired timestamp return the most recent source value observed
    strictly before it (0 before any observation).  Both time sequences are
    expected in ascending order; a single cursor sweeps the source once.
    """
    latest = []
    cursor = 0
    current = 0
    for target in desired_times:
        while cursor < len(source_times) and source_times[cursor] < target:
            current = source_values[cursor]
            cursor += 1
        latest.append(current)
    return np.array(latest)
def load_topics(folder):
    """Read every recorded topic log in *folder* into {topic: [capnp messages]}."""
    if isinstance(folder, str):
        folder = pathlib.Path(folder)
    out = {}
    for topic in TOPICS:
        # Recordings may contain only a subset of the known topics.
        if not topic_exists(folder, topic):
            continue
        topic_fd = topic_file_reader(folder, topic)
        out[topic] = [msg for msg in TOPICS[topic].read_multiple(topic_fd)]
        topic_fd.close()
    return out
def replay(topics):
    """Yield [publish_ns, topic, message] across all topics in publish-time order."""
    entries = []
    for topic, messages in topics.items():
        for msg in messages:
            heapq.heappush(entries, [msg.publishNS, topic, msg])
    while entries:
        yield heapq.heappop(entries)
def decode_jpg(jpg):
    """Decode raw JPEG bytes into a BGR image array."""
    return cv2.imdecode(np.frombuffer(jpg, np.uint8), cv2.IMREAD_COLOR)
def encode_jpg(image, quality):
    """JPEG-encode *image* at the given quality (0-100); returns raw bytes."""
    # tobytes() replaces ndarray.tostring(), which was deprecated since
    # NumPy 1.19 and removed entirely in NumPy 2.0.
    return cv2.imencode(".jpg", image, [cv2.IMWRITE_JPEG_QUALITY, quality])[1].tobytes()
def extract_car_actions(topics):
    """
    Replay controller/action messages and return an array of
    [timestamp, speed, steer] rows with the controller's offsets applied.
    Always returns at least one row ([0, 0, 0] when nothing qualifies).
    """
    out = []
    autonomous = False
    speed_offset = 0
    steer_offset = 0
    for timestamp, topic, msg in replay(topics):
        if topic == "controller":
            # Controller messages update the current mode and trim offsets.
            autonomous = msg.isAutonomous
            speed_offset = msg.speedOffset
            steer_offset = msg.steerOffset
        elif topic == "action":
            # Keep actions issued autonomously or explicitly by a human.
            if autonomous or msg.isManual:
                out.append([timestamp, msg.speed + speed_offset, msg.steer + steer_offset])
    if not out:
        # Guarantee a non-empty array for downstream consumers.
        out.append([0, 0, 0])
    return np.array(out)
| notkarol/derplearning | derp/util.py | util.py | py | 9,198 | python | en | code | 40 | github-code | 6 | [
{
"api_name": "collections.namedtuple",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "messages_capnp.Camera",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "messages_capnp.Controller",
"line_number": 23,
"usage_type": "attribute"
},
{
"... |
2888676781 | import numpy as np
from matplotlib import pyplot as plt
if __name__ == '__main__':
    # Event log columns: channel id, FPGA timestamp (seconds), UNIX datetime.
    ch, time, date = np.genfromtxt("events220302_1d.dat", unpack=True,
                                   dtype=(int, float, 'datetime64[ms]'))
    # Split events by acquisition channel.
    mask1 = ch==1
    mask2 = ch==2
    time1 = time[mask1]
    time2 = time[mask2]
    date1 = date[mask1]
    date2 = date[mask2]
    # Only plot events recorded before 13:00 on 2022-03-02.
    limit = np.datetime64("2022-03-02T13")
    fig, ax = plt.subplots(2,1, sharex=True)
    # NOTE(review): errorbar is used without error bars here; with fmt='.k'
    # it behaves like a scatter plot of FPGA time vs UNIX time per channel.
    ax[0].errorbar(date1[date1 < limit], time1[date1 < limit], fmt='.k', markersize=0.6)
    ax[1].errorbar(date2[date2 < limit], time2[date2 < limit], fmt='.k', markersize=0.6)
    ax[0].set_ylabel("FPGA timestamp [s]")
    ax[1].set_ylabel("FPGA timestamp [s]")
    ax[0].set_title("CHANNEL 1")
    ax[1].set_title("CHANNEL 2")
    ax[1].set_xlabel("Local time [dd hh:mm]")
plt.show() | brinus/Sciami_lab4 | UNIX_vs_FPGA.py | UNIX_vs_FPGA.py | py | 841 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.genfromtxt",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.datetime64",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotli... |
25003790859 | import math
from typing import Tuple
import tensorflow as tf
class ParityDataset(tf.keras.utils.Sequence):
    """
    Keras Sequence generating random batches for the parity task.

    Each sample is a vector of `n_elems` entries in {-1, 0, 1}; the label is
    the parity (0 or 1) of the number of +1 entries.  Batches are sampled
    fresh on every access, so `index` is ignored.
    """
    def __init__(self, n_samples: int, n_elems: int = 64, batch_size: int = 128):
        """
        Parameters
        ----------
        n_samples : int
            Number of samples.
        n_elems : int, optional
            Number of elements in the input vector.
            The default is 64.
        batch_size : int, optional
            Batch size.
            The default is 128.
        """
        self.n_samples = n_samples
        self.n_elems = n_elems
        self.batch_size = batch_size
    def __len__(self) -> int:
        # Number of whole batches per epoch.  The original expression
        # int(math.floor(self.n_samples) / self.batch_size) floored the
        # integer *before* dividing and only produced the right answer via
        # int() truncation; floor division states the intent directly.
        return self.n_samples // self.batch_size
    @tf.function
    def __batch_generation(self) -> Tuple[tf.Tensor, tf.Tensor]:
        """Sample one random (X, Y) batch."""
        X = []
        Y = []
        for _ in range(self.batch_size):
            # Random count of non-zero entries in [1, n_elems].
            n_non_zero = tf.random.uniform((), 1, self.n_elems + 1, tf.int32)
            # Non-zero entries are +/-1; the remainder is zero padding,
            # then the whole vector is shuffled.
            x = tf.random.uniform((n_non_zero,), 0, 2, tf.int32) * 2 - 1
            x = tf.concat(
                [x, tf.zeros((self.n_elems - n_non_zero), dtype=tf.int32)], axis=0
            )
            x = tf.random.shuffle(x)
            # Label: parity of the number of +1 entries.
            y = tf.math.reduce_sum(tf.cast(tf.equal(x, 1), tf.int32)) % 2
            X.append(x)
            Y.append(y)
        X = tf.cast(tf.stack(X), tf.keras.backend.floatx())
        Y = tf.cast(tf.stack(Y), tf.keras.backend.floatx())
        return X, Y
    def __getitem__(self, index: int) -> Tuple[tf.Tensor, tf.Tensor]:
        # `index` is intentionally unused: every batch is freshly sampled.
        batch_X, batch_Y = self.__batch_generation()
        return batch_X, batch_Y
| EMalagoli92/PonderNet-TensorFlow | pondernet_tensorflow/dataset/parity_dataset.py | parity_dataset.py | py | 1,598 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "tensorflow.keras",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "math.floor",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tensorflow.random.uniform",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tensorflow.... |
23750393543 | """
최적화 비중을 계산해주는 모듈
@author: Younghyun Kim
Created on 2021.10.05
"""
import numpy as np
import pandas as pd
import cvxpy as cp
import torch
from cvxpylayers.torch import CvxpyLayer
class ClassicOptimizer:
    """
    Classic portfolio optimizers (max-Sharpe, min-variance, max-diversification,
    mean-variance, momentum/low-vol rankings) plus helpers that translate an
    optimal portfolio embedding into integer share counts.

    Convex problems are expressed with cvxpy and solved through CvxpyLayer so
    the covariance parameter stays a differentiable torch tensor.
    """
    def __init__(self, m=100,
                 buying_fee=0.01, selling_fee=0.01,
                 min_cash_rate=0.01):
        """
        Initialization
            Args:
                m: big number (scaling factor applied to returns/objectives
                   for numerical stability)
                buying_fee: proportional cost applied to purchases
                selling_fee: proportional cost applied to sales
                min_cash_rate: fraction of wealth always kept in cash
        """
        self.m = m
        self.buying_fee = buying_fee
        self.selling_fee = selling_fee
        self.min_cash_rate = min_cash_rate
    def max_sr(self, returns, nonneg=True, adjust=True):
        """
        Maximize Sharpe Ratio
            Args:
                returns: pd.DataFrame or np.array
                nonneg: if True, constrain weights to be non-negative
                adjust: if True, rescale the raw solution via adjust_weights
            Return:
                weights: np.array(N)
        """
        if isinstance(returns, pd.DataFrame):
            returns = returns.values
        # Scale returns up by the big number m for numerical stability.
        creturns = returns * self.m
        cov = np.cov(creturns.transpose())
        cov = np.nan_to_num(cov)
        mu = creturns.mean(0).reshape(-1)
        mu_min = abs(mu.min())
        # If no asset has a positive mean, shift all means up so the
        # mu @ w >= 1 constraint stays feasible.
        if mu[mu > 0].shape[0] == 0:
            mu += mu_min
        mu = np.nan_to_num(mu)
        weights = cp.Variable(returns.shape[1])
        cov_cp = cp.Parameter((cov.shape[1], cov.shape[0]), symmetric=True)
        # Standard max-Sharpe reformulation: minimize risk subject to unit
        # expected excess return, then rescale afterwards.
        objective = cp.Minimize(cp.sum_squares(cov_cp @ weights))
        constraints = [mu.T @ weights >= 1]
        if nonneg:
            constraints.append(0 <= weights)
        prob = cp.Problem(objective, constraints)
        assert prob.is_dpp()
        cov = torch.FloatTensor(cov.astype(float))
        cvxpylayer = CvxpyLayer(prob, parameters=[cov_cp],
                                variables=[weights])
        weights, = cvxpylayer(cov)
        if adjust:
            weights = self.adjust_weights(weights)
        return weights.numpy()
    def min_var(self, returns):
        """
        Minimum Variance Portfolio
            Args:
                returns: pd.DataFrame or np.array
            Return:
                weights: np.array(N)
        """
        if isinstance(returns, pd.DataFrame):
            returns = returns.values
        creturns = returns * self.m
        cov = np.cov(creturns.transpose())
        cov = np.nan_to_num(cov)
        weights = cp.Variable(returns.shape[1])
        cov_cp = cp.Parameter((cov.shape[1], cov.shape[0]),
                              symmetric=True)
        # Fully-invested long-only minimum variance.
        objective = cp.Minimize(cp.sum_squares(cov_cp @ weights))
        constraints = [cp.sum(weights) == 1, 0 <= weights]
        prob = cp.Problem(objective, constraints)
        assert prob.is_dpp()
        cov = torch.FloatTensor(cov.astype(float))
        cvxpylayer = CvxpyLayer(prob, parameters=[cov_cp],
                                variables=[weights])
        weights, = cvxpylayer(cov)
        return weights.numpy()
    def max_div(self, returns, nonneg=True, adjust=True):
        """
        Maximum Diversification Portfolio
            Args:
                returns: pd.DataFrame or np.array
                nonneg: if True, constrain weights to be non-negative
                adjust: if True, rescale the raw solution via adjust_weights
            Return:
                weights: np.array(N)
        """
        if isinstance(returns, pd.DataFrame):
            returns = returns.values
        creturns = returns * self.m
        cov = np.cov(creturns.transpose())
        cov = np.nan_to_num(cov)
        sig = creturns.std(0).reshape(-1)
        sig = np.nan_to_num(sig)
        weights = cp.Variable(returns.shape[1])
        cov_cp = cp.Parameter((cov.shape[1], cov.shape[0]),
                              symmetric=True)
        # Same risk-minimization form as max_sr but the unit constraint is
        # on weighted volatilities, maximizing the diversification ratio.
        objective = cp.Minimize(cp.sum_squares(cov_cp @ weights))
        constraints = [sig.T @ weights >= 1]
        if nonneg:
            constraints.append(0 <= weights)
        prob = cp.Problem(objective, constraints)
        assert prob.is_dpp()
        cov = torch.FloatTensor(cov.astype(float))
        cvxpylayer = CvxpyLayer(prob, parameters=[cov_cp],
                                variables=[weights])
        weights, = cvxpylayer(cov)
        if adjust:
            weights = self.adjust_weights(weights)
        return weights.numpy()
    def mv_mean(self, returns):
        """
        Mean-Variance Portfolio with min ret based on mean ret
            Args:
                returns: pd.DataFrame or np.array
            Return:
                weights: np.array(N)
        """
        if isinstance(returns, pd.DataFrame):
            returns = returns.values
        creturns = returns * self.m
        cov = np.cov(creturns.transpose())
        cov = np.nan_to_num(cov)
        weights = cp.Variable(returns.shape[1])
        cov_cp = cp.Parameter((cov.shape[1], cov.shape[0]),
                              symmetric=True)
        mu = creturns.mean(0).reshape(-1)
        mu_min = abs(mu.min())
        # Same feasibility shift as max_sr when all means are non-positive.
        if mu[mu > 0].shape[0] == 0:
            mu += mu_min
        mu = np.nan_to_num(mu)
        # Required return: cross-sectional mean of asset means.
        mret = mu.mean().item()
        objective = cp.Minimize(cp.sum_squares(cov_cp @ weights))
        constraints = [cp.sum(weights) == 1,
                       mu.T @ weights >= mret,
                       0 <= weights]
        prob = cp.Problem(objective, constraints)
        assert prob.is_dpp()
        cov = torch.FloatTensor(cov.astype(float))
        cvxpylayer = CvxpyLayer(prob, parameters=[cov_cp],
                                variables=[weights])
        weights, = cvxpylayer(cov)
        return weights.numpy()
    def pm_port(self, returns, topk=5, return_type='pct'):
        """
        Price Momentum Equal Weight Portfolio with TopK
            Args:
                returns: pd.DataFrame or np.array
                topk: top K
                return_type: return type(log or pct)
            Return:
                weights: np.array(N)
        """
        if isinstance(returns, pd.DataFrame):
            returns = returns.values
        # Convert simple returns to log returns so summation compounds.
        if return_type == 'pct':
            returns = np.log(returns + 1.)
        crets = returns.sum(0)
        crets = np.nan_to_num(crets)
        crank = crets.argsort()
        # Equal-weight the topk highest-momentum assets.
        weights = np.zeros(returns.shape[1])
        weights[crank[-topk:]] = 1. / topk
        return weights
    def lowvol_port(self, returns, topk=5):
        """
        Lowvol Equal Weight Portfolio with TopK
            Args:
                returns: pd.DataFrame or np.array
                topk: top K
            Return:
                weights: np.array(N)
        """
        if isinstance(returns, pd.DataFrame):
            returns = returns.values
        sig = returns.std(0)
        sig = np.nan_to_num(sig)
        srank = sig.argsort()
        # Equal-weight the topk lowest-volatility assets.
        weights = np.zeros(returns.shape[1])
        weights[srank[:topk]] = 1. / topk
        return weights
    def ew_port(self, n):
        """
        Equal Weight Portfolio with n assets
            Args:
                n: asset num
            Return:
                weights: np.array(n)
        # NOTE(review): unlike the other methods this returns a torch tensor,
        # not np.array -- confirm downstream callers expect that.
        """
        weights = torch.ones(n) / n
        return weights
    def solve_amount(self, asset_prices, asset_embs, optimal_emb, wealth):
        """
        Solve for integer trading amounts replicating a target embedding.
            Args:
                asset_prices: np.array, per-asset prices used to size the
                    positions (1 X N)
                asset_embs: np.array, per-asset embeddings (N X M)
                optimal_emb: optimal portfolio embedding (1 X M)
                wealth: total investable amount
            Return:
                buying_amount: per-asset share counts
                prob_value: distance (L2) between the optimal and the final
                    portfolio embedding
        """
        wealth =\
            wealth * (1. - max(self.buying_fee, self.selling_fee))  # account for trading fees
        wealth = wealth * (1. - self.min_cash_rate)  # keep the minimum cash buffer
        # Columns become embeddings scaled to per-share weight contributions.
        asset_embs_v = asset_embs.transpose() * asset_prices / wealth
        asset_prices = asset_prices.reshape(-1)
        buying_amount = cp.Variable(asset_prices.shape[0])
        optimal_emb = optimal_emb.reshape(-1)
        objective = cp.Minimize(self.m *
                                cp.sum_squares((asset_embs_v @ buying_amount)
                                               - optimal_emb))
        constraints = [buying_amount >= 0,
                       asset_prices.T @ buying_amount == wealth]
        prob = cp.Problem(objective, constraints)
        prob.solve()
        # Round the continuous solution to whole shares.
        buying_amount = np.round(buying_amount.value, 0)
        return buying_amount, prob.value
    def get_replicated_buying_amounts(self, closes, asset_embs, weights,
                                      insts=['A069500', 'A229200',
                                             'A114800', 'A251340'],
                                      topk=10, wealth=50000000):
        """
        Replicate an optimal portfolio with the topk-weighted stocks plus a
        fixed set of market ETFs, converting the result into share counts.
            Args:
                closes: pd.Series of per-stock closing prices (stock_num)
                asset_embs: torch.tensor per-stock embeddings
                    (1, stock_num, emb_dim)
                weights: torch.tensor per-stock target weights (1, stock_num)
                insts: list of market ETFs always included in the replication
                    (default: K200, KQ150 long/inverse instruments)
                topk: number of top-weighted stocks used for replication
                * closes, asset_embs and weights must share the same ordering
            Return:
                amounts: pd.DataFrame of share counts to buy
                aweights: pd.DataFrame of weights implied by those counts
                value_est: total value implied by `closes`
                prob_value: embedding distance of the replication
        """
        ins = []
        for inst in insts:
            ind = np.argwhere(closes.index == inst).item()
            ins.append(ind)
        ranks = weights.argsort(descending=True)
        ranks = ranks.cpu().numpy().reshape(-1)
        # Candidate universe: topk stocks by weight, union the fixed ETFs.
        sel = np.unique(np.concatenate((ranks[:topk], ins), axis=-1))
        optimal_emb = self.calc_optimal_emb(asset_embs, weights)
        embs = asset_embs[0, sel].cpu().numpy()
        optimal_emb = optimal_emb.view(-1, 1).cpu().numpy()
        amounts, prob_value = self.solve_amount(closes.iloc[sel].values,
                                                embs, optimal_emb, wealth)
        amounts = pd.DataFrame(amounts.reshape(-1, 1),
                               index=closes.index[sel],
                               columns=['amounts'])
        # Drop zero-quantity rows.
        amounts = amounts[amounts['amounts'] > 0]
        closes = pd.DataFrame(closes.values, index=closes.index, columns=amounts.columns)
        value_est = (amounts.values.ravel() * closes.loc[amounts.index].values.ravel()).sum()
        aweights = (amounts * closes.loc[amounts.index]) / value_est
        return amounts, aweights, value_est, prob_value
    def calc_optimal_emb(self, asset_embs, weights):
        """
        calculate optimal embedding (weighted sum of asset embeddings)
            Args:
                asset_embs: torch.tensor
                    (batch_size, stock_num, emb_dim)
                weights: torch.tensor
                    (batch_size, stock_num)
        """
        optimal_emb = torch.matmul(weights, asset_embs)
        return optimal_emb
    def adjust_weights(self, weights):
        """
        Rescale raw optimizer weights.

        * when nonneg:
            weights /= weights.sum()
        * when weights[weights > 0].sum() > 0:
            weights /= weights[weights > 0].sum()
        * when weights[weights > 0].sum() < 0 and
          weights[weights < 0] != 0:
            weights /= -weights[weights < 0].sum()
        """
        if (weights != 0).sum() > 0:
            # Normalize so the largest absolute weight is 1, then divide by
            # the dominant (long or short) side's gross exposure.
            weights = weights / abs(weights).max()
            wpos_sum = weights[weights > 0].sum()
            wneg_sum = -weights[weights < 0].sum()
            if weights.sum() != 0:
                weights /= max(wpos_sum, wneg_sum)
return weights | kimyoungh/singlemolt | statesman/classic_optimizer.py | classic_optimizer.py | py | 11,771 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.DataFrame",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "numpy.cov",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.nan_to_num",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.nan_to_num",
... |
73529467707 | import os.path
from sklearn import metrics
from torch import nn, optim
# noinspection PyUnresolvedReferences
from tests.pytest_helpers.data import dataloaders, image
# noinspection PyUnresolvedReferences
from tests.pytest_helpers.nn import sample_model
def test_fit(sample_model, dataloaders):
    """fit() on a small model must complete without raising."""
    model = sample_model(
        nn.CrossEntropyLoss,
        optim.Adam,
        [(metrics.accuracy_score, {})]
    )
    # Let any exception propagate: pytest then reports the real traceback.
    # The previous bare `except: assert False` swallowed every error
    # (even KeyboardInterrupt) and hid the actual failure reason.
    model.fit(dataloaders)
def test_prediction(sample_model, image):
    """A single-image forward pass must yield one row of two class scores."""
    _image = image('../sampleData/images/cat1.jpeg')
    model = sample_model(nn.CrossEntropyLoss, optim.Adam, [(metrics.recall_score, {'average': 'macro'})])
    predictions = model.predict(_image)
    # Batch of 1, binary classifier -> output shaped (1, 2).
    assert list(predictions.size()) == [1, 2]
def test_save(sample_model, dataloaders):
    """Fitting must checkpoint the best model to ./bestModel.pkl.tar."""
    # NOTE(review): the checkpoint is left behind after the test; a
    # tmp_path fixture would keep the working directory clean.
    model = sample_model(
        nn.CrossEntropyLoss,
        optim.Adam,
        [(metrics.accuracy_score, {})]
    )
    model.fit(dataloaders)
assert os.path.exists('./bestModel.pkl.tar') | default-303/easyTorch | tests/testUtils/test_trainer.py | test_trainer.py | py | 1,039 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "tests.pytest_helpers.nn.sample_model",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_... |
21299192914 | """Module to evaluate full pipeline on the validation set.
python evaluate.py
"""
#!/usr/bin/env python
# coding: utf-8
import os
import sys
import glob
import numpy as np
import image_preprocessing
import cnn
import bayesian_network
import json
import pandas as pd
# Emotion label -> integer class id, shared by all predictors below.
classes = {"Positive": 0, "Neutral": 1, "Negative": 2, "None": 3}
# function to classify an image
def classify_image(image_folder_path, image_name, real_label, cnn_model, bayesian_model, labels_list):
    """
    Classify one image with the CNN, the Bayesian network, and their fusion.

    Returns the integer ids (see `classes`) for the real label and the three
    predictions, plus the number of faces detected in the image.
    """
    # NOTE(review): val_labels.json is re-read for every image; hoisting the
    # load out of the per-image path would avoid repeated disk reads.
    with open('val_labels.json', mode='r', encoding='utf-8') as f:
        image_labels_dict = json.load(f)
    labels = image_labels_dict[image_name]
    # preprocess the image (writes aligned faces under <folder>/Aligned/)
    image_preprocessing.preprocess(image_folder_path, image_name)
    # get mean cnn predictions for the faces from the image
    cnn_label, cnn_dict, faces_detected = cnn.predict_image(cnn_model, image_folder_path + "Aligned/", image_name)
    # get the bayesian and bayesian + cnn predictions for the image
    bayesian_label, bayesian_cnn_label, emotion_dict, emotion_cnn_dict = bayesian_network.inference(bayesian_model, labels_list, labels, cnn_label)
    return classes[real_label], classes[str(cnn_label)], classes[str(bayesian_label)], classes[str(bayesian_cnn_label)], faces_detected
# Load both predictors once, up front, and share them across all images.
# load the cnn model
cnn_model = cnn.load_model()
# load the bayesian model (also returns the label node names)
bayesian_model, labels_list = bayesian_network.load_model()
# function to evaluate the pipeline on a given directory
def evaluate(image_folder_path, real_label):
    """
    Run the full classification pipeline on every .jpg in *image_folder_path*.

    Args:
        image_folder_path: folder containing the class's validation images
        real_label: ground-truth emotion label for every image in the folder
    Returns:
        list of per-image dicts with the actual class, the three predicted
        classes, and the number of faces detected.
    """
    image_files = sorted(glob.glob(image_folder_path + "*.jpg"))
    # Count the jpgs themselves.  The old `len(files) - 1` counted *all*
    # directory entries minus one, which miscounts whenever the folder
    # contains anything other than exactly one non-jpg file.
    file_count = len(image_files)
    predictions = []
    for i, file in enumerate(image_files, start=1):
        # extract the image name from the path
        image_name = (file.split('/'))[-1]
        print("Image: " + image_name)
        print(str(i) + "/" + str(file_count))
        # collect the image name and all predictions for this image
        prediction = {"Image": image_name}
        (prediction["Actual"], prediction["CNN"], prediction["Bayesian"],
         prediction["Bayesian + CNN"], prediction["Faces Detected"]) = classify_image(
            image_folder_path, image_name, real_label,
            cnn_model, bayesian_model, labels_list)
        predictions.append(prediction)
    return predictions
# Evaluate each emotion class folder in turn and collect all predictions.
class_list = ['Positive', 'Neutral', 'Negative']
predictions_list = []
# for each class in the class list
for emotion_class in class_list:
    # evaluate all the images in that class's validation folder
    predictions = evaluate('input/val/' + emotion_class + '/', emotion_class)
    # add the predictions to the predictions list
    predictions_list += predictions
# create a pandas dataframe from the predictions list
df = pd.DataFrame(predictions_list)
# persist the results for later analysis (pickle format, file "predictions")
df.to_pickle('predictions')
| samanyougarg/Group-Emotion-Recognition | evaluate.py | evaluate.py | py | 3,390 | python | en | code | 43 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "image_preprocessing.preprocess",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cnn.predict_image",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "bayesian_n... |
23196116357 | import pyspark
import networkx as nx
import pandas as pd
from pyspark.sql.types import (
LongType,
StringType,
FloatType,
IntegerType,
DoubleType,
StructType,
StructField,
)
import pyspark.sql.functions as f
from pyspark.sql.functions import pandas_udf, PandasUDFType
from networkx.algorithms.centrality import (
eigenvector_centrality,
harmonic_centrality,
)
def eigencentrality(
    sparkdf,
    src="src",
    dst="dst",
    cluster_id_colname="cluster_id",
):
    """
    Args:
        sparkdf: input edgelist Spark DataFrame
        src: src column name
        dst: dst column name
        cluster_id_colname: Graphframes-created connected components created cluster_id
    Returns:
        node_id:
        eigen_centrality: eigenvector centrality of cluster cluster_id
        cluster_id: cluster_id corresponding to the node_id
    Eigenvector Centrality is an algorithm that measures the transitive influence or connectivity of nodes.
    Eigenvector Centrality was proposed by Phillip Bonacich, in his 1986 paper Power and Centrality:
    A Family of Measures.
    It was the first of the centrality measures that considered the transitive importance of a node in a graph,
    rather than only considering its direct importance.
    Relationships to high-scoring nodes contribute more to the score of a node than connections to low-scoring nodes.
    A high score means that a node is connected to other nodes that have high scores.
    example input spark dataframe
    |src|dst|weight|cluster_id|distance|
    |---|---|------|----------|--------|
    |  f|  d|  0.67|         0|   0.329|
    |  f|  g|  0.34|         0|   0.659|
    |  b|  c|  0.56|8589934592|   0.439|
    |  g|  h|  0.99|         0|   0.010|
    |  a|  b|   0.4|8589934592|     0.6|
    |  h|  i|   0.5|         0|     0.5|
    |  h|  j|   0.8|         0|   0.199|
    |  d|  e|  0.84|         0|   0.160|
    |  e|  f|  0.65|         0|    0.35|
    example output spark dataframe
    |node_id|   eigen_centrality|cluster_id|
    |-------|-------------------|----------|
    |   b   |  0.707106690085642|8589934592|
    |   c   | 0.5000000644180599|8589934592|
    |   a   | 0.5000000644180599|8589934592|
    |   f   | 0.5746147732828122|         0|
    |   d   | 0.4584903903420785|         0|
    |   g   |0.37778352393858183|         0|
    |   h   |0.27663243805676946|         0|
    |   i   |0.12277029263709134|         0|
    |   j   |0.12277029263709134|         0|
    |   e   | 0.4584903903420785|         0|
    """
    # Output schema of the grouped-map UDF: one row per node.
    ecschema = StructType(
        [
            StructField("node_id", StringType()),
            StructField("eigen_centrality", DoubleType()),
            StructField(cluster_id_colname, LongType()),
        ]
    )
    # Rebind to plain locals so the UDF closure serializes simple names
    # rather than the outer function's arguments.
    psrc = src
    pdst = dst
    @pandas_udf(ecschema, PANDAS_UDF_TYPE_GROUPED_MAP := PandasUDFType.GROUPED_MAP) if False else pandas_udf(ecschema, PandasUDFType.GROUPED_MAP)
    def eigenc(pdf: pd.DataFrame) -> pd.DataFrame:
        # Each pdf holds the edges of a single connected component.
        nxGraph = nx.Graph()
        nxGraph = nx.from_pandas_edgelist(pdf, psrc, pdst)
        # Loose tolerance (1e-3) favors convergence speed over precision.
        ec = eigenvector_centrality(nxGraph, tol=1e-03)
        out_df = (
            pd.DataFrame.from_dict(ec, orient="index", columns=["eigen_centrality"])
            .reset_index()
            .rename(
                columns={"index": "node_id", "eigen_centrality": "eigen_centrality"}
            )
        )
        # Tag every node row with its cluster id (constant within a group).
        cluster_id = pdf[cluster_id_colname][0]
        out_df[cluster_id_colname] = cluster_id
        return out_df
    # Run the centrality computation independently per cluster.
    out = sparkdf.groupby(cluster_id_colname).apply(eigenc)
    return out
def harmoniccentrality(sparkdf, src="src", dst="dst", cluster_id_colname="cluster_id"):
    """Compute harmonic centrality for every node of every cluster.

    Harmonic centrality (also known as valued centrality) is a variant of
    closeness centrality invented to solve the problem the original formula
    had with unconnected graphs: rather than summing the distances of a node
    to all other nodes, it sums the inverse of those distances, so infinite
    distances contribute zero instead of breaking the average.

    Args:
        sparkdf: input edgelist Spark DataFrame
        src: src column name
        dst: dst column name
        cluster_id_colname: Graphframes-created connected-components cluster id column

    Returns:
        Spark DataFrame with columns:
            node_id: node identifier
            harmonic_centrality: harmonic centrality of the node within its cluster
            cluster_id: cluster_id corresponding to the node_id
    """
    hcschema = StructType(
        [
            StructField("node_id", StringType()),
            StructField("harmonic_centrality", DoubleType()),
            StructField(cluster_id_colname, LongType()),
        ]
    )
    psrc = src
    pdst = dst

    @pandas_udf(hcschema, PandasUDFType.GROUPED_MAP)
    def harmc(pdf: pd.DataFrame) -> pd.DataFrame:
        # One undirected graph per cluster batch; edge weights are ignored.
        # (The original also created a throwaway nx.Graph() that was
        # immediately overwritten — removed.)
        nxGraph = nx.from_pandas_edgelist(pdf, psrc, pdst)
        hc = harmonic_centrality(nxGraph)
        out_df = (
            pd.DataFrame.from_dict(hc, orient="index", columns=["harmonic_centrality"])
            .reset_index()
            .rename(columns={"index": "node_id"})
        )
        # Every row of a grouped batch shares the same cluster id; use
        # positional indexing so a non-default index cannot break the lookup.
        out_df[cluster_id_colname] = pdf[cluster_id_colname].iloc[0]
        return out_df

    out = sparkdf.groupby(cluster_id_colname).apply(harmc)
    return out
| moj-analytical-services/splink_graph | splink_graph/node_metrics.py | node_metrics.py | py | 6,877 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "pyspark.sql.types.StructType",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 86,
"usage_type": "call"
},
... |
74750230586 | import os
import pathlib
import shutil
from datetime import datetime
from pathlib import Path
from my_logger_object import create_logger_object
def copy_component(component_kb_list, component_name, source_folder, target_folder):
    """Copy every MSP file for *component_name* from source_folder (walked
    recursively) into target_folder, recording each "<component>,<KB>" pair.

    File names are expected to look like "<component> - ..._<KBnumber>_...".

    Args:
        component_kb_list: accumulator list; new "<component>,<kb>" entries
            are appended in place (duplicates skipped).
        component_name: component prefix expected before the first "-".
        source_folder: directory tree to scan.
        target_folder: directory to copy matches into (created if missing).
    """
    if not os.path.exists(target_folder):
        os.makedirs(target_folder)
    for root, dirs, files in os.walk(source_folder):
        for file_name in files:
            component_name_in_file = file_name.split("-")[0].strip()
            if component_name != component_name_in_file:
                continue
            source_file_path = os.path.join(root, file_name)
            target_file_path = os.path.join(target_folder, file_name)
            # The KB number is the second "_"-separated field of the name.
            kb_number_in_file = file_name.split("_")[1].strip()
            entry = component_name + "," + kb_number_in_file
            if entry not in component_kb_list:
                component_kb_list.append(entry)
            if os.path.isfile(source_file_path):
                try:
                    shutil.copy(source_file_path, target_file_path)
                except OSError:
                    # Narrowed from a bare except: keep going on a failed
                    # copy, but only swallow filesystem errors.
                    logger.debug("exception")
# --- script entry: load the component list, then copy the matching MSP
# files into timestamped x86/x64 output folders --------------------------------
current_script_folder = str(pathlib.Path(__file__).parent.absolute()) + os.sep
FILENAME = current_script_folder + "log_" + os.path.basename(__file__) + ".log"
logger = create_logger_object(FILENAME)
logger.info("The script starts running.")
logger.info("The script folder is " + current_script_folder)
component_list = []
f = None  # BUGFIX: predefine so the finally block cannot hit an unbound name if open() fails
try:
    f = open(current_script_folder + "output_msp_file_name_for_specified_kb.txt", "r")
    for line in f:
        component_str = line.split(",")[-1].strip()
        if component_str in component_list:
            logger.info("Duplicate component number: " + component_str)
        else:
            component_list.append(component_str)
except Exception as ex:
    logger.info("Encounter exception when loading expected kb list." + str(ex))
finally:
    if f is not None:
        f.close()
logger.info(len(component_list))
component_list.sort()
component_list_file = current_script_folder + "output_non_dup_component.txt"
with open(component_list_file, "w") as f:
    for item in component_list:
        f.write("%s\n" % item)
# Timestamp suffix keeps each run's output folders unique.
time_now = datetime.now().strftime("%Y%m%d%H%M%S")
source_folder_x32 = r"C:\CodeRepos\GetOfficeKBs\Folder_Office2016_KBs\x86_msp"
target_folder_x32 = (
    "C:\CodeRepos\GetOfficeKBs\Folder_Latest_KB_Numbers\\" + time_now + "_x86_msp"
)
source_folder_x64 = r"C:\CodeRepos\GetOfficeKBs\Folder_Office2016_KBs\x64_msp"
target_folder_x64 = (
    "C:\CodeRepos\GetOfficeKBs\Folder_Latest_KB_Numbers\\" + time_now + "_x64_msp"
)
component_kb_list = []
for item in component_list:
    logger.debug(item)
    copy_component(component_kb_list, item, source_folder_x32, target_folder_x32)
    copy_component(component_kb_list, item, source_folder_x64, target_folder_x64)
component_kb_list.sort()
component_kb_list_file = current_script_folder + "output_latest_kb_for_component.txt"
with open(component_kb_list_file, "w") as f:
    for item in component_kb_list:
        f.write("%s\n" % item)
logger.info("Please check output file: " + component_kb_list_file)
logger.info(f"Please check output folder: {target_folder_x32}")
logger.info(f"Please check output folder: {target_folder_x64}")
logger.info("The script ends.")
| FullStackEngN/GetOfficeKBs | get_msp_file_for_specified_msp_list.py | get_msp_file_for_specified_msp_list.py | py | 3,495 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": ... |
23303525367 | import pandas as pd
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
def kmeans():
    """Cluster rental listings by (area, rent) with K-Means.

    Reads ``2019-04-28xm_fish.csv``, drops rows with zero floor area or rent
    above 20000, fits a 2-cluster K-Means on those two numeric columns, shows
    a colored scatter plot and prints the silhouette score of the labelling.
    """
    columns = ['房源名称', '租赁种类', '房源类型', '房源户型', '房源面积',
               '房源楼层', '房源朝向', '装修等级', '房源地址', '行政区划',
               '房源租金', '所在小区', '房源描述', '更新时间']
    data = pd.read_csv(
        '2019-04-28xm_fish.csv',
        names=columns,
        keep_default_na=False,
        index_col=False,
    )
    # Drop obviously invalid rows: zero floor area first, then extreme rents.
    data = data.drop(index=data.loc[data['房源面积'] == 0].index)
    data = data.drop(index=data.loc[data['房源租金'] > 20000].index)

    features = data.iloc[:, [4, 10]]  # area and rent columns
    km = KMeans(n_clusters=2, max_iter=500)
    labels = km.fit(features).labels_
    predicted = km.predict(features)

    palette = ['red', 'green', 'blue', 'black', 'orange']
    colors = [palette[i] for i in predicted]
    plt.scatter(features['房源面积'], features['房源租金'], c=colors)
    print(silhouette_score(features, labels))
    plt.show()
# Run the clustering only when executed as a script (not on import).
if __name__ == '__main__':
    kmeans()
# kmeans对初始值的稳定性较差
# input_file = 'a.csv'
# output_file = 'out.csv'
#
# k = 3
# iteration = 500
# data = pd.read_csv(input_file, index_col='Id')
# data_zs = 1.0 * (data - data.mean()) / data.std()
#
# model = KMeans(n_clusters=k, n_jobs=2, max_iter=iteration)
# model.fit(data_zs)
#
# r1 = pd.Series(model.labels_).value_counts()
# r2 = pd.DataFrame(model.cluster_centers_)
# r = pd.concat([r2, r1], axis=1)
# r.columns = list(data.columns) + [u'类别数目']
# print(r)
#
# r = pd.concat([data, pd.Series(model.labels_, index=data.index)], axis=1)
# r.columns = list(data.columns) + [u'聚类类别']
# r.to_csv(output_file)
| Joy1897/Spider_58 | kmeans.py | kmeans.py | py | 2,423 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matpl... |
41543430774 | import re
import sys
from .ply import lex
from .ply.lex import TOKEN
class CLexer(object):
""" A lexer for the C- language. After building it, set the
input text with input(), and call token() to get new
tokens.
The public attribute filename can be set to an initial
filaneme, but the lexer will update it upon #line
directives.
"""
def __init__(self, error_func, on_lbrace_func, on_rbrace_func,
             type_lookup_func):
    """ Create a new Lexer.

        error_func:
            An error function. Will be called with an error
            message, line and column as arguments, in case of
            an error during lexing.

        on_lbrace_func, on_rbrace_func:
            Called when an LBRACE or RBRACE is encountered
            (likely to push/pop type_lookup_func's scope)

        type_lookup_func:
            A type lookup function. Given a string, it must
            return True IFF this string is a name of a type
            that was defined with a typedef earlier.
    """
    self.error_func = error_func
    self.on_lbrace_func = on_lbrace_func
    self.on_rbrace_func = on_rbrace_func
    self.type_lookup_func = type_lookup_func
    # Initial filename; updated by #line directives found in the input.
    self.filename = ''

    # Keeps track of the last token returned from self.token()
    self.last_token = None

    # Allow either "# line" or "# <num>" to support GCC's cpp output
    self.line_pattern = re.compile(r'([ \t]*line\W)|([ \t]*\d+)')
    self.pragma_pattern = re.compile(r'[ \t]*pragma\W')
def build(self, **kwargs):
    """ Builds the lexer from the specification. Must be
        called after the lexer object is created.

        This method exists separately, because the PLY
        manual warns against calling lex.lex inside
        __init__
    """
    # kwargs are forwarded verbatim to ply.lex.lex (e.g. debug, optimize).
    self.lexer = lex.lex(object=self, **kwargs)
def reset_lineno(self):
    """ Resets the internal line number counter of the lexer.
    """
    self.lexer.lineno = 1
def input(self, text):
    """ Feed *text* to the underlying PLY lexer. """
    self.lexer.input(text)
def token(self):
    """ Return the next token (None at end of input), remembering it in
        self.last_token so later rules can make context-sensitive decisions.
    """
    self.last_token = self.lexer.token()
    return self.last_token
def find_tok_column(self, token):
    """ Find the column of the token in its line.
    """
    # Distance from the last newline before the token; 1-based columns.
    last_cr = self.lexer.lexdata.rfind('\n', 0, token.lexpos)
    return token.lexpos - last_cr
def _error(self, msg, token):
    # Report through the user-supplied callback, then skip the offending
    # character so lexing can continue.
    location = self._make_tok_location(token)
    self.error_func(msg, location[0], location[1])
    self.lexer.skip(1)
def _make_tok_location(self, token):
    # (line, column) pair for error reporting.
    return (token.lineno, self.find_tok_column(token))
##
## Reserved keywords
##
keywords = (
    'BOOL', 'BREAK', 'ELSE', 'FALSE', 'FOR', 'IF', 'INT',
    'READ', 'RETURN', 'STRING', 'TRUE', 'VOID', 'WHILE', 'WRITE'
)

# Map source spelling -> token name (e.g. 'while' -> 'WHILE').
keyword_map = {}
for keyword in keywords:
    # NOTE(review): '_BOOL' and '_COMPLEX' never appear in `keywords` above,
    # so these two branches look like dead code inherited from a C lexer —
    # confirm before removing.
    if keyword == '_BOOL':
        keyword_map['_Bool'] = keyword
    elif keyword == '_COMPLEX':
        keyword_map['_Complex'] = keyword
    else:
        keyword_map[keyword.lower()] = keyword
##
## All the tokens recognized by the lexer
##
tokens = keywords + (
# Identifiers
'ID',
# Type identifiers (identifiers previously defined as
# types with typedef)
'TYPEID',
# String literals
'STRING_LITERAL',
'WSTRING_LITERAL',
# Operators
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
'AND', 'OR', 'NOT', 'LSHIFT', 'RSHIFT',
# Relations
'EQ', 'NE', 'LT', 'LE', 'GT', 'GE',
# Assignment
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL',
'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL',
'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
# Increment/decrement
'PLUSPLUS', 'MINUSMINUS',
# Conditional operator (?)
'CONDOP',
# Delimeters
'LPAREN', 'RPAREN', # ( )
'LBRACKET', 'RBRACKET', # [ ]
'LBRACE', 'RBRACE', # { }
'COMMA', 'PERIOD', # . ,
'SEMI', 'COLON', # ; :
# Ellipsis (...)
'ELLIPSIS',
# pre-processor
'PPHASH', # '#'
'PPPRAGMA', # 'pragma'
'PPPRAGMASTR',
) | ricoms/mips | compiladorCminus/pycminus/c_lexer.py | c_lexer.py | py | 4,426 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.compile",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "ply.lex.lex",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "ply.lex",
"line_number": 53,
... |
24200680597 | from collections import Counter
class Solution:
    def func(self, strings, K):
        """Print the K most frequent and then the K least frequent strings.

        Each printed line is "<string> <count>".  Ties are broken by the
        (string, count) tuple itself, i.e. lexicographically smaller strings
        come first in both passes.

        Args:
            strings: list[str]
            K: int
        """
        freq_pairs = list(Counter(strings).items())
        # Pass 1: highest frequency first, lexicographic tie-break.
        by_freq_desc = sorted(freq_pairs, key=lambda p: [-p[1], p])
        for word, count in by_freq_desc[:K]:
            print(word, count)
        # Pass 2: lowest frequency first, same tie-break.
        by_freq_asc = sorted(freq_pairs, key=lambda p: [p[1], p])
        for word, count in by_freq_asc[:K]:
            print(word, count)
if __name__ == "__main__":
    # Input format: first line "N K", then N strings (one per line).
    N, K = list(map(int, input().split()))
    strings = []
    for _ in range(N):
        strings.append(input())
    Solution().func(strings, K)
{
"api_name": "collections.Counter",
"line_number": 11,
"usage_type": "call"
}
] |
71567683388 | import streamlit as st
import pandas as pd
@st.cache
def load_data():
    """Load the per-country emission-factor table (semicolon-separated,
    latin1-encoded) once; Streamlit caches the result across reruns.

    NOTE(review): ``st.cache`` is deprecated in recent Streamlit releases in
    favour of ``st.cache_data`` — confirm the installed version before changing.
    """
    data = pd.read_csv('data.csv', sep=';', encoding='latin1')
    return data
data = load_data()

# Country picker; drives which per-source emission factors are used below.
selected_country = st.selectbox("Select a Country", data['Country'])

# Two input columns: sliders on the left, exact numeric entry on the right.
col1, col2 = st.columns(2)

with col1:
    coal_percent = st.slider("Coal %", 0.0, 100.0, 0.0, key="coal_slider")
    gas_percent = st.slider("Gas %", 0.0, 100.0, 0.0, key="gas_slider")
    oil_percent = st.slider("Oil %", 0.0, 100.0, 0.0, key="oil_slider")
    hydro_percent = st.slider("Hydro %", 0.0, 100.0, 0.0, key="hydro_slider")
    renewable_percent = st.slider("Renewable %", 0.0, 100.0, 0.0, key="renewable_slider")
    nuclear_percent = st.slider("Nuclear %", 0.0, 100.0, 0.0, key="nuclear_slider")

with col2:
    coal_percent_manual = st.number_input("Coal % (Manual Input)", 0.0, 100.0, 0.0, format="%.2f", key="coal_manual")
    gas_percent_manual = st.number_input("Gas % (Manual Input)", 0.0, 100.0, 0.0, format="%.2f", key="gas_manual")
    oil_percent_manual = st.number_input("Oil % (Manual Input)", 0.0, 100.0, 0.0, format="%.2f", key="oil_manual")
    hydro_percent_manual = st.number_input("Hydro % (Manual Input)", 0.0, 100.0, 0.0, format="%.2f", key="hydro_manual")
    renewable_percent_manual = st.number_input("Renewable % (Manual Input)", 0.0, 100.0, 0.0, format="%.2f", key="renewable_manual")
    nuclear_percent_manual = st.number_input("Nuclear % (Manual Input)", 0.0, 100.0, 0.0, format="%.2f", key="nuclear_manual")

# Manual entry wins over the slider.  NOTE(review): a manual value of 0.0 is
# falsy, so it can never override a non-zero slider — confirm that is intended.
coal_percent_total = coal_percent_manual if coal_percent_manual else coal_percent
gas_percent_total = gas_percent_manual if gas_percent_manual else gas_percent
oil_percent_total = oil_percent_manual if oil_percent_manual else oil_percent
hydro_percent_total = hydro_percent_manual if hydro_percent_manual else hydro_percent
renewable_percent_total = renewable_percent_manual if renewable_percent_manual else renewable_percent
nuclear_percent_total = nuclear_percent_manual if nuclear_percent_manual else nuclear_percent

# Sum of all shares; presumably should total 100% — not enforced here.
Overall_Emission = (coal_percent_total + gas_percent_total + oil_percent_total +
                    hydro_percent_total + renewable_percent_total + nuclear_percent_total)

# Per-source emission factors for the chosen country.
coal_CO2 = data[data['Country'] == selected_country]["Coal"].values[0]
gas_CO2 = data[data['Country'] == selected_country]["Gas"].values[0]
oil_CO2 = data[data['Country'] == selected_country]["Oil"].values[0]
hydro_CO2 = data[data['Country'] == selected_country]["Hydro"].values[0]
renewable_CO2 = data[data['Country'] == selected_country]["Renewable"].values[0]
nuclear_CO2 = data[data['Country'] == selected_country]["Nuclear"].values[0]

# Weighted sum of factors; the /100000 scaling to tons depends on the units
# in data.csv — TODO confirm.
kgCO2_result = ((coal_percent_total * coal_CO2 + gas_percent_total * gas_CO2 + oil_percent_total * oil_CO2 +
                 hydro_percent_total * hydro_CO2 + renewable_percent_total * renewable_CO2 +
                 nuclear_percent_total * nuclear_CO2) / 100000)

st.markdown("<div class='result-section'>", unsafe_allow_html=True)
st.write("Overall Emission %:", Overall_Emission)
st.write("CO2 Emissions (tons):", round(kgCO2_result, 2), "tons of CO2")
st.markdown("</div>", unsafe_allow_html=True)
| sneha-4-22/Energy-Calculator | app.py | app.py | py | 3,135 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit.cache",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "streamlit.selectbox",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "streamlit.colu... |
23202639490 | import struct
import socket
import sys
import ipaddress
import threading
import os
class client:
    """Tracks one simulated host's network state: IP address, link-layer
    address, gateway, ARP table, MTU and the IPv4 identification counter.
    """

    def __init__(self, ip_address, ll_address):
        """Store addressing information.

        Args:
            ip_address: CIDR-notation address, e.g. "192.168.1.10/24".
            ll_address: link-layer address (a UDP port in this simulation).
        """
        self.ip_address = ip_address
        self.ip_no_mask = ip_address.split("/")[0]
        self.ll_address = ll_address
        self.gateway = None
        self.arpTable = {}  # maps IP address -> link-layer address
        self.MTU = 1500
        self.id_counter = 0

    def get_idCounter(self):
        """Return the current IPv4 identification counter."""
        return self.id_counter

    def set_idCounter(self, value):
        """Set the IPv4 identification counter."""
        self.id_counter = value

    def get_ip(self):
        """Return the IP address without its CIDR suffix."""
        return self.ip_no_mask

    def get_MTU(self):
        """Return the Maximum Transmission Unit."""
        return self.MTU

    def set_MTU(self, value):
        """Set the Maximum Transmission Unit for the network."""
        self.MTU = value

    def get_llAddr(self):
        """Return the link-layer address."""
        return self.ll_address

    def addToArpTable(self, ip_address, ll_address):
        """Record an IP -> link-layer mapping in the ARP table."""
        self.arpTable[ip_address] = ll_address

    def viewArpTable(self):
        """Print every entry of the ARP table."""
        for key, value in self.arpTable.items():
            print("Key: ", key, " Value: ", value)

    def setGateway(self, ipaddress):
        """Set the gateway IP address."""
        self.gateway = ipaddress

    def getGateway(self):
        """Return the gateway IP address, or None when unset."""
        return self.gateway

    def hasGateway(self):
        """Return True when a gateway has been configured."""
        return self.gateway is not None

    def hasMapping(self, ipaddr):
        """Return True when *ipaddr* has a non-None ARP entry."""
        return self.arpTable.get(ipaddr) is not None

    def get_link_layer_addr(self, ipaddress):
        """Return the link-layer address mapped to *ipaddress* (None if absent)."""
        return self.arpTable.get(ipaddress)

    def hasArpEntry(self, ipaddress):
        """Like hasMapping, but prints a diagnostic when no entry exists."""
        if self.arpTable.get(ipaddress) is not None:
            return True
        print("No ARP entry found")
        return False

    def get_subnetId(self, CIDR_ipaddress):
        """Return the ipaddress.IPv4Interface for a CIDR string."""
        return ipaddress.ip_interface(CIDR_ipaddress)

    def same_subnet(self, other_ip_address):
        """Return True when *other_ip_address* lies inside this host's subnet
        (network and broadcast addresses included)."""
        return ipaddress.IPv4Address(other_ip_address) in \
            ipaddress.ip_network(self.ip_address, strict=False)
class IPv4_packet:
    """Builds the byte representation of a (simplified) IPv4 packet.

    The 20-byte header is packed big-endian as six 16-bit words —
    version/IHL/ToS, total length, identification, flags/fragment-offset,
    TTL/protocol, checksum — followed by the 4-byte source and destination
    addresses and the payload.
    """

    def __init__(self, length, fid, flags, offset, src_ip, dst_ip, payload):
        """Assemble the packet.

        Args:
            length: total length field (header + payload, in bytes)
            fid: identification field (shared by all fragments of a datagram)
            flags: 3-bit flags field (0b001 = more fragments)
            offset: fragment offset in 8-byte units
            src_ip: packed 4-byte source address (socket.inet_aton output)
            dst_ip: packed 4-byte destination address
            payload: str payload; encoded to bytes with the default UTF-8
        """
        self.version = 0b0100
        self.header_length = 0b0101
        self.type_of_service = 0b00000000
        self.total_length = length
        self.identifier = fid
        self.flags = flags
        self.fragment_offset = offset
        self.time_to_live = 0b00100000
        self.protocol = 0b00000000
        self.header_checksum = 0  # checksum is not computed in this simulation
        self.src_address = src_ip
        self.dest_address = dst_ip
        self.payload = payload.encode()
        # BUGFIX: the original wrote `... << 8 + self.type_of_service`; `+`
        # binds tighter than `<<`, so the shift amount was (8 + ToS).  It only
        # produced the right value because ToS is 0.  Parenthesize so the ToS
        # byte genuinely occupies bits 0-7.
        self.version_hLength_tos = (((self.version << 4) + self.header_length) << 8) + self.type_of_service
        self.flags_fragoffset = (self.flags << 13) + self.fragment_offset
        self.ttl_prot = (self.time_to_live << 8) + self.protocol
        self.ip_header = struct.pack('! 6H', self.version_hLength_tos, self.total_length,
                                     self.identifier, self.flags_fragoffset, self.ttl_prot,
                                     self.header_checksum)
        self.packet = self.ip_header + self.src_address + self.dest_address + self.payload

    def getPacket(self):
        """Return the assembled packet bytes."""
        return self.packet

    def __bytes__(self):
        """Return the assembled packet bytes (supports bytes(packet))."""
        return self.packet
def return_args(string):
    """Split a console command into up to four fields.

    Always returns a 4-tuple; unused positions are None:
      * "msg <ip> <text>"     -> ("msg", ip, text, None) — text keeps its spacing
      * "arp set <ip> <port>" -> ("arp", "set", ip, port)
      * other 3-part commands -> (cmd, sub, arg, None)
      * 2-part commands       -> (cmd, sub, None, None)
      * anything else         -> (None, None, None, None)
    """
    parts = string.split(' ', maxsplit=2)
    if len(parts) == 2:
        return (parts[0].strip(" "), parts[1].strip(" "), None, None)
    if len(parts) != 3:
        return (None, None, None, None)
    head, sub, tail = parts
    if head == "msg":
        # The message payload is returned untouched.
        return (head.strip(), sub.strip(), tail, None)
    if head == "arp" and sub == "set":
        ip, port = tail.split(" ")
        return (head.strip(), sub.strip(), ip.strip(), port.strip())
    return (head.strip(), sub.strip(), tail.strip(), None)
def main():
    """Interactive console loop for the simulated network client.

    Command-line args: argv[1] = CIDR IP address, argv[2] = link-layer port.
    Supported commands: "gw set/get", "arp set/get", "msg <ip> <text>",
    "mtu set/get", "exit".  A background thread receives packets on the
    client's link-layer port.
    """
    arp = client(str(sys.argv[1]), str(sys.argv[2]))
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Timeout lets the receive thread periodically poll the terminate flag.
    s.settimeout(2)
    port = int(arp.get_llAddr())
    s.bind(('LOCALHOST', port))
    global terminate
    terminate = False;
    thr = threading.Thread(target=receive_data, args=(s,))
    thr.start()
    while True:
        #sys.stdout.flush()
        arg1 = arg2 = arg3 = arg4 = "-1"
        sys.stdout.write("> ")
        command = input()
        str(command)  # NOTE(review): no-op statement kept from original
        arg1, arg2, arg3, arg4 = return_args(command)
        # Each branch re-derives the expected command string from the parsed
        # args and compares it with the raw input (a round-trip validation).
        if str(command) == "gw set " + str(arg3):
            arp.setGateway(str(arg3))
        elif str(command) == "gw get":
            gway = arp.getGateway()
            if gway == None:
                print("None")
            else:
                print(gway)
        elif str(command) == "arp set " + str(arg3) + " " + str(arg4):
            arp.addToArpTable(str(arg3), int(arg4))
        elif str(command) == "arp get " + str(arg3):
            ll_add = arp.get_link_layer_addr(str(arg3))
            if ll_add != None:
                print(ll_add)
            else:
                print("None")
        elif str(command) == 'msg ' + str(arg2) + ' ' + str(arg3):
            # See if the destination is on the same subnet.
            dstn_ip = str(arg2)
            dstn_port = -1
            message = str(arg3)
            if arp.same_subnet(dstn_ip):
                if arp.hasMapping(dstn_ip):
                    dstn_port = arp.get_link_layer_addr(dstn_ip)
                    # message[1:-1] strips the surrounding quote characters.
                    send_msg(s, arp, dstn_ip, dstn_port, message[1:-1])
                else:
                    print("No ARP entry found")
            else:
                # Off-subnet: relay through the gateway, if one is set.
                if arp.hasGateway():
                    dstn_port = arp.get_link_layer_addr(arp.getGateway())
                    send_msg(s, arp, dstn_ip, dstn_port, message[1:-1])
                else:
                    print("No gateway found")
        elif str(command) == "mtu set " + str(arg3):
            arp.set_MTU(int(arg3))
        elif str(command) == "mtu get":
            print(arp.get_MTU())
        elif str(command) == "exit":
            # Signal the receiver thread to stop, then leave the loop.
            terminate = True
            break
    sys.stdout.flush()
#send message
def send_msg(s, arp_details, dest_ip, dest_port, msg):
    """Send *msg* to dest_ip over UDP socket *s*, fragmenting the IPv4
    payload when it exceeds MTU - 20 bytes.

    Args:
        s: bound UDP socket standing in for the link layer
        arp_details: client instance (source IP, MTU, identification counter)
        dest_ip: destination IP address string
        dest_port: destination link-layer port (int)
        msg: payload string
    """
    source_ip = socket.inet_aton(arp_details.get_ip())
    destination_ip = socket.inet_aton(dest_ip)
    payload_size = arp_details.get_MTU() - 20  # MTU - IP Header
    if len(msg) <= payload_size:
        # Fits in one packet: flags=0, offset=0.
        t = IPv4_packet(len(msg) + 20, arp_details.get_idCounter(), 0, 0, source_ip, destination_ip, msg)
        ipv4_packet = bytes(t)
        s.sendto(ipv4_packet, ('LOCALHOST', dest_port))
    else:
        payload, payload_size = payloads_creator(arp_details, msg)
        offsets = calc_frag_offsets(payload_size, len(msg))
        for i in range(len(payload)):  # one packet per fragment
            if i != len(payload) - 1:
                # Intermediate fragment: "more fragments" flag (0b001) set.
                packet = IPv4_packet(len(payload[i]) + 20, arp_details.get_idCounter(), 0b001, offsets[i], source_ip, destination_ip, payload[i])
                bytes_packet = bytes(packet)
                s.sendto(bytes_packet, ('LOCALHOST', dest_port))
            else:
                # Final fragment: flags cleared so the receiver reassembles.
                packet = IPv4_packet(len(payload[i]) + 20, arp_details.get_idCounter(), 0b000, offsets[i], source_ip, destination_ip, payload[i])
                bytes_packet = bytes(packet)
                s.sendto(bytes_packet, ('LOCALHOST', dest_port))
    # Every datagram (fragmented or not) consumes one identification value.
    arp_details.set_idCounter(arp_details.get_idCounter() + 1)
    return
def payloads_creator(arp_details, message):
    """Slice *message* into MTU-sized payloads for fragmentation.

    The payload size is the largest multiple of 8 that fits in MTU - 20
    (the IPv4 header), because fragment offsets are expressed in 8-byte
    units.

    Args:
        arp_details: object exposing get_MTU()
        message: payload string to slice

    Returns:
        (payloads, payload_size): the list of message slices and the slice
        size used.
    """
    mtu = arp_details.get_MTU()
    payload_size = int((mtu - 20) / 8) * 8  # largest multiple of 8 that fits
    payloads = []
    count = 0
    # BUGFIX: the original condition was `count <= len(message)`, which
    # appended a trailing empty payload whenever the message length was an
    # exact multiple of payload_size.
    while count < len(message):
        payloads.append(message[count:count + payload_size])
        count = count + payload_size
    return payloads, payload_size
def calc_frag_offsets(max_payload_size, msg_size):
    """Return one fragment offset (in 8-byte units) per fragment.

    The number of fragments is ceil(msg_size / max_payload_size); each offset
    is i * max_payload_size / 8 because IPv4 fragment offsets count 8-byte
    blocks.

    BUGFIX: the original under-produced offsets when msg_size was an exact
    multiple of max_payload_size (range(amount - 1), leading to an IndexError
    in the sender) and over-produced them otherwise (round(x + 1)).  Both
    paths now yield exactly one offset per fragment.

    Args:
        max_payload_size: payload bytes per fragment (a multiple of 8)
        msg_size: total message length in bytes
    """
    fragment_count = (msg_size + max_payload_size - 1) // max_payload_size
    return [int(i * max_payload_size / 8) for i in range(fragment_count)]
def receive_data(s):
    """Background receive loop: collect packets/fragments from socket *s*
    and print each datagram once its final fragment arrives.

    Relies on the module-global ``terminate`` flag (set by main()) to stop;
    the socket timeout raises periodically so the flag gets polled.
    """
    packets = {}
    while True:
        try:
            data, addr = s.recvfrom(1500)
            packets, evaluate_flag = add_packet_to_dict(data, packets)
            if evaluate_flag == 1:
                # Last fragment seen: print everything and reset the buffer.
                evaluate_packets(packets)
                packets = {}
        except OSError as e:
            # Timeout (or closed socket) — exit only when main() asked us to.
            if terminate == True:
                break
def add_packet_to_dict(data, packets_dict):
    """File an incoming packet under the key "<source-ip> <packet-id>".

    Returns the updated dict plus a flag that becomes 1 when this packet's
    "more fragments" bit is clear, i.e. the datagram is complete and ready
    for evaluate_packets().
    """
    pLength, pid, flags_offset, protocol, source_ip = struct.unpack(
        '! 2x 3H x B 2x 4s 4x ', data[:20])
    flags = flags_offset >> 13       # top 3 bits of the combined field
    offset = flags_offset & 0x1FFF   # low 13 bits (8-byte units)
    protocol = format(int(protocol), '#04x')
    key = socket.inet_ntoa(bytes(source_ip)) + " " + str(pid)
    packets_dict.setdefault(key, []).append(data)
    eval_flag = 1 if flags == 0 else 0
    return packets_dict, eval_flag
def evaluate_packets(p_dict):
    """Reassemble and report each completed datagram in *p_dict*.

    For every "<source-ip> <packet-id>" entry, fragments are ordered by their
    fragment-offset field and their payloads concatenated; one line is then
    printed — the message itself for protocol 0x00, otherwise the protocol
    number — followed by a fresh "> " prompt.
    """
    for key, fragments in p_dict.items():  # loop through dict items
        source_ip = -1
        protocol = -1
        pieces = []
        # BUGFIX: the original joined payloads with `msg.join(msg_list)`,
        # which used the *last* payload as the join separator and duplicated
        # it inside the reassembled message.  It also relied on arrival
        # order; sort by the 13-bit fragment offset (header bytes 6-7).
        ordered = sorted(fragments,
                         key=lambda v: struct.unpack('!H', v[6:8])[0] & 0x1FFF)
        for v in ordered:
            pLength, pid, flags_offset, protocol, source_ip = struct.unpack(
                '! 2x 3H x B 2x 4s 4x ', v[:20])
            source_ip = socket.inet_ntoa(bytes(source_ip))
            protocol = format(int(protocol), '#04x')
            pieces.append(v[20:].decode())
        msg = "".join(pieces)
        if protocol == "0x00":
            print('\b\bMessage received from {}: "{}"'.format(source_ip, msg))
        else:
            print("\b\bMessage received from {} with protocol {}".format(source_ip, protocol))
    print("> ", end='', flush=True)
    return
if __name__ == '__main__':
    # Run the interactive network-simulation client.
    main()
{
"api_name": "ipaddress.ip_interface",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "ipaddress.IPv4Address",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "ipaddress.ip_network",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "... |
71353706429 | # GMM implementation
# good resource http://www.rmki.kfki.hu/~banmi/elte/bishop_em.pdf
import numpy as np
from scipy import stats
import seaborn as sns
from random import shuffle, uniform
sns.set_style("white")
# Generate sample x-locations from two overlapping ranges.  The pdf values
# y1/y2 are computed but never used below — the EM demo fits the x list only.
x1 = np.linspace(start=-10, stop=10, num=1000)
x2 = np.linspace(start=5, stop=10, num=800)
y1 = stats.norm.pdf(x1, loc=3, scale=1.5)
y2 = stats.norm.pdf(x2, loc=0, scale=3)
# Concatenate and shuffle the two samples into one flat list.
x = list(x1)
x.extend(list(x2))
shuffle(x)
K = 2  # number of assumed distributions within the dataset
epsilon = 0.001  # tolerance change for log-likelihood
max_iter = 100
#gaussian pdf function
def G(datum, mu, sigma):
    """Gaussian probability density of *datum* under N(mu, sigma**2).

    BUGFIX: the original computed
    exp(datum - mu) * (datum - mu) / (2 * sigma**2), which is not the normal
    pdf; the correct exponent is -(datum - mu)**2 / (2 * sigma**2).
    """
    return (1 / np.sqrt(2 * np.pi * sigma * sigma)) * np.exp(-((datum - mu) ** 2) / (2 * sigma * sigma))
#compute log-likelihood
def L(X, N, mu, sigma, pi):
    """Log-likelihood of the first N points of X under the K-component
    Gaussian mixture (K is the module-level constant).

    BUGFIX: the log-likelihood is sum_i log(sum_k pi_k * N(x_i | mu_k,
    sigma_k)); the original summed the raw mixture densities and took a
    single log of the total at the end (and printed the running sum on
    every call — debug output, removed).
    """
    total = 0
    for i in range(N):
        Gk = 0
        for k in range(K):
            Gk += pi[k] * G(X[i], mu[k], sigma[k])
        total += np.log(Gk)
    return total
def estimate_gmm(X, K, epsilon, max_iter):
    """Fit a K-component 1-D Gaussian mixture to X with EM.

    Iterates E/M steps until the log-likelihood changes by less than
    *epsilon* or *max_iter* iterations elapse.

    Returns:
        (mu, sigma, pi): per-component means, spread estimates (average
        squared deviation, fed back into G as-is) and mixture weights.
    """
    N = len(X)
    # Deterministic start values.  (The original drew random mu/sigma and
    # then immediately overwrote them with these constants — the dead random
    # draws are removed.)
    mu = [2, 0]
    sigma = [1, 1]
    # Unnormalized initial weights; the first M-step normalizes them.
    pi = [uniform(0, 10) for _ in range(K)]
    current_loglike = np.inf
    for _ in range(max_iter):
        previous_loglike = current_loglike
        # E step: responsibility of component k for point i.
        mixture_affiliation_all_k = {}
        for i in range(N):
            parts = [pi[k] * G(X[i], mu[k], sigma[k]) for k in range(K)]
            total = sum(parts)
            for k in range(K):
                mixture_affiliation_all_k[(i, k)] = parts[k] / total
        # M step: re-estimate weights, means and spreads from responsibilities.
        mixture_affiliation_for_k = [sum(mixture_affiliation_all_k[(i, k)] for i in range(N)) for k in range(K)]
        for k in range(K):
            pi[k] = mixture_affiliation_for_k[k] / N
            mu[k] = sum(mixture_affiliation_all_k[(i, k)] * X[i] for i in range(N)) / mixture_affiliation_for_k[k]
            sigma[k] = sum(mixture_affiliation_all_k[(i, k)] * (X[i] - mu[k]) ** 2 for i in range(N)) / mixture_affiliation_for_k[k]
        current_loglike = L(X, N, mu, sigma, pi)
        if abs(previous_loglike - current_loglike) < epsilon:
            break
    return mu, sigma, pi
# Demo: fit the 2-component mixture to the generated data and print the result.
print(estimate_gmm(x, K, epsilon, max_iter))
{
"api_name": "seaborn.set_style",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "scipy.stats.norm.pdf",... |
71781170429 | import cv2
# read your picture and store into variable "img"
img = cv2.imread('picture.jpg')

# scale image down 3 times (pyrDown halves width and height each step)
for i in range(3):
    img = cv2.pyrDown(img)
    # save this level of the pyramid (picture_scaled_0/1/2.jpg)
    cv2.imwrite(f'picture_scaled_{i}.jpg', img)
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.pyrDown",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 11,
"usage_type": "call"
}
] |
38231691013 | from django.shortcuts import render, get_object_or_404
from .models import Post, Group
def index(request):
    """Home page: render the ten newest posts."""
    latest_posts = Post.objects.order_by('-pub_date')[:10]
    context = {
        'posts': latest_posts,
        'title': 'Это главная страница проекта Yatube',
    }
    return render(request, 'posts/index.html', context)
def group_posts(request, slug):
    """Group page view: render the ten newest posts belonging to the group.

    Raises Http404 (via get_object_or_404) when no group matches the slug.
    """
    group = get_object_or_404(Group, slug=slug)
    recent_posts = Post.objects.filter(group=group).order_by('-pub_date')[:10]
    return render(request, 'posts/group_list.html', {
        'group': group,
        'posts': recent_posts,
        'title': 'Лев Толстой – зеркало русской революции.',
    })
| NikitaKorovykovskiy/Yatube_project | yatube/posts/views.py | views.py | py | 762 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "models.Post.objects.order_by",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "models.Post.objects",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "models.Post",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django... |
5479668707 | import argparse
import os, numpy as np
import os.path as osp
from multiprocessing import Process
import h5py
import json
os.environ["D4RL_SUPPRESS_IMPORT_ERROR"] = "1"
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
from maniskill2_learn.env import make_gym_env, ReplayMemory, import_env
from maniskill2_learn.utils.data import DictArray, GDict, f64_to_f32
from maniskill2_learn.utils.file import merge_h5_trajectory
from maniskill2_learn.utils.meta import get_total_memory, flush_print
from maniskill2_learn.utils.math import split_num
# from maniskill2_learn.utils.data import compress_f64
def auto_fix_wrong_name(traj):
    """Rename legacy singular trajectory keys to their plural form in place.

    E.g. "action" -> "actions", "env_state" -> "env_states". Accepts either a
    plain dict or a GDict (whose underlying .memory dict is used).

    Returns the mutated mapping.
    """
    if isinstance(traj, GDict):
        traj = traj.memory
    # Iterate over a snapshot of the keys: deleting entries from a dict while
    # iterating it directly raises "RuntimeError: dictionary changed size
    # during iteration" on Python 3.
    for key in list(traj):
        if key in ["action", "reward", "done", "env_level", "next_env_level", "next_env_state", "env_state"]:
            traj[key + "s"] = traj[key]
            del traj[key]
    return traj
tmp_folder_in_docker = "/tmp"
def render(env):
    """Render the environment once (human viewer); the frame is discarded."""
    env.render()  # return value intentionally unused (was a dead local)
def convert_state_representation(keys, args, worker_id, main_process_id):
    """Replay the trajectories named by `keys` and regenerate observations.

    For every trajectory key in the input h5 file, the environment is reset
    with the episode's reset_kwargs (read from the json sidecar), stepped /
    state-set through the recorded actions, and the regenerated
    (obs, action, reward, done) tuples are written to a per-worker temp h5
    file at <tmp_folder_in_docker>/<worker_id>.h5.

    keys            -- trajectory group names, e.g. "traj_0", "traj_1", ...
    args            -- parsed CLI namespace (see parse_args)
    worker_id       -- index of this worker; also names the output temp file
    main_process_id -- pid of the parent process (currently unused here)
    """
    # Environment construction kwargs; json env_kwargs may add extra entries
    # but must not override these explicitly requested ones.
    input_dict = {
        "env_name": args.env_name,
        "unwrapped": False,
        "obs_mode": args.obs_mode,
        "obs_frame": args.obs_frame,
        "reward_mode": args.reward_mode,
        "control_mode": args.control_mode,
        "n_points": args.n_points,
        "n_goal_points": args.n_goal_points,
        "camera_cfgs": {},
        "render_mode": 'human',
    }
    if args.enable_seg:
        input_dict["camera_cfgs"]["add_segmentation"] = True
    with open(args.json_name, "r") as f:
        json_file = json.load(f)
    env_kwargs = json_file["env_info"]["env_kwargs"]
    # Drop json keys that would shadow the explicit settings above.
    for k in input_dict:
        env_kwargs.pop(k, None)
    # update the environment creation args with the extra info from the json file, e.g., cabinet id & target link in OpenCabinetDrawer / Door
    input_dict.update(env_kwargs)
    env = make_gym_env(**input_dict)
    assert hasattr(env, "get_obs"), f"env {env} does not contain get_obs"
    # Map episode_id -> reset kwargs so each trajectory resets identically to
    # how it was originally recorded.
    reset_kwargs = {}
    for d in json_file["episodes"]:
        episode_id = d["episode_id"]
        r_kwargs = d["reset_kwargs"]
        reset_kwargs[episode_id] = r_kwargs
    cnt = 0
    output_file = osp.join(tmp_folder_in_docker, f"{worker_id}.h5")
    output_h5 = h5py.File(output_file, "w")
    input_h5 = h5py.File(args.traj_name, "r")
    for j, key in enumerate(keys):
        # Episode number is the trailing "_<n>" of the key.
        # NOTE(review): eval() on the key fragment — int() would be safer.
        cur_episode_num = eval(key.split('_')[-1])
        trajectory = GDict.from_hdf5(input_h5[key])
        trajectory = auto_fix_wrong_name(trajectory)
        print("Reset kwargs for the current trajectory:", reset_kwargs[cur_episode_num])
        env.reset(**reset_kwargs[cur_episode_num])
        # Two replay modes: full env_states recorded (set state each step), or
        # only the initial state recorded (step the actions forward).
        all_env_states_present = ('env_states' in trajectory.keys())
        if all_env_states_present:
            length = trajectory['env_states'].shape[0] - 1
        else:
            assert 'env_init_state' in trajectory.keys()
            length = trajectory['actions'].shape[0]
        assert length == trajectory['actions'].shape[0] == trajectory['success'].shape[0]
        replay = ReplayMemory(length)
        next_obs = None
        for i in range(length):
            if all_env_states_present:
                if next_obs is None:
                    env_state = trajectory["env_states"][i]
                    env.set_state(env_state)
                    obs = env.get_obs()
                else:
                    obs = next_obs
                _, reward, _, _, _ = env.step(trajectory["actions"][i])
                # ^ We cannot directly get rewards when setting env_state.
                # Instead, reward is only accurate after env.step(); otherwise e.g. grasp criterion will be inaccurate due to zero impulse
                next_env_state = trajectory["env_states"][i + 1]
                env.set_state(next_env_state)
                next_obs = env.get_obs()
            else:
                if i == 0:
                    env.set_state(trajectory["env_init_state"])
                if next_obs is None:
                    obs = env.get_obs()
                else:
                    obs = next_obs
                next_obs, reward, _, _, _ = env.step(trajectory["actions"][i])
            item_i = {
                "obs": obs,
                "actions": trajectory["actions"][i],
                "dones": trajectory["success"][i],
                "episode_dones": False if i < length - 1 else True,
                "rewards": reward,
            }
            if args.with_next:
                item_i["next_obs"] = next_obs
            # Shrink float64 payloads to float32 before storing.
            item_i = GDict(item_i).f64_to_f32()
            replay.push(item_i)
            if args.render:
                if args.debug:
                    print("reward", reward)
                render(env)
        # Only worker 0 reports progress to keep the log readable.
        if worker_id == 0:
            flush_print(f"Convert Trajectory: completed {cnt + 1} / {len(keys)}; this trajectory has length {length}")
        group = output_h5.create_group(f"traj_{cnt}")
        cnt += 1
        replay.to_hdf5(group, with_traj_index=False)
    output_h5.close()
    input_h5.close()
    flush_print(f"Finish using {output_file}")
def parse_args():
    """Parse CLI options; returns the argparse Namespace with traj/output
    paths normalised to absolute paths."""
    parser = argparse.ArgumentParser(description="Generate visual observations of trajectories given environment states.")
    # Configurations
    parser.add_argument("--num-procs", default=1, type=int, help="Number of parallel processes to run")
    parser.add_argument("--env-name", required=True, help="Environment name, e.g. PickCube-v0")
    parser.add_argument("--traj-name", required=True, help="Input trajectory path, e.g. pickcube_pd_joint_delta_pos.h5")
    parser.add_argument("--json-name", required=True, type=str,
        help="""
        Input json path, e.g. pickcube_pd_joint_delta_pos.json |
        **Json file that contains reset_kwargs is required for properly rendering demonstrations.
        This is because for environments using more than one assets, asset is different upon each environment reset,
        and asset info is only contained in the json file, not in the trajectory file.
        For environments that use a single asset with randomized dimensions, the seed info controls the specific dimension
        used in a certain trajectory, and this info is only contained in the json file.**
        """)
    parser.add_argument("--output-name", required=True, help="Output trajectory path, e.g. pickcube_pd_joint_delta_pos_pcd.h5")
    parser.add_argument("--max-num-traj", default=-1, type=int, help="Maximum number of trajectories to convert from input file")
    parser.add_argument("--obs-mode", default="pointcloud", type=str, help="Observation mode")
    parser.add_argument("--control-mode", default="pd_joint_delta_pos", type=str, help="Environment control Mode")
    parser.add_argument("--reward-mode", default="dense", type=str, choices=["dense", "sparse"], help="Reward Mode (dense / sparse)")
    parser.add_argument("--with-next", default=False, action="store_true", help="Add next_obs into the output file (for e.g. SAC+GAIL training)")
    parser.add_argument("--render", default=False, action="store_true", help="Render the environment while generating demonstrations")
    parser.add_argument("--debug", default=False, action="store_true", help="Debug print")
    parser.add_argument("--force", default=False, action="store_true", help="Force-regenerate the output trajectory file")
    # Extra observation args
    parser.add_argument("--enable-seg", action='store_true', help="Enable ground truth segmentation")
    # Specific point cloud generation args
    parser.add_argument("--n-points", default=1200, type=int,
        help="If obs_mode == 'pointcloud', the number of points to downsample from the original point cloud")
    parser.add_argument("--n-goal-points", default=-1, type=int,
        help="If obs_mode == 'pointcloud' and 'goal_pos' is returned from environment observations (in obs['extra']), \
            then randomly sample this number of points near the goal to the returned point cloud. These points serve as helpful visual cue. -1 = disable")
    parser.add_argument("--obs-frame", default="base", type=str, choices=["base", "world", "ee", "obj"],
        help="If obs_mode == 'pointcloud', the observation frame (base/world/ee/obj) to transform the point cloud.")
    args = parser.parse_args()
    # Absolute paths so workers spawned with a different cwd still resolve them.
    args.traj_name = osp.abspath(args.traj_name)
    args.output_name = osp.abspath(args.output_name)
    print(f"Obs mode: {args.obs_mode}; Control mode: {args.control_mode}")
    if args.obs_mode == 'pointcloud':
        print(f"Obs frame: {args.obs_frame}; n_points: {args.n_points}; n_goal_points: {args.n_goal_points}")
    return args
def main():
    """Drive the conversion: fan trajectories out to workers, then merge the
    per-worker temp h5 files into args.output_name.

    Relies on the module-level `args` namespace set under __main__.
    """
    os.makedirs(osp.dirname(args.output_name), exist_ok=True)
    if osp.exists(args.output_name) and not args.force:
        print(f"Trajectory generation for {args.env_name} with output path {args.output_name} has been completed!!")
        return
    # Opened "r+": this mutates the INPUT file by dropping stale "obs" groups.
    with h5py.File(args.traj_name, "r+") as h5_file:
        keys = sorted(h5_file.keys())
        # remove empty "obs" key from the input h5 file
        for key in keys:
            _ = h5_file[key].pop('obs', None)
    if args.max_num_traj < 0:
        args.max_num_traj = len(keys)
    args.max_num_traj = min(len(keys), args.max_num_traj)
    args.num_procs = min(args.num_procs, args.max_num_traj)
    keys = keys[: args.max_num_traj]
    extra_args = ()
    if args.num_procs > 1:
        # split_num yields per-worker chunk sizes; each worker gets a deep
        # copy of its slice so key lists are not shared across processes.
        running_steps = split_num(len(keys), args.num_procs)[1]
        flush_print(f"Num of trajs = {len(keys)}", f"Num of process = {args.num_procs}")
        processes = []
        from copy import deepcopy
        for i, x in enumerate(running_steps):
            p = Process(target=convert_state_representation, args=(
                deepcopy(keys[:x]), args, i, os.getpid(), *extra_args))
            keys = keys[x:]
            processes.append(p)
            p.start()
        for p in processes:
            p.join()
    else:
        running_steps = [len(keys)]
        convert_state_representation(keys, args, 0, os.getpid(), *extra_args)
    # Collect the per-worker temp outputs (one file per worker id).
    files = []
    for worker_id in range(len(running_steps)):
        tmp_h5 = osp.join(tmp_folder_in_docker, f"{worker_id}.h5")
        files.append(tmp_h5)
    from shutil import rmtree
    rmtree(args.output_name, ignore_errors=True)
    merge_h5_trajectory(files, args.output_name)
    # NOTE(review): rmtree targets directories; on plain .h5 files it fails
    # silently under ignore_errors=True, so the temp files may be left behind —
    # os.remove looks intended here. Confirm before changing.
    for file in files:
        rmtree(file, ignore_errors=True)
    print(f"Finish merging files to {args.output_name}")
if __name__ == "__main__":
args = parse_args()
main()
| haosulab/ManiSkill2-Learn | tools/convert_state.py | convert_state.py | py | 10,700 | python | en | code | 53 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_... |
6117949220 | from google.cloud import bigquery
import os
import sys
import json
import argparse
import gzip
import configparser
import pandas as pd
def main():
    """GCS -> BigQuery -> GCS pipeline for one study's summary statistics.

    1. LOAD:    parquet sumstats from GCS into a temp BigQuery table.
    2. QUERY:   join against the Open Targets variants table to attach rs_ids,
                writing the result to a destination table.
    3. EXTRACT: export the rs_id-annotated table back to GCS as gzipped TSV.

    All bucket/dataset locations come from the INI file given via --in_config.
    """
    # Load args
    args = parse_args()
    In_config=args.in_config
    Input_study=args.in_study
    Configs = configparser.ConfigParser()
    Configs.read(In_config)
    client = bigquery.Client()
    ## LOAD Job: load from GCS to BQ (table_id)
    # Would it be possible to make this table temporary? Or delete itself automatically after 1 week?
    Input_sumstats_path=Configs.get("config", "Input_sumstats_GCS")
    Input_study_URI=Input_sumstats_path+"/"+Input_study+".parquet/*.parquet"
    temp_BQ_sumstats=Configs.get("config", "Temp_BQ_sumstats")
    table_id = temp_BQ_sumstats+"."+Input_study
    print(table_id)
    load_job_config = bigquery.LoadJobConfig(source_format=bigquery.SourceFormat.PARQUET,)
    load_job = client.load_table_from_uri(
        Input_study_URI, table_id, job_config=load_job_config
    ) # Make an API request.
    load_job.result()  # Waits for the job to complete.
    destination_table = client.get_table(table_id)  # Make an API request.
    print("Loaded {} rows.".format(destination_table.num_rows))
    # Query Job: join on a chrom+pos+ref+alt composite key to pick up rs_ids.
    table_id = Configs.get("config", "Temp_BQ_sumstats")+"."+Input_study
    rsID_table = Configs.get("config", "RSID_BQ_sumstats")+"."+Input_study
    query_job_config = bigquery.QueryJobConfig(destination=rsID_table)
    query = """
        WITH SNP_info AS (
        SELECT
            CONCAT(CAST(chrom AS string), CAST(pos AS string), CAST(ref AS string), CAST(alt AS string)) AS identifier,
            ref,
            alt,
            n_total,
            pval,
            eaf,
            beta
        FROM
            `{0}` )
    SELECT
        rs_id AS RSID, ref AS A1, alt AS A2, n_total AS N, pval AS P, eaf AS EAF, beta AS BETA
    FROM
        SNP_info
    JOIN (
        SELECT
            CONCAT(CAST(chr_id AS string), CAST(position AS string), CAST(ref_allele AS string), CAST(alt_allele AS string)) AS identifier,
            rs_id
        FROM
            `open-targets-genetics.210608.variants` ) variants
    USING(identifier)
    """.format(table_id)
    query_job = client.query(query, job_config=query_job_config)
    query_job.result()
    # Extract Job: export the annotated table as gzip-compressed TSV.
    rsID_GCS_bucket=Configs.get("config", "Formatted_sumstats_GCS")
    rsID_GCS_URI=rsID_GCS_bucket+"/{0}.txt.gz".format(Input_study)
    extract_job_config = bigquery.ExtractJobConfig()
    extract_job_config.field_delimiter = '\t'
    extract_job_config.compression='GZIP'
    extract_job = client.extract_table(
        rsID_table,
        rsID_GCS_URI,
        # Location must match that of the source table.
        location="EU",
        job_config=extract_job_config
    )  # API request
    extract_job.result()  # Waits for job to complete.
    print(
        "Exported {} to {}".format(rsID_table, rsID_GCS_URI)
    )
def parse_args():
    """Parse the command-line arguments (config path + study ID)."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--in_config', metavar="<str>", type=str, required=True)
    cli.add_argument('--in_study', metavar="<str>", type=str, required=True,
                     help="Study ID of input sumstats")
    return cli.parse_args()
if __name__ == '__main__':
main() | xyg123/SNP_enrich_preprocess | scripts/LDSC_format_single_sumstat.py | LDSC_format_single_sumstat.py | py | 3,343 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery.Client",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 20,
"usage_type": "name"
},
{
"api_... |
8257233523 | # Use the environment variables DIANA_BROKER and DIANA_RESULT to attach the celery
# app to a message queue.
import os
from celery import Celery
# Celery application for DIANA; broker/result backends come from the
# DIANA_BROKER / DIANA_RESULT env vars, falling back to local redis.
app = Celery('diana')
app.conf.update(
    result_expires = 3600,
    # NOTE(review): pickle serialization accepts arbitrary payloads — only
    # safe when the broker is fully trusted.
    task_serializer = "pickle",
    accept_content = ["pickle"],
    result_serializer = "pickle",
    task_default_queue = 'default',
    task_routes={'*.gpu': {'queue': 'gpu'}, # Only GPU boxes
                 '*.file': {'queue': 'file'} }, # Access to shared fs
    include=['diana.star.tasks'],
    broker_url=os.environ.get('DIANA_BROKER', "redis://localhost:6379/1"),
    result_backend=os.environ.get('DIANA_RESULT', "redis://localhost:6379/2"),
    timezone = 'America/New_York'
)
print(os.environ.get('DIANA_BROKER', "redis://localhost:6379/1")) | derekmerck/DIANA | packages/diana/diana/star/app.py | app.py | py | 776 | python | en | code | 11 | github-code | 6 | [
{
"api_name": "celery.Celery",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"lin... |
3337645854 | import time
from pyspark import SparkContext,SparkConf
#-----------------------------------------------
#spark map/reduce practice exercises
def mymap(line):
    """Map one RDD element to its length (character count for a line)."""
    return len(line)
# NOTE (translated): accumulating into a plain global like this is NOT viable
# in Spark. Because of closure capture, each executor machine holds its own
# private copy of the referenced object, so the driver never sees the updates.
# Spark's Accumulator should be used instead.
nums_all=0
def test_foreach(nums):
    # Demonstrates the broken pattern above: mutate a driver-side global from
    # a foreach callback. Only appears to work in local single-process runs.
    global nums_all
    nums_all+=nums
    print(nums_all)
if __name__ == '__main__':
    # Local single-node Spark context for experimentation.
    conf = SparkConf().setAppName('test').setMaster('local')
    sc = SparkContext(conf=conf)
    text_rdd=sc.textFile('./data/*.txt')
    map_rdd=text_rdd.map(mymap)
    #count=map_rdd.foreach(test_foreach)
    # new_text_rdd=text_rdd.flatMap(lambda x:(x,'hahaha','xxxx'))
    # Split each line on tabs; first() triggers the lazy pipeline.
    new_rdd=text_rdd.map(lambda line:line.split('\t'))
    print(new_rdd.first())
    # Keep the driver alive so the Spark UI can be inspected.
    time.sleep(10000)
    # for i in map_rdd.take(5):
    #     print(i)
    #rdd2 = sc.textFile('./data/sequence_file', ) # (translated) reads a directory of files, returned as (filename, content) pairs
    #print(rdd2.first().encode('utf-8').decode())
| zml1996/learn_record | learn_spark/test_spark2.py | test_spark2.py | py | 1,066 | python | fa | code | 2 | github-code | 6 | [
{
"api_name": "pyspark.SparkConf",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkContext",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 30,
"usage_type": "call"
}
] |
25145650810 | import pytest
import datetime
import pytz
from mixer.backend.django import mixer
from telegram_message_api.helpers import (
ParsedData, ParseText, CategoryData,
)
@pytest.mark.parametrize(
    'text', [
        '150 test',
        '150 test 150',
        '150',
    ]
)
def test_parsetext_dataclass(text):
    """ParseText should parse amount '150' and expense 'test', or yield None
    when the text cannot be parsed (e.g. amount only)."""
    result = ParseText(text)()
    if result:
        assert result.amount == '150'
        assert result.expense == 'test'
    else:
        # PEP 8 (E711): compare to None by identity, not equality.
        assert result is None
def test_categorydata_dataclass(db):
    """CategoryData should produce the expected expense dict for '150 test'.

    NOTE(review): `now` is computed AFTER calling CategoryData, so this test
    is flaky if the second ticks over between the call and the strftime —
    consider freezing time instead.
    """
    category = mixer.blend('core.category')
    result = CategoryData(
        expense_text='150 test',
        category=category
    )()
    tz = pytz.timezone("Europe/Moscow")
    now = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
    assert result == {
        'amount': '150',
        'created': now,
        'category': category,
        'expense_text': '150 test',
    }
{
"api_name": "telegram_message_api.helpers.ParseText",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 12,
"usage_type": "attribute"
},
{
"a... |
3982882771 | from time import time
import sys, argparse, heightfield, os, povray_writer, load_info, read_lidar, cameraUtils, calculate_tile
#/media/pablo/280F8D1D0A5B8545/TFG_files/cliente_local/
#/media/pablo/280F8D1D0A5B8545/TFG_files/strummerTFIU.github.io/
def tiles_to_render(c1, c2, zoom):
    """
    Return the tiles needed to render the scene and the limit coordinates.
    Normal test
    >>> tiles_to_render((700000, 4600000), (702000, 4602000), 8)
    ((130, 122), (131, 123), (699452.3984375, 4600406.1953125), (704062.296875, 4595796.296875))
    Over limit test
    >>> tiles_to_render((700000, 4600000), (2702000, 4602000), 8)
    ('null', 'null', 'null', 'null')
    """
    # Tile indices covering each corner coordinate.
    x1, y1 = calculate_tile.calculate_tile(c1[0], c1[1], zoom)
    x2, y2 = calculate_tile.calculate_tile(c2[0], c2[1], zoom)
    if 'null' in (x1, y1, x2, y2):
        return ('null', 'null', 'null', 'null')
    # Force a square tile grid by growing the smaller dimension.
    side = max(x2 - x1 + 1, y2 - y1 + 1)
    x2 = x1 + side - 1
    y2 = y1 + side - 1
    # Limit coordinates: NW corner of the first tile, SE corner of the last.
    c_nw = calculate_tile.calculate_coordinates(x1, y1, zoom)
    c_se = calculate_tile.calculate_coordinates(x2 + 1, y2 + 1, zoom)
    if c_nw == 'null' or c_se == 'null':
        return ('null', 'null', 'null', 'null')
    return ((x1, y1), (x2, y2), c_nw, c_se)
def dir_view_tile(tile, dir_view, zoom):
    """
    Transform north tile number to specified POV tile number.
    >>> dir_view_tile((222, 111), 'E', 9)
    (111, 289)
    """
    # Guard clauses, one per supported view direction.
    if dir_view == 'S':
        return calculate_tile.tile_to_south(tile, zoom)
    if dir_view == 'E':
        return calculate_tile.tile_to_east(tile, zoom)
    if dir_view == 'W':
        return calculate_tile.tile_to_west(tile, zoom)
    # North (or any unrecognised direction) keeps the tile unchanged.
    return tile
def render(tile1, tile2, c1, c2, dir_view, angle, result, lidar):
    """
    Generate the POV-Ray scene for the given tile span and render it to
    `result` via the external `povray` binary.

    Returns (tile_size_x, tile_size_y, w_tiles) on success, or the sentinel
    triple ('null', 'null', 'null') when no MDT data covers the zone or the
    zone spans too many orthophotos (> 10).
    """
    # Apply a offset (in metres) so the camera sees terrain beyond the edge.
    off_c1_0 = 0
    off_c1_1 = 0
    off_c2_0 = 0
    off_c2_1 = 0
    if dir_view == 'N':
        off_c1_1 = 500
        off_c2_1 = -2500
    elif dir_view == 'S':
        off_c1_1 = 2500
        off_c2_1 = -500
    elif dir_view == 'E':
        off_c1_0 = -2500
        off_c2_0 = 500
    else:
        off_c1_0 = -500
        off_c2_0 = 2500
    # Find mdts and ortophotos and write heighfields info
    mdt_list = load_info.find_mdt(c1[0] + off_c1_0, c1[1] + off_c1_1, c2[0] + off_c2_0, c2[1] + off_c2_1)
    if len(mdt_list) == 0:
        return ('null', 'null', 'null')
    orto_list = load_info.find_orto(c1[0] + off_c1_0, c1[1] + off_c1_1, c2[0] + off_c2_0, c2[1] + off_c2_1, mdt_list)
    areas_list = load_info.find_a_interest(c1[0], c1[1], c2[0], c2[1])
    lidar_list = load_info.find_lidar(areas_list, c1, c2)
    if len(orto_list) <= 10:
        if lidar == True:
            spheres = read_lidar.generate_spheres(lidar_list, areas_list, c1, c2)
        else:
            spheres = ""
        # Create camera, heighfields and spheres
        cam = cameraUtils.calculate_camera(c1, c2, angle, dir_view)
        heightfields = povray_writer.write_heightfields(mdt_list, orto_list) # Generate a string which contain the heightfields to pov file.
        # Generate povray file
        tile_size_x = 256
        tile_size_y = int(256 / cam.get_aspectRatio() + 0.5)
        povray_writer.write_povray_file(cam, heightfields, spheres)
        w_tiles = tile2[0] - tile1[0] + 1
        h_tiles = tile2[1] - tile1[1] + 1
        w = tile_size_x * w_tiles
        h = tile_size_y * h_tiles
        # Rendering using new povray file
        print("Rendering " + result)
        os.system('povray +Irender.pov +O' + result + ' -D +A -GA +W' + str(w) + ' +H' + str(h) + '> /dev/null 2>&1')
        return (tile_size_x, tile_size_y, w_tiles)
    else:
        print("Error: The zone to render must be smaller (orto_list > 10). Try with other coordinates.")
        # Bug fix: callers unpack three values and compare them to 'null';
        # the previous implicit None return crashed them with a TypeError.
        return ('null', 'null', 'null')
def tessellation(result, tile1, tile_size_x, tile_size_y, w_tiles, zoom, dir_view, angle, dist_tile):
    """
    Create tiles for a few zooms and give them a number.

    Slices the rendered image `result` into tile_size_x x tile_size_y tiles
    for zoom level `zoom`, then repeatedly halves the image to generate every
    zoom level down to 8. Tiles are written under
    <dist_tile>/<angle>/<dir_view>/<zoom>/map_<x>_<y>.png and `result` is
    deleted at the end.
    """
    if dist_tile[-1] != "/":
        dist_tile += "/"
    print("Creating tiles from [" + str(tile1[0]) + ", " + str(tile1[1]) + "]...")
    # mkdir output (already-exists errors) is intentionally discarded.
    os.system("mkdir " + dist_tile + angle + '> /dev/null 2>&1')
    os.system("mkdir " + dist_tile + angle + "/" + dir_view + '> /dev/null 2>&1')
    os.system("mkdir " + dist_tile + angle + "/" + dir_view + "/" + str(zoom) + '> /dev/null 2>&1')
    os.system("convert " + result + " -crop " + str(tile_size_x) + "x" + str(tile_size_y) + " -set filename:tile \"%[fx:page.x/"
              + str(tile_size_x) + "+" + str(tile1[0]) + "]_%[fx:page.y/" + str(tile_size_y) + "+" + str(tile1[1]) + "]\" +adjoin \""
              + dist_tile + angle + "/" + dir_view + "/" + str(zoom) + "/map_%[filename:tile].png\"")
    count = int(zoom) - 8
    aux_zoom = int(zoom) - 1
    # Bug fix: use floor division throughout. Python 3 "/" yields floats, which
    # leaked values like "55.5"/"512.0" into tile filenames and the convert
    # -resize geometry on subsequent zoom levels.
    aux1_x = tile1[0] // 2
    aux1_y = tile1[1] // 2
    while(count > 0):
        # -1 zoom lvl: halve the tile count and shrink the image accordingly.
        w_tiles //= 2
        w = tile_size_x * w_tiles
        h = tile_size_y * w_tiles
        os.system("mkdir " + dist_tile + angle + "/" + dir_view + "/" + str(aux_zoom) + '> /dev/null 2>&1')
        os.system("convert " + result + " -resize " + str(w) + "x" + str(h) + " " + result)
        os.system("convert " + result + " -crop " + str(tile_size_x) + "x" + str(tile_size_y) + " -set filename:tile \"%[fx:page.x/"
                  + str(tile_size_x) + "+" + str(aux1_x) + "]_%[fx:page.y/" + str(tile_size_y) + "+" + str(aux1_y) + "]\" +adjoin \""
                  + dist_tile + angle + "/" + dir_view + "/" + str(aux_zoom) + "/map_%[filename:tile].png\"")
        count -= 1
        aux_zoom -= 1
        aux1_x //= 2
        aux1_y //= 2
    os.system("rm " + result)
def main():
# Arguments
parser = argparse.ArgumentParser(description="First version of Pablo's TFG.")
parser.add_argument("mdt_directory", help="Directory of the MDT files to transform.")
parser.add_argument("png_directory", help="PNG files transformed destination directory.")
parser.add_argument("orto_directory", help="Ortophotos files directory.")
parser.add_argument("lidar_directory", help="Directory of LAZ files.")
parser.add_argument("dir_view", help="Direction of the view (only N, S, E or W).")
parser.add_argument("angle", help="Angle of the view (only 45 or 30).")
parser.add_argument("zoom", help="Zoom.")
parser.add_argument("--max_height", dest="max_height", type=int, default=2200, metavar="MAX_HEIGHT",
help="Max height transforming MDT files. Higher heights will be considered MAX_HEIGHT " +
"(default value = 2200)")
parser.add_argument("--renderAll", help="Render all available zones.", action="store_true")
parser.add_argument("--renderTiles", help="Render especified tiles.", action="store_true")
parser.add_argument("--transform", help="Transform all mdt in mdt_directory from .asc to .png.", action="store_true")
parser.add_argument("--load", help="Load info from mdts, pnoas and lidar files.", action="store_true")
parser.add_argument("--tile", help="Tessellation result/s.", action="store_true")
parser.add_argument("--deletePov", help="Delete povray file.", action="store_true")
parser.add_argument("--lidar", help="Activate LiDAR render.", action="store_true")
args = parser.parse_args()
if (args.angle == "30") or (args.angle == "45"):
if (args.dir_view == 'S') or (args.dir_view == 'N') or (args.dir_view == 'W') or (args.dir_view == 'E'):
t_exe_i = time()
if args.mdt_directory[-1] != "/":
args.mdt_directory += "/"
if args.png_directory[-1] != "/":
args.png_directory += "/"
if args.orto_directory[-1] != "/":
args.orto_directory += "/"
if args.lidar_directory[-1] != "/":
args.lidar_directory += "/"
# Transform to heightfield
if args.transform:
os.system('mkdir ' + args.png_directory)
for base, dirs, files in os.walk(args.mdt_directory):
for asc_file in files:
heightfield.transform_file_to_heightfield(args.mdt_directory + asc_file, args.png_directory
+ asc_file[:-4] + ".png", args.max_height)
# Load info data to file
if args.load:
load_info.load_info(args.png_directory, args.orto_directory, args.lidar_directory)
if args.tile:
dist_tile = input("Introduce tiles destination directory: ")
else:
os.system("mkdir result_dir")
dist_tile = "./result_dir/"
minX = 560000
maxX = 789000
minY = 4410000
maxY = 4745000
if args.renderTiles:
tile_init = input("Introduce tile number (x y) for upper left vertex: ").split()
tile_init = (int(tile_init[0]), int(tile_init[1]))
if tile_init[0] >= 0 and tile_init[0] <= (2 ** int(args.zoom) - 1) and tile_init[1] >= 0 or tile_init[1] <= (2 ** int(args.zoom) - 1):
tile_end = input("Introduce tile number (x,y) for bottom right vertex: ").split()
tile_end = (int(tile_end[0]), int(tile_end[1]))
if tile_end[0] >= 0 and tile_end[0] <= (2 ** int(args.zoom) - 1) and tile_end[1] >= 0 or tile_end[1] <= (2 ** int(args.zoom) - 1
and tile_end[0] >= tile_init[0] and tile_end[1] >= tile_init[1]):
result = "./result.png"
if args.dir_view == 'S':
tile_1 = calculate_tile.tile_from_south(tile_end, int(args.zoom))
tile_2 = calculate_tile.tile_from_south(tile_init, int(args.zoom))
elif args.dir_view == 'E':
tile_1_aux = calculate_tile.tile_from_east(tile_init, int(args.zoom))
tile_2_aux = calculate_tile.tile_from_east(tile_end, int(args.zoom))
tile_1 = (tile_2_aux[0], tile_1_aux[1])
tile_2 = (tile_1_aux[0], tile_2_aux[1])
elif args.dir_view == 'W':
tile_1_aux = calculate_tile.tile_from_west(tile_init, int(args.zoom))
tile_2_aux = calculate_tile.tile_from_west(tile_end, int(args.zoom))
tile_1 = (tile_1_aux[0], tile_2_aux[1])
tile_2 = (tile_2_aux[0], tile_1_aux[1])
else:
tile_1 = tile_init
tile_2 = tile_end
tile_1 = [x - 1 if x % 2 != 0 else x for x in tile_1]
tile_2 = [x - 1 if x % 2 == 0 else x for x in tile_2]
tile1_x = tile_1[0]
tile1_y = tile_1[1]
tile2_x = tile_2[0]
tile2_y = tile_2[1]
n_tiles = 2 ** (int(args.zoom) - 8)
print([tile1_x, tile1_y])
print([tile2_x, tile2_y])
while tile1_x % n_tiles != 0:
tile1_x -= 1
while tile1_y % n_tiles != 0:
tile1_y -= 1
while tile2_x % n_tiles == 0 and n_tiles != 1:
tile2_x -= 1
while tile2_x % n_tiles == 0 and n_tiles != 1:
tile2_x -= 1
print([tile1_x, tile1_y])
print([tile2_x, tile2_y])
x_number = 0
while(tile1_x + x_number <= tile2_x):
aux1_x = tile1_x + x_number
y_number = 0
while(tile1_y + y_number <= tile2_y):
aux1_y = tile1_y + y_number
c_nw = calculate_tile.calculate_coordinates(aux1_x, aux1_y, int(args.zoom))
c_se = calculate_tile.calculate_coordinates(aux1_x + n_tiles, aux1_y + n_tiles, int(args.zoom))
if c_nw == 'null' or c_se == 'null':
print("ERROR: Wrong tiles.")
else:
print("Rendering from tile [" + str(aux1_x) + ", " + str(aux1_y) + "] to [" + str(aux1_x + n_tiles - 1)
+ "," + str(aux1_y + n_tiles -1) + "] with coordinates from [" + str(c_nw[0]) + ", " + str(c_nw[1])
+ "] to [" + str(c_se[0]) + ", " + str(c_se[1]) + "].")
tile_size_x, tile_size_y, w_tiles = render((aux1_x, aux1_y), (aux1_x + n_tiles - 1, aux1_y + n_tiles - 1), c_nw, c_se, args.dir_view, args.angle, result, args.lidar)
if tile_size_x == 'null' and tile_size_y == 'null':
print("ERROR: Nothing to render. Continuing...")
else:
if args.dir_view == 'S':
tile_init = calculate_tile.tile_to_south((aux1_x + n_tiles - 1, aux1_y + n_tiles - 1), int(args.zoom))
elif args.dir_view == 'E':
tile1_aux = calculate_tile.tile_to_east((aux1_x, aux1_y), int(args.zoom))
tile2_aux = calculate_tile.tile_to_east((aux1_x + n_tiles - 1, aux1_y + n_tiles - 1), int(args.zoom))
tile_init = (tile1_aux[0], tile2_aux[1])
elif args.dir_view == 'W':
tile1_aux = calculate_tile.tile_to_west((aux1_x, aux1_y), int(args.zoom))
tile2_aux = calculate_tile.tile_to_west((aux1_x + n_tiles - 1, aux1_y + n_tiles - 1), int(args.zoom))
tile_init = (tile2_aux[0], tile1_aux[1])
else:
tile_init = (aux1_x, aux1_y)
tessellation(result, tile_init, tile_size_x, tile_size_y, w_tiles, args.zoom, args.dir_view, args.angle, dist_tile)
y_number += n_tiles
x_number += n_tiles
else:
print("ERROR: Introduce tiles correctly.")
else:
print("ERROR: Introduce tiles correctly.")
else:
if args.renderAll:
if int(args.zoom) > 7 and int(args.zoom) < 13:
iTile_z5_x = 9
iTile_z5_y = 8
fTile_z5_x = 26
fTile_z5_y = 25
tile1_x = iTile_z5_x * (2 ** (int(args.zoom) - 5))
tile1_y = iTile_z5_y * (2 ** (int(args.zoom) - 5))
tile2_x = fTile_z5_x * (2 ** (int(args.zoom) - 5))
tile2_y = fTile_z5_y * (2 ** (int(args.zoom) - 5))
#tile1_x = 672
result = "./result.png"
n_tiles = 2 ** (int(args.zoom) - 8)
x_number = 0
while(tile1_x + x_number <= tile2_x):
aux1_x = tile1_x + x_number
y_number = 0
while(tile1_y + y_number <= tile2_y):
aux1_y = tile1_y + y_number
c_nw = calculate_tile.calculate_coordinates(aux1_x, aux1_y, int(args.zoom))
c_se = calculate_tile.calculate_coordinates(aux1_x + n_tiles, aux1_y + n_tiles, int(args.zoom))
if c_nw == 'null' or c_se == 'null':
print("ERROR: Wrong tiles.")
else:
print("Rendering from tile [" + str(aux1_x) + ", " + str(aux1_y) + "] to [" + str(aux1_x + n_tiles - 1)
+ "," + str(aux1_y + n_tiles -1) + "] with coordinates from [" + str(c_nw[0]) + ", " + str(c_nw[1])
+ "] to [" + str(c_se[0]) + ", " + str(c_se[1]) + "].")
tile_size_x, tile_size_y, w_tiles = render((aux1_x, aux1_y), (aux1_x + n_tiles - 1, aux1_y + n_tiles - 1), c_nw, c_se, args.dir_view, args.angle, result, args.lidar)
if tile_size_x == 'null' and tile_size_y == 'null':
print("ERROR: Nothing to render. Continuing...")
else:
if args.dir_view == 'S':
tile_init = calculate_tile.tile_to_south((aux1_x + n_tiles - 1, aux1_y + n_tiles - 1), int(args.zoom))
elif args.dir_view == 'E':
tile1_aux = calculate_tile.tile_to_east((aux1_x, aux1_y), int(args.zoom))
tile2_aux = calculate_tile.tile_to_east((aux1_x + n_tiles - 1, aux1_y + n_tiles - 1), int(args.zoom))
tile_init = (tile1_aux[0], tile2_aux[1])
elif args.dir_view == 'W':
tile1_aux = calculate_tile.tile_to_west((aux1_x, aux1_y), int(args.zoom))
tile2_aux = calculate_tile.tile_to_west((aux1_x + n_tiles - 1, aux1_y + n_tiles - 1), int(args.zoom))
tile_init = (tile2_aux[0], tile1_aux[1])
else:
tile_init = (aux1_x, aux1_y)
tessellation(result, tile_init, tile_size_x, tile_size_y, w_tiles, args.zoom, args.dir_view, args.angle, dist_tile)
y_number += n_tiles
x_number += n_tiles
else:
print("ERROR: zoom for --renderAll option must be 7 < z < 13.")
else:
# Ask for coordinates
coordinates = input("Introduce UTM X and Y coordinates, separated by a blank space and respecting the values min "
+ "and max for the coordinates, for upper left vertex (" + str(minX) + " <= X1 <= " + str(maxX) + " " + str(minY)
+ " <= Y1 <= " + str(maxY) + "): ")
coordinates1 = coordinates.split()
if (len(coordinates1) == 2 and float(coordinates1[0]) >= minX and float(coordinates1[0]) <= maxX and
float(coordinates1[1]) >= minY and float(coordinates1[1]) <= maxY):
coordinates = input("Introduce UTM X and Y coordinates, separated by a blank space and respecting the values min "
+ "and max for the coordinates, for bottom right vertex (" + coordinates1[0] + " <= X2 <= " + str(maxX) + " " + str(minY)
+ " <= Y2 <= " + coordinates1[1] + "): ")
coordinates2 = coordinates.split()
if (len(coordinates2) == 2 and float(coordinates2[0]) >= minX and float(coordinates2[0]) <= maxX and
float(coordinates2[1]) >= minY and float(coordinates2[1]) <= maxY and coordinates1[0] < coordinates2[0]
and coordinates1[1] > coordinates2[1]):
# Offset to adjust later during join process
coordinates1[0] = float(coordinates1[0])
coordinates2[0] = float(coordinates2[0])
coordinates1[1] = float(coordinates1[1])
coordinates2[1] = float(coordinates2[1])
result = "./result.png"
tile1, tile2, c_nw, c_se = tiles_to_render(coordinates1, coordinates2, int(args.zoom))
if tile_1 == 'null':
print("ERROR: Introduce UTM coordinates correctly.")
else:
if args.dir_view == 'S':
tile_init = calculate_tile.tile_to_south(tile2, int(args.zoom))
elif args.dir_view == 'E':
tile1_aux = calculate_tile.tile_to_east(tile1, int(args.zoom))
tile2_aux = calculate_tile.tile_to_east(tile2, int(args.zoom))
tile_init = (tile1_aux[0], tile2_aux[1])
elif args.dir_view == 'W':
tile1_aux = calculate_tile.tile_to_west(tile1, int(args.zoom))
tile2_aux = calculate_tile.tile_to_west(tile2, int(args.zoom))
tile_init = (tile2_aux[0], tile1_aux[1])
else:
tile_init = tile1
tile_size_x, tile_size_y, w_tiles = render(tile1, tile2, c_nw, c_se, args.dir_view, args.angle, result, args.lidar)
tessellation(result, tile_init, tile_size_x, tile_size_y, w_tiles, args.zoom, args.dir_view, args.angle, dist_tile)
print("DONE!")
else:
print("ERROR: Introduce UTM coordinates correctly.")
else:
print("ERROR: Introduce UTM coordinates correctly.")
if args.deletePov:
os.system('rm render.pov')
t_exe_f = time()
t_exe = t_exe_f - t_exe_i
print("Execution time: " + str(int(t_exe / 60)) + "min " + str(int(t_exe % 60)) + "s.")
else:
print("ERROR: dir_view must be N, S, W or E.")
else:
print("ERROR: angle must be 45 or 30.")
if __name__ == "__main__":
main() | strummerTFIU/TFG-IsometricMaps | src/main_program.py | main_program.py | py | 18,332 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "calculate_tile.calculate_tile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "calculate_tile.calculate_tile",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "calculate_tile.calculate_coordinates",
"line_number": 41,
"usage_type": "call... |
37056080424 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Simple univariate BLUP implementation for starting values estimation."""
import numpy as np
from scipy.optimize import minimize
def grad(sigmas: np.ndarray, y: np.ndarray, k: np.ndarray):
    """Gradient-style quantities for the two variance components used by
    `blup`'s optimizer.

    Returns a length-2 array, or ``[nan, nan]`` when the implied
    precision ``1 / (sigmas[0] + sigmas[1] * k)`` becomes degenerate.
    """
    precision = 1 / (sigmas[0] + sigmas[1] * k)
    if np.any(precision < 1e-12):
        return [np.nan, np.nan]
    weighted = y * precision
    gradient = np.zeros(2)
    gradient[0] = np.sum(weighted ** 2) - np.sum(np.log(precision))
    gradient[1] = np.sum(weighted * k * y) - np.sum(np.log(precision ** 2 * k))
    return gradient
def obj(sigmas: np.ndarray, y: np.ndarray, k: np.ndarray):
    """Objective minimized by `blup` over the two variance components.

    Returns ``nan`` when the implied precision is degenerate so the
    optimizer backs away from that region.
    """
    precision = 1 / (sigmas[0] + sigmas[1] * k)
    if np.any(precision < 1e-8):
        return np.nan
    weighted = y * precision
    return np.sum(weighted * y) - np.sum(np.log(precision))
def blup(y: np.ndarray, k: np.ndarray, p=0.8, maxiter=50):
    """
    Calculate BLUP estimate for U of a single variable.

    Parameters
    ----------
    y : np.ndarray
        Observations of a given variable.
    k : np.ndarray
        K matrix.  NOTE(review): all arithmetic below (``1 / k``) is
        elementwise, so k is effectively treated as a vector/diagonal —
        confirm callers pass it in that form.
    p : float, optional
        Expected ratio of variable variance to random effect variance. Used for
        starting values only. The default is 0.8.
    maxiter : int, optional
        Maximal number of iterations. Better not be too high or estimation
        process could take noticable time in some cases. The default is 50.

    Returns
    -------
    tuple
        ``(U, s)``: the random effects estimate (BLUP) and the fitted pair
        of variance components found by SLSQP.
    """
    # Split the sample variance into the two components as a starting point.
    v = np.var(y)
    x0 = np.array([p * v, (1 - p) * v])
    # Minimize `obj` with analytic gradient `grad`; both components are
    # constrained to be non-negative.
    s = minimize(lambda x: obj(x, y, k), x0, jac=lambda x: grad(x, y, k),
                 method="SLSQP", options={'maxiter': maxiter},
                 bounds=([0, None], [0, None])
                 ).x
    # Combine the two fitted precisions (elementwise) and shrink y by them.
    v = 1 / (1 / s[0] + (1 / s[1]) * (1 / k))
    return y * v / s[0], s
| planplus/pysem | pysem/univariate_blup.py | univariate_blup.py | py | 1,705 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "numpy.ndarray",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.any",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_nu... |
71666200828 | from django.contrib import admin
from .models import newdoc
class DocAdmin(admin.ModelAdmin):
    """Admin configuration for `newdoc`: grouped edit form, date filter,
    list columns, and title search."""

    # Edit-form layout (same grouping and field order as before).
    fieldsets = [
        (None, {"fields": ["title"]}),
        ("Date information", {"fields": ["created_time"]}),
        (None, {"fields": ["modified_time"]}),
        ("Author information", {"fields": ["author"]}),
        (None, {"fields": ["body"]}),
    ]
    # Change-list configuration.
    search_fields = ["title"]
    list_display = ('title', 'created_time', 'author')
    list_filter = ["created_time"]
#class uploaded(admin.ModelAdmin):
# Register models here.
admin.site.register(newdoc, DocAdmin)
| JarvisDong/Project-CGD | mysite/documents/admin.py | admin.py | py | 652 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 20,
"usage_type": "call"
},... |
72060297789 | from flask import render_template, Flask, request, jsonify, url_for, redirect
import requests
from flask_pymongo import PyMongo
import json
from Model import *
import time
def after_request(response):
    """Attach permissive CORS headers to every outgoing response."""
    cors_headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Methods': 'PUT,GET,POST,DELETE',
        'Access-Control-Allow-Headers': 'Content-Type,Authorization',
    }
    for name, value in cors_headers.items():
        response.headers[name] = value
    return response
global Username
global token
token = ""
app = Flask(__name__)
app.after_request(after_request)
app.config['MONGO_URI'] = 'mongodb://comp9900:z12345@ds161529.mlab.com:61529/comp9900_2019'
mongo = PyMongo(app)
@app.route('/',methods=['GET', 'POST'])
def home_page():
    """Serve the main landing/search page."""
    return render_template("test.html"), 200


@app.route('/user',methods=['GET'])
def personalpage():
    """Serve the personal-information page."""
    return render_template("Personalinfo.html"), 200
@app.route('/signout', methods=['POST'])
def signout():
    """Clear the session globals so later login checks report signed-out."""
    global token, Username
    Username = ''
    token = ''
    return "ok"
@app.route('/login', methods=['GET'])
def login_check():
    """Report session state: '0' when nobody is signed in, else the username."""
    global token
    return '0' if token == '' else Username
@app.route('/login', methods=['POST'])
def login():
    """Authenticate against the backend API.

    On success stores the session token and username in module globals.
    Returns sentinel strings the front-end checks: "No_account",
    "Wrong_password", or "ok".
    """
    global token
    global Username
    global Password
    Username = request.form["sign_in_account"]
    Password = request.form["sign_in_password"]
    # NOTE(review): credentials travel as URL query parameters and may end up
    # in server logs — consider sending them in a request body instead.
    url = "http://127.0.0.1:5000/anhao0522/client/v1/login?username={Username}&password={Password}".format(Username=Username,Password=Password)
    response = requests.get(url, headers={"Accept": "application/json"})
    data = response.json()
    print(data)
    print(Username)
    if data['reply'] == "NU":       # backend: no such user
        return "No_account"
    elif data['reply'] == "NM":     # backend: password mismatch
        return "Wrong_password"
    else:
        # Any other reply value is the session token itself.
        token = data['reply']
        return "ok"


@app.route('/signup',methods=['GET', 'POST'])
def signup():
    """Create a backend account with empty profile defaults.

    Returns "Account name exist" (backend 400) or "ok" for POSTs.
    NOTE(review): the GET branch falls through and returns None, which Flask
    rejects at runtime — presumably unreachable from the UI; confirm.
    """
    if request.method == 'POST':
        Username = request.form["account"]
        Password1 = request.form["password_1"]
        # Fresh account document with empty profile fields.
        dict1 = {"customer_id": Username, "password": Password1, "first_name": "", "last_name": "", "address": "",
                 "email": "",
                 "birthday": "", "credit": 0, "contact_number": "", "gender": "", "account_type": False,
                 "host_order": [], "trip_order": [],
                 "properties": [], "new_message": [], "message_box": []}
        url = "http://127.0.0.1:5000/anhao0522/client/v1/signup"
        response = requests.post(url, headers={"Accept": "application/json"}, json=dict1)
        if response.status_code == 400:
            return "Account name exist"
        else:
            return "ok"
    else:
        pass


@app.route('/event', methods=['POST'])
def get_event():
    """Look up local events for a location via the Yelp wrapper from Model."""
    location = request.form["location"]
    yelp = Yelp()
    result = yelp.search_events(location)["events"]
    # result = yelp.search_restaurant("gym","kingsford")["businesses"]
    return jsonify(result)
@app.route('/order_delete', methods=['POST'])
def order_delete():
    """Cancel or reject an order via the backend API.

    Form fields: `order_id`, and `request_type` selecting the endpoint:
    '0' guest cancels own order, '1' landlord declines (cancel_order=false),
    '2' landlord cancels (cancel_order=true).
    Returns "timeout" (401), "ok" (200) or "Something wrong".
    """
    global Username
    global token
    order_id = request.form["order_id"]
    request_type = request.form["request_type"]
    if request_type == '0':
        url = f"http://127.0.0.1:5000/anhao0522/client/v1/user/{Username}/order"
        url += f"?order_id={order_id}"
    elif request_type == '1':
        url = f"http://127.0.0.1:5000/anhao0522/client/v1/landlord/{Username}/order"
        url += f"?order_id={order_id}&cancel_order=false"
    elif request_type == '2':
        url = f"http://127.0.0.1:5000/anhao0522/client/v1/landlord/{Username}/order"
        url += f"?order_id={order_id}&cancel_order=true"
    else:
        # Fix: an unknown request_type previously left `response` unbound and
        # raised UnboundLocalError below; fail cleanly instead.
        return "Something wrong"
    # All three cases share the same DELETE call and header.
    response = requests.delete(url, headers={"auth_token": token})
    if response.status_code == 401:
        print("401")
        return "timeout"
    elif response.status_code == 200:
        return "ok"
    else:
        return "Something wrong"
@app.route('/new_message_read', methods=['POST'])
def new_message_read():
    """Clear a 'new message' notification by posting it back to the backend."""
    global Username
    global token
    delete_new = request.form["delete_new"]
    url = "http://127.0.0.1:5000/anhao0522/client/v1/messageBox"
    # mid without a "---" separator (contrast with new_message below) —
    # presumably signals "mark read" to the backend; confirm against the API.
    body = {"mid": f"{Username}", "time": "", "text": delete_new}
    response = requests.post(url, headers={"auth_token": token}, json=body)
    if response.status_code == 401:
        print("401")
        return "timeout"    # auth token expired
    elif response.status_code == 200:
        return "ok"
    else:
        return "Something wrong"


@app.route('/new_message', methods=['POST'])
def new_message():
    """Send a chat message from the logged-in user to another user."""
    global Username
    global token
    url = "http://127.0.0.1:5000/anhao0522/client/v1/messageBox"
    send_to = request.form["send_to"]
    message = request.form["message"]
    message_time = request.form["message_time"]
    # Conversation id is "<sender>---<recipient>".
    body = {"mid":f"{Username}---{send_to}","time":message_time,"text":message}
    response = requests.post(url, headers={"auth_token": token}, json=body)
    if response.status_code == 401:
        print("401")
        return "timeout"
    elif response.status_code == 200:
        return "ok"
    else:
        return "Something wrong"


@app.route('/new_comment', methods=['POST'])
def new_comment():
    """Attach a rating/comment to a property for a completed order."""
    global Username
    global token
    comment_pid = request.form["comment_pid"]   # property id
    comment_text = request.form["comment_text"]
    rating_num = request.form["rating_num"]
    comment_oid = request.form["comment_oid"]   # order id
    time = request.form["time"]                 # NOTE(review): shadows the `time` module locally
    url = f"http://127.0.0.1:5000/anhao0522/client/v1/accommodation/room/{comment_pid}/comment?order_id={comment_oid}"
    body = {
        "commenter": Username,
        "avg_mark": rating_num,
        "cleanliness_mark": 0,
        "facility_mark": 0,
        "attitude_mark": 0,
        "text": comment_text,
        "reply": "",
        "photo": [],
        "date": time
    }
    response = requests.post(url, headers={"auth_token": token}, json=body)
    if response.status_code == 401:
        print("401")
        return "timeout"
    elif response.status_code == 201:   # backend replies 201 Created for comments
        return "ok"
    else:
        return "Something wrong"
@app.route('/message_del', methods=['GET'])
def message_del():
    """Delete a whole conversation (its "A---B" id) from the message box."""
    AB = request.args["AB"]
    print(AB)
    url = f"http://127.0.0.1:5000/anhao0522/client/v1/messageBox?AB={AB}"
    response = requests.delete(url, headers={"auth_token": token})
    if response.status_code == 401:
        print("401")
        return "wrong"
    elif response.status_code == 200:
        return "ok"
    else:
        return "Something wrong"


@app.route('/personalinfo', methods=['GET'])
def personalinfo():
    """Fetch the profile of `sign_in_account` from the backend and pass it through."""
    global Username
    global token
    url = "http://127.0.0.1:5000/anhao0522/client/v1/user/"
    sign_in_account = request.args["sign_in_account"]
    url = url + sign_in_account
    print(url)
    print(sign_in_account)
    response = requests.get(url, headers={"auth_token": token})
    print(response.json())
    return jsonify(response.json())


@app.route('/chatbot_msg', methods=['POST'])
def chatbot_msg():
    """Forward a chat message to the backend chatbot and return its reply."""
    global Username
    global token
    message = request.form["message"]
    url = "http://127.0.0.1:5000/anhao0522/client/v1/chatbot?"
    # NOTE(review): message is not URL-encoded; spaces/'&' would break the query.
    url += f"q={message}"
    response = requests.post(url, headers={"auth_token": token})
    return jsonify(response.json())


@app.route('/s/<location>/all')
def show_list(location):
    """Search listings for a location.

    NOTE(review): this endpoint looks unfinished/dead — the route only
    accepts GET (no methods= given) so the POST check never passes and the
    view returns None; the .format() call also supplies no arguments for
    its placeholders and would raise if reached.
    """
    if request.method == 'POST':
        destination = location
        num_persons = request.args["numpeople"]
        arrive_date = request.args["checkin"]
        departure_date = request.args["checkout"]
        if destination != None and num_persons != None and arrive_date != None and departure_date != None:
            url = "http://127.0.0.1:5000/anhao0522/client/v1/accommodation/all?" \
                  "location={location}&checkin={checkin}&checkout={checkout}&numberofpeople={num}&searchtype={type}".format()
            pass
@app.route('/<id>/property_post')
def picture(id):
    """Serve the new-property form; only the logged-in landlord may open it."""
    global Username
    global token
    if id != Username:
        # Anyone else is bounced back to the landing page.
        return redirect(url_for('home_page'))
    return render_template('NewProperty.html', id=id)
@app.route('/<id>/post_done',methods=['GET','POST'])
def post_property(id):
    """Create a new property listing from the submitted form.

    Stores uploaded photos via GridFS, geocodes the address through the
    Google Maps API, assembles the listing document and posts it to the
    backend. Only the logged-in landlord (`id == Username`) may post; a GET
    simply redirects home.
    """
    global Username
    global token
    if id != Username:
        return redirect(url_for('home_page'))
    if request.method == 'POST':
        #print(request.form)
        #print(request.values.get('Pet'))
        # Collect the facility/type answers in one dict.
        tmp_dic = {}
        tmp_dic.setdefault('property_type',request.values['property_type'])
        tmp_dic.setdefault('property_bedroom', request.values['property_bedroom'])
        tmp_dic.setdefault('property_bathroom', request.values['property_bathroom'])
        tmp_dic.setdefault('property_parking', request.values['property_parking'])
        tmp_dic.setdefault('property_wifi', request.values['WIFI'])
        tmp_dic.setdefault('property_air', request.values['Air_condition'])
        tmp_dic.setdefault('property_cook', request.values['Cooking'])
        tmp_dic.setdefault('property_pet', request.values['Pet'])
        property_location = request.form['property_location'].lower()
        property_suburb = request.form['property_suburb'].lower()
        property_address = request.form['property_address']
        property_size = request.form['property_size']
        property_price = request.form['property_price']
        property_max_people = request.form['property_max_people']
        property_start = request.form['start_date']
        property_end = request.form['end_date']
        property_title = request.form['property_title']
        property_description = request.form['property_description']
        # Map the YES/NO radio answers onto booleans; other values pass through.
        for key in tmp_dic:
            if tmp_dic[key] == "YES":
                tmp_dic[key] = True
            elif tmp_dic[key] == "NO":
                tmp_dic[key] = False
            else:
                continue
        #print(tmp_dic)
        # Save every uploaded photo to GridFS, keyed by a timestamp id.
        # NOTE(review): time.time() truncated to seconds — photos uploaded in
        # the same second share an id; confirm this is acceptable.
        photo_id = []
        if 'upload' in request.files:
            for file in request.files.getlist("upload"):
                #print("file ", file, type(file), file.filename)
                mongo.save_file(file.filename, file)
                num_photo = str(int(time.time()))
                photo_id.append(num_photo)
                mongo.db.test.insert_one({'id': num_photo, 'photo_name': file.filename})
        #for i in range(len(request.files.getlist('upload'))):
        #    photo = request.files.getlist('upload')
        #    print(photo.filename)
        #mongo.save_file(photo.filename, photo)
        #num_photo = str(int(time.time()))
        #mongo.db.test.insert_one({'id': num_photo, 'photo_name': photo.filename})
        #id = 'Cindy'
        # Next property id = last stored property's id + 1.
        lis_db = list(mongo.db.property_collection.find())
        t_id = lis_db[-1]['property_id']
        #print(t_id)
        # Geocode the street address to lat/lng via Google Maps.
        url = "https://maps.google.com/maps/api/geocode/json?key=AIzaSyAANyBQ6ikIoa53iMdahFL99Bjt0oBmWpc&address={address}&sensor=false".format(
            address=property_address)
        data = requests.request("GET", url)
        ddic_1 = data.json()['results'][0]['geometry']['location']
        lng = ddic_1['lng']
        lat = ddic_1['lat']
        # Expand the availability range into one {time, status} entry per day.
        ava_time = get_date_list(property_start,property_end)
        ava_time_l = []
        for i in ava_time:
            ava_time_dic = {}
            ava_time_dic.setdefault('time',i)
            ava_time_dic.setdefault('status',True)
            ava_time_l.append(ava_time_dic)
        # Assemble the listing document expected by the backend API.
        post_data_dic = {}
        post_data_dic.setdefault('customer_id',id)
        post_data_dic.setdefault('property_id',t_id+1)
        post_data_dic.setdefault('address',property_address)
        post_data_dic.setdefault('longitude',float(lng))
        post_data_dic.setdefault('latitude',float(lat))
        post_data_dic.setdefault('price', float(property_price))
        post_data_dic.setdefault('type',tmp_dic['property_type'])
        post_data_dic.setdefault('size',float(property_size))
        post_data_dic.setdefault('wifi', tmp_dic['property_wifi'])
        post_data_dic.setdefault('air-condition',tmp_dic['property_air'])
        post_data_dic.setdefault('cooking', tmp_dic['property_cook'])
        post_data_dic.setdefault('pet',tmp_dic['property_pet'])
        post_data_dic.setdefault('bed_room',int(tmp_dic['property_bedroom']))
        post_data_dic.setdefault('bath_room',int(tmp_dic['property_bathroom']))
        post_data_dic.setdefault('parking',int(tmp_dic['property_parking']))
        post_data_dic.setdefault('location',property_location)
        post_data_dic.setdefault('suburb',property_suburb)
        post_data_dic.setdefault('maxium_people',int(property_max_people))
        post_data_dic.setdefault('about_the_place',property_description)
        post_data_dic.setdefault('title',property_title)
        post_data_dic.setdefault('rating',0.0)
        post_data_dic.setdefault('comments',[])
        post_data_dic.setdefault('p_photo',photo_id)
        post_data_dic.setdefault('discount',0.0)
        post_data_dic.setdefault('available_time',ava_time_l)
        url1 = "http://127.0.0.1:5000/anhao0522/client/v1/landlord/{customer_id}/properties".format(customer_id=id)
        #print(token)
        response = requests.post(url1,json=post_data_dic,headers={"auth_token": token})
        #print(response)
    return redirect(url_for('home_page'))
@app.route('/location_center', methods=['POST'])
def get_center():
    """Compute the geographic center of posted "lat/lng:lat/lng:..." pairs."""
    if request.method == 'POST':
        location_str = request.form['location_list']
        print(location_str)
        location_list = location_str.split(":")
        print(location_list)
        # Parse each "lat/lng" token into a [float, float] pair.
        location_list_2 = []
        for e in location_list:
            location_list_2.append([float(e.split("/")[0]), float(e.split("/")[1])])
        print(location_list_2)
        reslut = center_geolocation(location_list_2)
        return jsonify({"result": reslut})


@app.route('/file/<file_id>')
def file(file_id):
    """Stream a previously uploaded photo from GridFS by its timestamp id."""
    # NOTE(review): this view's name shadows the builtin `file` in py2 style.
    data = mongo.db.test.find_one_or_404({'id': file_id})
    filename = data['photo_name']
    return mongo.send_file(filename)
if __name__ == '__main__':
app.run(port=5200, debug=True)
| xiechzh/Accomodation-Web-Portal | COMP9900_Proj/COMP9900_Proj.py | COMP9900_Proj.py | py | 13,953 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask_pymongo.PyMongo",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.render_... |
22095502736 | from django.urls import path
from user import views
urlpatterns = [
path('fun',views.fun),
path('fun1',views.fun1),
path('u',views.us, name='uuu'),
path('user',views.user, name='aaaa'),
] | anshifmhd/demo | user/urls.py | urls.py | py | 205 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "user.views.fun",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "user.views",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"... |
10699368918 | # -*- coding:utf-8 -*-
import cv2
import os
from glob import glob
import numpy as np
import shutil
'''处理原图片得到人物脸部图片并按比例分配train和test用于训练模型'''
SRC = "Raw" # 待处理的文件路径
DST = "data2" # 处理后的文件路径
TRAIN_PER = 5 # train的图片比例
TEST_PER = 1 # test的图片比例
def rename_file(path, new_name="", start_num=0, file_type=""):
    """Rename every file directly under `path` to `<new_name><index><ext>`.

    The extension is `file_type`, or — when empty — the suffix of the first
    file encountered, and that same extension is then reused for all files.
    Existing targets are never overwritten. Missing `path` is a no-op.
    """
    if not os.path.exists(path):
        return
    index = start_num
    for entry in os.listdir(path):
        source = os.path.join(path, entry)
        if not os.path.isfile(source):
            continue
        if file_type == "":
            # Lock the extension to the first file's suffix.
            file_type = os.path.splitext(source)[1]
        target = os.path.join(path, new_name + str(index) + file_type)
        if not os.path.exists(target):
            os.rename(source, target)
            index += 1
    # print("Renamed %d file(s)" % (index - start_num))
def get_faces(src, dst, cascade_file="lbpcascade_animeface.xml"):
    """Detect anime faces in every image under `src` and save 96x96 JPEG
    crops into a mirrored directory tree under `dst`."""
    if not os.path.isfile(cascade_file):
        raise RuntimeError("%s: not found" % cascade_file)
    # Create classifier
    cascade = cv2.CascadeClassifier(cascade_file)
    # Collect every file under src in one pass.
    files = [y for x in os.walk(src) for y in glob(os.path.join(x[0], '*.*'))]
    for image_file in files:
        image_file = image_file.replace('\\', '/')  # normalize Windows path separators
        # Drop the src root and the filename to mirror the folder structure.
        target_path = "/".join(image_file.strip("/").split('/')[1:-1])
        target_path = os.path.join(dst, target_path) + "/"
        if not os.path.exists(target_path):
            os.makedirs(target_path)
        # Continue numbering after any crops already present.
        # NOTE(review): count is not incremented per detected face, so multiple
        # faces from one image overwrite the same output file — confirm intent.
        count = len(os.listdir(target_path)) + 1
        # imdecode via np.fromfile handles non-ASCII paths on Windows.
        image = cv2.imdecode(np.fromfile(image_file, dtype=np.uint8), -1)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        faces = cascade.detectMultiScale(gray,
                                         # detector options
                                         scaleFactor=1.05,  # image pyramid shrink step
                                         minNeighbors=4,    # higher -> fewer but higher-quality hits
                                         minSize=(24, 24)   # smaller objects are ignored
                                         )
        for (x, y, w, h) in faces:
            crop_img = image[y:y + h, x:x + w]
            crop_img = cv2.resize(crop_img, (96, 96))  # resize crop to 96x96
            # filename = os.path.basename(image_file).split('.')[0]
            # imencode + tofile handles non-ASCII output paths.
            cv2.imencode('.jpg', crop_img)[1].tofile(os.path.join(target_path, str(count) + ".jpg"))
    print("All images are cropped")
def divide_train_test(src, train_percentage=5, test_percentage=1):
    """Split every class folder under `src` into src/test/<class> and
    src/train/<class> by the given ratio, then renumber each split folder.

    Files are assigned by their numeric filename: indices up to
    image_num / (train+test) * test + 1 go to test, the rest to train.
    """
    if not os.path.exists(src):
        print("folder %s is not exist" % src)
        return
    dirs = os.listdir(src)
    test_dir = os.path.join(src, "test")
    train_dir = os.path.join(src, "train")
    if not os.path.exists(test_dir):
        os.mkdir(test_dir)
    if not os.path.exists(train_dir):
        os.mkdir(train_dir)
    for dir_name in dirs:
        if dir_name != "test" and dir_name != "train":
            current_dir = os.path.join(src, dir_name)
            # Per-class destination folders inside test/ and train/.
            test_dir = os.path.join(src, "test", dir_name)
            train_dir = os.path.join(src, "train", dir_name)
            if not os.path.exists(test_dir):
                os.mkdir(test_dir)
            if not os.path.exists(train_dir):
                os.mkdir(train_dir)
            if os.path.isdir(current_dir):
                images = os.listdir(current_dir)
                image_num = len(images)
                for image in images:
                    filename = os.path.basename(image).split('.')[0]
                    if filename.isdigit():
                        percentage = train_percentage + test_percentage
                        # First test_num indices go to the test split.
                        test_num = (image_num / percentage) * test_percentage + 1
                        if int(filename) <= test_num:
                            if not os.path.exists(os.path.join(test_dir, image)):
                                shutil.move(os.path.join(current_dir, image), os.path.join(test_dir))
                            else:
                                # Target already present: drop the duplicate.
                                os.remove(os.path.join(current_dir, image))
                        else:
                            if not os.path.exists(os.path.join(train_dir, image)):
                                shutil.move(os.path.join(current_dir, image), os.path.join(train_dir))
                            else:
                                os.remove(os.path.join(current_dir, image))
                shutil.rmtree(current_dir)  # remove the emptied source class folder
    # Renumber files inside every split/class folder.
    # NOTE(review): the loop variable `dirs` shadows the earlier directory list.
    for dirs in os.listdir(src):
        for name in os.listdir(os.path.join(src, dirs)):
            if os.path.isdir(os.path.join(src, dirs, name)):
                rename_file(os.path.join(src, dirs, name))
    print("Set all cropped images to train and test")
def main():
    """Crop faces from the raw images, then split the crops into train/test."""
    get_faces(SRC, DST)
    divide_train_test(src=DST, train_percentage=TRAIN_PER, test_percentage=TEST_PER)
if __name__ == '__main__':
main()
| mikufanliu/AnimeCharacterRecognition | get_faces.py | get_faces.py | py | 5,231 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_numbe... |
7161765994 | from typing import List
class Solution:
    """LeetCode 1004 "Max Consecutive Ones III": longest run of 1s after
    flipping at most k zeros.

    Fixes over the previous version: `calculate` now propagates the result
    of its recursive call (it previously discarded it, returning None) and
    `longestOnes` actually returns the answer (it previously had no return
    statement). Debug prints were removed.
    """

    def calculate(self, nums, k, max_len, s, nums_len):
        """Recursively compute the best window over all start indices >= s.

        For each start, greedily extend right, spending one flip per zero
        until k flips are exhausted. `nums_len` is kept for interface
        compatibility (it is not used).
        """
        if nums[s:] == []:
            # No more start positions to try.
            return max_len
        flips_left = k
        window = []
        for value in nums[s:]:
            if value == 1:
                window.append(1)
            elif flips_left > 0:
                # Spend a flip to turn this zero into a one.
                flips_left -= 1
                window.append(1)
            else:
                break
        max_len = max(max_len, len(window))
        return self.calculate(nums, k, max_len, s + 1, nums_len)

    def longestOnes(self, nums: List[int], k: int) -> int:
        """Return the longest subarray of 1s obtainable with <= k flips."""
        return self.calculate(nums, k, 0, 0, len(nums))
# print(max_len)
obj = Solution()
obj.longestOnes([1,1,1,0,0,0,1,1,1,1,0],2)
obj.longestOnes([0,0,1,1,0,0,1,1,1,0,1,1,0,0,0,1,1,1,1],3) | CompetitiveCodingLeetcode/LeetcodeEasy | JuneLeetcodeChallenge/MaxConsecutiveOnesIII.py | MaxConsecutiveOnesIII.py | py | 1,280 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 33,
"usage_type": "name"
}
] |
18609666305 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from waveapi import events
from waveapi import model
from waveapi import robot
from pyactiveresource.activeresource import ActiveResource
import logging
import settings
CC_XMPP = 'cc:xmpp'
CC_TWITTER = 'cc:twitter'
logger = logging.getLogger('GAE_Robot')
logger.setLevel(logging.INFO)
class Notification(ActiveResource):
    """ActiveResource binding for the notification REST service."""
    # Base URL of the remote notification service.
    _site = settings.MPUB_SITE
### Webhooks start
def OnParticipantsChanged(properties, context):
    """Invoked when any participants have been added/removed.

    Greets every newly added participant except the robot itself.
    """
    robot_address = settings.ROBOT_NICK + '@appspot.com'
    for participant in properties['participantsAdded']:
        if participant != robot_address:
            Notify(context, "Hi, " + participant)
def OnRobotAdded(properties, context):
    """Invoked when the robot has been added."""
    # Announce ourselves with a fresh blip on the root wavelet.
    root_wavelet = context.GetRootWavelet()
    root_wavelet.CreateBlip().GetDocument().SetText("Connected to XMPP...")
def OnBlipSubmitted(properties, context):
    """Invoked when new blip submitted.

    Relays admin-authored blips to XMPP and/or Twitter depending on the
    cc: markers embedded in the blip text.
    """
    blip = context.GetBlipById(properties['blipId'])
    doc = blip.GetDocument()
    creator = blip.GetCreator()
    text = doc.GetText()
    try:
        # Only relay non-empty admin blips that carry more than a bare marker.
        if creator in settings.ADMINS and text != '' and text != 'cc:xmpp' and text != 'cc:twitter':
            if CC_XMPP in text:
                text = text.replace('cc:xmpp', '')
                note = Notification({'escalation': 10, 'body': text, 'recipients': {'recipient': [{'position': 1, 'channel': 'gchat', 'address': settings.MPUB_XMPP}]}})
                note.save()
            if CC_TWITTER in text:
                text = text.replace('cc:twitter', '')
                note = Notification({'escalation': 10, 'body': text, 'recipients': {'recipient': [{'position': 1, 'channel': 'twitter', 'address': settings.MPUB_TWITTER}]}})
                note.save()
    except Exception:
        # Fixes: the bare `except:` also swallowed SystemExit/KeyboardInterrupt,
        # and logger.debug was called with `context` as the format string,
        # discarding the actual message. Use lazy %-style logging args.
        logger.debug('Submit failed. (blip=%s)', properties['blipId'])
### Webhooks end
def Notify(context, message):
    """Append `message` as a new blip on the wave's root wavelet."""
    root_wavelet = context.GetRootWavelet()
    root_wavelet.CreateBlip().GetDocument().SetText(message)
if __name__ == '__main__':
myRobot = robot.Robot(settings.ROBOT_NICK,
image_url='http://%s.appspot.com/assets/bot.png' % settings.ROBOT_NICK,
version='1',
profile_url='http://%s.appspot.com/' % settings.ROBOT_NICK)
myRobot.RegisterHandler(events.WAVELET_PARTICIPANTS_CHANGED, OnParticipantsChanged)
myRobot.RegisterHandler(events.WAVELET_SELF_ADDED, OnRobotAdded)
myRobot.RegisterHandler(events.BLIP_SUBMITTED, OnBlipSubmitted)
myRobot.Run(debug=settings.DEBUG)
| zh/gae-robot | gaerobot.py | gaerobot.py | py | 2,435 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pyactiveresource.activeresource.ActiveResource",
"line_number": 19,
"usage_type": "name"
},
{
... |
21812044102 | import pytest
from src.error import InputError
from src.auth import auth_register_v2
from src.user import user_profile_v2
from src.other import clear_v1
@pytest.fixture
def register_user():
    """Reset server state and register one user.

    Returns (token, auth_user_id) for the freshly registered account.
    """
    clear_v1()
    user = auth_register_v2("johnsmith@gmail.com", "123456", "john", "smith")
    token = user['token']
    id = user['auth_user_id']
    return token, id
def test_valid_input(register_user):
    """Profile fields echo back the data supplied at registration."""
    token, id = register_user
    res = user_profile_v2(token, id)
    assert res['user']['u_id'] == id
    assert res['user']['email'] == 'johnsmith@gmail.com'
    assert res['user']['name_first'] == 'john'
    assert res['user']['name_last'] == 'smith'
    assert res['user']['handle_str'] == 'johnsmith'


def test_invalid_uid(register_user):
    """Requesting a non-existent u_id raises InputError."""
    token, id = register_user
    id += 1  # id of the only registered user, shifted -> guaranteed invalid
    with pytest.raises(InputError):
        user_profile_v2(token, id)
| TanitPan/comp1531_UNSW_Dreams | tests/user_profile_test.py | user_profile_test.py | py | 857 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "src.other.clear_v1",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "src.auth.auth_register_v2",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "src.us... |
23950521358 | #!/usr/bin/python3
import argparse
from iCEburn.libiceblink import ICE40Board
def rtype(x):
    """Parse a read action: hex address string -> ('R', address)."""
    address = int(x, 16)
    return ('R', address)
def wtype(x):
    """Parse a write action "ADDR:VALUE" (hex) -> ('W', [addr, value])."""
    parts = [int(token, 16) for token in x.split(':')]
    return ('W', parts)
def main():
    """CLI entry: apply -r/-w register reads/writes, in order, to an
    attached iCE40 board.

    NOTE(review): with no -r/-w flags, args.actions is None and the loop
    below raises TypeError — confirm whether that is acceptable.
    """
    ap = argparse.ArgumentParser()
    # Both flags append ('R', addr) / ('W', [addr, value]) tuples to one list,
    # preserving the command-line order.
    ap.add_argument("-r", "--read", dest='actions', type=rtype, action='append')
    ap.add_argument("-w", "--write", dest='actions', type=wtype, action='append')
    args = ap.parse_args()

    board = ICE40Board()
    with board.get_board_comm() as comm:
        for atype, arg in args.actions:
            if atype == 'R':
                addr = arg
                print("READ %02x: %02x" % (addr, comm.readReg(addr)))
            elif atype == 'W':
                addr, value = arg
                print("WRITE %02x: %02x" % (addr, value))
                comm.writeReg(addr, value)
if __name__ == "__main__":
main()
| davidcarne/iceBurn | iCEburn/regtool.py | regtool.py | py | 868 | python | en | code | 32 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "iCEburn.libiceblink.ICE40Board",
"line_number": 18,
"usage_type": "call"
}
] |
14720838146 | import torch
import torch.nn as nn
from collections import OrderedDict
from networks.reshape import Reshape
class ImageEncoder(nn.Module):
    """Stack of 3x3 conv + ReLU stages.

    Each stage halves the spatial dimensions: with stride-2 convolutions by
    default, or — when `useMaxPool` is True — stride-1 convolutions followed
    by 2x2 max pooling. Optionally flattens the final feature map.
    """

    def __init__(self, input_channels, layers_channels, prefix, useMaxPool=False, addFlatten=False):
        super(ImageEncoder, self).__init__()
        conv_stride = 1 if useMaxPool else 2
        stages = OrderedDict()
        in_ch = input_channels
        for idx, out_ch in enumerate(layers_channels):
            # Stage names are prefixed so state_dict keys stay unique per encoder.
            stages[prefix + '_conv' + str(idx)] = nn.Conv2d(
                in_channels=in_ch, out_channels=out_ch,
                kernel_size=3, stride=conv_stride, padding=1)
            stages[prefix + '_relu' + str(idx)] = nn.ReLU()
            if useMaxPool:
                stages[prefix + '_maxpool' + str(idx)] = nn.MaxPool2d(2, stride=2)
            in_ch = out_ch
        if addFlatten:
            stages[prefix + '_flat'] = nn.Flatten()
        self.net = nn.Sequential(stages)

    def forward(self, data):
        return self.net(data)
class ImageEncoderFlatInput(ImageEncoder):
    """ImageEncoder variant that accepts flattened inputs and reshapes them
    to (N, input_channels, 32, 32) before encoding."""

    def __init__(self, input_channels, layers_channels, prefix, useMaxPool=False, addFlatten=False):
        super(ImageEncoderFlatInput, self).__init__(input_channels, layers_channels, prefix, useMaxPool, addFlatten)
        # Reshape module that unflattens incoming vectors to 32x32 images.
        self.reshapeInput = Reshape(-1, input_channels, 32, 32)

    def forward(self, data):
        return self.net(self.reshapeInput(data))
| PradeepKadubandi/DemoPlanner | networks/imageencoder.py | imageencoder.py | py | 1,593 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",... |
18020255074 | import random,argparse,sys
parser = argparse.ArgumentParser()
import numpy as np
class PlannerEncoder:
    """Encode a 2-attackers-vs-1-defender football game as an episodic MDP.

    States are 7-character strings 'B1B2RRb' (two digits each for player B1,
    player B2 and defender R on a 4x4 grid numbered 1..16, plus one digit for
    ball possession), with two extra terminal states 'lost' and 'goal'.
    The defender's behaviour is read from an opponent policy file.
    p and q are the move-failure and pass/shoot-success parameters.
    """

    def __init__(self, opponent, p, q) -> None:
        self.p = p; self.q = q
        self.idx_to_states = {}
        self.opp_action_probs = {}
        # Opponent file format: header row 'state ...', then one row per
        # state: <7-char state> <prob of R action 0..3>.
        with open(opponent, 'r') as file:
            i = 0
            for line in file:
                parts = line.split()
                if parts[0] == 'state':
                    continue  # skip the header row
                if len(parts[0]) == 7:
                    self.idx_to_states[i] = parts[0]
                    self.opp_action_probs[parts[0]] = [float(parts[1]), float(parts[2]), float(parts[3]), float(parts[4])]
                    i += 1
        self.idx_to_states[i] = 'lost'  # both of these are terminal states
        self.idx_to_states[i + 1] = 'goal'
        self.states_to_idx = {}
        for i in self.idx_to_states:
            self.states_to_idx[self.idx_to_states[i]] = i
        self.S = len(self.idx_to_states)
        self.A = 10  # 0-3 move B1, 4-7 move B2, 8 pass, 9 shoot
        # Next step is to calculate probs based on different situations

    def player_pos(self, player, action):
        """Apply move `action` (0=left, 1=right, 2=up, 3=down) to a player on
        the 4x4 grid (cells 1..16); off-board moves leave the player in place.
        Left/right additionally check the row does not change."""
        new = None
        if action == 0:
            new = player - 1
            if (new - 1) // 4 == (player - 1) // 4 and new > 0 and new < 17:
                player = new
        elif action == 1:
            new = player + 1
            if (new - 1) // 4 == (player - 1) // 4 and new > 0 and new < 17:
                player = new
        elif action == 2:
            new = player - 4
            if new > 0 and new < 17:
                player = new
        elif action == 3:
            new = player + 4
            if new > 0 and new < 17:
                player = new
        return player

    def state_after_action(self, curr_state, a):
        """Return the state string after OUR action `a` (defender unchanged).
        Action 9 (shoot) returns the 'goal' placeholder."""
        b1_int = int(curr_state[:2])
        b2_int = int(curr_state[2:4])
        r_int = int(curr_state[4:6])
        ball_int = int(curr_state[-1])
        if a < 4:
            b1_int = self.player_pos(b1_int, a)
        elif a < 8:
            b2_int = self.player_pos(b2_int, a - 4)
        elif a == 8:
            # pass: toggle ball possession between B1 and B2
            if ball_int == 1:
                ball_int = 2
            elif ball_int == 2:
                ball_int = 1
        elif a == 9:
            return 'goal'
        b1_str = str(b1_int); b2_str = str(b2_int)
        r_str = str(r_int)
        ball_str = str(ball_int)
        # zero-pad single-digit cell numbers back to two characters
        if len(b1_str) == 1:
            b1_str = '0' + b1_str
        if len(b2_str) == 1:
            b2_str = '0' + b2_str
        if len(r_str) == 1:
            r_str = '0' + r_str
        new_state = b1_str + b2_str + r_str + ball_str
        return new_state

    def cordinates(self, state):
        """Convert the cell numbers of B1, B2, R into (row, col) pairs."""
        b1 = int(state[:2]); b2 = int(state[2:4]); r = int(state[-3:-1])
        b1_cor = ((b1 - 1) // 4, (b1 - 1) % 4)
        b2_cor = ((b2 - 1) // 4, (b2 - 1) % 4)
        r_cor = ((r - 1) // 4, (r - 1) % 4)
        return [b1_cor, b2_cor, r_cor]

    def transition_function(self, current_s, next_s, action):
        """Return (success_prob, failure_prob) for taking `action` from
        `current_s` given that the joint outcome would be `next_s`.
        Failure always means transitioning to 'lost'."""
        ball_pos = int(current_s[-1])
        if action < 4:  # moving B1
            if ball_pos == 1:
                b1_old = current_s[:2]; r_old = current_s[-3:-1]
                b1_new = next_s[:2]; r_new = next_s[-3:-1]
                # tackle: ball carrier lands on R, or they swap cells
                if b1_new == r_new:
                    return (0.5 - self.p, 0.5 + self.p)
                elif b1_old == r_new and b1_new == r_old:
                    return (0.5 - self.p, 0.5 + self.p)
                else:
                    return (1 - self.p, self.p)
            elif ball_pos == 2:
                return (1 - self.p, self.p)
        elif action < 8:  # moving B2 (mirror of the B1 case)
            if ball_pos == 1:
                return (1 - self.p, self.p)
            elif ball_pos == 2:
                b2_old = current_s[2:4]; r_old = current_s[-3:-1]
                b2_new = next_s[2:4]; r_new = next_s[-3:-1]
                if b2_new == r_new:
                    return (0.5 - self.p, 0.5 + self.p)
                elif b2_old == r_new and b2_new == r_old:
                    return (0.5 - self.p, 0.5 + self.p)
                else:
                    return (1 - self.p, self.p)
        if action == 8:  # pass between B1 and B2
            b1_cor, b2_cor, r_cor = self.cordinates(next_s)
            # success decays with Chebyshev distance between the attackers
            val = self.q - 0.1 * max(abs(b1_cor[0] - b2_cor[0]), abs(b1_cor[1] - b2_cor[1]))
            # halved success if R is on the passing line / on either player
            if b1_cor[0] == r_cor[0] and b2_cor[0] == r_cor[0]:
                return (0.5 * val, 1 - 0.5 * val)
            elif b1_cor == r_cor or b2_cor == r_cor:
                return (0.5 * val, 1 - 0.5 * val)
            # slope comparison to detect R between B1 and B2 (1e-3 avoids
            # division by zero for vertical alignments)
            elif ((b1_cor[1] - r_cor[1]) / (b1_cor[0] - r_cor[0] + 1e-3)) == ((r_cor[1] - b2_cor[1]) / (r_cor[0] - b2_cor[0] + 1e-3)):
                return (0.5 * val, 1 - 0.5 * val)
            else:
                return (val, 1 - val)
        if action == 9:  # shoot at the goal
            b1_cor, b2_cor, r_cor = self.cordinates(next_s)
            ball_pos = int(current_s[-1])
            if ball_pos == 1:
                # NOTE: row/col here are transposed relative to the problem
                # statement (matrix-style axes), per the original author.
                val = self.q - 0.2 * (3 - b1_cor[1])
                # halved success when R guards the goal-side region
                if r_cor[0] > 0 and r_cor[0] < 3 and r_cor[1] > 1:
                    return (0.5 * val, 1 - 0.5 * val)
                else:
                    return (val, 1 - val)
            elif ball_pos == 2:
                val = self.q - 0.2 * (3 - b2_cor[1])
                if r_cor[0] > 0 and r_cor[0] < 3 and r_cor[1] > 1:
                    return (0.5 * val, 1 - 0.5 * val)
                else:
                    return (val, 1 - val)

    def calculate_trans_probs(self):
        """Fill self.trans_probs (S x A x S) and self.rewards (S x A x S)."""
        self.trans_probs = np.zeros((self.S, self.A, self.S))
        for s in range(self.S - 2):  # we don't start from lost and goal state
            current_s = self.idx_to_states[s]
            for a in range(self.A):
                if a < 9:
                    new_state = self.state_after_action(current_s, a)
                    if new_state != current_s:
                        r_int = int(current_s[-3:-1])
                        for i, prob_opp in enumerate(self.opp_action_probs[current_s]):
                            # now for the current_s you will get a reaction from the opponent
                            if prob_opp != 0:
                                r_str = str(self.player_pos(r_int, i))  # 'i' is R's action
                                if len(r_str) == 1:
                                    r_str = '0' + r_str  # NOTE: probs assumed zero when R is at the edge
                                next_s = new_state[:4] + r_str + new_state[-1]
                                # transition_function looks for tackling / interception etc.
                                prob_s, prob_f = self.transition_function(current_s, next_s, a)
                                self.trans_probs[self.states_to_idx[current_s], a, self.states_to_idx[next_s]] = prob_opp * prob_s
                                # NOTE(review): '=' overwrites the 'lost' mass
                                # set by earlier opponent actions instead of
                                # accumulating — confirm '+=' is not intended.
                                self.trans_probs[self.states_to_idx[current_s], a, self.states_to_idx['lost']] = prob_opp * prob_f
                    else:
                        # infeasible action: losing is certain regardless of R
                        self.trans_probs[self.states_to_idx[current_s], a, self.states_to_idx['lost']] = 1
                elif a == 9:
                    # shooting handled separately: state_after_action returns
                    # 'goal', which cannot be sliced like a normal state string
                    new_state = current_s[:]
                    for i, prob_opp in enumerate(self.opp_action_probs[current_s]):
                        if prob_opp != 0:
                            r_int = int(current_s[-3:-1])
                            r_str = str(self.player_pos(r_int, i))
                            if len(r_str) == 1:
                                r_str = '0' + r_str
                            next_s = new_state[:4] + r_str + new_state[-1]
                            prob_s, prob_f = self.transition_function(current_s, next_s, a)
                            self.trans_probs[self.states_to_idx[current_s], a, self.states_to_idx['goal']] = prob_opp * prob_s
                            self.trans_probs[self.states_to_idx[current_s], a, self.states_to_idx['lost']] = prob_opp * prob_f
        # reward: -1 for reaching 'lost' (index 8192), +1 for 'goal' (8193)
        # NOTE(review): indices 8192/8193 assume the full 16*16*16*2 state
        # space is present in the opponent file — confirm.
        self.rewards = np.zeros((self.S, self.A, self.S))
        self.rewards[:, :, 8192] = -1
        self.rewards[:, :, 8193] = 1

    def save_transition_probabilities_and_rewards(self, filename):
        """Compute the MDP and write it to `filename` in the planner's
        'numStates / numActions / transition ... / mdptype / discount'
        text format, listing only nonzero (prob or reward) transitions."""
        self.calculate_trans_probs()
        trans_probs = self.trans_probs
        rewards = self.rewards
        num_states, num_actions, _ = trans_probs.shape
        with open(filename, 'w') as file:
            file.write(f"numStates {num_states}\n")
            file.write(f"numActions {num_actions}\n")
            file.write("end 8192 8193\n")
            for s in range(num_states - 2):  # Exclude terminal states 'lost' and 'goal'
                for a in range(num_actions):
                    for s_prime in range(num_states):
                        prob = trans_probs[s, a, s_prime]
                        reward = rewards[s, a, s_prime]
                        if prob != 0 or reward != 0:
                            file.write(f"transition {s} {a} {s_prime} {prob} {reward}\n")
            file.write("mdptype episodic\n")
            file.write("discount 0.9\n")
# Example usage:
# Example usage:
if __name__ == "__main__":
    parser.add_argument("--opponent", type=str, default='./data/football/test-1.txt')
    parser.add_argument("--p", type=float)
    parser.add_argument("--q", type=float)
    args = parser.parse_args()
    # p and q are probabilities; reject anything outside [0, 1].
    if not (args.p <= 1.0 and args.p >= 0.0):
        print("p is a probability, should be btw 0,1")
        sys.exit(1)  # fix: exit with a nonzero status on invalid input (was 0)
    if not (args.q <= 1.0 and args.q >= 0.0):
        print("q is a probability, should be btw 0,1")
        sys.exit(1)
    enco = PlannerEncoder(args.opponent, args.p, args.q)
    enco.save_transition_probabilities_and_rewards('t-2.txt')
| kiluazen/ReinforcementLearning | Policy Iteration/encoder.py | encoder.py | py | 10,197 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line... |
7886651161 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 18 21:42:00 2021
@author: fyy
"""
import scipy.stats as stats
import numpy as np
import random
import scipy.io as scio
import matplotlib.pyplot as plt
import math
dataFile = './_dat/val_dataset.mat'
ratio = 0.05
sample_num = 100 # 训练样本的大小
max_len = 250
min_len = 180
max_kn = 4
min_kn = 0
s_length = 200
def stable(maxLen, priValue):
    """Constant PRI sequence: `maxLen` samples, all equal to `priValue`.

    Uses np.full instead of np.ones(...) * priValue, avoiding the
    intermediate all-ones temporary.
    """
    return np.full(maxLen, priValue, dtype=float)
def jitter(maxLen, priValue, priDev):
    """Uniformly jittered PRI sequence.

    Each of the `maxLen` samples is drawn uniformly from
    [priValue*(1-priDev), priValue*(1+priDev)] (integer values, both
    bounds inclusive). Removed the unused `params` local from the
    original implementation.
    """
    maxDevValue = priValue * priDev
    lowerBound = priValue - maxDevValue
    upperBound = priValue + maxDevValue
    # np.random.randint samples lower <= x < upper, hence the +1
    priSeq = np.random.randint(lowerBound, upperBound + 1, maxLen)
    return priSeq
# periodic (sinusoidal) PRI pattern
def periodic(maxLen, priValue, ratio):
    """Sinusoidally modulated PRI sequence.

    Sample i equals priValue + amp*sin(freq*i/fsample) with
    amp = priValue*ratio, freq = 50, fsample = 400.

    Fixes vs original: the unused, nondeterministic `sample` draw and the
    unused `para` list are removed (the function is now deterministic),
    and the per-element math.sin loop is vectorized with np.sin.
    """
    amp = priValue * ratio  # sine amplitude
    freq = 50               # sine frequency
    fsample = 400           # sine sample rate
    i = np.arange(maxLen)
    priDSSeq = amp * np.sin(freq * i / fsample) + priValue
    return priDSSeq
# sliding (linearly ramping) PRI pattern
def sliding(maxLen, priValue, ratio):
    """Sliding PRI sequence.

    Ramps linearly from priValue up to priValue*ratio over a random number
    of pulses, then repeats the ramp until `maxLen` samples are produced.

    Bug fix: random.randint requires integer bounds; the original passed
    the float `ratio` directly, which raises ValueError for non-integral
    ratios. The unused `para` list is also removed.
    """
    priMax = priValue * ratio                    # maximum PRI of the ramp
    pulseNum = random.randint(int(ratio), 32)    # pulses per ramp
    slidingStep = (priMax - priValue) / pulseNum  # ramp increment
    # one full ramp: priValue, priValue+step, ..., priMax
    slidingSeq = priValue + slidingStep * np.arange(pulseNum + 1)
    cycleNum = math.ceil(maxLen / len(slidingSeq))  # ramps needed to cover maxLen
    priDSSeq = np.tile(slidingSeq, cycleNum)
    return priDSSeq[:maxLen]
'''
import numpy as np
a = np.array([[1, 2, 0, 3, 0],
[4, 5, 0, 6, 0],
[7, 8, 0, 9, 0]])
idx = np.argwhere(np.all(a[..., :] == 0, axis=0))
a2 = np.delete(a, idx, axis=1)
'''
# staggered PRI pattern (3-10 values per group)
def stagger(maxLen, priValue, priNum):
    """Staggered PRI sequence.

    Repeats the `priNum`-element PRI pattern `priValue` cyclically and
    truncates to `maxLen` samples. Removed the unused `seqLen` alias and
    `para` list from the original.
    """
    cycleNum = math.ceil(maxLen / priNum)  # full pattern repetitions needed
    priSSeq = np.tile(priValue, cycleNum)
    priSSeq = priSSeq[:maxLen]
    return priSSeq
def gen_func(m, maxLen, *args):
    """Dispatch to a PRI-pattern generator by mode id.

    m: 1=stable, 2=jitter, 3=periodic, 4=sliding, 5=stagger.
    Bug fix: the original called each generator with only `maxLen`, but
    every generator requires additional parameters, so every call raised
    TypeError. `*args` now forwards those parameters (backward-compatible
    signature extension). Unknown modes still print the original error
    message and return None.
    """
    generators = {1: stable, 2: jitter, 3: periodic, 4: sliding, 5: stagger}
    if m in generators:
        return generators[m](maxLen, *args)
    print("****error!****")
def solve(nums, x, y):
    """Return True iff any element of `nums` lies in the closed interval
    [min(x, y), max(x, y)]; False for an empty list.

    Rewritten with any() over the original index loop with a redundant
    else/continue branch.
    """
    if not nums:
        return False
    lo, hi = (x, y) if x <= y else (y, x)
    return any(lo <= n <= hi for n in nums)
def pri2toa(inputseq):
    """Convert a (zero-padded) PRI sequence to a TOA sequence.

    Zero entries are treated as padding and dropped; the TOA sequence is
    the cumulative sum of the remaining PRIs with a leading 0, so it has
    one more element than the filtered input.

    The original element-by-element while loop is replaced by np.cumsum.
    """
    vals = np.asarray(inputseq)
    vals = vals[vals != 0]  # drop zero padding
    toa = np.zeros(len(vals) + 1)
    toa[1:] = np.cumsum(vals)
    return toa
max_len = 250
def lostPul(inputseq, proportion, label, pattern):  # missing-pulse corruption
    # inputseq:   input PRI sequence (zero-padded); converted to TOA internally
    # proportion: fraction of pulses to drop
    # returns (seqPRI, lostlabel, p): the PRI sequence after dropping pulses,
    # the change-point labels shifted to account for the drops, and the
    # per-pulse pattern ids with dropped positions marked as pattern 6 —
    # all padded/truncated against the module-level `max_len` constant.
    lostPulseSeq = pri2toa(inputseq)  # TOA sequence to drop pulses from
    lengthWorkModeSample = len(lostPulseSeq)
    rand_num = math.floor(lengthWorkModeSample * proportion)
    # NOTE(review): np.random.randint may repeat indices, so fewer than
    # rand_num distinct pulses can be dropped — confirm this is intended.
    randomIndex = np.random.randint(0, lengthWorkModeSample, rand_num)  # lower <= x < upper
    randomIndex = sorted(randomIndex)
    j = 0
    mask = label != 0
    label = label[mask]
    lostlabel = label * 1  # *1 forces a copy; plain `a = b` would only alias
    p = pattern * 1
    for i in range(len(randomIndex)):
        # shift every change-point label at/after the dropped index left by one
        while (j < len(label) and randomIndex[i] >= label[j]):
            j = j + 1
        lostlabel[j:] = lostlabel[j:] - 1
    lostPulseSeq = [i for num, i in enumerate(lostPulseSeq) if num not in randomIndex]
    p = [i for num, i in enumerate(p) if num not in randomIndex]
    p = np.array(p)
    for i in range(len(randomIndex)):
        # mark the pulse preceding each dropped one with pattern id 6
        # (the -i compensates for elements already removed)
        p[randomIndex[i] - 1 - i] = 6
    lostPulseSeq = np.array(lostPulseSeq)
    seqPRI = lostPulseSeq[1:] - lostPulseSeq[:-1]
    seqTOA = lostPulseSeq
    # zero-pad, then truncate to the fixed output sizes
    z = np.zeros(max_len)
    seqPRI = np.append(seqPRI, z)
    p = np.append(p, z)
    z = np.zeros(5)
    lostlabel = np.append(lostlabel, z)
    return seqPRI[:max_len], lostlabel[:5], p[:max_len]
def findpos(arr, x):
    """Index of the first element of `arr` strictly greater than `x`,
    or -1 if no such element exists."""
    return next((i for i, v in enumerate(arr) if v > x), -1)
def suprPul(inputseq, proportion, label, p):  # spurious/false-alarm pulse corruption
    # inputseq:   input PRI sequence (zero-padded); converted to TOA internally
    # proportion: fraction of spurious pulses to insert
    # returns (seqPRI, label, pattern): the PRI sequence with spurious pulses
    # inserted at random times, the change-point labels shifted rightwards to
    # account for the insertions, and the per-pulse pattern ids with inserted
    # positions (and their predecessors) marked as pattern 6.
    # (The original's "pw: pulse width = 5us" note is not used in this code.)
    supPulseSeq = pri2toa(inputseq)  # TOA sequence to insert pulses into
    lengthWorkModeSample = len(supPulseSeq)
    tMax = math.floor(max(supPulseSeq))
    randomNum = math.floor(lengthWorkModeSample * proportion)
    randomTime = np.random.randint(0, tMax, randomNum)  # spurious arrival times
    randomTime = sorted(randomTime)
    mask = label != 0
    label = label[mask]
    pattern = p * 1  # *1 forces a copy; plain assignment would alias
    j = 0
    for i in range(len(randomTime)):
        pos = findpos(supPulseSeq, randomTime[i])  # insertion index
        # shift every change-point label at/after the insertion right by one
        while (j < len(label) and label[j] < pos):
            j = j + 1
        label[j:] = label[j:] + 1
        supPulseSeq = np.insert(supPulseSeq, pos, randomTime[i])
        pattern[pos - 1] = 6
        pattern = np.insert(pattern, pos, 6)
    # NOTE(review): randomIndex is computed but never used — confirm it can
    # be removed.
    randomIndex = [i for i, val in enumerate(supPulseSeq) if val in randomTime]
    seqPRI = supPulseSeq[1:] - supPulseSeq[:-1]
    seqTOA = supPulseSeq
    # zero-pad, then truncate to the fixed output sizes
    z = np.zeros(max_len)
    seqPRI = np.append(seqPRI, z)
    z = np.zeros(5)
    label = np.append(label, z)
    return seqPRI[:max_len], label[:5], pattern[:max_len]
def meaErr(inputseq, stdPRI, max_len=250):
    """Add Gaussian TOA measurement noise to a PRI sequence.

    inputseq: input PRI sequence (zero-padded); converted to TOA internally
    stdPRI:   standard deviation of the per-TOA Gaussian noise
    max_len:  output truncation length; generalizes the former dependency on
              the module-level `max_len` global (default matches its value).
    Returns the PRI sequence recovered from the noisy TOAs, truncated to
    `max_len` samples.
    """
    seqTOA = pri2toa(inputseq)
    lengthWorkModeSample = len(seqTOA)
    errGenarated = np.random.normal(0, stdPRI, lengthWorkModeSample)
    seqTOA = seqTOA + errGenarated
    seqPRI = seqTOA[1:] - seqTOA[:-1]
    return seqPRI[:max_len]
def indices(a, func):
    """Indices i where func(a[i]) is truthy — a MATLAB-style find().

    >>> indices([1, 2, 3, 1, 2, 3], lambda x: x > 2)
    [2, 5]
    """
    return [i for (i, val) in enumerate(a) if func(val)]
# ---- module-level script: generate a synthetic jittered-PRI dataset with
# random change points, then build change-point target masks ----
data = np.zeros((sample_num, max_len), dtype=np.float32)
# Fix: np.int was deprecated and removed in NumPy 1.24+; plain int is the
# documented replacement and yields the same platform integer dtype.
label = np.zeros((sample_num, max_kn + 1), dtype=int)
pattern = np.zeros((sample_num, max_kn + 1), dtype=int)
p = np.zeros((sample_num, max_len), dtype=np.float32)
for i in range(sample_num):
    # seq_len = random.randint(min_len, max_len)
    seq_len = max_len
    knum = random.randint(min_kn, max_kn)  # number of change points
    k = []
    for j in range(knum):
        # draw change points at least 25 samples apart and away from the edges
        a = random.randint(25, s_length - 25)
        while solve(k, a - 25, a + 25):
            a = random.randint(25, s_length - 25)
        k.append(a)
    k.append(seq_len)
    k = np.array(k)
    k = sorted(k)
    priValue = random.randint(10, 20) * 10
    priDev = random.randint(10, 20) / 20
    for j in range(knum + 1):
        label[i, j] = k[j]
        module = 2  # pattern id 2 = jittered PRI
        pattern[i, j] = module
        flag = random.randint(1, 3)
        tempValue = priValue
        tempDev = priDev
        if flag == 1:  # change both mean and deviation
            while (tempValue == priValue):
                tempValue = random.randint(10, 20) * 10
            while (tempDev == priDev):
                tempDev = random.randint(10, 20) / 20
        elif flag == 2:  # change only the mean
            while (tempValue == priValue):
                tempValue = random.randint(10, 20) * 10
        else:  # change only the deviation (original comment said "mean")
            while (tempDev == priDev):
                tempDev = random.randint(10, 20) / 20
        priValue = tempValue
        priDev = tempDev
        if j == 0:
            data[i, :k[j]] = jitter(k[j], priValue, priDev)
            p[i, :k[j]] = module
        else:
            data[i, k[j - 1]:k[j]] = jitter(k[j] - k[j - 1], priValue, priDev)
            p[i, k[j - 1]:k[j]] = module
d = data * 1  # *1 forces copies so the corruption steps don't alias the originals
l = label * 1
result = np.zeros((sample_num, s_length), dtype=np.float32)
L = np.zeros((sample_num, s_length), dtype=np.float32)
# Optional corruption passes, kept disabled as in the original:
'''
for i in range(sample_num):
    d[i] = meaErr(data[i], 1)
for i in range(sample_num):
    d[i], l[i], p[i] = lostPul(data[i], 0.1, l[i], p[i])
for i in range(sample_num):
    d[i], l[i], p[i] = suprPul(data[i], 0.05, l[i], p[i])
'''
d = d[:, :s_length]
p = p[:, :s_length]
# clamp change-point labels that fall beyond the kept sequence length
for i in range(sample_num):
    for j in range(max_kn + 1):
        if l[i, j] >= s_length:
            l[i, j:] = 0
            l[i, j] = s_length
            break
# build the target masks: 1 at each interior change point, 0.8 on its
# neighbours in `result`; `L` keeps only the hard 1s
for i in range(sample_num):
    for j in range(max_kn + 1):
        if l[i, j] != s_length and l[i, j] != 0:
            result[i, l[i, j]] = 1
            L[i, l[i, j]] = 1
            result[i, l[i, j] - 1] = 0.8
            result[i, l[i, j] + 1] = 0.8
plt.plot(d[0])
# scio.savemat(dataFile, {'data':d,'label':result,'pattern':p,'L':L,'Y':d,'l_true':l})
| Carty-Bao/BNPHMM | code/gen_new.py | gen_new.py | py | 9,565 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.ones",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.