seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
41902703943 | import cv2
import numpy as np
img = cv2.imread('water_coins.jpg')
gr_img = cv2.cvtColor(img , cv2.COLOR_BGR2GRAY)
_ , thresh = cv2.threshold(gr_img , 0 , 255 , cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
#noise removal
kernel = np.ones((3 , 3) , np.uint8)
opening = cv2.morphologyEx(thresh , cv2.MORPH_OPEN , kernel , iterations = 2)
#Sure background
kernel = np.ones((3 , 3) , np.uint8)
sure_bg = cv2.dilate(opening , kernel , iterations = 3)
#FINDING SURE FOREGROUND AREA
dist_transform = cv2.distanceTransform(opening , cv2.DIST_L2 , 5)
ret , sure_fg = cv2.threshold(dist_transform , 0.7*dist_transform.max() , 255 , 0)
#finding unknown region
#cv2.imshow('sure_fg0' , sure_fg)
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg , sure_fg)
#cv2.imshow('sure_fg' , sure_fg)
#cv2.imshow('sure_bg' , sure_bg)
ret , markers = cv2.connectedComponents(sure_fg)
for i in range(markers.shape[0]):
print(set(markers[i]))
#cv2.imshow('here' , markers)
markers = markers + 1
markers[unknown==255] = 0
markers = cv2.watershed(img,markers)
img[markers == -1] = [255,0,0]
#cv2.imshow('img' , img)
| kumar6rishabh/counting_objects | counting_objects.py | counting_objects.py | py | 1,148 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"lin... |
6753837772 | import numpy as np
from scipy import signal
# input:
# data(type:numpy array)(shape:time * 2)
# model(sklearn model or pytorch model)
# flatten(type: bool)(whether to flatten the input as 200 or use 100*2 as the model input)
# output:
# probanility_map(number of split, 12)
def stroke_probability_map(data, model, flatten):
split_list = [1, 2, 4, 8, 3, 6, 9]
probability_map = np.zeros((int(np.sum(split_list)), 12))
N = data.shape[0]
count = 0
for split_idx in range(len(split_list)):
n_ = int(np.floor(N/split_list[split_idx]))
for i in range(split_list[split_idx]):
data_cur = signal.resample(data[(i*n_):((i+1)*n_), :], 100, axis=0)
if flatten:
data_cur = data_cur.reshape((1, -1))
probability_map[count] = model(data_cur)
count += 1
return probability_map | yzhao07/MLMA_EOG | Continuous CNN/stroke probability map/stroke_probability_map.py | stroke_probability_map.py | py | 881 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.floor",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "scipy.signal.resample",
"line_n... |
10538619256 | from collections import deque
노드개수, 간선개수 = map(int, input().split())
result = 0
# 빈 그래프 그리기
graph = [ [] for i in range(노드개수+1) ]
for i in range(간선개수):
a, b = map(int, input().split())
graph[a].append(b)
graph[b].append(a)
visited = [False] * (노드개수 + 1)
def 너비우선탐색(graph, start, visited):
queue = deque([start])
visited[start] = True
while queue:
v = queue.popleft()
for i in graph[v]:
if not visited[i]:
queue.append(i)
visited[i] = True
for i in range(노드개수):
if not visited[i+1]:
너비우선탐색(graph, i+1, visited)
result += 1
print(result)
"""
7
6
1 2
2 3
1 5
5 2
5 6
4 7
""" | 5pponent/opponent | dfs&bfs/연결 요소의 개수.py | 연결 요소의 개수.py | py | 768 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 16,
"usage_type": "call"
}
] |
5747062131 | from setuptools import setup
with open('README.md', 'r', encoding='utf-8') as f:
readme = f.read()
setup(
name='gearbest_api',
version='0.0.4',
description='Retrieve info from gearbest api.',
long_description=readme,
long_description_content_type='text/markdown',
url='https://github.com/matteobaldelli/python-gearbest-api',
license='MIT',
author='Matteo Baldelli',
author_email='baldelli.matteo2@google.com',
packages=['gearbest_api'],
install_requires=['requests'],
python_requires='>=3.7',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
)
| matteobaldelli/python-gearbest-api | setup.py | setup.py | py | 716 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
}
] |
29767005499 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('qsource_user', '0005_auto_20151118_2237'),
]
operations = [
migrations.CreateModel(
name='QuestionsAnswered',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('questionID', models.IntegerField()),
('user', models.ForeignKey(to='qsource_user.UserData')),
],
),
migrations.CreateModel(
name='QuestionsAsked',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('questionID', models.IntegerField()),
('user', models.ForeignKey(to='qsource_user.UserData')),
],
),
]
| SamuelWenninger/QSource-app | qsource_user/migrations/0006_questionsanswered_questionsasked.py | 0006_questionsanswered_questionsasked.py | py | 971 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 14,
"usage_type": "call"
},
... |
35319730396 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2016-01-04 11:18:06
import codecs
import os
import sys
import requests
import shutil
import time
HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.1234.0 Safari/537.36',
'Referer': 'https://google.com/'}
def to_dict(o):
d = {}
for n in dir(o):
if n.startswith('__'):
continue
v = getattr(o, n)
# print(type(v),v, callable(v))
if callable(v):
try:
d[n] = v()
except Exception as e:
pass
else:
d[n] = v
return d
def write_list(name, ls):
if not ls:
return
with codecs.open(name, 'w', 'utf-8') as f:
for s in ls:
f.write(s + '\n')
def read_list(name):
if not os.path.isfile(name):
return None
with codecs.open(name, 'r', 'utf-8') as f:
return [line.rstrip('\n') for line in f]
def download_insta_file(media, output='output'):
url = media.get_standard_resolution_url().replace('s640x640/sh0.08/', '')
name = url.split('/')[-1]
fname = media.created_time.strftime("%Y%m%d_%H%M%S") + "_" + name
#print(fname)
tmpname = name + ".tmp"
fpath = os.path.abspath(os.path.join(output, fname))
path = os.path.abspath(os.path.join(output, name))
tmppath = os.path.abspath(os.path.join(output, tmpname))
if os.path.isfile(fpath):
print('skip exists %s' % path)
return fpath
if os.path.isfile(path):
print('skip exists %s' % path)
shutil.move(path,fpath)
return fpath
try:
r = requests.get(url, stream=True, headers=HEADERS)
length = int(r.headers['Content-Length'])
print('downloading %s (%sk)' % (url, length / 2014))
if r.status_code == requests.codes.ok:
with open(tmppath, 'wb') as f:
for chunk in r.iter_content(chunk_size=4096):
f.write(chunk)
shutil.move(tmppath, fpath)
print('saved to %s' % path)
return fpath
else:
print('failed: %s' % r)
except Exception as e:
print("error:%s on downloading file:%s" % (e, url))
def download_insta_files(medias, output='output'):
if not os.path.exists(output):
os.makedirs(output)
for media in medias:
download_insta_file(media, output)
def download_files_multi(urls, output='files', pool_size=4):
if not os.path.exists(output):
os.makedirs(output)
from multiprocessing.dummy import Pool
from functools import partial
partial_download_file = partial(download_file, output=output)
pool = Pool(pool_size)
pool.map(partial_download_file, urls)
pool.close()
pool.join()
| mcxiaoke/python-labs | instagram/utils.py | utils.py | py | 2,838 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "codecs.open",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "codecs.open",
"line_numbe... |
40451981469 | """seed event types
Revision ID: 0311eb0fc2e6
Revises: 61043123657a
Create Date: 2021-02-04 14:27:03.847005
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from sqlalchemy import String, Integer, Boolean
# revision identifiers, used by Alembic.
revision = '0311eb0fc2e6'
down_revision = '61043123657a'
branch_labels = None
depends_on = None
def upgrade():
guild_settings = table('event_type',
column('id', Integer),
column('name', String),
column('address_required', Boolean),
)
op.bulk_insert(guild_settings,
[
{
"id": 1,
"name": "Virtual Gift Exchange",
"address_required": False
},
{
"id": 2,
"name": "Shipped Gift Exchange",
"address_required": True
}
]
)
def downgrade():
pass
| jcsumlin/secret-santa-discord-bot | alembic/versions/0311eb0fc2e6_seed_event_types.py | 0311eb0fc2e6_seed_event_types.py | py | 1,160 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.sql.table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.sql.column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "sql... |
70631838504 | import pandas as pd
import numpy as np
import string, re
import nltk
import time,random
import operator
#from tabulate import tabulate
from nltk.stem.snowball import SnowballStemmer
import os.path
stop_list = nltk.corpus.stopwords.words('english')
lemmatizer = nltk.stem.WordNetLemmatizer()
punctuation = list(string.punctuation)
stop_list = stop_list + punctuation +["rt", 'url']
stemmer = SnowballStemmer("english")
HillaryWords = ['hillary clinton','hillaryclinton','hilaryclinton','hillari clinton','hilari clinton','hilary clinton','hillary','clinton']
DonaldWords = ['donald trump','donaldtrump','donald','trump','realdonaldtrump']
CarsonWords = ['realbencarson','bencarson','carson']
BushWords = ['jebbush','bush']
hillary_re = re.compile('|'.join(map(re.escape, HillaryWords)))
donald_re = re.compile('|'.join(map(re.escape, DonaldWords)))
classifier =[]
def preprocess(tweet):
if type(tweet)!=type(2.0):
tweet = tweet.lower()
tweet = " ".join(tweet.split('#'))
tweet = re.sub(r'#([^\s]+)', r'\1', tweet)
tweet = " ".join(tweet.split('@'))
tweet = re.sub(r'@([^\s]+)', r'\1', tweet)
tweet = re.sub('((www\.[^\s]+)|(https://[^\s]+))','URL',tweet)
tweet = re.sub("http\S+", "URL", tweet)
tweet = re.sub("https\S+", "URL", tweet)
tweet = " ".join(tweet.split(':'))
#removes @username from text entirely
#tweet = re.sub('@[^\s]+','AT_USER',tweet)
#tweet = tweet.replace("AT_USER","")
tweet = tweet.replace("URL","")
tweet = tweet.replace(".","")
tweet = tweet.replace('\"',"")
tweet = tweet.replace('&',"")
#remove punctuation words
tweet = " ".join([word for word in tweet.split(" ") if word not in stop_list])
#remove words ending with special character
tweet = " ".join([word for word in tweet.split(" ") if re.search('^[a-z]+$', word)])
#remove common words such as "the"
tweet = " ".join([lemmatizer.lemmatize(word) for word in tweet.split(" ")])
#stem similar words such as "hurt" and "hurting"
tweet = " ".join([stemmer.stem(word) for word in tweet.split(" ")])
tweet = re.sub('[\s]+', ' ', tweet)
tweet = tweet.strip('\'"')
#manually stem similar political words
tweet = hillary_re.sub("hillary", tweet)
tweet = donald_re.sub("donald", tweet)
tweet = carson_re.sub("carson", tweet)
tweet = bush_re.sub("bush", tweet)
tweet = huckabee_re.sub("huckabee", tweet)
tweet = cruz_re.sub("cruz", tweet)
tweet = kasich_re.sub("kasich", tweet)
tweet = rubio_re.sub("rubio", tweet)
else:
tweet=''
return tweet
| zaksoliman/twitter-sentiment-analysis | tweet_analysis/classifiers/process_chars.py | process_chars.py | py | 2,719 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "nltk.corpus",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "nltk.stem.WordNetLemmatizer",
"line_number": 13,
"usage_type": "call"
},
{
"api_name... |
74752031784 | import os
import requests
from requests.exceptions import ReadTimeout
from requests_oauthlib import OAuth1
from helpers.configHelpers import decryptEnvVar
from helpers.logHelpers import createLog
from helpers.errorHelpers import URLFetchError
logger = createLog('hathiCover')
class HathiCover():
"""Manager class for finding a cover image for HathiTrust images. This is
done by parsing a METS object obtained through the Hathi API, extracting
the first 25 pages and scoring them based on relevancy as a cover. The
URI to the most relevant page image is ultimately returned.
"""
HATHI_BASE_API = os.environ.get('HATHI_BASE_API', None)
HATHI_CLIENT_KEY = decryptEnvVar('HATHI_CLIENT_KEY')
HATHI_CLIENT_SECRET = decryptEnvVar('HATHI_CLIENT_SECRET')
def __init__(self, htid):
self.htid = htid
self.logger = logger
def generateOAuth(self):
"""Helper method that generates an OAuth1 block that authenticates
requests against the HathiTrust Data API. Due to the structure of the
API this is formatted as part of the query string.
Returns:
[object] -- An OAuth1 authentication block
"""
return OAuth1(
self.HATHI_CLIENT_KEY,
client_secret=self.HATHI_CLIENT_SECRET,
signature_type='query'
)
def getResponse(self, queryURL):
queryAuth = self.generateOAuth()
try:
return requests.get(queryURL, auth=queryAuth, timeout=3)
except ReadTimeout:
raise URLFetchError(
'URL request timed out'.format(queryURL),
504,
queryURL
)
def getPageFromMETS(self):
"""Query method for the best page URI from the record's METS file
Returns:
[uri] -- URI to the page to be used as a cover image
"""
self.logger.debug('Querying {} for cover image'.format(self.htid))
structURL = '{}/structure/{}?format=json&v=2'.format(
self.HATHI_BASE_API,
self.htid
)
try:
structResp = self.getResponse(structURL)
if structResp.status_code == 200:
return self.parseMETS(structResp.json())
except URLFetchError:
self.logger.warning('Request for structure file timed out')
return None
def parseMETS(self, metsJson):
"""Parser that handles the METS file, parsing the first 25 pages into
HathiPage objects that contain a score and position. Once parsed it
sets the "imagePage" as the page that contains the most plausibly
relevant cover.
Arguments:
metsJson {object} -- METS object extracted from the JSON response
Returns:
[uri] -- URI to the page to be used as a cover image
"""
structMap = metsJson['METS:structMap']
self.logger.info('Retrieved METS for {}'.format(self.htid))
self.pages = [
HathiPage(page)
for page in structMap['METS:div']['METS:div'][:25]
]
self.pages.sort(key=lambda x: x.score, reverse=True)
self.imagePage = self.pages[0]
return self.getPageURL()
def getPageURL(self):
"""Extracts a resolvable URI from the page selected as a cover image.
This URI can be used to create a local copy of the cover.
Returns:
[uri] -- The created URI of the cover page
"""
return '{}/volume/pageimage/{}/{}?format=jpeg&v=2'.format(
self.HATHI_BASE_API,
self.htid,
self.imagePage.page
)
class HathiPage():
"""A representation of a single page in a HathiTrust record. This contains
some basic description of the page as well as some metadata that we derive
from Hathi's description to rank it in terms of its suitability as a cover
image.
"""
# These are the "flags" that denote a potential cover page
# They are drawn from a larger set of flags that can be attached to a page
PAGE_FEATURES = set(
['FRONT_COVER', 'TITLE', 'IMAGE_ON_PAGE', 'TABLE_OF_CONTENTS']
)
def __init__(self, pageData):
self.pageData = pageData
self.page = self.getPageNo()
self.flags = self.parseFlags()
self.score = self.setScore()
def getPageNo(self):
"""Extracts the current page number (from the front cover, not number
on the page) from the METS description of the page
Returns:
[integer] -- The current page number
"""
return self.pageData.get('ORDER', 0)
def parseFlags(self):
"""Extracts the flags (in METS these are grouped under "LABEL") that
can be used to determine the contents of a page. These are parsed from
a comma-delimited string into a set.
Returns:
[set] -- Unique set of flags assigned to the page
"""
flagStr = self.pageData.get('LABEL', '')
return set(flagStr.split(', '))
def setScore(self):
"""This takes the union of the flags denoted as potentially interesting
in the class variable above and the current flags set on the page. The
total count of the resulting set is the "score" for the page,
essentially how many potentially interesting elements exist on it. This
allows the HathiCover class to determine the best possible to cover
to display.
Returns:
[integer] -- The score as derived from the union of the flags set
"""
return len(list(self.flags & self.PAGE_FEATURES))
| NYPL/sfr-ingest-pipeline | lambda/sfr-hathi-reader/lib/hathiCover.py | hathiCover.py | py | 5,653 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "helpers.logHelpers.createLog",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "helpers.c... |
25240483381 | import datetime
import json
import time
import numpy as np
from common.args import Runtime
from data.premetheus import DataManger
cpu_data_range = {}
def cpu_data_pretreatment():
y = datetime.datetime.now().year
m = datetime.datetime.now().month
d = datetime.datetime.now().day
dt = str(y) + '-' + str(m) + '-' + str(d) + ' 00:00:00'
start = int(time.mktime(time.strptime(dt, "%Y-%m-%d %H:%M:%S")))
end = int(time.time())
# start = int(Runtime().ENV['data.forecast.start'])
# end = int(Runtime().ENV['data.forecast.end'])
# start = int(Runtime().ENV['data.train.start'])
# end = int(Runtime().ENV['data.train.end'])
datamanage = DataManger(start, end, int(Runtime().ENV['data.sample.period']))
datamanage.get_all()
for item in datamanage.jsonlist:
if item['status'] == 'success':
if item['data']['result']:
for res in item['data']['result']:
instance = res['metric']['instance']
values = res['values']
if instance not in cpu_data_range:
cpu_data_range[instance] = {'index': [], 'columns': ['precent'], 'data': []}
for time_data in values:
cpu_data_range[instance]['index'].append(time_data[0])
s = []
s.append(time_data[1])
cpu_data_range[instance]['data'].append(s)
return cpu_data_range
# with open("/home/sorawingwind/workhome/program/python/AnomalyDetection/src/source/10-16-record.json", "w") as f:
# json.dump(cpu_data_range, f)
def read_train_data():
with open("/home/sorawingwind/workhome/program/python/AnomalyDetection/src/source/05-06-record.json",
'r') as load_f:
cpu_data_range = json.load(load_f)
return cpu_data_range
def read_forecast_data():
with open("/home/sorawingwind/workhome/program/python/AnomalyDetection/src/source/05-07-record.json",
'r') as load_f:
cpu_data_range = json.load(load_f)
return cpu_data_range
def data_reader(cpu_data_range):
ins = {}
for instance, data in cpu_data_range.items():
timestamps = data["index"]
datas = data["data"]
items = []
for index, time in enumerate(timestamps):
item = [time, datas[index][0]]
items.append(item)
ins[instance] = np.array(items)
return ins | falcomlife/klog-ai | src/core/data/pretreatment.py | pretreatment.py | py | 2,476 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "da... |
32352030938 | import logging
import glob
import collections
import os
import clang.cindex
from clang.cindex import CursorKind
from . import helpers
from . import enum_decl
from . import class_struct_decl
from . import function_decl
_LOGGER = logging.getLogger(__name__)
def _detect_library_file():
version = os.getenv("PYCODEGEN_LIBCLANG", "")
candidates = glob.glob("/usr/lib/llvm-{version}*/lib/libclang*.so*".format(version=version))
if not candidates:
raise RuntimeError("Unable to find libclang")
# Select the latest libclang version
candidates.sort()
return candidates[-1]
ParserContext = collections.namedtuple("ParserContext", ["input_file"])
class ParserLibClang:
def __init__(self, library_file=None):
self._context = None
if not clang.cindex.Config.loaded:
if library_file is None:
library_file = _detect_library_file()
_LOGGER.debug("Using libclang from: %s", library_file)
clang.cindex.Config.set_library_file(library_file)
def dump(self, filename, arguments=None):
"""
Generate a tree view of the AST
:param filename: File to parse
:param arguments: Extra arguments to pass to clang
:return: String representation of the AST
"""
import asciitree
def format_node(cursor):
return '{name:<10} ({extra})'.format(
name=cursor.spelling or cursor.displayname,
extra=cursor.kind.name)
def get_children(cursor):
return helpers.get_children(cursor, self._context)
translation_unit = self._parse_file(filename, extra_arguments=arguments)
return asciitree.draw_tree(translation_unit.cursor, get_children, format_node)
def parse(self, filename, arguments=None):
"""
Parse and return a simplified version of the AST
:param filename: File to parse
:param arguments: Extra arguments to pass to clang
:return: AST representation
"""
translation_unit = self._parse_file(filename, extra_arguments=arguments)
return self._traverse(translation_unit.cursor)
def _parse_file(self, filename, extra_arguments):
self._context = ParserContext(input_file=filename)
index = clang.cindex.Index.create()
arguments = ["-x", "c++", "-D__CODEGEN__"]
if extra_arguments is not None:
arguments += extra_arguments
options = clang.cindex.TranslationUnit.PARSE_SKIP_FUNCTION_BODIES
translation_unit = index.parse(filename, args=arguments, options=options)
return translation_unit
def _handle_recurse(self, cursor, path=None):
if path is None:
path = []
result = []
for child in helpers.get_children(cursor, self._context):
child_data = self._traverse(child, path + [cursor.spelling])
if child_data is not None:
if isinstance(child_data, list):
result += child_data
else:
result.append(child_data)
return result
def _traverse(self, cursor, qualified_path=None):
if cursor.kind in [CursorKind.TRANSLATION_UNIT,
CursorKind.NAMESPACE]:
return self._handle_recurse(cursor, qualified_path)
if cursor.kind == CursorKind.ENUM_DECL:
return enum_decl.visit(cursor, qualified_path, self._context)
if cursor.kind in [CursorKind.CLASS_DECL, CursorKind.STRUCT_DECL]:
return class_struct_decl.visit(cursor, qualified_path, self._context)
if cursor.kind in [CursorKind.FUNCTION_DECL]:
return function_decl.visit(cursor, qualified_path, self._context)
_LOGGER.warning("Unhandled %s", str(cursor.kind))
return None
| blejdfist/pycodegen | pycodegen/frontend/frontend_cpp/parser_libclang.py | parser_libclang.py | py | 3,854 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"l... |
9815611814 | # author:Nicolo
# time:2017/7/23
# function:生成汉字字库并转换为图片
import codecs
import os
import pygame
start,end = (0x4E00, 0x9FA5)
with codecs.open("chinese.txt", "wb", encoding="utf-8") as f:
for codepoint in range(int(start),int(end)):
f.write(chr(codepoint))
chinese_dir = 'chinese'
if not os.path.exists(chinese_dir):
os.mkdir(chinese_dir)
pygame.init()
start,end = (0x4E00, 0x9FA5)#汉字编码范围
for codepoint in range(int(start),int(end)):
word = chr(codepoint)
font = pygame.font.Font("C:\Windows\Fonts\STZHONGS.TTF", 22)#当前目录下要有微软雅黑的字体文件msyh.ttc,或者去c:\Windows\Fonts目录下找
rtext = font.render(word, True, (0, 0, 0), (255, 255, 255))
pygame.image.save(rtext, os.path.join(chinese_dir,word+".png")) | X-Nicolo/ChineseToImg | wordToImg.py | wordToImg.py | py | 824 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "codecs.open",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": ... |
19354601917 | from __future__ import (absolute_import, print_function,
unicode_literals, division)
import time
import numpy as np
import pandas as pd
import requests
from bokeh import plotting
from bokeh.objects import ColumnDataSource
class QlogPlot:
def __init__(self, base, name, limit, ds):
self.name = name
self.var = requests.get("%s/variable/%s" % (base, name)).json()[name]
self.url = "%s/data/%s?limit=%i" % (base, name, limit)
ds.add([], "%s value" % name)
ds.add([], "%s time" % name)
self.update(ds)
self.plot(ds)
def plot(self, ds):
unit = self.var["unit"]
if self.var["logarithmic"]:
unit = unit + " (log10)"
plotting.line("%s time" % self.name, "%s value" % self.name,
x_axis_type="datetime", source=ds,
legend="%s (%s)" % (self.name, unit), title="")
plotting.circle("%s time" % self.name, "%s value" % self.name,
source=ds, size=2.,
legend="%s (%s)" % (self.name, unit), title="")
def update(self, ds):
df = pd.read_json(self.url)
y = np.array(df)
if self.var["logarithmic"]:
y = np.log10(y)
ds.data["%s value" % self.name] = y
ds.data["%s time" % self.name] = df.index
def simple_line_plot(base, names, limit, interval):
plotting.output_server("QLog")
plotting.hold()
plotting.figure()
ds = ColumnDataSource(data={})
plots = [QlogPlot(base, name, limit, ds) for name in names]
plotting.show()
while True:
time.sleep(interval)
for plot in plots:
plot.update(ds)
ds._dirty = True
plotting.session().store_obj(ds)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--base",
default="http://localhost:6881/1")
parser.add_argument("-l", "--limit", type=int, default=100)
parser.add_argument("-i", "--interval", type=float, default=5)
parser.add_argument("names", nargs="+")
args = parser.parse_args()
simple_line_plot(args.base, args.names, args.limit,
args.interval)
if __name__ == "__main__":
main()
| jordens/qlog | qlog/plot.py | plot.py | py | 2,229 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.line",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "bokeh.plotting.circle... |
274283973 | import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def boxplot(df, output_folder):
#simple version, only makes the 4 boxplots every dataset has in common
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)
#fig.suptitle('Air Beam', fontsize=20)
dat = [df['Temperature'].dropna()]
ax1.boxplot(dat, labels = ['Temperature'], vert = True)
dat = [df['Humidity'].dropna()]
ax2.boxplot(dat, labels = ['Humidity'], vert = True)
dat = [df['PM2.5'].dropna()]
ax3.boxplot(dat, labels = ['PM 2.5'], vert = True)
dat = [df['PM10.0'].dropna()]
ax4.boxplot(dat, labels = ['PM 10.0'], vert = True)
fig.subplots_adjust(wspace=0.5)
outpath = os.path.join(output_folder, 'boxplot.png')
fig.savefig(outpath)
return outpath
def humidity_graph(df, output_folder):
plt.close()
_, axarr = plt.subplots(2, figsize=[10,8], sharex = True)
axarr[0].plot(df['Datetime'], df['PM2.5'], label='PM 2.5')
axarr[0].plot(df['Datetime'], df['PM10.0'], label='PM 10.0', linestyle="--")
axarr[0].legend()
axarr[0].set_title('Particulate Matter and Humidity')
axarr[1].plot(df['Datetime'], df['Humidity'], label='Humidity (percent)')
axarr[1].legend()
fn = 'humidity_graph.png'
outpath = os.path.join(output_folder, fn)
plt.savefig(outpath, dpi='figure')
return outpath
def threshold_PM25(df, output_folder):
PM25_ANNUAL_PRIMARY_WHO = 10
PM25_ANNUAL_PRIMARY_NAAQS = 12
PM25_ANNUAL_SECONDARY_NAAQS = 15
PM25_24HR_WHO = 25
PM25_24HR_NAAQS = 35
_, axarr = plt.subplots(1, figsize=[10,8], sharex = True)
axarr.plot(df['Datetime'], df['PM2.5'], label='PM 2.5')
axarr.hlines(PM25_ANNUAL_PRIMARY_WHO, df['Datetime'][0], df['Datetime'].tail(1), color='#800080', linestyles='--', label='WHO Annual Primary')
axarr.text(df['Datetime'].tail(1), PM25_ANNUAL_PRIMARY_WHO + 0.2,'WHO AP')
axarr.hlines(PM25_ANNUAL_PRIMARY_NAAQS, df['Datetime'][0], df['Datetime'].tail(1), color='#006400', linestyles='-.', label='NAAQS Annual Primary')
axarr.text(df['Datetime'].tail(1), PM25_ANNUAL_PRIMARY_NAAQS + 0.2,'NAAQS AP')
axarr.hlines(PM25_ANNUAL_SECONDARY_NAAQS, df['Datetime'][0], df['Datetime'].tail(1), color='#FFBE42', linestyles='--', label='NAAQS Annual Secondary')
axarr.text(df['Datetime'].tail(1), PM25_ANNUAL_SECONDARY_NAAQS + 0.2,'NAAQS AS')
axarr.hlines(PM25_24HR_WHO, df['Datetime'][0], df['Datetime'].tail(1), color='#EE7600', linestyles='-.', label='WHO 24 Hour')
axarr.text(df['Datetime'].tail(1), PM25_24HR_WHO + 0.2,'WHO 24 Hr')
axarr.hlines(PM25_24HR_NAAQS, df['Datetime'][0], df['Datetime'].tail(1), color='r', linestyles='--', label='NAAQS 24 Hour')
axarr.text(df['Datetime'].tail(1), PM25_24HR_NAAQS + 0.2,'NAAQS 24 Hr')
axarr.legend()
axarr.set_title('Particulate Matter 2.5')
fn = 'pm25_graph.png'
outpath = os.path.join(output_folder, fn)
plt.savefig(outpath, dpi='figure')
return outpath
def threshold_PM10(df, output_folder):
PM10_24HR_WHO = 50
PM10_ANNUAL_PRIMARY_WHO = 20
PM10_24HR_NAAQS = 150
_, axarr = plt.subplots(1, figsize=[10,8], sharex = True)
axarr.plot(df['Datetime'], df['PM10.0'], label='PM 10.0')
axarr.hlines(PM10_ANNUAL_PRIMARY_WHO, df['Datetime'][0], df['Datetime'].tail(1), color='#006400', linestyles='--', label='WHO Annual Primary')
axarr.text(df['Datetime'].tail(1), PM10_ANNUAL_PRIMARY_WHO + 0.5,'WHO AP')
axarr.hlines(PM10_24HR_WHO, df['Datetime'][0], df['Datetime'].tail(1), color='#EE7600', linestyles='-.', label='WHO 24 Hour')
axarr.text(df['Datetime'].tail(1), PM10_24HR_WHO + 0.5,'WHO 24 Hr')
axarr.hlines(PM10_24HR_NAAQS, df['Datetime'][0], df['Datetime'].tail(1), color='r', linestyles='--', label='NAAQS 24 Hour')
axarr.text(df['Datetime'].tail(1), PM10_24HR_NAAQS + 0.5,'NAAQS 24 Hr')
axarr.legend()
axarr.set_title('Particulate Matter 10.0')
fn = 'pm10_graph.png'
outpath = os.path.join(output_folder, fn)
plt.savefig(outpath, dpi='figure')
return outpath
| bglowniak/Air-Quality-Analysis | src/main/python/vis_utils.py | vis_utils.py | py | 4,065 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.path.join... |
73156319784 | from flask import Flask,render_template,request,make_response
app=Flask(__name__)
@app.route('/')
def input():
return render_template('page2.html')
@app.route('/page3',methods=['POST','GET'])
def page3():
a=request.form.get('nos1',type=int)
b=request.form.get('nos2',type=int)
result=a+b
resp=make_response(render_template('page3.html',result=result))
resp.set_cookie('result',str(result))
return resp
@app.route('/page4',methods=['POST','GET'])
def page4():
    """Add a third number to the sum stored in the 'result' cookie and render page4."""
    addend = request.form.get("nos3", type=int)
    previous = int(request.cookies.get('result'))
    final_total = addend + previous
    return render_template('page4.html', result_1=str(final_total))
if __name__=='__main__':
    # Start the Flask development server (debug mode: auto-reload + debugger).
    app.run(debug=True)
{
"api_name": "flask.Flask",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.... |
74032441703 | import os
import json
import time
import numpy as np
from .FCNN_FA import FCNN_FA
class FCNN_KP(FCNN_FA):
    '''
    Description: Class to define a Fully Connected Neural Network (FCNN)
                 with the Kolen-Pollack (KP) algorithm as learning algorithm
    '''
    def __init__(self, sizes, save_dir):
        '''
        Description: initialize the biases, forward weights and backward weights using
                     a Gaussian distribution with mean 0, and variance 1.
        Params:
        - sizes: a list of size L; where L is the number of layers
                 in the deep neural network and each element of list contains
                 the number of neuron in that layer.
                 first and last elements of the list corresponds to the input
                 layer and output layer respectively
                 intermediate layers are hidden layers.
        - save_dir: the directory where all the data of experiment will be saved
        '''
        # All parameter initialization is inherited from the FCNN_FA base class.
        super(FCNN_KP, self).__init__(sizes, save_dir)

    def train(self, X_train, y_train, X_test, y_test, batch_size, learning_rate, epochs, test_frequency, weight_decay=1):
        '''
        Description: Batch-wise trains image features against corresponding labels.
                     The forward and backward weights and biases of the neural network are updated through
                     the Kolen-Pollack algorithm on batches using SGD
                     del_b and del_w are of same size as all the forward weights and biases
                     of all the layers. del_b and del_w contains the gradients which
                     are used to update forward weights and biases
        Params:
        - X_train, y_train: lists of training features and corresponding labels
        - X_test, y_test: lists of testing features and corresponding labels
        - batch_size: size of the batch
        - learning_rate: eta which controls the size of changes in weights & biases
        - epochs: no. of times to iterate over the whole data
        - test_frequency: the frequency of the evaluation on the test data
          NOTE(review): this parameter is never read in the body below — the
          test set is evaluated every epoch; confirm intended behaviour.
        - weight_decay: multiplicative shrink applied to both forward and
          backward weight matrices each batch (1 disables decay)
        '''
        n_batches = int(X_train.shape[0] / batch_size)
        for j in range(epochs):
            # initialize the epoch field in the data to store
            self.data['epoch_{}'.format(j)] = {}
            start = time.time()
            epoch_loss = []
            batch_iter = self.get_batch(X_train, y_train, batch_size)
            for i in range(n_batches):
                (batch_X, batch_y) = next(batch_iter)
                batch_loss, delta_del_b, delta_del_w = self.backpropagate(batch_X, batch_y)
                epoch_loss.append(batch_loss)
                del_b = delta_del_b
                del_w = delta_del_w

                # update weight and biases
                self.weights = [weight_decay * w - (learning_rate / batch_size)
                                * delw for w, delw in zip(self.weights, del_w)]
                self.biases = [b - (learning_rate / batch_size)
                               * delb for b, delb in zip(self.biases, del_b)]

                # Update the backward matrices of the Kolen-Pollack algorithm
                # It is worth noticing that updating the backward weight matrices B with the same
                # delw term as the forward matrices W is equivalent to the update equations 16 and 17
                # of the paper manuscript
                self.backward_weights = [weight_decay * w - (learning_rate / batch_size)
                                         * delw.T for w, delw in zip(self.backward_weights, del_w)]

            epoch_loss = np.mean(epoch_loss)
            self.data['epoch_{}'.format(j)]['loss'] = epoch_loss
            log_str = "\nEpoch {} completed in {:.3f}s, loss: {:.3f}".format(j, time.time() - start, epoch_loss)
            self.print_and_log(log_str)

            # Evaluate on test set
            test_accuracy = self.eval(X_test, y_test)
            log_str = "Test accuracy: {}%".format(test_accuracy)
            self.print_and_log(log_str)
            self.data['epoch_{}'.format(j)]['test_accuracy'] = test_accuracy

            # Compute angles between both weights and deltas
            deltas_angles, weights_angles = self.evaluate_angles(X_train, y_train)
            self.data['epoch_{}'.format(j)]['delta_angles'] = deltas_angles
            self.data['epoch_{}'.format(j)]['weight_angles'] = weights_angles

            # save results as a json file
            with open(os.path.join(self.save_dir, 'results.json'), 'w') as f:
                json.dump(self.data, f)
| makrout/Deep-Learning-without-Weight-Transport | fcnn/FCNN_KP.py | FCNN_KP.py | py | 4,665 | python | en | code | 31 | github-code | 36 | [
{
"api_name": "FCNN_FA.FCNN_FA",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 77... |
25743739281 | __all__ = [
"EBCOTCodec"
]
from copy import deepcopy
from multiprocessing import Pool
import numpy as np
from fpeg.base import Codec
from fpeg.config import read_config
from fpeg.funcs import parse_marker, cat_arrays_2d
config = read_config()

# JPEG2000 coding parameters read from the project configuration.
D = config.get("jpeg2000", "D")  # DWT decomposition depth
G = config.get("jpeg2000", "G")  # combined with epsilon_b into Kmax (eq. 10.22)
QCD = config.get("jpeg2000", "QCD")  # quantization default marker
mq_table = config.get("jpeg2000", "mq_table")  # (PETTable, CXTable) for the MQ coder
min_task_number = config.get("accelerate", "codec_min_task_number")
max_pool_size = config.get("accelerate", "codec_max_pool_size")
class EBCOTCodec(Codec):
  """
  EBCOT codec.

  Encodes/decodes lists of tiles with the EBCOT scheme; the tier-1 coding
  itself is done by the module-level helper functions, optionally fanned
  out to a process pool.
  """
  def __init__(self,
               name="EBCOT codec",
               mode="encode",
               D=D,
               G=G,
               QCD=QCD,
               accelerated=False
               ):
    """
    Init and set attributes of a canonical EBCOT codec.

    Explicit Attributes
    -------------------
    name: str, optional
      Name of the codec.
    mode: str, optional
      Mode of the codec, must in ["encode", "decode"].
    G: integer, optional
      a parameter for calculate Kmax
    D: integer, optional
      Depth of graphic.
    QCD: marker, optional
      Quantization default marker; parsed for epsilon_b.
    accelerated: bool, optional
      Whether the process would be accelerated by subprocess pool.
    """
    super().__init__()

    self.name = name
    self.mode = mode
    # based on equation 10.22 in jpeg2000
    self.D = D
    self.G = G
    self.QCD = QCD
    self.accelerated = accelerated
    self.epsilon_b, _ = parse_marker(self.QCD)
    self.Kmax = max(0, self.G + self.epsilon_b - 1)
    self.min_task_number = min_task_number
    self.max_pool_size = max_pool_size

  def encode(self, X, **params):
    """Encode a list of tiles; returns one bitcode (list) per tile."""
    self.logs[-1] += self.formatter.message("Trying to encode received data.")
    try:
      # NOTE(review): overriding epsilon_b here does not refresh self.Kmax —
      # confirm whether Kmax should be recomputed.
      self.epsilon_b = params["epsilon_b"]
    except KeyError:
      pass

    if self.accelerated:
      self.logs[-1] += self.formatter.message("Using multiprocess pool to accelerate EBCOT encoding.")
      inputs = [[x, self.D] for x in X]
      # self.task_number is presumably provided by the Codec base class — verify.
      with Pool(min(self.task_number, self.max_pool_size)) as p:
        bitcodes = p.starmap(_EBCOT_encode, inputs)
    else:
      bitcodes = [_EBCOT_encode(x, self.D) for x in X]

    return bitcodes

  def decode(self, bitcodes, **params):
    """Decode a list of bitcodes back into tiles."""
    self.logs[-1] += self.formatter.message("Trying to decode received data.")
    if self.accelerated:
      inputs = [[bitcode, self.D] for bitcode in bitcodes]
      with Pool(min(self.task_number, self.max_pool_size)) as p:
        X = p.starmap(_tile_decode, inputs)
    else:
      X = [_tile_decode(bitcode, self.D) for bitcode in bitcodes]

    # print(X[0])
    return X
def _EBCOT_encode(tile, D):
  """Run the EBCOT encoder on one tile and return its bitcode as a list.

  Thin wrapper around _tile_encode; D is the DWT decomposition depth.
  Call hierarchy:
    _EBCOT_encode -> _tile_encode -> _band_encode
      -> _embeddedBlockEncoder (three coding passes) -> _MQencode
  """
  return list(_tile_encode(tile, D))
def _MQencode(CX, D):
  """Arithmetically encode the decision bits D under their context labels CX.

  Implements the JPEG2000 MQ coder.  Per the way `mq_table` is read here:
  - PETTable rows: [next state on MPS, next state on LPS, switch flag,
    probability estimate p] (indices inferred from usage — verify against
    the table definition).
  - CXTable rows: [current state index, expected MPS symbol] per context.

  Returns an EBCOTparam whose .stream holds the emitted bytes.
  """
  PETTable, CXTable = deepcopy(mq_table)  # fresh adaptive state per call
  encoder = EBCOTparam()
  for i in range(len(D)):
    symbol = D[i][0]
    cxLabel = CX[i][0]
    expectedSymbol = CXTable[cxLabel][1]
    p = PETTable[CXTable[cxLabel][0]][3]  # probability estimate for this context
    encoder.A = encoder.A - p
    if encoder.A < p:
      # Conditional exchange of MPS and LPS
      expectedSymbol = 1 - expectedSymbol
    if symbol == expectedSymbol:
      # assign MPS the upper sub-interval
      encoder.C = encoder.C + np.uint32(p)
    else:
      # assign LPS the lower sub-interval
      encoder.A = np.uint32(p)
    if encoder.A < 32768:
      # Interval shrank below half: adapt the context state, then renormalize.
      if symbol == CXTable[cxLabel][1]:
        CXTable[cxLabel][0] = PETTable[CXTable[cxLabel][0]][0]
      else:
        CXTable[cxLabel][1] = CXTable[cxLabel][1] ^ PETTable[CXTable[cxLabel][0]][2]
        CXTable[cxLabel][0] = PETTable[CXTable[cxLabel][0]][1]
      while encoder.A < 32768:
        encoder.A = 2 * encoder.A
        encoder.C = 2 * encoder.C
        encoder.t = encoder.t - 1
        if encoder.t == 0:
          encoder = _transferbyte(encoder)
  encoder = _encode_end(encoder)  # flush the remaining bits of C
  return encoder
def _transferbyte(encoder):
  """Move completed bits from the code register C into the byte buffer/stream.

  Implements the MQ encoder's byte transfer with bit stuffing: after an
  emitted 0xFF byte only 7 bits are carried next time, so a carry cannot
  propagate past one byte boundary.
  """
  CPartialMask = np.uint32(133693440)  # 00000111111110000000000000000000
  CPartialCmp = np.uint32(4161273855)  # 11111000000001111111111111111111
  CMsbsMask = np.uint32(267386880)  # msb flags, bits 27-20: 00001111111100000000000000000000
  CMsbsCmp = np.uint32(4027580415)  # complement of CMsbsMask: 11110000000011111111111111111111
  CCarryMask = np.uint32(2 ** 27)  # carry bit: 00001000000000000000000000000000
  if encoder.T == 255:
    # Cannot propagate any carry into T: bit stuffing is required.
    encoder = _putbyte(encoder)
    encoder.T = np.uint8((encoder.C & CMsbsMask) >> 20)  # take bits 27-20
    encoder.C = encoder.C & CMsbsCmp  #
    encoder.t = 7
  else:
    # Propagate any carry from C into T.
    encoder.T = encoder.T + np.uint8((encoder.C & CCarryMask) >> 27)
    # NOTE(review): XOR clears bit 27 only when the carry was set; when it was
    # 0 this SETS it, and CPartialCmp below does not clear bit 27 — standard
    # MQ coders clear the carry with C &= ~mask. Verify against a reference.
    encoder.C = encoder.C ^ CCarryMask
    encoder = _putbyte(encoder)
    if encoder.T == 255:
      encoder.T = np.uint8((encoder.C & CMsbsMask) >> 20)
      encoder.C = encoder.C & CMsbsCmp
      encoder.t = 7
    else:
      encoder.T = np.uint8((encoder.C & CPartialMask) >> 19)
      encoder.C = encoder.C & CPartialCmp
      encoder.t = 8
  return encoder
def _putbyte(encoder):
# 将T中的内容写入字节缓存
if encoder.L >= 0:
encoder.stream = np.append(encoder.stream, encoder.T)
encoder.L = encoder.L + 1
return encoder
def _MQ_decode(stream, CX):
  """Arithmetically decode one decision bit per context label in CX.

  Mirrors _MQencode: replays the same adaptive context state machine while
  consuming bytes from `stream`.  Returns the decoded decisions as a list
  of one-element lists (same layout as the encoder's D input).
  """
  PETTable, CXTable = deepcopy(mq_table)
  # MQ decode initialization: load the first code bytes into C.
  encoder = EBCOTparam()
  encoder.A = np.uint16(0)
  encoder.C = np.uint32(0)
  encoder.t = np.uint8(0)
  encoder.T = np.uint8(0)
  encoder.L = np.int32(0)
  encoder.stream = stream
  encoder = _fill_lsb(encoder)
  encoder.C = encoder.C << encoder.t
  encoder = _fill_lsb(encoder)
  encoder.C = encoder.C << 7
  encoder.t = encoder.t - 7
  encoder.A = np.uint16(2 ** 15)
  # MQ decode procedure
  CActiveMask = np.uint32(16776960)  # 00000000111111111111111100000000
  CActiveCmp = np.uint32(4278190335)  # 11111111000000000000000011111111
  decodeD = []
  for i in range(len(CX)):
    cxLabel = CX[i][0]
    expectedSymbol = CXTable[cxLabel][1]
    p = PETTable[CXTable[cxLabel][0]][3]
    encoder.A = encoder.A - np.uint16(p)
    if encoder.A < np.uint16(p):
      # Conditional exchange of MPS and LPS.
      expectedSymbol = 1 - expectedSymbol
    if ((encoder.C & CActiveMask) >> 8) < p:
      # Active region below p: the less probable symbol was coded.
      symbol = 1 - expectedSymbol
      encoder.A = np.uint16(p)
    else:
      symbol = expectedSymbol
      temp = ((encoder.C & CActiveMask) >> 8) - np.uint32(p)
      encoder.C = encoder.C & CActiveCmp
      encoder.C = encoder.C + np.uint32((np.uint32(temp << 8)) & CActiveMask)
    if encoder.A < 2 ** 15:
      # Adapt the context state exactly as the encoder did, then renormalize.
      if symbol == CXTable[cxLabel][1]:
        CXTable[cxLabel][0] = PETTable[CXTable[cxLabel][0]][0]
      else:
        CXTable[cxLabel][1] = CXTable[cxLabel][1] ^ PETTable[CXTable[cxLabel][0]][2]
        CXTable[cxLabel][0] = PETTable[CXTable[cxLabel][0]][1]
      while encoder.A < 2 ** 15:
        if encoder.t == 0:
          encoder = _fill_lsb(encoder)
        encoder.A = 2 * encoder.A
        encoder.C = 2 * encoder.C
        encoder.t = encoder.t - 1
    # print(i, symbol)
    decodeD.append([symbol])
  return decodeD
def _fill_lsb(encoder):
encoder.t = 8
if encoder.L == len(encoder.stream) or \
(encoder.T == 255 and encoder.stream[encoder.L] > 143):
encoder.C = encoder.C + 255
else:
if encoder.T == 255:
encoder.t = 7
encoder.T = encoder.stream[encoder.L]
encoder.L = encoder.L + 1
encoder.C = encoder.C + np.uint32((encoder.T) << (8 - encoder.t))
return encoder
def _tile_encode(tile, D, h=64, w=64):
  """Encode one tile (all three colour channels) into a single bitcode array.

  Params:
  - tile: wavelet decomposition; tile[0] is the LL approximation
    (rows x cols x 3) and tile[i] for i >= 1 holds the (LH, HL, HH)
    detail bands of level i, each rows x cols x 3.
  - D: DWT decomposition depth.
  - h, w: code-block height and width.

  For each channel (0, 1, 2) the LL band is encoded first, followed by the
  LH/HL/HH bands of every level; the whole tile is terminated by the
  marker 2051.  The previous implementation repeated the per-channel code
  three times and accumulated a `streamOnly` array that was never returned;
  both are folded into one loop here.
  """
  pieces = []
  for channel in range(3):
    # Approximation band first, then all detail bands, level by level.
    band_code, _ = _band_encode(tile[0][:, :, channel], 'LL', h, w)
    pieces.append(band_code)
    for level in range(1, D + 1):
      for band_index, band_mark in enumerate(('LH', 'HL', 'HH')):
        band_code, _ = _band_encode(tile[level][band_index][:, :, channel], band_mark, h, w)
        pieces.append(band_code)
  pieces.append([2051])  # tile terminator marker
  return np.hstack(pieces)
def _band_encode(tile, bandMark, h=64, w=64, num=8):
  """Encode one subband as a sequence of MQ-coded code-blocks.

  Stream layout: [rows, cols, CX, 2048, MQ bytes, 2048, plane count, 2048,
  ... per block ..., 2049, 2050].
  Returns (bitcode, streamOnly) where streamOnly holds only the raw MQ bytes.
  """
  rows, cols = np.shape(tile)
  # Zero-pad so the band splits into whole h-by-w code-blocks.
  padded = np.pad(tile, ((0, h - rows % h), (0, w - cols % w)), 'constant')
  bitcode = [rows, cols]
  streamOnly = []
  for top in range(0, rows, h):
    for left in range(0, cols, w):
      block = padded[top:top + h, left:left + w]
      CX, D, plane_count = _embeddedBlockEncoder(block, bandMark, h, w, num)
      coder = _MQencode(CX, D)
      bitcode = np.hstack((bitcode, CX.flatten(), [2048], coder.stream, [2048], plane_count, [2048]))
      streamOnly = np.hstack((streamOnly, coder.stream))
  bitcode = np.hstack((bitcode, [2049], [2050]))  # band terminator markers
  return (bitcode, streamOnly)
def _embeddedBlockEncoder(codeBlock, bandMark, h=64, w=64, num=8):
  """Bit-plane encode one code-block into (CX, D) context/decision pairs.

  Builds the sign plane and magnitude bit-planes of `codeBlock`, then runs
  the three EBCOT coding passes over every bit-plane, most significant
  first.  Returns (CX, D, number_of_bit_planes).
  NOTE(review): the `num` parameter is never used — the plane count comes
  from the block's maximum magnitude — and it is shadowed by the loop
  variable in the list comprehension below.
  """
  # S1: significance flags, S2: "already refined" flags,
  # S3: "coded in the current plane" flags.
  S1 = np.zeros((h, w))
  S2 = np.zeros((h, w))
  S3 = np.zeros((h, w))
  MaxInCodeBlock = len(bin(int(np.max(abs(codeBlock)))))-2
  signs = (- np.sign(codeBlock) + 1) // 2  # positive: 0, negative: 1
  bitPlane = np.zeros((h,w,MaxInCodeBlock),dtype=np.uint8)
  for i in range(h):
    for j in range(w):
      # number = bin(np.abs(codeBlock[i][j], dtype=np.int64))[2:]
      number = bin(np.abs(codeBlock[i][j]))[2:]
      # Left-pad the binary expansion to MaxInCodeBlock digits.
      temp = [0] * (MaxInCodeBlock-len(number)) + [int(num) for num in number]
      bitPlane[i][j] = np.array(temp)
  # Reorder to (plane, row, col) so bitPlane[i] is one bit-plane.
  bitPlane = np.transpose(bitPlane, (2, 0, 1))

  # For Test
  """
  signs = np.zeros((8,8))
  bitPlane = np.zeros((2,8,8))
  bitPlane[0][1][1] = 1
  bitPlane[0][4][4] = 1
  bitPlane[1][0][2] = 1
  bitPlane[1][1] = np.array([0,1,0,0,1,1,0,0])
  bitPlane[1][2][2] = 1
  bitPlane[1][3][3] = 1
  bitPlane[1][4][5] = 1
  bitPlane[1][5] = np.array([0,0,0,0,1,1,0,1])
  bitPlane[1][6][6] = 1
  """
  # Generous upper bound on the number of decisions that can be produced.
  sizeofCXandD = h*w*MaxInCodeBlock *5
  CX = np.zeros((sizeofCXandD, 1), dtype=np.uint32)
  D = np.zeros((sizeofCXandD, 1), dtype=np.uint32)
  pointer = 0
  for i in range(MaxInCodeBlock ):
    ######
    # three function need rename
    D, CX, S1, S3, pointer = _SignifiancePropagationPass(D, CX, S1, S3, pointer, bitPlane[i], bandMark, signs, w, h)
    D, CX, S2, pointer = _MagnitudeRefinementPass(D, CX, S1, S2, S3, pointer, bitPlane[i], w, h)
    D, CX, pointer, S1 = _CLeanUpPass(D, CX, S1, S3, pointer, bitPlane[i], bandMark, signs, w, h)
    S3 = np.zeros((h, w))
  # Trim the preallocated arrays down to the decisions actually produced.
  CX_final = CX[0:pointer]
  D_final = D[0:pointer]
  return CX_final, D_final, MaxInCodeBlock
# three encode pass start here
# in the sequence of significancePass,magnitudepass,_cleanuppass.
def _SignifiancePropagationPass(D, CX, S1, S3, pointer, plane, bandMark, signs, w=64, h=64):
  """Significance propagation pass (EBCOT pass 1).

  Codes, in stripe order, every still-insignificant sample that has at
  least one significant neighbour: one zero-coding decision, plus one
  sign-coding decision when the sample becomes significant.

  Params:
  - D, CX: preallocated decision/context arrays, filled from `pointer` on.
  - S1: significance flags, updated in place as samples turn significant.
  - S3: marks samples coded in the current plane.
  - plane: current magnitude bit-plane; bandMark: subband name;
    signs: sign plane (0 positive, 1 negative).
  Returns the updated (D, CX, S1, S3, pointer).
  """
  S1extend = np.pad(S1, ((1, 1), (1, 1)), 'constant')
  rounds = h // 4  # stripes of 4 rows, scanned column by column
  for i in range(rounds):
    for col in range(w):
      for ii in range(4):
        row = 4 * i + ii
        if S1[row][col] != 0:
          continue  # is significant
        # Sum of the 8 neighbours in the padded significance map.
        temp = S1extend[row][col] + S1extend[row + 1][col] + S1extend[row + 2][col] + S1extend[row][col + 1] + \
               S1extend[row + 2][col + 1] + S1extend[row][col + 2] + S1extend[row + 1][col + 2] + S1extend[row + 2][
                 col + 2]
        if temp == 0:
          continue  # is insignificant
        tempCx = _ZeroCoding(S1extend[row:row + 3, col:col + 3], bandMark)
        D[pointer][0] = plane[row][col]
        CX[pointer][0] = tempCx
        pointer = pointer + 1
        S3[row][col] = 1  # mark that plane[row][col] has been coded
        if plane[row][col] == 1:  # _signcoding
          signComp, tempCx = _SignCoding(S1extend[row:row + 3, col:col + 3], signs[row][col])
          D[pointer][0] = signComp
          CX[pointer][0] = tempCx
          pointer = pointer + 1
          S1[row][col] = 1  # mark as significant
          S1extend = np.pad(S1, ((1, 1), (1, 1)), 'constant')
  return D, CX, S1, S3, pointer
def _MagnitudeRefinementPass(D, CX, S1, S2, S3, pointer, plane, w=64, h=64):
  """Magnitude refinement pass (EBCOT pass 2).

  Codes one refinement bit for every sample that is already significant
  (S1 == 1) and was not visited earlier in this plane (S3 == 0); S2 marks
  samples that have been refined at least once.
  Returns the updated (D, CX, S2, pointer).
  """
  S1extend = np.pad(S1, ((1, 1), (1, 1)), 'constant')
  rounds = h // 4
  for i in range(rounds):
    for col in range(w):
      for ii in range(4):
        row = 4 * i + ii
        if S1[row][col] != 1 or S3[row][col] != 0:
          continue
        tempCx = _MagnitudeRefinementCoding(S1extend[row:row + 3, col:col + 3], S2[row][col])
        S2[row][col] = 1  # Mark that the element has been refined
        D[pointer][0] = plane[row][col]
        CX[pointer][0] = tempCx
        pointer = pointer + 1
  return D, CX, S2, pointer
def _CLeanUpPass(D, CX, S1, S3, pointer, plane, bandMark, signs, w=64, h=64):
  """Clean-up pass (EBCOT pass 3).

  Codes every sample not visited by the first two passes.  A column stripe
  whose four samples are all uncoded, insignificant and have an entirely
  insignificant neighbourhood is run-length coded; remaining samples get
  zero coding (and sign coding on becoming significant).
  Returns the updated (D, CX, pointer, S1).
  """
  S1extend = np.pad(S1, ((1, 1), (1, 1)), 'constant')
  rounds = h // 4
  for i in range(rounds):
    for col in range(w):
      ii = 0
      row = 4 * i
      tempSum = np.sum(S1extend[row:row + 6, col:col + 3]) + np.sum(S3[row:row + 4, col])
      # Whole column stripe uncoded, insignificant, and with an
      # insignificant neighbourhood: candidate for run-length coding.
      if tempSum == 0:
        ii, tempD, tempCx = _RunLengthCoding(plane[row:row + 4, col])
        if len(tempD) == 1:
          # Run of four zeros: one run-length decision.
          D[pointer] = tempD
          CX[pointer] = tempCx
          pointer = pointer + 1
        else:
          # Run broken: run-length hit plus two position bits.
          D[pointer], D[pointer + 1], D[pointer + 2] = tempD[0], tempD[1], tempD[2]
          CX[pointer], CX[pointer + 1], CX[pointer + 2] = tempCx[0], tempCx[1], tempCx[2]
          pointer = pointer + 3
          # sign coding
          row = i * 4 + ii - 1
          signComp, tempCx = _SignCoding(S1extend[row:row + 3, col:col + 3], signs[row][col])
          D[pointer] = signComp
          CX[pointer] = tempCx
          pointer = pointer + 1
          S1[row][col] = 1
          S1extend = np.pad(S1, ((1, 1), (1, 1)), 'constant')
      while ii < 4:
        # Code whatever remains of the stripe column sample by sample.
        row = i * 4 + ii
        ii = ii + 1
        if S1[row][col] != 0 or S3[row][col] != 0:
          continue
        tempCx = _ZeroCoding(S1extend[row:row + 3, col:col + 3], bandMark)
        D[pointer] = plane[row][col]
        CX[pointer] = tempCx
        pointer = pointer + 1
        if plane[row][col] == 1:  # _signcoding
          signComp, tempCx = _SignCoding(S1extend[row:row + 3, col:col + 3], signs[row][col])
          D[pointer][0] = signComp
          CX[pointer][0] = tempCx
          pointer = pointer + 1
          S1[row][col] = 1  # mark as significant
          S1extend = np.pad(S1, ((1, 1), (1, 1)), 'constant')
  return D, CX, pointer, S1
# here is some function used by three passes
def _SignCoding(neighbourS1, sign):
# input neighbourS1: size 3*3, matrix of significance
# input sign
# output: signComp,(equal: 0, not equal: 1) context
if len(neighbourS1) == 3 and len(neighbourS1[0]) == 3:
hstr = str(int(neighbourS1[1][0])) + str(int(neighbourS1[1][2]))
vstr = str(int(neighbourS1[0][1])) + str(int(neighbourS1[2][1]))
dict = {
'00': 0, '1-1': 0, '-11': 0, '01': 1, '10': 1, '11': 1,
'0-1': -1, '-10': -1, '-1-1': -1
}
h = dict[hstr]
v = dict[vstr]
hAndv = str(h) + str(v)
hv2Sign = {
'11': 0, '10': 0, '1-1': 0, '01': 0, '00': 0,
'0-1': 1, '-11': 1, '-10': 1, '-1-1': 1
}
hv2Context = {
'11': 13, '10': 12, '1-1': 11, '01': 10, '00': 9,
'0-1': 10, '-11': 11, '-10': 12, '-1-1': 13
}
signPredict = hv2Sign[hAndv]
context = hv2Context[hAndv]
signComp = int(sign) ^ signPredict
else:
# self.logs[-1] += self.formatter.warning("_SignCoding: Size of neighbourS1 not valid")
signComp = -1
context = -1
"""
try:
raise ValidationError('_SignCoding: Size of neighbourS1 not valid')
except ValidationError as e:
print(e.args)
signComp = -1
context = -1
"""
return signComp, context
def _ZeroCoding(neighbourS1, bandMark):
# input neighbourS1: size 3*3, matrix of significance
# input s2: whether it is the first time for Magnitude Refinement Coding
# output: context
if len(neighbourS1) == 3 and len(neighbourS1[0]) == 3:
h = neighbourS1[1][0] + neighbourS1[1][2]
v = neighbourS1[0][1] + neighbourS1[2][1]
d = neighbourS1[0][0] + neighbourS1[0][2] + neighbourS1[2][0] + neighbourS1[2][2]
if bandMark == 'LL' or bandMark == 'LH':
if h == 2:
cx = 8
elif h == 1 and v >= 1:
cx = 7
elif h == 1 and v == 0 and d >= 1:
cx = 6
elif h == 1 and v == 0 and d == 0:
cx = 5
elif h == 0 and v == 2:
cx = 4
elif h == 0 and v == 1:
cx = 3
elif h == 0 and v == 0 and d >= 2:
cx = 2
elif h == 0 and v == 0 and d == 1:
cx = 1
else:
cx = 0
elif bandMark == 'HL':
if v == 2:
cx = 8
elif v == 1 and h >= 1:
cx = 7
elif v == 1 and h == 0 and d >= 1:
cx = 6
elif v == 1 and h == 0 and d == 0:
cx = 5
elif v == 0 and h == 2:
cx = 4
elif v == 0 and h == 1:
cx = 3
elif v == 0 and h == 0 and d >= 2:
cx = 2
elif v == 0 and h == 0 and d == 1:
cx = 1
else:
cx = 0
elif bandMark == 'HH':
hPlusv = h + v
if d >= 3:
cx = 8
elif d == 2 and hPlusv >= 1:
cx = 7
elif d == 2 and hPlusv == 0:
cx = 6
elif d == 1 and hPlusv >= 2:
cx = 5
elif d == 1 and hPlusv == 1:
cx = 4
elif d == 1 and hPlusv == 0:
cx = 3
elif d == 0 and hPlusv >= 2:
cx = 2
elif d == 0 and hPlusv == 1:
cx = 1
else:
cx = 0
else:
# self.logs[-1] += self.formatter.warning('_ZeroCoding: bandMark not valid')
cx = -1
"""
try:
raise ValidationError('_ZeroCoding: bandMark not valid')
except ValidationError as e:
print(e.args)
cx = -1
"""
else:
# self.logs[-1] += self.formatter.warning('_ZeroCoding: Size of neighbourS1 not valid')
cx = -1
"""
try:
raise ValidationError('_ZeroCoding: Size of neighbourS1 not valid')
except ValidationError as e:
print(e.args)
cx = -1
"""
return cx
def _RunLengthCoding(listS1):
# input listS1: size 1*4, list of significance
# output n: number of elements encoded
# output d: 0 means the _RunLengthCoding does not end.
# [1, x, x] means the _RunLengthCoding ends and the position is indicated.
# output cx: context
if listS1.__len__() == 4:
if listS1[0] == 0 and listS1[1] == 0 and listS1[2] == 0 and listS1[3] == 0:
n = 4
d = [0]
cx = [17]
elif listS1[0] == 1:
n = 1
d = [1, 0, 0]
cx = [17, 18, 18]
elif listS1[0] == 0 and listS1[1] == 1:
n = 2
d = [1, 0, 1]
cx = [17, 18, 18]
elif listS1[0] == 0 and listS1[1] == 0 and listS1[2] == 1:
n = 3
d = [1, 1, 0]
cx = [17, 18, 18]
elif listS1[0] == 0 and listS1[1] == 0 and listS1[2] == 0 and listS1[3] == 1:
n = 4
d = [1, 1, 1]
cx = [17, 18, 18]
else:
# self.logs[-1] += self.formatter.warning('_RunLengthCoding: listS1 not valid')
n, d, cx = 0, -1, -1
"""
try:
raise ValidationError('_RunLengthCoding: listS1 not valid')
except ValidationError as e:
print(e.args)
n, d, cx = 0, -1, -1
"""
else:
# self.logs[-1] += self.formatter.warning('_RunLengthCoding: length of listS1 not valid')
n, d, cx = 0, -1, -1
"""
try:
raise ValidationError('_RunLengthCoding: length of listS1 not valid')
except ValidationError as e:
print(e.args)
n, d, cx = 0, -1, -1
"""
return n, d, cx
def _MagnitudeRefinementCoding(neighbourS1, s2):
# input neighbourS1: size 3*3, matrix of significance
# input s2: whether it is the first time for Magnitude Refinement Coding
# output: context
if len(neighbourS1) == 3 and len(neighbourS1[0]) == 3:
temp = np.sum(neighbourS1) - neighbourS1[1][1]
if s2 == 1:
cx = 16
elif s2 == 0 and temp >= 1:
cx = 15
else:
cx = 14
else:
# self.logs[-1] += self.formatter.warning('_MagnitudeRefinementCoding: Size of neighbourS1 not valid')
cx = -1
"""
try:
raise ValidationError('_MagnitudeRefinementCoding: Size of neighbourS1 not valid')
except ValidationError as e:
print(e.args)
cx = -1
"""
return cx
def _encode_end(encoder):
  """Flush the remaining significant bits of the code register C.

  Emits bytes until all information accumulated in C has reached the
  output stream, then performs one final transfer.
  """
  nbits = 27 - 15 - encoder.t  # bits of C still to be pushed out
  encoder.C = encoder.C * np.uint32(2 ** encoder.t)
  while nbits > 0:
    encoder = _transferbyte(encoder)
    nbits = nbits - encoder.t
    encoder.C = encoder.C * np.uint32(2 ** encoder.t)
  encoder = _transferbyte(encoder)
  return encoder
def _tile_decode(codestream, D):
  """Rebuild one tile's wavelet structure from the EBCOT code stream.

  Splits the stream at the band terminator 2050 into 3 * (3*D + 1) band
  segments (one LL plus 3 detail bands per level, for each of the three
  colour channels), decodes each, and reassembles the per-channel bands
  with cat_arrays_2d into the layout produced by the encoder.
  """
  _depthOfDWT = D
  temp = []
  for i in range(0, 9 * _depthOfDWT + 3):
    _index = codestream.index(2050)
    deStream = codestream[0:_index + 1]
    temp.append(_band_decode(deStream))
    codestream = codestream[_index + 1:]
  # Offsets of the second and third colour channels inside `temp`.
  start1 = _depthOfDWT*3+1
  start2 = _depthOfDWT*6+2
  tile = [cat_arrays_2d([temp[0],
                         temp[start1],
                         temp[start2]])]
  for i in range(_depthOfDWT):
    tile.append((cat_arrays_2d([temp[3 * i + 1],
                                temp[3 * i + start1+1],
                                temp[3 * i + start2+1]]),
                 cat_arrays_2d([temp[3 * i + 2],
                                temp[3 * i + start1+2],
                                temp[3 * i + start2+2]]),
                 cat_arrays_2d([temp[3 * i + 3],
                                temp[3 * i + start1+3],
                                temp[3 * i + start2+3]])))
  return tile
def _band_decode(codestream, h=64, w=64, num=32):
  """Decode one subband from its code-stream segment.

  Reads the band dimensions, then per code-block: context labels (up to a
  2048 marker), the MQ byte stream (up to 2048) and the bit-plane count
  (up to 2048).  The trailing 2049/2050 markers are verified.
  NOTE(review): h_num/w_num expect one more block row/column than
  _band_encode emits when the band size is an exact multiple of h/w —
  confirm band sizes can never be exact multiples.
  """
  h_cA = codestream[0]
  w_cA = codestream[1]
  codestream = codestream[2:]
  h_num = h_cA // h + 1
  w_num = w_cA // w + 1
  band_extend = np.zeros((h_num * h, w_num * w))
  for i in range(0, h_num):
    for j in range(0, w_num):
      _index = codestream.index(2048)
      deCX = codestream[0:_index]
      # NOTE(review): resizing to _index + 1 appends a repeated element,
      # making _MQ_decode emit one extra decision — verify this is intended.
      deCX = np.resize(deCX, (_index + 1, 1))
      codestream = codestream[_index + 1:]
      _index = codestream.index(2048)
      deStream = codestream[0:_index]
      codestream = codestream[_index + 1:]
      num = codestream[0]  # bit-plane count for this block
      codestream = codestream[2:]  # skip the count and its 2048 marker
      decodeD = _MQ_decode(deStream, deCX)
      band_extend[i * h:(i + 1) * h, j * w:(j + 1) * w] = _decode_block(decodeD, deCX, h, w, num)
  if codestream[0] != 2049:
    print("Error!")
  codestream = codestream[1:]
  if codestream[0] != 2050:
    print("Error!")
  print("finish one band")
  return band_extend[0:h_cA, 0:w_cA]
def _decode_block(D, CX, h=64, w=64, num=32):
  """Decode one code-block from its decisions D and context labels CX.

  Replays the three coding passes per bit-plane to rebuild the magnitude
  bits and sign plane, then recombines them into signed sample values.
  """
  deS1 = np.uint32(np.zeros((h, w)))  # significance flags
  deS2 = np.uint32(np.zeros((h, w)))  # "refined before" flags
  deS3 = np.uint32(np.zeros((h, w)))  # "coded in current plane" flags
  signs = np.uint32(np.zeros((h, w)))
  V = np.uint32(np.zeros((num, h, w)))  # one slice per magnitude bit-plane
  deCode = np.zeros((h, w))
  pointer = 0
  for i in range(num):
    V[i, :, :], signs, deS1, deS3, pointer = _SignificancePassDecoding(V[i, :, :], D, CX, deS1, deS3, pointer, signs, w,
                                                                       h)
    V[i, :, :], deS2, pointer = _MagnitudePassDecoding(V[i, :, :], D, deS1, deS2, deS3, pointer, w, h)
    V[i, :, :], deS1, deS3, signs, pointer = _CleanPassDecoding(V[i, :, :], D, CX, deS1, deS3, pointer, signs, w, h)
    deS3 = np.zeros((h, w))
  V = np.transpose(V, (1, 2, 0))
  tempV = np.zeros((h,w))
  for i in range(h):
    for j in range(w):
      # Recombine the bit-planes, most significant plane first.
      tempV[i][j] = sum([V[i][j][k]*2**(num-1-k) for k in range(num)])
  for i in range(h):
    for j in range(w):
      # Apply the decoded sign (0 positive, 1 negative).
      deCode[i][j] = (1 - 2 * signs[i][j]) * tempV[i][j]
  return deCode
def _SignificancePassDecoding(V, D, CX, deS1, deS3, pointer, signs, w=64, h=64):
  """Decoder counterpart of the significance propagation pass.

  Visits the same samples in the same stripe order as the encoder and
  consumes one decision per visited sample, plus one sign decision when a
  sample becomes significant.  Returns (V, signs, deS1, deS3, pointer).
  """
  S1extend = np.pad(deS1, ((1, 1), (1, 1)), 'constant')
  rounds = h // 4
  for i in range(rounds):
    for col in range(w):
      for ii in range(4):
        row = 4 * i + ii
        # Neighbour significance sum, excluding the centre sample.
        temp = np.sum(S1extend[row:row + 3, col:col + 3]) - S1extend[row + 1][col + 1]
        if deS1[row][col] != 0 or temp == 0:
          continue
        ###
        if pointer>=len(D):
          # Decision list exhausted (guard for truncated streams).
          continue
        V[row][col] = D[pointer][0]
        pointer = pointer + 1
        deS3[row][col] = 1
        if V[row][col] == 1:
          signs[row][col] = _SignDecoding(D[pointer], CX[pointer], S1extend[row:row + 3, col:col + 3])
          pointer = pointer + 1
          deS1[row][col] = 1
          S1extend = np.pad(deS1, ((1, 1), (1, 1)), 'constant')
  return V, signs, deS1, deS3, pointer
def _MagnitudePassDecoding(V, D, deS1, deS2, deS3, pointer, w=64, h=64):
rounds = h // 4
for i in range(rounds):
for col in range(w):
for ii in range(4):
row = 4 * i + ii
if deS1[row][col] != 1 or deS3[row][col] != 0:
continue
###
if pointer>=len(D):
continue
V[row][col] = D[pointer][0]
pointer = pointer + 1
deS2[row][col] = 1
return V, deS2, pointer
def _CleanPassDecoding(V, D, CX, deS1, deS3, pointer, signs, w=64, h=64):
  """Decoder counterpart of the clean-up pass.

  Replays run-length decoding for all-insignificant column stripes and
  zero/sign decoding for the remaining samples, mirroring _CLeanUpPass.
  Returns (V, deS1, deS3, signs, pointer).
  """
  a = pointer  # NOTE(review): unused
  S1extend = np.pad(deS1, ((1, 1), (1, 1)), 'constant')
  rounds = h // 4
  for i in range(rounds):
    for col in range(w):
      ii = 0
      row = 4 * i
      tempSum = np.sum(S1extend[row:row + 6, col:col + 3]) + np.sum(deS3[row:row + 4, col])
      # Whole column stripe uncoded, insignificant, and with an
      # insignificant neighbourhood: run-length decode it.
      if tempSum == 0:
        if CX.__len__() < pointer + 3:
          # Pad so that three entries can be sliced near the stream end.
          CXextend = np.pad(CX, (0, 2), 'constant')
          Dextend = np.pad(D, (0, 2), 'constant')
          tempCx = CXextend[pointer:pointer + 3]
          tempD = Dextend[pointer:pointer + 3]
        else:
          tempCx = CX[pointer:pointer + 3]
          tempD = D[pointer:pointer + 3]
        ii, tempV = _RunLengthDecoding(tempCx, tempD)
        if tempV == [0, 0, 0, 0]:
          V[row][col] = 0
          V[row + 1][col] = 0
          V[row + 2][col] = 0
          V[row + 3][col] = 0
          pointer = pointer + 1
        else:
          # Run broken: write zeros up to the first significant sample.
          if tempV == [1]:
            V[row][col] = 1
            pointer = pointer + 3
          elif tempV == [0, 1]:
            V[row][col] = 0
            V[row + 1][col] = 1
            pointer = pointer + 3
          elif tempV == [0, 0, 1]:
            V[row][col] = 0
            V[row + 1][col] = 0
            V[row + 2][col] = 1
            pointer = pointer + 3
          elif tempV == [0, 0, 0, 1]:
            V[row][col] = 0
            V[row + 1][col] = 0
            V[row + 2][col] = 0
            V[row + 3][col] = 1
            pointer = pointer + 3
          # sign coding
          row = row + ii - 1
          ###
          if pointer>=len(D):
            # Decision list exhausted (guard for truncated streams).
            continue
          signs[row][col] = _SignDecoding(D[pointer], CX[pointer], S1extend[row:row + 3, col:col + 3])
          pointer = pointer + 1
          deS1[row][col] = 1
          S1extend = np.pad(deS1, ((1, 1), (1, 1)), 'constant')
      while ii < 4:
        # Decode whatever remains of the stripe column sample by sample.
        row = i * 4 + ii
        ii = ii + 1
        if deS1[row][col] != 0 or deS3[row][col] != 0:
          continue
        ###
        if pointer>=len(D):
          continue
        V[row][col] = D[pointer][0]
        pointer = pointer + 1
        deS3[row][col] = 1
        if V[row][col] == 1:
          signs[row][col] = _SignDecoding(D[pointer], CX[pointer], S1extend[row:row + 3, col:col + 3])
          pointer = pointer + 1
          deS1[row][col] = 1
          S1extend = np.pad(deS1, ((1, 1), (1, 1)), 'constant')
  return V, deS1, deS3, signs, pointer
def _RunLengthDecoding(CX, D):
n = CX.__len__()
wrong = 1
if CX[0][0] == 17 and D[0][0] == 0 or CX[0][0] == 17 and CX[1][0] == 18 and CX[2][0] == 18 and D[0][0] == 1:
wrong = 0
if wrong == 0:
if D[0][0] == 0:
deLen = 4
V = [0, 0, 0, 0]
elif D[0][0] == 1 and D[1][0] == 0 and D[2][0] == 0:
deLen = 1
V = [1]
elif D[0][0] == 1 and D[1][0] == 0 and D[2][0] == 1:
deLen = 2
V = [0, 1]
elif D[0][0] == 1 and D[1][0] == 1 and D[2][0] == 0:
deLen = 3
V = [0, 0, 1]
elif D[0][0] == 1 and D[1][0] == 1 and D[2][0] == 1:
deLen = 4
V = [0, 0, 0, 1]
else:
# self.logs[-1] += self.formatter.warning('_RunLengthDecoding: D not valid')
deLen = -1
V = [-1]
"""
try:
raise ValidationError('_RunLengthDecoding: D not valid')
except ValidationError as e:
print(e.args)
deLen = -1
V = [-1]
"""
else:
# self.logs[-1] += self.formatter.warning('_RunLengthDecoding: CX not valid')
deLen = -1
V = [-1]
"""
try:
raise ValidationError('_RunLengthDecoding: CX not valid')
except ValidationError as e:
print(e.args)
deLen = -1
V = [-1]
"""
return deLen, V
def _SignDecoding(D, CX, neighbourS1):
if neighbourS1.__len__() == 3 and neighbourS1[0].__len__() == 3:
hstr = str(int(neighbourS1[1][0])) + str(int(neighbourS1[1][2]))
vstr = str(int(neighbourS1[0][1])) + str(int(neighbourS1[2][1]))
dict = {
'00': 0, '1-1': 0, '-11': 0, '01': 1, '10': 1, '11': 1,
'0-1': -1, '-10': -1, '-1-1': -1
}
h = dict[hstr]
v = dict[vstr]
hAndv = str(h) + str(v)
hv2Sign = {
'11': 0, '10': 0, '1-1': 0, '01': 0, '00': 0,
'0-1': 1, '-11': 1, '-10': 1, '-1-1': 1
}
hv2Context = {
'11': 13, '10': 12, '1-1': 11, '01': 10, '00': 9,
'0-1': 10, '-11': 11, '-10': 12, '-1-1': 13
}
temp = hv2Sign[hAndv]
deCX = hv2Context[hAndv]
if deCX == CX:
deSign = D[0] ^ temp
else:
# self.logs[-1] += self.formatter.warning('_SignDecoding: Context does not match. Error occurs.')
deSign = -1
"""
try:
raise ValidationError('_SignDecoding: Context does not match. Error occurs.')
except ValidationError as e:
print(e.args)
deSign = -1
"""
else:
# self.logs[-1] += self.formatter.warning('_SignDecoding: Size of neighbourS1 not valid')
deSign = -1
"""
try:
raise ValidationError('_SignDecoding: Size of neighbourS1 not valid')
except ValidationError as e:
print(e.args)
deSign = -1
"""
return deSign
class EBCOTparam(object):
    """Mutable register state shared by the MQ encode/decode processes.

    Attributes are initialised to the standard MQ-coder start values:
    interval length A = 0x8000, lower bound C = 0, current code byte
    number L = -1, temporary byte buffer T = 0, down-counter t = 12,
    and an empty output stream.
    """

    def __init__(self):
        self.A = np.uint16(0x8000)   # interval length (== 32768)
        self.C = np.uint32(0)        # lower bound register
        self.T = np.uint8(0)         # temporary byte buffer
        self.t = np.uint8(12)        # down-counter
        self.L = np.int32(-1)        # current code byte number
        self.stream = np.uint8([])   # emitted code bytes
'''
改了decodeblock,_embeddedBlockEncoder,banddecode和bandencode改了num的预设值
'''
# if __name__ == "__main__":
# h, w = 64, 64
# testblock = np.zeros((h, w), dtype=np.uint8)
# for i in range(h):
# for j in range(w):
# testblock[i][j] = i*4
# #(bitcode, _) = _band_encode(testblock, "LL", h=64, w=64, num=8)
# #decodeblock = _band_decode(list(bitcode), h=64, w=64, num=32)
# CX, D, bitplanelength = _embeddedBlockEncoder(testblock, "LL", h, w, num=8)
# encoder = _MQencode(CX, D)
# decodeD = _MQ_decode(encoder.stream, CX)
# decodeblock = _decode_block(D, CX, h, w, num=8)
# a = 1
| yetiansh/fpeg | fpeg/codec/EBCOT_codec.py | EBCOT_codec.py | py | 31,395 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "fpeg.config.read_config",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "fpeg.base.Codec",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "fpeg.funcs.parse_marker",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "multi... |
# Batch-evaluate a saved CNN emotion classifier on CSV feature files and
# report per-sample predictions, overall accuracy and a confusion matrix.
# Requires a CUDA-capable GPU (tensors are moved with .cuda()).
import torch
import torch.nn
import os,csv,datetime
import numpy as np
from cnn_structure import CNN
from torch.autograd import Variable
from sklearn.metrics import confusion_matrix

# MODEL_FOLDER = './model/'
MODEL_FOLDER = './'
DATA_FOLDER = './k1000_vec200/'
# Two-letter emotion code (taken from the file name) -> integer class id.
EMOTION = {'ne':0, 'ha':1, 'sa':2, 'an':3, 'di':4, 'su':5, 'or':6, 'nx':7}

file_list = os.listdir(MODEL_FOLDER)
# cnn = []
# for file in file_list:
#     if file.endswith('.pt'):
#         cnn.append(torch.load(MODEL_FOLDER+file))

input_data = []
input_tag = []
file_list = os.listdir(DATA_FOLDER)
begin_read = datetime.datetime.now()
for file in file_list:
    if file.endswith('.csv'):
        frames = []
        csvreader = csv.reader(open(DATA_FOLDER+file))
        for frame in csvreader:
            tmp = np.array(frame)
            tmp = [float(i) for i in tmp]
            frames.append(tmp)
        input_data.append(frames)
        # The emotion code sits at a fixed offset from the end of the name.
        # NOTE(review): assumes every CSV name follows the same length
        # convention -- confirm against the dataset's naming scheme.
        input_tag.append(EMOTION[file[-16:-14]])
input_data = np.array(input_data)
input_tag = np.array(input_tag)
end_time = datetime.datetime.now()
print('time of read ',len(input_data),' files: ',datetime.datetime.now()-begin_read)

cnn = torch.load('./k_1000_vec200_featureCNN_8emo_fold0date1227.pt')
acc_count = 0
results = []
time = []
for i in range(len(input_data)):
    begin = datetime.datetime.now()
    # Shape each sample as (batch=1, channel=1, frames, features).
    data_tensor = Variable(torch.from_numpy(np.array([[input_data[i]]]))).cuda().float()
    # tag_tensor = Variable(torch.from_numpy([test_tag[i]])).cuda().float()
    test_output, lastlayer = cnn(data_tensor)
    # Argmax over the class dimension gives the predicted label.
    # NOTE(review): .squeeze() on a batch of one yields a 0-d tensor;
    # tag_output[0] presumably relies on older PyTorch indexing -- verify.
    tag_output = torch.max(test_output,1)[1].cuda().data.squeeze()
    # data_tensor = Variable(torch.from_numpy(np.array([[test_data[i]]]))).float()
    # test_output, lastlayer = cnn(data_tensor)
    # tag_output = torch.max(test_output,1)[1].data.squeeze()
    print(tag_output[0], ' time:',datetime.datetime.now()-begin)
    time.append(datetime.datetime.now()-begin)
    results.append(tag_output[0])
    if tag_output[0]==input_tag[i]:
        acc_count+=1
acc = acc_count/(len(input_tag))
confusion = confusion_matrix(input_tag,results)
print(acc)
print(confusion)
# print('avg_time:',sum(time)/len(time))
{
"api_name": "os.listdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"... |
1488036241 | import os
import re
import json
import glob
import tempfile
import argparse
import ast
import pandas as pd
import sys
import shipyard_utils as shipyard
from google.cloud import bigquery
from google.oauth2 import service_account
from google.api_core.exceptions import NotFound
EXIT_CODE_UNKNOWN_ERROR = 3
EXIT_CODE_INVALID_CREDENTIALS = 200
EXIT_CODE_INVALID_DATASET = 201
EXIT_CODE_INVALID_SCHEMA = 202
EXIT_CODE_SCHEMA_MISMATCH = 203
EXIT_CODE_FILE_NOT_FOUND = 204
def get_args():
    """Parse and return the command-line arguments for the uploader."""
    parser = argparse.ArgumentParser()

    # Required BigQuery target and credentials.
    parser.add_argument('--dataset', dest='dataset', required=True)
    parser.add_argument('--table', dest='table', required=True)
    parser.add_argument('--service-account', dest='service_account',
                        required=True)

    # Upload behaviour.
    parser.add_argument('--upload-type', dest='upload_type',
                        default='append',
                        choices={'append', 'overwrite'},
                        required=False)

    # Source-file selection.
    parser.add_argument('--source-file-name-match-type',
                        dest='source_file_name_match_type',
                        default='exact_match',
                        choices={'exact_match', 'regex_match'},
                        required=False)
    parser.add_argument('--source-file-name', dest='source_file_name',
                        required=True)
    parser.add_argument('--source-folder-name', dest='source_folder_name',
                        default='', required=False)

    # Optional schema / CSV-parsing tweaks.
    parser.add_argument('--schema', dest='schema', default='',
                        required=False)
    parser.add_argument('--skip-header-rows', dest='skip_header_rows',
                        default='', required=False)
    parser.add_argument('--quoted-newline', dest='quoted_newline',
                        default=False, required=False)

    return parser.parse_args()
def set_environment_variables(args):
    """
    Point GOOGLE_APPLICATION_CREDENTIALS at the provided service account.

    If ``args.service_account`` is inline JSON, it is written to a temporary
    file whose path is returned (so the caller can delete it later);
    otherwise the value is assumed to already be a path to a credentials
    file and ``None`` is returned. This overrides system defaults.
    """
    credentials = args.service_account
    try:
        # Validate that the value is inline JSON before writing it out.
        json.loads(credentials)
    except (ValueError, TypeError):
        # Not JSON -> treat the value as a path to an existing key file.
        # (The original caught every Exception here, so an OSError from
        # mkstemp would wrongly send the raw JSON down this path too.)
        print('Using specified json credentials file')
        os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credentials
        return None

    fd, path = tempfile.mkstemp()
    print(f'Storing json credentials temporarily at {path}')
    with os.fdopen(fd, 'w') as tmp:
        tmp.write(credentials)
    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = path
    return path
def string_to_boolean(value):
    """Coerce a CLI-style string to a bool; booleans pass through unchanged.

    Raises argparse.ArgumentTypeError for unrecognised values.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'y'):
        return True
    if lowered in ('false', 'f', 'n'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def find_all_local_file_names(source_folder_name):
    """
    Return every file under the current working directory, searched
    recursively, optionally restricted to ``source_folder_name``.
    """
    search_root = os.path.normpath(
        f'{os.getcwd()}/{source_folder_name}/**')
    return [
        candidate
        for candidate in glob.glob(search_root, recursive=True)
        if os.path.isfile(candidate)
    ]
def find_all_file_matches(file_names, file_name_re):
    """
    Return the subset of ``file_names`` matching the regular expression
    ``file_name_re`` (searched anywhere in the name, in input order).
    """
    return [name for name in file_names if re.search(file_name_re, name)]
def combine_folder_and_file_name(folder_name, file_name):
    """
    Join ``folder_name`` and ``file_name`` into one normalized path.

    An empty ``folder_name`` yields just the file name (no leading slash).
    """
    # A single normpath suffices; the original normalized the same string
    # twice (normpath is idempotent).
    return os.path.normpath(
        f'{folder_name}{"/" if folder_name else ""}{file_name}')
def copy_from_csv(
        client,
        dataset,
        table,
        source_file_path,
        upload_type,
        schema=None,
        skip_header_rows=None,
        quoted_newline=False):
    """
    Load a local CSV file into the BigQuery table ``dataset.table``.

    ``upload_type`` selects truncate-vs-append; an explicit ``schema``
    disables autodetection. Exits the process with a specific exit code
    on each known failure mode.
    """
    try:
        dataset_ref = client.dataset(dataset)
        table_ref = dataset_ref.table(table)
        job_config = bigquery.LoadJobConfig()
        if upload_type == 'overwrite':
            job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE
        else:
            job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
        job_config.source_format = bigquery.SourceFormat.CSV
        if skip_header_rows:
            job_config.skip_leading_rows = skip_header_rows
        if schema:
            job_config.autodetect = False
            job_config.schema = format_schema(schema)
        else:
            job_config.autodetect = True
        if quoted_newline:
            job_config.allow_quoted_newlines = True
        with open(source_file_path, 'rb') as source_file:
            job = client.load_table_from_file(source_file, table_ref,
                                              job_config=job_config)
            job.result()  # block until the load job finishes (raises on failure)
    except NotFound as nf_e:
        if 'Not found: Dataset' in str(nf_e):
            print(
                f'The dataset {dataset} could not be found. Please check for typos and try again')
            print(nf_e)
            sys.exit(EXIT_CODE_INVALID_DATASET)
        # BUG FIX: other NotFound errors previously fell through to the
        # success message; treat them as unknown failures instead.
        print(f'Failed to copy CSV {source_file_path} to BigQuery.')
        print(nf_e)
        sys.exit(EXIT_CODE_UNKNOWN_ERROR)
    except Exception as e:
        message = str(e)
        # BUG FIX: the original tested ('a' or 'b') in message, which only
        # ever checked the first substring; check both explicitly.
        if 'Invalid value for mode' in message or 'Invalid value for type' in message:
            print(
                'The provided schema was not valid. Please check to make sure that the provided schema matches the following format and accepted values. \n\n \
                Format: [["column","datatype"],["column","datatype","MODE"]] \
                Accepted Values: https://cloud.google.com/bigquery/docs/schemas')
            print(e)
            sys.exit(EXIT_CODE_INVALID_SCHEMA)
        if 'Provided Schema does not match' in message:
            print('The provided schema does not match the schema for the existing table. Please check your table and ensure that the column names and data types match up exactly.')
            print(e)
            sys.exit(EXIT_CODE_SCHEMA_MISMATCH)
        print(f'Failed to copy CSV {source_file_path} to BigQuery.')
        print(e)
        sys.exit(EXIT_CODE_UNKNOWN_ERROR)
    print(
        f'Successfully copied csv {source_file_path} to {dataset}.{table} on BigQuery')
def get_client(credentials):
    """
    Build a Google BigQuery client from the credentials currently exposed
    via GOOGLE_APPLICATION_CREDENTIALS; exit with
    EXIT_CODE_INVALID_CREDENTIALS on failure.
    """
    try:
        return bigquery.Client()
    except Exception as error:
        print(f'Error accessing Google Bigquery with service account '
              f'{credentials}')
        print(error)
        sys.exit(EXIT_CODE_INVALID_CREDENTIALS)
def format_schema(schema):
    """
    Convert a schema string such as
    '[["col","STRING"],["col2","INTEGER","REQUIRED"]]' into a list of
    ``bigquery.SchemaField`` objects.
    """
    # Build SchemaField objects directly instead of eval()-ing generated
    # source code: no code injection surface, and robust to quotes or
    # special characters in column names. (The old string builder also
    # silently appended an extra empty positional argument to every field.)
    parsed = ast.literal_eval(schema)
    return [bigquery.SchemaField(*column) for column in parsed]
def main():
    """Entry point: resolve credentials, locate source file(s), upload them."""
    args = get_args()
    # Returns a temp-file path when inline JSON credentials were written out.
    tmp_file = set_environment_variables(args)
    dataset = args.dataset
    table = args.table
    upload_type = args.upload_type
    source_file_name = args.source_file_name
    source_folder_name = args.source_folder_name
    source_full_path = combine_folder_and_file_name(
        folder_name=f'{os.getcwd()}/{source_folder_name}',
        file_name=source_file_name)
    source_file_name_match_type = args.source_file_name_match_type
    schema = args.schema
    quoted_newline = shipyard.args.convert_to_boolean(args.quoted_newline)
    skip_header_rows = args.skip_header_rows
    if skip_header_rows:
        skip_header_rows = int(args.skip_header_rows)
    if tmp_file:
        client = get_client(tmp_file)
    else:
        client = get_client(args.service_account)

    if source_file_name_match_type == 'regex_match':
        # Regex mode: upload every local file whose name matches.
        file_names = find_all_local_file_names(source_folder_name)
        matching_file_names = find_all_file_matches(
            file_names, re.compile(source_file_name))
        print(f'{len(matching_file_names)} files found. Preparing to upload...')
        for index, file_name in enumerate(matching_file_names):
            print(f'Uploading file {index+1} of {len(matching_file_names)}')
            copy_from_csv(
                client=client,
                dataset=dataset,
                table=table,
                source_file_path=file_name,
                upload_type=upload_type,
                schema=schema,
                skip_header_rows=skip_header_rows,
                quoted_newline=quoted_newline)
    else:
        # Exact-match mode: a single file that must exist.
        if not os.path.isfile(source_full_path):
            print(f'File {source_full_path} does not exist')
            sys.exit(EXIT_CODE_FILE_NOT_FOUND)
        copy_from_csv(
            client=client,
            dataset=dataset,
            table=table,
            source_file_path=source_full_path,
            upload_type=upload_type,
            schema=schema,
            skip_header_rows=skip_header_rows,
            quoted_newline=quoted_newline)

    # Clean up the temporary credentials file, if one was created.
    if tmp_file:
        print(f'Removing temporary credentials file {tmp_file}')
        os.remove(tmp_file)
if __name__ == '__main__':
main()
| shipyardapp/googlebigquery-blueprints | googlebigquery_blueprints/upload_file.py | upload_file.py | py | 9,540 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "tempfile.mkstemp",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.fdopen",
"... |
# Minimal pymongo walkthrough: insert one document, then list databases
# and collections. Requires a MongoDB server listening on localhost:27017.
import pymongo

cliente = pymongo.MongoClient("mongodb://localhost:27017/")
database = cliente["bancoDados"]
pessoas = database["pessoas"]

pessoa1 = {"nome":"Gustavo","peso": 58}
insert = pessoas.insert_one(pessoa1)
print(insert.inserted_id)

# NOTE: the database/collection only appear in these listings because of
# the insert above -- MongoDB creates both lazily on first write.
listaDBs = cliente.list_database_names()
print(listaDBs)
listaCollections = database.list_collection_names()
print(listaCollections)
{
"api_name": "pymongo.MongoClient",
"line_number": 3,
"usage_type": "call"
}
] |
6405350957 | """This module contains necessary function needed"""
# Import necessary modules
from imblearn.over_sampling import SMOTE, ADASYN
from imblearn.under_sampling import RandomUnderSampler, NearMiss
import numpy as np
import pandas as pd
import streamlit as st
import math
from sklearn.model_selection import cross_validate
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import sqlite3
import joblib
@st.cache_data
def load_data(uploaded_file):
    """Load the Parkinson's CSV and return (full df, features X, target y).

    Cached by Streamlit so repeated app reruns don't re-read the file.
    """
    # NOTE(review): the path is resolved relative to the process CWD with a
    # hard-coded "Parkinsons-Detector/" prefix -- confirm this matches the
    # deployment layout.
    df = pd.read_csv("Parkinsons-Detector/" + uploaded_file)
    # Rename the column names in the DataFrame.
    df.rename(columns = {"MDVP:Fo(Hz)": "AVFF",}, inplace = True)
    df.rename(columns = {"MDVP:Fhi(Hz)": "MAVFF",}, inplace = True)
    df.rename(columns = {"MDVP:Flo(Hz)": "MIVFF",}, inplace = True)
    # Perform feature and target split ('status' is the label column).
    X = df.drop(columns=['status','name'])
    y = df['status']
    return df, X, y
def train_model(algorithm, K, X, y):
    """Cross-validate ``algorithm`` with K folds and return its scores.

    Parameters
    ----------
    algorithm : sklearn estimator
    K : int
        Number of cross-validation folds.
    X, y : array-like
        Features and target.

    Returns
    -------
    (accuracy, precision, recall, f1, model)
        Per-fold test-score arrays plus the fitted estimator from the fold
        with the best F1 score.
    """
    def _cross_validation(estimator, _X, _y, _cv):
        # Collect the four headline classification metrics and keep the
        # fitted estimators so the best one can be returned.
        scoring = ['accuracy', 'precision', 'recall', 'f1']
        return cross_validate(estimator=estimator,
                              X=_X,
                              y=_y,
                              cv=_cv,
                              scoring=scoring,
                              return_train_score=True,
                              return_estimator=True)

    result = _cross_validation(algorithm, X, y, K)
    accuracy = result['test_accuracy']
    precision = result['test_precision']
    recall = result['test_recall']
    f1 = result['test_f1']
    # First fold with the highest F1 (argmax matches list.index(max(...))).
    best_fold = int(np.argmax(f1))
    model = result['estimator'][best_fold]
    return accuracy, precision, recall, f1, model
def sampling_function(sampling, X, y):
    """Re-balance (X, y) with the requested strategy.

    ``sampling`` may be "Over samling" (SMOTE) or "Under samling" (random
    under-sampling); the correctly spelled forms "Over sampling" /
    "Under sampling" are also accepted for convenience. The misspelled
    originals are kept so existing callers keep working. Any other value
    returns the data unchanged.
    """
    if sampling in ("Over samling", "Over sampling"):
        X, y = SMOTE().fit_resample(X, y)
    elif sampling in ("Under samling", "Under sampling"):
        # NOTE: the original also constructed an unused NearMiss(version=1)
        # instance here; it was never applied, so it has been removed.
        random_undersampler = RandomUnderSampler(sampling_strategy='auto')
        X, y = random_undersampler.fit_resample(X, y)
    return X, y
def predict(features, algorithm):
    """Predict Parkinson's status for ``features`` with the default stored
    model for ``algorithm``.

    Returns
    -------
    (prediction, accuracy)
        The model's prediction array and the accuracy recorded at training
        time in the admin database.
    """
    # Look up the default model's path and accuracy. Close the connection
    # deterministically -- the original leaked it on every call.
    conn = sqlite3.connect('Parkinsons-Detector/pakinson_admin.db')
    try:
        cursor = conn.cursor()
        cursor.execute(
            "SELECT model_path, accuracy FROM model WHERE m_default=? AND algorithm=?",
            ("1", algorithm))
        model = cursor.fetchall()
    finally:
        conn.close()
    # Deserialize the model from the file
    model_path = "Parkinsons-Detector/models" + "/" + model[0][0]
    loaded_model = joblib.load(model_path)
    # Predict the value
    prediction = loaded_model.predict([features])
    return prediction, model[0][1]
| tobintobin16/Streamlit_CS498 | Parkinsons-Detector/web_functions.py | web_functions.py | py | 3,070 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "streamlit.cache_data",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sklearn.model_selection.cross_validate",
"line_number": 38,
"usage_type": "call"
},
{
"... |
24629581794 | from Crypto.PublicKey import RSA
from django.contrib.auth.models import User
from rest_framework import serializers
from app.models import *
import uuid
class UserRelationField(serializers.RelatedField):
    """Render a related object as its owning user's username."""

    def to_representation(self, value):
        # ``value`` is expected to carry a ``user`` relation with a username.
        return '{}'.format(value.user.username)
class AllOthersRelationField(UserRelationField):
    """Username-rendering relation limited to every user EXCEPT the requester."""

    def get_queryset(self):
        request = self.context.get('request', None)
        queryset = super(AllOthersRelationField, self).get_queryset()
        # Without a request (or base queryset) there is nothing to filter on.
        if not request or not queryset:
            return None
        # Exclude records belonging to the requesting user.
        return queryset.all().exclude(user=request.user)
class MyRelationField(UserRelationField):
    """Username-rendering relation limited to the requesting user's own records."""

    def get_queryset(self):
        request = self.context.get('request', None)
        queryset = super(MyRelationField, self).get_queryset()
        # Without a request (or base queryset) there is nothing to filter on.
        if not request or not queryset:
            return None
        return queryset.filter(user=request.user)
class UserFilteredPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):
    """PK-related field whose choices are restricted to objects the
    requesting user owns."""

    def get_queryset(self):
        request = self.context.get('request', None)
        queryset = super(UserFilteredPrimaryKeyRelatedField, self).get_queryset()
        if not request or not queryset:
            return None
        return queryset.filter(owner=request.user)
class UserFilteredSlugRelatedField(serializers.SlugRelatedField):
    """Slug-related field whose choices are restricted to objects the
    requesting user owns."""

    def get_queryset(self):
        request = self.context.get('request', None)
        queryset = super(UserFilteredSlugRelatedField, self).get_queryset()
        if not request or not queryset:
            return None
        return queryset.filter(owner=request.user)
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked view of a Django auth user (url, username, email)."""
    class Meta:
        model = User
        fields = [
            'url',
            'username',
            'email',
        ]
class SearchSerializer(serializers.ModelSerializer):
    """Plain ModelSerializer exposing every field of Search."""
    class Meta:
        model = Search
        fields = '__all__'
class UserKeysSerializer(serializers.ModelSerializer):
    """Key bundle for a user.

    Both key fields are addressed by their ``secure_id`` slug and, via
    UserFilteredSlugRelatedField, limited to private keys owned by the
    requesting user.
    """
    user = UserSerializer(read_only=True)
    signing_key = UserFilteredSlugRelatedField(
        many=False,
        read_only=False,
        queryset=PrivateKey.objects,
        slug_field='secure_id',
        required=False
    )
    messaging_key = UserFilteredSlugRelatedField(
        many=False,
        read_only=False,
        queryset=PrivateKey.objects,
        slug_field='secure_id',
        required=False
    )
    class Meta:
        model = UserKeys
        fields = '__all__'
class HashSerializer(serializers.ModelSerializer):
    """Hash record; ``owner`` is set server-side (read-only to clients)."""
    owner = serializers.PrimaryKeyRelatedField(read_only=True)
    class Meta:
        model = Hash
        fields = '__all__'
class SignatureSerializer(serializers.ModelSerializer):
    """Signature record, looked up per user rather than by pk."""
    class Meta:
        model = Signature
        fields = '__all__'
        lookup_field = 'user'
class MessageSerializer(serializers.ModelSerializer):
    """Encryption (outgoing) side of a message.

    The recipient's key must belong to a user other than the requester,
    while the signing key must be one of the requester's own.
    """
    owner = serializers.PrimaryKeyRelatedField(read_only=True)
    # Hidden: populated during processing, never supplied by the client.
    file_to_decrypt = serializers.HiddenField(default='')
    recipient_public_key = AllOthersRelationField(
        queryset=UserKeys.objects
    )
    signing_key = MyRelationField(
        queryset=UserKeys.objects
    )
    class Meta:
        model = Message
        fields = '__all__'
class DecryptionSerializer(serializers.ModelSerializer):
    """Decryption (incoming) side of a message: mirror of MessageSerializer.

    Here the recipient key is one of the requester's own, while the signing
    key belongs to the (other-user) sender.
    """
    owner = serializers.PrimaryKeyRelatedField(read_only=True)
    recipient_public_key = MyRelationField(
        queryset=UserKeys.objects
    )
    signing_key = AllOthersRelationField(
        queryset=UserKeys.objects
    )
    # Hidden: ciphertext content is handled server-side, not client-supplied.
    content = serializers.HiddenField(default='')
    class Meta:
        model = Message
        fields = '__all__'
class PrivateKeySerializer(serializers.ModelSerializer):
    """Serializer that generates a fresh 2048-bit RSA private key on create."""
    id = serializers.ReadOnlyField()
    secure_id = serializers.ReadOnlyField()
    key_from_bytes = serializers.SerializerMethodField()
    owner = UserSerializer(read_only=True)

    # attribute method
    def get_key_from_bytes(self, obj):
        # Exposes the stored PEM bytes under the ``key_from_bytes`` field.
        return obj.content

    # generate a private key with RSA module
    def private_gen(self):
        key = RSA.generate(2048) # 2048 is secure enough for modern standards
        return key.export_key('PEM')

    # called on post to endpoint
    def create(self, validated_data):
        # NOTE(review): any client-supplied key material is ignored; a new
        # key is always generated server-side -- confirm that is intended.
        gen = self.private_gen()
        priv_key = PrivateKey(
            content=gen,
            owner=validated_data['owner'],
            secure_id=uuid.uuid4()
        )
        priv_key.save()
        return priv_key

    class Meta:
        model = PrivateKey
        fields = '__all__'
        depth = 1
| bobbykemp/cryptoapp | cryptoapp/serializers.py | serializers.py | py | 4,528 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.RelatedField",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.PrimaryKeyRelatedField",
"line_number": 28,
... |
11504595660 | import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict
def simulate_dynamics(initial_pos, initial_vel, stepsize, n_steps, energy_fn):
    """
    Return final (position, velocity) obtained after an `n_steps` leapfrog
    updates, using Hamiltonian dynamics.

    Parameters
    ----------
    initial_pos: shared theano matrix
        Initial position at which to start the simulation
    initial_vel: shared theano matrix
        Initial velocity of particles
    stepsize: shared theano scalar
        Scalar value controlling amount by which to move
    n_steps: shared theano scalar
        Scalar value controlling number of steps for which to run the
        integrator
    energy_fn: python function
        Python function, operating on symbolic theano variables, used to
        compute the potential energy at a given position.

    Returns
    -------
    rval1: theano matrix
        Final positions obtained after simulation
    rval2: theano matrix
        Final velocity obtained after simulation
    """

    def leapfrog(pos, vel, step):
        """
        Inside loop of Scan. Performs one step of leapfrog update, using
        Hamiltonian dynamics.

        Parameters
        ----------
        pos: theano matrix
            in leapfrog update equations, represents pos(t), position at
            time t
        vel: theano matrix
            in leapfrog update equations, represents vel(t - stepsize/2),
            velocity at time (t - stepsize/2)
        step: theano scalar
            scalar value controlling amount by which to move

        Returns
        -------
        rval1: [theano matrix, theano matrix]
            Symbolic theano matrices for new position pos(t + stepsize), and
            velocity vel(t + stepsize/2)
        rval2: dictionary
            Dictionary of updates for the Scan Op
        """
        # from pos(t) and vel(t-stepsize/2), compute vel(t+stepsize/2)
        dE_dpos = T.grad(energy_fn(pos).sum(), pos)
        new_vel = vel - step * dE_dpos
        # from vel(t+stepsize/2) compute pos(t+stepsize)
        new_pos = pos + step * new_vel
        return [new_pos, new_vel], {}

    # compute velocity at time-step: t + stepsize/2
    initial_energy = energy_fn(initial_pos)
    dE_dpos = T.grad(initial_energy.sum(), initial_pos)
    vel_half_step = initial_vel - 0.5 * stepsize * dE_dpos

    # compute position at time-step: t + stepsize
    pos_full_step = initial_pos + stepsize * vel_half_step

    # perform leapfrog updates: the scan op is used to repeatedly compute
    # vel(t + (m-1/2)*stepsize) and pos(t + m*stepsize) for m in [2,n_steps].
    (all_pos, all_vel), scan_updates = theano.scan(
        leapfrog,
        outputs_info=[
            dict(initial=pos_full_step),
            dict(initial=vel_half_step),
        ],
        non_sequences=[stepsize],
        n_steps=n_steps - 1)
    final_pos = all_pos[-1]
    final_vel = all_vel[-1]
    # NOTE: Scan always returns an updates dictionary, in case the
    # scanned function draws samples from a RandomStream. These
    # updates must then be used when compiling the Theano function, to
    # avoid drawing the same random numbers each time the function is
    # called. In this case however, we consciously ignore
    # "scan_updates" because we know it is empty.
    assert not scan_updates

    # The last velocity returned by scan is vel(t +
    # (n_steps - 1 / 2) * stepsize) We therefore perform one more half-step
    # to return vel(t + n_steps * stepsize)
    energy = energy_fn(final_pos)
    final_vel = final_vel - 0.5 * stepsize * T.grad(energy.sum(), final_pos)

    # return new proposal state
    return final_pos, final_vel
# start-snippet-1
def kinetic_energy(vel):
    """Kinetic energy of each particle, assuming unit mass.

    Parameters
    ----------
    vel: theano matrix
        Velocity components; particles are laid out along axis 1.

    Returns
    -------
    Vector whose i-th entry is 0.5 * the sum of squared velocity
    components for particle i (summed over axis 0).
    """
    squared_components = vel ** 2
    return 0.5 * squared_components.sum(axis=0)
def hamiltonian(pos, vel, energy_fn):
    """
    Hamiltonian (potential plus kinetic energy) at the given state.

    Parameters
    ----------
    pos: theano matrix
        Position vectors.
    vel: theano matrix
        Velocity vectors.
    energy_fn: python function
        Function of symbolic theano variables computing the potential
        energy at a given position.

    Returns
    -------
    Vector whose i-th entry is the Hamiltonian at (pos[i], vel[i]).
    """
    potential = energy_fn(pos)
    kinetic = kinetic_energy(vel)  # unit mass assumed
    return potential + kinetic
def metropolis_hastings_accept(energy_prev, energy_next, s_rng):
    """
    Performs a Metropolis-Hastings accept-reject move.

    Parameters
    ----------
    energy_prev: theano vector
        Symbolic theano tensor which contains the energy associated with the
        configuration at time-step t.
    energy_next: theano vector
        Symbolic theano tensor which contains the energy associated with the
        proposed configuration at time-step t+1.
    s_rng: theano.tensor.shared_randomstreams.RandomStreams
        Theano shared random stream object used to generate the random
        number used in proposal.

    Returns
    -------
    return: boolean
        True if move is accepted, False otherwise (element-wise, one
        decision per chain).
    """
    ediff = energy_prev - energy_next
    # accept iff uniform(0,1) < exp(E_prev - E_next), evaluated per chain
    return (T.exp(ediff) - s_rng.uniform(size=energy_prev.shape)) >= 0
def MJHMC_accept():
    # TODO: stub -- the Markov Jump HMC acceptance rule is not implemented yet.
    return
def hmc_move(s_rng, positions, energy_fn, stepsize=0.1, n_steps=1):
    """
    This function performs one-step of Hybrid Monte-Carlo sampling. We start
    by sampling a random velocity from a univariate Gaussian distribution,
    perform `n_steps` leap-frog updates using Hamiltonian dynamics and
    accept-reject using Metropolis-Hastings.

    Parameters
    ----------
    s_rng: theano shared random stream
        Symbolic random number generator used to draw random velocity and
        perform accept-reject move.
    positions: shared theano matrix
        Symbolic matrix whose rows are position vectors.
    energy_fn: python function
        Python function, operating on symbolic theano variables, used to
        compute the potential energy at a given position.
    stepsize: shared theano scalar
        Shared variable containing the stepsize to use for `n_steps` of HMC
        simulation steps.
    n_steps: integer
        Number of HMC steps to perform before proposing a new position.

    Returns
    -------
    rval1: boolean
        True if move is accepted, False otherwise
    rval2: theano matrix
        Matrix whose rows contain the proposed "new position"
    """
    # sample random velocity (momentum refresh)
    initial_vel = s_rng.normal(size=positions.shape)

    # integrate Hamiltonian dynamics forward with the leapfrog scheme
    final_pos, final_vel = simulate_dynamics(
        initial_pos=positions,
        initial_vel=initial_vel,
        stepsize=stepsize,
        n_steps=n_steps,
        energy_fn=energy_fn
    )

    # accept/reject the proposed move based on the joint distribution
    accept = metropolis_hastings_accept(
        energy_prev=hamiltonian(positions, initial_vel, energy_fn),
        energy_next=hamiltonian(final_pos, final_vel, energy_fn),
        s_rng=s_rng
    )

    return accept, final_pos
def hmc_updates(positions, final_pos, accept):
    """
    Build the updates dictionary applied after one HMC proposal.

    Parameters
    ----------
    positions: shared theano matrix
        Shared variable holding the current positions; updated in place by
        the compiled `simulate` function.
    final_pos: theano matrix
        Proposed new positions from the leapfrog simulation.
    accept: theano tensor
        Accept/reject indicator from the Metropolis-Hastings step;
        ``accept[0]`` is blended over ``final_pos``.

    Returns
    -------
    OrderedDict mapping ``positions`` to the accepted positions (float32).

    Note: the original upstream implementation also adapted the stepsize and
    tracked a moving-average acceptance rate; that code is kept below,
    verbatim but disabled, inside string literals.
    """
    ## POSITION UPDATES ##
    # Blend: where accepted, take final_pos; otherwise keep the old
    # position. Arithmetic blend replaces the earlier T.switch attempts.
    #accept_matrix = accept.dimshuffle(0, *(('x',) * (final_pos.ndim - 1)))
    #new_positions = T.switch(accept_matrix, final_pos, positions)
    #new_positions = T.switch(accept.ravel(), final_pos, positions).astype('float32')
    #new_positions = T.switch(accept[0].dimshuffle('x',0), final_pos, positions).astype('float32')
    new_positions = accept[0]*final_pos + (1-accept[0])*positions

    ## ACCEPT RATE UPDATES ##
    # perform exponential moving average
    '''
    mean_dtype = theano.scalar.upcast(accept.dtype, avg_acceptance_rate.dtype)
    new_acceptance_rate = T.add(
        avg_acceptance_slowness * avg_acceptance_rate,
        (1.0 - avg_acceptance_slowness) * accept.mean(dtype=mean_dtype))

    ## STEPSIZE UPDATES ##
    # if acceptance rate is too low, our sampler is too "noisy" and we reduce
    # the stepsize. If it is too high, our sampler is too conservative, we can
    # get away with a larger stepsize (resulting in better mixing).
    _new_stepsize = T.switch(avg_acceptance_rate > target_acceptance_rate,
                             stepsize * stepsize_inc, stepsize * stepsize_dec)
    # maintain stepsize in [stepsize_min, stepsize_max]
    new_stepsize = T.clip(_new_stepsize, stepsize_min, stepsize_max)

    return [(positions, new_positions),
            (stepsize, new_stepsize),
            (avg_acceptance_rate, new_acceptance_rate)]
    '''
    update = OrderedDict()
    update[positions] = new_positions.astype('float32')
    #return [(positions, new_positions)]
    return update
def wrapper_hmc(s_rng, energy_fn, dim=np.array([2, 1]), L=10, beta=0.1, epsilon=0.1):
    """
    This should be the wrapper call that calls the various HMC definitions

    Parameters:
    Potential Energy -- function handle that captures the interest distrbution
    Number of Leap Frog Steps -- L (10)
    Momentum corruption parameter -- beta (0.1)
    Leapfrog Integrator step length -- epsilon (0.1)
    nsamples -- The number of samples to be generated (100)

    Returns:
    samples -- Samples generated (a compiled theano function; each call
    performs one HMC transition and updates the shared position in place)
    """
    # NOTE(review): the mutable ndarray default for ``dim`` is shared across
    # calls; it is only read here, so this is safe, but worth confirming.
    # ``beta`` is accepted but currently unused -- presumably reserved for
    # partial momentum refreshment.
    pos = np.random.randn(dim[0], dim[1]).astype('float32')
    vel = np.random.randn(dim[0], dim[1]).astype('float32')
    pos = theano.shared(pos, name='pos')
    vel = theano.shared(vel, name='vel')
    epsilon = theano.shared(epsilon, name='epsilon')
    L = theano.shared(L, 'L')
    #pos, vel = simulate_dynamics(initial_pos=pos,initial_vel=vel,stepsize=epsilon,n_steps=L,energy_fn=energy_fn)
    accept, final_pos = hmc_move(s_rng=s_rng, positions=pos, energy_fn=energy_fn, stepsize=epsilon, n_steps=L)
    #Simulate updates
    simulate_updates = hmc_updates(positions=pos, final_pos=final_pos, accept=accept)
    # Compile: one call draws one transition and applies the position update.
    simulate = theano.function([], [accept, simulate_updates[pos]], updates=simulate_updates)
    return simulate
def autocorrelation():
    """Build and return a compiled theano function that computes the mean
    autocorrelation of an (N, nbatch, T) sample tensor for every time lag
    from 1 to T-2."""
    X = T.tensor3().astype('float32')
    shape = X.shape
    # Assumes Length T, need to have a switch that also deals with (T/2)-1
    t_gap = T.arange(1, shape[2] - 1)
    outputs_info = T.zeros((1, 1, 1), dtype='float32')

    # function def that computes the mean ac for each time lag
    def calc_ac(t_gap, output_t, X):
        # Mean of the product between the series and its lagged copy.
        return T.mean(X[:, :, :-t_gap] * X[:, :, t_gap:], dtype='float32', keepdims=True)

    # We will write a scan function that loops over the indices of the data
    # tensor and computes the autocorrelation
    result, updates = theano.scan(fn=calc_ac,
                                  #ac,updates = theano.scan(fn= lambda X,t_gap,ac: T.mean(X[:,:,:-t_gap]*X[:,:t_gap:]),
                                  outputs_info=[outputs_info],
                                  sequences=[t_gap],
                                  non_sequences=[X])
    # Append zero mean value of X to the front of the array and then return
    # Also, need to divide by the first element to scale the variances
    # For now though, let's do this in the main script
    theano_ac = theano.function(inputs=[X], outputs=[result], updates=updates)
    return theano_ac
def normed_autocorrelation(df):
    """Compute the lag autocorrelation of the sampler history in ``df``,
    normalised by the zero-lag second moment.

    Expects one DataFrame row per time step holding an (N, nbatch) array
    under 'X', plus 'num energy'/'num grad' bookkeeping columns; returns a
    frame (one row shorter) with an added 'autocorrelation' column.
    """
    theano_ac = autocorrelation()
    Time = len(df)
    N, nbatch = df.loc[0]['X'].shape
    # Stack the per-step (N, nbatch) arrays into one (N, nbatch, Time) tensor.
    X = np.zeros((N, nbatch, Time))
    for tt in range(Time):
        X[:, :, tt] = df.loc[tt]['X']
    ac = theano_ac(X.astype('float32'))
    # Zero-lag normaliser: mean of X**2 over all entries.
    X_mean = np.mean(X**2, keepdims=True)[0][0]
    ac_squeeze = np.squeeze(ac[0])
    ac_squeeze = ac_squeeze / X_mean
    # Prepend the lag-0 value (== 1 after normalisation).
    ac = np.vstack((1., ac_squeeze.reshape(Time - 2, 1)))
    # This drops the last sample out of the data frame. Unclear, if this is
    # the best way to do things but it is the only way we can align the
    # total number of samples from sample generation to computing
    # autocorrelation
    ac_df = df[:-1]
    ac_df.loc[:, 'autocorrelation'] = ac
    return ac_df[['num energy', 'num grad', 'autocorrelation']]
| rueberger/MJHMC | mjhmc/fast/hmc.py | hmc.py | py | 14,473 | python | en | code | 24 | github-code | 36 | [
{
"api_name": "theano.tensor.grad",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "theano.tensor",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "theano.tensor.grad",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "theano.tensor",
... |
17457985140 | """
Tests for specific issues and pull requests
"""
import os
import tempfile
import difflib
from textwrap import dedent
import gffutils
from gffutils import feature
from gffutils import merge_criteria as mc
from nose.tools import assert_raises
def test_issue_79():
    """Round-trip a GTF through create_db and check the output order is kept."""
    gtf = gffutils.example_filename("keep-order-test.gtf")
    db = gffutils.create_db(
        gtf,
        "tmp.db",
        disable_infer_genes=False,
        disable_infer_transcripts=False,
        id_spec={"gene": "gene_id", "transcript": "transcript_id"},
        merge_strategy="create_unique",
        keep_order=True,
        force=True,
    )
    exp = open(gtf).read()
    obs = "\n".join([str(i) for i in db.all_features()])
    # Only the first line is asserted on; the prints give diff context on failure.
    exp_1 = exp.splitlines(True)[0].strip()
    obs_1 = obs.splitlines(True)[0].strip()
    print("EXP")
    print(exp_1)
    print("OBS")
    print(obs_1)
    print("DIFF")
    print("".join(difflib.ndiff([exp_1], [obs_1])))
    assert obs_1 == exp_1
def test_issue_82():
    """Attribute values may contain unquoted key-val separators (issue #82)."""
    # key-val separator is inside an unquoted attribute value
    x = (
        "Spenn-ch12\tsgn_markers\tmatch\t2621812\t2622049\t.\t+\t.\t"
        "Alias=SGN-M1347;ID=T0028;Note=marker name(s): T0028 SGN-M1347 |identity=99.58|escore=2e-126"
    )
    y = feature.feature_from_line(x)
    assert y.attributes["Note"] == [
        "marker name(s): T0028 SGN-M1347 |identity=99.58|escore=2e-126"
    ]
    gffutils.create_db(gffutils.example_filename("keyval_sep_in_attrs.gff"), ":memory:")
def test_issue_85():
    """Blank or '.' start/stop fields must parse without error (issue #85)."""
    # When start or stop was empty, #85 would fail. Should now work with
    # blank fields
    f = feature.feature_from_line("\t".join([""] * 9))
    # or with "." placeholders
    f = feature.feature_from_line("\t".join(["."] * 9))
def test_issue_105():
    """DataIterator must handle a file located under the user's home dir."""
    fn = gffutils.example_filename("FBgn0031208.gtf")
    home = os.path.expanduser("~")
    newfn = os.path.join(home, ".gffutils.test")
    with open(newfn, "w") as fout:
        fout.write(open(fn).read())
    try:
        f = gffutils.iterators.DataIterator(newfn)
        for i in f:
            pass
    finally:
        # Always remove the copy from $HOME, even if iteration raises.
        os.unlink(newfn)
def test_issue_107():
    """interfeatures() must compute per-seqid gaps correctly (issue #107)."""
    s = dedent(
        """
        chr1\t.\tgene\t10\t15\t.\t+\t.\tID=b;
        chr1\t.\tgene\t1\t5\t.\t-\t.\tID=a;
        chr2\t.\tgene\t25\t50\t.\t-\t.\tID=c;
        chr2\t.\tgene\t55\t60\t.\t-\t.\tID=d;
        """
    )
    tmp = tempfile.NamedTemporaryFile(delete=False).name
    with open(tmp, "w") as fout:
        fout.write(s + "\n")
    db = gffutils.create_db(tmp, ":memory:")
    interfeatures = list(
        db.interfeatures(db.features_of_type("gene", order_by=("seqid", "start")))
    )
    assert [str(i) for i in interfeatures] == [
        "chr1\tgffutils_derived\tinter_gene_gene\t6\t9\t.\t.\t.\tID=a,b;",
        "chr2\tgffutils_derived\tinter_gene_gene\t16\t54\t.\t-\t.\tID=c,d;",
    ]
def test_issue_119():
    """Autoincremented IDs must stay consistent across db.update() calls."""
    # First file has these two exons with no ID:
    #
    # chr2L FlyBase exon 8193 8589 . + . Parent=FBtr0300690
    # chr2L FlyBase exon 7529 8116 . + . Name=CG11023:1;Parent=FBtr0300689,FBtr0300690
    #
    db0 = gffutils.create_db(gffutils.example_filename("FBgn0031208.gff"), ":memory:")
    # And this one, a bunch of reads with no IDs anywhere
    db1 = gffutils.create_db(
        gffutils.example_filename("F3-unique-3.v2.gff"), ":memory:"
    )
    # When db1 is updated by db0
    db2 = db1.update(db0)
    assert (
        db2._autoincrements == db1._autoincrements == {"exon": 2, "read": 112}
    ), db2._autoincrements
    assert len(list(db0.features_of_type("exon"))) == 6
    # Now we update that with db0 again
    db3 = db2.update(db0, merge_strategy="replace")
    # Using the "replace" strategy, we should have only gotten another 2 exons
    assert len(list(db3.features_of_type("exon"))) == 8
    # Make sure that the autoincrements for exons jumped by 2
    assert (
        db2._autoincrements == db3._autoincrements == {"exon": 4, "read": 112}
    ), db2._autoincrements
    # More isolated test, merging two databases each created from the same file
    # which itself contains only a single feature with no ID.
    tmp = tempfile.NamedTemporaryFile(delete=False).name
    with open(tmp, "w") as fout:
        fout.write("chr1\t.\tgene\t10\t15\t.\t+\t.\t\n")
    db4 = gffutils.create_db(tmp, tmp + ".db")
    db5 = gffutils.create_db(tmp, ":memory:")
    assert db4._autoincrements == {"gene": 1}
    assert db5._autoincrements == {"gene": 1}
    db6 = db4.update(db5)
    db7 = gffutils.FeatureDB(db4.dbfn)
    # both db4 and db6 should now have the same, updated autoincrements because
    # they both point to the same db.
    assert db6._autoincrements == db4._autoincrements == {"gene": 2}
    # But db5 was created independently and should have unchanged autoincrements
    assert db5._autoincrements == {"gene": 1}
    # db7 was created from the database pointed to by both db4 and db6. This
    # tests that when a FeatureDB is created it should have the
    # correctly-updated autoincrements read from the db
    assert db7._autoincrements == {"gene": 2}
def test_pr_131():
    """update() with an empty iterable is a no-op instead of raising (PR #131)."""
    db = gffutils.create_db(gffutils.example_filename("FBgn0031208.gff"), ":memory:")
    # previously would raise ValueError("No lines parsed -- was an empty
    # file provided?"); now just does nothing
    db2 = db.update([])
def test_pr_133():
    """merge_attributes() must not mutate either input dict (PR #133)."""
    # Previously, merge_attributes would not deep-copy the values from the
    # second dict, and when the values are then modified, the second dict is
    # unintentionally modified.
    d1 = {"a": [1]}
    d2 = {"a": [2]}
    d1a = {"a": [1]}
    d2a = {"a": [2]}
    d3 = gffutils.helpers.merge_attributes(d1, d2)
    assert d1 == d1a, d1
    assert d2 == d2a, d2
def test_pr_139():
    """interfeatures() must not leak attributes across merged features (PR #139)."""
    db = gffutils.create_db(gffutils.example_filename("FBgn0031208.gff"), ":memory:")
    exons = list(db.features_of_type("exon"))
    inter = list(db.interfeatures(exons))
    # previously, the first exon's attributes would show up in subsequent merged features
    assert exons[0].attributes["Name"][0] not in inter[1].attributes["Name"]
    assert exons[0].attributes["Name"][0] not in inter[2].attributes["Name"]
    assert exons[0].attributes["Name"][0] not in inter[3].attributes["Name"]
def test_pr_144():
    """A feature with an empty-string attribute value must round-trip (PR #144)."""
    # previously this would fail with:
    # UnboundLocalError: local variable 'part' referenced before assignment
    f = gffutils.Feature(attributes={"a": [""]})
    # Make sure everything got converted correctly
    assert f.attributes["a"] == [""]
    assert str(f) == ". . . . . . . . a"
    g = gffutils.feature.feature_from_line(str(f))
    assert g == f
def test_pr_172():
    """GTF attributes with an empty transcript_id must parse (PR #172)."""
    line = (
        "NC_049222.1\tGnomon\tgene\t209085\t282880\t.\t-\t.\t"
        'gene_id "ENPP1_3"; transcript_id ""; db_xref "GeneID:100856150";'
        'db_xref "VGNC:VGNC:40374"; gbkey "Gene"; gene "ENPP1"; '
        'gene_biotype "protein_coding";\n'
    )
    tmp = tempfile.NamedTemporaryFile(delete=False).name
    with open(tmp, "w") as fout:
        fout.write(line)
    db = gffutils.create_db(tmp, ":memory:")
def test_pr_171():
    """Quoter.__missing__ percent-encodes unsafe chars, passes others through."""
    q = gffutils.parser.Quoter()
    assert q.__missing__("\n") == "%0A"
    assert q.__missing__("a") == "a"
    assert q.__missing__("") == ""
def test_issue_129():
    """region() must return features overlapping the query at either edge."""
    # thanks @Brunox13 for the detailed notes on #129
    line = 'chr1\tdemo\tstart_codon\t69091\t69093\t.\t+\t.\tgene_id "demo";\n'
    tmp = tempfile.NamedTemporaryFile(delete=False).name
    with open(tmp, "w") as fout:
        fout.write(line)
    db = gffutils.create_db(tmp, ":memory:")
    # ASCII art to visualize each test (coords are along the top, from 69087 to
    # 69090). The tests slide a 4-bp region over the original 3-bp start codon.
    # 7 8 9 0 1 2 3 4 5 6 7
    # | | | Orig feature
    # | | | | Test feature
    res = list(db.region(region=("chr1", 69087, 69090), featuretype="start_codon"))
    assert len(res) == 0
    # NOTE: prior to #162, this did not return anything
    # 7 8 9 0 1 2 3 4 5 6 7
    # | | | Orig feature
    # | | | | Test feature
    res = list(db.region(region=("chr1", 69088, 69091), featuretype="start_codon"))
    assert len(res) == 1
    # 7 8 9 0 1 2 3 4 5 6 7
    # | | | Orig feature
    # | | | | Test feature
    res = list(db.region(region=("chr1", 69089, 69092), featuretype="start_codon"))
    assert len(res) == 1
    # 7 8 9 0 1 2 3 4 5 6 7
    # | | | Orig feature
    # | | | | Test feature
    res = list(db.region(region=("chr1", 69090, 69093), featuretype="start_codon"))
    assert len(res) == 1
    # 7 8 9 0 1 2 3 4 5 6 7
    # | | | Orig feature
    # | | | | Test feature
    res = list(db.region(region=("chr1", 69091, 69094), featuretype="start_codon"))
    assert len(res) == 1
    # 7 8 9 0 1 2 3 4 5 6 7
    # | | | Orig feature
    # | | | | Test feature
    res = list(db.region(region=("chr1", 69092, 69095), featuretype="start_codon"))
    assert len(res) == 1
    # NOTE: prior to #162, this did not return anything
    # 7 8 9 0 1 2 3 4 5 6 7
    # | | | Orig feature
    # | | | | Test feature
    res = list(db.region(region=("chr1", 69093, 69096), featuretype="start_codon"))
    assert len(res) == 1
    # 7 8 9 0 1 2 3 4 5 6 7
    # | | | Orig feature
    # | | | | Test feature
    res = list(db.region(region=("chr1", 69094, 69097), featuretype="start_codon"))
    assert len(res) == 0
def test_issue_128():
    """Attribute-separator sniffing must favor lines with more attributes."""
    # In #128, some lines had separators of "; " and some with ";". The first
    # one in the file would win. Now the detection pays more attention to lines
    # with more attributes to make it work properly
    gff = gffutils.example_filename('gms2_example.gff3')
    db = gffutils.create_db(gff, ":memory:", force=True)
    expected = {
        'ID': ['1'],
        'Parent': ['gene_1'],
        'gene_type': ['native'],
        'partial': ['11'],
        'gc': ['33'],
        'length': ['363'],
    }
    assert dict(db['1'].attributes) == expected
def test_issue_157():
    """children_bp() must reject the removed ignore_strand argument."""
    # With the merge overhaul, children_bp incorrectly still used ignore_strand.
    db = gffutils.create_db(gffutils.example_filename('FBgn0031208.gff'), ":memory:")
    gene = next(db.features_of_type('gene'))
    children = list(db.children(gene, featuretype='exon'))
    # Modify the last one to have a different strand so we can test the
    # ignore_strand argument.
    children[-1].strand = '-'
    db.update(children[-1:], merge_strategy='replace')
    # and, since updating has been problematic in the past, double-check again
    # that the strand is changed in the db.
    assert list(db.children(gene, featuretype='exon'))[-1].strand == '-'
    cbp1 = db.children_bp(gene, child_featuretype='exon')
    # Previously this would give:
    # TypeError: merge() got an unexpected keyword argument 'ignore_strand'
    #
    # Now changing to ValueError and suggesting a fix.
    assert_raises(ValueError, db.children_bp, gene, child_featuretype='exon', merge=True, ignore_strand=True)
    assert_raises(ValueError, db.children_bp, gene, ignore_strand=True, nonexistent=True)
    assert_raises(TypeError, db.children_bp, gene, nonexistent=True)
    # The way to do it now is the following (we can omit the mc.feature_type
    # since we're preselecting for exons anyway):
    db.children_bp(gene, child_featuretype='exon', merge=True, merge_criteria=(mc.overlap_end_inclusive))
def test_issue_159():
    """canonical_transcripts() must run against a FASTA without error."""
    db = gffutils.create_db(gffutils.example_filename('FBgn0031208.gff'), ":memory:")
    fasta = gffutils.example_filename('dm6-chr2L.fa')
    for transcript, seq in gffutils.helpers.canonical_transcripts(db, fasta):
        pass
def test_issue_164():
    """A previously problematic GTF must import cleanly (issue #164)."""
    # Something strange with the original annotation, but seems fine at least
    # after pasting in the offending genes from the GitHub comments.
    db = gffutils.create_db(
        gffutils.example_filename('sharr.gtf'),
        ':memory:',
        disable_infer_transcripts=True,
        disable_infer_genes=True,
        id_spec={'gene': 'gene_id', 'transcript': 'transcript_id'},
        merge_strategy='create_unique',
        keep_order=True)
def test_issue_166():
    """FeatureDB.seqids() yields the database's distinct seqids."""
    # Added the new FeatureDB.seqids() method.
    db = gffutils.create_db(gffutils.example_filename('nonascii'), ':memory:')
    seqs = list(db.seqids())
    assert seqs == ['2L', '2R', '3L', '3R', 'X'], seqs
def test_issue_167():
    """A file that used to trigger sqlite3.InterfaceError must import."""
    # Previously was causing sqlite3.InterfaceError
    db = gffutils.create_db(gffutils.example_filename('issue167.gff'), ':memory:')
def test_issue_174():
    """create_introns(numeric_sort=True) must order exon_number numerically."""
    db = gffutils.create_db(
        gffutils.example_filename('issue174.gtf'),
        ':memory:',
        merge_strategy='warning',
    )
    introns = [f for f in db.create_introns()]
    observed = [i.attributes['exon_number'] for i in introns]
    assert observed[7] == ['8', '9']
    assert observed[8] == ['10', '9']
    assert observed[9] == ['10', '11']
    # Now do the same thing, but with the new numeric_sort arg
    introns = [f for f in db.create_introns(numeric_sort=True)]
    observed = [i.attributes['exon_number'] for i in introns]
    assert observed[7] == ['8', '9']
    # This should be fixed:
    assert observed[8] == ['9', '10']
    assert observed[9] == ['10', '11']
def test_issue_181():
    """Updating features with multi-value IDs needs a custom id_spec (issue #181)."""
    db = gffutils.create_db(
        gffutils.example_filename('issue181.gff'),
        ':memory:')
    introns = db.create_introns()
    # This now warns that the provided ID key has multiple values.
    assert_raises(ValueError, db.update, introns)
    # The fix is to provide a custom intron ID converter.
    def intron_id(f):
        return ','.join(f['ID'])
    db.update(introns, id_spec={'intron': [intron_id]})
| hpatterton/gffutils | gffutils/test/test_issues.py | test_issues.py | py | 13,843 | python | en | code | null | github-code | 36 | [
{
"api_name": "gffutils.example_filename",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "gffutils.create_db",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "difflib.ndiff",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "gffutils.f... |
17455031519 | #!/usr/bin/env python
#
# Author: Greg Hellings - <ghelling@redhat.com> or <greg.hellings@gmail.com>
#
# Module to configure users in Jenkins authorized to use CLI
import xml.etree.ElementTree as ET
import os
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = """
---
version_added: "2.1"
module: jenkins_cli_user
short_description: configure Jenkins CLI users with pub key
description:
- This module configures admin users in Jenkins to utilize the specified
SSH pubkey. Requires that role-based authentication be enabled and that
a user be configured as an admin
options:
jenkins_home:
description:
The root directory for the Jenkins install
required: true
jenkins_user:
description:
The name of the user to configure the SSH key for
key_file:
description:
Path to the SSH keyfile to be listed as authorized
required: true
state:
description:
Currently limited to "present" - will create the user
required: false
author: Gregory Hellings
"""
class UserList(object):
    """Reads Jenkins' ``users.xml`` and exposes the user-id -> directory map."""

    def __init__(self, user_path):
        """Parse ``users.xml`` found under *user_path* (the Jenkins users dir)."""
        config = os.path.join(user_path, "users.xml")
        self.root = ET.parse(config).getroot()

    @property
    def users(self):
        """Mapping of Jenkins user id to on-disk user directory name.

        Computed once and cached on the instance; returns {} when the
        mapping element is absent or empty.
        """
        if not hasattr(self, "_users"):
            users = {}
            mapping = self.root.find("idToDirectoryNameMap")
            # Compare against None explicitly: an Element's truthiness
            # reflects its child count, not its presence, and is deprecated.
            if mapping is not None:
                # Element.getiterator() was removed in Python 3.9;
                # Element.iter() is the supported replacement.
                for entry in mapping.iter("entry"):
                    users[entry[0].text] = entry[1].text
            self._users = users
        return self._users
class User(object):
    """Wraps one Jenkins user's ``config.xml`` to manage SSH public keys."""

    def __init__(self, user_path, dirname):
        """Parse ``config.xml`` for the user stored in *user_path*/*dirname*."""
        self.config = os.path.join(user_path, dirname, "config.xml")
        self.document = ET.parse(self.config)
        self.root = self.document.getroot()

    @property
    def keys(self):
        """List of ``<authorizedKeys>`` elements under the user's properties."""
        properties = self.root.find("properties")
        # Materialize to a list so truthiness means "any elements found".
        # (Element.getiterator() was removed in Python 3.9; iter() replaces it.)
        return list(properties.iter("authorizedKeys"))

    def add_key(self, pub_key):
        """Ensure *pub_key* is authorized; return True only if the XML changed.

        The original implementation returned True whenever any
        ``<authorizedKeys>`` element existed, even when the key was already
        present, which made the module always report changed=True.
        """
        keys = self.keys
        if keys:
            changed = False
            for key in keys:
                if pub_key not in str(key.text):
                    # No actual key text there, so create it; otherwise append.
                    if key.text is None:
                        key.text = pub_key
                    else:
                        key.text = str(key.text) + "\n" + pub_key
                    changed = True
            return changed
        # No <authorizedKeys> element yet: create the SSH property subtree.
        properties = self.root.find("properties")
        xml = "org.jenkinsci.main.modules.cli.auth.ssh.UserPropertyImpl"
        ssh_auth = ET.SubElement(properties, xml)
        auth_key = ET.SubElement(ssh_auth, "authorizedKeys")
        auth_key.text = pub_key
        return True

    def save(self):
        """Write the (possibly modified) document back to config.xml."""
        self.document.write(self.config, encoding="UTF-8")
def main():
    """Ansible entry point: authorize an SSH public key for a Jenkins user."""
    module = AnsibleModule(
        argument_spec={
            "jenkins_home": {"required": True},
            "jenkins_user": {"required": True},
            "key": {"required": True},
            "state": {"choices": ["present"], "default": "present"},
        },
        supports_check_mode=False,
    )
    params = type("Params", (object,), module.params)
    user_path = os.path.join(params.jenkins_home, "users")
    changed = False
    # Check if user exists and find directory mapping
    users = UserList(user_path)
    if params.jenkins_user not in users.users:
        # The old message built `str + dict_keys`, which raises TypeError at
        # exactly the moment the error should be reported. Format explicitly.
        module.fail_json(
            msg="No such user found. Known users: %s" % ", ".join(users.users)
        )
    # Check user for configured key and add it
    user = User(user_path, users.users[params.jenkins_user])
    changed = user.add_key(params.key)
    if changed:
        user.save()
    module.exit_json(changed=changed)


main()
| devroles/ansible_collection_system | plugins/modules/jenkins_cli_user.py | jenkins_cli_user.py | py | 3,843 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "xml.etree.Eleme... |
23614214391 | import wqpy.read
import aiohttp
import asyncio
import io
async def _basic_aquery(service_url, service_params):
    """GET *service_url* with query params and return the response body text."""
    async with aiohttp.ClientSession() as session:
        async with session.get(service_url, params = service_params) as r:
            return(await r.text())
def multi_query(service_url, service_param_list, parse = True):
    """Issue one HTTP query per entry of *service_param_list*, concurrently.

    Parameters
    ----------
    service_url : str
        Endpoint queried for every parameter set.
    service_param_list : iterable
        Query-parameter mappings, one per request.
    parse : bool, optional
        If True (default), parse each response with ``wqpy.read.read``;
        otherwise return the raw response text.

    Returns
    -------
    list
        One parsed result or raw string per entry in *service_param_list*.
    """
    async def _gather_all():
        # Fan out every request inside a single event-loop lifetime.
        return await asyncio.gather(
            *(_basic_aquery(service_url, params) for params in service_param_list)
        )

    # asyncio.run() replaces the deprecated get_event_loop() /
    # run_until_complete() pattern and closes the loop when done.
    outputs = asyncio.run(_gather_all())
    if parse:
        return [wqpy.read.read(io.StringIO(o)) for o in outputs]
    else:
        # The old io.StringIO(o).read() round-trip was a no-op: the
        # payloads are already str.
        return list(outputs)
| mkoohafkan/wqpy-clone | wqpy/aquery.py | aquery.py | py | 675 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aiohttp.ClientSession",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "asyncio.gather",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "wqpy.read.... |
37749884465 | from elegy.module import Module
import typing as tp
import haiku as hk
import jax.numpy as jnp
import numpy as np
from elegy import types
def _infer_shape(output_shape, dimensions):
"""
Replaces the -1 wildcard in the output shape vector.
This function infers the correct output shape given the input dimensions.
Args:
output_shape: Output shape.
dimensions: List of input non-batch dimensions.
Returns:
Tuple of non-batch output dimensions.
"""
# Size of input.
n = np.prod(dimensions)
# Size of output where defined.
v = np.array(output_shape)
m = abs(np.prod(v))
# Replace wildcard.
v[v == -1] = n // m
return tuple(v)
class Reshape(Module):
    """
    Layer that applies a fixed target shape to every input while leaving the
    first ``preserve_dims`` axes untouched.

    A single ``-1`` entry in ``output_shape`` is inferred from the remaining
    input dimensions. For an input of shape ``[B, H, W, C, D]``:

    ```python
    mod = elegy.nn.Reshape(output_shape=(-1, D))
    assert mod(x).shape == (B, H*W*C, D)        # preserve_dims=1 (default)

    mod = elegy.nn.Reshape(output_shape=(-1, D), preserve_dims=2)
    assert mod(x).shape == (B, H, W*C, D)

    mod = elegy.nn.Reshape(output_shape=(-1, D), preserve_dims=4)
    assert mod(x).shape == (B, H, W, C, 1, D)
    ```
    """

    def __init__(self, output_shape: types.Shape, preserve_dims: int = 1, **kwargs):
        """
        Create the layer.

        Args:
            output_shape: Target shape for the non-preserved axes. At most one
                entry may be ``-1``; it is inferred at call time.
            preserve_dims: Number of leading axes left untouched (must be >= 1).
            kwargs: Forwarded to ``Module``.

        Raises:
            ValueError: If ``preserve_dims`` is not positive, or more than one
                ``-1`` occurs in ``output_shape``.
        """
        super().__init__(**kwargs)
        if preserve_dims <= 0:
            raise ValueError("Argument preserve_dims should be >= 1.")
        if output_shape.count(-1) > 1:
            raise ValueError("-1 can only occur once in `output_shape`.")
        self.output_shape = tuple(output_shape)
        self.preserve_dims = preserve_dims

    def call(self, inputs: np.ndarray) -> np.ndarray:
        """
        Reshape ``inputs``. Inputs with no non-preserved axes pass through
        unchanged.

        Arguments:
            inputs: the array to be reshaped.

        Returns:
            A reshaped array.
        """
        if inputs.ndim <= self.preserve_dims:
            return inputs
        leading = inputs.shape[: self.preserve_dims]
        trailing = self.output_shape
        if -1 in trailing:
            trailing = _infer_shape(trailing, inputs.shape[self.preserve_dims :])
        return jnp.reshape(inputs, leading + trailing)
class Flatten(Reshape):
    """
    Collapses every axis after the first ``preserve_dims`` into one,
    i.e. ``Reshape(output_shape=(-1,))``.

    ```python
    x = jnp.ones([3, 2, 4])
    flat = elegy.nn.Flatten()
    assert flat(x).shape == (3, 8)
    ```

    Inputs that already have ``preserve_dims`` or fewer axes are returned
    unchanged:

    ```python
    x = jnp.ones([3])
    assert flat(x).shape == (3,)
    ```
    """

    def __init__(
        self,
        preserve_dims: int = 1,
        name: tp.Optional[str] = None,
    ):
        """Create the layer; see ``Reshape`` for the argument semantics."""
        super().__init__(output_shape=(-1,), preserve_dims=preserve_dims, name=name)
| anvelezec/elegy | elegy/nn/flatten.py | flatten.py | py | 4,093 | python | en | code | null | github-code | 36 | [
{
"api_name": "numpy.prod",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.prod",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "elegy.module.Module",
"line_num... |
8445132228 | import numpy as _numpy
import cupy as _cupy
from cupy_backends.cuda.libs import cublas as _cublas
from cupy.cuda import device as _device
def gesv(a, b):
    """Solve a linear matrix equation using cusolverDn<t>getr[fs]().

    Computes the solution to a system of linear equation ``ax = b``.

    Args:
        a (cupy.ndarray): The matrix with dimension ``(M, M)``.
        b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.

    Returns:
        None. The solution ``x`` overwrites ``b`` in place, and ``a`` is
        overwritten with its LU factors.
    """
    from cupy_backends.cuda.libs import cusolver as _cusolver

    if a.ndim != 2:
        raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
    if b.ndim not in (1, 2):
        raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
    if a.shape[0] != a.shape[1]:
        raise ValueError('a must be a square matrix.')
    if a.shape[0] != b.shape[0]:
        raise ValueError('shape mismatch (a: {}, b: {}).'.
                         format(a.shape, b.shape))
    if a.dtype != b.dtype:
        raise TypeError('dtype mismatch (a: {}, b: {})'.
                        format(a.dtype, b.dtype))
    # Map dtype to the cuSOLVER routine prefix (s/d/c/z).
    dtype = a.dtype
    if dtype == 'f':
        t = 's'
    elif dtype == 'd':
        t = 'd'
    elif dtype == 'F':
        t = 'c'
    elif dtype == 'D':
        t = 'z'
    else:
        raise TypeError('unsupported dtype (actual:{})'.format(a.dtype))
    helper = getattr(_cusolver, t + 'getrf_bufferSize')
    getrf = getattr(_cusolver, t + 'getrf')
    getrs = getattr(_cusolver, t + 'getrs')
    n = b.shape[0]
    nrhs = b.shape[1] if b.ndim == 2 else 1
    # A C-contiguous `a` is the transpose of what LAPACK expects, so solve
    # with the transposed op instead of copying.
    if a._f_contiguous:
        trans = _cublas.CUBLAS_OP_N
    elif a._c_contiguous:
        trans = _cublas.CUBLAS_OP_T
    else:
        raise ValueError('a must be F-contiguous or C-contiguous.')
    if not b._f_contiguous:
        raise ValueError('b must be F-contiguous.')
    handle = _device.get_cusolver_handle()
    dipiv = _cupy.empty(n, dtype=_numpy.int32)
    dinfo = _cupy.empty(1, dtype=_numpy.int32)
    lwork = helper(handle, n, n, a.data.ptr, n)
    dwork = _cupy.empty(lwork, dtype=a.dtype)
    # LU factorization (A = L * U)
    getrf(handle, n, n, a.data.ptr, n, dwork.data.ptr, dipiv.data.ptr,
          dinfo.data.ptr)
    _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
        getrf, dinfo)
    # Solves Ax = b
    getrs(handle, trans, n, nrhs, a.data.ptr, n,
          dipiv.data.ptr, b.data.ptr, n, dinfo.data.ptr)
    _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
        getrs, dinfo)
def gels(a, b):
    """Solves over/well/under-determined linear systems.

    Computes least-square solution to equation ``ax = b`` by QR factorization
    using cusolverDn<t>geqrf().

    Args:
        a (cupy.ndarray): The matrix with dimension ``(M, N)``.
        b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.

    Returns:
        cupy.ndarray:
            The matrix with dimension ``(N)`` or ``(N, K)``.
    """
    from cupy_backends.cuda.libs import cusolver as _cusolver

    if a.ndim != 2:
        raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
    if b.ndim == 1:
        nrhs = 1
    elif b.ndim == 2:
        nrhs = b.shape[1]
    else:
        raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
    if a.shape[0] != b.shape[0]:
        raise ValueError('shape mismatch (a: {}, b: {}).'.
                         format(a.shape, b.shape))
    if a.dtype != b.dtype:
        raise ValueError('dtype mismatch (a: {}, b: {}).'.
                         format(a.dtype, b.dtype))
    # Map dtype to the cuSOLVER/cuBLAS routine prefix (s/d/c/z).
    dtype = a.dtype
    if dtype == 'f':
        t = 's'
    elif dtype == 'd':
        t = 'd'
    elif dtype == 'F':
        t = 'c'
    elif dtype == 'D':
        t = 'z'
    else:
        raise ValueError('unsupported dtype (actual: {})'.format(dtype))
    geqrf_helper = getattr(_cusolver, t + 'geqrf_bufferSize')
    geqrf = getattr(_cusolver, t + 'geqrf')
    trsm = getattr(_cublas, t + 'trsm')
    # Real dtypes use ormqr (orthogonal Q); complex dtypes use unmqr (unitary Q).
    if t in 'sd':
        ormqr_helper = getattr(_cusolver, t + 'ormqr_bufferSize')
        ormqr = getattr(_cusolver, t + 'ormqr')
    else:
        ormqr_helper = getattr(_cusolver, t + 'unmqr_bufferSize')
        ormqr = getattr(_cusolver, t + 'unmqr')
    no_trans = _cublas.CUBLAS_OP_N
    if dtype.char in 'fd':
        trans = _cublas.CUBLAS_OP_T
    else:
        trans = _cublas.CUBLAS_OP_C
    m, n = a.shape
    mn_min = min(m, n)
    dev_info = _cupy.empty(1, dtype=_numpy.int32)
    tau = _cupy.empty(mn_min, dtype=dtype)
    cusolver_handle = _device.get_cusolver_handle()
    cublas_handle = _device.get_cublas_handle()
    one = _numpy.array(1.0, dtype=dtype)
    if m >= n:  # over/well-determined systems
        a = a.copy(order='F')
        b = b.copy(order='F')
        # geqrf (QR decomposition, A = Q * R)
        ws_size = geqrf_helper(cusolver_handle, m, n, a.data.ptr, m)
        workspace = _cupy.empty(ws_size, dtype=dtype)
        geqrf(cusolver_handle, m, n, a.data.ptr, m, tau.data.ptr,
              workspace.data.ptr, ws_size, dev_info.data.ptr)
        _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
            geqrf, dev_info)
        # ormqr (Computes Q^T * B)
        ws_size = ormqr_helper(
            cusolver_handle, _cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
            a.data.ptr, m, tau.data.ptr, b.data.ptr, m)
        workspace = _cupy.empty(ws_size, dtype=dtype)
        ormqr(cusolver_handle, _cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs,
              mn_min, a.data.ptr, m, tau.data.ptr, b.data.ptr, m,
              workspace.data.ptr, ws_size, dev_info.data.ptr)
        _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
            ormqr, dev_info)
        # trsm (Solves R * X = (Q^T * B))
        trsm(cublas_handle, _cublas.CUBLAS_SIDE_LEFT,
             _cublas.CUBLAS_FILL_MODE_UPPER, no_trans,
             _cublas.CUBLAS_DIAG_NON_UNIT, mn_min, nrhs,
             one.ctypes.data, a.data.ptr, m, b.data.ptr, m)
        return b[:n]
    else:  # under-determined systems
        a = a.conj().T.copy(order='F')
        bb = b
        out_shape = (n,) if b.ndim == 1 else (n, nrhs)
        b = _cupy.zeros(out_shape, dtype=dtype, order='F')
        b[:m] = bb
        # geqrf (QR decomposition, A^T = Q * R)
        ws_size = geqrf_helper(cusolver_handle, n, m, a.data.ptr, n)
        workspace = _cupy.empty(ws_size, dtype=dtype)
        geqrf(cusolver_handle, n, m, a.data.ptr, n, tau.data.ptr,
              workspace.data.ptr, ws_size, dev_info.data.ptr)
        _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
            geqrf, dev_info)
        # trsm (Solves R^T * Z = B)
        trsm(cublas_handle, _cublas.CUBLAS_SIDE_LEFT,
             _cublas.CUBLAS_FILL_MODE_UPPER, trans,
             _cublas.CUBLAS_DIAG_NON_UNIT, m, nrhs,
             one.ctypes.data, a.data.ptr, n, b.data.ptr, n)
        # ormqr (Computes Q * Z)
        ws_size = ormqr_helper(
            cusolver_handle, _cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
            mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n)
        workspace = _cupy.empty(ws_size, dtype=dtype)
        ormqr(cusolver_handle, _cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
              mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n,
              workspace.data.ptr, ws_size, dev_info.data.ptr)
        _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
            ormqr, dev_info)
        return b
def _batched_posv(a, b):
    """Batched Cholesky solve of A[i] x[i] = b[i] for stacked (..., N, N)
    matrices, via cuSOLVER potrfBatched/potrsBatched.

    NOTE(review): per the inline comment below, potrsBatched does not
    support nrhs > 1 as of CUDA v10.2.
    """
    from cupy_backends.cuda.libs import cusolver as _cusolver
    import cupyx.cusolver
    if not cupyx.cusolver.check_availability('potrsBatched'):
        raise RuntimeError('potrsBatched is not available')
    # Promote to a common floating dtype (at least float32).
    dtype = _numpy.promote_types(a.dtype, b.dtype)
    dtype = _numpy.promote_types(dtype, 'f')
    if dtype == 'f':
        potrfBatched = _cusolver.spotrfBatched
        potrsBatched = _cusolver.spotrsBatched
    elif dtype == 'd':
        potrfBatched = _cusolver.dpotrfBatched
        potrsBatched = _cusolver.dpotrsBatched
    elif dtype == 'F':
        potrfBatched = _cusolver.cpotrfBatched
        potrsBatched = _cusolver.cpotrsBatched
    elif dtype == 'D':
        potrfBatched = _cusolver.zpotrfBatched
        potrsBatched = _cusolver.zpotrsBatched
    else:
        msg = ('dtype must be float32, float64, complex64 or complex128'
               ' (actual: {})'.format(a.dtype))
        raise ValueError(msg)
    a = a.astype(dtype, order='C', copy=True)
    ap = _cupy._core._mat_ptrs(a)
    lda, n = a.shape[-2:]
    batch_size = int(_numpy.prod(a.shape[:-2]))
    handle = _device.get_cusolver_handle()
    uplo = _cublas.CUBLAS_FILL_MODE_LOWER
    dev_info = _cupy.empty(batch_size, dtype=_numpy.int32)
    # Cholesky factorization
    potrfBatched(handle, uplo, n, ap.data.ptr, lda, dev_info.data.ptr,
                 batch_size)
    _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
        potrfBatched, dev_info)
    b_shape = b.shape
    b = b.conj().reshape(batch_size, n, -1).astype(dtype, order='C', copy=True)
    bp = _cupy._core._mat_ptrs(b)
    ldb, nrhs = b.shape[-2:]
    dev_info = _cupy.empty(1, dtype=_numpy.int32)
    # NOTE: potrsBatched does not currently support nrhs > 1 (CUDA v10.2)
    # Solve: A[i] * X[i] = B[i]
    potrsBatched(handle, uplo, n, nrhs, ap.data.ptr, lda, bp.data.ptr, ldb,
                 dev_info.data.ptr, batch_size)
    _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
        potrsBatched, dev_info)
    # TODO: check if conj() is necessary when nrhs > 1
    return b.conj().reshape(b_shape)
def posv(a, b):
    """Solve the linear equations A x = b via Cholesky factorization of A,
    where A is a real symmetric or complex Hermitian positive-definite matrix.

    If matrix ``A`` is not positive definite, Cholesky factorization fails
    and it raises an error.

    Note: For batch input, NRHS > 1 is not currently supported.

    Args:
        a (cupy.ndarray): Array of real symmetric or complex hermitian
            matrices with dimension (..., N, N).
        b (cupy.ndarray): right-hand side (..., N) or (..., N, NRHS).

    Returns:
        x (cupy.ndarray): The solution (shape matches b).
    """
    from cupy_backends.cuda.libs import cusolver as _cusolver
    _util = _cupy.linalg._util
    _util._assert_cupy_array(a, b)
    _util._assert_stacked_2d(a)
    _util._assert_stacked_square(a)
    # Stacked inputs take the batched potrfBatched/potrsBatched path.
    if a.ndim > 2:
        return _batched_posv(a, b)
    # Promote to a common floating dtype (at least float32).
    dtype = _numpy.promote_types(a.dtype, b.dtype)
    dtype = _numpy.promote_types(dtype, 'f')
    if dtype == 'f':
        potrf = _cusolver.spotrf
        potrf_bufferSize = _cusolver.spotrf_bufferSize
        potrs = _cusolver.spotrs
    elif dtype == 'd':
        potrf = _cusolver.dpotrf
        potrf_bufferSize = _cusolver.dpotrf_bufferSize
        potrs = _cusolver.dpotrs
    elif dtype == 'F':
        potrf = _cusolver.cpotrf
        potrf_bufferSize = _cusolver.cpotrf_bufferSize
        potrs = _cusolver.cpotrs
    elif dtype == 'D':
        potrf = _cusolver.zpotrf
        potrf_bufferSize = _cusolver.zpotrf_bufferSize
        potrs = _cusolver.zpotrs
    else:
        msg = ('dtype must be float32, float64, complex64 or complex128'
               ' (actual: {})'.format(a.dtype))
        raise ValueError(msg)
    a = a.astype(dtype, order='F', copy=True)
    lda, n = a.shape
    handle = _device.get_cusolver_handle()
    uplo = _cublas.CUBLAS_FILL_MODE_LOWER
    dev_info = _cupy.empty(1, dtype=_numpy.int32)
    worksize = potrf_bufferSize(handle, uplo, n, a.data.ptr, lda)
    workspace = _cupy.empty(worksize, dtype=dtype)
    # Cholesky factorization
    potrf(handle, uplo, n, a.data.ptr, lda, workspace.data.ptr,
          worksize, dev_info.data.ptr)
    _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
        potrf, dev_info)
    b_shape = b.shape
    b = b.reshape(n, -1).astype(dtype, order='F', copy=True)
    ldb, nrhs = b.shape
    # Solve: A * X = B
    potrs(handle, uplo, n, nrhs, a.data.ptr, lda, b.data.ptr, ldb,
          dev_info.data.ptr)
    _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
        potrs, dev_info)
    return _cupy.ascontiguousarray(b.reshape(b_shape))
| cupy/cupy | cupyx/lapack.py | lapack.py | py | 12,437 | python | en | code | 7,341 | github-code | 36 | [
{
"api_name": "cupy_backends.cuda.libs.cusolver",
"line_number": 48,
"usage_type": "argument"
},
{
"api_name": "cupy_backends.cuda.libs.cusolver",
"line_number": 49,
"usage_type": "argument"
},
{
"api_name": "cupy_backends.cuda.libs.cusolver",
"line_number": 50,
"usage_ty... |
74050641704 | from typing import Any, Dict, List, Optional, Union
from parlai.agents.rag.retrieve_api import (
SearchEngineRetriever,
SearchEngineRetrieverMock,
)
from parlai.agents.rag.retrievers import Document
from parlai.core.agents import Agent
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser
from parlai.core.message import Message
# Mapping from the --server CLI choice to a search-server address.
SERVERS = {
    'default': 'RELEVANT_SEARCH_SERVER',
    'test': 'http://test_api',
}
class SearchAgent(Agent):
    """Agent that answers each observation with retrieved search documents.

    Each act() returns the stringified retrieved documents as 'text' and the
    Document objects themselves under 'top_docs'.
    """

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        """Register search-related command-line options."""
        parser.add_argument(
            '--server',
            type=str,
            choices=SERVERS.keys(),
            default='default',
            help='Which search server to use',
        )
        parser.add_argument(
            '--raw-server',
            type=str,
            default=None,
            help='Specify to override the server choices with your own.',
        )
        parser.add_argument(
            '--n-docs', type=int, default=5, help='How many docs to retrieve'
        )
        parser.add_argument(
            '--intra-doc-delimiter',
            type=str,
            default='\n',
            help='How to delimit intra-document contents',
        )
        return parser

    def __init__(self, opt: Opt, shared=None):
        super().__init__(opt, shared=shared)
        # Resolve the server alias; an explicit --raw-server wins.
        opt['search_server'] = SERVERS[opt['server']]
        if opt.get('raw_server') is not None:
            opt['search_server'] = opt['raw_server']
        opt['skip_retrieval_token'] = ''
        self.intra_doc_delim = opt['intra_doc_delimiter']
        self.n_docs = opt['n_docs']
        if shared is None:
            # 'test' uses a mock retriever so no real HTTP requests are made.
            self.search_client = (
                SearchEngineRetriever(opt)
                if opt['server'] != 'test'
                else SearchEngineRetrieverMock(opt)
            )
        else:
            self.search_client = shared['client']
        self.top_docs = []

    def reset(self):
        """Clear episode state, including the last retrieved documents."""
        super().reset()
        self.top_docs = []

    def share(self) -> Dict[str, Any]:
        """Share the retriever client with cloned agent instances."""
        shared = super().share()
        shared['client'] = self.search_client
        return shared

    def act(self):
        """Retrieve documents for the current observation's text."""
        observation = self.observation
        # retrieve() takes a batch of queries; we send one and take its results.
        results = self.search_client.retrieve([observation['text']], self.n_docs)[0]
        documents: List[Document] = []
        for doc in results:
            # Document content may arrive as a list of passages; join them.
            content = (
                self.intra_doc_delim.join(doc['content'])
                if isinstance(doc['content'], list)
                else doc['content']
            )
            documents.append(
                Document(docid=doc['url'], text=content, title=doc['title'])
            )
        reply = {
            'text': '\n'.join([str(doc) for doc in documents]),
            'top_docs': documents,
        }
        return reply

    def respond(
        self, text_or_message: Union[str, Message], **other_message_fields
    ) -> str:
        """
        Override Agent.respond to set top_docs.
        """
        if isinstance(text_or_message, str):
            observation = Message(text=text_or_message, **other_message_fields)
        else:
            observation = Message(**text_or_message, **other_message_fields)
        if 'text' not in observation:
            raise RuntimeError('The agent needs a \'text\' field in the message.')
        if 'episode_done' not in observation:
            observation['episode_done'] = True
        # Act on a clone so this agent's own dialogue state is untouched.
        agent = self.clone()
        agent.observe(observation)
        response = agent.act()
        self.top_docs = response['top_docs']
        return response['text']
| facebookresearch/ParlAI | projects/bb3/agents/search_agent.py | search_agent.py | py | 3,676 | python | en | code | 10,365 | github-code | 36 | [
{
"api_name": "parlai.core.agents.Agent",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "parlai.core.params.ParlaiParser",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 23,
"usage_type": "name"
},
{
"api_name... |
70050076903 | """
Simple runner that can parse template and run all Sources to write in the Sinks
"""
import sys
from typing import Optional
from pipereport.base.templateregistry import BaseTemplateRegistry
from pipereport.template.template import Template
from pipereport.template.registry import GitFSTemplateRegistry
class PipeRunner:
    """
    Simplest sequential runner: resolves a template from the registry and
    runs every Source, writing into the configured Sinks.
    """

    def __init__(self, template_registry: Optional[BaseTemplateRegistry] = None):
        super().__init__()
        # Default to the Git-backed registry when none is injected.
        self.template_registry = (
            GitFSTemplateRegistry() if template_registry is None else template_registry
        )

    def render_config(self, config: dict) -> Template:
        """
        Convert config dictionary into a Template instance

        Args:
            config (dict): config dictionary (must contain 'template_name')

        Returns:
            template (Template): parsed template
        """
        tmpl_dict = self.template_registry.get_template_by_name(config['template_name'])
        tmpl = Template.parse_with_config(tmpl_dict, config)
        return tmpl

    def print_config(self, config: dict):
        """
        Renders and prints a config from dictionary

        Args:
            config (dict): config dictionary
        """
        sys.stdout.write(str(self.render_config(config)))

    def run_from_config(self, config: dict):
        """
        Runs all Sources from a config dictionary

        Args:
            config (dict): config dictionary

        Returns:
            (Dict[str, Dict]): dictionary with a resulting telemetry dictionary for each sink name
        """
        # Reuse render_config instead of duplicating the registry lookup + parse.
        tmpl = self.render_config(config)
        for src in tmpl.sources.values():
            try:
                src.connect()
            except NotImplementedError:
                # Some sources require no explicit connection step.
                pass
            src.connect_sinks()
            src.run()
        return {
            sn: sink.telemetry.dump()
            for sn, sink in tmpl.sinks.items()
        }
| enchantner/pipereport | pipereport/runner/runner.py | runner.py | py | 2,078 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Optional",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pipereport.base.templateregistry.BaseTemplateRegistry",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pipereport.template.registry.GitFSTemplateRegistry",
"line_number": 22,... |
22778778238 | #!/usr/bin/python
# encoding: utf-8
import random
import six
import numpy as np
from skimage.transform import resize as imresize
import chainer
import os
import skimage.io as skio
class resizeNormalize(object):
    """Resize a (ch, h, w) image with pixels in [0, 1] to a fixed size and
    shift the values to be centered around zero ([-0.5, 0.5])."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img):
        # Promote a bare (h, w) grayscale image to (1, h, w).
        if img.ndim == 2:
            img = img[np.newaxis, :]
        # skimage expects channels-last, so go (h, w, ch) for the resize.
        channels_last = np.transpose(img, (1, 2, 0))
        resized = imresize(channels_last, self.size, mode='reflect')
        channels_first = resized.transpose(2, 0, 1).astype(np.float32)
        # Center pixel values around zero.
        return channels_first - 0.5
class TextImageDataset(chainer.dataset.DatasetMixin):
    """Chainer dataset pairing grayscale images with text labels.

    ``pairs_path`` is a whitespace-separated file of ``image_path label_index``
    rows; ``lexicon`` is a file with one label string per line, indexed by
    those label indices.
    """

    def __init__(self, pairs_path, lexicon, label_dict=None, dtype=np.float32,
                 label_dtype=np.int32, resize=None, random_step=0):
        # Directory containing the pairs file; image paths are relative to it.
        self.path_to_target_txt = '{}/'.format(os.path.split(pairs_path)[0])
        # NOTE(review): if pairs_path / lexicon are not strings, `pairs` and
        # `self._lexicon` are never assigned and a NameError/AttributeError
        # follows — confirm callers always pass file paths.
        if isinstance(pairs_path, six.string_types):
            with open(pairs_path) as pairs_file:
                pairs = []
                for i, line in enumerate(pairs_file):
                    pair = line.strip().split()
                    if len(pair) != 2:
                        raise ValueError(
                            'invalid format at line {} in file {}'.format(
                                i, pairs_path))
                    pairs.append((pair[0], str(pair[1])))
        if isinstance(lexicon, six.string_types):
            l_names = []
            with open(lexicon) as lexicon_file:
                for i, line in enumerate(lexicon_file):
                    name = line.strip().split()
                    if len(name) != 1:
                        raise ValueError('invalid format')
                    l_names.append(str(*name))
            self._lexicon = l_names
        self._pairs = pairs
        self._dtype = dtype
        self._label_dtype = label_dtype
        # NOTE(review): resize, label_dict and random_step are stored/accepted
        # but unused in this class as shown.
        self.resize = resize
        self.label_dict = label_dict

    def __len__(self):
        # Number of (image, label) pairs.
        return len(self._pairs)

    def get_example(self, i):
        """Return (image, text) for index ``i``; image has shape (1, h, w)."""
        img_path, label = self._pairs[i]
        full_path = os.path.abspath(self.path_to_target_txt + img_path)
        image = _read_image_as_array(full_path, self._dtype)
        if len(image.shape) == 2:
            image = image[np.newaxis, :]
        # Map the numeric label index to its lexicon string.
        text = self._lexicon[int(label)]
        return image, text
def _read_image_as_array(path, dtype):
    """Load *path* as a grayscale image of shape (1, h, w) with dtype *dtype*."""
    grey = skio.imread(path, as_grey=True)
    with_channel = np.expand_dims(grey, axis=0)
    return np.asarray(with_channel, dtype=dtype)
| Swall0w/chainer-crnn | dataset.py | dataset.py | py | 2,600 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "numpy.newaxis",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "numpy.transpose",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "skimage.transform.resize",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.flo... |
12485964052 | from pathlib import Path
import numpy as np
import pandas as pd
# Fixed paths inside the competition runtime container.
ROOT_DIRECTORY = Path("/code_execution")
DATA_DIRECTORY = Path("/data")
# Submission file the scorer expects.
OUTPUT_FILE = ROOT_DIRECTORY / "submission" / "subset_matches.csv"
def generate_matches(query_video_ids) -> pd.DataFrame:
    """Generate predicted matches for the given query video ids.

    Args:
        query_video_ids: iterable of query video identifiers to match.

    Returns:
        pd.DataFrame: one row per predicted match.

    Raises:
        NotImplementedError: always, until this template is adapted.
    """
    # Template placeholder: replace the raise with your matching logic and
    # return the resulting DataFrame. (Removed unreachable code that
    # followed the raise.)
    raise NotImplementedError(
        "This script is just a template. You should adapt it with your own code."
    )
def main():
    """Load the query subset, generate matches and write the submission CSV."""
    subset = pd.read_csv(DATA_DIRECTORY / "query_subset.csv")
    video_ids = subset.video_id.values

    # All of the matching work happens inside generate_matches().
    predicted_matches = generate_matches(video_ids)
    predicted_matches.to_csv(OUTPUT_FILE, index=False)
# Script entry point.
if __name__ == "__main__":
    main()
| drivendataorg/meta-vsc-matching-runtime | submission_src/main.py | main.py | py | 786 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"... |
8880207451 | import math
from typing import Callable
import numpy as np
from nm.typing import NDArrayOrFloat
def relative_error(
    curr_approx: NDArrayOrFloat, prev_approx: NDArrayOrFloat
) -> NDArrayOrFloat:
    """Return the relative error between two successive approximations.

    The result is a fraction, not a percentage.
    """
    difference = curr_approx - prev_approx
    return abs(difference / curr_approx)
def forward_euler(
    f: Callable[[NDArrayOrFloat], NDArrayOrFloat],
    x: NDArrayOrFloat,
    h: NDArrayOrFloat,
) -> NDArrayOrFloat:
    """Approximate f'(x) using the forward-difference quotient.

    Positional arguments:
    f -- function whose derivative is approximated
    x -- position at which the derivative is evaluated
    h -- step size (scalar, or an array of step sizes)
    """
    ahead = f(x + h)
    here = f(x)
    return (ahead - here) / h
def taylor_series_approx_n(
    formula: Callable[[NDArrayOrFloat, int], NDArrayOrFloat],
    x: NDArrayOrFloat,
    num_terms: int,
) -> NDArrayOrFloat:
    """Sum the first ``num_terms`` terms (n = 1 .. num_terms) of a Taylor
    series, given its per-term formula."""
    terms = (formula(x, n) for n in range(1, num_terms + 1))
    return sum(terms, 0.0)
def taylor_series_approx_tol(
    formula: Callable[[NDArrayOrFloat, int], NDArrayOrFloat],
    x: NDArrayOrFloat,
    tolerance: NDArrayOrFloat,
) -> NDArrayOrFloat:
    """Computes the approximation of a continuous function at the vicinity of a = 0.

    Positional arguments:
    formula -- A func that represents the specific Taylor series formula of the function
    x -- Position at which we want the approximate value of the function
    tolerance -- At what tolerance we should stop iteration (<1)

    Returns:
        Approximate value of a function at x
    """
    # NOTE(review): terms start at n = 0 here, unlike taylor_series_approx_n
    # which starts at n = 1 — confirm which convention formula expects.
    nth_term: int = 0
    rel_err: NDArrayOrFloat = 1.0
    curr: NDArrayOrFloat = 0.0
    prev: NDArrayOrFloat = 0.0

    while rel_err > tolerance:
        curr += formula(x, nth_term)
        # rel_err is scaled by 100 (a percentage), so `tolerance` is compared
        # against a percentage despite the "<1" wording above.
        # NOTE(review): relative_error divides by curr, so a zero partial sum
        # raises ZeroDivisionError — confirm inputs avoid this.
        rel_err = relative_error(curr, prev) * 100
        nth_term += 1
        prev = curr

    return curr
| abzrg/nmpy | nm/error.py | error.py | py | 2,102 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nm.typing.NDArrayOrFloat",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "nm.typing.NDArrayOrFloat",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "nm.... |
4204565119 | import argparse
import itertools
import random
import sys
import gym
import numpy as np
from gym.wrappers import TimeLimit
from tqdm import trange
import sen.envs
from sen.agents import LimitActionsRandomAgent, RandomAgent
from sen.envs.block_pushing import render_cubes, rot90_action
from sen.utils import save_h5
def rot_coords_img(coords, width):
    """Rotate cube coordinates by 90 degrees and render them as a
    channels-first (ch, h, w) image."""
    rotated = [[width - y - 1, x] for x, y in coords]
    rendered = render_cubes(rotated, width)
    return rendered.transpose([2, 0, 1])
def generate_dataset(args):
    """Roll out a random agent in a block-pushing env and save two replay
    buffers: the original transitions and their 90-degree-rotated twins."""
    gym.logger.set_level(gym.logger.INFO)
    # Set seed for numpy and random
    random.seed(args.seed)
    np.random.seed(args.seed)
    env = gym.make(args.env_id, **args.env_kwargs)
    env = TimeLimit(env.unwrapped, args.env_timelimit)
    # Seed env
    env.seed(args.seed)
    env.action_space.seed(args.seed)
    # Restrict the action set if --actions was given.
    if len(args.actions) > 0:
        agent = LimitActionsRandomAgent(
            env.action_space,
            args.actions,
        )
    else:
        agent = RandomAgent(env.action_space)
    buffer = {"obs": [], "action": [], "next_obs": []}
    rot_buffer = {"obs": [], "action": [], "next_obs": []}
    for _ in trange(args.num_episodes, desc="Episode", file=sys.stdout):
        # Start a fresh per-episode list in every buffer field.
        for v in buffer.values():
            v.append([])
        for v in rot_buffer.values():
            v.append([])
        ob = env.reset()
        done = False
        for t in itertools.count():
            # ob[1] holds the image observation; ob[0] is unused here.
            buffer["obs"][-1].append(ob[1])
            # Render the same state rotated by 90 degrees.
            rot_ob = rot_coords_img(env.objects, env.width)
            rot_buffer["obs"][-1].append(rot_ob)

            action = agent.act(ob)
            buffer["action"][-1].append(action)
            # Save the correct rotated action
            rot_buffer["action"][-1].append(rot90_action(action, k=1))

            next_ob, _, done, _ = env.step(action)

            buffer["next_obs"][-1].append(next_ob[1])
            rot_next_ob = rot_coords_img(env.objects, env.width)
            rot_buffer["next_obs"][-1].append(rot_next_ob)

            ob = next_ob

            if done:
                break

    # Map values to numpy arrays and cast to float32
    for k, v in buffer.items():
        buffer[k] = np.array(v, dtype=np.float32)
    for k, v in rot_buffer.items():
        rot_buffer[k] = np.array(v, dtype=np.float32)

    env.close()

    # Save replay buffer to disk.
    save_h5(buffer, args.save_path)
    save_h5(rot_buffer, args.rot_save_path)
if __name__ == "__main__":

    class ParseKwargs(argparse.Action):
        """Parse repeated key=value tokens into a dict on the namespace."""

        def __call__(self, parser, namespace, values, option_string=None):
            setattr(namespace, self.dest, dict())
            for value in values:
                key, value = value.split("=")
                getattr(namespace, self.dest)[key] = value

    parser = argparse.ArgumentParser(description=None)
    parser.add_argument(
        "--env_id",
        type=str,
        help="Select the environment to run.",
    )
    # Extra keyword args forwarded to gym.make (key=value pairs).
    parser.add_argument("--env_kwargs", nargs="*", action=ParseKwargs, default={})
    # NOTE(review): --skewed-up-prob is parsed but not read by
    # generate_dataset above — confirm it is still needed.
    parser.add_argument("--skewed-up-prob", default=None, type=float)
    parser.add_argument("--actions", nargs="+", type=int, default=[])
    parser.add_argument(
        "--num_episodes",
        type=int,
        default=1000,
        help="Total number of episodes to simulate.",
    )
    parser.add_argument(
        "--env_timelimit",
        type=int,
        default=10,
        help="Max timelimit of env",
    )
    parser.add_argument(
        "--save_path",
        type=str,
        help="Save path for replay buffer (including extension .h5)",
    )
    parser.add_argument(
        "--rot_save_path",
        type=str,
        help="Save path for replay buffer (including extension .h5)",
    )
    parser.add_argument("--seed", type=int, default=1, help="Random seed.")

    args = parser.parse_args()

    generate_dataset(args)
| jypark0/sen | sen/data/gen_cubes.py | gen_cubes.py | py | 3,917 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "sen.envs.block_pushing.render_cubes",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "gym.logger.set_level",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "gym.logger",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name... |
71513904425 | from flask import request, jsonify
from os.path import isfile
from sklearn import svm
import pickle
MODEL_FILE = 'model.p'
class Model(object):
    """SVM iris classifier persisted to MODEL_FILE, exposed through Flask
    request handlers for training and prediction."""

    # True once a persisted model has been loaded or trained.
    __model_loaded = False

    def __init__(self):
        self.__model = svm.SVC()
        if isfile(MODEL_FILE):
            self.__load_model()

    def __load_model(self):
        """Load the pickled model from disk and mark it usable."""
        # Context manager guarantees the file handle is closed.
        with open(MODEL_FILE, 'rb') as fp:
            self.__model = pickle.load(fp)
        self.__model_loaded = True

    def __save_model(self):
        """Persist the fitted model to disk and mark it usable."""
        with open(MODEL_FILE, 'wb') as fp:
            pickle.dump(self.__model, fp)
        # Bug fix: without this, predict() reported failure after train()
        # until the process was restarted.
        self.__model_loaded = True

    def __load_request_data(self):
        # Parse the JSON body of the current Flask request.
        self.__request_data = request.get_json(force=True)

    def predict(self):
        """Predict the species for one flower record from the request JSON."""
        if not self.__model_loaded:
            # No trained model available yet.
            return jsonify(success=False)
        self.__load_request_data()
        record = self.__request_data
        label = self.__model.predict([[
            record['septal_length'],
            record['septal_width'],
            record['petal_length'],
            record['petal_width']
        ]])
        return jsonify(label=label[0])

    def train(self):
        """Fit the model on a list of labelled records from the request JSON."""
        features, labels = [], []
        self.__load_request_data()
        for record in self.__request_data:
            features.append([
                record['septal_length'],
                record['septal_width'],
                record['petal_length'],
                record['petal_width']
            ])
            labels.append(record['species'])
        self.__model.fit(features, labels)
        self.__save_model()
        return jsonify(success=True)
| ColinShaw/python-sklearn-flask-deployment-example | src/model.py | model.py | py | 1,593 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.svm.SVC",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_nu... |
75311890665 | import json
import os
import shutil
import torch
from efficientnet_pytorch import EfficientNet
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.optim.lr_scheduler import ExponentialLR, CosineAnnealingLR
from torch.utils.data import DataLoader
from torchsummary import summary
from tqdm import trange, tqdm
import pretrainedmodels
from torchcontrib.optim import SWA
from Dataset.id_rnd_dataset import IDRND_dataset, make_weights_for_balanced_classes
from model.network import DoubleLossModel, DoubleLossModelTwoHead, Model
from src.tools import str2bool
from utils.loss import FocalLoss, RobustFocalLoss2d
from utils.metrics import *
if __name__ == '__main__':
    # Load the training section of the experiment config.
    with open('../config.json', 'r') as f:
        config = json.load(f)['train']

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    train_dataset = IDRND_dataset(mode=config['mode'],
                                  use_face_detection=str2bool(config['use_face_detection']), double_loss_mode=True,
                                  output_shape=config['image_resolution'])
    train_loader = DataLoader(train_dataset, batch_size=96, shuffle=True, num_workers=8,
                              pin_memory=True, drop_last=True)

    # Restore a previously trained two-head model checkpoint.
    model = DoubleLossModelTwoHead(base_model=EfficientNet.from_pretrained('efficientnet-b3')).to(device)
    model.load_state_dict(torch.load('../output/models/DoubleModelTwoHead/DoubleModel_11_0.017068892421833598.pth', map_location=device))

    # --- Inference pass over the training set ---
    model.eval()
    train_bar = tqdm(train_loader)
    outputs = []
    targets = []
    user_ids = []
    frames = []
    for step, batch in enumerate(train_bar):
        image = batch['image'].to(device)
        # label4class (the classification head target) is moved to device
        # but unused below; only the binary label is collected.
        label4class = batch['label0'].to(device)
        label = batch['label1']
        user_id = batch['user_id']
        frame = batch['frame']
        with torch.no_grad():
            output4class, output = model(image)
        outputs += output.cpu().detach().view(-1).numpy().tolist()
        targets += label.cpu().detach().view(-1).numpy().tolist()
        user_ids += user_id
        frames += frame

    # NOTE(review): `pd` is not imported directly here — presumably it comes
    # from `from utils.metrics import *`; verify.
    df = pd.DataFrame()
    df['user_id'] = user_ids
    df['frame'] = frames
    df['probability'] = outputs
    df['target'] = targets
    # df = df.groupby('user_id')['probability', 'target'].mean().reset_index()
    # df = df[['user_id', 'probability', 'target']]
    df.to_csv("../data/train_predict.csv", index=False)

    # --- Same inference pass over the validation split ---
    val_dataset = IDRND_dataset(mode=config['mode'].replace('train', 'val'), use_face_detection=str2bool(config['use_face_detection']),
                                double_loss_mode=True, output_shape=config['image_resolution'])
    val_loader = DataLoader(val_dataset, batch_size=96, shuffle=True, num_workers=8, drop_last=False)

    model.eval()
    val_bar = tqdm(val_loader)
    outputs = []
    targets = []
    user_ids = []
    frames = []
    for step, batch in enumerate(val_bar):
        image = batch['image'].to(device)
        label4class = batch['label0'].to(device)
        label = batch['label1']
        user_id = batch['user_id']
        frame = batch['frame']
        with torch.no_grad():
            output4class, output = model(image)
        outputs += output.cpu().detach().view(-1).numpy().tolist()
        targets += label.cpu().detach().view(-1).numpy().tolist()
        user_ids += user_id
        frames += frame

    df = pd.DataFrame()
    df['user_id'] = user_ids
    df['frame'] = frames
    df['probability'] = outputs
    df['target'] = targets
    # df = df.groupby('user_id')['probability', 'target'].mean().reset_index()
    # df = df[['user_id', 'probability', 'target']]
    df.to_csv("../data/val_predict.csv", index=False)
{
"api_name": "json.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line... |
38190199809 | #!/usr/bin/python
import sys
import logging
from rdsConfig import getDbConfig
import json
import config
from responseIO import returnResponse, errString
from keyCheck import verifyPublisher, verifyUsers
import datetime
from modules.post import post
from dbCommons import createModuleIssue
## Creating a lambda handler function which serves as a response to an event
def lambda_handler(event, context):
    """AWS Lambda entry point: validates the request/publisher/user, then
    dispatches POST requests to the `post` module handler.

    Returns an API-Gateway style response via returnResponse().
    """
    ## Creation of retVal Dict which will act as the body of the returnResponse function (JSON)
    retVal = {}
    headers = {}
    ## Initalize Logger
    logger = config.getLogger()
    ## Initalize DB connection
    conn = getDbConfig(logger)
    if conn is None:
        retVal['message'] = "Whoops, something went wrong at out end! Please try again later!"
        return returnResponse(502, json.dumps(retVal), headers, logger)
    ## Check for HTTP Request completion
    try:
        # NOTE(review): qsp is extracted but never used below — confirm
        # whether query-string parameters are required here.
        qsp = event['queryStringParameters']
        # Normalize header names to lower case for case-insensitive lookup.
        header = {k.lower(): v for k, v in event['headers'].items()}
        httpMethod = str(event['httpMethod'])
    except Exception as e:
        retVal['message'] = "Invalid / Incomplete HTTP Request"
        return returnResponse(400, json.dumps(retVal), headers, logger, conn)
    # Debug the event
    logger.debug('Event = {}'.format(json.dumps(event)))
    ## Check if publisher key has been parsed in as part of the header
    publisherId = verifyPublisher(conn, header, logger)
    if publisherId is None:
        retVal['message'] = "Error: PublisherKey invalid or Missing"
        return returnResponse(403, json.dumps(retVal), headers, logger, conn)
    ## Validate user based on Infomo user-Key
    userId = verifyUsers(conn, header, logger)
    if httpMethod == 'POST':
        return post(conn, header, event['body'], publisherId, userId, headers, retVal, logger)
    else:
        # Only POST is supported.
        retVal = errString("Invalid Request!", retVal)
        return returnResponse(403, json.dumps(retVal), headers, logger, conn)
{
"api_name": "config.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "rdsConfig.getDbConfig",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "responseIO.returnResponse",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "json... |
13076725712 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import os
# directory where the FWHM file is located
# Network share holding the FWHM results of the COMSOL beam-optics runs.
directory = '//fs03/LTH_Neutimag/hkromer/02_Simulations/06_COMSOL/\
03_BeamOptics/01_OldTarget/IGUN_geometry/2018-09-18_comsolGeometry/\
02.define_release_time/particleData/plots/2D_histograms_lastTimestep/'
# Alternative dataset (kept for provenance):
# directory = '//fs03/LTH_Neutimag/hkromer/02_Simulations/06_COMSOL/\
# 03_BeamOptics/01_OldTarget/IGUN_geometry/2018-09-24_comsol/\
# define_current/particleData/plots/2D_histograms_lastTimestep/'
fname_fwhm = f'{directory}df_FWHMs.csv'

df_fwhm = pd.read_csv(fname_fwhm, index_col=0)
print(df_fwhm.head())

# plot the fwhms in two separate plots for TD and BIDIR
f, axarr = plt.subplots(2, figsize=(7, 7), sharex=True)

# TD
def plot_TD(df):
	# Plot one run_type group: TD in the top axes, BIDIR in the bottom.
	# print(df)
	df = df.sort_values(by=['id'])
	X = df.id.values
	Y = [df.FWHM_x.values, df.FWHM_y.values]
	if df.run_type.unique()[0] == 'TD':
		p1,=axarr[0].plot(X, Y[0], marker='o', color='darkorange')
		p2,=axarr[0].plot(X, Y[1], marker='s', color='darkblue')
		axarr[0].set_title('TD')
		# axarr[0].legend([p1,p2], ['x-direction', 'y-direction'])
		axarr[0].grid()
	else:
		p3,=axarr[1].plot(X, Y[0], marker='o', label='x-direction', color='darkorange')
		p4,=axarr[1].plot(X, Y[1], marker='s', label='y-direction', color='darkblue')
		axarr[1].legend([p3,p4], ['x-direction', 'y-direction'])
		axarr[1].set_title('BIDIR')
		axarr[1].grid()

df_fwhm.groupby('run_type').apply(lambda x: plot_TD(x))
plt.xlabel('ID')
plt.xticks(np.arange(1,15,1))
plt.grid()
f.text(0.04, 0.5, 'FWHM [mm]', va='center', rotation='vertical')
figname = f'{directory}FWHM_plots'
plt.savefig(figname + '.png', dpi=600)
# NOTE(review): plt.close('f') closes a figure *labeled* 'f' (a no-op here);
# plt.close(f) was probably intended — confirm.
plt.close('f')
| kromerh/phd_python | 03_COMSOL/03_BeamOptics/01_particlePosition/2018-09-28_compareFWHMs_oldTarget.py | 2018-09-28_compareFWHMs_oldTarget.py | py | 1,802 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotl... |
14836136357 | import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from utils.Manager import Manager
from models.XFormer import XFormer
def main(rank, manager):
    """ train/dev/test/encode the model (possibly distributed)

    Args:
        rank: current process id (device index for this worker)
        manager: Manager holding the configuration and train/eval routines
    """
    manager.setup(rank)
    loaders = manager.prepare()

    model = XFormer(manager).to(rank)

    # Wrap in DistributedDataParallel only when running multi-GPU.
    if manager.world_size > 1:
        model = DDP(model, device_ids=[rank], output_device=rank, find_unused_parameters=False)

    # Dispatch on the configured run mode.
    if manager.mode == 'dev':
        manager.evaluate(model, loaders, load=True)

    elif manager.mode == 'train':
        manager.train(model, loaders)

    elif manager.mode == 'test':
        manager.test(model, loaders)

    elif manager.mode == 'encode':
        manager.encode(model, loaders)
if __name__ == "__main__":
    manager = Manager()
    # Spawn one worker process per GPU for distributed runs; otherwise run
    # main() directly on the configured device.
    if manager.world_size > 1:
        mp.spawn(
            main,
            args=(manager,),
            nprocs=manager.world_size,
            join=True
        )
    else:
        main(manager.device, manager)
{
"api_name": "models.XFormer.XFormer",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn.parallel.DistributedDataParallel",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "utils.Manager.Manager",
"line_number": 36,
"usage_type": "call"
},
{... |
7731571907 | # Pickhacks 2023
# Safer Caver
# This is inspired by https://github.com/UP-RS-ESP/PointCloudWorkshop-May2022/blob/main/2_Alignment/ICP_Registration_ALS_UAV.ipynb
import copy
from pathlib import Path
import numpy as np
import open3d as o3d
import laspy
import distinctipy as colorpy
from scipy.spatial import cKDTree
def draw_1pcd(pcd):
    """Open an interactive viewer window showing a single point cloud."""
    o3d.visualization.draw_geometries([pcd])
def draw_point_clouds(*draw_params):
    """Visualize several point clouds side by side.

    Each positional argument is a (point_cloud, x_offset, color_option)
    triple: the cloud is copied, shifted along x by x_offset, and painted
    with a distinct color when color_option is truthy.
    """
    color_clouds = []
    # One visually distinct color per cloud (red excluded).
    colors = colorpy.get_colors(len(draw_params), [[1.0, 0.0, 0.0]])
    for draw_param, color in zip(draw_params, colors):
        point_cloud = draw_param[0]
        x_offset = draw_param[1]
        color_option = draw_param[2]
        # Copy so the caller's cloud is not shifted.
        pc_copy_arr = np.asarray(copy.deepcopy(point_cloud).points)
        pc_copy_arr[:, 0] += np.ones_like(pc_copy_arr[:, 0]) * x_offset
        color_clouds.append(pc_from_np(pc_copy_arr))
        if color_option:
            color_clouds[-1].paint_uniform_color(color)
    o3d.visualization.draw_geometries(color_clouds)
def read_las(path: Path, downsample: int = 10):
    """Read a .las file and return an (N, 3) xyz array, keeping every
    ``downsample``-th point."""
    point_cloud = laspy.read(str(path))
    return np.vstack((point_cloud.x, point_cloud.y, point_cloud.z)).transpose()[::downsample]
def pc_from_np(point_arr):
    """Wrap an (N, 3) numpy array in an open3d PointCloud."""
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(point_arr)
    return cloud
def voxelize(point_cloud, voxel_size=1.0, normals=True):
    """Convert point_cloud into voxel with dimension voxel_size (meters)
    Also get normals for later.
    """
    pc_vox = point_cloud.voxel_down_sample(voxel_size=voxel_size)
    if normals:
        # Estimate normals from up to 30 neighbours within a 3 m radius,
        # then flip them all to point upward (+z) for a consistent orientation.
        pc_vox.estimate_normals(o3d.geometry.KDTreeSearchParamHybrid(radius=3, max_nn=30))
        pc_vox.orient_normals_to_align_with_direction([0., 0., 1.])
    return pc_vox
def pc_distance(point_cloud1, point_cloud2):
    """Return the modified Hausdorff distance between two point clouds.

    This is the larger of the two mean nearest-neighbour distances
    (cloud1 -> cloud2 and cloud2 -> cloud1).
    """
    pc1_points = np.asarray(point_cloud1.points)
    pc2_points = np.asarray(point_cloud2.points)
    # cKDTree.query returns (distances, indices); only the distances belong
    # in the mean. Bug fix: the previous code averaged the whole tuple,
    # folding neighbour *indices* into the result.
    pc1_dist, _ = cKDTree(pc1_points).query(pc2_points, k=1, workers=-1)
    pc2_dist, _ = cKDTree(pc2_points).query(pc1_points, k=1, workers=-1)
    return np.max([np.mean(pc1_dist), np.mean(pc2_dist)])
def get_coarse_transform(point_cloud1, point_cloud2, distance, voxel_size=5.0):
    """Get coarse alignment transformation matrix.
    Down samples (ds) point clouds using large voxel size for speed up.
    point_cloud1 is the stationary pc (ie the map of the cave)
    point_cloud2 is the spelunkers pc (ie where they are lost)
    """
    pc1_ds = voxelize(point_cloud1, voxel_size, False)
    pc2_ds = voxelize(point_cloud2, voxel_size, False)

    # Get Fast Point Feature Histograms (FPFH)
    radius_feature = voxel_size * 5
    pc1_fpfh = o3d.pipelines.registration.compute_fpfh_feature(pc1_ds, o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))
    pc2_fpfh = o3d.pipelines.registration.compute_fpfh_feature(pc2_ds, o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))

    # Use RANSAC to get transform matrix
    # Correspondences further apart than twice the current cloud distance
    # are rejected.
    distance_threshold = distance * 2
    ransac = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(
        pc1_ds, pc2_ds, pc1_fpfh, pc2_fpfh, True, distance_threshold,
        o3d.pipelines.registration.TransformationEstimationPointToPoint(False), 3, [
            o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
            o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold)
        ],
        o3d.pipelines.registration.RANSACConvergenceCriteria(100000, 0.999))
    return ransac.transformation
def apply_transformation(point_cloud, transform):
    """Return a transformed deep copy, leaving *point_cloud* untouched."""
    cloud_copy = copy.deepcopy(point_cloud)
    return cloud_copy.transform(transform)
def get_fine_transform(point_cloud1, point_cloud2, distance):
    """Get fine alignment transformation matrix.
    point_cloud1 is the stationary pc (ie the map of the cave)
    point_cloud2 is the spelunkers pc (ie where they are lost)

    Uses point-to-point ICP; correspondences beyond twice the current cloud
    distance are ignored.
    """
    pc1 = copy.deepcopy(point_cloud1)
    max_correspondence_distance = distance * 2
    reg_p2p = o3d.pipelines.registration.registration_icp(point_cloud2, pc1, max_correspondence_distance,
                                                          estimation_method=o3d.pipelines.registration.TransformationEstimationPointToPoint())
    return reg_p2p.transformation
def create_find_neighbors(point_cloud, k=5):
    """Return a closure mapping a node index to the indices of its k nearest
    neighbours in *point_cloud* (the node itself excluded)."""
    def get_neighbors(node):
        # Euclidean distance from the query point to every point.
        dists = np.linalg.norm(point_cloud[node] - point_cloud, axis=1)
        # The k+1 smallest distances include the node itself (distance 0);
        # partition, then drop the node from the result.
        nearest = np.argpartition(dists, k + 1)[:k + 1]
        return nearest[nearest != node]
    return get_neighbors
def generate_spe_segment(cave_point_cloud_arr, segment_size, noise=0.1, segment_start=0):
    """Get chunk of cave_point_cloud that spe is stuck in and add some noise
    Noise value is based on Apple device LiDAR precision: https://www.nature.com/articles/s41598-021-01763-9
    """
    # NOTE(review): segment_start is accepted but unused, and the second
    # return value is always None even though the caller unpacks it as
    # segment_start — confirm intent (see the commented return below).
    get_neighbors = create_find_neighbors(cave_point_cloud_arr, k=segment_size)
    # Pick a random seed point and take its segment_size nearest neighbours.
    random_node = np.random.randint(cave_point_cloud_arr.shape[0])
    neighbor_nodes = get_neighbors(random_node)
    neighbors = cave_point_cloud_arr[neighbor_nodes]
    # Uniform jitter in [-noise, noise] per coordinate simulates sensor noise.
    noisy_neighbors = neighbors + np.random.uniform(-noise, noise, size=neighbors.shape)
    return noisy_neighbors, None
    #return spe_point_cloud_arr_noise, segment_start
def align(cave_point_cloud, spe_point_cloud):
    """Register the spelunker's scan against the cave map.

    Runs a feature-based coarse registration followed by ICP refinement and
    returns the spelunker cloud transformed into the cave map's frame.
    """
    # Downsample both clouds first; registration is far cheaper on voxels.
    cave_voxels = voxelize(cave_point_cloud, voxel_size=0.1)
    spe_voxels = voxelize(spe_point_cloud, voxel_size=0.1)
    # Stage 1: coarse alignment.
    coarse = get_coarse_transform(cave_voxels, spe_voxels,
                                  pc_distance(cave_voxels, spe_voxels),
                                  voxel_size=2)
    spe_coarse = apply_transformation(spe_voxels, coarse)
    # Stage 2: ICP fine alignment seeded by the coarse result.
    fine = get_fine_transform(cave_voxels, spe_coarse,
                              pc_distance(cave_voxels, spe_coarse))
    return apply_transformation(spe_coarse, fine)
if __name__ == "__main__":
    LAS_FILE = "Calisto.las"
    SPE_SEGMENT_SIZE = 75_000
    print("Loading LiDAR data...")
    # downsample=10: keep every 10th point of the LAS scan to bound memory.
    cave_point_cloud_arr = read_las(Path("../res/point_clouds") / LAS_FILE, downsample=10)
    print("Creating point clouds...")
    cave_point_cloud = pc_from_np(cave_point_cloud_arr)
    cave_voxel_cloud = voxelize(cave_point_cloud, voxel_size=0.1)
    # Emulate what a spelunker (spe) would scan by segmenting chunk from data file and adding noise
    # noiseless vars are for draw output later
    # NOTE(review): generate_spe_segment returns None as its second value and
    # ignores segment_start, so the noiseless and noisy segments below come
    # from DIFFERENT random regions — confirm whether that is intended.
    spe_point_cloud_noiseless_arr, segment_start = generate_spe_segment(cave_point_cloud_arr, SPE_SEGMENT_SIZE, noise=0)
    spe_voxel_cloud_noiseless = voxelize(pc_from_np(spe_point_cloud_noiseless_arr), voxel_size=0.1)
    spe_point_cloud_arr, _ = generate_spe_segment(cave_point_cloud_arr, SPE_SEGMENT_SIZE, segment_start=segment_start)
    spe_point_cloud = pc_from_np(spe_point_cloud_arr)
    spe_voxel_cloud = voxelize(spe_point_cloud, voxel_size=0.1)
    print("Calculate coarse alignment transform...")
    initial_distance = pc_distance(cave_voxel_cloud, spe_voxel_cloud)
    coarse_transform = get_coarse_transform(cave_voxel_cloud, spe_voxel_cloud, initial_distance, voxel_size=2)
    print("Calculate fine alignment transform...")
    spe_coarse_alignment = apply_transformation(spe_voxel_cloud, coarse_transform)
    coarse_distance = pc_distance(cave_voxel_cloud, spe_coarse_alignment)
    fine_transform = get_fine_transform(cave_voxel_cloud, spe_coarse_alignment, coarse_distance)
    print("Apply transformation...")
    spe_fine_alignment = apply_transformation(spe_coarse_alignment, fine_transform)
    # Draw it pretty
    # Widths along x are used as horizontal offsets — presumably to render the
    # raw, noiseless-overlay and aligned results side by side; confirm against
    # draw_point_clouds' signature.
    cave_width = np.max(cave_point_cloud_arr[:, 0]) - np.min(cave_point_cloud_arr[:, 0])
    spe_width = np.max([np.max(spe_point_cloud_arr[:, 0]) - np.min(spe_point_cloud_arr[:, 0]), 30])
    buffer = 10
    draw_point_clouds((cave_voxel_cloud, -(cave_width + spe_width + buffer), False),
                      (spe_voxel_cloud, -(spe_width + buffer), False),
                      (cave_voxel_cloud, 0, False),
                      (spe_voxel_cloud_noiseless, 0, True),
                      (cave_voxel_cloud, cave_width + buffer, False),
                      (spe_fine_alignment, cave_width + buffer, True))
| cubrink/pickhacks-2023 | safercaver/src/aligner.py | aligner.py | py | 8,767 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "open3d.visualization.draw_geometries",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "open3d.visualization",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "distinctipy.get_colors",
"line_number": 25,
"usage_type": "call"
},
{
... |
21325729011 | import csv
from elasticsearch import Elasticsearch
from elasticsearch import helpers
# Client for a local single-node Elasticsearch instance.
es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
#es.indices.delete(index='movies', ignore=[400, 404])
# Quick connectivity check: prints True when the cluster is reachable.
print(es.ping())
def convert(filename, indexname, type):
    """Bulk-index every row of the CSV *filename* into Elasticsearch.

    Each CSV row (as a dict keyed by the header) becomes one document in
    index *indexname* with document type *type*.
    """
    with open(filename, encoding="utf8") as csv_file:
        rows = csv.DictReader(csv_file)
        helpers.bulk(es, rows, index=indexname, doc_type=type)
# Create both indices up front with the default BM25 similarity;
# ignore=400 makes the call a no-op when the index already exists.
es.indices.create(index = 'movies',ignore=400,body={
    "settings":{
        "index":{
            "similarity":{
                "default":{
                    "type":"BM25"
                }
            }
        }
    }})
es.indices.create(index = 'ratings',ignore=400,body={
    "settings":{
        "index":{
            "similarity":{
                "default":{
                    "type":"BM25"
                }
            }
        }
    }})
# Bulk-load both CSV files, then confirm the indices exist.
convert('movies.csv','movies','movies')
convert('ratings.csv','ratings','ratings')
print(es.indices.exists('movies'))
print(es.indices.exists('ratings'))
| gdimitropoulos/information-retrieval | part1b/reader.py | reader.py | py | 1,044 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "elasticsearch.helpers.bulk",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "... |
1941680628 | # SHOW CARD LIST
#
import os
import re
import json
import sublime
import sublime_plugin
import subprocess
class UmbertoGetRecipCardLinkCommand(sublime_plugin.TextCommand, sublime_plugin.WindowCommand):
    """Sublime Text command: pick an index card from a quick panel, insert a
    LaTeX \\href link to it at the cursor, and write a reciprocal "See Also"
    link into the chosen card's .tex file."""

    def run(self, edit):
        """Load the project's code list and show it in a quick panel."""
        settings = sublime.load_settings('Umberto.sublime-settings')
        # Location of the current project.
        self.proj_dir = settings.get('current_project_directory')
        # load code_dict from json file (maps card code -> card title)
        dict_fname = self.proj_dir + '/lists/code_list.json'
        with open(dict_fname) as json_data:
            code_dict = json.load(json_data)
        self.code_list = list(code_dict.keys())
        self.title_list = list(code_dict.values())
        # Quick-panel rows show the title first, code second.
        view_list = [[m, n] for m, n in zip(self.title_list, self.code_list)]
        self.view.window().show_quick_panel(view_list, self.on_done)

    def on_done(self, jj):
        """Quick-panel callback: jj is the selected row (-1 when cancelled)."""
        if jj < 0:
            return
        else:
            href_start = '\href{run:../'
            href_mid = '.pdf}{'
            href_end = '}'
            code = self.code_list[jj]
            # Cards live in a subfolder named after the first two code chars.
            code_ref = code[0:2] + '/' + code
            title = self.title_list[jj]
            # thanks to a quirk of LaTeX, certain characters are not permitted without a preceding backslash.
            # THe following code adds in backslashes when they're needed
            title = title.replace("_", "\_")
            insert_string = href_start + code_ref + href_mid + title + href_end
            self.view.run_command('insert', {'characters': insert_string})
            self.see_also_link(code, title)

    def see_also_link(self, code, title):
        '''
        Generate a reciprocal link in a "See Also" section
        at the bottom of the index card
        '''
        # INFO ABOUT CURRENT FILE = THISFILE
        # get path and file name of the current file
        current_file_code = self.view.window().extract_variables()['file_base_name']
        typ_thisfile = current_file_code[0:2]
        current_file_name = self.view.window().extract_variables()['file_name']
        # # COMPILE CURRENT FILE
        # get_cd = self.proj_dir + '/' + typ_thisfile
        # os.chdir(get_cd)
        # build_tex = "pdflatex -halt-on-error " + current_file_name
        # process = subprocess.Popen(build_tex, shell=True)
        # process.wait()
        # GET current file title from the \lhead{...} macro of the open buffer.
        spatt = '\\lhead{.*}'
        curr_file_contents = self.view.substr(sublime.Region(0, self.view.size()))
        try:
            curr_title = re.search(spatt, curr_file_contents).group(0)
        # NOTE(review): bare except also falls back to 'Title' when re.search
        # returns None — intentional, but it hides any other error too.
        except: curr_title = 'Title'
        curr_title = curr_title.replace('lhead{', '')
        curr_title = curr_title.replace('}', '')
        # open the file specified by code = THATFILE
        typ_thatfile = code[0:2]
        thatfile = self.proj_dir + '/' + typ_thatfile + '/' + code + '.tex'
        with open(thatfile, 'r') as thatfile_data:
            text = thatfile_data.read()
        # search tex for \noindent\textbf{See Also}\newline
        # if it's there, append new reciprocal link
        # if it's not there, add this See Also section and
        # append new recip link
        # USEFUL STRINGS
        code_ref = typ_thisfile + '/' + current_file_code
        sa_title = r'\noindent\textbf{See Also}\newline' + '\n'
        end_doc = '\end{document}'
        href_start = '\href{run:../'
        href_mid = '.pdf}{'
        href_end = r'}\newline' + '\n'
        insert_string = href_start + code_ref + href_mid + curr_title + href_end
        new_sec = insert_string + '\n'
        # Splice the link in just before \end{document}.
        if sa_title in text:
            new_end_doc = new_sec + end_doc
            text = text.replace(end_doc, new_end_doc)
        else:
            new_end_doc = sa_title + new_sec + end_doc
            text = text.replace(end_doc, new_end_doc)
        with open(thatfile, 'w') as f:
            f.write(text)
        # # Recompile
        # get_cd = self.proj_dir + '/' + typ_thatfile
        # os.chdir(get_cd)
        # build_tex = "pdflatex -halt-on-error " + code + '.tex'
        # process = subprocess.Popen(build_tex, shell=True)
        # process.wait()
# process.wait() | tgparton/Umberto | get_recip_card_link.py | get_recip_card_link.py | py | 4,350 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sublime_plugin.TextCommand",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sublime_plugin.WindowCommand",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sublime.load_settings",
"line_number": 19,
"usage_type": "call"
},
... |
34358986297 | import SECRETS
import os
import openai
# OpenAI credentials: the organization id is hard-coded; the API key is read
# from the local SECRETS module so it stays out of this file.
openai.organization = "org-0iQE6DR7AuGXyEw1kD4poyIg"
openai.api_key = SECRETS.open_ai_api_key
from sms.logger import json_logger
def send_init_roast_bot_primer_prompt():
    """Send the Roast Master priming prompt and return the model's reply text."""
    primer = 'You are playing the role of Roast Master. You are hosting a Roast to poke fun at different users in a chatroom based on thier usernames. You will be given a username and you will respond a joke about the user based on thier username.'
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=primer,
        max_tokens=30,
    )
    return response.choices[0].text
def get_roast_str_from_username(username, log_json_file_path=None):
    """Ask the model to roast *username* and return the reply text.

    When *log_json_file_path* is given, the request parameters and response
    are appended to that JSON log file.
    """
    model = "text-davinci-003"
    prompt = 'Roast-bot, roast this user based on their username: ' + username
    max_tokens = 64
    completion = openai.Completion.create(
        model=model,
        prompt=prompt,
        max_tokens=max_tokens,
    )
    resp_str = completion.choices[0].text
    if log_json_file_path:
        # Append this exchange to the existing log (empty list when absent).
        log_entries = json_logger.read(log_json_file_path, return_if_file_not_found=[])
        log_entries.append(
            {
                "params": {
                    "model": model,
                    "prompt": prompt,
                    "max_tokens": max_tokens,
                },
                "resp": resp_str,
            }
        )
        json_logger.write(log_entries, log_json_file_path)
    return resp_str
| Brandon-Valley/tik_live_host | src/open_ai_api_handler.py | open_ai_api_handler.py | py | 1,813 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "openai.organization",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "openai.api_key",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "SECRETS.open_ai_api_key",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name":... |
5213207430 | from unittest import TestCase
from typing import List, Set, Tuple
"""
You are given a m x n 2D grid initialized with these three possible values.
-1 - A wall or an obstacle.
0 - A gate.
INF - Infinity means an empty room. We use the value 231 - 1 = 2147483647 to represent INF as you may assume that the
distance to a gate is less than 2147483647. Fill each empty room with the distance to its nearest gate.
If it is impossible to reach a gate, it should be filled with INF.
"""
class ObstacleDist(object):
    """Fill each empty room with the distance to its nearest gate, in place.

    Grid values: -1 = wall, 0 = gate, INF (2**31 - 1) = empty room.
    """

    def __init__(self, rooms: List[List[int]]):
        self.rooms = rooms
        self.rows = len(rooms)
        self.cols = len(rooms[0])

    def get_adjacent_nodes(self, node: Tuple[int, int]):
        """Yield the in-bounds 4-neighbours (left, right, up, down) of *node*."""
        row, col = node
        for adj in ((row, col - 1), (row, col + 1), (row - 1, col), (row + 1, col)):
            if 0 <= adj[0] < self.rows and 0 <= adj[1] < self.cols:
                yield adj

    def modify_with_closest_dist_to_gate(self):
        """Run a BFS from every gate; each room keeps the minimum distance seen."""
        for row in range(self.rows):
            for col in range(self.cols):
                if self.rooms[row][col] == 0:
                    self.bfs_modify((row, col))

    def bfs_modify(self, node_with_gate: Tuple[int, int]):
        """BFS from one gate, lowering each reachable room's stored distance.

        A room is updated (and enqueued) only when the candidate distance is
        strictly smaller than its stored value, so the search stops on its own
        once it reaches territory already closer to some other gate.

        Fix over the original: the queue is a list walked by an index cursor
        (O(1) dequeue, no duplicate enqueues) instead of ``list.pop(0)``,
        which was O(n) per dequeue; the final grid is identical.
        """
        queue = [(node_with_gate, 0)]
        head = 0
        while head < len(queue):
            node, dist = queue[head]
            head += 1
            candidate = dist + 1
            for adj in self.get_adjacent_nodes(node):
                row, col = adj
                val = self.rooms[row][col]
                # Skip walls/gates; only lower a strictly larger distance.
                if val == -1 or val == 0 or val <= candidate:
                    continue
                self.rooms[row][col] = candidate
                queue.append((adj, candidate))
class ObstacleDistTest(TestCase):
    def test_bfs_modify(self):
        """Distances propagate from one gate, then shrink when a second gate runs."""
        grid = ObstacleDist([[0, 100, 100, 100, 100, 0, 100, -1]])
        grid.bfs_modify((0, 0))
        self.assertEqual([[0, 1, 2, 3, 4, 0, 100, -1]], grid.rooms)
        grid.bfs_modify((0, 5))
        self.assertEqual([[0, 1, 2, 2, 1, 0, 1, -1]], grid.rooms)

    def test_1(self):
        """End-to-end: every room ends at the distance of its closest gate."""
        grid = ObstacleDist([
            [2147483647, -1, 0, 2147483647],
            [2147483647, 2147483647, 2147483647, -1],
            [2147483647, -1, 2147483647, -1],
            [0, -1, 2147483647, 2147483647],
        ])
        grid.modify_with_closest_dist_to_gate()
        expected = [[3, -1, 0, 1], [2, 2, 1, -1], [1, -1, 2, -1], [0, -1, 3, 4]]
        self.assertEqual(expected, grid.rooms)
| tugloo1/leetcode | problem_286.py | problem_286.py | py | 3,246 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_n... |
71806898343 |
import pandas as pd
from splinter import Browser
from bs4 import BeautifulSoup as bs
from webdriver_manager.chrome import ChromeDriverManager
import time
def scrape():
    """Scrape Mars news, featured image, a facts table and hemisphere images,
    returning everything in a single dict for the caller to persist."""
    # Latest Mars News
    # Set up Splinter
    executable_path = {'executable_path': ChromeDriverManager().install()}
    browser = Browser('chrome', **executable_path, headless=False)
    # Visit browser
    url = 'https://redplanetscience.com/'
    browser.visit(url)
    time.sleep(1)  # crude wait for the JS-rendered content to settle
    # Scrape page into Soup
    html = browser.html
    soup = bs(html, 'html.parser')
    news_title = soup.body.find("div", class_='content_title').text
    news_p = soup.body.find("div", class_='article_teaser_body').text
    # Featured image: resolve the relative href on the images site.
    url_image = 'https://spaceimages-mars.com/'
    browser.visit(url_image)
    time.sleep(1)
    html = browser.html
    soup = bs(html, 'html.parser')
    image_div_content = soup.find("div", class_="floating_text_area")
    link = image_div_content.find('a')
    href = link['href']
    featured_image_url = url_image + href
    # Mars facts: first HTML table on the page, rendered back to HTML.
    url_facts = 'https://galaxyfacts-mars.com/'
    browser.visit(url_facts)
    time.sleep(1)
    table = pd.read_html(url_facts, header=None, index_col=None)
    df = pd.DataFrame(table[0])
    # Renaming columns and index
    df = df.set_index(0).rename(columns={1: "Mars", 2: "Earth"})
    df.index.names = ['Description']
    html_table = df.to_html(
        classes="table table-success table-striped", bold_rows=True)
    # Mars Hemisphere
    url_hemispheres = 'https://marshemispheres.com/'
    browser.visit(url_hemispheres)
    time.sleep(1)
    html = browser.html
    soup = bs(html, 'html.parser')
    # Hemisphere Images: follow each item link to its detail page.
    hemispheres_info = soup.find_all('div', class_='item')
    hemisphere_image_urls = []
    for info in hemispheres_info:
        href_link = info.find('a')['href']
        url = url_hemispheres + href_link
        browser.visit(url)
        html = browser.html
        soup = bs(html, 'html.parser')
        # NOTE(review): `title` is assigned but never used — the stored title
        # comes from the 'cover' div below.
        title = soup.find('h2').text
        image_url = url_hemispheres + \
            soup.find('div', class_='downloads').a['href']
        hemisphere_title = soup.find('div', class_='cover').h2.text
        hemisphere_image_urls.append(
            {"title": hemisphere_title, 'img_url': image_url})
    # Store data in a dictionary
    mars_data = {
        'news_title': news_title,
        'news_paragraph': news_p,
        'featured_image_url': featured_image_url,
        'table': html_table,
        'hemisphere_image_urls': hemisphere_image_urls
    }
    # Close the browser after scraping
    browser.quit()
    # Return results
    return mars_data
| NZeinali/Web_Scraping_Challenge | scrape_mars.py | scrape_mars.py | py | 2,652 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "webdriver_manager.chrome.ChromeDriverManager",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "splinter.Browser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 20,
"usage_type": "call"
},
{
"api_name... |
10076939934 | from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.text_splitter import MarkdownHeaderTextSplitter,RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.chains import RetrievalQA, ConversationalRetrievalChain
from langchain.text_splitter import CharacterTextSplitter
from langchain.chat_models import ChatOpenAI
from fastapi import UploadFile
from langchain.chains.question_answering import load_qa_chain
import os
import json
import requests
from langchain.document_loaders import PyPDFLoader, TextLoader
import PyPDF2
# SECURITY: a live-looking OpenAI API key is hard-coded and committed to
# source control — it should be revoked and loaded from the environment or a
# secrets store instead of being set here.
os.environ['OPENAI_API_KEY'] = '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'
class Upload:
    """Handles saving uploaded documents to disk and answering questions
    against them with a LangChain QA chain."""

    def __init__(self) -> None:
        # Scratch path placeholder; not used by the methods below.
        self.temp_path = ''

    def uploadDocument(self, uploadedFile: UploadFile, username: str):
        """Save *uploadedFile* under ./<username>/ and return the saved path.

        Only .pdf/.csv/.txt are accepted; other extensions return an
        {"errormsg": ...} dict. Any exception is returned as {"error": ...}
        rather than raised.
        """
        try:
            file_extension = uploadedFile.filename.split('.')[-1].lower()
            if file_extension == 'pdf' or file_extension == 'csv' or file_extension == 'txt':
                # Read the contents of the uploaded file
                #file_content = os.path.basename(uploadedFile.filename)
                #print(file_content)
                # Get the current working directory
                current_directory = os.getcwd()
                new_folder_path = os.path.join(current_directory, username)
                os.makedirs(new_folder_path, exist_ok=True)
                # Construct the path for saving the file in the current folder
                file_path = os.path.join(new_folder_path, uploadedFile.filename)
                # Save the file to the current folder
                with open(file_path, "wb") as f:
                    f.write(uploadedFile.file.read())
                return file_path
            else:
                return {"errormsg": "Please upload file with .pdf, .txt or .csv extension"}
        except Exception as e:
            # NOTE(review): broad catch-all turns any failure into a dict
            # instead of an HTTP error — confirm callers expect this.
            return {"error": str(e)}

    def processDocument(self, question: str, filepaths: list):
        """Run *question* against each file; return {file_name: answer}.

        Files whose answer is exactly " I don't know." are omitted.
        NOTE(review): for an unrecognized extension `loader` is unbound (or
        stale from the previous iteration) — confirm inputs are pre-filtered
        by uploadDocument.
        """
        response_list = {}
        for file_path in filepaths:
            print(file_path)
            file_extension = file_path.split('.')[-1].lower()
            # NOTE(review): splitting on '\\' assumes Windows-style paths.
            file_name = file_path.split('\\')[-1]
            print(file_extension)
            if file_extension == 'pdf':
                loader = PyPDFLoader(file_path)
            elif file_extension == 'txt':
                loader = TextLoader(file_path)
            elif file_extension == 'csv':
                loader = CSVLoader(file_path)
            documents = loader.load()
            text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
            documents = text_splitter.split_documents(documents)
            # Persist embeddings locally; the vector store itself is not
            # queried here — the QA chain runs over the raw chunks.
            vectordb = Chroma.from_documents(
                documents,
                embedding=OpenAIEmbeddings(),
                persist_directory='./data'
            )
            vectordb.persist()
            # we are specifying that OpenAI is the LLM that we want to use in our chain
            chain = load_qa_chain(llm=OpenAI())
            response = chain.run(input_documents=documents, question=question)
            print(response)
            if (response != " I don't know."):
                response_list[file_name] = response
        #json_output = json.dumps(response_list)
        return response_list
    # {'source': 'docs/cs229_lectures/MachineLearning-Lecture01.pdf', 'page': 0}
| Utsav-ace/LLMGpt | src/mylib/upload.py | upload.py | py | 3,755 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "fastapi.UploadFile",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line... |
14559612423 | #!/usr/bin/env python3
from time import time
from datetime import timedelta
import json
import decimal
import os
import sys
import traceback
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import logging
logging.getLogger("tensorflow").setLevel(logging.WARNING)
import tensorflow as tf
from recipes.baskt_rs_recipes import BasktRecommendationSystemRecipes
from orders.baskt_rs_orders import BasktRecommendationSystemOrders
from favs.baskt_rs_favs import BasktRecommendationSystemFavs
from flask import Flask
from flask import request
import boto3
from botocore.exceptions import ClientError
app = Flask(__name__)
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that serializes DynamoDB ``Decimal`` values as strings."""

    def default(self, o):
        # Only Decimal gets special treatment; anything else defers to the
        # base class (which raises TypeError for unserializable objects).
        if isinstance(o, decimal.Decimal):
            return str(o)
        return super().default(o)
@app.route("/")
def hello():
    """Health-check endpoint confirming the server is up."""
    status_message = "Flask prediction server is running"
    return status_message
@app.route("/get_recommendations")
def get_recommendations():
    """Combined endpoint: recipe-, favourites- and order-based recommendations
    for ?upc=...; optional num_recommendations (default 10) and full_info
    (default true) query parameters. Returns a JSON string."""
    try:
        upc = request.args.get('upc')
        num_recommendations = request.args.get('num_recommendations')
        full_info = request.args.get('full_info')
        if num_recommendations is None:
            num_recommendations = 10
        else:
            num_recommendations = int(num_recommendations)
        if full_info is None:
            return_full_info = True
        else:
            return_full_info = str(full_info) == 'true'
        if upc:
            predicted = {'recipes_based': [], 'favs_based': [], 'orders_based': []}
            predicted_full_info = {'recipes_based': [], 'favs_based': [], 'orders_based': []}
            product_name = get_product_name(upc)
            if product_name is not None:
                predicted['recipes_based'] = recipes_model.predict(product_name,
                                                                   num_recommendations,
                                                                   predict_type='product_ids')
            else:
                # Unknown upc: fall back to using the raw upc string as a
                # product-name query and surface an 'error' note in the payload.
                predicted['recipes_based'] = recipes_model.predict(str(upc),
                                                                   num_recommendations,
                                                                   predict_type='product_ids')
                predicted['error'] = "There are no products for upc '{}' at the db, check if you use valid upc. " \
                                     "Returned values consist of search result for '{}' as product name.".format(upc, upc)
                predicted_full_info['error'] = "There are no products for upc '{}' at the db, " \
                                               "check if you use valid upc. " \
                                               "Returned values consist of search result for '{}' " \
                                               "as product name.".format(upc, upc)
            predicted['favs_based'] = favs_model.predict(upc, num_recommendations)
            predicted['orders_based'] = orders_model.predict(upc, num_recommendations)
            if return_full_info:
                # Enrich each non-empty id list with full product records.
                if len(predicted['recipes_based']) > 0:
                    predicted_full_info['recipes_based'] = get_products_full_info(predicted['recipes_based'])
                if len(predicted['favs_based']) > 0:
                    predicted_full_info['favs_based'] = get_products_full_info(predicted['favs_based'])
                if len(predicted['orders_based']) > 0:
                    predicted_full_info['orders_based'] = get_products_full_info(predicted['orders_based'])
                return json.dumps(predicted_full_info)
            else:
                return json.dumps(predicted)
        else:
            return "Request hasn't required param 'upc'"
    except Exception as ex:
        # Dump the traceback to stdout and return the error as a JSON string.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_tb(exc_traceback, file=sys.stdout)
        traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
        error = "Exception '{}' caught, details: {}".format(type(ex).__name__, str(ex))
        return json.dumps(error)
@app.route("/get_recommendations/recipes")
def get_recommendations_recipes():
    """Recipe-based recommendations for ?product_name=...; optional
    num_recommendations (default 10) and predict_type query parameters.
    Omitting predict_type returns full product records."""
    try:
        product_name = request.args.get('product_name')
        raw_count = request.args.get('num_recommendations')
        predict_type = request.args.get('predict_type')
        num_recommendations = 10 if raw_count is None else int(raw_count)
        # Default predict_type means "ids, then enrich with full records".
        return_full_info = predict_type is None
        if return_full_info:
            predict_type = 'product_ids'
        if product_name:
            predicted = recipes_model.predict(product_name, num_recommendations,
                                              predict_type=predict_type)
            if return_full_info and len(predicted) > 0:
                return json.dumps(get_products_full_info(predicted))
            return json.dumps(predicted)
        return "Request hasn't required param 'product_name'"
    except Exception as ex:
        # Dump the traceback to stdout and return the error as a JSON string.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_tb(exc_traceback, file=sys.stdout)
        traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
        return json.dumps("Exception '{}' caught, details: {}".format(type(ex).__name__, str(ex)))
@app.route("/get_recommendations/orders")
def get_recommendations_orders():
    """Order-history recommendations for ?upc=...; optional
    num_recommendations (default 10) and full_info (default true)."""
    try:
        upc = request.args.get('upc')
        raw_count = request.args.get('num_recommendations')
        full_info = request.args.get('full_info')
        num_recommendations = 10 if raw_count is None else int(raw_count)
        return_full_info = True if full_info is None else str(full_info) == 'true'
        if upc:
            predicted = orders_model.predict(upc, num_recommendations)
            if return_full_info and len(predicted) > 0:
                return json.dumps(get_products_full_info(predicted))
            return json.dumps(predicted)
        return "Request hasn't required param 'upc'"
    except Exception as ex:
        # Dump the traceback to stdout and return the error as a JSON string.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_tb(exc_traceback, file=sys.stdout)
        traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
        return json.dumps("Exception '{}' caught, details: {}".format(type(ex).__name__, str(ex)))
@app.route("/get_recommendations/favs")
def get_recommendations_favs():
    """Favourites-based recommendations for ?upc=...; optional
    num_recommendations (default 10) and full_info (default true)."""
    try:
        upc = request.args.get('upc')
        raw_count = request.args.get('num_recommendations')
        full_info = request.args.get('full_info')
        num_recommendations = 10 if raw_count is None else int(raw_count)
        return_full_info = True if full_info is None else str(full_info) == 'true'
        if upc:
            predicted = favs_model.predict(upc, num_recommendations)
            if return_full_info and len(predicted) > 0:
                return json.dumps(get_products_full_info(predicted))
            return json.dumps(predicted)
        return "Request hasn't required param 'upc'"
    except Exception as ex:
        # Dump the traceback to stdout and return the error as a JSON string.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_tb(exc_traceback, file=sys.stdout)
        traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
        return json.dumps("Exception '{}' caught, details: {}".format(type(ex).__name__, str(ex)))
def get_products_full_info(item_ids):
    """Batch-fetch full product records for *item_ids* from DynamoDB.

    Returns the raw 'Responses' mapping of batch_get_item, or [] when the
    call fails (the AWS error message is printed).
    """
    request_items = {
        'oc_product_description': {
            'Keys': [{'upc': {'S': item_id}} for item_id in item_ids],
            'ConsistentRead': True,
        }
    }
    try:
        return client.batch_get_item(RequestItems=request_items)['Responses']
    except ClientError as e:
        print(e.response['Error']['Message'])
        return []
def get_product_name(upc):
    """Look up the display name for *upc* in DynamoDB.

    Returns None when the item is missing or the AWS call fails (the error
    message is printed).
    """
    try:
        response = client.get_item(
            TableName='oc_product_description', Key={'upc': {'S': str(upc)}}
        )
        if 'Item' in response:
            return response['Item']['oc_name']['S']
    except ClientError as e:
        print(e.response['Error']['Message'])
    return None
if __name__ == "__main__":
    start_main_time = time()
    # Shared DynamoDB client used by get_product_name / get_products_full_info.
    client = boto3.client('dynamodb', region_name='us-east-1')
    print("open tf session...")
    # TF1-style session; the recipes model binds to it, so the server is run
    # inside this context to keep the session alive while serving.
    with tf.Session() as session:
        print("session opened")
        print("create and preparing models...")
        recipes_model = BasktRecommendationSystemRecipes()
        recipes_model.download_data_from_s3()
        recipes_model.prepare('predict', session)
        orders_model = BasktRecommendationSystemOrders()
        orders_model.download_data_from_s3()
        orders_model.prepare('predict')
        favs_model = BasktRecommendationSystemFavs()
        favs_model.download_data_from_s3()
        favs_model.prepare('predict')
        elapsed_main_time = time() - start_main_time
        print("models created and prepared, with elapsed time {}:".format(timedelta(seconds=elapsed_main_time)))
        # Blocking call: serves until the process is stopped.
        app.run("0.0.0.0", 5001)
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
... |
37360585045 | # 工具类函数
import colorsys
import functools
import glob
import json
import re
from loguru import logger
def parse_range(page_range: str, page_count: int, is_multi_range: bool = False, is_reverse: bool = False, is_unique: bool = True):
    """Parse a 1-based page-range spec into 0-based page indices.

    Supported specs: "all"/"" (every page), "even", "odd", comma-separated
    parts such as "1-3,5", "3-N" (N = last page), and negated parts like
    "!2,!5-7" meaning "everything except those pages" (when is_unique).

    :param page_range: textual spec, e.g. "1-3,5-6,7-10" or "!2"
    :param page_count: total number of pages
    :param is_multi_range: return one index list per part instead of a flat list
    :param is_reverse: complement the final selection against all pages
        (as before, ignored by the "all"/"even"/"odd" shortcut returns)
    :param is_unique: deduplicate and sort; also enables the complement
        semantics of negated parts
    :raises ValueError: on malformed parts or mixed "!"/plain parts

    Fixes over the original: regexes are raw strings (the old "\\d"/"\\-"
    were invalid escape sequences), parts are stripped before the "!" count
    (so " !3" is no longer misclassified), and "!N" no longer crashes on
    int("N") — it now means "exclude the last page".
    """
    page_range = page_range.strip()
    if page_range in ("all", ""):
        return list(range(page_count))
    if page_range == "even":
        return list(range(0, page_count, 2))
    if page_range == "odd":
        return list(range(1, page_count, 2))

    part_pattern = re.compile(r"^!?(\d+|N)(-(\d+|N))?$")
    parts = [p.strip() for p in page_range.split(",")]
    neg_count = sum(p.startswith("!") for p in parts)
    pos_count = len(parts) - neg_count
    if neg_count > 0 and pos_count > 0:
        raise ValueError("页码格式错误:不能同时使用正向选择和反向选择语法")

    def expand(spec):
        # Expand "a", "N" or "a-b"/"a-N" into a list of 0-based indices.
        bounds = spec.split("-")
        if len(bounds) == 1:
            return [page_count - 1] if bounds[0] == "N" else [int(bounds[0]) - 1]
        stop = page_count if bounds[1] == "N" else int(bounds[1])
        return list(range(int(bounds[0]) - 1, stop))

    groups = []
    for part in parts:
        if part_pattern.match(part) is None:
            raise ValueError("页码格式错误!")
        groups.append(expand(part.lstrip("!")))
    if is_multi_range:
        return groups

    roi_indices = [i for group in groups for i in group]
    if is_unique:
        roi_indices = sorted(set(roi_indices))
        if neg_count > 0:
            # Negated parts select the complement of the listed pages.
            roi_indices = sorted(set(range(page_count)) - set(roi_indices))
    if is_reverse:
        roi_indices = sorted(set(range(page_count)) - set(roi_indices))
    return roi_indices
def range_compress(arr):
    """Collapse a sorted integer list into [start, end] runs of consecutive values."""
    if not arr:
        return []
    runs = []
    run_start = prev = arr[0]
    for value in arr[1:]:
        if value == prev + 1:
            # Still consecutive: extend the current run.
            prev = value
        else:
            runs.append([run_start, prev])
            run_start = prev = value
    runs.append([run_start, prev])
    return runs
def dump_json(path, obj):
    """Serialize *obj* as UTF-8 JSON to *path* (non-ASCII kept verbatim)."""
    with open(path, "w", encoding="utf-8") as out_file:
        json.dump(obj, out_file, ensure_ascii=False)
def convert_length(length, from_unit, to_unit):
    """Convert a length value between units, via points (72 pt per inch).

    :param length: numeric length value
    :param from_unit: source unit, one of "pt", "cm", "mm", "in"
    :param to_unit: target unit, one of "pt", "cm", "mm", "in"
    :return: the converted length
    :raises ValueError: if either unit is unknown
    """
    # How many of each unit make up one point.
    unit_per_pt = {"pt": 1, "cm": 2.54 / 72, "mm": 25.4 / 72, "in": 1 / 72}
    if from_unit not in unit_per_pt or to_unit not in unit_per_pt:
        raise ValueError("Invalid unit")
    in_points = length / unit_per_pt[from_unit]
    return in_points * unit_per_pt[to_unit]
def hex_to_rgb(hex_color):
    """Parse '#RRGGBB' (leading '#' optional, case-insensitive) into an
    (r, g, b) tuple of 0-255 ints.

    The previous implementation round-tripped the values through HSV via
    colorsys and rounded back — an identity transform — so the channels are
    now parsed directly.
    """
    hex_value = hex_color.lstrip("#")
    return tuple(int(hex_value[i:i + 2], 16) for i in (0, 2, 4))
def num_to_roman(num):
    """Convert a non-negative integer to Roman numerals (0 yields '')."""
    value_symbols = (
        (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
        (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
        (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
    )
    pieces = []
    for value, symbol in value_symbols:
        count, num = divmod(num, value)
        pieces.append(symbol * count)
    return ''.join(pieces)
def num_to_letter(num):
    """Convert a positive integer to spreadsheet-style column letters
    (1 -> 'A', 26 -> 'Z', 27 -> 'AA'); non-positive input yields ''."""
    if num <= 0:
        return ""
    letters = []
    # Bijective base-26: shift to 0-25 each round, emitting the low digit.
    while num > 0:
        num, remainder = divmod(num - 1, 26)
        letters.append(chr(ord('A') + remainder))
    return "".join(reversed(letters))
def num_to_chinese(num):
    """Convert a non-negative integer to Chinese numerals.

    NOTE(review): matching the original behavior, no 零 is inserted for
    skipped middle positions (1005 -> "一千五", not "一千零五") and 110
    renders as "一百十" — confirm before tightening.
    """
    digits = {
        0: "零", 1: "一", 2: "二", 3: "三", 4: "四", 5: "五",
        6: "六", 7: "七", 8: "八", 9: "九", 10: "十"
    }
    if num == 0:
        return digits[0]
    pieces = []
    # 亿/万 groups can themselves be multi-digit, so recurse on them.
    for unit_value, unit_name in ((100000000, "亿"), (10000, "万")):
        if num >= unit_value:
            group, num = divmod(num, unit_value)
            pieces.append(num_to_chinese(group) + unit_name)
    # 千/百 carry at most one digit here (num < 10000 after the loop above).
    for unit_value, unit_name in ((1000, "千"), (100, "百")):
        if num >= unit_value:
            digit, num = divmod(num, unit_value)
            pieces.append(digits[digit] + unit_name)
    if num >= 10:
        tens, num = divmod(num, 10)
        if tens > 1:
            pieces.append(digits[tens])
        pieces.append("十")
    if num > 0:
        pieces.append(digits[num])
    return "".join(pieces)
def human_readable_size(size):
"""将文件大小转换为适合人类阅读的格式"""
for unit in ["B", "KB", "MB", "GB", "TB"]:
if size < 1024.0:
return f"{size:.2f} {unit}"
size /= 1024.0
return f"{size:.2f} PB"
def contains_rect(rect1, rect2) -> bool:
x1, y1, x2, y2 = rect1
x3, y3, x4, y4 = rect2
if x1 <= x3 and y1 <= y3 and x2 >= x4 and y2 >= y4:
return True
return False
def batch_process(field: str = "doc_path"):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
logger.debug(args)
logger.debug(kwargs)
doc_path = kwargs[field]
if "*" in doc_path:
path_list = glob.glob(doc_path)
logger.debug(f"path_list length: {len(path_list) if path_list else 0}")
if path_list:
for path in path_list:
kwargs[field] = path
func(*args, **kwargs)
else:
func(*args, **kwargs)
return wrapper
return decorator
| kevin2li/PDF-Guru | thirdparty/utils.py | utils.py | py | 7,425 | python | en | code | 941 | github-code | 36 | [
{
"api_name": "re.match",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "colorsys.rgb_to_hsv",
"line_number": ... |
13183244334 | from math import sqrt
from itertools import product
import pandas as pd
import torch
from torch.autograd import Function
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def make_vgg():
layers = []
in_channels = 3
cfg = [64, 64, 'M', 128, 128, 'M', 256, 256,
256, 'MC', 512, 512, 512, 'M', 512, 512, 512]
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'MC':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
layers += [conv2d, nn.ReLU()]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6, nn.ReLU(), conv7, nn.ReLU()]
return nn.ModuleList(layers)
def make_extras():
layers = []
in_channels = 1024
cfg = [256, 512, 128, 256, 128, 256, 128, 256]
layers += [nn.Conv2d(in_channels, cfg[0], kernel_size=1)]
layers += [nn.Conv2d(cfg[0], cfg[1], kernel_size=3, stride=2, padding=1)]
layers += [nn.Conv2d(cfg[1], cfg[2], kernel_size=1)]
layers += [nn.Conv2d(cfg[2], cfg[3], kernel_size=3, stride=2, padding=1)]
layers += [nn.Conv2d(cfg[3], cfg[4], kernel_size=1)]
layers += [nn.Conv2d(cfg[4], cfg[5], kernel_size=3)]
layers += [nn.Conv2d(cfg[5], cfg[6], kernel_size=1)]
layers += [nn.Conv2d(cfg[6], cfg[7], kernel_size=3)]
return nn.ModuleList(layers)
def make_loc_conf(num_classes=21, bbox_aspect_num=[4, 6, 6, 6, 4, 4]):
loc_layers = []
conf_layers = []
# source1의 합성곱 층
loc_layers += [nn.Conv2d(512, bbox_aspect_num[0]*4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(512, bbox_aspect_num[0]* num_classes, kernel_size=3, padding=1)]
# source2의 합성곱 층
loc_layers += [nn.Conv2d(1024, bbox_aspect_num[1]*4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(1024, bbox_aspect_num[1]*num_classes, kernel_size=3, padding=1)]
# source3의 합성곱 층
loc_layers += [nn.Conv2d(512, bbox_aspect_num[2]*4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(512, bbox_aspect_num[2]*num_classes, kernel_size=3, padding=1)]
# source4의 합성곱 층
loc_layers += [nn.Conv2d(256, bbox_aspect_num[3]*4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(256, bbox_aspect_num[3]*num_classes, kernel_size=3, padding=1)]
# source5의 합성곱 층
loc_layers += [nn.Conv2d(256, bbox_aspect_num[4]*4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(256, bbox_aspect_num[4]*num_classes, kernel_size=3, padding=1)]
# source6의 합성곱 층
loc_layers += [nn.Conv2d(256, bbox_aspect_num[5]*4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(256, bbox_aspect_num[5]*num_classes, kernel_size=3, padding=1)]
return nn.ModuleList(loc_layers), nn.ModuleList(conf_layers)
#
class L2Norm(nn.Module):
def __init__(self, input_channels=512, scale=20):
super(L2Norm, self).__init__()
self.weight = nn.Parameter(torch.Tensor(input_channels))
self.scale = scale
self.reset_parameters()
self.eps = 1e-10
def reset_parameters(self):
init.constant_(self.weight, self.scale) # weight값이 전부 self.sclae의 값이 2개가 됨
# 각 채널의 38*38 특징량의 채널방향 제곱합을 계산
def forward(self, x):
# pow : n승
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt()+self.eps # norm tensor size = torch.size([batch_num, 1, 38, 38])
x = torch.div(x, norm) # x를 norm으로 나눔
# 채널마다 1개의 계수를 가짐
weights = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) # self.weight = torch.size([512])
out = weights * x # weight = torch.size([batch_num, 512, 38, 38])
return out
class DBox(object):
def __init__(self, cfg):
super(DBox, self).__init__()
self.image_size = cfg['input_size'] # 화상크기 300*300
self.feature_maps = cfg['feature_maps']
self.num_priors = len(cfg["feature_maps"]) # source의 개수 : 6
self.steps = cfg['steps'] # [8, 16, …] DBox의 픽셀 크기
self.min_sizes = cfg['min_sizes'] # [30, 60, …] 작은 정사각형의 DBox 픽셀
self.max_sizes = cfg['max_sizes'] # [60, 111, …] 큰 정사각형의 DBox 픽셀
self.aspect_ratios = cfg['aspect_ratios'] # 정사각형의 DBox 화면비(종횡비)
def make_dbox_list(self):
mean = []
# 'feature_maps': [38, 19, 10, 5, 3, 1] source1~6
for k, f in enumerate(self.feature_maps):
for i, j in product(range(f), repeat=2): # f까지의 수로 조합을 만들어냄
# 특징량의 화상크기
# 300 / 'steps': [8, 16, 32, 64, 100, 300],
f_k = self.image_size / self.steps[k]
# DBox의 중심좌표 x, y 0~1로 정규화되어있음
cx = (j + 0.5) / f_k
cy = (i + 0.5) / f_k
# 화면비 1의 작은 DBox [cx,cy, width, height]
# 'min_sizes': [30, 60, 111, 162, 213, 264]
s_k = self.min_sizes[k]/self.image_size
mean += [cx, cy, s_k, s_k]
# 화면비 1의 큰 DBox [cx,cy, width, height]
# 'max_sizes': [60, 111, 162, 213, 264, 315],
s_k_prime = sqrt(s_k * (self.max_sizes[k]/self.image_size))
mean += [cx, cy, s_k_prime, s_k_prime]
# 그 외 화면비의 defBox [cx,cy, width, height]
for ar in self.aspect_ratios[k]:
mean += [cx, cy, s_k*sqrt(ar), s_k/sqrt(ar)]
mean += [cx, cy, s_k/sqrt(ar), s_k*sqrt(ar)]
# DBox size : torch.Size([8732, 4])
output = torch.Tensor(mean).view(-1, 4)
# DBox가 이미지 밖으로 돌출되는 것을 막기 위해 크기를 0~1로 조정
output.clamp_(max=1, min=0)
return output
def decode(loc, dbox_list):
# 오프셋 정보(loc)로 DBox를 BBox로 변환
# loc : [8732, 4], dbox_list : [8732, 4]
# loc [Δcx, Δcy, Δwidth, Δheight]
# DBox [cx, cy, width, height]
# 오프셋정보로 BBox 구함
boxes = torch.cat((
dbox_list[:, :2] + loc[:, :2] * 0.1 * dbox_list[:, 2:],
dbox_list[:, 2:] * torch.exp(loc[:, 2:] * 0.2)), dim=1)
# boxes torch.Size([8732, 4])
# BBox의 좌표 정보를 [cx, cy, width, height]에서 [xmin, ymin, xmax, ymax]로 변경
boxes[:, :2] -= boxes[:, 2:] / 2 # 좌표 (xmin,ymin)로 변환
boxes[:, 2:] += boxes[:, :2] # 좌표 (xmax,ymax)로 변환
return boxes
def nm_suppression(boxes, scores, overlap=0.45, top_k=200):
"""
boxes중 overlap 이상의 BBox 삭제
Parameters
----------
boxes : [신뢰도 임계값 0.01을 넘은 BBox 수,4]
BBox 정보
scores :[신뢰도 임계값 0.01을 넘은 BBox 수]
conf 정보
Returns
-------
keep : 리스트
conf의 내림차순으로 nms를 통과한 index 저장
count:int
nms를 통과한 BBox 수
"""
# return
count = 0
keep = scores.new(scores.size(0)).zero_().long() # keep:torch.Size([신뢰도 임계값을 넘은 BBox 수]), item은 전부 0
# 각 Bbox의 면적 area 계산
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = torch.mul(x2 - x1, y2 - y1)
# boxes 복사. 나중에 BBox 중복도(IOU) 계산 시 모형으로 준비
tmp_x1 = boxes.new()
tmp_y1 = boxes.new()
tmp_x2 = boxes.new()
tmp_y2 = boxes.new()
tmp_w = boxes.new()
tmp_h = boxes.new()
# socre 오름차순으로 나열
v, idx = scores.sort(0)
# 상위 k개 Bbox index 꺼냄(k개보다 적게 존재할 수도 있음)
idx = idx[-top_k:]
# idx의 item수가 0이 아닌 한 loop
while idx.numel() > 0:
i = idx[-1] # 현재 conf 최대 index를 i로
# keep 끝에 conf 최대 index 저장
# 이 index의 BBox와 크게 겹치는 BBox삭제
keep[count] = i
count += 1
# 마지막 BBox는 루프를 빠져나온다
if idx.size(0) == 1:
break
# 현재 conf 최대의 index를 keep에 저장했으므로 idx를 하나 감소시킴
idx = idx[:-1]
# -------------------
# 지금부터 keep에 저장한 BBox와 크게 겹치는 BBox 추출하여 삭제
# -------------------
# 하나 감소시킨 idx까지의 BBox를 out으로 지정한 변수로 작성
# torch.index_select(x1, 0, idx, out=tmp_x1)
# torch.index_select(y1, 0, idx, out=tmp_y1)
# torch.index_select(x2, 0, idx, out= tmp_x2)
# torch.index_select(y2, 0, idx, out=tmp_y2)
tmp_x1 = torch.index_select(x1, 0, idx)#, out=tmp_x1)
tmp_y1 = torch.index_select(y1, 0, idx)#, out=tmp_y1)
tmp_x2 = torch.index_select(x2, 0, idx)#, out=tmp_x2)
tmp_y2 = torch.index_select(y2, 0, idx)#, out=tmp_y2)
#
tmp_x1 = torch.clamp(tmp_x1, min=x1[i]).detach()
tmp_y1 = torch.clamp(tmp_y1, min=y1[i]).detach()
tmp_x2 = torch.clamp(tmp_x2, max=x2[i]).detach()
tmp_y2 = torch.clamp(tmp_y2, max=y2[i]).detach()
# w, h의 텐서 크기를 index 하나 줄인 것으로 함
tmp_w.resize_as_(tmp_x2)
tmp_h.resize_as_(tmp_y2)
# clamp한 상태에서 BBox의 폭과 높이를 구함
tmp_w = tmp_x2 - tmp_x1
tmp_h = tmp_y2 - tmp_y1
# 폭이나 높이가 음수인 것은 0으로 맞춤
tmp_w = torch.clamp(tmp_w, min=0.0)
tmp_h = torch.clamp(tmp_h, min=0.0)
# clamp된 상태의 면적을 구함
inter = tmp_w*tmp_h
# IoU = intersect / (area(a) + area(b) - intersect)
rem_areas = torch.index_select(area, 0, idx) # 각 BBox의 원래 면적
union = (rem_areas - inter) + area[i] # 2영역의 합 면적
IoU = inter/union
# IoU가 overlap보다 작은 idx만 남김
idx = idx[IoU.le(overlap)] # le:Less than or Equal
# IoU가 overlap보더 큰 idx는 처음 선택한 keep에 저장한 idx와 동일한 물체에 BBox를 둘러싸고 있어 삭제
return keep, count
# torch.autograd.Function 상속, SSD추론 시 conf와 loc의 출력에서 중복을 제거한 BBox출력
class Detect(Function):
def __init__(self, conf_thresh=0.01, top_k=200, nms_thresh=0.45):
self.softmax = nn.Softmax(dim=-1) # conf를 softmax로 정규화하기 위해
self.conf_thresh = conf_thresh
self.top_k = top_k # conf(confidence)가 높은 top_k개를 nms_supression으로 게산에 사용하는 top_k
self.nms_thresh = nms_thresh # nm_supression으로 IOU가 nms_thresh보다 크면 동일한 물체의 BBox로 간주
def forward(self, loc_data, conf_data, dbox_list):
"""
Parameters
----------
loc_data: [batch_num,8732,4]
오프셋정보
conf_data: [batch_num, 8732,num_classes]
confidence
dbox_list: [8732,4]
DBox 정보
Returns
-------
output : torch.Size([batch_num, 21, 200, 5])
(batch_num、class、conf의top200、BBox 정보)
"""
# 각 크기 취득
num_batch = loc_data.size(0) # batch 크기
num_dbox = loc_data.size(1) # DBox 수 = 8732
num_classes = conf_data.size(2) # class수 = 21
# conf 소프트맥스 정규화
conf_data = self.softmax(conf_data)
# [batch, 21, 200, 5]
output = torch.zeros(num_batch, num_classes, self.top_k, 5)
# cof_data를[batch_num,8732,num_classes]에서[batch_num, num_classes,8732]으로 변경
conf_preds = conf_data.transpose(2, 1)
for i in range(num_batch):
# 1. loc와DBox로 수정한 BBox [xmin, ymin, xmax, ymax] 구함
decoded_boxes = decode(loc_data[i], dbox_list)
# conf의 복사본 작성
conf_scores = conf_preds[i].clone()
# 이미지 class별로 계산(배경인 0은 계산 X)
for cl in range(1, num_classes):
# 2.conf의 임계값을 넘은 BBox를 꺼냄
# conf의 임계값을 넘고 있늕지 마스크를 작성하여 임계값을 넘은 conf의 인덱스를 c_mask로 얻음
c_mask = conf_scores[cl].gt(self.conf_thresh) # Greater than 의미 gt로 임계값이 넘으면 1, 이하는 0
# conf_scores:torch.Size([21, 8732])
# c_mask:torch.Size([8732])
# scores torch.Size([임계값을 넘은 BBox 수])
scores = conf_scores[cl][c_mask]
if scores.nelement() == 0: # nelement: scores의 합계를 구함
continue
# decoded_boxes에 적용가능하도록 크기 변경
l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)
# l_mask:torch.Size([8732, 4])
# l_mask를 decoded_boxes로 적용
boxes = decoded_boxes[l_mask].view(-1, 4)
# decoded_boxes[l_mask]로 1차원이 되기 때문에 view에서 (임계값 넘은 BBox 수, 4)로 크기 바꿈
# 3. Non-Maximum Suppression 실행하여 중복되는 BBox 제거
ids, count = nm_suppression(boxes, scores, self.nms_thresh, self.top_k)
# ids:conf의 내림차로 Non-Maximum Suppression을 통과한 index 저장
# count:Non-Maximum Suppression 통과한 BBox 수
# output에 Non-Maximum Suppression을 뺀 결과
output[i, cl, :count] = torch.cat((scores[ids[:count]].unsqueeze(1), boxes[ids[:count]]), 1)
return output # torch.Size([1, 21, 200, 5])
class SSD(nn.Module):
def __init__(self, phase, cfg):
super(SSD, self).__init__()
self.phase = phase # train or inference 지정
self.num_classes = cfg["num_classes"] # class 수 21
# SSDのネットワークを作る
self.vgg = make_vgg()
self.extras = make_extras()
self.L2Norm = L2Norm()
self.loc, self.conf = make_loc_conf(cfg["num_classes"], cfg["bbox_aspect_num"])
# DBox
dbox = DBox(cfg)
self.dbox_list = dbox.make_dbox_list()
# 추론 시 detect
if phase == 'inference':
self.detect = Detect()
def forward(self, x):
sources = list() # source1~6 저장
loc = list()
conf = list()
# vgg의 conv4_3까지 계산
for k in range(23):
x = self.vgg[k](x)
# conv4_3 출력을 L2norm에 입력하고 source1을 작성하여 sources에 추가
source1 = self.L2Norm(x)
sources.append(source1)
# vgg를 마지막까지 계산하여 source2를 작성하고 sources에 추가
for k in range(23, len(self.vgg)):
x = self.vgg[k](x)
sources.append(x)
# extras의 conv와 ReLU 계산 source3~6을 sources에 추가
for k, v in enumerate(self.extras):
x = F.relu(v(x), inplace=True)
if k % 2 == 1: # conv→ReLU→cov→ReLU하여 sources에 추가
sources.append(x)
# source1~6에 해당하는 conv 1회씩 적용
for (x, l, c) in zip(sources, self.loc, self.conf): # sources엔 1~6개의 source있음
loc.append(l(x).permute(0, 2, 3, 1).contiguous()) # contiguous : 메모리 상에 연속적으로 요소를 배치하는 명령
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
# l(x), c(x)의 출력 크기[batch_num, 4*화면비 종류 수, featuremap 높이, featuremap 폭]
# source에 따라 화면비 종류가 달라 순서 바꾸어 조정
# [minibatch, featuremap수, featuremap수,4*화면비의 종류 수]
# view를 수행하므로 대상의 변수가 메모리 상에 연속적으로 배치되어야함
# loc torch.Size([batch_num, 34928])
# conf torch.Size([batch_num, 183372])になる
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
# loc torch.Size([batch_num, 8732, 4])
# conf torch.Size([batch_num, 8732, 21])
loc = loc.view(loc.size(0), -1, 4)
conf = conf.view(conf.size(0), -1, self.num_classes)
output = (loc, conf, self.dbox_list)
if self.phase == "inference":
# output torch.Size([batch_num, 21, 200, 5])
return self.detect.forward(output[0], output[1], output[2])
else:
return output
# output은 (loc, conf, dbox_list)
if __name__ == '__main__':
vgg_test = make_vgg()
print(vgg_test)
extras_test = make_extras()
print(extras_test)
loc_test, conf_test = make_loc_conf()
print(loc_test)
print(conf_test)
# SSD300 config
ssd_cfg = {
'num_classes': 21,
'input_size': 300,
'bbox_aspect_num': [4, 6, 6, 6, 4, 4],
'feature_maps': [38, 19, 10, 5, 3, 1],
'steps': [8, 16, 32, 64, 100, 300],
'min_sizes': [30, 60, 111, 162, 213, 264],
'max_sizes': [60, 111, 162, 213, 264, 315],
'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
}
# DBox 작성
dbox = DBox(ssd_cfg)
dbox_list = dbox.make_dbox_list()
# DBox 출력확인
print(pd.DataFrame(dbox_list.numpy())) | jsw6872/DataScience_ML-DL | DL/lecture/detection_segmentation/SSD/model.py | model.py | py | 17,905 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
74698899304 | from fastapi import APIRouter
from db import db
from models import Wct
router = APIRouter()
@router.patch("/{syncId}/wct")
async def update_wct(syncId: int, wct: Wct):
await db.update_user_wct(syncId, wct)
return {
'status': 'OK'
}
| NIDILLIN/Kathrin | Microservices/Users(1406)/routers/patch.py | patch.py | py | 258 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "models.Wct",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "db.db.update_user_wct",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "db.db",
"line_n... |
12979955656 | import sys
from reward_abc import RewardFunctionAbc
# from skimage.measure import approximate_polygon, find_contours
# from skimage.draw import polygon_perimeter, line
from skimage.transform import hough_line, probabilistic_hough_line
# from skimage.transform import hough_line_peaks
import torch
from skimage.draw import line
from numpy.linalg import norm
import numpy as np
import matplotlib.pyplot as plt
# from matplotlib import cm
def plot_debug(single_pred, edge_image, hough_pred_lines):
# generate figure
fig, axes = plt.subplots(1, 3, figsize=(15, 5), sharex=True, sharey=True)
ax = axes.ravel()
# Detect two radii
ax[0].imshow(single_pred)
ax[0].set_title('Input image')
ax[1].imshow(edge_image)
ax[1].set_title('edges')
# plot detected lines
ax[2].imshow(edge_image)
for ln in hough_pred_lines:
p0, p1 = ln
ax[2].plot((p0[0], p1[0]), (p0[1], p1[1]))
ax[2].set_xlim((0, single_pred.shape[1]))
ax[2].set_ylim((single_pred.shape[0], 0))
ax[2].set_title('Probabilistic Hough')
for a4 in ax:
a4.set_axis_off()
plt.tight_layout()
fig.savefig('debug-hough-lines.png')
class HoughLinesReward(RewardFunctionAbc):
def __init__(self, s_subgraph, *args, **kwargs):
self.max_p = torch.nn.MaxPool2d(3, padding=1, stride=1)
self.s_subgraph = s_subgraph
self.line_thresh = 10
self.range_rad = [10, 20] # :?????????
self.range_num = [20, 20]
# super().__init__(s_subgraph, *args, **kwargs)
def __call__(
self, prediction_segmentation, superpixel_segmentation, node_feats, dir_edges, subgraph_indices, actions,
*args, **kwargs
):
dev = prediction_segmentation.device
edge_scores = []
exp_factor = 3
# we consider objects that are bigger than this size to be background
bg_size_threshold = 10000
# we consider objects that are smaller than this size to be noise
false_obj_size_threshold = 100
# -> this means all objects that are in the size range false_obj_size_threshold to bg_size_threshold are
# considered as potential foreground objects
for single_pred, single_sp_seg, s_dir_edges in zip(prediction_segmentation, superpixel_segmentation, dir_edges):
# print("single_sp_seg.max()", single_sp_seg.max())
edge_score = torch.zeros(int((single_sp_seg.max()) + 1, ), device=dev)
if single_pred.max() == 0: # image is empty
edges = s_dir_edges[:, :int(s_dir_edges.shape[1] / 2)]
# print(edge_score.shape)
# print(edges.shape)
edge_score = edge_score[edges].max(dim=0).values
edge_scores.append(edge_score)
continue
# compute the prediction ids and sizes of the current object predictions
pred_ids, label_masses = torch.unique(single_pred, return_counts=True)
# get the ids of bg objects, false positives and foreground objects
bg_pred_ids = pred_ids[label_masses >= bg_size_threshold]
false_pred_ids = pred_ids[label_masses <= false_obj_size_threshold]
fg_pred_ids = pred_ids[
(label_masses > false_obj_size_threshold).logical_and(label_masses < bg_size_threshold)
]
# get the ids of the superpixels corresponding to bg and false objects
bg_sp_ids = torch.unique(
single_sp_seg[torch.isin(single_pred, bg_pred_ids)]
)
false_sp_ids = torch.unique(
single_sp_seg[torch.isin(single_pred, false_pred_ids)]
)
# FIXME this doesn't make much sense, we have to compute the hough scores for the individual
# predicted objects, and not for the whole foreground!!
# (otherwise it doesn't make a difference if an object is split up into many objects or not)
# get a binary image of all foreground objects
# NOTE: in the circle example it looks like the hough trafo is computed for the outlines of the
# circle. I have no idea why that is done instead of doing it for the actual circles
edge_image = torch.isin(single_pred, fg_pred_ids).detach().cpu().numpy().squeeze().astype("float32")
# calculations for hough line reward
tested_angles = np.linspace(0, 2 * np.pi, 360, endpoint=False)
hough_res, angles, distc = hough_line(edge_image, theta=tested_angles)
# accums, angles, distcs = hough_line_peaks(hough_res, angles, distc, num_peaks=self.range_num[1])
hough_pred_lines = probabilistic_hough_line(edge_image, line_length=20, line_gap=10)
r_dists, thetas = self.compute_r_theta(prediction_segmentation, hough_pred_lines)
accums = self.find_accums(r_dists, thetas, hough_res)
# for debugging
plot_debug(single_pred, edge_image, hough_pred_lines)
r0 = []
c0 = []
r1 = []
c1 = []
for lineـ in hough_pred_lines:
p0, p1 = lineـ
r0.append(p0[0])
r1.append(p1[0])
c0.append(p0[1])
c1.append(p1[1])
r0 = np.array(r0)
c0 = np.array(c0)
r1 = np.array(r1)
c1 = np.array(c1)
accums = np.array(accums)
accepted_lines = accums > self.line_thresh
good_obj_cnt = 0
if any(accepted_lines):
print("we accepted", len(accepted_lines), "lines")
r0 = r0[accepted_lines]
c0 = c0[accepted_lines]
r1 = r1[accepted_lines]
c1 = c1[accepted_lines]
accums = accums[accepted_lines]
line_idxs = [line(R0, C0, R1, C1) for R0, C0, R1, C1 in zip(r0, c0, r1, c1)]
line_sps = [torch.unique(single_sp_seg[line_idx[0], line_idx[1]]).long() for line_idx in line_idxs]
obj_ids = [torch.unique(single_pred[line_idx[0], line_idx[1]]) for line_idx in line_idxs]
for line_sp, val, obj_id in zip(line_sps, accums, obj_ids):
hough_score = (val - self.line_thresh) / (1 - self.line_thresh)
# hough_score = torch.sigmoid(torch.tensor([8 * (hough_score - 0.5)])).item()
# num_obj_score = 1 / max(len(obj_id), 1)
# if num_obj_score == 1 and obj_id[0] in potential_object_ids:
# good_obj_cnt += 1
# edge_score[line_sp] = 0.7 * hough_score + 0.3 * num_obj_score
# if num_obj_score == 1 and obj_id[0] in potential_object_ids:
if obj_id[0] in fg_pred_ids:
good_obj_cnt += 1
# edge_score[line_sp] = 0.7 * hough_score + 0.3 * num_obj_score
edge_score[line_sp] = hough_score
score = 1.0 * (good_obj_cnt / 15) * int(good_obj_cnt > 5) + 0.0 * (1 / len(bg_pred_ids))
# score = 1 / len(bg_object_ids)
score = np.exp((score * exp_factor)) / np.exp(np.array([exp_factor]))
edge_score[bg_sp_ids] = score.item()
edge_score[false_sp_ids] = 0.0
if torch.isnan(edge_score).any() or torch.isinf(edge_score).any():
print(Warning("NaN or inf in scores this should not happen"))
sys.stdout.flush()
assert False
edges = s_dir_edges[:, :int(s_dir_edges.shape[1] / 2)]
edge_score = edge_score[edges].max(dim=0).values
edge_scores.append(edge_score)
else:
print("No lines were accepted!!!!!")
t_edge_scores = torch.cat(edge_scores)
t_edge_scores = (t_edge_scores * exp_factor).exp() / (torch.ones_like(t_edge_scores) * exp_factor).exp()
assert not torch.isnan(t_edge_scores).any() and \
not torch.isinf(t_edge_scores).any() and \
(t_edge_scores >= 0).any(), "### found invalid reward"
sg_scores = []
for i, sz in enumerate(self.s_subgraph):
sg_scores.append(t_edge_scores[subgraph_indices[i].view(-1, sz)].mean(1))
# return sg_scores, edge_scores.mean(), t_edge_scores.mean()
return sg_scores, edge_scores, t_edge_scores.mean()
def compute_r_theta(self, pred_seg, lines):
origin_point = np.array([0, pred_seg.shape[1]])
# r_dist = np.linalg.norm(np.cross(p2-p1, p1-p3))/norm(p2-p1)
r_dists = [norm(np.cross(np.asarray(ll[1]) - np.asarray(ll[0]),
np.asarray(ll[0]) - origin_point)) / norm(np.asarray(ll[1])-np.asarray(ll[0]))
for ll in lines]
# thetas = [np.arctan(-(l[1][0] - l[0][0]) / (l[1][1] - l[0][1])) for l in lines]
thetas = []
for ll in lines:
if ll[1][1] == ll[0][1] and ll[0][1] < 0:
thetas.append(3 * np.pi / 2)
elif ll[1][1] == ll[0][1] and ll[0][1] >= 0:
thetas.append(np.pi / 2)
else:
thetas.append(np.arctan(-(ll[1][0] - ll[0][0]) / (ll[1][1] - ll[0][1])))
return r_dists, thetas
def find_accums(self, r_dists, thetas, hough_res):
return [hough_res[int(r)][int(np.rad2deg(th))] for r, th in zip(r_dists, thetas)]
| kreshuklab/rewardchecking | lines_reward.py | lines_reward.py | py | 9,442 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 39,
"usage_type": "call"
},
{
"api_n... |
571034080 | ## BMO CHATBOT CLASS DEFINITION
# Author: Milk + Github Copilot (WOW!)
# Last modified: 2022-10-05
import random
from pathlib import Path
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
class BMO():
def __init__(self,debug=False):
#set debug mode
self.DEBUG = debug
if self.DEBUG:
self.debugTxt("-- DEBUG MODE --")
#import the saved category words
self.saveWords = self.importCategories() #import the saved keywords
self.exitWords = ["bye", "goodbye", "see ya", "see you later", "cya", "later", "quit", "exit"] #words that stops the interaction
self.extra_stopwords = ["help","need","want"] #words that are not relevant to the categorization
#html interface variables
self.text_mode = "intro"
#### SAVED WORDS FUNCTIONS ####P
# parse the categories to associate relevant words
def importCategories(self):
saveWords = {}
catTxt = Path(__file__).with_name("category_words.txt").open("r").readlines()
cat = ""
for line in catTxt:
line = line.strip()
#nothing or comment
if line == "" or line[0] == "#":
continue
#new category
elif line[0] == "$":
cat = line[2:]
if cat not in saveWords:
saveWords[cat] = []
#new word
elif line[0] == "+":
word = line[2:]
saveWords[cat].append(word)
self.debugTxt(saveWords)
return saveWords
#export the category data back out to the file
def exportCategories(self):
header_comments = "# Puts relevant words into categories \
\n# Syntax: \
\n# # = comment (do not read) \
\n# $ = topic header \
\n# + = word"
with open("category_words.txt", "w+") as f:
#add the header comment
f.write(header_comments)
#add the categories and words
for cat in self.saveWords:
f.write("\n\n$ " + cat)
for word in self.saveWords[cat]:
f.write("\n+ " + word)
# save a new word to the keywords dictionary
def addNewWord(self, word, category):
self.saveWords[category].append(word)
#add a new category to the keywords dictionary
def addNewCategory(self, category):
self.saveWords[category] = []
# figure out what the user specifically wants
# possible options: [debug, make sprites, new game features, exit]
def matchCategory(self, resp):
return self.rawClosestMatch(resp, self.saveWords)
#adds more words to a particular category to associate later
def associateWords(self, words, category):
#add locally
for word in words:
self.addNewWord(word, category)
#export to file
self.exportCategories()
#### CONVERSATION FUNCTIONS ####
# show the BMO message in a format that shows BMO as the speaker
def formBMO(self,msg):
print("BMO: " + msg)
# show the user message in a format that shows the user as the speaker
def formUser(self):
print("> ", end="")
msg = input()
return msg
def debugTxt(self,txt):
if self.DEBUG:
print(f"% {txt} %")
# continuously have a conversation with the user until they say goodbye (in some form or another)
def talk(self):
#intro
self.formBMO("Hi there! I'm BMO, the game dev chatbot!")
self.formBMO("What do you need? I'll do my best to help you out!")
user_resp = self.formUser()
#keep talking until exit
while user_resp.lower() not in self.exitWords:
#determine what the user wants
user_req, words = self.matchCategory(user_resp)
self.debugTxt(words)
self.debugTxt(f"GUESS => {user_req}")
#unknown request - maybe add the words to the category
if user_req == "?":
#figure out where the words associated fit in
self.formBMO("I'm sorry, I don't understand...")
self.formBMO("Is this related to code debugging, making sprites, or new game features?")
#get the user's response for a new category
user_cat_resp = self.formUser()
#made by copilot (thanks!) - hard code to get all user variations of the
cat_keywords = {
"code debug": ["debug", "debugging", "bug", "bugs", "error", "errors", "fix", "fixing", "code", "programming"],
"sprite generation": ["sprite", "sprites", "art", "graphics", "graphic", "image", "images", "picture", "pictures"],
"game feature idea": ["feature", "features", "game", "games", "new"]
}
best_cat, _ = self.rawClosestMatch(user_cat_resp, cat_keywords)
#skip if the user doesn't want to add a new category, or can't be understood
if best_cat == "?":
self.formBMO("Ok!")
#add request words to the category
else:
self.formBMO(f"Ok, I'll remember that for next time!")
self.associateWords(words, best_cat)
#perform action based on user request
elif user_req == "code debug":
self.formBMO("I can help you debug your code!")
print("*** [ DEBUG CODE HERE ] ***")
elif user_req == "sprite generation":
self.formBMO("I can help you make sprites!")
print("*** [ GENERATE NEW SPRITES HERE ] ***")
elif user_req == "game feature idea":
self.formBMO("I can help you come up with new game feature ideas!")
print("*** [ MAKE NEW GAME FEATURES HERE ] ***")
#get the next user input
user_resp = self.formUser()
self.formBMO("See ya later!")
#talk function for use with the html interface
def talk_html(self, user_resp):
#intro
if self.text_mode == "intro":
self.text_mode = "normal"
return {"txt":"Hi there! I'm BMO, the game dev chatbot!\nWhat do you need? I'll do my best to help you out!","action":"","face":":)"}
#normal request
elif self.text_mode == "normal":
#if the user said an exit word, end the conversation, and close the window
if user_resp.lower() in self.exitWords:
self.text_mode = "exit"
return {"txt":"See ya later!","action":"close"}
#otherwise, determine what the user wants
user_req, words = self.matchCategory(user_resp)
#do one of the three actions
if user_req == "code debug":
return {"txt":"I can help you debug your code!","action":"code"}
elif user_req == "sprite generation":
return {"txt":"I can help you make sprites!","action":"sprites"}
elif user_req == "game feature idea":
return {"txt":"I can help you come up with new game feature ideas!","action":"features"}
#unknown request
else:
#figure out where the words associated fit in
self.text_mode = "learn"
self.last_words = words
return {"txt":"I'm sorry, I don't understand...\nIs this related to code debugging, making sprites, or new game features?", "action":"","face":":|"}
#learn mode
elif self.text_mode == "learn":
#made by copilot (thanks!) - hard code to get all user variations of the categories
cat_keywords = {
"code debug": ["debug", "debugging", "bug", "bugs", "error", "errors", "fix", "fixing", "code", "programming"],
"sprite generation": ["sprite", "sprites", "art", "graphics", "graphic", "image", "images", "picture", "pictures"],
"game feature idea": ["feature", "features", "game", "games", "new"]
}
best_cat, _ = self.rawClosestMatch(user_resp, cat_keywords)
#skip if the user doesn't want to add a new category, or can't be understood
if best_cat == "?":
self.text_mode = "normal"
return {"txt":"Ok!","action":"","face":":)"}
#add request words to the category
else:
self.associateWords(self.last_words, best_cat)
self.last_words = []
self.text_mode = "normal"
return {"txt":"Ok, I'll remember that for next time!","action":"","face":":)"}
#associate a raw user response to the closest matching group
# Input: resp - the raw user response
# wgroups - the dictionary of keywords to match against (form str - list of str)
def rawClosestMatch(self,resp,wgroups):
#tokenize the response for analysis
raw_toks = word_tokenize(resp)
toks = [w.lower() for w in raw_toks if w.lower() not in stopwords.words("english") and w.isalpha() and w not in self.extra_stopwords]
#make word counts for each category
group_ct = {}
for k in wgroups:
group_ct[k] = 0
group_ct["?"] = 0
#count the words in each category
n = list(wgroups.keys())
g = list(wgroups.values())
for t in toks:
wi = self.wordGroupIndex(t,g)
if len(wi) > 0:
for i in wi:
group_ct[n[i]] += 1
else:
group_ct["?"] += 1
self.debugTxt(group_ct)
#get the majority category
return max(group_ct, key=group_ct.get), toks
#find all matching indices of a word in a list of list of words
def wordGroupIndex(self, w, gset):
return [i for i,group in enumerate(gset) if w in group]
#### EXTRA FUNCTIONS ####
# show the face of BMO as ascii art
def showFace(self):
bmo_ascii = open("bmo_ascii.txt", "r").readlines()
for line in bmo_ascii:
print(line, end="")
print("")
## below was written by Github Copilot -- holy fucking shit.... ##
def copilot_intro_test(self):
print("Hello World!")
print("I am a chatbot prototype.")
print("I am not very smart yet.")
print("I can only say hello and goodbye.")
print("What is your name?")
name = input()
print("Hello " + name + "!")
print("Goodbye " + name + "!")
print("I hope you enjoyed your time with me.")
print("Goodbye World!")
| MasterMilkX/BMO_chatbot_prototype | Python/bmo.py | bmo.py | py | 10,867 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.word_tokenize",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 230,
"usage_type": "call"
},
{
"api_name":... |
36947653909 | from __future__ import print_function
__revision__ = "src/engine/SCons/Tool/MSCommon/common.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import copy
import json
import os
import re
import subprocess
import sys
import SCons.Util
# SCONS_MSCOMMON_DEBUG is internal-use so undocumented:
# set to '-' to print to console, else set to filename to log to
LOGFILE = os.environ.get('SCONS_MSCOMMON_DEBUG')
if LOGFILE == '-':
def debug(message):
print(message)
elif LOGFILE:
import logging
logging.basicConfig(
format='%(relativeCreated)05dms:pid%(process)05d:MSCommon/%(filename)s:%(message)s',
filename=LOGFILE,
level=logging.DEBUG)
debug = logging.getLogger(name=__name__).debug
else:
debug = lambda x: None
# SCONS_CACHE_MSVC_CONFIG is public, and is documented.
CONFIG_CACHE = os.environ.get('SCONS_CACHE_MSVC_CONFIG')
if CONFIG_CACHE in ('1', 'true', 'True'):
CONFIG_CACHE = os.path.join(os.path.expanduser('~'), '.scons_msvc_cache')
def read_script_env_cache():
""" fetch cached msvc env vars if requested, else return empty dict """
envcache = {}
if CONFIG_CACHE:
try:
with open(CONFIG_CACHE, 'r') as f:
envcache = json.load(f)
#TODO can use more specific FileNotFoundError when py2 dropped
except IOError:
# don't fail if no cache file, just proceed without it
pass
return envcache
def write_script_env_cache(cache):
""" write out cache of msvc env vars if requested """
if CONFIG_CACHE:
try:
with open(CONFIG_CACHE, 'w') as f:
json.dump(cache, f, indent=2)
except TypeError:
# data can't serialize to json, don't leave partial file
os.remove(CONFIG_CACHE)
except IOError:
# can't write the file, just skip
pass
_is_win64 = None
def is_win64():
"""Return true if running on windows 64 bits.
Works whether python itself runs in 64 bits or 32 bits."""
# Unfortunately, python does not provide a useful way to determine
# if the underlying Windows OS is 32-bit or 64-bit. Worse, whether
# the Python itself is 32-bit or 64-bit affects what it returns,
# so nothing in sys.* or os.* help.
# Apparently the best solution is to use env vars that Windows
# sets. If PROCESSOR_ARCHITECTURE is not x86, then the python
# process is running in 64 bit mode (on a 64-bit OS, 64-bit
# hardware, obviously).
# If this python is 32-bit but the OS is 64, Windows will set
# ProgramW6432 and PROCESSOR_ARCHITEW6432 to non-null.
# (Checking for HKLM\Software\Wow6432Node in the registry doesn't
# work, because some 32-bit installers create it.)
global _is_win64
if _is_win64 is None:
# I structured these tests to make it easy to add new ones or
# add exceptions in the future, because this is a bit fragile.
_is_win64 = False
if os.environ.get('PROCESSOR_ARCHITECTURE', 'x86') != 'x86':
_is_win64 = True
if os.environ.get('PROCESSOR_ARCHITEW6432'):
_is_win64 = True
if os.environ.get('ProgramW6432'):
_is_win64 = True
return _is_win64
def read_reg(value, hkroot=SCons.Util.HKEY_LOCAL_MACHINE):
return SCons.Util.RegGetValue(hkroot, value)[0]
def has_reg(value):
"""Return True if the given key exists in HKEY_LOCAL_MACHINE, False
otherwise."""
try:
SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, value)
ret = True
except SCons.Util.WinError:
ret = False
return ret
# Functions for fetching environment variable settings from batch files.
def normalize_env(env, keys, force=False):
"""Given a dictionary representing a shell environment, add the variables
from os.environ needed for the processing of .bat files; the keys are
controlled by the keys argument.
It also makes sure the environment values are correctly encoded.
If force=True, then all of the key values that exist are copied
into the returned dictionary. If force=false, values are only
copied if the key does not already exist in the copied dictionary.
Note: the environment is copied."""
normenv = {}
if env:
for k in list(env.keys()):
normenv[k] = copy.deepcopy(env[k])
for k in keys:
if k in os.environ and (force or k not in normenv):
normenv[k] = os.environ[k]
# This shouldn't be necessary, since the default environment should include system32,
# but keep this here to be safe, since it's needed to find reg.exe which the MSVC
# bat scripts use.
sys32_dir = os.path.join(os.environ.get("SystemRoot",
os.environ.get("windir", r"C:\Windows\system32")),
"System32")
if sys32_dir not in normenv['PATH']:
normenv['PATH'] = normenv['PATH'] + os.pathsep + sys32_dir
# Without Wbem in PATH, vcvarsall.bat has a "'wmic' is not recognized"
# error starting with Visual Studio 2017, although the script still
# seems to work anyway.
sys32_wbem_dir = os.path.join(sys32_dir, 'Wbem')
if sys32_wbem_dir not in normenv['PATH']:
normenv['PATH'] = normenv['PATH'] + os.pathsep + sys32_wbem_dir
debug("PATH: %s"%normenv['PATH'])
return normenv
def get_output(vcbat, args = None, env = None):
"""Parse the output of given bat file, with given args."""
if env is None:
# Create a blank environment, for use in launching the tools
env = SCons.Environment.Environment(tools=[])
# TODO: This is a hard-coded list of the variables that (may) need
# to be imported from os.environ[] for v[sc]*vars*.bat file
# execution to work. This list should really be either directly
# controlled by vc.py, or else derived from the common_tools_var
# settings in vs.py.
vs_vc_vars = [
'COMSPEC',
# VS100 and VS110: Still set, but modern MSVC setup scripts will
# discard these if registry has values. However Intel compiler setup
# script still requires these as of 2013/2014.
'VS140COMNTOOLS',
'VS120COMNTOOLS',
'VS110COMNTOOLS',
'VS100COMNTOOLS',
'VS90COMNTOOLS',
'VS80COMNTOOLS',
'VS71COMNTOOLS',
'VS70COMNTOOLS',
'VS60COMNTOOLS',
]
env['ENV'] = normalize_env(env['ENV'], vs_vc_vars, force=False)
if args:
debug("Calling '%s %s'" % (vcbat, args))
popen = SCons.Action._subproc(env,
'"%s" %s & set' % (vcbat, args),
stdin='devnull',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
debug("Calling '%s'" % vcbat)
popen = SCons.Action._subproc(env,
'"%s" & set' % vcbat,
stdin='devnull',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Use the .stdout and .stderr attributes directly because the
# .communicate() method uses the threading module on Windows
# and won't work under Pythons not built with threading.
with popen.stdout:
stdout = popen.stdout.read()
with popen.stderr:
stderr = popen.stderr.read()
# Extra debug logic, uncomment if necessary
# debug('get_output():stdout:%s'%stdout)
# debug('get_output():stderr:%s'%stderr)
if stderr:
# TODO: find something better to do with stderr;
# this at least prevents errors from getting swallowed.
sys.stderr.write(stderr)
if popen.wait() != 0:
raise IOError(stderr.decode("mbcs"))
output = stdout.decode("mbcs")
return output
KEEPLIST = ("INCLUDE", "LIB", "LIBPATH", "PATH", 'VSCMD_ARG_app_plat')
def parse_output(output, keep=KEEPLIST):
"""
Parse output from running visual c++/studios vcvarsall.bat and running set
To capture the values listed in keep
"""
# dkeep is a dict associating key: path_list, where key is one item from
# keep, and path_list the associated list of paths
dkeep = dict([(i, []) for i in keep])
# rdk will keep the regex to match the .bat file output line starts
rdk = {}
for i in keep:
rdk[i] = re.compile('%s=(.*)' % i, re.I)
def add_env(rmatch, key, dkeep=dkeep):
path_list = rmatch.group(1).split(os.pathsep)
for path in path_list:
# Do not add empty paths (when a var ends with ;)
if path:
# XXX: For some reason, VC98 .bat file adds "" around the PATH
# values, and it screws up the environment later, so we strip
# it.
path = path.strip('"')
dkeep[key].append(str(path))
for line in output.splitlines():
for k, value in rdk.items():
match = value.match(line)
if match:
add_env(match, k)
return dkeep
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mongodb/mongo | src/third_party/scons-3.1.2/scons-local-3.1.2/SCons/Tool/MSCommon/common.py | common.py | py | 9,322 | python | en | code | 24,670 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
... |
73495508264 | from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseNotFound
from django.views import View
from django.conf import settings
import json
import itertools
import random
from datetime import datetime
JSON_DATA = settings.NEWS_JSON_PATH
def get_json_data():
with open(JSON_DATA, 'r') as f:
data = json.load(f)
return data
def add_json_data(updated_data):
with open(JSON_DATA, 'w') as f:
json.dump(updated_data, f)
class ComingSoonView(View):
def get(self, request, *args, **kwargs):
return redirect('/news/')
class ArticlesListView(View):
def get(self, request, *args, **kwargs):
data = get_json_data()
sorted_news = sorted(data, key=lambda i: i['created'], reverse=True)
grouped_news = itertools.groupby(sorted_news, lambda i: i['created'][:10])
articles = []
for k, v in grouped_news:
articles.append((k, list(v)))
context = {'articles': articles}
search = request.GET.get('q')
if search:
filtered = data.copy()
for i in data:
if search not in i.get('title'):
filtered.remove(i)
sorted_filtered = sorted(filtered, key=lambda i: i['created'], reverse=True)
grouped_filtered = itertools.groupby(sorted_filtered, lambda i: i['created'][:10])
articles_filtered = []
for key, value in grouped_filtered:
articles_filtered.append((key, list(value)))
context['articles'] = articles_filtered
return render(request, 'news/articles.html', context=context)
class ArticleView(View):
def get(self, request, article_id, *args, **kwargs):
data = get_json_data()
article = list(filter(lambda x: x['link'] == int(article_id), data))
if not article:
return HttpResponseNotFound('<h1>Page not found</h1>')
context = {'article': article[0]}
return render(request, 'news/article.html', context=context)
class CreateArticleView(View):
data = get_json_data()
def get(self, request, *args, **kwargs):
return render(request, 'news/create.html')
def post(self, request, *args, **kwargs):
title = request.POST.get('title')
text = request.POST.get('text')
created = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
link = random.randint(0, 1000000000)
new_article = {"created": created,
"text": text,
"title": title,
"link": link}
updated_data = self.data
updated_data.append(new_article)
add_json_data(updated_data)
return redirect('/news/') | Vladpetr/NewsPortal | HyperNews Portal/task/news/views.py | views.py | py | 2,754 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.settings.NEWS_JSON_PATH",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name"... |
31091913885 | from django.shortcuts import render
from parse.forms import ParseForm
from parse.tasks import task_parse
def ozon_parse(request):
if request.method == 'POST':
form = ParseForm(request.POST)
if form.is_valid():
id_user = form.cleaned_data.get('id_user')
api_key = form.cleaned_data.get('api_key')
task_parse.delay(id_user=id_user, api_key=api_key)
else:
form = ParseForm()
return render(request, 'parse.html', {'form': form})
| jurawlew/ozon_parse | parse/views.py | views.py | py | 500 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "parse.forms.ParseForm",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "parse.tasks.task_parse.delay",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "parse.tasks.task_parse",
"line_number": 13,
"usage_type": "name"
},
{
"api_name... |
70806938343 | import sys
from collections import deque
sys.stdin = open('input.txt')
def bellmanford():
for n in range(N):
for i in range(N):
for weight, node in linked[i]:
if distance[i] != -1e10 and distance[node] < weight + distance[i]:
distance[node] = distance[i] + weight
paths[node] = i
if n == N-1:
distance[node] = 1e10
N, M = map(int, sys.stdin.readline().split())
linked = [[] for _ in range(N)]
distance = [-1e10] * N
distance[0] = 0
paths = [-1] * N
for _ in range(M):
u, v, w = map(int, sys.stdin.readline().split())
linked[u-1].append((w, v-1))
bellmanford()
if distance[-1] == 1e10:
print(-1)
else:
answer = deque([])
node = N - 1
while node >= 0:
answer.appendleft(node+1)
node = paths[node]
print(*answer)
| unho-lee/TIL | CodeTest/Python/BaekJoon/1738.py | 1738.py | py | 888 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdin",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.readline",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.readline",
... |
20460415252 | import time
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as rnd
import sorting.merge_sort as merge
import sorting.quick_sort as quick
MAX_SIZE = 1000
STEP = 1
NUM_ITERATIONS = 10
def timer(task):
start = time.clock()
task()
end = time.clock()
return end - start
def test_merge_sort(merge_func):
res = []
for list_size in xrange(1, MAX_SIZE, STEP):
inner_res = []
for iteration in xrange(NUM_ITERATIONS):
list_to_sort = [rnd.randint(0, list_size) for r in xrange(list_size)]
inner_res.append(timer(lambda: merge_func(list_to_sort)))
res.append(np.mean(inner_res))
return res
def plot_results(results_one, results_two=None):
x = [i for i in xrange(len(results_one))]
plt.plot(x, results_one, label='results_one')
plt.plot(x, results_two, label='results_two')
plt.legend(loc='upper right')
plt.show()
if __name__ == "__main__":
time_results = test_merge_sort(lambda x: merge.merge_sort(x))
time_results_opt = test_merge_sort(lambda x: quick.lomuto_sort(x, 0, len(x) - 1))
plot_results(time_results, time_results_opt)
| Apophany/coding-exercises | python/performance/perf_test.py | perf_test.py | py | 1,160 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.clock",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.clock",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_n... |
74053950505 | from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = input("Enter URL - ")
############################################### Comment this for first question ##############################
pos = int(input("Enter the position - ")) - 1
count = int(input("Enter the count - "))
count1 = 0
while(count1 < count):
html = urlopen(url).read()
soup = BeautifulSoup(html, 'html.parser')
tags = soup('a')
url = tags[pos].get('href')
name = tags[pos].contents[0]
count1 = count1 + 1
print(name) | maleeha045/python-for-everybody | 3_using_python_to_access_web_data/scrapUrl.py | scrapUrl.py | py | 634 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ssl.create_default_context",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "ssl.CERT_NONE",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "bs... |
38985345142 | from functools import partial
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import fetch_openml
from sklearn.metrics import mean_tweedie_deviance
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
def load_mtpl2(n_samples=100000):
"""Fetch the French Motor Third-Party Liability Claims dataset.
Parameters
----------
n_samples: int, default=100000
number of samples to select (for faster run time). Full dataset has
678013 samples.
"""
# freMTPL2freq dataset from https://www.openml.org/d/41214
df_freq = fetch_openml(data_id=41214, as_frame=True)["data"]
df_freq["IDpol"] = df_freq["IDpol"].astype(int)
df_freq.set_index("IDpol", inplace=True)
# freMTPL2sev dataset from https://www.openml.org/d/41215
df_sev = fetch_openml(data_id=41215, as_frame=True)["data"]
# sum ClaimAmount over identical IDs
df_sev = df_sev.groupby("IDpol").sum()
df = df_freq.join(df_sev, how="left")
df["ClaimAmount"].fillna(0, inplace=True)
# unquote string fields
for column_name in df.columns[df.dtypes.values == object]:
df[column_name] = df[column_name].str.strip("'")
return df.iloc[:n_samples]
def plot_obs_pred(
df,
feature,
weight,
observed,
predicted,
y_label=None,
title=None,
ax=None,
fill_legend=False,
):
"""Plot observed and predicted - aggregated per feature level.
Parameters
----------
df : DataFrame
input data
feature: str
a column name of df for the feature to be plotted
weight : str
column name of df with the values of weights or exposure
observed : str
a column name of df with the observed target
predicted : DataFrame
a dataframe, with the same index as df, with the predicted target
fill_legend : bool, default=False
whether to show fill_between legend
"""
# aggregate observed and predicted variables by feature level
df_ = df.loc[:, [feature, weight]].copy()
df_["observed"] = df[observed] * df[weight]
df_["predicted"] = predicted * df[weight]
df_ = (
df_.groupby([feature])[[weight, "observed", "predicted"]]
.sum()
.assign(observed=lambda x: x["observed"] / x[weight])
.assign(predicted=lambda x: x["predicted"] / x[weight])
)
ax = df_.loc[:, ["observed", "predicted"]].plot(style=".", ax=ax)
y_max = df_.loc[:, ["observed", "predicted"]].values.max() * 0.8
p2 = ax.fill_between(
df_.index,
0,
y_max * df_[weight] / df_[weight].values.max(),
color="g",
alpha=0.1,
)
if fill_legend:
ax.legend([p2], ["{} distribution".format(feature)])
ax.set(
ylabel=y_label if y_label is not None else None,
title=title if title is not None else "Train: Observed vs Predicted",
)
def score_estimator(
estimator,
X_train,
X_test,
df_train,
df_test,
target,
weights,
tweedie_powers=None,
):
"""Evaluate an estimator on train and test sets with different metrics"""
metrics = [
("D² explained", None), # Use default scorer if it exists
("mean abs. error", mean_absolute_error),
("mean squared error", mean_squared_error),
]
if tweedie_powers:
metrics += [
(
"mean Tweedie dev p={:.4f}".format(power),
partial(mean_tweedie_deviance, power=power),
)
for power in tweedie_powers
]
res = []
for subset_label, X, df in [
("train", X_train, df_train),
("test", X_test, df_test),
]:
y, _weights = df[target], df[weights]
for score_label, metric in metrics:
if isinstance(estimator, tuple) and len(estimator) == 2:
# Score the model consisting of the product of frequency and
# severity models.
est_freq, est_sev = estimator
y_pred = est_freq.predict(X) * est_sev.predict(X)
else:
y_pred = estimator.predict(X)
if metric is None:
if not hasattr(estimator, "score"):
continue
score = estimator.score(X, y, sample_weight=_weights)
else:
score = metric(y, y_pred, sample_weight=_weights)
res.append({"subset": subset_label, "metric": score_label, "score": score})
res = (
pd.DataFrame(res)
.set_index(["metric", "subset"])
.score.unstack(-1)
.round(4)
.loc[:, ["train", "test"]]
)
return res
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer, OneHotEncoder
from sklearn.preprocessing import StandardScaler, KBinsDiscretizer
from sklearn.compose import ColumnTransformer
df = load_mtpl2(n_samples=60000)
# Note: filter out claims with zero amount, as the severity model
# requires strictly positive target values.
df.loc[(df["ClaimAmount"] == 0) & (df["ClaimNb"] >= 1), "ClaimNb"] = 0
# Correct for unreasonable observations (that might be data error)
# and a few exceptionally large claim amounts
df["ClaimNb"] = df["ClaimNb"].clip(upper=4)
df["Exposure"] = df["Exposure"].clip(upper=1)
df["ClaimAmount"] = df["ClaimAmount"].clip(upper=200000)
log_scale_transformer = make_pipeline(
FunctionTransformer(func=np.log), StandardScaler()
)
column_trans = ColumnTransformer(
[
("binned_numeric", KBinsDiscretizer(n_bins=10), ["VehAge", "DrivAge"]),
(
"onehot_categorical",
OneHotEncoder(),
["VehBrand", "VehPower", "VehGas", "Region", "Area"],
),
("passthrough_numeric", "passthrough", ["BonusMalus"]),
("log_scaled_numeric", log_scale_transformer, ["Density"]),
],
remainder="drop",
)
X = column_trans.fit_transform(df)
# Insurances companies are interested in modeling the Pure Premium, that is
# the expected total claim amount per unit of exposure for each policyholder
# in their portfolio:
df["PurePremium"] = df["ClaimAmount"] / df["Exposure"]
# This can be indirectly approximated by a 2-step modeling: the product of the
# Frequency times the average claim amount per claim:
df["Frequency"] = df["ClaimNb"] / df["Exposure"]
df["AvgClaimAmount"] = df["ClaimAmount"] / np.fmax(df["ClaimNb"], 1)
with pd.option_context("display.max_columns", 15):
print(df[df.ClaimAmount > 0].head())
from sklearn.model_selection import train_test_split
from sklearn.linear_model import PoissonRegressor
df_train, df_test, X_train, X_test = train_test_split(df, X, random_state=0)
# The parameters of the model are estimated by minimizing the Poisson deviance
# on the training set via a quasi-Newton solver: l-BFGS. Some of the features
# are collinear, we use a weak penalization to avoid numerical issues.
glm_freq = PoissonRegressor(alpha=1e-3, max_iter=400)
glm_freq.fit(X_train, df_train["Frequency"], sample_weight=df_train["Exposure"])
scores = score_estimator(
glm_freq,
X_train,
X_test,
df_train,
df_test,
target="Frequency",
weights="Exposure",
)
print("Evaluation of PoissonRegressor on target Frequency")
print(scores)
fig, ax = plt.subplots(ncols=2, nrows=2, figsize=(16, 8))
fig.subplots_adjust(hspace=0.3, wspace=0.2)
plot_obs_pred(
df=df_train,
feature="DrivAge",
weight="Exposure",
observed="Frequency",
predicted=glm_freq.predict(X_train),
y_label="Claim Frequency",
title="train data",
ax=ax[0, 0],
)
plot_obs_pred(
df=df_test,
feature="DrivAge",
weight="Exposure",
observed="Frequency",
predicted=glm_freq.predict(X_test),
y_label="Claim Frequency",
title="test data",
ax=ax[0, 1],
fill_legend=True,
)
plot_obs_pred(
df=df_test,
feature="VehAge",
weight="Exposure",
observed="Frequency",
predicted=glm_freq.predict(X_test),
y_label="Claim Frequency",
title="test data",
ax=ax[1, 0],
fill_legend=True,
)
plot_obs_pred(
df=df_test,
feature="BonusMalus",
weight="Exposure",
observed="Frequency",
predicted=glm_freq.predict(X_test),
y_label="Claim Frequency",
title="test data",
ax=ax[1, 1],
fill_legend=True,
)
from sklearn.linear_model import GammaRegressor
mask_train = df_train["ClaimAmount"] > 0
mask_test = df_test["ClaimAmount"] > 0
glm_sev = GammaRegressor(alpha=10.0, max_iter=10000)
glm_sev.fit(
X_train[mask_train.values],
df_train.loc[mask_train, "AvgClaimAmount"],
sample_weight=df_train.loc[mask_train, "ClaimNb"],
)
scores = score_estimator(
glm_sev,
X_train[mask_train.values],
X_test[mask_test.values],
df_train[mask_train],
df_test[mask_test],
target="AvgClaimAmount",
weights="ClaimNb",
)
print("Evaluation of GammaRegressor on target AvgClaimAmount")
print(scores)
print(
"Mean AvgClaim Amount per policy: %.2f "
% df_train["AvgClaimAmount"].mean()
)
print(
"Mean AvgClaim Amount | NbClaim > 0: %.2f"
% df_train["AvgClaimAmount"][df_train["AvgClaimAmount"] > 0].mean()
)
print(
"Predicted Mean AvgClaim Amount | NbClaim > 0: %.2f"
% glm_sev.predict(X_train).mean()
)
from sklearn.linear_model import TweedieRegressor
glm_pure_premium = TweedieRegressor(power=1.9, alpha=0.1, max_iter=10000)
glm_pure_premium.fit(
X_train, df_train["PurePremium"], sample_weight=df_train["Exposure"]
)
tweedie_powers = [1.5, 1.7, 1.8, 1.9, 1.99, 1.999, 1.9999]
scores_product_model = score_estimator(
(glm_freq, glm_sev),
X_train,
X_test,
df_train,
df_test,
target="PurePremium",
weights="Exposure",
tweedie_powers=tweedie_powers,
)
scores_glm_pure_premium = score_estimator(
glm_pure_premium,
X_train,
X_test,
df_train,
df_test,
target="PurePremium",
weights="Exposure",
tweedie_powers=tweedie_powers,
)
scores = pd.concat(
[scores_product_model, scores_glm_pure_premium],
axis=1,
sort=True,
keys=("Product Model", "TweedieRegressor"),
)
print("Evaluation of the Product Model and the Tweedie Regressor on target PurePremium")
with pd.option_context("display.expand_frame_repr", False):
print(scores)
| christopher-parrish/sas_viya | python/tweedie_regressor_python/pure_premium_python_example.py | pure_premium_python_example.py | py | 10,361 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sklearn.datasets.fetch_openml",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.fetch_openml",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.mean_absolute_error",
"line_number": 112,
"usage_type": "name... |
11412632632 | import logging
import time
from typing import List
from spaceone.inventory.connector.aws_kinesis_data_stream_connector.schema.data import (
StreamDescription,
Consumers,
)
from spaceone.inventory.connector.aws_kinesis_data_stream_connector.schema.resource import (
StreamResource,
KDSResponse,
)
from spaceone.inventory.connector.aws_kinesis_data_stream_connector.schema.service_type import (
CLOUD_SERVICE_TYPES,
)
from spaceone.inventory.libs.connector import SchematicAWSConnector
_LOGGER = logging.getLogger(__name__)
class KinesisDataStreamConnector(SchematicAWSConnector):
service_name = "kinesis"
def get_resources(self):
print("** kinesis Data Stream Manager Start **")
resources = []
start_time = time.time()
collect_resources = [
{
"request_method": self.request_data,
"resource": StreamResource,
"response_schema": KDSResponse,
}
]
for cst in CLOUD_SERVICE_TYPES:
resources.append(cst)
for region_name in self.region_names:
self.reset_region(region_name)
for collect_resource in collect_resources:
resources.extend(
self.collect_data_by_region(
self.service_name, region_name, collect_resource
)
)
print(f" kinesis Data Stream Manager Finished {time.time() - start_time} Seconds")
return resources
def request_data(self, region_name) -> List[StreamDescription]:
paginator = self.client.get_paginator("list_streams")
response_iterator = paginator.paginate(
PaginationConfig={
"MaxItems": 10000,
"PageSize": 50,
}
)
for data in response_iterator:
for stream_name in data.get("StreamNames", []):
stream_response = self.client.describe_stream(StreamName=stream_name)
stream_info = stream_response.get("StreamDescription", {})
num_of_con, consumers = self.get_consumers(stream_info.get("StreamARN"))
stream_info.update(
{
"stream_status_display": self.get_stream_status_display(
stream_info.get("StreamStatus")
),
"retention_period_days": self.get_retention_period_days(
stream_info.get("RetentionPeriodHours")
),
"retention_period_display": self.get_retention_period_display(
stream_info.get("RetentionPeriodHours")
),
"retention_period_display_hours": f"{stream_info.get('RetentionPeriodHours')} hours",
"encryption_display": self.get_encryption_display(
stream_info.get("EncryptionType")
),
"shard_level_metrics_display": self.get_shard_level_metrics_display(
stream_info.get("EnhancedMonitoring")
),
"open_shards_num": self.get_open_shards_num(
stream_info.get("Shards")
),
"closed_shards_num": self.get_closed_shards_num(
stream_info.get("Shards")
),
"consumers_vo": {
"num_of_consumers": num_of_con,
"consumers": consumers,
},
"tags": self.get_tags(stream_info.get("StreamName")),
"account_id": self.account_id,
}
)
res = StreamDescription(stream_info, strict=False)
yield res
def get_tags(self, name):
tag_response = self.client.list_tags_for_stream(StreamName=name)
return tag_response.get("Tags", [])
def get_consumers(self, arn):
consumer_response = self.client.list_stream_consumers(StreamARN=arn)
consumers_info = consumer_response.get("Consumers", [])
consumers_num = len(consumers_info)
consumers = []
for consumer_info in consumers_info:
consumer_info.update(
{
"consumer_status_display": self.get_consumers_status_display(
consumer_info.get("ConsumerStatus")
),
}
)
consumers.append(Consumers(consumer_info, strict=False))
return consumers_num, consumers
@staticmethod
def get_consumers_num(consumers):
return len(consumers)
@staticmethod
def get_consumers_status_display(raw_status):
return raw_status[0] + raw_status[1:].lower()
@staticmethod
def get_stream_status_display(raw_status):
return raw_status[0] + raw_status[1:].lower()
@staticmethod
def get_retention_period_days(retention_period_hours):
return int(retention_period_hours / 24)
@staticmethod
def get_retention_period_display(retention_period_hours):
day = int(retention_period_hours / 24)
hour = int(retention_period_hours % 24)
day_postfix = f"{day} day" if day == 1 else ("" if not day else f"{day} days")
hour_postfix = (
f" {hour} hour" if hour == 1 else ("" if not hour else f" {hour} hours")
)
return day_postfix + hour_postfix
@staticmethod
def get_encryption_display(raw_encryption):
return "Disabled" if raw_encryption == "NONE" else "Enabled"
@staticmethod
def get_shard_level_metrics_display(enhanced_monitoring):
return (
["Disabled"]
if not enhanced_monitoring[0]["ShardLevelMetrics"]
else enhanced_monitoring[0]["ShardLevelMetrics"]
)
@staticmethod
def get_open_shards_num(shards_list):
return len(
[
shard
for shard in shards_list
if shard.get("SequenceNumberRange", {}).get("EndingSequenceNumber")
is None
]
)
@staticmethod
def get_closed_shards_num(shards_list):
return len(
[
shard
for shard in shards_list
if shard.get("SequenceNumberRange", {}).get("EndingSequenceNumber")
is not None
]
)
| 100sun/plugin-aws-cloud-services | src/spaceone/inventory/connector/aws_kinesis_data_stream_connector/connector.py | connector.py | py | 6,632 | python | en | code | null | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "spaceone.inventory.libs.connector.SchematicAWSConnector",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 27,
"usage_type": "call"
},
{
... |
24026596766 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
from tqdm import tqdm
import matplotlib.pyplot as plt
import os
import datetime
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from utils import get_dataset
from options import args_parser
from update import test_inference
from models import CNNMnist, CNNFashion_Mnist, CNNCifar
from resnet import ResNet18
def adjust_learning_rate(optimizer, epoch):
    """Decay every parameter group's learning rate by 10x at milestone epochs."""
    milestones = (150, 250, 350)
    if epoch not in milestones:
        return
    for group in optimizer.param_groups:
        group['lr'] *= 0.1
if __name__ == '__main__':
args = args_parser()
multi_gpus = False
if ',' in args.gpu:
gpu_ids = [int(id) for id in args.gpu.split(',')]
multi_gpus = True
else:
gpu_ids = [int(args.gpu)]
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# load datasets
train_dataset, test_dataset, _ = get_dataset(args)
# BUILD MODEL
# Convolutional neural netork
if args.dataset == 'mnist':
global_model = CNNMnist(args=args)
elif args.dataset == 'fmnist':
global_model = CNNFashion_Mnist(args=args)
# global_model = MobileNetV2(1, 10, alpha =1)
elif args.dataset == 'cifar':
# global_model = CNNCifar(args=args)
global_model = ResNet18()
# global_model = nn.DataParallel(MobileNetV2(3, 10, alpha = 1), device_ids=gpu_ids)
# global_model = nn.DataParallel(MobileNet(), device_ids=gpu_ids)
else:
exit('Error: unrecognized model')
# Set the model to train and send it to device.
global_model.to(device)
print(global_model)
# Training
# Set optimizer and criterion
if args.optimizer == 'sgd':
optimizer = torch.optim.SGD(global_model.parameters(), lr=args.lr,
momentum=0.9, nesterov=True)
elif args.optimizer == 'adam':
optimizer = torch.optim.Adam(global_model.parameters(), lr=args.lr,
weight_decay=1e-4)
trainloader = DataLoader(train_dataset, batch_size=256, shuffle=True)
criterion = nn.CrossEntropyLoss().to(device)
epoch_loss = []
test_loss = []
train_accs = []
test_accs = []
start_time = datetime.datetime.now()
for epoch in range(args.epochs):
batch_loss = []
total,correct = 0, 0
# Train
adjust_learning_rate(optimizer, epoch)
global_model.train()
for batch_idx, (images, labels) in enumerate(trainloader):
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad()
outputs = global_model(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
if batch_idx % 50 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch+1, batch_idx * len(images), len(trainloader.dataset),
100. * batch_idx / len(trainloader), loss.item()))
batch_loss.append(loss.item())
elapsed_time = datetime.datetime.now() - start_time
train_acc = correct / total
train_accs.append(train_acc)
# Test
test_acc, test_los = test_inference(args, global_model, test_dataset)
loss_avg = sum(batch_loss)/len(batch_loss)
# print('Train Loss:', loss_avg
print('\nTime [%s] : Train loss:%.5f, Acc:%.4f%% ----- Test loss:%.5f, Acc:%.4f%%\n' % (elapsed_time, loss_avg, train_acc*100,test_los, test_acc*100))
epoch_loss.append(loss_avg)
test_loss.append(test_los)
train_accs.append(train_acc)
test_accs.append(test_acc)
# np.save('save/train_loss.npy', epoch_loss)
# np.save('save/test_loss.npy', test_loss)
# np.save('save/train_acc.npy', train_accs)
# np.save('save/test_acc.npy', test_accs)
# Plot loss
# plt.figure()
# plt.plot(range(len(epoch_loss)), epoch_loss)
# plt.xlabel('epochs')
# plt.ylabel('Train loss')
# plt.savefig('../save/nn_{}_{}_{}.png'.format(args.dataset, args.model,args.epochs))
# testing
test_acc, test_loss = test_inference(args, global_model, test_dataset)
print('Test on', len(test_dataset), 'samples')
print("Test Accuracy: {:.2f}%".format(100*test_acc))
torch.save(global_model.state_dict(), 'whole_model.pkl')
| LiruichenSpace/FedFusion | src/baseline_main.py | baseline_main.py | py | 4,708 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "options.args_parser",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_availa... |
4693523565 | from bs4 import BeautifulSoup
import requests as google
from nltk import word_tokenize, FreqDist
import string
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
import re
from sklearn.feature_extraction.text import TfidfVectorizer
import gensim
from nltk.tokenize import word_tokenize
stopwords_list = stopwords.words('english') + ['...', '--']
google_url = "https://www.google.com.sg/search?dcr=0&source=hp&ei=wH5dWp_OJIqF8wWp4ISoBg&q="
class SearchEngineInterface():
    """Scrape Google result snippets for a query and rank them by TF-IDF
    similarity to the query itself, returning the five closest snippets.

    NOTE(review): `google` is `requests` imported under an alias at the top
    of the file; `google_url` is the module-level search URL prefix.
    """
    def __init__(self,search):
        # search: the raw query string; also reused later as the similarity query.
        self.search = search
        self.url = google_url + search
    def search_query(self):
        """Run the full pipeline: scrape -> index -> rank; returns a list of
        up to five snippet strings, or a one-element fallback message."""
        documents = self.scrap_data()
        if len(documents) == 0:
            return ["No Message found"]
        sims, dictionary, tf_idf = self.get_similarites(documents)
        query_tf_idf = self.get_query_doc_tf_idf(self.search, dictionary, tf_idf)
        return self.get_similar_docs(sims, query_tf_idf, documents)
    def scrap_data(self):
        """Fetch the Google results page and extract snippet spans.

        Targets <span class="st"> elements — presumably Google's snippet
        markup at the time of writing; fragile if the markup changes.
        """
        response = google.get(self.url)
        soupdata = BeautifulSoup(response.text,'html.parser')
        text_data = soupdata.findAll('span',{'class':'st'})
        return self.clean_scrap_data(text_data)
    def clean_scrap_data(self, data):
        """Normalize raw snippet tags into plain-text documents.

        Strips non-ASCII, splits on Google's "..." ellipses, drops a leading
        date fragment (e.g. "12 Jan 2018"), trims a trailing partial
        sentence, and rejoins the remaining pieces.
        """
        cleaned_documents = []
        for i in data:
            # Drop any non-ASCII characters entirely.
            content = i.text.encode('ascii','ignore').decode("utf-8")
            if len(content) == 0: continue
            text = content.split("...")
            text = [ con.replace("\n", '').replace("\xa0", '').replace("\"",'') for con in text if len(con) > 0 ]
            # Snippets often start with a "DD Mon YYYY" date — discard it.
            if re.search("^\d{1,2}\s[a-zA-Z]{3}\s\d{4}",text[0]):
                text = text[1:]
            if len(text) == 0: continue
            # If the last fragment is cut mid-sentence, keep only complete sentences.
            if text[-1][-1] != '.':
                text[-1] = ".".join(text[-1].split(". ")[:-1])
            text = [ con.strip() for con in text if con != [''] ]
            if text == ['']: continue
            cleaned_documents.append("".join(text))
        return cleaned_documents
    def get_similarites(self, documents):
        """Build a gensim TF-IDF similarity index over the documents.

        Returns (similarity index, dictionary, tf-idf model). The index
        shards are written to the current directory ('./').
        """
        gen_docs = [[w.lower() for w in word_tokenize(text)] for text in documents]
        dictionary = gensim.corpora.Dictionary(gen_docs)
        corpus = [dictionary.doc2bow(gen_doc) for gen_doc in gen_docs]
        tf_idf = gensim.models.TfidfModel(corpus)
        sims = gensim.similarities.Similarity('./',tf_idf[corpus], num_features=len(dictionary))
        return sims, dictionary, tf_idf
    def get_query_doc_tf_idf(self, query, dictionary, tf_idf):
        """Project the query string into the corpus TF-IDF space."""
        query_doc = [w.lower() for w in word_tokenize(query)]
        query_doc_bow = dictionary.doc2bow(query_doc)
        return tf_idf[query_doc_bow]
    def get_similar_docs(self, sims, query_doc_tf_idf, documents):
        """Return the five documents most similar to the query, best first."""
        doc_with_rank = {}
        for i,val in enumerate(sims[query_doc_tf_idf]):
            doc_with_rank[i] = val
        doc_sims = sorted(doc_with_rank.items(),key=lambda x : x[1],reverse=True)[:5]
        final_text = []
        for index,_ in doc_sims:
            final_text.append(documents[index])
        return final_text
| ethirajsrinivasan/LSIWebScrap | search_engine_interface.py | search_engine_interface.py | py | 3,042 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "bs4.Be... |
33553726731 | '''
单个模型的pipeline
比如多层意图 只能对某一层意图进行infer
'''
from pre_data_deal import Pre_Data_Deal
from NameEntityRec import NameEntityRec
import random
from IntentConfig import Config
from collections import OrderedDict
from model.dl_model.model_lstm_mask.lstm_mask import LstmMask
from model.dl_model.model_lstm_mask.pipeline_lstm_mask import IntentDLB
from model.ml_model.intent_ml import IntentMl
from entity_recognition.ner import EntityRecognition
from collections import defaultdict
import math
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filemode='w')
_logger=logging.getLogger("pipeline_tool")
class PipelineTool(object):
def __init__(self,config):
self.config=config
self.corpus_path='./corpus_data/%s'%self.config.corpus_name #数据集路径
self.pdd=Pre_Data_Deal() #数据预处理
if config.save_model_name != 'intent_all':
config.entity_level='level_2'
self.ner=NameEntityRec(config) #
self.origin_ner=EntityRecognition(config_=config)
def pre_data_deal(self,sent):
'''
数据预处理 去停用词
:return:
'''
return self.pdd.main(sent=sent)
def entity_rec(self,sent):
'''
命名实体识别
:param sent:
:return:
'''
return self.ner.main(sent=sent)[1]
def train_dev_split(self,datas,split_rate=0.2):
'''
构建 训练集/测试集
:param datas:
:return:
'''
self.intent_indice = int(len(self.config.root_intent.keys())-1)
intent_dict_ = self.config.root_intent
self.intent_dict = OrderedDict()
for k, v in intent_dict_.items():
v_ = [e.lower() for e in v]
self.intent_dict[k] = v_
fw_train = open('./corpus_data/%s'%self.config.train_name, 'w')
fw_dev = open('./corpus_data/%s'%self.config.dev_name, 'w')
num_dict = {}
random.shuffle(datas)
split_num = int(len(datas) * split_rate)
# dev write
for ele in datas[:split_num]:
eles = ele.split('\t')
left = eles[:-1]
labels = eles[-1].lower().split('##')
true_label = []
if self.intent_indice == 0:
if labels[self.intent_indice] in self.intent_dict[0] and labels[self.intent_indice] != 'none':
left.append(labels[0])
true_label.append(labels[0])
sent = '\t'.join(left)
fw_dev.write(sent)
fw_dev.write('\n')
else:
for i in range(self.intent_indice + 1):
if labels[i] == 'none':
true_label = []
break
elif labels[i] in self.intent_dict[i]:
true_label.append(labels[i])
elif labels[i] not in self.intent_dict[i]:
true_label = []
break
else:
true_label = []
# print(true_label,'\t\t',ele)
if true_label != []:
left.append('_'.join(true_label))
sent = '\t'.join(left)
fw_dev.write(sent)
fw_dev.write('\n')
if true_label != []:
true_label = '_'.join(true_label)
if true_label not in num_dict:
num_dict[true_label] = 1
else:
s = num_dict[true_label]
s += 1
num_dict[true_label] = s
'''train write'''
for ele in datas[split_num::]:
eles = ele.split('\t')
left = eles[:-1]
labels = eles[-1].split('##')
true_label = []
if self.intent_indice == 0:
if labels[self.intent_indice] in self.intent_dict[0] and labels[self.intent_indice] != 'none':
left.append(labels[0])
true_label.append(labels[0])
sent = '\t'.join(left)
fw_train.write(sent)
fw_train.write('\n')
else:
for i in range(self.intent_indice + 1):
if labels[i] == 'none':
true_label = []
break
elif labels[i] in self.intent_dict[i]:
true_label.append(labels[i])
elif labels[i] not in self.intent_dict[i]:
true_label = []
break
else:
true_label = []
if true_label != []:
left.append('_'.join(true_label))
sent = '\t'.join(left)
fw_train.write(sent)
fw_train.write('\n')
if true_label != []:
true_label = '_'.join(true_label)
if true_label not in num_dict:
num_dict[true_label] = 1
else:
s = num_dict[true_label]
s += 1
num_dict[true_label] = s
_logger.info(num_dict)
def train(self):
'''
:return:
'''
datas=[]
with open(self.corpus_path,'r') as fr:
for line in fr.readlines():
pre_line=self.pre_data_deal(sent=line)
entity_sent=self.entity_rec(sent=pre_line)
datas.append(entity_sent)
fr.close()
self.train_dev_split(datas,0.2)
#
if self.config.ml_classifiers:
intent_ml=IntentMl(self.config.ml_classifiers)
intent_ml.build_model()
outs=intent_ml.train(self.config.ml_classifiers)
for ele in outs:
for e in ele:
_logger.info('%s'%e)
if self.config.dl_classifiers=='DLB':
lstm=LstmMask(scope=self.config.save_model_name)
_logger.info('构建模型')
lstm.__build_model__()
_logger.info('LSTM mask is train')
lstm.__train__()
index=0
# for ele in outs:
# _logger.info('第%s次迭代: 训练准确率:%s 测试准确率:%s'%(index,round(ele[0],2),round(ele[1],2)))
# index+=1
_logger.info('模型存储在%s'%'./save_model/model_lstm_mask/%s'%self.config.save_model_name)
'''#############################infer#########################'''
def _get_FAQ(self):
'''
获取人工标注的FAQ意图
:return:
'''
FAQ_dict = {}
with open('./faq_data/FAQ.txt', 'r') as fr:
fr = (line for line in fr.readlines())
for line in fr:
line = line.replace('\n', '').replace('\t\t', '\t')
try:
sent = line.split('\t')[0]
label = line.split('\t')[1].strip().split(' ')[0]
if label not in ['', ' ']:
FAQ_dict[sent] = label
except Exception as ex:
print(ex, [line])
return FAQ_dict
def get_ml_intent(self, sents: list) -> dict:
'''
从ml模型获取意图
:param sents:
:return:
'''
ml=IntentMl(class_name=self.config.ml_classifiers)
datas=[' '.join(e) for e in self.origin_ner.get_entity(sents)]
return ml.infer(datas, classifier_name=self.config.ml_classifiers)
def get_dlA_intent(self, sents: list,config) -> list:
'''
从dl模型获取意图
:param sents:
:return:
'''
dlb=IntentDLB(config)
if self.config.dl_classifiers == 'DLB':
for ele in dlb.get_intent(sents):
yield ele[0][0]
def infer(self,sents,config):
'''
预测
:return:
'''
print('infer save_model',self.config.save_model_name)
res_dict=defaultdict()
all_dict={}
if self.config.dl_classifiers:
dl_result = list(self.get_dlA_intent(sents,config))
_logger.info('DL 意图识别完成 %s'%dl_result)
all_dict['BLSTM'] = dl_result
if self.config.ml_classifiers:
ml_result = self.get_ml_intent(sents)
_logger.info('ML 意图识别完成 %s'%ml_result)
all_dict = ml_result
for sent, ele in zip(sents, self.vote(all_dict)):
res_dict[sent] = ele
return res_dict
def intent_hgyc_level2(self,sent):
'''
:param sent:
:return:
'''
def vote(self, class_result):
'''
投票
:param class_result:
:return:
'''
ss = []
for k, v in dict(class_result).items():
ele = [(e, self.config.classifier_dict[k]) for e in v]
ss.append(ele)
num_ = len(ss[0])
result = []
for i in range(num_):
ss_i_dict = {}
for j in range(len(ss)):
if isinstance(ss[j][i][0], str):
if ss[j][i][0].lower() not in ss_i_dict:
ss_i_dict[ss[j][i][0].lower()] = ss[j][i][1]
else:
num = ss_i_dict[ss[j][i][0].lower()]
num += ss[j][i][1]
ss_i_dict[ss[j][i][0].lower()] = num
else:
for ele in ss[j][i][0]:
if ele.lower() not in ss_i_dict:
ss_i_dict[ele.lower()] = ss[j][i][1]
else:
num = ss_i_dict[ele.lower()]
num += ss[j][i][1]
ss_i_dict[ele.lower()] = num
ss_sort = [[k, v] for k, v in ss_i_dict.items() if k not in ['', ' ']]
ss_sort.sort(key=lambda x: x[1], reverse=True)
fin_res = ss_sort[0][0]
result.append(fin_res)
return result
if __name__ == '__main__':
config=Config()
pipeline=PipelineTool(config)
# pipeline.train()
sent=['今天天气不词']
res=pipeline.infer(sent,config)
print(res)
#
# config.save_model_name='intent_1'
#
# res=pipeline.infer(sent)
# print(res)
| markWJJ/Intent_Detection | pipeline_tool.py | pipeline_tool.py | py | 10,671 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pre_data_deal... |
4023788958 | import os
import sys
import json
import struct
import logging
from capstone.arm import *
from collections import Counter
from argxtract.common import paths as common_paths
from argxtract.core import utils
from argxtract.core import consts
from argxtract.common import objects as common_objs
from argxtract.core.disassembler import FirmwareDisassembler
from argxtract.resources.vendor.nordic_ant import consts as nordic_consts
class VendorChipsetAnalyser:
def __init__(self):
self.embedded_softdevice = False
def test_binary_against_vendor(self):
logging.info('Checking whether firmware matches Nordic profile.')
# Check for presence of embedded softdevice code.
# Do this only if file is large enough.
file_size_in_bytes = os.stat(common_paths.path_to_fw).st_size
# A very small file wouldn't be firmware.
if file_size_in_bytes < 0xC0:
return None
min_softdevice_size = 1024 * 53
if (file_size_in_bytes > min_softdevice_size):
self.check_for_embedded_softdevice()
if self.embedded_softdevice != True:
# Make sure firmware is Nordic.
is_nordic = self.test_nordic()
if is_nordic == False:
logging.warning(
'The provided firmware file does not follow the pattern '
+ 'for Nordic ANT firmware.'
)
return None
else:
logging.debug(
'File appears to match pattern for Nordic ANT.'
)
# Define disassembler start address.
if self.embedded_softdevice == True:
common_objs.disassembly_start_address = 0x00000000
else:
common_objs.disassembly_start_address = common_objs.app_code_base
# Define code start address.
common_objs.code_start_address = common_objs.vector_table_size \
+ common_objs.app_code_base
logging.info(
'Code start address is: '
+ hex(common_objs.code_start_address)
)
# Get relevant SVC set.
self.get_svc_set()
common_objs.vendor_svc_set = self.vendor_svc_set
return True
def test_nordic(self):
if self.embedded_softdevice == True:
return True
if common_objs.vector_table_size == 0xC0:
self.pre_sdk13 = True
debug_msg = 'Vector table size matches sdk <13'
logging.info(debug_msg)
return True
elif common_objs.vector_table_size == 0x0200:
self.pre_sdk13 = False
debug_msg = 'Vector table size matches sdk >=13'
logging.info(debug_msg)
return True
else:
return False
def check_for_embedded_softdevice(self):
logging.info('Checking for embedded softdevice.')
with open(common_paths.path_to_fw, 'rb') as f:
firmware_contents = f.read().hex()
softdevice_dir = os.path.join(
common_paths.resources_path,
'vendor',
'nordic',
'softdevices'
)
file_list = []
for root, dirs, files in os.walk(softdevice_dir):
for file in files:
file_list.append((file, os.path.join(root, file)))
softdevice_match = None
for one_file in file_list:
file = one_file[0]
softdevice_file = one_file[1]
with open(softdevice_file, 'rb') as f1:
softdevice_contents = f1.read().hex()
if softdevice_contents in firmware_contents:
softdevice_match = file.lower()
break
if (softdevice_match == None):
return
self.embedded_softdevice = True
self.estimated = False
debug_msg = 'Softdevice embedded within firmware:\n'
debug_msg += '\t\t\t\t' + softdevice_match
logging.info(debug_msg)
# Also get application code base.
self.get_app_base_from_softdevice()
# If softdevice is embedded, the AVT will be further down.
logging.info('Recomputing AVT due to embedded softdevice.')
fw_disassembler = FirmwareDisassembler()
fw_disassembler.analyse_vector_table(common_objs.app_code_base)
def get_app_base_from_softdevice(self):
# TODO: Implement
pass
def get_svc_num(self):
return
def get_svc_set(self):
self.vendor_svc_set = {
"sd_ant_stack_reset": "0xC0",
"sd_ant_event_get": "0xc1",
"sd_ant_channel_assign": "0xc2",
"sd_ant_channel_unassign": "0xc3",
"sd_ant_channel_open_with_offset": "0xc4",
"sd_ant_channel_close": "0xc5",
"sd_ant_rx_scan_mode_start": "0xc6",
"sd_ant_broadcast_message_tx": "0xc7",
"sd_ant_acknowledge_message_tx": "0xc8",
"sd_ant_burst_handler_request": "0xc9",
"sd_ant_pending_transmit_clear": "0xca",
"sd_ant_transfer_stop": "0xcb",
"sd_ant_network_address_set": "0xcc",
"sd_ant_channel_radio_freq_set": "0xcd",
"sd_ant_channel_radio_freq_get": "0xce",
"sd_ant_channel_radio_tx_power_set": "0xcf",
"sd_ant_prox_search_set": "0xd0",
"sd_ant_channel_period_set": "0xd1",
"sd_ant_channel_period_get": "0xd2",
"sd_ant_channel_id_set": "0xd3",
"sd_ant_channel_id_get": "0xd4",
"sd_ant_search_waveform_set": "0xd5",
"sd_ant_channel_search_timeout_set": "0xd6",
"sd_ant_search_channel_priority_set": "0xd7",
"sd_ant_active_search_sharing_cycles_set": "0xd8",
"sd_ant_active_search_sharing_cycles_get": "0xd9",
"sd_ant_channel_low_priority_rx_search_timeout_set": "0xda",
"sd_ant_adv_burst_config_set": "0xdb",
"sd_ant_adv_burst_config_get": "0xdc",
"sd_ant_lib_config_set": "0xdd",
"sd_ant_lib_config_clear": "0xde",
"sd_ant_lib_config_get": "0xdf",
"sd_ant_id_list_add": "0xe0",
"sd_ant_id_list_config": "0xe1",
"sd_ant_auto_freq_hop_table_set": "0xe2",
"sd_ant_event_filtering_set": "0xe3",
"sd_ant_event_filtering_get": "0xe4",
"sd_ant_active": "0xe5",
"sd_ant_channel_in_progress": "0xe6",
"sd_ant_channel_status_get": "0xe7",
"sd_ant_pending_transmit": "0xe8",
"sd_ant_cw_test_mode_init": "0xe9",
"sd_ant_cw_test_mode": "0xea",
"sd_ant_version_get": "0xeb",
"sd_ant_capabilities_get": "0xec",
"sd_ant_burst_handler_wait_flag_enable": "0xed",
"sd_ant_burst_handler_wait_flag_disable": "0xee",
"sd_ant_sdu_mask_set": "0xef",
"sd_ant_sdu_mask_get": "0xf0",
"sd_ant_sdu_mask_config": "0xf1",
"sd_ant_crypto_channel_enable": "0xf2",
"sd_ant_crypto_key_set": "0xf3",
"sd_ant_crypto_info_set": "0xf4",
"sd_ant_crypto_info_get": "0xf5",
"sd_ant_rfactive_notification_config_set": "0xf6",
"sd_ant_rfactive_notification_config_get": "0xf7",
"sd_ant_coex_config_set": "0xf8",
"sd_ant_coex_config_get": "0xf9",
"sd_ant_enable": "0xfa",
"SVC_ANT_RESERVED1": "0xfb",
"SVC_ANT_RESERVED2": "0xfc",
"sd_ant_extended0": "0xfd",
"SVC_ANT_EXTENDED1": "0xfe",
"SVC_ANT_EXTENDED2": "0xff"
}
# ==============================================================
def generate_output_metadata(self):
metadata = {
"embedded_softdevice": self.embedded_softdevice
}
return metadata
def reset(self):
self.embedded_softdevice = False | projectbtle/argXtract | argxtract/resources/vendor/nordic_ant/chipset_analyser.py | chipset_analyser.py | py | 8,728 | python | en | code | 25 | github-code | 36 | [
{
"api_name": "logging.info",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.stat",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "argxtract.common.paths.path_to_fw",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "argxtract... |
26478313784 | from django.db import models
from django.contrib.auth.models import User
from django.db import IntegrityError
import uuid
class Procedure(models.Model):
    """A user-owned procedure; the top-level container referenced by Page
    through the 'pages' related name."""
    title = models.CharField(max_length=255, blank=True)
    author = models.CharField(max_length=255, blank=True)
    # Stable external identifier, generated once and never user-editable.
    uuid = models.UUIDField(default=uuid.uuid4, editable=False)
    # Deleting the owning user cascades to their procedures.
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    last_modified = models.DateTimeField(auto_now=True)
    created = models.DateTimeField(auto_now_add=True)
    class Meta():
        app_label = 'api'
class Page(models.Model):
    """An ordered page within a Procedure.

    Saving a page propagates its last_modified timestamp to the owning
    Procedure so the procedure always reflects its newest page edit.
    """
    display_index = models.PositiveIntegerField()
    procedure = models.ForeignKey(Procedure, related_name='pages', on_delete=models.CASCADE)
    last_modified = models.DateTimeField(auto_now=True)
    created = models.DateTimeField(auto_now_add=True)
    def save(self, **kwargs):
        # Fix: kwargs (e.g. using=..., update_fields=...) were accepted but
        # silently dropped — forward them to Django's Model.save.
        super(Page, self).save(**kwargs)
        self.procedure.last_modified = self.last_modified
        self.procedure.save()
    class Meta:
        app_label = 'api'
        ordering = ['procedure', 'display_index']
class Concept(models.Model):
TYPES = (
('string', 'string'),
('boolean', 'boolean'),
('number', 'number'),
('complex', 'complex')
)
uuid = models.UUIDField(default=uuid.uuid4, null=False, blank=False, editable=False)
created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
name = models.CharField(max_length=255, null=False, blank=False)
display_name = models.CharField(max_length=255, null=False, blank=False)
description = models.TextField(null=True, blank=True)
data_type = models.CharField(max_length=16, choices=TYPES, null=True, blank=True)
mime_type = models.CharField(max_length=128, null=True, blank=True)
constraint = models.TextField(null=True, blank=True)
def save(self, **kwargs):
if self.data_type and (self.data_type, self.data_type) not in self.TYPES:
raise IntegrityError('Invalid data type')
super(Concept, self).save()
class Meta:
app_label = 'api'
class Element(models.Model):
TYPES = (
('DATE', 'DATE'),
('ENTRY', 'ENTRY'),
('SELECT', 'SELECT'),
('MULTI_SELECT', 'MULTI_SELECT'),
('RADIO', 'RADIO'),
('PICTURE', 'PICTURE'),
('PLUGIN', 'PLUGIN'),
('ENTRY_PLUGIN', 'ENTRY_PLUGIN')
)
CHOICE_TYPES = (
'SELECT',
'MULTI_SELECT',
'RADIO'
)
PLUGIN_TYPES = (
'PLUGIN',
'ENTRY_PLUGIN'
)
display_index = models.PositiveIntegerField()
element_type = models.CharField(max_length=12, choices=TYPES)
choices = models.TextField(null=True, blank=True)
concept = models.ForeignKey(Concept, null=True, related_name='elements')
question = models.TextField(null=True, blank=True)
answer = models.TextField(null=True, blank=True)
required = models.BooleanField(default=False)
image = models.TextField(null=True, blank=True)
audio = models.TextField(null=True, blank=True)
action = models.TextField(null=True, blank=True)
mime_type = models.CharField(max_length=128, null=True, blank=True)
page = models.ForeignKey(Page, related_name='elements', on_delete=models.CASCADE)
last_modified = models.DateTimeField(auto_now=True)
created = models.DateTimeField(auto_now_add=True)
def save(self, **kwargs):
if self.element_type:
if (self.element_type, self.element_type) not in self.TYPES:
raise IntegrityError('Invalid element type')
super(Element, self).save()
self.page.last_modified = self.last_modified
self.page.save()
class Meta:
app_label = 'api'
ordering = ['page', 'display_index']
class ShowIf(models.Model):
LOGICAL_TYPES = (
'AND',
'OR',
'NOT'
)
CRITERIA_TYPES = (
'EQUALS',
'GREATER',
'LESS'
)
page = models.ForeignKey(Page, related_name='show_if', on_delete=models.CASCADE)
last_modified = models.DateTimeField(auto_now=True)
created = models.DateTimeField(auto_now_add=True)
conditions = models.TextField()
def save(self, **kwargs):
super(ShowIf, self).save()
self.page.last_modified = self.last_modified
self.page.save()
class Meta:
app_label = 'api'
| protocolbuilder/sana.protocol_builder | src-django/api/models.py | models.py | py | 4,416 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "... |
23063743554 | import sys
from dataclasses import dataclass
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from src.exception import CustomException
from src.logger import logging
from src.utils import save_object
import os
@dataclass
class DataTransformationConfig:
    """Holds file-system locations used by the data-transformation step."""
    # Deliberately unannotated, so this is a plain class attribute shared by
    # all instances rather than a dataclass field: the path where the fitted
    # preprocessing pipeline is pickled.
    preprocessor_obj_file_path = os.path.join('artifacts', "preprocessor.pkl")
class DataTransformation:
def __init__(self):
self.data_transformation_config = DataTransformationConfig()
def get_data_transformer_object(self):
'''
This function is responsible for data tranasformation
'''
try:
numerical_columns = ['tempo', 'valence']
num_pipeline = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="median")),
("scaler", StandardScaler())
]
)
logging.info("Read and transformed the relevant data")
preprocessor = ColumnTransformer(
transformers=[
("num_pipeline", num_pipeline, numerical_columns)
]
)
return preprocessor
except Exception as E:
raise CustomException(E, sys)
def initiate_data_transformation(self, train_path, test_path):
try:
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
logging.info("Read train and test data completed")
logging.info("Obtaining preprocessing object")
preprocessing_obj = self.get_data_transformer_object()
numerical_columns = ['tempo', 'valence']
logging.info(
f"Applying preprocessing object on training dataframe and testing dataframe."
)
input_df_train = train_df[numerical_columns]
input_df_test = test_df[numerical_columns]
input_feature_train = preprocessing_obj.fit_transform(input_df_train)
input_feature_test = preprocessing_obj.transform(input_df_test)
emotional_stage_train = (input_feature_train[:, 1] + input_feature_train[:, 0]) / 2
emotional_stage_test = (input_feature_test[:, 1] + input_feature_test[:, 0]) / 2
emotions_train = np.where(emotional_stage_train > 0, 1, 0)
emotions_test = np.where(emotional_stage_test > 0, 1, 0)
input_feature_train = np.column_stack((input_feature_train, emotions_train))
input_feature_test = np.column_stack((input_feature_test, emotions_test))
logging.info("Saved preprocessing object")
save_object(
file_path=self.data_transformation_config.preprocessor_obj_file_path,
obj=preprocessing_obj
)
return (
input_feature_train,
input_feature_test,
self.data_transformation_config.preprocessor_obj_file_path
)
except Exception as E:
raise CustomException(E, sys)
| AnshulDubey1/Music-Recommendation | src/components/data_transformation.py | data_transformation.py | py | 3,195 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "sklearn.pipeline.Pipe... |
# This script scrapes Kenyan startups from https://startuplist.africa/
# Import required libraries
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup as soup
from urllib.request import Request, urlopen
from selenium import webdriver
# URLs
url = "https://startuplist.africa/startups-in-kenya"

# Driver path (raw string so the backslashes are taken literally)
driver_path = r"C:\webdrivers\chromedriver.exe"

# Scrape the paginated startup table into a single DataFrame.
# Fixes: DataFrame.append was deprecated and removed in pandas 2.0, so page
# frames are collected in a list and concatenated once; the "next" button is
# re-located on every page (the old cached element could go stale after
# navigation); the driver is closed in a finally block so a mid-scrape error
# no longer leaks the browser process.
driver = webdriver.Chrome(driver_path)
try:
    driver.get(url=url)
    # Wait for page content to load
    driver.implicitly_wait(40)

    frames = []
    # Get data from first page
    frames.append(pd.read_html(driver.page_source)[0])

    # Loop through the remaining pages
    for _ in range(12):
        driver.find_element_by_class_name("pagination-next").click()
        frames.append(pd.read_html(driver.page_source)[0])

    df = pd.concat(frames, ignore_index=True)
    print(df)

    # Save data
    df.to_csv("./data/startups.csv")
finally:
    driver.close()
| CharlesIvia/startups_in_kenya | scrapper/companies_scrapper.py | companies_scrapper.py | py | 1,120 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pandas.read_html",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.... |
33207354162 | import Bio
from Bio.Blast import NCBIWWW,NCBIXML
from Bio.Seq import Seq
from Bio import SeqIO
def find_stop_codon(seq, start, stop_codons):
    """Return the index of the first in-frame stop codon at or after *start*,
    scanning in steps of three, or None when no stop codon is found."""
    return next(
        (pos for pos in range(start, len(seq), 3) if seq[pos:pos + 3] in stop_codons),
        None,
    )
def find_orf(sequence, start_codon="ATG", stop_codons=("TAA", "TAG", "TGA")):
    """Find the longest ORF in the given sequence.

    Every occurrence of *start_codon* is paired with the first in-frame stop
    codon after it; the longest such frame is returned WITHOUT the stop codon
    itself (matching the original behavior), or "" when none exists.

    Fix: the stop-codon default is now a tuple, removing the mutable default
    argument; membership tests behave identically and list arguments still work.
    """
    longest_orf = ""
    for i in range(len(sequence)):
        if sequence[i:i + 3] != start_codon:
            continue
        stop = find_stop_codon(sequence, i + 3, stop_codons)
        if stop is None:
            continue
        orf = sequence[i:stop]
        if len(orf) > len(longest_orf):
            longest_orf = orf
    return longest_orf
def find_orf2(sequence):
    """Return every ORF in *sequence*: each "ATG" occurrence paired with its
    first in-frame stop codon, with the stop codon included."""
    stop_codons = {"TAA", "TAG", "TGA"}
    orfs = []
    starts = (i for i in range(len(sequence)) if sequence[i:i + 3] == "ATG")
    for start in starts:
        for end in range(start + 3, len(sequence), 3):
            if sequence[end:end + 3] in stop_codons:
                orfs.append(sequence[start:end + 3])
                break
    return orfs
def find_longest_shortest(records):
    """Return (longest, shortest) records ranked by len(record.seq).

    Uses the builtin max/min with a key function instead of a manual scan.
    Ties keep the first record encountered, matching the original strict
    comparisons. Returns (None, None) for an empty input, as before.
    """
    records = list(records)
    if not records:
        return None, None
    longest_record = max(records, key=lambda r: len(r.seq))
    shortest_record = min(records, key=lambda r: len(r.seq))
    return longest_record, shortest_record
def main():
    """Parse dna2.fasta, report the longest/shortest records, then print each
    record with its longest ORF (if any).

    Reads the FASTA file from the current working directory via Bio.SeqIO.
    """
    records = list(SeqIO.parse("dna2.fasta", "fasta"))
    longest_record, shortest_record = find_longest_shortest(records)
    print(f"The longest record is {longest_record.id} with length {len(longest_record.seq)}")
    print(f"The shortest record is {shortest_record.id} with length {len(shortest_record.seq)}")
    for i, record in enumerate(records):
        print(f"Record number: {i+1}")
        print(f"ID: {record.id}")
        print(f"Sequence: {record.seq}")
        # find_orf expects a plain string, not a Bio.Seq object.
        orf = find_orf(str(record.seq))
        len_orf = len(orf)
        if orf:
            print(f"ORF: {orf}", "the length of this orf is:", len_orf)
        else:
            print("No ORF found")
        print()
        #for i in range(3):
            #orfs = find_orf(record.seq[i:])
            #print(f"ORFs starting at position {i}:", orfs)
if __name__ == "__main__":
main() | jkjkciog/transcripomics | Chat GPT model improvements (non functional).py | Chat GPT model improvements (non functional).py | py | 2,540 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "Bio.SeqIO.parse",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 55,
"usage_type": "name"
}
] |
29836727610 | import logging
import requests
import datetime
from aiogram import Bot, Dispatcher, executor, types
from tg_info import info
from bs4 import BeautifulSoup
# SECURITY(review): the OpenWeatherMap key and the Telegram bot token are
# hard-coded in source — move them to environment variables / a secrets file.
weather_token = "6e8d79779a0c362f14c60a1c7f363e29"
API_TOKEN = "5158040057:AAEtt8ByoaJdYMy09MpupqpNAxiCAQnGj-0"
# Configure logging
logging.basicConfig(level=logging.INFO)
# Initialize bot and dispatcher
bot = Bot(token=API_TOKEN)
dp = Dispatcher(bot)
@dp.message_handler(commands=['start', 'help'])
async def send_welcome(message: types.Message):
    """Greet the user on /start or /help and show the two-row main menu keyboard."""
    await message.reply("Привіт!\n")
    menu = types.ReplyKeyboardMarkup(resize_keyboard=True)
    for row in (["Погода\U0001F30D", "Курс UAH\U0001F3E6"],
                ["Covid-19\U0001f9a0", "Офіційні джерела\U00002139"]):
        menu.add(*row)
    await message.answer("Обери одну з функцій внизу: ", reply_markup=menu)
@dp.message_handler(lambda message: message.text == "Погода\U0001F30D")
async def name_city(message: types.Message):
    # "Погода" button: ask for a city name; the actual weather lookup happens
    # in without_puree below, which catches all remaining text messages.
    # NOTE(review): several handlers below reuse the name `name_city`; the
    # decorators register each one before rebinding, but distinct names would be clearer.
    await message.reply("Введіть назву міста: ")
@dp.message_handler(lambda message: message.text != "Курс UAH\U0001F3E6"
                    and message.text != "Covid-19\U0001f9a0"
                    and message.text != "Офіційні джерела\U00002139")
async def without_puree(message: types.Message):
    """Treat any other text as a city name and reply with its current weather.

    Queries OpenWeatherMap (metric units); on any lookup/parse failure the
    user is asked to re-check the city name.
    """
    try:
        r1 = requests.get(
            f"http://api.openweathermap.org/data/2.5/weather?q={message.text}&appid={weather_token}&units=metric")
        data = r1.json()
        city = data["name"]
        temperature = round(data["main"]["temp"])
        humidity = round(data["main"]["humidity"])
        wind = round(data["wind"]["speed"])
        await message.reply(f"***{datetime.datetime.now().strftime('%b %d %Y %H:%M')}***\n"
                            f"Погода в місті: {city}\n\U0001F321Температура: {temperature} C°\n"
                            f"\U0001F4A7Вологість повітря: {humidity} %\n"
                            f"\U0001F32AВітер: {wind} м/с\n ")
    except Exception:
        # Bug fix: a bare `except:` also swallowed BaseException subclasses
        # (e.g. CancelledError, KeyboardInterrupt). Keep the user-facing
        # fallback, but only for ordinary errors.
        await message.reply("\U0001F3D9 Провірте назву міста \U0001F3D9")
@dp.message_handler(lambda message: message.text == "Курс UAH\U0001F3E6")
async def name_city(message: types.Message):
    # "Курс UAH" button: scrape cash exchange rates (USD/EUR/RUB) from minfin.com.ua.
    url = 'https://minfin.com.ua/ua/currency/'
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'lxml')
    nby = soup.find_all('span', class_='mfcur-nbu-full-wrap')
    buy = soup.find_all('td', class_='mfm-text-nowrap')
    sell = soup.find_all('td', class_='mfm-text-nowrap')
    # NOTE(review): the positional indices and character slices below are tied
    # to the current page layout — fragile; verify after any site redesign.
    await message.reply(f"\U0001F4B5 USD:\n НБУ: {nby[0].text[1:8]} Купівля: {buy[1].text[1:8]} Продаж:{sell[1].text[14:20]}\n\n"
                        f"\U0001F4B6 EUR:\n НБУ: {nby[1].text[1:8]} Купівля: {buy[3].text[1:8]} Продаж:{sell[3].text[14:20]}\n\n"
                        f"\U000020BD RUB:\n НБУ: {nby[2].text[1:8]} Купівля: {buy[5].text[1:7]} Продаж:{sell[5].text[13:19]}")
@dp.message_handler(lambda message: message.text == "Covid-19\U0001f9a0")
async def name_city(message: types.Message):
    # "Covid-19" button: scrape worldwide totals from index.minfin.com.ua.
    url = 'https://index.minfin.com.ua/ua/reference/coronavirus/geography/'
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'lxml')
    # NOTE(review): this local `info` shadows the `info` imported from tg_info.
    info = soup.find_all('td', class_='bg-total')
    # NOTE(review): cell positions (2/6/4) depend on the page layout — fragile.
    await message.reply(f"Статистака Covid-19 у світі на сьогодні: "
                        f"{datetime.datetime.now().strftime('%b %d %Y %H:%M')}\n\n"
                        f"\U0001f637Захворіли: {info[2].text}\n"
                        f"\U0001f600Одужали: {info[6].text}\n"
                        f"\U0001f494Померли: {info[4].text}\n")
@dp.message_handler(lambda message: message.text == "Офіційні джерела\U00002139")
async def name_city(message: types.Message):
    # "Офіційні джерела" button: send the static source list imported from tg_info.
    await message.reply(info)


if __name__ == '__main__':
    # Long-polling entry point; skip_updates drops messages queued while offline.
    executor.start_polling(dp, skip_updates=True)
| sivtv/telegrambot | main.py | main.py | py | 4,353 | python | uk | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "aiogram.Bot",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "aiogram.Dispatcher"... |
71929861545 | from torch import nn
import torch.nn.functional as F
class PeriodDiscriminator(nn.Module):
    """HiFi-GAN period sub-discriminator.

    Folds a 1-D waveform into a 2-D (frames x period) grid and scores it
    with a stack of weight-normalized strided 2-D convolutions.
    """

    def __init__(self, period):
        super(PeriodDiscriminator, self).__init__()
        self.period = period
        stages = []
        channels_in = 1
        for stage in range(4):
            channels_out = int(2 ** (5 + stage + 1))  # 64, 128, 256, 512
            stages.append(nn.utils.weight_norm(
                nn.Conv2d(channels_in, channels_out, kernel_size=(5, 1), stride=(3, 1))))
            stages.append(nn.LeakyReLU(0.2))
            channels_in = channels_out
        self.layer = nn.Sequential(*stages)
        self.output = nn.Sequential(
            nn.utils.weight_norm(nn.Conv2d(channels_out, 1024, kernel_size=(5, 1))),
            nn.LeakyReLU(0.2),
            nn.utils.weight_norm(nn.Conv2d(1024, 1, kernel_size=(3, 1))),
        )

    def forward(self, x):
        batch_size = x.shape[0]
        # Right-pad so the time axis divides evenly by the period.
        # NOTE(review): this pads a full extra period when the length is
        # already divisible — confirm that matches the intended reference.
        pad = self.period - (x.shape[-1] % self.period)
        padded = F.pad(x, (0, pad))
        grid = padded.view(batch_size, -1, self.period).contiguous().unsqueeze(1)
        return self.output(self.layer(grid))
| cuongnguyengit/hifigan | model/period_discriminator.py | period_discriminator.py | py | 1,062 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.utils.weight_norm",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils... |
8290510873 |
import os
import json
import errno
import itertools
import numpy as np
import pandas as pd
def export_labels(root_dir: str):
    """Transform `Balloon` VIA annotations into Faster R-CNN label files.

    Reads ``<root_dir>/via_region_data.json`` and writes one space-separated
    ``X1 Y1 X2 Y2`` bounding-box file per image under ``<root_dir>/labels``.
    """
    labels_dir = os.path.join(root_dir, "labels")
    if not os.path.exists(labels_dir):
        try:
            os.makedirs(labels_dir, exist_ok=True)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise ValueError(e)
    json_file = os.path.join(root_dir, "via_region_data.json")
    with open(json_file) as f:
        images_annotations = json.load(f)
    for _, v in images_annotations.items():
        filename = os.path.splitext(v["filename"])[0]
        annotations = v["regions"]
        # Collect rows first, then build the frame once: DataFrame.append was
        # deprecated and removed in pandas 2.0.
        rows = []
        for _, anno in annotations.items():
            # NOTE: stripped under `python -O`; the dataset is expected to
            # carry no region attributes.
            assert not anno["region_attributes"]
            anno = anno["shape_attributes"]
            px = anno["all_points_x"]
            py = anno["all_points_y"]
            # Axis-aligned bounding box of the annotated polygon.
            rows.append({'X1': np.min(px), 'Y1': np.min(py),
                         'X2': np.max(px), 'Y2': np.max(py)})
        # Bug fix: the old astype dict listed 'Y2' twice and never cast 'Y1',
        # so Y1 stayed integer while the rest became float64.
        faster_rcnn_df = pd.DataFrame(rows, columns=['X1', 'Y1', 'X2', 'Y2']).astype('float64')
        faster_rcnn_df.to_csv(os.path.join(
            labels_dir, filename + '.txt'), header=None, index=None, sep=' ', mode='w+')
def move_images(src_dir: str, dest_dir: str):
    """Relocate every regular file from *src_dir* into *dest_dir*.

    Raises ValueError when *src_dir* is missing; *dest_dir* is created on
    demand. Subdirectories of *src_dir* are left in place.
    """
    if not os.path.exists(src_dir):
        raise ValueError(f"Directory {src_dir} does not exist")
    if not os.path.exists(dest_dir):
        try:
            os.makedirs(dest_dir, exist_ok=True)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise ValueError(e)
    for entry in os.listdir(src_dir):
        src_path = os.path.join(src_dir, entry)
        if not os.path.isfile(src_path):
            continue  # skip directories (e.g. the labels/ or images/ folders)
        os.rename(src_path, os.path.join(dest_dir, os.path.basename(entry)))
if __name__ == '__main__':
    # Convert annotations first, then move the images into images/ subfolders
    # (move_images skips directories, so the freshly created labels/ dirs stay put).
    # export labels for training subset
    export_labels("../data/balloon/train")
    # export labels for validation subset
    export_labels("../data/balloon/val")
    # relocate images in training directory
    move_images("../data/balloon/train", "../data/balloon/train/images")
    # relocate images in validation directory
    move_images("../data/balloon/val", "../data/balloon/val/images")
| AndreasKaratzas/faster-rcnn | lib/balloon.py | balloon.py | py | 2,836 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number":... |
34408858736 | from django.conf.urls import include, url
from teacherMan import views as tech
app_name = 'teacher'

# Routes for the teacher-management views.
# NOTE(review): these regexes are unanchored (no ^ / $), so e.g. r'data' also
# matches any path that merely starts with "data" — confirm this is intended.
urlpatterns = [
    url(r'main', tech.main, name='teacher'),
    url(r'data', tech.getData),
    url(r'edit', tech.editTech),
    url(r'delTechInfo', tech.delTech),
    url(r'addTechInfo', tech.addTech),
    #url(r'fillSelect', std.fillSelect)
]
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "teacherMan.views.main",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "teacherMan.views",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.... |
27954967699 | import time
import os
import pandas as pd
import requests as re
import numpy as np
import asyncio
from aiohttp import ClientSession
from loguru import logger
url_general = 'https://www.fundamentus.com.br/resultado.php'
url_paper = 'https://www.fundamentus.com.br/detalhes.php?papel='
data_to_save = list()
def get_papers() -> pd.DataFrame:
    """Fetch the fundamentus screener table, indexed by paper symbol.

    Keeps only companies with recent trading activity and a positive quote.
    (`re` is the module-level alias for `requests`.)
    """
    response = re.get(url_general)
    table = pd.read_html(response.content)[0]
    # Filter the companies without recent activity
    table = table[table['Liq.2meses'] != '000']
    table.set_index('Papel', inplace=True)
    return table[table['Cotação'] > 0]
async def get_paper_info(paper: str, save=False) -> pd.Series:
    """Scrape the fundamentus detail page for *paper* and return all fields
    as a Series (named after the paper).

    When *save* is True the Series is also appended to the module-level
    ``data_to_save`` buffer.
    """
    async with ClientSession() as session:
        # Small pauses to avoid hammering the server. Bug fix: time.sleep()
        # blocked the whole event loop; asyncio.sleep keeps other fetches running.
        await asyncio.sleep(0.1)
        resp = await session.request(method='GET', url=url_paper + paper)
        await asyncio.sleep(0.1)
        html = await resp.text()
    df_list = pd.read_html(html)
    cleaner = lambda x: x.replace('?', '') if isinstance(x, str) else x
    # Separate and clean the different tables found on the page
    df0, df1, df2, df3, df4 = (df.applymap(cleaner) for df in df_list[:5])
    # Drop some headers
    df2.drop(0, inplace=True)
    df3.drop(0, inplace=True)
    df4.drop([0, 1], inplace=True)
    # Bug fix: Series.append was removed in pandas 2.0 — use pd.concat instead.
    index = pd.concat([
        df0[0][1:],
        df0[2],
        pd.concat([df1[0], df1[2]]),
        'oscilations_' + df2[0],
        'indicator_' + pd.concat([df2[2], df2[4]]),
        pd.concat([df3[0], df3[2]]),
        'demonstrative12m_' + df4[0],
        'demontrative3m_' + df4[2]], ignore_index=True)
    data = pd.concat([df0[1][1:], df0[3],
                      df1[1], df1[3],
                      df2[1], df2[3], df2[5],
                      df3[1], df3[3],
                      df4[1], df4[3]]).values
    output = pd.Series(index=index, data=data, name=paper)
    if save:
        data_to_save.append(output)
    logger.info(f'Terminou: {paper}')
    return output
def load_csv(path='./data/data.csv', index_col='Unnamed: 0') -> pd.DataFrame:
    """Load a previously saved CSV, using the unnamed first column as the index."""
    frame = pd.read_csv(path, index_col=index_col)
    return frame
async def wrapper() -> bool:
    # One scraping pass: fetch every paper not yet in partial_data.csv.
    # Returns True when the pass failed partway (caller should retry) and
    # False when everything is already up to date or the pass completed.
    status = False
    papers = get_papers().index
    # Try to load data from previous attempts
    try:
        partial = load_csv(path='partial_data.csv').index
        papers = papers.difference(partial)
        if papers.empty:
            logger.info('You are up to date')
            return status
    except FileNotFoundError:
        logger.info('Arquivo partial_data.csv não encontrado, um novo será gerado')
    logger.info(f'Procurando por {papers.shape}...')
    tasks = list()
    for paper in papers:
        # save=True makes each fetched Series land in the module-level
        # data_to_save buffer, even if gather() fails later.
        tasks.append(get_paper_info(paper, save=True))
    try:
        await asyncio.gather(*tasks)
    # Errors collecting data
    # NOTE(review): bare `except:` also catches CancelledError/KeyboardInterrupt —
    # consider narrowing to Exception.
    except:
        df = pd.DataFrame(data_to_save)
        df.to_csv('partial_data.csv', mode='a')
        logger.info('Dados parciais salvos')
        status = True
    # Logs about every operation
    finally:
        df = pd.DataFrame(data_to_save)
        success = df.shape[0]/papers.shape[0]
        logger.info(f'{df.shape[0]} Salvos')
        logger.info(f'Taxa de resposta: {np.round(100*success)}%')
    return status
def run_wrapper():
    """Retry wrapper() until every paper is scraped, then consolidate
    partial_data.csv into data/data_<d>_<m>_<y>.csv and delete the partial file."""
    # Bug fix: get_paper_info()/wrapper() append to the MODULE-LEVEL
    # data_to_save buffer. Without `global`, the assignment below created a
    # function-local list, so the shared buffer was never reset between
    # attempts and the final DataFrame here was always empty.
    global data_to_save
    status = True
    while status:
        data_to_save = list()  # fresh buffer for this attempt
        status = asyncio.run(wrapper())
        if status:
            logger.info('Refazendo operação')
            time.sleep(5)
        # All papers reached
        else:
            df = pd.DataFrame(data_to_save)
            df.to_csv('partial_data.csv', mode='a')
            today = pd.Timestamp.today()
            day, month, year = today.day, today.month, today.year
            # Save data_day_month_year.csv at data directory
            final_data = load_csv('partial_data.csv')
            final_data = final_data[final_data.index.notna()]
            final_data.to_csv(f'../data/data_{day}_{month}_{year}.csv')
            # Remove partial result
            os.remove('partial_data.csv')
            logger.info('DADOS SALVOS NA PASTA DATA')
if __name__ == '__main__':
    # Script entry point: scrape everything and write the dated CSV.
    run_wrapper()
| Muriloozol/InvestCode | scrapper/scrapper.py | scrapper.py | py | 4,364 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.read_html",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "aiohttp.ClientSes... |
25625656419 | import logging
import random
import json
import string
import threading
import urllib.request
import spacy
from datetime import datetime
from tinydb import TinyDB, Query
from tinydb.operations import add
from urllib.parse import quote
from urllib.error import HTTPError
import pymorphy2
from operator import itemgetter
from telegram import InlineKeyboardMarkup
from telegram import ReplyKeyboardMarkup
from telegram import InlineKeyboardButton
from telegram import ParseMode
from telegram import ForceReply
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import MessageHandler
from telegram.ext import Filters
from telegram.ext import CallbackQueryHandler
# Telegram id of this bot account and of the shared "common" group.
BOT_ID = 1105629394
COMMON_GROUP_ID = -1001314817032
# Tail of the setup prompt (MarkdownV2-escaped); the user mention is prepended.
START_STRING = ', введи язык и количество раундов в формате "<ru или en\> <число от 1 до 100000\>"\. Прочитать' \
               ' правила игры: /rules'
WANNA_JOIN_STRING = 'Хочу присоединиться'

logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)

# Bot token is kept out of the repo in token.txt (first line).
token_file = open("token.txt", "r")
token = token_file.readline().rstrip('\n')
updater = Updater(token=token, use_context=True)
token_file.close()
dispatcher = updater.dispatcher

# Morphology backends: pymorphy2 for Russian lemmas, spaCy for English.
morph = pymorphy2.MorphAnalyzer()
nlp = spacy.load('en_core_web_sm')

# Fallback phrase pools (one phrase per line), used when the phrase API fails.
phrases_ru = []
phrases_file = open('phrases_ru.txt', 'r', encoding='utf-8')
for line in phrases_file:
    phrases_ru.append(line.rstrip('\n'))
phrases_file.close()
print('Done reading phrases from file')

phrases_en = []
phrases_file = open('phrases_en.txt', 'r', encoding='utf-8')
for line in phrases_file:
    phrases_en.append(line.rstrip('\n'))
phrases_file.close()
print('Done reading phrases from file')
def get_phrase(lang):
    """Return one random phrase from the preloaded pool for *lang* ('ru' or anything else = 'en')."""
    pool = phrases_ru if lang == 'ru' else phrases_en
    return pool[random.randint(0, len(pool) - 1)]
def get_phrases(amount, lang):
    """Return *amount* shuffled phrase options for a round.

    Tries the play-name.com API first; on any network or decode failure
    falls back to the locally loaded phrase lists.
    """
    res = []
    try:
        req = urllib.request.urlopen('https://play-name.com/PlayEngine/api/', data=str.encode('lang=' + lang))
        words = json.loads(req.read().decode('utf-8'))
        for word in words[lang].keys():
            res.append(word)
    except (OSError, json.JSONDecodeError, KeyError):
        # Bug fix: only HTTPError was caught before, so DNS/connection
        # failures (urllib.error.URLError, an OSError subclass) or a missing
        # language key crashed the handler instead of using the fallback pool.
        for i in range(amount):
            res.append(get_phrase(lang))
    random.shuffle(res)
    return res[:amount]
# In-memory games keyed by chat id; persistent usage counters live in TinyDB.
games = dict()

db = TinyDB('db.json')
# Seed each counter document on first run (groups starts at 1, the rest at 0).
if not db.search(Query().groups.exists()):
    db.insert({'groups': 1})
if not db.search(Query().games.exists()):
    db.insert({'games': 0})
if not db.search(Query().rounds.exists()):
    db.insert({'rounds': 0})
if not db.search(Query().joins.exists()):
    db.insert({'joins': 0})
def escape_markdown(s, escape_star=True):
    """Backslash-escape Telegram MarkdownV2 special characters in *s*.

    With ``escape_star=False`` the '*' character passes through unescaped so
    bold markup survives.
    """
    special = '_*[]()~`>#+-=|{}.!'
    out = []
    for ch in s:
        if ch in special and not (ch == '*' and not escape_star):
            out.append('\\')
        out.append(ch)
    return ''.join(out)
def user_name(user, mention=False):
    """Full display name of *user*; as a MarkdownV2 mention link when mention=True."""
    full = user.first_name
    if user.last_name is not None:
        full = full + ' ' + user.last_name
    if not mention:
        return full
    return '[' + escape_markdown(full) + '](tg://user?id=' + str(user.id) + ')'
def normalize(s):
    """Return *s* lowercased with all ASCII punctuation removed."""
    return s.translate(str.maketrans('', '', string.punctuation)).lower()
def get_normal_form(s, lang):
    """Lemmatize *s* (pymorphy2 for 'ru', spaCy for 'en') and strip punctuation.

    Returns None implicitly for any other language code.
    """
    if lang == 'en':
        lemma = nlp(s.lower())[0].lemma_
        return normalize(lemma)
    if lang == 'ru':
        return normalize(morph.parse(s.lower())[0].normal_form)
def get_roots(s, lang):
    # Return the morphological root(s) of *s*.
    # English: the lemma is treated as its own single root.
    # Russian: scrape root morphemes from morphemeonline.ru; returns [] on
    # empty input or lookup failure.
    norm = get_normal_form(s, lang)
    if lang == 'en':
        return [norm]
    if len(norm) == 0:
        return []
    try:
        # Site layout: /<first letter uppercased>/<word>
        url = 'http://morphemeonline.ru/' + quote(norm[0].upper() + '/' + norm)
        req = urllib.request.urlopen(url)
    except HTTPError:
        return []
    page_code = req.read().decode('utf-8')
    # Walk the HTML, collecting the text that follows every 'корень' (root)
    # marker up to the next tag. Fragile by nature — depends on site markup.
    index = 0
    roots = []
    while index < len(page_code):
        pos = page_code.find('title="корень"', index)
        if pos == -1:
            break
        pos += len('title="корень"') + 1
        root = ''
        while page_code[pos] != '<':
            root += page_code[pos]
            pos += 1
        index = pos
        roots.append(root)
    return roots
def check_roots(root1, root2, lang):
    """Decide whether two roots count as the same root.

    English: exact match. Russian: roots of length <= 3 must match exactly;
    longer roots match when they agree on all but (at most) the last
    character of the shorter one.
    """
    if lang == 'en':
        return root1 == root2
    if lang == 'ru':
        shorter = min(len(root1), len(root2))
        if shorter <= 3:
            return root1 == root2
        # Length of the common prefix, capped at the shorter root's length.
        common = next((i for i, (a, b) in enumerate(zip(root1, root2)) if a != b),
                      shorter)
        return common + 1 >= shorter
def print_top(update, context, top):
    """Send the current top-10 scoreboard to the chat."""
    group_id = update.effective_chat.id
    text = 'Топ 10 по количеству очков:\n'
    text += ''.join(user_name(player) + ' — ' + str(points) + '\n'
                    for player, points in top)
    context.bot.send_message(chat_id=group_id, text=text)
def send_start_game_message(update, context):
    # Prompt the user (via ForceReply) for "<lang> <rounds>"; the reply is
    # parsed in check_message, which creates the Game.
    context.bot.send_message(chat_id=update.effective_chat.id,
                             text=user_name(update.effective_user, mention=True) + START_STRING,
                             reply_markup=ForceReply(selective=True),
                             parse_mode=ParseMode.MARKDOWN_V2)
def end_round(group_id):
    """Clear all per-round state for the game in *group_id* and cancel its timer."""
    game = games[group_id]
    game.round_going = False
    # Reset the per-round collections (whole-game state is untouched).
    for attr in ('words_options', 'words', 'guessed', 'roots'):
        setattr(game, attr, [])
    if game.timer is not None:
        game.timer.cancel()
def restart_round(update, context):
    # Fired by the inactivity threading.Timer: the leader idled too long, so
    # announce it, wipe the round state and start a fresh round.
    group_id = update.effective_chat.id
    context.bot.send_message(chat_id=group_id, text='Ведущий слишком долго был неактивным, начинаем новый раунд')
    end_round(group_id)
    start_round(update, context, True)
def add_points(group_id, user, score):
    """Credit *score* points to *user* and keep the top-10 leaderboard in sync."""
    game = games[group_id]
    game.participants[user] += score
    # Update the user's leaderboard entry in place, or append a new one.
    for entry in game.top:
        if entry[0].id == user.id:
            entry[1] += score
            break
    else:
        game.top.append([user, game.participants[user]])
    game.top = sorted(game.top, key=itemgetter(1), reverse=True)[:10]
start_time = datetime.now()


def dont_spam():
    # Startup throttle: True while fewer than 2 seconds have passed since
    # start_time (set at import and reset once right after polling starts).
    # NOTE(review): start_time is never advanced afterwards, so this only
    # suppresses updates in the first seconds after launch — confirm intended.
    cur_time = datetime.now()
    if cur_time.timestamp() - start_time.timestamp() < 2:
        return True
    return False
class Game:
    """Mutable state of one running game in a single chat."""

    def __init__(self, lang, rounds):
        # Whole-game configuration and scoring state.
        self.lang = lang                  # 'ru' or 'en'
        self.rounds = rounds              # rounds remaining before the game ends
        self.participants = dict()        # user -> accumulated score
        self.top = []                     # leaderboard as [user, score] pairs
        self.leader_candidates = set()    # users eligible to lead the next round
        self.leader = None                # current round's leader (a user)
        self.starter_id = None            # user id of the game's admin
        # Per-round state, reset by end_round().
        self.words_options = []           # numbered phrase options offered to the leader
        self.words = []                   # words of the chosen hidden phrase
        self.guessed = []                 # parallel guessed-flags for self.words
        self.roots = []                   # roots the leader must not use
        self.round_going = False
        self.timer = None                 # inactivity threading.Timer
def start_game(update, context):
    # /start_game: refuse if a game already runs here, otherwise bump the
    # games counter and prompt the sender for "<lang> <rounds>" (the actual
    # Game object is created in check_message when the reply arrives).
    if dont_spam():
        return
    group_id = update.effective_chat.id
    if group_id in games:
        context.bot.send_message(chat_id=group_id, text='Игра уже идет в этом чате!')
    else:
        db.update(add('games', 1), Query().games.exists())
        send_start_game_message(update, context)
def join_game(update, context, secondary=False, callback_user=None):
    """Add a user to the running game in this chat.

    When invoked from a callback (*secondary* True) the joining user is
    *callback_user*; otherwise it is the message sender. The new player
    also becomes a leader candidate.
    """
    if dont_spam():
        return
    group_id = update.effective_chat.id
    if secondary:
        user = callback_user
    else:
        user = update.effective_user
    if group_id not in games:
        context.bot.send_message(chat_id=group_id, text='В этом чате не идет игры')
        return
    game = games[group_id]
    if user in game.participants:
        context.bot.send_message(chat_id=group_id, text=user_name(user) + ', ты уже в игре')
        return
    # Bug fix: register the resolved `user`, not update.effective_user — the
    # membership check above uses `user`, and the two may differ whenever a
    # caller passes a distinct callback_user.
    game.participants[user] = 0
    game.leader_candidates.add(user)
    db.update(add('joins', 1), Query().joins.exists())
    context.bot.send_message(chat_id=group_id, text=user_name(user) + ' присоединился к игре!')
def start_round(update, context, secondary=False):
    # Begin a round: pick a leader, offer six phrase options via an inline
    # keyboard and arm a 60-second selection timer. *secondary* marks
    # bot-initiated restarts (skips the membership check and stats counter).
    if dont_spam():
        return
    group_id = update.effective_chat.id
    user = update.effective_user
    if group_id not in games:
        context.bot.send_message(chat_id=group_id, text='В этом чате не идет игры!')
        return
    game = games[group_id]
    if game.round_going:
        context.bot.send_message(chat_id=group_id, text='В этом чате уже идет раунд!')
        return
    if not secondary and user not in game.participants:
        context.bot.send_message(chat_id=group_id, text=user_name(user) + ', сначала присоединись к игре!')
        return
    if not secondary:
        db.update(add('rounds', 1), Query().rounds.exists())
    # Leader rotation: once everyone has led, refill candidates from all
    # participants; avoid re-picking the previous leader when possible.
    if len(game.leader_candidates) == 0:
        game.leader_candidates = set(game.participants.keys())
    if len(game.leader_candidates) == 1:
        leader = random.choice(tuple(game.leader_candidates))
    else:
        leader = random.choice(tuple(game.leader_candidates.difference(set([game.leader]))))
    game.leader = leader
    phrases_amount = 6
    options = get_phrases(phrases_amount, game.lang)
    # Row 0: "show words" button; row 1: one numbered button per option.
    keyboard_markup = InlineKeyboardMarkup([[], []])
    for i in range(phrases_amount):
        game.words_options.append(str(i + 1) + '. ' + options[i])
        keyboard_markup.inline_keyboard[1].append(InlineKeyboardButton(str(i + 1), callback_data=str(i + 1)))
    keyboard_markup.inline_keyboard[0].append(InlineKeyboardButton("Посмотреть слова",
                                                                   callback_data="words"))
    context.bot.send_message(chat_id=group_id,
                             text='Раунд начался, ведущим был выбран ' + user_name(leader, mention=True),
                             reply_markup=keyboard_markup, parse_mode=ParseMode.MARKDOWN_V2)
    # If the leader never picks a phrase, restart the round after a minute.
    game.timer = threading.Timer(60.0, restart_round, args=[update, context])
    game.timer.start()
def leave_game(update, context):
    # /leave_game: drop the sender from the game; hand over the admin role
    # and/or restart the round when the leaver held either position, and end
    # the whole game when nobody is left.
    if dont_spam():
        return
    group_id = update.effective_chat.id
    if group_id not in games:
        return
    game = games[group_id]
    user = update.effective_user
    res = game.participants.pop(user, None)  # None when the sender wasn't playing
    game.leader_candidates.discard(user)
    if res is not None:
        context.bot.send_message(chat_id=group_id, text=user_name(user) + ' покинул игру')
    if len(game.participants) == 0:
        context.bot.send_message(chat_id=group_id, text='Последний игрок покинул игру, завершаемся :(')
        stop_game(update, context, secondary=True)
        return
    if user.id == game.starter_id:
        new_starter = random.choice(tuple(game.participants.keys()))
        game.starter_id = new_starter.id
        context.bot.send_message(chat_id=group_id,
                                 text='Администратор игры ее покинул, теперь это '
                                      + user_name(new_starter, mention=True), parse_mode=ParseMode.MARKDOWN_V2)
    # NOTE(review): game.leader is None before the first round starts, which
    # would raise AttributeError here — confirm leave is only possible mid-game.
    if user.id == game.leader.id:
        end_round(group_id)
        context.bot.send_message(chat_id=group_id, text=user_name(user) + ' был ведущим, начинаем новый раунд')
        start_round(update, context, secondary=True)
def stop_game(update, context, secondary=False):
    # /stop_game: end the game if the caller is the game admin, a chat admin
    # or the hard-coded superuser. *secondary* marks bot-initiated stops and
    # bypasses the permission check by impersonating the game admin.
    if dont_spam():
        return
    group_id = update.effective_chat.id
    user_id = update.effective_user.id
    if group_id in games:
        game = games[group_id]
        if secondary:
            user_id = game.starter_id
        allowed = False
        if user_id == game.starter_id:
            allowed = True
        # Hard-coded superuser (bot author's Telegram id).
        if user_id == 33739616:
            allowed = True
        else:
            # Not the superuser: allow chat administrators too.
            admins = context.bot.get_chat_administrators(chat_id=group_id)
            for admin in admins:
                if user_id == admin.user.id:
                    allowed = True
                    break
        if allowed:
            if game.timer is not None:
                game.timer.cancel()
            context.bot.send_message(chat_id=group_id, text='Игра окончена!')
            games.pop(group_id)
        else:
            context.bot.send_message(chat_id=group_id, text='Игру может завершить только '
                                                            'администратор игры или чата')
    else:
        context.bot.send_message(chat_id=group_id, text='В этом чате не идет игра!')
def rules(update, context):
    # /rules: send the full static rule text. escape_markdown(escape_star=False)
    # escapes MarkdownV2 specials while keeping the '*' bold markers intact.
    if dont_spam():
        return
    rules_msg = '*Правила игры:* 🐊 \n\n' \
                '*1.* Все команды доступны при введении символа / \n' \
                '*2.* Чтобы начать игру, напишите /start_game и следуйте инструкциям \n' \
                '*3.* Чтобы присоединиться к игре, напишите /join_game \n' \
                '*4.* Минимальное количество людей: 2 пользователя \n' \
                '*5.* Начать раунд: /start_round \n' \
                '*6.* *Правила игры*: После этого случайным образом выберется ведущий. Ему нужно будет посмотреть' \
                ' предложенные ему словосочетания, нажав на кнопку *"Посмотреть слова"*, а затем выбрать одно из них, ' \
                'нажав на кнопку с соответствующей цифрой. После этого сменить выбор будет нельзя, а выбранное ' \
                'словосочетание можно будет посмотреть, нажав на любую из кнопок. \n' \
                '*7.* Если ведущий не выберет слово в течение *одной минуты*, случайным образом выберется другой ведущий.\n' \
                '*8.* Сама игра происходит таким образом: игроки могут спрашивать у ведущего про выбранное ' \
                'им словосочетание, а ведущий может отвечать на них, *не используя однокоренные с загаданными слова*. ' \
                'Если он использует однокоренное, то раунд закончится, а ведущим станет другой игрок. \n' \
                '*9.* *Альтернативный вариант:* ведущий может просто объяснять загаданные слова, а игроки пытаться угадать.\n' \
                '*10.* Как только кто-то из игроков написал несколько из загаданных слов, ему начислится столько же очков, ' \
                'а отгаданные слова откроются. Если кто-то из игроков произнесет уже угаданные слова, ' \
                'то ему ничего за них не начислится. Как только будут отгаданы все слова, ведущему за старания ' \
                'начислится одно очко, и автоматически начнется следующий раунд, где будет выбран уже другой ведущий. \n' \
                '*11.* Если ведущий больше не может объяснять, и хочет сдаться, он может написать /give_up\n' \
                '*12.* Если ведущий ничего не будет писать в течение 5 минут, он посчитается покинувшим игру и ' \
                'будет выбран другой ведущий \n' \
                '*13.* Для того, чтобы покинуть игру, наберите /leave_game ' \
                '(если кому-то надоело играть и чтобы его не выбирало ведущим)\n' \
                '*14.* Если вдруг понадобилось досрочно закончить игру, администраторы чата и игрок, стартовавший игру, ' \
                '("администратор игры") могут сделать это с помощью /stop_game'
    context.bot.send_message(chat_id=update.effective_chat.id, text=escape_markdown(rules_msg, escape_star=False),
                             parse_mode=ParseMode.MARKDOWN_V2)
def check_message(update, context):
    # Main text handler: handles game-setup replies, join-by-phrase in the
    # common group, leader root policing, and participants' guesses.
    if dont_spam():
        return
    group_id = update.effective_chat.id
    if group_id not in games:
        # No game in this chat yet: the only meaningful text is a reply to a
        # bot message (the "<lang> <rounds>" ForceReply prompt).
        replied = update.effective_message.reply_to_message
        if replied is not None:
            if replied.from_user.id == BOT_ID and replied.text.find(START_STRING) != 0:
                text = update.effective_message.text.split()
                if len(text) != 2:
                    send_start_game_message(update, context)
                else:
                    lang = text[0].lower()
                    if lang != "ru" and lang != "en":
                        send_start_game_message(update, context)
                        return
                    try:
                        wins = int(text[1])
                    except ValueError:
                        send_start_game_message(update, context)
                        return
                    if wins < 1 or wins > 100000:
                        send_start_game_message(update, context)
                        return
                    # Valid "<lang> <rounds>": create the game with the sender
                    # as admin and first participant.
                    context.bot.send_message(chat_id=group_id, text='Игра на языке ' + lang + ' началась!',
                                             reply_markup=InlineKeyboardMarkup
                                             ([[InlineKeyboardButton('Присоединиться', callback_data='join')],
                                               [InlineKeyboardButton('Начать раунд', callback_data='start_round')]]))
                    games[group_id] = Game(lang, wins)
                    game = games[group_id]
                    game.starter_id = update.effective_user.id
                    game.participants[update.effective_user] = 0
        return
    # The shared common group lets users join by sending a fixed phrase.
    if group_id == COMMON_GROUP_ID and update.effective_message.text == WANNA_JOIN_STRING:
        join_game(update, context, secondary=True, callback_user=update.effective_user)
    game = games[group_id]
    if not game.round_going:
        return
    user_id = update.effective_user.id
    text = update.message.text.split()
    for i in range(len(text)):
        text[i] = normalize(text[i])
        text[i] = text[i].lower()
    if user_id == game.leader.id:
        # Leader spoke: refresh the 5-minute inactivity timer, then verify no
        # word shares a root with the hidden phrase.
        game.timer.cancel()
        game.timer = threading.Timer(300, restart_round, args=[update, context])
        game.timer.start()
        for word in text:
            banned = False
            for root in get_roots(word, game.lang):
                for game_root in game.roots:
                    if check_roots(root, game_root, game.lang):
                        banned = True
                        break
            if banned:
                # Leader used a forbidden root: reveal the phrase, restart.
                context.bot.send_message(chat_id=group_id, text='Ведущий использовал однокоренное слово :(\n' +
                                                                'Было загадано: ' + ' '.join(games[group_id].words)
                                                                + '\nНачинаем новый раунд')
                end_round(group_id)
                start_round(update, context, secondary=True)
    elif update.effective_user in game.participants:
        # Participant spoke: they become a future leader candidate; each word
        # is matched (by lemma) against the not-yet-guessed hidden words.
        game.leader_candidates.add(update.effective_user)
        score = 0
        for word in text:
            norm_word = get_normal_form(word, game.lang)
            for i in range(len(game.words)):
                norm_game_word = get_normal_form(game.words[i], game.lang)
                if norm_word == norm_game_word:
                    if not game.guessed[i]:
                        score += 1
                        game.guessed[i] = True
                        # A guessed word's roots are no longer forbidden.
                        for root in get_roots(game.words[i], game.lang):
                            try:
                                game.roots.remove(root)
                            except ValueError:
                                pass
        if score > 0:
            msg = user_name(update.effective_user) + ' угадал ' + str(score)
            # Russian pluralization of "слово".
            if score == 1:
                msg += ' слово'
            elif 2 <= score <= 4:
                msg += ' слова'
            else:
                msg += ' слов'
            add_points(group_id, update.effective_user, score)
            context.bot.send_message(chat_id=group_id, text=msg)
            # Progress line: guessed words revealed, the rest masked.
            msg = 'На данный момент отгадано '
            for i in range(len(game.words)):
                if game.guessed[i]:
                    msg += game.words[i]
                else:
                    msg += '????'
                msg += ' '
            context.bot.send_message(chat_id=group_id, text=msg)
            if sum(game.guessed) == len(game.words):
                # Round complete: leader earns a point; finish the game when
                # no rounds remain, otherwise start the next one.
                game.rounds -= 1
                end_round(group_id)
                context.bot.send_message(chat_id=group_id, text='Все слова отгаданы! Осталось раундов: ' + str(game.rounds))
                add_points(group_id, game.leader, 1)
                print_top(update, context, game.top)
                if game.rounds == 0:
                    context.bot.send_message(chat_id=group_id, text='Игра окончена, ' +
                                                                    user_name(game.top[0][0]) + ' победил!')
                    stop_game(update, context, secondary=True)
                else:
                    start_round(update, context, secondary=True)
    return
def check_callback(update, context):
    # Inline-button dispatcher: 'start_game'/'join'/'start_round' are open to
    # anyone; 'words' and the numeric phrase choices are leader-only.
    if dont_spam():
        return
    group_id = update.effective_chat.id
    user_id = update.effective_user.id
    callback = update.callback_query
    if callback.data == 'start_game':
        start_game(update, context)
        return
    if group_id not in games:
        context.bot.answer_callback_query(callback_query_id=callback.id, text='Игра не идет!', show_alert=True)
        return
    game = games[group_id]
    if callback.data is None:
        return
    if callback.data == 'join':
        join_game(update, context, secondary=True, callback_user=update.effective_user)
        return
    if callback.data == 'start_round':
        start_round(update, context)
        return
    # Everything below is leader-only.
    if user_id != game.leader.id:
        context.bot.answer_callback_query(callback_query_id=callback.id, text='Ты не ведущий!', show_alert=True)
        return
    if callback.data == "words":
        if len(game.words) == 0:
            # No phrase picked yet: show the six numbered options.
            context.bot.answer_callback_query(callback_query_id=callback.id,
                                              text='\n'.join(game.words_options), show_alert=True)
        else:
            context.bot.answer_callback_query(callback_query_id=callback.id,
                                              text='Ты должен объяснить \"' + ' '.join(game.words) + '\"',
                                              show_alert=True)
    elif len(game.words) > 0:
        # A phrase is already locked in; any button just re-shows it.
        context.bot.answer_callback_query(callback_query_id=callback.id,
                                          text='Ты должен объяснить \"' + ' '.join(game.words) + '\"',
                                          show_alert=True)
    else:
        # Numeric button: lock in the chosen phrase and start the round proper.
        choice = int(callback.data) - 1
        # Drop the "N." prefix from the stored option string.
        game.words = game.words_options[choice].split()[1:]
        context.bot.answer_callback_query(callback_query_id=callback.id,
                                          text='Теперь ты должен объяснить \"' + ' '.join(game.words) + '\"',
                                          show_alert=True)
        # Replace the selection timer with the 5-minute activity timer.
        game.timer.cancel()
        game.timer = threading.Timer(300, restart_round, args=[update, context])
        game.timer.start()
        game.round_going = True
        # The current leader should not lead again until others have.
        game.leader_candidates.clear()
        game.leader_candidates.add(game.leader)
        for word in game.words:
            for root in get_roots(word, game.lang):
                game.roots.append(root)
        for word in game.words:
            game.guessed.append(False)
def give_up(update, context):
    """/give_up: the round's leader concedes, ending the round and starting a new one."""
    if dont_spam():
        return
    group_id = update.effective_chat.id
    if group_id not in games:
        return
    game = games[group_id]
    user_id = update.effective_user.id
    if user_id != game.leader.id:
        # Bug fixes: user_name() needs the user object (it reads .first_name),
        # not the raw id; and without this `return` any non-leader could end
        # the round for everyone.
        context.bot.send_message(chat_id=group_id,
                                 text=user_name(update.effective_user) + ', ты не ведущий...')
        return
    end_round(group_id)
    start_round(update, context, secondary=True)
def start(update, context):
    # /start: in a group (negative chat id) offer the "start game" button and
    # count the group; in a private chat send the add-to-group deep link.
    if dont_spam():
        return
    if update.effective_chat.id < 0:
        context.bot.send_message(chat_id=update.effective_chat.id,
                                 text='Привет! Чтобы узнать, как играть со мной, напиши /rules',
                                 reply_markup=InlineKeyboardMarkup(
                                     [[InlineKeyboardButton('Начать игру', callback_data='start_game')]]))
        db.update(add('groups', 1), Query().groups.exists())
    else:
        context.bot.send_message(chat_id=update.effective_chat.id,
                                 text='Добавить бота в чат: https://t.me/playnamegame_bot?startgroup=true')
def get_stats(update, context):
    """Report the bot usage counters stored in TinyDB to the requesting chat."""
    if dont_spam():
        return
    lines = [
        'Добавлений в группы: ' + str(db.search(Query().groups.exists())[0]['groups']),
        'Начато игр: ' + str(db.search(Query().games.exists())[0]['games']),
        'Начато раундов (вручную): ' + str(db.search(Query().rounds.exists())[0]['rounds']),
        'Количество присоединений к игре: ' + str(db.search(Query().joins.exists())[0]['joins']),
    ]
    context.bot.send_message(chat_id=update.effective_chat.id, text='\n'.join(lines))
# --- Wire up all command handlers with the python-telegram-bot dispatcher. ---
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
start_game_handler = CommandHandler('start_game', start_game)
dispatcher.add_handler(start_game_handler)
join_game_handler = CommandHandler('join_game', join_game)
dispatcher.add_handler(join_game_handler)
start_round_handler = CommandHandler('start_round', start_round)
dispatcher.add_handler(start_round_handler)
leave_game_handler = CommandHandler('leave_game', leave_game)
dispatcher.add_handler(leave_game_handler)
stop_game_handler = CommandHandler('stop_game', stop_game)
dispatcher.add_handler(stop_game_handler)
give_up_handler = CommandHandler('give_up', give_up)
dispatcher.add_handler(give_up_handler)
rules_handler = CommandHandler('rules', rules)
dispatcher.add_handler(rules_handler)
# Triple underscore looks deliberate — presumably a semi-hidden admin command;
# TODO confirm before "fixing" the name.
get_stats_handler = CommandHandler('get___stats', get_stats)
dispatcher.add_handler(get_stats_handler)
# All plain text goes through check_message; all inline-button presses through
# check_callback.
msg_handler = MessageHandler(Filters.text, check_message)
dispatcher.add_handler(msg_handler)
callback_handler = CallbackQueryHandler(check_callback)
dispatcher.add_handler(callback_handler)
updater.start_polling()
start_time = datetime.now()
# Bootstrap a permanent Russian-language game in the common group.
updater.bot.send_message(chat_id=COMMON_GROUP_ID, text='Игра на языке ' + 'ru' + ' началась!',
                         reply_markup=InlineKeyboardMarkup
                         ([[InlineKeyboardButton('Присоединиться', callback_data='join')],
                           [InlineKeyboardButton('Начать раунд', callback_data='start_round')]]))
updater.bot.send_message(chat_id=COMMON_GROUP_ID, text='Чтобы прочитать правила, напиши /rules',
                         reply_markup=ReplyKeyboardMarkup([[WANNA_JOIN_STRING]], resize_keyboard=True,
                                                          one_time_keyboard=True))
games[COMMON_GROUP_ID] = Game('ru', 99999)
game = games[COMMON_GROUP_ID]
# No specific starter: the common-group game is owned by the bot itself.
game.starter_id = None
updater.idle()
# TODO end game if the only participant is inactive | alkurmtl/playnamegame | run.py | run.py | py | 29,693 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "telegram.ext.Updater",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pymorphy2.... |
7292695784 | #!/usr/bin/python3
"""
# This file is part of the Pop Icon Theme and is free software; you can
# redistribute it and/or modify it under the terms of the GNU Lesser General
# Public License as published by the Free Software Foundation; version 3.
#
# This file is part of the Pop Icon Theme and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <https://www.gnu.org/licenses/lgpl-3.0.txt>
"""
import argparse
from ast import parse
import os
import shutil
import subprocess
from pathlib import Path
from importlib_metadata import sys
# Theme name; also the name of the output directory the icons are built into.
THEMENAME:str = 'Pop'
# NOTE(review): BINDIR is never referenced in the visible script — confirm
# before removing.
BINDIR = Path('/usr/bin')
# The script must be launched from the repository root.
BASEDIR = Path(os.getcwd())
SRCDIR = BASEDIR / 'src'        # icon / cursor sources
THEMEDIR = BASEDIR / THEMENAME  # rendered theme output
# Every bitmap size directory the fullcolor render produces and the cleaner removes.
SIZES = (
    '8x8',
    '16x16',
    '24x24',
    '32x32',
    '48x48',
    '64x64',
    '128x128',
    '256x256',
    '512x512'
)
## Rendering Functions
def render_fullcolor() -> None:
    """Run the fullcolor (bitmap) render script; abort the build on failure."""
    print(' -- Rendering bitmap icons...')
    os.chdir(SRCDIR / 'fullcolor')
    result = subprocess.run('./render-fullcolor.py')
    if result.returncode != 0:
        print('Failed to render fullcolor icons. See output above.')
        sys.exit(1)
def render_symbolics() -> None:
    """Extract/render the symbolic icon set via the Ruby extraction script."""
    print(' -- Rendering symbolic icons...')
    os.chdir(SRCDIR / 'scalable')
    try:
        subprocess.run('./extract-symbolic-icons.rb', check=True)
    except subprocess.CalledProcessError:
        # Non-zero exit from the helper script: report and stop the whole build.
        print('Failed to render symbolic icons. See output above.')
        sys.exit(1)
def cleanup_unoptomized_renders() -> None:
    """Placeholder: prints a message but performs no cleanup yet.

    NOTE(review): the body is a stub (and the name carries a typo,
    "unoptomized"); nothing in the visible script calls it.
    """
    print(' -- Cleaning up any unoptimized output')
def generate_symlinks() -> None:
    """Create the fullcolor and symbolic symlinks by running both generator scripts."""
    print(' -- Generating symbolic links...')
    steps = (
        ('symlinks', './generate-symlinks.sh',
         'Failed to generate fullcolor symlinks. See output above.'),
        ('scalable', './generate-symbolic-symlinks.sh',
         'Failed to generate sylbolic symlinks. See output above.'),
    )
    for subdir, script, failure_msg in steps:
        os.chdir(SRCDIR / subdir)
        try:
            subprocess.run(script, check=True)
        except subprocess.CalledProcessError:
            print(failure_msg)
            sys.exit(1)
def render_cursors() -> None:
    """Render the cursor theme: copy SVG templates, rasterize them, then build
    the X11 and Win32 cursor files.

    Skips entirely when the bitmaps/ output directory already exists (use the
    clean step to force a re-render).
    """
    print(' -- Rendering cursors...')
    cursors_dir = SRCDIR / 'cursors'
    template_dir = cursors_dir / 'templates'
    output_dir = cursors_dir / 'bitmaps'
    os.chdir(cursors_dir)
    if output_dir.exists():
        print(' -- Output dir exists, use --clean to re-render')
        return
    shutil.copytree(template_dir, output_dir)
    print(' -- Rendering cursor fullcolor')
    # NOTE(review): '-n 0' is passed as a single argv element ("-n 0"); if the
    # script expects separate '-n' and '0' arguments this should be split — confirm.
    subprocess.run(['./render-cursors.py', '-n 0', 'source-cursors.svg'])
    print(' -- Generating cursor files')  # fixed message typo: "Generatig"
    subprocess.run('./x11-make.sh')
    subprocess.run('./w32-make.sh')
def install_metadata() -> None:
    """Copy the *.in metadata templates into the built theme directory."""
    print(' -- Installing theme Metadata...')
    for name in ('index.theme', 'cursor.theme'):
        shutil.copyfile(BASEDIR / (name + '.in'), f'{THEMEDIR}/{name}')
## Artifact Cleanup/Removal Functions
def clean_fullcolor() -> None:
    """Delete every per-size bitmap directory from the built theme."""
    print(' -- Removing Fullcolor Icons')
    for size in SIZES:
        target = THEMEDIR / size
        if not target.exists():
            print(f' ** Skipping {target}')
            continue
        print(f' -- Removing {target}')
        shutil.rmtree(target)
def clean_symbolics() -> None:
    """Delete the scalable/ (symbolic icon) directory from the built theme, if present."""
    target = THEMEDIR / 'scalable'
    if target.exists():
        print(' -- Removing symbolic icons')
        shutil.rmtree(target)
def clean_cursors() -> None:
    """Delete built cursors and any intermediate cursor render output.

    Removes both the installed `cursors/` directory inside the theme and the
    `bitmaps/` working directory that render_cursors() copies templates into.
    """
    cursor_dir = THEMEDIR / 'cursors'
    # Cleanup: dropped the unused `template_dir` local the original computed.
    output_dir = SRCDIR / 'cursors' / 'bitmaps'
    if cursor_dir.exists():
        print(' -- Removing cursors')
        shutil.rmtree(cursor_dir)
    if output_dir.exists():
        print(' -- Cleaning up old render')
        shutil.rmtree(output_dir)
def clean_metadata() -> None:
    """Delete the generated index.theme and cursor.theme from the theme directory."""
    print(' -- Removing Metadata')
    for name in ('index.theme', 'cursor.theme'):
        target = THEMEDIR / name
        try:
            target.unlink()
        except FileNotFoundError:
            print(f' ** Skipping {name}')
        else:
            print(f' -- Removed {name}')
def clean_dirs(**kwargs) -> None:
    """Remove previously rendered artifacts selected by keyword flags.

    Expected kwargs keys: everything, fullcolor, symbolics, cursors, metadata.
    """
    print('\n-- Cleaning up previous renders')
    os.chdir(THEMEDIR)
    if kwargs['everything']:
        print(' -- Performing Full Cleanup')
        clean_fullcolor()
        clean_symbolics()
        clean_cursors()
        clean_metadata()
        return
    # Selective cleanup: run each cleaner only when its flag is set,
    # otherwise announce the skip.
    selective = (
        ('fullcolor', clean_fullcolor, ' ** Skipping removing fullcolor'),
        ('symbolics', clean_symbolics, ' ** Skipping removing symbolic icons'),
        ('cursors', clean_cursors, ' ** Skipping removing Cursors'),
        ('metadata', clean_metadata, ' ** Skipping removing Metadata'),
    )
    for flag, cleaner, skip_msg in selective:
        if kwargs[flag]:
            cleaner()
        else:
            print(skip_msg)
    print('\n\n')
def do_render(args) -> None:
    """Top-level driver: optionally clean prior output, then render the selected stages."""
    if args.clean:
        # Remove previous output first so everything below re-renders from scratch.
        clean_dirs(
            everything=args.all,
            fullcolor=args.fullcolor,
            symbolics=args.symbolics,
            cursors=args.cursors,
            metadata=args.metadata
        )
    print('\n--Rendering icons')
    if args.all:
        # Full build: icons, cursors, symlinks and theme metadata.
        render_fullcolor()
        render_symbolics()
        render_cursors()
        generate_symlinks()
        install_metadata()
        return
    # Selective build: each flag triggers exactly one stage.
    if args.fullcolor:
        render_fullcolor()
    if args.symbolics:
        render_symbolics()
    if args.cursors:
        render_cursors()
    if args.links:
        generate_symlinks()
    if args.metadata:
        install_metadata()
# --- Command-line interface: each flag enables one build stage; with no stage
# flags at all the script defaults to building everything (--all). ---
parser = argparse.ArgumentParser(description='Render icons for the Pop Icon Theme')
parser.add_argument(
    '-c',
    '--clean',
    action='store_true',
    help='Remove existing files before rendering (takes a long time to render)'
)
parser.add_argument(
    '-a',
    '--all',
    action='store_true',
    help='Render all items (Default)'
)
parser.add_argument(
    '-f',
    '--fullcolor',
    action='store_true',
    help='Render fullcolor icons'
)
parser.add_argument(
    '-s',
    '--symbolics',
    action='store_true',
    help='Render Symbolic Icons'
)
parser.add_argument(
    '-x',
    '--cursors',
    action='store_true',
    help='Render Cursors'
)
parser.add_argument(
    '-l',
    '--links',
    action='store_true',
    help='Generate Theme Symlinks'
)
parser.add_argument(
    '-m',
    '--metadata',
    action='store_true',
    help='Generate Metadata'
)
args = parser.parse_args()
# If no individual stage was requested, fall back to rendering everything;
# an explicit stage flag disables --all even if the user passed it.
if not True in (args.fullcolor,
                args.symbolics,
                args.cursors,
                args.links,
                args.metadata):
    args.all = True
else:
    args.all = False
do_render(args)
| pop-os/icon-theme | master-render.py | master-render.py | py | 7,234 | python | en | code | 189 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 47,... |
74430643303 | import datetime
import json
from flask_restful import Resource
from flask import request
from init import app, db
from Models.player import Player
from Models.playerRequest import PlayerRequest
from decorators import json_required
import random
class RPlayerPost(Resource):
    """Endpoint that registers brand-new players."""

    def post(self, **kwargs):
        """Create a player with a random unique code and return its identifiers."""
        unique_code = random.randrange(9999999999)
        db.session.add(Player(unique=unique_code))
        db.session.commit()
        created = Player.query.filter_by(unique=unique_code).first()
        if created is None:
            return {"message": "new player error created"}
        return {"message": "new player created", "player_id": created.id, "unique": created.unique}
class RPlayer(Resource):
    """Per-player endpoint: GET returns world state, POST upserts a player request."""

    def get(self, player_id, **kwargs):
        """Return the player's position/state plus every other player as an enemy."""
        player_model = Player.query.filter_by(id=player_id).first()
        if player_model is None:
            return {"message": "player not found"}
        # Every other player is reported as an "enemy" with its public data.
        enemies_model = Player.query.filter(Player.id != player_id).all()
        enemies = dict()
        if enemies_model is not None:
            for enemy in enemies_model:
                enemy_data = {
                    "id": enemy.id,
                    "name": enemy.name,
                    "icon": enemy.icon,
                    "type": enemy.type,
                    "world_position": [enemy.y_world, enemy.x_world],
                }
                enemies[enemy.id] = enemy_data
        answer = {
            "icon": player_model.icon,
            "name": player_model.name,
            "type": player_model.type,
            "dynamic_position": [player_model.y_dynamic, player_model.x_dynamic],
            "world_position": [player_model.y_world, player_model.x_world],
            "chunks_use_map": player_model.chunk,
            "level": player_model.level,
            "vertices": player_model.vertices,
            "direction": player_model.direction,
            "enemies": enemies,
            "assemblage_point": [player_model.assemblage_point_y, player_model.assemblage_point_x]
        }
        return answer

    def post(self, player_id, **kwargs):
        """Create or refresh the player's pending request (one per player_id)."""
        player_model = Player.query.filter_by(id=player_id).first()
        if player_model is None:
            return {"message": "player not found"}
        data = request.get_json(force=True)
        player_request = PlayerRequest.query.filter_by(player_id=player_id).first()
        if player_request is None:
            player_request = PlayerRequest(player_id=player_id, time=datetime.datetime.today(), type=data['type'],
                                           description=data['description'])
            db.session.add(player_request)
        else:
            # Bug fix: the update path assigned `player_request.date`, but the
            # model field is `time` (see the constructor call above), so the
            # timestamp was never refreshed on an existing request.
            player_request.time = datetime.datetime.today()
            player_request.type = data['type']
            player_request.description = data['description']
        db.session.commit()
        return {"message": "true"}
| Apolliner/Field-Mini-Game | testOnline/ApiServer/API/player.py | player.py | py | 3,174 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask_restful.Resource",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "random.randrange",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "Models.player.Player",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "init.db.... |
7046181493 | import logging
import json
from invokust.aws_lambda import LambdaLoadTest, results_aggregator
logging.basicConfig(level=logging.INFO)
###
# SETTINGS
###
# How long should the test run for in minutes?
# Note that Lambda invokations that are started cannot be stopped.
# Test times will actually be run in intervals of 3 minutes.
test_time = 6
# How many concurrent users to test for?
# threads x 25 = num of concurrent users
threads = 20
# What test file are we using?
test_file_name = 'locust_test_mail_list.py'
def print_stat(type, name, req_count, median, avg, min, max, rps):
    """Format one result row as a fixed-width table line (matches the header layout)."""
    # Widths mirror the table header: 7/50 left-aligned, the numeric columns
    # right-aligned at 10/9/9/9/9/10.  `!s` reproduces %s string conversion.
    return (
        f"{type!s:<7} {name!s:<50} {req_count!s:>10} {median!s:>9} "
        f"{avg!s:>9} {min!s:>9} {max!s:>9} {rps!s:>10}"
    )
def print_stats_exit(load_test_state):
    """Aggregate Lambda/Locust results and log a summary table before exit."""
    summ_stats = load_test_state.get_summary_stats()
    agg_results = results_aggregator(load_test_state.get_locust_results())
    # Enrich the aggregated results with run-level metadata for the JSON log line.
    agg_results["request_fail_ratio"] = summ_stats["request_fail_ratio"]
    agg_results["invocation_error_ratio"] = summ_stats["invocation_error_ratio"]
    agg_results["locust_settings"] = load_test_state.lambda_payload
    agg_results["lambda_function_name"] = load_test_state.lambda_function_name
    agg_results["threads"] = load_test_state.threads
    agg_results["ramp_time"] = load_test_state.ramp_time
    agg_results["time_limit"] = load_test_state.time_limit
    logging.info("Aggregated results: {0}".format(json.dumps(agg_results)))
    logging.info(
        "\n============================================================"
        f"\nRamp up time: {agg_results['ramp_time']}s"
        f"\nStarted ramp down after {agg_results['time_limit']}s (time_limit)"
        f"\nThread count: {agg_results['threads']}"
        f"\nLambda invocation count: {agg_results['lambda_invocations']}"
        f"\nLambda invocation error ratio: {agg_results['invocation_error_ratio']}"
        f"\nCumulative lambda execution time: {agg_results['total_lambda_execution_time']}ms"
        f"\nTotal requests sent: {agg_results['num_requests']}"
        f"\nTotal requests failed: {agg_results['num_requests_fail']}"
        f"\nTotal request failure ratio: {agg_results['request_fail_ratio']}\n"
    )
    logging.info(
        "==========================================================================================================================="
    )
    logging.info(
        print_stat(
            "TYPE", "NAME", "#REQUESTS", "MEDIAN", "AVERAGE", "MIN", "MAX", "#REQS/SEC"
        )
    )
    logging.info(
        "==========================================================================================================================="
    )
    reqs = agg_results["requests"]
    # Keys look like "<METHOD>_<path>"; split on the first underscore.
    # (Note: `type` shadows the builtin here.)
    for k in reqs.keys():
        k_arr = k.split("_")
        type = k_arr[0]
        del k_arr[0]
        name = "_".join(k_arr)
        logging.info(
            print_stat(
                type,
                name,
                reqs[k]["num_requests"],
                round(reqs[k]["median_response_time"], 2),
                round(reqs[k]["avg_response_time"], 2),
                round(reqs[k]["min_response_time"], 2),
                round(reqs[k]["max_response_time"], 2),
                round(reqs[k]["total_rps"], 2) * agg_results["threads"],
            )
        )
    # Per-thread rps summed across paths, scaled by thread count for the total.
    total_rps = sum(
        [
            reqs[path]["total_rps"]
            for path in reqs
        ]
    ) * agg_results["threads"]
    logging.info(
        print_stat(
            "Total",
            f"Concurrent Users: {agg_results['threads']*25}",
            agg_results["num_requests"],
            "",
            "",
            "",
            "",
            round(total_rps, 2),
        )
    )
    logging.info("Exiting...")
if __name__ == "__main__":
    # Lambda invocations cannot be stopped once started, so each invocation is
    # capped at 3 minutes; the harness re-invokes until time_limit is reached.
    lambda_runtime = f"{test_time}m" if test_time < 3 else "3m"
    lambda_payload = {
        'locustfile': f"./tests/{test_file_name}",
        'host': 'https://forms-staging.cdssandbox.xyz',
        'num_users': 25,
        'spawn_rate': 5,
        'run_time': lambda_runtime
    }
    load_test = LambdaLoadTest(
        lambda_function_name='LoadTesting',
        threads=threads,
        ramp_time=0,
        time_limit=test_time*60,
        lambda_payload=lambda_payload
    )
    load_test.run()
    print_stats_exit(load_test)
    # Persist the raw per-thread Locust results for later inspection.
    output_file = open("threads_output.json", "w")
    thread_output = {"threads": load_test.get_locust_results() }
    json.dump(thread_output, output_file)
output_file.close() | cds-snc/gc_forms_load_testing | locust_swarm.py | locust_swarm.py | py | 4,479 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "invokust.aws_lambda.results_aggregator",
"line_number": 36,
"usage_type": "call"
},
{
"api_na... |
15266305014 | import json
from django.db.models import Q
from django.http import HttpResponse
from django.views import View
from mymodels.models import Posts, CustomUser
from mypackage.MixinClasses import GetUserMixin, SlicerMixin, ExcludeDelPostsMixin
import datetime
from django.utils import timezone
class PostsList(View, SlicerMixin, GetUserMixin, ExcludeDelPostsMixin):
    '''Base class for rendering post lists across the site, including the main
    page.  ListView is not used because it forbids POST.'''
    # Page size for one feed slice.
    posts_in_page = 5
    model = Posts
    template_name = 'index/post_list.html'
    context_object_name = 'posts'

    def get_tags_filter(self):
        '''Inspect the request cookies and return the list of tag names to block.'''
        cookies = self.request.COOKIES
        out_list = ['nsfw', 'политика', 'жесть']
        # Each opt-in cookie removes the corresponding tag from the block list
        # (nsfw additionally requires the age-confirmation cookie).
        if cookies.get('is_18_age') and cookies.get('nsfw'):
            out_list.remove('nsfw')
        if cookies.get('politic'):
            out_list.remove('политика')
        if cookies.get('gore'):
            out_list.remove('жесть')
        return out_list

    def get_tags_user(self, value=True):
        '''Return the user's subscribed (value=True) or banned (value=False) tag names.'''
        user = self.get_user_session()
        # get_user_session() returns False for anonymous visitors.
        if user==False:
            return []
        list_tags = user.subtags_set.filter(status = value).values('tag_id__tag')
        list_tags = [key['tag_id__tag'] for key in list_tags]
        return list_tags

    def get_queryset(self):
        '''Build the sliced feed queryset: recent, not yet viewed, not tag-blocked.'''
        start, end = self.get_slice(self.request, self.posts_in_page)
        list_views_posts = self.get_list_views_posts()
        block_tags = self.get_tags_filter() + self.get_tags_user(value=False)
        # Only posts from the last 90 days (settings use USE_TZ = True).
        slice_time = timezone.now() - datetime.timedelta(days=90)
        queryset = Posts.objects.select_related('author').prefetch_related('evaluationposts_set', 'tags_set', 'comments_set')
        queryset = queryset.filter(date_create__gt=slice_time)
        queryset = queryset.exclude(Q(id__in=list_views_posts) | Q(tags__tag__in=block_tags))
        queryset = self.exclude_del_posts(queryset)
        queryset = queryset.order_by('-pk')
        return queryset[start:end]

    def get_list_views_posts(self):
        '''Return already-viewed post ids from the POST body when the hide_view
        cookie is set; otherwise an empty list.'''
        if (self.request.COOKIES.get('hide_view')):
            return self.request.POST.getlist('views_posts[]')
        else:
            return []

    def get_posts(self):
        '''Serialize the queryset into the JSON structure the frontend expects.'''
        user = self.get_user_session()
        posts_list = self.get_queryset()
        out_data = []
        for one_post in posts_list:
            post_data = {}
            if not user:
                post_data['eval'] = 0
            # filter().first() instead of get(): get() raises when no object exists.
            post_eval = one_post.evaluationposts_set.filter(
                user_id=user).first()
            if (post_eval == None):
                post_data['eval'] = 0
            elif post_eval.evaluation == True:
                post_data['eval'] = 1
            elif post_eval.evaluation == False:
                post_data['eval'] = -1
            tags_list = []
            for tag in one_post.tags_set.all():
                tags_list.append(tag.tag)
            post_data['tags'] = tags_list
            # The DB stores content as a JSON string (no JSONField support), so
            # it is decoded here; sending the raw string would make the JS
            # client mishandle it.
            post_data['content'] = json.loads(one_post.content)
            post_data['name'] = one_post.name
            post_data['author'] = one_post.author.username
            post_data['avatar'] = one_post.author.profile.avatar.url
            post_data['rating'] = one_post.post_rating
            post_data['views'] = one_post.views
            post_data['comments'] = len(one_post.comments_set.all())
            post_data['post_id'] = one_post.pk
            # Deleted posts only surface on the author's own page (as of 2021-03-16).
            if hasattr(one_post, 'deletedposts'):
                post_data['is_deleted'] = True
            if user:
                # Authors and privileged users (group > 100) may edit the post.
                if one_post.author.username == user.username or user.group > 100:
                    post_data['may_change'] = True
            out_data.append(post_data)
        return json.dumps(out_data)

    def get(self, request, *args, **kwargs):
        # The endpoint is POST-only; the Russian reply says "actually I expect POST".
        return HttpResponse('Вообще то я жду POST')

    def post(self, request, *args, **kwargs):
        data = self.get_posts()
        return HttpResponse(data)
| untiwe/citrom_test | mypackage/posts_manager/posts_list.py | posts_list.py | py | 5,721 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "django.views.View",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "mypackage.MixinClasses.SlicerMixin",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "mypackage.MixinClasses.GetUserMixin",
"line_number": 13,
"usage_type": "name"
},
... |
35425129977 |
# coding: utf-8
# # 보스톤 집 값 예측하기
#
# * 보스턴 주택 데이터는 여러 개의 측정지표들을 포함한, 보스톤 인근의 주택가 중앙값
#
# * Variable in order:
# - CRIM : 마을별 1인당 범죄율
# - ZN : 25,000 평방미터를 초과하는 거주지역의 비율
# - INDUS : 비소매 상업지역이 점유하고 있는 토지의 비율
# - CHAS : 찰스 강에 대한 더미변수(강의 경계에 위치한 경우는 1, 아니면 0
# - NOX : 10ppm 당 농축 일산화질소
# - RM : 주책 1가구당 평균 방의 개수
# - AGE : 1940년 이전에 건축된 소유 주택의 비율
# - DIS : 5개의 보스톤 직업센터까지의 접근성 지수
# - RAD : 방사형 돌까지의 접근성 지수
# - TAX : 10,000달러 당 재산세율
# - PTRATIO : 시별 학생/교사 비율
# - B : 1000(Bk - 0.63) ^2, 여기서 Bk는 시별 흑인의 비율
# - LSTAT : 모집단의 하위계층의 비율(%)
# - MEDV : 본인 소유의 주택가격(중앙값) (단위:$1,000)
# In[24]:
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import tensorflow as tf

# Fix random seeds for reproducibility (TF1-style seeding API).
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)

# Load the Boston housing data: 13 feature columns, MEDV (house price) as target.
df =pd.read_csv('./data/housing.csv', delim_whitespace = True, header =None)
dataset = df.values
X = dataset[:, 0:13]
Y = dataset[:, 13]

# 70/30 train/test split, seeded for repeatability.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.3, random_state=seed)

# Regression network: two hidden ReLU layers, linear output for the price.
model = Sequential()
model.add(Dense(30, input_dim = 13, activation='relu'))
model.add(Dense(6, activation='relu'))
model.add(Dense(1))

model.compile(loss = 'mean_squared_error', optimizer = 'adam')
model.fit(X_train, Y_train, epochs = 200, batch_size =10)

# Compare predicted vs. actual prices for the first 10 test samples
# (the printed labels are Korean: "actual price" / "predicted price").
Y_prediction = model.predict(X_test).flatten()
for i in range(10):
    label = Y_test[i]
    prediction = Y_prediction[i]
    print('실제가격 : {:.3f}, 예상가격 : {:.3f}'.format(label, prediction))
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| ALVHA/DeepLearning | 20190722/Boston+House.py | Boston+House.py | py | 2,276 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.seed",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.set_random_seed",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pandas... |
39095320319 |
# coding: utf-8
# In[12]:
import nltk, re, string
from sklearn.preprocessing import normalize
from nltk.corpus import stopwords
# numpy is the package for matrix cacluation
import numpy as np
# for lemma
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
wordnet_lemmatizer = WordNetLemmatizer()
# import cross validation method
from sklearn.model_selection import cross_validate
from sklearn.metrics import precision_recall_fscore_support
from sklearn.naive_bayes import MultinomialNB
# import pipeline class
from sklearn.pipeline import Pipeline
# import GridSearch
from sklearn.model_selection import GridSearchCV
text = []
target = []
import csv
# NOTE(review): binary mode ('rb') with csv.reader is Python 2 style; on
# Python 3 this raises TypeError — the script appears to target Python 2.
with open('amazon_review_300.csv', 'rb') as f:
    reader = csv.reader(f, delimiter=',')
    for row in reader:
        text.append(row[2])    # review text
        target.append(row[0])  # class label
from sklearn.feature_extraction.text import TfidfVectorizer
# initialize the TfidfVectorizer
tfidf_vect = TfidfVectorizer()
# with stop words removed (this reassignment replaces the vectorizer above)
tfidf_vect = TfidfVectorizer(stop_words="english")
# generate tfidf matrix
dtm= tfidf_vect.fit_transform(text)
print("type of dtm:", type(dtm))
print("size of tfidf matrix:", dtm.shape)
############# Multinomial NB Starts here
metrics = ['precision_macro', 'recall_macro', "f1_macro"]
# create the classifier (alpha=0.5 smoothing)
#clf = MultinomialNB()
clf = MultinomialNB(alpha=0.5)
cv = cross_validate(clf, dtm, target, scoring=metrics, cv=7)
print("**************MultinomialNB using vector with CV = 7*******************")
print("Test data set average precision:")
print(cv['test_precision_macro'])
print(np.average(cv['test_precision_macro']))
print("\nTest data set average recall:")
print(cv['test_recall_macro'])
print(np.average(cv['test_recall_macro']))
print("\nTest data set average fscore:")
print(cv['test_f1_macro'])
def get_doc_tokens(doc):
    """Tokenize one document: lowercase, drop stopwords/punctuation and,
    with the hard-coded `lemmatized` switch on, return the set of POS-aware
    lemmas (deduplicated — downstream term counts are therefore 0/1 per doc).
    """
    lemmatized = True;
    stop_words = stopwords.words('english')
    if(lemmatized == True):
        tokens=[token.strip() for token in nltk.word_tokenize(doc.lower()) if token.strip() not in stop_words and token.strip() not in string.punctuation]
        #tokens = lemma(doc)
        tagged_tokens= nltk.pos_tag(tokens)
        # tagged_tokens is a list of (word, tag) tuples; lemmatize each word
        # with its WordNet POS, keeping only non-stopword, non-punctuation lemmas.
        le_words=[wordnet_lemmatizer.lemmatize(word, get_wordnet_pos(tag))
                  for (word, tag) in tagged_tokens
                  if word not in stop_words and
                  word not in string.punctuation]
        # get lemmatized unique tokens as vocabulary
        le_vocabulary=set(le_words)
        tokens = list(le_vocabulary)
    else:
        # Non-lemmatized path: plain filtered tokens (duplicates preserved).
        tokens=[token.strip() for token in nltk.word_tokenize(doc.lower()) if token.strip() not in stop_words and token.strip() not in string.punctuation]
    # you can add bigrams, collocations, or lemmatization here
    return tokens
def get_wordnet_pos(pos_tag):
    """Map a Penn Treebank POS tag (e.g. 'JJ', 'NN') to the matching WordNet
    POS constant.  Unknown tags default to NOUN, the lemmatizer's own default.
    """
    tag_by_prefix = {
        'J': wordnet.ADJ,
        'V': wordnet.VERB,
        'N': wordnet.NOUN,
        'R': wordnet.ADV,
    }
    return tag_by_prefix.get(pos_tag[:1], wordnet.NOUN)
def tfidf(text):
    """Compute a smoothed, L2-normalized tf-idf matrix for a list of documents.

    tf is term count over document length; idf is log((N+1)/(df+1)) + 1
    (smoothed, mirroring sklearn's TfidfVectorizer); rows are L2-normalized.
    """
    # step 2. process all documents to get list of token list
    docs_tokens=[get_doc_tokens(doc) for doc in text]
    # step 3. vocabulary and raw document-term counts.
    voc=list(set([token for tokens in docs_tokens for token in tokens]))
    dtm2=np.zeros((len(text), len(voc)))
    for row_index,tokens in enumerate(docs_tokens):
        for token in tokens:
            col_index=voc.index(token)
            dtm2[row_index, col_index]+=1
    print("*********************Length of the Matrix*****************************")
    print(dtm2.shape)
    # step 4. get normalized term frequency (tf) matrix
    doc_len=dtm2.sum(axis=1, keepdims=True)
    tf=np.divide(dtm2, doc_len)
    # step 5. get idf (binarize counts to document frequency first)
    doc_freq=np.copy(dtm2)
    doc_freq[np.where(doc_freq>0)]=1
    smoothed_idf=np.log(np.divide(len(text)+1, np.sum(doc_freq, axis=0)+1))+1
    # step 6. get tf-idf
    smoothed_tf_idf=normalize(tf*smoothed_idf)
    return smoothed_tf_idf
# Re-run the NB cross-validation on the hand-rolled tf-idf matrix.
smoothed_tf_idf = tfidf(text)
metrics = ['precision_macro', 'recall_macro', "f1_macro"]
#clf = MultinomialNB()
clf = MultinomialNB(alpha=0.5)
cv = cross_validate(clf, smoothed_tf_idf, target, scoring=metrics, cv=5)
print("**************MultinomialNB using 3rd Assignment*******************")
print("Test data set average precision:")
print(cv['test_precision_macro'])
print(np.average(cv['test_precision_macro']))
print("\nTest data set average recall:")
print(cv['test_recall_macro'])
print(np.average(cv['test_recall_macro']))
print("\nTest data set average fscore:")
print(cv['test_f1_macro'])
print(np.average(cv['test_f1_macro']))
# Vectorizer + classifier as one pipeline so GridSearch can tune both stages.
text_clf = Pipeline([('tfidf', TfidfVectorizer()),
                     ('clf', MultinomialNB())
                     ])
print("*************text_clf*******************")
parameters = {'tfidf__min_df':[1,2,3],
              'tfidf__stop_words':[None,"english"],
              'clf__alpha': [0.5,1.0,1.5,2.0],
              }
# the metric used to select the best parameters
metric = "f1_macro"
# GridSearch also uses cross validation
gs_clf = GridSearchCV(text_clf, param_grid=parameters, scoring=metric, cv=5)
# due to data volume and large parameter combinations
# it may take long time to search for optimal parameter combination
# you can use a subset of data to test
gs_clf = gs_clf.fit(text, target)
for param_name in gs_clf.best_params_:
    print(param_name,": ",gs_clf.best_params_[param_name])
print("best f1 score:", gs_clf.best_score_)
# In[14]:
# This is the last part: plot classification performance vs. sample size.
import matplotlib.pyplot as plt
text_large=[]
target_large=[]
# NOTE(review): 'rb' mode with csv.reader is Python 2 style (see above).
with open('amazon_review_large.csv', 'rb') as f:
    reader = csv.reader(f, delimiter=',')
    for row in reader:
        text_large.append(row[1])
        target_large.append(row[0])
from sklearn.feature_extraction.text import TfidfVectorizer
# NOTE(review): recallList/precisionList/f1List (and `start` below) are never
# used — results are collected in `metrics` instead.
recallList = []
precisionList=[]
f1List = []
metrics = []
# Grow the sample by 300 reviews each iteration and record at what size the
# performance levels off.
for i in range(300,len(text_large),300):
    newList = []
    start = 0
    # Get the sample to be computed
    sample=text_large[:i]
    target_large1 = target_large[:i]
    # initialize the TfidfVectorizer
    tfidf_vect_large = TfidfVectorizer()
    # with stop words removed
    tfidf_vect_large = TfidfVectorizer(stop_words="english")
    # generate tfidf matrix
    # NOTE(review): this uses `tfidf_vect` (the vectorizer fitted on the small
    # dataset earlier), not `tfidf_vect_large` defined just above — looks like
    # a bug worth confirming upstream.
    dtm_large= tfidf_vect.fit_transform(sample)
    print("type of dtm:", type(dtm_large))
    print("size of tfidf matrix:", dtm_large.shape)
    metrics_large = ['precision_macro', 'recall_macro', "f1_macro"]
    #clf = MultinomialNB()
    clf_large = MultinomialNB(alpha=0.5)
    cv1 = cross_validate(clf_large, dtm_large, target_large1, scoring=metrics_large, cv=10)
    # Row layout: [sample_size, precision, recall, f1]
    newList.append(i)
    newList.append(np.average(cv1['test_precision_macro']))
    newList.append(np.average(cv1['test_recall_macro']))
    newList.append(np.average(cv1['test_f1_macro']))
    metrics.append(newList)
# now we plot the performance graph
results=np.array(metrics)
plt.plot(results[:,0], results[:,1], '-', label='precision')
plt.plot(results[:,0], results[:,2], '-', label='recall')
plt.plot(results[:,0], results[:,3], '-', label='f1')
plt.title('Impact of sample size on classification performance')
plt.ylabel('performance')
plt.xlabel('sample size')
plt.legend()
plt.show()
# In[8]:
# In[ ]:
| vigneshsriram/Python-Tutorials | Multinomial Naive Bayes/Assignment5 (3).py | Assignment5 (3).py | py | 8,988 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.stem.WordNetLemmatizer",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 42,
"usage_type": "call"
},
{... |
32274445958 | #!/opt/csw/bin/python
# coding=utf-8
from time import time
from ircbot import SingleServerIRCBot, Channel
from irclib import nm_to_n, is_channel, parse_channel_modes
from datetime import datetime
import conf.config as config
import logging
import sys
import traceback
import threading
import pluginloader
from logger import Logger
# Bot identity and connection settings come from conf/config.py.
NICK = config.NICK
PASSWORD = config.PASSWORD
SERVER = config.SERVER
PORT = config.PORT
# The owner authenticates via nick + password for raw-command access (on_privmsg).
OWNER_NICK = config.OWNER_NICK
OWNER_PASS=config.OWNER_PASS
# Plugin discovery happens once, at import time.
COMMAND_PLUGINS = pluginloader.findAllCommandPlugins()
INTERCEPTOR_PLUGINS = pluginloader.findAllInterceptorPlugins()
class MarjuBot(SingleServerIRCBot):
def __init__(self, channels, nickname, password, server, port=6667):
    """Create the bot.

    channels: dict mapping channel name -> per-channel settings dict
    (keys used elsewhere: logging, folder, ai, old, quoting, seen).
    """
    SingleServerIRCBot.__init__(self, [(server, port), password], nickname, nickname)
    self.channelsDict = channels
    self.password = password
    self.botNick = nickname
    # Populated with Channel objects in on_welcome / on_join.
    self.channels = {}
    # The logger keeps a live reference to self.channels.
    self.logger = Logger(self.channels)
def on_nicknameinuse(self, c, e):
c.nick(c.get_nickname() + "_")
def on_privnotice(self, c, e):
pass
def on_welcome(self, c, e):
c.privmsg("nickserv", "identify " + PASSWORD)
for ch in self.channelsDict.keys():
channel = Channel()
channel.logging = self.channelsDict[ch]["logging"]
channel.folder = "channels/" + self.channelsDict[ch]["folder"]
channel.ai = self.channelsDict[ch]["ai"]
channel.old = self.channelsDict[ch]["old"]
channel.quoting = self.channelsDict[ch]["quoting"]
channel.seen = self.channelsDict[ch]["seen"]
self.channels[ch] = channel
c.join(ch)
def on_topic(self, c, e):
self.logger.logTopic(e)
def on_currenttopic(self, c, e):
self.logger.logCurrentTopic(e)
def on_topicinfo(self, c, e):
self.logger.logTopicInfo(e)
def on_privmsg(self, c, e):
nick = nm_to_n(e.source())
if (nick != OWNER_NICK):
return
command = e.arguments()[0].split(" ",1)
if (len(command) == 1 or command[0] != OWNER_PASS):
return
cmd = command[1]
c.send_raw(cmd)
def on_pubnotice(self, c, e):
self.logger.logPubNotice(e)
def on_nick(self, c, e):
before = nm_to_n(e.source())
after = e.target()
for channelName, channel in self.channels.items():
if channel.has_user(before):
self.logger.logNick(channelName, before, after)
self.doSeen(before, channelName, False)
self.doSeen(after, channelName, True)
def on_join(self, c, e):
nick = nm_to_n(e.source())
channel = e.target()
if (nick == c.get_nickname()):
if (not channel in self.channels):
newChannel = Channel()
newChannel.logging = self.channelsDict[channel]["logging"]
newChannel.folder = "channels/" + self.channelsDict[channel]["folder"]
newChannel.ai = self.channelsDict[channel]["ai"]
newChannel.old = self.channelsDict[channel]["old"]
newChannel.quoting = self.channelsDict[channel]["quoting"]
self.channels[channel] = newChannel
self.channels[channel].add_user(nick)
self.logger.logSelfJoin(e, channel)
return
self.channels[channel].add_user(nick)
self.logger.logJoin(e)
self.doSeen(nick, channel, True)
def on_part(self, c, e):
nick = nm_to_n(e.source())
if (nick == c.get_nickname()):
self.logger.logSelfPart(e)
return
self.logger.logPart(e)
channel = e.target()
self.doSeen(nick, channel, False)
def on_action(self, c, e):
self.logger.logAction(e)
def on_quit(self, c, e):
nick = nm_to_n(e.source())
for channelName, channel in self.channels.items():
if channel.has_user(nick):
self.logger.logQuit(e, channelName)
self.doSeen(nick, channelName, False)
def on_kick(self, c, e):
kickee = e.arguments()[0]
if (kickee == c.get_nickname()):
self.logger.logSelfKick(e)
return
self.logger.logKick(e)
self.doSeen(kickee, e.target(), False)
def on_mode(self, c, e):
modes = parse_channel_modes(" ".join(e.arguments()))
self.logger.logMode(e, modes)
def on_pubmsg(self, c, e):
self.logger.logPubMsg(e)
if (self.parseAndDoCommand(e)):
return
self.doInterceptors(e)
def parseAndDoCommand(self, e):
command = e.arguments()[0].split(" ",1)
parameter = ""
if (len(command) > 1):
parameter = command[1].rstrip('\n')
command = command[0]
if (len(command) > 1 and command[0] == "!"):
self.doCommand(e, command[1:].lower(), parameter)
return True
return False
def doCommand(self, e, cmd, parameter):
author = nm_to_n(e.source())
channel = e.target()
msg = None
if cmd == "disconnect":
pass
#self.disconnect()
elif cmd == "die":
pass
#self.die()
elif cmd == "seen" and self.isCommandAllowedForChannel(cmd, channel):
msg = self.getSeen(channel, parameter)
else:
if not self.isCommandAllowedForChannel(cmd, channel):
return
plugin = self.findPluginByCommand(cmd)
if plugin:
threading.Thread(target=self.commandWorker, args=(plugin, parameter, channel, author)).start()
return
if msg:
self.sendResponse(msg, "MSG", channel, author)
def findPluginByCommand(self, cmd):
for plugin in COMMAND_PLUGINS.values():
if cmd in plugin["commands"]:
return pluginloader.load(plugin)
return None
def isCommandAllowedForChannel(self, cmd, channel):
if (cmd in ["quote", "addquote", "quotestat"] and not self.channels[channel].quoting):
return False
if (cmd == "seen" and not self.channels[channel].seen):
return False
return True
def commandWorker(self, plugin, parameter, channel, author):
responseType = plugin.getResponseType()
response = plugin.get(parameter, channel, author, self.channels[channel].folder)
self.sendResponse(response, responseType, channel, author)
def sendResponse(self, response, responseType, channel, author):
if not response:
return
c = self.connection
if (type(response) is not list):
response = [response]
for line in response:
if (responseType == "NOTICE"):
c.notice(author, line)
elif (responseType == "MSG"):
c.privmsg(channel, line)
line = "<" + c.get_nickname() + "> " + line
self.logger.log(channel, line)
def doInterceptors(self, e):
channel = e.target()
if (not is_channel(channel)):
return
msg = e.arguments()[0].strip()
author = nm_to_n(e.source())
for interceptor in INTERCEPTOR_PLUGINS:
if (self.isInterceptorAllowedForChannel(interceptor, channel)):
threading.Thread(target=self.interceptorWorker, args=(interceptor, msg, channel, author)).start()
def isInterceptorAllowedForChannel(self, interceptor, channel):
if (interceptor == "old" and not self.channels[channel].old):
return False
if (interceptor == "ai" and not self.channels[channel].ai):
return False
return True
def interceptorWorker(self, interceptor, msg, channel, author):
plugin = pluginloader.load(INTERCEPTOR_PLUGINS[interceptor])
responseType = plugin.getResponseType()
response = plugin.get(msg, author, self.channels[channel].folder)
self.sendResponse(response, responseType, channel, author)
def doSeen(self, nick, channel, isJoin):
unixTime = str(int(time()))
folder = self.channels[channel].folder
f = open(folder + "/seen.txt","r")
lines = f.readlines()
f.close()
nickFound = False
for index, line in enumerate(lines):
if (line.split(":")[0] == nick):
newLine = ""
nickFound = True
if (isJoin):
newLine = nick + ":" + unixTime + ":"
else:
newLine = line.split(":")
newLine[2] = unixTime
newLine = ":".join(newLine)
lines[index] = newLine + "\n"
break
f = open(folder + "/seen.txt","w")
for line in lines:
f.write(line)
if (not nickFound):
if (isJoin):
f.write(nick + ":" + unixTime + ":" + "\n")
else:
f.write(nick + "::" + unixTime + "\n")
f.close()
def getSeen(self, channel, parameter):
if (not self.channels[channel].seen):
return
folder = self.channels[channel].folder
for chname, chobj in self.channels.items():
if (channel == chname):
if (chobj.has_user(parameter)):
return parameter + " on kanalis"
break
file = open(folder + "/seen.txt")
result = ""
for line in file:
line = line.split(":")
nick = line[0]
if (nick.lower() == parameter.lower()):
start = line[1].strip()
end = line[2].strip()
timeFormat = '%d/%m/%Y %H:%M:%S'
if (start and end):
start = datetime.fromtimestamp(int(start)).strftime(timeFormat)
end = datetime.fromtimestamp(int(end)).strftime(timeFormat)
result = "Kasutaja " + nick + " oli viimati siin kanalis " + start + " kuni " + end
elif (start):
start = datetime.fromtimestamp(int(start)).strftime(timeFormat)
result = "Kasutajat " + nick + " nähti viimati siin kanalis, kui ta joinis " + start
elif (end):
end = datetime.fromtimestamp(int(end)).strftime(timeFormat)
result = "Kasutajat " + nick + " nähti viimati siin kanalis, kui ta lahkus " + end
else:
result = "???"
break
if (result):
return result
return "Kasutajat " + parameter + " ei leitud."
def log_uncaught_exceptions(ex_cls, ex, tb):
    """sys.excepthook: write any uncaught exception (except Ctrl-C) to the log."""
    if ex_cls == KeyboardInterrupt:
        return
    formatted_tb = ''.join(traceback.format_tb(tb))
    logging.critical('{0}: {1}'.format(ex_cls, ex))
    logging.critical(formatted_tb)
def main():
    """Configure file logging, install the excepthook, and start the bot."""
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(levelname)s %(message)s',
        filename='exceptions.log',
    )
    sys.excepthook = log_uncaught_exceptions
    bot = MarjuBot(config.channels, NICK, PASSWORD, SERVER, PORT)
    logging.debug(bot.start())


if __name__ == "__main__":
    main()
| sviik/marju | marjubot.py | marjubot.py | py | 11,353 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "conf.config.NICK",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "conf.config",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "conf.config.PASSWORD",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "conf.conf... |
72223824745 | import requests
# Naive web vulnerability probe: submits canned SQLi/XSS payloads to the
# target URL and greps the responses for telltale strings. Heuristic only --
# absence of a match does not mean the site is safe.
SQLI_PAYLOAD = "' OR '1'='1"
XSS_PAYLOAD = "<script>alert('XSS')</script>"

url = input("Enter the website URL: ")

# --- SQL injection: send the payload through the login form ---
response = requests.post(url, data={"username": "admin", "password": SQLI_PAYLOAD})
if "SQL syntax" in response.text:
    print("SQL injection vulnerability detected!")
else:
    print("No SQL injection vulnerability detected.")

# --- XSS: send the payload through the search form and look for reflection ---
response = requests.post(url, data={"query": XSS_PAYLOAD})
if XSS_PAYLOAD in response.text:
    print("XSS vulnerability detected!")
else:
    print("No XSS vulnerability detected.")

# --- CSRF: check the last response for a csrf_token marker ---
if "csrf_token" not in response.text:
    print("CSRF vulnerability detected!")
else:
    print("CSRF protection implemented.")
| TheSyrox/SYVulnScan | VULNSCAN.py | VULNSCAN.py | py | 958 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.post",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 27,
"usage_type": "call"
}
] |
7554777199 | import torch
import torch.nn as nn
import torch.nn.init as init
class Fire(nn.Module):
    """SqueezeNet "Fire" module.

    A 1x1 squeeze convolution reduces the channel count, then two parallel
    expand convolutions (1x1, and 3x3 same-padded) are applied and their
    outputs concatenated on the channel axis, giving
    ``expand1x1_planes + expand3x3_planes`` output channels.
    """

    def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes):
        super(Fire, self).__init__()
        self.inplanes = inplanes
        self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
        self.squeeze_activation = nn.ReLU(inplace=True)
        self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1)
        self.expand1x1_activation = nn.ReLU(inplace=True)
        self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1)
        self.expand3x3_activation = nn.ReLU(inplace=True)

    def forward(self, x):
        squeeze = self.squeeze_activation(self.squeeze(x))
        exp1x1 = self.expand1x1_activation(self.expand1x1(squeeze))
        # Fix: the 3x3 branch previously reused expand1x1_activation.
        # Numerically identical (both are ReLU), but using the branch's own
        # module keeps hooks/quantization/introspection consistent.
        exp3x3 = self.expand3x3_activation(self.expand3x3(squeeze))
        return torch.cat([exp1x1, exp3x3], dim=1)
class SqueezeNet(nn.Module):
    """SqueezeNet v1.0 image classifier (Iandola et al., 2016)."""

    def __init__(self, num_classes=1000):
        super(SqueezeNet, self).__init__()
        self.num_classes = num_classes
        # Backbone: stem conv + stacked Fire modules with three downsampling
        # max-pools (ceil_mode avoids dropping the border on odd sizes).
        self.features = nn.Sequential(
            nn.Conv2d(3, 96, kernel_size=7, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(96, 16, 64, 64),
            Fire(128, 16, 64, 64),
            Fire(128, 32, 128, 128),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(256, 32, 128, 128),
            Fire(256, 48, 192, 192),
            Fire(384, 48, 192, 192),
            Fire(384, 64, 256, 256),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(512, 64, 256, 256),
        )
        # Head: 1x1 conv maps 512 channels to per-class scores; global
        # average pooling stands in for fully-connected layers.
        self.final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            self.final_conv,
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d((1, 1))
        )
        self._init_weight()

    def _init_weight(self):
        # Kaiming-uniform for every conv except the classifier conv, which
        # gets N(0, 0.01); biases start at zero.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m is self.final_conv:
                    init.normal_(m.weight, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform_(m.weight)
                if m.bias is not None:
                    init.constant_(m.bias, 0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # (N, 3, H, W) -> (N, num_classes) logits.
        x = self.features(x)
        x = self.classifier(x)
        x = torch.flatten(x, start_dim=1)
        return x
| gaungalif/cifar10.pytorch | cifar/models/squeeze.py | squeeze.py | py | 2,764 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numbe... |
6656532235 | # -*- coding: utf-8 -*-
"""
Created on 2023-11-28 (Tue) 15:03:39
Planar Maximally Filtered Graph implementation in python
@author: I.Azuma
"""
import numpy as np
import pandas as pd
import time
import networkx as nx
from networkx.algorithms.planarity import check_planarity
from tqdm import tqdm
import matplotlib.pyplot as plt
class GraphHandler:
    """Holder for a weighted undirected graph: adjacency-matrix/edge-list
    conversions to and from ``networkx.Graph``, plus centrality calculation."""

    def __init__(self):
        self.X = np.array([[],[]])   # adjacency matrix
        self.labels = dict()         # node index -> label
        self.n_node = None
        self.n_edge = None
        self.graph = None            # networkx.Graph
        self.centrality = dict()     # node -> centrality score
        self.params = dict()         # record of settings used

    def set_data(self, df: pd.DataFrame):
        """
        set adjacency matrix dataframe

        Parameters
        ----------
        df: dataframe
            a dataframe of the adjacency matrix (index gives node labels)
        """
        idx = list(df.index)
        self.n_node = len(idx)
        self.labels = dict(zip(list(range(self.n_node)), idx))
        self.X = df.values

    def set_graph(self, graph):
        """
        set networkx.Graph object and cache its node/edge counts

        Parameters
        ----------
        graph: networkx.Graph
        """
        self.graph = graph
        self.n_edge = graph.number_of_edges()
        self.n_node = graph.number_of_nodes()

    def adj2graph(self, X, update: bool = True):
        """
        convert adjacency matrix to graph object of networkx

        Parameters
        ----------
        X: np.array
            adjacency matrix
        update: bool
            whether the stored graph is replaced by the generated one
        """
        n = X.shape[0]
        if self.X.shape[0] == 0:
            self.X = X
        idx = list(range(n))
        edge = []
        ap = edge.append
        # Upper triangle only: the graph is undirected.
        for i in range(n - 1):
            for j in range(i + 1, n):
                ap((idx[i], idx[j], X[i][j]))
        g = nx.Graph()
        g.add_weighted_edges_from(edge)
        if update:
            self.set_graph(g)
        return g

    def edge2graph(self, edge: pd.DataFrame, update: bool = True):
        """
        construct a graph from an edge dataframe

        Parameters
        ----------
        edge: pd.DataFrame
            should be of the form:
                1st column: source node (src and dest are exchangeable)
                2nd column: destination node
                3rd column: weight
        update: bool
            whether the stored graph is replaced by the generated one
        """
        x = edge.values
        x = [(v[0], v[1], v[2]) for v in x]
        g = nx.Graph()
        g.add_weighted_edges_from(x)
        if update:
            self.set_graph(g)
        return g

    def graph2edge(self, graph, sorting: bool = False):
        """
        convert a networkx.Graph object to a dataframe of edges

        Parameters
        ----------
        graph: networkx.Graph object
        sorting: bool
            whether the result is sorted by descending weight
        """
        edge = []
        ap = edge.append
        for v in graph.edges(data=True):
            ap([v[0], v[1], v[2]['weight']])
        df = pd.DataFrame(edge, columns=['source', 'dest', 'weight'])
        if sorting:
            df = df.sort_values(by='weight', ascending=False)
        return df

    def calc_centrality(self, graph, centrality='betweenness'):
        """
        calculate centrality of the given graph

        Parameters
        ----------
        graph: networkx.Graph object
        centrality: str
            indicates centrality method:
            'betweenness', 'pagerank', 'degree', or 'closeness'
        """
        if centrality == 'betweenness':
            self.centrality = nx.betweenness_centrality(graph)
        elif centrality == 'degree':
            self.centrality = nx.degree_centrality(graph)
        elif centrality in ('closeness', 'closness'):
            # Fix: only the misspelling 'closness' used to be accepted, so the
            # documented 'closeness' raised KeyError. Both now work; the old
            # spelling is kept for backward compatibility.
            self.centrality = nx.closeness_centrality(graph)
        elif centrality == 'pagerank':
            self.centrality = nx.pagerank(graph)
        else:
            raise KeyError('!! Wrong centrality: use betweenness, pagerank, degree, or closeness !!')
        self.params['centrality'] = centrality
        return self.centrality
class PMFG(GraphHandler):
    """Planar Maximally Filtered Graph construction (Tumminello et al.)."""

    def __init__(self):
        super().__init__()

    def pmfg(self, graph=None, fdr: float = 0.05, update: bool = True, boyer=True):
        """
        obtain the PMFG of the given graph

        Edges are tried in descending-weight order and kept only while the
        graph stays planar; construction stops when the planar edge budget
        3*(n-2) is reached or too many consecutive edges are rejected.

        Parameters
        ----------
        graph: networkx.Graph
            a networkx.Graph object
        fdr: float
            threshold for termination based on false discovery rate;
            must not be 0
        update: bool
            whether the stored graph is replaced by the generated one
        boyer: bool
            only selects the banner printed; both code paths ran the
            identical loop in the original (duplication removed here)
        """
        sta = time.time()
        if graph is None:
            if self.graph is None:
                # NOTE(review): __init__ sets self.X to an empty array, so
                # this None check can only trip if X was overwritten -- confirm.
                if self.X is None:
                    raise ValueError('!! Provide graph as an argument or adjucency matrix by set_data !!')
                else:
                    graph = self.adj2graph(self.X, update=False)
            else:
                graph = self.graph
        n_node = graph.number_of_nodes()
        n_fdr = int(1 / fdr)
        self.params['fdr'] = fdr
        graph = self._sort_graph(graph)
        nmax = 3 * (n_node - 2)  # max edge count of a planar graph
        g = nx.Graph()
        rejected = 0
        # Deduplication: the original repeated the identical greedy loop in
        # both branches; only the printed banner differed.
        if boyer:
            print("Boyer's method")
        else:
            print("Left-Right method")
        for e in tqdm(graph):
            g.add_edge(e['source'], e['dest'], weight=e['weight'])
            if check_planarity(g)[0]:
                n_edge = g.number_of_edges()
                if n_edge == nmax:
                    print('--- terminated (#edge={} reached the max) ---'.format(n_edge))
                    break
                else:
                    rejected = 0
            else:
                # Adding this edge breaks planarity; back it out.
                g.remove_edge(e['source'], e['dest'])
                rejected += 1
                if rejected > n_fdr * n_node:  # equivalent to FDR
                    print('--- terminated (#rejected={} reached the FDR threshold) ---'.format(rejected))
                    break
        end = time.time()
        h, mod = divmod(end - sta, 3600)
        m, s = divmod(mod, 60)
        print("{0} hr {1} min {2} sec".format(int(h), int(m), round(s, 4)))
        if update:
            self.set_graph(g)
        return g

    def _sort_graph(self, graph):
        """
        sort the given graph's edges by descending weight

        Parameters
        ----------
        graph: networkx.Graph
        """
        sorted_edge = []
        ap = sorted_edge.append
        for s, d, w in sorted(graph.edges(data=True), key=lambda x: -x[2]['weight']):
            ap({'source': s, 'dest': d, 'weight': w['weight']})
        return sorted_edge
class GraphViewer(GraphHandler):
    """Plot a stored graph with matplotlib/networkx, sizing and colouring
    nodes by centrality.

    NOTE(review): indentation of this file was reconstructed from syntax;
    nesting choices flagged inline should be confirmed against the repo.
    """

    def __init__(self):
        super().__init__()
        self.pos = None  # node positions from calc_pos
        # Default matplotlib/networkx drawing options, overridable per call.
        self.plt_params = {'figsize':(12,8),'edge_color':'lightgrey','width':0.2,
                           'font_size':14,'node_color':'royalblue','node_size':300,
                           'alpha':0.8}

    def plot(self,graph=None,pos=None,labels:dict=None,n_label:int=5,output:str='',
             fix_size:bool=False,fix_color:bool=False,
             cmap:str='Blues',centrality:str='pagerank',size_params:dict=None,
             plt_params:dict=dict()):
        """
        plot the given graph
        relatively takes a long time

        Parameters
        ----------
        graph: networkx.Graph object
        pos: dict
            positions of nodes calculated by the calc_pos method
        labels: dict
            keys and values are node indices and corresponding labels,
            respectively (like an inverter)
        n_label: int
            number of nodes to be labeled; 0 labels all nodes
        output: str
            path for the image when saved (empty string: not saved)
        centrality: str
            centrality method: 'betweenness', 'pagerank', 'degree', or 'closeness'
        size_params: dict
            parameters for size preparation in the _prep_size method
        plt_params: dict
            parameters overriding self.plt_params for this plot
        """
        if graph is None:
            if self.graph is None:
                raise ValueError('!! Provide graph to be visualized !!')
        else:
            # NOTE(review): nesting reconstructed -- a provided graph is
            # assumed to be stored here; confirm against the repo.
            self.set_graph(graph)
        if pos is None:
            if self.pos is None:
                raise ValueError('!! No pos: provide or prepare pos by calc_pos before this process !!')
            else:
                pos = self.pos
        if len(self.centrality)==0:
            self.calc_centrality(self.graph,centrality)
        # size preparation (skipped when fix_size keeps a constant node size)
        if not fix_size:
            if size_params is not None:
                size = self._prep_size(**size_params)
            else:
                size = self._prep_size()
            size = list(size.values())
        # color preparation (colormap over centrality unless fix_color)
        if not fix_color:
            cm = plt.get_cmap(cmap)
            node_color = list(self.centrality.values())
        # label preparation: label all nodes, or only the n_label most central
        if labels is not None:
            if n_label==0:
                label_focus = labels
            else:
                sc = sorted(self.centrality.items(),key=lambda x: x[1],reverse=True)[:n_label]
                sc = [v[0] for v in sc]
                label_focus = {k:v for k,v in labels.items() if k in sc}
        # plot
        self.plt_params.update(plt_params)
        plt.figure(figsize=self.plt_params['figsize'])
        nx.draw_networkx_edges(self.graph,pos,edge_color=self.plt_params['edge_color'],
                               width=self.plt_params['width'])
        alpha = self.plt_params['alpha']
        if fix_size:
            if fix_color:
                nx.draw_networkx_nodes(self.graph,pos,node_size=self.plt_params['node_size'],
                                       node_color=self.plt_params['node_color'],alpha=alpha)
            else:
                nx.draw_networkx_nodes(self.graph,pos,node_size=self.plt_params['node_size'],
                                       node_color=node_color,cmap=cm,alpha=alpha)
        else:
            if fix_color:
                nx.draw_networkx_nodes(self.graph,pos,node_size=size,node_color=self.plt_params['node_color'],
                                       alpha=alpha)
            else:
                nx.draw_networkx_nodes(self.graph,pos,node_size=size,node_color=node_color,cmap=cm,alpha=alpha)
        if labels is not None:
            nx.draw_networkx_labels(self.graph,pos,label_focus,font_size=self.plt_params['font_size'])
        plt.axis('off')
        if len(output) > 0:
            plt.savefig(output)
        plt.show()

    def plot_cluster(self,graph=None,cluster:dict=None,method:str='pagerank',layout:str='spring'):
        """
        plot the given graph with cluster information
        relatively takes a long time

        Parameters
        ----------
        graph: networkx.Graph object
        layout: str
            determines the shape of the network: 'spring' or 'kamada_kawai'
        cluster: dict
            indicates where each node belongs
        method: str
            centrality method: 'betweenness', 'pagerank', 'degree', or 'closeness'
        """
        raise NotImplementedError

    def calc_pos(self,graph=None,layout:str='spring'):
        """
        compute and store node positions for plotting
        relatively takes a long time

        Parameters
        ----------
        graph: networkx.Graph object
        layout: str
            determines the shape of the network: 'spring' or 'kamada_kawai'
        """
        if graph is not None:
            self.graph = graph
        if layout=='spring':
            self.pos = nx.spring_layout(self.graph)
        elif layout=='kamada_kawai':
            self.pos = nx.kamada_kawai_layout(self.graph)
        else:
            raise KeyError('!! Wrong layout: use spring or kamada_kawai !!')

    def set_inverter(self,inverter:dict):
        """
        set the index inverter, from int indices to string ones
        """
        self.idx_inverter = inverter

    def _prep_size(self,basesize:int=500,power:bool=False,power_val:float=1.5):
        """
        prepare node size according to centrality

        basesize: int
            base size of nodes (multiplied by each centrality value)
        power: bool
            whether node size is prepared by power scaling
        power_val: float
            the value of power for scaling
        """
        size = [basesize*v for v in self.centrality.values()]
        if power:
            size = [np.power(v,power_val) for v in size]
        return dict(zip(self.centrality.keys(),size))
| groovy-phazuma/ImmunSocialNetwork | network_models/pmfg/pmfg.py | pmfg.py | py | 13,712 | python | en | code | null | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "networkx.Graph",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
... |
889446688 | from numpy.lib import average
import pandas as pd
import numpy as np
import re
import nltk
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split,learning_curve,GridSearchCV
from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer,TfidfTransformer,TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import Pipeline
from sklearn import preprocessing, linear_model, naive_bayes, metrics
from sklearn import decomposition, ensemble
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix,f1_score
# Load the labelled clauses and normalise the text once at import time.
df = pd.read_csv("mapping_nb.csv")
stop = stopwords.words('english')
stop.extend(['a','an','the','to'])
#nltk.download('wordnet')
w_tokenizer = nltk.tokenize.WhitespaceTokenizer()
lemmatizer = nltk.stem.WordNetLemmatizer()
def lemmatize_text(text):
    # Lemmatise each whitespace-separated token and rejoin with single spaces.
    return ' '.join([lemmatizer.lemmatize(w) for w in w_tokenizer.tokenize(text)])
df['Clause'] = df.Clause.apply(lemmatize_text)
stemmer = SnowballStemmer("english")
# NOTE(review): the stemmed result below is discarded (no assignment), and
# the lambda iterates the characters of each clause, not its words -- confirm
# whether stemming was meant to be applied at all.
df['Clause'].apply(lambda x: ' '.join([stemmer.stem(y) for y in x]))
# Lower-case, strip tab/newline, drop parenthesised text, section numbers
# and digits, then remove stop words and trim whitespace.
df['Clause'] = df['Clause'].str.lower()
df['Clause'] = df['Clause'].str.replace('\t','')
df['Clause'] = df['Clause'].str.replace('\n',' ')
df['Clause'] = df['Clause'].str.replace(r"\(.*\)","",regex=True)
df['Clause'] = df['Clause'].str.replace('\d.\d.\d.','',regex=True)
df['Clause'] = df['Clause'].str.replace('\d', '',regex=True)
df['Clause'] = df['Clause'].str.split(' ').apply(lambda x: ' '.join(k for k in x if k not in stop))
df['Clause'] = df['Clause'].str.strip()
def confusionMatrix(Y_test, prediction):
    """Return the confusion matrix as a DataFrame with true:/pred: labels."""
    unique_label = np.unique(Y_test)
    matrix = confusion_matrix(prediction, Y_test, labels=unique_label)
    row_names = ['true:{:}'.format(x) for x in unique_label]
    col_names = ['pred:{:}'.format(x) for x in unique_label]
    return pd.DataFrame(matrix, index=row_names, columns=col_names)
def data(tag):
    """Train/test split of df for one tag column plus word-level TF-IDF
    features (top 5000 terms, vectorizer fitted on the full corpus)."""
    X_train, X_test, Y_train, Y_test = train_test_split(
        df.Clause, df[tag], test_size=0.2, random_state=1234)
    vectorizer = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', max_features=5000)
    vectorizer.fit(df["Clause"])
    return (vectorizer.transform(X_train),
            vectorizer.transform(X_test),
            Y_train, Y_test)
def learningCurve(estimator, X_train, Y_train):
    """Plot training vs. validation accuracy as the training set grows."""
    sizes, train_scores, test_scores = learning_curve(
        estimator=estimator, X=X_train, y=Y_train,
        cv=10, train_sizes=np.linspace(0.1, 1.0, 10), n_jobs=1)
    train_mean = np.mean(train_scores, axis=1)
    train_std = np.std(train_scores, axis=1)
    test_mean = np.mean(test_scores, axis=1)
    test_std = np.std(test_scores, axis=1)
    # Mean curves with +/- one standard deviation bands.
    plt.plot(sizes, train_mean, color='blue', marker='o', markersize=5, label='Training Accuracy')
    plt.fill_between(sizes, train_mean + train_std, train_mean - train_std, alpha=0.15, color='blue')
    plt.plot(sizes, test_mean, color='green', marker='+', markersize=5, linestyle='--', label='Validation Accuracy')
    plt.fill_between(sizes, test_mean + test_std, test_mean - test_std, alpha=0.15, color='green')
    plt.title('Learning Curve')
    plt.xlabel('Training Data Size')
    plt.ylabel('Model accuracy')
    plt.grid()
    plt.legend(loc='lower right')
    plt.show()
def svm(tag, parameters):
    """Grid-searched SVC (probability=True) on TF-IDF features.

    Returns a dict with the fitted model, accuracy, and weighted F1.
    """
    X_train, X_test, Y_train, Y_test = data(tag)
    searcher = GridSearchCV(SVC(probability=True), parameters)
    searcher.fit(X_train, Y_train)
    predicted = searcher.predict(X_test)
    return {"model": searcher,
            "accuracy": metrics.accuracy_score(predicted, Y_test),
            "f1_Score": f1_score(Y_test, predicted, average="weighted")}
def naive_bayes_classifier(tag):
    """Multinomial naive Bayes over a Count->TF-IDF pipeline.

    Uses hand-set class priors tuned for the 4-class label distribution.
    Returns a dict with the fitted pipeline, accuracy, and weighted F1.
    """
    X_train, X_test, Y_train, Y_test = train_test_split(
        df.Clause, df[tag], test_size=0.2, random_state=123)
    # Standalone vectorisation kept from the original experiments; the
    # pipeline below re-vectorises internally, so these are not reused.
    count_vect = CountVectorizer()
    X_train_counts = count_vect.fit_transform(X_train)
    tfidf_transformer = TfidfTransformer()
    X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
    prior_tag1 = [0.676, 0.198, 0.077, 0.049]
    text_clf = Pipeline([
        ('vect', CountVectorizer()),
        ('tfidf', TfidfTransformer()),
        ('clf', MultinomialNB(class_prior=prior_tag1)),
    ])
    text_clf = text_clf.fit(X_train, Y_train)
    predicted = text_clf.predict(X_test)
    return {"model": text_clf,
            "accuracy": np.mean(predicted == Y_test),
            "f1_Score": f1_score(Y_test, predicted, average="weighted")}
def random_forest(tag, parameters):
    """Grid-searched random forest on TF-IDF features.

    Prints the best hyper-parameters; returns model/accuracy/weighted F1.
    """
    X_train, X_test, Y_train, Y_test = data(tag)
    searcher = GridSearchCV(ensemble.RandomForestClassifier(), parameters)
    searcher.fit(X_train, Y_train)
    predicted = searcher.predict(X_test)
    print(searcher.best_params_)
    return {"model": searcher,
            "accuracy": metrics.accuracy_score(Y_test, predicted),
            "f1_Score": f1_score(Y_test, predicted, average="weighted")}
def linearmodel(tag):
    """Logistic-regression baseline on TF-IDF features.

    Returns a dict with the fitted model, accuracy, and weighted F1.
    """
    X_train, X_test, Y_train, Y_test = data(tag)
    fitted = linear_model.LogisticRegression().fit(X_train, Y_train)
    predicted = fitted.predict(X_test)
    return {"model": fitted,
            "accuracy": metrics.accuracy_score(Y_test, predicted),
            "f1_Score": f1_score(Y_test, predicted, average="weighted")}
def gradientboosting(tag, parameters):
    """Grid-searched gradient boosting on TF-IDF features.

    Prints the best hyper-parameters; returns model/accuracy/weighted F1.
    """
    X_train, X_test, Y_train, Y_test = data(tag)
    searcher = GridSearchCV(ensemble.GradientBoostingClassifier(), parameters)
    searcher.fit(X_train, Y_train)
    predicted = searcher.predict(X_test)
    print(searcher.best_params_)
    return {"model": searcher,
            "accuracy": metrics.accuracy_score(Y_test, predicted),
            "f1_Score": f1_score(Y_test, predicted, average="weighted")}
def decision_tree(tag):
    """Depth-limited (max_depth=10) decision tree on TF-IDF features.

    Returns a dict with the fitted model, accuracy, and weighted F1.
    """
    X_train, X_test, Y_train, Y_test = data(tag)
    fitted = DecisionTreeClassifier(max_depth=10).fit(X_train, Y_train)
    predicted = fitted.predict(X_test)
    return {"model": fitted,
            "accuracy": metrics.accuracy_score(Y_test, predicted),
            "f1_Score": f1_score(Y_test, predicted, average="weighted")}
def topic_modeling(tag):
    """Fit a 20-topic online LDA on the training clauses and print, for each
    topic, its five highest-weight words."""
    X_train, X_test, Y_train, Y_test = train_test_split(
        df.Clause, df[tag], test_size=0.2, random_state=123)
    count_vect = CountVectorizer(analyzer='word', token_pattern=r'\w{1,}')
    count_vect.fit(df['Clause'])
    xtrain_count = count_vect.transform(X_train)
    xvalid_count = count_vect.transform(X_test)
    lda_model = decomposition.LatentDirichletAllocation(
        n_components=20, learning_method='online', max_iter=20)
    X_topics = lda_model.fit_transform(xtrain_count)
    topic_word = lda_model.components_
    vocab = count_vect.get_feature_names()
    print(vocab)
    n_top_words = 5
    topic_summaries = []
    for topic_dist in topic_word:
        top_words = np.array(vocab)[np.argsort(topic_dist)][:-(n_top_words + 1):-1]
        topic_summaries.append(' '.join(top_words))
    print(topic_summaries)
# Grid-search spaces, narrowed down to the best values found in earlier
# runs; the wider grids are kept as comments for reference.
# param_rf = {
#     'n_estimators': [100,300,500,600,700,800],
#     "max_depth":[20,22,26,28,30,32,34,36]
#     }
param_rf = {'max_depth': [32], 'n_estimators': [600]}
# param_boosting = {"n_estimators":[300,400],"max_depth":[10,15]}
param_boosting = {"n_estimators":[300],"max_depth":[10]}
# Ad-hoc experiment invocations, kept disabled:
# print("Naive Bayes Tag3: ",naive_bayes_classifier("Tag3"))
# print("SVM Tag3: ",svm("Tag1",{'kernel':('linear', 'rbf')}))
# print("Random Forest Tag3: ",random_forest("Tag3",param_rf))
# print("Linear Model Tag3: ",linearmodel("Tag3"))
# print("Gradient Boosting Tag3: ",gradientboosting("Tag3",param_boosting))
# print("Decision Tree Tag3: ",decision_tree("Tag3"))
#topic_modeling("Tag1")
def voting_clasifier(tag):
    """Soft-voting ensemble of logistic regression, SVM, and random forest.

    Each base model is trained via its own helper; prints the ensemble's
    test accuracy and returns the fitted VotingClassifier.
    (Name keeps the original spelling for callers.)
    """
    lr = linearmodel(tag)['model']
    svc = svm(tag, {'kernel': ('linear', 'rbf')})['model']
    forest = random_forest(tag, param_rf)['model']
    ensemble_clf = ensemble.VotingClassifier(
        estimators=[('lr', lr), ('svm', svc), ('rf', forest)],
        voting='soft')
    X_train, X_test, Y_train, Y_test = data(tag)
    ensemble_clf = ensemble_clf.fit(X_train, Y_train)
    pred = ensemble_clf.predict(X_test)
    acc = metrics.accuracy_score(Y_test, pred)
    print("VotingClassifier Accuracy: ", acc)
    return ensemble_clf
def classify(par, tag):
    """Classify an iterable of clause strings with the voting ensemble.

    Trains the ensemble for the given tag, TF-IDF-transforms the input
    clauses with a vectorizer fitted on the full corpus, and returns the
    predicted labels.
    """
    model = voting_clasifier(tag)
    vectorizer = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', max_features=5000)
    vectorizer.fit(df["Clause"])
    features = vectorizer.transform(par)
    return model.predict(features)
| saarthakbabuta1/loan-agreement | machine_learning.py | machine_learning.py | py | 10,438 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "nlt... |
72027853863 |
# -*- coding: utf-8 -*-
# Split GroupBy intervals using windows.
import apache_beam as beam

# Basic Dataflow settings:
# job name, project name, and the staging/temp file locations.
options = beam.options.pipeline_options.PipelineOptions()
gcloud_options = options.view_as(
    beam.options.pipeline_options.GoogleCloudOptions)
gcloud_options.job_name = 'dataflow-tutorial7'
gcloud_options.project = 'PROJECTID'
gcloud_options.staging_location = 'gs://PROJECTID/staging'
gcloud_options.temp_location = 'gs://PROJECTID/temp'

# Dataflow scaling settings:
# maximum worker count, machine type, etc.
# Worker disk size defaults are large (250GB batch / 420GB streaming),
# so specifying only the size actually needed is recommended.
worker_options = options.view_as(beam.options.pipeline_options.WorkerOptions)
worker_options.disk_size_gb = 20
worker_options.max_num_workers = 2
# worker_options.num_workers = 2
# worker_options.machine_type = 'n1-standard-8'

# Switching the execution environment:
# DirectRunner: run on the local machine
# DataflowRunner: run on Dataflow
# options.view_as(beam.options.pipeline_options.StandardOptions).runner = 'DirectRunner'
options.view_as(beam.options.pipeline_options.StandardOptions).runner = 'DataflowRunner'
def assign_timevalue(v):
# pcollectionのデータにタイムスタンプを付加する
# 後段のwindowはこのタイムスタンプを基準に分割される
# ここでは適当に乱数でタイムスタンプを入れている
import apache_beam.transforms.window as window
import random
import time
return window.TimestampedValue(v, int(time.time()) + random.randint(0, 1))
def modify_data3(kvpair):
# groupbyによりkeyとそのkeyを持つデータのリストのタプルが渡される
# windowで分割されているのでデータ数が少なくなる
# kvpair = (u'word only', [4, 4, 6, 6, 7])
return {'count_type': kvpair[0],
'sum': sum(kvpair[1])
}
p7 = beam.Pipeline(options=options)
query = 'SELECT * FROM [PROJECTID:testdataset.testtable3] LIMIT 20'
(p7 | 'read' >> beam.io.Read(beam.io.BigQuerySource(project='PROJECTID', use_standard_sql=False, query=query))
| "assign tv" >> beam.Map(assign_timevalue)
| 'window' >> beam.WindowInto(beam.window.FixedWindows(1))
| 'pair' >> beam.Map(lambda x: (x['count_type'], x['word_count']))
| "groupby" >> beam.GroupByKey()
| 'modify' >> beam.Map(modify_data3)
| 'write' >> beam.io.Write(beam.io.BigQuerySink(
'testdataset.testtable5',
schema='count_type:STRING, sum:INTEGER',
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE))
)
p7.run() # .wait_until_finish()
| hayatoy/dataflow-tutorial | tutorial7.py | tutorial7.py | py | 2,908 | python | ja | code | 25 | github-code | 36 | [
{
"api_name": "apache_beam.options.pipeline_options.PipelineOptions",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "apache_beam.options",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "apache_beam.options",
"line_number": 11,
"usage_type": "attri... |
22344125415 | import logging
import re
from investments.models import RealEstate
from .utils import (check_skip, create_investment, extract_data, get_id,
get_interest_range, normalize_meta, normalize_number,
parse_markup_in_url, price_range, scrape_page)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
LANG = "en"
COUNTRIES = ["IT", ]
TYPE = RealEstate
SOURCE = "caseinpiemonte"
THOUSAND_SEP = "."
CURRENCY = "EUR"
PRICE_REGEXP = re.compile(r"([\d.]+)")
SURFACE_REGEXP = re.compile(r"([\d.]+)")
BASE_URL = "https://caseinpiemonte.com/properties"
SELECTORS = {"title": ".rh_page__head .rh_page__title",
"address": ".rh_page__head .rh_page__property_address",
"price": ".rh_page__head .price",
"efficency": ".energy-efficency",
"description": ".rh_content p@html",
"images": ".flexslider .slides li a@href",
"tags": ".rh_property__features_wrap .rh_property__feature",
"meta": {".rh_property__meta_wrap .rh_property__meta": {
"title": "h4",
"value": ".figure",
"label": ".label",
}},
}
def scrape_site(noupdate):
""" Scrapes the needed pages of the site to extract investments
"""
count = 0
for page in range(1, 100):
url = "{base}/page/{page}".format(base=BASE_URL, page=page)
for url in scrape_page(url, "article.rh_list_card .rh_overlay__contents a"):
count += 1
if check_skip(noupdate, TYPE, SOURCE, url):
yield None
continue
investment = scrape_investment(url)
if not investment:
logger.warning("Ended parsing %d investment on page %d" % (count, page))
break
logger.warning("Parsing investment number %d on page %d: %s" % (count, page, url))
yield investment
if count:
logger.info("Parsed %d investment on page %d" % (count, page))
else:
logger.warning("Stopped parsing on page %d" % page)
def scrape_investment(url):
""" Scrapes a single investments
"""
html = parse_markup_in_url(url)
result = extract_data(SELECTORS, html)
if not result:
logger.error("Empty result for %s" % url)
return
result["id"] = get_id(SOURCE, url)
result["url"] = "https:" + url
if "meta" in result:
if "efficency" in result:
result["meta"].append({"title": "efficency", "value": result["efficency"]})
result["meta"] = normalize_meta(result["meta"])
if "area" in result["meta"]:
result["surface"] = normalize_number(result["meta"]["area"], SURFACE_REGEXP, THOUSAND_SEP)
if "price" in result:
result["currency"] = CURRENCY
result["price"] = normalize_number(
result["price"], PRICE_REGEXP, THOUSAND_SEP)
result["price"] = price_range(result["price"])
result["interest"] = get_interest_range(COUNTRIES)
if "description" in result:
result["description"] = " ".join(result["description"])
return result
def save_investment(item):
return create_investment(item, TYPE, COUNTRIES, SOURCE, LANG)
"""
META
{
"superficie":"190m2",
"prezzo di affitto":"Prezzo di affitto CHF 5'500 - Approssimativamente EUR 5'060-"
}
"""
| Constrictiongithub/constriction_website | importer/management/commands/scrapers/caseinpiemonte.py | caseinpiemonte.py | py | 3,395 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "investments.models.RealEstate",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "re... |
72570992424 | # turns out there's a better website for records than the one this scraper uses
# scraper to query AZ SOS website for possible matches for each retailer
import urllib.request
import urllib.parse
import bs4 as bs
import re
import pandas as pd
import os
import numpy as np
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
import pyautogui
import time
import string
from wordfreq import word_frequency
# function for getting possible matches for record
def AZ_crawling(name, IMPAQ_ID, first):
# navigate to search url
driver.get('https://apps.azsos.gov/apps/tntp/se.html')
if first == True:
time.sleep(12)
else:
time.sleep(3)
# enter name of address and search
driver.find_element_by_xpath('//*[@id="Search"]').click()
driver.find_element_by_xpath('//*[@class="form-control control vk"]')\
.send_keys(name)
driver.find_element_by_xpath('//*[@id="Right"]').click()
time.sleep(20)
# start a dataframe to keep track of results
result_df = pd.DataFrame(columns=['Filing Number', 'Entity Name',
'Operation Status', 'Agent', 'Agent Address', 'Store Address', 'IMPAQ_ID'])
# go through the rows of the results
if 'yielded no results' not in driver.find_element_by_xpath(
'//*[@class="control-label"]').text:
# set items per page to all
drop_down = driver.find_element_by_xpath('//*[@class="k-icon k-i-arrow-s"]')
drop_down.click()
time.sleep(1)
drop_down.find_element_by_xpath('//*[@data-offset-index="3"]').click()
time.sleep(1)
#filter out trademark/trade name registrations
count = 0
for i in driver.find_elements_by_xpath('//*[@role="row"]'):
regis_type = i.find_elements_by_xpath('//*[@role="gridcell"]')[2].text
if regis_type != 'Trade Name' and regis_type != 'Trademark' and \
regis_type != 'AZCC Name Reservation' and 'Partnership' not in \
regis_type:
result = pd.Series()
# record filing number
result['Filing Number'] = i.find_elements_by_xpath(
'//*[@role="gridcell"]')[0].text
# click on expand arrow
i.find_element_by_xpath('//*[@class="k-icon k-i-expand"]').click()
time.sleep(10)
# record other info
details = driver.find_elements_by_xpath('//*[@class="k-detail-row' +
'k-alt"]')[count]
result['Entity Name'] = details.find_elements_by_xpath(
'//*[@class="row"]')[0].find_elements_by_xpath('//*[@class="'+
'col-xs-12 col-sm-2"]')[0].text
result['Operation Status'] = details.find_elements_by_xpath(
'//*[@class="row"]')[1].find_elements_by_xpath('//*[@class="'+
'col-xs-12 col-sm-2"]')[1].text
result['Date Last Updated'] = details.find_elements_by_xpath(
'//*[@class="row"]')[3].find_elements_by_xpath('//*[@class="'+
'col-xs-12 col-sm-2"]')[1].text
result['Agent Name'] = details.find_elements_by_xpath(
'//*[@class="row"]')[9].find_elements_by_xpath('//*[@class="'+
'col-xs-12 col-sm-2"]')[0].text
result['Agent Address'] = details.find_elements_by_xpath(
'//*[@class="row"]')[10].find_elements_by_xpath('//*[@class="'+
'col-xs-12 col-sm-2"]')[1].text
result['IMPAQ_ID']= IMPAQ_ID
result_df = result_df.append(result)
count +=1
import pdb; pdb.set_trace()
path = 'C:/Users/lpatterson/AnacondaProjects/Tribal_Master'
# start chrome webdriver
chrome_options = Options()
driver = webdriver.Chrome(executable_path= \
path + "/chrome_driver/chromedriver.exe")
actionChains = ActionChains(driver)
df= pd.read_csv(path + "/step_3_work/output/full_retailer_list.csv")
df= df.loc[df['State']=='AZ',:]
df.index=df['IMPAQ_ID']
first = True
for i,row in df.iloc[12:,:].iterrows():
if first == True:
AZ_crawling(row['DBA Name_update'],row['IMPAQ_ID'], first)
first = False
else:
AZ_crawling(row['DBA Name_update'],row['IMPAQ_ID'], first)
# driver = webdriver.Chrome(executable_path= \
# path + "/chrome_driver/chromedriver.exe")
# actionChains = ActionChains(driver)
| Luke-Patterson/state_scrap | AZ/old/AZ_1_crawler_old.py | AZ_1_crawler_old.py | py | 4,561 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.sleep",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number"... |
2317107691 |
import os
import numpy as np
import keras
from keras import models, layers
from PIL import Image
from numpy import asarray
from cv2 import cv2
from keras.models import load_model
from os.path import join, dirname, realpath
from flask import Flask,render_template,request
import skimage
from skimage.transform import resize
from matplotlib import pyplot as plt
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config["CACHE_TYPE"] = "null"
app.static_folder = 'static'
f = ""
original_filename =""
cwd = str(os.getcwd())
UPLOADS_PATH = join(dirname(realpath(__file__)), 'static\\')
encoder = load_model("encoder.h5")
decoder = load_model("decoder.h5")
compressor = load_model("compressor.h5")
def preprocess(path):
img = Image.open(path)
image = np.asarray(img)
image = image.astype('float32') / 255.
img = np.expand_dims(image,axis=-1)
img = np.expand_dims(image,axis=-0)
return img
@app.route('/')
def upload_image():
if os.path.exists(cwd+"\\static\\decompressed.jpg"):
os.remove(cwd+"\\static\\decompressed.jpg")
if os.path.exists(cwd+"\\static\\intermediate.jpg"):
os.remove(cwd+"\\static\\intermediate.jpg")
if os.path.exists(cwd+"\\static\\compressed.npy"):
os.remove(cwd+"\\static\\compressed.npy")
return render_template('index.html')
@app.route('/imageuploader',methods=['GET','POST'])
def image_upload():
if request.method=='POST':
f = request.files['image']
f.save(os.path.join(UPLOADS_PATH,f.filename))
original_filename = f.filename
path = cwd+"\\static\\"+f.filename
image = Image.open(path)
img = preprocess(path)
output = encoder.predict(img)
path = cwd+"\\static\\compressed"
np.save(path, output)
img = np.resize(image, (28, 28,1))
code = compressor.predict(img[None])[0]
path = cwd+"\\static\\intermediate.jpg"
plt.imsave(path,code.reshape([code.shape[-1]//2,-1]))
return render_template('compress.html',filename = f.filename)
def postprocess(img):
image = np.reshape(img, (28, 28, 1))
image = image * 255
return image
@app.route('/npyuploader',methods=['GET','POST'])
def npy_upload():
if request.method=='POST':
print(original_filename)
f = request.files['npy']
f.save(os.path.join(UPLOADS_PATH,f.filename))
path = cwd+"\\static\\"+f.filename
compressed_img = np.load(path)
decompressed_img = decoder.predict(compressed_img)
decompressed_img = postprocess(decompressed_img)
path = cwd+"\\static\\decompressed.jpg"
cv2.imwrite(path, decompressed_img)
return render_template('decompress.html',filename = original_filename)
if __name__ == '__main__':
app.run() | Roboramv2/Image-compression | 1_autoencoder/flask/app.py | app.py | py | 2,824 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number... |
3289841382 | import copy
import abc
import logging
import weakref
import math
from collections import defaultdict
try:
from collections import OrderedDict
except ImportError: #pragma:nocover
from ordereddict import OrderedDict
from pyomo.core.kernel.component_interface import \
(IActiveObject,
ICategorizedObject,
IComponent,
IComponentContainer,
_ActiveComponentContainerMixin)
from pyomo.core.kernel.component_objective import IObjective
from pyomo.core.kernel.component_variable import IVariable, variable
from pyomo.core.kernel.component_constraint import IConstraint
from pyomo.core.kernel.component_dict import ComponentDict
from pyomo.core.kernel.component_tuple import ComponentTuple
from pyomo.core.kernel.component_list import ComponentList
from pyomo.core.kernel.component_map import ComponentMap
from pyomo.core.kernel.component_suffix import import_suffix_generator
from pyomo.core.kernel.symbol_map import SymbolMap
import pyomo.opt
import six
from six import itervalues, iteritems
logger = logging.getLogger('pyomo.core')
_no_ctype = object()
# used frequently in this file,
# so I'm caching it here
_active_flag_name = "active"
class IBlockStorage(IComponent,
IComponentContainer,
_ActiveComponentContainerMixin):
"""A container that stores multiple types.
This class is abstract, but it partially implements the
:class:`ICategorizedObject` interface by defining the
following attributes:
Attributes:
_is_component: :const:`True`
_is_container: :const:`True`
"""
_is_component = True
_is_container = True
_child_storage_delimiter_string = "."
_child_storage_entry_string = "%s"
__slots__ = ()
#
# These methods are already declared abstract on
# IComponentContainer, but we redeclare them here to
# point out that they can accept a ctype
#
@abc.abstractmethod
def children(self, *args, **kwds):
raise NotImplementedError #pragma:nocover
@abc.abstractmethod
def components(self, *args, **kwds):
raise NotImplementedError #pragma:nocover
#
# Interface
#
def clone(self):
"""
Clones this block. Returns a new block with whose
parent pointer is set to :const:`None`. Any
components encountered that are descendents of this
block will be deepcopied, otherwise a reference to
the original component is retained.
"""
save_parent, self._parent = self._parent, None
try:
new_block = copy.deepcopy(
self, {
'__block_scope__': {id(self): True, id(None): False},
'__paranoid__': False,
})
except:
new_block = copy.deepcopy(
self, {
'__block_scope__': {id(self): True, id(None): False},
'__paranoid__': True,
})
finally:
self._parent = save_parent
return new_block
@abc.abstractmethod
def blocks(self, *args, **kwds):
raise NotImplementedError #pragma:nocover
@abc.abstractmethod
def collect_ctypes(self, *args, **kwds):
raise NotImplementedError #pragma:nocover
class _block_base(object):
"""
A base class shared by :class:`block` and
:class:`tiny_block` that implements a few
:class:`IBlockStorage` abstract methods.
"""
__slots__ = ()
# Blocks do not change their active status
# based on changes in status of their children
def _increment_active(self):
pass
def _decrement_active(self):
pass
def activate(self,
shallow=True,
descend_into=False,
_from_parent_=False):
"""Activates this block.
Args:
shallow (bool): If :const:`False`, all children
of the block will be activated. By default,
the active status of children are not
changed.
descend_into (bool): Indicates whether or not to
perform the same action on sub-blocks. The
default is :const:`False`, as a shallow
operation on the top-level block is
sufficient.
"""
block_ctype = self.ctype
if (not self.active) and \
(not _from_parent_):
# inform the parent
parent = self.parent
if parent is not None:
parent._increment_active()
self._active = True
if not shallow:
for child in self.children():
if isinstance(child, IActiveObject):
child.activate(_from_parent_=True)
if descend_into:
for obj in self.components(ctype=block_ctype):
obj.activate(shallow=shallow,
descend_into=False,
_from_parent_=True)
def deactivate(self,
shallow=True,
descend_into=False,
_from_parent_=False):
"""Deactivates this block.
Args:
shallow (bool): If :const:`False`, all children
of the block will be deactivated. By
default, the active status of children are
not changed, but they become effectively
inactive for anything above this block.
descend_into (bool): Indicates whether or not to
perform the same action on sub-blocks. The
default is :const:`False`, as a shallow
operation on the top-level block is
sufficient.
"""
block_ctype = self.ctype
if self.active and \
(not _from_parent_):
# inform the parent
parent = self.parent
if parent is not None:
parent._decrement_active()
self._active = False
if not shallow:
for child in self.children():
if isinstance(child, IActiveObject):
child.deactivate(_from_parent_=True)
if descend_into:
for obj in self.components(ctype=block_ctype):
obj.deactivate(shallow=shallow,
descend_into=False,
_from_parent_=True)
def child(self, key):
"""Get the child object associated with a given
storage key for this container.
Raises:
KeyError: if the argument is not a storage key
for any children of this container
"""
try:
return getattr(self, key)
except AttributeError:
raise KeyError(str(key))
def preorder_traversal(self,
ctype=_no_ctype,
active=None,
include_all_parents=True,
return_key=False,
root_key=None):
"""
Generates a preorder traversal of the storage
tree. This includes all components and all component
containers (optionally) matching the requested type.
Args:
ctype: Indicate the type of components to
include. The default value indicates that
all types should be included.
active (:const:`True`/:const:`None`): Set to
:const:`True` to indicate that only active
objects should be included. The default
value of :const:`None` indicates that all
components (including those that have been
deactivated) should be included. *Note*:
This flag is ignored for any objects that do
not have an active flag.
include_all_parents (bool): Indicates if all
parent containers (such as blocks and simple
block containers) should be included in the
traversal even when the :attr:`ctype`
keyword is set to something that is not
Block. Default is :const:`True`.
return_key (bool): Set to :const:`True` to
indicate that the return type should be a
2-tuple consisting of the local storage key
of the object within its parent and the
object itself. By default, only the objects
are returned.
root_key: The key to return with this object.
Ignored when :attr:`return_key` is
:const:`False`.
Returns:
iterator of objects or (key,object) tuples
"""
assert active in (None, True)
block_ctype = self.ctype
# if this block is not active, then nothing below it
# can be active
if active and (not self.active):
return
if include_all_parents or \
(ctype is _no_ctype) or \
(ctype is block_ctype):
if return_key:
yield root_key, self
else:
yield self
for key, child in self.children(return_key=True):
# check for appropriate ctype
if (ctype is not _no_ctype) and \
(child.ctype is not ctype) and \
(child.ctype is not block_ctype):
continue
# check active status (if appropriate)
if (active is not None) and \
not getattr(child, _active_flag_name, True):
continue
if not child._is_container:
# not a container (thus, also not a block),
# so it is a leaf node
if return_key:
yield key, child
else:
yield child
elif not child._is_component:
# a container and not a component (thus, not a block)
if child.ctype is block_ctype:
# this is a simple container of blocks
# Note: we treat the simple block
# containers differently because we
# want to propagate the ctype filter
# beyond the simple container methods
# (which don't have a ctype keyword)
for obj_key, obj in child.preorder_traversal(
active=active,
return_key=True,
root_key=key):
if not obj._is_component:
# a container of blocks
if (ctype is _no_ctype) or \
(ctype is block_ctype) or \
include_all_parents:
if return_key:
yield obj_key, obj
else:
yield obj
else:
# a block
for item in obj.preorder_traversal(
ctype=ctype,
active=active,
include_all_parents=include_all_parents,
return_key=return_key,
root_key=obj_key):
yield item
else:
# a simple container, call its traversal method
for item in child.preorder_traversal(
active=active,
return_key=return_key,
root_key=key):
yield item
else:
# a block, call its traversal method
for item in child.preorder_traversal(
ctype=ctype,
active=active,
include_all_parents=include_all_parents,
return_key=return_key,
root_key=key):
yield item
def preorder_visit(self,
visit,
ctype=_no_ctype,
active=None,
include_all_parents=True,
include_key=False,
root_key=None):
"""
Visits each node in the storage tree using a
preorder traversal. This includes all components and
all component containers (optionally) matching the
requested type.
Args:
visit: A function that is called on each node in
the storage tree. When the
:attr:`include_key` keyword is
:const:`False`, the function signature
should be `visit(node) -> [True|False]`.
When the :attr:`include_key` keyword is
:const:`True`, the function signature should
be `visit(key,node) -> [True|False]`. When
the return value of the function evaluates
to to :const:`True`, this indicates that the
traversal should continue with the children
of the current node; otherwise, the
traversal does not go below the current
node.
ctype: Indicate the type of components to
include. The default value indicates that
all types should be included.
active (:const:`True`/:const:`None`): Set to
:const:`True` to indicate that only active
objects should be included. The default
value of :const:`None` indicates that all
components (including those that have been
deactivated) should be included. *Note*:
This flag is ignored for any objects that do
not have an active flag.
include_all_parents (bool): Indicates if all
parent containers (such as blocks and simple
block containers) should be included in the
traversal even when the :attr:`ctype`
keyword is set to something that is not
Block. Default is :const:`True`.
include_key (bool): Set to :const:`True` to
indicate that 2 arguments should be passed
to the visit function, with the first being
the local storage key of the object within
its parent and the second being the object
itself. By default, only the objects are
passed to the function.
root_key: The key to pass with this object.
Ignored when :attr:`include_key` is
:const:`False`.
"""
assert active in (None, True)
block_ctype = self.ctype
# if this block is not active, then nothing below it
# can be active
if active and (not self.active):
return
go = True
if include_all_parents or \
(ctype is _no_ctype) or \
(ctype is block_ctype):
if include_key:
go = visit(root_key, self)
else:
go = visit(self)
if not go:
return
for key, child in self.children(return_key=True):
# check for appropriate ctype
if (ctype is not _no_ctype) and \
(child.ctype is not ctype) and \
(child.ctype is not block_ctype):
continue
# check active status (if appropriate)
if (active is not None) and \
not getattr(child, _active_flag_name, True):
continue
if not child._is_container:
# not a container (thus, also not a block),
# so it is a leaf node
if include_key:
visit(key, child)
else:
visit(child)
elif not child._is_component:
# a container and not a component (thus, not a block)
if child.ctype is block_ctype:
# this is a simple container of blocks
# Note: we treat the simple block
# containers differently because we
# want to propagate the ctype filter
# beyond the simple container methods
# (which don't have a ctype keyword)
stack = [(key,child)]
while len(stack):
obj_key, obj = stack.pop()
# check active status (if appropriate)
if (active is not None) and \
not getattr(obj, _active_flag_name, True):
continue
if not obj._is_component:
# a simple container of blocks
go = True
if (ctype is _no_ctype) or \
(ctype is block_ctype) or \
include_all_parents:
if include_key:
go = visit(obj_key, obj)
else:
go = visit(obj)
if go:
stack.extend(
obj.children(return_key=True))
else:
# a block
obj.preorder_visit(
visit,
ctype=ctype,
active=active,
include_all_parents=include_all_parents,
include_key=include_key,
root_key=obj_key)
else:
# a simple container, call its visit method
child.preorder_visit(
visit,
active=active,
include_key=include_key,
root_key=key)
else:
# a block, call its visit method
child.preorder_visit(
visit,
ctype=ctype,
active=active,
include_all_parents=include_all_parents,
include_key=include_key,
root_key=key)
def postorder_traversal(self,
ctype=_no_ctype,
active=None,
include_all_parents=True,
return_key=False,
root_key=None):
"""
Generates a postorder traversal of the storage
tree. This includes all components and all component
containers (optionally) matching the requested type.
Args:
ctype: Indicate the type of components to
include. The default value indicates that
all types should be included.
active (:const:`True`/:const:`None`): Set to
:const:`True` to indicate that only active
objects should be included. The default
value of :const:`None` indicates that all
components (including those that have been
deactivated) should be included. *Note*:
This flag is ignored for any objects that do
not have an active flag.
include_all_parents (bool): Indicates if all
parent containers (such as blocks and simple
block containers) should be included in the
traversal even when the :attr:`ctype`
keyword is set to something that is not
Block. Default is :const:`True`.
return_key (bool): Set to :const:`True` to
indicate that the return type should be a
2-tuple consisting of the local storage key
of the object within its parent and the
object itself. By default, only the objects
are returned.
root_key: The key to return with this object.
Ignored when :attr:`return_key` is
:const:`False`.
Returns:
iterator of objects or (key,object) tuples
"""
assert active in (None, True)
block_ctype = self.ctype
# if this block is not active, then nothing below it
# can be active
if active and (not self.active):
return
for key, child in self.children(return_key=True):
# check for appropriate ctype
if (ctype is not _no_ctype) and \
(child.ctype is not ctype) and \
(child.ctype is not block_ctype):
continue
# check active status (if appropriate)
if (active is not None) and \
not getattr(child, _active_flag_name, True):
continue
if not child._is_container:
# not a container (thus, also not a block),
# so it is a leaf node
if return_key:
yield key, child
else:
yield child
elif not child._is_component:
# a container and not a component (thus, not a block)
if child.ctype is block_ctype:
# this is a simple container of blocks
# Note: we treat the simple block
# containers differently because we
# want to propagate the ctype filter
# beyond the simple container methods
# (which don't have a ctype keyword)
for obj_key, obj in child.postorder_traversal(
active=active,
return_key=True,
root_key=key):
if not obj._is_component:
# a container of blocks
if (ctype is _no_ctype) or \
(ctype is block_ctype) or \
include_all_parents:
if return_key:
yield obj_key, obj
else:
yield obj
else:
# a block
for item in obj.postorder_traversal(
ctype=ctype,
active=active,
include_all_parents=include_all_parents,
return_key=return_key,
root_key=obj_key):
yield item
else:
# a simple container, call its traversal method
for item in child.postorder_traversal(
active=active,
return_key=return_key,
root_key=key):
yield item
else:
# a block, call its traversal method
for item in child.postorder_traversal(
ctype=ctype,
active=active,
include_all_parents=include_all_parents,
return_key=return_key,
root_key=key):
yield item
if include_all_parents or \
(ctype is _no_ctype) or \
(ctype is block_ctype):
if return_key:
yield root_key, self
else:
yield self
def components(self,
ctype=_no_ctype,
active=None,
return_key=False,
descend_into=True):
"""
Generates an efficient traversal of all components
stored under this block. Components are leaf nodes
in a storage tree (not containers themselves, except
for blocks).
Args:
ctype: Indicate the type of components to
include. The default value indicates that
all types should be included.
active (:const:`True`/:const:`None`): Set to
:const:`True` to indicate that only active
objects should be included. The default
value of :const:`None` indicates that all
components (including those that have been
deactivated) should be included. *Note*:
This flag is ignored for any objects that do
not have an active flag.
return_key (bool): Set to :const:`True` to
indicate that the return type should be a
2-tuple consisting of the local storage key
of the object within its parent and the
object itself. By default, only the objects
are returned.
descend_into (bool): Indicates whether or not to
include components on sub-blocks. Default is
:const:`True`.
Returns:
iterator of objects or (key,object) tuples
"""
assert active in (None, True)
block_ctype = self.ctype
# if this block is not active, then nothing below it
# can be active
if active and (not self.active):
return
# Generate components from immediate children first
for child_key, child in self.children(ctype=ctype, return_key=True):
# check active status (if appropriate)
if (active is not None) and \
not getattr(child, _active_flag_name, True):
continue
if child._is_component:
# child is a component (includes blocks), so yield it
if return_key:
yield child_key, child
else:
yield child
else:
assert child._is_container
# child is a container (but not a block)
if (active is not None) and \
isinstance(child, _ActiveComponentContainerMixin):
for component_key, component in child.components(return_key=True):
if getattr(component,
_active_flag_name,
True):
if return_key:
yield component_key, component
else:
yield component
else:
for item in child.components(return_key=return_key):
yield item
if descend_into:
# now recurse into subblocks
for child in self.children(ctype=block_ctype):
# check active status (if appropriate)
if (active is not None) and \
not getattr(child, _active_flag_name, True):
continue
if child._is_component:
# child is a block
for item in child.components(
ctype=ctype,
active=active,
return_key=return_key,
descend_into=descend_into):
yield item
else:
# child is a container of blocks,
# but not a block itself
for _comp in child.components():
if (active is None) or \
getattr(_comp,
_active_flag_name,
True):
for item in _comp.components(
ctype=ctype,
active=active,
return_key=return_key,
descend_into=descend_into):
yield item
def blocks(self,
active=None,
descend_into=True):
"""
Generates a traversal of all blocks associated with
this one (including itself). This method yields
identical behavior to calling the components()
method with ctype=Block, except that this block is
included (as the first item in the generator).
"""
assert active in (None, True)
block_ctype = self.ctype
# if this block is not active, then nothing below it
# can be active
if active and (not self.active):
return
yield self
for component in self.components(ctype=block_ctype,
active=active,
descend_into=descend_into):
yield component
    def generate_names(self,
                       ctype=_no_ctype,
                       active=None,
                       descend_into=True,
                       convert=str,
                       prefix=""):
        """
        Generate a container of fully qualified names (up to
        this block) for objects stored under this block.
        This function is useful in situations where names
        are used often, but they do not need to be
        dynamically regenerated each time.
        Args:
            ctype: Indicate the type of components to
                include. The default value indicates that
                all types should be included.
            active (:const:`True`/:const:`None`): Set to
                :const:`True` to indicate that only active
                components should be included. The default
                value of :const:`None` indicates that all
                components (including those that have been
                deactivated) should be included. *Note*:
                This flag is ignored for any objects that do
                not have an active flag.
            descend_into (bool): Indicates whether or not to
                include components on sub-blocks. Default is
                :const:`True`.
            convert (function): A function that converts a
                storage key into a string
                representation. Default is str.
            prefix (str): A string to prefix names with.
        Returns:
            A component map that behaves as a dictionary
            mapping component objects to names.
        """
        assert active in (None, True)
        names = ComponentMap()
        # if this block is not active, then nothing below it
        # can be active
        if active and (not self.active):
            return names
        if descend_into:
            # include_all_parents=True guarantees a parent appears in
            # the traversal before its children, so names[parent] is
            # always populated by the time a child is visited below.
            traversal = self.preorder_traversal(ctype=ctype,
                                                active=active,
                                                include_all_parents=True,
                                                return_key=True)
            # skip the root (this block)
            six.next(traversal)
        else:
            traversal = self.children(ctype=ctype,
                                      return_key=True)
        for key, obj in traversal:
            parent = obj.parent
            # e.g. "[key]" for containers or ".key" for blocks
            # (the exact format string is owned by the parent)
            name = parent._child_storage_entry_string % convert(key)
            if parent is not self:
                # nested object: prepend the (already generated)
                # parent name plus the parent's delimiter
                names[obj] = (names[parent] +
                              parent._child_storage_delimiter_string +
                              name)
            else:
                # direct child of this block: only the prefix applies
                names[obj] = prefix + name
        return names
    def write(self,
              filename,
              format=None,
              _solver_capability=None,
              _called_by_solver=False,
              **kwds):
        """
        Write the model to a file, with a given format.
        Args:
            filename (str): The name of the file to write.
            format: The file format to use. If this is not
                specified, the file format will be inferred
                from the filename suffix.
            **kwds: Additional keyword options passed to the
                model writer.
        Returns:
            a :class:`SymbolMap`
        """
        #
        # Guess the format if none is specified
        #
        if format is None:
            format = pyomo.opt.base.guess_format(filename)
        problem_writer = pyomo.opt.WriterFactory(format)
        # TODO: I have no idea how to properly check if the
        # WriterFactory lookup failed. When it does
        # fail, it seems to return something of type:
        # 'pyutilib.component.core.core.PluginFactoryFunctor'
        # which is not a class in the global namespace
        # of that module. So for now, I am simply
        # checking for a few methods that exist on this
        # strange class.
        if (problem_writer is None) or \
           (hasattr(problem_writer, 'get_class') and \
            hasattr(problem_writer, 'services')):
            raise ValueError(
                "Cannot write model in format '%s': no model "
                "writer registered for that format"
                % str(format))
        if _solver_capability is None:
            # by default, assume the solver supports everything
            _solver_capability = lambda x: True
        (filename_, smap) = problem_writer(self,
                                           filename,
                                           _solver_capability,
                                           kwds)
        assert filename_ == filename
        if _called_by_solver:
            # BIG HACK
            # The attribute name "._symbol_maps" contains a leading
            # dot, so it can never collide with (or be reached by)
            # normal attribute access; it is only reachable through
            # getattr/setattr as done here.
            smap_id = id(smap)
            if not hasattr(self, "._symbol_maps"):
                setattr(self, "._symbol_maps", {})
            getattr(self, "._symbol_maps")[smap_id] = smap
            return smap_id
        else:
            return smap
def _flag_vars_as_stale(self):
from pyomo.core.kernel.component_variable import variable
for var in self.components(variable.ctype,
active=True):
var.stale = True
    def load_solution(self,
                      solution,
                      allow_consistent_values_for_fixed_vars=False,
                      comparison_tolerance_for_fixed_vars=1e-5):
        """
        Load a solution.
        Args:
            solution: A :class:`pyomo.opt.Solution` object with a
                symbol map. Optionally, the solution can be tagged
                with a default variable value (e.g., 0) that will be
                applied to those variables in the symbol map that do
                not have a value in the solution.
            allow_consistent_values_for_fixed_vars:
                Indicates whether a solution can specify
                consistent values for variables that are
                fixed.
            comparison_tolerance_for_fixed_vars: The
                tolerance used to define whether or not a
                value in the solution is consistent with the
                value of a fixed variable.
        """
        symbol_map = solution.symbol_map
        default_variable_value = getattr(solution,
                                         "default_variable_value",
                                         None)
        # Generate the list of active import suffixes on
        # this top level model
        valid_import_suffixes = \
            dict(import_suffix_generator(self,
                                         active=True,
                                         return_key=True))
        # To ensure that import suffix data gets properly
        # overwritten (e.g., the case where nonzero dual
        # values exist on the suffix and but only sparse
        # dual values exist in the results object) we clear
        # all active import suffixes.
        for suffix in itervalues(valid_import_suffixes):
            suffix.clear()
        # Load problem (model) level suffixes. These would
        # only come from ampl interfaced solution suffixes
        # at this point in time.
        for _attr_key, attr_value in iteritems(solution.problem):
            attr_key = _attr_key[0].lower() + _attr_key[1:]
            if attr_key in valid_import_suffixes:
                valid_import_suffixes[attr_key][self] = attr_value
        #
        # Load variable data
        #
        # Variables absent from the solution keep stale=True unless
        # default_variable_value fills them in at the end.
        self._flag_vars_as_stale()
        # NOTE(review): var_skip_attrs (and the other *_skip_attrs
        # lists below) are assigned but never referenced in this
        # method - they look like leftovers from an older version.
        var_skip_attrs = ['id','canonical_label']
        seen_var_ids = set()
        for label, entry in iteritems(solution.variable):
            var = symbol_map.getObject(label)
            if (var is None) or \
               (var is SymbolMap.UnknownSymbol):
                # NOTE: the following is a hack, to handle
                # the ONE_VAR_CONSTANT variable that is
                # necessary for the objective
                # constant-offset terms. probably should
                # create a dummy variable in the model
                # map at the same time the objective
                # expression is being constructed.
                if "ONE_VAR_CONST" in label:
                    continue
                else:
                    raise KeyError("Variable associated with symbol '%s' "
                                   "is not found on this block"
                                   % (label))
            seen_var_ids.add(id(var))
            if (not allow_consistent_values_for_fixed_vars) and \
               var.fixed:
                raise ValueError("Variable '%s' is currently fixed. "
                                 "A new value is not expected "
                                 "in solution" % (var.name))
            for _attr_key, attr_value in iteritems(entry):
                attr_key = _attr_key[0].lower() + _attr_key[1:]
                if attr_key == 'value':
                    if allow_consistent_values_for_fixed_vars and \
                       var.fixed and \
                       (math.fabs(attr_value - var.value) > \
                        comparison_tolerance_for_fixed_vars):
                        raise ValueError(
                            "Variable %s is currently fixed. "
                            "A value of '%s' in solution is "
                            "not within tolerance=%s of the current "
                            "value of '%s'"
                            % (var.name, attr_value,
                               comparison_tolerance_for_fixed_vars,
                               var.value))
                    var.value = attr_value
                    var.stale = False
                elif attr_key in valid_import_suffixes:
                    valid_import_suffixes[attr_key][var] = attr_value
        # start to build up the set of unseen variable ids
        unseen_var_ids = set(symbol_map.byObject.keys())
        # at this point it contains ids for non-variable types
        unseen_var_ids.difference_update(seen_var_ids)
        #
        # Load objective solution (should simply be suffixes if
        # they exist)
        #
        objective_skip_attrs = ['id','canonical_label','value']
        for label,entry in iteritems(solution.objective):
            obj = symbol_map.getObject(label)
            if (obj is None) or \
               (obj is SymbolMap.UnknownSymbol):
                raise KeyError("Objective associated with symbol '%s' "
                               "is not found on this block"
                               % (label))
            # remove this (non-variable) id from the unseen set so it
            # is not considered by the sparse-variable pass below
            unseen_var_ids.remove(id(obj))
            for _attr_key, attr_value in iteritems(entry):
                attr_key = _attr_key[0].lower() + _attr_key[1:]
                if attr_key in valid_import_suffixes:
                    valid_import_suffixes[attr_key][obj] = \
                        attr_value
        #
        # Load constraint solution
        #
        con_skip_attrs = ['id', 'canonical_label']
        for label, entry in iteritems(solution.constraint):
            con = symbol_map.getObject(label)
            if con is SymbolMap.UnknownSymbol:
                #
                # This is a hack - see above.
                #
                if "ONE_VAR_CONST" in label:
                    continue
                else:
                    raise KeyError("Constraint associated with symbol '%s' "
                                   "is not found on this block"
                                   % (label))
            unseen_var_ids.remove(id(con))
            for _attr_key, attr_value in iteritems(entry):
                attr_key = _attr_key[0].lower() + _attr_key[1:]
                if attr_key in valid_import_suffixes:
                    valid_import_suffixes[attr_key][con] = \
                        attr_value
        #
        # Load sparse variable solution
        #
        # Any variable not mentioned in the solution receives the
        # tagged default value (if one was provided).
        if default_variable_value is not None:
            for var_id in unseen_var_ids:
                var = symbol_map.getObject(symbol_map.byObject[var_id])
                if var.ctype is not variable.ctype:
                    continue
                if (not allow_consistent_values_for_fixed_vars) and \
                   var.fixed:
                    raise ValueError("Variable '%s' is currently fixed. "
                                     "A new value is not expected "
                                     "in solution" % (var.name))
                if allow_consistent_values_for_fixed_vars and \
                   var.fixed and \
                   (math.fabs(default_variable_value - var.value) > \
                    comparison_tolerance_for_fixed_vars):
                    raise ValueError(
                        "Variable %s is currently fixed. "
                        "A value of '%s' in solution is "
                        "not within tolerance=%s of the current "
                        "value of '%s'"
                        % (var.name, default_variable_value,
                           comparison_tolerance_for_fixed_vars,
                           var.value))
                var.value = default_variable_value
                var.stale = False
class block(_block_base, IBlockStorage):
    """An implementation of the :class:`IBlockStorage` interface."""
    # To avoid a circular import, for the time being, this
    # property will be set externally
    _ctype = None
    def __init__(self):
        self._parent = None
        self._active = True
        # This implementation is quite piggish at the
        # moment. It can probably be streamlined by doing
        # something similar to what _BlockData does in
        # block.py (e.g., using _ctypes, _decl, and
        # _decl_order). However, considering that we now
        # have other means of producing lightweight blocks
        # (tiny_block) as well as the more lightweight
        # implementation of singleton types, it is hard to
        # justify making this implementation harder to
        # follow until we do some more concrete profiling.
        #
        # Children are indexed twice: grouped by ctype and in
        # overall insertion order.
        self._byctype = defaultdict(OrderedDict)
        self._order = OrderedDict()
    #
    # Define the IComponentContainer abstract methods
    #
    # overridden by the IBlockStorage interface
    #def components(self):
    #    pass
    def child_key(self, child):
        """Get the lookup key associated with a child of
        this container.
        Raises:
            ValueError: if the argument is not a child of
            this container
        """
        if getattr(child, "parent", None) is self:
            if child.ctype in self._byctype:
                # linear identity scan over children of this ctype
                for key, val in iteritems(self._byctype[child.ctype]):
                    if val is child:
                        return key
        raise ValueError
    #
    # Define the IBlockStorage abstract methods
    #
    def children(self,
                 ctype=_no_ctype,
                 return_key=False):
        """Iterate over the children of this block.
        Args:
            ctype: Indicate the type of children to iterate
                over. The default value indicates that all
                types should be included.
            return_key (bool): Set to :const:`True` to
                indicate that the return type should be a
                2-tuple consisting of the child storage key
                and the child object. By default, only the
                child objects are returned.
        Returns:
            iterator of objects or (key,object) tuples
        """
        if return_key:
            itermethod = iteritems
        else:
            itermethod = itervalues
        if ctype is _no_ctype:
            return itermethod(self._order)
        else:
            return itermethod(self._byctype.get(ctype,{}))
    #
    # Interface
    #
    def __setattr__(self, name, obj):
        """Set an attribute; categorized objects are additionally
        registered as children of this block."""
        if hasattr(obj, '_is_categorized_object'):
            if obj._parent is None:
                if hasattr(self, name) and \
                   hasattr(getattr(self, name),
                           '_is_categorized_object'):
                    logger.warning(
                        "Implicitly replacing the categorized attribute "
                        "%s (type=%s) on block with a new object "
                        "(type=%s).\nThis is usually indicative of a "
                        "modeling error.\nTo avoid this warning, delete "
                        "the original object from the block before "
                        "assigning a new object with the same name."
                        % (name,
                           type(getattr(self, name)),
                           type(obj)))
                    delattr(self, name)
                self._byctype[obj.ctype][name] = obj
                self._order[name] = obj
                # weakref avoids a parent<->child reference cycle
                obj._parent = weakref.ref(self)
                # children that are not of type
                # _ActiveComponentMixin retain the active status
                # of their parent, which is why the default
                # return value from getattr is False
                if getattr(obj, _active_flag_name, False):
                    self._increment_active()
            elif hasattr(self, name) and \
                 (getattr(self, name) is obj):
                # a very special case that makes sense to handle
                # because the implied order should be: (1) delete
                # the object at the current index, (2) insert the
                # the new object. This performs both without any
                # actions, but it is an extremely rare case, so
                # it should go last.
                pass
            else:
                raise ValueError(
                    "Invalid assignment to %s type with name '%s' "
                    "at entry %s. A parent container has already "
                    "been assigned to the object being inserted: %s"
                    % (self.__class__.__name__,
                       self.name,
                       name,
                       obj.parent.name))
        super(block, self).__setattr__(name, obj)
    def __delattr__(self, name):
        """Delete an attribute; categorized objects are additionally
        unregistered as children of this block."""
        obj = getattr(self, name)
        if hasattr(obj, '_is_categorized_object'):
            del self._order[name]
            del self._byctype[obj.ctype][name]
            if len(self._byctype[obj.ctype]) == 0:
                del self._byctype[obj.ctype]
            obj._parent = None
            # children that are not of type
            # IActiveObject retain the active status
            # of their parent, which is why the default
            # return value from getattr is False
            if getattr(obj, _active_flag_name, False):
                self._decrement_active()
        super(block, self).__delattr__(name)
    def collect_ctypes(self,
                       active=None,
                       descend_into=True):
        """
        Count all object category types stored on or under
        this block.
        Args:
            active (:const:`True`/:const:`None`): Set to
                :const:`True` to indicate that only active
                categorized objects should be counted. The
                default value of :const:`None` indicates
                that all categorized objects (including
                those that have been deactivated) should be
                counted. *Note*: This flag is ignored for
                any objects that do not have an active flag.
            descend_into (bool): Indicates whether or not
                category types should be counted on
                sub-blocks. Default is :const:`True`.
        Returns:
            set of category types
        """
        assert active in (None, True)
        ctypes = set()
        if not descend_into:
            if active is None:
                ctypes.update(ctype for ctype in self._byctype)
            else:
                assert active is True
                for ctype in self._byctype:
                    for component in self.components(
                            ctype=ctype,
                            active=True,
                            descend_into=False):
                        ctypes.add(ctype)
                        # just need 1 to appear in order to
                        # count the ctype
                        break
        else:
            for blk in self.blocks(active=active,
                                   descend_into=True):
                ctypes.update(blk.collect_ctypes(
                    active=active,
                    descend_into=False))
        return ctypes
class tiny_block(_block_base, IBlockStorage):
    """
    A memory efficient block for storing a small number
    of child components.
    """
    # To avoid a circular import, for the time being, this
    # property will be set externally
    _ctype = None
    def __init__(self):
        self._parent = None
        self._active = True
        # Unlike `block`, only the insertion-ordered list of child
        # attribute names is stored; children are looked up via
        # getattr, trading speed for memory.
        self._order = []
    def __setattr__(self, name, obj):
        """Set an attribute; categorized objects are additionally
        registered as children of this block."""
        if hasattr(obj, '_is_categorized_object'):
            if obj._parent is None:
                if hasattr(self, name) and \
                   hasattr(getattr(self,name),
                           '_is_categorized_object'):
                    logger.warning(
                        "Implicitly replacing the categorized attribute "
                        "%s (type=%s) on block with a new object "
                        "(type=%s).\nThis is usually indicative of a "
                        "modeling error.\nTo avoid this warning, delete "
                        "the original object from the block before "
                        "assigning a new object with the same name."
                        % (name,
                           type(getattr(self, name)),
                           type(obj)))
                    delattr(self, name)
                # weakref avoids a parent<->child reference cycle
                obj._parent = weakref.ref(self)
                self._order.append(name)
                # children that are not of type
                # IActiveObject retain the active status
                # of their parent, which is why the default
                # return value from getattr is False
                if getattr(obj, _active_flag_name, False):
                    self._increment_active()
            elif hasattr(self, name) and \
                 (getattr(self, name) is obj):
                # a very special case that makes sense to handle
                # because the implied order should be: (1) delete
                # the object at the current index, (2) insert the
                # the new object. This performs both without any
                # actions, but it is an extremely rare case, so
                # it should go last.
                pass
            else:
                raise ValueError(
                    "Invalid assignment to %s type with name '%s' "
                    "at entry %s. A parent container has already "
                    "been assigned to the object being inserted: %s"
                    % (self.__class__.__name__,
                       self.name,
                       name,
                       obj.parent.name))
        super(tiny_block, self).__setattr__(name, obj)
    def __delattr__(self, name):
        """Delete an attribute; categorized objects are additionally
        unregistered as children of this block."""
        obj = getattr(self, name)
        if hasattr(obj, '_is_categorized_object'):
            obj._parent = None
            # NOTE(review): this identity scan locates the stored
            # entry for `obj`; self._order.remove(name) would appear
            # equivalent since entries are keyed by attribute name.
            for ndx, key in enumerate(self._order):
                if getattr(self, key) is obj:
                    break
            else: #pragma:nocover
                # shouldn't happen
                assert False
            del self._order[ndx]
            # children that are not of type
            # IActiveObject retain the active status
            # of their parent, which is why the default
            # return value from getattr is False
            if getattr(obj, _active_flag_name, False):
                self._decrement_active()
        super(tiny_block, self).__delattr__(name)
    #
    # Define the IComponentContainer abstract methods
    #
    # overridden by the IBlockStorage interface
    #def components(...)
    def child_key(self, child):
        """Get the lookup key associated with a child of
        this container.
        Raises:
            ValueError: if the argument is not a child of
            this container
        """
        if getattr(child, "parent", None) is self:
            for key in self._order:
                if getattr(self, key) is child:
                    return key
        raise ValueError
    # overridden by the IBlockStorage interface
    #def children(...)
    #
    # Define the IBlockStorage abstract methods
    #
    def children(self,
                 ctype=_no_ctype,
                 return_key=False):
        """Iterate over the children of this block.
        Args:
            ctype: Indicate the type of children to iterate
                over. The default value indicates that all
                types should be included.
            return_key (bool): Set to :const:`True` to
                indicate that the return type should be a
                2-tuple consisting of the child storage key
                and the child object. By default, only the
                child objects are returned.
        Returns:
            iterator of objects or (key,object) tuples
        """
        for key in self._order:
            child = getattr(self, key)
            if (ctype is _no_ctype) or (child.ctype == ctype):
                if return_key:
                    yield key, child
                else:
                    yield child
    # implemented by _block_base
    # def components(...)
    # implemented by _block_base
    # def blocks(...)
    def collect_ctypes(self,
                       active=None,
                       descend_into=True):
        """
        Count all object category types stored on or under
        this block.
        Args:
            active (:const:`True`/:const:`None`): Set to
                :const:`True` to indicate that only active
                categorized objects should be counted. The
                default value of :const:`None` indicates
                that all categorized objects (including
                those that have been deactivated) should be
                counted. *Note*: This flag is ignored for
                any objects that do not have an active flag.
            descend_into (bool): Indicates whether or not
                category types should be counted on
                sub-blocks. Default is :const:`True`.
        Returns:
            set of category types
        """
        assert active in (None, True)
        ctypes = set()
        if not descend_into:
            for component in self.components(active=active,
                                             descend_into=False):
                ctypes.add(component.ctype)
        else:
            for blk in self.blocks(active=active,
                                   descend_into=True):
                ctypes.update(blk.collect_ctypes(
                    active=active,
                    descend_into=False))
        return ctypes
class block_tuple(ComponentTuple,
                  _ActiveComponentContainerMixin):
    """A tuple-style container for blocks."""
    # To avoid a circular import, for the time being, this
    # property will be set externally
    _ctype = None
    # slots: parent reference, active flag, underlying data storage
    __slots__ = ("_parent",
                 "_active",
                 "_data")
    if six.PY3:
        # This has to do with a bug in the abc module
        # prior to python3. They forgot to define the base
        # class using empty __slots__, so we shouldn't add a slot
        # for __weakref__ because the base class has a __dict__.
        __slots__ = list(__slots__) + ["__weakref__"]
    def __init__(self, *args, **kwds):
        self._parent = None
        self._active = True
        super(block_tuple, self).__init__(*args, **kwds)
class block_list(ComponentList,
                 _ActiveComponentContainerMixin):
    """A list-style container for blocks."""
    # To avoid a circular import, for the time being, this
    # property will be set externally
    _ctype = None
    # slots: parent reference, active flag, underlying data storage
    __slots__ = ("_parent",
                 "_active",
                 "_data")
    if six.PY3:
        # This has to do with a bug in the abc module
        # prior to python3. They forgot to define the base
        # class using empty __slots__, so we shouldn't add a slot
        # for __weakref__ because the base class has a __dict__.
        __slots__ = list(__slots__) + ["__weakref__"]
    def __init__(self, *args, **kwds):
        self._parent = None
        self._active = True
        super(block_list, self).__init__(*args, **kwds)
class block_dict(ComponentDict,
                 _ActiveComponentContainerMixin):
    """A dict-style container for blocks."""
    # To avoid a circular import, for the time being, this
    # property will be set externally
    _ctype = None
    # slots: parent reference, active flag, underlying data storage
    __slots__ = ("_parent",
                 "_active",
                 "_data")
    if six.PY3:
        # This has to do with a bug in the abc module
        # prior to python3. They forgot to define the base
        # class using empty __slots__, so we shouldn't add a slot
        # for __weakref__ because the base class has a __dict__.
        __slots__ = list(__slots__) + ["__weakref__"]
    def __init__(self, *args, **kwds):
        self._parent = None
        self._active = True
        super(block_dict, self).__init__(*args, **kwds)
| igorsowa9/vpp | venv/lib/python3.6/site-packages/pyomo/core/kernel/component_block.py | component_block.py | py | 59,127 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pyomo.core.kernel.component_interface.IComponent",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "pyomo.core.kernel.component_interface.IComponentContainer",
"line_number":... |
75128039145 | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.graph_objs as go

# NOTE: `pandas as pd` is imported at the top of the file.

# --- Load and prepare the PA-X peace agreements dataset ---
df = pd.read_csv(r'pax_all_agreements_data.csv', sep=',')
# Split the ISO date (YYYY-MM-DD) into year/month/day columns.
# (Tuple-unpacking `.str.split(...).str` was removed in pandas 1.0;
# expand=True is the supported spelling.)
df[['Dat_Y', 'Dat_M', 'Dat_D']] = df['Dat'].str.split('-', expand=True)
# Make the year numeric up front so min/max, sorting and plotting
# all operate on numbers rather than strings.
df.Dat_Y = pd.to_numeric(df.Dat_Y)
fecha_inicio = min(df['Dat_Y'])
fecha_final = max(df['Dat_Y'])

# --- Number of agreements per (year, region) ---
# reset_index(name='a') labels the count column directly, replacing the
# version-fragile reset_index() + rename(columns={0: 'a'}) dance; the
# counts are already integers so no to_numeric conversion is required.
df_grupo_region_fecha = df.groupby(['Dat_Y', 'Reg']).size().reset_index(name='a')
# .copy() gives each regional subset its own frame (avoids
# SettingWithCopy issues on later in-place modifications).
df_group_region_mid = df_grupo_region_fecha[df_grupo_region_fecha['Reg'].str.contains('Middle East')].copy()
df_group_region_eur = df_grupo_region_fecha[df_grupo_region_fecha['Reg'].str.contains('Europe')].copy()
print(df_group_region_eur.dtypes)
df_group_region_eur.plot.bar(x='a', y='Dat_Y')
df['Reg'].value_counts().plot(kind='bar')
print(df.dtypes)
ax = df['Dat_Y'].value_counts().sort_index().plot(kind='bar', figsize=(10, 4))
ax = df['Dat_M'].value_counts().sort_index().plot(kind='bar')

# --- Evolution of agreements per region over time ---
regions_evolution = pd.crosstab(df['Dat_Y'], df['Reg'])
regions_evolution.plot(color=sns.color_palette('Set2', 12), figsize=(18, 8))
plt.show()
#plt.figure(figsize = (12,4))
#plt.subplot(121)
#sns.distplot(x=df.dat_Y, kde = False, bins = 25)
#plt.subplot(122)
#sns.distplot(df['Dat_m'], kde = False)

# --- Mentions of crime-related topics in treaties over time ---
layout = dict(title_text='Mentions of Crime in Peace Treaties over time',
              barmode='stack')
data = [
    go.Bar(name='Corruption', x=df[df['Cor'] >= 1].groupby('Dat_Y').count()['Con'].index, y=df[df['Cor'] >= 1].groupby('Dat_Y').count()['Con'].values),
    go.Bar(name='Terrorism', x=df[df['Terr'] >= 1].groupby('Dat_Y').count()['Con'].index, y=df[df['Terr'] >= 1].groupby('Dat_Y').count()['Con'].values),
    go.Bar(name='Organised Crime', x=df[df['SsrCrOcr'] >= 1].groupby('Dat_Y').count()['Con'].index, y=df[df['SsrCrOcr'] >= 1].groupby('Dat_Y').count()['Con'].values),
    go.Bar(name='Drugs', x=df[df['SsrDrugs'] >= 1].groupby('Dat_Y').count()['Con'].index, y=df[df['SsrDrugs'] >= 1].groupby('Dat_Y').count()['Con'].values),
]
fig = go.Figure(data=data, layout=layout)
fig.show()

# --- Inclusivity rating (sum of group-inclusion flags) per region ---
df['InclRati'] = df['GCh']+df['GDis']+df['GAge']+df['GMig']+df['GRa']+df['GRe']+df['GInd']+df['GOth']+df['GRef']+df['GSoc']
df[['Reg', 'InclRati']].groupby(['Reg', 'InclRati'], as_index=False).sum().plot(x='Reg', y='InclRati', rot=45, figsize=(10, 4))

# --- Stacked bars: drugs / terrorism / corruption per region ---
# NOTE(review): groupby(['Reg']).size() counts *rows* per region, not
# the number of treaties mentioning each topic; the stacked totals may
# not reflect the legend - confirm the intended aggregation.
regions = list(df.Reg.unique())
plt.figure(figsize=(11, 6))
p3 = plt.bar(regions, df[['Reg', 'SsrDrugs']].groupby(['Reg']).size()+df[['Reg', 'Terr']].groupby(['Reg']).size()+df[['Reg', 'Cor']].groupby(['Reg']).size(), color='magenta', edgecolor='black')
p1 = plt.bar(regions, df[['Reg', 'SsrDrugs']].groupby(['Reg']).size()+df[['Reg', 'Terr']].groupby(['Reg']).size(), color='cyan', edgecolor='black')
p2 = plt.bar(regions, df[['Reg', 'SsrDrugs']].groupby(['Reg']).size(), color='blue', edgecolor='black')
plt.xticks(rotation=90)
plt.legend(labels=['Corruption', 'Terrorism', 'Drugs'])
plt.show()

df.groupby('Reg').size().sort_values().plot(kind='bar')
df.groupby('Con').size().sort_values().tail(10).plot(kind='bar', rot=45)
| AlejandroUPC/data_vis_uoc | data_exploring.py | data_exploring.py | py | 3,063 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.to_numeric",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.to_numeric",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.crosstab",
... |
70569498345 | from fastapi import status, HTTPException, Depends, APIRouter
from sqlalchemy.orm import Session
from .. import models, schemas, utils, oauth2
from ..database import get_db
from sqlalchemy import func, case
# Router for all /users endpoints; grouped under the "Users" tag in
# the generated OpenAPI docs.
router = APIRouter(
    prefix="/users",
    tags=['Users']
)
@router.post("/", status_code=status.HTTP_201_CREATED, response_model=schemas.UserOut)
async def create_user(user: schemas.UserBase, db: Session = Depends(get_db)):
    """Register a new user, storing the password only in hashed form
    (see utils.hash)."""
    # hash the password - user.password
    hashed_password = utils.hash(user.password)
    user.password = hashed_password
    new_user = models.User(**user.dict())
    db.add(new_user)
    db.commit()
    # refresh to pick up DB-generated fields (e.g. the id) before returning
    db.refresh(new_user)
    return new_user
@router.get('/info', response_model=schemas.UserDisplay)
async def get_user_info(db: Session = Depends(get_db), current_user: int = Depends(oauth2.get_current_user)):
    """Return the authenticated user's profile together with their
    notes and per-note vote tallies."""
    # For each note owned by the current user, LEFT JOIN its votes and
    # aggregate: total vote count, likes (value == 1) and dislikes
    # (value == -1).
    notes = db.query(models.Note, func.count(models.Vote.note_id).label("votes"), (func.count(case([(models.Vote.value == 1, 1)], else_=None))).label("likes"), (func.count(case([(models.Vote.value == -1, 1)], else_=None))).label("dislikes")).join(models.Vote, models.Vote.note_id == models.Note.id, isouter=True).group_by(models.Note.id).filter(models.Note.owner_id == current_user.id).all()
    # Each query row is (Note, votes, likes, dislikes).
    notes_display = [schemas.NotesDisplay(Note=note[0], votes=note[1], likes=note[2], dislikes=note[3]) for note in notes]
    return schemas.UserDisplay(User=current_user,notes=notes_display)
@router.get("/basicinfo", response_model=schemas.UserOut)
async def get_basic_user_info(db: Session = Depends(get_db), current_user: int = Depends(oauth2.get_current_user)):
    """Return only the authenticated user's basic profile fields."""
    return current_user
@router.put("/email", response_model=schemas.UserOut)
async def update_email(email: str, db: Session = Depends(get_db), current_user: int = Depends(oauth2.get_current_user)):
    """Update the authenticated user's e-mail address.

    The new address is taken from the `email` query parameter.
    """
    user = db.query(models.User).filter(models.User.id == current_user.id).first()
    # NOTE(review): current_user was resolved from a valid token, so
    # this lookup should normally succeed; the 404 is a safety net.
    if not user:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail=f"User with id: {current_user.id} does not exist")
    user.email = email
    db.commit()
    db.refresh(user)
    return user
@router.get('/{id}', response_model=schemas.UserOut)
async def get_user(id: int, db: Session = Depends(get_db)):
    """Fetch a user's public profile by numeric id (404 if absent)."""
    user = db.query(models.User).filter(models.User.id == id).first()
    if not user:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail=f"User with id: {id} does not exist")
    return user
| charl1ecloud/notebank-webapp | backend/app/routers/user.py | user.py | py | 2,505 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "database.get_... |
3799851771 | import os
import json
import requests
import logging
from urllib.parse import urlencode
from types import SimpleNamespace
# Bungie.net membership-type codes keyed by platform name.
MTYPES = dict(xbox=1, playstation=2, steam=3, blizzard=4, stadia=5, epic=6, bungie=254)
# Clan membership levels. Just like with Halo - Bungie never made a 4th.
MLEVELS = dict(beginner=1, member=2, admin=3, actingfounder=4, founder=5)
class BungieEnumerations():
    """Attribute-style (dot) access to the Bungie.net enumeration tables."""
    def __init__(self):
        for attr, table in (('mtype', MTYPES), ('mlevels', MLEVELS)):
            setattr(self, attr, SimpleNamespace(**table))
class BungieInterfaceError(Exception):
    """Raised when Bungie.net answers with a non-OK response.

    `status` carries Bungie's ErrorStatus string and `description`
    the accompanying error description.
    """
    def __init__(self, status, description):
        self.status = status
        self.description = description
    def __str__(self):
        return 'BungieInterface received a {}.'.format(self.status)
class BungieInterface():
    def __init__(self):
        # All connection settings come from the environment; values
        # will be None if the corresponding variable is unset.
        self.log = logging.getLogger(f'{self.__module__}.{self.__class__.__name__}')
        self.web = os.getenv('BNET_ENDPOINT')
        # API calls live under /Platform on the Bungie.net endpoint
        self.root = f'{self.web}/Platform'
        self.key = os.getenv('BNET_API_KEY')
        self.id = os.getenv('BNET_CLIENT_ID')
        self.secret = os.getenv('BNET_CLIENT_SECRET')
        self.enum = BungieEnumerations()
def _agent_(self):
agent_app = f"{os.getenv('APPLICATION_NAME')}/{os.getenv('APPLICATION_VERSION')}"
agent_bnet = f"{os.getenv('BNET_APP_NAME')}/{os.getenv('BNET_APP_ID')}"
agent_contact = f"{os.getenv('WEB_URL')};{os.getenv('CONTACT_EMAIL')}"
agent = f"{agent_app} {agent_bnet} (+{agent_contact})"
return agent
def _get_headers_(self):
"""Attach required API key for Bungie.net interaction."""
headers = {
'User-Agent': self._agent_(), # Bungie nicely asks for us to do this.
'X-API-Key': self.key
}
return headers
def _get_headers_with_token_(self, token):
"""Attach token to Bungie.net interaction to assume user responsibility."""
# Note that the token is passed into the function and not stored within the class.
# This is because we regularly have to rotate tokens or assume user identities.
headers = self._get_headers_()
headers = {
'User-Agent': self._agent_(), # Bungie nicely asks for us to do this.
'Authorization': f"Bearer {token}",
'X-API-Key': self.key
}
return headers
def _get_url_(self, *segments, root=None):
"""Build the API path."""
path = '/'.join(map(str, segments))
if not root:
root = self.root
url = f'{root}/{path}'
return url
def _execute_(self, method, url, headers=None, params=None, json=None, data=None):
"""Provide a `requests` method to execute."""
self.log.info(f'{method.__name__.upper()} -> {url}')
response = method(url, headers=headers, params=params, json=json, data=data)
if not response.ok:
try:
body = response.json()
except requests.exceptions.RequestException as e:
raise BungieInterfaceError('RequestException', str(e)) from e
raise BungieInterfaceError(body.get('ErrorStatus'), body.get('error_description'))
return response.json()
def _strip_outer_(self, body):
return body.get('Response')
def get_authorisation_url(self, state):
url = self._get_url_('en', 'OAuth', 'Authorize', root=self.web)
params = {
'client_id': self.id,
'response_type': 'code',
'state': state
}
url_with_qry = f'{url}?{urlencode(params)}'
return url_with_qry
def get_token(self, code):
url = self._get_url_('App', 'OAuth', 'Token')
headers = self._get_headers_()
data = {
'client_id': self.id,
'client_secret': self.secret,
'grant_type': 'authorization_code',
'code': code
}
response = self._execute_(requests.post, url, headers=headers, data=data)
return response
def refresh_token(self, refresh):
url = self._get_url_('App', 'OAuth', 'Token')
headers = self._get_headers_()
data = {
'client_id': self.id,
'client_secret': self.secret,
'grant_type': 'refresh_token',
'refresh_token': refresh
}
response = self._execute_(requests.post, url, headers=headers, data=data)
return response
    def get_destiny_player(self, display_name, display_code, membership_type):
        """Search one platform for a Destiny player by Bungie name
        (name#code) and return the first match, or an empty dict."""
        url = self._get_url_('Destiny2', 'SearchDestinyPlayerByBungieName', membership_type)
        headers = self._get_headers_()
        data = {
            'displayName': display_name,
            'displayNameCode': int(display_code)
        }
        response = self._execute_(requests.post, url, headers=headers, json=data)
        content = next(iter(self._strip_outer_(response)), dict()) # Return first element of list or an empty structure.
        return content
    def find_destiny_player(self, display_name, display_code):
        """Search every platform for a player, honouring cross-save.

        Returns a list of membership dicts. When a result carries a
        crossSaveOverride, only the overriding platform's record is
        returned; otherwise all per-platform matches are returned.
        """
        # Use to attempt to find a player based on their display name and code.
        # There is no guarantee this player will be unique.
        # We have to search all membership types.
        membership_types = [
            self.enum.mtype.steam,
            self.enum.mtype.playstation,
            self.enum.mtype.xbox,
            self.enum.mtype.stadia
        ]
        all_results = list()
        final_results = list()
        for membership_type in membership_types:
            results = self.get_destiny_player(display_name, display_code, membership_type)
            if results:
                all_results.append(results)
                # Check for cross-save membership type override.
                cross_save = results.get('crossSaveOverride')
                if not cross_save:
                    # Keep trying to find the player's information.
                    continue
                elif cross_save == membership_type:
                    # This platform IS the cross-save authority.
                    final_results.append(results)
                    return final_results
                else:
                    # Re-query the overriding platform directly.
                    results = self.get_destiny_player(display_name, display_code, cross_save)
                    final_results.append(results)
                    return final_results
        return all_results
def get_linked_profiles(self, membership_type, membership_id):
    """Fetch all profiles linked to the given Destiny membership."""
    url = self._get_url_('Destiny2', membership_type, 'Profile', membership_id, 'LinkedProfiles')
    response = self._execute_(requests.get, url, headers=self._get_headers_())
    return self._strip_outer_(response)
def get_group_by_id(self, group_id):
    """Return the 'detail' record for a group/clan."""
    url = self._get_url_('GroupV2', group_id)
    response = self._execute_(requests.get, url, headers=self._get_headers_())
    return self._strip_outer_(response).get('detail')
def get_members_in_group(self, group_id):
    """List the current members of a group/clan."""
    url = self._get_url_('GroupV2', group_id, 'Members')
    response = self._execute_(requests.get, url, headers=self._get_headers_())
    return self._strip_outer_(response).get('results')
def get_groups_for_user(self, membership_type, membership_id):
    """List the groups a user belongs to.

    The two trailing path parameters are the member filter and group type
    respectively; both are hardcoded (0, 1) for now.
    """
    url = self._get_url_('GroupV2', 'User', membership_type, membership_id, 0, 1)
    response = self._execute_(requests.get, url, headers=self._get_headers_())
    return self._strip_outer_(response).get('results')
def get_pending_in_group(self, token, group_id):
    """List pending membership applications (requires an OAuth token)."""
    url = self._get_url_('GroupV2', group_id, 'Members', 'Pending')
    response = self._execute_(requests.get, url, headers=self._get_headers_with_token_(token))
    return self._strip_outer_(response).get('results')
def get_invited_individuals(self, token, group_id):
    """List players with outstanding invites (requires an OAuth token)."""
    url = self._get_url_('GroupV2', group_id, 'Members', 'InvitedIndividuals')
    response = self._execute_(requests.get, url, headers=self._get_headers_with_token_(token))
    return self._strip_outer_(response).get('results')
def invite_user_to_group(self, token, group_id, membership_type, membership_id):
    """Send a group invite to a specific player."""
    url = self._get_url_('GroupV2', group_id, 'Members', 'IndividualInvite', membership_type, membership_id)
    headers = self._get_headers_with_token_(token)
    # For some reason this endpoint expects a body, even if it's empty.
    response = self._execute_(requests.post, url, headers=headers, json=dict())
    return self._strip_outer_(response)
def cancel_invite_to_group(self, token, group_id, membership_type, membership_id):
    """Withdraw a previously sent group invite."""
    url = self._get_url_('GroupV2', group_id, 'Members', 'IndividualInviteCancel', membership_type, membership_id)
    response = self._execute_(requests.post, url, headers=self._get_headers_with_token_(token))
    return self._strip_outer_(response)
def accept_request_to_join_group(self, token, group_id, membership_type, membership_id):
    """Approve a player's pending request to join the group."""
    url = self._get_url_('GroupV2', group_id, 'Members', 'Approve', membership_type, membership_id)
    headers = self._get_headers_with_token_(token)
    # For some reason this endpoint expects a body, even if it's empty.
    response = self._execute_(requests.post, url, headers=headers, json=dict())
    return self._strip_outer_(response)
def _deny_request_to_join_group_(self, token, group_id, membership_type, membership_id):
    """This function should not be called even though it is simpler. Bungie has not implemented this endpoint. We can dream."""
    # Kept for the day Bungie implements the single-denial endpoint; use
    # deny_request_to_join_group (bulk 'DenyList' wrapper) instead.
    url = self._get_url_('GroupV2', group_id, 'Members', 'Deny', membership_type, membership_id)
    headers = self._get_headers_with_token_(token)
    # For some reason this expects a body, even if it's empty.
    response = self._execute_(requests.post, url, headers=headers, json=dict())
    content = self._strip_outer_(response)
    return content
def deny_request_to_join_group(self, token, group_id, membership_type, membership_id):
    """Deny a single pending membership application.

    Bungie exposes no single-denial endpoint, so this wraps the bulk
    'DenyList' endpoint with a one-element membership list.

    Raises:
        BungieInterfaceError: when the response contains no entity, or the
            per-entity internal result code is not 1 ("Success").
    """
    url = self._get_url_('GroupV2', group_id, 'Members', 'DenyList')
    headers = self._get_headers_with_token_(token)
    data = {
        'memberships': [
            {
                'membershipId': membership_id,
                'membershipType': membership_type
            }
        ]
    }
    # Note that this response will probably return a successful HTTP status.
    # However, the _individual_ entity response contained within will return
    # an internal status code (any of over a hundred values).
    response = self._execute_(requests.post, url, headers=headers, json=data)
    content = self._strip_outer_(response)
    # Inspect the first entity for its result. For now, treat any code other
    # than 1 as failure. Guard against an empty entity list, which previously
    # raised an opaque IndexError instead of a domain error.
    if not content or content[0].get('result') != 1:
        raise BungieInterfaceError('RequestFailed', '')
    return content
def kick_member_from_group(self, token, group_id, membership_type, membership_id):
    """Remove a member from the group."""
    url = self._get_url_('GroupV2', group_id, 'Members', membership_type, membership_id, 'Kick')
    response = self._execute_(requests.post, url, headers=self._get_headers_with_token_(token))
    return self._strip_outer_(response).get('results')
def set_membership_level(self, token, group_id, membership_type, membership_id, membership_level):
    """Change a member's rank within the group."""
    url = self._get_url_('GroupV2', group_id, 'Members', membership_type, membership_id, 'SetMembershipType', membership_level)
    response = self._execute_(requests.post, url, headers=self._get_headers_with_token_(token))
    return self._strip_outer_(response)
{
"api_name": "types.SimpleNamespace",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "types.SimpleNamespace",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.gete... |
1486053711 | #! /usr/bin/env python
import rospy
import actionlib
import behavior_common.msg
import time
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from geometry_msgs.msg import Twist
from math import radians, degrees
import tf
import os, thread
# for talking
# import actionlib
import actionlib.action_client
import audio_and_speech_common.msg
from geometry_msgs.msg import PointStamped, Point, PoseStamped, Pose, Pose2D
from body_tracker_msgs.msg import BodyTracker
from enum import Enum
import tf
from playsound import playsound
# SHELDON ONLY
#from dynamixel_controllers.srv import TorqueEnable, SetServoTorqueLimit, SetSpeed
#from sheldon_servos.servo_joint_list import all_joints, head_joints, right_arm_joints
#from sheldon_servos.head_servo_publishers import *
#from sheldon_servos.standard_servo_positions import *
#from sheldon_servos.set_servo_speed import *
#from sheldon_servos.set_servo_torque import *
from tb2s_pantilt.set_servo_speed import *
#from sheldon_servos.set_servo_torque import *
# TB2S ONLY
# Module-level publishers shared by the behavior class below: TB2S head pan/tilt
# servo command topics and the velocity mux topic used to drive the base.
# queue_size=1 on the servo topics keeps only the newest command.
head_pan_pub = rospy.Publisher('/head_pan_controller/command', Float64, queue_size=1)
head_tilt_pub = rospy.Publisher('/head_tilt_controller/command', Float64, queue_size=1)
move_pub = rospy.Publisher('/cmd_vel_mux/behavior', Twist, queue_size=5)
class BehaviorAction(object):
    """Action server implementing the TB2S 'follow person' behavior.

    Subscribes to the body tracker, steers the head pan/tilt servos to keep
    the tracked person centered, and drives the base toward them. A small
    state machine handles acquiring the first person, tracking, and waiting
    for a gesture after the person is lost.
    """
    def __init__(self, name):
        # Name under which the SimpleActionServer is advertised.
        self._action_name = name
        rospy.loginfo('%s: Initializing Python behavior service' % (self._action_name))
        self._as = actionlib.SimpleActionServer(self._action_name,
            behavior_common.msg.behaviorAction, execute_cb=self.execute_cb, auto_start = False)
        self._as.start()

        # Behavior Settings
        # TODO - How to get these parameters passed from launch file to here?
        # for now, just set here.
        self.enable_body_tracking = True # rospy.get_param('~enable_body_tracking', True)
        # rospy.loginfo('%s: PARAM: enable_body_tracking = %d'.(self._action_name),
        #     self.enable_body_tracking)
        self.enable_random_head_movement = False # rospy.get_param('~enable_random_head_movement', True)
        # rospy.loginfo('%s: PARAM: enable_random_head_movement = %d'.(self._action_name),
        #     self.enable_random_head_movement)
        self.camera_link = 'camera_link' # rospy.get_param('~camera_link', 'camera_link')
        self.head_pan_joint = rospy.get_param('~head_pan_joint', 'head_pan_joint')
        self.head_tilt_joint = rospy.get_param('~head_tilt_joint', 'head_tilt_joint')
        self.resource_dir = rospy.get_param('resource_dir',
            "/home/system/catkin_robot/src/tb2s/tb2s_behaviors/follow_behavior/scripts/resources/")
        self.ding_path = os.path.join(self.resource_dir, "ding.wav")
        rospy.loginfo("DBG: DING PATH: %s", self.ding_path)

        # constants
        self.MAX_PAN = 1.5708             # 90 degrees
        self.MAX_TILT = 0.60              # Limit vertical to assure good tracking
        self.DEADBAND_ANGLE = 0.0872665   # 5 deg deadband in middle to prevent osc
        self.FIRST_TARGET_TIMEOUT_SECONDS = 5.0
        self.TRACKING_TIMEOUT_SECONDS = 3.0
        self.GESTURE_TIMEOUT_SECONDS = 10.0
        self.DEFAULT_TILT_ANGLE = -0.35   # tilt head up slightly to find people more easily

        # Timeout timers (reset to rospy.Time.now() when each phase starts)
        self.first_target_timer = 0
        self.tracking_timer = 0
        self.gesture_timer = 0

        self.joint_state = JointState() # for reading servo positions
        #self.astra_target = list()
        self.id_to_track = 0  # 0 = not tracking anyone

        # Initialize State Machine
        self.TrackingState = Enum('TrackingState',
            'WAITING_FOR_FIRST_ID TRACKING WAITING_FOR_GESTURE')
        self.tracking_state = self.TrackingState.WAITING_FOR_FIRST_ID

        rospy.loginfo("%s: init complete", self._action_name)
        # Audible cue that the behavior node is ready.
        playsound(self.ding_path)

    #------------------------------------------------------------------------
    def Clamp(self, value, max_value): # clamp between pos and neg max_value
        """Clamp value to the range [-max_value, +max_value]."""
        return max(min(value, max_value), -max_value)

    def MoveRobot(self, tracking_angle, tracking_distance):
        """Publish a base velocity that turns toward and approaches the person.

        Turn rate is proportional to the tracking angle; forward/backward speed
        grows quadratically outside a distance deadband (no motion between
        BACKWARD_DEADBAND and FOWARD_DEADBAND meters).
        """
        rospy.loginfo("%s: MoveRobot: Angle = %f, Distance = %f",
            self._action_name, tracking_angle, tracking_distance)
        TURN_MULTIPLIER = 3.0
        FORWARD_ACCELERATION_MULTIPLIER = 0.2
        TURN_DEADBAND = 0.01   # NOTE(review): currently unused — confirm intent
        FOWARD_DEADBAND = 1.2  #1.6
        BACKWARD_DEADBAND = 0.9 # 1.3
        BACKWARD_ACCELERATION_MULTIPLIER = 0.1
        speed = 0.0
        turn = 0.0

        # Set Turn
        turn = tracking_angle * TURN_MULTIPLIER;

        # Set Speed
        if tracking_distance > FOWARD_DEADBAND:
            speed = FORWARD_ACCELERATION_MULTIPLIER * (tracking_distance * tracking_distance);
        elif (tracking_distance < BACKWARD_DEADBAND) and tracking_distance > 0:
            # Back away faster the closer the person is.
            speed = -BACKWARD_ACCELERATION_MULTIPLIER * (1 / (tracking_distance * tracking_distance));

        twist = Twist()
        twist.linear.x = speed; twist.linear.y = 0; twist.linear.z = 0
        twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = turn
        move_pub.publish(twist)

    def StopRobot(self):
        """Publish a zero twist to halt the base immediately."""
        twist = Twist()
        twist.linear.x = 0; twist.linear.y = 0; twist.linear.z = 0
        twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = 0
        move_pub.publish(twist)

    #------------------------------------------------------------------------
    # Callbacks

    def joint_state_cb(self, msg):
        """Cache the latest /joint_states message containing the head servos."""
        #rospy.loginfo("%s: joint_state_cb called", self._action_name)
        try:
            # Only keep messages that actually include the head pan joint.
            test = msg.name.index(self.head_pan_joint)
            self.joint_state = msg
        except:
            return

        # Get the current servo pan and tilt position
        try:
            current_pan = self.joint_state.position[
                self.joint_state.name.index(self.head_pan_joint)]
            current_tilt = self.joint_state.position[
                self.joint_state.name.index(self.head_tilt_joint)]
        except:
            return
        #rospy.loginfo("%s: joint_state_cb: Current Pan = %f, Tilt = %f",
        #    self._action_name, current_pan, current_tilt)

    #------------------------------------------------------------------------
    # 3D Pose Tracking: Message contains xyz of person
    # position is relative to the robot
    # NOT USED CURRENTLY. May use later with stationary camera or Astra SDK
    def body_pose_cb(self, msg):
        """Disabled 3D pose handler; logs and returns immediately."""
        rospy.loginfo('%s: ERROR ERROR got 3D body_pose message' % (self._action_name))
        return
        # NOTE(review): everything below is unreachable dead code, retained
        # from the 3D-tracking experiment. If re-enabled it will fail as-is:
        # person_id is never assigned, and MAX_PAN/MAX_TILT lack the 'self.'
        # prefix. Clean up before reuse.
        if person_id != self.id_to_track:
            # not the right person, skip
            rospy.loginfo("%s: Body Tracker: Tracking ID is %d, skipping pose2D for ID %d",
                self._action_name, self.id_to_track, person_id )
            return

        # position component of the target pose stored as a PointStamped() message.
        # create a PointStamped structure to transform via transformPoint
        target = PointStamped()
        target.header.frame_id = msg.header.frame_id
        target.point = msg.pose.position

        if target.point.z == 0.0:
            rospy.loginfo('%s: skipping blank message' % (self._action_name))
            return

        rospy.loginfo("%s: Body Tracker: Tracking person at %f, %f, %f", self._action_name,
            target.point.x, target.point.y, target.point.z)

        # convert from xyz to pan tilt angles
        # TODO: 1) Handle Children - currently assumes everyone is 6 foot tall!
        #       2) What happens if robot bows?
        if target.point.x < 0.2: # min range of most depth cameras
            #rospy.loginfo("%s: Body Tracker: Bad Distance (x) value! %f",
            #    self._action_name, target.point.x)
            return

        # math shortcut for approx radians
        pan_angle = target.point.y / target.point.x

        # OPTION 1: Track actual target height
        #person_head_offset_radians = 0.52 # TB2S value - TODO Tune this
        #tilt_angle = (target.point.z / target.point.x) + person_head_offset_radians

        # OPTION 2: Guess height, based upon distance to person
        # FUTURE: combine the two, use "guess" when person is too close?
        tilt_angle = 0.4 / target.point.x # SHELDON, CHEST MOUNTED camera

        rospy.loginfo("%s: Body Tracker: Pan = %f (%f), Tilt = %f (%f)", self._action_name,
            pan_angle, degrees(pan_angle), tilt_angle, degrees(tilt_angle))

        # Send servo commands
        if abs(pan_angle) > MAX_PAN: # just over 45 degrees - TODO put in actual limits here!
            rospy.loginfo("%s: Body Tracker: Pan %f exceeds MAX", self._action_name, pan_angle)
            return
        if abs(tilt_angle) > MAX_TILT: # Limit vertical to assure good tracking
            rospy.loginfo("%s: Body Tracker: Tilt %f exceeds MAX", self._action_name, tilt_angle)
            return

        head_pan_pub.publish(pan_angle)
        head_tilt_pub.publish(-tilt_angle)

        # SHELDON ONLY
        #sidetiltAmt = 0.0
        #head_sidetilt_pub.publish(sidetiltAmt)

    #------------------------------------------------------------------------
    # 2D Tracking: Message contains person horizontal (x) and vertical (y)
    # position is relative to the depth image.
    # we use this to control the head tracking (less oscillation?)
    def position_cb(self, msg):
        """Main tracking callback: runs the state machine, steers the head
        servos toward the person, and commands the base to follow."""
        #rospy.loginfo('%s: got position_cb message' % (self._action_name))
        delta_angle_x = msg.position2d.x # position in radians from center of camera lens
        delta_angle_y = msg.position2d.y
        person_tracking_distance = msg.position2d.z
        person_id = msg.body_id
        gesture = msg.gesture

        if self.tracking_state == self.TrackingState.WAITING_FOR_FIRST_ID:
            # this is the first tracking frame we have received
            self.id_to_track = person_id # no id assigned yet, so use this one
            self.tracking_timer = rospy.Time.now() # start tracking timer
            self.tracking_state = self.TrackingState.TRACKING
            rospy.loginfo("%s: Tracking Person_ID %d", self._action_name, person_id)
        elif self.tracking_state == self.TrackingState.WAITING_FOR_GESTURE:
            # lost person, waiting to restart tracking with a gesture
            if gesture > -1:
                playsound(self.ding_path)
                self.id_to_track = person_id # got a gesture, so use this ID
                self.tracking_timer = rospy.Time.now() # start tracking timer
                self.tracking_state = self.TrackingState.TRACKING
                rospy.loginfo("%s: ---------------------> Person_ID %d Gesture detected: %d",
                    self._action_name, person_id, gesture)
                rospy.loginfo("%s: Tracking Person_ID %d", self._action_name, person_id)
                # say something
                rospy.loginfo("Talking")
                goal = audio_and_speech_common.msg.speechGoal(
                    text_to_speak="ok, i see you")
                self.speech_client.send_goal(goal)
                #result = self.speech_client.wait_for_result() # wait for speech to complete
                #rospy.loginfo("Speech goal returned result: %d", result)
            else:
                return # continue waiting
        elif self.tracking_state != self.TrackingState.TRACKING:
            rospy.logwarn("%s: TRACKING STATE ERROR, unknown state: %d",
                self._action_name, self.tracking_state)
            return

        if person_id != self.id_to_track:
            # not the right person, skip
            time_since_last_target = rospy.Time.now() - self.tracking_timer
            rospy.loginfo("%s: Skipping ID %d, tracking ID is %d, timer is %f",
                self._action_name, self.id_to_track, person_id, time_since_last_target.to_sec() )
            return

        # --------------------------------------------------------------------
        # GOT A PERSON TO TRACK
        rospy.loginfo("%s: Body Tracker: Person %d 2D Delta: x = %f, y = %f",
            self._action_name, person_id, delta_angle_x, delta_angle_y )

        # Get the current servo pan and tilt position
        try:
            current_pan = self.joint_state.position[
                self.joint_state.name.index(self.head_pan_joint)]
            current_tilt = self.joint_state.position[
                self.joint_state.name.index(self.head_tilt_joint)] * -1.0
        except:
            return
        #rospy.loginfo("%s: Body Tracker: Current Servo: Pan = %f, Tilt = %f",
        #    self._action_name, current_pan, current_tilt)

        # add target position to current servo position
        pan_angle = current_pan + (delta_angle_x * 0.75) #shoot for less
        tilt_angle = current_tilt + (delta_angle_y * 0.75)
        #rospy.loginfo("%s: Before Clamp: Servo Command: Pan = %f, Tilt = %f",
        #    self._action_name, pan_angle, tilt_angle)

        # limit to valid servo range
        pan_angle = self.Clamp(pan_angle, self.MAX_PAN)
        tilt_angle = self.Clamp(tilt_angle, self.MAX_TILT)
        #rospy.loginfo("%s: After Clamp: Servo Command: Pan = %f, Tilt = %f",
        #    self._action_name, pan_angle, tilt_angle)

        # Save value to steer the robot toward
        person_tracking_angle = pan_angle

        # command servos to move to target, if not in deadband
        pan_on_target = True
        tilt_on_target = True
        if abs(delta_angle_x) > self.DEADBAND_ANGLE:
            head_pan_pub.publish(pan_angle)
            pan_on_target = False
        if abs(delta_angle_y) > self.DEADBAND_ANGLE:
            # Tilt servo direction is inverted relative to the camera frame.
            head_tilt_pub.publish(-tilt_angle)
            tilt_on_target = False

        #if pan_on_target and tilt_on_target:
        #    rospy.loginfo("%s: Body Track On target ID %d",
        #        self._action_name, person_id)
        #else:
        #    rospy.loginfo("%s: Body Track ID %d: Pan delta = %f, Tilt Delta = %f",
        #        self._action_name, person_id, delta_angle_x, delta_angle_y)

        # SHELDON ONLY
        #side_tilt_angle = 0.0
        #head_sidetilt_pub.publish(side_tilt_angle)

        # Move the robot to follow person
        self.MoveRobot(person_tracking_angle, person_tracking_distance)
        self.tracking_timer = rospy.Time.now() # reset tracking timer

    #------------------------------------------------------------------------
    # Execute Behavior - Starts when this behavior goes active
    def execute_cb(self, goal):
        """Action server callback: set up subscribers/speech, then loop until
        preempted, a first target never appears, or a lost user never gestures."""
        #r = rospy.Rate(1)

        # Initalize state engine
        self.tracking_state = self.TrackingState.WAITING_FOR_FIRST_ID

        # initialize Speech
        rospy.loginfo("Waiting for speech server (press ctrl-c to cancel at anytime)")
        self.speech_client = actionlib.SimpleActionClient("/speech_service",
            audio_and_speech_common.msg.speechAction)
        self.speech_client.wait_for_server()

        # Subscribers - begin listening for tracking messages
        # NOTE(review): if enable_body_tracking is ever False, position_sub and
        # servo_sub are undefined at the unregister() calls below — confirm.
        if self.enable_body_tracking:
            position_sub = rospy.Subscriber("/body_tracker/position", BodyTracker, self.position_cb, queue_size=1)
            #rospy.Subscriber("/body_tracker/pose", PoseStamped, self.body_pose_cb, queue_size=1)
            #pose2d_sub = rospy.Subscriber("/body_tracker/pose2d", Pose2D, self.pose_2d_cb, queue_size=1)
            servo_sub = rospy.Subscriber('/joint_states', JointState, self.joint_state_cb) # for servos
            #gesture_sub = rospy.Subscriber('/body_tracker/gesture', Pose2D, self.gesture_cb)

        # Set servos speed and torque
        # TODO SetServoTorque(0.5, head_joints)
        # NOTE(review): head_joints is presumably star-imported from
        # tb2s_pantilt.set_servo_speed — verify it is defined there.
        SetServoSpeed(0.5, head_joints)

        # Center Camera Head
        #head_pan_pub.publish(0.0)
        #head_tilt_pub.publish(self.DEFAULT_TILT_ANGLE) # tilt head up to find people more easily
        #head_sidetilt_pub.publish(0.0) # SHELDON ONLY

        # start with robot stopped
        self.StopRobot()

        # say something
        rospy.loginfo("Talking")
        goal = audio_and_speech_common.msg.speechGoal(text_to_speak="ok, i will follow you")
        self.speech_client.send_goal(goal)
        #result = self.speech_client.wait_for_result() # wait for speech to complete
        #rospy.loginfo("Speech goal returned result: %d", result)

        # Initialize timers
        self.first_target_timer = rospy.Time.now()
        self.tracking_timer = rospy.Time.now()
        self.gesture_timer = rospy.Time.now()

        rospy.loginfo('==================================================')
        rospy.loginfo('==================================================')
        rospy.loginfo('==================================================')
        rospy.loginfo('%s: waiting to spot first person...' % (self._action_name))

        # -------- LOOP --------
        # Callbacks advance the state machine; this loop only watches timeouts.
        while True:
            rospy.loginfo('==================================================')
            rospy.loginfo('%s: Tracking State: %s', self._action_name, self.tracking_state.name)
            rospy.loginfo('==================================================')
            if self._as.is_preempt_requested():
                break # higher priority behavior requested

            if self.tracking_state == self.TrackingState.WAITING_FOR_FIRST_ID:
                time_waiting_for_first_target = rospy.Time.now() - self.first_target_timer
                if time_waiting_for_first_target > rospy.Duration.from_sec(
                    self.FIRST_TARGET_TIMEOUT_SECONDS):
                    rospy.logwarn("%s: time_waiting_for_first_target: I DONT SEE ANYONE TO TRACK!",
                        self._action_name)
                    # say something
                    rospy.loginfo("Talking")
                    goal = audio_and_speech_common.msg.speechGoal(text_to_speak="darn, I dont see anyone to follow")
                    self.speech_client.send_goal(goal)
                    #result = self.speech_client.wait_for_result() # wait for speech to complete
                    #rospy.loginfo("Speech goal returned result: %d", result)
                    break # did not find a person to track
            elif self.tracking_state == self.TrackingState.TRACKING:
                time_since_last_target = rospy.Time.now() - self.tracking_timer
                rospy.loginfo("%s: State = TRACKING. Time since last frame: %d",
                    self._action_name, time_since_last_target.to_sec() )
                if time_since_last_target > rospy.Duration.from_sec(
                    self.TRACKING_TIMEOUT_SECONDS):
                    # target timed out! Lost User!
                    head_tilt_pub.publish(self.DEFAULT_TILT_ANGLE) # Set tilt for optimal capture
                    rospy.loginfo("%s: LOST USER! waiting for gesture...", self._action_name)
                    self.gesture_timer = rospy.Time.now()
                    self.tracking_state = self.TrackingState.WAITING_FOR_GESTURE
                    # say something
                    rospy.loginfo("Talking")
                    goal = audio_and_speech_common.msg.speechGoal(
                        text_to_speak="darn, I lost you. please wave or something")
                    self.speech_client.send_goal(goal)
                    #result = self.speech_client.wait_for_result() # wait for speech to complete
                    #rospy.loginfo("Speech goal returned result: %d", result)
            elif self.tracking_state == self.TrackingState.WAITING_FOR_GESTURE:
                time_waiting_for_gesture = rospy.Time.now() - self.gesture_timer
                if time_waiting_for_gesture > rospy.Duration.from_sec(
                    self.GESTURE_TIMEOUT_SECONDS):
                    rospy.logwarn("%s: time_waiting_for_gesture: I DONT SEE ANY GESTURES!",
                        self._action_name)
                    # say something
                    rospy.loginfo("Talking")
                    goal = audio_and_speech_common.msg.speechGoal(
                        text_to_speak="i have stopped following, now what")
                    self.speech_client.send_goal(goal)
                    #result = self.speech_client.wait_for_result() # wait for speech to complete
                    #rospy.loginfo("Speech goal returned result: %d", result)
                    break # did not find a gesture to restart tracking
            else:
                rospy.logwarn("%s: BAD STATE!", self._action_name )

            time.sleep(0.5)

        #----------------------------------------------------------------
        # Behavior Exit

        # Stop wheels before exiting
        self.StopRobot()

        rospy.loginfo('%s: Behavior Exiting', self._action_name)
        position_sub.unregister()
        #pose2d_sub.unregister()
        servo_sub.unregister()
        #gesture_sub.unregister()

        # Report exit status
        if self._as.is_preempt_requested():
            self._as.set_preempted();
        else:
            self._as.set_succeeded();
if __name__ == '__main__':
    # Start the behavior action server node and spin until shutdown.
    rospy.init_node('follow_behavior')
    server = BehaviorAction(rospy.get_name())
    rospy.spin()
| shinselrobots/tb2s | tb2s_behaviors/follow_behavior/scripts/behavior_service.py | behavior_service.py | py | 21,227 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "rospy.Publisher",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "std_msgs.msg.Float64",
"line_number": 38,
"usage_type": "argument"
},
{
"api_name": "rospy.Publisher",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "std_msgs.msg... |
26558715293 | #!/usr/bin/env python3
import sys
import os
import argparse
import pandas as pd
import vcf
def main():
    """Build a reference set of samples per pangolin lineage.

    Reads the full metadata/fasta database, selects representative samples
    per lineage based on VCF/allele-frequency input, and writes the filtered
    metadata and sequences into the output directory.
    """
    parser = argparse.ArgumentParser(description="Build reference set consisting of a selection of samples per pangolin lineage.")
    parser.add_argument('--vcf', required=True, type=str, nargs='+', help="vcf files per lineage")
    parser.add_argument('--freq', required=True, type=str, nargs='+', help="allele frequency files per lineage")
    parser.add_argument('--min_aaf', default=0.5, type=float, help="minimal alternative allele frequency (AAF) to consider variation")
    parser.add_argument('--max_per_lineage', default=100, type=int, help="select at most k sequences per lineage")
    # BUGFIX: the short and long flags were previously a single malformed
    # string ('-m, --metadata'), producing an unusable option name; argparse
    # expects them as separate option strings.
    parser.add_argument('-m', '--metadata', dest='metadata', type=str, help="metadata tsv file for full sequence database")
    parser.add_argument('-f', '--fasta', dest='fasta_in', type=str, help="fasta file representing full sequence database")
    parser.add_argument('-n', '--nonN_counts', dest='nonN_counts', type=str, help="txt file with the number of nonambiguous characters per sequence")
    parser.add_argument('-o', '--outdir', dest='outdir', type=str, default='.', help="output directory")
    args = parser.parse_args()

    # Create output directory (no-op if it already exists; also creates
    # missing parent directories, unlike the previous os.mkdir).
    os.makedirs(args.outdir, exist_ok=True)

    # Select references per pango lineage
    full_df = read_metadata(args.metadata, args.nonN_counts)
    selection_df = select_ref_genomes(full_df, args.max_per_lineage, args.vcf,
                                      args.freq, args.min_aaf)

    # Write metadata of selected samples to new tsv
    metadata_out = args.outdir + "/metadata.tsv"
    selection_df.to_csv(metadata_out, sep='\t', index=False)
    print("Metadata for selected sequences is in {}".format(metadata_out))

    # Filter fasta according to selection and write new fasta
    fasta_out = args.outdir + "/sequences.fasta"
    filter_fasta(args.fasta_in, fasta_out, selection_df)
    print("Selected sequences written to {}".format(fasta_out))
    return
def read_metadata(metadata_file, nonN_count_file=None):
    """Load the sequence metadata tsv into a dataframe.

    Adds a nonN_count column (from nonN_count_file when given, '.' otherwise),
    normalises partial dates, and drops rows without a pangolin lineage.
    """
    meta = pd.read_csv(metadata_file, sep='\t', header=0, dtype=str)
    # add field with number of N's in sequence
    if nonN_count_file:
        meta = add_nonN_count(meta, nonN_count_file)
    else:
        meta["nonN_count"] = "."
    # '-XX' marks an unknown day/month component; pin it to the first so the
    # value still parses as a date.
    meta["date"] = meta["date"].str.replace('-XX', '-01')
    meta["date"] = pd.to_datetime(meta.date, yearfirst=True)
    # remove samples which have no pangolin lineage assigned (NaN or "None")
    meta = meta[meta["pangolin_lineage"].notna()]
    return meta[meta["pangolin_lineage"] != "None"]
def select_ref_genomes(metadata_df, max_per_lineage, vcf_list, freq_list, min_aaf):
    """For every pangolin lineage, select exactly one sample."""
    # NOTE(review): despite the docstring, up to max_per_lineage samples can
    # be selected per lineage (see the selection loop) — confirm intent.
    # check which lineages are present
    lineages = metadata_df["pangolin_lineage"].unique()
    lineage_counts = metadata_df["pangolin_lineage"].value_counts()  # NOTE(review): unused
    print("# lineages = {}".format(len(lineages)))
    # assign vcfs to lineages, assuming vcfs are in current directory and named after the corresponding lineage
    # NOTE: the comprehension variable `vcf` shadows the imported pyvcf module
    # only inside the comprehension scope (safe in Python 3, but confusing).
    vcf_dict = {vcf.split('/')[-1].split('_')[0] : vcf for vcf in vcf_list}
    freq_dict = {fname.split('/')[-1].split('_')[0] : fname for fname in freq_list}
    # select samples for every lineage
    selection_ids = []
    for lin_id in lineages:
        samples = metadata_df.loc[metadata_df["pangolin_lineage"] == lin_id]
        # NOTE(review): this sorted dataframe is overwritten below by the VCF
        # sample list (samples = vcf_reader.samples), so the sort currently has
        # no effect on which samples get selected — confirm intent.
        samples = samples.sort_values(by=["nonN_count", "date"], ascending=False)
        # read allele frequencies and extract sites with AAF >= minimal alt allele frequency
        try:
            allele_freq_file = freq_dict[lin_id]
        except KeyError as e:
            print("WARNING: skipping lineage {}, allele frequency info missing".format(lin_id))
            continue
        variant_positions = []
        with open(allele_freq_file, 'r') as f:
            for line in f:
                line = line.split('\t')
                if line[0] == "CHROM":
                    # header line
                    continue
                # column 4 holds "<ref_allele>:<frequency>"
                ref_info = line[4]
                ref_allele, freq = ref_info.split(':')
                ref_allele_freq = float(freq)
                alt_allele_freq = 1 - ref_allele_freq
                if alt_allele_freq > min_aaf:
                    variant_positions.append(int(line[1]))
        print("{} total # sites with alt allele frequency > {} = {}".format(
            lin_id, min_aaf, len(variant_positions)))
        # read vcf and process samples
        try:
            vcf_file = vcf_dict[lin_id]
        except KeyError as e:
            print("WARNING: skipping lineage {}, VCF info missing".format(lin_id))
            continue
        vcf_reader = vcf.Reader(open(vcf_file, 'rb'))
        samples = vcf_reader.samples
        # For every sample, record its allele at each variable site.
        sample_patterns = {sample : [] for sample in samples}
        for record in vcf_reader:
            if record.POS in variant_positions:
                alleles = [record.REF] + [str(x) for x in record.ALT]
                for sample in samples:
                    genotype = record.genotype(sample)['GT']
                    allele_idx = int(genotype[0])
                    allele = alleles[allele_idx]
                    sample_patterns[sample].append(allele)
        variation_seen = {pos : [] for pos in variant_positions}
        selection_count = 0
        if len(variant_positions) == 0:
            # No within-lineage variation: one representative suffices.
            selection_ids.append(samples[0])
            selection_count += 1
        else:
            # Greedily keep any sample that contributes a not-yet-seen allele
            # at any variable site, up to max_per_lineage samples.
            for sample in samples:
                select = False
                variation = sample_patterns[sample]
                for i, pos in enumerate(variant_positions):
                    allele = variation[i]
                    if allele not in variation_seen[pos]:
                        select = True
                        variation_seen[pos].append(allele)
                if select:
                    selection_ids.append(sample)
                    selection_count += 1
                if selection_count == max_per_lineage:
                    break
        print("{} sequences selected for lineage {}".format(selection_count,
            lin_id))
        if selection_count == 0:
            print("ERROR: no sequences selected for lineage {}".format(lin_id))
            sys.exit(1)
    print("{} sequences selected in total".format(len(selection_ids)))
    # Selected IDs are VCF sample names, matched against gisaid_epi_isl.
    selection_df = metadata_df.loc[
        metadata_df["gisaid_epi_isl"].isin(selection_ids)]
    return selection_df
def add_nonN_count(df, nonN_count_file):
    """Attach a nonN_count column to df, keyed by the 'strain' column.

    The count file has one '<strain_id>\\t<count>' record per line and must
    cover exactly the strains present in df.
    """
    counts = {}
    with open(nonN_count_file, 'r') as handle:
        for record in handle:
            seq_id, n = record.rstrip('\n').split('\t')
            counts[seq_id] = int(n)
    # Every strain must have a count (and vice versa).
    assert len(df.index) == len(counts)
    df["nonN_count"] = [counts[seq_id] for seq_id in df["strain"]]
    return df
def filter_fasta(fasta_in, fasta_out, selection_df):
    """Copy to fasta_out only the records whose identifier appears in
    selection_df["strain"]."""
    keep_ids = set(selection_df["strain"].unique())
    writing = False
    with open(fasta_in, 'r') as src, open(fasta_out, 'w') as dst:
        for line in src:
            if line.startswith('>'):
                # Header line: decide whether this record is kept.
                writing = line.rstrip('\n').lstrip('>') in keep_ids
            if writing:
                # Header and subsequent sequence lines of a kept record.
                dst.write(line)
    return
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| baymlab/wastewater_analysis | manuscript/select_samples_v1.py | select_samples_v1.py | py | 7,931 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",... |
34140743906 | import cv2
import numpy as np
from model import *
from scipy.spatial.distance import cosine
def read_pairs(path):
    """Parse a pair-list file into [path1, path2, is_same] triples.

    Each line has the form "<path1> <path2> <label>" where label "1" marks a
    matching pair; the boolean third element reflects that.
    """
    with open(path) as f:
        lines = f.readlines()
    # BUGFIX: the previous code sliced line[:-1] to drop the newline, which
    # also chopped the last character of a final line that lacks a trailing
    # newline (turning its label into ''); rstrip('\n') is safe either way.
    records = [line.rstrip('\n').split(' ') for line in lines]
    return [[rec[0], rec[1], rec[2] == '1'] for rec in records]
def get_test_pair_images(filename, image_h, image_w):
    """
    read in file list

    Builds three aligned tensors from a pair-list file whose lines are
    "<path1> <path2> <label>": two grayscale image stacks of shape
    [N, 1, image_h, image_w] and an [N] label vector.
    """
    with open(filename) as file1:
        filelist = file1.readlines()
    img_tensor1 = np.zeros([len(filelist), 1, image_h, image_w])
    img_tensor2 = np.zeros([len(filelist), 1, image_h, image_w])
    label_tensor = np.zeros([len(filelist)])
    for i in range(len(filelist)):
        path1, path2, label = filelist[i].split(" ")
        # Read both images as grayscale (single channel).
        img1 = cv2.imread(path1, 0)
        img2 = cv2.imread(path2, 0)
        shape1, shape2 = img1.shape
        if shape1 != image_w or shape2 != image_h:
            img1 = cv2.resize(img1, (image_w, image_h))
            img2 = cv2.resize(img2, (image_w, image_h))
        img1 = img1.reshape(1, img1.shape[0], img1.shape[1])
        img2 = img2.reshape(1, img2.shape[0], img2.shape[1])
        # NOTE(review): on a (1, H, W) grayscale array, [:,:,::-1] reverses the
        # width axis (a horizontal flip), not color channels — this looks
        # copied from BGR->RGB code for HxWx3 images; confirm intent.
        img1 = img1[:,:,::-1].astype(np.float32)
        img2 = img2[:,:,::-1].astype(np.float32)
        img_tensor1[i] = img1
        img_tensor2[i] = img2
        label_tensor[i] = int(label)
    return img_tensor1, img_tensor2, label_tensor
def extract_features(img_path):
    """
    read in images in [1 x 1 x 128 x 128]

    Loads the file as grayscale, resizes to 128x128, scales pixel values to
    [0, 1], and returns a 4-D batch of shape (1, 1, 128, 128).
    """
    gray = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    gray = cv2.resize(gray, (128, 128)) / 255
    batch = np.zeros((1, 1, 128, 128))
    batch[0] = np.reshape(gray, (1, 128, 128))
    return batch
def test(model, img_list, image_h=128, image_w=128, write=True):
    """Evaluate *model* on a pair list and report verification metrics.

    Args:
        model: network exposing ``eval()`` and ``lfw_inference(img1, img2)``,
            whose return value is truthy for a "same person" prediction.
        img_list: path to a pair-list file understood by ``read_pairs``.
        image_h, image_w: kept for API compatibility; images are actually
            loaded at a fixed 128x128 by ``extract_features``.
        write: when True, one raw prediction per line is dumped to
            predict.txt.  NOTE(review): predict.txt is truncated even when
            write=False because the file is opened unconditionally —
            preserved here for compatibility.

    Returns:
        The number of correctly classified pairs (TP + TN) — a count,
        not a ratio, matching the original interface.
    """
    model.eval()
    read_list = read_pairs(img_list)
    tp, tn, fp, fn = 0, 0, 0, 0
    acc = 0  # running count of correct predictions
    with open('predict.txt', "w+") as f:
        for idx, (path1, path2, label) in enumerate(read_list):
            img1 = extract_features(path1)
            img2 = extract_features(path2)
            y = model.lfw_inference(img1, img2)
            if y == label and label:
                tp += 1
                acc += 1
            elif y != label and not label:
                fp += 1
            elif y == label and not label:
                tn += 1
                acc += 1
            else:
                fn += 1
            if write:
                f.write(str(y) + '\n')
            if not (idx+1) % 100:  # periodic progress report (ratio)
                print('TP: {} TN: {} FP: {} FN: {} ACC: {}'.format(tp, tn, fp, fn, float(acc) / (idx + 1)))
    # Report the final accuracy as a ratio, consistent with the periodic
    # lines above (the old code printed the raw correct count as "ACC").
    final_acc = float(acc) / len(read_list) if read_list else 0.0
    print('TP: {} TN: {} FP: {} FN: {} ACC: {}'.format(tp, tn, fp, fn, final_acc))
    return acc
# Script entry point: evaluate a pretrained LightCNN-29 on the pair list.
if __name__ == "__main__":
    model_path = 'model'  # path/prefix of the saved weights loaded below
    test_path = 'train-aligned-label.txt'  # pair-list file to evaluate
    # NOTE(review): constructor arguments (79077, 0.2) inferred only from
    # this call site — confirm their meaning against the model module.
    model = lightcnn29(79077, 0.2)
    model.load(model_path)
    acc = test(model, test_path)
print(acc) | Jasmineysj/Face-verification-system | test.py | test.py | py | 2,930 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 2... |
29380151380 | import scrapy
from bs4 import BeautifulSoup
class CNGlobalStock(scrapy.Spider):
    """Spider for wallstreetcn.com articles.

    Starts from a single article and keeps following the "next" navigation
    link, yielding one item (title, plain text, timestamp) per page.
    """
    # modeled after: https://wallstreetcn.com/articles/3499602
    name = "wallstreetcn"
    start_urls = ["https://wallstreetcn.com/articles/3499602"]

    def parse(self, response):
        # `.get()` returns None when the selector matches nothing; fall back
        # to an empty document so BeautifulSoup does not raise TypeError.
        article_body = response.css("div.rich-text").get()
        # Name the parser explicitly: relying on bs4's auto-detection emits
        # GuessedAtParserWarning and can parse differently across machines.
        soup = BeautifulSoup(article_body or "", "html.parser")
        yield {
            "title": response.xpath("//title/text()").get(),
            "text": soup.get_text(),
            "time": response.css("time::attr('datetime')").get()
        }
        next_page = response.css("div.nav-item.next a::attr('href')").get()
        # Follow the next page if one is found
        if next_page is not None:
            yield response.follow(next_page, self.parse)
| mattfeng/bluefire | scrapers/wallstreetcn/old/wallstreetcn_scraper.py | wallstreetcn_scraper.py | py | 796 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 11,
"usage_type": "call"
}
] |
36892858608 | from lxml import html
import requests
import csv
from os.path import exists
import time
import sys
# function printing additional info about request
def my_http_get(url):
    """GET *url*, logging timing and status; abort the program on non-200.

    Sleeps 3 seconds before every request to stay under fbref's rate limit.
    """
    print('')
    print('REQUEST ' + url)
    time.sleep(3)  # added to avoid being blocked by fbref server
    start = time.time()
    result = requests.get(url)
    elapsed = time.time() - start
    print('request time ' + str(elapsed))
    print('status code ' + str(result.status_code))
    if result.status_code != 200:
        print('last request failed')
        # quit() is an interactive-shell helper injected by the site module
        # (absent under `python -S` / frozen builds) and exits with code 0.
        # sys.exit(1) raises the same SystemExit but reports failure.
        sys.exit(1)
    return result
# ---- command-line handling -------------------------------------------------
baseurl = 'https://fbref.com'
currenturl = ''
filename = ''
minimum_matches = 0
if len(sys.argv) <= 1:
    print('usage python3 SeasonSquadScraper.py [link to league table of a scraped league] [minimum number of matches a player need to play to be counted (default 0)]')
    print('e.g. python3 SeasonSquadScraper.py https://fbref.com/en/comps/36/Ekstraklasa-Stats 5')
    quit()
else:
    currenturl = sys.argv[1]
    # Output CSV is named after the last URL segment,
    # e.g. "Ekstraklasa-StatsEdges.csv".
    splittedUrl = sys.argv[1].split('/')
    leagueName = splittedUrl[-1]
    filename = leagueName + 'Edges.csv'
    if len(sys.argv) >= 3:
        minimum_matches = int(sys.argv[2])

# Create the CSV with its header only once so re-runs append to it.
if not exists(filename):
    with open(filename, 'w') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow(['#id1', 'id2'])

# ---- walk seasons backwards ------------------------------------------------
prevseasonexist = True
while prevseasonexist:
    page = my_http_get(currenturl)
    tree = html.fromstring(page.content)
    teams = tree.xpath('/html/body/div[@id="wrap"]/div[@id="content"]/div/div/div/table/tbody/tr/td[@class="left "]/a/@href')
    # The page lists every club link twice; keep a single copy.
    teams = teams[:len(teams)//2]
    print('no teams ' + str(len(teams)))
    for team in teams:
        teamurl = baseurl + team
        matchdetails = my_http_get(teamurl)
        teamhtml = html.fromstring(matchdetails.content)
        playersid = teamhtml.xpath('/html/body/div[@id="wrap"]/div[@id="content"]/div[@id="all_stats_standard"]/div/table/tbody/tr/th[@class="left "]/@data-append-csv')
        players_matches = teamhtml.xpath('/html/body/div[@id="wrap"]/div[@id="content"]/div[@id="all_stats_standard"]/div/table/tbody/tr/td[@data-stat="games"]/text()')
        players_matches = [int(item) for item in players_matches]
        # Keep only players with strictly more than `minimum_matches` games.
        players = list(zip(playersid, players_matches))
        players = [item[0] for item in players if item[1] > minimum_matches]
        with open(filename, 'a') as csvfile:
            csvwriter = csv.writer(csvfile)
            csvwriter.writerow(['# ' + str(teamurl)])
            # Emit every unordered pair of team-mates as one graph edge.
            for i in range(len(players)):
                for j in range(i + 1, len(players)):
                    csvwriter.writerow([players[i], players[j]])
    # log info to file about scraped season
    with open('seasonSquadsParsed', 'a') as file:
        file.write(currenturl + '\n')
    if len(teams) == 0:
        break  # fbref haven't uploaded all the data yet.
        # if there is no data about this season, there probably won't be any data about the previous one either
        # if an internal server error occured it will be easy to figure out where to continue
    # get link to previous season
    prevseason = tree.xpath('/html/body/div[@id="wrap"]/div/div[@id="meta"]/div/div[@class="prevnext"]/a[@class="button2 prev"]/@href')
    if len(prevseason) == 0:
        # Oldest season reached: stop cleanly.  BUGFIX: the old code set the
        # flag but still executed prevseason[0], raising IndexError here.
        prevseasonexist = False
    else:
        prevseasonurl = str(prevseason[0])
        currenturl = baseurl + prevseasonurl
        print(currenturl)
print('') | kornasm/GIS | scrapers/SeasonSquadScraper.py | SeasonSquadScraper.py | py | 3,520 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.sleep",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 15,
... |
32058498206 | from tkinter import *
import matplotlib
import os
from pandas import DataFrame
import numpy as np
import pandas as pd
from tkinter import ttk
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
from Functions import *
from tkinter import filedialog
from tkinter import colorchooser
matplotlib.use('TkAgg')
from tkinter import messagebox
class BoxPlot(Frame):
    """Tkinter frame that turns folders of solar-cell JV scans into box plots.

    The user selects a parent directory whose sub-directories each hold one
    "split" of devices; ``dataframe_creation`` concatenates the per-split
    summaries into ``self.DF`` (10 columns per split: filename plus
    rev/fwd values of Jsc, Voc, FF and PCE at fixed offsets 2..9).
    Plot methods render into module-level ``canvas``/``toolbar`` globals so
    the previous figure can be removed before drawing a new one.
    """
    def __init__(self, parent, controller):
        super().__init__(parent)
        ##figure frame
        self.box_plot = Frame(self, bg='white', relief='solid', borderwidth=0)
        self.box_plot.place(relx=0, rely=0, relwidth=1, relheight=0.75)
        ##customize_column
        customize = Frame(self, bg='white', relief='solid', borderwidth=1)
        customize.place(relx=0, rely=0.75, relwidth=1, relheight=1)
        label_customize = Label(self, text='Customize', bg='white', font='Calibri 14')
        label_customize.place(x=50, rely=0.72, relheight=0.06)
        Button(self.box_plot, text='Select Data', font='calibri 14', command=self.dataframe_creation, borderwidth=0,
               bg='white', cursor='hand2').place(x=3, y=0)
        # Scan-direction selector (Reverse / Forward / Together).
        Label(customize, text='Scan:', font=('calibri 13'), bg='white', fg='#28486B').place(x=14, y=30)
        self.scan_choice = ['Reverse', 'Forward', 'Together']
        self.scan_choice_var = StringVar()
        self.scan_choice_var.set('Reverse')
        menu_2 = ttk.Combobox(customize, textvariable=self.scan_choice_var, values=self.scan_choice, state='readonly')
        menu_2.place(x=100, y=34)
        menu_2.config(font=('calibri', (10)), width=8)
        self.split_count = 0  # number of splits (sub-directories) loaded so far
        # Maps parameter key -> y-axis label used by the plot methods.
        self.optionlist = {'PCE': 'Power Conversion Efficiency (%)', 'Jsc': 'Current Density (mA/cm$^2$)',
                           'FF': 'Fill Factor (%)', 'Voc': 'Open Circuit Voltage (Volts)'}
        Label(customize, text='Y-axis:', font=('calibri 12'), bg='white', fg='#28486B').place(x=14, y=74)
        # `parameter` is intentionally module-global so callbacks share it.
        global parameter
        parameter = StringVar()
        parameter.set('PCE')
        menu = ttk.Combobox(customize, textvariable=parameter, values=['PCE', 'Jsc', 'FF', 'Voc'], state='readonly')
        menu.config(font=('calibri', (10)), width=8)
        menu.place(x=100, y=78)
        Label(customize, text='Y-Label Rotation:', font=('calibri 13'), bg='white', fg='#28486B').place(x=250, y=30)
        self.xlabel_angle = IntVar()
        menu_3 = ttk.Combobox(customize, textvariable=self.xlabel_angle, values=[0, 5, 10, 15, 20, 25, 30, 35, 40, 45])
        menu_3.config(font=('calibri', (10)), width=2)
        menu_3.place(x=450, y=34)
        Label(customize, text='Label Size:', font=('calibri 13 '), bg='white', fg='#28486B').place(x=250, y=70)
        self.label_size = IntVar()
        self.label_size.set(5)
        menu_4 = ttk.Combobox(customize, textvariable=self.label_size, values=[1, 2, 3, 4, 5, 6, 7, 8, 9])
        menu_4.config(font=('calibri', (10)), width=2)
        menu_4.place(x=450, y=74)
        CustomButton(customize, text='Box Plot', command=self.plot_it, width=8, borderwidth=0.5).place(x=15, y=120)
        CustomButton(customize, text='All in 1', command=self.plot_together, width=8, borderwidth=0.5).place(x=120,
                                                                                                            y=120)
        CustomButton(customize, text='Plot Hero JVs', command=self.hero_jvs, width=16, borderwidth=0.5).place(x=230,
                                                                                                              y=120)
        button_export_data = Button(self.box_plot, text='Export Data', command=self.export_data, borderwidth=0,
                                    bg='white', cursor='hand2')
        button_export_data.place(x=0, rely=0.88)
        button_start_over = Button(self.box_plot, text='Start Over', command=self.refresh, borderwidth=0, bg='white',
                                   cursor='hand2')
        button_start_over.place(x=0, rely=0.8)
        self.default_color_list = ['yellowgreen','rosybrown','turquoise','mediumpurple', 'lightblue', 'lightgreen','lightpink', 'grey']
        self.color_list = []
        self.entry_list = [] ##to store all the string variables of entry widget
        self.entry_widget_list = [] ## storing all the entry widget
        self.DF = pd.DataFrame()  # accumulated analysis results, 10 columns per split
    def show_frame(self, cont): # to raise a selected frame
        # NOTE(review): self.frames is never assigned in this class — this
        # method looks like dead code copied from a controller class; calling
        # it would raise AttributeError.  Confirm before removing.
        frame = self.frames[cont]
        frame.tkraise()
    def auto_entry_generation(self, main_dir):
        """Create one label Entry widget per selected sub-directory."""
        for i in range(len(main_dir)):
            self.entry_list.append(['']) ##just a dummy place for it change in the next line
            self.entry_list[i] = StringVar()
            e = CustomEntry(self.box_plot, width=20, textvariable=self.entry_list[i])
            e.place(x=4, y=80 + 40*i, height=30)
            self.entry_widget_list.append(e)
    def get_directory(self): # getting the directory
        """Ask the user for a directory; return its immediate sub-directories.

        Returns None implicitly when the dialog is cancelled (empty string).
        """
        dir = filedialog.askdirectory()
        # NOTE(review): `is not ''` relies on string interning and triggers a
        # SyntaxWarning on modern Python — should be `!= ''`.
        if dir is not '':
            all_dirs_in_dir = [(dir + '/' + name) for name in os.listdir(dir) if
                               os.path.isdir(dir + '/' + name)] ## did not use os.join in here as it joins with '\' not with '/'
            return all_dirs_in_dir
    def dataframe_creation(self):
        """Select a data directory and append each sub-directory's analysis to self.DF."""
        ## asking the user to choose path of the file to analyze
        ## making dataframe with all the data analyzed
        ## using the function dataframe() to make data frame and concatinate the output everytime you select one set of file
        main_dir = self.get_directory()
        ## what if user closes the filedialog without choosing file?
        ## self. filepath will be an empty string..
        if main_dir != None:
            self.auto_entry_generation(main_dir)
            for dir in main_dir:
                self.DF = pd.concat([self.DF, dataframe(dir)], axis=1)
                split_name = cut_string(dir,
                                        "/") # for getting the name of the folder, check the function cut_string
                self.entry_list[self.split_count].set(split_name)
                self.split_count = self.split_count + 1
                ##to avoid any subdirectories which might have present in dir
    def plot_it(self):
        """Draw a box plot of the selected parameter for every split.

        Column layout: each split occupies 10 consecutive DF columns; the
        parameter/scan combination selects a fixed offset (Jsc 2/3,
        Voc 4/5, FF 6/7, PCE 8/9 for reverse/forward respectively).
        """
        self.label_list = []
        for entry in self.entry_list:
            self.label_list.append(entry.get())
        x = self.split_count
        data = []
        data_rev = []
        data_fwd = []
        if parameter.get() == 'Jsc':
            if self.scan_choice_var.get() == 'Reverse':
                index_count = 2
                for i in range(x):
                    lst = self.DF.iloc[:, index_count]
                    lst = lst.dropna()
                    data.append(lst)
                    index_count = index_count + 10
            elif self.scan_choice_var.get() == 'Forward':
                index_count = 3
                for i in range(x):
                    lst = self.DF.iloc[:, index_count]
                    lst = lst.dropna()
                    data.append(lst)
                    index_count = index_count + 10
            elif self.scan_choice_var.get() == 'Together':
                index_count = 2
                for i in range(x):
                    lst_rv = self.DF.iloc[:, index_count]
                    lst_rv = lst_rv.dropna()
                    lst_fwd = self.DF.iloc[:, index_count + 1]
                    lst_fwd = lst_fwd.dropna()
                    data_rev.append(lst_rv)
                    data_fwd.append(lst_fwd)
                    index_count = index_count + 10
        elif parameter.get() == 'Voc':
            if self.scan_choice_var.get() == 'Reverse':
                index_count = 4
                for i in range(x):
                    lst = self.DF.iloc[:, index_count]
                    lst = lst.dropna()
                    data.append(lst)
                    index_count = index_count + 10
            elif self.scan_choice_var.get() == 'Forward':
                index_count = 5
                for i in range(x):
                    lst = self.DF.iloc[:, index_count]
                    lst = lst.dropna()
                    data.append(lst)
                    index_count = index_count + 10
            elif self.scan_choice_var.get() == 'Together':
                index_count = 4
                for i in range(x):
                    lst_rv = self.DF.iloc[:, index_count]
                    lst_rv = lst_rv.dropna()
                    lst_fwd = self.DF.iloc[:, index_count + 1]
                    lst_fwd = lst_fwd.dropna()
                    data_rev.append(lst_rv)
                    data_fwd.append(lst_fwd)
                    index_count = index_count + 10
        elif parameter.get() == 'FF':
            if self.scan_choice_var.get() == 'Reverse':
                index_count = 6
                for i in range(x):
                    lst = self.DF.iloc[:, index_count]
                    lst = lst.dropna()
                    data.append(lst)
                    index_count = index_count + 10
            elif self.scan_choice_var.get() == 'Forward':
                index_count = 7
                for i in range(x):
                    lst = self.DF.iloc[:, index_count]
                    lst = lst.dropna()
                    data.append(lst)
                    index_count = index_count + 10
            elif self.scan_choice_var.get() == 'Together':
                index_count = 6
                for i in range(x):
                    lst_rv = self.DF.iloc[:, index_count]
                    lst_rv = lst_rv.dropna()
                    lst_fwd = self.DF.iloc[:, index_count + 1]
                    lst_fwd = lst_fwd.dropna()
                    data_rev.append(lst_rv)
                    data_fwd.append(lst_fwd)
                    index_count = index_count + 10
        elif parameter.get() == 'PCE':
            if self.scan_choice_var.get() == 'Reverse':
                index_count = 8
                for i in range(x):
                    lst = self.DF.iloc[:, index_count]
                    lst = lst.dropna()
                    data.append(lst)
                    index_count = index_count + 10
            elif self.scan_choice_var.get() == 'Forward':
                index_count = 9
                for i in range(x):
                    lst = self.DF.iloc[:, index_count]
                    lst = lst.dropna()
                    data.append(lst)
                    index_count = index_count + 10
            elif self.scan_choice_var.get() == 'Together':
                index_count = 8
                for i in range(x):
                    lst_rv = self.DF.iloc[:, index_count]
                    lst_rv = lst_rv.dropna()
                    lst_fwd = self.DF.iloc[:, index_count + 1]
                    lst_fwd = lst_fwd.dropna()
                    data_rev.append(lst_rv)
                    data_fwd.append(lst_fwd)
                    index_count = index_count + 10
        ## plotting both scans in single graph
        f = Figure(figsize=(5, 3), dpi=150, tight_layout=True)
        if self.scan_choice_var.get() == 'Together':
            chart = f.add_subplot(111)
            chart.set_ylabel(self.optionlist[parameter.get()], fontsize=7)
            chart.tick_params(axis='both', which='major', labelsize=self.label_size.get(), direction='in')
            ##plotting fwd scan
            chart.boxplot(data_fwd, meanline=True, widths=0.2, positions=[q for q in range(x)],
                          labels=['' for q in range(x)],
                          whiskerprops=dict(color='red'), boxprops=dict(color='red'),
                          capprops=dict(color='red'), showfliers=False)
            ## plotting rev scans
            chart.boxplot(data_rev, meanline=True, widths=0.2, labels=['' for q in range(x)],
                          positions=[q + 0.3 for q in range(x)], showfliers=False)
            ##adding random jitters to the box plot
            for i in range(self.split_count):
                x_axis = np.random.normal(i, 0.03, size=len(data_fwd[i]))
                chart.plot(x_axis, data_fwd[i], 'r.', linestyle='none', ms=3)
                x_axis = np.random.normal(i + 0.3, 0.03, size=len(data_rev[i]))
                chart.plot(x_axis, data_rev[i], 'k.', linestyle='none', ms=3)
            # adding ticks in middle of rev and fwd boxes
            chart.set_xticks([x + 0.15 for x in range(self.split_count)])
            chart.set_xticklabels(self.label_list)
            # Invisible proxy lines used only to build the legend entries.
            hB, = chart.plot([1, 1], 'k-')
            hR, = chart.plot([1, 1], 'r-')
            chart.legend((hB, hR), ('Reverse Scans', 'Forward Scans'), prop={'size': 6})
            hR.set_visible(False)
            hB.set_visible(False)
        else:
            chart = f.add_subplot(111)
            chart.set_ylabel(self.optionlist[parameter.get()], fontsize=7)
            chart.tick_params(axis='both', which='major', labelsize=self.label_size.get(), direction='in')
            chart.boxplot(data, meanline=True, labels=self.label_list, showfliers=False)
            for i in range(self.split_count):
                x_axis = np.random.normal(i + 1, 0.05, size=len(data[i]))
                chart.plot(x_axis, data[i], 'r.', linestyle='none', ms=4, )
        # Replace any previously embedded figure/toolbar before drawing.
        global canvas
        global toolbar
        try:
            canvas.get_tk_widget().place_forget()
            toolbar.place_forget()
        except:
            pass
        chart.tick_params(axis='x', length=0)
        canvas = FigureCanvasTkAgg(f, self)
        canvas.draw()
        canvas.get_tk_widget().place(x=300, y=50)
        toolbar = NavigationToolbar2Tk(canvas, self)
        toolbar.configure(background = 'white')
        toolbar._message_label.config(background='white')
        toolbar.update()
        toolbar.place(x=350, y=0)
    def hero_jvs(self):
        """Plot the JV curve of each split's best (highest-PCE) device.

        Column 0 of each split's 10-column group holds the source filename;
        columns 8/9 hold PCE (rev/fwd), hence the ``index_count - 8`` lookup.
        """
        self.label_list = []
        for entry in self.entry_list:
            self.label_list.append(entry.get())
        labels = self.label_list[0:self.split_count]
        f = Figure(figsize=(4, 3), dpi=140, tight_layout=True)
        chart = f.add_subplot(111, ylabel='Current Density (mA/cm$^2$)',
                              xlabel='Voltage (Volts)')
        chart.tick_params(axis='both', which='major', labelsize=5, direction='in')
        if self.scan_choice_var.get() == 'Reverse':
            ##plotting all the hero rev scans
            index_count_rev = 8
            for i in range(self.split_count):
                lst_rev = self.DF.iloc[:, index_count_rev]
                lst_rev = lst_rev.tolist()
                index = lst_rev.index(max(lst_rev))  # row of the best PCE
                filename = self.DF.iloc[index, index_count_rev - 8]
                data = np.genfromtxt(filename, skip_header=10, dtype=float, delimiter='\t') # loading txt file
                voltage_points = data[:, 0]
                current_points = data[:, 1]
                chart.plot(voltage_points, current_points, label=labels[i])
                if labels[i] != '':
                    chart.legend(prop={'size': self.label_size.get()})
                chart.set_ylim(bottom=0, auto=True)
                chart.set_xlim(left=0, auto = True)
                chart.yaxis.label.set_size(7)
                chart.xaxis.label.set_size(7)
                index_count_rev = index_count_rev + 10
        elif self.scan_choice_var.get() == 'Forward':
            ## plotting all the hero fwd scans
            index_count_fwd = 9
            for i in range(self.split_count):
                lst_fwd = self.DF.iloc[:, index_count_fwd]
                lst_fwd = lst_fwd.tolist()
                index = lst_fwd.index(max(lst_fwd))
                filename = self.DF.iloc[index, index_count_fwd - 8]
                data = np.genfromtxt(filename, skip_header=10, dtype=float, delimiter='\t') # loading txt file
                voltage_points = data[:, 0]
                current_points = data[:, 1]
                chart.plot(voltage_points, current_points, label=labels[i])
                if labels[i] != '':
                    chart.legend(prop={'size': self.label_size.get()})
                chart.set_ylim(bottom=0, auto=True)
                chart.set_xlim(left=0, auto = True)
                chart.yaxis.label.set_size(7)
                chart.xaxis.label.set_size(7)
                index_count_fwd = index_count_fwd + 10
        else:
            messagebox.showinfo(message =
                                'Together is not supported for this function.\nIf you wish to plot Rev and Fwd together, use Plot JV.')
        # Replace any previously embedded figure/toolbar before drawing.
        global canvas
        global toolbar
        try:
            canvas.get_tk_widget().place_forget()
            toolbar.place_forget()
        except:
            pass
        canvas = FigureCanvasTkAgg(f, self)
        canvas.draw()
        canvas.get_tk_widget().place(x=470, y=50)
        toolbar = NavigationToolbar2Tk(canvas, self)
        toolbar.config(background='white')
        toolbar._message_label.config(background='white')
        toolbar.update()
        toolbar.place(x=620, y=470)
    def plot_together(self):
        """Draw all four parameters (PCE/Jsc/Voc/FF) as a 2x2 box-plot grid.

        NOTE(review): the whole body is wrapped in a bare try/except that
        silently swallows every error — failures render nothing at all.
        """
        try:
            self.label_list = []
            for entry in self.entry_list:
                self.label_list.append(entry.get())
            x = self.split_count
            labels = self.label_list
            data = []
            data_rev = []
            data_fwd = []
            f = Figure(figsize=(7, 3.5), dpi=140, tight_layout=True)
            ax1 = f.add_subplot(222, ylabel='Current Density (mA/cm$^2$)', autoscale_on=True)
            ax2 = f.add_subplot(223, ylabel='Open Circuit Voltage (Volts)', autoscale_on=True)
            ax3 = f.add_subplot(224, ylabel='Fill Factor (%)', autoscale_on=True)
            ax4 = f.add_subplot(221, ylabel='Power Conversion Efficiency (%)', autoscale_on=True)
            ax_list = [ax1, ax2, ax3, ax4]
            for ax in ax_list:
                for tick in ax.get_xticklabels():
                    tick.set_rotation(self.xlabel_angle.get())
                ax.yaxis.label.set_size(6)
                ax.tick_params(axis='both', which='major', labelsize=self.label_size.get(), direction='in')
            if self.scan_choice_var.get() == 'Reverse':
                index_count = 2
                for ax in ax_list:  # doing for all the subplots
                    data = []
                    for i in range(x):  # for all the splits within a subplot
                        lst = self.DF.iloc[:, index_count]
                        lst = lst.dropna()
                        data.append(lst)
                        index_count = index_count + 10
                    # boxplot of a subplot
                    ax.boxplot(data, meanline=True, widths=0.2, positions=[i for i in range(x)], labels=labels,
                               whiskerprops=dict(color='red'), boxprops=dict(color='red'),
                               capprops=dict(color='red'), showfliers=False)
                    # adding jitters to the subplot
                    for i in range(x):
                        x_axis = np.random.normal(i, 0.03, size=len(data[i]))
                        ax.plot(x_axis, data[i], 'r.', linestyle='none', ms=2)
                    index_count = (index_count - (
                            x * 10)) + 2  # a little equation to adjust the index count back to the next graph where x is number of time the increment takes place.
            elif self.scan_choice_var.get() == 'Forward':
                index_count = 3
                for ax in ax_list:  # doing for all the subplots
                    data = []
                    for i in range(x):  # for all the splits within a subplot
                        lst = self.DF.iloc[:, index_count]
                        lst = lst.dropna()
                        data.append(lst)
                        index_count = index_count + 10
                    # boxplot of a subplot
                    ax.boxplot(data, meanline=True, widths=0.2, positions=[i for i in range(x)], labels=labels,
                               whiskerprops=dict(color='red'), boxprops=dict(color='red'),
                               capprops=dict(color='red'), showfliers=False)
                    # adding jitters to the subplot
                    for i in range(x):
                        x_axis = np.random.normal(i, 0.03, size=len(data[i]))
                        ax.plot(x_axis, data[i], 'r.', linestyle='none', ms=2)
                    index_count = (index_count - (
                            x * 10)) + 2  # a little equation to adjust the index count back to the next graph where x is number of time the increment takes place.
            elif self.scan_choice_var.get() == 'Together':
                index_count = 2
                for ax in ax_list:
                    data_rev = []
                    data_fwd = []
                    for i in range(x):
                        lst = self.DF.iloc[:, index_count]
                        lst = lst.dropna()
                        data_rev.append(lst)
                        lst = self.DF.iloc[:, index_count + 1]
                        lst = lst.dropna()
                        data_fwd.append(lst)
                        index_count = index_count + 10
                    ax.boxplot(data_fwd, meanline=True, widths=0.2, positions=[i for i in range(x)],
                               labels=['' for i in range(x)],
                               whiskerprops=dict(color='red'), boxprops=dict(color='red'),
                               capprops=dict(color='red'), showfliers=False)
                    ax.boxplot(data_rev, meanline=True, widths=0.2, positions=[i + 0.3 for i in range(x)],
                               labels=['' for i in range(x)],
                               whiskerprops=dict(color='black'), boxprops=dict(color='black'),
                               capprops=dict(color='black'), showfliers=False)
                    for i in range(x):
                        x_axis = np.random.normal(i, 0.03, size=len(data_fwd[i]))
                        ax.plot(x_axis, data_fwd[i], 'r.', linestyle='none', ms=2)
                        x_axis = np.random.normal(i + 0.3, 0.03, size=len(data_rev[i]))
                        ax.plot(x_axis, data_rev[i], 'k.', linestyle='none', ms=2)
                    # placing ticks in between rev and forward box plots
                    ax.set_xticks([i + 0.15 for i in range(x)])
                    ax.set_xticklabels(labels)
                    index_count = (index_count - (
                            x * 10)) + 2
            # a little equation to adjust the index count back to the next graph where x is number of time the increment takes place.
            # Replace any previously embedded figure/toolbar before drawing.
            global canvas
            global toolbar
            try:
                canvas.get_tk_widget().place_forget()
                toolbar.place_forget()
            except:
                pass
            canvas = FigureCanvasTkAgg(f, self)
            canvas.draw()
            canvas.get_tk_widget().place(x=250, y=50)
            toolbar = NavigationToolbar2Tk(canvas, self)
            toolbar.config(background='white')
            toolbar._message_label.config(background='white')
            toolbar.update()
            toolbar.place(x=300, y=0)
        except:
            pass
    def export_data(self):
        """Save the accumulated DataFrame as summary.csv in a chosen folder."""
        try:
            location = filedialog.askdirectory()
            self.DF.to_csv(rf'{location}' + '/summary.csv', index=None, header=True)
        except:
            # NOTE(review): bare except hides write failures as well as a
            # cancelled dialog — consider narrowing.
            pass
    def refresh(self):
        """Reset state: clear the DataFrame, figure, toolbar and entry widgets."""
        self.DF = DataFrame()
        global canvas
        global toolbar
        try:
            canvas.get_tk_widget().place_forget()
            toolbar.place_forget()
        except:
            pass
        self.split_count = 0
        for e in self.entry_widget_list:
            e.place_forget()
        self.entry_list = []
class CustomButton(Frame):
    """A Button wrapped in a light-blue Frame that acts as a thin border.

    All extra positional and keyword arguments are forwarded to the inner
    tkinter Button.
    """

    def __init__(self, parent, *args, **kwargs):
        super().__init__(parent)
        self.configure(background='lightblue')
        inner = Button(self, relief='solid', bg='white', *args, **kwargs)
        inner.pack(fill="both", expand=2, padx=0.5, pady=0.5)
class CustomEntry(Frame):
    """An Entry wrapped in a dark-blue Frame that acts as a 1px border.

    All extra positional and keyword arguments are forwarded to the inner
    tkinter Entry.
    """

    def __init__(self, parent, *args, **kwargs):
        super().__init__(parent)
        self.configure(background='#28486B')
        field = Entry(self, relief='sunken', *args, **kwargs)
        field.pack(fill="both", expand=2, padx=1, pady=1)
class ScrollableFrame(Frame):
    """A Frame with a vertical scrollbar.

    Add widgets to ``self.scrollable_frame``; the surrounding Canvas and
    Scrollbar pair keeps them scrollable as the content grows.
    """

    def __init__(self, container, *args, **kwargs):
        super().__init__(container, *args, **kwargs)
        viewport = Canvas(self)
        vbar = ttk.Scrollbar(self, orient="vertical", command=viewport.yview)
        self.scrollable_frame = Frame(viewport)
        # Whenever the inner frame resizes, grow the scrollable region to
        # cover everything drawn on the canvas.
        self.scrollable_frame.bind(
            "<Configure>",
            lambda event: viewport.configure(scrollregion=viewport.bbox("all")),
        )
        viewport.create_window((0, 0), window=self.scrollable_frame, anchor="nw")
        viewport.configure(yscrollcommand=vbar.set)
        viewport.pack(side="left", fill="both", expand=True)
        vbar.pack(side="right", fill="y")
| patidarrahul/PlotIT | extra_python files/boxplot.py | boxplot.py | py | 25,877 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.use",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk.Combobox",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"... |
70474642983 |
import torch
import torch.nn.functional as F
from .modules import Module, ModuleList, ModuleDict
from pytorch_transformers import BertModel, BertConfig,BertTokenizer
from .modules.prediction import registry as prediction
from .modules.prediction import Prediction_Bert,Prediction_Bert_GAT
from .modules.GCNS import *
import torch.nn as nn
class PhraseGating(nn.Module):
    """Sigmoid gate over 768-dimensional phrase embeddings.

    Each vector is passed through a two-layer MLP (768 -> 1536 -> 768)
    with LeakyReLU and dropout, ending in a sigmoid so the output can be
    used as an element-wise gating mask.
    """

    def __init__(self, args):
        super(PhraseGating, self).__init__()
        widened = 768 * 2
        self.gate_fc = nn.Sequential(
            nn.Dropout(args.dropout),
            nn.Linear(768, widened),
            nn.LeakyReLU(),
            nn.Dropout(args.dropout),
            nn.Linear(widened, 768),
            nn.Sigmoid(),
        )

    def forward(self, phrase_vectors):
        """Return gate values in (0, 1), same shape as the (..., 768) input."""
        return self.gate_fc(phrase_vectors)
class TextNet(nn.Module):
    """BERT encoder with a tanh-squashed projection head.

    ``forward`` returns a tuple of (projected [CLS] features, full
    token-level hidden states).  The projection width ``code_length`` is
    ``args.hidden_size * 4``.
    """

    def __init__(self, args):
        super(TextNet, self).__init__()
        # code_length is the dimension the fc layer projects to.
        code_length = args.hidden_size * 4
        modelConfig = BertConfig.from_pretrained(args.bert_config_dir)
        self.textExtractor = BertModel.from_pretrained(
            args.bert_model_dir, config=modelConfig)
        self.textExtractor.train()
        embedding_dim = self.textExtractor.config.hidden_size
        self.fc = nn.Linear(embedding_dim, code_length)
        self.tanh = torch.nn.Tanh()

    def forward(self, tokens, segments, input_masks):
        """Encode token ids; return (tanh(fc([CLS])), all hidden states)."""
        output = self.textExtractor(tokens, token_type_ids=segments,
                                    attention_mask=input_masks)
        # output[0] has shape (batch size, sequence length, hidden dim);
        # position 0 is the [CLS] token used as the sequence summary.
        cls_embedding = output[0][:, 0, :]
        features = self.tanh(self.fc(cls_embedding))
        return features, output[0]

    def do_eval(self):
        """Switch only the BERT backbone to eval mode (head left untouched)."""
        self.textExtractor.eval()

    def do_train(self):
        """Switch only the BERT backbone back to train mode."""
        self.textExtractor.train()
class Network(Module):
    """Sentence-matching model combining BERT features with a graph network.

    For each pair in the batch, word tokens and syntactic phrases from both
    sentences (plus one summary node per sentence) are stacked into one node
    matrix, passed through a heterogeneous GAT over ``inputs['Coomatrix']``
    edges, pooled back per sentence, and fed to the prediction head together
    with the BERT [CLS] features.
    """
    def __init__(self, args):
        super().__init__()
        self.dropout = args.dropout
        self.bert_feature = TextNet(args)
        #SIGN-HGAT
        self.HeteGAT = HeteGAT(input_dim=768, hidden_dim=args.hidden_size, output_dim=args.hidden_size,n_type = args.n_type,
                       dropout_rate=self.dropout,layer_num=args.gat_layernum)
        #As for SIGN-GAT use:
        #self.HeteGAT = GAT(input_dim=768, hidden_dim=args.hidden_size, output_dim=args.hidden_size,n_type = args.n_type,
                               #dropout_rate=self.dropout,layer_num=args.gat_layernum)
        # Fuses [sentence node, word pool, phrase pool] -> hidden_size.
        self.fusions_node = nn.Linear(args.hidden_size*3,args.hidden_size)
        self.prediction = Prediction_Bert_GAT(args)
        self.bert_predict = Prediction_Bert(args)
    def forward(self, inputs):
        """Run one batch.

        ``inputs`` is a dict with BERT tensors, per-example token lengths
        (text1_length/text2_length), phrase index lists (text1_phrase/
        text2_phrase), per-example cumulative node counts (node_num), and the
        COO edge index of the whole batched graph (Coomatrix).
        Returns the prediction head's output.
        """
        bert_feature,output = self.bert_feature(inputs['text_batch_tensor'],inputs['segment_batch_tensor'],inputs['mask_batch_tensor'])
        feature_matrix = []
        #
        # #output_gating = self.phrase_gate(output)
        # Build the node feature matrix example by example: word vectors of
        # sentence A, phrase vectors of A, words of B, phrases of B, then a
        # max-pooled summary node for each sentence.
        for id in range(inputs['batch_size']):
            # print(id)
            # Slice out token embeddings, skipping [CLS] (pos 0) and the
            # separator between the two sentences.
            text_a = output[id, 1:1+inputs['text1_length'][id], :]
            text_b = output[id, 2+inputs['text1_length'][id]:2+inputs['text1_length'][id]+inputs['text2_length'][id], :]
            # text_a_gate = output_gating[id, 1:1 + inputs['text1_length'][id], :]
            # text_b_gate = output_gating[id,
            #               2 + inputs['text1_length'][id]:2 + inputs['text1_length'][id] + inputs['text2_length'][id], :]
            # node_list = inputs['Nodelist']
            feature_matrix.append(text_a)
            # print(text_a.shape)
            for i in range(len(inputs['text1_phrase'][id])):
                # print(inputs['text1_phrase'][id][i])
                # Single-token phrases reuse the token vector; multi-token
                # phrases are max-pooled over their token span.
                if len(inputs['text1_phrase'][id][i]) == 1:
                    feature_matrix.append(text_a[i, :].unsqueeze(0))
                    #feature_matrix.append((text_a[i, :]*text_a_gate[i,:]).unsqueeze(0))
                else:
                    feature_matrix.append(self.poolings(text_a[inputs['text1_phrase'][id][i][0]:inputs['text1_phrase'][id][i][-1]+1,:]))
                    #feature_matrix.append(torch.mean(text_a[inputs['text1_phrase'][id][i][0]:inputs['text1_phrase'][id][i][-1]+1,:],dim=0,keepdim=True))
                    # feature_matrix.append(torch.sum(
                    #     text_a[inputs['text1_phrase'][id][i][0]:inputs['text1_phrase'][id][i][-1] + 1, :] *
                    #     text_a_gate[inputs['text1_phrase'][id][i][0]:inputs['text1_phrase'][id][i][-1] + 1, :],dim=0,keepdim=True))
            feature_matrix.append(text_b)
            for i in range(len(inputs['text2_phrase'][id])):
                if len(inputs['text2_phrase'][id][i]) == 1:
                    feature_matrix.append(text_b[i, :].unsqueeze(0))
                    #feature_matrix.append((text_b[i, :] * text_b_gate[i, :]).unsqueeze(0))
                else:
                    feature_matrix.append(self.poolings(text_b[inputs['text2_phrase'][id][i][0]:inputs['text2_phrase'][id][i][-1]+1,:]))
                    # feature_matrix.append(
                    #     torch.mean(text_b[inputs['text2_phrase'][id][i][0]:inputs['text2_phrase'][id][i][-1] + 1, :], dim=0,
                    #                keepdim=True))
                    # feature_matrix.append(torch.sum(
                    #     text_b[inputs['text2_phrase'][id][i][0]:inputs['text2_phrase'][id][i][-1] + 1, :] *
                    #     text_b_gate[inputs['text2_phrase'][id][i][0]:inputs['text2_phrase'][id][i][-1] + 1, :],dim=0,keepdim=True))
            # Two sentence-level summary nodes (max-pool over all tokens).
            feature_matrix.append(self.poolings(text_a))
            feature_matrix.append(self.poolings(text_b))
        feature_matrix = torch.cat(feature_matrix, dim=0)
        GCN_matrix = self.HeteGAT(x=feature_matrix, edge_index=inputs['Coomatrix'])
        # Recover per-example node groups from the flat GAT output using the
        # cumulative node counts in inputs['node_num']; the last two nodes of
        # each example are the sentence summary nodes for A and B.
        gat_a = []
        gat_b = []
        word_a = []
        phrase_a = []
        word_b = []
        phrase_b = []
        last_node_num = 0
        for id in range(inputs['batch_size']):
            gat_a.append(GCN_matrix[inputs['node_num'][id] - 2, :].unsqueeze(0))
            gat_b.append(GCN_matrix[inputs['node_num'][id] - 1, :].unsqueeze(0))
            word_a_pos = last_node_num # 0
            phrase_a_pos = last_node_num + inputs['text1_length'][id] # 0 + 3 = 3
            word_b_pos = last_node_num + inputs['text1_length'][id] + len(inputs['text1_phrase'][id]) # 0 + 3 + 2 = 5
            phrase_b_pos = last_node_num + inputs['text1_length'][id] + len(inputs['text1_phrase'][id]) + \
                           inputs['text2_length'][id] # 0 + 3 + 2 + 3 = 8
            # node num : 12
            word_a.append(self.poolings(GCN_matrix[word_a_pos:phrase_a_pos]))
            phrase_a.append(self.poolings(GCN_matrix[phrase_a_pos:word_b_pos]))
            word_b.append(self.poolings(GCN_matrix[word_b_pos:phrase_b_pos]))
            phrase_b.append(self.poolings(GCN_matrix[phrase_b_pos:inputs['node_num'][id] - 2]))
            last_node_num = inputs['node_num'][id]
        a = torch.cat(gat_a, dim=0)
        b = torch.cat(gat_b, dim=0)
        word_a = torch.cat(word_a, dim=0)
        phrase_a = torch.cat(phrase_a, dim=0)
        word_b = torch.cat(word_b, dim=0)
        phrase_b = torch.cat(phrase_b, dim=0)
        # Fuse sentence node + word pool + phrase pool per sentence, then
        # regularise and activate before the prediction head.
        a_s = self.fusions_node(torch.cat([a, word_a, phrase_a], dim=-1))
        b_s = self.fusions_node(torch.cat([b, word_b, phrase_b], dim=-1))
        a_s = F.dropout(a_s, p=self.dropout, training=self.training)
        b_s = F.dropout(b_s, p=self.dropout, training=self.training)
        a_s = F.relu(a_s)
        b_s = F.relu(b_s)
        result = self.prediction(a_s,b_s,bert_feature)
        return result
        #return self.prediction(a_s,b_s)
    def poolings(self,x):
        # Max-pool over the node/token axis, keeping a leading batch axis.
        return x.max(dim=0)[0].unsqueeze(0)
| XuChen0427/Syntactic-Informed-Graph-Networks-for-Sentence-Matching | hetesrc/network.py | network.py | py | 7,773 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.